diff --git "a/KdE3T4oBgHgl3EQfXwqt/content/tmp_files/load_file.txt" "b/KdE3T4oBgHgl3EQfXwqt/content/tmp_files/load_file.txt" new file mode 100644--- /dev/null +++ "b/KdE3T4oBgHgl3EQfXwqt/content/tmp_files/load_file.txt" @@ -0,0 +1,1084 @@ +filepath=/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf,len=1083 +page_content='Multiple-level Point Embedding for Solving Human Trajectory Imputation with Prediction KYLE K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' QIN, RMIT University, Australia YONGLI REN, RMIT University, Australia WEI SHAO, RMIT University, Australia BRENNAN LAKE, Cuebiq Inc.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=', USA FILIPPO PRIVITERA, Cuebiq Inc.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=', USA FLORA D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' SALIM, University of New South Wales, Australia Sparsity is a common issue in many trajectory datasets, including human mobility data.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' This issue frequently brings more difficulty to relevant learning tasks, such as trajectory imputation and prediction.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Nowadays, little existing work simultaneously deals with imputation and prediction on human trajectories.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' This work plans to explore whether the learning process of imputation and prediction could benefit from each other to achieve better outcomes.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' And the question will be answered by studying the coexistence patterns between missing points and observed ones in incomplete trajectories.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' More specifically, the proposed model develops an imputation component based on the self-attention mechanism to capture the coexistence patterns between observations and missing points among encoder-decoder layers.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Meanwhile, a recurrent unit is integrated to extract the sequential embeddings from newly imputed sequences for predicting the following location.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Furthermore, a new implementation called Imputation Cycle is introduced to enable gradual imputation with prediction enhancement at multiple levels, which helps to accelerate the speed of convergence.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' The experimental results on three different real-world mobility datasets show that the proposed approach has significant advantages over the competitive baselines across both imputation and prediction tasks in terms of accuracy and stability.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' CCS Concepts: • Information systems → Spatial-temporal systems.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Additional Key Words and Phrases: Location Imputation;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Human Trajectory Prediction;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Self-attention Network ACM Reference Format: Kyle K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Qin, Yongli Ren, Wei Shao, Brennan Lake, Filippo Privitera, and Flora D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Salim.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' 2022.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Multiple-level Point Embedding for Solving Human Trajectory Imputation with Prediction.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' ACM Trans.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Spatial Algorithms Syst.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' 1, 1, Article 1 (January 2022), 22 pages.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' https://doi.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='org/10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='1145/1122445.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='1122333 1 INTRODUCTION Mining knowledge on datasets such as time series and human mobility data has received much attention from the public [4, 6, 17, 24, 28].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' However, missing values that commonly exist in this type of dataset can implicitly influence the Authors’ addresses: Kyle K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Qin, RMIT University, 124 La Trobe St, Melbourne, VIC, Australia, 3000, kyle.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='qin@hotmail.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='com;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Yongli Ren, RMIT University, 124 La Trobe St, Melbourne, VIC, Australia, 3000, yongli.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='ren@rmit.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='edu.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='au;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Wei Shao, RMIT University, 124 La Trobe St, Melbourne, VIC, Australia, 3000, wei.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='shao@rmit.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='edu.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='au;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Brennan Lake, Cuebiq Inc.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=', 15 West 27th Street, New York, NY, USA, 10001, blake@cuebiq.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='com;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Filippo Privitera, Cuebiq Inc.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=', 15 West 27th Street, New York, NY, USA, 10001, fprivitera@cuebiq.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='com;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Flora D.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Salim, University of New South Wales, School of Computer Science and Engineering, Engineering Rd, Kensington, NSW, Australia, 2052, flora.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='salim@unsw.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='edu.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='au.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Permission to make digital or hard copies of all or part of this work for personal or classroom use is granted without fee provided that copies are not made or distributed for profit or commercial advantage and that copies bear this notice and the full citation on the first page.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Copyrights for components of this work owned by others than ACM must be honored.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Abstracting with credit is permitted.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' To copy otherwise, or republish, to post on servers or to redistribute to lists, requires prior specific permission and/or a fee.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Request permissions from permissions@acm.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='org.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' © 2022 Association for Computing Machinery.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Manuscript submitted to ACM Manuscript submitted to ACM 1 arXiv:2301.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='04482v1 [cs.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='LG] 11 Jan 2023 2 Qin et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' quality of the analysis and learning process.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' As we know, data on human mobility at a fine-grained scale can help with understanding people’s movement patterns, activities, and potential intentions, allowing customized recommendations and services to be delivered to individuals or groups.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' However, human trajectory data is frequently incomplete in practice for many reasons, such as power or bandwidth limitations on sensor-based devices and varying communication frequency of signals between devices and servers.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' It has been mentioned that the sparsity of data could cause deterioration of performance on learning human activities or movements [18, 23].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Therefore, imputing missing values in a trajectory becomes a fundamental problem for other learning tasks, such as predictions.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Human mobility prediction or imputation has become prevalent, with various types of mobility data available for collection and processing.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Previous works of human trajectory prediction mainly focused on forecasting future positions at different levels of granularity, including coordinates [9, 11, 26], specific locations such as Point-of-Interests (POIs) [7, 31, 32, 38] and grids or regions on map [8, 22].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Intuitively, the imputation of human trajectories can be formulated as the problem of time series imputation, which a number of relevant approaches have solved.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Previous algorithms proposed for sequence imputation generally leverage the advantages of Generative Adversarial Networks (GANs) [6, 20, 21] or Recurrent Neural Networks (RNNs) [3].' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' However, those generative models that are autoregressive could be vulnerable to compounding errors in long-range temporal sequence modeling, and the recurrent models may face issues of vanishing gradients and difficult training.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Recently, two non-autoregressive models [18, 25] were claimed to have more merits for imputing values in sequential data such as traffic flows and pedestrians’ trajectories in a specific scene.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Nonetheless, these approaches first lack explicit observation and evaluation of the imputation of human mobility in the scope of the city.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' As we know, compared with walking or running trajectories in a small scene, the patterns and factors could be dramatically varied when the distribution of movements is spread at the macro city scope (e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='g.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=', check-in data).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' We would encounter more challenges in this kind of dataset which is normally accompanied by arbitrary movements, sparsity, and no uniform frequency of points collection.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Secondly, the specific dependencies or coexistence patterns between visited locations and missing ones in trajectories were not well considered during the training by the previous methods.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Moreover, little existing work deals with prediction while simultaneously imputing values in the data of trajectories.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' This paper plans to tackle the imputation issue by studying the coexistence patterns or dependencies between missing points and observed ones while incomplete human trajectories are given.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Meanwhile, we argue that while solving the data sparsity issue, the imputation and prediction tasks can complement each other.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' In other words, we develop an approach that could achieve mutually beneficial effects between both tasks with interactive optimization.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' As we know, information will be lost when passing messages in a sequential manner of learning.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Missing values are often randomly distributed in human movement trajectories, potentially inhibiting the efficacy of imputation in a traditional sequential manner (e.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='g.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=', RNNs).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Transformer Networks [29] can effectively learn representations of observations for prediction purposes by weighting the values of sequences via the attention mechanism.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' This method is naturally suitable for learning in a non-sequential way.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' We propose a new approach called multIple-level poiNt embeddinG for solving tRajectory imputAtion wIth predictioN (INGRAIN).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' INGRAIN can effectively capture coexistence dependencies among observed and missing values via multi-head attention in encoder-decoder layers, which helps ameliorate the imputation efficiency.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Meanwhile, the model effectively integrates with a recurrent unit to extract sequential embeddings from newly imputed sequences to predict the next moving positions.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' For flexible learning, a new process called the Imputation Cycle is designed to enable progressive imputation with prediction at multiple levels by constraining the number of missing points for each imputation recurrence.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' In addition, the model delivers messages from the imputation module to Manuscript submitted to ACM Multiple-level Point Embedding for Solving Human Trajectory Imputation with Prediction 3 RNN units to generate sequential embeddings for forecasting purposes.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' In summary, the key contributions of this paper are as follows: We propose one new framework that integrates both autoregressive and non-autoregressive components to impute missing points in human trajectories and predict future movements.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' A model is established for trajectory imputation with prediction based on gradual amelioration at multiple levels via setting the granularity of imputing points.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' It is applicable to different human mobility datasets.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Comprehensive evaluations are conducted to show the efficiency and effectiveness of our model on three real- world human trajectory datasets.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' The paper provides insights into how the method satisfies accurate estimations on missing points and next positions and how trade-offs could be handled in this type of cooperative learning.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' The rest of the paper is organized as follows: Section 2 introduces the related work, and Section 3 provides the definitions for the problem of human trajectory imputation with prediction.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Section 4 presents the details of the proposed solution and its important components.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Section 5 shows the experimental results, and the conclusion is given in Section 6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' 2 RELATED WORK 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='1 Human Trajectory Prediction Many studies have been devoted to human trajectory prediction.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' They can be categorized according to the granularity of targeted locations in prediction.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' A part of the literature has focused on forecasting grids or regions for next movements on maps [8, 22], whereas other parts have explored predicting coordinates [9, 11, 26] or POIs [7, 31, 32] in the future.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' We can further classify the relevant literature from the perspective of traveling scope in human mobility data: some studies [7, 26, 31, 32] have concentrated on predicting locations of people across city areas that are often sparsely or scattered populated, and others [1, 9, 11] have examined the motions of persons in smaller-scale scenes such as crowds on the street or customers on the floor of a building.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' The patterns and factors of human movement could be particularly distinct while the scale of traveling distance is changed.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' For example, by extracting grid-based stay time statistics of users and periodically analyzing their frequency of visiting regions [8], the Inhomogeneous Continuous-Time Markov model was used to capture temporal and sequential regularities for predicting the leaving time of objects in regions and their next locations.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' There is a certain positive correlation between people’s morning trajectories and corresponding afternoon trajectories [26] in cities, so the integrated similarity metric was developed to estimate similar segments of trajectories with temporal segmentation and temporal correlation extraction.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' In a series of RNNs, LSTM [13] and Gated Recurrent Units (GRU) [5] are two popular variants that effectively moderate short-term memory and can help to avoid the vanishing gradient problem.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Social Long Short-Term Memory [1] was proposed based on LSTM to estimate the motions of pedestrians among the crowd in different scenes while taking into account the navigation of all their walking neighbors in a shared site.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='2 Missing Data Imputation Imputation of missing values on trajectory datasets has become an indispensable work in many applications.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Previously, a considerable number of methods for trajectory completion focused on inferring missing portion of traffic trajectories from sparse GPS samples based on the geometry of road network [16, 19, 33, 36].' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' For instance, the History-based Route Inference System (HRIS) [36] was established with a set of new mapping algorithms that could effectively extract and learn the traveling patterns from historical trajectories and incorporate them into the route inference process.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Manuscript submitted to ACM 4 Qin et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Along with estimating the traffic flows across junctions in a road network, Li et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' utilized the GPS samples within each flow cluster to achieve fine-level completion of individual trajectories [16].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Yin et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' developed a map-matching algorithm by evaluating the traveling cost of candidate routes and considering the distance feature and road selection behavior of users [33].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' However, the imputation methods for traffic trajectories on roads are hard to adapt to outdoor human mobility in city areas.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Those road network mapping techniques could become ineffective while human beings’ movements are relatively arbitrary on the map and affected by a variety of factors.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' More recent research related to missing value imputation has mainly relied on techniques to impute sequential data including Matrix Factorisation [23], GANs [6, 20, 21, 34] or RNNs [3, 35].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Naghizade et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' [23] proposed a contextual model to predict information at missing locations in sparse indoor trajectories via using Graph-regularised Non- negative Matrix Factorisation with consideration of implicit social ties among individuals.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' A model called BRITS [3] was built on RNNs to directly learn missing values for time series in a bidirectional recurrent dynamical system without strong assumptions.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' In GAIN [34], the generator imputes the missing values conditioned on observed data, and the discriminator then attempts to identify which parts of the conjectured vector are actually observed and which are imputed.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' The former module focuses on the imputation quality, and the latter is forced to learn according to real data distribution.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' In contrast, MASKGAN [6] explicitly trains the generator to produce high-quality samples for infilling text on sentences.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' GRUI [20], an autoregressive model based on GAN, was developed for the imputation of multivariate time series, such as electronic medical record datasets or air quality and weather data.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Additionally, Liu et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' recently presented a non-autoregressive model NAOMI [18] to impute missing values in different sequential datasets, such as traffic flows and trajectories of basketball players.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' The missing values are filled recursively from coarse to fine-grained resolutions via a forward and backward RNN-based model.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' NAOMI considers multiple-resolution imputation, wherein new imputations will be performed based on the previous inferences of values.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' 2.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='3 Attention Mechanism In the learning process, recurrent models can make predictions by transiting successive dependencies of observations from the beginning of entire sequences.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' That gives recurrent models the expressive ability to deal with long sequential data while maintaining hidden states.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Nowadays, autoregressive models such as the Transformer Network are competi- tive with or even supersede RNNs on a diverse set of missions.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Transformer Network is claimed to effectively weigh and learn representations over available observations via the multi-head attention mechanism.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' The original Transformer [29] was established to model and predict sequences in the natural language processing field.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Subsequently, GATs [30] was developed to operate on graph-structured data, leveraging self-attentional layers to address weighting nodes for graph convolutions.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Recently, it was adopted by Giuliari et al.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' [9] to forecast the future motions of people in different scenes, which renders better performance than the LSTM-based and Linear approaches.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='4 Main Research Gaps In this paper, we attempt to examine whether the imputation of missing points in human historical mobility can bring sake to the prediction of future movements, which is rarely investigated by the existing works.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' As we mentioned, our objective focus on inferring missing locations of daily human mobility in city areas.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' The road network mapping techniques for vehicle traffic trajectories can not adapt well to such datasets as the distribution of human beings’ movements is relatively arbitrary without strictly following the road networks.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' And human activities could be affected heavily by social connections, professions, and weather conditions.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' In addition, many GAN-based approaches, such as MASKGAN, GRUI, and GAIN were not designed or tested for complex human mobility data in terms of constructing Manuscript submitted to ACM Multiple-level Point Embedding for Solving Human Trajectory Imputation with Prediction 5 Table 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Symbols and notations.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='Symbols ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='Description ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='𝑝𝑡 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='The point was recorded at time 𝑡 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='𝑆𝑢 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='The original mobility sequence of user 𝑢 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='𝑡𝑟𝑤 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='𝑢 ' metadata={'source': 
'/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='One sub-trajectory of 𝑆𝑢 in 𝑤-𝑡ℎ time window ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='𝑈 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='The number of users involved ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='𝑇 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='The maximum number of points considered ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='𝐿 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='The width of each time window ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='𝑆 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='′ ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='𝑢 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='A set of sub-trajectories of user 𝑢 from 𝑆𝑢 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='𝑝𝑙 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='The point at step 𝑙 in a trajectory ' metadata={'source': 
'/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='𝑝𝑖 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='A missing point in one trajectory ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='���� ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='A masking vector for missing values ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='𝐼 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='The total number of missing points in trajectory ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='𝐷 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='The dimension of initial representation ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='𝑍 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='An initial representation of input points ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='𝑧𝑙 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='𝑜𝑏𝑠 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='The initial representation of point 𝑝𝑙 ' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='𝑧𝑖 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='𝑚𝑖𝑠 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='The initial representation of missing point 𝑝𝑖 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='𝜆1 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='The weight of imputation component ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='𝜆2 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='The weight of prediction component ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='𝜆3 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='The weight of movement velocity ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='coordinates of locations with Spatio-temporal dimensions.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Recently, NAOMI and another variant called SingleRes [18] were proposed to apply forward and backward RNN on observed points to infer missing values in each trajectory.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' However, those methods still rely on learning sequential dependencies of points in trajectories, and the experiments only tested the trajectories of agents in a relatively small scene (e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='g.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=', a billiard table or basketball square).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' The performance of daily human movements’ trajectories across city regions remains unknown.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' This type of trajectory contains more arbitrary movements occurring in an ample space with possibly more sparsity.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Our approach can provide insights for capturing the coexisting dependencies between missing positions and observed ones on different human mobility datasets.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' And the model is established based on the conditional independence assumption, considering both spatial and temporal perspectives, which the previous work did not examine sufficiently.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' 3 PROBLEM DEFINITION In this section, we define the problem of trajectory prediction and imputation task as follows: The Prediction Task: we denote an original mobility sequence of user 𝑢 as 𝑆𝑢 = {𝑝1, 𝑝2, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='..' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=', 𝑝𝑇 }, where 𝑝𝑡 ∈ R2 is the coordinates of point recorded at time frame 𝑡 and 𝑇 is the maximum number of observed values in consideration.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' A trajectory 𝑡𝑟𝑤 𝑢 is one sub-sequence of 𝑆𝑢 in 𝑤-𝑡ℎ time window, and the width of the window is 𝐿.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Therefore, 𝑡𝑟𝑤 𝑢 = {𝑝𝑤 1 , 𝑝𝑤 2 , .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='..' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=', 𝑝𝑤 𝐿 }, 𝑙-𝑡ℎ is the number of points in the sub-sequence.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' A set of such trajectories can be extracted from the original sequence with a defined time window for user 𝑢: 𝑆 ′ 𝑢 = {𝑡𝑟1𝑢,𝑡𝑟2𝑢, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='..' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='𝑡𝑟𝑊 𝑢 }.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' The whole dataset can be denoted as 𝑆 = {𝑆 ′ 1,𝑆 ′ 2, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='..' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='𝑆 ′ 𝑈 } and 𝑈 is the total number of users.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' The goal of this task is to predict the coordinates of the next movement when giving a trajectory.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Manuscript submitted to ACM 6 Qin et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' The Imputation Task: we assume that 𝑡𝑟𝑤 𝑢 denotes one of user 𝑢’s trajectories.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' In reality, 𝑡𝑟𝑤 𝑢 may contain a portion of missing points for many reasons.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Thus, the states of all the points inside the trajectory are represented with one masking vector 𝑀 = [𝑚1,𝑚2, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='..' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=',𝑚𝐿], where 𝑚𝑙 equals to zero if 𝑝𝑤 𝑙 is not observed.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Otherwise, 𝑚𝑙 is set to one.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' The purpose is to infer and substitute the missing values with appropriate alternatives in each trajectory.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Moreover, Table 1 provides more information about the symbols and notations used in this paper.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' 4 PROPOSED APPROACH Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' 1 illustrates the architecture of our proposed method for both human trajectory imputation and prediction.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' For imputation purposes, encoders and decoders based on self-attention are applied to learn coexisting patterns between observations and missing points.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Then, a recurrent component plays a role in extracting sequential dependencies on newly learned embeddings from the Supplement Layer for forwarding prediction.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Meanwhile, a mechanism called the Imputation Cycle is introduced to achieve progressive learning of both imputation and prediction at multiple levels.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' The model interactively considers learning two main components and improves overall performance.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Algorithm 1 shows an overview of training conducted using the proposed approach, and the details of the main components are described in the following subsections.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='1 Imputation Component In the beginning, the representations of each trajectory with missing points are generated via Linear layers and Frame-positional Encoding.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Then, the imputation component takes the initial representations and captures the spatial- temporal dependencies among points through self-attention in either Encoders or Decoders.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' This process will produce embeddings for observations and missing values, respectively.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Here, one agile design allows the model to decode 𝑛 number of missing points at each imputation time, and the process is repeated by the Imputation Cycle.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Thus, the final impute layer is responsible for inferring 𝑛 number of missing points each time.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='1 Feature Initialisation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' In the first stage, the framework projects representations of an incomplete trajectory into a higher 𝐷-dimensional space via the Linear Embedding Layer.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Specifically, the initial representation of one observed point 𝑝𝑙 is 𝑧𝑙 𝑜𝑏𝑠 = 𝑝⊤ 𝑙 𝑊𝑜𝑏𝑠, where ⊤ denotes transpose matrix, 𝑊𝑜𝑏𝑠 is the weight matrix, and 𝑝𝑙 is a zero vector if it is missing.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' We obtain a queue for the incomplete trajectory 𝐸𝑜𝑏𝑠 = {𝑧1 𝑜𝑏𝑠,𝑧2 𝑜𝑏𝑠, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='..' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=',𝑧𝐿 𝑜𝑏𝑠}.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' We extract another queue of missing points from 𝐸𝑜𝑏𝑠 as 𝐸𝑚𝑖𝑠 = {𝑧1 𝑚𝑖𝑠,𝑧2 𝑚𝑖𝑠, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='..' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=',𝑧𝐼 𝑚𝑖𝑠}.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Similarly, the initial representation of a missing point is 𝑧𝑖 𝑚𝑖𝑠 = 𝑝⊤ 𝑖 𝑊𝑚𝑖𝑠, where 𝑝𝑖 is also zero vector (0 ≤ 𝑖 ≤ 𝐼) and 𝐼 is the total number of missing points in a trajectory.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Adding time information to the initial representations of trajectories is essential because the imputation unit relies on the attention mechanism for learning without any sequential knowledge.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' We insert time representation to the points as distinctive identifications in trajectory based on the Positional Encoding method [29].' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' The relevant equations are given as follows: 𝐹 (𝑡,𝑑) = \uf8f1\uf8f4\uf8f4\uf8f2 \uf8f4\uf8f4\uf8f3 sin( 𝑡 10000𝑑/𝐷 ), when d is even, cos( 𝑡 10000𝑑/𝐷 ), when d is odd, (1) where 𝐹 (𝑡,𝑑) outputs the representation vector for time frame 𝑡 recorded for a point in the trajectory, and 𝑑 is the 𝑑-th dimension in the vector, which is also 𝐷-dimensional.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Then, the representation of the time frame is added to (e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='g.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=', element-wise addition) the initial embedding of the corresponding point in either 𝐸𝑜𝑏𝑠 or 𝐸𝑚𝑖𝑠.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Noticeably, the Manuscript submitted to ACM Multiple-level Point Embedding for Solving Human Trajectory Imputation with Prediction 7 Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' The proposed framework solves trajectory imputation and prediction jointly.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' 𝑃1, 𝑃4, 𝑃6, and 𝑃8 are the observed points in the trajectory, 𝑃2, 𝑃3, 𝑃5, and 𝑃7 are the missing values.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Multiple Encoders and Decoders are employed in the learning process to produce high-level embeddings for the observed trajectory and missing values, respectively.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' First, the Linear embedding layer initializes the representations of the trajectory queue and missing point(s), and Time Frame Encoding is responsible for adding observed time points to relevant representations.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Next, the trajectory embeddings from Encoders are fed to Encoder-Decoder Attention to enhance the embedding of missing points.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Finally, new embeddings of the trajectory are reconstructed by the Supplement Layer and transferred to an RNN-based unit for prediction purposes.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' In addition, the Imputation Cycle enables the model to conduct gradual imputation on multiple levels.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' 𝐹 is applied to the missing points in both queues 𝐸𝑜𝑏𝑠 and 𝐸𝑚𝑖𝑠, such that the model can have consistent temporal information of missing values from both queues during the imputation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' The original Positional Encoding encodes the positions of words in a sequence.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Here, we encode the time frames of points in a trajectory within the observed period.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' One time frame 𝑡 is a unique numeric index like 0, 1, 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='..' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=', which stands for the order in which people’s movements occurred in the observation period (day, week, or month).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Some existing literature [7] has mentioned that the patterns of human mobility tend to occur periodically, and integrating with this information can help to learn relevant tasks.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Therefore, we add time information using the time positional encoder to embed the time frame of points during a considered period.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Although the missing points are initially represented with Manuscript submitted to ACM RNN learn Units Predict Layer Supplement Layer ImputeLayer Traj Encoded Mssing Point Ermbedding Embedding 个 Add&Normalize Add&Normalize Feed Forward Feed Forward Enc-DecAttention Nx Encoder NxDecoder Add&Normalize Add&Normalize Multi-HeadAttention Multi-Head Attention Time Frame Encoding LinearEmbedding LinearEmbedding P1 P2 P3 P4 P5 P6 P7 P8 P2 P3 P5 P78 Qin et al.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' zero vectors, the Positional Encoding attaches time features on each missing value which will be further updated by the attention-based Encoders and Decoders in the next training stage.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='2 Encoder and Decoder.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' As Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' 1 shows, multiple Encoders and Decoders are employed to produce embeddings by weighting the correlation between observed and missing values, respectively.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' They share a similar structure: Self-attention, Normalisation, and Feed-forward Layers.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' The self-attention block of the first encoder and decoder acquires 𝐸𝑜𝑏𝑠 and 𝐸𝑚𝑖𝑠 as inputs, respectively.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' And they are responsible for obtaining the internal dependencies among the points on each side.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Moreover, each decoder contains an Encode-decode Attention to learn further external dependencies between an incomplete trajectory and its missing values.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' More specifically, a 𝑑𝑘-dimensional 𝑞𝑢𝑒𝑟𝑦 vector, 𝑑𝑘-dimensional 𝑘𝑒𝑦 vector, and 𝑑𝑣-dimensional 𝑣𝑎𝑙𝑢𝑒 vector are built for each point by multiplying its embedding with three different weight matrices, respectively.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' In practice, we deal with a set of points together by wrapping the 𝑞𝑢𝑒𝑟𝑦 vectors into matrix 𝑄, 𝑘𝑒𝑦𝑠 into matrix 𝐾, and 𝑣𝑎𝑙𝑢𝑒𝑠 into matrix 𝑉 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' The final output of the self-attention layer is calculated using the following formula: 𝐴𝑡𝑡𝑒𝑛𝑡𝑖𝑜𝑛(𝑄, 𝐾,𝑉 ) = 𝑠𝑜𝑓 𝑡𝑚𝑎𝑥(𝑄𝐾⊤ √︁ 𝑑𝑘 )𝑉, (2) where 𝑑𝑘 is the dimension of 𝐾.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Multiple encoders or decoders work sequentially in the imputation component: the output of one block will be taken as input by the next block.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' According to [29], multi-head attention that comprises several self-attentions can be applied to jointly synthesize the initial representation 𝑍 of points in each input queue from different representation sub-spaces, which helps enhance embedding learning.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' The functions of the operation are given as follows: 𝑀𝑢𝑙𝑡𝑖𝐻𝑒𝑎𝑑(𝑍) = 𝐶𝑜𝑛𝑐𝑎𝑡(ℎ𝑒𝑎𝑑1,ℎ𝑒𝑎𝑑2, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='..' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=',ℎ𝑒𝑎𝑑ℎ)𝑊 𝑂, 𝑊 𝑂 ∈ Rℎ𝑑𝑣×𝐷, (3) ℎ𝑒𝑎𝑑𝑖 = 𝐴𝑡𝑡𝑒𝑛𝑡𝑖𝑜𝑛(𝑍𝑊 𝑄 𝑖 ,𝑍𝑊 𝐾 𝑖 ,𝑍𝑊 𝑉 𝑖 ), 𝑊 𝑄 𝑖 ∈ R𝐷×𝑑𝑘,𝑊 𝐾 𝑖 ∈ R𝐷×𝑑𝑘,𝑊 𝑉 𝑖 ∈ R𝐷×𝑑𝑣 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' (4) Here, ℎ is the total number of heads in consideration, and each ℎ𝑒𝑎𝑑𝑖 is an individual of self-attention.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' 𝑄𝑖 = 𝑍𝑊 𝑄 𝑖 , 𝐾𝑖 = 𝑍𝑊 𝐾 𝑖 and 𝑉𝑖 = 𝑍𝑊 𝑉 𝑖 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' One of the main merits of multi-head attention is that attending computations can be executed in parallel to boost the overall runtime.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' We apply two heads of self-attention in both encoders or decoders in the experiments.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' The proposed framework learns the coexistence patterns of the observed and missing points by capturing their dependencies between the point embeddings from the encoders and decoders.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Fig.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' 2 illustrates the component of an Encoder-Decoder Attention that extracts the dependent relations between the observed trajectory and missing points.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' For the imputation of one incomplete trajectory, the queues are created for missing points and the relevant trajectory, respectively.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Furthermore, the model imputes 𝑛 missing points each time from the queue of missing points.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' In the example of Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' 2, when 𝑛 = 2, missing points 𝑃2 and 𝑃3 are first ejected into the Decoder Self-Attention for producing embeddings of 𝑃2 and 𝑃3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Simultaneously, the queue of the incomplete trajectory is injected into the Encoder Self-Attention for trajectory embedding production.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Then, the Encoder-Decoder Attention is responsible for learning the dependencies by weighting the relationship of missing points (𝑃2, 𝑃3) and the observed trajectory.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Finally, the locations of the missing points can be inferred based on their learned embeddings.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Manuscript submitted to ACM Multiple-level Point Embedding for Solving Human Trajectory Imputation with Prediction 9 P1 P2 P3 P4 P5 P6 P7 P8 Enc-Dec Attention P2 P3 P2 P3 P5 P7 n = 2 Weighting Decoder Self-Attention P1 P2 P3 P4 P5 P6 P7 P8 Encoder Self-Attention Missing Points Queue Traj Queue Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' The diagram shows that the Encoder-Decoder Attention captures the dependent patterns between the missing points and an observed trajectory.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='3 Imputation Recurrence.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' The model is capable of carrying out gradual imputation at multiple levels, which gives the model more flexibility by inferring 𝑛 (1 < 𝑛 < 𝐼) missing nodes in each imputation cycle.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' The encoders yield the embeddings of an incomplete trajectory and then transfer them to decoders for weighting with missing values in imputation.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Before that, the decoder takes the 𝑛 number of missing values as inputs in each cycle.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Further, 𝑛 alternatives are yielded from the Imputing Linear Layer, and new learning circulation is started based on the previous state.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' The model can train and learn on each trajectory progressively.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' As Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' 3 shows, the model takes 𝑛 points from the missing points queue in each imputation cycle.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' In a decoder, the embeddings of 𝑛 missing points are weighted based on observed trajectory embedding produced by an encoder.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Then, the impute layer outputs the locations of 𝑛 missing points.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' The imputation cycle will be repeated until all the missing points have been learned.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' 4.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='2 Prediction Component The details of the prediction component are provided in this section.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' At the upper part of the framework in Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' 1, it is the Supplement Layer that fetches the embeddings of observed portion 𝑌𝑜𝑏𝑠 and missing values 𝑌𝑚𝑖𝑠 from the final layer of encoder and decoder, respectively.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' The Supplement layer is responsible for reconstructing a new trajectory sequence by replenishing the trajectory representation with the embeddings of the missing points after the encoding stage.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Subsequently, an RNN-based unit is built upon this module to capture sequential dependency.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' The formula of a Manuscript submitted to ACM 10 Qin et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' P1 P2 P3 P4 P5 P6 P7 P8 P2 P3 P5 P7 Encoder Decoder Linear & Time Enc P2 Impute n = 1 P2 P3 P5 P7 Encoder Decoder Linear & Time Enc P3 Impute .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='..' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' P1 P2 P3 P4 P5 P6 P7 P8 P2 P3 P5 P7 Encoder Decoder Linear & Time Enc P2 Impute n = 2 P2 P3 P5 P7 Encoder Decoder Linear & Time Enc P5 Impute P3 P7 Missing Points Queue Traj Queue Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' The Imputation Cycle enables the model to conduct gradual imputation on multiple levels.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' In each imputation cycle, 𝑛 missing points will be taken into the model for learning with the relevant trajectory information.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' The diagram shows the imputation process when 𝑛 equals 1 or 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' supplement operation is given below: 𝑌𝑠𝑒𝑞(𝑝𝑙) = \uf8f1\uf8f4\uf8f4\uf8f2 \uf8f4\uf8f4\uf8f3 𝑌𝑚𝑖𝑠 (𝑚𝑖), if 𝑝𝑙 = 𝑚𝑖 𝑌𝑜𝑏𝑠 (𝑜𝑖), if 𝑝𝑙 = 𝑜𝑖 (5) where 𝑌𝑠𝑒𝑞 is the embedding matrix of the whole trajectory, 𝑝𝑙 is a point at step 𝑙 in the trajectory, 𝑚𝑖 is a point in the missing queue, and 𝑜𝑖 denotes a point in the observation queue.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' We interpolate the embedding of a point from matrix 𝑌𝑚𝑖𝑠 if it is missing;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' otherwise, the corresponding embedding is extracted from matrix 𝑌𝑜𝑏𝑠.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' In addition to using a ’Replace operation’, we can alternatively consider an ’Add operation’ in the Supplement layer to add the embedding of missing values to the corresponding positions in the trajectory representation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' In the prediction stage, GRU [5] is selected as the recurrent unit because of its efficient computation without performance deterioration.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' This layer is implanted by receiving the embeddings of the newly reconstructed sequence from the supplement layer and produces relevant hidden states.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' The updated formulations of GRU are as follows: 𝑓𝑡 = 𝜎(𝑊𝑓 𝑥𝑥𝑡 +𝑊𝑓 ℎℎ𝑡−1 + 𝑏𝑓 ), (6) 𝑟𝑡 = 𝜎(𝑊𝑟𝑥𝑥𝑡 +𝑊𝑟ℎℎ𝑡−1 + 𝑏𝑟), (7) Manuscript submitted to ACM Multiple-level Point Embedding for Solving Human Trajectory Imputation with Prediction 11 Algorithm 1 Overview of INGRAIN Training Input: A set of user trajectory 𝑆 ′ 𝑢 = {𝑡𝑟1𝑢,𝑡𝑟2𝑢, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='..' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='𝑡𝑟𝑊 𝑢 };' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' One masking vector 𝑀 = [𝑚1,𝑚2, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='..' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=',𝑚𝐿];' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Epochs of training 𝑒𝑝𝑜;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Number of points for each imputation cycle, 𝑛 < 𝐼.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' 1: while training epoch < 𝑒𝑝𝑜 do 2: for each trajectory 𝑡𝑟𝑤 𝑢 ∈ 𝑆 ′ 𝑢 do 3: Apply mask 𝑀 on 𝑡𝑟𝑤 𝑢 ;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' 4: Initialize representations of observed trajectory, 𝐸𝑜𝑏𝑠;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' 5: Encode time information with 𝐸𝑜𝑏𝑠;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' 6: while has missing values do 7: Initialize representations of 𝑛 missing points, 𝐸𝑚𝑖𝑠;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' 8: Apply time encoding on 𝐸𝑚𝑖𝑠;' metadata={'source': 
'/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' 9: Compute attention for observations, 𝐸 ′ 𝑜𝑏𝑠;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' 10: Compute attention of 𝑛 missing values in observations, 𝐸′𝑚𝑖𝑠;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' 11: Reconstruct trajectory based on 𝐸 ′ 𝑜𝑏𝑠 and 𝐸′𝑚𝑖𝑠;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' 12: Conduct imputation and prediction;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' 13: Optimisation with fused objective function L𝑙𝑒𝑎𝑟𝑛.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' 14: end while 15: end for 16: end while 𝑐𝑡 = 𝑡𝑎𝑛ℎ(𝑊𝑐𝑥𝑥𝑡 + 𝑟𝑡 ◦𝑊𝑐ℎℎ𝑡−1 + 𝑏𝑐), (8) ℎ𝑡 = (1 − 𝑓𝑡) ◦ 𝑐𝑡 + 𝑓𝑡 ◦ ℎ𝑡−1, (9) where 𝑥𝑡 is the embedding of the point in time 𝑡, ℎ𝑡−1 is the output of the last unit, 𝑊 is the weight matrix, 𝑏 is the bias vector, ◦ means element-wise multiplication, 𝑓𝑡 is the update gate, 𝑟𝑡 is the reset gate, 𝑐𝑡 is the candidate, and ℎ𝑡 is the output.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Noticeably, the model flexibly subjoins prediction training in each imputation cycle.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' In other words, it can recurrently learn and make forward predictions based on the latest status of the sequence restored by previous imputation recurrences.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' In the testing step, we need to detach the forward prediction from the imputing cycles and solely execute it when the imputation of the whole trajectory is completed.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='3 Collaborative Learning Objective As we mentioned, there are two main components established in the framework: imputation and prediction units.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' During the training stage of imputation, the purpose is to minimize the mean squared error between imputing values and ground truth with the following objective function: L𝑖𝑚𝑝 = E𝑋∼𝐶,� 𝑋∼𝐺𝜃 (𝑋,𝑀) ��𝐿 𝑙=1 ��� ˆ𝑥𝑙 − 𝑥𝑙 ��� 2 � , (10) where 𝐶 = {𝑋 ∗} is the set of original sequences, 𝑀 is a masking vector for missing values, 𝐺𝜃 (𝑋, 𝑀) denotes the function to infer missing values for imputation with parameter 𝜃, and ˆ𝑥 is one of the imputed values �𝑋.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Subsequently, we generate embedding 𝑌 of the sequence from the previous component in the training process of prediction.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' 𝐽𝜔 (𝑌) is the predictor of the next movement with parameter 𝜔, and the objective function of the prediction task is constructed Manuscript submitted to ACM 12 Qin et al.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' as follows: L𝑝𝑟𝑒 = E𝑋∼𝐶,𝑌∼𝐺𝜃 (𝑋,𝑀), ˆ𝑦∼𝐽𝜔 (𝑌) ����ˆ𝑦 − 𝑦 ��� 2 � .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' (11) Furthermore, we introduce a third loss function L𝑣𝑒𝑙 to constraint the movement velocity between imputed and observed points and comply with the speed observed in trajectories: L𝑣𝑒𝑙 = E𝑋∼𝐶,� 𝑋∼𝐺𝜃 (𝑋,𝑀),𝑣∼𝐻 (𝑋−� 𝑋),ˆ𝑣∼𝐻 ( � 𝑋) ����ˆ𝑣 − 𝑣 ��� 2 � , (12) where 𝐻 is the function to compute movement speed between each pair of points, 𝑣 is the observed speed in a trajectory, and ˆ𝑣 is the speed computed between imputed and observed points.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' To train the entire model, we fuse the optimization of the above objective functions in each execution as shown below: L𝑙𝑒𝑎𝑟𝑛 = 𝜆1L𝑖𝑚𝑝 + 𝜆2L𝑝𝑟𝑒 + 𝜆3L𝑣𝑒𝑙.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' (13) Here, 𝜆1, 𝜆2, and 𝜆3 are the hyperparameters representing the weights of different loss functions correspondingly in training.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' In this way, the model can benefit from optimizing different modules, and this collaborative learning could eventually give the model a latent boost of convergence.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' 4.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='4 Computational Complexity In this part, we take into account the main phases of the proposed framework for calculating computational complexity.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' The meanings of symbols used here are independent of the notations in the previous sections.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' The essential phases include initial representation generation, encoder and decoder attention, and RNN-based prediction module.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' The stage of initial representation generation is directly built on Multi-Layer Perceptron (MLP) [15], which approxi- mately has time complexity 𝑂(𝑙 ∗ 𝑛 ∗ 𝑑) for one layer of implementation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Here, 𝑙 is the trajectory length, 𝑛 is the input dimension (it is regularly a small value), and 𝑑 is the embedding dimension.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' In the imputation stage, the encoder and decoder mainly rely on the self-attention mechanism, which has 𝑂(𝑙2 ∗ 𝑑) [29].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' On the other hand, the RNN-based module typically contributes to time complexity 𝑂(𝑙 ∗ 𝑑2).' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' In most cases, the trajectory length 𝑙 is smaller than the embedding dimension 𝑑.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' However, self-attention is unnecessary to conduct sequential operations on all the points.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' It is able to consider neighbor locations of size 𝑟 in the input trajectory if it is particularly long [29].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' So, the complexity could reduce to 𝑂(𝑙 ∗ 𝑟 ∗ 𝑑), which will be considered in future work.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Overall, the 𝑂(𝑚𝑎𝑥{𝑙 ∗ 𝑛 ∗ 𝑑,𝑙2 ∗ 𝑑,𝑙 ∗ 𝑑2}) is the total complexity of the framework.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' In the experiments, 𝑛 is two for the input size, and 𝑙 and 𝑑 are configured correspondingly to different values.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' 5 EXPERIMENTAL RESULTS This section compares the performance of trajectory imputation and prediction for the proposed model and the baselines on different human mobility datasets.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Then, the primary hyperparameters of the proposed model are assessed intensively.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' A ablation study is provided at the end of the section.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' 5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='1 Experimental Setup 5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='1 Datasets.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' We use real-world human mobility datasets from three different cities worldwide for the experiments.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' The datasets record a broad range of users’ movements among city areas: Geolife Data [37] contains outdoor GPS trajectories of 182 users from April 2007 to August 2012 in Beijing, China.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' The sampling rates vary in trajectories: approximately 91% are logged every 1 to 5 seconds or every 5 to Manuscript submitted to ACM Multiple-level Point Embedding for Solving Human Trajectory Imputation with Prediction 13 Table 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' The total number of trajectories used in each dataset and 𝐿 denotes the length of each trajectory.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Geolife Cuebiq-AU Cuebiq-US L = 20 L = 50 L = 100 L = 20 L = 50 L = 100 L = 20 L = 50 L = 100 20,435 20,435 20,301 30,030 30,030 30,030 30,030 30,030 30,030 10 meters per point.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Each record contains timestamp, user ID, latitude and longitude.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' We selected 30 users with the most GPS records in January and February 2009 for evaluation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Cuebiq-US Data [14] contains more diverse human movements on a daily basis.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' The data collection period ranged from January 2018 to June 2018, and the location was New York, USA.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Sampling frequencies of approxi- mately 91% of data range from 1 to 600 seconds per record, and each record has timestamp, device ID, latitude, and longitude.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Cuebiq’s anonymized and privacy-enhanced data is collected from users who opted for anonymous data sharing for research purposes through a GDPR-compliant framework.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' The trajectories of the 30 most active users in May 2018 are extracted for experiments.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Cuebiq-AU Data [14] has the same data format as Cuebiq-US.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' It was collected over two years, from December 2017 to November 2019, in cities in Australia.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' The sampling rate of the collected data is similar to that of Cuebiq-US for most records.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Likewise, The trajectories of 30 users who were most active in October 2019 are used for testing.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Based on the original movement sequences of the users in each dataset, we produce a set of sub-trajectories of the users with a defined length (see Section 3).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Table 2 gives the total number of trajectories used in each dataset by length.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Then, the extracted trajectories are randomly separated into a training part (80%) and a testing part (20%).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Additionally, a varied size of masking vector is randomly created to imitate different degrees of missing values in sub-trajectories for imputation.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' The default probability of generating missing values follows a discrete uniform distribution.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Finally, the model requires predicting the location of the next movement when an observed trajectory with missing points is given.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' 5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='2 Metrics.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' In this paper, the losses of L𝑖𝑚𝑝 and L𝑝𝑟𝑒 are basically average euclidean distances between 2D points.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' In imputation, we infer the coordinates of missing points in trajectories and calculate the average 𝐿2 loss between imputed values and ground truth among all users.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Moreover, we assess the proposed approach for the prediction task, which is to forecast the coordinates of the next location for users if a historical trajectory with missing points is given.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' The average 𝐿2 loss between the predicted values and ground truth is used for the evaluation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' In our model, the predicted values will be evaluated merely after a trajectory’s imputation is completed during the testing phase.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' 5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='3 Baselines.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' For trajectory imputation, two state-of-the-art methods for comparison are NAOMI [18] and Sin- gleRes [18].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' NAOMI is one of the latest non-autoregressive approaches for sequence imputation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' In contrast, SingleRes is the autoregressive counterpart, and it can be reduced to BRITS [3] if the adversarial training is discarded.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' GRUI [20] is also an autoregressive model with GAN for time series imputation, which is used to handle the completion of a trajectory sequence.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Moreover, GAIN [34] is another recent method to impute missing data using GANs.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' We also test the imputation task with a classical approach named KNN + Linear [12], which searches K nearest neighbors from samples and then applies Linear regression to impute missing points based on those neighbors.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Here, we also implemented this approach by inferring a defined number of missing points in a trajectory in different degrees.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Simultaneously, several RNN variations are used for comparison with the proposed model for prediction purpose, such as stacked LSTM Manuscript submitted to ACM 14 Qin et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Table 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' The results show the L2 loss of imputation and prediction on three different datasets.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' The percentage of missing points in each trajectory is 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='8 for all the tests, and 𝐿 denotes the length of trajectories in different trials.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' L2 Loss for Imputation Methods Geolife Cuebiq - AU Cuebiq - US L = 20 L = 50 L = 100 L = 20 L = 50 L = 100 L = 20 L = 50 L = 100 KNN + Linear [12] 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='7095 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='7014 8.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='7260 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='6466 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='9836 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='9567 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='0238 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='0254 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='0323 GAIN [34] 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='1597 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='1903 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='9478 0.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='8619 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='8842 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='0788 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='0125 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='0126 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='0137 GRUI [20] 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='3884 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='2584 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='2443 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='3150 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='2062 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='2088 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='8407 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='2871 0.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='2189 NAOMI [18] 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='0498 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='1613 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='0598 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='2007 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='0186 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='0524 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='0122 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='0129 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='0127 SingleRes [18] 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='4365 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='0405 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='0171 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='0716 0.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='0190 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='0622 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='0121 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='0128 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='0126 INGRAIN (ours) 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='0270 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='0075 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='0062 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='0122 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='0116 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='0117 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='0055 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='0050 0.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='0046 L2 Loss for Prediction B-LSTM [10] 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='1856 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='0427 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='4948 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='8294 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='9935 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='0853 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='0120 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='0127 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='0126 RNNSearch [2] 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='2282 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='0754 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='5726 0.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='8809 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='0151 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='0941 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='0962 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='0187 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='0176 S-GRU [5] 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='2309 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='0437 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='5305 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='8276 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='9919 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='0895 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='0121 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='0129 0.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='0126 S-LSTM [27] 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='2081 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='0656 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='5289 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='8257 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='9947 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='0846 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='0120 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='0128 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='0125 INGRAIN (ours) 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='0525 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='0464 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='0751 1.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='0496 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='0139 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='9194 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='0073 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='0070 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='0071 (S-LSTM) [27], bidirectional LSTM (B-LSTM) [10] and stacked GRU (S-GRU) [5].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Another sequence forecasting method is RNNSearch [2], which implements the attention mechanism based on RNN to selectively retrieve information from the encoder to the decoder for prediction.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' 5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='4 Implementation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' The proposed method (INGRAIN) is implemented with Pytorch, and the Adam algorithm is used as the optimizer with a learning rate of 0.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='001 and batch size of 70.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' In the imputation component, we use two layers of either encoders or decoders.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' The number of heads (self-attention) in each layer is two, and the dimension of learning embedding is 256.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' In the prediction part, 1-layer GRU is adopted with a hidden size of 256.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' For training in different settings, the number of epochs is 60, and we compute the mean of the best test results for each task in five runs.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' For other imputation methods, NAOMI and SingleRes [18] use the same values of some basic parameters as our model: learning rate 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='001, batch size 70, and training epochs 60.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' The other parameters are default in their implementation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' GRUI [20], and GAIN [34] are faster for training but harder to achieve convergence.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Furthermore, we tried a different number of iterations for training to obtain optimal results on different datasets, ranging from 50 to 1000.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' The baselines are all RNN based for the prediction methods, and we can directly compare them with our prediction component by applying similar parameters for training, such as learning rate, batch size, training epochs, or size of hidden features.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' 5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='2 Performance Analysis The evaluations are conducted on Geolife, Cuebiq-AU, and Cuebiq-US, and the sampling rates of points collection vary drastically.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' This section selected the top 20 users of each dataset who were most active within the observed period for the learning task evaluation, sensitivity analysis, and ablation study.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' In addition, to further assess the model’s effectiveness, we generate three different groups of users, and each group contains ten persons randomly picked from Manuscript submitted to ACM Multiple-level Point Embedding for Solving Human Trajectory Imputation with Prediction 15 (a) Geolife - Imputation (b) Cuebiq-AU - Imputation (c) Cuebiq-US - Imputation (d) Geolife - Prediction (e) Cuebiq-AU - Prediction (f) Cuebiq-US - Prediction Fig.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' We use the 13 most active users of Geolife and the 20 most active users of Cuebiq-AU and Cuebiq-US for this experiment, and the length of a trajectory is 20.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' (a), (b) and (c) demonstrate the imputation loss of the proposed model and baselines on different datasets, with varying percentages of missing points in trajectories.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' (d), (e) and (f) show the results for prediction loss on three datasets, respectively.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' the 30 users in each dataset.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' The data of groups were tested directly on two learning tasks, and the results are shown in Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' 5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' 5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='1 Imputation Results.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' We first evaluate imputation performance for different lengths of trajectories with a certain degree of missing values.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Table 3 displays the experimental results on three different lengths (20, 50 and 100) of trajectories across all the datasets.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' The missing rate of points is 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='8.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' The parameters 𝜆1 and 𝜆2 are configured to one for our model, which means that the model fully considers the feedback from different components for training.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' 𝜆3 is not considered in this test.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Overall, the proposed model has the best imputation performance on these datasets regarding average L2 loss.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' It is also noticeable that the INGRAIN can keep contributing to a minor loss of imputation when the length of tested trajectories is increased, whereas no such obvious advantage can be found for the baselines.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' One reason for this could be that longer trajectories contain more sample fragments, and the model can effectively utilize this increment of data to infer missing values.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Another argument is that a good combination of attention-based imputation and prediction components can better enable INGRAIN to overcome imputation in longer trajectories.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' The proposed imputation component is built with an attention mechanism that learns embeddings by weighting the relations between the points in trajectories.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' And the other two baselines rely on learning embeddings in sequential dependencies of the trajectories, which could be affected more heavily when more random points are missing.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Manuscript submitted to ACM ++16 Qin et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' (a) Geolife - Imputation (b) Cuebiq-AU - Imputation (c) Cuebiq-US - Imputation (d) Geolife - Prediction (e) Cuebiq-AU - Prediction (f) Cuebiq-US - Prediction Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' 5.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' The experiment was run with three groups of 10 users randomly picked from the 30 most active ones in each dataset.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' The length of a trajectory is 20 in the tests.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' (a), (b) and (c) demonstrate the imputation loss of the proposed model and baselines on different datasets, with varying percentages of missing points in trajectories.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' (d), (e) and (f) show the results for prediction loss on three datasets, respectively.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Next, we assess the impact of different missing rates of points on the task of imputation by the algorithms.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' 4a and 4b demonstrate the stability of our method in solving imputation on both Geolife and Cuebiq-AU when the percentage of missing points varies from 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='2 to 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='8 with a trajectory length of 20.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' As the missing rate rises, the INGRAIN can maintain the loss at a relatively low position while the loss of either NAOMI or SingleRes fluctuates dramatically and tends to rise or stay between the missing rate from 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='5 to 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='8.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' However, the SingleRes performs better on Geolife when the rate is smaller than 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' As we can see from Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' 4c, the baselines become steadier on the dataset Cuebiq-US, but have (approximately two times) higher loss of imputation than that of our model.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' In addition, in other tests with different groups of random users, Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' 5 further demonstrates the model’s prominent ability on trajectory imputation in terms of accurate estimation and stability.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' We claim that learning point embeddings based on the mode of a fully connected graph (attention mechanism) could better capture the dependencies between missing points and the observed trajectories for solving the imputation of daily human mobility in the city regions.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' 5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='2 Prediction Results.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Forward prediction is conducted along with imputation by the proposed model.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' The results of the proposed model again show its advantages in predicting future values after the imputation of the trajectory is processed.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Table 3 reveals that the INGRAIN is superior to all the other RNN-based baselines (S-GRU, S-LSTM, B-LSTM, and RNNSearch) on both Geolife and Cuebiq-US datasets with missing rates of 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='8.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' For the dataset Cuebiq-AU, the Manuscript submitted to ACM Multiple-level Point Embedding for Solving Human Trajectory Imputation with Prediction 17 INGRAIN tends to improve in longer trajectories (𝐿=100), although it is worse in shorter ones (𝐿=20 or 50) compared with its counterparts.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Overall, the results of the baselines deteriorated slightly while the length of trajectories was prolonged, with the same degree of missing rate.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Our model shows a more reliable capability to overcome the impact of missing values in longer trajectories for next-location forecasting.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Moreover, Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' 4d, 4e and 4f display the results of prediction with missing rates from 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='2 to 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='8 on three different datasets, respectively.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' And the length of all the trajectories in this test is 20.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' The performance of the RNN-based baselines is stable among the three datasets but weaker in regard to the ability to converge at a smaller loss.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' In contrast, the figure of INGRAIN fluctuates on the Cuebiq-AU but can keep prediction loss at significantly lower values on the other two datasets.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' 5e and 5f also show the advantages of the proposed model for prediction tasks with different groups of random users.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' In general, the previous results indicate that effectively incorporating the imputation component with the prediction unit in INGRAIN would eventually benefit both learning tasks and outperform the counterparts of baselines.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' We claim that the proposed model conducts the prediction based on the status or effect of imputation on trajectories, which could potentially enhance the performance.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' 5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='2.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='3 Sensitivity Analysis.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' In this section, we examine the effect of primary hyperparameters on the performance of INGRAIN for both tasks.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' That gives us more insights into how the proposed method converges in different configurations and what trade-offs can be made between two learning tasks.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' As this paper mentioned, the model is agile to infer a defined number of missing points in each imputing cycle.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' This process iterates until the whole imputation work of each trajectory is finished.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Thus, we first check how this number potentially acts on the results of two learning tasks with the dataset Geolife.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' In Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' 6a, the number of missing points from 1 to 5 per imputing cycle is inspected for the best average L2 loss of imputation and prediction in each test simultaneously.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' There is an increase in the loss for both criteria when more points are imputed in each cycle.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' The loss of imputation and prediction is relatively minor when the number is one.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Although the prediction loss arrives at its lowest position when the number is two, the imputation loss value soars.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Therefore, using fewer missing points in each imputation operation can offer better learning results for both tasks.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' It is well known that the learning rate is a fundamental factor that affects the convergence of a deep learning model.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' 6b illustrates that the performance of two tasks becomes gradually worse when we increase the value of the learning rate from 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='001 (10−1) to 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='15 (10−1).' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' As we can see, learning rates 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='001 (10−1) and 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='01 (10−1) allow the model to produce better results for both imputation and prediction.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' In the assessment of using a different window length for constructing trajectories, Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' 6c shows that imputation loss tends to become smaller in the learning with longer trajectories, and prediction loss fluctuates slightly between around 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='02 to 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='07.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' We see that the performance of both imputation and prediction work is relatively advantageous when the length is 70.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' 6d indicates that using two heads of self-attention can simultaneously perform better for both learning tasks.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Furthermore, the computational requirement becomes relatively less than applying a bigger number of heads.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' In addition, the evaluation of embedding and hidden size used in the model demonstrates that a smaller value can already contribute to a good performance of two tasks, such as 128 or 256.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' As Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' 6e and Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' 6f show, more computation with an increased value of such parameters did not provide better results.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' In the following, we examine the functions of imputation and prediction components by changing the values of 𝜆1 and 𝜆2 without considering the constraint of movement velocity (𝜆3 = 0).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' As we mentioned in Section 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='3, 𝜆1 and 𝜆2 are two hyperparameters that control the feedback of imputation and prediction unit received by the model during training, respectively.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' In Fig.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' 6g and 6h, higher values indicate that more strength is considered for the corresponding component during entire training.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' We see that the loss of imputation reduces slightly in Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' 6g when bigger 𝜆1 is configured.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Manuscript submitted to ACM 18 Qin et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' (a) # of Points per Imp-cycle (b) Change of Learning Rate (c) Window Length (d) Head Number (e) Embedding Size (f) Hidden Size (g) Evaluation of 𝜆1 (h) Evaluation of 𝜆2 (i) Evaluation of 𝜆3 Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' 6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' We evaluate the main hyperparameters of the proposed model for both imputation and prediction on Geolife.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' (a) shows the results for applying different numbers of missing points in each imputing cycle, and (b) illustrates the results with the change in learning rate.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' (c) and (d) show the evaluation of the model for window length and head number, respectively.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' The figures related to embedding and hidden size are given in (e) and (f).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' In addition, (g), (h) and (i) are the results of investigation for 𝜆1, 𝜆2 and 𝜆3, respectively.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Meanwhile, the prediction loss simultaneously undergoes a reversed change (rise).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' However, the combination 𝜆1 = 1 and 𝜆2 = 1 could offer a better trade-off for the performance of both tasks.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Similarly, in a different run, the loss of prediction decreases drastically while the value of 𝜆2 is raised from 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='2 to 1 along with a fixed 𝜆1 in Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' 6h, and both tasks benefit when 𝜆2 is around 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='3.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' In addition, Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' 6i presents the results of varied 𝜆3 when 𝜆1 = 1 and 𝜆2 = 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' It is apparent that the performance of both tasks benefits the most when 𝜆3 = 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='8.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' In Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' 7, we compare the performance of the proposed model and the baselines with different distributions of missing values generation, such as Uniform and Poisson distribution.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' 7a, 7b and 7c demonstrate the imputation loss of the proposed model and baselines on different datasets, respectively.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' 7d, 7e and 7f are the results for the prediction Manuscript submitted to ACM O-Loss-lmp Loss-Pred 0.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='038 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='3 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='036 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='25 Loss LOSS 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='034 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='2 Imputation 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='032 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='15 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='03 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='1 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='028 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='05 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='026 0 2 cCi 4 5 Number of PointsO-Loss-Imp Loss-Pred 0.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='8 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='6 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='5 Imputation Loss LOsS 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='4 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='2 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='5 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='001 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='005 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='01 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='05 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='1 Learning Rate (10-1)O- Loss-Imp Loss-Pred 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='04 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='08 0.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='03 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='06 Loss Loss Imputation Prediction 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='02 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='04 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='01 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='02 0 20 35 50 70 100 Window LengthO- Loss-lmp Loss-Pred 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='03 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='07 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='027 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='06 Loss LOss Imputation L 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='024 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='05 iction 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='021 0.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='04 redi P 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='018 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='03 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='015 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='02 2 4 8 16 32 Number of HeadsO-Loss-Imp Loss-Pred 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='05 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='5 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='04 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='2 Loss LOSS Imputationl 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='03 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='9 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='02 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='6 0.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='01 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='3 0 128 256 512 768 1024 Embedding SizeO-Loss-lmp Loss-Pred 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='05 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='8 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='04 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='6 Imputation Loss Prediction Loss 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='03 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='4 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='02 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='2 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='01 0 128 256 512 768 1024 Hidden SizeO-Loss-Imp Loss-Pred 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='036 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='15 0.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='034 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='13 Loss Loss Imputation L 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='032 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='11 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='03 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='09 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='028 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='07 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='026 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='05 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='2 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='4 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='6 0.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='8 入1 (入2= 1)O- Loss-lmp Loss-Pred 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='032 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='2 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='03 1 Loss LOss 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='028 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='8 Imputation I Prediction 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='026 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='6 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='024 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='4 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='022 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='2 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='02 0.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='2 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='4 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='6 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='8 入2 (入1= 1)O- Loss-Imp Loss-Pred 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='01 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='1 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='0098 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='08 Loss Loss Imputation l Prediction 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='0096 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='06 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='0094 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='04 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='0092 0.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='02 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='2 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='4 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='6 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='8 1 入3 (入1=入2= 1)Multiple-level Point Embedding for Solving Human Trajectory Imputation with Prediction 19 (a) Geolife - Imputation (b) Cuebiq-AU - Imputation (c) Cuebiq-US - Imputation (d) Geolife - Prediction (e) Cuebiq-AU - Prediction (f) Cuebiq-US - Prediction Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' 7.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' This experiment was run with a group of 10 users randomly picked from the 30 most active ones in each dataset, and the trajectory length is 20.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' (a), (b) and (c) demonstrate the imputation loss of the proposed model and baselines on three datasets, with different distributions of missing values generation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' (d), (e) and (f) show the results for the prediction task on three datasets, respectively.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' task on three datasets, respectively.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' As we can see, when using various distributions for the experiment, the proposed approach exhibits significant advantages on the imputation task.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Meanwhile, our model can still keep a competitive performance on the prediction task compared with the other algorithms.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' 5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='4 Ablation Study.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' We conduct an ablation study of 𝜆1, 𝜆2, and 𝜆3 to check the model’s performance when feedback of the imputation component, prediction component, or speed constraint is totally discarded during optimization.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' The proposed model will fully consider the feedback of imputation in the optimization process when 𝜆1 = 1 and 𝜆2 = 1 indicates that the optimizer will receive the feedback of the prediction component without any abandon.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' In contrast, zero value means that the feedback from one component is totally left out during training.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' It is found that when the feedback from one task is totally discarded during the training of the whole model, the outputs of that task will be meaningless and random floats because no specific optimization arises based on the ground truth.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Thus, we omit those relevant results on the Table 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' We temporarily neglect the movement speed constraint by setting 𝜆3 to zero in the beginning.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Furthermore, the table illustrates that the setting of 𝜆1 = 1 and 𝜆2 = 0 could achieve better imputation results in some cases, accompanied by the sacrifice of optimization for the prediction.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' However, considering the full feedback of both components (𝜆1 = 1, 𝜆2 = 1) could also provide competitive prediction performance in most cases (the length of trajectory in 20 or 50).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' This experiment also indicates that switching one of two main components could give the model the flexibility to concentrate better on optimizing a single task.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' In addition, adding movement speed Manuscript submitted to ACM GRA NAO onolekGRA AO SngleRIGRA NAO onoerNGRA GFGRA S-GRIGRA GF20 Qin et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Table 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' The model will not consider the feedback from the imputation unit during training when 𝜆1=0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Otherwise, 𝜆1=1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' 𝜆2 and 𝜆3 are responsible for controlling the feedback from the prediction unit and the weight of speed constraint, respectively.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Loss-I and Loss-P represent the L2 loss of imputation and prediction, respectively.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Further, the portion of missing points is 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='8 for all the tests, and L denotes the length of trajectories in different trials.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Basically, the learning of a task is infeasible if its designated optimization is totally ignored.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' However, another task has the chance of getting a slight improvement than considering more optimization units in the same iterations of training.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Weights Tasks Geolife Cuebiq - AU Cuebiq - US 𝜆1 𝜆2 𝜆3 L = 20 L = 50 L = 20 L = 50 L = 20 L = 50 1 1 0 Loss-I 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='0270 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='0075 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='0122 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='0117 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='0055 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='0050 Loss-P 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='0525 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='0464 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='0497 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='0140 0.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='0073 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='0070 1 0 0 Loss-I 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='0275 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='0098 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='0113 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='0109 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='0054 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='0055 Loss-P ∼ ∼ ∼ ∼ ∼ ∼ 0 1 0 Loss-I ∼ ∼ ∼ ∼ ∼ ∼ Loss-P 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='0959 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='0393 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='8611 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='1230 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='0114 0.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='0082 1 1 1 Loss-I 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='0107 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='0062 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='0120 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='0110 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='0055 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='0048 Loss-P 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='0648 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='0499 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='9350 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='2609 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='0070 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='0076 Table 5.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' The RNN-based unit and the Supplement layer are two modules that support the imputation and prediction learning process.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' The table shows the impact of these two modules on the model’s performance.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' ’Add operation’ or ’Replace operation’ is used individually in the Supplement layer with or without the RNN unit.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' The tests were conducted on Geolife with a trajectory length of 20.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Components Geolife RNN Unit Add Operation Replace Operation Loss-I Loss-P 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='0105 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='0143 ✓ 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='0097 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='0480 ✓ 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='0159 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='0695 ✓ ✓ 0.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='0094 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='0732 ✓ ✓ 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='0090 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='0886 constraint (𝜆3 = 1) could lead to a slight improvement of imputation on Geolife, which has a more stable sampling rate of points collection than the other two datasets.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' As mentioned before, the RNN-based unit and the Supplement layer are two modules that support the imputation and prediction learning process.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' An evaluation is given of their effects on the overall performance of the proposed model in Table 5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' ’Add operation’ or ’Replace operation’ is used individually in the Supplement layer to incorporate missing values’ embedding into the trajectory representation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Five tests were conducted for each combination on Geolife with a trajectory length of 20, and the mean values were reported.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' We can see that combining a supplement operation and an RNN unit tends to contribute to a minor loss of imputation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' In contrast, the prediction can not benefit too much from the joint use of two components.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' However, we can find that integration of RNN unit for learning can improve imputation performance to some extent.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Manuscript submitted to ACM Multiple-level Point Embedding for Solving Human Trajectory Imputation with Prediction 21 6 CONCLUSION Usually, human mobility data are incomplete in practice, leading to bias or difficulties in learning tasks, such as imputation and prediction.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' We propose a new approach incorporating non-autoregressive and autoregressive components to help trajectory imputation and prediction.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' The model effectively learns the dependence between observations and missing values on multiple levels with the advantage of self-attention.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Meanwhile, one RNN-based unit is applied to extract potential features recurrently from the newly learned sequences.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Intensive experiments are conducted on three datasets: Geolife, Cuebiq-AU, and Cuebiq-US.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' The results show that the proposed model can achieve advanced performance in both learning tasks compared to the baselines.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Additionally, the analysis of primary hyperparameters reveals how trade-offs could be made between different tasks with proper settings.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Moreover, the flexible configuration of switching the acceptance of additional feedback enables us to pay more attention to individual units to attain better results for a specific task.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' In the future, we plan to conduct more experiments on more diverse types of mobility datasets (e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='g.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=', POIs and grid-based datasets) and analyze the potential factors that crucially influence the learning of different imputation algorithms.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' REFERENCES [1] Alexandre Alahi, Kratarth Goel, Vignesh Ramanathan, Alexandre Robicquet, Li Fei-Fei, and Silvio Savarese.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' 2016.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Social lstm: Human trajectory prediction in crowded spaces.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' In IEEE conference on computer vision and pattern recognition.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' 961–971.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' [2] Dzmitry Bahdanau, Kyunghyun Cho, and Yoshua Bengio.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' 2014.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Neural machine translation by jointly learning to align and translate.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' arXiv preprint arXiv:1409.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='0473 (2014).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' [3] Wei Cao, Dong Wang, Jian Li, Hao Zhou, Lei Li, and Yitan Li.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' 2018.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Brits: Bidirectional recurrent imputation for time series.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' In Advances in Neural Information Processing Systems.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' 6775–6785.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' [4] Cynthia Chen, Jingtao Ma, Yusak Susilo, Yu Liu, and Menglin Wang.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' 2016.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' The promises of big data and small data for travel behavior (aka human mobility) analysis.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Transportation research part C: emerging technologies 68 (2016), 285–299.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' [5] Junyoung Chung, Caglar Gulcehre, KyungHyun Cho, and Yoshua Bengio.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' 2014.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Empirical evaluation of gated recurrent neural networks on sequence modeling.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' arXiv preprint arXiv:1412.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='3555 (2014).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' [6] William Fedus, Ian Goodfellow, and Andrew M Dai.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' 2018.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' MaskGAN: Better text generation via filling in the_.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' arXiv preprint arXiv:1801.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='07736 (2018).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' [7] Jie Feng, Yong Li, Chao Zhang, Funing Sun, Fanchao Meng, Ang Guo, and Depeng Jin.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' 2018.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Deepmove: Predicting human mobility with attentional recurrent networks.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' In 2018 world wide web conference.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' 1459–1468.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' [8] Győző Gidófalvi and Fang Dong.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' 2012.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' When and where next: individual mobility prediction.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' In SIGSPATIAL.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' 57–64.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' [9] Francesco Giuliari, Irtiza Hasan, Marco Cristani, and Fabio Galasso.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' 2020.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Transformer Networks for Trajectory Forecasting.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' arXiv preprint arXiv:2003.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='08111 (2020).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' [10] Alex Graves, Santiago Fernández, and Jürgen Schmidhuber.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' 2005.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Bidirectional LSTM networks for improved phoneme classification and recognition.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' In International conference on artificial neural networks.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Springer, 799–804.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' [11] Agrim Gupta, Justin Johnson, Li Fei-Fei, Silvio Savarese, and Alexandre Alahi.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' 2018.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Social gan: Socially acceptable trajectories with generative adversarial networks.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' In IEEE Conference on Computer Vision and Pattern Recognition.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' 2255–2264.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' [12] Trevor Hastie, Robert Tibshirani, and Jerome Friedman.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' 2009.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' The elements of statistical learning: data mining, inference, and prediction.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Springer Science & Business Media.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' [13] Sepp Hochreiter and Jürgen Schmidhuber.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' 1997.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Long short-term memory.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Neural computation 9, 8 (1997), 1735–1780.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' [14] Cuebiq Inc.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' 2021.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Data for Good - Cuebiq.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' https://www.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='cuebiq.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='com/about/data-for-good/ [15] Tarun Khanna.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' 1990.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Foundations of neural networks.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Addison-Wesley Longman Publishing Co.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=', Inc.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' [16] Yang Li, Yangyan Li, Dimitrios Gunopulos, and Leonidas Guibas.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' 2016.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Knowledge-based trajectory completion from sparse GPS samples.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' In Proceedings of the 24th ACM SIGSPATIAL International Conference on Advances in Geographic Information Systems.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' 1–10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' [17] Biwei Liang, Tengjiao Wang, Shun Li, Wei Chen, Hongyan Li, and Kai Lei.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' 2016.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Online learning for accurate real-time map matching.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' In PAKDD.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Springer, 67–78.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' [18] Yukai Liu, Rose Yu, Stephan Zheng, Eric Zhan, and Yisong Yue.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' 2019.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' NAOMI: Non-autoregressive multiresolution sequence imputation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' In Advances in Neural Information Processing Systems.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' 11238–11248.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' [19] Yin Lou, Chengyang Zhang, Yu Zheng, Xing Xie, Wei Wang, and Yan Huang.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' 2009.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Map-matching for low-sampling-rate GPS trajectories.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' In Proceedings of the 17th ACM SIGSPATIAL international conference on advances in geographic information systems.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' 352–361.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Manuscript submitted to ACM 22 Qin et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' [20] Yonghong Luo, Xiangrui Cai, Ying Zhang, Jun Xu, et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' 2018.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Multivariate time series imputation with generative adversarial networks.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' In Advances in Neural Information Processing Systems.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' 1596–1607.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' [21] Yonghong Luo, Ying Zhang, Xiangrui Cai, and Xiaojie Yuan.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' 2019.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' E2GAN: End-to-End Generative Adversarial Network for Multivariate Time Series Imputation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' In 28th International Joint Conference on Artificial Intelligence.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' AAAI Press, 3094–3100.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' [22] Anna Monreale, Fabio Pinelli, Roberto Trasarti, and Fosca Giannotti.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' 2009.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Wherenext: a location predictor on trajectory pattern mining.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' In Proceedings of the 15th ACM SIGKDD international conference on Knowledge discovery and data mining.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' 637–646.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' [23] Elham Naghizade, Jeffrey Chan, Yongli Ren, and Martin Tomko.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' 2018.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Contextual Location Imputation for Confined WiFi Trajectories.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' In Pacific-Asia Conference on Knowledge Discovery and Data Mining.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Springer, 444–457.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' [24] Elham Naghizade, Lars Kulik, Egemen Tanin, and James Bailey.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' 2020.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Privacy-and context-aware release of trajectory data.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' ACM Transactions on Spatial Algorithms and Systems (TSAS) 6, 1 (2020), 1–25.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' [25] Mengshi Qi, Jie Qin, Yu Wu, and Yi Yang.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' 2020.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Imitative Non-Autoregressive Modeling for Trajectory Forecasting and Imputation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' In IEEE/CVF.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' 12736–12745.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' [26] Amin Sadri, Flora D Salim, Yongli Ren, Wei Shao, John C Krumm, and Cecilia Mascolo.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' 2018.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' What will you do for the rest of the day?' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' an approach to continuous trajectory prediction.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Proc.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' of the ACM on Interactive, Mobile, Wearable and Ubiquitous Technologies 2, 4 (2018), 1–26.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' [27] Martin Sundermeyer, Ralf Schlüter, and Hermann Ney.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' 2012.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' LSTM neural networks for language modeling.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' In Thirteenth annual conference of the international speech communication association.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' [28] Douglas Do Couto Teixeira, Aline Carneiro Viana, Jussara M Almeida, and Mrio S Alvim.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' 2021.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' The impact of stationarity, regularity, and context on the predictability of individual human mobility.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' ACM Transactions on Spatial Algorithms and Systems 7, 4 (2021), 1–24.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' [29] Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Łukasz Kaiser, and Illia Polosukhin.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' 2017.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Attention is all you need.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' In Advances in neural information processing systems.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' 5998–6008.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' [30] Petar Veličković, Guillem Cucurull, Arantxa Casanova, Adriana Romero, Pietro Lio, and Yoshua Bengio.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' 2017.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Graph attention networks.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' arXiv preprint arXiv:1710.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='10903 (2017).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' [31] Hao Wang, Huawei Shen, Wentao Ouyang, and Xueqi Cheng.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' 2018.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Exploiting POI-Specific Geographical Influence for Point-of-Interest Recommen- dation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content='. In IJCAI.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' 3877–3883.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' [32] Xianjing Wang, Flora D Salim, Yongli Ren, and Piotr Koniusz.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' 2020.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Relation Embedding for Personalised Translation-Based POI Recommendation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' In Pacific-Asia Conference on Knowledge Discovery and Data Mining.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Springer, 53–64.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' [33] Yifang Yin, Rajiv Ratn Shah, Guanfeng Wang, and Roger Zimmermann.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' 2018.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Feature-based map matching for low-sampling-rate GPS trajectories.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' ACM Transactions on Spatial Algorithms and Systems (TSAS) 4, 2 (2018), 1–24.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' [34] Jinsung Yoon, James Jordon, and Mihaela Schaar.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' 2018.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Gain: Missing data imputation using generative adversarial nets.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' In International Conference on Machine Learning.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' PMLR, 5689–5698.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' [35] Jinsung Yoon, William R Zame, and Mihaela van der Schaar.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' 2018.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Estimating missing data in temporal data streams using multi-directional recurrent neural networks.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' IEEE Transactions on Biomedical Engineering 66, 5 (2018), 1477–1490.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' [36] Kai Zheng, Yu Zheng, Xing Xie, and Xiaofang Zhou.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' 2012.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Reducing uncertainty of low-sampling-rate trajectories.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' In 2012 IEEE 28th international conference on data engineering.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' IEEE, 1144–1155.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' [37] Yu Zheng, Lizhu Zhang, Xing Xie, and Wei-Ying Ma.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' 2009.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Mining interesting locations and travel sequences from GPS trajectories.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' In 18th International conference on World Wide Web.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' 791–800.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' [38] Fan Zhou, Hantao Wu, Goce Trajcevski, Ashfaq Khokhar, and Kunpeng Zhang.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' 2020.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Semi-supervised trajectory understanding with poi attention for end-to-end trip recommendation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' ACM Transactions on Spatial Algorithms and Systems (TSAS) 6, 2 (2020), 1–25.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'} +page_content=' Manuscript submitted to ACM' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/KdE3T4oBgHgl3EQfXwqt/content/2301.04482v1.pdf'}