diff --git "a/5dE0T4oBgHgl3EQfegDz/content/tmp_files/load_file.txt" "b/5dE0T4oBgHgl3EQfegDz/content/tmp_files/load_file.txt" new file mode 100644--- /dev/null +++ "b/5dE0T4oBgHgl3EQfegDz/content/tmp_files/load_file.txt" @@ -0,0 +1,1624 @@ +filepath=/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf,len=1623 +page_content='IEEE TRANSACTIONS ON MEDICAL IMAGING, VOL.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' XX, NO.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' XX, XXXX 1 Graph Convolution Based Cross-Network Multi-Scale Feature Fusion for Deep Vessel Segmentation Gangming Zhao, Kongming Liang, Chengwei Pan, Fandong Zhang, Xianpeng Wu, Xinyang Hu, and Yizhou Yu, Fellow, IEEE Abstract— Vessel segmentation is widely used to help with vascular disease diagnosis.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Vessels reconstructed using existing methods are often not sufficiently accurate to meet clinical use standards.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' This is because 3D vessel structures are highly complicated and exhibit unique char- acteristics, including sparsity and anisotropy.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' In this paper, we propose a novel hybrid deep neural network for ves- sel segmentation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Our network consists of two cascaded subnetworks performing initial and refined segmentation respectively.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' The second subnetwork further has two tightly coupled components, a traditional CNN-based U-Net and a graph U-Net.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Cross-network multi-scale feature fusion is performed between these two U-shaped networks to effec- tively support high-quality vessel segmentation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' The entire cascaded network can be trained from end to end.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' The graph in the second subnetwork is constructed according to a vessel probability map as well as appearance and semantic similarities in the original CT volume.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' To tackle the challenges caused by the sparsity and anisotropy of vessels, a higher percentage of graph nodes are distributed in areas that potentially contain vessels while a higher per- centage of edges follow the orientation of potential nearby vessels.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Extensive experiments demonstrate our deep net- work achieves state-of-the-art 3D vessel segmentation per- formance on multiple public and in-house datasets.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Index Terms— Vessel Segmentation, Graph Convolu- tional Networks, Deep Learning This work was funded in part by National Key Research and Develop- ment Program of China (No.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' 2019YFC0118101), National Natural Sci- ence Foundation of China (Grant Nos.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' 62141605 and 82072005), Key Program of Beijing Municipal Natural Science Foundation (No.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='7191003), and Zhejiang Province Key Research & Development Program (No.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' 2020C03073).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' (Corresponding authors: Yizhou Yu and Xinyang Hu.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=') Gangming Zhao and Yizhou Yu are with the Department of Com- puter Science, The University of Hong Kong, Hong Kong (e-mail: gmzhao@connect.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='hku.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='hk, yizhouy@acm.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='org).' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Kongming Liang is with Pattern Recognition and Intelligent Sys- tem Laboratory, School of Artificial Intelligence, Beijing University of Posts and Telecommunications, Beijing, China (e-mail: liangkong- ming@bupt.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='edu.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='cn).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Chengwei Pan is with Institute of Artificial Intelligence, Beihang Uni- versity, Beijing, China (e-mail: pancw@buaa.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='edu.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='cn).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Fandong Zhang is with the AI Lab, Deepwise Healthcare, Beijing, China (e-mail: zhangfandong@deepwise.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='com).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Xinyang Hu and Xianpeng Wu are with Department of Cardiol- ogy of the Second Affiliated Hospital, School of Medicine, Zhejiang University, Hangzhou, China, and Key Laboratory of Cardiovascular of Zhejiang Province, Hangzhou, China (e-mail: hxy0507@zju.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='edu.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='cn, wxpzju123@163.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='com) G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Zhao, K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Liang and C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Pan have equal contribution.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' I.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' INTRODUCTION V ESSEL segmentation is widely used in daily practice to characterize many vascular diseases [1], [2].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' For example, the obstructed vessels may lead to coronary heart disease, which is the worldwide leading cause of death [3], [4].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Since clinicians mainly rely on interactive tracing and segmentation, vessel reconstruction is traditionally a very time-consuming process and affects the efficiency of diagnosis and intervention.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Thus, automatic vessel segmentation can facilitate the reviewing process and plays an important role in medical image analysis.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Over the years, numerous methods have been proposed for automatic vessel segmentation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Due to the state-of-the-art per- formance of convolutional neural networks (CNNs) on a wide range of pixel-level labelling tasks [5]–[7], CNNs has also been applied to vessel segmentation [8]–[10].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Nonetheless, the reconstructed vessels are often not sufficiently accurate to meet clinical use standards.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' This is because vessel structures in 3D CT volumes are highly complicated and exhibit unique characteristics.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' First, since vessels are thin structures, they only occupy a sparse subset of pixels.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Thus, there exists a severe imbalance between vessel and non-vessel pixels.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Second, vessel segments are elongated tubular structures that are highly directional and anisotropic.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Conventional CNNs adopt uniform spatial sampling, and therefore, are inept at modeling such sparse and anisotropic structures, giving rise to broken or incomplete results.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Thus it becomes critical to design deep neural networks that can effectively exploit the aforementioned characteristics of vessels.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' In this paper, we propose a novel hybrid deep neural network for vessel segmentation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Our network consists of two cascaded subnetworks performing initial and refined segmen- tation respectively.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' The second subnetwork further consists of two tightly coupled components, a traditional CNN-based U- shaped network and a graph-based U-shaped network based on graph convolutions.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Cross-network multi-scale feature fusion is performed between these two U-shaped networks to ef- fectively support high-quality vessel segmentation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' The entire cascaded network can be trained from end to end.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' As shown in previous work [11]–[13], graph convolutional networks naturally possess a complex shape modeling ability which is well suited for structured data.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' By setting local arXiv:2301.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='02393v1 [eess.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='IV] 6 Jan 2023 EMB NPS UFFC SignalProcessing Society 0222 IEEE TRANSACTIONS ON MEDICAL IMAGING, VOL.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' XX, NO.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' XX, XXXX regions (supervoxels) in a CT volume as nodes and con- nections among nearby local regions as edges, the whole CT volume can be regarded as a graph.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Specifically, the graph in the second subnetwork is constructed according to a vessel probability map as well as appearance and semantic similarities in the original CT volume.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' To tackle the challenges brought up by the aforementioned characteristics of vessels, a higher percentage of graph nodes are distributed in areas that potentially contain vessels while a higher percentage of edges follow the orientation of potential nearby vessels.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' In addition, the CNN-based U-shaped network is first utilized to extract multi-scale features from the original CT volume.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Then at every scale, the features from the CNN are mapped into node features at the corresponding scale of the graph-based U-shaped network and propagated by the GCN at that scale to counteract sparsity and anisotropy.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Finally, the enhanced features are reversely mapped into the spatial domain and fused with the original features extracted by the CNN-based U-shaped network.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' In summary, our contributions in this paper are three-fold: We propose a cascaded deep neural network for vessel segmentation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' The two subnetworks in the cascade are respectively responsible for initial and refined segmen- tation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' There are a pair of tightly coupled U-shaped networks in the second subnetwork of the cascade, one based on CNN and the other based on GCN.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Cross- network multi-scale feature fusion is performed between these two U-shaped networks to effectively support high- quality vessel segmentation.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' We propose a novel way to transform a dense 3D CT volume to a sparse graph format, which can efficiently represent sparse and anisotropic vessel structures.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' More- over, our method integrates both appearance and semantic similarities for graph construction.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Extensive experiments indicate our deep network achieves state-of-the-art 3D vessel segmentation perfor- mance on multiple public and in-house datasets for coro- nary vessels as well as head and neck vessels, including the public ASOCA dataset.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' II.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' RELATED WORK A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Graph Convolutional Networks Though CNNs achieve impressive performance in many computer vision tasks, they can not efficiently handle graph- structured data.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' To operate directly on graphs, GCN [11] is proposed by using layer-wise propagation rule for neural net- work models.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Li et al.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' [13] further adapted the residual/dense connections and dilated convolutions from CNNs into GCN which can solve vanishing gradient problem and increase the depth of GCN.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Gao et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' [14] proposed graph pooling and unpooling operations to develop an encoder-decoder model on graph for node classification.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' The above methods show that GCNs can achieve promising results on modeling graph structure.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' However, it is still challenging to integrate GCNs into an existing image segmentation framework which is dominated by CNNs.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Multi-scale feature modeling Multi-scale feature modeling can efficiently capture the global contextual dependencies which plays an important role in image segmentation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Kamnitsas et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' [15] proposed a dual pathway deep convolutional neural network.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' The proposed dual pathway network incorporates both local and larger con- textual information by processing the input images at multiple scales simultaneously.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Chen et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' [16] proposed to use several parallel atrous convolution with different rates to model the contextual dependencies at multiple scales.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Zhao et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' [17] proposed a pyramid pooling module to generate feature maps in different levels for scene parsing.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Recently, Tao et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' [18] proposed to combine multi-scale predictions with attention mechanism and achieved the state-of-the-art on Cityscapes and Mapillary Vistas.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' However, all the above methods adopt uniform spatial sampling for multi-scale feature learning and fail to model the sparsity and anisotropy of vessel.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' C.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Medical Image Segmentation Deep learning has become a methodology of choice for medical image segmentation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Ronneberger et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' proposed UNET [19], which has an encoder-decoder architecture.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' To avoid missing spatial information, the decoder features from the previous level are up-sampled and combined with the encoder features at the corresponding level through skip con- nections.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' The 3D version of UNET [20] was further proposed by replacing all 2D operations with their 3D counterparts.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' In addition, a hybrid densely connected UNET [21] was proposed to extract intra-slice features with a 2D DenseUNET and aggregate volumetric contexts with its 3D counterpart.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Dou et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' [22] presented a 3D fully convolutional network equipped with a 3D deep supervision mechanism to combat potential optimization difficulties.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Likewise, Zhu et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' [23] proposed to use eight additional deeply supervised layers in their architec- ture.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Jiang et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' [24] developed two multi-resolution residually connected networks to simultaneously combine features across multiple image resolutions and feature levels.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' ACSNet [25] combines global contexts and local details to deal with the shape and size variations of segmented regions.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Similarly, PraNet [26] aggregates multi-scale features and successively refines the segmentation map through boundary extraction.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Recently, Isensee et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' proposed nnUNET [27], which auto- matically adapts its architecture according to the geometry of input images.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Zhou et al.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' [28] introduced nnFormer, which is an encoder-decoder architecture for volumetric medical image segmentation through the combination of convolution layers and Transformer blocks.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' In addition, the gated axial-attention model in [29] extends the existing architectures and introduces an additional control mechanism with a Local-Global training strategy.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Vessel Segmentation Vessel segmentation plays an important role in medical image analysis.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Kong et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' [9] proposed to use a tree- structured convolutional gated recurrent unit (ConvGRU) layer for modeling the anatomical structure of vessels.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Since the ZHAO et al.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' : GRAPH CONVOLUTION BASED CROSS-NETWORK MULTI-SCALE FEATURE FUSION FOR DEEP VESSEL SEGMENTATION ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='3 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='Stage1 Segmentation ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='Loss ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='UNET-1 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='Encoder ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='UNET-1 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='Decoder ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='𝑨𝟎 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='UNET-0 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='Encoder ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='UNET-0 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='Decoder ' metadata={'source': 
'/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='𝒀𝟎 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='Weight Initialized From UNET-0 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='Loss ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='Trained From Scratch ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='UNET-G ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='Encoder ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='UNET-2 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='Encoder ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='UNET-G ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='Decoder ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='UNET-2 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='Decoder ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='Stage0 Segmentation ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='Graph 
Construction ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='Preliminary Segmentation ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='Cross-network Multi-scale Feature Fusion ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='Forward ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='Backward ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='× ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='Vessel Structure Modeling ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='Forward ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='Backward ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' 1.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Our proposed pipeline for vessel segmentation consists of three stages, preliminary segmentation using a U-Net (UNET-0), graph construction, and final segmentation with a cascaded network, which further consists of two subnetworks with the first subnetwork being a U- Net (UNET-1) and the second subnetwork being a pair of tightly coupled U-shaped networks, a CNN-based U-Net (UNET-2) and a graph U-Net (UNET-G).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' The preliminary segmentation in the first stage is used by the second stage to construct a graph, whose topology becomes the first level graph in UNET-G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' input of the ConvGRU layer is a uniform local patch, their method cannot well exploit the anisotropy of vessels.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Wang et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' [10] proposed a multi-task network to predict a vessel segmentation mask and a distance map.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Values in the map represent distances from the center to the surface of every vessel.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' However, the global structure of vessels is not consid- ered, which limits contextual dependency modeling.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' There is much work [8], [30]–[32] on the utilization of graph neural networks for vessel segmentation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Shin et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' [8] incorporated a GCN into a CNN architecture to exploit the global structure of vessel shape.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' However, only the pixel with maximum vessel probability within every rectangular patch is sampled as a graph node, which limits the representation ability of the graph.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' In addition, GCN features are only calculated at a single scale and do not interact with CNN features.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' In contrast, our framework exhibits a very different way to learn the structural information of vessels.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Specifically, we exploit superpixel generation algorithms such as SLIC [33] to better model the sparsity and anisotropy of vessels, and tightly couple a graph U-Net and a traditional CNN-based U-Net through multi-scale feature fusion across these two networks to better support high-quality vessel segmentation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' III.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' OUR FRAMEWORK A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Overview Consider an input 3D image volume X ∈ RD×H×W , where D, H and W are the spatial depth, height and width respectively.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' The pipeline of our proposed method for vessel segmentation can be decomposed into three stages as shown in Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Preliminary Segmentation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' A U-shaped network, UNET-0, is first utilized to create a probability map of the input image volume.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' This probability map is used for discovering local image regions that have a relatively high probability to contain vessels.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Since the probability map may not be very accurate, to reduce the chance of missing regions that actually contain vessels, we apply the dilation operator, a type of image morphological operator, to the probability map to increase the size of image areas with relatively high probability values.'
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' The result is a preliminary probability map denoted as A0 ∈ (0, 1)D×H×W , which is further thresholded to produce a preliminary segmentation mask denoted as Y 0 ∈ {0, 1}D×H×W .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' The preliminary segmentation mask is used for indicating vessel orientations in regions where vessels are likely to occur.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' In our experiments, we use a 7 × 7 square as the kernel of the dilation operator.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Graph Construction.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' On the basis of the preliminary segmen- tation mask Y 0 and probability map A0, a graph G = (V, E) is constructed with a node set V, and an edge set E.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' To counteract the characteristics of vessel structures including sparsity and anisotropy, a higher percentage of graph nodes are distributed in regions where the preliminary probability map has relatively large values while a higher percentage of edges follow the orientation of the preliminary vessel segmentation mask.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Final Segmentation with a Cascaded Network.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Instead of using a network to refine the preliminary segmentation result obtained in the first stage, we start from scratch and train a cascaded network that takes the original 3D image volume as the input, and performs end-to-end segmentation to produce the final segmentation result.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' This network consists of two cascaded subnetworks performing initial and refined segmentation respectively.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' The first subnetwork is a U-shaped network, UNET-1, that shares the same network 4 IEEE TRANSACTIONS ON MEDICAL IMAGING, VOL.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' XX, NO.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' XX, XXXX architecture with UNET-0 in the first stage, but has different network weights because it is trained together with the second subnetwork.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' The second subnetwork further consists of two tightly coupled components, a traditional CNN-based U-Net (UNET-2) and a graph U-Net (UNET-G) [14].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' The graph G constructed in the second stage becomes the graph with the highest spatial resolution in UNET-G.'
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Cross-network multi- scale feature fusion is performed between UNET-2 and UNET- G to effectively support high-quality vessel segmentation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' UNET-1 and UNET-2 are cascaded.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' The output from UNET-1 includes a hard mask and a soft probability map P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Since the input to UNET-2 is the product of P and the original input image I, the cascaded network is differentiable.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Note that UNET-0 is used to construct graphs as a pre-process.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Once the graphs for all training samples have been precomputed, the entire cascaded network can be trained from end to end through gradient backpropagation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Now let us focus on the second subnetwork.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' For UNET-2, we represent its convolutional encoder and decoder features as Ec 1:L = {Ec l } and Dc 1:L = {Dc l } respectively with L being the number of feature levels.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' The Lth decoder and encoder stages have the lowest spatial resolution.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' UNET- G has the same number of feature levels as UNET-2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' The encoder and decoder stages in UNET-2 and UNET-G have one-to-one correspondence.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' For the lth encoder in UNET-G, its initial graph feature is created as Eg l = f(Ec l , G) through a forward mapping function f(·) proposed in [34] aiming to transform the features between spatial domain and node domain.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' The forward mapping function f(·) is called KNN- map, which utilizes the K nearest neighborhoods to create the corresponding node feature.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Once graph convolutions have been performed on Eg l , the resulting graph convolutional feature is mapped back to the original convolutional feature space of UNET-2 through a backward mapping function g(·) also proposed in [34] and fused with its initial encoder feature Ec l .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Graph Construction Graph Nodes.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Since graph neural networks cannot process dense 3D images directly due to high computational cost, we first group all the pixels from a 3D image into super-pixels and then represent each super-pixel as a graph node.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Here we use the SLIC algorithm [33] for super-pixel generation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' In order to capture the 3D structure of vessels, the local region (super-pixel) represented by a graph node should satisfy the following properties: 1) the summation of the vessel probabilities in the region is high;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' 2) the pixels in the region have similar appearance;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' 3) the shape of the region follows the local shape of the vessels.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' The SLIC algorithm is based on a distance measure, which originally consists of two terms, grayscale difference and Euclidean distance.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' To satisfy the aforementioned properties, we add a third term based on geodesic distance.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' The updated distance measure for SLIC and its three terms are formulated as follows.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' d(i,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' j) = dgray(i,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' j) + ddis(i,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' j) + dgeo(i,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' j),' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' (1) where dgray(i,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' j) = |Xi − Xj|,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='(2) ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='Preliminary ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='Segmentation ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='Y ' metadata={'source': 
'/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='A ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='SP Images ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='10 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='1 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='2 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='3 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='4 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='5 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='6 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='7 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='8 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='9 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='10 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='11 ' metadata={'source': 
'/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='1 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='2 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='3 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='4 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='1 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='2 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='3 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='5 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='4 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='6 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='7 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='8 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='9 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='10 ' metadata={'source': 
'/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='11 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='13 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='12 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='1 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='2 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='4 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='5 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='3 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='6 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='7 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='8 9 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='10 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='11 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='13 ' metadata={'source': 
'/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='12 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='Edge: ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='1: ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='1-2 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='2: ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='2-3 2-4 2-5 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='3: ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='3-4 3-6 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='4: ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='4-6 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='5: ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='5-9 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='6: ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='6-7 6-11 ' metadata={'source': 
'/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='7: ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='7-8 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='8: ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='8-12 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='9: ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='9-10 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='11: 11-12 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='12: 12-13 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='Edge: ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='1: 1-2 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='2: 2-3 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='3: 3-4 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='4: None ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='Edge: ' metadata={'source': 
'/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='1: 1-2 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='2: ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='2-3 2-4 2-5 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='3: 3-4 3-6 3-7 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='4: ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='4-5 4-10 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='5: 5-9 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='6: ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='6-7 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='7: 7-8 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='10: 10-11 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='Edge: ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='1: ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='1-2 1-3 ' metadata={'source': 
'/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='2: 2-3 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='3: ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='3-4 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='4: 4-5 4-6 4-11 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='5: ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='5-11 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='6: 6-7 6-8 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='7: ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='7-8 7-9 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='8: 8-9 8-10 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='10: 11-13 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='Nodes and Edges ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='CT Images ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='1 ' metadata={'source': 
'/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='3 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='2 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' A simple example illustrating the graph construction process.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' ddis(i, j) = � (xi − xj)2 + (yi − yj)2 + (zi − zj)2, (3) dgeo(i, j) = min Q∈Pi,j � q∈Q A0 q∥∇(Xq + X0 q ) · uq∥, (4) where Xi denotes the gray scale of the ith pixel, [xi, yi, zi]T denotes its spatial coordinates, Pi,j represents the complete set of paths from pixel i to pixel j, Q denotes one path in Pi,j, q denotes any pixel on Q, and uq represents the unit tangent vector of path Q at q.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' The geodesic distance between two points is defined as the minimum of the integration of X, X0 and A0 as in (4) among all the paths in Pi,j, where X0 = X ◦ Y 0 and ◦ stands for element-wise multiplication.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' ∇(Xq + X0 q ) means the gradient of Xq + X0 q .' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Xq + X0 q doubles the value in vessel areas, therefore, it will create more graph nodes in vessels because of a larger distance between different nodes in these areas.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' In practice, we use the Dijkstra’s algorithm to calculate the geodesic distance.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' The definition of geodesic distance in (4) ensures that regions potentially containing vessels have a higher density of graph nodes.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Note that the three distance terms have been individually normalized before added together in the overall distance measure.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Graph Edges.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' As there typically exist a large number of graph nodes in a 3D image volume, in this paper, we only consider locally connected graphs to reduce computational cost.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Each node i is only connected to other nearby nodes whose geodesic distance is below a predefined threshold tgeo.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' That is, there exists an edge between nodes i and j if and only if d′ geo(i, j) < tgeo, where d′ geo(i, j) is a modified version of the geodesic distance in (4) where A0 is replaced with (1−A0).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Since our geodesic distance is affected by the vessel mask and probability map, this connection rule implies that the Euclidean distance between two connected nodes has a larger threshold when the nodes are near potential vessels and the ZHAO et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' : GRAPH CONVOLUTION BASED CROSS-NETWORK MULTI-SCALE FEATURE FUSION FOR DEEP VESSEL SEGMENTATION 5 Layers Output size UNET-0,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='1,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='2 en-conv0 256 × 256 × 256 conv(3 × 3 × 3,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' 16) en-conv1 128 × 128 × 128 2 × BuildBlock(3 × 3 × 3,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' 32) en-conv2 64 × 64 × 64 2 × BuildBlock(3 × 3 × 3,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' 64)) en-conv3 32 × 32 × 32 2 × BuildBlock(3 × 3 × 3,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' 128) en-conv4 16 × 
16 × 16 2 × BuildBlock(3 × 3 × 3,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' 256) de-conv4 16 × 16 × 16 2 × BuildBlock(3 × 3 × 3,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' 256) de-conv3 32 × 32 × 32 2 × BuildBlock(3 × 3 × 3,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' 128) de-conv2 64 × 64 × 64 2 × BuildBlock(3 × 3 × 3,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' 64) de-conv1 128 × 128 × 128 2 × BuildBlock(3 × 3 × 3,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' 32) de-conv0 256 × 256 × 256 2 × BuildBlock(3 × 3 × 3,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' 16) classifier 256 × 256 × 256 conv(1 × 1 × 1,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' 2) TABLE I NETWORK ARCHITECTURE OF UNET-0,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='1,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='2 USED IN THE PROPOSED PIPELINE.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' CONVOLUTION LAYERS IN THE ENCODER OF THE ORIGINAL U-NET ARE REPLACED WITH RESIDUAL BLOCKS.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' INSIDE THE BRACKETS ARE THE SHAPE OF THE RESIDUAL BLOCKS, AND OUTSIDE THE BRACKETS IS THE NUMBER OF STACKED BLOCKS IN A STAGE.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' DOWNSAMPLING (MAX POOLING) IS PERFORMED AFTER EN-CONV0, EN-CONV1, EN-CONV2, EN-CONV3 WITH STRIDE 2, RESPECTIVELY.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' UPSAMPLING IS PERFORMED AFTER EACH DE-CONV STAGE, AND THE NUMBER OF INPUT CHANNELS OF EACH LAYER CAN BE FOUND FROM THE PRECEDING LAYER.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' orientation of the edge between the nodes roughly follows the local orientation of the preliminary vessel mask.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' As a result, the constructed graph has denser and longer connections in regions potentially containing vessels.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' In our constructed graph, every edge is associated with an edge weight, which is a product of two components, ew = es wea w, where es w and ea w represent semantic consistency and appearance similarity respectively.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' For a convolutional feature map F in UNET-2, we first create its node representation FV ∈ R|V |×C through the forward mapping function f(·) in [34] on the feature map F.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Then we define the semantic consistency of the edge between nodes i and j as es w(i, j) = σ([F i V , F j V ]ws), (5) where F i V , F j V represent the i-th and j-th node features, ws ∈ R2C is a trainable weight vector fusing the two node features, and σ(·) is the sigmoid activation function.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' [] means a concatenation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' We use the gray-scale information associated with graph nodes to define the appearance similarity of an edge as ea w(i, j) = σ([F i X, F j X]wg) (6) where FX = f(X◦Y 0◦A0, G), wg ∈ R2C is another trainable weight vector fusing the mapped features at the two nodes.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Instead of using the gray-scale information X from the input image volume only, we also include the semantic information Y 0 and A0 from the preliminary segmentation to focus on potential vessel regions.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' A simple example illustrating the above graph construction process is given in Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' C.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Cross-Network Multi-Scale Feature Fusion The features from UNET-2 need to be mapped into the node domain of UNET-G and further enhanced through graph convolutions over the constructed graph structure to better Dataset Avg #Nodes per Image Avg #Edges per Node ASOCA 12000 8.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='12 ACA 9600 7.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='11 HNA 13000 7.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='03 TABLE II STATISTICS OF CONSTRUCTED GRAPHS.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' AVERAGE NUMBER OF NODES PER IMAGE IS CALCULATED USING ALL IMAGES IN A DATASET.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' AND AVERAGE NUMBER OF EDGES PER NODE IS CALCULATED USING ALL NODES IN A DATASET.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Dataset Avg #Nodes per Image Avg #Edges per Node Set1 1: n segmetns is 28000 ASOCA 19010 8.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='67 ACA 14300 7.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='40 HNA 22420 8.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='10 Set1 2: n segmetns is 14000 ASOCA 12000 8.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='12 ACA 9600 7.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='11 HNA 13000 7.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='03 Set1 3: n segmetns is 7000 ASOCA 6020 6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='12 ACA 3110 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='11 HNA 5200 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='03 Set1 4: n segmetns is 3500 ASOCA 3210 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='12 ACA 2930 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='03 HNA 2122 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='14 TABLE III AVERAGE NUMBER OF GRAPH NODES AND EDGES FOR DIFFERENT VALUES OF N_SEGMENTS WHEN MIN_SIZE_FACTOR IS FIXED TO 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' observe global priors of vessel connectivity.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Afterwards we reversely map the enhanced features to the spatial domain of UNET-2 and fuse them with the original features there through a residual connection.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Encoder Feature Fusion.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' The encoder feature map Ec l from the lth stage of UNET-2 is transformed into node features at the corresponding level of UNET-G through the forward mapping function f(·) defined in [34].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Then the mapped fea- tures are fused with the down-sampled encoder features from the previous stage in UNET-G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' A residual graph convolution module Ω(·) is utilized to enhance the fused features for more accurately modeling complex vessel structures and better observing global priors of vessel connectivity.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Therefore, the graph convolutional encoder features at the lth stage of UNET- G are created as Eg l = Ω(f(Ec l , G) + down(Eg l−1)).' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' (7) Then the graph convolutional features Eg l are reversely mapped to the original convolutional feature space of UNET- 2 through the backward mapping function g(·) defined in [34] and fused with its initial encoder feature to produce the enhanced encoder feature at the lth level, El = g(Eg l ) + Ec l .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' (8) Decoder Feature Fusion.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' The decoder feature Dc l from the lth stage of UNET-2 is transformed into node features at the corresponding level of UNET-G through the same forward 6 IEEE TRANSACTIONS ON MEDICAL IMAGING, VOL.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' XX, NO.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' XX, XXXX Ground Truth UNet nnUNet DVS DDT Ours Ground Truth UNet nnUNet DVS DDT Ours Ground Truth UNet nnUNet DVS DDT Ours Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' From left to right, it is the ground truth, the results of UNET, nnUNET, DVS, DDT and our model, respectively.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' The aorta and the coronary vessels are marked with red and green.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Although DDT achieves the best performance compared with other previous state-of-the-art methods, it may generate incomplete vessel masks when the structure of vessels is complicated.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Dataset Avg #Nodes per Image Avg #Edges per Node Set2 1: min size factor is 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='3 ASOCA 8900 8.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='01 ACA 7230 6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='89 HNA 9600 7.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='01 Set2 2: min size factor is 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='4 ASOCA 10300 8.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='02 ACA 8410 7.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='12 HNA 11200 7.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='13 Set2 3: min size factor is 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='5 ASOCA 12000 8.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='12 ACA 9600 7.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='11 HNA 13000 7.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='03 Set2 4 min size factor is 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='6 ASOCA 12100 8.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='13 ACA 9870 7.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='21 HNA 13210 7.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='13 TABLE IV AVERAGE NUMBER OF GRAPH NODES AND EDGES FOR DIFFERENT VALUES OF MIN_SIZE_FACTOR.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' N_SEGMENTS IS FIXED TO 14000.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' mapping function f(·).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Then the mapped features are fused with the up-sampled decoder features from the previous stage in UNET-G, and the fused features are enhanced with the same residual graph convolution module Ω(·) before further fused with the graph encoder feature Eg l through the skip connection at the lth stage of UNET-G.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Thus the graph convolutional decoder features at the lth stage of UNET-G are defined as Dg l = Ω(f(Dc l , G) + up(Dg l+1)) + Eg l .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' (9) Then the graph convolutional decoder features Dg l are re- versely mapped to the original feature space of UNET-2 through the same backward mapping function g(·).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' We further fuse the reversely mapped features with both the initial decoder feature of UNET-2 and the skip-connected enhanced encoder feature El to produce the enhanced decoder feature at the lth level, Dl = g(Dg l ) + Dc l + El.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' (10) The last enhanced decoder feature is used to produce the final segmentation of vessels with a pixel-wise softmax classifier.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Forward and Backward Mappings We adopt the forward and backward mapping functions defined in [34] to map pixel- level features in a CNN-based U-Net to node features in a graph U-Net and vice versa.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' The key consideration during feature mapping design lies in revealing the relations between node and pixel-level features.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' As illustrated in the following equations, the kNN (k Nearest Neighbor) based forward map- ping φk with its auxiliary matrix A aggregates pixel-level features over irregular regions to obtain corresponding node features adaptively according to their spatial relations.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' φk(F, N) = (Qf)T F, (11) ZHAO et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' : GRAPH CONVOLUTION BASED CROSS-NETWORK MULTI-SCALE FEATURE FUSION FOR DEEP VESSEL SEGMENTATION 7 Ground Truth UNet nnUNet DVS DDT Ours Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Sample visual results on the ACA dataset.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' From left to right, it is the ground truth, the results of UNET, nnUNET, DVS, DDT and our model, respectively.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' The aorta and the coronary vessels are marked with red and green.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Although DDT achieves the best performance compared with other previous state-of-the-art methods, it may generate incomplete vessel masks when the structure of vessels is complicated.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Qf = A(Λf)−1, (12) Aij = � 1 if j th node is kNN of i th pixel 0 Otherwise , (13) where N ∈ {V, U} represents the node set corresponding to pixel-level spatial visual features F ∈ RHW ×C, A ∈ RHW ×|N | is an auxiliary matrix that assigns spatial features to k nearest graph nodes, Λf ∈ R|N |×|N | is a diagonal matrix, Λf jj = HW � i=1 Aij, and Qf ∈ RHW ×|N | is a normalized form of A and serves as the forward mapping matrix.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' The backward mapping function ψk projects each graph node feature back to the spatial domain.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' The backward map- ping follows similar design principles as the forward mapping and makes use of the same number of nearest neighbors.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Formally, ψk is formulated as follows.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' ψk(Z, N) = Qr[Z]e, (14) Qr = (Λr)−1A, (15) where N ∈ {V, U} represents the node set of the graph, A ∈ RHW ×|N | is similar to the definition in Equation 13, [·]e indicates the indexing operator which selects nodes in the graph, Λr ∈ RHW ×HW is a diagonal matrix, Λr ii = |N | � j=1 Aij, and Qr ∈ RHW ×|N | is the backward mapping matrix, which is also a normalized form of A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' IV.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' EXPERIMENTS A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Datasets ASOCA Automated Segmentation of Coronary Arteries Dataset (ASOCA) is a public dataset in MICCAI-2020 chal- lenge 1 which aims to segment the coronary artery lumen.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' The dataset consists of a training set of 40 Cardiac Computed Tomography Angiography (CCTA) images and a test set of 20 CCTA images.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' The images in the testing set were anno- tated and verified by experts we invited.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' The original image resolution of the ASOCA dataset is 512×512×N, where N is between 168 and 334.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' ACA Aorta and Coronary Artery Dataset (ACA) is an in- house dataset which contains 1000 CCTA images.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' The dataset is utilized to segment both aorta and coronary arteries.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Each image is annotated by one expert annotator and verified by a second expert.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' We split the dataset into a training set of 800 images, a validation set of 100 images and a test set of 100 images.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' The original image resolution of the ACA dataset is 512×512×N, where N is between 192 and 600.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' HNA Head and Neck Artery Dataset (HNA) is an in-house dataset which contains 800 CT angiography (CTA) images of head and neck.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' The images are annotated in the same way as ACA.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Cerebral, vertebral and carotid arteries are annotated as the target vessel mask.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' The dataset is split into a training set of 640 images, a validation set of 80 images and a test set of 1https://asoca.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='grand-challenge.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='org 8 IEEE TRANSACTIONS ON MEDICAL IMAGING, VOL.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' XX, NO.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' XX, XXXX GT UNet nnUNet DVS DDT Ours Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' 5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Sample visual results on the HNA dataset.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' From left to right, it is the ground truth, the results of UNET, nnUNET, DVS, DDT and our model, respectively.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' 80 images.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' The original image resolution of the HNA dataset is also 512×512×N, where N is between 192 and 600.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Experimental Setup Evaluation Metrics.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Dice coefficient (DICE) and average symmetric surface distance (ASSD) (ASSD is measured in millimeters) are adopted as the evaluation metrics since they are commonly used in medical image segmentation [35].' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' In addition, to evaluate unique characteristics of tubular structure, another two metrics called skeleton recall (SR) and skeleton precision (SP) are defined as follows: SR(S, G) = |S � Q(G)| |Q(G)| , (16) SP(S, G) = |Q(S) � G| |Q(S)| , (17) where S and G are the segmentation result and the ground truth annotation respectively.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' The function Q(·) is used to acquire the skeleton of a tubular mask, which can preserve original vascular topology and connectivity.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Here we use skeletonization function [36] as the implementation of Q(·).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Network Structure and Training.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Each sub-network of the proposed method is a U-shaped network.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' All CNN-based U- shaped networks, including UNET-0, UNET-1 and UNET-2 are based on the original U-Net [19] except that the original convolution layers in its encoder are replaced with residual blocks [37].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' The network architecture of UNET-0,1,2 used in the proposed pipeline is given in Table I.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' UNET-G is a graph U-Net [14].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Each downsampling operation in UNET-G halves the number of graph nodes, and each upsampling operation doubles the number of nodes.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' The feature dimension of every graph node is always set to 256 in all the experiments reported in this paper.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' The input image is always resized to 256×256× 256, and the batch size on a single GPU is 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' The proposed cascaded network is trained by jointly optimizing the weighted cross-entropy loss, Lwbce = −βy·log(p)−(1−y)·log(1−p), and the dice loss, LDice = 1 − 2y·p ∥y∥1+∥p∥1 , where y and p are the ground-truth and predicted masks, respectively.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' We set β = 5 to increase the vessel recall.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' All models are trained for 100 epochs from scratch using PyTorch [38] on NVIDIA Titan Xp pascal GPUs.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' We set the weight decay to 1e-4 and use Adam [39] as the optimizer with the initial learning rate set to 1e-4.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' The learning rate is reduced by a factor of 10 after every 40 epochs.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Graph Hyperparameter Setting.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' We use a 3D version of the SLIC algorithm [33] to generate superpixels.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Two parameters of the algorithm control the total number of superpixels in an image.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' One of them is ‘n segments’, which is the maximum ZHAO et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' : GRAPH CONVOLUTION BASED CROSS-NETWORK MULTI-SCALE FEATURE FUSION FOR DEEP VESSEL SEGMENTATION 9 Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' 6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Sample visual results on the HNA dataset.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' From left to right are the ground truth, the results of UNET, nnUNET, DVS, DDT and our model, respectively.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' number of superpixels, and the other is ‘min size factor’, which defines the ratio between the minimum size of a super- pixel and the average size of a superpixel.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' In the experiments reported in this paper, ‘n segments’ is always set to 14000, and ‘min size factor’ is set between 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='3 and 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='65.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' In a graph, each node i is only connected to other nearby nodes whose geodesic distance is below a predefined threshold tgeo.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' That is, there exists an edge between nodes i and j if and only if d′ geo(i, j) < tgeo, where d′ geo(i, j) is the geodesic distance between nodes i and j.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' tgeo is a hyperparameter that needs to be empirically set only once for each dataset.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' For the ASOCA dataset, tgeo is set to 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='30.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' For the ACA dataset, tgeo is set to 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='35.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' For the HNA dataset, tgeo is set to 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='40.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Table II shows the statistics of graph nodes and edges.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Comparison with the State of the Art We compared our proposed model with existing state- of-the-art algorithms for vessel segmentation on the three datasets.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' The methods in these comparisons include DDT [10], DVS [8], UNET3d [20], nnUNET [27], ResUNET [40], DenseUNET [21], PSP-Net [17] and HMSA [18].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' DDT performs tubular structure modeling and is specifically de- signed for vessel segmentation.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' For medical image analysis, nnUNET is considered a strong baseline as it achieves state- of-the-art performance on many well-established segmentation challenges.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' PSP-Net [17] and HMSA [18] are included for comparison since they are state-of-the-art methods for generic semantic segmentation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' In addition, we include DVS for com- parison since it also uses a GCN for structure modeling.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Since the proposed framework is not limited to a specific backbone network, we integrate it with more powerful backbones, e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='g.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' ResUNET, DenseUNET and H-DenseUNET.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' As shown in Table V, VI and VII, the performance can be improved.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' As shown in Table V, the proposed method achieves the state-of-the-art performance in terms of four evaluation metrics on the ASOCA dataset, and outperforms the top-6 methods in the challenge leaderboard.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Specifically, our method achieves 89.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='91% DICE, 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='530 ASSD 95.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='8% SP and 96.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='0% SR.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' The DICE of our method is higher than that of DDT and the top-1 method in the leaderboard by around 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='5%.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' On the ACA and HNA datasets, the proposed method also achieves the best performance among all the methods considered in the comparisons.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Specifically, the proposed method outperforms DVS by 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='7% and 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='2% on ACA and HNA respectively in terms of DICE.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' This demonstrates that multi-scale feature interaction between CNNs and GCNs is important for vessel structure modeling.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' The above experiments demonstrate the superiority of the proposed method on three vessel segmentation tasks.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Com- pared to other methods [17], [18], [20], [27], [40], the main advantage of our approach is that it constructs a vessel graph to capture the 3D structure of vessels.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' On the basis of the constructed vessel graph, our proposed method uses GCNs to enhance feature propagation along vessel structures, and improve the interconnection between isolated vessel predic- GT UNet nnUNet DVS DDT Ours10 IEEE TRANSACTIONS ON MEDICAL IMAGING, VOL.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' XX, NO.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' XX, XXXX Method DICE (%) ASSD SP (%) SR (%) LB-1st 88.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='56 LB-2nd 88.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='00 LB-3rd 87.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='94 LB-4th 87.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='36 LB-5th 87.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='17 LB-6th 87.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='11 DDT [10] 88.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='21 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='571 95.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='0 94.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='5 DVS [8] 87.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='32 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='582 94.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='1 93.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='2 UNET3d [20] 83.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='20 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='644 93.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='0 91.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='0 ResUNET [40] 83.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='20 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='644 93.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='0 91.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='0 DenseUNET [21] 83.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='20 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='644 93.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='0 91.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='0 H-DenseUNET [21] 83.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='20 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='644 93.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='0 91.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='0 nnUNET [27] 85.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='11 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='572 94.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='3 92.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='4 PSP-Net [17] 84.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='12 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='593 94.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='2 92.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='1 HMSA [18] 86.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='23 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='561 95.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='2 93.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='3 Ours 89.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='89 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='544 95.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='6 95.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='9 Ours+ResUNET 89.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='90 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='541 95.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='7 95.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='9 Ours+DenseUNET 89.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='89 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='540 95.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='7 95.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='9 Ours+H-DenseUNET 89.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='91 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='530 95.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='8 96.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='0 TABLE V PERFORMANCE COMPARISON ON THE ASOCA DATASET AMONG STATE-OF-THE-ART SEGMENTATION ALGORITHMS.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' THE RESULTS OF MICCAI LEADERBOARD ARE SHOWN IN HTTPS://ASOCA.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' GRAND-CHALLENGE.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='ORG/EVALUATION/CHALLENGE/LEADERBOARD/, WHICH ONLY SHOWS THE PERFORMANCE IN TERMS OF DICE.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' FOR OTHER METHODS, WE EVALUATE THEM IN TERMS OF FOUR PERFORMANCE METRICS INCLUDING DICE, ASSD, SP AND SR.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' tions.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Although DVS [8] and our method both exploit GCNs, the major distinction is that we use a super-pixel algorithm to generate graph nodes from a preliminary segmentation and the pixel values of the input image.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Leveraging super-pixels makes our constructed graph more completely cover potential vessel regions, and therefore, improve the skeleton recall.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' In addition, we make use of forward and backward feature mappings to perform more thorough feature fusion between the CNN-based UNET and the graph UNET.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' To further validate the robustness of the proposed method, we collect two subsets of 35 hard samples from the test sets of ACA and HNA, respectively.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Arteries in the chosen samples have calcifications, stents or tortuous segments, which significantly increase the difficulty of vessel segmentation in clinical practice.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Experimental results in Table VIII and Table IX show that the proposed method performs the best on these two subsets, which demonstrates the robustness of the proposed method on hard samples.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Furthermore, we compare the inference time complexity of state-of-the-art networks in Table X.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' As shown in the table, the inference time of our method for a computed tomography an- giography image is 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='190/0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='193/0.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='198 second on the ASOCA, ACA and HNA datasets, respectively.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Since we use a GPU- based implementation [41] of the SLIC algorithm to generate super-pixels, the graph construction step of our method is very efficient, and the overall inference time of our method is comparable to that of other methods.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Ablation Study Ablation of graph node construction.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' We investigate the ef- fectiveness of the three components of Eqn.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' (1) for graph node construction on the ACA dataset.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' As shown in Table XIV, all Method DICE (%) ASSD SP (%) SR (%) DDT [10] 91.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='2 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='497 96.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='0 89.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='2 DVS [8] 90.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='1 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='503 95.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='1 88.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='3 UNET3d [20] 87.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='3 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='711 94.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='0 89.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='4 ResUNET [40] 88.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='4 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='612 95.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='1 89.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='6 DenseUNET [21] 88.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='9 0.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='568 95.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='2 89.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='4 H-DenseUNET [21] 89.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='9 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='528 95.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='3 90.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='1 nnUNET [27] 88.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='3 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='630 95.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='5 90.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='6 PSP-Net [17] 89.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='0 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='642 95.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='0 89.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='6 HMSA [18] 90.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='2 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='592 96.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='7 90.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='1 Ours 94.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='2 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='448 97.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='1 95.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='1 Ours+ResUNET 94.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='6 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='445 97.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='3 95.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='2 Ours+DenseUNET 94.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='8 0.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='444 97.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='3 95.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='3 Ours+H-DenseUNET 94.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='8 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='443 97.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='3 95.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='2 TABLE VI PERFORMANCE COMPARISON ON THE ACA DATASET AMONG STATE-OF-THE-ART SEGMENTATION ALGORITHMS.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Method DICE (%) ASSD SP (%) SR (%) DDT [10] 92.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='4 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='401 96.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='1 93.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='3 DVS [8] 91.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='3 0.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='472 97.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='2 92.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='4 UNET3d [20] 87.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='3 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='664 95.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='1 90.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='5 ResUNET [40] 87.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='7 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='661 95.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='2 90.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='7 DenseUNET [21] 88.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='1 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='618 95.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='3 90.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='8 H-DenseUNET [21] 89.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='1 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='588 95.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='7 92.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='0 nnUNET [27] 89.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='9 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='600 94.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='3 91.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='2 PSP-Net [17] 90.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='1 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='593 94.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='7 90.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='0 HMSA [18] 91.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='4 0.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='543 95.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='6 91.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='1 Ours 94.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='3 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='379 97.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='1 96.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='3 Ours+ResUNET 94.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='4 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='376 97.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='1 96.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='6 Ours+DenseUNET 94.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='4 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='375 97.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='2 96.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='6 Ours+H-DenseUNET 94.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='5 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='374 97.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='2 96.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='5 TABLE VII PERFORMANCE COMPARISON ON THE HNA DATASET AMONG STATE-OF-THE-ART SEGMENTATION ALGORITHMS.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' the components play important roles in the node construction process, and dgeo is the most important for the segmentation performance.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Removing A0 in dgeo leads to 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='6% performance drop and removing X0 leads to about 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='2% performance drop in terms of DICE.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' We further investigate how the hy- perparameter ‘n segments’ and ‘min size factor’ of the SLIC algorithm affect the performance of our method.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' For the ablation study on ‘n segments’, we first fix ‘min size factor’ to 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='5 and change the value of ‘n segments’ to 28000 (Set1 1), 14000 (Set1 2), 7000 (Set1 3), and 3500 (Set1 4).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Then we fix ‘n segments’ to 14000 and change the value of ‘min size factor’ to 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='3 (Set2 1), 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='4 (Set2 2), 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='5 (Set2 3), and 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='6 (Set2 4).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' For the above eight settings, we demonstrate how the number of nodes and edges changes in Table III and Table IV.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Then we conduct an ablation study on all three datasets and report the results in Table XV.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' From the exper- imental results, we can see that our model achieves the best performance by setting ‘n segments’ and ‘min size factor’ to 14000 and 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='5, respectively.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' The corresponding number of nodes per image is 12000, 9600, and 13000 on the ASOCA, ACA and HNA datasets, respectively.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Ablation of graph edge construction.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Next, we investi- gate the effectiveness of the two components of graph edge construction.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' As shown in Table XV, both es w and ea w are important for vessel segmentation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Furthermore, if we discard ZHAO et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' : GRAPH CONVOLUTION BASED CROSS-NETWORK MULTI-SCALE FEATURE FUSION FOR DEEP VESSEL SEGMENTATION 11 Method DICE (%) ASSD SP (%) SR (%) DDT [10] 87.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='1 0.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='511 92.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='0 88.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='2 DVS [8] 86.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='2 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='544 91.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='1 87.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='3 UNET3d [20] 86.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='0 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='722 91.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='0 87.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='4 ResUNET [40] 86.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='4 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='712 91.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='2 87.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='6 DenseUNET [21] 86.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='5 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='701 91.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='4 87.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='6 H-DenseUNET [21] 87.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='1 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='690 91.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='6 87.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='9 nnUNET [27] 87.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='2 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='631 92.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='5 86.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='6 PSP-Net [17] 84.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='0 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='742 91.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='0 84.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='6 HMSA [18] 85.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='2 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='762 89.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='7 85.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='2 Ours 92.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='1 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='453 96.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='4 93.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='3 Ours+ResUNET 92.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='2 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='451 96.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='5 93.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='5 Ours+DenseUNET 92.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='4 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='450 96.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='5 93.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='6 Ours+H-DenseUNET 92.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='5 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='448 96.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='6 93.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='6 TABLE VIII PERFORMANCE COMPARISON ON A SUBSET OF HARD SAMPLES FROM THE ACA DATASET AMONG STATE-OF-THE-ART SEGMENTATION ALGORITHMS.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' ARTERIES IN THESE HARD SAMPLES HAVE CALCIFICATIONS, STENTS OR TORTUOUS SEGMENTS.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Method DICE (%) ASSD SP (%) SR (%) DDT [10] 88.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='4 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='504 94.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='1 91.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='3 DVS [8] 87.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='3 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='584 93.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='2 90.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='4 UNET3d [20] 86.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='4 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='654 92.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='1 89.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='5 ResUNET [40] 86.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='6 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='631 92.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='6 89.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='9 DenseUNET [21] 86.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='7 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='598 92.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='7 89.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='9 H-DenseUNET [21] 86.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='9 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='552 92.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='9 89.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='9 nnUNET [27] 87.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='9 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='554 93.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='3 90.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='5 PSP-Net [17] 87.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='1 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='567 92.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='7 89.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='0 HMSA [18] 88.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='4 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='542 92.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='1 91.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='3 Ours 90.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='1 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='449 96.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='0 94.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='2 Ours+ResUNet 90.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='6 0.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='441 96.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='2 94.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='3 Ours+DenseUNet 90.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='6 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='440 96.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='1 94.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='5 Ours+H-DenseUNet 90.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='8 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='432 96.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='3 94.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='6 TABLE IX PERFORMANCE COMPARISON ON A SUBSET OF HARD SAMPLES FROM THE HNA DATASET AMONG STATE-OF-THE-ART SEGMENTATION ALGORITHMS.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' ARTERIES IN THESE HARD SAMPLES HAVE CALCIFICATIONS OR TORTUOUS SEGMENTS.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' the two edge terms and use the traditional binary edge, the performance drops by 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='7% in terms of DICE.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Ablation of cross-network feature fusion.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' To show the effectiveness of cross-network feature fusion, we first discard UNET-G of our proposed framework and only keep the cascaded UNET.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' As shown in Tables XI, XII and XIII, the performance drops by 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='0%, 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='9% and 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='77% on ACA, HNA and ASOCA respectively, which further validates the impor- tance of vessel structure modeling.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' In addition, we find that the improvement of UNET-G on ASOCA dataset is much more significant than ACA and HNA.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' As the training set of ASOCA only contains 40 CT images, this demonstrates that CNNs cannot well exploit the characteristics of vessels when the size of training data is small.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Then we evaluate the effectiveness of using multi-scale fusion and graph convolution.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Experimental results show that both components are important for vessel segmentation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' E.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Visualization As shown in Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' 3, the proposed method generates higher quality vessel masks than other state-of-the-art algorithms, including DDT, in most of the cases.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Specifically, the proposed Method ASOCA ACA HNA DDT [10] 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='182 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='184 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='187 DVS [8] 0.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='186 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='187 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='190 UNET3d [20] 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='132 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='134 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='136 nnUNET [27] 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='201 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='204 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='206 ResUNET [40] 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='136 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='137 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='139 DenseUNET [21] 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='141 0.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='144 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='147 H-DenseUNET [21] 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='139 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='142 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='146 PSP-Net [17] 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='142 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='144 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='145 HMSA [18] 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='341 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='344 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='347 Ours 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='190 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='193 0.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='198 Ours+ResUNET 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='192 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='195 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='201 Ours+DenseUNET 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='196 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='198 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='204 Ours+H-DenseUNET 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='195 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='197 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='202 TABLE X COMPARISON OF INFERENCE TIME AMONG STATE-OF-THE-ART SEGMENTATION ALGORITHMS ON THE ASOCA, ACA AND HNA DATASETS.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' THE AVERAGE INFERENCE TIME OF EACH ALGORITHM ON EACH DATASET IS SHOWN.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' THE UNIT IS SECOND PER SAMPLE.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' method can well exploit vessel structures and generate more complete vessel masks.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' In comparison to the proposed method, DDT may generate isolated segmentation masks since it is incapable of modeling the global structure of vessels.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' 4 and Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' 6 further visualize vessel segmentation results from different methods on the ACA and HNA datasets respectively.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' We add more examples for qualitative comparison in Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' 7.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' The good cases show that our GCN-based cascaded network can improve vessel connectivity among individual vessel pre- dictions and achieve a higher skeleton recall.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' In the meantime, most of the false positive predictions can be removed.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' From the bad case, we find that the proposed method is limited when the initial segmentation is far from the ground truth.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' In such cases, vessel segmentation errors in the initial segmentation Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' 7.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Sample visual results with and without the graph module on the ACA dataset.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' From left to right, it is the ground truth, the result without the graph module and the result with the graph module, respectively.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' 12 IEEE TRANSACTIONS ON MEDICAL IMAGING, VOL.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' XX, NO.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' XX, XXXX UNET-0 UNET-G Cfeature in UNET-G Graph Convolution Ω DICE (%) ASSD SP (%) SR (%) ✓ ✓ ✓ ✓ 94.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='2 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='448 97.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='1 95.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='1 ✓ ✓ ✓ ⊠ 93.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='1 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='469 95.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='3 94.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='2 ✓ ✓ ⊠ ✓ 92.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='8 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='487 95.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='1 94.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='0 ⊠ 92.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='2 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='469 95.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='3 94.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='2 ⊠ ✓ ✓ ✓ 92.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='4 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='470 95.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='9 92.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='7 TABLE XI EFFECTIVENESS OF DIFFERENT COMPONENTS ON THE ACA DATASET.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' ‘UNET-G’ MEANS THE GRAPH UNET STRUCTURE ON OUR MODEL.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' IF IT IS REMOVED, OUR FRAMEWORK IS DEGENERATED INTO A CASCADED MODEL WITH TWO CNN-UNET STRUCTURES.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' ‘CFEATURE IN UNET-G’ MEANS WE FUSE CNN FEATURES OF DIFFERENT STAGES INTO UNET-G .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' IF IT IS DISCARDED, THE FEATURES OF UNET-G ARE ONLY ACQUIRED FROM ITS FIRST GRAPH FEATURES Eg 1 THAT IS ACQUIRED BY CONDUCTING FORWARD MAPPING f ON Ec 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' ‘GRAPH CONVOLUTION Ω’ AIMS TO PROPAGATE MESSAGE AND FUSE THE CNN FEATURES INTO UNET-G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' WE UTILIZE IT TO COMPARE THE IMPORTANCE OF THE VESSEL GRAPH MODELLING ABILITY.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' UNET-0 UNET-G Cfeature in UNET-G Graph Convolution Ω DICE (%) ASSD SP (%) SR (%) ✓ ✓ ✓ ✓ 94.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='3 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='379 97.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='1 96.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='3 ✓ ✓ ✓ ⊠ 93.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='1 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='412 96.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='3 95.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='4 ✓ ✓ ⊠ ✓ 93.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='0 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='434 96.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='1 95.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='2 ⊠ 92.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='4 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='471 95.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='9 94.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='8 ⊠ ✓ ✓ ✓ 92.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='6 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='462 96.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='1 94.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='9 TABLE XII EFFECTIVENESS OF DIFFERENT COMPONENTS ON THE HNA DATASET.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' UNET-0 UNET-G Cfeature in UNET-G Graph Convolution Ω DICE (%) ASSD SP (%) SR (%) ✓ ✓ ✓ ✓ 89.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='89 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='544 95.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='6 95.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='9 ✓ ✓ ✓ ⊠ 88.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='03 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='567 95.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='0 94.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='7 ✓ ✓ ⊠ ✓ 88.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='01 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='568 94.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='8 94.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='4 ⊠ 87.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='12 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='579 94.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='8 93.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='4 ⊠ ✓ ✓ ✓ 87.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='91 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='573 94.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='9 93.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='8 TABLE XIII EFFECTIVENESS OF DIFFERENT COMPONENTS ON THE ASOCA DATASET.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' cannot be completely corrected by our cascaded network.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' V.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' CONCLUSIONS AND FUTURE WORK In this paper, we have presented a cascaded deep neural network for vessel segmentation on CTA images.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Our ap- proach represents a new paradigm for modeling the structural information of 3D vessels using deep neural networks through the interaction between a pair of CNN-based U-Net and graph U-Net.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' By fusing the features across these two types of networks, our method successfully tackles the challenges brought up by the sparsity and anisotropy of vessel structures.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Extensive experiments on both public and in-house datasets verify the superiority and effectiveness of our method.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' By constructing a vessel graph to complement CNNs, our method not only outperforms baseline methods but also achieves the state-of-the-art performance with DICE 89.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='91/94.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='8/94.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='5 on the ASOCA/ACA/HNA datasets, respectively.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Our proposed framework provides a stronger spatial struc- ture representation by learning 3D vessel connectivity priors.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Our future work includes 1) building a more powerful graph neural network to enhance message passing in our cross- network feature fusion module, 2) investigating better graph construction methods by exploiting more domain knowledge from medical experts, and 3) building a high-quality annotated dataset and a friendly open-source code base for 3D vessel segmentation tasks.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Acknowledgment The retrospective study on our in-house datasets has been approved by the institutional review board of the Second Affiliated Hospital of Zhejiang University School of Medicine, and was carried out following the principles of the Declaration of Helsinki.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' REFERENCES [1] F.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Gr´elard, F.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Baldacci, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Vialard, and J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='-P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Domenger, “New methods for the geometrical analysis of tubular organs,” Medical image analysis, vol.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' 42, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' 89–101, 2017.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' [2] J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Leipsic, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Abbara, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Achenbach, R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Cury, J.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Earls, G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Mancini, K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Nieman, G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Pontone, and G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Raff, “Scct guidelines for the interpretation and reporting of coronary ct angiography: a report of the society of cardiovascular computed tomography guidelines committee,” Journal of cardiovascular computed tomography, vol.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' 8, no.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' 5, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' 342– 358, 2014.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' [3] W.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Organization et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=', “Fact sheet: the top ten causes of death,” Fact sheet, no.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' 310, 2008.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' [4] V.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Roger, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Go, D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Lloyd-Jones, R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Adams, J.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Berry, T.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Brown, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Carnethon, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Dai, G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' De Simone, E.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Ford et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=', “Heart disease and stroke statistics—2011 update: a report from the american heart association,” Circulation, vol.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' 123, no.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' 4, pp.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' e18–e209, 2011.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' [5] J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Long, E.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Shelhamer, and T.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Darrell, “Fully convolutional networks for semantic segmentation,” in Proceedings of the IEEE conference on computer vision and pattern recognition, 2015, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' 3431–3440.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' [6] Z.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Huang, X.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Wang, L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Huang, C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Huang, Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Wei, and W.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Liu, “Ccnet: Criss-cross attention for semantic segmentation,” in Proceedings of the IEEE International Conference on Computer Vision, 2019, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' 603–612.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' ZHAO et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' : GRAPH CONVOLUTION BASED CROSS-NETWORK MULTI-SCALE FEATURE FUSION FOR DEEP VESSEL SEGMENTATION 13 dgray ddis dgeo DICE ASSD SP SR X0 A0 ✓ ✓ ✓ ✓ 94.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='2 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='448 97.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='1 95.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='1 ⊠ ✓ ✓ ✓ 93.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='2 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='462 95.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='3 94.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='2 ✓ ⊠ ✓ ✓ 93.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='3 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='465 95.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='2 93.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='1 ✓ ✓ ⊠ ✓ 93.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='0 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='478 94.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='1 92.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='2 ✓ ✓ ✓ ⊠ 93.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='6 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='487 95.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='1 94.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='2 ⊠ ⊠ ✓ ✓ 93.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='2 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='488 94.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='2 93.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='3 ⊠ ✓ ⊠ ✓ 93.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='7 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='481 95.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='0 94.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='3 ⊠ ✓ ✓ ⊠ 93.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='8 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='472 95.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='3 94.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='0 ✓ ⊠ ⊠ ✓ 92.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='9 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='512 94.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='0 94.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='7 ✓ ⊠ ✓ ⊠ 92.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='8 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='522 93.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='7 94.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='2 ✓ ✓ ⊠ ⊠ 92.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='8 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='513 94.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='1 94.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='0 ⊠ ⊠ ⊠ ✓ 92.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='1 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='552 93.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='2 93.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='5 ⊠ ⊠ ✓ ⊠ 92.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='0 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='561 93.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='1 93.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='2 ⊠ ✓ ⊠ ⊠ 92.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='3 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='557 93.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='0 93.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='1 ✓ ⊠ ⊠ ⊠ 92.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='1 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='562 93.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='4 93.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='3 ⊠ ⊠ ⊠ ⊠ 92.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='1 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='541 95.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='1 92.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='1 TABLE XIV EFFECTIVENESS OF GRAPH NODE SET CONSTRUCTION ON THE ACA DATASET.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' WE REMOVE DIFFERENT COMPONENTS OF GRAPH NODES TO EXPLORE THEIR INFLUENCE ON OUR FRAMEWORK.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' NOTE THAT DICE, SP AND SR ARE PRESENTED AS PERCENTAGE.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' es w ea w DICE (%) ASSD SP (%) SR (%) Y 0 A0 ✓ ✓ ✓ 94.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='2 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='448 97.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='1 95.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='1 ✓ ✓ ⊠ 93.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='6 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='465 91.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='3 90.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='7 ✓ ⊠ ✓ 93.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='9 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='462 91.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='6 90.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='1 ⊠ ✓ ✓ 93.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='7 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='461 91.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='0 89.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='8 ✓ ⊠ ⊠ 92.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='4 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='466 90.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='2 88.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='8 ⊠ ✓ ⊠ 92.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='3 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='472 90.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='1 87.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='4 ⊠ ⊠ ✓ 92.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='2 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='486 90.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='3 88.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='1 ⊠ ⊠ ⊠ 92.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='5 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='484 86.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='1 85.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='2 TABLE XV EFFECTIVENESS OF GRAPH EDGE SET CONSTRUCTION ON THE ACA DATASET.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' WE REMOVE DIFFERENT COMPONENTS OF GRAPH EDGES TO EXPLORE THEIR INFLUENCE ON OUR FRAMEWORK.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' IF ALL COMPONENTS ARE REMOVED, GRAPH EDGES BECOME THE TRADITIONAL BINARY EDGES.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' [7] J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Fu, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Liu, H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Tian, Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Li, Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Bao, Z.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Fang, and H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Lu, “Dual attention network for scene segmentation,” in Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, 2019, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' 3146–3154.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' [8] S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Shin, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Lee, I.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Yun, and K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Lee, “Deep vessel segmentation by learning graphical connectivity,” Medical image analysis, vol.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' 58, p.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' 101556, 2019.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' [9] B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Kong, X.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Wang, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Bai, Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Lu, F.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Gao, K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Cao, J.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Xia, Q.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Song, and Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Yin, “Learning tree-structured representation for 3d coronary artery segmentation,” Computerized Medical Imaging and Graphics, vol.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' 80, p.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' 101688, 2020.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' [10] Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Wang, X.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Wei, F.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Liu, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Chen, Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Zhou, W.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Shen, E.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' K.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Fishman, and A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Yuille, “Deep distance transform for tubular structure segmentation in ct scans,” in Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2020, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' 3833–3842.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' [11] T.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' N.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Kipf and M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Welling, “Semi-supervised classification with graph convolutional networks,” arXiv preprint arXiv:1609.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='02907, 2016.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' [12] P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Veliˇckovi´c, G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Cucurull, A.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Casanova, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Romero, P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Lio, and Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Ben- gio, “Graph attention networks,” arXiv preprint arXiv:1710.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='10903, 2017.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' [13] G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Li, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Muller, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Thabet, and B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Ghanem, “Deepgcns: Can gcns go as deep as cnns?”' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' in Proceedings of the IEEE International Conference on Computer Vision, 2019, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' 9267–9276.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' [14] H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Gao and S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Ji, “Graph u-nets,” in Proceedings of the 36th Interna- tional Conference on Machine Learning, 2019.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' [15] K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Kamnitsas, C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Ledig, V.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' F.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Newcombe, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Simpson, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Kane, D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' K.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Menon, D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Rueckert, and B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Glocker, “Efficient multi-scale 3d cnn with fully connected crf for accurate brain lesion segmentation,” Medical Image Analysis, vol.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' 36, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' 61–78, 2017.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' [16] L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='-C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Chen, G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Papandreou, F.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Schroff, and H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Adam, “Rethinking Method DICE (%) ASSD SP (%) SR (%) ASOCA Dataset Set1 1 88.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='12 0.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='593 94.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='1 95.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='1 Set1 2 89.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='89 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='544 95.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='6 95.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='9 Set1 3 89.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='01 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='566 93.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='1 94.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='2 Set1 4 88.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='76 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='612 93.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='0 93.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='8 Set2 1 88.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='78 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='641 93.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='3 94.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='0 Set2 2 88.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='64 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='633 93.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='1 93.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='9 Set2 3 89.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='89 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='544 95.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='6 95.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='9 Set2 4 87.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='19 0.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='646 92.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='9 94.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='1 ACA Dataset Set1 1 93.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='46 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='510 95.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='4 95.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='2 Set1 2 94.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='20 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='448 97.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='1 95.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='1 Set1 3 93.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='12 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='534 95.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='1 93.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='2 Set1 4 93.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='22 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='512 96.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='1 94.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='3 Set2 1 93.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='80 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='476 96.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='2 94.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='3 Set2 2 92.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='91 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='487 94.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='4 93.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='2 Set2 3 94.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='20 0.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='448 97.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='1 95.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='1 Set2 4 93.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='12 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='493 96.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='2 93.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='1 HNA Dataset Set1 1 89.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='12 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='498 95.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='1 94.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='0 Set1 2 90.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='10 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='449 96.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='0 94.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='2 Set1 3 88.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='81 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='564 93.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='2 92.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='9 Set1 4 89.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='11 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='571 92.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='6 91.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='7 Set2 1 88.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='24 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='464 95.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='0 94.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='0 Set2 2 89.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='98 0.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='541 95.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='1 93.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='9 Set2 3 90.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='10 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='449 96.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='0 94.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='2 Set2 4 90.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='01 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='448 95.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='7 93.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='7 TABLE XVI PERFORMANCE COMPARISON ON THE ASOCA, ACA, AND HNA DATASETS AMONG DIFFERENT SETTINGS OF N_SEGMENTS AND MIN_SIZE_FACTOR.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' PERFORMANCE IS MEASURED IN TERMS OF FOUR METRICS INCLUDING DICE, ASSD, SP AND SR.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' atrous convolution for semantic image segmentation,” arXiv preprint arXiv:1706.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='05587, 2017.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' [17] H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Zhao, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Shi, X.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Qi, X.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Wang, and J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Jia, “Pyramid scene parsing network,” in Proceedings of the IEEE conference on computer vision and pattern recognition, 2017, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' 2881–2890.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' [18] A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Tao, K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Sapra, and B.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Catanzaro, “Hierarchical multi-scale attention for semantic segmentation,” arXiv preprint arXiv:2005.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='10821, 2020.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' [19] O.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Ronneberger, P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Fischer, and T.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Brox, “U-net: Convolutional networks for biomedical image segmentation,” in International Conference on Medical image computing and computer-assisted intervention.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Springer, 2015, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' 234–241.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' [20] ¨Ozg¨un C¸ ic¸ek, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Abdulkadir, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' S.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Lienkamp, T.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Brox, and O.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Ron- neberger, “3d u-net: Learning dense volumetric segmentation from sparse annotation,” in International Conference on Medical Image Computing and Computer-Assisted Intervention, 2016, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' 424–432.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' [21] X.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Li, H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Chen, X.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Qi, Q.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Dou, C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='-W.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Fu, and P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='-A.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Heng, “H-denseunet: hybrid densely connected unet for liver and tumor segmentation from ct volumes,” IEEE transactions on medical imaging, vol.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' 37, no.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' 12, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' 2663–2674, 2018.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' [22] Q.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Dou, L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Yu, H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Chen, Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Jin, X.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Yang, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Qin, and P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='-A.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Heng, “3d deeply supervised network for automated segmentation of volumetric medical images,” Medical Image Analysis, vol.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' 41, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' 40–54, 2017, special Issue on the 2016 Conference on Medical Image Computing and Computer Assisted Intervention (Analog to MICCAI 2015).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' [Online].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Available: https://www.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='sciencedirect.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='com/ science/article/pii/S1361841517300725 [23] Q.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Zhu, B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Du, B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Turkbey, P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' L.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Choyke, and P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Yan, “Deeply-supervised cnn for prostate segmentation,” in 2017 International Joint Conference on Neural Networks (IJCNN), 2017, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' 178–184.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' [24] J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Jiang, Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='-C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Hu, C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='-J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Liu, D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Halpenny, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Hellmann, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' O.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Deasy, G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Mageras, and H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Veeraraghavan, “Multiple resolution residually connected feature streams for automatic lung tumor segmentation from ct images,” IEEE Transactions on Medical Imaging, vol.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' 38, no.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' 1, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' 134–144, 2019.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' [25] R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Zhang, G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Li, Z.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Li, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Cui, D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Qian, and Y.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Yu, “Adaptive context selection for polyp segmentation,” in International Conference on Med- ical Image Computing and Computer-Assisted Intervention.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Springer, 2020, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' 253–262.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' 14 IEEE TRANSACTIONS ON MEDICAL IMAGING, VOL.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' XX, NO.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' XX, XXXX [26] D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='-P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Fan, G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='-P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Ji, T.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Zhou, G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Chen, H.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Fu, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Shen, and L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Shao, “Pranet: Parallel reverse attention network for polyp segmentation,” in International Conference on Medical Image Computing and Computer- Assisted Intervention.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Springer, 2020, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' 263–273.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' [27] F.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Isensee, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Petersen, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Klein, D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Zimmerer, P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' F.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Jaeger, S.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Kohl, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Wasserthal, G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Koehler, T.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Norajitra, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Wirkert et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=', “nnu-net: Self- adapting framework for u-net-based medical image segmentation,” arXiv preprint arXiv:1809.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='10486, 2018.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' [28] H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Zhou, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Guo, Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Zhang, L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Yu, L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Wang, and Y.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Yu, “nnformer: Interleaved transformer for volumetric segmentation,” CoRR, vol.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' abs/2109.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='03201, 2021.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' [Online].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Available: https://arxiv.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='org/abs/2109.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' 03201 [29] J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Valanarasu, P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Oza, I.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Hacihaliloglu, and V.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' M.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Patel, “Medical transformer: Gated axial-attention for medical image segmentation,” in Medical Image Computing and Computer Assisted Intervention – MIC- CAI 2021, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' de Bruijne, P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Cattin, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Cotin, N.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Padoy, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Speidel, Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Zheng, and C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Essert, Eds.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Cham: Springer International Publishing, 2021, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' 36–46.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' [30] D.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Zhang, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Liu, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Chaganti, E.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Gibson, Z.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Xu, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Grbic, W.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Cai, and D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Comaniciu, “Graph attention network based pruning for recon- structing 3d liver vessel morphology from contrasted ct images,” arXiv preprint arXiv:2003.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='07999, 2020.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' [31] L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Yao, P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Jiang, Z.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Xue, Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Zhan, D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Wu, L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Zhang, Q.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Wang, F.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Shi, and D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Shen, “Graph convolutional network based point cloud for head and neck vessel labeling,” in International Workshop on Machine Learning in Medical Imaging.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Springer, 2020, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' 474–483.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' [32] X.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Xu, T.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Wang, Y.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Shi, H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Yuan, Q.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Jia, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Huang, and J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Zhuang, “Whole heart and great vessel segmentation in congenital heart disease using deep neural networks and graph matching,” in International Conference on Medical Image Computing and Computer-Assisted In- tervention.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Springer, 2019, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' 477–485.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' [33] R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Achanta, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Shaji, K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Smith, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Lucchi, P.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Fua, and S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' S¨usstrunk, “Slic superpixels compared to state-of-the-art superpixel methods,” IEEE transactions on pattern analysis and machine intelligence, vol.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' 34, no.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' 11, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' 2274–2282, 2012.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' [34] Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Liu, F.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Zhang, Q.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Zhang, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Wang, Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Wang, and Y.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Yu, “Cross-view correspondence reasoning based on bipartite graph convolutional net- work for mammogram mass detection,” in Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2020, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' 3812–3822.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' [35] Q.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Yue, X.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Luo, Q.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Ye, L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Xu, and X.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Zhuang, “Cardiac segmentation from lge mri using deep neural network incorporating shape and spatial priors,” in International Conference on Medical Image Computing and Computer-Assisted Intervention.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Springer, 2019, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' 559–567.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' [36] S.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' van der Walt, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Sch¨onberger, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Nunez-Iglesias, F.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Boulogne, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Warner, N.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Yager, E.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Gouillart, T.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Yu, and the scikit-image contributors, “scikit-image: image processing in Python,” PeerJ, vol.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' 2, p.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' e453, 6 2014.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' [Online].' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Available: https://doi.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='org/10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='7717/peerj.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='453 [37] K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' He, X.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Zhang, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Ren, and J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Sun, “Deep residual learning for image recognition,” in Proceedings of the IEEE conference on computer vision and pattern recognition, 2016, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' 770–778.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' [38] A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Paszke, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Gross, F.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Massa, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Lerer, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Bradbury, G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Chanan, T.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Killeen, Z.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Lin, N.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Gimelshein, L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Antiga et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=', “Pytorch: An imperative style, high-performance deep learning library,” in Advances in neural information processing systems, 2019, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' 8026–8037.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' [39] D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' P.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Kingma and J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Ba, “Adam: A method for stochastic optimization,” arXiv preprint arXiv:1412.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='6980, 2014.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' [40] Z.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Zhang, Q.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Liu, and Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Wang, “Road extraction by deep residual u- net,” IEEE Geoscience and Remote Sensing Letters, vol.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' 15, no.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' 5, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' 749–753, 2018.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' [41] SLIC CUDA, https://github.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='com/fderue/SLIC CUDA.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' [Online].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content=' Avail- able: https://github.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'} +page_content='com/fderue/SLIC CUDA' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/5dE0T4oBgHgl3EQfegDz/content/2301.02393v1.pdf'}