# from langchain import HuggingFaceHub, LLMChain
from langchain.chains import LLMChain
from langchain.llms import HuggingFacePipeline
from transformers import (
    AutoModelForCausalLM,
    AutoTokenizer,
    pipeline,
    T5Tokenizer,
    T5ForConditionalGeneration,
    GPT2TokenizerFast,
)
from transformers import LlamaForCausalLM, LlamaTokenizer
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.prompts import SystemMessagePromptTemplate, HumanMessagePromptTemplate, ChatPromptTemplate, PromptTemplate

# model_path = "/mnt/localstorage/yinghan/llm/orca_mini_v3_13b"
# model = LlamaForCausalLM.from_pretrained(model_path, device_map="auto")#, load_in_8bit=True)
# tokenizer = AutoTokenizer.from_pretrained(model_path)
from langchain.chat_models import ChatOpenAI
# from langchain_openai import ChatOpenAI
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.vectorstores import Chroma
from langchain.text_splitter import (
    CharacterTextSplitter,
    RecursiveCharacterTextSplitter,
)
from langchain.document_loaders import TextLoader, UnstructuredHTMLLoader, PyPDFLoader
from langchain.chains.retrieval_qa.base import RetrievalQA
from langchain.llms import HuggingFaceHub
from dotenv import load_dotenv
from langchain.llms import HuggingFaceTextGenInference
from langchain.chains.question_answering import load_qa_chain
from langchain.chains import ConversationalRetrievalChain
from langchain.chains.conversation.memory import (
    ConversationBufferMemory,
    ConversationBufferWindowMemory,
)
# from ragas.llms import LangchainLLM


def get_llm_hf_online(inference_api_url=""):
    if not inference_api_url:  # default api url
        inference_api_url = (
            "https://api-inference.huggingface.co/models/HuggingFaceH4/zephyr-7b-beta"
        )

    llm = HuggingFaceTextGenInference(
        # cache=None,  # Optional: whether to use a cache
        verbose=True,  # Provide detailed logs of the operation
        # callbacks=[StreamingStdOutCallbackHandler()],  # Handle streaming output
        max_new_tokens=1024,  # Maximum number of tokens that can be generated
        # top_k=2,  # Number of top-k tokens to consider during generation
        top_p=0.95,  # Nucleus-sampling threshold controlling randomness of generation
        typical_p=0.95,  # Typical-decoding probability mass
        temperature=0.1,  # Low temperature for more deterministic word choices
        # repetition_penalty=None,  # Penalty for repeated tokens during generation
        # truncate=None,  # Truncate the input tokens to the given length
        # stop_sequences=None,  # List of stop sequences for generation
        inference_server_url=inference_api_url,  # URL of the inference server
        timeout=10,  # Timeout (seconds) for the connection to the server
        # streaming=True,  # Stream the answer token by token
    )

    return llm
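

# Illustrative usage sketch (assumptions: the default Hugging Face inference endpoint
# above is reachable and any required API token is configured in the environment).
def _example_llm_hf_online():
    llm = get_llm_hf_online()
    return llm("Explain retrieval-augmented generation in one sentence.")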


def get_llm_hf_local(model_path):
    # model_path = "/mnt/localstorage/yinghan/llm/orca_mini_v3_13b"
    # model_path = "/mnt/localstorage/yinghan/llm/zephyr-7b-beta"
    model = LlamaForCausalLM.from_pretrained(
        model_path, device_map="auto"
    )  # , load_in_8bit=True)
    # model = AutoModelForCausalLM.from_pretrained(model_path, device_map="auto")#, load_in_8bit=True)  # which is better?
    tokenizer = AutoTokenizer.from_pretrained(model_path)

    # print('making a pipeline...')
    # max_length is typically deprecated in favor of max_new_tokens
    pipe = pipeline(
        "text-generation",
        model=model,
        tokenizer=tokenizer,
        max_new_tokens=1024,
        model_kwargs={"temperature": 0.1},
    )
    llm = HuggingFacePipeline(pipeline=pipe)

    return llm
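

# Illustrative usage sketch (the model path is a placeholder for any local
# Llama-compatible checkpoint; loading it requires sufficient GPU memory).
def _example_llm_hf_local():
    llm = get_llm_hf_local("/path/to/local/llama-model")  # placeholder path
    return llm("Summarize what a retriever does in a RAG pipeline.")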


def get_llm_hf_local_zephyr(model_path):
    # model_path = "/mnt/localstorage/yinghan/llm/orca_mini_v3_13b"
    # model_path = "/mnt/localstorage/yinghan/llm/zephyr-7b-beta"
    model = LlamaForCausalLM.from_pretrained(
        model_path, device_map="auto"
    )  # , load_in_8bit=True)
    #import torch
    #model = AutoModelForCausalLM.from_pretrained(model_path, torch_dtype=torch.bfloat16, device_map="auto", trust_remote_code=True)#, load_in_8bit=True)  # which is better?
    tokenizer = AutoTokenizer.from_pretrained(model_path)

    # print('making a pipeline...')
    # max_length is typically deprecated in favor of max_new_tokens
    pipe = pipeline(
        "text-generation",
        model=model,
        tokenizer=tokenizer,
        max_new_tokens=1024,
        temperature=0.1,
        # top_p=0.8,
        # do_sample=True,
        # repetition_penalty=1.1,
        return_full_text=True
        # model_kwargs={"temperature": 0.1},
    )
    llm = HuggingFacePipeline(pipeline=pipe)

    return llm


def get_chat_vllm(model_name, inference_server_url, langfuse_callback=None):
    
    # to fix
    # Create vLLM Langchain instance

    # Some defaults
    # chat_model_name = "openchat/openchat_3.5"
    # inference_server_url = "http://localhost:8080/v1"
    chat = ChatOpenAI(
        model=model_name,
        openai_api_key="EMPTY",
        openai_api_base=inference_server_url,
        max_tokens=512,  # better setting?
        temperature=0.1,  # default 0.7, better setting?
        # callbacks=[langfuse_callback],
    )

    # The following is not required for building a normal LLM
    # use the Ragas LangchainLLM wrapper to create a RagasLLM instance
    # vllm = LangchainLLM(llm=chat)
    # return vllm
    return chat
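

# Illustrative usage sketch for the OpenAI-compatible vLLM endpoint; the model name
# and server URL are placeholders for whatever the server actually exposes.
def _example_chat_vllm():
    chat = get_chat_vllm(
        model_name="openchat/openchat_3.5",               # placeholder model name
        inference_server_url="http://localhost:8080/v1",  # placeholder server URL
    )
    return chat.predict("What is a vector store?")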

def get_chat_vllm_stream(model_name, inference_server_url, langfuse_callback=None):
    
    # to fix
    # Create vLLM Langchain instance

    # Some defaults
    # chat_model_name = "openchat/openchat_3.5"
    # inference_server_url = "http://localhost:8080/v1"
    callbacks = [StreamingStdOutCallbackHandler()]
    if langfuse_callback is not None:
        callbacks.append(langfuse_callback)

    chat = ChatOpenAI(
        model=model_name,
        openai_api_key="EMPTY",
        openai_api_base=inference_server_url,
        max_tokens=512,  # better setting?
        temperature=0.1,  # default 0.7, better setting?
        streaming=True,
        callbacks=callbacks,  # avoid passing None when no Langfuse callback is given
    )

    # The following is not required for building a normal LLM
    # use the Ragas LangchainLLM wrapper to create a RagasLLM instance
    # vllm = LangchainLLM(llm=chat)
    # return vllm
    return chat
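

# Illustrative usage sketch: same endpoint as above, but tokens are streamed to stdout
# via StreamingStdOutCallbackHandler; the Langfuse callback stays optional.
def _example_chat_vllm_stream():
    chat = get_chat_vllm_stream(
        model_name="openchat/openchat_3.5",               # placeholder model name
        inference_server_url="http://localhost:8080/v1",  # placeholder server URL
    )
    return chat.predict("Briefly describe document chunking.")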


def get_chat_vllm_stream_TODO(model_name, inference_server_url, streaming=True):
    
    # to fix
    # Create vLLM Langchain instance

    if streaming:
        callbacks = [StreamingStdOutCallbackHandler()]
    else:
        callbacks = []  # avoid passing a None callback when streaming is disabled

    from langchain.callbacks.manager import CallbackManager
    callback_manager = CallbackManager(callbacks)
    # Some defaults
    # chat_model_name = "openchat/openchat_3.5"
    # inference_server_url = "http://localhost:8080/v1"
    chat = ChatOpenAI(
        model=model_name,
        openai_api_key="EMPTY",
        openai_api_base=inference_server_url,
        max_tokens=512,  # better setting?
        temperature=0.1,  # default 0.7, better setting?
        streaming=streaming,
        callbacks=callbacks,
        callback_manager=callback_manager,
        # stream=True,  # redundant: streaming=True above already enables streaming
    )
    
    from langchain_community.llms import VLLMOpenAI

    llm = VLLMOpenAI(
        openai_api_key="EMPTY",
        openai_api_base=inference_server_url,
        model=model_name,
        max_tokens=512,  # better setting?
        temperature=0.1,  # default 0.7, better setting?
        streaming=True,
        stream=True,  # necessary?
        callbacks=callbacks,
        callback_manager=callback_manager,
    )

    # NOTE: the VLLMOpenAI instance above is an unused alternative; only chat is returned.
    # The following is not required for building a normal LLM:
    # use the Ragas LangchainLLM wrapper to create a RagasLLM instance
    # vllm = LangchainLLM(llm=chat)
    # return vllm
    return chat



def _get_llm_hf_local(model_path):
    # Legacy helper: the hard-coded path below overrides the model_path argument.
    # model_path = "/mnt/localstorage/yinghan/llm/orca_mini_v3_13b"
    model_path = "/mnt/localstorage/yinghan/llm/zephyr-7b-beta"
    model = LlamaForCausalLM.from_pretrained(
        model_path, device_map="auto"
    )  # , load_in_8bit=True)
    tokenizer = AutoTokenizer.from_pretrained(model_path)

    # print('making a pipeline...')
    # max_length is typically deprecated in favor of max_new_tokens
    pipe = pipeline(
        "text-generation",
        model=model,
        tokenizer=tokenizer,
        max_new_tokens=1024,
        model_kwargs={"temperature": 0},
    )
    llm = HuggingFacePipeline(pipeline=pipe)

    return llm



from langchain.chains import RetrievalQAWithSourcesChain, StuffDocumentsChain

def get_cite_combine_docs_chain(llm):
    
    # Ref: https://github.com/langchain-ai/langchain/issues/7239
    # Function to format each document with an index, source, and content.
    def format_document(doc, index, prompt):
        """Format a document into a string based on a prompt template."""
        # Create a dictionary with document content and metadata.
        base_info = {"page_content": doc.page_content, "index": index, "source": doc.metadata["source"]}
        
        # Check if any metadata is missing.
        missing_metadata = set(prompt.input_variables).difference(base_info)
        if len(missing_metadata) > 0:
            raise ValueError(f"Missing metadata: {list(missing_metadata)}.")
        
        # Filter only necessary variables for the prompt.
        document_info = {k: base_info[k] for k in prompt.input_variables}
        return prompt.format(**document_info)

    # Custom chain class to handle document combination with source indices.
    class StuffDocumentsWithIndexChain(StuffDocumentsChain):
        def _get_inputs(self, docs, **kwargs):
            # Format each document and combine them.
            doc_strings = [
                format_document(doc, i, self.document_prompt)
                for i, doc in enumerate(docs, 1)
            ]
            
            # Filter only relevant input variables for the LLM chain prompt.
            inputs = {k: v for k, v in kwargs.items() if k in self.llm_chain.prompt.input_variables}
            inputs[self.document_variable_name] = self.document_separator.join(doc_strings)
            return inputs

    # Ref: https://huggingface.co/spaces/Ekimetrics/climate-question-answering/blob/main/climateqa/engine/prompts.py
    # Define a chat prompt with instructions for citing documents.
    combine_doc_prompt = PromptTemplate(
        input_variables=["context", "question"],
        template="""You are given a question and a set of passages. Provide a clear and structured Helpful Answer based on the passages provided,
        the context, and the guidelines below.
        
        Guidelines:
        - If the passages contain useful facts or numbers, use them in your answer.
        - When you use information from a passage, cite it by appending [[i]] at the end of the sentence, where i is the passage index of the document.
        - Do not cite a passage in a style like 'passage i'; always use the format [[i]], where i is the passage index of the document.
        - Do not use sentences such as 'Doc i says ...', '... in Doc i', or 'Passage i ...' to say where information came from.
        - If the same thing is said in more than one document, you can mention all of them like this: [[i]], [[j]], [[k]].
        - Do not just summarize each passage one by one. Group your summaries to highlight the key parts of the explanation.
        - If it makes sense, use bullet points and lists to make your answers easier to understand.
        - You do not need to use every passage. Only use the ones that help answer the question.
        - If the documents do not contain the information needed to answer the question, just say you do not have enough information.
        - If a passage is the caption of a picture, you can still use it as part of your answer like any other document.

        -----------------------
        Passages:
        {context}
        -----------------------
        Question: {question}

        Helpful Answer with format citations:"""
    )

    # Initialize the custom chain with a specific document format.
    combine_docs_chain = StuffDocumentsWithIndexChain(
        llm_chain=LLMChain(
            llm=llm,
            prompt=combine_doc_prompt,
        ),
        document_prompt=PromptTemplate(
            input_variables=["index", "source", "page_content"],
            template="[[{index}]]\nsource: {source}\n{page_content}",
        ),
        document_variable_name="context",
    )
    
    return combine_docs_chain
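

# Illustrative usage sketch: replace the combine_docs_chain of an existing
# ConversationalRetrievalChain with the citation-aware variant (this is exactly
# what RAGChain.create does below when add_citation=True).
def _example_cite_chain(conversation_chain, llm):
    conversation_chain.combine_docs_chain = get_cite_combine_docs_chain(llm)
    return conversation_chain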


class ConversationChainFactory_bp:
    def __init__(
        self, memory_key="chat_history", output_key="answer", return_messages=True
    ):
        self.memory_key = memory_key
        self.output_key = output_key
        self.return_messages = return_messages

    def create(self, vectorstore, llm):
        memory = ConversationBufferWindowMemory(  # ConversationBufferMemory(
            memory_key=self.memory_key,
            return_messages=self.return_messages,
            output_key=self.output_key,
        )

        # https://github.com/langchain-ai/langchain/issues/4608
        conversation_chain = ConversationalRetrievalChain.from_llm(
            llm=llm,
            retriever=vectorstore.as_retriever(),  # search_kwargs={"k": 8}),
            memory=memory,
            return_source_documents=True,
        )

        return conversation_chain


class ConversationChainFactory:
    def __init__(
        self, memory_key="chat_history", output_key="answer", return_messages=True
    ):
        self.memory_key = memory_key
        self.output_key = output_key
        self.return_messages = return_messages

    def create(self, retriever, llm, langfuse_callback=None):
        memory = ConversationBufferWindowMemory(  # ConversationBufferMemory(
            memory_key=self.memory_key,
            return_messages=self.return_messages,
            output_key=self.output_key,
        )
        
        # prompt: 
        # https://github.com/langchain-ai/langchain/issues/6530
        
        
        prompt_template = """You are a helpful research assistant. Use the following pieces of context to answer the question at the end.
        Please ignore the contexts if they are not related to the question. If you don't know the answer, just say that you don't know,
        don't try to make up an answer.
        
        {context} 
        
        Question: {question}
        
        Helpful Answer:""" 
        PROMPT = PromptTemplate(
            template=prompt_template, input_variables=["context", "question"]
        )

        # Rephrase question based on history
        # https://www.paepper.com/blog/posts/how-to-build-a-chatbot-out-of-your-website-content/
        # tested: Be careful with the technical abbreviations and items, do not modify them unless necessary -> worse
        # You are a helpful research assistant.  -> worse, tend to expand question
        # My testing prompt
        # _template = """Given the following conversation and a follow up question,
        # rephrase the follow up question to be a standalone question only when it is necessary.
        # If the conversation is not related to the question, do not rephrase the follow up question
        # and just put the standalone question exactly the same as the original follow up question.
        # The standalone question should be in its original language, which is usually english.
        
        # Chat History: {chat_history}
        
        # Follow Up Question: {question}
        
        # Standalone Question:"""
        
        # Type 2: https://github.com/langchain-ai/langchain/issues/4076
        _template = """Return text in the original language of the follow up question.
            If the follow up question does not need context, return the exact same text back.
            Never rephrase the follow up question given the chat history unless the follow up question needs context.
            
            Chat History: {chat_history}
            
            Follow Up Question: {question}
            
            Standalone Question:"""
        CONDENSE_QUESTION_PROMPT = PromptTemplate.from_template(_template)
        # or just turn it off, see https://github.com/langchain-ai/langchain/issues/4076
        
        # Change prompt to context-based QA
        # system_template = """You are a professional scientist. Use the following pieces of context to answer the users question.  
        # Please ignore the contexts if they are not related to the question. If you don't know the answer, just say that you don't know, don't try to make up an answer. 
        # ---------------- 
        # {context}""" 
        # messages = [ 
        #     SystemMessagePromptTemplate.from_template(system_template), 
        #     HumanMessagePromptTemplate.from_template("{question}"), 
        # ] 
        # QA_CHAT_PROMPT = ChatPromptTemplate.from_messages(messages)

        # https://github.com/langchain-ai/langchain/issues/4608
        conversation_chain = ConversationalRetrievalChain.from_llm(
            llm=llm,
            retriever=retriever,
            memory=memory,
            return_source_documents=True,
            # return_generated_question=True,  # for debug
            rephrase_question=False,  # Disable rephrase, for test purpose
            get_chat_history=lambda x: x,
            # callbacks=[langfuse_callback]
            # verbose=True,
            # combine_docs_chain_kwargs={"prompt": PROMPT},
            # condense_question_prompt=CONDENSE_QUESTION_PROMPT,
        )
        return conversation_chain
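

# Illustrative usage sketch: the retriever and llm are assumed to come from the
# helpers above (e.g. a Chroma retriever and get_chat_vllm); the chain returns the
# answer plus the retrieved source documents.
def _example_conversation_chain(retriever, llm):
    factory = ConversationChainFactory()
    chain = factory.create(retriever, llm)
    result = chain({"question": "What does the corpus say about this topic?"})
    return result["answer"], result["source_documents"]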


class ConversationChainFactoryDev:
    def __init__(
        self, memory_key="chat_history", output_key="answer", return_messages=True
    ):
        self.memory_key = memory_key
        self.output_key = output_key
        self.return_messages = return_messages

    def create(self, retriever, llm, langfuse_callback=None):
        memory = ConversationBufferWindowMemory(  # ConversationBufferMemory(
            memory_key=self.memory_key,
            return_messages=self.return_messages,
            output_key=self.output_key,
        )
        
        # prompt: 
        # https://github.com/langchain-ai/langchain/issues/6530
        
        
        prompt_template = """You are a helpful research assistant. Use the following pieces of context to answer the question at the end.
        Please ignore the contexts if they are not related to the question. If you don't know the answer, just say that you don't know,
        don't try to make up an answer.
        
        {context} 
        
        Question: {question}
        
        Helpful Answer:""" 
        PROMPT = PromptTemplate(
            template=prompt_template, input_variables=["context", "question"]
        )

        # Rephrase question based on history
        # https://www.paepper.com/blog/posts/how-to-build-a-chatbot-out-of-your-website-content/
        # tested: Be careful with the technical abbreviations and items, do not modify them unless necessary -> worse
        # You are a helpful research assistant.  -> worse, tend to expand question
        # My testing prompt
        # _template = """Given the following conversation and a follow up question,
        # rephrase the follow up question to be a standalone question only when it is necessary.
        # If the conversation is not related to the question, do not rephrase the follow up question
        # and just put the standalone question exactly the same as the original follow up question.
        # The standalone question should be in its original language, which is usually english.
        
        # Chat History: {chat_history}
        
        # Follow Up Question: {question}
        
        # Standalone Question:"""
        
        # Type 2: https://github.com/langchain-ai/langchain/issues/4076
        _template = """Return text in the original language of the follow up question.
            If the follow up question does not need context, return the exact same text back.
            Never rephrase the follow up question given the chat history unless the follow up question needs context.
            
            Chat History: {chat_history}
            
            Follow Up Question: {question}
            
            Standalone Question:"""
        CONDENSE_QUESTION_PROMPT = PromptTemplate.from_template(_template)
        # or just turn it off, see https://github.com/langchain-ai/langchain/issues/4076
        
        # Change prompt to context-based QA
        # system_template = """You are a professional scientist. Use the following pieces of context to answer the users question.  
        # Please ignore the contexts if they are not related to the question. If you don't know the answer, just say that you don't know, don't try to make up an answer. 
        # ---------------- 
        # {context}""" 
        # messages = [ 
        #     SystemMessagePromptTemplate.from_template(system_template), 
        #     HumanMessagePromptTemplate.from_template("{question}"), 
        # ] 
        # QA_CHAT_PROMPT = ChatPromptTemplate.from_messages(messages)

        # https://github.com/langchain-ai/langchain/issues/4608
        conversation_chain = ConversationalRetrievalChain.from_llm(
            llm=llm,
            retriever=retriever,
            memory=memory,
            return_source_documents=True,
            # return_generated_question=True,  # for debug
            rephrase_question=False,  # Disable rephrase, for test purpose
            get_chat_history=lambda x: x,
            # callbacks=[langfuse_callback]
            # verbose=True,
            # combine_docs_chain_kwargs={"prompt": PROMPT},
            # condense_question_prompt=CONDENSE_QUESTION_PROMPT,
        )
        return conversation_chain


class RAGChain:
    def __init__(
        self, memory_key="chat_history", output_key="answer", return_messages=True
    ):
        self.memory_key = memory_key
        self.output_key = output_key
        self.return_messages = return_messages

    def create(self, retriever, llm, add_citation=False):
        memory = ConversationBufferWindowMemory(  # ConversationBufferMemory(
            k=2,
            memory_key=self.memory_key,
            return_messages=self.return_messages,
            output_key=self.output_key,
        )

        # https://github.com/langchain-ai/langchain/issues/4608
        conversation_chain = ConversationalRetrievalChain.from_llm(
            llm=llm,
            retriever=retriever,
            memory=memory,
            return_source_documents=True,
            rephrase_question=False,  # disable rephrase, for test purpose
            get_chat_history=lambda x: x,
            # return_generated_question=True,  # for debug
            # verbose=True,
            # combine_docs_chain_kwargs={"prompt": PROMPT},  # additional prompt control
            # condense_question_prompt=CONDENSE_QUESTION_PROMPT,  # additional prompt control
        )
        
        # Add citation, ATTENTION: experimental
        if add_citation:
            # from models import get_cite_combine_docs_chain
            cite_combine_docs_chain = get_cite_combine_docs_chain(llm)
            conversation_chain.combine_docs_chain = cite_combine_docs_chain

        return conversation_chain
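

# Illustrative end-to-end sketch (placeholders throughout): build a retriever from a
# persisted Chroma store, connect to a vLLM server, and ask one question with the
# experimental citation chain enabled.
def _example_rag_chain():
    embeddings = HuggingFaceEmbeddings()  # default sentence-transformers model
    vectorstore = Chroma(
        persist_directory="/path/to/chroma_db",  # placeholder path
        embedding_function=embeddings,
    )
    retriever = vectorstore.as_retriever()
    llm = get_chat_vllm("openchat/openchat_3.5", "http://localhost:8080/v1")  # placeholders
    chain = RAGChain().create(retriever, llm, add_citation=True)
    result = chain({"question": "Which passages discuss the main method?"})
    return result["answer"], result["source_documents"]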