Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
@@ -5,8 +5,7 @@ from langchain.prompts import ChatPromptTemplate
|
|
5 |
from langchain.text_splitter import RecursiveCharacterTextSplitter
|
6 |
from langchain_huggingface import HuggingFaceEmbeddings
|
7 |
from transformers import pipeline
|
8 |
-
from
|
9 |
-
|
10 |
from langchain.retrievers import ParentDocumentRetriever
|
11 |
from langchain.storage import InMemoryStore
|
12 |
from langchain_chroma import Chroma
|
@@ -83,18 +82,12 @@ def get_chain(temperature):
|
|
83 |
# Replace the local OLMOLLM with the Hugging Face model
|
84 |
|
85 |
model_name = "gpt2"
|
86 |
-
pipe = pipeline("text-generation", model=model_name)
|
|
|
87 |
|
88 |
# Initialize the LangChain HuggingFacePipeline
|
89 |
-
llm = HuggingFacePipeline
|
90 |
-
|
91 |
-
task="text-generation",
|
92 |
-
pipeline_kwargs={
|
93 |
-
"max_new_tokens": 100,
|
94 |
-
"top_k": 50,
|
95 |
-
"temperature": temperature,
|
96 |
-
},
|
97 |
-
)
|
98 |
|
99 |
# Initialize LangChain
|
100 |
# llm = HuggingFaceLLM(
|
|
|
5 |
from langchain.text_splitter import RecursiveCharacterTextSplitter
|
6 |
from langchain_huggingface import HuggingFaceEmbeddings
|
7 |
from transformers import pipeline
|
8 |
+
from langchain_community.llms.huggingface_pipeline import HuggingFacePipeline
|
|
|
9 |
from langchain.retrievers import ParentDocumentRetriever
|
10 |
from langchain.storage import InMemoryStore
|
11 |
from langchain_chroma import Chroma
|
|
|
82 |
# Replace the local OLMOLLM with the Hugging Face model
|
83 |
|
84 |
model_name = "gpt2"
|
85 |
+
pipe = pipeline("text-generation", model=model_name, max_length=100, max_new_tokens=50, temperature=temperature)
|
86 |
+
|
87 |
|
88 |
# Initialize the LangChain HuggingFacePipeline
|
89 |
+
llm = HuggingFacePipeline(pipeline=pipe)
|
90 |
+
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
91 |
|
92 |
# Initialize LangChain
|
93 |
# llm = HuggingFaceLLM(
|