dharmendra committed
Commit 51e51e6 · 1 Parent(s): 48d0a68

Implement streaming responses for LLM API

Files changed (1)
  1. app.py +1 -0
app.py CHANGED
@@ -10,6 +10,7 @@ from langchain.chains import ConversationChain
 from starlette.responses import StreamingResponse # <-- NEW IMPORT
 import asyncio
 from langchain_community.llms import HuggingFacePipeline
+import json
 
 app = FastAPI()
 # Get the Hugging Face API token from environment variables (BEST PRACTICE)
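
The hunk itself only adds the json import; the surrounding context already brings in StreamingResponse and asyncio. Below is a minimal sketch of how these pieces typically fit together for a streaming LLM endpoint. The endpoint path, the chunk format, and the token_stream generator are illustrative assumptions, not code from this commit.

import asyncio
import json

from fastapi import FastAPI
from starlette.responses import StreamingResponse

app = FastAPI()

async def token_stream(prompt: str):
    # Placeholder token source; in the real app the tokens would come from
    # the LangChain ConversationChain / HuggingFacePipeline, not from the prompt.
    for token in prompt.split():
        # Serialize each chunk as a JSON line so the client can parse
        # incremental updates (this is what the new json import enables).
        yield json.dumps({"token": token}) + "\n"
        await asyncio.sleep(0)  # hand control back to the event loop between chunks

@app.post("/chat/stream")
async def chat_stream(prompt: str):
    # StreamingResponse forwards chunks to the client as the generator yields them.
    return StreamingResponse(token_stream(prompt), media_type="application/x-ndjson")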