Shreyas094 committed
Commit cdaacc7 (verified) · Parent: 1634f94

Update app.py

Files changed (1): app.py (+1, -26)
app.py CHANGED
@@ -483,30 +483,6 @@ You are an expert AI assistant. Write a detailed summary of the information prov
 Base your summary strictly on the information from this source. Only include information that is directly supported by the given content.
 If any part of the information cannot be verified from this source, clearly state that it could not be confirmed."""
 
-        if model == "@cf/meta/llama-3.1-8b-instruct":
-            # Use Cloudflare API
-            source_response = ""
-            for response in get_response_from_cloudflare(prompt="", context=context, query=query, num_calls=1, temperature=temperature, search_type="web"):
-                source_response += response
-                accumulated_response += f"Source {i} ({source}):\n\n{source_response}\n\n"
-                yield accumulated_response, ""
-        else:
-            # Use Hugging Face API
-            client = InferenceClient(model, token=huggingface_token)
-
-            source_response = ""
-            for message in client.chat_completion(
-                messages=[{"role": "user", "content": prompt}],
-                max_tokens=2000,
-                temperature=temperature,
-                stream=True,
-            ):
-                if message.choices and message.choices[0].delta and message.choices[0].delta.content:
-                    chunk = message.choices[0].delta.content
-                    source_response += chunk
-                    accumulated_response += f"Source {i} ({source}):\n\n{source_response}\n\n"
-                    yield accumulated_response, ""
-
     # Generate an overall summary after processing all sources
     overall_prompt = f"""Based on the summaries you've generated for each source: '{accumulated_response}', provide a concise overall summary that addresses the user's query: '{query}'
 Highlight any conflicting information or gaps in the available data."""
@@ -523,7 +499,7 @@ Highlight any conflicting information or gaps in the available data."""
     overall_summary = ""
     for message in client.chat_completion(
         messages=[{"role": "user", "content": overall_prompt}],
-        max_tokens=2000,
+        max_tokens=10000,
         temperature=temperature,
         stream=True,
     ):
@@ -533,7 +509,6 @@ Highlight any conflicting information or gaps in the available data."""
     accumulated_response += f"Overall Summary:\n\n{overall_summary}\n\n"
     yield accumulated_response, ""
 
-
 def get_response_from_pdf(query, model, selected_docs, num_calls=3, temperature=0.2):
     logging.info(f"Entering get_response_from_pdf with query: {query}, model: {model}, selected_docs: {selected_docs}")
 
 
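For reference, the retained overall-summary code streams tokens through huggingface_hub's InferenceClient. Below is a minimal, self-contained sketch of that streaming pattern; the model id, token, and prompt are placeholder assumptions, not values taken from app.py:

# Minimal sketch of the chat_completion streaming loop used above.
# ASSUMPTIONS: the model id, token, and prompt are placeholders.
from huggingface_hub import InferenceClient

client = InferenceClient("mistralai/Mistral-7B-Instruct-v0.2", token="hf_xxx")  # placeholder model/token

accumulated = ""
for message in client.chat_completion(
    messages=[{"role": "user", "content": "Summarize the key findings."}],  # placeholder prompt
    max_tokens=10000,  # mirrors the value introduced by this commit
    temperature=0.2,
    stream=True,
):
    # Each streamed event carries an incremental delta; skip empty ones.
    if message.choices and message.choices[0].delta and message.choices[0].delta.content:
        accumulated += message.choices[0].delta.content

print(accumulated)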