Bhaskar2611 committed on
Commit 7f0753e · verified · 1 Parent(s): 7d0a3f0

Update app.py

Files changed (1)
  1. app.py +60 -4
app.py CHANGED
@@ -436,6 +436,63 @@ For more information on `huggingface_hub` Inference API support, please check th
 
 # app.py
 
+# app.py
+
+# import os
+# import gradio as gr
+# from huggingface_hub import InferenceClient
+# from dotenv import load_dotenv
+
+# # Load environment variables
+# load_dotenv()
+# HF_TOKEN = os.getenv("HF_TOKEN")
+
+# # Initialize Hugging Face Inference Client
+# client = InferenceClient(
+#     model="mistralai/Codestral-22B-v0.1",
+#     token=HF_TOKEN
+# )
+
+# # System prompt for coding assistant
+# system_message = (
+#     "You are a helpful and experienced coding assistant specialized in web development. "
+#     "Help the user by generating complete and functional code for building websites. "
+#     "You can provide HTML, CSS, JavaScript, and backend code (like Flask, Node.js, etc.) "
+#     "based on their requirements."
+# )
+
+# # Streaming chatbot logic using chat.completions
+# def respond(message, history):
+#     # Prepare messages with system prompt
+#     messages = [{"role": "system", "content": system_message}]
+#     for msg in history:
+#         messages.append(msg)
+#     messages.append({"role": "user", "content": message})
+
+#     # Stream response from the model
+#     response = ""
+#     for chunk in client.chat.completions.create(
+#         model="mistralai/Codestral-22B-v0.1",
+#         messages=messages,
+#         max_tokens=1024,
+#         temperature=0.7,
+#         top_p=0.95,
+#         stream=True,
+#     ):
+#         token = chunk.choices[0].delta.get("content", "") or ""
+#         response += token
+#         yield response
+
+# # Create Gradio interface
+# with gr.Blocks() as demo:
+#     chatbot = gr.Chatbot(type='messages')  # Use modern message format
+#     gr.ChatInterface(fn=respond, chatbot=chatbot, type="messages")  # Match format
+
+# # Launch app
+# if __name__ == "__main__":
+#     demo.launch()
+
+
 # app.py
 
 import os
@@ -449,7 +506,7 @@ HF_TOKEN = os.getenv("HF_TOKEN")
 
 # Initialize Hugging Face Inference Client
 client = InferenceClient(
-    model="mistralai/Codestral-22B-v0.1",
+    model="mistralai/Mistral-7B-Instruct-v0.3",
     token=HF_TOKEN
 )
 
@@ -461,7 +518,7 @@ system_message = (
     "based on their requirements."
 )
 
-# Streaming chatbot logic using chat.completions
+# Streaming chatbot logic
 def respond(message, history):
     # Prepare messages with system prompt
     messages = [{"role": "system", "content": system_message}]
@@ -472,7 +529,7 @@ def respond(message, history):
     # Stream response from the model
     response = ""
     for chunk in client.chat.completions.create(
-        model="mistralai/Codestral-22B-v0.1",
+        model="mistralai/Mistral-7B-Instruct-v0.3",
         messages=messages,
         max_tokens=1024,
         temperature=0.7,
@@ -498,4 +555,3 @@ if __name__ == "__main__":
 
 
 
-
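Below is a minimal standalone sketch, not part of the commit, for exercising the streaming chat-completion call that this change points at mistralai/Mistral-7B-Instruct-v0.3. It assumes HF_TOKEN is set in the environment and that the installed huggingface_hub release exposes the OpenAI-style client.chat.completions.create(...) interface already used in app.py; the delta handling is written defensively because the shape of streamed chunks has varied across huggingface_hub versions. The file name sanity_check.py is hypothetical.

# sanity_check.py: hypothetical helper, not part of this commit.
import os

from huggingface_hub import InferenceClient

# Same client setup as app.py after this commit; HF_TOKEN must be set.
client = InferenceClient(
    model="mistralai/Mistral-7B-Instruct-v0.3",
    token=os.getenv("HF_TOKEN"),
)

messages = [
    {"role": "system", "content": "You are a helpful web-development assistant."},
    {"role": "user", "content": "Write a minimal HTML page with a centered heading."},
]

response = ""
for chunk in client.chat.completions.create(
    model="mistralai/Mistral-7B-Instruct-v0.3",
    messages=messages,
    max_tokens=256,
    temperature=0.7,
    top_p=0.95,
    stream=True,
):
    delta = chunk.choices[0].delta
    # The streamed delta is a dict in some huggingface_hub releases and an
    # object in others; read the content field defensively.
    token = delta.get("content") if isinstance(delta, dict) else getattr(delta, "content", None)
    response += token or ""
    print(token or "", end="", flush=True)

print()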