c committed on
Commit 52f04a2 · verified · 1 Parent(s): a8051c1

Update app.py

Files changed (1)
  1. app.py +11 -15
app.py CHANGED
@@ -1,23 +1,15 @@
  import gradio as gr
  import torch
- from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
- from peft import PeftModel
+ from transformers import AutoTokenizer, AutoModelForCausalLM
+ from peft import PeftModel, PeftConfig

  base_model = "mistralai/Mistral-7B-v0.1"

- bnb_config = BitsAndBytesConfig(
-     load_in_4bit= True,
-     bnb_4bit_quant_type= "nf4",
-     bnb_4bit_compute_dtype= torch.bfloat16,
-     bnb_4bit_use_double_quant= False,
- )
- model = AutoModelForCausalLM.from_pretrained(
-     base_model,
-     quantization_config=bnb_config,
-     device_map={"": 0}
- )
+ config = PeftConfig.from_pretrained("kiki7sun/mixtral-academic-finetune0119")
+ model = AutoModelForCausalLM.from_pretrained("mistralai/Mistral-7B-v0.1")
+ ft_model = PeftModel.from_pretrained(model, "kiki7sun/mixtral-academic-finetune0119")

- ft_model = PeftModel.from_pretrained(model, 'kiki7sun/mixtral-academic-finetune-QLoRA-0121')
+ # ft_model = PeftModel.from_pretrained(model, 'kiki7sun/mixtral-academic-finetune-QLoRA-0121')

  tokenizer = AutoTokenizer.from_pretrained(
      base_model_id,
@@ -34,6 +26,10 @@ def greet(eval_prompt,max_new_tokens):

      return result

- demo = gr.Interface(fn=greet, inputs="textbox", outputs="textbox")
+ demo = gr.Interface(fn=greet,
+     inputs="textbox",
+     outputs="textbox",
+     title="My Academic Chat",
+ )

  demo.queue().launch(debug=True, share=True, inline=False)
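
For reference, a minimal self-contained sketch of the app as it stands after this commit. The body of greet() is not part of the diff, so the tokenize/generate/decode steps below are assumptions. Two small fixes are also folded in and should be treated as assumptions: the tokenizer is pointed at base_model (the diff's context line passes base_model_id, which is never defined), and a second input component is supplied because greet() takes two parameters while the committed code passes a single "textbox".

import gradio as gr
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM
from peft import PeftModel

base_model = "mistralai/Mistral-7B-v0.1"
adapter_id = "kiki7sun/mixtral-academic-finetune0119"

# Load the full-precision base model and attach the LoRA adapter on top,
# mirroring the post-commit loading path (no 4-bit quantization).
model = AutoModelForCausalLM.from_pretrained(base_model)
ft_model = PeftModel.from_pretrained(model, adapter_id)
ft_model.eval()

# Assumption: the tokenizer comes from the base model repo
# (the diff references an undefined base_model_id here).
tokenizer = AutoTokenizer.from_pretrained(base_model)

def greet(eval_prompt, max_new_tokens):
    # Assumed body: tokenize, generate, decode. Not shown in the diff.
    inputs = tokenizer(eval_prompt, return_tensors="pt")
    with torch.no_grad():
        output = ft_model.generate(**inputs, max_new_tokens=int(max_new_tokens))
    return tokenizer.decode(output[0], skip_special_tokens=True)

# Assumption: two inputs to match greet()'s two parameters;
# the committed code passes only inputs="textbox".
demo = gr.Interface(
    fn=greet,
    inputs=["textbox", gr.Number(value=128)],
    outputs="textbox",
    title="My Academic Chat",
)
demo.queue().launch(debug=True, share=True, inline=False)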