alexkueck committed on
Commit
5d22ed0
·
1 Parent(s): 1cbb75d

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +4 -4
app.py CHANGED
@@ -18,7 +18,7 @@ from transformers import LlamaForCausalLM, LlamaTokenizer
18
  #chatbot = hugchat.ChatBot(cookie_path="cookies.json")
19
 
20
  #Alternativ mit beliebigen Modellen:
21
- base_model = "project-baize/baize-v2-7b" #load_8bit = False (in load_tokenizer_and_model)
22
  #base_model = "MAGAer13/mPLUG-Owl" #load_8bit = False (in load_tokenizer_and_model)
23
  #base_model = "alexkueck/li-tis-tuned-2" #load_8bit = False (in load_tokenizer_and_model)
24
  #base_model = "TheBloke/airoboros-13B-HF" #load_8bit = False (in load_tokenizer_and_model)
@@ -26,20 +26,20 @@ base_model = "project-baize/baize-v2-7b" #load_8bit = False (in load_tokenizer_
26
  #base_model = "TheBloke/airoboros-13B-HF" #load_8bit = True
27
  #base_model = "TheBloke/vicuna-13B-1.1-HF" #load_8bit = ?
28
  #following runs only on GPU upgrade
29
- #base_model = "TheBloke/airoboros-65B-gpt4-1.3-GPTQ" #model_basename = "airoboros-65b-gpt4-1.3-GPTQ-4bit--1g.act.order"
30
  #base_model = "lmsys/vicuna-13b-v1.3"
31
  #base_model = "gpt2-xl" # options: ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl']
32
 
33
  ####################################
34
  #Model und Tokenizer laden
35
- tokenizer,model,device = load_tokenizer_and_model(base_model,False)
36
 
37
  ################################
38
  #Alternativ: Model und Tokenizer für GPT2
39
  #tokenizer,model,device = load_tokenizer_and_model_gpt2(base_model,False)
40
 
41
  #Alternativ bloke gpt3 und4 - only with GPU upgrade
42
- #tokenizer,model,device = load_tokenizer_and_model_bloke_gpt(base_model, "airoboros-65b-gpt4-1.3-GPTQ-4bit--1g.act.order")
43
 
44
  #Alternativ Model und Tokenizer laden für Baize
45
  #tokenizer,model,device = load_tokenizer_and_model_Baize(base_model,False)
 
18
  #chatbot = hugchat.ChatBot(cookie_path="cookies.json")
19
 
20
  #Alternativ mit beliebigen Modellen:
21
+ #base_model = "project-baize/baize-v2-7b" #load_8bit = False (in load_tokenizer_and_model)
22
  #base_model = "MAGAer13/mPLUG-Owl" #load_8bit = False (in load_tokenizer_and_model)
23
  #base_model = "alexkueck/li-tis-tuned-2" #load_8bit = False (in load_tokenizer_and_model)
24
  #base_model = "TheBloke/airoboros-13B-HF" #load_8bit = False (in load_tokenizer_and_model)
 
26
  #base_model = "TheBloke/airoboros-13B-HF" #load_8bit = True
27
  #base_model = "TheBloke/vicuna-13B-1.1-HF" #load_8bit = ?
28
  #following runs only on GPU upgrade
29
+ base_model = "TheBloke/airoboros-65B-gpt4-1.3-GPTQ" #model_basename = "airoboros-65b-gpt4-1.3-GPTQ-4bit--1g.act.order"
30
  #base_model = "lmsys/vicuna-13b-v1.3"
31
  #base_model = "gpt2-xl" # options: ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl']
32
 
33
  ####################################
34
  #Model und Tokenizer laden
35
+ #tokenizer,model,device = load_tokenizer_and_model(base_model,False)
36
 
37
  ################################
38
  #Alternativ: Model und Tokenizer für GPT2
39
  #tokenizer,model,device = load_tokenizer_and_model_gpt2(base_model,False)
40
 
41
  #Alternativ bloke gpt3 und4 - only with GPU upgrade
42
+ tokenizer,model,device = load_tokenizer_and_model_bloke_gpt(base_model, "airoboros-65b-gpt4-1.3-GPTQ-4bit--1g.act.order")
43
 
44
  #Alternativ Model und Tokenizer laden für Baize
45
  #tokenizer,model,device = load_tokenizer_and_model_Baize(base_model,False)