KaizeShi committed
Commit d0afb9d · 1 Parent(s): d573b77

Add application file

app.py ADDED
@@ -0,0 +1,220 @@
+ import os
+ import sys
+
+ import fire
+ import gradio as gr
+ import torch
+ import transformers
+ from peft import PeftModel
+ from transformers import GenerationConfig, LlamaForCausalLM, LlamaTokenizer
+
+ from utils.callbacks import Iteratorize, Stream
+ from utils.prompter import Prompter
+
+ if torch.cuda.is_available():
+     device = "cuda"
+ else:
+     device = "cpu"
+
+ try:
+     if torch.backends.mps.is_available():
+         device = "mps"
+ except:  # noqa: E722
+     pass
+
+
+ def main(
+     load_8bit: bool = False,
+     base_model: str = "",
+     lora_weights: str = "tloen/alpaca-lora-7b",
+     prompt_template: str = "",  # The prompt template to use; defaults to alpaca.
+     server_name: str = "0.0.0.0",  # Allows listening on all interfaces by providing '0.0.0.0'.
+     share_gradio: bool = False,
+ ):
+     print("lora_weights: " + str(lora_weights))
+     base_model = base_model or os.environ.get("BASE_MODEL", "")
+     assert (
+         base_model
+     ), "Please specify a --base_model, e.g. --base_model='huggyllama/llama-7b'"
+
+     prompter = Prompter(prompt_template)
+     tokenizer = LlamaTokenizer.from_pretrained(base_model)
+     if device == "cuda":
+         model = LlamaForCausalLM.from_pretrained(
+             base_model,
+             load_in_8bit=load_8bit,
+             torch_dtype=torch.float16,
+             device_map="auto",
+         )
+         model = PeftModel.from_pretrained(
+             model,
+             lora_weights,
+             torch_dtype=torch.float16,
+         )
+     elif device == "mps":
+         model = LlamaForCausalLM.from_pretrained(
+             base_model,
+             device_map={"": device},
+             torch_dtype=torch.float16,
+         )
+         model = PeftModel.from_pretrained(
+             model,
+             lora_weights,
+             device_map={"": device},
+             torch_dtype=torch.float16,
+         )
+     else:
+         model = LlamaForCausalLM.from_pretrained(
+             base_model, device_map={"": device}, low_cpu_mem_usage=True
+         )
+         model = PeftModel.from_pretrained(
+             model,
+             lora_weights,
+             device_map={"": device},
+         )
+
+     # unwind broken decapoda-research config
+     model.config.pad_token_id = tokenizer.pad_token_id = 0  # unk
+     model.config.bos_token_id = 1
+     model.config.eos_token_id = 2
+
+     if not load_8bit:
+         model.half()  # seems to fix bugs for some users.
+
+     model.eval()
+     if torch.__version__ >= "2" and sys.platform != "win32":
+         model = torch.compile(model)
+
+     def evaluate(
+         instruction,
+         input=None,
+         temperature=0.1,
+         top_p=0.75,
+         top_k=40,
+         num_beams=4,
+         max_new_tokens=128,
+         stream_output=False,
+         **kwargs,
+     ):
+         prompt = prompter.generate_prompt(instruction, input)
+         inputs = tokenizer(prompt, return_tensors="pt")
+         input_ids = inputs["input_ids"].to(device)
+         generation_config = GenerationConfig(
+             temperature=temperature,
+             top_p=top_p,
+             top_k=top_k,
+             num_beams=num_beams,
+             **kwargs,
+         )
+
+         generate_params = {
+             "input_ids": input_ids,
+             "generation_config": generation_config,
+             "return_dict_in_generate": True,
+             "output_scores": True,
+             "max_new_tokens": max_new_tokens,
+         }
+
+         if stream_output:
+             # Stream the reply 1 token at a time.
+             # This is based on the trick of using 'stopping_criteria' to create an iterator,
+             # from https://github.com/oobabooga/text-generation-webui/blob/ad37f396fc8bcbab90e11ecf17c56c97bfbd4a9c/modules/text_generation.py#L216-L243.
+
+             def generate_with_callback(callback=None, **kwargs):
+                 kwargs.setdefault(
+                     "stopping_criteria", transformers.StoppingCriteriaList()
+                 )
+                 kwargs["stopping_criteria"].append(
+                     Stream(callback_func=callback)
+                 )
+                 with torch.no_grad():
+                     model.generate(**kwargs)
+
+             def generate_with_streaming(**kwargs):
+                 return Iteratorize(
+                     generate_with_callback, kwargs, callback=None
+                 )
+
+             with generate_with_streaming(**generate_params) as generator:
+                 for output in generator:
+                     # new_tokens = len(output) - len(input_ids[0])
+                     decoded_output = tokenizer.decode(output)
+
+                     if output[-1] in [tokenizer.eos_token_id]:
+                         break
+
+                     yield prompter.get_response(decoded_output)
+             return  # early return for stream_output
+
+         # Without streaming
+         with torch.no_grad():
+             generation_output = model.generate(
+                 input_ids=input_ids,
+                 generation_config=generation_config,
+                 return_dict_in_generate=True,
+                 output_scores=True,
+                 max_new_tokens=max_new_tokens,
+             )
+         s = generation_output.sequences[0]
+         output = tokenizer.decode(s)
+         yield prompter.get_response(output)
+
+     gr.Interface(
+         fn=evaluate,
+         inputs=[
+             gr.components.Textbox(
+                 lines=2,
+                 label="Instruction",
+                 placeholder="Generate an Ad for the iPhone 14.",
+             ),
+             gr.components.Textbox(lines=2, label="Input", placeholder="none"),
+             gr.components.Slider(
+                 minimum=0, maximum=1, value=0.1, label="Temperature"
+             ),
+             gr.components.Slider(
+                 minimum=0, maximum=1, value=0.75, label="Top p"
+             ),
+             gr.components.Slider(
+                 minimum=0, maximum=100, step=1, value=40, label="Top k"
+             ),
+             gr.components.Slider(
+                 minimum=1, maximum=4, step=1, value=4, label="Beams"
+             ),
+             gr.components.Slider(
+                 minimum=1, maximum=2000, step=1, value=128, label="Max tokens"
+             ),
+             gr.components.Checkbox(label="Stream output"),
+         ],
+         outputs=[
+             gr.components.Textbox(
+                 lines=5,
+                 label="Output",
+             )
+         ],
+         title="🦙🛍️ LLaMA-E-LoRA",
+         description="LLaMA-E-LoRA is a series of LLaMA models fine-tuned to follow e-commerce instructions. It is developed by DSMI (http://dsmi.tech/) @ University of Technology Sydney and trained on a 120k-instruction set. This model is for academic research use only. For more details please contact: Kaize.Shi@uts.edu.au",
+         # noqa: E501
+     ).queue().launch(server_name=server_name, share=share_gradio)
+     # Old testing code follows.
+
+     """
+     # testing code for readme
+     for instruction in [
+         "Tell me about alpacas.",
+         "Tell me about the president of Mexico in 2019.",
+         "Tell me about the king of France in 2019.",
+         "List all Canadian provinces in alphabetical order.",
+         "Write a Python program that prints the first 10 Fibonacci numbers.",
+         "Write a program that prints the numbers from 1 to 100. But for multiples of three print 'Fizz' instead of the number and for the multiples of five print 'Buzz'. For numbers which are multiples of both three and five print 'FizzBuzz'.",  # noqa: E501
+         "Tell me five words that rhyme with 'shock'.",
+         "Translate the sentence 'I have no mouth but I must scream' into Spanish.",
+         "Count up from 1 to 500.",
+     ]:
+         print("Instruction:", instruction)
+         print("Response:", evaluate(instruction))
+         print()
+     """
+
+
+ if __name__ == "__main__":
+     fire.Fire(main)
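
For orientation (not part of the committed file): `app.py` exposes `main` through `fire.Fire`, so the demo can be started from the command line or by importing the module. A minimal sketch, assuming the requirements below are installed and using the example base model named in the script's own assertion message:

```python
# Illustrative only -- equivalent to:
#   python app.py --base_model 'huggyllama/llama-7b'
from app import main

main(
    base_model="huggyllama/llama-7b",     # required; any LLaMA checkpoint
    lora_weights="tloen/alpaca-lora-7b",  # default adapter from the script
    load_8bit=False,                      # set True to load the base model in 8-bit
    share_gradio=False,                   # set True for a public Gradio link
)
```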
requirements.txt ADDED
@@ -0,0 +1,12 @@
+ accelerate
+ appdirs
+ loralib
+ bitsandbytes
+ black
+ black[jupyter]
+ datasets
+ fire
+ git+https://github.com/huggingface/peft.git
+ transformers>=4.28.0
+ sentencepiece
+ gradio
templates/README.md ADDED
@@ -0,0 +1,46 @@
+ # Prompt templates
+
+ This directory contains template styles for the prompts used to finetune LoRA models.
+
+ ## Format
+
+ A template is described via a JSON file with the following keys:
+
+ - `prompt_input`: The template to use when input is not None. Uses `{instruction}` and `{input}` placeholders.
+ - `prompt_no_input`: The template to use when input is None. Uses the `{instruction}` placeholder.
+ - `description`: A short description of the template, with possible use cases.
+ - `response_split`: The text to use as a separator when cutting the real response out of the model output.
+
+ No `{response}` placeholder is used, since the response is always the last element of the template and is simply concatenated to the rest.
+
+ ## Example template
+
+ The default template, used unless otherwise specified, is `alpaca.json`.
+
+ ```json
+ {
+     "description": "Template used by Alpaca-LoRA.",
+     "prompt_input": "Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.\n\n### Instruction:\n{instruction}\n\n### Input:\n{input}\n\n### Response:\n",
+     "prompt_no_input": "Below is an instruction that describes a task. Write a response that appropriately completes the request.\n\n### Instruction:\n{instruction}\n\n### Response:\n",
+     "response_split": "### Response:"
+ }
+
+ ```
+
+ ## Current templates
+
+ ### alpaca
+
+ Default template used for generic LoRA fine-tunes so far.
+
+ ### alpaca_legacy
+
+ Legacy template used by the original Alpaca repo, with no `\n` after the response field. Kept for reference and experiments.
+
+ ### alpaca_short
+
+ A trimmed-down alpaca template which seems to perform just as well and save some tokens. Models created with the default template seem to be queryable by the short template as well. More experiments are welcome.
+
+ ### vigogne
+
+ The default alpaca template, translated to French. This template was used to train the "Vigogne" LoRA and is to be used to query it, or for extra fine-tuning.
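
For concreteness (an illustrative sketch, not part of the commit): the keys above are consumed by filling `prompt_input` with `str.format` and cutting the generated text on `response_split`. The instruction, input, and stand-in model output below are made up; the path assumes this repository's layout.

```python
import json

# Load the default template added in this commit.
with open("templates/alpaca.json") as fp:
    template = json.load(fp)

# Build a prompt for an instruction that comes with extra input.
prompt = template["prompt_input"].format(
    instruction="Summarize the product review.",
    input="Great battery life, but the screen scratches easily.",
)

# After generation, keep only the text that follows the response marker.
fake_output = prompt + "Strong battery, fragile screen."
response = fake_output.split(template["response_split"])[1].strip()
print(response)  # -> "Strong battery, fragile screen."
```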
templates/alpaca.json ADDED
@@ -0,0 +1,6 @@
+ {
+     "description": "Template used by Alpaca-LoRA.",
+     "prompt_input": "Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.\n\n### Instruction:\n{instruction}\n\n### Input:\n{input}\n\n### Response:\n",
+     "prompt_no_input": "Below is an instruction that describes a task. Write a response that appropriately completes the request.\n\n### Instruction:\n{instruction}\n\n### Response:\n",
+     "response_split": "### Response:"
+ }
templates/alpaca_legacy.json ADDED
@@ -0,0 +1,6 @@
+ {
+     "description": "Legacy template, used by Original Alpaca repository.",
+     "prompt_input": "Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.\n\n### Instruction:\n{instruction}\n\n### Input:\n{input}\n\n### Response:",
+     "prompt_no_input": "Below is an instruction that describes a task. Write a response that appropriately completes the request.\n\n### Instruction:\n{instruction}\n\n### Response:",
+     "response_split": "### Response:"
+ }
templates/alpaca_short.json ADDED
@@ -0,0 +1,6 @@
+ {
+     "description": "A shorter template to experiment with.",
+     "prompt_input": "### Instruction:\n{instruction}\n\n### Input:\n{input}\n\n### Response:\n",
+     "prompt_no_input": "### Instruction:\n{instruction}\n\n### Response:\n",
+     "response_split": "### Response:"
+ }
templates/vigogne.json ADDED
@@ -0,0 +1,6 @@
+ {
+     "description": "French template, used by Vigogne for finetuning.",
+     "prompt_input": "Ci-dessous se trouve une instruction qui décrit une tâche, associée à une entrée qui fournit un contexte supplémentaire. Écrivez une réponse qui complète correctement la demande.\n\n### Instruction:\n{instruction}\n\n### Entrée:\n{input}\n\n### Réponse:\n",
+     "prompt_no_input": "Ci-dessous se trouve une instruction qui décrit une tâche. Écrivez une réponse qui complète correctement la demande.\n\n### Instruction:\n{instruction}\n\n### Réponse:\n",
+     "response_split": "### Réponse:"
+ }
utils/README.md ADDED
@@ -0,0 +1,13 @@
+ # Directory for helper modules
+
+ ## prompter.py
+
+ The `Prompter` class, a template manager.
+
+ `from utils.prompter import Prompter`
+
+ ## callbacks.py
+
+ Helpers to support streaming generate output.
+
+ `from utils.callbacks import Iteratorize, Stream`
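
A minimal usage sketch for the `Prompter` helper (illustrative; the instruction and the stand-in model output are made up, and the working directory is assumed to be the repository root):

```python
from utils.prompter import Prompter

prompter = Prompter("alpaca")  # loads templates/alpaca.json

# Full prompt from an instruction with no extra input.
prompt = prompter.generate_prompt("List three uses for a paperclip.")

# Strip the prompt back off a stand-in model output.
model_output = prompt + "Hold paper, reset electronics, mark a page."
print(prompter.get_response(model_output))
```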
utils/__init__.py ADDED
File without changes
utils/callbacks.py ADDED
@@ -0,0 +1,75 @@
+ """
+ Helpers to support streaming generate output.
+ Borrowed from https://github.com/oobabooga/text-generation-webui/blob/ad37f396fc8bcbab90e11ecf17c56c97bfbd4a9c/modules/callbacks.py
+ """
+
+ import gc
+ import traceback
+ from queue import Queue
+ from threading import Thread
+
+ import torch
+ import transformers
+
+
+ class Stream(transformers.StoppingCriteria):
+     def __init__(self, callback_func=None):
+         self.callback_func = callback_func
+
+     def __call__(self, input_ids, scores) -> bool:
+         if self.callback_func is not None:
+             self.callback_func(input_ids[0])
+         return False
+
+
+ class Iteratorize:
+
+     """
+     Transforms a function that takes a callback
+     into a lazy iterator (generator).
+     """
+
+     def __init__(self, func, kwargs={}, callback=None):
+         self.mfunc = func
+         self.c_callback = callback
+         self.q = Queue()
+         self.sentinel = object()
+         self.kwargs = kwargs
+         self.stop_now = False
+
+         def _callback(val):
+             if self.stop_now:
+                 raise ValueError
+             self.q.put(val)
+
+         def gentask():
+             try:
+                 ret = self.mfunc(callback=_callback, **self.kwargs)
+             except ValueError:
+                 pass
+             except:
+                 traceback.print_exc()
+                 pass
+
+             self.q.put(self.sentinel)
+             if self.c_callback:
+                 self.c_callback(ret)
+
+         self.thread = Thread(target=gentask)
+         self.thread.start()
+
+     def __iter__(self):
+         return self
+
+     def __next__(self):
+         obj = self.q.get(True, None)
+         if obj is self.sentinel:
+             raise StopIteration
+         else:
+             return obj
+
+     def __enter__(self):
+         return self
+
+     def __exit__(self, exc_type, exc_val, exc_tb):
+         self.stop_now = True
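
To see what `Iteratorize` does outside of `model.generate`, here is a toy sketch (illustrative only, with a made-up `produce` function) that turns a callback-based producer into a plain iterator, mirroring how `app.py` streams tokens:

```python
from utils.callbacks import Iteratorize

def produce(callback=None, n=3):
    # Stand-in for model.generate: emits values through a callback.
    for i in range(n):
        callback(i)

with Iteratorize(produce, {"n": 3}) as gen:
    for value in gen:
        print(value)  # prints 0, 1, 2
```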
utils/prompter.py ADDED
@@ -0,0 +1,51 @@
+ """
+ A dedicated helper to manage templates and prompt building.
+ """
+
+ import json
+ import os.path as osp
+ from typing import Union
+
+
+ class Prompter(object):
+     __slots__ = ("template", "_verbose")
+
+     def __init__(self, template_name: str = "", verbose: bool = False):
+         self._verbose = verbose
+         if not template_name:
+             # Enforce the default here, so the constructor can be called with '' and will not break.
+             template_name = "alpaca"
+         file_name = osp.join("templates", f"{template_name}.json")
+         if not osp.exists(file_name):
+             raise ValueError(f"Can't read {file_name}")
+         with open(file_name) as fp:
+             self.template = json.load(fp)
+         if self._verbose:
+             print(
+                 f"Using prompt template {template_name}: {self.template['description']}"
+             )
+
+     def generate_prompt(
+         self,
+         instruction: str,
+         input: Union[None, str] = None,
+         label: Union[None, str] = None,
+     ) -> str:
+         # returns the full prompt from instruction and optional input
+         # if a label (=response, =output) is provided, it's also appended.
+         if input:
+             res = self.template["prompt_input"].format(
+                 instruction=instruction, input=input
+             )
+         else:
+             res = self.template["prompt_no_input"].format(
+                 instruction=instruction
+             )
+         if label:
+             res = f"{res}{label}"
+         if self._verbose:
+             print(res)
+         return res
+
+     def get_response(self, output: str) -> str:
+         return output.split(self.template["response_split"])[1].strip()