import streamlit as st
import torch
import tiktoken
from dataclasses import dataclass
import torch.nn as nn
from torch.nn import functional as F
@dataclass
class GPTConfig:
    block_size: int = 1024  # max sequence length
    vocab_size: int = 50257 # number of tokens: 50,000 BPE merges + 256 byte tokens + 1 <|endoftext|> token
    n_layer: int = 12       # number of layers
    n_head: int = 12        # number of heads
    n_embd: int = 768       # embedding dimension
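# NOTE: the GPT module below references a `Block` class that is not defined in this
# file. The following is a minimal sketch of the missing pieces, assuming the standard
# nanoGPT-style GPT-2 block (pre-LayerNorm, causal self-attention, 4x MLP); the
# NANGPT_SCALE_INIT flag matches the residual-projection scaling used in _init_weights.
class CausalSelfAttention(nn.Module):
    def __init__(self, config):
        super().__init__()
        assert config.n_embd % config.n_head == 0
        # key, query, value projections for all heads, in one linear layer
        self.c_attn = nn.Linear(config.n_embd, 3 * config.n_embd)
        # output projection
        self.c_proj = nn.Linear(config.n_embd, config.n_embd)
        self.c_proj.NANGPT_SCALE_INIT = 1
        self.n_head = config.n_head
        self.n_embd = config.n_embd

    def forward(self, x):
        B, T, C = x.size()
        qkv = self.c_attn(x)
        q, k, v = qkv.split(self.n_embd, dim=2)
        # reshape into (B, n_head, T, head_dim)
        q = q.view(B, T, self.n_head, C // self.n_head).transpose(1, 2)
        k = k.view(B, T, self.n_head, C // self.n_head).transpose(1, 2)
        v = v.view(B, T, self.n_head, C // self.n_head).transpose(1, 2)
        # causal attention (assumes PyTorch 2.x for scaled_dot_product_attention)
        y = F.scaled_dot_product_attention(q, k, v, is_causal=True)
        y = y.transpose(1, 2).contiguous().view(B, T, C)
        return self.c_proj(y)

class MLP(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.c_fc = nn.Linear(config.n_embd, 4 * config.n_embd)
        self.gelu = nn.GELU(approximate='tanh')
        self.c_proj = nn.Linear(4 * config.n_embd, config.n_embd)
        self.c_proj.NANGPT_SCALE_INIT = 1

    def forward(self, x):
        return self.c_proj(self.gelu(self.c_fc(x)))

class Block(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.ln_1 = nn.LayerNorm(config.n_embd)
        self.attn = CausalSelfAttention(config)
        self.ln_2 = nn.LayerNorm(config.n_embd)
        self.mlp = MLP(config)

    def forward(self, x):
        # pre-norm residual connections
        x = x + self.attn(self.ln_1(x))
        x = x + self.mlp(self.ln_2(x))
        return x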
class GPT(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config

        self.transformer = nn.ModuleDict(dict(
            wte = nn.Embedding(config.vocab_size, config.n_embd),
            wpe = nn.Embedding(config.block_size, config.n_embd),
            h = nn.ModuleList([Block(config) for _ in range(config.n_layer)]),
            ln_f = nn.LayerNorm(config.n_embd),
        ))
        self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)

        # weight sharing: tie the token embedding and the LM head weights
        self.transformer.wte.weight = self.lm_head.weight

        # weight initialization
        self.apply(self._init_weights)

    def _init_weights(self, module):
        if isinstance(module, nn.Linear):
            std = 0.02
            if hasattr(module, 'NANGPT_SCALE_INIT'):
                # scale residual projections by 1/sqrt(2 * n_layer) so the
                # residual-stream variance stays roughly constant with depth
                std *= (2 * self.config.n_layer) ** -0.5
            torch.nn.init.normal_(module.weight, mean=0.0, std=std)
            if module.bias is not None:
                torch.nn.init.zeros_(module.bias)
        elif isinstance(module, nn.Embedding):
            torch.nn.init.normal_(module.weight, mean=0.0, std=0.02)

    def print_num_parameters(self):
        num_params = sum(p.numel() for p in self.parameters())
        print(f"Number of model parameters: {num_params}")
    def forward(self, idx, targets=None):
        # idx is of shape (B, T)
        B, T = idx.size()
        assert T <= self.config.block_size, f"Cannot forward sequence of length {T}, block size is only {self.config.block_size}"
        # forward the token and position embeddings
        pos = torch.arange(0, T, dtype=torch.long, device=idx.device) # shape (T)
        pos_emb = self.transformer.wpe(pos) # position embeddings of shape (T, n_embd)
        tok_emb = self.transformer.wte(idx) # token embeddings of shape (B, T, n_embd)
        x = tok_emb + pos_emb
        # forward the blocks of the transformer
        for block in self.transformer.h:
            x = block(x)
        # forward the final layernorm and the classifier
        x = self.transformer.ln_f(x)
        logits = self.lm_head(x) # (B, T, vocab_size)
        loss = None
        if targets is not None:
            loss = F.cross_entropy(logits.view(-1, logits.size(-1)), targets.view(-1))
        return logits, loss
    @classmethod
    def from_pretrained(cls, model_type):
        """Loads pretrained GPT-2 model weights from huggingface"""
        assert model_type in {'gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'}
        from transformers import GPT2LMHeadModel
        print("loading weights from pretrained gpt: %s" % model_type)

        # n_layer, n_head and n_embd are determined from model_type
        config_args = {
            'gpt2':        dict(n_layer=12, n_head=12, n_embd=768),  # 124M params
            'gpt2-medium': dict(n_layer=24, n_head=16, n_embd=1024), # 350M params
            'gpt2-large':  dict(n_layer=36, n_head=20, n_embd=1280), # 774M params
            'gpt2-xl':     dict(n_layer=48, n_head=25, n_embd=1600), # 1558M params
        }[model_type]
        config_args['vocab_size'] = 50257 # always 50257 for GPT model checkpoints
        config_args['block_size'] = 1024  # always 1024 for GPT model checkpoints

        # create a from-scratch initialized minGPT model
        config = GPTConfig(**config_args)
        model = GPT(config)
        sd = model.state_dict()
        sd_keys = sd.keys()
        sd_keys = [k for k in sd_keys if not k.endswith('.attn.bias')] # discard this mask / buffer, not a param

        # init a huggingface/transformers model
        model_hf = GPT2LMHeadModel.from_pretrained(model_type)
        sd_hf = model_hf.state_dict()

        # copy while ensuring all of the parameters are aligned and match in names and shapes
        sd_keys_hf = sd_hf.keys()
        sd_keys_hf = [k for k in sd_keys_hf if not k.endswith('.attn.masked_bias')] # ignore these, just a buffer
        sd_keys_hf = [k for k in sd_keys_hf if not k.endswith('.attn.bias')] # same, just the mask (buffer)
        transposed = ['attn.c_attn.weight', 'attn.c_proj.weight', 'mlp.c_fc.weight', 'mlp.c_proj.weight']
        # the openai checkpoints use a "Conv1D" module, but we only want to use a vanilla Linear,
        # so we have to transpose these weights when we import them
        assert len(sd_keys_hf) == len(sd_keys), f"mismatched keys: {len(sd_keys_hf)} != {len(sd_keys)}"
        for k in sd_keys_hf:
            if any(k.endswith(w) for w in transposed):
                # special treatment for the Conv1D weights we need to transpose
                assert sd_hf[k].shape[::-1] == sd[k].shape
                with torch.no_grad():
                    sd[k].copy_(sd_hf[k].t())
            else:
                # vanilla copy over the other parameters
                assert sd_hf[k].shape == sd[k].shape
                with torch.no_grad():
                    sd[k].copy_(sd_hf[k])
        return model
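# Illustrative alternative (not used by this app): instead of loading the local
# checkpoint below, pretrained GPT-2 weights could be pulled from Hugging Face via
# the class method above, e.g. model = GPT.from_pretrained('gpt2').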
# Load the trained model
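# st.cache_resource caches the constructed model across Streamlit reruns,
# so the checkpoint is only read from disk once rather than on every button click.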
@st.cache_resource
def load_model():
    config = GPTConfig()
    model = GPT(config)
    try:
        # Load the model with map_location to handle CPU-only environments
        model.load_state_dict(torch.load('trained_model_quantized.pt', map_location=torch.device('cpu')), strict=False)
        model.eval()  # Set the model to evaluation mode
        st.success("Model loaded successfully!")
    except Exception as e:
        # On failure, the randomly initialized model is returned as-is
        st.error(f"Error loading model: {e}")
    return model
# Load the tokenizer
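# tiktoken's 'gpt2' encoding implements GPT-2's byte-pair encoding
# (50257 tokens, matching GPTConfig.vocab_size).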
def load_tokenizer():
    return tiktoken.get_encoding('gpt2')
# Generate text function
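# Decoding strategy: at each step the next token is sampled from the full softmax
# distribution (plain multinomial sampling; no temperature scaling or top-k filtering).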
def generate_text(model, tokenizer, input_text, length, num_sequences):
    # Encode the input text once
    input_ids = tokenizer.encode(input_text)

    generated_sequences = []
    for _ in range(num_sequences):
        # Start each sequence from the original prompt (batch dimension added, shape: [1, T])
        input_tensor = torch.tensor(input_ids).unsqueeze(0)
        # Generate additional tokens
        with torch.no_grad():
            for _ in range(length):
                logits = model(input_tensor)[0]        # forward returns (logits, loss)
                next_token_logits = logits[:, -1, :]   # logits for the last position
                next_token_probs = torch.softmax(next_token_logits, dim=-1)
                next_token = torch.multinomial(next_token_probs, num_samples=1)  # sample from the distribution
                next_token = next_token.view(1, -1)    # ensure shape [1, 1] for concatenation
                input_tensor = torch.cat((input_tensor, next_token), dim=1)  # append the new token
        # Decode the generated tokens
        generated_sequences.append(tokenizer.decode(input_tensor[0].tolist()))
    return generated_sequences
# Streamlit app layout
st.title("GPT Text Generator")
st.write("Enter your text and specify the length of additional text to generate.")
input_text = st.text_area("Input Text", "Once upon a time", max_chars=512) # Limit to 512 characters
length = st.slider("Predict Additional Text of Length", 1, 50, 10)
num_sequences = st.slider("Number of Sequences to Generate", 1, 5, 1)
if st.button("Generate"):
    model = load_model()          # Load the model for inference
    tokenizer = load_tokenizer()  # Load the tokenizer
    st.write("Generating text...")
    generated_texts = generate_text(model, tokenizer, input_text, length, num_sequences)
    st.write("Text generation complete.")
    st.write("Generated Texts:")
    for i, text in enumerate(generated_texts):
        st.subheader(f"Sequence {i + 1}")
        st.write(text)