parkermoe committed
Commit · 5bf5b48 · 1 Parent(s): 521b87a
Add application file
app.py ADDED
@@ -0,0 +1,99 @@
import torch
import torch.nn as nn
import os
import pickle
import torch.nn.functional as F
import numpy as np
import gradio as gr
import torchtext  # must be importable so the pickled torchtext vocab can be loaded

# Prefer CUDA, then Apple's MPS backend, and fall back to CPU.
device = torch.device(
    "cuda" if torch.cuda.is_available()
    else "mps" if torch.backends.mps.is_available()
    else "cpu"
)

# Hyperparameters (training-time settings kept for reference).
VOCAB_SIZE = 10000
MAX_LEN = 200
EMBEDDING_DIM = 100
N_UNITS = 128
VALIDATION_SPLIT = 0.2
SEED = 42
LOAD_MODEL = False
BATCH_SIZE = 128
EPOCHS = 25
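Note that SEED is declared above but never applied anywhere in the file, so sampling differs across runs. A minimal sketch of wiring it in (assumed, not part of the committed file):

# Hypothetical: seed torch and numpy so generation is reproducible.
torch.manual_seed(SEED)
np.random.seed(SEED)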
# Word-level language model: embedding -> LSTM -> per-token distribution over the vocabulary.
class LSTMModel(nn.Module):
    def __init__(self, vocab_size, embedding_dim, hidden_dim):
        super(LSTMModel, self).__init__()
        self.embedding = nn.Embedding(vocab_size, embedding_dim)
        self.lstm = nn.LSTM(embedding_dim, hidden_dim, batch_first=True)
        self.fc = nn.Linear(hidden_dim, vocab_size)
        self.log_softmax = nn.LogSoftmax(dim=2)

    def forward(self, x):
        x = self.embedding(x)
        x, _ = self.lstm(x)
        x = self.fc(x)
        return self.log_softmax(x)
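For orientation: the forward pass maps a (batch, seq_len) tensor of token ids to log-probabilities over the vocabulary at every position. A quick shape check, illustrative only and not part of the committed file:

# Illustrative shape check for LSTMModel (assumed usage, not in the commit).
m = LSTMModel(VOCAB_SIZE, EMBEDDING_DIM, N_UNITS)
dummy = torch.randint(0, VOCAB_SIZE, (2, 16))   # 2 sequences of 16 token ids
out = m(dummy)
assert out.shape == (2, 16, VOCAB_SIZE)         # per-position log-probabilities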
# loading model from checkpoint
model = LSTMModel(VOCAB_SIZE, EMBEDDING_DIM, N_UNITS).to(device)

checkpoint_path = '/Users/parkermoesta/Library/Mobile Documents/com~apple~CloudDocs/Generative Models/LSTM/recipe_generator_LSTM/checkpoint_epoch_99.pth'
# map_location lets the checkpoint load even if it was saved on a different device.
checkpoint = torch.load(checkpoint_path, map_location=device)
model.load_state_dict(checkpoint['model_state_dict'])
print('Loaded model from checkpoint')
def load_vocab(directory):
    file_path = os.path.join(directory, 'vocab.pkl')
    with open(file_path, 'rb') as f:  # 'f' avoids shadowing the built-in input()
        vocab = pickle.load(f)
    print(f"Vocabulary loaded from {file_path}")
    return vocab

vocab = load_vocab('/Users/parkermoesta/Library/Mobile Documents/com~apple~CloudDocs/Generative Models/LSTM/recipe_generator_LSTM/data')
class TextGenerator:
    def __init__(self, vocab, top_k=10):
        self.vocab = vocab
        self.top_k = top_k  # note: sample_from below does not currently use this

    def sample_from(self, logits, temperature):
        # Temperature-scaled sampling over the full vocabulary.
        probs = F.softmax(logits / temperature, dim=-1).cpu().numpy()
        return np.random.choice(len(probs), p=probs)

    def generate(self, model, device, start_prompt, max_tokens, temperature):
        model.eval()

        tokens = [self.vocab.get_stoi()[token] for token in start_prompt.split()]
        tokens = torch.LongTensor(tokens).unsqueeze(0).to(device)

        with torch.no_grad():
            for _ in range(max_tokens):
                output = model(tokens)
                # Score the next token from the last position in the sequence.
                next_token_logits = output[0, -1, :]
                next_token = self.sample_from(next_token_logits, temperature)
                tokens = torch.cat([tokens, torch.LongTensor([[next_token]]).to(device)], dim=1)

        generated_text = ' '.join(self.vocab.get_itos()[token] for token in tokens[0])
        return generated_text
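One loose end worth flagging: TextGenerator accepts and stores top_k, but sample_from samples over the full vocabulary and never applies it. A minimal sketch of a top-k variant of sample_from, assuming self.top_k is meant to bound the candidate set (illustrative, not the committed behavior):

def sample_from(self, logits, temperature):
    # Hypothetical top-k variant: restrict sampling to the k most likely tokens.
    top_logits, top_idx = torch.topk(logits, self.top_k)
    probs = F.softmax(top_logits / temperature, dim=-1).cpu().numpy()
    choice = np.random.choice(len(probs), p=probs)
    return top_idx[choice].item()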
text_generator = TextGenerator(vocab=vocab, top_k=10)
generated_text = text_generator.generate(model=model, device=device, start_prompt="recipe for", max_tokens=100, temperature=0.5)

print(f"\nGenerated Text: {generated_text}")
def generate_recipe():
    return text_generator.generate(model=model, device=device, start_prompt="recipe for", max_tokens=100, temperature=0.5)

iface = gr.Interface(
    fn=generate_recipe,
    inputs=[],
    outputs="text",
    title="Recipe Generator",
    description="This is an LSTM-based recurrent neural network trained to generate recipes. Press submit to generate a new recipe!",
)

iface.launch()
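A deployment caveat: both paths above point at a local '/Users/parkermoesta/...' iCloud directory, which would not exist inside a Hugging Face Space container. A minimal sketch of repo-relative paths, assuming the checkpoint and data/vocab.pkl are committed alongside app.py (the filenames are reused from above; the layout itself is an assumption):

# Hypothetical repo-relative layout for the Space; assumes checkpoint_epoch_99.pth
# and data/vocab.pkl sit next to app.py in the repository.
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
checkpoint_path = os.path.join(BASE_DIR, 'checkpoint_epoch_99.pth')
vocab = load_vocab(os.path.join(BASE_DIR, 'data'))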