Yuchan5386 committed
Commit f724a0c · verified · 1 Parent(s): 98a2a20

Create app.py

Files changed (1):
  app.py +200 -0
app.py ADDED
@@ -0,0 +1,200 @@
+ import numpy as np
+ import tensorflow as tf
+ from tensorflow.keras import layers
+ import sentencepiece as spm
+ import gradio as gr
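+
+ # Load the SentencePiece tokenizer and resolve the special-token ids,
+ # falling back to conventional defaults when a piece is not in the vocab.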
+ sp = spm.SentencePieceProcessor()
+ sp.load("kolig_unigram.model")
+
+ pad_id = sp.piece_to_id("<pad>")
+ if pad_id == -1: pad_id = 0
+ start_id = sp.piece_to_id("<start>")
+ if start_id == -1: start_id = 1
+ end_id = sp.piece_to_id("<end>")
+ if end_id == -1: end_id = 2
+ unk_id = sp.piece_to_id("<unk>")
+ if unk_id == -1: unk_id = 3
+
+ vocab_size = sp.get_piece_size()
+ max_len = 100
+
+ def text_to_ids(text):
+     return sp.encode(text, out_type=int)
+
+ def ids_to_text(ids):
+     return sp.decode(ids)
+
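+ # Rotary positional embedding (RoPE): rotates even/odd feature pairs of
+ # each head by position-dependent angles, so relative position information
+ # survives the query-key dot product.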
+ class RotaryPositionalEmbedding(layers.Layer):
+     def __init__(self, dim):
+         super().__init__()
+         inv_freq = 1.0 / (10000 ** (np.arange(0, dim, 2) / dim))
+         self.inv_freq = tf.constant(inv_freq, dtype=tf.float32)
+
+     def call(self, x):
+         batch, heads, seq_len, depth = tf.unstack(tf.shape(x))
+         t = tf.range(seq_len, dtype=tf.float32)
+         freqs = tf.einsum('i,j->ij', t, self.inv_freq)
+         emb_sin = tf.sin(freqs)
+         emb_cos = tf.cos(freqs)
+         emb_cos = tf.reshape(emb_cos, [1, 1, seq_len, -1])
+         emb_sin = tf.reshape(emb_sin, [1, 1, seq_len, -1])
+         x1 = x[..., ::2]
+         x2 = x[..., 1::2]
+         x_rotated = tf.stack([
+             x1 * emb_cos - x2 * emb_sin,
+             x1 * emb_sin + x2 * emb_cos
+         ], axis=-1)
+         x_rotated = tf.reshape(x_rotated, tf.shape(x))
+         return x_rotated
+
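+ # SwiGLU feed-forward: a gated linear unit with SiLU gating, used here in
+ # place of the standard ReLU MLP.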
+ class SwiGLU(tf.keras.layers.Layer):
+     def __init__(self, d_model, d_ff):
+         super().__init__()
+         self.proj = tf.keras.layers.Dense(d_ff * 2)
+         self.out = tf.keras.layers.Dense(d_model)
+
+     def call(self, x):
+         x_proj = self.proj(x)
+         x_val, x_gate = tf.split(x_proj, 2, axis=-1)
+         return self.out(x_val * tf.nn.silu(x_gate))
+
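+ # Decoder block: pre-LayerNorm causal self-attention with RoPE applied to
+ # queries/keys, a small bottleneck adapter on the attention output, and a
+ # SwiGLU feed-forward, each wrapped in a residual connection.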
+ class GPTBlock(tf.keras.layers.Layer):
+     def __init__(self, d_model, d_ff, num_heads=8, dropout_rate=0.1, adapter_dim=64):
+         super().__init__()
+         self.ln1 = tf.keras.layers.LayerNormalization(epsilon=1e-5)
+         self.mha = tf.keras.layers.MultiHeadAttention(num_heads=num_heads, key_dim=d_model // num_heads)
+         self.dropout1 = tf.keras.layers.Dropout(dropout_rate)
+         self.adapter_down = tf.keras.layers.Dense(adapter_dim, activation='gelu')
+         self.adapter_up = tf.keras.layers.Dense(d_model)
+
+         self.ln2 = tf.keras.layers.LayerNormalization(epsilon=1e-5)
+         self.ffn = SwiGLU(d_model, d_ff)
+         self.dropout2 = tf.keras.layers.Dropout(dropout_rate)
+         self.rope = RotaryPositionalEmbedding(d_model // num_heads)
+
+     def call(self, x, training=False):
+         x_norm = self.ln1(x)
+         b, s = tf.shape(x_norm)[0], tf.shape(x_norm)[1]
+         h = self.mha.num_heads
+         d = x_norm.shape[-1] // h
+
+         # Split the features into heads, apply RoPE, then flatten back so
+         # the rotated tensors can be passed to MultiHeadAttention.
+         qkv = tf.reshape(x_norm, [b, s, h, d])
+         qkv = tf.transpose(qkv, [0, 2, 1, 3])
+         q = self.rope(qkv)
+         k = self.rope(qkv)
+         q = tf.reshape(tf.transpose(q, [0, 2, 1, 3]), [b, s, h * d])
+         k = tf.reshape(tf.transpose(k, [0, 2, 1, 3]), [b, s, h * d])
+
+         attn_out = self.mha(query=q, value=x_norm, key=k, use_causal_mask=True, training=training)
+         attn_out = self.dropout1(attn_out, training=training)
+
+         adapter_out = self.adapter_up(self.adapter_down(attn_out))
+         attn_out = attn_out + adapter_out
+
+         x = x + attn_out
+         ffn_out = self.ffn(self.ln2(x))
+         x = x + self.dropout2(ffn_out, training=training)
+         return x
+
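+ # Decoder-only LM: token embedding -> n_layers GPT blocks -> final
+ # LayerNorm; output logits are tied to the embedding matrix.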
+ class InteractGPT(tf.keras.Model):
+     def __init__(self, vocab_size, seq_len, d_model, d_ff, n_layers, num_heads=8, dropout_rate=0.1):
+         super().__init__()
+         self.token_embedding = tf.keras.layers.Embedding(vocab_size, d_model)
+         self.blocks = [GPTBlock(d_model, d_ff, num_heads, dropout_rate) for _ in range(n_layers)]
+         self.ln_f = tf.keras.layers.LayerNormalization(epsilon=1e-5)
+
+     def call(self, x, training=False):
+         x = self.token_embedding(x)
+         for block in self.blocks:
+             x = block(x, training=training)
+         x = self.ln_f(x)
+         logits = tf.matmul(x, self.token_embedding.embeddings, transpose_b=True)
+         return logits
+
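+ # Keras builds weights lazily, so a dummy forward pass must create the
+ # variables before load_weights can restore them.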
+ model = InteractGPT(vocab_size=vocab_size, seq_len=max_len, d_model=256, d_ff=1024, n_layers=6)
+
+ dummy_input = tf.zeros((1, max_len), dtype=tf.int32)  # batch of 1, sequence length max_len
+ _ = model(dummy_input)  # builds the model
+ model.load_weights("InteractGPT.weights.h5")
+ print("Model weights loaded!")
+
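+ # Helper: join generated pieces and convert SentencePiece's '▁' word
+ # boundary marker back into spaces.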
+ def decode_sp_tokens(tokens):
+     text = ''.join(tokens).replace('▁', ' ').strip()
+     return text
+
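+ # Streaming sampler: run the model step by step, apply a repetition
+ # penalty, temperature scaling, then top-k and top-p (nucleus) filtering,
+ # and yield the partially decoded text after each sampled token.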
+ def generate_text_better_sampling(model, prompt, max_len=100, max_gen=98, top_k=50, p=0.9, temperature=0.8, min_len=20, repetition_penalty=1.2):
+     model_input = text_to_ids(f"<start> {prompt} <sep>")
+     model_input = model_input[:max_len]
+     generated = list(model_input)
+     text_so_far = []
+
+     for step in range(max_gen):
+         pad_length = max(0, max_len - len(generated))
+         input_padded = np.pad(generated, (0, pad_length), constant_values=pad_id)
+         input_tensor = tf.convert_to_tensor([input_padded])
+         logits = model(input_tensor, training=False)
+         next_token_logits = logits[0, len(generated) - 1].numpy()
+
+         # Repetition penalty (frequency penalty): damp each token's logit
+         # according to how often it has already appeared.
+         token_counts = {}
+         for t in generated:
+             token_counts[t] = token_counts.get(t, 0) + 1
+         for token_id, count in token_counts.items():
+             next_token_logits[token_id] /= (repetition_penalty ** count)
+
+         # Once past the minimum length, push down the end/pad token logits
+         if len(generated) >= min_len:
+             next_token_logits[end_id] -= 5.0
+             next_token_logits[pad_id] -= 10.0
+
+         logits_temp = next_token_logits / temperature
+         probs = tf.nn.softmax(logits_temp).numpy()
+
+         # Top-k filtering
+         top_k_indices = np.argpartition(probs, -top_k)[-top_k:]
+         top_k_probs = probs[top_k_indices]
+         top_k_probs /= top_k_probs.sum()
+
+         # Top-p filtering: keep the smallest prefix of the sorted
+         # distribution whose cumulative probability reaches p
+         sorted_indices = top_k_indices[np.argsort(top_k_probs)[::-1]]
+         sorted_probs = np.sort(top_k_probs)[::-1]
+         cumulative_probs = np.cumsum(sorted_probs)
+         cutoff = np.searchsorted(cumulative_probs, p, side='left') + 1
+         filtered_indices = sorted_indices[:cutoff]
+         filtered_probs = sorted_probs[:cutoff]
+         filtered_probs /= filtered_probs.sum()
+
+         next_token_id = np.random.choice(filtered_indices, p=filtered_probs)
+         generated.append(int(next_token_id))
+
+         next_word = sp.id_to_piece(int(next_token_id))
+         text_so_far.append(next_word)
+         decoded_text = decode_sp_tokens(text_so_far)
+
+         if len(generated) >= min_len and next_token_id == end_id:
+             break
+         if len(generated) >= min_len and decoded_text.endswith(('.', '!', '?', '<end>')):
+             break
+
+         yield decoded_text
+
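+ # Chat plumbing: normalize the nickname placeholder in the user input,
+ # then stream partial completions from the sampler back to the Gradio UI.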
+ def apply_nickname_to_input(text):
+     return text.replace("@사용자1@", "사용자")
+
+ def respond(user_input, chat_history):
+     user_input = apply_nickname_to_input(user_input)
+     response = ""
+     for partial in generate_text_better_sampling(model, user_input):
+         response = partial
+         yield response
+
+ with gr.Blocks() as demo:
+     chatbot = gr.ChatInterface(fn=respond, height=600, width=400)  # tweak the size as needed
+ demo.launch(favicon_path="1.png")