Update api.py
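Replaces the old mirostat + top-p, sentence-at-a-time generator with a simpler streaming sampler (fixed temperature, top-100 truncation, one token yielded per step with a 0.1 s delay), and swaps the `requests` import for explicit `numpy`/`tensorflow` imports.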
api.py CHANGED
@@ -1,57 +1,56 @@
 from fastapi import FastAPI, Request
 from fastapi.responses import StreamingResponse
 import asyncio
-import …
-import …
+import numpy as np
+import tensorflow as tf
+from tensorflow.keras import layers
 import sentencepiece as spm
-import requests
 
 app = FastAPI()
-sp.…
-…
-emb_cos = tf.…
-…
-x1 * …
-…
+
+# Load the SentencePiece tokenizer
+sp = spm.SentencePieceProcessor()
+sp.load("kolig_unigram.model")
+
+pad_id = sp.piece_to_id("<pad>")
+if pad_id == -1: pad_id = 0
+start_id = sp.piece_to_id("<start>")
+if start_id == -1: start_id = 1
+end_id = sp.piece_to_id("<end>")
+if end_id == -1: end_id = 2
+unk_id = sp.piece_to_id("<unk>")
+if unk_id == -1: unk_id = 3
+
+vocab_size = sp.get_piece_size()
+max_len = 100
+
+def text_to_ids(text):
+    return sp.encode(text, out_type=int)
+
+def ids_to_text(ids):
+    return sp.decode(ids)
+
+class RotaryPositionalEmbedding(layers.Layer):
+    def __init__(self, dim):
+        super().__init__()
+        inv_freq = 1.0 / (10000 ** (np.arange(0, dim, 2) / dim))
+        self.inv_freq = tf.constant(inv_freq, dtype=tf.float32)
+
+    def call(self, x):
+        batch, heads, seq_len, depth = tf.unstack(tf.shape(x))
+        t = tf.range(seq_len, dtype=tf.float32)
+        freqs = tf.einsum('i,j->ij', t, self.inv_freq)
+        emb_sin = tf.sin(freqs)
+        emb_cos = tf.cos(freqs)
+        emb_cos = tf.reshape(emb_cos, [1, 1, seq_len, -1])
+        emb_sin = tf.reshape(emb_sin, [1, 1, seq_len, -1])
+        x1 = x[..., ::2]
+        x2 = x[..., 1::2]
+        x_rotated = tf.stack([
+            x1 * emb_cos - x2 * emb_sin,
+            x1 * emb_sin + x2 * emb_cos
+        ], axis=-1)
+        x_rotated = tf.reshape(x_rotated, tf.shape(x))
         return x_rotated
 
 class SwiGLU(tf.keras.layers.Layer):
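Note on the rotary embedding added above: it pairs even/odd channels and rotates each pair by a position-dependent angle. A minimal NumPy sketch (standalone, not part of the diff) that mirrors the same pairing and confirms the rotation leaves token-vector norms unchanged:

import numpy as np

def rope(x, base=10000.0):
    # Rotate channel pairs (2i, 2i+1) by position-dependent angles,
    # mirroring RotaryPositionalEmbedding.call above.
    seq_len, dim = x.shape
    inv_freq = 1.0 / (base ** (np.arange(0, dim, 2) / dim))
    freqs = np.outer(np.arange(seq_len), inv_freq)   # (seq_len, dim // 2)
    cos, sin = np.cos(freqs), np.sin(freqs)
    x1, x2 = x[:, ::2], x[:, 1::2]
    out = np.empty_like(x)
    out[:, ::2] = x1 * cos - x2 * sin
    out[:, 1::2] = x1 * sin + x2 * cos
    return out

x = np.random.randn(7, 8)
print(np.allclose(np.linalg.norm(rope(x), axis=-1),
                  np.linalg.norm(x, axis=-1)))       # True: a pure rotation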
@@ -64,77 +63,77 @@ class SwiGLU(tf.keras.layers.Layer):
         x_proj = self.proj(x)
         x_val, x_gate = tf.split(x_proj, 2, axis=-1)
         return self.out(x_val * tf.nn.silu(x_gate))
 
 class GPTBlock(tf.keras.layers.Layer):
     def __init__(self, d_model, d_ff, num_heads=8, dropout_rate=0.1, adapter_dim=64):
         super().__init__()
         self.ln1 = tf.keras.layers.LayerNormalization(epsilon=1e-5)
         self.mha = tf.keras.layers.MultiHeadAttention(num_heads=num_heads, key_dim=d_model // num_heads)
         self.dropout1 = tf.keras.layers.Dropout(dropout_rate)
         self.adapter_down = tf.keras.layers.Dense(adapter_dim, activation='gelu')
         self.adapter_up = tf.keras.layers.Dense(d_model)
 
         self.ln2 = tf.keras.layers.LayerNormalization(epsilon=1e-5)
         self.ffn = SwiGLU(d_model, d_ff)
         self.dropout2 = tf.keras.layers.Dropout(dropout_rate)
         self.rope = RotaryPositionalEmbedding(d_model // num_heads)
 
     def call(self, x, training=False):
         x_norm = self.ln1(x)
         b, s, _ = tf.shape(x_norm)[0], tf.shape(x_norm)[1], tf.shape(x_norm)[2]
         h = self.mha.num_heads
         d = x_norm.shape[-1] // h
 
         qkv = tf.reshape(x_norm, [b, s, h, d])
         qkv = tf.transpose(qkv, [0, 2, 1, 3])
         q = self.rope(qkv)
         k = self.rope(qkv)
         q = tf.reshape(tf.transpose(q, [0, 2, 1, 3]), [b, s, h * d])
         k = tf.reshape(tf.transpose(k, [0, 2, 1, 3]), [b, s, h * d])
 
         attn_out = self.mha(query=q, value=x_norm, key=k, use_causal_mask=True, training=training)
         attn_out = self.dropout1(attn_out, training=training)
 
         adapter_out = self.adapter_up(self.adapter_down(attn_out))
         attn_out = attn_out + adapter_out
 
         x = x + attn_out
         ffn_out = self.ffn(self.ln2(x))
         x = x + self.dropout2(ffn_out, training=training)
         return x
 
 class InteractGPT(tf.keras.Model):
     def __init__(self, vocab_size, seq_len, d_model, d_ff, n_layers, num_heads=8, dropout_rate=0.1):
         super().__init__()
         self.token_embedding = tf.keras.layers.Embedding(vocab_size, d_model)
         self.blocks = [GPTBlock(d_model, d_ff, num_heads, dropout_rate) for _ in range(n_layers)]
         self.ln_f = tf.keras.layers.LayerNormalization(epsilon=1e-5)
 
     def call(self, x, training=False):
         x = self.token_embedding(x)
         for block in self.blocks:
             x = block(x, training=training)
         x = self.ln_f(x)
         logits = tf.matmul(x, self.token_embedding.embeddings, transpose_b=True)
         return logits
 
 model = InteractGPT(vocab_size=vocab_size, seq_len=max_len, d_model=256, d_ff=1024, n_layers=6)
 
 dummy_input = tf.zeros((1, max_len), dtype=tf.int32)
 _ = model(dummy_input)
 model.load_weights("InteractGPT.weights.h5")
 print("모델 가중치 로드 완료!")  # "Model weights loaded!"
 
-def generate_text_mirostat_top_p(model, prompt, max_len=100, max_gen=98, …
+repetition_penalty = 1.2
+
+async def generate_text_stream(prompt: str):
     model_input = text_to_ids(f"<start> {prompt} <sep>")
     model_input = model_input[:max_len]
     generated = list(model_input)
 
     tau = 5.0
 
+    while True:
         pad_length = max(0, max_len - len(generated))
         input_padded = np.pad(generated, (0, pad_length), constant_values=pad_id)
         input_tensor = tf.convert_to_tensor([input_padded])
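Note on `InteractGPT`: the output head is weight-tied, reusing the token embedding matrix as the output projection instead of a separate `Dense(vocab_size)` layer. A standalone sketch of the same trick (toy sizes, not values from the diff):

import tensorflow as tf

# Weight tying, as in InteractGPT.call: the input embedding matrix doubles
# as the output projection, so no separate vocabulary-sized head is needed.
emb = tf.keras.layers.Embedding(input_dim=1000, output_dim=256)
h = emb(tf.constant([[1, 2, 3]]))                        # (1, 3, 256) hidden states
logits = tf.matmul(h, emb.embeddings, transpose_b=True)  # (1, 3, 1000) logits
print(logits.shape)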
@@ -148,69 +147,35 @@ def generate_text_mirostat_top_p(model, prompt, max_len=100, max_gen=98,
         for token_id, count in token_counts.items():
             next_token_logits[token_id] /= (repetition_penalty ** count)
 
-        if len(generated) >= min_len:
+        if len(generated) >= 20:
             next_token_logits[end_id] -= 5.0
             next_token_logits[pad_id] -= 10.0
-
-        # temperature scaling
-        next_token_logits = next_token_logits / temperature
-
-        # --- mirostat + top-p sampling ---
+        next_token_logits = next_token_logits / 1.0  # fixed temperature
 
         logits_stable = next_token_logits - np.max(next_token_logits)
         probs = np.exp(logits_stable)
         probs /= probs.sum()
 
-        # 1. shortlist the mirostat top-m candidates
         sorted_indices = np.argsort(-probs)
-        top_indices = sorted_indices[:…
+        top_indices = sorted_indices[:100]
         top_probs = probs[top_indices]
         top_probs /= top_probs.sum()
 
-        # 2. mirostat sampling
         sampled_index = np.random.choice(top_indices, p=top_probs)
-
-        # …
-        …
-        filtered_probs /= filtered_probs.sum()
-
-        # 4. final token sampling
-        final_token = np.random.choice(filtered_indices, p=filtered_probs)
-        generated.append(int(final_token))
-
-        decoded_text = sp.decode(generated)
-        # strip special tokens
-        for token in ["<start>", "<sep>", "<end>"]:
-            decoded_text = decoded_text.replace(token, "")
-
-        decoded_text = decoded_text.strip()
-
-        if len(generated) >= min_len and (final_token == end_id or decoded_text.endswith(('.', '!', '?'))):
-            yield decoded_text
-            break
-
-async def async_generator_wrapper(prompt: str):
-    # wrap the synchronous generator in an async one
-    loop = asyncio.get_event_loop()
-    gen = generate_text_mirostat_top_p(model, prompt)
-
-    for text_piece in gen:
-        yield text_piece
-        # throttle generation speed (0.1 s delay)
+        generated.append(int(sampled_index))
+
+        new_token_text = sp.decode([int(sampled_index)])
+
+        # skip special tokens; stop once <end> is sampled
+        if any(tok in new_token_text for tok in ["<start>", "<sep>", "<end>", "<pad>"]):
+            if sampled_index == end_id:
+                break
+            continue
+
+        yield new_token_text
         await asyncio.sleep(0.1)
 
 @app.get("/generate")
 async def generate(request: Request):
-    # read the prompt from the query string, with a default
     prompt = request.query_params.get("prompt", "안녕하세요")  # default prompt: "Hello"
-
-    # send it back as a streaming response
-    return StreamingResponse(async_generator_wrapper(prompt), media_type="text/plain")
+    return StreamingResponse(generate_text_stream(prompt), media_type="text/plain")
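The removed sampler drew from a mirostat shortlist and then applied a second filtering step before the final draw; given its "mirostat + top-p sampling" comment and the surviving `filtered_probs` line, that step was presumably nucleus (top-p) filtering, though its cutoff value is not recoverable from the diff. The new loop simply truncates to the 100 most probable tokens. For comparison, a self-contained sketch of nucleus filtering over a probability vector, with an assumed `p=0.9`:

import numpy as np

def top_p_filter(probs, p=0.9):
    # Keep the smallest high-probability prefix whose cumulative mass >= p,
    # then renormalize, as the removed filtered_probs step apparently did.
    order = np.argsort(-probs)                  # most to least likely
    cum = np.cumsum(probs[order])
    cutoff = int(np.searchsorted(cum, p)) + 1   # number of tokens to keep
    filtered_indices = order[:cutoff]
    filtered_probs = probs[filtered_indices]
    filtered_probs /= filtered_probs.sum()
    return filtered_indices, filtered_probs

probs = np.array([0.5, 0.2, 0.15, 0.1, 0.05])
idx, fp = top_p_filter(probs, p=0.9)
print(idx)  # [0 1 2 3]: the smallest prefix reaching 90% of the mass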
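Finally, a hypothetical client for the new streaming endpoint. It assumes the app is served locally (e.g. via `uvicorn api:app`, a command not part of this diff) and uses `requests`, which the old version imported:

import requests

# Stream tokens from /generate; the server yields roughly one token per 0.1 s.
with requests.get(
    "http://127.0.0.1:8000/generate",
    params={"prompt": "안녕하세요"},
    stream=True,
) as resp:
    for chunk in resp.iter_content(chunk_size=None):
        print(chunk.decode("utf-8"), end="", flush=True)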