# model-prototype / Model.py
# Requires: pip install sentencepiece  (the original "!pip install" line only works inside a notebook)
import os

import numpy as np
import requests
import sentencepiece as spm
import tensorflow as tf
from tensorflow.keras import layers
tf.get_logger().setLevel("ERROR")
SEED = 42
tf.random.set_seed(SEED)
np.random.seed(SEED)
# TPU initialization
try:
    resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu="local")
    tf.tpu.experimental.initialize_tpu_system(resolver)
    strategy = tf.distribute.TPUStrategy(resolver)
    print("✅ TPU initialized:", resolver.cluster_spec().as_dict())
    on_tpu = True
except Exception as e:
    print("⚠️ TPU unavailable, running on GPU/CPU:", e)
    strategy = tf.distribute.get_strategy()
    on_tpu = False
# Mixed precision
from tensorflow.keras import mixed_precision
policy = mixed_precision.Policy("mixed_bfloat16" if on_tpu else "float32")
mixed_precision.set_global_policy(policy)
print("โœ… Mixed precision:", policy)
# =======================
# 1) File downloads
# =======================
def download_file(url, save_path):
    r = requests.get(url, stream=True)
    r.raise_for_status()
    with open(save_path, "wb") as f:
        for chunk in r.iter_content(8192 * 2):
            f.write(chunk)
    print(f"✅ Saved {save_path}")
DATA_PATH = "corpus.txt"
TOKENIZER_PATH = "ko_unigram.model"
if not os.path.exists(DATA_PATH):
    download_file(
        "https://huggingface.co/datasets/Yuchan5386/Prototype/resolve/main/corpus_ko.txt?download=true",
        DATA_PATH
    )
if not os.path.exists(TOKENIZER_PATH):
    download_file(
        "https://huggingface.co/Yuchan5386/Respiso/resolve/main/bpe.model?download=true",
        TOKENIZER_PATH
    )
sp = spm.SentencePieceProcessor(TOKENIZER_PATH)
pad_id = sp.piece_to_id("<pad>") if sp.piece_to_id("<pad>") != -1 else 0
start_id = sp.piece_to_id("<start>")
sep_id = sp.piece_to_id("<sep>")
end_id = sp.piece_to_id("<end>")
unk_id = sp.piece_to_id("<unk>")
vocab_size = sp.get_piece_size()
print(f"โœ… Vocabulary size: {vocab_size}")
max_len = 512
batch_size = 256
def text_to_ids(text):
    return sp.encode(text, out_type=int)

def ids_to_text(ids):
    return sp.decode(ids)

def txt_stream(file_path):
    with open(file_path, "r", encoding="utf-8") as f:
        for line in f:
            text = line.strip()
            if not text:
                continue
            ids = text_to_ids(text)
            ids = ids[:max_len - 1]  # truncate to max_len - 1 to leave room for the trailing <end>
            full_input = ids + [end_id]
            pad_len = max_len - len(full_input)
            full_input += [pad_id] * pad_len
            # target = next-token shifted sequence
            target = full_input[1:] + [pad_id]
            yield (
                tf.convert_to_tensor(full_input, dtype=tf.int32),
                tf.convert_to_tensor(target, dtype=tf.int32)
            )
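# Illustrative example of the shift above (hypothetical token ids, assuming pad_id == 0):
#   full_input = [t1, t2, t3, <end>, 0, 0, ...]
#   target     = [t2, t3, <end>, 0, 0, 0, ...]
# so the model at position i is trained to predict the token at position i + 1.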
dataset = tf.data.Dataset.from_generator(
    lambda: txt_stream(DATA_PATH),
    output_signature=(
        tf.TensorSpec(shape=(max_len,), dtype=tf.int32),
        tf.TensorSpec(shape=(max_len,), dtype=tf.int32),
    )
)
dataset = dataset.shuffle(2000, seed=SEED).batch(batch_size, drop_remainder=True).prefetch(tf.data.AUTOTUNE)
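# Each element of `dataset` is a pair of int32 tensors of shape (batch_size, max_len),
# i.e. (256, 512): the padded input sequence and its one-step-shifted target.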
with strategy.scope():
    dist_dataset = strategy.experimental_distribute_dataset(dataset)
class Lo(layers.Layer):
    def __init__(self, d_model):
        super().__init__()
        self.proj = layers.Dense(d_model, use_bias=True, dtype='float32')
        self.p = layers.Dense(128, use_bias=True, dtype='float32')
        self._out_dtype = 'float32'

    def call(self, x):
        x_f32 = tf.cast(x, tf.float32)
        x = self.proj(x_f32)
        x = tf.nn.gelu(x)
        x = self.p(x)
        return tf.cast(x, self._out_dtype)
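# Lo projects to d_model with a GELU, then compresses to a fixed 128-dim space;
# the gating scores and values inside LoU therefore live in 128 dimensions.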
class SwiGLU(layers.Layer):
    def __init__(self, d_model):
        super().__init__()
        self.W = layers.Dense(3500, dtype='float32')
        self.W1 = layers.Dense(d_model, dtype='float32')

    def call(self, x):
        in_dtype = x.dtype  # remember the input dtype before the float32 cast overwrites x
        x = tf.cast(x, tf.float32)
        x = self.W(x)
        a, b = tf.split(x, 2, axis=-1)
        out = self.W1(tf.nn.silu(a) * b)
        return tf.cast(out, in_dtype)
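# SwiGLU feed-forward: out = W1(silu(a) * b), where [a, b] = split(W(x), 2);
# W projects to 3500 units, so each half of the gate is 1750-dimensional.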
class LoU(layers.Layer):
    def __init__(self, d_model, clip_value=5.0, eps=1e-6):
        super().__init__()
        self.d_model = d_model
        self.clip_value = float(clip_value)
        self.eps = float(eps)
        self.Q = layers.Dense(d_model, dtype='float32')
        self.K = layers.Dense(d_model, dtype='float32')
        self.V = layers.Dense(d_model, dtype='float32')
        self.Qr = Lo(d_model)
        self.Kr = Lo(d_model)
        self.Vr = Lo(d_model)
        self.proj = layers.Dense(d_model, use_bias=True, dtype='float32')
        self.O = layers.Dense(d_model, dtype='float32')
        self.norm = layers.LayerNormalization(epsilon=1e-5, dtype='float32')
        self.norm1 = layers.LayerNormalization(epsilon=1e-5, dtype='float32')
        self.alpha_linear = layers.Dense(1, activation='sigmoid', dtype='float32')

    def _ema_over_time(self, score, alpha_dynamic):
        # (batch, time, dim) -> (time, batch, dim) so tf.scan runs over the time axis
        seq = tf.transpose(score, perm=[1, 0, 2])
        alpha_seq = tf.transpose(alpha_dynamic, perm=[1, 0, 2])

        def step(prev_ema, inputs):
            x_t, alpha_t = inputs
            return alpha_t * x_t + (1.0 - alpha_t) * prev_ema

        init = seq[0]
        remaining_seq = seq[1:]
        remaining_alpha = alpha_seq[1:]
        elems = (remaining_seq, remaining_alpha)
        ema_seq = tf.scan(fn=step, elems=elems, initializer=init)
        ema_seq = tf.concat([tf.expand_dims(init, 0), ema_seq], axis=0)
        ema = tf.transpose(ema_seq, perm=[1, 0, 2])
        return ema
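    # The recurrence implemented above, per feature channel:
    #   ema_0 = score_0
    #   ema_t = alpha_t * score_t + (1 - alpha_t) * ema_{t-1}
    # where alpha_t in (0, 1) is predicted per token by alpha_linear.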
    def call(self, x):
        x_f32 = tf.cast(x, tf.float32)
        residual = x_f32
        x_f32 = self.norm1(x_f32)
        q = self.Q(x_f32)
        k = self.K(x_f32)
        V = self.V(x_f32)
        q = self.Qr(q)
        k = self.Kr(k)
        V = self.Vr(V)
        # previous code:
        # g_q = tf.nn.sigmoid(q)
        # g_k = tf.nn.sigmoid(k)
        g_q = (tf.nn.tanh(q) + 1.0) / 2.0
        g_k = (tf.nn.tanh(k) + 1.0) / 2.0
        score = g_q * g_k
        alpha_dynamic = self.alpha_linear(x_f32)
        score_ema = self._ema_over_time(score, alpha_dynamic)
        mean_last = tf.reduce_mean(score_ema, axis=-1, keepdims=True)
        denom = tf.maximum(mean_last, self.eps)
        score_norm = score_ema / denom
        score_clipped = tf.clip_by_value(score_norm, -self.clip_value, self.clip_value)
        x_comb = score_clipped * V
        out = self.proj(x_comb)
        out = self.norm(out + residual)
        return tf.cast(out, x.dtype)
class Block(layers.Layer):
    def __init__(self, d_model):
        super().__init__()
        self.lou = LoU(d_model)
        self.glu = SwiGLU(d_model)
        self.norm = layers.LayerNormalization(epsilon=1e-5, dtype='float32')

    def call(self, x):
        x = self.lou(x)
        # Residual around the SwiGLU branch; cast back to the input dtype so the
        # addition is well-typed under mixed precision.
        h = self.norm(self.glu(x))
        return tf.cast(h, x.dtype) + x
class ReLM(tf.keras.Model):
    def __init__(self, vocab_size, max_seq_len, d_model, n_layers, dropout_rate=0.1):
        super().__init__()
        self.token_embedding = layers.Embedding(vocab_size, d_model)
        self.pos_embedding = layers.Embedding(max_seq_len, d_model)
        self.blocks = [Block(d_model) for _ in range(n_layers)]
        self.ln_f = layers.LayerNormalization(epsilon=1e-5, dtype="float32")

    def call(self, x, training=False):
        seq_len = tf.shape(x)[1]
        positions = tf.range(seq_len)[tf.newaxis, :]
        x = self.token_embedding(x) + self.pos_embedding(positions)
        for block in self.blocks:
            x = block(x)
        x = self.ln_f(x)
        # Weight tying: the token embedding matrix doubles as the output projection.
        embedding_matrix = tf.cast(self.token_embedding.embeddings, x.dtype)
        logits = tf.matmul(x, embedding_matrix, transpose_b=True)
        return tf.cast(logits, tf.float32)
loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True, reduction='none')

def masked_loss(y_true, y_pred):
    loss = loss_fn(y_true, y_pred)
    mask = tf.cast(tf.not_equal(y_true, pad_id), tf.float32)
    return tf.reduce_sum(loss * mask) / tf.reduce_sum(mask)

def masked_perplexity(y_true, y_pred):
    loss = loss_fn(y_true, y_pred)
    mask = tf.cast(tf.not_equal(y_true, pad_id), tf.float32)
    avg_loss = tf.reduce_sum(loss * mask) / tf.reduce_sum(mask)
    return tf.exp(tf.minimum(avg_loss, 10.0))  # cap for numerical stability
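# Perplexity = exp(mean cross-entropy over non-pad positions); the loss is capped at 10
# (exp(10) ≈ 22026) so a divergent batch cannot overflow the metric.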
def create_lr_schedule(initial_lr=5e-5, decay_steps=10000, decay_rate=0.9):
    return tf.keras.optimizers.schedules.ExponentialDecay(
        initial_learning_rate=initial_lr,
        decay_steps=decay_steps,
        decay_rate=decay_rate,
        staircase=False
    )
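# With staircase=False the rate decays smoothly:
#   lr(step) = initial_lr * decay_rate ** (step / decay_steps)
# e.g. 5e-5 falls to 4.5e-5 after 10,000 steps.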
# ๋ชจ๋ธ ์ƒ์„ฑ
model = ReLM(
vocab_size=vocab_size,
max_seq_len=max_len,
d_model=700,
n_layers=2
)
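# Note: the model and optimizer are created outside strategy.scope(), so model.fit below
# runs under the default strategy; building them inside strategy.scope() would be needed to
# actually train on the TPU (dist_dataset above is only used by a custom training loop).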
# ์˜ตํ‹ฐ๋งˆ์ด์ € ์„ค์ •
optimizer = tf.keras.optimizers.Adam(
learning_rate=create_lr_schedule(),
beta_1=0.9,
beta_2=0.95,
epsilon=1e-8,
clipnorm=1.0
)
# ๋ชจ๋ธ ์ปดํŒŒ์ผ
model.compile(
optimizer=optimizer,
loss=masked_loss,
metrics=[
masked_perplexity
]
)
# ๋”๋ฏธ ์ธํ’‹์œผ๋กœ ๋ชจ๋ธ ์ดˆ๊ธฐํ™”
dummy_input = np.zeros((1, max_len), dtype=np.int32)
model(dummy_input)
model.summary()
history = model.fit(dataset, epochs=1, verbose=1)
# ๊ฐ€์ค‘์น˜ ์ €์žฅ
model.save_weights("model.weights.h5")
print("๋ชจ๋ธ ๊ฐ€์ค‘์น˜ ์ €์žฅ ์™„๋ฃŒ!")
def generate_text_topp(model, prompt, max_len=150, max_gen=150, p=0.9, temperature=0.8, min_len=20):
    model_input = text_to_ids(f"<start> {prompt}")
    model_input = model_input[:max_len]
    generated = list(model_input)
    for step in range(max_gen):
        if len(generated) > max_len:
            input_seq = generated[-max_len:]
        else:
            input_seq = generated
        input_padded = np.pad(input_seq, (0, max_len - len(input_seq)), constant_values=pad_id)
        input_tensor = tf.convert_to_tensor([input_padded])
        logits = model(input_tensor, training=False)
        next_token_logits = logits[0, len(input_seq) - 1].numpy()
        # Penalize <end> (to avoid stopping too early) and <pad>
        next_token_logits[end_id] -= 5.0
        next_token_logits[pad_id] -= 10.0
        probs = tf.nn.softmax(next_token_logits / temperature).numpy()
        # Nucleus (top-p) sampling: keep the smallest prefix of tokens whose mass reaches p
        sorted_indices = np.argsort(probs)[::-1]
        sorted_probs = probs[sorted_indices]
        cumulative_probs = np.cumsum(sorted_probs)
        cutoff = np.searchsorted(cumulative_probs, p)
        top_indices = sorted_indices[:cutoff + 1]
        top_probs = sorted_probs[:cutoff + 1]
        top_probs /= np.sum(top_probs)
        next_token_id = np.random.choice(top_indices, p=top_probs)
        if next_token_id == end_id and len(generated) >= min_len:
            break
        generated.append(int(next_token_id))
    return ids_to_text(generated)
print("\n\n===== ์ƒ์„ฑ ๊ฒฐ๊ณผ =====")
print(generate_text_topp(model, "์ง€๋‚œ 2๋…„ ๋™์•ˆ ์ถœ์—ฐ์—ฐ์ด ๊ตญ๊ฐ€๊ฐ€ ํ•„์š”ํ•œ ์—ฐ๊ตฌ๋ฅผ", p=0.9))