Yuchan
committed on
Update Model.py
Model.py
CHANGED
@@ -54,7 +54,7 @@ if not os.path.exists(DATA_PATH):
 
 if not os.path.exists(TOKENIZER_PATH):
     download_file(
-        "https://huggingface.co/Yuchan5386/
+        "https://huggingface.co/datasets/Yuchan5386/Prototype/resolve/main/bpe.model?download=true",
         TOKENIZER_PATH
     )
 
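The new URL points the tokenizer download at the bpe.model file in the Yuchan5386/Prototype dataset repo. As a rough sketch of what this step amounts to, assuming a requests-based download_file helper and SentencePiece loading (both defined elsewhere in Model.py, so the names and signatures below are assumptions, not the script's actual code):

import os
import requests
import sentencepiece as spm

TOKENIZER_PATH = "bpe.model"  # assumed local filename
TOKENIZER_URL = ("https://huggingface.co/datasets/Yuchan5386/Prototype/"
                 "resolve/main/bpe.model?download=true")

def download_file(url, path):
    # Hypothetical stand-in for the script's helper: stream the remote file to disk.
    with requests.get(url, stream=True, timeout=60) as resp:
        resp.raise_for_status()
        with open(path, "wb") as f:
            for chunk in resp.iter_content(chunk_size=8192):
                f.write(chunk)

if not os.path.exists(TOKENIZER_PATH):
    download_file(TOKENIZER_URL, TOKENIZER_PATH)

sp = spm.SentencePieceProcessor(model_file=TOKENIZER_PATH)  # matches the sp.* calls in the next hunk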
@@ -68,7 +68,7 @@ unk_id = sp.piece_to_id("<unk>")
 vocab_size = sp.get_piece_size()
 print(f"✅ Vocabulary size: {vocab_size}")
 
-max_len =
+max_len = 230
 batch_size = 128
 
 def text_to_ids(text):
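max_len is now fixed at 230 tokens alongside the existing batch_size of 128. The body of text_to_ids sits outside this hunk; a hypothetical sketch of what such a function typically does with a SentencePiece processor and a fixed max_len (the pad token handling here is an assumption):

max_len = 230
pad_id = sp.piece_to_id("<pad>")  # assumed pad piece; the script may use a different id

def text_to_ids(text):
    # Encode to integer ids, truncate to max_len, then right-pad to a fixed length.
    ids = sp.encode(text, out_type=int)[:max_len]
    return ids + [pad_id] * (max_len - len(ids))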
@@ -327,8 +327,8 @@ def create_lr_schedule(initial_lr=5e-5, decay_steps=10000, decay_rate=0.9):
 model = ReLaM(
     vocab_size=vocab_size,
     max_seq_len=max_len,
-    d_model=
-    n_layers=
+    d_model=128,
+    n_layers=2
 )
 
 # 옵티마이저 설정 (optimizer setup)
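The model is configured with d_model=128 and n_layers=2, and max_seq_len now follows the new max_len of 230; a 2-layer, 128-dimensional configuration keeps the parameter count small for prototype runs. The create_lr_schedule signature in the hunk header (initial_lr, decay_steps, decay_rate) reads like an exponential-decay schedule; assuming a tf.keras setup (the framework is not visible in this diff), the optimizer block introduced by the trailing comment would look roughly like:

import tensorflow as tf

def create_lr_schedule(initial_lr=5e-5, decay_steps=10000, decay_rate=0.9):
    # Assumed mapping of the signature onto Keras' ExponentialDecay schedule.
    return tf.keras.optimizers.schedules.ExponentialDecay(
        initial_learning_rate=initial_lr,
        decay_steps=decay_steps,
        decay_rate=decay_rate,
    )

optimizer = tf.keras.optimizers.Adam(learning_rate=create_lr_schedule())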