OpenLab-NLP committed
Commit 42be625 · verified · Parent: a8ba472

Update app.py

Files changed (1): app.py (+27 −23)
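
This update reintroduces dropout and cleans up the encoder. In EncoderBlock, fc1 is widened from Dense(ff_dim) to Dense(ff_dim*2) so that tf.split(h, 2, axis=-1) yields two ff_dim-wide halves for the SiLU gate (a SwiGLU-style feed-forward); with the old width the split silently halved the hidden dimension. A training flag is now threaded through both call methods so the new Dropout layers are active only during training, with DROP_RATE = 0.1 replacing the near-zero DROPOUT_RATE = 0.01. The second residual input is renamed from x_norm to x_norm2 to stop shadowing the first normalization, the dead line mask = mask is dropped, SentenceEncoder gains embedding dropout, and the blocks now receive embed_dim and drop_rate explicitly instead of relying on layer defaults.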
app.py CHANGED
@@ -35,7 +35,7 @@ if not os.path.exists(TOKENIZER_PATH):
 MAX_LEN = 128
 EMBED_DIM = 384
 LATENT_DIM = 384
-DROPOUT_RATE = 0.01
+DROP_RATE = 0.1
 
 # ===============================
 # 1️⃣ Load tokenizer
@@ -51,12 +51,13 @@ def pad_sentence(tokens):
     return tokens + [pad_id]*(MAX_LEN - len(tokens))
 
 class EncoderBlock(tf.keras.layers.Layer):
-    def __init__(self, embed_dim=EMBED_DIM, ff_dim=1152, seq_len=MAX_LEN):
+    def __init__(self, embed_dim=EMBED_DIM, ff_dim=1152, seq_len=MAX_LEN, drop_rate=DROP_RATE):
         super().__init__()
         self.embed_dim = embed_dim
         self.seq_len = seq_len
+        self.drop_rate = drop_rate
 
-        self.fc1 = layers.Dense(ff_dim)
+        self.fc1 = layers.Dense(ff_dim*2)
         self.fc2 = layers.Dense(embed_dim)
         self.fc3 = layers.Dense(ff_dim//2)
         self.fc4 = layers.Dense(embed_dim)
@@ -71,15 +72,15 @@ class EncoderBlock(tf.keras.layers.Layer):
         self.ln3 = layers.LayerNormalization(epsilon=1e-5)
         self.ln4 = layers.LayerNormalization(epsilon=1e-5)
 
-    def call(self, x, mask):
-        mask = mask
-        # x: (B, L, D)
+        self.dropout = layers.Dropout(drop_rate)
+
+    def call(self, x, mask, training=False):
         x_norm = self.ln(x)
 
         h = self.fc1(x_norm)
-        g, v = tf.split(h, 2, axis=-1)
+        g, v = tf.split(h, 2, axis=-1)
         h = tf.nn.silu(g) * v
-        h = self.fc2(h)
+        h = self.fc2(h)
 
         h = x + self.ln1(h)
 
@@ -92,12 +93,13 @@ class EncoderBlock(tf.keras.layers.Layer):
         v = self.token_mixer(v) * self.token_gate(v)
         v = tf.transpose(v, [0, 2, 1])
 
-        x_norm = attn + self.ln3(v)
-        x = self.fc3(x_norm)
+        x_norm2 = attn + self.ln3(v)
+        x = self.fc3(x_norm2)
         x = tf.nn.silu(x)
         x = self.fc4(x)
 
-        return x_norm + self.ln4(x)
+        x = self.dropout(x, training=training)
+        return x_norm2 + self.ln4(x)
 
 class L2NormLayer(layers.Layer):
     def __init__(self, axis=1, epsilon=1e-10, **kwargs):
@@ -110,34 +112,36 @@ class L2NormLayer(layers.Layer):
         return {"axis": self.axis, "epsilon": self.epsilon, **super().get_config()}
 
 class SentenceEncoder(tf.keras.Model):
-    def __init__(self, vocab_size, embed_dim=384, latent_dim=384, max_len=128, pad_id=pad_id):
+    def __init__(self, vocab_size, embed_dim=EMBED_DIM, latent_dim=LATENT_DIM, max_len=MAX_LEN, pad_id=pad_id, drop_rate=DROP_RATE):
         super().__init__()
         self.pad_id = pad_id
         self.embed = layers.Embedding(vocab_size, embed_dim)
         self.pos_embed = layers.Embedding(input_dim=max_len, output_dim=embed_dim)
-        self.blocks = [EncoderBlock() for _ in range(2)]
+        self.blocks = [EncoderBlock(embed_dim=embed_dim, drop_rate=drop_rate) for _ in range(2)]
         self.attn_pool = layers.Dense(1)
         self.ln_f = layers.LayerNormalization(epsilon=1e-5, dtype=tf.float32)
-        self.latent = layers.Dense(latent_dim, activation=None)  # tanh removed
-        self.l2norm = L2NormLayer()  # added
+        self.latent = layers.Dense(latent_dim, activation=None)
+        self.l2norm = L2NormLayer()
+        self.drop_embed = layers.Dropout(drop_rate)
 
-    def call(self, x):
+    def call(self, x, training=False):
         positions = tf.range(tf.shape(x)[1])[tf.newaxis, :]
         x_embed = self.embed(x) + self.pos_embed(positions)
+        x_embed = self.drop_embed(x_embed, training=training)
+
         mask = tf.cast(tf.not_equal(x, self.pad_id), tf.float32)
-        x = x_embed
+        h = x_embed
         for block in self.blocks:
-            x = block(x, mask)
-        x = self.ln_f(x)
+            h = block(h, mask, training=training)
+        h = self.ln_f(h)
 
-        scores = self.attn_pool(x)
+        scores = self.attn_pool(h)
         scores = tf.where(tf.equal(mask[..., tf.newaxis], 0), -1e9, scores)
         scores = tf.nn.softmax(scores, axis=1)
-        pooled = tf.reduce_sum(x * scores, axis=1)
+        pooled = tf.reduce_sum(h * scores, axis=1)
 
         latent = self.latent(pooled)
-        return self.l2norm(latent)  # return after L2 normalization
-
+        return self.l2norm(latent)
 # 3️⃣ Load model
 # ===============================
 encoder = SentenceEncoder(vocab_size=vocab_size)
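
A minimal smoke test of the updated interface, for review purposes. It assumes the definitions above (SentenceEncoder, MAX_LEN, pad_id) are in scope; the vocabulary size and token ids are placeholders, not values from the app's real tokenizer:

import tensorflow as tf

# Placeholder vocabulary size for illustration; app.py derives the real value
# from the loaded tokenizer.
encoder = SentenceEncoder(vocab_size=32000)

# One sentence of three arbitrary token ids, padded to MAX_LEN with pad_id.
batch = tf.constant([[5, 17, 42] + [pad_id] * (MAX_LEN - 3)], dtype=tf.int32)

# training defaults to False, so the embedding and block Dropout layers stay
# inactive at inference time.
emb = encoder(batch, training=False)
print(emb.shape)  # (1, 384) == (1, LATENT_DIM)

# L2NormLayer guarantees unit-norm outputs, so cosine similarity between two
# sentences reduces to a plain dot product.
tf.debugging.assert_near(tf.norm(emb, axis=1), tf.ones([1]), atol=1e-5)

Note that the block applies dropout to the branch output before the final residual add, keeping the skip path clean, which is the conventional placement in pre-norm-style blocks.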