import numpy as np
import tensorflow as tf
from tensorflow.keras import layers
import sentencepiece as spm
import gradio as gr
import requests
import os

# ----------------------
# File download utility
# ----------------------
def download_file(url, save_path):
    r = requests.get(url, stream=True)
    r.raise_for_status()
    with open(save_path, "wb") as f:
        for chunk in r.iter_content(8192 * 2):  # stream in 16 KiB chunks
            f.write(chunk)
    print(f"✅ Saved {save_path}")

MODEL_PATH = "encoder.weights.h5"
TOKENIZER_PATH = "bpe.model"

if not os.path.exists(MODEL_PATH):
    download_file(
        "https://huggingface.co/OpenLab-NLP/openlem2/resolve/main/encoder_fit.weights.h5?download=true",
        MODEL_PATH
    )

if not os.path.exists(TOKENIZER_PATH):
    download_file(
        "https://huggingface.co/OpenLab-NLP/openlem2/resolve/main/bpe.model?download=true",
        TOKENIZER_PATH
    )

MAX_LEN = 384
EMBED_DIM = 512
LATENT_DIM = 512
BATCH_SIZE = 768           # global batch size (Keras/TPU splits it across replicas)
EPOCHS = 1
SHUFFLE_BUFFER = 200000
LEARNING_RATE = 1e-4
TEMPERATURE = 0.05
DROPOUT_AUG = 0.1
EMBED_DROPOUT = 0.1
SEED = 42
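# Note: BATCH_SIZE, EPOCHS, SHUFFLE_BUFFER, LEARNING_RATE, TEMPERATURE and
# DROPOUT_AUG are training-time settings kept here for reference; this demo
# only runs inference and never reads them.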
# ===============================
# 1️⃣ Load the tokenizer
# ===============================
sp = spm.SentencePieceProcessor(model_file=TOKENIZER_PATH)
pad_id = sp.piece_to_id("<pad>") if sp.piece_to_id("<pad>") != -1 else 0  # fall back to 0 if "<pad>" is missing
vocab_size = sp.get_piece_size()

def encode_sentence(sentence, max_len=MAX_LEN):
    return sp.encode(sentence, out_type=int)[:max_len]

def pad_sentence(tokens):
    return tokens + [pad_id]*(MAX_LEN - len(tokens))
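
# Example (hypothetical ids): encode_sentence("hello") might return
# [517, 42, 9]; pad_sentence then right-pads with pad_id up to MAX_LEN.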


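# DynamicConv: a content-dependent 1D convolution. For each position, a small
# Dense "generator" predicts a softmax-normalized kernel of width k, which is
# applied to that position's k-token window (gathered via extract_patches).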
class DynamicConv(layers.Layer):
    def __init__(self, d_model, k=7):
        super().__init__()
        assert k % 2 == 1
        self.k = k
        self.dense = layers.Dense(d_model, activation='silu')
        self.proj = layers.Dense(d_model)
        self.generator = layers.Dense(k, dtype='float32')
    def call(self, x):
        x_in = x
        x = tf.cast(x, tf.float32)

        B = tf.shape(x)[0]
        L = tf.shape(x)[1]
        D = tf.shape(x)[2]

        kernels = self.generator(self.dense(x))
        kernels = tf.nn.softmax(kernels, axis=-1)

        pad = (self.k - 1) // 2
        x_pad = tf.pad(x, [[0,0],[pad,pad],[0,0]])

        x_pad_4d = tf.expand_dims(x_pad, axis=1)
        patches = tf.image.extract_patches(
            images=x_pad_4d,
            sizes=[1,1,self.k,1],
            strides=[1,1,1,1],
            rates=[1,1,1,1],
            padding='VALID'
        )
        patches = tf.reshape(patches, [B, L, self.k, D])
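        # patches[b, t] now holds the k-token window centred at position t;
        # the per-position softmax kernels below mix each window into a
        # single output vector.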

        kernels_exp = tf.expand_dims(kernels, axis=-1)
        out = tf.reduce_sum(patches * kernels_exp, axis=2)
        out = self.proj(out)

        # Cast back to the caller's original dtype (for mixed-precision setups)
        return tf.cast(out, x_in.dtype)

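# EncoderBlock: pre-norm residual block. The normalized input runs through a
# stack of DynamicConv layers, then a SiLU-gated GLU feed-forward network
# (fc1's output is split into gate and value halves).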
class EncoderBlock(tf.keras.layers.Layer):
    def __init__(self, embed_dim=EMBED_DIM, ff_dim=1152, seq_len=MAX_LEN, num_conv_layers=2):
        super().__init__()
        self.embed_dim = embed_dim
        self.seq_len = seq_len

        # MLP / FFN
        self.fc1 = layers.Dense(ff_dim)
        self.fc2 = layers.Dense(embed_dim)
        self.blocks = [DynamicConv(d_model=embed_dim, k=7) for _ in range(num_conv_layers)]
        # LayerNorm
        self.ln = layers.LayerNormalization(epsilon=1e-5)   # input normalization
        self.ln1 = layers.LayerNormalization(epsilon=1e-5)  # conv residual
        self.ln2 = layers.LayerNormalization(epsilon=1e-5)  # FFN residual

    def call(self, x, mask=None):
        # Normalize the input (pre-norm)
        x_norm = self.ln(x)

        # Run through the stacked DynamicConv layers
        out = x_norm
        for block in self.blocks:
            out = block(out)
        # Conv residual connection
        x = x_norm + self.ln1(out)

        # FFN / GLU: split into gate and value halves, gate with SiLU
        v = out
        h = self.fc1(v)
        g, v_split = tf.split(h, 2, axis=-1)
        h = tf.nn.silu(g) * v_split
        h = self.fc2(h)

        # FFN residual connection
        x = x + self.ln2(h)

        return x


class L2NormLayer(layers.Layer):
    def __init__(self, axis=1, epsilon=1e-10, **kwargs):
        super().__init__(**kwargs)
        self.axis = axis
        self.epsilon = epsilon
    def call(self, inputs):
        return tf.math.l2_normalize(inputs, axis=self.axis, epsilon=self.epsilon)

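# SentenceEncoder: token + positional embeddings -> dropout -> two
# EncoderBlocks -> final LayerNorm -> mask-aware attention pooling ->
# Dense projection to the latent space -> L2 normalization.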
class SentenceEncoder(tf.keras.Model):
    def __init__(self, vocab_size, embed_dim=EMBED_DIM, latent_dim=LATENT_DIM, max_len=MAX_LEN, pad_id=pad_id, dropout_rate=EMBED_DROPOUT):
        super().__init__()
        self.pad_id = pad_id
        self.embed = layers.Embedding(vocab_size, embed_dim)
        self.pos_embed = layers.Embedding(input_dim=max_len, output_dim=embed_dim)
        self.dropout = layers.Dropout(dropout_rate)
        self.blocks = [EncoderBlock() for _ in range(2)]
        self.attn_pool = layers.Dense(1)
        self.ln_f = layers.LayerNormalization(epsilon=1e-5, dtype=tf.float32)
        self.latent = layers.Dense(latent_dim, activation=None)
        self.l2norm = L2NormLayer(axis=1)

    def call(self, x, training=None):
        positions = tf.range(tf.shape(x)[1])[tf.newaxis, :]
        x_embed = self.embed(x) + self.pos_embed(positions)
        x_embed = self.dropout(x_embed, training=training)

        mask = tf.cast(tf.not_equal(x, self.pad_id), tf.float32)

        h = x_embed
        for block in self.blocks:
            h = block(h, training=training)

        h = self.ln_f(h)

        # Force scores to float32 (stable softmax under mixed precision)
        scores = self.attn_pool(h)
        scores = tf.cast(scores, tf.float32)

        scores = tf.where(mask[..., tf.newaxis] == 0, tf.constant(-1e9, tf.float32), scores)
        scores = tf.nn.softmax(scores, axis=1)
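        # Pad positions were set to -1e9 before the softmax, so they receive
        # (near-)zero attention weight in the pooled sum below.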

        pooled = tf.reduce_sum(h * scores, axis=1)
        latent = self.latent(pooled)
        latent = self.l2norm(latent)

        # Cast only the output to float32
        return tf.cast(latent, tf.float32)

# ===============================
# 3️⃣ Load the model
# ===============================
encoder = SentenceEncoder(vocab_size=vocab_size)
encoder(np.zeros((1, MAX_LEN), dtype=np.int32))  # build the model with a dummy batch so load_weights can map variables
encoder.load_weights(MODEL_PATH)

# ===============================
# 4️⃣ Vectorization function
# ===============================
def get_sentence_vector(sentence):
    tokens = pad_sentence(encode_sentence(sentence))
    vec = encoder(np.array([tokens])).numpy()[0]
    return vec / np.linalg.norm(vec)
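
# Note: the encoder already L2-normalizes its output, so the extra division
# above is a harmless safeguard rather than a required step.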

# ===============================
# 5️⃣ Find the most similar sentence
# ===============================
def find_most_similar(query, s1, s2, s3):
    candidates = [s1, s2, s3]
    candidate_vectors = np.stack([get_sentence_vector(c) for c in candidates]).astype(np.float32)
    query_vector = get_sentence_vector(query)

    # All vectors are unit-norm, so this dot product equals cosine similarity
    sims = candidate_vectors @ query_vector
    top_idx = np.argmax(sims)

    return {
        "most_similar_sentence": candidates[top_idx],
        "similarity": float(sims[top_idx])
    }

# ===============================
# 6️⃣ Gradio UI
# ===============================
with gr.Blocks() as demo:
    gr.Markdown("## 🔍 Sentence Similarity Search (1 query + 3 candidates)")
    with gr.Row():
        query_input = gr.Textbox(label="Query sentence", placeholder="Type here")
    with gr.Row():
        s1_input = gr.Textbox(label="Candidate 1")
        s2_input = gr.Textbox(label="Candidate 2")
        s3_input = gr.Textbox(label="Candidate 3")
    output = gr.JSON(label="Result")

    search_btn = gr.Button("Find the most similar sentence")
    search_btn.click(
        fn=find_most_similar,
        inputs=[query_input, s1_input, s2_input, s3_input],
        outputs=output
    )

demo.launch()
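
# Note: demo.launch() uses Gradio's defaults here; outside a hosted Space,
# launch(share=True) would create a temporary public URL if one is needed.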