Create multilingual translator code
multilingual translator code
ADDED
@@ -0,0 +1,64 @@
import torch
import torch.nn.functional as F
from transformers import AutoModelForSequenceClassification, AutoTokenizer, AutoModelForSeq2SeqLM
from sentence_transformers import SentenceTransformer  # imported but not used below
import gradio as gr

# Load models: an XLM-RoBERTa classifier for language detection and NLLB-200 for translation
lang_detect_model = AutoModelForSequenceClassification.from_pretrained("papluca/xlm-roberta-base-language-detection")
lang_detect_tokenizer = AutoTokenizer.from_pretrained("papluca/xlm-roberta-base-language-detection")
trans_model = AutoModelForSeq2SeqLM.from_pretrained("facebook/nllb-200-distilled-600M")
trans_tokenizer = AutoTokenizer.from_pretrained("facebook/nllb-200-distilled-600M")

# Language maps
id2lang = lang_detect_model.config.id2label  # class index -> ISO 639-1 code, e.g. "en"

# NLLB codes offered as translation targets
nllb_langs = {
    "eng_Latn": "English", "fra_Latn": "French", "hin_Deva": "Hindi",
    "spa_Latn": "Spanish", "deu_Latn": "German", "tam_Taml": "Tamil",
    "tel_Telu": "Telugu", "jpn_Jpan": "Japanese", "zho_Hans": "Chinese",
    "arb_Arab": "Arabic", "san_Deva": "Sanskrit"
}

# Detector output codes -> NLLB source codes
xlm_to_nllb = {
    "en": "eng_Latn", "fr": "fra_Latn", "hi": "hin_Deva", "es": "spa_Latn", "de": "deu_Latn",
    "ta": "tam_Taml", "te": "tel_Telu", "ja": "jpn_Jpan", "zh": "zho_Hans", "ar": "arb_Arab",
    "sa": "san_Deva"
}

# Detection
def detect_language(text):
    inputs = lang_detect_tokenizer(text, return_tensors="pt", truncation=True, padding=True)
    with torch.no_grad():
        outputs = lang_detect_model(**inputs)
    probs = F.softmax(outputs.logits, dim=1)
    pred = torch.argmax(probs, dim=1).item()
    return id2lang[pred]

# Translation
def translate_text(input_text, target_code):
    detected = detect_language(input_text)
    src_nllb = xlm_to_nllb.get(detected, "eng_Latn")  # fall back to English if the detected code is unmapped
    trans_tokenizer.src_lang = src_nllb
    encoded = trans_tokenizer(input_text, return_tensors="pt", truncation=True, padding=True)
    try:
        lang_id = trans_tokenizer.convert_tokens_to_ids([target_code])[0]
        generated = trans_model.generate(**encoded, forced_bos_token_id=lang_id)
        result = trans_tokenizer.decode(generated[0], skip_special_tokens=True)
        return f"Detected: {detected}\n\nTranslated:\n{result}"
    except Exception as e:
        return f"Translation failed: {e}"

# Gradio UI
demo = gr.Interface(
    fn=translate_text,
    inputs=[
        gr.Textbox(label="Input Text", lines=6),
        gr.Dropdown(choices=list(nllb_langs.keys()), label="Target Language")
    ],
    outputs="text",
    title="Multilingual Text Translator 🌍",
    description="Enter your text and select a target language to translate."
)

demo.launch()
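For a quick check without launching the web UI, translate_text can be called directly once the models have downloaded. A minimal sketch, assuming the same environment as the script (torch, transformers, sentence-transformers, and gradio installed); the French sample sentence and the eng_Latn target are illustrative only:

# Illustrative smoke test: translate a French sentence into English
sample = "Bonjour tout le monde, comment allez-vous ?"
print(translate_text(sample, "eng_Latn"))
# Expected output shape (actual wording depends on the models):
# Detected: fr
#
# Translated:
# <English translation produced by NLLB>

The same pattern works for any target code listed in nllb_langs, since the Gradio dropdown passes those codes straight to translate_text.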