# Hugging Face Space: Guarani speech-to-text demo (wav2vec2 + Gradio).
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor
import torch
import librosa
import gradio as gr

# Load the Guarani wav2vec2 checkpoint once at startup so every request
# reuses the same processor/model pair.
MODEL_NAME = "ivangtorre/wav2vec2-xlsr-300m-guarani"
processor = Wav2Vec2Processor.from_pretrained(MODEL_NAME)
model = Wav2Vec2ForCTC.from_pretrained(MODEL_NAME)
# Transcription
def transcribir(audio):
    """Transcribe a Guarani audio file to lowercase text.

    Args:
        audio: Path to the audio file. Gradio's ``Audio(type="filepath")``
            passes a filesystem path, or ``None`` when the user submits
            without recording/uploading anything.

    Returns:
        The greedy CTC transcription in lowercase, or an empty string
        when no audio was provided.
    """
    if audio is None:
        # Guard: submitting the form with no audio would otherwise crash
        # librosa.load with a TypeError.
        return ""
    # Resample to 16 kHz, the rate the wav2vec2 checkpoint expects.
    audio_data, _ = librosa.load(audio, sr=16000)
    inputs = processor(audio_data, sampling_rate=16000, return_tensors="pt", padding=True)
    with torch.no_grad():
        logits = model(**inputs).logits
    # Greedy CTC decoding: pick the most likely token at each frame.
    predicted_ids = torch.argmax(logits, dim=-1)
    transcription = processor.batch_decode(predicted_ids)[0]
    return transcription.lower()
# Gradio interface: record/upload audio, get the transcription back.
demo = gr.Interface(
    fn=transcribir,
    inputs=gr.Audio(type="filepath"),
    outputs="text",
    title="Transcriptor Guaraní",
    description="Subí un audio en guaraní (.ogg, .wav) y obtené la transcripción",
)

if __name__ == "__main__":
    # Without launch() the app is defined but never served when the file
    # runs as a script — the likely cause of the Space's "Runtime error".
    demo.launch()