import gradio as gr
import tensorflow as tf
import text_hammer as th
from transformers import DistilBertTokenizer, TFDistilBertForSequenceClassification

tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")
model = TFDistilBertForSequenceClassification.from_pretrained(
    "Elegbede/Distilbert_FInetuned_For_Text_Classification"
)


# Define a function to make predictions
def predict(text):
    # Preprocess the input text
    text = str(text).lower()
    text = th.cont_exp(text)              # expand contractions (e.g. "don't" -> "do not")
    text = th.remove_special_chars(text)
    text = th.remove_accented_chars(text)

    # Tokenize the preprocessed text
    encodings = tokenizer(
        text,
        truncation=True,
        padding=True,
        max_length=70,
        return_tensors="tf",
    )

    # Run the model and convert logits to per-class probabilities,
    # so gr.Label can display the top classes with their scores
    predictions = model(encodings)
    probabilities = tf.nn.softmax(predictions.logits, axis=1).numpy()[0]

    labels_list = ["Sadness", "Joy", "Love", "Anger", "Fear", "Surprise"]
    return {label: float(prob) for label, prob in zip(labels_list, probabilities)}


# Create a Gradio interface
iface = gr.Interface(
    fn=predict,
    inputs="text",
    outputs=gr.Label(num_top_classes=6),
    examples=[["I love you."], ["I hate you"], ["I am scared"], ["I am amazed"]],
    title="Emotion Classification",
    description="Predict the emotion probabilities of a text using a fine-tuned DistilBERT model.",
)

# Launch the interface
iface.launch()