import gradio as gr
import tensorflow as tf
from transformers import DistilBertTokenizer, TFDistilBertForSequenceClassification

# Load the tokenizer and the fine-tuned classification model
tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")
model = TFDistilBertForSequenceClassification.from_pretrained("Elegbede/Distilbert_FInetuned_For_Text_Classification")

# Emotion labels in the order the model was trained on
labels_list = ["Sadness", "Joy", "Love", "Anger", "Fear", "Surprise"]


# Define a function to make predictions
def predict(text):
    # Tokenize and preprocess the input text
    encodings = tokenizer(text, truncation=True, padding=True, max_length=70, return_tensors="tf")

    # Run the model once and convert the logits to class probabilities
    logits = model(encodings).logits
    probabilities = tf.nn.softmax(logits, axis=1).numpy()[0]

    # Return a label -> probability mapping for the Label component
    return {label: float(prob) for label, prob in zip(labels_list, probabilities)}


# Create a Gradio interface
iface = gr.Interface(
    fn=predict,
    inputs="text",
    outputs=gr.Label(num_top_classes=6),
    examples=[["I Love you."], ["I hate you"], ["I am scared"], ["I am amazed"]],
    title="Emotion Classification",
    description="Predict the emotion probabilities of a text using a fine-tuned DistilBERT model.",
)

# Launch the interface
iface.launch()
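
# Optional sanity check (a sketch; assumes the model repo above loads correctly):
# calling predict() directly returns the label -> probability mapping, e.g.
#
#     print(predict("I am so happy today!"))
#
# Run it before iface.launch(), since launch() blocks the main thread by default.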