# Hugging Face Space script (Spaces UI status lines removed from the paste).
import gradio as gr
import numpy as np
from sentence_transformers import SentenceTransformer
from sklearn.metrics.pairwise import cosine_similarity

# Sentence-embedding model used for zero-shot context detection.
context_model = SentenceTransformer("all-MiniLM-L6-v2")

# Candidate context labels the input text is matched against.
labels = [
    "aerospace", "anatomy", "anthropology", "art",
    "automotive", "blockchain", "biology", "chemistry",
    "cryptocurrency", "data science", "design", "e-commerce",
    "education", "engineering", "entertainment", "environment",
    "fashion", "finance", "food commerce", "general",
    "gaming", "healthcare", "history", "html",
    "information technology", "IT", "keywords", "legal",
    "literature", "machine learning", "marketing", "medicine",
    "music", "personal development", "philosophy", "physics",
    "politics", "poetry", "programming", "real estate", "retail",
    "robotics", "slang", "social media", "speech", "sports",
    "sustained", "technical", "theater", "tourism", "travel",
]

# Embed every label once at startup so each request only encodes the input.
label_embeddings = context_model.encode(labels)
def detect_context(input_text, top_n=3, score_threshold=0.05):
    """Return up to ``top_n`` context labels for *input_text*.

    The input is embedded with ``context_model`` and compared (cosine
    similarity) against the pre-computed ``label_embeddings``. Labels whose
    score exceeds ``score_threshold`` are returned, best first. Falls back
    to ``["general"]`` when no label clears the threshold.
    """
    # Encode the input into the same embedding space as the labels.
    input_embedding = context_model.encode([input_text])
    # One similarity score per label; [0] unwraps the single-row result.
    similarities = cosine_similarity(input_embedding, label_embeddings)[0]
    # Rank labels by score, highest first (the original rebuilt the zip into
    # an identical list first — sorting the pairs directly is equivalent).
    ranked = sorted(zip(labels, similarities), key=lambda pair: pair[1], reverse=True)
    # Keep only labels above the threshold, then take the top N.
    filtered = [label for label, score in ranked if score > score_threshold]
    return filtered[:top_n] if filtered else ["general"]
# Remote translation backend: a Gradio client pointed at space_3.
from gradio_client import Client

translation_client = Client("Frenchizer/space_3")  # Replace with your Space name
def translate_text(input_text):
    """Forward *input_text* to the remote translation Space and return its reply."""
    return translation_client.predict(input_text)
def process_request(input_text):
    """Detect the input's context (logged only) and return its translation."""
    context = detect_context(input_text)
    print(f"Detected context: {context}")
    # NOTE(review): the detected context is only printed — it is never passed
    # to the translation call. Confirm whether space_3 should receive it.
    return translate_text(input_text)
# Single text-in / text-out Gradio UI wired to the full pipeline.
interface = gr.Interface(
    fn=process_request,
    inputs="text",
    outputs="text",
    title="Frenchizer",
    description="Translate text from English to French with context detection.",
)

# Start the web app.
interface.launch()