from fastapi import APIRouter
from datetime import datetime
import os
import pickle  # the "sk" branch of evaluate_text loads a pickled scikit-learn model

import torch
import torch.nn as nn
from torch.utils.data import TensorDataset, DataLoader, SequentialSampler
from tqdm import tqdm

from datasets import load_dataset
from sklearn.metrics import accuracy_score
from sentence_transformers import SentenceTransformer
from huggingface_hub import PyTorchModelHubMixin
from transformers import BertForPreTraining, AutoTokenizer, AutoModel, ModernBertForSequenceClassification

from .utils.evaluation import TextEvaluationRequest
from .utils.emissions import tracker, clean_emissions_data, get_space_info
router = APIRouter()

DESCRIPTION = "Submission 1: FT-Modern-BERT-Large"
ROUTE = "/text"

# Run on GPU when available, otherwise fall back to CPU.
if torch.cuda.is_available():
    device = torch.device("cuda")
else:
    device = torch.device("cpu")

# Selects which model is used for inference below. One of:
# sk, mlp, ct, modern-base, modern-large, gte-base, gte-large
MODEL = "modern-large"

class ConspiracyClassification768(
    nn.Module,
    PyTorchModelHubMixin,
    # optionally, you can add metadata which gets pushed to the model card
):
    def __init__(self, num_classes=8):
        super().__init__()
        self.h1 = nn.Linear(768, 100)
        self.h2 = nn.Linear(100, 100)
        self.h3 = nn.Linear(100, 100)
        self.h4 = nn.Linear(100, 50)
        self.h5 = nn.Linear(50, num_classes)
        self.dropout = nn.Dropout(0.2)
        self.activation = nn.ReLU()

    def forward(self, input_texts):
        # Four hidden layers, each followed by ReLU and dropout, then a linear output layer.
        outputs = input_texts
        for layer in (self.h1, self.h2, self.h3, self.h4):
            outputs = self.dropout(self.activation(layer(outputs)))
        return self.h5(outputs)
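
# Usage sketch for the MLP head (illustrative only; the quote is made up):
#
#   emb_model = SentenceTransformer("sentence-transformers/sentence-t5-large")
#   x = torch.Tensor(emb_model.encode(["an example quote"]))   # shape (1, 768)
#   head = ConspiracyClassification768(num_classes=8)
#   logits = head(x)                                           # shape (1, 8)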

class CTBERT(
    nn.Module,
    PyTorchModelHubMixin,
    # optionally, you can add metadata which gets pushed to the model card
):
    def __init__(self, num_classes=8):
        super().__init__()
        self.bert = BertForPreTraining.from_pretrained('digitalepidemiologylab/covid-twitter-bert-v2')
        self.bert.cls.seq_relationship = nn.Linear(1024, num_classes)

    def forward(self, input_ids, input_mask, token_type_ids):
        outputs = self.bert(input_ids=input_ids, token_type_ids=token_type_ids, attention_mask=input_mask)
        logits = outputs[1]
        return logits
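
# Note: BertForPreTraining returns (prediction_logits, seq_relationship_logits).
# CTBERT swaps the 2-way next-sentence head (bert.cls.seq_relationship) for a
# num_classes-way linear layer, so outputs[1] in forward() is already the
# classification logits.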

class conspiracyModelBase(
    nn.Module,
    PyTorchModelHubMixin,
    # optionally, you can add metadata which gets pushed to the model card
):
    def __init__(self, num_classes=8):
        super().__init__()
        self.n_classes = num_classes
        self.bert = ModernBertForSequenceClassification.from_pretrained('answerdotai/ModernBERT-base', num_labels=num_classes)

    def forward(self, input_ids, input_mask):
        outputs = self.bert(input_ids=input_ids, attention_mask=input_mask)
        return outputs.logits

class conspiracyModelLarge(
    nn.Module,
    PyTorchModelHubMixin,
    # optionally, you can add metadata which gets pushed to the model card
):
    def __init__(self, num_classes=8):
        super().__init__()
        self.n_classes = num_classes
        self.bert = ModernBertForSequenceClassification.from_pretrained('answerdotai/ModernBERT-large', num_labels=num_classes)

    def forward(self, input_ids, input_mask):
        outputs = self.bert(input_ids=input_ids, attention_mask=input_mask)
        return outputs.logits
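
# Both ModernBERT wrappers delegate to the Hugging Face sequence-classification
# head (num_labels=num_classes) and return raw logits; base and large differ
# only in the underlying checkpoint.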

class gteModelLarge(
    nn.Module,
    PyTorchModelHubMixin,
    # optionally, you can add metadata which gets pushed to the model card
):
    def __init__(self, num_classes=8):
        super().__init__()
        self.n_classes = num_classes
        self.gte = AutoModel.from_pretrained('Alibaba-NLP/gte-large-en-v1.5', trust_remote_code=True)
        self.cls = nn.Linear(1024, num_classes)

    def forward(self, input_ids, input_mask, input_type_ids):
        outputs = self.gte(input_ids=input_ids, attention_mask=input_mask, token_type_ids=input_type_ids)
        embeddings = outputs.last_hidden_state[:, 0]
        logits = self.cls(embeddings)
        return logits

class gteModel(
    nn.Module,
    PyTorchModelHubMixin,
    # optionally, you can add metadata which gets pushed to the model card
):
    def __init__(self, num_classes=8):
        super().__init__()
        self.n_classes = num_classes
        self.gte = AutoModel.from_pretrained('Alibaba-NLP/gte-base-en-v1.5', trust_remote_code=True)
        self.cls = nn.Linear(768, num_classes)

    def forward(self, input_ids, input_mask, input_type_ids):
        outputs = self.gte(input_ids=input_ids, attention_mask=input_mask, token_type_ids=input_type_ids)
        embeddings = outputs.last_hidden_state[:, 0]
        logits = self.cls(embeddings)
        return logits
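
# Both GTE wrappers classify from the final hidden state of the first token
# (last_hidden_state[:, 0], CLS-style pooling) through a single linear layer;
# only the hidden size differs: 1024 for gte-large, 768 for gte-base.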

# NOTE (reconstruction): router and ROUTE are unused unless the handler is
# registered; the challenge template registers it like this.
@router.post(ROUTE)
async def evaluate_text(request: TextEvaluationRequest):
| """ | |
| Evaluate text classification for climate disinformation detection. | |
| Current Model: Random Baseline | |
| - Makes random predictions from the label space (0-7) | |
| - Used as a baseline for comparison | |
| """ | |
    # Get space info
    username, space_url = get_space_info()

    # Define the label mapping
    LABEL_MAPPING = {
        "0_not_relevant": 0,
        "1_not_happening": 1,
        "2_not_human": 2,
        "3_not_bad": 3,
        "4_solutions_harmful_unnecessary": 4,
        "5_science_unreliable": 5,
        "6_proponents_biased": 6,
        "7_fossil_fuels_needed": 7
    }

    # Load and prepare the dataset
    dataset = load_dataset(request.dataset_name, token=os.getenv("HF_TOKEN"))

    # Convert string labels to integers
    dataset = dataset.map(lambda x: {"label": LABEL_MAPPING[x["label"]]})

    # Split dataset
    test_dataset = dataset["test"]
| if MODEL =="mlp": | |
| model = ConspiracyClassification768.from_pretrained("ypesk/frugal-ai-EURECOM-mlp-768-fullset") | |
| model = model.to(device) | |
| emb_model = SentenceTransformer("sentence-transformers/sentence-t5-large") | |
| batch_size = 6 | |
| test_texts = torch.Tensor(emb_model.encode([t['quote'] for t in test_dataset])) | |
| test_data = TensorDataset(test_texts) | |
| test_sampler = SequentialSampler(test_data) | |
| test_dataloader = DataLoader(test_data, sampler=test_sampler, batch_size=batch_size) | |
| elif MODEL == "sk": | |
| emb_model = SentenceTransformer("sentence-transformers/sentence-t5-large") | |
| batch_size = 512 | |
| test_texts = torch.Tensor(emb_model.encode([t['quote'] for t in test_dataset])) | |
| test_data = TensorDataset(test_texts) | |
| test_sampler = SequentialSampler(test_data) | |
| test_dataloader = DataLoader(test_data, sampler=test_sampler, batch_size=batch_size) | |
| model = pickle.load(open('../svm.pkl', "rb")) | |
| elif MODEL == "ct": | |
| model = CTBERT.from_pretrained("ypesk/frugal-ai-EURECOM-ct-bert-baseline") | |
| model = model.to(device) | |
| tokenizer = AutoTokenizer.from_pretrained('digitalepidemiologylab/covid-twitter-bert-fullset') | |
| test_texts = [t['quote'] for t in test_dataset] | |
| MAX_LEN = 256 #1024 # < m some tweets will be truncated | |
| tokenized_test = tokenizer(test_texts, max_length=MAX_LEN, padding='max_length', truncation=True) | |
| test_input_ids, test_token_type_ids, test_attention_mask = tokenized_test['input_ids'], tokenized_test['token_type_ids'], tokenized_test['attention_mask'] | |
| test_token_type_ids = torch.tensor(test_token_type_ids) | |
| test_input_ids = torch.tensor(test_input_ids) | |
| test_attention_mask = torch.tensor(test_attention_mask) | |
| batch_size = 12 # | |
| test_data = TensorDataset(test_input_ids, test_attention_mask, test_token_type_ids) | |
| test_sampler = SequentialSampler(test_data) | |
| test_dataloader = DataLoader(test_data, sampler=test_sampler, batch_size=batch_size) | |
| elif MODEL == "modern-base": | |
| model = conspiracyModelBase.from_pretrained("ypesk/frugal-ai-EURECOM-modern-base-fullset") | |
| model = model.to(device) | |
| tokenizer = AutoTokenizer.from_pretrained("answerdotai/ModernBERT-base") | |
| test_texts = [t['quote'] for t in test_dataset] | |
| MAX_LEN = 256 #1024 # < m some tweets will be truncated | |
| tokenized_test = tokenizer(test_texts, max_length=MAX_LEN, padding='max_length', truncation=True) | |
| test_input_ids, test_attention_mask = tokenized_test['input_ids'], tokenized_test['attention_mask'] | |
| test_input_ids = torch.tensor(test_input_ids) | |
| test_attention_mask = torch.tensor(test_attention_mask) | |
| batch_size = 12 # | |
| test_data = TensorDataset(test_input_ids, test_attention_mask) | |
| test_sampler = SequentialSampler(test_data) | |
| test_dataloader = DataLoader(test_data, sampler=test_sampler, batch_size=batch_size) | |
| elif MODEL == "modern-large": | |
| model = conspiracyModelLarge.from_pretrained('ypesk/frugal-ai-EURECOM-modern-large-fullset') | |
| model = model.to(device) | |
| tokenizer = AutoTokenizer.from_pretrained("answerdotai/ModernBERT-large") | |
| test_texts = [t['quote'] for t in test_dataset] | |
| MAX_LEN = 256 #1024 # < m some tweets will be truncated | |
| tokenized_test = tokenizer(test_texts, max_length=MAX_LEN, padding='max_length', truncation=True) | |
| test_input_ids, test_attention_mask = tokenized_test['input_ids'], tokenized_test['attention_mask'] | |
| test_input_ids = torch.tensor(test_input_ids) | |
| test_attention_mask = torch.tensor(test_attention_mask) | |
| batch_size = 12 # | |
| test_data = TensorDataset(test_input_ids, test_attention_mask) | |
| test_sampler = SequentialSampler(test_data) | |
| test_dataloader = DataLoader(test_data, sampler=test_sampler, batch_size=batch_size) | |
| elif MODEL == "gte-base": | |
| model = gteModel.from_pretrained("ypesk/frugal-ai-EURECOM-gte-base-fullset") | |
| model = model.to(device) | |
| tokenizer = AutoTokenizer.from_pretrained('Alibaba-NLP/gte-base-en-v1.5') | |
| test_texts = [t['quote'] for t in test_dataset] | |
| MAX_LEN = 256 #1024 # < m some tweets will be truncated | |
| tokenized_test = tokenizer(test_texts, max_length=MAX_LEN, padding='max_length', truncation=True) | |
| test_input_ids, test_attention_mask, test_token_type_ids = tokenized_test['input_ids'], tokenized_test['attention_mask'], tokenized_test['token_type_ids'] | |
| test_input_ids = torch.tensor(test_input_ids) | |
| test_attention_mask = torch.tensor(test_attention_mask) | |
| test_token_type_ids = torch.tensor(test_token_type_ids) | |
| batch_size = 12 # | |
| test_data = TensorDataset(test_input_ids, test_attention_mask, test_token_type_ids) | |
| test_sampler = SequentialSampler(test_data) | |
| test_dataloader = DataLoader(test_data, sampler=test_sampler, batch_size=batch_size) | |
| elif MODEL == "gte-large": | |
| model = gteModelLarge.from_pretrained("ypesk/frugal-ai-EURECOM-gte-large-fullset") | |
| model = model.to(device) | |
| tokenizer = AutoTokenizer.from_pretrained('Alibaba-NLP/gte-large-en-v1.5') | |
| test_texts = [t['quote'] for t in test_dataset] | |
| MAX_LEN = 256 #1024 # < m some tweets will be truncated | |
| tokenized_test = tokenizer(test_texts, max_length=MAX_LEN, padding='max_length', truncation=True) | |
| test_input_ids, test_attention_mask, test_token_type_ids = tokenized_test['input_ids'], tokenized_test['attention_mask'], tokenized_test['token_type_ids'] | |
| test_input_ids = torch.tensor(test_input_ids) | |
| test_attention_mask = torch.tensor(test_attention_mask) | |
| test_token_type_ids = torch.tensor(test_token_type_ids) | |
| batch_size = 12 # | |
| test_data = TensorDataset(test_input_ids, test_attention_mask, test_token_type_ids) | |
| test_sampler = SequentialSampler(test_data) | |
| test_dataloader = DataLoader(test_data, sampler=test_sampler, batch_size=batch_size) | |
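
    # SequentialSampler keeps the original dataset order, so the predictions
    # collected in the loop below line up index-by-index with test_dataset["label"].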
    # Start tracking emissions
    tracker.start()
    tracker.start_task("inference")

    #--------------------------------------------------------------------------------------------
    # YOUR MODEL INFERENCE CODE HERE
    # Update the code below to replace the random baseline by your model inference
    # within the inference pass where the energy consumption and emissions are tracked.
    #--------------------------------------------------------------------------------------------

    predictions = []
    if MODEL != "sk":
        model.eval()  # scikit-learn models have no eval(); only the torch models are switched

    for batch in tqdm(test_dataloader):
        batch = tuple(t.to(device) for t in batch)

        with torch.no_grad():
            if MODEL == "mlp":
                b_texts = batch[0]
                logits = model(b_texts)
            elif MODEL == "sk":
                # scikit-learn predicts label ids directly from the embeddings
                b_texts = batch[0]
                predictions.extend(model.predict(b_texts.cpu().numpy()))
                continue
            elif MODEL == "modern-base" or MODEL == "modern-large":
                b_input_ids, b_input_mask = batch
                logits = model(b_input_ids, b_input_mask)
            elif MODEL == "gte-base" or MODEL == "gte-large" or MODEL == "ct":
                b_input_ids, b_input_mask, b_token_type_ids = batch
                logits = model(b_input_ids, b_input_mask, b_token_type_ids)

        logits = logits.detach().cpu().numpy()
        predictions.extend(logits.argmax(1))

    true_labels = test_dataset["label"]
    #--------------------------------------------------------------------------------------------
    # YOUR MODEL INFERENCE STOPS HERE
    #--------------------------------------------------------------------------------------------
    # Stop tracking emissions
    emissions_data = tracker.stop_task()

    # Calculate accuracy
    accuracy = accuracy_score(true_labels, predictions)

    # Prepare results dictionary
    results = {
        "username": username,
        "space_url": space_url,
        "submission_timestamp": datetime.now().isoformat(),
        "model_description": DESCRIPTION,
        "accuracy": float(accuracy),
        "energy_consumed_wh": emissions_data.energy_consumed * 1000,  # kWh -> Wh
        "emissions_gco2eq": emissions_data.emissions * 1000,          # kg -> g CO2eq
        "emissions_data": clean_emissions_data(emissions_data),
        "api_route": ROUTE,
        "dataset_config": {
            "dataset_name": request.dataset_name,
            "test_size": request.test_size,
            "test_seed": request.test_seed
        }
    }

    return results
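
# Local smoke-test sketch (hypothetical: assumes TextEvaluationRequest exposes the
# fields used above and that HF_TOKEN grants access to the dataset):
#
#   import asyncio
#   req = TextEvaluationRequest(dataset_name="<dataset repo id>", test_size=0.2, test_seed=42)
#   print(asyncio.run(evaluate_text(req)))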