modelId stringlengths 4 62 | sha null | lastModified null | pipeline_tag stringclasses 9
values | author null | securityStatus null | likes int64 0 1.03k | downloads int64 0 62.4M | dataset list | arxiv list | license list | tags list | doi list | card stringlengths 0 14k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
albert-base-v1 | null | null | fill-mask | null | null | 1 | 41,336 | [
"bookcorpus",
"wikipedia"
] | [
"1909.11942"
] | [
"apache-2.0"
] | [
"pytorch",
"tf",
"safetensors",
"albert",
"fill-mask",
"en",
"transformers",
"exbert",
"autotrain_compatible",
"has_space"
] | null |
# ALBERT Base v1
Pretrained model on English language using a masked language modeling (MLM) objective. It was introduced in
[this paper](https://arxiv.org/abs/1909.11942) and first released in
[this repository](https://github.com/google-research/albert). This model, as all ALBERT models, is uncased: it does not make... |
albert-base-v2 | null | null | fill-mask | null | null | 50 | 4,543,047 | [
"bookcorpus",
"wikipedia"
] | [
"1909.11942"
] | [
"apache-2.0"
] | [
"pytorch",
"tf",
"jax",
"rust",
"safetensors",
"albert",
"fill-mask",
"en",
"transformers",
"autotrain_compatible",
"has_space"
] | null |
# ALBERT Base v2
Pretrained model on English language using a masked language modeling (MLM) objective. It was introduced in
[this paper](https://arxiv.org/abs/1909.11942) and first released in
[this repository](https://github.com/google-research/albert). This model, as all ALBERT models, is uncased: it does not make... |
albert-large-v1 | null | null | fill-mask | null | null | 0 | 651 | [
"bookcorpus",
"wikipedia"
] | [
"1909.11942"
] | [
"apache-2.0"
] | [
"pytorch",
"tf",
"albert",
"fill-mask",
"en",
"transformers",
"autotrain_compatible",
"has_space"
] | null |
# ALBERT Large v1
Pretrained model on English language using a masked language modeling (MLM) objective. It was introduced in
[this paper](https://arxiv.org/abs/1909.11942) and first released in
[this repository](https://github.com/google-research/albert). This model, as all ALBERT models, is uncased: it does not mak... |
albert-large-v2 | null | null | fill-mask | null | null | 11 | 12,476 | [
"bookcorpus",
"wikipedia"
] | [
"1909.11942"
] | [
"apache-2.0"
] | [
"pytorch",
"tf",
"safetensors",
"albert",
"fill-mask",
"en",
"transformers",
"autotrain_compatible",
"has_space"
] | null |
# ALBERT Large v2
Pretrained model on English language using a masked language modeling (MLM) objective. It was introduced in
[this paper](https://arxiv.org/abs/1909.11942) and first released in
[this repository](https://github.com/google-research/albert). This model, as all ALBERT models, is uncased: it does not mak... |
albert-xlarge-v1 | null | null | fill-mask | null | null | 0 | 385 | [
"bookcorpus",
"wikipedia"
] | [
"1909.11942"
] | [
"apache-2.0"
] | [
"pytorch",
"tf",
"albert",
"fill-mask",
"en",
"transformers",
"autotrain_compatible",
"has_space"
] | null |
# ALBERT XLarge v1
Pretrained model on English language using a masked language modeling (MLM) objective. It was introduced in
[this paper](https://arxiv.org/abs/1909.11942) and first released in
[this repository](https://github.com/google-research/albert). This model, as all ALBERT models, is uncased: it does not ma... |
albert-xlarge-v2 | null | null | fill-mask | null | null | 3 | 3,124 | [
"bookcorpus",
"wikipedia"
] | [
"1909.11942"
] | [
"apache-2.0"
] | [
"pytorch",
"tf",
"albert",
"fill-mask",
"en",
"transformers",
"autotrain_compatible",
"has_space"
] | null |
# ALBERT XLarge v2
Pretrained model on English language using a masked language modeling (MLM) objective. It was introduced in
[this paper](https://arxiv.org/abs/1909.11942) and first released in
[this repository](https://github.com/google-research/albert). This model, as all ALBERT models, is uncased: it does not ma... |
albert-xxlarge-v1 | null | null | fill-mask | null | null | 2 | 8,119 | [
"bookcorpus",
"wikipedia"
] | [
"1909.11942"
] | [
"apache-2.0"
] | [
"pytorch",
"tf",
"albert",
"fill-mask",
"en",
"transformers",
"autotrain_compatible",
"has_space"
] | null |
# ALBERT XXLarge v1
Pretrained model on English language using a masked language modeling (MLM) objective. It was introduced in
[this paper](https://arxiv.org/abs/1909.11942) and first released in
[this repository](https://github.com/google-research/albert). This model, as all ALBERT models, is uncased: it does not m... |
albert-xxlarge-v2 | null | null | fill-mask | null | null | 9 | 40,731 | [
"bookcorpus",
"wikipedia"
] | [
"1909.11942"
] | [
"apache-2.0"
] | [
"pytorch",
"tf",
"safetensors",
"albert",
"fill-mask",
"en",
"transformers",
"exbert",
"autotrain_compatible",
"has_space"
] | null |
# ALBERT XXLarge v2
Pretrained model on English language using a masked language modeling (MLM) objective. It was introduced in
[this paper](https://arxiv.org/abs/1909.11942) and first released in
[this repository](https://github.com/google-research/albert). This model, as all ALBERT models, is uncased: it does not m... |
bert-base-cased-finetuned-mrpc | null | null | fill-mask | null | null | 0 | 9,686 | null | null | null | [
"pytorch",
"tf",
"jax",
"bert",
"fill-mask",
"transformers",
"autotrain_compatible",
"has_space"
] | null | |
bert-base-cased | null | null | fill-mask | null | null | 104 | 7,716,025 | [
"bookcorpus",
"wikipedia"
] | [
"1810.04805"
] | [
"apache-2.0"
] | [
"pytorch",
"tf",
"jax",
"safetensors",
"bert",
"fill-mask",
"en",
"transformers",
"exbert",
"autotrain_compatible",
"has_space"
] | null |
# BERT base model (cased)
Pretrained model on English language using a masked language modeling (MLM) objective. It was introduced in
[this paper](https://arxiv.org/abs/1810.04805) and first released in
[this repository](https://github.com/google-research/bert). This model is case-sensitive: it makes a difference bet... |
bert-base-chinese | null | null | fill-mask | null | null | 358 | 2,273,140 | null | [
"1810.04805"
] | null | [
"pytorch",
"tf",
"jax",
"safetensors",
"bert",
"fill-mask",
"zh",
"transformers",
"autotrain_compatible",
"has_space"
] | null |
# Bert-base-chinese
## Table of Contents
- [Model Details](#model-details)
- [Uses](#uses)
- [Risks, Limitations and Biases](#risks-limitations-and-biases)
- [Training](#training)
- [Evaluation](#evaluation)
- [How to Get Started With the Model](#how-to-get-started-with-the-model)
## Model Details
### Model Descri... |
bert-base-german-cased | null | null | fill-mask | null | null | 31 | 112,445 | null | null | [
"mit"
] | [
"pytorch",
"tf",
"jax",
"safetensors",
"bert",
"fill-mask",
"de",
"transformers",
"exbert",
"autotrain_compatible",
"has_space"
] | null |
<a href="https://huggingface.co/exbert/?model=bert-base-german-cased">
<img width="300px" src="https://cdn-media.huggingface.co/exbert/button.png">
</a>
# German BERT

## Overview
**Language model:** bert-base-cased
**L... |
bert-base-german-dbmdz-cased | null | null | fill-mask | null | null | 0 | 2,071 | null | null | [
"mit"
] | [
"pytorch",
"jax",
"bert",
"fill-mask",
"de",
"transformers",
"autotrain_compatible",
"has_space"
] | null |
This model is the same as [dbmdz/bert-base-german-cased](https://huggingface.co/dbmdz/bert-base-german-cased). See the [dbmdz/bert-base-german-cased model card](https://huggingface.co/dbmdz/bert-base-german-cased) for details on the model. |
bert-base-german-dbmdz-uncased | null | null | fill-mask | null | null | 2 | 50,194 | null | null | [
"mit"
] | [
"pytorch",
"jax",
"safetensors",
"bert",
"fill-mask",
"de",
"transformers",
"autotrain_compatible",
"has_space"
] | null |
This model is the same as [dbmdz/bert-base-german-uncased](https://huggingface.co/dbmdz/bert-base-german-uncased). See the [dbmdz/bert-base-german-uncased model card](https://huggingface.co/dbmdz/bert-base-german-uncased) for details on the model.
|
bert-base-multilingual-cased | null | null | fill-mask | null | null | 157 | 5,672,763 | [
"wikipedia"
] | [
"1810.04805"
] | [
"apache-2.0"
] | [
"pytorch",
"tf",
"jax",
"safetensors",
"bert",
"fill-mask",
"multilingual",
"af",
"sq",
"ar",
"an",
"hy",
"ast",
"az",
"ba",
"eu",
"bar",
"be",
"bn",
"inc",
"bs",
"br",
"bg",
"my",
"ca",
"ceb",
"ce",
"zh",
"cv",
"hr",
"cs",
"da",
"nl",
"en",
"et",
... | null |
# BERT multilingual base model (cased)
Pretrained model on the top 104 languages with the largest Wikipedia using a masked language modeling (MLM) objective.
It was introduced in [this paper](https://arxiv.org/abs/1810.04805) and first released in
[this repository](https://github.com/google-research/bert). This model... |
bert-base-multilingual-uncased | null | null | fill-mask | null | null | 38 | 257,915 | [
"wikipedia"
] | [
"1810.04805"
] | [
"apache-2.0"
] | [
"pytorch",
"tf",
"jax",
"safetensors",
"bert",
"fill-mask",
"multilingual",
"af",
"sq",
"ar",
"an",
"hy",
"ast",
"az",
"ba",
"eu",
"bar",
"be",
"bn",
"inc",
"bs",
"br",
"bg",
"my",
"ca",
"ceb",
"ce",
"zh",
"cv",
"hr",
"cs",
"da",
"nl",
"en",
"et",
... | null |
# BERT multilingual base model (uncased)
Pretrained model on the top 102 languages with the largest Wikipedia using a masked language modeling (MLM) objective.
It was introduced in [this paper](https://arxiv.org/abs/1810.04805) and first released in
[this repository](https://github.com/google-research/bert). This mod... |
bert-base-uncased | null | null | fill-mask | null | null | 839 | 62,377,709 | [
"bookcorpus",
"wikipedia"
] | [
"1810.04805"
] | [
"apache-2.0"
] | [
"pytorch",
"tf",
"jax",
"rust",
"safetensors",
"bert",
"fill-mask",
"en",
"transformers",
"exbert",
"autotrain_compatible",
"has_space"
] | null |
# BERT base model (uncased)
Pretrained model on English language using a masked language modeling (MLM) objective. It was introduced in
[this paper](https://arxiv.org/abs/1810.04805) and first released in
[this repository](https://github.com/google-research/bert). This model is uncased: it does not make a difference
... |
bert-large-cased-whole-word-masking-finetuned-squad | null | null | question-answering | null | null | 0 | 11,494 | [
"bookcorpus",
"wikipedia"
] | [
"1810.04805"
] | [
"apache-2.0"
] | [
"pytorch",
"tf",
"jax",
"rust",
"safetensors",
"bert",
"question-answering",
"en",
"transformers",
"autotrain_compatible",
"has_space"
] | null |
# BERT large model (cased) whole word masking finetuned on SQuAD
Pretrained model on English language using a masked language modeling (MLM) objective. It was introduced in
[this paper](https://arxiv.org/abs/1810.04805) and first released in
[this repository](https://github.com/google-research/bert). This model is ca... |
bert-large-cased-whole-word-masking | null | null | fill-mask | null | null | 3 | 3,774 | [
"bookcorpus",
"wikipedia"
] | [
"1810.04805"
] | [
"apache-2.0"
] | [
"pytorch",
"tf",
"jax",
"bert",
"fill-mask",
"en",
"transformers",
"autotrain_compatible",
"has_space"
] | null |
# BERT large model (cased) whole word masking
Pretrained model on English language using a masked language modeling (MLM) objective. It was introduced in
[this paper](https://arxiv.org/abs/1810.04805) and first released in
[this repository](https://github.com/google-research/bert). This model is cased: it makes a dif... |
bert-large-cased | null | null | fill-mask | null | null | 7 | 342,338 | [
"bookcorpus",
"wikipedia"
] | [
"1810.04805"
] | [
"apache-2.0"
] | [
"pytorch",
"tf",
"jax",
"safetensors",
"bert",
"fill-mask",
"en",
"transformers",
"autotrain_compatible",
"has_space"
] | null |
# BERT large model (cased)
Pretrained model on English language using a masked language modeling (MLM) objective. It was introduced in
[this paper](https://arxiv.org/abs/1810.04805) and first released in
[this repository](https://github.com/google-research/bert). This model is cased: it makes a difference
between eng... |
bert-large-uncased-whole-word-masking-finetuned-squad | null | null | question-answering | null | null | 85 | 519,563 | [
"bookcorpus",
"wikipedia"
] | [
"1810.04805"
] | [
"apache-2.0"
] | [
"pytorch",
"tf",
"jax",
"safetensors",
"bert",
"question-answering",
"en",
"transformers",
"autotrain_compatible",
"has_space"
] | null |
# BERT large model (uncased) whole word masking finetuned on SQuAD
Pretrained model on English language using a masked language modeling (MLM) objective. It was introduced in
[this paper](https://arxiv.org/abs/1810.04805) and first released in
[this repository](https://github.com/google-research/bert). This model is ... |
bert-large-uncased-whole-word-masking | null | null | fill-mask | null | null | 6 | 61,415 | [
"bookcorpus",
"wikipedia"
] | [
"1810.04805"
] | [
"apache-2.0"
] | [
"pytorch",
"tf",
"jax",
"safetensors",
"bert",
"fill-mask",
"en",
"transformers",
"autotrain_compatible",
"has_space"
] | null |
# BERT large model (uncased) whole word masking
Pretrained model on English language using a masked language modeling (MLM) objective. It was introduced in
[this paper](https://arxiv.org/abs/1810.04805) and first released in
[this repository](https://github.com/google-research/bert). This model is uncased: it does no... |
bert-large-uncased | null | null | fill-mask | null | null | 26 | 1,076,096 | [
"bookcorpus",
"wikipedia"
] | [
"1810.04805"
] | [
"apache-2.0"
] | [
"pytorch",
"tf",
"jax",
"safetensors",
"bert",
"fill-mask",
"en",
"transformers",
"autotrain_compatible",
"has_space"
] | null |
# BERT large model (uncased)
Pretrained model on English language using a masked language modeling (MLM) objective. It was introduced in
[this paper](https://arxiv.org/abs/1810.04805) and first released in
[this repository](https://github.com/google-research/bert). This model is uncased: it does not make a difference... |
camembert-base | null | null | fill-mask | null | null | 36 | 1,247,645 | [
"oscar"
] | [
"1911.03894"
] | [
"mit"
] | [
"pytorch",
"tf",
"safetensors",
"camembert",
"fill-mask",
"fr",
"transformers",
"autotrain_compatible",
"has_space"
] | null |
# CamemBERT: a Tasty French Language Model
## Table of Contents
- [Model Details](#model-details)
- [Uses](#uses)
- [Risks, Limitations and Biases](#risks-limitations-and-biases)
- [Training](#training)
- [Evaluation](#evaluation)
- [Citation Information](#citation-information)
- [How to Get Started With the Model](#... |
ctrl | null | null | null | null | null | 1 | 13,392 | null | [
"1909.05858",
"1910.09700"
] | [
"bsd-3-clause"
] | [
"pytorch",
"tf",
"ctrl",
"en",
"transformers",
"has_space"
] | null |
# ctrl
# Table of Contents
1. [Model Details](#model-details)
2. [Uses](#uses)
3. [Bias, Risks, and Limitations](#bias-risks-and-limitations)
4. [Training](#training)
5. [Evaluation](#evaluation)
6. [Environmental Impact](#environmental-impact)
7. [Technical Specifications](#technical-specifications)
8. [Citation](... |
distilbert-base-cased-distilled-squad | null | null | question-answering | null | null | 115 | 261,012 | [
"squad"
] | [
"1910.01108",
"1910.09700"
] | [
"apache-2.0"
] | [
"pytorch",
"tf",
"rust",
"safetensors",
"openvino",
"distilbert",
"question-answering",
"en",
"transformers",
"model-index",
"autotrain_compatible",
"has_space"
] | null |
# DistilBERT base cased distilled SQuAD
## Table of Contents
- [Model Details](#model-details)
- [How To Get Started With the Model](#how-to-get-started-with-the-model)
- [Uses](#uses)
- [Risks, Limitations and Biases](#risks-limitations-and-biases)
- [Training](#training)
- [Evaluation](#evaluation)
- [Environmental... |
distilbert-base-cased | null | null | null | null | null | 16 | 585,915 | [
"bookcorpus",
"wikipedia"
] | [
"1910.01108"
] | [
"apache-2.0"
] | [
"pytorch",
"tf",
"onnx",
"distilbert",
"en",
"transformers",
"has_space"
] | null |
# Model Card for DistilBERT base model (cased)
This model is a distilled version of the [BERT base model](https://huggingface.co/bert-base-cased).
It was introduced in [this paper](https://arxiv.org/abs/1910.01108).
The code for the distillation process can be found
[here](https://github.com/huggingface/transformers/... |
distilbert-base-german-cased | null | null | fill-mask | null | null | 9 | 42,160 | null | null | [
"apache-2.0"
] | [
"pytorch",
"safetensors",
"distilbert",
"fill-mask",
"de",
"transformers",
"autotrain_compatible",
"has_space"
] | null | ## distilbert-base-german-cased
|
distilbert-base-multilingual-cased | null | null | fill-mask | null | null | 44 | 4,227,537 | [
"wikipedia"
] | [
"1910.01108",
"1910.09700"
] | [
"apache-2.0"
] | [
"pytorch",
"tf",
"onnx",
"safetensors",
"distilbert",
"fill-mask",
"multilingual",
"af",
"sq",
"ar",
"an",
"hy",
"ast",
"az",
"ba",
"eu",
"bar",
"be",
"bn",
"inc",
"bs",
"br",
"bg",
"my",
"ca",
"ceb",
"ce",
"zh",
"cv",
"hr",
"cs",
"da",
"nl",
"en",
... | null |
# Model Card for DistilBERT base multilingual (cased)
# Table of Contents
1. [Model Details](#model-details)
2. [Uses](#uses)
3. [Bias, Risks, and Limitations](#bias-risks-and-limitations)
4. [Training Details](#training-details)
5. [Evaluation](#evaluation)
6. [Environmental Impact](#environmental-impact)
7. [Citat... |
distilbert-base-uncased-distilled-squad | null | null | question-answering | null | null | 29 | 218,844 | [
"squad"
] | [
"1910.01108",
"1910.09700"
] | [
"apache-2.0"
] | [
"pytorch",
"tf",
"tflite",
"coreml",
"safetensors",
"distilbert",
"question-answering",
"en",
"transformers",
"autotrain_compatible",
"has_space"
] | null |
# DistilBERT base uncased distilled SQuAD
## Table of Contents
- [Model Details](#model-details)
- [How To Get Started With the Model](#how-to-get-started-with-the-model)
- [Uses](#uses)
- [Risks, Limitations and Biases](#risks-limitations-and-biases)
- [Training](#training)
- [Evaluation](#evaluation)
- [Environment... |
distilbert-base-uncased-finetuned-sst-2-english | null | null | text-classification | null | null | 212 | 2,590,281 | [
"sst2",
"glue"
] | [
"1910.01108"
] | [
"apache-2.0"
] | [
"pytorch",
"tf",
"rust",
"safetensors",
"distilbert",
"text-classification",
"en",
"transformers",
"model-index",
"has_space"
] | [
"10.57967/hf/0181"
] |
# DistilBERT base uncased finetuned SST-2
## Table of Contents
- [Model Details](#model-details)
- [How to Get Started With the Model](#how-to-get-started-with-the-model)
- [Uses](#uses)
- [Risks, Limitations and Biases](#risks-limitations-and-biases)
- [Training](#training)
## Model Details
**Model Description:** T... |
distilbert-base-uncased | null | null | fill-mask | null | null | 195 | 8,848,342 | [
"bookcorpus",
"wikipedia"
] | [
"1910.01108"
] | [
"apache-2.0"
] | [
"pytorch",
"tf",
"jax",
"rust",
"safetensors",
"distilbert",
"fill-mask",
"en",
"transformers",
"exbert",
"autotrain_compatible",
"has_space"
] | null |
# DistilBERT base model (uncased)
This model is a distilled version of the [BERT base model](https://huggingface.co/bert-base-uncased). It was
introduced in [this paper](https://arxiv.org/abs/1910.01108). The code for the distillation process can be found
[here](https://github.com/huggingface/transformers/tree/main/e... |
distilgpt2 | null | null | text-generation | null | null | 193 | 1,349,168 | [
"openwebtext"
] | [
"1910.01108",
"2201.08542",
"2203.12574",
"1910.09700",
"1503.02531"
] | [
"apache-2.0"
] | [
"pytorch",
"tf",
"jax",
"tflite",
"rust",
"coreml",
"safetensors",
"gpt2",
"text-generation",
"en",
"transformers",
"exbert",
"model-index",
"co2_eq_emissions",
"has_space"
] | null |
# DistilGPT2
DistilGPT2 (short for Distilled-GPT2) is an English-language model pre-trained with the supervision of the smallest version of Generative Pre-trained Transformer 2 (GPT-2). Like GPT-2, DistilGPT2 can be used to generate text. Users of this model card should also consider information about the design, tra... |
distilroberta-base | null | null | fill-mask | null | null | 67 | 3,760,208 | [
"openwebtext"
] | [
"1910.01108",
"1910.09700"
] | [
"apache-2.0"
] | [
"pytorch",
"tf",
"jax",
"rust",
"safetensors",
"roberta",
"fill-mask",
"en",
"transformers",
"exbert",
"autotrain_compatible",
"has_space"
] | null |
# Model Card for DistilRoBERTa base
# Table of Contents
1. [Model Details](#model-details)
2. [Uses](#uses)
3. [Bias, Risks, and Limitations](#bias-risks-and-limitations)
4. [Training Details](#training-details)
5. [Evaluation](#evaluation)
6. [Environmental Impact](#environmental-impact)
7. [Citation](#citation)
8.... |
gpt2-large | null | null | text-generation | null | null | 106 | 1,447,259 | null | [
"1910.09700"
] | [
"mit"
] | [
"pytorch",
"tf",
"jax",
"rust",
"safetensors",
"gpt2",
"text-generation",
"en",
"transformers",
"has_space"
] | null |
# GPT-2 Large
## Table of Contents
- [Model Details](#model-details)
- [How To Get Started With the Model](#how-to-get-started-with-the-model)
- [Uses](#uses)
- [Risks, Limitations and Biases](#risks-limitations-and-biases)
- [Training](#training)
- [Evaluation](#evaluation)
- [Environmental Impact](#environmental-im... |
gpt2-medium | null | null | text-generation | null | null | 50 | 842,954 | null | [
"1910.09700"
] | [
"mit"
] | [
"pytorch",
"tf",
"jax",
"rust",
"safetensors",
"gpt2",
"text-generation",
"en",
"transformers",
"has_space"
] | null |
# GPT-2 Medium
## Model Details
**Model Description:** GPT-2 Medium is the **355M parameter** version of GPT-2, a transformer-based language model created and released by OpenAI. The model is a pretrained model on English language using a causal language modeling (CLM) objective.
- **Developed by:** OpenAI, see [a... |
gpt2-xl | null | null | text-generation | null | null | 121 | 298,230 | null | [
"1910.09700"
] | [
"mit"
] | [
"pytorch",
"tf",
"jax",
"rust",
"gpt2",
"text-generation",
"en",
"transformers",
"has_space"
] | null |
# GPT-2 XL
## Table of Contents
- [Model Details](#model-details)
- [How To Get Started With the Model](#how-to-get-started-with-the-model)
- [Uses](#uses)
- [Risks, Limitations and Biases](#risks-limitations-and-biases)
- [Training](#training)
- [Evaluation](#evaluation)
- [Environmental Impact](#environmental-impac... |
gpt2 | null | null | text-generation | null | null | 1,033 | 20,964,100 | null | null | [
"mit"
] | [
"pytorch",
"tf",
"jax",
"tflite",
"rust",
"safetensors",
"gpt2",
"text-generation",
"en",
"transformers",
"exbert",
"has_space"
] | [
"10.57967/hf/0039"
] |
# GPT-2
Test the whole generation capabilities here: https://transformer.huggingface.co/doc/gpt2-large
Pretrained model on English language using a causal language modeling (CLM) objective. It was introduced in
[this paper](https://d4mucfpksywv.cloudfront.net/better-language-models/language_models_are_unsupervised_... |
openai-gpt | null | null | text-generation | null | null | 133 | 92,366 | null | [
"1705.11168",
"1803.02324",
"1910.09700"
] | [
"mit"
] | [
"pytorch",
"tf",
"rust",
"safetensors",
"openai-gpt",
"text-generation",
"en",
"transformers",
"has_space"
] | null |
# OpenAI GPT
## Table of Contents
- [Model Details](#model-details)
- [How To Get Started With the Model](#how-to-get-started-with-the-model)
- [Uses](#uses)
- [Risks, Limitations and Biases](#risks-limitations-and-biases)
- [Training](#training)
- [Evaluation](#evaluation)
- [Environmental Impact](#environmental-imp... |
roberta-base-openai-detector | null | null | text-classification | null | null | 71 | 210,561 | [
"bookcorpus",
"wikipedia"
] | [
"1904.09751",
"1910.09700"
] | [
"mit"
] | [
"pytorch",
"tf",
"jax",
"safetensors",
"roberta",
"text-classification",
"en",
"transformers",
"exbert",
"has_space"
] | null |
# RoBERTa Base OpenAI Detector
## Table of Contents
- [Model Details](#model-details)
- [Uses](#uses)
- [Risks, Limitations and Biases](#risks-limitations-and-biases)
- [Training](#training)
- [Evaluation](#evaluation)
- [Environmental Impact](#environmental-impact)
- [Technical Specifications](#technical-specificati... |
roberta-base | null | null | fill-mask | null | null | 161 | 10,416,827 | [
"bookcorpus",
"wikipedia"
] | [
"1907.11692",
"1806.02847"
] | [
"mit"
] | [
"pytorch",
"tf",
"jax",
"rust",
"safetensors",
"roberta",
"fill-mask",
"en",
"transformers",
"exbert",
"autotrain_compatible",
"has_space"
] | null |
# RoBERTa base model
Pretrained model on English language using a masked language modeling (MLM) objective. It was introduced in
[this paper](https://arxiv.org/abs/1907.11692) and first released in
[this repository](https://github.com/pytorch/fairseq/tree/master/examples/roberta). This model is case-sensitive: it
mak... |
roberta-large-mnli | null | null | text-classification | null | null | 64 | 127,340 | [
"multi_nli",
"wikipedia",
"bookcorpus"
] | [
"1907.11692",
"1806.02847",
"1804.07461",
"1704.05426",
"1508.05326",
"1809.05053",
"1910.09700"
] | [
"mit"
] | [
"pytorch",
"tf",
"jax",
"safetensors",
"roberta",
"text-classification",
"en",
"transformers",
"autogenerated-modelcard",
"has_space"
] | null |
# roberta-large-mnli
## Table of Contents
- [Model Details](#model-details)
- [How To Get Started With the Model](#how-to-get-started-with-the-model)
- [Uses](#uses)
- [Risks, Limitations and Biases](#risks-limitations-and-biases)
- [Training](#training)
- [Evaluation](#evaluation-results)
- [Environmental Impact](#e... |
roberta-large-openai-detector | null | null | text-classification | null | null | 5 | 10,420 | [
"bookcorpus",
"wikipedia"
] | [
"1904.09751",
"1910.09700"
] | [
"mit"
] | [
"pytorch",
"jax",
"roberta",
"text-classification",
"en",
"transformers",
"exbert",
"has_space"
] | null |
# RoBERTa Large OpenAI Detector
## Table of Contents
- [Model Details](#model-details)
- [Uses](#uses)
- [Risks, Limitations and Biases](#risks-limitations-and-biases)
- [Training](#training)
- [Evaluation](#evaluation)
- [Environmental Impact](#environmental-impact)
- [Technical Specifications](#technical-specificat... |
roberta-large | null | null | fill-mask | null | null | 106 | 5,178,710 | [
"bookcorpus",
"wikipedia"
] | [
"1907.11692",
"1806.02847"
] | [
"mit"
] | [
"pytorch",
"tf",
"jax",
"onnx",
"safetensors",
"roberta",
"fill-mask",
"en",
"transformers",
"exbert",
"autotrain_compatible",
"has_space"
] | null |
# RoBERTa large model
Pretrained model on English language using a masked language modeling (MLM) objective. It was introduced in
[this paper](https://arxiv.org/abs/1907.11692) and first released in
[this repository](https://github.com/pytorch/fairseq/tree/master/examples/roberta). This model is case-sensitive: ... |
t5-11b | null | null | translation | null | null | 28 | 21,420 | [
"c4"
] | [
"1805.12471",
"1708.00055",
"1704.05426",
"1606.05250",
"1808.09121",
"1810.12885",
"1905.10044",
"1910.09700"
] | [
"apache-2.0"
] | [
"pytorch",
"tf",
"t5",
"text2text-generation",
"en",
"fr",
"ro",
"de",
"multilingual",
"transformers",
"summarization",
"translation",
"autotrain_compatible",
"has_space"
] | null |
# Model Card for T5 11B

# Table of Contents
1. [Model Details](#model-details)
2. [Uses](#uses)
3. [... |
t5-3b | null | null | translation | null | null | 14 | 92,431 | [
"c4"
] | [
"1805.12471",
"1708.00055",
"1704.05426",
"1606.05250",
"1808.09121",
"1810.12885",
"1905.10044",
"1910.09700"
] | [
"apache-2.0"
] | [
"pytorch",
"tf",
"t5",
"text2text-generation",
"en",
"fr",
"ro",
"de",
"multilingual",
"transformers",
"summarization",
"translation",
"autotrain_compatible",
"has_space"
] | null |
# Model Card for T5-3B

# Table of Contents
1. [Model Details](#model-details)
2. [Uses](#uses)
3. [B... |
t5-base | null | null | translation | null | null | 231 | 8,093,851 | [
"c4"
] | [
"1805.12471",
"1708.00055",
"1704.05426",
"1606.05250",
"1808.09121",
"1810.12885",
"1905.10044",
"1910.09700"
] | [
"apache-2.0"
] | [
"pytorch",
"tf",
"jax",
"rust",
"safetensors",
"t5",
"text2text-generation",
"en",
"fr",
"ro",
"de",
"transformers",
"summarization",
"translation",
"autotrain_compatible",
"has_space"
] | null |
# Model Card for T5 Base

# Table of Contents
1. [Model Details](#model-details)
2. [Uses](#uses)
3. ... |
t5-large | null | null | translation | null | null | 62 | 410,799 | [
"c4"
] | [
"1805.12471",
"1708.00055",
"1704.05426",
"1606.05250",
"1808.09121",
"1810.12885",
"1905.10044",
"1910.09700"
] | [
"apache-2.0"
] | [
"pytorch",
"tf",
"jax",
"safetensors",
"t5",
"text2text-generation",
"en",
"fr",
"ro",
"de",
"multilingual",
"transformers",
"summarization",
"translation",
"autotrain_compatible",
"has_space"
] | null |
# Model Card for T5 Large

# Table of Contents
1. [Model Details](#model-details)
2. [Uses](#uses)
3.... |
t5-small | null | null | translation | null | null | 100 | 1,851,548 | [
"c4"
] | [
"1805.12471",
"1708.00055",
"1704.05426",
"1606.05250",
"1808.09121",
"1810.12885",
"1905.10044",
"1910.09700"
] | [
"apache-2.0"
] | [
"pytorch",
"tf",
"jax",
"rust",
"safetensors",
"t5",
"text2text-generation",
"en",
"fr",
"ro",
"de",
"multilingual",
"transformers",
"summarization",
"translation",
"autotrain_compatible",
"has_space"
] | null |
# Model Card for T5 Small

# Table of Contents
1. [Model Details](#model-details)
2. [Uses](#uses)
3.... |
transfo-xl-wt103 | null | null | text-generation | null | null | 4 | 16,864 | [
"wikitext-103"
] | [
"1901.02860"
] | null | [
"pytorch",
"tf",
"transfo-xl",
"text-generation",
"en",
"transformers",
"has_space"
] | null |
# Transfo-xl-wt103
## Table of Contents
- [Model Details](#model-details)
- [Uses](#uses)
- [Risks, Limitations and Biases](#risks-limitations-and-biases)
- [Training](#training)
- [Evaluation](#evaluation)
- [Citation Information](#citation-information)
- [How to Get Started With the Model](#how-to-get-started-with... |
xlm-clm-ende-1024 | null | null | fill-mask | null | null | 0 | 27,875 | null | [
"1901.07291",
"1910.09700"
] | null | [
"pytorch",
"tf",
"safetensors",
"xlm",
"fill-mask",
"multilingual",
"en",
"de",
"transformers",
"autotrain_compatible",
"has_space"
] | null |
# xlm-clm-ende-1024
# Table of Contents
1. [Model Details](#model-details)
2. [Uses](#uses)
3. [Bias, Risks, and Limitations](#bias-risks-and-limitations)
4. [Training](#training)
5. [Evaluation](#evaluation)
6. [Environmental Impact](#environmental-impact)
7. [Technical Specifications](#technical-specifications)
8... |
xlm-clm-enfr-1024 | null | null | fill-mask | null | null | 0 | 267 | null | [
"1901.07291",
"1910.09700"
] | null | [
"pytorch",
"tf",
"xlm",
"fill-mask",
"multilingual",
"en",
"fr",
"transformers",
"autotrain_compatible",
"has_space"
] | null |
# xlm-clm-enfr-1024
# Table of Contents
1. [Model Details](#model-details)
2. [Uses](#uses)
3. [Bias, Risks, and Limitations](#bias-risks-and-limitations)
4. [Training](#training)
5. [Evaluation](#evaluation)
6. [Environmental Impact](#environmental-impact)
7. [Technical Specifications](#technical-specifications)
8... |
xlm-mlm-100-1280 | null | null | fill-mask | null | null | 0 | 1,108 | null | [
"1901.07291",
"1911.02116",
"1910.09700"
] | [
"cc-by-nc-4.0"
] | [
"pytorch",
"tf",
"xlm",
"fill-mask",
"multilingual",
"en",
"es",
"fr",
"de",
"zh",
"ru",
"pt",
"it",
"ar",
"ja",
"id",
"tr",
"nl",
"pl",
"fa",
"vi",
"sv",
"ko",
"he",
"ro",
"no",
"hi",
"uk",
"cs",
"fi",
"hu",
"th",
"da",
"ca",
"el",
"bg",
"sr",... | null |
# xlm-mlm-100-1280
# Table of Contents
1. [Model Details](#model-details)
2. [Uses](#uses)
3. [Bias, Risks, and Limitations](#bias-risks-and-limitations)
4. [Training](#training)
5. [Evaluation](#evaluation)
6. [Environmental Impact](#environmental-impact)
7. [Technical Specifications](#technical-specifications)
8.... |
xlm-mlm-17-1280 | null | null | fill-mask | null | null | 2 | 185 | null | [
"1901.07291",
"1911.02116",
"1910.09700"
] | [
"cc-by-nc-4.0"
] | [
"pytorch",
"tf",
"xlm",
"fill-mask",
"multilingual",
"en",
"fr",
"es",
"de",
"it",
"pt",
"nl",
"sv",
"pl",
"ru",
"ar",
"tr",
"zh",
"ja",
"ko",
"hi",
"vi",
"transformers",
"autotrain_compatible",
"has_space"
] | null |
# xlm-mlm-17-1280
# Table of Contents
1. [Model Details](#model-details)
2. [Uses](#uses)
3. [Bias, Risks, and Limitations](#bias-risks-and-limitations)
4. [Training](#training)
5. [Evaluation](#evaluation)
6. [Environmental Impact](#environmental-impact)
7. [Technical Specifications](#technical-specifications)
8. ... |
xlm-mlm-en-2048 | null | null | fill-mask | null | null | 0 | 5,577 | null | [
"1901.07291",
"1911.02116",
"1910.09700"
] | [
"cc-by-nc-4.0"
] | [
"pytorch",
"tf",
"xlm",
"fill-mask",
"en",
"transformers",
"exbert",
"autotrain_compatible",
"has_space"
] | null |
# xlm-mlm-en-2048
# Table of Contents
1. [Model Details](#model-details)
2. [Uses](#uses)
3. [Bias, Risks, and Limitations](#bias-risks-and-limitations)
4. [Training](#training)
5. [Evaluation](#evaluation)
6. [Environmental Impact](#environmental-impact)
7. [Citation](#citation)
8. [Model Card Authors](#model-card... |
xlm-mlm-ende-1024 | null | null | fill-mask | null | null | 1 | 277 | null | [
"1901.07291",
"1910.09700"
] | [
"cc-by-nc-4.0"
] | [
"pytorch",
"tf",
"xlm",
"fill-mask",
"multilingual",
"en",
"de",
"transformers",
"autotrain_compatible",
"has_space"
] | null |
# xlm-mlm-ende-1024
# Table of Contents
1. [Model Details](#model-details)
2. [Uses](#uses)
3. [Bias, Risks, and Limitations](#bias-risks-and-limitations)
4. [Training](#training)
5. [Evaluation](#evaluation)
6. [Environmental Impact](#environmental-impact)
7. [Technical Specifications](#technical-specifications)
8... |
xlm-mlm-enfr-1024 | null | null | fill-mask | null | null | 0 | 346 | null | [
"1901.07291",
"1910.09700"
] | [
"cc-by-nc-4.0"
] | [
"pytorch",
"tf",
"xlm",
"fill-mask",
"multilingual",
"en",
"fr",
"transformers",
"autotrain_compatible",
"has_space"
] | null |
# xlm-mlm-enfr-1024
# Table of Contents
1. [Model Details](#model-details)
2. [Uses](#uses)
3. [Bias, Risks, and Limitations](#bias-risks-and-limitations)
4. [Training](#training)
5. [Evaluation](#evaluation)
6. [Environmental Impact](#environmental-impact)
7. [Technical Specifications](#technical-specifications)
8... |
xlm-mlm-enro-1024 | null | null | fill-mask | null | null | 0 | 30 | null | [
"1901.07291",
"1910.09700"
] | [
"cc-by-nc-4.0"
] | [
"pytorch",
"tf",
"xlm",
"fill-mask",
"multilingual",
"en",
"ro",
"transformers",
"autotrain_compatible",
"has_space"
] | null |
# xlm-mlm-enro-1024
# Table of Contents
1. [Model Details](#model-details)
2. [Uses](#uses)
3. [Bias, Risks, and Limitations](#bias-risks-and-limitations)
4. [Training](#training)
5. [Evaluation](#evaluation)
6. [Environmental Impact](#environmental-impact)
7. [Technical Specifications](#technical-specifications)
8... |
xlm-mlm-tlm-xnli15-1024 | null | null | fill-mask | null | null | 0 | 289 | null | [
"1901.07291",
"1910.09700"
] | [
"cc-by-nc-4.0"
] | [
"pytorch",
"tf",
"xlm",
"fill-mask",
"multilingual",
"en",
"fr",
"es",
"de",
"el",
"bg",
"ru",
"tr",
"ar",
"vi",
"th",
"zh",
"hi",
"sw",
"ur",
"transformers",
"autotrain_compatible",
"has_space"
] | null |
# xlm-mlm-tlm-xnli15-1024
# Table of Contents
1. [Model Details](#model-details)
2. [Uses](#uses)
3. [Bias, Risks, and Limitations](#bias-risks-and-limitations)
4. [Training Details](#training-details)
5. [Evaluation](#evaluation)
6. [Environmental Impact](#environmental-impact)
7. [Technical Specifications](#techn... |
xlm-mlm-xnli15-1024 | null | null | fill-mask | null | null | 0 | 2,060 | null | [
"1901.07291",
"1910.09700"
] | [
"cc-by-nc-4.0"
] | [
"pytorch",
"tf",
"xlm",
"fill-mask",
"multilingual",
"en",
"fr",
"es",
"de",
"el",
"bg",
"ru",
"tr",
"ar",
"vi",
"th",
"zh",
"hi",
"sw",
"ur",
"transformers",
"autotrain_compatible",
"has_space"
] | null |
# xlm-mlm-xnli15-1024
# Table of Contents
1. [Model Details](#model-details)
2. [Uses](#uses)
3. [Bias, Risks, and Limitations](#bias-risks-and-limitations)
4. [Training Details](#training-details)
5. [Evaluation](#evaluation)
6. [Environmental Impact](#environmental-impact)
7. [Technical Specifications](#technical... |
xlm-roberta-base | null | null | fill-mask | null | null | 286 | 20,991,848 | null | [
"1911.02116"
] | [
"mit"
] | [
"pytorch",
"tf",
"jax",
"onnx",
"safetensors",
"xlm-roberta",
"fill-mask",
"multilingual",
"af",
"am",
"ar",
"as",
"az",
"be",
"bg",
"bn",
"br",
"bs",
"ca",
"cs",
"cy",
"da",
"de",
"el",
"en",
"eo",
"es",
"et",
"eu",
"fa",
"fi",
"fr",
"fy",
"ga",
"... | null |
# XLM-RoBERTa (base-sized model)
XLM-RoBERTa model pre-trained on 2.5TB of filtered CommonCrawl data containing 100 languages. It was introduced in the paper [Unsupervised Cross-lingual Representation Learning at Scale](https://arxiv.org/abs/1911.02116) by Conneau et al. and first released in [this repository](https... |
xlm-roberta-large-finetuned-conll02-dutch | null | null | fill-mask | null | null | 0 | 963 | null | [
"1911.02116",
"1910.09700"
] | null | [
"pytorch",
"rust",
"xlm-roberta",
"fill-mask",
"multilingual",
"af",
"am",
"ar",
"as",
"az",
"be",
"bg",
"bn",
"br",
"bs",
"ca",
"cs",
"cy",
"da",
"de",
"el",
"en",
"eo",
"es",
"et",
"eu",
"fa",
"fi",
"fr",
"fy",
"ga",
"gd",
"gl",
"gu",
"ha",
"he... | null |
# xlm-roberta-large-finetuned-conll02-dutch
# Table of Contents
1. [Model Details](#model-details)
2. [Uses](#uses)
3. [Bias, Risks, and Limitations](#bias-risks-and-limitations)
4. [Training](#training)
5. [Evaluation](#evaluation)
6. [Environmental Impact](#environmental-impact)
7. [Technical Specifications](#tec... |
xlm-roberta-large-finetuned-conll02-spanish | null | null | fill-mask | null | null | 0 | 367 | null | [
"1911.02116",
"1910.09700"
] | null | [
"pytorch",
"rust",
"xlm-roberta",
"fill-mask",
"multilingual",
"af",
"am",
"ar",
"as",
"az",
"be",
"bg",
"bn",
"br",
"bs",
"ca",
"cs",
"cy",
"da",
"de",
"el",
"en",
"eo",
"es",
"et",
"eu",
"fa",
"fi",
"fr",
"fy",
"ga",
"gd",
"gl",
"gu",
"ha",
"he... | null |
# xlm-roberta-large-finetuned-conll02-spanish
# Table of Contents
1. [Model Details](#model-details)
2. [Uses](#uses)
3. [Bias, Risks, and Limitations](#bias-risks-and-limitations)
4. [Training](#training)
5. [Evaluation](#evaluation)
6. [Environmental Impact](#environmental-impact)
7. [Technical Specifications](#t... |
xlm-roberta-large-finetuned-conll03-english | null | null | token-classification | null | null | 53 | 110,887 | null | [
"1911.02116",
"2008.03415",
"1910.09700"
] | null | [
"pytorch",
"rust",
"xlm-roberta",
"token-classification",
"multilingual",
"af",
"am",
"ar",
"as",
"az",
"be",
"bg",
"bn",
"br",
"bs",
"ca",
"cs",
"cy",
"da",
"de",
"el",
"en",
"eo",
"es",
"et",
"eu",
"fa",
"fi",
"fr",
"fy",
"ga",
"gd",
"gl",
"gu",
... | null |
# xlm-roberta-large-finetuned-conll03-english
# Table of Contents
1. [Model Details](#model-details)
2. [Uses](#uses)
3. [Bias, Risks, and Limitations](#bias-risks-and-limitations)
4. [Training](#training)
5. [Evaluation](#evaluation)
6. [Environmental Impact](#environmental-impact)
7. [Technical Specifications](#t... |
xlm-roberta-large-finetuned-conll03-german | null | null | token-classification | null | null | 1 | 2,862 | null | [
"1911.02116",
"1910.09700"
] | null | [
"pytorch",
"rust",
"xlm-roberta",
"token-classification",
"multilingual",
"af",
"am",
"ar",
"as",
"az",
"be",
"bg",
"bn",
"br",
"bs",
"ca",
"cs",
"cy",
"da",
"de",
"el",
"en",
"eo",
"es",
"et",
"eu",
"fa",
"fi",
"fr",
"fy",
"ga",
"gd",
"gl",
"gu",
... | null |
# xlm-roberta-large-finetuned-conll03-german
# Table of Contents
1. [Model Details](#model-details)
2. [Uses](#uses)
3. [Bias, Risks, and Limitations](#bias-risks-and-limitations)
4. [Training](#training)
5. [Evaluation](#evaluation)
6. [Environmental Impact](#environmental-impact)
7. [Technical Specifications](#te... |
xlm-roberta-large | null | null | fill-mask | null | null | 139 | 7,372,242 | null | [
"1911.02116"
] | [
"mit"
] | [
"pytorch",
"tf",
"jax",
"onnx",
"safetensors",
"xlm-roberta",
"fill-mask",
"multilingual",
"af",
"am",
"ar",
"as",
"az",
"be",
"bg",
"bn",
"br",
"bs",
"ca",
"cs",
"cy",
"da",
"de",
"el",
"en",
"eo",
"es",
"et",
"eu",
"fa",
"fi",
"fr",
"fy",
"ga",
"... | null |
# XLM-RoBERTa (large-sized model)
XLM-RoBERTa model pre-trained on 2.5TB of filtered CommonCrawl data containing 100 languages. It was introduced in the paper [Unsupervised Cross-lingual Representation Learning at Scale](https://arxiv.org/abs/1911.02116) by Conneau et al. and first released in [this repository](http... |
xlnet-base-cased | null | null | text-generation | null | null | 34 | 163,782 | [
"bookcorpus",
"wikipedia"
] | [
"1906.08237"
] | [
"mit"
] | [
"pytorch",
"tf",
"rust",
"xlnet",
"text-generation",
"en",
"transformers",
"has_space"
] | null |
# XLNet (base-sized model)
XLNet model pre-trained on English language. It was introduced in the paper [XLNet: Generalized Autoregressive Pretraining for Language Understanding](https://arxiv.org/abs/1906.08237) by Yang et al. and first released in [this repository](https://github.com/zihangdai/xlnet/).
Disclaimer... |
xlnet-large-cased | null | null | text-generation | null | null | 12 | 12,332 | [
"bookcorpus",
"wikipedia"
] | [
"1906.08237"
] | [
"mit"
] | [
"pytorch",
"tf",
"xlnet",
"text-generation",
"en",
"transformers",
"has_space"
] | null |
# XLNet (large-sized model)
XLNet model pre-trained on English language. It was introduced in the paper [XLNet: Generalized Autoregressive Pretraining for Language Understanding](https://arxiv.org/abs/1906.08237) by Yang et al. and first released in [this repository](https://github.com/zihangdai/xlnet/).
Disclaime... |
007J/smile | null | null | null | null | null | 0 | 0 | null | null | null | null | null | |
0307061430/xuangou | null | null | null | null | null | 0 | 0 | null | null | null | null | null | |
09panesara/distilbert-base-uncased-finetuned-cola | null | null | text-classification | null | null | 0 | 13 | [
"glue"
] | null | [
"apache-2.0"
] | [
"pytorch",
"tensorboard",
"distilbert",
"text-classification",
"transformers",
"generated_from_trainer",
"model-index"
] | null |
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# distilbert-base-uncased-finetuned-cola
This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/di... |
0x7194633/keyt5-base | null | null | text2text-generation | null | null | 0 | 44 | null | null | [
"mit"
] | [
"pytorch",
"t5",
"text2text-generation",
"ru",
"transformers",
"autotrain_compatible"
] | null | ## keyT5. Base (small) version
[](https://github.com/0x7o/text2keywords "Go to GitHub repo")
[](https://github.com... |
0x7194633/keyt5-large | null | null | text2text-generation | null | null | 0 | 205 | null | null | [
"mit"
] | [
"pytorch",
"safetensors",
"t5",
"text2text-generation",
"ru",
"transformers",
"autotrain_compatible"
] | null | ## keyT5. Large version
[](https://github.com/0x7o/text2keywords "Go to GitHub repo")
[](https://github.com/0x7o/t... |
0xDEADBEA7/DialoGPT-small-rick | null | null | conversational | null | null | 0 | 22 | null | null | null | [
"pytorch",
"gpt2",
"text-generation",
"transformers",
"conversational"
] | null |
# Rick n Morty DialoGPT Model |
123123/ghfk | null | null | null | null | null | 0 | 0 | null | null | null | null | null | |
123456/Arcanegan | null | null | null | null | null | 1 | 0 | null | null | null | null | null | |
1234567/1234567 | null | null | null | null | null | 0 | 0 | null | null | null | null | null | |
123abhiALFLKFO/albert-base-v2-finetuned-sst2 | null | null | null | null | null | 0 | 0 | null | null | null | null | null | |
123abhiALFLKFO/albert-base-v2-yelp-polarity-finetuned-sst2 | null | null | null | null | null | 0 | 0 | null | null | null | null | null | |
123abhiALFLKFO/distilbert-base-uncased-finetuned-cola | null | null | text-classification | null | null | 0 | 29 | [
"glue"
] | null | [
"apache-2.0"
] | [
"pytorch",
"tensorboard",
"distilbert",
"text-classification",
"transformers",
"generated_from_trainer"
] | null |
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# distilbert-base-uncased-finetuned-cola
This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/di... |
123addfg/ar | null | null | null | null | null | 0 | 0 | null | null | null | null | null | |
123www/test_model | null | null | null | null | null | 0 | 3 | null | null | null | [
"pytorch",
"wav2vec2",
"transformers"
] | null | |
13048909972/wav2vec2-common_voice-tr-demo | null | null | automatic-speech-recognition | null | null | 0 | 3 | null | null | null | [
"pytorch",
"tensorboard",
"wav2vec2",
"automatic-speech-recognition",
"transformers"
] | null | |
13048909972/wav2vec2-large-xls-r-300m-tr-colab | null | null | automatic-speech-recognition | null | null | 0 | 2 | null | null | null | [
"pytorch",
"tensorboard",
"wav2vec2",
"automatic-speech-recognition",
"transformers"
] | null | |
13048909972/wav2vec2-large-xlsr-53_common_voice_20211210112254 | null | null | null | null | null | 0 | 0 | null | null | null | null | null | |
13048909972/wav2vec2-large-xlsr-53_common_voice_20211211085606 | null | null | automatic-speech-recognition | null | null | 0 | 2 | null | null | null | [
"pytorch",
"tensorboard",
"wav2vec2",
"automatic-speech-recognition",
"transformers"
] | null | |
13306330378/huiqi_model | null | null | null | null | null | 0 | 0 | null | null | null | null | null | |
13on/gpt2-wishes | null | null | text-generation | null | null | 0 | 2 | null | null | null | [
"pytorch",
"gpt2",
"text-generation",
"transformers"
] | null | |
13on/kw2t-wishes | null | null | text2text-generation | null | null | 0 | 3 | null | null | null | [
"pytorch",
"t5",
"text2text-generation",
"transformers",
"autotrain_compatible"
] | null | |
13onn/gpt2-wishes-2 | null | null | null | null | null | 0 | 0 | null | null | null | null | null | |
13onnn/gpt2-wish | null | null | null | null | null | 0 | 0 | null | null | null | null | null | |
1503277708/namo | null | null | null | null | null | 0 | 0 | null | null | null | null | null | |
1575/7447 | null | null | null | null | null | 0 | 0 | null | null | null | null | null | |
1712871/manual_vn_electra_small | null | null | null | null | null | 0 | 0 | null | null | null | null | null | |
1757968399/tinybert_4_312_1200 | null | null | null | null | null | 0 | 2 | null | null | null | [
"pytorch",
"transformers"
] | null | |
17luke/wav2vec2-large-xls-r-300m-icelandic-samromur | null | null | null | null | null | 0 | 0 | null | null | null | null | null | |
18811449050/bert_cn_finetuning | null | null | text-classification | null | null | 0 | 17 | null | null | null | [
"pytorch",
"jax",
"bert",
"text-classification",
"transformers"
] | null | |
18811449050/bert_finetuning_test | null | null | text-classification | null | null | 0 | 22 | null | null | null | [
"pytorch",
"jax",
"bert",
"text-classification",
"transformers"
] | null | |
1Basco/DialoGPT-small-jake | null | null | conversational | null | null | 0 | 0 | null | null | null | [
"pytorch",
"gpt2",
"text-generation",
"transformers",
"conversational"
] | null |
#Jake Peralta DialoGPT Model |
1n3skh/idk | null | null | null | null | null | 0 | 0 | null | null | null | null | null |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.