|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
"""Siglip2NaViT vision encoder model configuration.""" |
|
|
|
|
|
from transformers import PretrainedConfig |
|
|
|
|
|
|
|
|
class Siglip2NaViTVisionConfig(PretrainedConfig):
    """Configuration for the Siglip2NaViT vision encoder.

    Stores the hyperparameters needed to instantiate the vision tower:
    transformer backbone dimensions, patch-embedding geometry, and
    positional-encoding options (learned PE preservation and RoPE).
    Unrecognized keyword arguments are forwarded to
    ``PretrainedConfig.__init__``.

    Args:
        hidden_size (`int`, defaults to 768):
            Dimensionality of the encoder hidden states.
        intermediate_size (`int`, defaults to 3072):
            Dimensionality of the MLP (feed-forward) layers.
        num_hidden_layers (`int`, defaults to 12):
            Number of transformer encoder layers.
        num_attention_heads (`int`, defaults to 12):
            Number of attention heads per layer.
        num_channels (`int`, defaults to 3):
            Number of input image channels.
        patch_size (`int`, defaults to 16):
            Side length, in pixels, of each image patch.
        hidden_act (`str`, defaults to `"gelu_pytorch_tanh"`):
            Activation function used in the encoder.
        layer_norm_eps (`float`, defaults to 1e-6):
            Epsilon used by the layer-normalization layers.
        attention_dropout (`float`, defaults to 0.0):
            Dropout probability applied to attention weights.
        preserve_original_pe (`bool`, defaults to `True`):
            Whether to keep the original (learned) position embeddings.
        disable_rope (`bool`, defaults to `False`):
            Whether to disable rotary position embeddings.
        num_patches (`int`, defaults to 1024):
            Number of position-embedding slots.
            NOTE(review): presumably the maximum patch count the learned
            PE table supports — confirm against the modeling code.
        rope_theta (`float`, defaults to 10000.0):
            Base frequency used by rotary position embeddings.
        merge_size (`int`, defaults to 2):
            Spatial merge factor for patches.
            NOTE(review): assumed to control patch merging as in
            NaViT-style encoders — confirm against the modeling code.
    """

    model_type = "siglip2_navit"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        patch_size=16,
        hidden_act="gelu_pytorch_tanh",
        layer_norm_eps=1e-6,
        attention_dropout=0.0,
        preserve_original_pe=True,
        disable_rope=False,
        num_patches=1024,
        rope_theta=10000.0,
        merge_size=2,
        **kwargs,
    ):
        # Let the base class consume shared/common kwargs first, per the
        # transformers convention.
        super().__init__(**kwargs)

        # Transformer backbone dimensions.
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads

        # Patch-embedding geometry.
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_patches = num_patches
        self.merge_size = merge_size

        # Activation, normalization, and regularization.
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.attention_dropout = attention_dropout

        # Positional-encoding options.
        self.preserve_original_pe = preserve_original_pe
        self.disable_rope = disable_rope
        self.rope_theta = rope_theta
|
|
|