danielhanchen committed
Commit 4b9740e · verified · 1 parent: df03bda

Add files using upload-large-folder tool

.gitattributes CHANGED
@@ -42,3 +42,9 @@ QwQ-32B-Q3_K_M.gguf filter=lfs diff=lfs merge=lfs -text
 QwQ-32B-Q6_K.gguf filter=lfs diff=lfs merge=lfs -text
 QwQ-32B-Q4_K_M.gguf filter=lfs diff=lfs merge=lfs -text
 BF16/QwQ-32B.BF16-00001-of-00002.gguf filter=lfs diff=lfs merge=lfs -text
+QwQ-32B-UD-IQ1_S.gguf filter=lfs diff=lfs merge=lfs -text
+QwQ-32B-UD-IQ1_M.gguf filter=lfs diff=lfs merge=lfs -text
+QwQ-32B-UD-IQ2_M.gguf filter=lfs diff=lfs merge=lfs -text
+QwQ-32B-UD-Q2_K_XL.gguf filter=lfs diff=lfs merge=lfs -text
+QwQ-32B-UD-IQ3_XXS.gguf filter=lfs diff=lfs merge=lfs -text
+QwQ-32B-UD-Q4_K_XL.gguf filter=lfs diff=lfs merge=lfs -text
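The hunk above registers the six new UD quant files with Git LFS, so only lightweight pointer files live in the git tree while the multi-gigabyte blobs go to LFS storage. As an aside (not part of this commit), a minimal Python sketch of how such rules can be read back out of a checkout; the default path is the conventional one:

```python
# Minimal sketch: list the patterns a .gitattributes file routes through Git LFS.
# Assumes a local checkout of the repo; the default path is hypothetical.
from pathlib import Path

def lfs_patterns(gitattributes: str = ".gitattributes") -> list[str]:
    patterns = []
    for line in Path(gitattributes).read_text().splitlines():
        parts = line.split()
        # An LFS rule looks like: <pattern> filter=lfs diff=lfs merge=lfs -text
        if len(parts) >= 2 and "filter=lfs" in parts[1:]:
            patterns.append(parts[0])
    return patterns

if __name__ == "__main__":
    print("\n".join(lfs_patterns()))
```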
QwQ-32B-UD-IQ1_M.gguf ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4c9b656e233a98a3022ecb0006a8c2a6f03ac8a372927d85ae3ccf97b0f62866
+size 8298465888
QwQ-32B-UD-IQ1_S.gguf ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ef15e30452761d008f5590221c4b8828f8fd40100835b2bd2897844ba5eb2c46
+size 7685786208
QwQ-32B-UD-IQ2_M.gguf ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f8f60020593e5f698601b79ec388937aba897a193b645d1b04067aefac67193a
+size 11495189088
QwQ-32B-UD-IQ3_XXS.gguf ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:40e30d9b5ea5cabb5a252e587750b9b3dc7aaf3a86dd27fcd9eddca9f406ddbc
+size 13052652128
QwQ-32B-UD-Q2_K_XL.gguf ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6e913a72619551b13df3ba6685911b2ea15ff88e5eaaa1882f324675fbbdfd18
+size 12702628448
QwQ-32B-UD-Q4_K_XL.gguf ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:10039b22d7e6dc4b7d561509e5db4ba404f368531ace9b06466741abf016792c
+size 20057528928
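Each ADDED .gguf entry above is a Git LFS pointer file rather than the weights themselves: three plain-text lines giving the LFS spec version, the SHA-256 of the real blob, and its size in bytes (e.g. 20057528928 bytes ≈ 18.7 GiB for the Q4_K_XL quant). A minimal sketch of how such a pointer could be parsed and a downloaded blob checked against it; the paths are hypothetical:

```python
# Minimal sketch: parse a Git LFS pointer file and verify a downloaded blob
# against its recorded SHA-256 and byte size. Paths are hypothetical.
import hashlib
from pathlib import Path

def parse_pointer(pointer_path: str) -> dict:
    fields = {}
    for line in Path(pointer_path).read_text().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    # e.g. {"version": "https://git-lfs.github.com/spec/v1",
    #       "oid": "sha256:10039b...", "size": "20057528928"}
    return fields

def verify(blob_path: str, pointer: dict) -> bool:
    blob = Path(blob_path)
    if blob.stat().st_size != int(pointer["size"]):
        return False
    h = hashlib.sha256()
    with blob.open("rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):  # 1 MiB chunks
            h.update(chunk)
    return "sha256:" + h.hexdigest() == pointer["oid"]
```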
config.json CHANGED
@@ -1,5 +1,4 @@
 {
-  "_name_or_path": "Qwen/QwQ-32B",
   "architectures": [
     "Qwen2ForCausalLM"
   ],
@@ -9,7 +8,7 @@
   "hidden_size": 5120,
   "initializer_range": 0.02,
   "intermediate_size": 27648,
-  "max_position_embeddings": 131072,
+  "max_position_embeddings": 32768,
   "max_window_layers": 64,
   "model_type": "qwen2",
   "num_attention_heads": 40,
@@ -22,7 +21,7 @@
   "sliding_window": 32768,
   "tie_word_embeddings": false,
   "torch_dtype": "bfloat16",
-  "transformers_version": "4.49.0",
+  "transformers_version": "4.51.3",
   "unsloth_fixed": true,
   "use_cache": true,
   "use_sliding_window": false,