D-eon committed (verified)
Commit: 6df7af4
Parent(s): 07ba71f

Upload folder using huggingface_hub

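The commit message above indicates the folder was pushed with huggingface_hub's folder-upload API. As a rough illustration only (not the uploader's actual script; the local path and repo id below are placeholders), such an upload typically looks like:

# Illustrative sketch: folder_path and repo_id are placeholders, not taken from this commit.
# Requires `pip install huggingface_hub` and an authenticated token (HF_TOKEN or `huggingface-cli login`).
from huggingface_hub import HfApi

api = HfApi()
api.upload_folder(
    folder_path="model/Molmo-7B-D-0924",    # local folder to push
    path_in_repo="model/Molmo-7B-D-0924",   # destination path inside the repo
    repo_id="<user-or-org>/<repo-name>",    # placeholder repo id
    repo_type="model",
    commit_message="Upload folder using huggingface_hub",
)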
model/Molmo-7B-D-0924/config.yaml ADDED
@@ -0,0 +1,794 @@
+ run_name: multitask_train
+ seed: 6198
+ epoch: null
+ dry_run: false
+ model:
+ d_model: 3584
+ n_heads: 28
+ n_kv_heads: 4
+ qkv_bias: true
+ clip_qkv: null
+ n_layers: 28
+ mlp_ratio: 4
+ mlp_hidden_size: 37888
+ activation_type: swiglu
+ block_type: sequential
+ block_group_size: 1
+ alibi: false
+ alibi_bias_max: 8.0
+ rope: true
+ rope_full_precision: true
+ rope_theta: 1000000.0
+ rope_impl: llama
+ vision_backbone:
+ image_model_type: openai
+ image_default_input_size:
+ - 336
+ - 336
+ image_patch_size: 14
+ image_pos_patch_size: 14
+ image_emb_dim: 1024
+ image_num_heads: 16
+ image_num_key_value_heads: 16
+ image_num_layers: 23
+ image_head_dim: 64
+ image_mlp_dim: 4096
+ image_mlp_activations: quick_gelu
+ image_dropout_rate: 0.0
+ image_num_pos: 577
+ image_norm_eps: 1.0e-05
+ attention_dropout: 0.0
+ residual_dropout: 0.0
+ initializer_range: 0.02
+ fsdp_wrap: false
+ resize_mode: default
+ vit_load_path: /weka/oe-training-default/mm-olmo/pretrained_image_encoders/vit-l-14-336.pt
+ llm_load_path: /weka/oe-training-default/mm-olmo/pretrained_llms/qwen2-7b.pt
+ low_cpu_fsdp: true
+ attention_type: sdpa
+ float32_attention: true
+ attention_dropout: 0.0
+ response_attention_dropout: 0.0
+ multi_query_attention: null
+ attention_layer_norm: false
+ residual_dropout: 0.1
+ response_residual_dropout: 0.0
+ embedding_dropout: 0.0
+ layer_norm_type: rms
+ layer_norm_with_affine: true
+ layer_norm_eps: 1.0e-06
+ attention_layer_norm_with_affine: true
+ max_sequence_length: 4096
+ max_position_embeddings: null
+ include_bias: false
+ bias_for_layer_norm: null
+ scale_logits: false
+ vocab_size: 152064
+ embedding_size: 152064
+ additional_vocab_size: 128
+ new_embedding_init_range: 0.02
+ weight_tying: false
+ pad_token_id: -1
+ init_device: null
+ init_fn: normal
+ init_std: 0.02
+ init_cutoff_factor: null
+ norm_after: false
+ precision: amp_bf16
+ max_crops: 12
+ crop_mode: overlap-and-resize-c2
+ do_random_scale: false
+ use_col_tokens: true
+ prompt_type: uber_model
+ system_prompt_kind: demo_or_style
+ message_formatting: role
+ always_start_with_space: true
+ prompt_override: null
+ default_inference_len: 65
+ overlap_margins:
+ - 4
+ - 4
+ image_padding_embed: pad_and_partial_pad
+ vit_layers:
+ - -2
+ - -9
+ image_pooling_h: 2
+ image_pooling_w: 2
+ image_pooling_2d: attention_meanq
+ image_projector: mlp
+ image_feature_dropout: 0.0
+ use_cls_feature: false
+ fix_image_input_idx: 2
+ unconditioned: false
+ pad_to: null
+ initializer_range: 0.02
+ pad_tokenizer: true
+ normalize_input_embeds: false
+ use_position_ids: true
+ query_pre_attn_scalar: 224
+ attn_logit_softcapping: null
+ final_logit_softcapping: null
+ head_dim: null
+ tokenizer:
+ identifier: mm:hf-Qwen/Qwen2-7B
+ truncate_direction: right
+ tokenizer_adds_space: false
+ tokenizer_dir: null
+ olmo_bos_token_id: null
+ olmo_eos_token_id: null
+ loss_token_weighting: root_subsegments
+ gin_bindings: null
+ ft_llm: true
+ ft_vit: true
+ ft_connector: true
+ ft_embedding: lm_head
+ optimizer:
+ name: adamw
+ learning_rate: 0.0001
+ weight_decay: 0.01
+ betas:
+ - 0.9
+ - 0.95
+ eps: 1.0e-05
+ connector_learning_rate: 5.0e-06
+ vit_learning_rate: 5.0e-06
+ llm_learning_rate: 1.0e-05
+ connector_weight_decay: 0.0
+ vit_weight_decay: 0.0
+ llm_weight_decay: 0.0
+ connector_betas:
+ - 0.9
+ - 0.95
+ vit_betas:
+ - 0.9
+ - 0.95
+ llm_betas:
+ - 0.9
+ - 0.95
+ connector_eps: 1.0e-06
+ vit_eps: 1.0e-06
+ llm_eps: 1.0e-06
+ no_decay_norm_and_bias: null
+ decay_norm_and_bias: false
+ decay_embeddings: false
+ metrics_log_interval: 20
+ scheduler:
+ name: multimodal
+ units: steps
+ t_warmup: 100
+ t_max: null
+ alpha_f: 0.1
+ connector_t_warmup: 200
+ vit_t_warmup: 200
+ llm_t_warmup: 200
+ grad_clip_warmup_steps: null
+ grad_clip_warmup_factor: null
+ warmup_min_lr: 0.0
+ data:
+ multi_modal: true
+ mixture_or_task_name: null
+ paths: null
+ datasets: null
+ label_mask_paths: null
+ pad_direction: right
+ generate_attention_mask: false
+ num_workers: 0
+ drop_last: true
+ pin_memory: false
+ prefetch_factor: null
+ persistent_workers: false
+ timeout: 0
+ seed: null
+ instance_filter: null
+ mixture:
+ user_qa: 3.772204620047811
+ cockatoo_712k_sept6: 3.161736404536387
+ synthetic_qa_v3_as_user_qa: 5.70612401246295
+ point_qa: 2.359934962952852
+ coco_2014_vqa_multi: 3.067124765028162
+ text_vqa: 1.982948627824974
+ okvqa: 1.011810438733119
+ chart_qa_weighted: 1.793272988041348
+ doc_qa: 2.1176584276768065
+ info_qa: 1.6495950748744952
+ ai2_diagram_v2_mix_transparent: 1.307415575130731
+ a_okvqa_mc: 1.3921930323623322
+ a_okvqa_da: 1.3921930323623322
+ android_control: 2.91381420538637
+ science_qa_img: 0.8405938747465043
+ tabwmp_da: 1.618754903035765
+ st_qa: 1.6871928539865735
+ tally_qa: 3.8873680368227395
+ clocks: 5.330044888030191
+ scifi_charts_qa: 5.152496802868815
+ scifi_table_qa: 3.251519607143135
+ scifi_document_qa: 4.024928615904329
+ scifi_diagram_qa: 1.9394919410872649
+ dv_qa: 1.0660089776060382
+ figure_qa: 1.0660089776060382
+ plot_qa: 1.507564353741936
+ pointing: 8.963869696866821
+ pointing_high_freq: 5.438911590336815
+ fast_flickr_count_qa_pointing: 3.0972187127963617
+ point_count: 8.963869696866821
+ point_count_high_freq: 5.438911590336815
+ fast_flickr_count_qa_point_count: 3.0972187127963617
+ sequence_length: 2304
+ shuffle: true
+ for_inference: false
+ split: train
+ use_memory_cache: false
+ num_epochs: null
+ shuffle_buffer_size: 200
+ per_node_data_loader: null
+ restore_dataloader: true
+ fast_forward_batches: null
+ evaluators: []
+ eval_interval: 2000
+ inf_eval_interval: 2000
+ inf_evaluators:
+ - label: chart_qa
+ type: multi_modal
+ data:
+ multi_modal: true
+ mixture_or_task_name: chart_qa
+ paths: null
+ datasets: null
+ label_mask_paths: null
+ pad_direction: right
+ generate_attention_mask: false
+ num_workers: 0
+ drop_last: false
+ pin_memory: false
+ prefetch_factor: null
+ persistent_workers: false
+ timeout: 0
+ seed: null
+ instance_filter: null
+ mixture: null
+ sequence_length: 1792
+ shuffle: false
+ for_inference: true
+ split: validation
+ use_memory_cache: false
+ num_epochs: 1
+ shuffle_buffer_size: 1000
+ per_node_data_loader: null
+ device_eval_batch_size: null
+ subset_num_batches: -1
+ max_new_tokens: 16
+ mm_evaluator:
+ cider: ''
+ num_wandb_examples: 32
+ ptb_tokenizer: false
+ save_html: 0
+ save_predictions: null
+ named_entity_eval: false
+ save_tokens: false
+ vqa_eval: scifi_relaxed_correctness,relaxed_correctness,em
+ n_to_log: 0
+ mme_eval: false
+ mmbench_eval: false
+ sugar_crepe_eval: false
+ pointing_eval: false
+ count_eval: false
+ point_count_eval: false
+ refexp_eval: false
+ pointing: false
+ android_eval: false
+ clock_eval: false
+ gpt_eval: null
+ save_dir: null
+ save_to_checkpoint_dir: false
+ eval_name: null
+ skip_if_metrics_cached: true
+ - label: info_qa
+ type: multi_modal
+ data:
+ multi_modal: true
+ mixture_or_task_name: info_qa
+ paths: null
+ datasets: null
+ label_mask_paths: null
+ pad_direction: right
+ generate_attention_mask: false
+ num_workers: 0
+ drop_last: true
+ pin_memory: false
+ prefetch_factor: null
+ persistent_workers: false
+ timeout: 0
+ seed: null
+ instance_filter: null
+ mixture: null
+ sequence_length: 1792
+ shuffle: false
+ for_inference: true
+ split: validation
+ use_memory_cache: false
+ num_epochs: null
+ shuffle_buffer_size: 200
+ per_node_data_loader: null
+ device_eval_batch_size: null
+ subset_num_batches: 4
+ max_new_tokens: 16
+ mm_evaluator:
+ cider: ''
+ num_wandb_examples: 32
+ ptb_tokenizer: false
+ save_html: 0
+ save_predictions: null
+ named_entity_eval: false
+ save_tokens: false
+ vqa_eval: ansl,em
+ n_to_log: 0
+ mme_eval: false
+ mmbench_eval: false
+ sugar_crepe_eval: false
+ pointing_eval: false
+ count_eval: false
+ point_count_eval: false
+ refexp_eval: false
+ pointing: false
+ android_eval: false
+ clock_eval: false
+ gpt_eval: null
+ save_dir: null
+ save_to_checkpoint_dir: false
+ eval_name: null
+ skip_if_metrics_cached: true
+ - label: doc_qa
+ type: multi_modal
+ data:
+ multi_modal: true
+ mixture_or_task_name: doc_qa
+ paths: null
+ datasets: null
+ label_mask_paths: null
+ pad_direction: right
+ generate_attention_mask: false
+ num_workers: 0
+ drop_last: true
+ pin_memory: false
+ prefetch_factor: null
+ persistent_workers: false
+ timeout: 0
+ seed: null
+ instance_filter: null
+ mixture: null
+ sequence_length: 1792
+ shuffle: false
+ for_inference: true
+ split: validation
+ use_memory_cache: false
+ num_epochs: null
+ shuffle_buffer_size: 200
+ per_node_data_loader: null
+ device_eval_batch_size: null
+ subset_num_batches: 4
+ max_new_tokens: 16
+ mm_evaluator:
+ cider: ''
+ num_wandb_examples: 32
+ ptb_tokenizer: false
+ save_html: 0
+ save_predictions: null
+ named_entity_eval: false
+ save_tokens: false
+ vqa_eval: ansl,em
+ n_to_log: 0
+ mme_eval: false
+ mmbench_eval: false
+ sugar_crepe_eval: false
+ pointing_eval: false
+ count_eval: false
+ point_count_eval: false
+ refexp_eval: false
+ pointing: false
+ android_eval: false
+ clock_eval: false
+ gpt_eval: null
+ save_dir: null
+ save_to_checkpoint_dir: false
+ eval_name: null
+ skip_if_metrics_cached: true
+ - label: a_okvqa_da
+ type: multi_modal
+ data:
+ multi_modal: true
+ mixture_or_task_name: a_okvqa_da
+ paths: null
+ datasets: null
+ label_mask_paths: null
+ pad_direction: right
+ generate_attention_mask: false
+ num_workers: 0
+ drop_last: false
+ pin_memory: false
+ prefetch_factor: null
+ persistent_workers: false
+ timeout: 0
+ seed: null
+ instance_filter: null
+ mixture: null
+ sequence_length: 1792
+ shuffle: false
+ for_inference: true
+ split: validation
+ use_memory_cache: false
+ num_epochs: 1
+ shuffle_buffer_size: 1000
+ per_node_data_loader: null
+ device_eval_batch_size: null
+ subset_num_batches: -1
+ max_new_tokens: 16
+ mm_evaluator:
+ cider: ''
+ num_wandb_examples: 32
+ ptb_tokenizer: false
+ save_html: 0
+ save_predictions: null
+ named_entity_eval: false
+ save_tokens: false
+ vqa_eval: a_okvqa_score
+ n_to_log: 0
+ mme_eval: false
+ mmbench_eval: false
+ sugar_crepe_eval: false
+ pointing_eval: false
+ count_eval: false
+ point_count_eval: false
+ refexp_eval: false
+ pointing: false
+ android_eval: false
+ clock_eval: false
+ gpt_eval: null
+ save_dir: null
+ save_to_checkpoint_dir: false
+ eval_name: null
+ skip_if_metrics_cached: true
+ - label: ai2_diagram
+ type: multi_modal
+ data:
+ multi_modal: true
+ mixture_or_task_name: ai2_diagram_v2_mix_transparent
+ paths: null
+ datasets: null
+ label_mask_paths: null
+ pad_direction: right
+ generate_attention_mask: false
+ num_workers: 0
+ drop_last: false
+ pin_memory: false
+ prefetch_factor: null
+ persistent_workers: false
+ timeout: 0
+ seed: null
+ instance_filter: null
+ mixture: null
+ sequence_length: 1792
+ shuffle: false
+ for_inference: true
+ split: validation
+ use_memory_cache: false
+ num_epochs: 1
+ shuffle_buffer_size: 1000
+ per_node_data_loader: null
+ device_eval_batch_size: null
+ subset_num_batches: -1
+ max_new_tokens: 16
+ mm_evaluator:
+ cider: ''
+ num_wandb_examples: 32
+ ptb_tokenizer: false
+ save_html: 0
+ save_predictions: null
+ named_entity_eval: false
+ save_tokens: false
+ vqa_eval: mc_ai2d_opaque,mc_ai2d_transparent
+ n_to_log: 0
+ mme_eval: false
+ mmbench_eval: false
+ sugar_crepe_eval: false
+ pointing_eval: false
+ count_eval: false
+ point_count_eval: false
+ refexp_eval: false
+ pointing: false
+ android_eval: false
+ clock_eval: false
+ gpt_eval: null
+ save_dir: null
+ save_to_checkpoint_dir: false
+ eval_name: null
+ skip_if_metrics_cached: true
+ - label: clocks
+ type: multi_modal
+ data:
+ multi_modal: true
+ mixture_or_task_name: clocks
+ paths: null
+ datasets: null
+ label_mask_paths: null
+ pad_direction: right
+ generate_attention_mask: false
+ num_workers: 0
+ drop_last: true
+ pin_memory: false
+ prefetch_factor: null
+ persistent_workers: false
+ timeout: 0
+ seed: null
+ instance_filter: null
+ mixture: null
+ sequence_length: 1792
+ shuffle: false
+ for_inference: true
+ split: validation
+ use_memory_cache: false
+ num_epochs: null
+ shuffle_buffer_size: 200
+ per_node_data_loader: null
+ device_eval_batch_size: null
+ subset_num_batches: 4
+ max_new_tokens: 16
+ mm_evaluator:
+ cider: ''
+ num_wandb_examples: 32
+ ptb_tokenizer: false
+ save_html: 0
+ save_predictions: null
+ named_entity_eval: false
+ save_tokens: false
+ vqa_eval: ''
+ n_to_log: 0
+ mme_eval: false
+ mmbench_eval: false
+ sugar_crepe_eval: false
+ pointing_eval: false
+ count_eval: false
+ point_count_eval: false
+ refexp_eval: false
+ pointing: false
+ android_eval: false
+ clock_eval: true
+ gpt_eval: null
+ save_dir: null
+ save_to_checkpoint_dir: false
+ eval_name: null
+ skip_if_metrics_cached: true
+ - label: android_control_ll
+ type: multi_modal
+ data:
+ multi_modal: true
+ mixture_or_task_name: android_control_ll
+ paths: null
+ datasets: null
+ label_mask_paths: null
+ pad_direction: right
+ generate_attention_mask: false
+ num_workers: 0
+ drop_last: false
+ pin_memory: false
+ prefetch_factor: null
+ persistent_workers: false
+ timeout: 0
+ seed: null
+ instance_filter: null
+ mixture: null
+ sequence_length: 1792
+ shuffle: false
+ for_inference: true
+ split: validation
+ use_memory_cache: false
+ num_epochs: 1
+ shuffle_buffer_size: 1000
+ per_node_data_loader: null
+ device_eval_batch_size: null
+ subset_num_batches: -1
+ max_new_tokens: 16
+ mm_evaluator:
+ cider: ''
+ num_wandb_examples: 32
+ ptb_tokenizer: false
+ save_html: 0
+ save_predictions: null
+ named_entity_eval: false
+ save_tokens: false
+ vqa_eval: ''
+ n_to_log: 0
+ mme_eval: false
+ mmbench_eval: false
+ sugar_crepe_eval: false
+ pointing_eval: false
+ count_eval: false
+ point_count_eval: false
+ refexp_eval: false
+ pointing: false
+ android_eval: true
+ clock_eval: false
+ gpt_eval: null
+ save_dir: null
+ save_to_checkpoint_dir: false
+ eval_name: null
+ skip_if_metrics_cached: true
+ - label: pointing_test
+ type: multi_modal
+ data:
+ multi_modal: true
+ mixture_or_task_name: pointing_test
+ paths: null
+ datasets: null
+ label_mask_paths: null
+ pad_direction: right
+ generate_attention_mask: false
+ num_workers: 0
+ drop_last: false
+ pin_memory: false
+ prefetch_factor: null
+ persistent_workers: false
+ timeout: 0
+ seed: null
+ instance_filter: null
+ mixture: null
+ sequence_length: 1792
+ shuffle: false
+ for_inference: true
+ split: test
+ use_memory_cache: false
+ num_epochs: 1
+ shuffle_buffer_size: 1000
+ per_node_data_loader: null
+ device_eval_batch_size: null
+ subset_num_batches: -1
+ max_new_tokens: 384
+ mm_evaluator:
+ cider: ''
+ num_wandb_examples: 32
+ ptb_tokenizer: false
+ save_html: 0
+ save_predictions: null
+ named_entity_eval: false
+ save_tokens: false
+ vqa_eval: ''
+ n_to_log: 0
+ mme_eval: false
+ mmbench_eval: false
+ sugar_crepe_eval: false
+ pointing_eval: false
+ count_eval: false
+ point_count_eval: false
+ refexp_eval: false
+ pointing: true
+ android_eval: false
+ clock_eval: false
+ gpt_eval: null
+ save_dir: null
+ save_to_checkpoint_dir: false
+ eval_name: null
+ skip_if_metrics_cached: true
+ - label: countbench_qa
+ type: multi_modal
+ data:
+ multi_modal: true
+ mixture_or_task_name: countbench_qa
+ paths: null
+ datasets: null
+ label_mask_paths: null
+ pad_direction: right
+ generate_attention_mask: false
+ num_workers: 0
+ drop_last: false
+ pin_memory: false
+ prefetch_factor: null
+ persistent_workers: false
+ timeout: 0
+ seed: null
+ instance_filter: null
+ mixture: null
+ sequence_length: 1792
+ shuffle: false
+ for_inference: true
+ split: huggingface
+ use_memory_cache: false
+ num_epochs: 1
+ shuffle_buffer_size: 1000
+ per_node_data_loader: null
+ device_eval_batch_size: null
+ subset_num_batches: -1
+ max_new_tokens: 384
+ mm_evaluator:
+ cider: ''
+ num_wandb_examples: 32
+ ptb_tokenizer: false
+ save_html: 0
+ save_predictions: null
+ named_entity_eval: false
+ save_tokens: false
+ vqa_eval: ''
+ n_to_log: 0
+ mme_eval: false
+ mmbench_eval: false
+ sugar_crepe_eval: false
+ pointing_eval: false
+ count_eval: false
+ point_count_eval: true
+ refexp_eval: false
+ pointing: false
+ android_eval: false
+ clock_eval: false
+ gpt_eval: null
+ save_dir: null
+ save_to_checkpoint_dir: false
+ eval_name: null
+ skip_if_metrics_cached: true
+ save_folder: /weka/oe-training-default/chrisc/cockatoo/models/uber-model-v11/7b-5510-3.2-synthetic
+ remote_save_folder: null
+ canceled_check_interval: 50
+ save_interval: 4000
+ save_interval_unsharded: 30000
+ save_interval_ephemeral: null
+ save_num_checkpoints_to_keep: 1
+ save_num_unsharded_checkpoints_to_keep: -1
+ save_overwrite: true
+ force_save_unsharded: false
+ no_pre_train_checkpoint: true
+ initial_model_checkpoint: /weka/oe-training-default/chrisc/cockatoo/models/dense-captioner-v22-qwen2/v2-lr2620/step22300-unsharded
+ load_model_config: null
+ load_path: null
+ load_path_sharded_checkpointer: null
+ reset_optimizer_state: false
+ reset_trainer_state: false
+ save_dataloader_state: false
+ reset_dataloader_state: false
+ sharded_checkpointer: torch_legacy
+ new_style_checkpoints: null
+ max_duration: 30000
+ global_train_batch_size: 256
+ device_train_batch_size: 2
+ device_train_microbatch_size: 2
+ device_eval_batch_size: 4
+ eval_subset_num_batches: 4
+ eval_on_load: false
+ device_inf_eval_batch_size: 4
+ inf_eval_subset_num_batches: -1
+ device_train_grad_accum: 1
+ max_grad_norm: 1.0
+ batch_divisor: global_batch
+ max_grad_norm_ratio: null
+ precision: amp_bf16
+ wandb:
+ project: cockatoo
+ entity: prior-ai2
+ group: uber-model-v11
+ name: 7b-5510-3.2-synthetic
+ tags:
+ - watching
+ log_artifacts: false
+ rank_zero_only: true
+ log_interval: 20
+ speed_monitor:
+ window_size: 20
+ gpu_flops_available: null
+ console_log_interval: 20
+ gen1_gc_interval: 1
+ compile: null
+ fsdp:
+ use_orig_params: true
+ sharding_strategy: FULL_SHARD
+ wrapping_strategy: by_block_and_size
+ precision: float
+ hybrid_sharding_num_model_replicas: null
+ softmax_auxiliary_loss: true
+ softmax_auxiliary_loss_scale: 0.0001
+ time_limit: null
+ extra_steps_after_cancel: 10
+ early_stopping_factor: null
+ save_data_indices: true
+ python_profiling: false
+ torch_profiling: false
+ stop_at: 30000
+ stop_after: null
+ activation_checkpointing: one_in_two
+ fused_loss: null
+ tfds_dir: /weka/oe-training-default/mm-olmo/tensorflow_datasets
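Since the added file is plain YAML, its fields can be inspected programmatically once a local copy has been downloaded. A minimal sketch, assuming PyYAML is installed and the file has been saved locally as config.yaml (the key accesses follow the nesting suggested by the section headers above):

# Minimal sketch: requires `pip install pyyaml` and a local copy of config.yaml.
import yaml

with open("config.yaml") as f:
    cfg = yaml.safe_load(f)

print(cfg["run_name"])                    # multitask_train
print(cfg["model"]["d_model"])            # 3584
print(cfg["optimizer"]["learning_rate"])  # 0.0001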
model/Molmo-7B-D-0924/model.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ccbcdeaf570dcdc90d022c27431652719da0afb6fd22f51acf56ea2696cfaff0
+ size 32084399338
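The three lines above are a Git LFS pointer, not the weights themselves: the actual model.pt blob (roughly 32 GB, per the size field) lives in LFS storage. One common way to materialize the real file is hf_hub_download from huggingface_hub; a sketch, with the repo id left as a placeholder since it is not shown in this diff:

# Sketch only: repo_id is a placeholder. hf_hub_download resolves the LFS pointer
# and fetches the actual ~32 GB file into the local Hugging Face cache.
from huggingface_hub import hf_hub_download

local_path = hf_hub_download(
    repo_id="<user-or-org>/<repo-name>",
    filename="model/Molmo-7B-D-0924/model.pt",
)
print(local_path)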