ellisbrown committed
Commit 3889e23 · 1 Parent(s): 3a50856

add video shards
README.md CHANGED
@@ -68,6 +68,9 @@ dataset_info:
   dataset_size: 413638
 ---
 
+> [!IMPORTANT]
+> This is a fork of the original [`yale-nlp/TOMATO`](https://huggingface.co/datasets/yale-nlp/TOMATO) HF dataset. We have uploaded the `videos.zip` file directly here so that eval with `lmms-eval` works out of the box, without a separate download step.
+
 # 🍅 TOMATO
 
 [**📄 Paper**](https://arxiv.org/abs/2410.23266) | [**💻 Code**](https://github.com/yale-nlp/TOMATO) | [**🎬 Videos**](https://drive.google.com/file/d/1-dNt9bZcp6C3RXuGoAO3EBgWkAHg8NWR/view?usp=drive_link)
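For reference, here is a minimal consumer-side sketch of pulling and unpacking the shards. The repo id `ellisbrown/TOMATO` is an assumption based on the committer name, not something this commit states; it also assumes GNU tar and zstd are installed.

```python
# Hypothetical consumer-side sketch: the repo id is an assumption, not
# confirmed by this commit. Requires huggingface_hub, GNU tar, and zstd.
import subprocess
from pathlib import Path

from huggingface_hub import snapshot_download

local_dir = snapshot_download(
    repo_id="ellisbrown/TOMATO",  # assumption: the fork's repo id
    repo_type="dataset",
    allow_patterns=["video_shard_*.tar.zst"],
)

# Shard members are stored relative to the repo root (videos/<dir>/<file>.mp4),
# so extracting in place recreates the videos/ tree. GNU tar appends -d to the
# compress program when reading, so plain "zstd -T0" decompresses here.
for shard in sorted(Path(local_dir).glob("video_shard_*.tar.zst")):
    subprocess.run(
        ["tar", "--use-compress-program", "zstd -T0",
         "-xf", str(shard), "-C", local_dir],
        check=True,
    )
```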
tar_videos.py ADDED
@@ -0,0 +1,103 @@
+#!/usr/bin/env python3
+# /// script
+# requires-python = ">=3.10"
+# ///
+"""
+Tar the videos in the TOMATO dataset into zstd-compressed shards of at most
+TARGET_MAX_GB (2.25 GiB) each.
+"""
+
+import random
+import subprocess
+from pathlib import Path
+
+# ---- Config ----
+root = Path(__file__).parent
+seed = 42
+TARGET_MAX_GB = 2.25                           # hard cap per shard: 2.25 GiB
+TARGET_MAX_BYTES = TARGET_MAX_GB * 1024**3     # hard cap in bytes
+SAFETY_MARGIN = 256 * 1024**2                  # keep ~256 MiB headroom
+PACK_LIMIT = TARGET_MAX_BYTES - SAFETY_MARGIN  # packing budget per shard
+SHARD_PREFIX = "video_shard"                   # output name prefix
+ZSTD_ARGS = "zstd -T0 -3"                      # multithreaded, modest level
+DRY_RUN = False                                # set True to preview
+EXTENSIONS = [".mp4"]                          # video extensions
+
+# ---- 1) Collect & shuffle globally ----
+# Adjust the filter to match only video extensions if needed (mp4, mkv, etc.)
+print(f"Collecting video files from {root}...")
+video_files = sorted(root.rglob("videos/*/*.mp4"))
+print(f"Found {len(video_files)} files")
+
+# filter to only video files
+video_files = [f for f in video_files if f.suffix.lower() in EXTENSIONS]
+print(f"Filtered to {len(video_files)} video files")
+
+random.seed(seed)
+random.shuffle(video_files)
+
+def human(n):
+    for u in ["B", "KiB", "MiB", "GiB", "TiB"]:
+        if n < 1024 or u == "TiB":
+            return f"{n:.1f} {u}"
+        n /= 1024
+
+# ---- 2) Greedy size-based packing ----
+shards = []
+cur_list, cur_bytes = [], 0
+
+for f in video_files:
+    sz = f.stat().st_size
+    # If a single file exceeds PACK_LIMIT, put it in its own shard so the
+    # size cap is still respected for everything else
+    if sz > PACK_LIMIT:
+        if cur_list:
+            shards.append(cur_list)
+            cur_list, cur_bytes = [], 0
+        shards.append([f])
+        continue
+
+    if cur_bytes + sz > PACK_LIMIT and cur_list:
+        shards.append(cur_list)
+        cur_list, cur_bytes = [f], sz
+    else:
+        cur_list.append(f)
+        cur_bytes += sz
+
+if cur_list:
+    shards.append(cur_list)
+
+print(f"Planned {len(shards)} shards")
+for i, s in enumerate(shards):
+    total = sum(p.stat().st_size for p in s)
+    print(f"  {i:03d}: {len(s)} files, ~{human(total)} uncompressed")
+
+# ---- 3) Materialize shards with tar -T and zstd ----
+for i, shard in enumerate(shards):
+    listfile = root / f".tarlist_{i:03d}.txt"
+    listfile.write_text("\n".join(str(p.relative_to(root)) for p in shard))
+
+    tar_name = f"{SHARD_PREFIX}_{i:03d}.tar.zst"
+    tar_path = root / tar_name
+
+    cmd = [
+        "tar",
+        "--use-compress-program", ZSTD_ARGS,
+        "-cf", str(tar_path),
+        "-C", str(root),
+        "--checkpoint=1000",
+        "--checkpoint-action=dot",
+        "-T", str(listfile),
+    ]
+    print(f"\nCreating {tar_name} with {len(shard)} files...")
+    print("Command:", " ".join(cmd))
+
+    if not DRY_RUN:
+        # GNU tar hands the --use-compress-program value to a shell itself,
+        # so the multi-word ZSTD_ARGS string works without shell=True
+        subprocess.run(cmd, check=True)
+        listfile.unlink(missing_ok=True)
+        # Optional: hard assert the size cap
+        out_sz = tar_path.stat().st_size
+        if out_sz > TARGET_MAX_BYTES:
+            raise RuntimeError(
+                f"{tar_name} is {human(out_sz)}, which exceeds the "
+                f"{TARGET_MAX_GB} GiB cap"
+            )
+    else:
+        print("DRY RUN — not executing tar")
video_shard_000.tar.zst ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ad8306ee0fa091c93a0bd5df68a08a905a34cfb84e275fac188084b77c3b56e9
+size 2116126950
video_shard_001.tar.zst ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7a2830777a10216d85428d8b455a2535d7b81dc327b840cf2920ecebf100c09a
+size 2091538640
video_shard_002.tar.zst ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b8fd07c2a11a2c9a5dc60edb5cbf06fe6166e54be6c9d0f8383c083104471114
+size 2132517052
video_shard_003.tar.zst ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:de35cd0eff41db8323712500edf67dd0e6224f7caf99f832de8ccf06a5616171
+size 2106316428
video_shard_004.tar.zst ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ae26854d69924a24a0ed9eaef291aab1101c579fc098a535008c3d528bb50258
+size 2128406059
video_shard_005.tar.zst ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:99c76cc03e44e54b83a4fbc7c66c294ad06c0378618e46eb51eba55a01483002
+size 1452639983
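The `oid sha256` lines in these pointers double as checksums for the downloaded shards. A minimal verification sketch; the digest below is copied from the `video_shard_000.tar.zst` pointer above:

```python
# Verify a downloaded shard against the sha256 oid from its LFS pointer.
import hashlib
from pathlib import Path

def sha256_of(path: Path, chunk: int = 1 << 20) -> str:
    h = hashlib.sha256()
    with path.open("rb") as f:
        while block := f.read(chunk):
            h.update(block)
    return h.hexdigest()

# oid from the video_shard_000.tar.zst pointer above
expected = "ad8306ee0fa091c93a0bd5df68a08a905a34cfb84e275fac188084b77c3b56e9"
actual = sha256_of(Path("video_shard_000.tar.zst"))
assert actual == expected, f"checksum mismatch: {actual}"
```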