import json
import os

import datasets

_DESCRIPTION = "Dataset with video and audio references for epic and ego4d tasks."
_HOMEPAGE = "https://huggingface.co/datasets/gorjanradevski/dave"
_LICENSE = "MIT"

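# Metadata fields whose values are media file paths; _generate_examples rewrites them to local extracted paths.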
_MEDIA_FIELDS = [
    "compressed_video_path",
    "event_video_path",
    "video_with_overlayed_audio_path",
    "silent_video_path",
    "overlayed_audio_path",
]

def count_files_in_directory(directory):
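    # Recursively count all files under `directory`; used only for logging after extraction.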
    return sum(len(files) for _, _, files in os.walk(directory))

class DaveDataset(datasets.GeneratorBasedBuilder):
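    """Dataset builder yielding video/audio reference records for the ego4d and epic splits."""
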
    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features({
                "compressed_video_path": datasets.Value("string"),
                "overlayed_event_index": datasets.Value("int32"),
                "events": [
                    {
                        "start": datasets.Value("string"),
                        "end": datasets.Value("string"),
                        "duration": datasets.Value("float64"),
                        "narration": datasets.Value("string"),
                        "action": datasets.Value("string"),
                        "raw_narration": datasets.Value("string"),
                    }
                ],
                "event_video_path": datasets.Value("string"),
                "audio_class": datasets.Value("string"),
                "video_with_overlayed_audio_path": datasets.Value("string"),
                "silent_video_path": datasets.Value("string"),
                "overlayed_audio_path": datasets.Value("string"),
                "video_id": datasets.Value("string"),
                "participant_id": datasets.Value("string"),
                "type": datasets.Value("string"),
                "raw_choices_simple_audio_classification": datasets.Sequence(datasets.Value("string")),
                "raw_choices_overlayed_full_audio_classification": datasets.Sequence(datasets.Value("string")),
                "raw_choices_video_segment": datasets.Sequence(datasets.Value("string")),
                "correct_temporal_order": datasets.Sequence(datasets.Value("string")),
                "raw_choices_temporal_video": datasets.Sequence(datasets.Value("string")),
                "raw_choices_multimodal": datasets.Sequence(datasets.Value("string")),
                "raw_choices_silent_video": datasets.Sequence(datasets.Value("string")),
                "raw_choices_audio": datasets.Sequence(datasets.Value("string")),
                "raw_choices_text_only": datasets.Sequence(datasets.Value("string")),
                "raw_choices_pipeline_event_classification": datasets.Sequence(datasets.Value("string")),
            }),
            homepage=_HOMEPAGE,
            license=_LICENSE,
        )

    def _split_generators(self, dl_manager):
        base_url = "https://huggingface.co/datasets/gorjanradevski/dave/resolve/main/"
        
        # JSON metadata files and media archives for the two splits (ego4d and epic)
        json_files = {"ego4d": "ego4d.json", "epic": "epic.json"}
        zip_urls = {"ego4d": base_url + "ego4d.zip", "epic": base_url + "epic.zip"}
        
        split_generators = []
        for split_name, json_file in json_files.items():
            # Download JSON metadata file
            json_path = dl_manager.download(base_url + json_file)
            
            # Download and extract ZIP file
            print(f"Downloading and extracting {split_name}.zip...")
            extracted_dir = dl_manager.download_and_extract(zip_urls[split_name])

            print(f"Extracted to: {extracted_dir}")
            print(f"Total number of files extracted: {count_files_in_directory(extracted_dir)}")
            
            # download_and_extract returns a str path, or a dict keyed by URL when given multiple URLs
            if isinstance(extracted_dir, str):
                files_dir = extracted_dir
            else:
                files_dir = extracted_dir[zip_urls[split_name]]
                
            split_generators.append(
                datasets.SplitGenerator(
                    name=split_name,
                    gen_kwargs={
                        "json_path": json_path,
                        "files_dir": files_dir,
                        "split_name": split_name,
                    },
                )
            )

        return split_generators

    def _generate_examples(self, json_path, files_dir, split_name):
        with open(json_path, "r", encoding="utf-8") as f:
            data = json.load(f)
        
        print(f"Processing {split_name} split with extracted files in {files_dir}")
        
        # The archive extracts into a '<split_name>_files' subdirectory; resolve it here
        extracted_root = files_dir
        files_dir = os.path.join(extracted_root, f"{split_name}_files")
        if not os.path.exists(files_dir):
            # List the extraction root (not the missing path) so the diagnostic itself cannot fail
            print(f"Warning: '{split_name}_files' directory not found in {extracted_root}")
            print(f"Available directories: {os.listdir(extracted_root)}")
            raise ValueError(f"Could not find '{split_name}_files' directory at {files_dir}")
        
        # Create a mapping of original file paths to local file paths
        file_mapping = {}
        
        for idx, item in enumerate(data):
            # Debug first item
            if idx == 0:
                print(f"Processing first item: {item.get('video_id', 'unknown')}")
            
            # Replace file paths with local paths for all media fields
            all_fields_resolved = True
            for field in _MEDIA_FIELDS:
                if field not in item or not item[field]:
                    continue
                
                original_path = item[field]
                
                # Check if we already processed this file path
                if original_path in file_mapping:
                    item[field] = file_mapping[original_path]
                    continue
                
                # Extract file name and construct local path
                file_name = os.path.basename(original_path)
                local_path = os.path.join(files_dir, file_name)
                
                # Check if the file exists
                if os.path.exists(local_path):
                    item[field] = local_path
                    file_mapping[original_path] = local_path
                else:
                    print(f"Warning: File not found for {field}: {local_path}")
                    all_fields_resolved = False
                    break
            
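            # Only yield items whose media fields were all resolved to local paths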
            if all_fields_resolved:
                yield idx, item
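

if __name__ == "__main__":
    # Minimal usage sketch (an illustrative assumption, not part of the builder itself):
    # with this script saved as the repository's loading script, both splits can be loaded
    # from the Hub. Depending on the installed `datasets` version, loading-script datasets
    # may require trust_remote_code=True.
    from datasets import load_dataset

    ds = load_dataset("gorjanradevski/dave", trust_remote_code=True)
    print(ds)
    print(ds["epic"][0]["audio_class"])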