ProgramComputer committed
Commit 263385c · verified · 1 Parent(s): 14fb964

Delete VGGFace2.py

Files changed (1)
  1. VGGFace2.py +0 -186
VGGFace2.py DELETED
@@ -1,186 +0,0 @@
- # coding=utf-8
- # Copyright 2022 The HuggingFace Datasets Authors and ProgramComputer.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
-
- # Lint as: python3
- """VGGFace2 large-scale face recognition dataset."""
-
- import json
- import os
- import re
- from urllib.parse import urlparse, parse_qs
- from getpass import getpass
- from hashlib import sha256
- from itertools import repeat
- from multiprocessing import Manager, Pool, Process
- from pathlib import Path
- from shutil import copyfileobj
- from warnings import catch_warnings, filterwarnings
- from urllib3.exceptions import InsecureRequestWarning
-
- import pandas as pd
- import requests
-
- import datasets
-
- _DESCRIPTION = "VGGFace2 is a large-scale face recognition dataset. Images are downloaded from Google Image Search and have large variations in pose, age, illumination, ethnicity and profession."
- _CITATION = """\
- @article{DBLP:journals/corr/abs-1710-08092,
-   author     = {Qiong Cao and
-                 Li Shen and
-                 Weidi Xie and
-                 Omkar M. Parkhi and
-                 Andrew Zisserman},
-   title      = {VGGFace2: {A} dataset for recognising faces across pose and age},
-   journal    = {CoRR},
-   volume     = {abs/1710.08092},
-   year       = {2017},
-   url        = {http://arxiv.org/abs/1710.08092},
-   eprinttype = {arXiv},
-   eprint     = {1710.08092},
-   timestamp  = {Wed, 04 Aug 2021 07:50:14 +0200},
-   biburl     = {https://dblp.org/rec/journals/corr/abs-1710-08092.bib},
-   bibsource  = {dblp computer science bibliography, https://dblp.org}
- }
- """
-
-
-
- _URLS = {
-     "default": {
-         "train": "https://huggingface.co/datasets/ProgramComputer/VGGFace2/resolve/main/data/vggface2_train.tar.gz",
-         "test": "https://huggingface.co/datasets/ProgramComputer/VGGFace2/resolve/main/data/vggface2_test.tar.gz",
-     }
- }
-
-
-
- class VGGFace2(datasets.GeneratorBasedBuilder):
-     """VGGFace2 is a dataset of faces collected from Google Image Search."""
-
-     VERSION = datasets.Version("1.0.0")
-
-     BUILDER_CONFIGS = [
-         datasets.BuilderConfig(version=VERSION)
-     ]
-
-     def _info(self):
-         features = {
-             "image": datasets.Image(),
-             "image_id": datasets.Value("string"),
-             "class_id": datasets.Value("string"),
-             "identity": datasets.Value("string"),
-             "gender": datasets.Value("string"),
-             "sample_num": datasets.Value("uint64"),
-             "flag": datasets.Value("bool"),
-             "male": datasets.Value("bool"),
-             "black_hair": datasets.Value("bool"),
-             "gray_hair": datasets.Value("bool"),
-             "blond_hair": datasets.Value("bool"),
-             "long_hair": datasets.Value("bool"),
-             "mustache_or_beard": datasets.Value("bool"),
-             "wearing_hat": datasets.Value("bool"),
-             "eyeglasses": datasets.Value("bool"),
-             "sunglasses": datasets.Value("bool"),
-             "mouth_open": datasets.Value("bool"),
-         }
-
-         return datasets.DatasetInfo(
-             description=_DESCRIPTION,
-             supervised_keys=datasets.info.SupervisedKeysData("image", "class_id"),
-             features=datasets.Features(features),
-             citation=_CITATION,
-         )
-
-     def _split_generators(self, dl_manager):
-         targets = [
-             "01-Male.txt",
-             "02-Black_Hair.txt",
-             "03-Brown_Hair.txt",
-             "04-Gray_Hair.txt",
-             "05-Blond_Hair.txt",
-             "06-Long_Hair.txt",
-             "07-Mustache_or_Beard.txt",
-             "08-Wearing_Hat.txt",
-             "09-Eyeglasses.txt",
-             "10-Sunglasses.txt",
-             "11-Mouth_Open.txt",
-         ]
-         # Map each attribute file to its raw URL, keyed by the attribute name
-         # (e.g. "01-Male.txt" -> "Male").
-         target_dict = dict(
-             (
-                 re.sub(r"^\d+-|\.txt$", "", target),
-                 f"https://raw.githubusercontent.com/ox-vgg/vgg_face2/master/attributes/{target}",
-             )
-             for target in targets
-         )
-         target_dict["identity"] = "https://huggingface.co/datasets/ProgramComputer/VGGFace2/raw/main/meta/identity_meta.csv"
-         metadata = dl_manager.download(target_dict)
-
-         # Download the image archives, then iterate over their members.
-         mapped_paths_train = dl_manager.iter_archive(
-             dl_manager.download(_URLS["default"]["train"])
-         )
-         mapped_paths_test = dl_manager.iter_archive(
-             dl_manager.download(_URLS["default"]["test"])
-         )
-         return [
-             datasets.SplitGenerator(
-                 name="train",
-                 gen_kwargs={
-                     "paths": mapped_paths_train,
-                     "meta_paths": metadata,
-                 },
-             ),
-             datasets.SplitGenerator(
-                 name="test",
-                 gen_kwargs={
-                     "paths": mapped_paths_test,
-                     "meta_paths": metadata,
-                 },
-             ),
-         ]
-
-     def _generate_examples(self, paths, meta_paths):
-         # Identity metadata, keyed by Class_ID.
-         meta = pd.read_csv(
-             meta_paths["identity"],
-             sep=", ",
-             engine="python",
-         )
-         # Merge each downloaded attribute file into the identity table on Class_ID.
-         for attr_name, attr_path in meta_paths.items():
-             if attr_name == "identity":
-                 continue
-             temp = pd.read_csv(attr_path, sep="\t", header=None)
-             temp.columns = ["Image_Path", attr_name]
-             temp["Class_ID"] = temp["Image_Path"].str.split("/").str[0]
-             temp.drop(columns=["Image_Path"], inplace=True)
-             meta = meta.merge(temp, on="Class_ID", how="left")
-
-         for file_path, file_obj in paths:
-             class_id = file_path.split("/")[2]
-             yield file_path, {
-                 "image": {"path": file_path, "bytes": file_obj.read()},
-                 "image_id": file_path.split("/")[-1],
-                 "class_id": class_id,
-                 # Identity and attribute columns are available in `meta` but are
-                 # not emitted here; yield them as nulls so the example matches
-                 # the schema declared in _info().
-                 **{
-                     name: None
-                     for name in (
-                         "identity", "gender", "sample_num", "flag",
-                         "male", "black_hair", "gray_hair", "blond_hair",
-                         "long_hair", "mustache_or_beard", "wearing_hat",
-                         "eyeglasses", "sunglasses", "mouth_open",
-                     )
-                 },
-             }
-
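
For context, a script-backed dataset like the file deleted above is normally consumed through datasets.load_dataset. The sketch below is hypothetical usage, assuming the repository still shipped VGGFace2.py and that the archives referenced in _URLS are accessible; the streaming and trust_remote_code arguments are illustrative choices, not something recorded in this commit.

from datasets import load_dataset

# Hypothetical invocation of the (now deleted) loading script.
# trust_remote_code opts in to executing the repository's Python script;
# streaming iterates the tar archives without downloading them in full first.
ds = load_dataset(
    "ProgramComputer/VGGFace2",
    split="train",
    streaming=True,
    trust_remote_code=True,
)

for example in ds.take(2):
    print(example["class_id"], example["image"])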