relative_path | section | filename | text
---|---|---|---|
Tools/DGLPyTorch/SyntheticGraphGeneration/demos/basic_examples | basic_examples | e2e_ieee_demo | #!/usr/bin/env python
# coding: utf-8
# Copyright 2023 NVIDIA Corporation. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# # End to end graph generation demo (IEEE)
# ## Overview
#
# In this notebook, we walk through the complete process of generating a synthetic dataset based on the IEEE dataset. The IEEE dataset contains e-commerce transactions, so it can be interpreted as a bipartite graph (user / product) with edge features (transaction info).
#
# Content:
#
# 1. [Prepare the original dataset](#1)
# 1. [Prepare SynGen Configuration](#2)
# 1. [Dataset Generation](#3)
# 1. [Tabular data evaluation](#4)
# 1. [Structure evaluation](#5)
# ### Imports
# In[1]:
# preprocessing
from syngen.preprocessing.datasets import IEEEPreprocessing
# config
from syngen.configuration import SynGenConfiguration
# generation
from syngen.synthesizer import ConfigurationGraphSynthesizer
# evaluation
from syngen.analyzer.tabular import TabularMetrics
from syngen.analyzer.graph import Graph
from syngen.analyzer.graph.stats import get_dd_simmilarity_score
from syngen.analyzer.graph.analyser import AnalysisModule
# utils
import copy
from syngen.utils.types import MetaData
# <a id="1"></a>
# ## Prepare original dataset
# SynGen requires the data to be in the SynGen dataset format (SynGen format for short), so first we transform the raw IEEE dataset into SynGen format. If you have not downloaded the IEEE dataset yet, please follow the instructions in `scripts/get_datasets.sh`.
# In[2]:
data_path = '/workspace/data/ieee-fraud'
preprocessed_path = '/workspace/data/ieee_preprocessed'
# In[3]:
preprocessing = IEEEPreprocessing(source_path=data_path, destination_path=preprocessed_path)
# In[4]:
feature_spec_original = preprocessing.transform(use_cache=True)
# In[5]:
feature_spec_original
# <a id="2"></a>
# ## Prepare SynGen Configuration
# The SynGen generation process is driven by a configuration that is a superset of the SynGen format metadata file. Let us create two configurations: a proper one that mimics the tabular and structural features of the IEEE dataset, and a random one.
# ### Proper Synthetic
# In[6]:
feature_spec_synthetic = feature_spec_original.copy()
feature_spec_synthetic[MetaData.EDGES][0][MetaData.TABULAR_GENERATORS] = [
{
MetaData.TYPE: "kde",
MetaData.FEATURES_LIST: -1, # copies all tabular features
MetaData.DATA_SOURCE: {
MetaData.TYPE: 'configuration',
MetaData.PATH: preprocessed_path,
MetaData.NAME: "user-product",
},
MetaData.PARAMS: {
}
}
]
feature_spec_synthetic[MetaData.EDGES][0][MetaData.STRUCTURE_GENERATOR] = {
MetaData.TYPE: "RMAT",
MetaData.DATA_SOURCE: {
MetaData.TYPE: 'cfg', # the same as 'configuration'
MetaData.PATH: preprocessed_path,
MetaData.NAME: "user-product",
},
MetaData.PARAMS: {
"has_self_loop": False,
}
}
# aligns 'TransactionAmt' edge feature based on the 'user-product' edges
feature_spec_synthetic[MetaData.ALIGNERS] = [
{
MetaData.TYPE: "xgboost",
MetaData.GRAPHS: ['user-product'],
MetaData.NODES: {},
MetaData.EDGES: {"user-product": ["TransactionAmt"]},
MetaData.PARAMS: {},
}
]
config_proper = SynGenConfiguration(feature_spec_synthetic)
# In[7]:
config_proper
# ### Random
# In[8]:
feature_spec_random = feature_spec_original.copy()
feature_spec_random[MetaData.EDGES][0][MetaData.TABULAR_GENERATORS] = [
{
MetaData.TYPE: "random",
MetaData.FEATURES_LIST: -1, # copies all tabular features
MetaData.DATA_SOURCE: {
MetaData.TYPE: 'random',
},
MetaData.PARAMS: {
}
}
]
feature_spec_random[MetaData.EDGES][0][MetaData.STRUCTURE_GENERATOR] = {
MetaData.TYPE: "RMAT",
MetaData.DATA_SOURCE: {
MetaData.TYPE: 'rnd', # the same as 'random'
},
MetaData.PARAMS: {
"has_self_loop": False,
}
}
config_random = SynGenConfiguration(feature_spec_random)
# In[9]:
config_random
# <a id="3"></a>
# ## Dataset Generation
# In[10]:
save_path_proper = '/workspace/data/ieee_generated'
save_path_random = '/workspace/data/ieee_random'
# ### Create Synthesizers
# In[11]:
synthesizer_proper = ConfigurationGraphSynthesizer(configuration=config_proper, save_path=save_path_proper, gpu=True)
synthesizer_random = ConfigurationGraphSynthesizer(configuration=config_random, save_path=save_path_random, gpu=True)
# ### Fit Synthesizers
# In[12]:
synthesizer_proper.fit()
# In[13]:
synthesizer_random.fit()
# ### Generation
# In[14]:
feature_spec_generated_proper = synthesizer_proper.generate()
# In[15]:
feature_spec_generated_proper
# In[16]:
feature_spec_generated_random = synthesizer_random.generate()
# In[17]:
feature_spec_generated_random
# <a id="4"></a>
# ## Tabular Data Evaluation
# In[18]:
original_tabular_data, categorical_features = feature_spec_original.get_tabular_data(MetaData.EDGES, 'user-product', return_cat_feats=True)
# In[19]:
proper_tabular_data = feature_spec_generated_proper.get_tabular_data(MetaData.EDGES, 'user-product')
# In[20]:
random_tabular_data = feature_spec_generated_random.get_tabular_data(MetaData.EDGES, 'user-product')
# In[21]:
tab_eval = TabularMetrics(original_tabular_data,
proper_tabular_data,
categorical_columns=categorical_features)
# In[22]:
tab_eval.visual_evaluation()
# In[23]:
tab_eval = TabularMetrics(original_tabular_data,
random_tabular_data,
categorical_columns=categorical_features)
# In[24]:
tab_eval.visual_evaluation()
# <a id="5"></a>
# ## Structure Evaluation
# In[25]:
original_graph_structure = feature_spec_original.get_structural_data('user-product')
proper_graph_structure = feature_spec_generated_proper.get_structural_data('user-product')
random_graph_structure = feature_spec_generated_random.get_structural_data('user-product')
# In[26]:
orig_proper = get_dd_simmilarity_score(original_graph_structure, proper_graph_structure, cdf_points=1000)
orig_random = get_dd_simmilarity_score(original_graph_structure, random_graph_structure, cdf_points=1000)
print("DEGREE SIMILLARITY SCORE")
print("ORIG vs PROPER:", orig_proper)
print("ORIG vs RANDOM:", orig_random)
# In[27]:
original_snap_graph = Graph.instantiate_from_feature_spec(feature_spec_original, 'user-product', graph_name='original')
proper_snap_graph = Graph.instantiate_from_feature_spec(feature_spec_generated_proper, 'user-product', graph_name='properly_generated')
random_snap_graph = Graph.instantiate_from_feature_spec(feature_spec_generated_random, 'user-product', graph_name='randomly_generated')
all_graphs = [original_snap_graph, proper_snap_graph, random_snap_graph]
# In[28]:
graph_analyser = AnalysisModule()
# In[29]:
df = graph_analyser.compare_graph_stats(*all_graphs)
df
# In[30]:
from matplotlib.pyplot import set_loglevel
set_loglevel('warning')
_ = graph_analyser.compare_graph_plots(*all_graphs)
# In[ ]:
|
PyTorch/SpeechRecognition/Jasper/utils | utils | download_utils | # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/usr/bin/env python
import hashlib
import requests
import os
import tarfile
import tqdm
def download_file(url, dest_folder, fname, overwrite=False):
fpath = os.path.join(dest_folder, fname)
if os.path.isfile(fpath):
if overwrite:
print("Overwriting existing file")
else:
print("File exists, skipping download.")
return
tmp_fpath = fpath + '.tmp'
if not os.path.exists(os.path.dirname(tmp_fpath)):
os.makedirs(os.path.dirname(tmp_fpath))
r = requests.get(url, stream=True)
file_size = int(r.headers['Content-Length'])
chunk_size = 1024 * 1024 # 1MB
total_chunks = int(file_size / chunk_size)
with open(tmp_fpath, 'wb') as fp:
content_iterator = r.iter_content(chunk_size=chunk_size)
chunks = tqdm.tqdm(content_iterator, total=total_chunks,
unit='MB', desc=fpath, leave=True)
for chunk in chunks:
fp.write(chunk)
os.rename(tmp_fpath, fpath)
def md5_checksum(fpath, target_hash):
file_hash = hashlib.md5()
with open(fpath, "rb") as fp:
for chunk in iter(lambda: fp.read(1024*1024), b""):
file_hash.update(chunk)
return file_hash.hexdigest() == target_hash
def extract(fpath, dest_folder):
if fpath.endswith('.tar.gz'):
mode = 'r:gz'
elif fpath.endswith('.tar'):
mode = 'r:'
else:
raise IOError('fpath has unknown extension: %s' % fpath)
with tarfile.open(fpath, mode) as tar:
members = tar.getmembers()
for member in tqdm.tqdm(iterable=members, total=len(members), leave=True):
tar.extract(path=dest_folder, member=member)
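# Example usage, shown as a minimal sketch: chain the three helpers to fetch,
# verify, and unpack an archive. The URL below points at the public LibriSpeech
# dev-clean split referenced elsewhere in this repository; the md5 value is a
# placeholder and must be replaced with the real checksum before use.
if __name__ == '__main__':
    dest = '/datasets/LibriSpeech'
    fname = 'dev-clean.tar.gz'
    download_file('https://www.openslr.org/resources/12/dev-clean.tar.gz',
                  dest, fname)
    archive = os.path.join(dest, fname)
    if not md5_checksum(archive, target_hash='<expected-md5>'):  # placeholder hash
        raise ValueError('md5 mismatch, the downloaded archive may be corrupted')
    extract(archive, dest)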
|
PyTorch/Translation/GNMT/seq2seq/data | data | config | # Copyright (c) 2017 Elad Hoffer
# Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
PAD_TOKEN = '<pad>'
UNK_TOKEN = '<unk>'
BOS_TOKEN = '<s>'
EOS_TOKEN = r'<\s>'  # raw string keeps the literal backslash without an invalid escape warning
# special PAD, UNKNOWN, BEGIN-OF-STRING, END-OF-STRING tokens
PAD, UNK, BOS, EOS = [0, 1, 2, 3]
# path to the moses detokenizer, relative to the data directory
DETOKENIZER = 'mosesdecoder/scripts/tokenizer/detokenizer.perl'
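# Illustration only (not part of the original config): the four special tokens
# are conventionally placed at the start of the vocabulary so that their
# indices match the PAD, UNK, BOS, EOS constants above. The helper below is a
# hypothetical sketch of that mapping.
def _example_token2idx(words):
    specials = [PAD_TOKEN, UNK_TOKEN, BOS_TOKEN, EOS_TOKEN]
    vocab = specials + [w for w in words if w not in specials]
    return {token: idx for idx, token in enumerate(vocab)}
# _example_token2idx(['hello', 'world'])[BOS_TOKEN] == BOS  # -> True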
|
PyTorch/Classification/ConvNets/image_classification | image_classification | autoaugment | from PIL import Image, ImageEnhance, ImageOps
import numpy as np
import random
class AutoaugmentImageNetPolicy(object):
"""
Randomly choose one of the best 24 Sub-policies on ImageNet.
Reference: https://arxiv.org/abs/1805.09501
"""
def __init__(self):
self.policies = [
SubPolicy(0.8, "equalize", 1, 0.8, "shearY", 4),
SubPolicy(0.4, "color", 9, 0.6, "equalize", 3),
SubPolicy(0.4, "color", 1, 0.6, "rotate", 8),
SubPolicy(0.8, "solarize", 3, 0.4, "equalize", 7),
SubPolicy(0.4, "solarize", 2, 0.6, "solarize", 2),
SubPolicy(0.2, "color", 0, 0.8, "equalize", 8),
SubPolicy(0.4, "equalize", 8, 0.8, "solarizeadd", 3),
SubPolicy(0.2, "shearX", 9, 0.6, "rotate", 8),
SubPolicy(0.6, "color", 1, 1.0, "equalize", 2),
SubPolicy(0.4, "invert", 9, 0.6, "rotate", 0),
SubPolicy(1.0, "equalize", 9, 0.6, "shearY", 3),
SubPolicy(0.4, "color", 7, 0.6, "equalize", 0),
SubPolicy(0.4, "posterize", 6, 0.4, "autocontrast", 7),
SubPolicy(0.6, "solarize", 8, 0.6, "color", 9),
SubPolicy(0.2, "solarize", 4, 0.8, "rotate", 9),
SubPolicy(1.0, "rotate", 7, 0.8, "translateY", 9),
SubPolicy(0.0, "shearX", 0, 0.8, "solarize", 4),
SubPolicy(0.8, "shearY", 0, 0.6, "color", 4),
SubPolicy(1.0, "color", 0, 0.6, "rotate", 2),
SubPolicy(0.8, "equalize", 4, 0.0, "equalize", 8),
SubPolicy(1.0, "equalize", 4, 0.6, "autocontrast", 2),
SubPolicy(0.4, "shearY", 7, 0.6, "solarizeadd", 7),
SubPolicy(0.8, "posterize", 2, 0.6, "solarize", 10),
SubPolicy(0.6, "solarize", 8, 0.6, "equalize", 1),
SubPolicy(0.8, "color", 6, 0.4, "rotate", 5),
]
def __call__(self, img):
policy_idx = random.randint(0, len(self.policies) - 1)
return self.policies[policy_idx](img)
def __repr__(self):
return "AutoAugment ImageNet Policy"
class SubPolicy(object):
def __init__(self, p1, method1, magnitude_idx1, p2, method2, magnitude_idx2):
operation_factory = OperationFactory()
self.p1 = p1
self.p2 = p2
self.operation1 = operation_factory.get_operation(method1, magnitude_idx1)
self.operation2 = operation_factory.get_operation(method2, magnitude_idx2)
def __call__(self, img):
if random.random() < self.p1:
img = self.operation1(img)
if random.random() < self.p2:
img = self.operation2(img)
return img
class OperationFactory:
def __init__(self):
fillcolor = (128, 128, 128)
self.ranges = {
"shearX": np.linspace(0, 0.3, 11),
"shearY": np.linspace(0, 0.3, 11),
"translateX": np.linspace(0, 250, 11),
"translateY": np.linspace(0, 250, 11),
"rotate": np.linspace(0, 30, 11),
"color": np.linspace(0.1, 1.9, 11),
"posterize": np.round(np.linspace(0, 4, 11), 0).astype(np.int),
"solarize": np.linspace(0, 256, 11),
"solarizeadd": np.linspace(0, 110, 11),
"contrast": np.linspace(0.1, 1.9, 11),
"sharpness": np.linspace(0.1, 1.9, 11),
"brightness": np.linspace(0.1, 1.9, 11),
"autocontrast": [0] * 10,
"equalize": [0] * 10,
"invert": [0] * 10
}
def rotate_with_fill(img, magnitude):
magnitude *= random.choice([-1, 1])
rot = img.convert("RGBA").rotate(magnitude)
return Image.composite(rot, Image.new("RGBA", rot.size, (128,) * 4), rot).convert(img.mode)
def solarize_add(image, addition=0, threshold=128):
lut = []
for i in range(256):
if i < threshold:
res = i + addition if i + addition <= 255 else 255
res = res if res >= 0 else 0
lut.append(res)
else:
lut.append(i)
from PIL.ImageOps import _lut
return _lut(image, lut)
self.operations = {
"shearX": lambda img, magnitude: img.transform(
img.size, Image.AFFINE, (1, magnitude * random.choice([-1, 1]), 0, 0, 1, 0),
Image.BICUBIC, fillcolor=fillcolor),
"shearY": lambda img, magnitude: img.transform(
img.size, Image.AFFINE, (1, 0, 0, magnitude * random.choice([-1, 1]), 1, 0),
Image.BICUBIC, fillcolor=fillcolor),
"translateX": lambda img, magnitude: img.transform(
img.size, Image.AFFINE, (1, 0, magnitude * random.choice([-1, 1]), 0, 1, 0),
fillcolor=fillcolor),
"translateY": lambda img, magnitude: img.transform(
img.size, Image.AFFINE, (1, 0, 0, 0, 1, magnitude * random.choice([-1, 1])),
fillcolor=fillcolor),
"rotate": lambda img, magnitude: rotate_with_fill(img, magnitude),
"color": lambda img, magnitude: ImageEnhance.Color(img).enhance(magnitude),
"posterize": lambda img, magnitude: ImageOps.posterize(img, magnitude),
"solarize": lambda img, magnitude: ImageOps.solarize(img, magnitude),
"solarizeadd": lambda img, magnitude: solarize_add(img, magnitude),
"contrast": lambda img, magnitude: ImageEnhance.Contrast(img).enhance(magnitude),
"sharpness": lambda img, magnitude: ImageEnhance.Sharpness(img).enhance(magnitude),
"brightness": lambda img, magnitude: ImageEnhance.Brightness(img).enhance(magnitude),
"autocontrast": lambda img, _: ImageOps.autocontrast(img),
"equalize": lambda img, _: ImageOps.equalize(img),
"invert": lambda img, _: ImageOps.invert(img)
}
def get_operation(self, method, magnitude_idx):
magnitude = self.ranges[method][magnitude_idx]
return lambda img: self.operations[method](img, magnitude)
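# Example usage, a minimal sketch that is not part of the original module:
# apply the ImageNet AutoAugment policy to a single PIL image. In a training
# pipeline the policy object is typically inserted into the list of
# torchvision transforms before the tensor conversion step.
if __name__ == '__main__':
    policy = AutoaugmentImageNetPolicy()
    dummy = Image.new('RGB', (224, 224), color=(128, 128, 128))  # synthetic gray image
    augmented = policy(dummy)
    print(repr(policy), augmented.size, augmented.mode)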
|
PyTorch/SpeechSynthesis/Tacotron2/waveglow | waveglow | __init__ | from .entrypoints import nvidia_waveglow
|
PyTorch/Forecasting/TFT/triton | triton | calculate_metrics | #!/usr/bin/env python3
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""
Using the `calculate_metrics.py` script, you can obtain model accuracy/error metrics using a user-defined `MetricsCalculator` class.
The data provided to `MetricsCalculator` is obtained from dump files
stored in the directory pointed to by the `--dump-dir` argument.
These files are prepared by the `run_inference_on_fw.py` and `run_inference_on_triton.py` scripts.
Output data is stored in the CSV file pointed to by the `--csv` argument.
Example call:
```shell script
python ./triton/calculate_metrics.py \
--dump-dir /results/dump_triton \
--csv /results/accuracy_results.csv \
--metrics metrics.py \
--metric-class-param1 value
```
"""
import argparse
import csv
import logging
import string
from pathlib import Path
# method from PEP-366 to support relative import in executed modules
if __package__ is None:
__package__ = Path(__file__).parent.name
from .deployment_toolkit.args import ArgParserGenerator
from .deployment_toolkit.core import BaseMetricsCalculator, load_from_file
from .deployment_toolkit.dump import JsonDumpReader
LOGGER = logging.getLogger("calculate_metrics")
TOTAL_COLUMN_NAME = "_total_"
def main():
logging.basicConfig(level=logging.INFO)
parser = argparse.ArgumentParser(description="Calculate metrics based on dumped inference outputs", allow_abbrev=False)
parser.add_argument("--metrics", help="Path to python module containing metrics calculator", required=True)
parser.add_argument("--csv", help="Path to csv file", required=True)
parser.add_argument("--dump-dir", help="Path to directory with dumped outputs (and labels)", required=True)
args, *_ = parser.parse_known_args()
MetricsCalculator = load_from_file(args.metrics, "metrics", "MetricsCalculator")
ArgParserGenerator(MetricsCalculator).update_argparser(parser)
args = parser.parse_args()
LOGGER.info("args:")
for key, value in vars(args).items():
LOGGER.info(f" {key} = {value}")
MetricsCalculator = load_from_file(args.metrics, "metrics", "MetricsCalculator")
metrics_calculator: BaseMetricsCalculator = ArgParserGenerator(MetricsCalculator).from_args(args)
reader = JsonDumpReader(args.dump_dir)
for ids, x, y_true, y_pred in reader.iterate_over(["ids", "inputs", "labels", "outputs"]):
ids = list(ids["ids"]) if ids is not None else None
metrics_calculator.update(ids=ids, x=x, y_pred=y_pred, y_real=y_true)
metrics = metrics_calculator.metrics
metric_names_with_space = [name for name in metrics if any([c in string.whitespace for c in name])]
if metric_names_with_space:
raise ValueError(f"Metric names shall have no spaces; Incorrect names: {', '.join(metric_names_with_space)}")
csv_path = Path(args.csv)
csv_path.parent.mkdir(parents=True, exist_ok=True)
with csv_path.open("w") as csv_file:
writer = csv.DictWriter(csv_file, fieldnames=list(metrics.keys()))
writer.writeheader()
writer.writerow(metrics)
if __name__ == "__main__":
main()
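# A hedged sketch (not part of this script) of a user-provided ``metrics.py``
# module loadable via ``--metrics``. It relies only on the interface exercised
# above: an ``update(ids=..., x=..., y_pred=..., y_real=...)`` method and a
# ``metrics`` property returning a flat dict of scalar values; the class name
# and base class follow the imports used in this file.
#
#     import numpy as np
#     from .deployment_toolkit.core import BaseMetricsCalculator
#
#     class MetricsCalculator(BaseMetricsCalculator):
#         def __init__(self):
#             self._abs_err, self._count = 0.0, 0
#
#         def update(self, *, ids, x, y_pred, y_real):
#             for name, pred in y_pred.items():
#                 self._abs_err += float(np.abs(pred - y_real[name]).sum())
#                 self._count += int(np.prod(pred.shape))
#
#         @property
#         def metrics(self):
#             return {"mae": self._abs_err / max(self._count, 1)}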
|
PyTorch/SpeechRecognition/QuartzNet | QuartzNet | README | # QuartzNet For PyTorch
This repository provides a script and recipe to train the QuartzNet model to achieve state-of-the-art accuracy. The content of this repository is tested and maintained by NVIDIA.
## Table Of Contents
- [Model overview](#model-overview)
* [Model architecture](#model-architecture)
* [Default configuration](#default-configuration)
* [Feature support matrix](#feature-support-matrix)
* [Features](#features)
* [Mixed precision training](#mixed-precision-training)
* [Enabling mixed precision](#enabling-mixed-precision)
* [Enabling TF32](#enabling-tf32)
* [Glossary](#glossary)
* [Language support and NeMo compatibility](#language-support-and-nemo-compatibility)
- [Setup](#setup)
* [Requirements](#requirements)
- [Quick Start Guide](#quick-start-guide)
- [Advanced](#advanced)
* [Scripts and sample code](#scripts-and-sample-code)
* [Parameters](#parameters)
* [Command-line options](#command-line-options)
* [Getting the data](#getting-the-data)
* [Dataset guidelines](#dataset-guidelines)
* [Multi-dataset](#multi-dataset)
* [Training process](#training-process)
* [Inference process](#inference-process)
- [Performance](#performance)
* [Benchmarking](#benchmarking)
* [Training performance benchmark](#training-performance-benchmark)
* [Inference performance benchmark](#inference-performance-benchmark)
* [Results](#results)
* [Training accuracy results](#training-accuracy-results)
* [Training accuracy: NVIDIA DGX A100 (8x A100 80GB)](#training-accuracy-nvidia-dgx-a100-8x-a100-80gb)
* [Training stability test](#training-stability-test)
* [Training performance results](#training-performance-results)
* [Training performance: NVIDIA DGX A100 (8x A100 80GB)](#training-performance-nvidia-dgx-a100-8x-a100-80gb)
* [Training performance: NVIDIA DGX-2 (16x V100 32GB)](#training-performance-nvidia-dgx-2-16x-v100-32gb)
* [Inference performance results](#inference-performance-results)
* [Inference performance: NVIDIA DGX A100 (1x A100 80GB)](#inference-performance-nvidia-dgx-a100-1x-a100-80gb)
* [Inference performance: NVIDIA DGX-2 (1x V100 32GB)](#inference-performance-nvidia-dgx-2-1x-v100-32gb)
- [Release notes](#release-notes)
* [Changelog](#changelog)
* [Known issues](#known-issues)
## Model overview
This repository provides an implementation of the QuartzNet model in PyTorch from the paper [QuartzNet: Deep Automatic Speech Recognition with 1D Time-Channel Separable Convolutions](https://arxiv.org/pdf/1910.10261).
The QuartzNet model is an end-to-end neural acoustic model for automatic speech recognition (ASR) that provides high accuracy at a low memory footprint. The QuartzNet architecture of convolutional layers was designed to facilitate fast GPU inference by allowing whole sub-blocks to be fused into a single GPU kernel. This is important for meeting the strict real-time requirements of ASR systems in deployment.
This repository is a PyTorch implementation of QuartzNet and provides scripts to train the QuartzNet 10x5 model from scratch on the [LibriSpeech](http://www.openslr.org/12) dataset to achieve greedy decoding results that improve upon the original paper.
The repository is self-contained and includes data preparation scripts, training, and inference scripts.
Both training and inference scripts offer the option to use Automatic Mixed Precision (AMP) to benefit from Tensor Cores for better performance.
In addition to providing the hyperparameters for training a model checkpoint, we publish a thorough inference analysis across different NVIDIA GPU platforms, for example, DGX-2, NVIDIA A100 GPU, and T4.
This model is trained with mixed precision using Tensor Cores on Volta, Turing, and the NVIDIA Ampere GPU architectures. Therefore, researchers can get results up to 1.4x faster than training without Tensor Cores, while experiencing the benefits of mixed precision training. This model is tested against each NGC monthly container release to ensure consistent accuracy and performance over time.
### Model architecture
QuartzNet is an end-to-end neural acoustic model that is based on efficient, time-channel separable convolutions (Figure 1).
In the audio processing stage, each frame is transformed into mel-scale spectrogram features, which the acoustic model takes as input and outputs a probability distribution over the vocabulary for each frame.
<p align="center">
<img src="./img/model.png" alt="QuartzNet model architecture" width="50%" />
</p>
<p align="center">
<em>Figure 1. Architecture of QuartzNet (<a href="https://arxiv.org/abs/1910.10261">source</a>)
</em>
</p>
### Default configuration
The following features were implemented in this model:
* GPU-supported feature extraction with data augmentation options [SpecAugment](https://arxiv.org/abs/1904.08779) and [Cutout](https://arxiv.org/pdf/1708.04552.pdf) using the DALI library
* offline and online [Speed Perturbation](https://www.danielpovey.com/files/2015_interspeech_augmentation.pdf) using the DALI library
* data-parallel multi-GPU training and evaluation
* AMP with dynamic loss scaling for Tensor Core training
* FP16 inference
### Feature support matrix
| **Feature** | **QuartzNet** |
|---------------|---------------|
|[Apex AMP](https://nvidia.github.io/apex/amp.html) | Yes |
|[DALI](https://docs.nvidia.com/deeplearning/dali/release-notes/index.html) | Yes |
#### Features
**DALI**
NVIDIA Data Loading Library (DALI) is a collection of highly optimized building blocks, and an execution engine, to accelerate the pre-processing of the input data for deep learning applications. DALI provides both the performance and the flexibility for accelerating different data pipelines as a single library. This single library can then be easily integrated into different deep learning training and inference applications. For details, see example sources in this repository or see the [DALI documentation](https://docs.nvidia.com/deeplearning/dali/index.html).
**Automatic Mixed Precision (AMP)**
Computation graphs can be modified by PyTorch on runtime to support mixed precision training. A detailed explanation of mixed precision can be found in the next section.
### Mixed precision training
Mixed precision is the combined use of different numerical precisions in a computational method. [Mixed precision](https://arxiv.org/abs/1710.03740) training offers significant computational speedup by performing operations in half-precision format while storing minimal information in single-precision to retain as much information as possible in critical parts of the network. Since the introduction of [Tensor Cores](https://developer.nvidia.com/tensor-cores) in Volta, and following with both the Turing and Ampere architectures, significant training speedups are experienced by switching to mixed precision -- up to 3x overall speedup on the most arithmetically intense model architectures. Using [mixed precision training](https://docs.nvidia.com/deeplearning/performance/mixed-precision-training/index.html) previously required two steps:
1. Porting the model to use the FP16 data type where appropriate.
2. Adding loss scaling to preserve small gradient values.
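The two steps above can be sketched with PyTorch's native AMP utilities; the snippet below is a generic, self-contained illustration (with random stand-in data) and not the exact code used by `train.py`:
```python
import torch

model = torch.nn.Linear(80, 29).cuda()                 # stand-in acoustic model head
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
criterion = torch.nn.MSELoss()
scaler = torch.cuda.amp.GradScaler()                   # step 2: dynamic loss scaling

for _ in range(10):                                    # dummy training loop
    x = torch.randn(16, 80, device='cuda')             # random stand-in features
    y = torch.randn(16, 29, device='cuda')
    optimizer.zero_grad()
    with torch.cuda.amp.autocast():                    # step 1: run ops in FP16 where safe
        loss = criterion(model(x), y)
    scaler.scale(loss).backward()                      # scale the loss to preserve small gradients
    scaler.step(optimizer)
    scaler.update()
```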
For information about:
- How to train using mixed precision, see the [Mixed Precision Training](https://arxiv.org/abs/1710.03740) paper and [Training With Mixed Precision](https://docs.nvidia.com/deeplearning/performance/mixed-precision-training/index.html) documentation.
- Techniques used for mixed precision training, see the [Mixed-Precision Training of Deep Neural Networks](https://devblogs.nvidia.com/mixed-precision-training-deep-neural-networks/) blog.
- APEX tools for mixed precision training, see the [NVIDIA Apex: Tools for Easy Mixed-Precision Training in PyTorch](https://devblogs.nvidia.com/apex-pytorch-easy-mixed-precision-training/).
#### Enabling mixed precision
For training, mixed precision can be enabled by setting the flag: `train.py --amp`. When using bash helper scripts, mixed precision can be enabled with the environment variable `AMP=true`, for example, `AMP=true bash scripts/train.sh`, `AMP=true bash scripts/inference.sh`, etc.
#### Enabling TF32
TensorFloat-32 (TF32) is the new math mode in [NVIDIA A100](https://www.nvidia.com/en-us/data-center/a100/) GPUs for handling the matrix math also called tensor operations. TF32 running on Tensor Cores in A100 GPUs can provide up to 10x speedups compared to single-precision floating-point math (FP32) on Volta GPUs.
TF32 Tensor Cores can speed up networks using FP32, typically with no loss of accuracy. It is more robust than FP16 for models which require high dynamic range for weights or activations.
For more information, refer to the [TensorFloat-32 in the A100 GPU Accelerates AI Training, HPC up to 20x](https://blogs.nvidia.com/blog/2020/05/14/tensorfloat-32-precision-format/) blog post.
TF32 is supported in the NVIDIA Ampere GPU architecture and is enabled by default.
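For reference, PyTorch exposes global flags for TF32 that you can query or override; this is a small generic illustration, not a setting the provided scripts require:
```python
import torch

# TF32 is enabled by default on Ampere GPUs; these flags let you inspect or
# override that behavior for matmuls and cuDNN convolutions.
print(torch.backends.cuda.matmul.allow_tf32)
print(torch.backends.cudnn.allow_tf32)
torch.backends.cuda.matmul.allow_tf32 = True
torch.backends.cudnn.allow_tf32 = True
```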
### Glossary
**Time-channel separable (TCS) convolution**
A module composed mainly of two convolutional layers: a 1D depthwise convolutional layer,
and a pointwise convolutional layer (Figure 2). The former operates across K time frames, and the latter across all channels. By decoupling the time and channel axes, the separable module uses fewer parameters and computes the result faster than it otherwise would.
<p align="center">
<img src="./img/tcs_conv.png" alt="Time-channel separable (TCS) convolutional module" width="50%" />
</p>
<p align="center">
<em>Figure 2. Time-channel separable (TCS) convolutional module: (a) basic design, (b) TCS with a group shuffle layer, added to increase cross-group interchange</em>
</p>
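As a generic illustration of the TCS module described above (not the exact implementation used in this repository), a basic block can be composed from a depthwise and a pointwise `Conv1d`:
```python
import torch
import torch.nn as nn

class TCSConv1d(nn.Module):
    """Time-channel separable convolution: depthwise over time, pointwise over channels."""
    def __init__(self, in_channels, out_channels, kernel_size):
        super().__init__()
        self.depthwise = nn.Conv1d(in_channels, in_channels, kernel_size,
                                   padding=kernel_size // 2, groups=in_channels)
        self.pointwise = nn.Conv1d(in_channels, out_channels, kernel_size=1)

    def forward(self, x):          # x: (batch, channels, time)
        return self.pointwise(self.depthwise(x))

x = torch.randn(8, 64, 400)        # batch of 8, 64 channels, 400 time frames
print(TCSConv1d(64, 256, kernel_size=33)(x).shape)   # torch.Size([8, 256, 400])
```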
**Automatic Speech Recognition (ASR)**
Uses both an acoustic model and a language model to output the transcript of an input audio signal.
**Acoustic model**
Assigns a probability distribution over a vocabulary of characters given an audio frame. Typically, a large part of the entire ASR model.
**Language model**
Assigns a probability distribution over a sequence of words. Given a sequence of words, it assigns a probability to the whole sequence.
**Pre-training**
Training a model on vast amounts of data on the same (or a different) task to build a general understanding.
### Language support and NeMo compatibility
This repository allows you to train and run models in languages other than English.
During inference, QuartzNet models trained with [NVIDIA NeMo](https://github.com/NVIDIA/NeMo) can also be used, for instance one of the pre-trained models
for Catalan, French, German, Italian, Mandarin Chinese, Polish, Russian, or Spanish available on [NGC](https://ngc.nvidia.com/).
To download automatically, run:
```bash
bash scripts/download_quartznet.sh [ca|fr|de|it|zh|pl|ru|es]
```
Pre-trained models can be explicitly converted from the `.nemo` checkpoint format to `.pt` and vice versa.
For more details, run:
```bash
python nemo_dle_model_converter.py --help
```
## Setup
The following section lists the requirements that you need to meet in order to start training the QuartzNet model.
### Requirements
This repository contains Dockerfile which extends the PyTorch 21.07-py3 NGC container and encapsulates some dependencies. Aside from these dependencies, ensure you have the following components:
- [NVIDIA Docker](https://github.com/NVIDIA/nvidia-docker)
- [PyTorch 21.07-py3 NGC container](https://ngc.nvidia.com/catalog/containers/nvidia:pytorch)
- Supported GPUs:
- [NVIDIA Volta architecture](https://www.nvidia.com/en-us/data-center/volta-gpu-architecture/)
- [NVIDIA Turing architecture](https://www.nvidia.com/en-us/design-visualization/technologies/turing-architecture/)
- [NVIDIA Ampere architecture](https://www.nvidia.com/en-us/data-center/nvidia-ampere-gpu-architecture/)
For more information about how to get started with NGC containers, see the following sections from the NVIDIA GPU Cloud Documentation and the Deep Learning Documentation:
- [Getting Started Using NVIDIA GPU Cloud](https://docs.nvidia.com/ngc/ngc-getting-started-guide/index.html)
- [Accessing And Pulling From The NGC Container Registry](https://docs.nvidia.com/deeplearning/frameworks/user-guide/index.html#accessing_registry)
- [Running PyTorch](https://docs.nvidia.com/deeplearning/dgx/pytorch-release-notes/running.html#running)
Further required Python packages are listed in `requirements.txt`, which are automatically installed with the built Docker container. To manually install them, run:
```bash
pip install -r requirements.txt
```
For those unable to use the PyTorch 21.07-py3 NGC container, to set up the required environment or create your own container, see the versioned [NVIDIA Container Support Matrix](https://docs.nvidia.com/deeplearning/frameworks/support-matrix/index.html).
## Quick Start Guide
To train your model using mixed or TF32 precision with Tensor Cores or using FP32, perform the following steps using the default parameters of the QuartzNet model on the LibriSpeech dataset. For the specifics concerning training and inference, see the [Advanced](#advanced) section.
1. Clone the repository.
```bash
git clone https://github.com/NVIDIA/DeepLearningExamples
cd DeepLearningExamples/PyTorch/SpeechRecognition/QuartzNet
```
2. Build the QuartzNet PyTorch NGC container.
```bash
bash scripts/docker/build.sh
```
3. Start an interactive session in the NGC container to prepare the dataset, or run training/inference.
Specify a local mountpoint for the dataset with the `DATA_DIR` variable:
```bash
DATA_DIR=<path_on_the_host> bash scripts/docker/launch.sh
```
4. Download and preprocess the dataset.
No GPU is required for data download and preprocessing.
It can take several hours to complete, and requires over 250GB of free disk space.
This repository provides scripts to download and extract LibriSpeech [http://www.openslr.org/12](http://www.openslr.org/12). The dataset contains 1000 hours of 16kHz read English speech derived from public domain audiobooks from the LibriVox project and has been carefully segmented and aligned. For more information, see the [LIBRISPEECH: AN ASR CORPUS BASED ON PUBLIC DOMAIN AUDIO BOOKS](http://www.danielpovey.com/files/2015_icassp_librispeech.pdf) paper.
Inside the container, download and extract the datasets into the required format for later training and inference:
```bash
bash scripts/download_librispeech.sh
```
After the data download is complete, the following folders should exist:
```bash
datasets/LibriSpeech/
├── dev-clean
├── dev-other
├── test-clean
├── test-other
├── train-clean-100
├── train-clean-360
└── train-other-500
```
Since `/datasets/` is mounted to `DATA_DIR` on the host, after the dataset is downloaded it will be accessible from outside of the container at `$DATA_DIR/LibriSpeech`.
Next, convert the data into WAV files:
```bash
bash scripts/preprocess_librispeech.sh
```
After the data is converted, the following additional files and folders should exist:
```bash
datasets/LibriSpeech/
├── dev-clean-wav
├── dev-other-wav
├── librispeech-train-clean-100-wav.json
├── librispeech-train-clean-360-wav.json
├── librispeech-train-other-500-wav.json
├── librispeech-dev-clean-wav.json
├── librispeech-dev-other-wav.json
├── librispeech-test-clean-wav.json
├── librispeech-test-other-wav.json
├── test-clean-wav
├── test-other-wav
├── train-clean-100-wav
├── train-clean-360-wav
└── train-other-500-wav
```
5. Start training.
Inside the container, use the following script to start training.
Make sure the downloaded and preprocessed dataset is located at `$DATA_DIR/LibriSpeech` on the host, which is mounted as `/datasets/LibriSpeech` inside the container.
```bash
[OPTION1=value1 OPTION2=value2 ...] bash scripts/train.sh
```
By default, automatic mixed precision is disabled, the batch size is 144 over two gradient accumulation steps, and the recipe is run on a total of 8 GPUs. The hyperparameters are tuned for a GPU with at least 32GB of memory and will require adjustment for different configurations (for example, by lowering the batch size and using more gradient accumulation steps).
Options are being passed as environment variables. More details on the available options can be found in the [Parameters](#parameters) and [Training process](#training-process) sections.
6. Start validation/evaluation.
Inside the container, use the following script to run evaluation.
Make sure the downloaded and preprocessed dataset is located at `$DATA_DIR/LibriSpeech` on the host, which is mounted as `/datasets/LibriSpeech` inside the container.
```bash
[OPTION1=value1 OPTION2=value2 ...] bash scripts/evaluation.sh [OPTIONS]
```
By default, this will use full precision, a batch size of 64, and run on a single GPU.
Options are being passed as environment variables. More details on the available options can be found in the [Parameters](#parameters) and [Evaluation process](#evaluation-process) sections.
7. Start inference/predictions.
Inside the container, use the following script to run inference.
Make sure the downloaded and preprocessed dataset is located at `$DATA_DIR/LibriSpeech` on the host, which is mounted as `/datasets/LibriSpeech` inside the container.
A pretrained model checkpoint can be downloaded from [NGC model repository](https://ngc.nvidia.com/catalog/models), manually or automatically using `scripts/download_quartznet.sh`.
```bash
[OPTION1=value1 OPTION2=value2 ...] bash scripts/inference.sh
```
By default, this will use single precision, a batch size of 64, and run on a single GPU.
Options are being passed as environment variables. More details on the available options can be found in the [Parameters](#parameters) and [Inference process](#inference-process) sections.
Now that you have your model trained and evaluated, you can choose to compare your training results with our [Training accuracy results](#training-accuracy-results). You can also choose to benchmark your performance to [Training performance benchmark](#training-performance-results), or [Inference performance benchmark](#inference-performance-results). Following the steps in these sections will ensure that you achieve the same accuracy and performance results as stated in the [Results](#results) section.
## Advanced
The following sections provide greater details of the dataset, running training and inference, and the training results.
### Scripts and sample code
In the `root` directory, the most important files are:
```
quartznet
├── common # data pre-processing, logging, etc.
├── configs # model configurations
├── Dockerfile # container with the basic set of dependencies to run QuartzNet
├── inference.py # entry point for inference
├── quartznet # model-specific code
├── scripts # one-click scripts required for running various supported functionalities
│ ├── docker # contains the scripts for building and launching the container
│ ├── download_librispeech.sh # downloads LibriSpeech dataset
│ ├── evaluation.sh # runs evaluation using the `inference.py` script
│ ├── inference_benchmark.sh # runs the inference benchmark using the `inference_benchmark.py` script
│ ├── inference.sh # runs inference using the `inference.py` script
│ ├── preprocess_librispeech.sh # preprocess LibriSpeech raw data files for training and inference
│ ├── train_benchmark.sh # runs the training performance benchmark using the `train.py` script
│ └── train.sh # runs training using the `train.py` script
├── train.py # entry point for training
└── utils # data downloading and common routines
```
### Parameters
Parameters should be set as environment variables.
The complete list of available parameters for `scripts/train.sh` script contains:
```bash
DATA_DIR: directory of dataset. (default: '/datasets/LibriSpeech')
MODEL_CONFIG: relative path to model configuration. (default: 'configs/quartznet10x5dr_speedp_online_speca.yaml')
OUTPUT_DIR: directory for results, logs, and created checkpoints. (default: '/results')
CHECKPOINT: a specific model checkpoint to continue training from. To resume training from the last checkpoint, see the RESUME option.
RESUME: resume training from the last checkpoint found in OUTPUT_DIR, or from scratch if there are no checkpoints (default: true)
CUDNN_BENCHMARK: boolean that indicates whether to enable cudnn benchmark mode for using more optimized kernels. (default: true)
NUM_GPUS: number of GPUs to use. (default: 8)
AMP: if set to `true`, enables automatic mixed precision (default: false)
GPU_BATCH_SIZE: batch size for every forward/backward pass. The effective batch size might be higher, if gradient accumulation is enabled (default: 72)
GRAD_ACCUMULATION: number of forward/backward passes until the optimizer updates weights. (default: 2)
LEARNING_RATE: initial learning rate. (default: 0.01)
MIN_LEARNING_RATE: minimum learning rate, despite LR scheduling (default: 1e-5)
LR_POLICY: how to decay LR (default: exponential)
LR_EXP_GAMMA: decay factor for the exponential LR schedule (default: 0.981)
EMA: decay factor for exponential averages of checkpoints (default: 0.999)
SEED: seed for random number generator and used for ensuring reproducibility. (default: 0)
EPOCHS: number of training epochs. (default: 440)
WARMUP_EPOCHS: number of initial epochs of linearly increasing LR. (default: 2)
HOLD_EPOCHS: number of epochs to hold maximum LR after warmup. (default: 140)
SAVE_FREQUENCY: number of epochs between saving the model to disk. (default: 10)
EPOCHS_THIS_JOB: run training for this number of epochs. Does not affect LR schedule like the EPOCHS parameter. (default: 0)
DALI_DEVICE: device to run the DALI pipeline on for calculation of filterbanks. Valid choices: cpu, gpu, none. (default: gpu)
PAD_TO_MAX_DURATION: pad all sequences with zeros to maximum length. (default: false)
EVAL_FREQUENCY: number of steps between evaluations on the validation set. (default: 544)
PREDICTION_FREQUENCY: the number of steps between writing a sample prediction to stdout. (default: 544)
TRAIN_MANIFESTS: lists of .json training set files
VAL_MANIFESTS: lists of .json validation set files
```
The complete list of available parameters for `scripts/inference.sh` script contains:
```bash
DATA_DIR: directory of dataset. (default: '/datasets/LibriSpeech')
MODEL_CONFIG: model configuration. (default: 'configs/quartznet10x5dr_speedp-online_speca.yaml')
OUTPUT_DIR: directory for results and logs. (default: '/results')
CHECKPOINT: model checkpoint path. (required)
DATASET: name of the LibriSpeech subset to use. (default: 'dev-clean')
LOG_FILE: path to the DLLogger .json logfile. (default: '')
CUDNN_BENCHMARK: enable cudnn benchmark mode for using more optimized kernels. (default: false)
MAX_DURATION: filter out recordings longer than MAX_DURATION seconds. (default: "")
PAD_TO_MAX_DURATION: pad all sequences with zeros to maximum length. (default: false)
NUM_GPUS: number of GPUs to use. Note that with > 1 GPUs WER results might be inaccurate due to the batching policy. (default: 1)
NUM_STEPS: number of batches to evaluate, loop the dataset if necessary. (default: 0)
NUM_WARMUP_STEPS: number of initial steps before measuring performance. (default: 0)
AMP: enable FP16 inference with AMP. (default: false)
BATCH_SIZE: data batch size. (default: 64)
EMA: Attempt to load exponentially averaged weights from a checkpoint. (default: true)
SEED: seed for random number generator and used for ensuring reproducibility. (default: 0)
DALI_DEVICE: device to run the DALI pipeline on for calculation of filterbanks. Valid choices: cpu, gpu, none. (default: gpu)
CPU: run inference on CPU. (default: false)
LOGITS_FILE: dump logit matrices to a file. (default: "")
PREDICTION_FILE: save predictions to a file. (default: "${OUTPUT_DIR}/${DATASET}.predictions")
```
The complete list of available parameters for `scripts/evaluation.sh` is the same as `scripts/inference.sh`. Only the defaults have changed.
```bash
PREDICTION_FILE: (default: "")
DATASET: (default: "test-other")
```
The `scripts/inference_benchmark.sh` script pads all input to a fixed duration and computes the mean and the 90th, 95th, and 99th percentiles of latency for the specified number of inference steps. Latency is measured in milliseconds per batch. The `scripts/inference_benchmark.sh` script measures latency for a single GPU and loops over a number of batch sizes and durations. It extends `scripts/inference.sh` and changes the defaults with:
```bash
BATCH_SIZE_SEQ: batch sizes to measure with. (default: "1 2 4 8 16")
MAX_DURATION_SEQ: input durations (in seconds) to measure with (default: "2 7 16.7")
CUDNN_BENCHMARK: (default: true)
PAD_TO_MAX_DURATION: (default: true)
NUM_WARMUP_STEPS: (default: 10)
NUM_STEPS: (default: 500)
DALI_DEVICE: (default: "cpu")
```
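For reference, the latency statistics quoted above (mean and the 90th/95th/99th percentiles) can be computed from a list of per-batch timings as follows; this is a generic illustration rather than the benchmark script's own code:
```python
import numpy as np

latencies_ms = np.array([35.1, 36.4, 35.9, 38.2, 55.7])   # per-batch latencies from a run
print("avg:", latencies_ms.mean())
print("p90/p95/p99:", np.percentile(latencies_ms, [90, 95, 99]))
```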
The `scripts/train_benchmark.sh` script pads all input to the same length according to the input argument `MAX_DURATION` and measures average training latency and throughput performance. Latency is measured in seconds per batch, throughput in sequences per second.
Training performance is measured with online speed perturbation and NVIDIA cuDNN benchmark mode enabled.
The script `scripts/train_benchmark.sh` loops over a number of batch sizes and GPU counts.
It extends `scripts/train.sh`, the complete list of available parameters for `scripts/train_benchmark.sh` script contains:
```bash
ACC_BATCH_SIZE: accumulated (effective) batch size to measure with. (default: "144")
GRAD_ACC_SEQ: the sequence of gradient accumulation settings to measure with. (default: "4 2")
NUM_GPUS_SEQ: number of GPUs to run the training on. (default: "1 4 8")
MODEL_CONFIG: (default: "configs/quartznet10x5dr_speedp-online_train-benchmark.yaml")
TRAIN_MANIFESTS: (default: "$DATA_DIR/librispeech-train-clean-100-wav.json")
RESUME: (default: false)
EPOCHS_THIS_JOB: (default: 2)
EPOCHS: (default: 100000)
SAVE_FREQUENCY: (default: 100000)
EVAL_FREQUENCY: (default: 100000)
GRAD_ACCUMULATION_STEPS: (default: 1)
PAD_TO_MAX_DURATION: (default: true)
EMA: (default: 0)
```
### Command-line options
To see the full list of available options and their descriptions, use the `-h` or `--help` command-line option, for example:
```bash
python train.py --help
python inference.py --help
```
### Getting the data
QuartzNet is trained on the LibriSpeech dataset. We use the concatenation of `train-clean-100`, `train-clean-360`, and `train-other-500` for training and `dev-clean` for validation.
This repository contains the `scripts/download_librispeech.sh` and `scripts/preprocess_librispeech.sh` scripts that automatically download and preprocess the training, test, and development datasets. By default, data is downloaded to the `/datasets/LibriSpeech` directory. A minimum of 250GB free space is required for download and preprocessing; the final preprocessed dataset is approximately 100GB.
#### Dataset guidelines
The `scripts/preprocess_librispeech.sh` script converts the input audio files to WAV format with a sample rate of 16kHz. The target transcripts are stripped of whitespace characters, then lower-cased. No offline augmentations are stored on the disk - these are computed online with the DALI library without any impact on training time.
After preprocessing, the script creates JSON metadata files with output file paths, sample rate, target transcript and other metadata. These JSON files are used by the training script to identify training and validation datasets.
The QuartzNet model was tuned on audio signals with a sample rate of 16kHz. If you wish to use a different sampling rate, then some hyperparameters might need to be changed - specifically, the window size and step size.
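To inspect the generated metadata described above, you can load one of the manifest JSON files directly; the snippet below only assumes each manifest is a list of per-utterance records and does not hard-code any field names:
```python
import json

with open('/datasets/LibriSpeech/librispeech-dev-clean-wav.json') as f:
    manifest = json.load(f)

print(len(manifest), 'utterances')
print(sorted(manifest[0].keys()))   # e.g. file path, duration, and transcript fields
```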
#### Multi-dataset
Training scripts in this repository treat the training subsets of LibriSpeech (`train-clean-100`, `train-clean-360`, `train-other-500`) as three independent training datasets.
In order to add more datasets, follow the format of LibriSpeech, adjust the provided pre-processing scripts to generate metadata JSON files, and point them with the `TRAIN_MANIFESTS` variable to the `scripts/train.sh` script.
### Training process
Training is performed using the `train.py` script along with parameters defined in `scripts/train.sh`.
The `scripts/train.sh` script runs a job on a single node that trains the QuartzNet model from scratch using LibriSpeech as training data. To make training more efficient, we discard audio samples longer than 16.7 seconds from the training dataset; these account for less than 1% of all samples. Such filtering does not degrade accuracy, but it allows us to decrease the number of time steps in a batch, which requires less GPU memory and increases training speed.
Apart from the default arguments as listed in the [Parameters](#parameters) section, by default the training script:
* Runs on 8 GPUs with at least 32GB of memory and training/evaluation batch size 48, split over three gradient accumulation steps
* Uses TF32 precision (A100 GPU) or FP32 (other GPUs)
* Trains on the concatenation of all 3 LibriSpeech training datasets and evaluates on the LibriSpeech dev-clean dataset
* Maintains an exponential moving average of parameters for evaluation
* Has cuDNN benchmark enabled
* Runs for 260 epochs
* Uses an initial learning rate of 0.02 and an exponential learning rate decay
* Saves a checkpoint every 10 epochs
* Automatically removes old checkpoints and preserves milestone checkpoints
* Runs evaluation on the development dataset every epoch and at the end of training
* Maintains a separate checkpoint with the lowest WER on development set
* Prints out training progress every iteration to `stdout`
* Creates a DLLogger log file and a TensorBoard log
* Calculates speed perturbation online during training
* Uses `SpecAugment` in data pre-processing
* Filters out audio samples longer than 16.7 seconds
* Pads each batch so its length is divisible by 16
* Uses time-channel separable convolutions as described in the paper
* Uses weight decay of 0.001
* Uses [Novograd](https://arxiv.org/pdf/1905.11286.pdf) as optimizer with betas=(0.95, 0)
Enabling AMP permits a batch size of 144 with a single gradient accumulation step. However, since each batch has to be padded to its longest sequence and all GPUs have to wait for the slowest one, two accumulation steps are in practice slightly faster.
The current training setup improves upon the greedy WER [Results](#results) of the QuartzNet paper.
### Inference process
Inference is performed using the `inference.py` script along with parameters defined in `scripts/inference.sh`.
The `scripts/inference.sh` script runs the job on a single GPU, taking a pre-trained QuartzNet model checkpoint and running it on the specified dataset.
Apart from the default arguments as listed in the [Parameters](#parameters) section, by default, the inference script:
* Evaluates on the LibriSpeech dev-clean dataset and prints out the final word error rate
* Uses a batch size of 64
* Creates a log file with progress and results which will be stored in the `results` folder
* Pads each batch so its length is divisible by 16
* Does not use data augmentation
* Does greedy decoding and optionally saves the transcriptions in the results folder
* Has the option to save the model output tensors for more complex decoding, for example, beam search
* Has cuDNN benchmark disabled
To view all available options for inference, run `python inference.py --help`.
## Performance
### Benchmarking
The following section shows how to run benchmarks measuring the model performance in training and inference modes.
#### Training performance benchmark
To benchmark the training performance with a number of specific configurations, run:
```bash
GRAD_ACC_SEQ=<SEQUENCE> NUM_GPUS_SEQ=<NUMS_OF_GPUS> bash scripts/train_benchmark.sh
```
for example:
```bash
GRAD_ACC_SEQ="12 24" NUM_GPUS_SEQ="4 8" bash scripts/train_benchmark.sh
```
This invocation will measure performance in four setups (two different batch sizes for every single forward/backward pass, times two GPU counts).
By default, this script makes forward/backward pre-allocation passes with all possible audio lengths
enabling immediate stabilization of training step times in the cuDNN benchmark mode,
and trains for two epochs on the `train-clean-100` subset of LibriSpeech.
#### Inference performance benchmark
To benchmark the inference performance on a specific batch size and audio length, run:
```bash
BATCH_SIZE_SEQ=<BATCH_SIZES> MAX_DURATION_SEQ=<DURATIONS> bash scripts/inference_benchmark.sh
```
for example:
```bash
BATCH_SIZE_SEQ="24 48" MAX_DURATION_SEQ="2 7 16.7" bash scripts/inference_benchmark.sh
```
The script runs on a single GPU and evaluates on the dataset of fixed-length utterances shorter than `MAX_DURATION` and padded to that duration.
### Results
The following sections provide details on how we achieved our performance and accuracy in training and inference.
#### Training accuracy results
##### Training accuracy: NVIDIA DGX A100 (8x A100 80GB)
Our results were obtained by running the `scripts/train.sh` training script in the PyTorch 21.07-py3 NGC container on NVIDIA DGX A100 (8x A100 80GB) GPUs.
| Number of GPUs | Batch size per GPU | Precision | dev-clean WER | dev-other WER | test-clean WER | test-other WER | Time to train |
|-----|-----|-------|-------|-------|------|-------|------|
| 8 | 144 | mixed | 3.47 | 10.84 | 3.69 | 10.69 | 34 h |
The table reports word error rate (WER) of the acoustic model with greedy decoding on all LibriSpeech dev and test datasets for mixed precision training.
##### Training stability test
The following table compares greedy decoding word error rates across 8 different training runs with different seeds for mixed precision training.
| DGX A100 80GB, FP16, 8x GPU | Seed #1 | Seed #2 | Seed #3 | Seed #4 | Seed #5 | Seed #6 | Seed #7 | Seed #8 | Mean | Std |
|-----------:|----------:|----------:|----------:|----------:|----------:|----------:|----------:|----------:|-------:|------:|
| dev-clean | 3.57 | 3.48 | 3.54 | 3.48 | 3.47 | 3.69 | 3.51 | 3.59 | 3.54 | 0.07 |
| dev-other | 10.68 | 10.78 | 10.47 | 10.72 | 10.84 | 11.03 | 10.67 | 10.86 | 10.76 | 0.15 |
| test-clean | 3.70 | 3.82 | 3.79 | 3.84 | 3.69 | 4.03 | 3.82 | 3.80 | 3.81 | 0.10 |
| test-other | 10.75 | 10.62 | 10.54 | 10.90 | 10.69 | 11.14 | 10.41 | 10.82 | 10.73 | 0.21 |
#### Training performance results
##### Training performance: NVIDIA DGX A100 (8x A100 80GB)
Our results were obtained by running:
```bash
AMP=true NUM_GPUS_SEQ="1" GRAD_ACC_SEQ="16 24" bash scripts/train_benchmark.sh
AMP=true NUM_GPUS_SEQ="4" GRAD_ACC_SEQ="4 6" bash scripts/train_benchmark.sh
AMP=true NUM_GPUS_SEQ="8" GRAD_ACC_SEQ="2 3" bash scripts/train_benchmark.sh
AMP=false NUM_GPUS_SEQ="1" GRAD_ACC_SEQ="16 24" bash scripts/train_benchmark.sh
AMP=false NUM_GPUS_SEQ="4" GRAD_ACC_SEQ="4 6" bash scripts/train_benchmark.sh
AMP=false NUM_GPUS_SEQ="8" GRAD_ACC_SEQ="2 3" bash scripts/train_benchmark.sh
```
in the PyTorch 21.07-py3 NGC container on NVIDIA DGX A100 with (8x A100 80GB) GPUs. Performance numbers (in sequences per second) were averaged over an entire training epoch.
| Batch size / GPU | Grad accumulation | GPUs | Throughput - TF32 | Throughput - mixed precision | Throughput speedup (TF32 to mixed precision) | Weak scaling - TF32 | Weak scaling - mixed precision |
|-----:|-----:|-------:|----------:|-------:|--------:|-----:|------:|
| 48 | 24 | 1 | 78.89 | 89.69 | 1.14 | 1.00 | 1.00 |
| 72 | 16 | 1 | 79.01 | 88.70 | 1.12 | 1.00 | 1.00 |
| 48 | 6 | 4 | 303.16 | 343.06 | 1.13 | 3.84 | 3.82 |
| 72 | 4 | 4 | 304.47 | 341.95 | 1.12 | 3.85 | 3.86 |
| 48 | 3 | 8 | 576.37 | 644.27 | 1.12 | 7.31 | 7.18 |
| 72 | 2 | 8 | 583.31 | 651.60 | 1.12 | 7.38 | 7.35 |
To achieve these same results, follow the steps in the [Quick Start Guide](#quick-start-guide).
##### Training performance: NVIDIA DGX-2 (16x V100 32GB)
Our results were obtained by running:
```bash
AMP=true NUM_GPUS_SEQ="1" GRAD_ACC_SEQ="24 48" bash scripts/train_benchmark.sh
AMP=true NUM_GPUS_SEQ="4" GRAD_ACC_SEQ="6 12" bash scripts/train_benchmark.sh
AMP=true NUM_GPUS_SEQ="8" GRAD_ACC_SEQ="3 6" bash scripts/train_benchmark.sh
AMP=true NUM_GPUS_SEQ="16" GRAD_ACC_SEQ="3" bash scripts/train_benchmark.sh
AMP=false NUM_GPUS_SEQ="1" GRAD_ACC_SEQ="48" bash scripts/train_benchmark.sh
AMP=false NUM_GPUS_SEQ="4" GRAD_ACC_SEQ="12" bash scripts/train_benchmark.sh
AMP=false NUM_GPUS_SEQ="8" GRAD_ACC_SEQ="6" bash scripts/train_benchmark.sh
AMP=false NUM_GPUS_SEQ="16" GRAD_ACC_SEQ="3" bash scripts/train_benchmark.sh
```
in the PyTorch 21.07-py3 NGC container on NVIDIA DGX-2 with (16x V100 32GB) GPUs. Performance numbers (in sequences per second) were averaged over an entire training epoch.
| Batch size / GPU | Grad accumulation | GPUs | Throughput - FP32 | Throughput - mixed precision | Throughput speedup (FP32 to mixed precision) | Weak scaling - FP32 | Weak scaling - mixed precision |
|-----:|-----:|-------:|-------:|-------:|------:|------:|------:|
| 24 | 48 | 1 | 44.65 | 67.95 | 1.52 | 1.00 | 1.00 |
| 48 | 24 | 1 | - | 67.49 | - | 1.00 | 1.00 |
| 24 | 12 | 4 | 170.18 | 258.56 | 1.52 | 3.81 | 3.81 |
| 48 | 6 | 4 | - | 254.58 | - | - | 3.77 |
| 24 | 6 | 8 | 330.53 | 495.52 | 1.50 | 7.40 | 7.29 |
| 48 | 3 | 8 | - | 477.87 | - | - | 7.08 |
| 24 | 3 | 16 | 616.51 | 872.99 | 1.42 | 13.81 | 12.85 |
To achieve these same results, follow the steps in the [Quick Start Guide](#quick-start-guide).
#### Inference performance results
##### Inference performance: NVIDIA DGX A100 (1x A100 80GB)
Our results were obtained by running:
```bash
AMP=false bash scripts/inference_benchmark.sh
AMP=true bash scripts/inference_benchmark.sh
```
in the PyTorch 21.07-py3 NGC container on NVIDIA DGX A100 (1x A100 80GB) GPU.
Performance numbers (latency in milliseconds per batch) were averaged over 500 iterations. The FP16/TF32 speed up column is the ratio of the average TF32 latency to the average FP16 latency, so values below 1.0 indicate configurations where mixed-precision inference was slower.
| | | FP16 Latency (ms) Percentiles | | | | TF32 Latency (ms) Percentiles | | | | FP16/TF32 speed up |
|-----:|---------------:|------:|------:|------:|------:|------:|------:|------:|------:|------:|
| BS | Duration (s) | 90% | 95% | 99% | Avg | 90% | 95% | 99% | Avg | Avg |
| 1 | 2.0 | 35.51 | 36.36 | 55.57 | 35.71 | 33.23 | 33.86 | 40.05 | 33.23 | 0.93 |
| 2 | 2.0 | 38.05 | 38.91 | 52.67 | 38.21 | 34.17 | 35.17 | 39.32 | 33.73 | 0.88 |
| 4 | 2.0 | 38.43 | 38.98 | 45.44 | 37.78 | 35.02 | 36.00 | 44.10 | 34.75 | 0.92 |
| 8 | 2.0 | 38.63 | 39.37 | 45.43 | 37.94 | 35.49 | 36.70 | 45.94 | 34.53 | 0.91 |
| 16 | 2.0 | 42.33 | 44.58 | 61.02 | 40.28 | 35.66 | 36.93 | 45.38 | 34.78 | 0.86 |
| 1 | 7.0 | 37.72 | 38.54 | 42.56 | 37.28 | 33.23 | 34.16 | 40.54 | 33.13 | 0.89 |
| 2 | 7.0 | 39.44 | 41.35 | 53.62 | 38.56 | 35.15 | 35.81 | 41.83 | 34.82 | 0.90 |
| 4 | 7.0 | 38.39 | 39.48 | 45.01 | 37.98 | 37.54 | 38.51 | 42.67 | 36.12 | 0.95 |
| 8 | 7.0 | 40.82 | 41.76 | 54.20 | 39.43 | 37.67 | 39.97 | 45.24 | 36.12 | 0.92 |
| 16 | 7.0 | 42.80 | 44.80 | 56.92 | 41.52 | 40.66 | 41.96 | 53.24 | 39.24 | 0.95 |
| 1 | 16.7 | 38.22 | 38.98 | 44.15 | 37.80 | 33.89 | 34.98 | 42.66 | 33.23 | 0.88 |
| 2 | 16.7 | 39.84 | 41.09 | 52.50 | 39.34 | 35.86 | 37.16 | 42.04 | 34.39 | 0.87 |
| 4 | 16.7 | 41.02 | 42.64 | 54.96 | 39.50 | 35.98 | 37.02 | 39.30 | 34.87 | 0.88 |
| 8 | 16.7 | 40.93 | 42.06 | 56.26 | 39.36 | 40.93 | 42.06 | 45.50 | 39.34 | 1.00 |
| 16 | 16.7 | 57.21 | 58.65 | 71.33 | 57.78 | 62.74 | 63.82 | 71.13 | 61.49 | 1.06 |
To achieve these same results, follow the steps in the [Quick Start Guide](#quick-start-guide).
##### Inference performance: NVIDIA DGX-2 (1x V100 32GB)
Our results were obtained by running:
```bash
AMP=false bash scripts/inference_benchmark.sh
AMP=true bash scripts/inference_benchmark.sh
```
in the PyTorch 21.07-py3 NGC container on NVIDIA DGX-2 with 1x V100 32GB GPU.
Performance numbers (latency in milliseconds per batch) were averaged over 500 iterations.
| | | FP16 Latency (ms) Percentiles | | | | FP32 Latency (ms) Percentiles | | | | FP16/FP32 speed up |
|-----:|---------------:|------:|------:|------:|------:|-------:|-------:|-------:|-------:|------:|
| BS | Duration (s) | 90% | 95% | 99% | Avg | 90% | 95% | 99% | Avg | Avg |
| 1 | 2.0 | 36.89 | 38.16 | 41.80 | 35.85 | 33.44 | 33.78 | 38.09 | 33.01 | 0.92 |
| 2 | 2.0 | 40.47 | 41.33 | 45.70 | 40.02 | 32.62 | 33.27 | 36.38 | 32.09 | 0.80 |
| 4 | 2.0 | 41.50 | 42.85 | 49.65 | 41.12 | 34.56 | 34.83 | 37.10 | 34.04 | 0.83 |
| 8 | 2.0 | 49.87 | 50.48 | 51.99 | 49.19 | 34.90 | 35.17 | 36.57 | 34.27 | 0.70 |
| 16 | 2.0 | 46.39 | 46.77 | 47.87 | 40.04 | 45.37 | 45.89 | 47.52 | 44.46 | 1.11 |
| 1 | 7.0 | 48.83 | 49.16 | 52.22 | 48.26 | 33.87 | 34.50 | 36.45 | 33.24 | 0.69 |
| 2 | 7.0 | 41.48 | 41.82 | 45.07 | 41.03 | 42.32 | 42.66 | 43.86 | 41.79 | 1.02 |
| 4 | 7.0 | 42.48 | 43.25 | 47.29 | 41.56 | 37.20 | 38.18 | 39.74 | 36.46 | 0.88 |
| 8 | 7.0 | 39.78 | 40.49 | 44.73 | 38.89 | 46.84 | 47.17 | 48.07 | 44.78 | 1.15 |
| 16 | 7.0 | 49.85 | 50.56 | 53.04 | 44.95 | 60.21 | 60.68 | 64.92 | 57.94 | 1.29 |
| 1 | 16.7 | 40.80 | 41.16 | 42.96 | 40.52 | 42.04 | 42.53 | 44.59 | 37.08 | 0.92 |
| 2 | 16.7 | 41.37 | 41.69 | 43.74 | 40.85 | 35.61 | 36.49 | 40.32 | 34.68 | 0.85 |
| 4 | 16.7 | 50.22 | 51.07 | 54.13 | 49.51 | 40.95 | 41.38 | 44.09 | 40.39 | 0.82 |
| 8 | 16.7 | 44.93 | 45.38 | 49.24 | 44.16 | 62.54 | 62.92 | 65.95 | 61.86 | 1.40 |
| 16 | 16.7 | 70.74 | 71.56 | 75.16 | 69.87 | 102.52 | 103.57 | 108.20 | 101.57 | 1.45 |
To achieve these same results, follow the steps in the [Quick Start Guide](#quick-start-guide).
## Release notes
We're constantly refining and improving our performance on AI and HPC workloads even on the same hardware with frequent updates to our software stack. For our latest performance data, refer to these pages for [AI](https://developer.nvidia.com/deep-learning-performance-training-inference) and [HPC](https://developer.nvidia.com/hpc-application-performance) benchmarks.
### Changelog
September 2021
- Initial release
### Known issues
There are no known issues in this release.
|
Tools/PyTorch/TimeSeriesPredictionPlatform/models/tft_pyt | tft_pyt | modeling | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
from typing import Dict, Tuple, Optional, List
if os.environ.get("TFT_SCRIPTING", False):
from torch.nn import LayerNorm
else:
from apex.normalization.fused_layer_norm import FusedLayerNorm as LayerNorm
class MaybeLayerNorm(nn.Module):
def __init__(self, output_size, hidden_size, eps):
super().__init__()
if output_size and output_size == 1:
self.ln = nn.Identity()
else:
self.ln = LayerNorm(output_size if output_size else hidden_size, eps=eps)
def forward(self, x):
return self.ln(x)
class GLU(nn.Module):
def __init__(self, hidden_size, output_size):
super().__init__()
self.lin = nn.Linear(hidden_size, output_size * 2)
def forward(self, x: Tensor) -> Tensor:
x = self.lin(x)
x = F.glu(x)
return x
class GRN(nn.Module):
def __init__(self,
input_size,
hidden_size,
output_size=None,
context_hidden_size=None,
dropout=0):
super().__init__()
self.layer_norm = MaybeLayerNorm(output_size, hidden_size, eps=1e-3)
self.lin_a = nn.Linear(input_size, hidden_size)
if context_hidden_size is not None:
self.lin_c = nn.Linear(context_hidden_size, hidden_size, bias=False)
self.lin_i = nn.Linear(hidden_size, hidden_size)
self.glu = GLU(hidden_size, output_size if output_size else hidden_size)
self.dropout = nn.Dropout(dropout)
self.out_proj = nn.Linear(input_size, output_size) if output_size else None
def forward(self, a: Tensor, c: Optional[Tensor] = None):
x = self.lin_a(a)
if c is not None:
x = x + self.lin_c(c).unsqueeze(1)
x = F.elu(x)
x = self.lin_i(x)
x = self.dropout(x)
x = self.glu(x)
y = a if not self.out_proj else self.out_proj(a)
x = x + y
x = self.layer_norm(x)
return x
class TFTEmbedding(nn.Module):
def __init__(self, config):
super().__init__()
self.s_cat_inp_lens = config.static_categorical_inp_lens
self.t_cat_k_inp_lens = config.temporal_known_categorical_inp_lens
self.t_cat_o_inp_lens = config.temporal_observed_categorical_inp_lens
self.s_cont_inp_size = config.static_continuous_inp_size
self.t_cont_k_inp_size = config.temporal_known_continuous_inp_size
self.t_cont_o_inp_size = config.temporal_observed_continuous_inp_size
self.t_tgt_size = config.temporal_target_size
self.hidden_size = config.hidden_size
# There are 7 types of input:
# 1. Static categorical
# 2. Static continuous
# 3. Temporal known a priori categorical
# 4. Temporal known a priori continuous
# 5. Temporal observed categorical
# 6. Temporal observed continuous
# 7. Temporal observed targets (time series observed so far)
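# Illustrative example (hypothetical shapes): forward() consumes a dict with the keys
# below, where B is the batch size, T the number of timesteps and the trailing
# dimension the per-type feature count:
#   x = {'s_cat': LongTensor[B, T, n_s_cat], 's_cont': FloatTensor[B, T, n_s_cont],
#        'k_cat': LongTensor[B, T, n_k_cat], 'k_cont': FloatTensor[B, T, n_k_cont],
#        'o_cat': LongTensor[B, T, n_o_cat], 'o_cont': FloatTensor[B, T, n_o_cont],
#        'target': FloatTensor[B, T, n_targets]}
# Only 'target' is mandatory; any other entry may be omitted.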
self.s_cat_embed = nn.ModuleList([
nn.Embedding(n, self.hidden_size) for n in self.s_cat_inp_lens]) if self.s_cat_inp_lens else None
self.t_cat_k_embed = nn.ModuleList([
nn.Embedding(n, self.hidden_size) for n in self.t_cat_k_inp_lens]) if self.t_cat_k_inp_lens else None
self.t_cat_o_embed = nn.ModuleList([
nn.Embedding(n, self.hidden_size) for n in self.t_cat_o_inp_lens]) if self.t_cat_o_inp_lens else None
self.s_cont_embedding_vectors = nn.Parameter(torch.Tensor(self.s_cont_inp_size, self.hidden_size)) if self.s_cont_inp_size else None
self.t_cont_k_embedding_vectors = nn.Parameter(torch.Tensor(self.t_cont_k_inp_size, self.hidden_size)) if self.t_cont_k_inp_size else None
self.t_cont_o_embedding_vectors = nn.Parameter(torch.Tensor(self.t_cont_o_inp_size, self.hidden_size)) if self.t_cont_o_inp_size else None
self.t_tgt_embedding_vectors = nn.Parameter(torch.Tensor(self.t_tgt_size, self.hidden_size))
self.s_cont_embedding_bias = nn.Parameter(torch.zeros(self.s_cont_inp_size, self.hidden_size)) if self.s_cont_inp_size else None
self.t_cont_k_embedding_bias = nn.Parameter(torch.zeros(self.t_cont_k_inp_size, self.hidden_size)) if self.t_cont_k_inp_size else None
self.t_cont_o_embedding_bias = nn.Parameter(torch.zeros(self.t_cont_o_inp_size, self.hidden_size)) if self.t_cont_o_inp_size else None
self.t_tgt_embedding_bias = nn.Parameter(torch.zeros(self.t_tgt_size, self.hidden_size))
if self.s_cont_embedding_vectors is not None:
torch.nn.init.xavier_normal_(self.s_cont_embedding_vectors)
if self.t_cont_k_embedding_vectors is not None:
torch.nn.init.xavier_normal_(self.t_cont_k_embedding_vectors)
if self.t_cont_o_embedding_vectors is not None:
torch.nn.init.xavier_normal_(self.t_cont_o_embedding_vectors)
torch.nn.init.xavier_normal_(self.t_tgt_embedding_vectors)
def _apply_embedding(self,
cat: Optional[Tensor],
cont: Optional[Tensor],
cat_emb: Optional[nn.ModuleList],
cont_emb: Tensor,
cont_bias: Tensor,
) -> Tuple[Optional[Tensor], Optional[Tensor]]:
e_cat = torch.stack([embed(cat[...,i]) for i, embed in enumerate(cat_emb)], dim=-2) if cat is not None else None
if cont is not None:
# the line below is equivalent to the following einsums
#e_cont = torch.einsum('btf,fh->bthf', cont, cont_emb)
#e_cont = torch.einsum('bf,fh->bhf', cont, cont_emb)
e_cont = torch.mul(cont.unsqueeze(-1), cont_emb)
e_cont = e_cont + cont_bias
else:
e_cont = None
if e_cat is not None and e_cont is not None:
return torch.cat([e_cat, e_cont], dim=-2)
elif e_cat is not None:
return e_cat
elif e_cont is not None:
return e_cont
else:
return None
def forward(self, x: Dict[str, Tensor]):
# temporal/static categorical/continuous known/observed input
s_cat_inp = x.get('s_cat', None)
s_cont_inp = x.get('s_cont', None)
t_cat_k_inp = x.get('k_cat', None)
t_cont_k_inp = x.get('k_cont', None)
t_cat_o_inp = x.get('o_cat', None)
t_cont_o_inp = x.get('o_cont', None)
t_tgt_obs = x['target'] # Has to be present
# Static inputs are expected to be equal for all timesteps
# For memory efficiency there is no assert statement
s_cat_inp = s_cat_inp[:,0,:] if s_cat_inp is not None else None
s_cont_inp = s_cont_inp[:,0,:] if s_cont_inp is not None else None
s_inp = self._apply_embedding(s_cat_inp,
s_cont_inp,
self.s_cat_embed,
self.s_cont_embedding_vectors,
self.s_cont_embedding_bias)
t_known_inp = self._apply_embedding(t_cat_k_inp,
t_cont_k_inp,
self.t_cat_k_embed,
self.t_cont_k_embedding_vectors,
self.t_cont_k_embedding_bias)
t_observed_inp = self._apply_embedding(t_cat_o_inp,
t_cont_o_inp,
self.t_cat_o_embed,
self.t_cont_o_embedding_vectors,
self.t_cont_o_embedding_bias)
# Temporal observed targets
# t_observed_tgt = torch.einsum('btf,fh->btfh', t_tgt_obs, self.t_tgt_embedding_vectors)
t_observed_tgt = torch.matmul(t_tgt_obs.unsqueeze(3).unsqueeze(4), self.t_tgt_embedding_vectors.unsqueeze(1)).squeeze(3)
t_observed_tgt = t_observed_tgt + self.t_tgt_embedding_bias
return s_inp, t_known_inp, t_observed_inp, t_observed_tgt
class VariableSelectionNetwork(nn.Module):
def __init__(self, config, num_inputs):
super().__init__()
self.joint_grn = GRN(config.hidden_size*num_inputs, config.hidden_size, output_size=num_inputs, context_hidden_size=config.hidden_size)
self.var_grns = nn.ModuleList([GRN(config.hidden_size, config.hidden_size, dropout=config.dropout) for _ in range(num_inputs)])
def forward(self, x: Tensor, context: Optional[Tensor] = None):
Xi = x.reshape(*x.shape[:-2], -1)
grn_outputs = self.joint_grn(Xi, c=context)
sparse_weights = F.softmax(grn_outputs, dim=-1)
transformed_embed_list = [m(x[...,i,:]) for i, m in enumerate(self.var_grns)]
transformed_embed = torch.stack(transformed_embed_list, dim=-1)
#the line below performs batched matrix vector multiplication
#for temporal features it's bthf,btf->bth
#for static features it's bhf,bf->bh
variable_ctx = torch.matmul(transformed_embed, sparse_weights.unsqueeze(-1)).squeeze(-1)
return variable_ctx, sparse_weights
class StaticCovariateEncoder(nn.Module):
def __init__(self, config):
super().__init__()
self.vsn = VariableSelectionNetwork(config, config.num_static_vars)
self.context_grns = nn.ModuleList([GRN(config.hidden_size, config.hidden_size, dropout=config.dropout) for _ in range(4)])
def forward(self, x: Tensor) -> Tuple[Tensor, Tensor, Tensor, Tensor]:
variable_ctx, sparse_weights = self.vsn(x)
# Context vectors:
# variable selection context
# enrichment context
# state_c context
# state_h context
cs, ce, ch, cc = tuple(m(variable_ctx) for m in self.context_grns)
return cs, ce, ch, cc
class InterpretableMultiHeadAttention(nn.Module):
def __init__(self, config):
super().__init__()
self.n_head = config.n_head
assert config.hidden_size % config.n_head == 0
self.d_head = config.hidden_size // config.n_head
self.qkv_linears = nn.Linear(config.hidden_size, (2 * self.n_head + 1) * self.d_head, bias=False)
self.out_proj = nn.Linear(self.d_head, config.hidden_size, bias=False)
self.attn_dropout = nn.Dropout(config.attn_dropout)
self.out_dropout = nn.Dropout(config.dropout)
self.scale = self.d_head**-0.5
self.register_buffer("_mask", torch.triu(torch.full((config.example_length, config.example_length), float('-inf')), 1).unsqueeze(0))
def forward(self, x: Tensor, mask_future_timesteps: bool = True) -> Tuple[Tensor, Tensor]:
bs, t, h_size = x.shape
qkv = self.qkv_linears(x)
q, k, v = qkv.split((self.n_head * self.d_head, self.n_head * self.d_head, self.d_head), dim=-1)
q = q.view(bs, t, self.n_head, self.d_head)
k = k.view(bs, t, self.n_head, self.d_head)
v = v.view(bs, t, self.d_head)
# attn_score = torch.einsum('bind,bjnd->bnij', q, k)
attn_score = torch.matmul(q.permute((0, 2, 1, 3)), k.permute((0, 2, 3, 1)))
attn_score.mul_(self.scale)
if mask_future_timesteps:
attn_score = attn_score + self._mask
attn_prob = F.softmax(attn_score, dim=3)
attn_prob = self.attn_dropout(attn_prob)
# attn_vec = torch.einsum('bnij,bjd->bnid', attn_prob, v)
attn_vec = torch.matmul(attn_prob, v.unsqueeze(1))
m_attn_vec = torch.mean(attn_vec, dim=1)
out = self.out_proj(m_attn_vec)
out = self.out_dropout(out)
return out, attn_vec
class TemporalFusionTransformer(nn.Module):
"""
Implementation of https://arxiv.org/abs/1912.09363
"""
def __init__(self, config):
super().__init__()
if hasattr(config, 'model'):
config = config.model
self.encoder_length = config.encoder_length # this determines how far into the past the encoder uses data from
self.embedding = TFTEmbedding(config)
self.static_encoder = StaticCovariateEncoder(config)
self.history_vsn = VariableSelectionNetwork(config, config.num_historic_vars)
self.history_encoder = nn.LSTM(config.hidden_size, config.hidden_size, batch_first=True)
self.future_vsn = VariableSelectionNetwork(config, config.num_future_vars)
self.future_encoder = nn.LSTM(config.hidden_size, config.hidden_size, batch_first=True)
self.input_gate = GLU(config.hidden_size, config.hidden_size)
self.input_gate_ln = LayerNorm(config.hidden_size, eps=1e-3)
self.enrichment_grn = GRN(config.hidden_size,
config.hidden_size,
context_hidden_size=config.hidden_size,
dropout=config.dropout)
self.attention = InterpretableMultiHeadAttention(config)
self.attention_gate = GLU(config.hidden_size, config.hidden_size)
self.attention_ln = LayerNorm(config.hidden_size, eps=1e-3)
self.positionwise_grn = GRN(config.hidden_size,
config.hidden_size,
dropout=config.dropout)
self.decoder_gate = GLU(config.hidden_size, config.hidden_size)
self.decoder_ln = LayerNorm(config.hidden_size, eps=1e-3)
self.quantile_proj = nn.Linear(config.hidden_size, len(config.quantiles))
def forward(self, x: Dict[str, Tensor]) -> Tensor:
s_inp, t_known_inp, t_observed_inp, t_observed_tgt = self.embedding(x)
# Static context
cs, ce, ch, cc = self.static_encoder(s_inp)
ch, cc = ch.unsqueeze(0), cc.unsqueeze(0) #lstm initial states
# Temporal input
_historical_inputs = [t_known_inp[:,:self.encoder_length,:], t_observed_tgt[:,:self.encoder_length,:]]
if t_observed_inp is not None:
_historical_inputs.insert(0,t_observed_inp[:,:self.encoder_length,:])
historical_inputs = torch.cat(_historical_inputs, dim=-2)
future_inputs = t_known_inp[:, self.encoder_length:]
# Encoders
historical_features, _ = self.history_vsn(historical_inputs, cs)
history, state = self.history_encoder(historical_features, (ch, cc))
future_features, _ = self.future_vsn(future_inputs, cs)
future, _ = self.future_encoder(future_features, state)
torch.cuda.synchronize() # this call gives perf boost for unknown reasons
# skip connection
input_embedding = torch.cat([historical_features, future_features], dim=1)
temporal_features = torch.cat([history, future], dim=1)
temporal_features = self.input_gate(temporal_features)
temporal_features = temporal_features + input_embedding
temporal_features = self.input_gate_ln(temporal_features)
# Static enrichment
enriched = self.enrichment_grn(temporal_features, c=ce)
# Temporal self attention
x, _ = self.attention(enriched, mask_future_timesteps=True)
# Don't compute historical quantiles
x = x[:, self.encoder_length:, :]
temporal_features = temporal_features[:, self.encoder_length:, :]
enriched = enriched[:, self.encoder_length:, :]
x = self.attention_gate(x)
x = x + enriched
x = self.attention_ln(x)
# Position-wise feed-forward
x = self.positionwise_grn(x)
# Final skip connection
x = self.decoder_gate(x)
x = x + temporal_features
x = self.decoder_ln(x)
out = self.quantile_proj(x)
return out
|
TensorFlow2/Detection/Efficientdet | Efficientdet | eval | # Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Eval libraries."""
import os
from mpi4py import MPI
from absl import app
from absl import flags
from absl import logging
import tensorflow as tf
import horovod.tensorflow.keras as hvd
from model import anchors
from model import coco_metric
from model import dataloader
from model import efficientdet_keras
from model import label_util
from model import postprocess
from utils import hparams_config
from utils import model_utils
from utils import util_keras
from utils.horovod_utils import get_rank, get_world_size, is_main_process
flags.DEFINE_integer('eval_samples', 5000, 'Number of eval samples.')
flags.DEFINE_string('val_file_pattern', None,
'Glob for eval tfrecords, e.g. coco/val-*.tfrecord.')
flags.DEFINE_string('val_json_file', None,
'Groundtruth, e.g. annotations/instances_val2017.json.')
flags.DEFINE_string('model_name', 'efficientdet-d0', 'Model name to use.')
flags.DEFINE_string('ckpt_path', None, 'Checkpoint path to evaluate')
flags.DEFINE_integer('batch_size', 8, 'Local batch size.')
flags.DEFINE_string('only_this_epoch', None, 'Evaluate only this epoch checkpoint.')
flags.DEFINE_bool('enable_map_parallelization', True, 'Parallelize stateless map transformations in dataloader')
flags.DEFINE_bool('amp', True, 'Use mixed precision for eval.')
flags.DEFINE_string('hparams', '', 'Comma separated k=v pairs or a yaml file.')
FLAGS = flags.FLAGS
def main(_):
hvd.init()
gpus = tf.config.experimental.list_physical_devices('GPU')
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
if gpus:
tf.config.experimental.set_visible_devices(gpus[hvd.local_rank()], 'GPU')
if FLAGS.amp:
policy = tf.keras.mixed_precision.experimental.Policy('mixed_float16')
tf.keras.mixed_precision.experimental.set_policy(policy)
else:
os.environ['TF_ENABLE_AUTO_MIXED_PRECISION'] = '0'
config = hparams_config.get_efficientdet_config(FLAGS.model_name)
config.override(FLAGS.hparams)
config.val_json_file = FLAGS.val_json_file
config.nms_configs.max_nms_inputs = anchors.MAX_DETECTION_POINTS
config.drop_remainder = False # eval all examples w/o drop.
config.image_size = model_utils.parse_image_size(config['image_size'])
@tf.function
def model_fn(images, labels):
cls_outputs, box_outputs = model(images, training=False)
detections = postprocess.generate_detections(config, cls_outputs, box_outputs,
labels['image_scales'],
labels['source_ids'])
tf.numpy_function(evaluator.update_state,
[labels['groundtruth_data'],
postprocess.transform_detections(detections)], [])
# Network
model = efficientdet_keras.EfficientDetNet(config=config)
model.build((None, *config.image_size, 3))
# dataset
batch_size = FLAGS.batch_size # local batch size.
ds = dataloader.InputReader(
FLAGS.val_file_pattern,
is_training=False,
max_instances_per_image=config.max_instances_per_image,
enable_map_parallelization=FLAGS.enable_map_parallelization)(
config, batch_size=batch_size)
ds = ds.shard(get_world_size(), get_rank())
# Evaluator for AP calculation.
label_map = label_util.get_label_map(config.label_map)
evaluator = coco_metric.EvaluationMetric(
filename=config.val_json_file, label_map=label_map)
util_keras.restore_ckpt(model, FLAGS.ckpt_path, config.moving_average_decay,
steps_per_epoch=0, skip_mismatch=False, expect_partial=True)
if FLAGS.eval_samples:
num_samples = (FLAGS.eval_samples + get_world_size() - 1) // get_world_size()
num_samples = (num_samples + batch_size - 1) // batch_size
ds = ds.take(num_samples)
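# Illustrative arithmetic (hypothetical 8-rank run with the default flags):
# eval_samples=5000, batch_size=8 -> ceil(5000 / 8) = 625 samples per rank,
# so num_samples = ceil(625 / 8) = 79 batches are taken from the sharded dataset.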
evaluator.reset_states()
# evaluate all images.
pbar = tf.keras.utils.Progbar(num_samples)
for i, (images, labels) in enumerate(ds):
model_fn(images, labels)
if is_main_process():
pbar.update(i)
# gather detections from all ranks
evaluator.gather()
if is_main_process():
# compute the final eval results.
metrics = evaluator.result()
metric_dict = {}
for i, name in enumerate(evaluator.metric_names):
metric_dict[name] = metrics[i]
if label_map:
for i, cid in enumerate(sorted(label_map.keys())):
name = 'AP_/%s' % label_map[cid]
metric_dict[name] = metrics[i + len(evaluator.metric_names)]
# csv format
csv_metrics = ['AP','AP50','AP75','APs','APm','APl']
csv_format = ",".join([str(round(metric_dict[key] * 100, 2)) for key in csv_metrics])
print(FLAGS.model_name, metric_dict, "csv format:", csv_format)
MPI.COMM_WORLD.Barrier()
if __name__ == '__main__':
flags.mark_flag_as_required('val_file_pattern')
flags.mark_flag_as_required('val_json_file')
flags.mark_flag_as_required('ckpt_path')
logging.set_verbosity(logging.WARNING)
app.run(main)
|
TensorFlow2/Recommendation/WideAndDeep/triton/deployment_toolkit/triton_performance_runner/perf_analyzer | perf_analyzer | warmup | # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import pathlib
from distutils.version import LooseVersion
from importlib.metadata import version
from typing import List, Optional
# method from PEP-366 to support relative import in executed modules
if __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
from ...core import EvaluationMode, MeasurementMode, OfflineMode
from ...utils import parse_server_url
from .perf_analyzer import PerfAnalyzer
from .perf_config import PerfAnalyzerConfig
LOGGER = logging.getLogger("warmup")
TRITON_CLIENT_VERSION = LooseVersion(version("tritonclient"))
class PerfAnalyzerWarmupRunner:
def __init__(
self,
server_url: str,
model_name: str,
batch_sizes: List[int],
concurrency: List[int],
input_data: str,
input_shapes: List[str],
measurement_mode: MeasurementMode,
measurement_interval: int,
measurement_request_count: int,
offline_mode: OfflineMode,
evaluation_mode: EvaluationMode,
output_shared_memory_size: int,
timeout: Optional[int],
):
self._model_name = model_name
self._input_data = input_data
self._input_shapes = input_shapes
self._measurement_mode = measurement_mode
self._offline_mode = offline_mode
self._evaluation_mode = evaluation_mode
self._output_shared_memory_size = output_shared_memory_size
self._protocol, self._host, self._port = parse_server_url(server_url)
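# Warmup deliberately doubles the measurement window and request count, and exercises
# only the smallest batch size with the highest concurrency (see the assignments below).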
self._measurement_interval = 2 * measurement_interval
self._measurement_request_count = 2 * measurement_request_count
self._batch_sizes = [min(batch_sizes)]
self._concurrency = [max(concurrency)]
self._timeout = timeout
def run(self):
for batch_size in self._batch_sizes:
for concurrency in self._concurrency:
params = {
"model-name": self._model_name,
"model-version": 1,
"batch-size": batch_size,
"url": f"{self._host}:{self._port}",
"protocol": self._protocol.value,
"input-data": self._input_data,
"measurement-interval": self._measurement_interval,
"concurrency-range": f"{concurrency}:{concurrency}:1",
"verbose": True,
}
if TRITON_CLIENT_VERSION >= LooseVersion("2.11.0"):
params["measurement-mode"] = self._measurement_mode.value
params["measurement-request-count"] = self._measurement_request_count
if self._evaluation_mode == EvaluationMode.OFFLINE:
params["shared-memory"] = self._offline_mode.value
params["output-shared-memory-size"] = self._output_shared_memory_size
config = PerfAnalyzerConfig()
for param, value in params.items():
config[param] = value
for shape in self._input_shapes:
config["shape"] = shape
perf_analyzer = PerfAnalyzer(config=config, timeout=self._timeout)
perf_analyzer.run()
|
DGLPyTorch/DrugDiscovery/SE3Transformer/se3_transformer/model | model | transformer | # Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES
# SPDX-License-Identifier: MIT
import logging
from typing import Optional, Literal, Dict
import torch
import torch.nn as nn
from dgl import DGLGraph
from torch import Tensor
from se3_transformer.model.basis import get_basis, update_basis_with_fused
from se3_transformer.model.layers.attention import AttentionBlockSE3
from se3_transformer.model.layers.convolution import ConvSE3, ConvSE3FuseLevel
from se3_transformer.model.layers.norm import NormSE3
from se3_transformer.model.layers.pooling import GPooling
from se3_transformer.runtime.utils import str2bool
from se3_transformer.model.fiber import Fiber
class Sequential(nn.Sequential):
""" Sequential module with arbitrary forward args and kwargs. Used to pass graph, basis and edge features. """
def forward(self, input, *args, **kwargs):
for module in self:
input = module(input, *args, **kwargs)
return input
def get_populated_edge_features(relative_pos: Tensor, edge_features: Optional[Dict[str, Tensor]] = None):
""" Add relative positions to existing edge features """
edge_features = edge_features.copy() if edge_features else {}
r = relative_pos.norm(dim=-1, keepdim=True)
if '0' in edge_features:
edge_features['0'] = torch.cat([edge_features['0'], r[..., None]], dim=1)
else:
edge_features['0'] = r[..., None]
return edge_features
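# Illustrative shapes: if edge_features['0'] has shape (num_edges, C, 1), the dict
# returned above holds a (num_edges, C + 1, 1) tensor under '0', the extra channel
# being the edge length ||rel_pos||; without a prior '0' entry the length is the only channel.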
class SE3Transformer(nn.Module):
def __init__(self,
num_layers: int,
fiber_in: Fiber,
fiber_hidden: Fiber,
fiber_out: Fiber,
num_heads: int,
channels_div: int,
fiber_edge: Fiber = Fiber({}),
return_type: Optional[int] = None,
pooling: Optional[Literal['avg', 'max']] = None,
norm: bool = True,
use_layer_norm: bool = True,
tensor_cores: bool = False,
low_memory: bool = False,
**kwargs):
"""
:param num_layers: Number of attention layers
:param fiber_in: Input fiber description
:param fiber_hidden: Hidden fiber description
:param fiber_out: Output fiber description
:param fiber_edge: Input edge fiber description
:param num_heads: Number of attention heads
:param channels_div: Channels division before feeding to attention layer
:param return_type: Return only features of this type
:param pooling: 'avg' or 'max' graph pooling before MLP layers
:param norm: Apply a normalization layer after each attention block
:param use_layer_norm: Apply layer normalization between MLP layers
:param tensor_cores: True if using Tensor Cores (affects the use of fully fused convs, and padded bases)
:param low_memory: If True, will use slower ops that use less memory
"""
super().__init__()
self.num_layers = num_layers
self.fiber_edge = fiber_edge
self.num_heads = num_heads
self.channels_div = channels_div
self.return_type = return_type
self.pooling = pooling
self.max_degree = max(*fiber_in.degrees, *fiber_hidden.degrees, *fiber_out.degrees)
self.tensor_cores = tensor_cores
self.low_memory = low_memory
if low_memory:
self.fuse_level = ConvSE3FuseLevel.NONE
else:
# Fully fused convolutions when using Tensor Cores (and not low memory mode)
self.fuse_level = ConvSE3FuseLevel.FULL if tensor_cores else ConvSE3FuseLevel.PARTIAL
graph_modules = []
for i in range(num_layers):
graph_modules.append(AttentionBlockSE3(fiber_in=fiber_in,
fiber_out=fiber_hidden,
fiber_edge=fiber_edge,
num_heads=num_heads,
channels_div=channels_div,
use_layer_norm=use_layer_norm,
max_degree=self.max_degree,
fuse_level=self.fuse_level,
low_memory=low_memory))
if norm:
graph_modules.append(NormSE3(fiber_hidden))
fiber_in = fiber_hidden
graph_modules.append(ConvSE3(fiber_in=fiber_in,
fiber_out=fiber_out,
fiber_edge=fiber_edge,
self_interaction=True,
use_layer_norm=use_layer_norm,
max_degree=self.max_degree,
fuse_level=self.fuse_level,
low_memory=low_memory))
self.graph_modules = Sequential(*graph_modules)
if pooling is not None:
assert return_type is not None, 'return_type must be specified when pooling'
self.pooling_module = GPooling(pool=pooling, feat_type=return_type)
def forward(self, graph: DGLGraph, node_feats: Dict[str, Tensor],
edge_feats: Optional[Dict[str, Tensor]] = None,
basis: Optional[Dict[str, Tensor]] = None):
# Compute bases in case they weren't precomputed as part of the data loading
basis = basis or get_basis(graph.edata['rel_pos'], max_degree=self.max_degree, compute_gradients=False,
use_pad_trick=self.tensor_cores and not self.low_memory,
amp=torch.is_autocast_enabled())
# Add fused bases (per output degree, per input degree, and fully fused) to the dict
basis = update_basis_with_fused(basis, self.max_degree, use_pad_trick=self.tensor_cores and not self.low_memory,
fully_fused=self.fuse_level == ConvSE3FuseLevel.FULL)
edge_feats = get_populated_edge_features(graph.edata['rel_pos'], edge_feats)
node_feats = self.graph_modules(node_feats, edge_feats, graph=graph, basis=basis)
if self.pooling is not None:
return self.pooling_module(node_feats, graph=graph)
if self.return_type is not None:
return node_feats[str(self.return_type)]
return node_feats
@staticmethod
def add_argparse_args(parser):
parser.add_argument('--num_layers', type=int, default=7,
help='Number of stacked Transformer layers')
parser.add_argument('--num_heads', type=int, default=8,
help='Number of heads in self-attention')
parser.add_argument('--channels_div', type=int, default=2,
help='Channels division before feeding to attention layer')
parser.add_argument('--pooling', type=str, default=None, const=None, nargs='?', choices=['max', 'avg'],
help='Type of graph pooling')
parser.add_argument('--norm', type=str2bool, nargs='?', const=True, default=False,
help='Apply a normalization layer after each attention block')
parser.add_argument('--use_layer_norm', type=str2bool, nargs='?', const=True, default=False,
help='Apply layer normalization between MLP layers')
parser.add_argument('--low_memory', type=str2bool, nargs='?', const=True, default=False,
help='If true, will use slower ops that use less memory '
'(expect 25 percent less memory). '
'Only has an effect if AMP is enabled on Volta GPUs, or if running on Ampere GPUs')
return parser
class SE3TransformerPooled(nn.Module):
def __init__(self,
fiber_in: Fiber,
fiber_out: Fiber,
fiber_edge: Fiber,
num_degrees: int,
num_channels: int,
output_dim: int,
**kwargs):
super().__init__()
kwargs['pooling'] = kwargs['pooling'] or 'max'
self.transformer = SE3Transformer(
fiber_in=fiber_in,
fiber_hidden=Fiber.create(num_degrees, num_channels),
fiber_out=fiber_out,
fiber_edge=fiber_edge,
return_type=0,
**kwargs
)
n_out_features = fiber_out.num_features
self.mlp = nn.Sequential(
nn.Linear(n_out_features, n_out_features),
nn.ReLU(),
nn.Linear(n_out_features, output_dim)
)
def forward(self, graph, node_feats, edge_feats, basis=None):
feats = self.transformer(graph, node_feats, edge_feats, basis).squeeze(-1)
y = self.mlp(feats).squeeze(-1)
return y
@staticmethod
def add_argparse_args(parent_parser):
parser = parent_parser.add_argument_group("Model architecture")
SE3Transformer.add_argparse_args(parser)
parser.add_argument('--num_degrees',
help='Number of degrees to use. Hidden features will have types [0, ..., num_degrees - 1]',
type=int, default=4)
parser.add_argument('--num_channels', help='Number of channels for the hidden features', type=int, default=32)
return parent_parser
|
CUDA-Optimized/FastSpeech/fastspeech/trt | trt | common | # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import argparse
import os
from itertools import chain
import numpy as np
import torch
import pycuda.autoinit
import pycuda.driver as cuda
import tensorrt as trt
##
# Common
##
def GiB(val):
return val * 1 << 30
def input_binding_indices(engine):
return [i for i in range(engine.num_bindings) if engine.binding_is_input(i)]
def output_binding_indices(engine):
return [i for i in range(engine.num_bindings) if not engine.binding_is_input(i)]
def trt_input_names(engine):
return [engine.get_binding_name(i) for i in input_binding_indices(engine)]
def trt_output_names(engine):
return [engine.get_binding_name(i) for i in output_binding_indices(engine)]
def set_input_shapes(engine, context, inputs):
def is_dimension_dynamic(dim):
return dim is None or dim <= 0
def is_shape_dynamic(shape):
return any([is_dimension_dynamic(dim) for dim in shape])
for idx, tensor in enumerate(inputs):
if engine.is_shape_binding(idx) and is_shape_dynamic(context.get_shape(idx)):
context.set_shape_input(idx, tensor)
elif is_shape_dynamic(engine.get_binding_shape(idx)):
context.set_binding_shape(idx, tensor.shape)
return context
##
# Pytorch Compatibility
##
# Modified from https://github.com/NVIDIA-AI-IOT/jetbot/blob/cf3e264ae6/jetbot/tensorrt_model.py
def torch_dtype_to_trt(dtype):
if dtype == torch.bool:
return trt.bool
elif dtype == torch.int8:
return trt.int8
elif dtype == torch.int32:
return trt.int32
elif dtype == torch.float16:
return trt.float16
elif dtype == torch.float32:
return trt.float32
else:
raise TypeError('%s is not supported by tensorrt' % dtype)
def torch_dtype_from_trt(dtype):
if dtype == trt.bool:
return torch.bool
elif dtype == trt.int8:
return torch.int8
elif dtype == trt.int32:
return torch.int32
elif dtype == trt.float16:
return torch.float16
elif dtype == trt.float32:
return torch.float32
else:
raise TypeError('%s is not supported by torch' % dtype)
def torch_device_to_trt(device):
if device.type == torch.device('cuda').type:
return trt.TensorLocation.DEVICE
elif device.type == torch.device('cpu').type:
return trt.TensorLocation.HOST
else:
raise TypeError('%s is not supported by tensorrt' % device)
def torch_device_from_trt(device):
if device == trt.TensorLocation.DEVICE:
return torch.device('cuda')
elif device == trt.TensorLocation.HOST:
return torch.device('cpu')
else:
raise TypeError('%s is not supported by torch' % device)
def create_inputs_from_torch(engine, inputs_torch):
input_ids = input_binding_indices(engine)
for i, idx in enumerate(input_ids):
inputs_torch[i] = inputs_torch[i].to(torch_device_from_trt(engine.get_location(idx)))
inputs_torch[i] = inputs_torch[i].type(torch_dtype_from_trt(engine.get_binding_dtype(idx)))
return inputs_torch
def create_outputs_from_torch(engine, outputs_shapes=None):
output_ids = output_binding_indices(engine)
outputs = [None] * len(output_ids)
for i, idx in enumerate(output_ids):
dtype = torch_dtype_from_trt(engine.get_binding_dtype(idx))
shape = outputs_shapes[i] if outputs_shapes and outputs_shapes[i] else tuple(engine.get_binding_shape(idx))
device = torch_device_from_trt(engine.get_location(idx))
output = torch.empty(size=shape, dtype=dtype, device=device)
outputs[i] = output
return outputs
|
PyTorch/LanguageModeling/BERT/triton/large/scripts | scripts | setup_parameters | #!/usr/bin/env bash
# Copyright (c) 2021 NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
echo "Setting up deployment parameters"
export FORMAT="onnx"
export PRECISION="fp16"
export EXPORT_FORMAT="onnx"
export EXPORT_PRECISION="fp16"
export ACCELERATOR="trt"
export ACCELERATOR_PRECISION="fp16"
export CAPTURE_CUDA_GRAPH="0"
export BATCH_SIZE="16"
export MAX_BATCH_SIZE="16"
export MAX_SEQ_LENGTH="384"
export CHECKPOINT_VARIANT="large-qa"
export CHECKPOINT_DIR=${CHECKPOINTS_DIR}/${CHECKPOINT_VARIANT}
export TRITON_MAX_QUEUE_DELAY="1"
export TRITON_GPU_ENGINE_COUNT="1"
export TRITON_PREFERRED_BATCH_SIZES="1"
if [[ "${FORMAT}" == "ts-trace" || "${FORMAT}" == "ts-script" ]]; then
export CONFIG_FORMAT="torchscript"
else
export CONFIG_FORMAT="${FORMAT}"
fi
if [[ "${EXPORT_FORMAT}" == "trt" ]]; then
export FLAG="--fixed-batch-dim"
else
export FLAG=""
fi
if [[ "${FORMAT}" == "ts-trace" || "${FORMAT}" == "ts-script" ]]; then
export CONFIG_FORMAT="torchscript"
else
export CONFIG_FORMAT="${FORMAT}"
fi
if [[ "${FORMAT}" == "trt" ]]; then
export MBS="0"
else
export MBS="${MAX_BATCH_SIZE}"
fi
if [[ "${EXPORT_FORMAT}" == "ts-trace" || "${EXPORT_FORMAT}" == "ts-script" ]]; then
export FORMAT_SUFFIX="pt"
else
export FORMAT_SUFFIX="${EXPORT_FORMAT}"
fi
|
PaddlePaddle/LanguageModeling/BERT/utils | utils | task | # Copyright (c) 2022 NVIDIA Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from enum import Enum
class Task(Enum):
pretrain = 'Pretrain'
squad = 'SQuAD'
|
PyTorch/Classification/GPUNet/triton/runner | runner | experiment | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import dataclasses
import pathlib
from datetime import datetime
from typing import Any, Dict, Optional
# method from PEP-366 to support relative import in executed modules
if __name__ == "__main__" and __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
from .core import DataObject
class ExperimentStatus(object):
"""
Experiment status flags object
"""
SUCCEED = "Succeed"
FAILED = "Failed"
class StageStatus:
"""
Stages status flags object
"""
SUCCEED = "Succeed"
FAILED = "Failed"
class Stage(DataObject):
"""
Stage data object
"""
name: str
status: str
started_at: Optional[int]
ended_at: Optional[int]
result_path: Optional[str]
result_type: Optional[str]
def __init__(
self,
name: str,
result_path: Optional[str],
result_type: Optional[str],
status: str = StageStatus.FAILED,
started_at: Optional[int] = None,
ended_at: Optional[int] = None,
):
"""
Args:
name: name of stage
result_path: path where results file is stored
result_type: type of results
status: success/fail status
started_at: time when stage has started
ended_at: time when stage has ended
"""
self.name = name
self.status = status
self.started_at = started_at
self.ended_at = ended_at
self.result_path = result_path
self.result_type = result_type
def start(self) -> None:
"""
Update stage execution info at start
Returns:
None
"""
self.started_at = int(datetime.utcnow().timestamp())
def end(self) -> None:
"""
Update stage execution info at end
Returns:
None
"""
self.status = StageStatus.SUCCEED
self.ended_at = int(datetime.utcnow().timestamp())
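# Illustrative usage (hypothetical names and paths):
#   stage = Stage(name="convert", result_path="results/convert.csv", result_type="conversion")
#   stage.start()
#   ...          # run the stage
#   stage.end()  # flips the status from FAILED to SUCCEED and records the end timestamp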
class Experiment(DataObject):
"""
Experiment data object
"""
experiment_id: int
parameters: Dict
stages: Dict[str, Stage]
results: Dict[str, str]
status: str
checkpoint_variant: str
started_at: Optional[int]
ended_at: Optional[int]
def __init__(
self,
experiment_id: int,
parameters: Dict,
stages: Dict[str, Stage],
results: Dict[str, str],
checkpoint: str,
started_at: Optional[int] = None,
ended_at: Optional[int] = None,
status: str = ExperimentStatus.FAILED,
):
"""
Args:
experiment_id: experiment identifier
parameters: dictionary with experiment configuration
stages: dictionary with stages run in experiment
results: mapping between results types and location where are stored
started_at: time when experiment has started
ended_at: time when experiment has ended
status: experiment success/fail information
checkpoint: Checkpoint used for experiment
"""
self.experiment_id = experiment_id
self.started_at = started_at
self.ended_at = ended_at
self.parameters = parameters
self.stages = stages
self.status = status
self.checkpoint = checkpoint
self.results = results
self.results_dir = f"experiment_{experiment_id}"
def start(self) -> None:
"""
Update experiment execution info at start
Returns:
None
"""
self.started_at = int(datetime.utcnow().timestamp())
def end(self) -> None:
"""
Update experiment execution info at end
Returns:
None
"""
self.status = ExperimentStatus.SUCCEED
self.ended_at = int(datetime.utcnow().timestamp())
@dataclasses.dataclass
class Status:
state: ExperimentStatus
message: str
@dataclasses.dataclass
class ExperimentResult:
"""
Experiment result object
"""
status: Status
experiment: Experiment
results: Dict[str, pathlib.Path]
payload: Dict[str, Any] = dataclasses.field(default_factory=dict)
|
TensorFlow/Segmentation/UNet_Industrial/scripts | scripts | UNet_AMP_8GPU_XLA | #!/usr/bin/env bash
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script launches UNet training with automatic mixed precision (AMP) on 8 GPUs using a global batch size of 16 (2 per GPU)
# Usage ./UNet_AMP_8GPU_XLA.sh <path to result repository> <path to dataset> <dagm classID (1-10)>
BASEDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
export TF_CPP_MIN_LOG_LEVEL=3
mpirun \
-np 8 \
-H localhost:8 \
-bind-to none \
-map-by slot \
-x NCCL_DEBUG=VERSION \
-x LD_LIBRARY_PATH \
-x PATH \
-mca pml ob1 -mca btl ^openib \
--allow-run-as-root \
python "${BASEDIR}/../main.py" \
--unet_variant='tinyUNet' \
--activation_fn='relu' \
--exec_mode='train_and_evaluate' \
--iter_unit='batch' \
--num_iter=2500 \
--batch_size=2 \
--warmup_step=10 \
--results_dir="${1}" \
--data_dir="${2}" \
--dataset_name='DAGM2007' \
--dataset_classID="${3}" \
--data_format='NCHW' \
--use_auto_loss_scaling \
--amp \
--xla \
--learning_rate=1e-4 \
--learning_rate_decay_factor=0.8 \
--learning_rate_decay_steps=500 \
--rmsprop_decay=0.9 \
--rmsprop_momentum=0.8 \
--loss_fn_name='adaptive_loss' \
--weight_decay=1e-5 \
--weight_init_method='he_uniform' \
--augment_data \
--display_every=250 \
--debug_verbosity=0
|
PyTorch/SpeechSynthesis/Tacotron2/tacotron2/text | text | __init__ | """ from https://github.com/keithito/tacotron """
import re
from tacotron2.text import cleaners
from tacotron2.text.symbols import symbols
# Mappings from symbol to numeric ID and vice versa:
_symbol_to_id = {s: i for i, s in enumerate(symbols)}
_id_to_symbol = {i: s for i, s in enumerate(symbols)}
# Regular expression matching text enclosed in curly braces:
_curly_re = re.compile(r'(.*?)\{(.+?)\}(.*)')
def text_to_sequence(text, cleaner_names):
'''Converts a string of text to a sequence of IDs corresponding to the symbols in the text.
The text can optionally have ARPAbet sequences enclosed in curly braces embedded
in it. For example, "Turn left on {HH AW1 S S T AH0 N} Street."
Args:
text: string to convert to a sequence
cleaner_names: names of the cleaner functions to run the text through
Returns:
List of integers corresponding to the symbols in the text
'''
sequence = []
# Check for curly braces and treat their contents as ARPAbet:
while len(text):
m = _curly_re.match(text)
if not m:
sequence += _symbols_to_sequence(_clean_text(text, cleaner_names))
break
sequence += _symbols_to_sequence(_clean_text(m.group(1), cleaner_names))
sequence += _arpabet_to_sequence(m.group(2))
text = m.group(3)
return sequence
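# Illustrative round trip (assuming the standard cleaner names defined in
# tacotron2.text.cleaners, e.g. 'english_cleaners'):
#   sequence_to_text(text_to_sequence(text, ['english_cleaners']))
# approximately recovers the cleaned input text, with ARPAbet spans re-wrapped in curly braces.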
def sequence_to_text(sequence):
'''Converts a sequence of IDs back to a string'''
result = ''
for symbol_id in sequence:
if symbol_id in _id_to_symbol:
s = _id_to_symbol[symbol_id]
# Enclose ARPAbet back in curly braces:
if len(s) > 1 and s[0] == '@':
s = '{%s}' % s[1:]
result += s
return result.replace('}{', ' ')
def _clean_text(text, cleaner_names):
for name in cleaner_names:
cleaner = getattr(cleaners, name)
if not cleaner:
raise Exception('Unknown cleaner: %s' % name)
text = cleaner(text)
return text
def _symbols_to_sequence(symbols):
return [_symbol_to_id[s] for s in symbols if _should_keep_symbol(s)]
def _arpabet_to_sequence(text):
return _symbols_to_sequence(['@' + s for s in text.split()])
def _should_keep_symbol(s):
return s in _symbol_to_id and s != '_' and s != '~'
|
PyTorch/Translation/Transformer/fairseq | fairseq | utils | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
#
#--------------------------------------------------------------------
#
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import defaultdict, OrderedDict
import logging
import os
import re
import torch
import traceback
from torch.serialization import default_restore_location
def torch_persistent_save(*args, **kwargs):
for i in range(3):
try:
return torch.save(*args, **kwargs)
except Exception:
if i == 2:
logging.error(traceback.format_exc())
def convert_state_dict_type(state_dict, ttype=torch.FloatTensor):
if isinstance(state_dict, dict):
cpu_dict = OrderedDict()
for k, v in state_dict.items():
cpu_dict[k] = convert_state_dict_type(v)
return cpu_dict
elif isinstance(state_dict, list):
return [convert_state_dict_type(v) for v in state_dict]
elif torch.is_tensor(state_dict):
return state_dict.type(ttype)
else:
return state_dict
def save_state(filename, args, model, criterion, optimizer, lr_scheduler,
num_updates, optim_history=None, extra_state=None):
if optim_history is None:
optim_history = []
if extra_state is None:
extra_state = {}
state_dict = {
'args': args,
'model': convert_state_dict_type(model.state_dict()),
'optimizer_history': optim_history + [
{
'criterion_name': criterion.__class__.__name__,
'optimizer_name': optimizer.__class__.__name__,
'lr_scheduler_state': lr_scheduler.state_dict(),
'num_updates': num_updates,
}
],
'last_optimizer_state': convert_state_dict_type(optimizer.state_dict()),
'extra_state': extra_state,
}
torch_persistent_save(state_dict, filename)
def load_model_state(filename, model):
if not os.path.exists(filename):
return None, [], None
state = torch.load(filename, map_location=lambda s, l: default_restore_location(s, 'cpu'))
# load model parameters
try:
model.load_state_dict(state['model'], strict=True)
except Exception:
raise Exception('Cannot load model parameters from checkpoint, '
'please ensure that the architectures match')
return state['extra_state'], state['optimizer_history'], state['last_optimizer_state']
def move_to_cuda(sample):
if len(sample) == 0:
return {}
def _move_to_cuda(maybe_tensor):
if torch.is_tensor(maybe_tensor):
return maybe_tensor.cuda()
elif isinstance(maybe_tensor, dict):
return {
key: _move_to_cuda(value)
for key, value in maybe_tensor.items()
}
elif isinstance(maybe_tensor, list):
return [_move_to_cuda(x) for x in maybe_tensor]
else:
return maybe_tensor
return _move_to_cuda(sample)
INCREMENTAL_STATE_INSTANCE_ID = defaultdict(lambda: 0)
def _get_full_incremental_state_key(module_instance, key):
module_name = module_instance.__class__.__name__
# assign a unique ID to each module instance, so that incremental state is
# not shared across module instances
if not hasattr(module_instance, '_fairseq_instance_id'):
INCREMENTAL_STATE_INSTANCE_ID[module_name] += 1
module_instance._fairseq_instance_id = INCREMENTAL_STATE_INSTANCE_ID[module_name]
return '{}.{}.{}'.format(module_name, module_instance._fairseq_instance_id, key)
def get_incremental_state(module, incremental_state, key):
"""Helper for getting incremental state for an nn.Module."""
full_key = _get_full_incremental_state_key(module, key)
if incremental_state is None or full_key not in incremental_state:
return None
return incremental_state[full_key]
def set_incremental_state(module, incremental_state, key, value):
"""Helper for setting incremental state for an nn.Module."""
if incremental_state is not None:
full_key = _get_full_incremental_state_key(module, key)
incremental_state[full_key] = value
def load_align_dict(replace_unk):
if replace_unk is None:
align_dict = None
elif isinstance(replace_unk, str):
# Load alignment dictionary for unknown word replacement if it was passed as an argument.
align_dict = {}
with open(replace_unk, 'r') as f:
for line in f:
cols = line.split()
align_dict[cols[0]] = cols[1]
else:
# No alignment dictionary provided but we still want to perform unknown word replacement by copying
# the original source word.
align_dict = {}
return align_dict
def print_embed_overlap(embed_dict, vocab_dict):
embed_keys = set(embed_dict.keys())
vocab_keys = set(vocab_dict.symbols)
overlap = len(embed_keys & vocab_keys)
print("| Found {}/{} types in embedding file.".format(overlap, len(vocab_dict)))
def parse_embedding(embed_path):
"""Parse embedding text file into a dictionary of word and embedding tensors.
The first line can have vocabulary size and dimension. The following lines
should contain word and embedding separated by spaces.
Example:
2 5
the -0.0230 -0.0264 0.0287 0.0171 0.1403
at -0.0395 -0.1286 0.0275 0.0254 -0.0932
"""
embed_dict = {}
with open(embed_path) as f_embed:
next(f_embed) # skip header
for line in f_embed:
pieces = line.rstrip().split(" ")
embed_dict[pieces[0]] = torch.Tensor([float(weight) for weight in pieces[1:]])
return embed_dict
def load_embedding(embed_dict, vocab, embedding):
for idx in range(len(vocab)):
token = vocab[idx]
if token in embed_dict:
embedding.weight.data[idx] = embed_dict[token]
return embedding
def replace_unk(hypo_str, src_str, alignment, align_dict, unk):
from fairseq import tokenizer
# Tokens are strings here
hypo_tokens = tokenizer.tokenize_line(hypo_str)
# TODO: Very rare cases where the replacement is '<eos>' should be handled gracefully
src_tokens = tokenizer.tokenize_line(src_str) + ['<eos>']
for i, ht in enumerate(hypo_tokens):
if ht == unk:
src_token = src_tokens[alignment[i]]
# Either take the corresponding value in the aligned dictionary or just copy the original value.
hypo_tokens[i] = align_dict.get(src_token, src_token)
return ' '.join(hypo_tokens)
def post_process_prediction(hypo_tokens, src_str, alignment, align_dict, tgt_dict, remove_bpe):
from fairseq import tokenizer
hypo_str = tgt_dict.string(hypo_tokens, remove_bpe)
if align_dict is not None:
hypo_str = replace_unk(hypo_str, src_str, alignment, align_dict, tgt_dict.unk_string())
if align_dict is not None or remove_bpe is not None:
# Convert back to tokens for evaluating with unk replacement or without BPE
# Note that the dictionary can be modified inside the method.
hypo_tokens = tokenizer.Tokenizer.tokenize(hypo_str, tgt_dict, add_if_not_exist=True)
return hypo_tokens, hypo_str, alignment
def make_positions(tensor, padding_idx, left_pad):
"""Replace non-padding symbols with their position numbers.
Position numbers begin at padding_idx+1.
Padding symbols are ignored, but it is necessary to specify whether padding
is added on the left side (left_pad=True) or right side (left_pad=False).
"""
max_pos = padding_idx + 1 + tensor.size(1)
if not hasattr(make_positions, 'range_buf'):
make_positions.range_buf = torch.arange(padding_idx + 1, 768,
dtype=tensor.dtype, device=tensor.device)
make_positions.range_buf = make_positions.range_buf.type_as(tensor)
if make_positions.range_buf.numel() < max_pos:
torch.arange(padding_idx + 1, max_pos, out=make_positions.range_buf)
mask = tensor.ne(padding_idx)
positions = make_positions.range_buf[:tensor.size(1)].expand_as(tensor)
if left_pad:
positions = positions - mask.size(1) + mask.long().sum(dim=1).unsqueeze(1)
return tensor.clone().masked_scatter_(mask, positions[mask])
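# Worked example for make_positions above (illustrative, assuming padding_idx=1):
#   right padding:               [[7, 8, 9, 1, 1]] -> [[2, 3, 4, 1, 1]]
#   left padding (left_pad=True): [[1, 1, 7, 8, 9]] -> [[1, 1, 2, 3, 4]]
# Non-padding symbols are numbered from padding_idx + 1; padded positions keep
# the padding index.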
def strip_pad(tensor, pad):
return tensor[tensor.ne(pad)]
def buffered_arange(max):
if not hasattr(buffered_arange, 'buf'):
buffered_arange.buf = torch.LongTensor()
if max > buffered_arange.buf.numel():
torch.arange(max, out=buffered_arange.buf)
return buffered_arange.buf[:max]
def convert_padding_direction(src_tokens, padding_idx, right_to_left=False, left_to_right=False):
assert right_to_left ^ left_to_right
pad_mask = src_tokens.eq(padding_idx)
if not pad_mask.any():
# no padding, return early
return src_tokens
if left_to_right and not pad_mask[:, 0].any():
# already right padded
return src_tokens
if right_to_left and not pad_mask[:, -1].any():
# already left padded
return src_tokens
max_len = src_tokens.size(1)
range = buffered_arange(max_len).type_as(src_tokens).expand_as(src_tokens)
num_pads = pad_mask.long().sum(dim=1, keepdim=True)
if right_to_left:
index = torch.remainder(range - num_pads, max_len)
else:
index = torch.remainder(range + num_pads, max_len)
return src_tokens.gather(1, index)
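# Worked example for convert_padding_direction above (illustrative, assuming
# padding_idx=1): with right_to_left=True, [[7, 8, 9, 1, 1]] becomes
# [[1, 1, 7, 8, 9]]; with left_to_right=True the inverse mapping applies.
# The remainder trick rotates each row by its own number of pad tokens.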
def item(tensor):
if hasattr(tensor, 'item'):
return tensor.item()
if hasattr(tensor, '__getitem__'):
return tensor[0]
return tensor
def clip_grad_norm_(tensor, max_norm):
grad_norm = item(torch.norm(tensor))
if grad_norm > max_norm > 0:
clip_coef = max_norm / (grad_norm + 1e-6)
tensor.mul_(clip_coef)
return grad_norm
def fill_with_neg_inf(t):
"""FP16-compatible function that fills a tensor with -inf."""
return t.float().fill_(float('-inf')).type_as(t)
def checkpoint_paths(path, pattern=r'checkpoint(\d+)\.pt'):
"""Retrieves all checkpoints found in `path` directory.
Checkpoints are identified by matching filename to the specified pattern. If
the pattern contains groups, the result will be sorted by the first group in
descending order.
"""
pt_regexp = re.compile(pattern)
files = os.listdir(path)
entries = []
for i, f in enumerate(files):
m = pt_regexp.fullmatch(f)
if m is not None:
idx = int(m.group(1)) if len(m.groups()) > 0 else i
entries.append((idx, m.group(0)))
return [os.path.join(path, x[1]) for x in sorted(entries, reverse=True)]
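# Illustrative example for checkpoint_paths above (a sketch, assuming the
# default pattern): a directory holding checkpoint1.pt, checkpoint2.pt and
# checkpoint10.pt yields
#   ['<path>/checkpoint10.pt', '<path>/checkpoint2.pt', '<path>/checkpoint1.pt']
# i.e. the newest checkpoints (by numeric index) come first.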
|
TensorFlow/Detection/SSD/models/research/slim/nets/nasnet | nasnet | nasnet_utils | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A custom module for some common operations used by NASNet.
Functions exposed in this file:
- calc_reduction_layers
- get_channel_index
- get_channel_dim
- global_avg_pool
- factorized_reduction
- drop_path
Classes exposed in this file:
- NasNetABaseCell
- NasNetANormalCell
- NasNetAReductionCell
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
arg_scope = tf.contrib.framework.arg_scope
slim = tf.contrib.slim
DATA_FORMAT_NCHW = 'NCHW'
DATA_FORMAT_NHWC = 'NHWC'
INVALID = 'null'
# The cap for tf.clip_by_value; the activation distribution suggests that the
# majority of activation values fall in the range [-6, 6].
CLIP_BY_VALUE_CAP = 6
def calc_reduction_layers(num_cells, num_reduction_layers):
"""Figure out what layers should have reductions."""
reduction_layers = []
for pool_num in range(1, num_reduction_layers + 1):
layer_num = (float(pool_num) / (num_reduction_layers + 1)) * num_cells
layer_num = int(layer_num)
reduction_layers.append(layer_num)
return reduction_layers
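# Worked example (illustrative): calc_reduction_layers(12, 2) places the two
# reduction cells at layers int(12 / 3) and int(2 * 12 / 3), i.e. [4, 8].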
@tf.contrib.framework.add_arg_scope
def get_channel_index(data_format=INVALID):
assert data_format != INVALID
axis = 3 if data_format == 'NHWC' else 1
return axis
@tf.contrib.framework.add_arg_scope
def get_channel_dim(shape, data_format=INVALID):
assert data_format != INVALID
assert len(shape) == 4
if data_format == 'NHWC':
return int(shape[3])
elif data_format == 'NCHW':
return int(shape[1])
else:
raise ValueError('Not a valid data_format', data_format)
@tf.contrib.framework.add_arg_scope
def global_avg_pool(x, data_format=INVALID):
"""Average pool away the height and width spatial dimensions of x."""
assert data_format != INVALID
assert data_format in ['NHWC', 'NCHW']
assert x.shape.ndims == 4
if data_format == 'NHWC':
return tf.reduce_mean(x, [1, 2])
else:
return tf.reduce_mean(x, [2, 3])
@tf.contrib.framework.add_arg_scope
def factorized_reduction(net, output_filters, stride, data_format=INVALID):
"""Reduces the shape of net without information loss due to striding."""
assert data_format != INVALID
if stride == 1:
net = slim.conv2d(net, output_filters, 1, scope='path_conv')
net = slim.batch_norm(net, scope='path_bn')
return net
if data_format == 'NHWC':
stride_spec = [1, stride, stride, 1]
else:
stride_spec = [1, 1, stride, stride]
# Skip path 1
path1 = tf.nn.avg_pool(
net, [1, 1, 1, 1], stride_spec, 'VALID', data_format=data_format)
path1 = slim.conv2d(path1, int(output_filters / 2), 1, scope='path1_conv')
# Skip path 2
  # First pad with 0's on the right and bottom, then shift the input by one
  # pixel so this path pools a window offset from the one used by path 1.
if data_format == 'NHWC':
pad_arr = [[0, 0], [0, 1], [0, 1], [0, 0]]
path2 = tf.pad(net, pad_arr)[:, 1:, 1:, :]
concat_axis = 3
else:
pad_arr = [[0, 0], [0, 0], [0, 1], [0, 1]]
path2 = tf.pad(net, pad_arr)[:, :, 1:, 1:]
concat_axis = 1
path2 = tf.nn.avg_pool(
path2, [1, 1, 1, 1], stride_spec, 'VALID', data_format=data_format)
# If odd number of filters, add an additional one to the second path.
final_filter_size = int(output_filters / 2) + int(output_filters % 2)
path2 = slim.conv2d(path2, final_filter_size, 1, scope='path2_conv')
# Concat and apply BN
final_path = tf.concat(values=[path1, path2], axis=concat_axis)
final_path = slim.batch_norm(final_path, scope='final_path_bn')
return final_path
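# Shape sketch for factorized_reduction above (illustrative): with stride=2 and
# an NHWC input of shape [N, 32, 32, C], the two offset avg-pool + 1x1-conv
# paths each produce [N, 16, 16, output_filters // 2] (plus one extra filter on
# the second path when output_filters is odd) and are concatenated on the
# channel axis, so no spatial position is simply discarded as it would be with
# a plain strided 1x1 convolution.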
@tf.contrib.framework.add_arg_scope
def drop_path(net, keep_prob, is_training=True):
"""Drops out a whole example hiddenstate with the specified probability."""
if is_training:
batch_size = tf.shape(net)[0]
noise_shape = [batch_size, 1, 1, 1]
random_tensor = keep_prob
random_tensor += tf.random_uniform(noise_shape, dtype=tf.float32)
binary_tensor = tf.cast(tf.floor(random_tensor), net.dtype)
keep_prob_inv = tf.cast(1.0 / keep_prob, net.dtype)
net = net * keep_prob_inv * binary_tensor
return net
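# Behaviour sketch for drop_path above (illustrative): with keep_prob=0.7 each
# example in the batch is zeroed out independently with probability 0.3, and
# surviving examples are scaled by 1/0.7 so the expected activation is
# unchanged. At inference (is_training=False) the input passes through as is.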
def _operation_to_filter_shape(operation):
splitted_operation = operation.split('x')
filter_shape = int(splitted_operation[0][-1])
assert filter_shape == int(
splitted_operation[1][0]), 'Rectangular filters not supported.'
return filter_shape
def _operation_to_num_layers(operation):
splitted_operation = operation.split('_')
if 'x' in splitted_operation[-1]:
return 1
return int(splitted_operation[-1])
def _operation_to_info(operation):
"""Takes in operation name and returns meta information.
  An example would be 'separable_3x3_4' -> (4, 3).
Args:
operation: String that corresponds to convolution operation.
Returns:
    Tuple of (num layers, filter shape).
"""
num_layers = _operation_to_num_layers(operation)
filter_shape = _operation_to_filter_shape(operation)
return num_layers, filter_shape
def _stacked_separable_conv(net, stride, operation, filter_size,
use_bounded_activation):
"""Takes in an operations and parses it to the correct sep operation."""
num_layers, kernel_size = _operation_to_info(operation)
activation_fn = tf.nn.relu6 if use_bounded_activation else tf.nn.relu
for layer_num in range(num_layers - 1):
net = activation_fn(net)
net = slim.separable_conv2d(
net,
filter_size,
kernel_size,
depth_multiplier=1,
scope='separable_{0}x{0}_{1}'.format(kernel_size, layer_num + 1),
stride=stride)
net = slim.batch_norm(
net, scope='bn_sep_{0}x{0}_{1}'.format(kernel_size, layer_num + 1))
stride = 1
net = activation_fn(net)
net = slim.separable_conv2d(
net,
filter_size,
kernel_size,
depth_multiplier=1,
scope='separable_{0}x{0}_{1}'.format(kernel_size, num_layers),
stride=stride)
net = slim.batch_norm(
net, scope='bn_sep_{0}x{0}_{1}'.format(kernel_size, num_layers))
return net
def _operation_to_pooling_type(operation):
"""Takes in the operation string and returns the pooling type."""
splitted_operation = operation.split('_')
return splitted_operation[0]
def _operation_to_pooling_shape(operation):
"""Takes in the operation string and returns the pooling kernel shape."""
splitted_operation = operation.split('_')
shape = splitted_operation[-1]
assert 'x' in shape
filter_height, filter_width = shape.split('x')
assert filter_height == filter_width
return int(filter_height)
def _operation_to_pooling_info(operation):
"""Parses the pooling operation string to return its type and shape."""
pooling_type = _operation_to_pooling_type(operation)
pooling_shape = _operation_to_pooling_shape(operation)
return pooling_type, pooling_shape
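# Illustrative parse (not part of the original module): 'avg_pool_3x3' maps to
# ('avg', 3) and 'max_pool_3x3' maps to ('max', 3).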
def _pooling(net, stride, operation, use_bounded_activation):
"""Parses operation and performs the correct pooling operation on net."""
padding = 'SAME'
pooling_type, pooling_shape = _operation_to_pooling_info(operation)
if use_bounded_activation:
net = tf.nn.relu6(net)
if pooling_type == 'avg':
net = slim.avg_pool2d(net, pooling_shape, stride=stride, padding=padding)
elif pooling_type == 'max':
net = slim.max_pool2d(net, pooling_shape, stride=stride, padding=padding)
else:
raise NotImplementedError('Unimplemented pooling type: ', pooling_type)
return net
class NasNetABaseCell(object):
"""NASNet Cell class that is used as a 'layer' in image architectures.
Args:
num_conv_filters: The number of filters for each convolution operation.
operations: List of operations that are performed in the NASNet Cell in
order.
used_hiddenstates: Binary array that signals if the hiddenstate was used
within the cell. This is used to determine what outputs of the cell
should be concatenated together.
hiddenstate_indices: Determines what hiddenstates should be combined
together with the specified operations to create the NASNet cell.
use_bounded_activation: Whether or not to use bounded activations. Bounded
activations better lend themselves to quantized inference.
"""
def __init__(self, num_conv_filters, operations, used_hiddenstates,
hiddenstate_indices, drop_path_keep_prob, total_num_cells,
total_training_steps, use_bounded_activation=False):
self._num_conv_filters = num_conv_filters
self._operations = operations
self._used_hiddenstates = used_hiddenstates
self._hiddenstate_indices = hiddenstate_indices
self._drop_path_keep_prob = drop_path_keep_prob
self._total_num_cells = total_num_cells
self._total_training_steps = total_training_steps
self._use_bounded_activation = use_bounded_activation
def _reduce_prev_layer(self, prev_layer, curr_layer):
"""Matches dimension of prev_layer to the curr_layer."""
    # Set the previous layer to the current layer if it is None.
if prev_layer is None:
return curr_layer
curr_num_filters = self._filter_size
prev_num_filters = get_channel_dim(prev_layer.shape)
curr_filter_shape = int(curr_layer.shape[2])
prev_filter_shape = int(prev_layer.shape[2])
activation_fn = tf.nn.relu6 if self._use_bounded_activation else tf.nn.relu
if curr_filter_shape != prev_filter_shape:
prev_layer = activation_fn(prev_layer)
prev_layer = factorized_reduction(
prev_layer, curr_num_filters, stride=2)
elif curr_num_filters != prev_num_filters:
prev_layer = activation_fn(prev_layer)
prev_layer = slim.conv2d(
prev_layer, curr_num_filters, 1, scope='prev_1x1')
prev_layer = slim.batch_norm(prev_layer, scope='prev_bn')
return prev_layer
def _cell_base(self, net, prev_layer):
"""Runs the beginning of the conv cell before the predicted ops are run."""
num_filters = self._filter_size
    # Make sure the previous layer is set up correctly (matching spatial size
    # and filter count).
prev_layer = self._reduce_prev_layer(prev_layer, net)
net = tf.nn.relu6(net) if self._use_bounded_activation else tf.nn.relu(net)
net = slim.conv2d(net, num_filters, 1, scope='1x1')
net = slim.batch_norm(net, scope='beginning_bn')
    # num_or_size_splits=1: keep the tensor in a list so that the hiddenstates
    # computed below can be appended to it.
    net = [net]
net.append(prev_layer)
return net
def __call__(self, net, scope=None, filter_scaling=1, stride=1,
prev_layer=None, cell_num=-1, current_step=None):
"""Runs the conv cell."""
self._cell_num = cell_num
self._filter_scaling = filter_scaling
self._filter_size = int(self._num_conv_filters * filter_scaling)
i = 0
with tf.variable_scope(scope):
net = self._cell_base(net, prev_layer)
for iteration in range(5):
with tf.variable_scope('comb_iter_{}'.format(iteration)):
left_hiddenstate_idx, right_hiddenstate_idx = (
self._hiddenstate_indices[i],
self._hiddenstate_indices[i + 1])
original_input_left = left_hiddenstate_idx < 2
original_input_right = right_hiddenstate_idx < 2
h1 = net[left_hiddenstate_idx]
h2 = net[right_hiddenstate_idx]
operation_left = self._operations[i]
operation_right = self._operations[i+1]
i += 2
# Apply conv operations
with tf.variable_scope('left'):
h1 = self._apply_conv_operation(h1, operation_left,
stride, original_input_left,
current_step)
with tf.variable_scope('right'):
h2 = self._apply_conv_operation(h2, operation_right,
stride, original_input_right,
current_step)
# Combine hidden states using 'add'.
with tf.variable_scope('combine'):
h = h1 + h2
if self._use_bounded_activation:
h = tf.nn.relu6(h)
# Add hiddenstate to the list of hiddenstates we can choose from
net.append(h)
with tf.variable_scope('cell_output'):
net = self._combine_unused_states(net)
return net
def _apply_conv_operation(self, net, operation,
stride, is_from_original_input, current_step):
"""Applies the predicted conv operation to net."""
    # Don't stride if this is not one of the original hiddenstates
if stride > 1 and not is_from_original_input:
stride = 1
input_filters = get_channel_dim(net.shape)
filter_size = self._filter_size
if 'separable' in operation:
net = _stacked_separable_conv(net, stride, operation, filter_size,
self._use_bounded_activation)
if self._use_bounded_activation:
net = tf.clip_by_value(net, -CLIP_BY_VALUE_CAP, CLIP_BY_VALUE_CAP)
elif operation in ['none']:
if self._use_bounded_activation:
net = tf.nn.relu6(net)
# Check if a stride is needed, then use a strided 1x1 here
if stride > 1 or (input_filters != filter_size):
if not self._use_bounded_activation:
net = tf.nn.relu(net)
net = slim.conv2d(net, filter_size, 1, stride=stride, scope='1x1')
net = slim.batch_norm(net, scope='bn_1')
if self._use_bounded_activation:
net = tf.clip_by_value(net, -CLIP_BY_VALUE_CAP, CLIP_BY_VALUE_CAP)
elif 'pool' in operation:
net = _pooling(net, stride, operation, self._use_bounded_activation)
if input_filters != filter_size:
net = slim.conv2d(net, filter_size, 1, stride=1, scope='1x1')
net = slim.batch_norm(net, scope='bn_1')
if self._use_bounded_activation:
net = tf.clip_by_value(net, -CLIP_BY_VALUE_CAP, CLIP_BY_VALUE_CAP)
else:
raise ValueError('Unimplemented operation', operation)
if operation != 'none':
net = self._apply_drop_path(net, current_step=current_step)
return net
def _combine_unused_states(self, net):
"""Concatenate the unused hidden states of the cell."""
used_hiddenstates = self._used_hiddenstates
final_height = int(net[-1].shape[2])
final_num_filters = get_channel_dim(net[-1].shape)
assert len(used_hiddenstates) == len(net)
for idx, used_h in enumerate(used_hiddenstates):
curr_height = int(net[idx].shape[2])
curr_num_filters = get_channel_dim(net[idx].shape)
# Determine if a reduction should be applied to make the number of
# filters match.
should_reduce = final_num_filters != curr_num_filters
should_reduce = (final_height != curr_height) or should_reduce
should_reduce = should_reduce and not used_h
if should_reduce:
stride = 2 if final_height != curr_height else 1
with tf.variable_scope('reduction_{}'.format(idx)):
net[idx] = factorized_reduction(
net[idx], final_num_filters, stride)
states_to_combine = (
[h for h, is_used in zip(net, used_hiddenstates) if not is_used])
# Return the concat of all the states
concat_axis = get_channel_index()
net = tf.concat(values=states_to_combine, axis=concat_axis)
return net
@tf.contrib.framework.add_arg_scope # No public API. For internal use only.
def _apply_drop_path(self, net, current_step=None,
use_summaries=False, drop_connect_version='v3'):
"""Apply drop_path regularization.
Args:
net: the Tensor that gets drop_path regularization applied.
current_step: a float32 Tensor with the current global_step value,
to be divided by hparams.total_training_steps. Usually None, which
        defaults to tf.train.get_or_create_global_step() properly cast.
use_summaries: a Python boolean. If set to False, no summaries are output.
drop_connect_version: one of 'v1', 'v2', 'v3', controlling whether
the dropout rate is scaled by current_step (v1), layer (v2), or
both (v3, the default).
Returns:
The dropped-out value of `net`.
"""
drop_path_keep_prob = self._drop_path_keep_prob
if drop_path_keep_prob < 1.0:
assert drop_connect_version in ['v1', 'v2', 'v3']
if drop_connect_version in ['v2', 'v3']:
# Scale keep prob by layer number
assert self._cell_num != -1
# The added 2 is for the reduction cells
num_cells = self._total_num_cells
layer_ratio = (self._cell_num + 1)/float(num_cells)
if use_summaries:
with tf.device('/cpu:0'):
tf.summary.scalar('layer_ratio', layer_ratio)
drop_path_keep_prob = 1 - layer_ratio * (1 - drop_path_keep_prob)
if drop_connect_version in ['v1', 'v3']:
# Decrease the keep probability over time
if current_step is None:
current_step = tf.train.get_or_create_global_step()
current_step = tf.cast(current_step, tf.float32)
drop_path_burn_in_steps = self._total_training_steps
current_ratio = current_step / drop_path_burn_in_steps
current_ratio = tf.minimum(1.0, current_ratio)
if use_summaries:
with tf.device('/cpu:0'):
tf.summary.scalar('current_ratio', current_ratio)
drop_path_keep_prob = (1 - current_ratio * (1 - drop_path_keep_prob))
if use_summaries:
with tf.device('/cpu:0'):
tf.summary.scalar('drop_path_keep_prob', drop_path_keep_prob)
net = drop_path(net, drop_path_keep_prob)
return net
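  # Numeric sketch for the default v3 schedule above (illustrative): with
  # drop_path_keep_prob=0.6, 12 total cells and cell_num=5, the layer term
  # gives 1 - 0.5 * 0.4 = 0.8; halfway through training the step term then
  # gives 1 - 0.5 * 0.2 = 0.9 as the effective keep probability.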
class NasNetANormalCell(NasNetABaseCell):
"""NASNetA Normal Cell."""
def __init__(self, num_conv_filters, drop_path_keep_prob, total_num_cells,
total_training_steps, use_bounded_activation=False):
operations = ['separable_5x5_2',
'separable_3x3_2',
'separable_5x5_2',
'separable_3x3_2',
'avg_pool_3x3',
'none',
'avg_pool_3x3',
'avg_pool_3x3',
'separable_3x3_2',
'none']
used_hiddenstates = [1, 0, 0, 0, 0, 0, 0]
hiddenstate_indices = [0, 1, 1, 1, 0, 1, 1, 1, 0, 0]
super(NasNetANormalCell, self).__init__(num_conv_filters, operations,
used_hiddenstates,
hiddenstate_indices,
drop_path_keep_prob,
total_num_cells,
total_training_steps,
use_bounded_activation)
class NasNetAReductionCell(NasNetABaseCell):
"""NASNetA Reduction Cell."""
def __init__(self, num_conv_filters, drop_path_keep_prob, total_num_cells,
total_training_steps, use_bounded_activation=False):
operations = ['separable_5x5_2',
'separable_7x7_2',
'max_pool_3x3',
'separable_7x7_2',
'avg_pool_3x3',
'separable_5x5_2',
'none',
'avg_pool_3x3',
'separable_3x3_2',
'max_pool_3x3']
used_hiddenstates = [1, 1, 1, 0, 0, 0, 0]
hiddenstate_indices = [0, 1, 0, 1, 0, 1, 3, 2, 2, 0]
super(NasNetAReductionCell, self).__init__(num_conv_filters, operations,
used_hiddenstates,
hiddenstate_indices,
drop_path_keep_prob,
total_num_cells,
total_training_steps,
use_bounded_activation)
|
TensorFlow/Detection/SSD/models/research/object_detection/core | core | box_list_ops_test | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.core.box_list_ops."""
import numpy as np
import tensorflow as tf
from object_detection.core import box_list
from object_detection.core import box_list_ops
from object_detection.utils import test_case
class BoxListOpsTest(test_case.TestCase):
"""Tests for common bounding box operations."""
def test_area(self):
corners = tf.constant([[0.0, 0.0, 10.0, 20.0], [1.0, 2.0, 3.0, 4.0]])
exp_output = [200.0, 4.0]
boxes = box_list.BoxList(corners)
areas = box_list_ops.area(boxes)
with self.test_session() as sess:
areas_output = sess.run(areas)
self.assertAllClose(areas_output, exp_output)
def test_height_width(self):
corners = tf.constant([[0.0, 0.0, 10.0, 20.0], [1.0, 2.0, 3.0, 4.0]])
exp_output_heights = [10., 2.]
exp_output_widths = [20., 2.]
boxes = box_list.BoxList(corners)
heights, widths = box_list_ops.height_width(boxes)
with self.test_session() as sess:
output_heights, output_widths = sess.run([heights, widths])
self.assertAllClose(output_heights, exp_output_heights)
self.assertAllClose(output_widths, exp_output_widths)
def test_scale(self):
corners = tf.constant([[0, 0, 100, 200], [50, 120, 100, 140]],
dtype=tf.float32)
boxes = box_list.BoxList(corners)
boxes.add_field('extra_data', tf.constant([[1], [2]]))
y_scale = tf.constant(1.0/100)
x_scale = tf.constant(1.0/200)
scaled_boxes = box_list_ops.scale(boxes, y_scale, x_scale)
exp_output = [[0, 0, 1, 1], [0.5, 0.6, 1.0, 0.7]]
with self.test_session() as sess:
scaled_corners_out = sess.run(scaled_boxes.get())
self.assertAllClose(scaled_corners_out, exp_output)
extra_data_out = sess.run(scaled_boxes.get_field('extra_data'))
self.assertAllEqual(extra_data_out, [[1], [2]])
def test_clip_to_window_filter_boxes_which_fall_outside_the_window(
self):
window = tf.constant([0, 0, 9, 14], tf.float32)
corners = tf.constant([[5.0, 5.0, 6.0, 6.0],
[-1.0, -2.0, 4.0, 5.0],
[2.0, 3.0, 5.0, 9.0],
[0.0, 0.0, 9.0, 14.0],
[-100.0, -100.0, 300.0, 600.0],
[-10.0, -10.0, -9.0, -9.0]])
boxes = box_list.BoxList(corners)
boxes.add_field('extra_data', tf.constant([[1], [2], [3], [4], [5], [6]]))
exp_output = [[5.0, 5.0, 6.0, 6.0], [0.0, 0.0, 4.0, 5.0],
[2.0, 3.0, 5.0, 9.0], [0.0, 0.0, 9.0, 14.0],
[0.0, 0.0, 9.0, 14.0]]
pruned = box_list_ops.clip_to_window(
boxes, window, filter_nonoverlapping=True)
with self.test_session() as sess:
pruned_output = sess.run(pruned.get())
self.assertAllClose(pruned_output, exp_output)
extra_data_out = sess.run(pruned.get_field('extra_data'))
self.assertAllEqual(extra_data_out, [[1], [2], [3], [4], [5]])
def test_clip_to_window_without_filtering_boxes_which_fall_outside_the_window(
self):
window = tf.constant([0, 0, 9, 14], tf.float32)
corners = tf.constant([[5.0, 5.0, 6.0, 6.0],
[-1.0, -2.0, 4.0, 5.0],
[2.0, 3.0, 5.0, 9.0],
[0.0, 0.0, 9.0, 14.0],
[-100.0, -100.0, 300.0, 600.0],
[-10.0, -10.0, -9.0, -9.0]])
boxes = box_list.BoxList(corners)
boxes.add_field('extra_data', tf.constant([[1], [2], [3], [4], [5], [6]]))
exp_output = [[5.0, 5.0, 6.0, 6.0], [0.0, 0.0, 4.0, 5.0],
[2.0, 3.0, 5.0, 9.0], [0.0, 0.0, 9.0, 14.0],
[0.0, 0.0, 9.0, 14.0], [0.0, 0.0, 0.0, 0.0]]
pruned = box_list_ops.clip_to_window(
boxes, window, filter_nonoverlapping=False)
with self.test_session() as sess:
pruned_output = sess.run(pruned.get())
self.assertAllClose(pruned_output, exp_output)
extra_data_out = sess.run(pruned.get_field('extra_data'))
self.assertAllEqual(extra_data_out, [[1], [2], [3], [4], [5], [6]])
def test_prune_outside_window_filters_boxes_which_fall_outside_the_window(
self):
window = tf.constant([0, 0, 9, 14], tf.float32)
corners = tf.constant([[5.0, 5.0, 6.0, 6.0],
[-1.0, -2.0, 4.0, 5.0],
[2.0, 3.0, 5.0, 9.0],
[0.0, 0.0, 9.0, 14.0],
[-10.0, -10.0, -9.0, -9.0],
[-100.0, -100.0, 300.0, 600.0]])
boxes = box_list.BoxList(corners)
boxes.add_field('extra_data', tf.constant([[1], [2], [3], [4], [5], [6]]))
exp_output = [[5.0, 5.0, 6.0, 6.0],
[2.0, 3.0, 5.0, 9.0],
[0.0, 0.0, 9.0, 14.0]]
pruned, keep_indices = box_list_ops.prune_outside_window(boxes, window)
with self.test_session() as sess:
pruned_output = sess.run(pruned.get())
self.assertAllClose(pruned_output, exp_output)
keep_indices_out = sess.run(keep_indices)
self.assertAllEqual(keep_indices_out, [0, 2, 3])
extra_data_out = sess.run(pruned.get_field('extra_data'))
self.assertAllEqual(extra_data_out, [[1], [3], [4]])
def test_prune_completely_outside_window(self):
window = tf.constant([0, 0, 9, 14], tf.float32)
corners = tf.constant([[5.0, 5.0, 6.0, 6.0],
[-1.0, -2.0, 4.0, 5.0],
[2.0, 3.0, 5.0, 9.0],
[0.0, 0.0, 9.0, 14.0],
[-10.0, -10.0, -9.0, -9.0],
[-100.0, -100.0, 300.0, 600.0]])
boxes = box_list.BoxList(corners)
boxes.add_field('extra_data', tf.constant([[1], [2], [3], [4], [5], [6]]))
exp_output = [[5.0, 5.0, 6.0, 6.0],
[-1.0, -2.0, 4.0, 5.0],
[2.0, 3.0, 5.0, 9.0],
[0.0, 0.0, 9.0, 14.0],
[-100.0, -100.0, 300.0, 600.0]]
pruned, keep_indices = box_list_ops.prune_completely_outside_window(boxes,
window)
with self.test_session() as sess:
pruned_output = sess.run(pruned.get())
self.assertAllClose(pruned_output, exp_output)
keep_indices_out = sess.run(keep_indices)
self.assertAllEqual(keep_indices_out, [0, 1, 2, 3, 5])
extra_data_out = sess.run(pruned.get_field('extra_data'))
self.assertAllEqual(extra_data_out, [[1], [2], [3], [4], [6]])
def test_prune_completely_outside_window_with_empty_boxlist(self):
window = tf.constant([0, 0, 9, 14], tf.float32)
corners = tf.zeros(shape=[0, 4], dtype=tf.float32)
boxes = box_list.BoxList(corners)
boxes.add_field('extra_data', tf.zeros(shape=[0], dtype=tf.int32))
pruned, keep_indices = box_list_ops.prune_completely_outside_window(boxes,
window)
pruned_boxes = pruned.get()
extra = pruned.get_field('extra_data')
exp_pruned_boxes = np.zeros(shape=[0, 4], dtype=np.float32)
exp_extra = np.zeros(shape=[0], dtype=np.int32)
with self.test_session() as sess:
pruned_boxes_out, keep_indices_out, extra_out = sess.run(
[pruned_boxes, keep_indices, extra])
self.assertAllClose(exp_pruned_boxes, pruned_boxes_out)
self.assertAllEqual([], keep_indices_out)
self.assertAllEqual(exp_extra, extra_out)
def test_intersection(self):
corners1 = tf.constant([[4.0, 3.0, 7.0, 5.0], [5.0, 6.0, 10.0, 7.0]])
corners2 = tf.constant([[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0],
[0.0, 0.0, 20.0, 20.0]])
exp_output = [[2.0, 0.0, 6.0], [1.0, 0.0, 5.0]]
boxes1 = box_list.BoxList(corners1)
boxes2 = box_list.BoxList(corners2)
intersect = box_list_ops.intersection(boxes1, boxes2)
with self.test_session() as sess:
intersect_output = sess.run(intersect)
self.assertAllClose(intersect_output, exp_output)
def test_matched_intersection(self):
corners1 = tf.constant([[4.0, 3.0, 7.0, 5.0], [5.0, 6.0, 10.0, 7.0]])
corners2 = tf.constant([[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0]])
exp_output = [2.0, 0.0]
boxes1 = box_list.BoxList(corners1)
boxes2 = box_list.BoxList(corners2)
intersect = box_list_ops.matched_intersection(boxes1, boxes2)
with self.test_session() as sess:
intersect_output = sess.run(intersect)
self.assertAllClose(intersect_output, exp_output)
def test_iou(self):
corners1 = tf.constant([[4.0, 3.0, 7.0, 5.0], [5.0, 6.0, 10.0, 7.0]])
corners2 = tf.constant([[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0],
[0.0, 0.0, 20.0, 20.0]])
exp_output = [[2.0 / 16.0, 0, 6.0 / 400.0], [1.0 / 16.0, 0.0, 5.0 / 400.0]]
boxes1 = box_list.BoxList(corners1)
boxes2 = box_list.BoxList(corners2)
iou = box_list_ops.iou(boxes1, boxes2)
with self.test_session() as sess:
iou_output = sess.run(iou)
self.assertAllClose(iou_output, exp_output)
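  # Arithmetic check for the first entry above (illustrative, not part of the
  # original test): box [4, 3, 7, 5] has area 3 * 2 = 6, box [3, 4, 6, 8] has
  # area 3 * 4 = 12, their intersection [4, 4, 6, 5] has area 2, so
  # IoU = 2 / (6 + 12 - 2) = 2 / 16.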
def test_matched_iou(self):
corners1 = tf.constant([[4.0, 3.0, 7.0, 5.0], [5.0, 6.0, 10.0, 7.0]])
corners2 = tf.constant([[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0]])
exp_output = [2.0 / 16.0, 0]
boxes1 = box_list.BoxList(corners1)
boxes2 = box_list.BoxList(corners2)
iou = box_list_ops.matched_iou(boxes1, boxes2)
with self.test_session() as sess:
iou_output = sess.run(iou)
self.assertAllClose(iou_output, exp_output)
  def test_iou_works_on_empty_inputs(self):
corners1 = tf.constant([[4.0, 3.0, 7.0, 5.0], [5.0, 6.0, 10.0, 7.0]])
corners2 = tf.constant([[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0],
[0.0, 0.0, 20.0, 20.0]])
boxes1 = box_list.BoxList(corners1)
boxes2 = box_list.BoxList(corners2)
boxes_empty = box_list.BoxList(tf.zeros((0, 4)))
iou_empty_1 = box_list_ops.iou(boxes1, boxes_empty)
iou_empty_2 = box_list_ops.iou(boxes_empty, boxes2)
iou_empty_3 = box_list_ops.iou(boxes_empty, boxes_empty)
with self.test_session() as sess:
iou_output_1, iou_output_2, iou_output_3 = sess.run(
[iou_empty_1, iou_empty_2, iou_empty_3])
self.assertAllEqual(iou_output_1.shape, (2, 0))
self.assertAllEqual(iou_output_2.shape, (0, 3))
self.assertAllEqual(iou_output_3.shape, (0, 0))
def test_ioa(self):
corners1 = tf.constant([[4.0, 3.0, 7.0, 5.0], [5.0, 6.0, 10.0, 7.0]])
corners2 = tf.constant([[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0],
[0.0, 0.0, 20.0, 20.0]])
exp_output_1 = [[2.0 / 12.0, 0, 6.0 / 400.0],
[1.0 / 12.0, 0.0, 5.0 / 400.0]]
exp_output_2 = [[2.0 / 6.0, 1.0 / 5.0],
[0, 0],
[6.0 / 6.0, 5.0 / 5.0]]
boxes1 = box_list.BoxList(corners1)
boxes2 = box_list.BoxList(corners2)
ioa_1 = box_list_ops.ioa(boxes1, boxes2)
ioa_2 = box_list_ops.ioa(boxes2, boxes1)
with self.test_session() as sess:
ioa_output_1, ioa_output_2 = sess.run([ioa_1, ioa_2])
self.assertAllClose(ioa_output_1, exp_output_1)
self.assertAllClose(ioa_output_2, exp_output_2)
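  # Note (illustrative): ioa(a, b) divides each intersection by the area of the
  # box drawn from the second argument, which is why swapping the arguments
  # changes the denominators (12, 1 and 400 above versus 6 and 5).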
def test_prune_non_overlapping_boxes(self):
corners1 = tf.constant([[4.0, 3.0, 7.0, 5.0], [5.0, 6.0, 10.0, 7.0]])
corners2 = tf.constant([[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0],
[0.0, 0.0, 20.0, 20.0]])
boxes1 = box_list.BoxList(corners1)
boxes2 = box_list.BoxList(corners2)
minoverlap = 0.5
exp_output_1 = boxes1
exp_output_2 = box_list.BoxList(tf.constant(0.0, shape=[0, 4]))
output_1, keep_indices_1 = box_list_ops.prune_non_overlapping_boxes(
boxes1, boxes2, min_overlap=minoverlap)
output_2, keep_indices_2 = box_list_ops.prune_non_overlapping_boxes(
boxes2, boxes1, min_overlap=minoverlap)
with self.test_session() as sess:
(output_1_, keep_indices_1_, output_2_, keep_indices_2_, exp_output_1_,
exp_output_2_) = sess.run(
[output_1.get(), keep_indices_1,
output_2.get(), keep_indices_2,
exp_output_1.get(), exp_output_2.get()])
self.assertAllClose(output_1_, exp_output_1_)
self.assertAllClose(output_2_, exp_output_2_)
self.assertAllEqual(keep_indices_1_, [0, 1])
self.assertAllEqual(keep_indices_2_, [])
def test_prune_small_boxes(self):
boxes = tf.constant([[4.0, 3.0, 7.0, 5.0],
[5.0, 6.0, 10.0, 7.0],
[3.0, 4.0, 6.0, 8.0],
[14.0, 14.0, 15.0, 15.0],
[0.0, 0.0, 20.0, 20.0]])
exp_boxes = [[3.0, 4.0, 6.0, 8.0],
[0.0, 0.0, 20.0, 20.0]]
boxes = box_list.BoxList(boxes)
pruned_boxes = box_list_ops.prune_small_boxes(boxes, 3)
with self.test_session() as sess:
pruned_boxes = sess.run(pruned_boxes.get())
self.assertAllEqual(pruned_boxes, exp_boxes)
def test_prune_small_boxes_prunes_boxes_with_negative_side(self):
boxes = tf.constant([[4.0, 3.0, 7.0, 5.0],
[5.0, 6.0, 10.0, 7.0],
[3.0, 4.0, 6.0, 8.0],
[14.0, 14.0, 15.0, 15.0],
[0.0, 0.0, 20.0, 20.0],
[2.0, 3.0, 1.5, 7.0], # negative height
[2.0, 3.0, 5.0, 1.7]]) # negative width
exp_boxes = [[3.0, 4.0, 6.0, 8.0],
[0.0, 0.0, 20.0, 20.0]]
boxes = box_list.BoxList(boxes)
pruned_boxes = box_list_ops.prune_small_boxes(boxes, 3)
with self.test_session() as sess:
pruned_boxes = sess.run(pruned_boxes.get())
self.assertAllEqual(pruned_boxes, exp_boxes)
def test_change_coordinate_frame(self):
corners = tf.constant([[0.25, 0.5, 0.75, 0.75], [0.5, 0.0, 1.0, 1.0]])
window = tf.constant([0.25, 0.25, 0.75, 0.75])
boxes = box_list.BoxList(corners)
expected_corners = tf.constant([[0, 0.5, 1.0, 1.0], [0.5, -0.5, 1.5, 1.5]])
expected_boxes = box_list.BoxList(expected_corners)
output = box_list_ops.change_coordinate_frame(boxes, window)
with self.test_session() as sess:
output_, expected_boxes_ = sess.run([output.get(), expected_boxes.get()])
self.assertAllClose(output_, expected_boxes_)
  def test_ioa_works_on_empty_inputs(self):
corners1 = tf.constant([[4.0, 3.0, 7.0, 5.0], [5.0, 6.0, 10.0, 7.0]])
corners2 = tf.constant([[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0],
[0.0, 0.0, 20.0, 20.0]])
boxes1 = box_list.BoxList(corners1)
boxes2 = box_list.BoxList(corners2)
boxes_empty = box_list.BoxList(tf.zeros((0, 4)))
ioa_empty_1 = box_list_ops.ioa(boxes1, boxes_empty)
ioa_empty_2 = box_list_ops.ioa(boxes_empty, boxes2)
ioa_empty_3 = box_list_ops.ioa(boxes_empty, boxes_empty)
with self.test_session() as sess:
ioa_output_1, ioa_output_2, ioa_output_3 = sess.run(
[ioa_empty_1, ioa_empty_2, ioa_empty_3])
self.assertAllEqual(ioa_output_1.shape, (2, 0))
self.assertAllEqual(ioa_output_2.shape, (0, 3))
self.assertAllEqual(ioa_output_3.shape, (0, 0))
def test_pairwise_distances(self):
corners1 = tf.constant([[0.0, 0.0, 0.0, 0.0],
[1.0, 1.0, 0.0, 2.0]])
corners2 = tf.constant([[3.0, 4.0, 1.0, 0.0],
[-4.0, 0.0, 0.0, 3.0],
[0.0, 0.0, 0.0, 0.0]])
exp_output = [[26, 25, 0], [18, 27, 6]]
boxes1 = box_list.BoxList(corners1)
boxes2 = box_list.BoxList(corners2)
dist_matrix = box_list_ops.sq_dist(boxes1, boxes2)
with self.test_session() as sess:
dist_output = sess.run(dist_matrix)
self.assertAllClose(dist_output, exp_output)
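  # Arithmetic check for the first entry above (illustrative): the squared
  # distance between corner vectors [0, 0, 0, 0] and [3, 4, 1, 0] is
  # 9 + 16 + 1 + 0 = 26, matching exp_output[0][0].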
def test_boolean_mask(self):
corners = tf.constant(
[4 * [0.0], 4 * [1.0], 4 * [2.0], 4 * [3.0], 4 * [4.0]])
indicator = tf.constant([True, False, True, False, True], tf.bool)
expected_subset = [4 * [0.0], 4 * [2.0], 4 * [4.0]]
boxes = box_list.BoxList(corners)
subset = box_list_ops.boolean_mask(boxes, indicator)
with self.test_session() as sess:
subset_output = sess.run(subset.get())
self.assertAllClose(subset_output, expected_subset)
def test_static_boolean_mask_with_field(self):
def graph_fn(corners, weights, indicator):
boxes = box_list.BoxList(corners)
boxes.add_field('weights', weights)
subset = box_list_ops.boolean_mask(
boxes,
indicator, ['weights'],
use_static_shapes=True,
indicator_sum=3)
return (subset.get_field('boxes'), subset.get_field('weights'))
corners = np.array(
[4 * [0.0], 4 * [1.0], 4 * [2.0], 4 * [3.0], 4 * [4.0]],
dtype=np.float32)
indicator = np.array([True, False, True, False, True], dtype=np.bool)
weights = np.array([[.1], [.3], [.5], [.7], [.9]], dtype=np.float32)
result_boxes, result_weights = self.execute(graph_fn,
[corners, weights, indicator])
expected_boxes = [4 * [0.0], 4 * [2.0], 4 * [4.0]]
expected_weights = [[.1], [.5], [.9]]
self.assertAllClose(result_boxes, expected_boxes)
self.assertAllClose(result_weights, expected_weights)
def test_dynamic_boolean_mask_with_field(self):
corners = tf.placeholder(tf.float32, [None, 4])
indicator = tf.placeholder(tf.bool, [None])
weights = tf.placeholder(tf.float32, [None, 1])
expected_subset = [4 * [0.0], 4 * [2.0], 4 * [4.0]]
expected_weights = [[.1], [.5], [.9]]
boxes = box_list.BoxList(corners)
boxes.add_field('weights', weights)
subset = box_list_ops.boolean_mask(boxes, indicator, ['weights'])
with self.test_session() as sess:
subset_output, weights_output = sess.run(
[subset.get(), subset.get_field('weights')],
feed_dict={
corners:
np.array(
[4 * [0.0], 4 * [1.0], 4 * [2.0], 4 * [3.0], 4 * [4.0]]),
indicator:
np.array([True, False, True, False, True]).astype(np.bool),
weights:
np.array([[.1], [.3], [.5], [.7], [.9]])
})
self.assertAllClose(subset_output, expected_subset)
self.assertAllClose(weights_output, expected_weights)
def test_gather(self):
corners = tf.constant(
[4 * [0.0], 4 * [1.0], 4 * [2.0], 4 * [3.0], 4 * [4.0]])
indices = tf.constant([0, 2, 4], tf.int32)
expected_subset = [4 * [0.0], 4 * [2.0], 4 * [4.0]]
boxes = box_list.BoxList(corners)
subset = box_list_ops.gather(boxes, indices)
with self.test_session() as sess:
subset_output = sess.run(subset.get())
self.assertAllClose(subset_output, expected_subset)
def test_static_gather_with_field(self):
def graph_fn(corners, weights, indices):
boxes = box_list.BoxList(corners)
boxes.add_field('weights', weights)
subset = box_list_ops.gather(
boxes, indices, ['weights'], use_static_shapes=True)
return (subset.get_field('boxes'), subset.get_field('weights'))
corners = np.array([4 * [0.0], 4 * [1.0], 4 * [2.0], 4 * [3.0],
4 * [4.0]], dtype=np.float32)
weights = np.array([[.1], [.3], [.5], [.7], [.9]], dtype=np.float32)
indices = np.array([0, 2, 4], dtype=np.int32)
result_boxes, result_weights = self.execute(graph_fn,
[corners, weights, indices])
expected_boxes = [4 * [0.0], 4 * [2.0], 4 * [4.0]]
expected_weights = [[.1], [.5], [.9]]
self.assertAllClose(result_boxes, expected_boxes)
self.assertAllClose(result_weights, expected_weights)
def test_dynamic_gather_with_field(self):
corners = tf.placeholder(tf.float32, [None, 4])
indices = tf.placeholder(tf.int32, [None])
weights = tf.placeholder(tf.float32, [None, 1])
expected_subset = [4 * [0.0], 4 * [2.0], 4 * [4.0]]
expected_weights = [[.1], [.5], [.9]]
boxes = box_list.BoxList(corners)
boxes.add_field('weights', weights)
subset = box_list_ops.gather(boxes, indices, ['weights'],
use_static_shapes=True)
with self.test_session() as sess:
subset_output, weights_output = sess.run(
[subset.get(), subset.get_field('weights')],
feed_dict={
corners:
np.array(
[4 * [0.0], 4 * [1.0], 4 * [2.0], 4 * [3.0], 4 * [4.0]]),
indices:
np.array([0, 2, 4]).astype(np.int32),
weights:
np.array([[.1], [.3], [.5], [.7], [.9]])
})
self.assertAllClose(subset_output, expected_subset)
self.assertAllClose(weights_output, expected_weights)
def test_gather_with_invalid_field(self):
corners = tf.constant([4 * [0.0], 4 * [1.0]])
indices = tf.constant([0, 1], tf.int32)
weights = tf.constant([[.1], [.3]], tf.float32)
boxes = box_list.BoxList(corners)
boxes.add_field('weights', weights)
with self.assertRaises(ValueError):
box_list_ops.gather(boxes, indices, ['foo', 'bar'])
def test_gather_with_invalid_inputs(self):
corners = tf.constant(
[4 * [0.0], 4 * [1.0], 4 * [2.0], 4 * [3.0], 4 * [4.0]])
indices_float32 = tf.constant([0, 2, 4], tf.float32)
boxes = box_list.BoxList(corners)
with self.assertRaises(ValueError):
_ = box_list_ops.gather(boxes, indices_float32)
indices_2d = tf.constant([[0, 2, 4]], tf.int32)
boxes = box_list.BoxList(corners)
with self.assertRaises(ValueError):
_ = box_list_ops.gather(boxes, indices_2d)
def test_gather_with_dynamic_indexing(self):
corners = tf.constant([4 * [0.0], 4 * [1.0], 4 * [2.0], 4 * [3.0], 4 * [4.0]
])
weights = tf.constant([.5, .3, .7, .1, .9], tf.float32)
indices = tf.reshape(tf.where(tf.greater(weights, 0.4)), [-1])
expected_subset = [4 * [0.0], 4 * [2.0], 4 * [4.0]]
expected_weights = [.5, .7, .9]
boxes = box_list.BoxList(corners)
boxes.add_field('weights', weights)
subset = box_list_ops.gather(boxes, indices, ['weights'])
with self.test_session() as sess:
subset_output, weights_output = sess.run([subset.get(), subset.get_field(
'weights')])
self.assertAllClose(subset_output, expected_subset)
self.assertAllClose(weights_output, expected_weights)
def test_sort_by_field_ascending_order(self):
exp_corners = [[0, 0, 1, 1], [0, 0.1, 1, 1.1], [0, -0.1, 1, 0.9],
[0, 10, 1, 11], [0, 10.1, 1, 11.1], [0, 100, 1, 101]]
exp_scores = [.95, .9, .75, .6, .5, .3]
exp_weights = [.2, .45, .6, .75, .8, .92]
shuffle = [2, 4, 0, 5, 1, 3]
corners = tf.constant([exp_corners[i] for i in shuffle], tf.float32)
boxes = box_list.BoxList(corners)
boxes.add_field('scores', tf.constant(
[exp_scores[i] for i in shuffle], tf.float32))
boxes.add_field('weights', tf.constant(
[exp_weights[i] for i in shuffle], tf.float32))
sort_by_weight = box_list_ops.sort_by_field(
boxes,
'weights',
order=box_list_ops.SortOrder.ascend)
with self.test_session() as sess:
corners_out, scores_out, weights_out = sess.run([
sort_by_weight.get(),
sort_by_weight.get_field('scores'),
sort_by_weight.get_field('weights')])
self.assertAllClose(corners_out, exp_corners)
self.assertAllClose(scores_out, exp_scores)
self.assertAllClose(weights_out, exp_weights)
def test_sort_by_field_descending_order(self):
exp_corners = [[0, 0, 1, 1], [0, 0.1, 1, 1.1], [0, -0.1, 1, 0.9],
[0, 10, 1, 11], [0, 10.1, 1, 11.1], [0, 100, 1, 101]]
exp_scores = [.95, .9, .75, .6, .5, .3]
exp_weights = [.2, .45, .6, .75, .8, .92]
shuffle = [2, 4, 0, 5, 1, 3]
corners = tf.constant([exp_corners[i] for i in shuffle], tf.float32)
boxes = box_list.BoxList(corners)
boxes.add_field('scores', tf.constant(
[exp_scores[i] for i in shuffle], tf.float32))
boxes.add_field('weights', tf.constant(
[exp_weights[i] for i in shuffle], tf.float32))
sort_by_score = box_list_ops.sort_by_field(boxes, 'scores')
with self.test_session() as sess:
corners_out, scores_out, weights_out = sess.run([sort_by_score.get(
), sort_by_score.get_field('scores'), sort_by_score.get_field('weights')])
self.assertAllClose(corners_out, exp_corners)
self.assertAllClose(scores_out, exp_scores)
self.assertAllClose(weights_out, exp_weights)
def test_sort_by_field_invalid_inputs(self):
corners = tf.constant([4 * [0.0], 4 * [0.5], 4 * [1.0], 4 * [2.0], 4 *
[3.0], 4 * [4.0]])
misc = tf.constant([[.95, .9], [.5, .3]], tf.float32)
weights = tf.constant([.1, .2], tf.float32)
boxes = box_list.BoxList(corners)
boxes.add_field('misc', misc)
boxes.add_field('weights', weights)
with self.assertRaises(ValueError):
box_list_ops.sort_by_field(boxes, 'area')
with self.assertRaises(ValueError):
box_list_ops.sort_by_field(boxes, 'misc')
with self.assertRaises(ValueError):
box_list_ops.sort_by_field(boxes, 'weights')
def test_visualize_boxes_in_image(self):
image = tf.zeros((6, 4, 3))
corners = tf.constant([[0, 0, 5, 3],
[0, 0, 3, 2]], tf.float32)
boxes = box_list.BoxList(corners)
image_and_boxes = box_list_ops.visualize_boxes_in_image(image, boxes)
image_and_boxes_bw = tf.to_float(
tf.greater(tf.reduce_sum(image_and_boxes, 2), 0.0))
exp_result = [[1, 1, 1, 0],
[1, 1, 1, 0],
[1, 1, 1, 0],
[1, 0, 1, 0],
[1, 1, 1, 0],
[0, 0, 0, 0]]
with self.test_session() as sess:
output = sess.run(image_and_boxes_bw)
self.assertAllEqual(output.astype(int), exp_result)
def test_filter_field_value_equals(self):
corners = tf.constant([[0, 0, 1, 1],
[0, 0.1, 1, 1.1],
[0, -0.1, 1, 0.9],
[0, 10, 1, 11],
[0, 10.1, 1, 11.1],
[0, 100, 1, 101]], tf.float32)
boxes = box_list.BoxList(corners)
boxes.add_field('classes', tf.constant([1, 2, 1, 2, 2, 1]))
exp_output1 = [[0, 0, 1, 1], [0, -0.1, 1, 0.9], [0, 100, 1, 101]]
exp_output2 = [[0, 0.1, 1, 1.1], [0, 10, 1, 11], [0, 10.1, 1, 11.1]]
filtered_boxes1 = box_list_ops.filter_field_value_equals(
boxes, 'classes', 1)
filtered_boxes2 = box_list_ops.filter_field_value_equals(
boxes, 'classes', 2)
with self.test_session() as sess:
filtered_output1, filtered_output2 = sess.run([filtered_boxes1.get(),
filtered_boxes2.get()])
self.assertAllClose(filtered_output1, exp_output1)
self.assertAllClose(filtered_output2, exp_output2)
def test_filter_greater_than(self):
corners = tf.constant([[0, 0, 1, 1],
[0, 0.1, 1, 1.1],
[0, -0.1, 1, 0.9],
[0, 10, 1, 11],
[0, 10.1, 1, 11.1],
[0, 100, 1, 101]], tf.float32)
boxes = box_list.BoxList(corners)
boxes.add_field('scores', tf.constant([.1, .75, .9, .5, .5, .8]))
thresh = .6
exp_output = [[0, 0.1, 1, 1.1], [0, -0.1, 1, 0.9], [0, 100, 1, 101]]
filtered_boxes = box_list_ops.filter_greater_than(boxes, thresh)
with self.test_session() as sess:
filtered_output = sess.run(filtered_boxes.get())
self.assertAllClose(filtered_output, exp_output)
def test_clip_box_list(self):
boxlist = box_list.BoxList(
tf.constant([[0.1, 0.1, 0.4, 0.4], [0.1, 0.1, 0.5, 0.5],
[0.6, 0.6, 0.8, 0.8], [0.2, 0.2, 0.3, 0.3]], tf.float32))
boxlist.add_field('classes', tf.constant([0, 0, 1, 1]))
boxlist.add_field('scores', tf.constant([0.75, 0.65, 0.3, 0.2]))
num_boxes = 2
clipped_boxlist = box_list_ops.pad_or_clip_box_list(boxlist, num_boxes)
expected_boxes = [[0.1, 0.1, 0.4, 0.4], [0.1, 0.1, 0.5, 0.5]]
expected_classes = [0, 0]
expected_scores = [0.75, 0.65]
with self.test_session() as sess:
boxes_out, classes_out, scores_out = sess.run(
[clipped_boxlist.get(), clipped_boxlist.get_field('classes'),
clipped_boxlist.get_field('scores')])
self.assertAllClose(expected_boxes, boxes_out)
self.assertAllEqual(expected_classes, classes_out)
self.assertAllClose(expected_scores, scores_out)
def test_pad_box_list(self):
boxlist = box_list.BoxList(
tf.constant([[0.1, 0.1, 0.4, 0.4], [0.1, 0.1, 0.5, 0.5]], tf.float32))
boxlist.add_field('classes', tf.constant([0, 1]))
boxlist.add_field('scores', tf.constant([0.75, 0.2]))
num_boxes = 4
padded_boxlist = box_list_ops.pad_or_clip_box_list(boxlist, num_boxes)
expected_boxes = [[0.1, 0.1, 0.4, 0.4], [0.1, 0.1, 0.5, 0.5],
[0, 0, 0, 0], [0, 0, 0, 0]]
expected_classes = [0, 1, 0, 0]
expected_scores = [0.75, 0.2, 0, 0]
with self.test_session() as sess:
boxes_out, classes_out, scores_out = sess.run(
[padded_boxlist.get(), padded_boxlist.get_field('classes'),
padded_boxlist.get_field('scores')])
self.assertAllClose(expected_boxes, boxes_out)
self.assertAllEqual(expected_classes, classes_out)
self.assertAllClose(expected_scores, scores_out)
def test_select_random_box(self):
boxes = [[0., 0., 1., 1.],
[0., 1., 2., 3.],
[0., 2., 3., 4.]]
corners = tf.constant(boxes, dtype=tf.float32)
boxlist = box_list.BoxList(corners)
random_bbox, valid = box_list_ops.select_random_box(boxlist)
with self.test_session() as sess:
random_bbox_out, valid_out = sess.run([random_bbox, valid])
norm_small = any(
[np.linalg.norm(random_bbox_out - box) < 1e-6 for box in boxes])
self.assertTrue(norm_small)
self.assertTrue(valid_out)
def test_select_random_box_with_empty_boxlist(self):
corners = tf.constant([], shape=[0, 4], dtype=tf.float32)
boxlist = box_list.BoxList(corners)
random_bbox, valid = box_list_ops.select_random_box(boxlist)
with self.test_session() as sess:
random_bbox_out, valid_out = sess.run([random_bbox, valid])
expected_bbox_out = np.array([[-1., -1., -1., -1.]], dtype=np.float32)
self.assertAllEqual(expected_bbox_out, random_bbox_out)
self.assertFalse(valid_out)
def test_get_minimal_coverage_box(self):
boxes = [[0., 0., 1., 1.],
[-1., 1., 2., 3.],
[0., 2., 3., 4.]]
expected_coverage_box = [[-1., 0., 3., 4.]]
corners = tf.constant(boxes, dtype=tf.float32)
boxlist = box_list.BoxList(corners)
coverage_box = box_list_ops.get_minimal_coverage_box(boxlist)
with self.test_session() as sess:
coverage_box_out = sess.run(coverage_box)
self.assertAllClose(expected_coverage_box, coverage_box_out)
def test_get_minimal_coverage_box_with_empty_boxlist(self):
corners = tf.constant([], shape=[0, 4], dtype=tf.float32)
boxlist = box_list.BoxList(corners)
coverage_box = box_list_ops.get_minimal_coverage_box(boxlist)
with self.test_session() as sess:
coverage_box_out = sess.run(coverage_box)
self.assertAllClose([[0.0, 0.0, 1.0, 1.0]], coverage_box_out)
class ConcatenateTest(tf.test.TestCase):
def test_invalid_input_box_list_list(self):
with self.assertRaises(ValueError):
box_list_ops.concatenate(None)
with self.assertRaises(ValueError):
box_list_ops.concatenate([])
with self.assertRaises(ValueError):
corners = tf.constant([[0, 0, 0, 0]], tf.float32)
boxlist = box_list.BoxList(corners)
box_list_ops.concatenate([boxlist, 2])
def test_concatenate_with_missing_fields(self):
corners1 = tf.constant([[0, 0, 0, 0], [1, 2, 3, 4]], tf.float32)
scores1 = tf.constant([1.0, 2.1])
corners2 = tf.constant([[0, 3, 1, 6], [2, 4, 3, 8]], tf.float32)
boxlist1 = box_list.BoxList(corners1)
boxlist1.add_field('scores', scores1)
boxlist2 = box_list.BoxList(corners2)
with self.assertRaises(ValueError):
box_list_ops.concatenate([boxlist1, boxlist2])
def test_concatenate_with_incompatible_field_shapes(self):
corners1 = tf.constant([[0, 0, 0, 0], [1, 2, 3, 4]], tf.float32)
scores1 = tf.constant([1.0, 2.1])
corners2 = tf.constant([[0, 3, 1, 6], [2, 4, 3, 8]], tf.float32)
scores2 = tf.constant([[1.0, 1.0], [2.1, 3.2]])
boxlist1 = box_list.BoxList(corners1)
boxlist1.add_field('scores', scores1)
boxlist2 = box_list.BoxList(corners2)
boxlist2.add_field('scores', scores2)
with self.assertRaises(ValueError):
box_list_ops.concatenate([boxlist1, boxlist2])
def test_concatenate_is_correct(self):
corners1 = tf.constant([[0, 0, 0, 0], [1, 2, 3, 4]], tf.float32)
scores1 = tf.constant([1.0, 2.1])
corners2 = tf.constant([[0, 3, 1, 6], [2, 4, 3, 8], [1, 0, 5, 10]],
tf.float32)
scores2 = tf.constant([1.0, 2.1, 5.6])
exp_corners = [[0, 0, 0, 0],
[1, 2, 3, 4],
[0, 3, 1, 6],
[2, 4, 3, 8],
[1, 0, 5, 10]]
exp_scores = [1.0, 2.1, 1.0, 2.1, 5.6]
boxlist1 = box_list.BoxList(corners1)
boxlist1.add_field('scores', scores1)
boxlist2 = box_list.BoxList(corners2)
boxlist2.add_field('scores', scores2)
result = box_list_ops.concatenate([boxlist1, boxlist2])
with self.test_session() as sess:
corners_output, scores_output = sess.run(
[result.get(), result.get_field('scores')])
self.assertAllClose(corners_output, exp_corners)
self.assertAllClose(scores_output, exp_scores)
class NonMaxSuppressionTest(tf.test.TestCase):
def test_select_from_three_clusters(self):
corners = tf.constant([[0, 0, 1, 1],
[0, 0.1, 1, 1.1],
[0, -0.1, 1, 0.9],
[0, 10, 1, 11],
[0, 10.1, 1, 11.1],
[0, 100, 1, 101]], tf.float32)
boxes = box_list.BoxList(corners)
boxes.add_field('scores', tf.constant([.9, .75, .6, .95, .5, .3]))
iou_thresh = .5
max_output_size = 3
exp_nms = [[0, 10, 1, 11],
[0, 0, 1, 1],
[0, 100, 1, 101]]
nms = box_list_ops.non_max_suppression(
boxes, iou_thresh, max_output_size)
with self.test_session() as sess:
nms_output = sess.run(nms.get())
self.assertAllClose(nms_output, exp_nms)
def test_select_at_most_two_boxes_from_three_clusters(self):
corners = tf.constant([[0, 0, 1, 1],
[0, 0.1, 1, 1.1],
[0, -0.1, 1, 0.9],
[0, 10, 1, 11],
[0, 10.1, 1, 11.1],
[0, 100, 1, 101]], tf.float32)
boxes = box_list.BoxList(corners)
boxes.add_field('scores', tf.constant([.9, .75, .6, .95, .5, .3]))
iou_thresh = .5
max_output_size = 2
exp_nms = [[0, 10, 1, 11],
[0, 0, 1, 1]]
nms = box_list_ops.non_max_suppression(
boxes, iou_thresh, max_output_size)
with self.test_session() as sess:
nms_output = sess.run(nms.get())
self.assertAllClose(nms_output, exp_nms)
def test_select_at_most_thirty_boxes_from_three_clusters(self):
corners = tf.constant([[0, 0, 1, 1],
[0, 0.1, 1, 1.1],
[0, -0.1, 1, 0.9],
[0, 10, 1, 11],
[0, 10.1, 1, 11.1],
[0, 100, 1, 101]], tf.float32)
boxes = box_list.BoxList(corners)
boxes.add_field('scores', tf.constant([.9, .75, .6, .95, .5, .3]))
iou_thresh = .5
max_output_size = 30
exp_nms = [[0, 10, 1, 11],
[0, 0, 1, 1],
[0, 100, 1, 101]]
nms = box_list_ops.non_max_suppression(
boxes, iou_thresh, max_output_size)
with self.test_session() as sess:
nms_output = sess.run(nms.get())
self.assertAllClose(nms_output, exp_nms)
def test_select_single_box(self):
corners = tf.constant([[0, 0, 1, 1]], tf.float32)
boxes = box_list.BoxList(corners)
boxes.add_field('scores', tf.constant([.9]))
iou_thresh = .5
max_output_size = 3
exp_nms = [[0, 0, 1, 1]]
nms = box_list_ops.non_max_suppression(
boxes, iou_thresh, max_output_size)
with self.test_session() as sess:
nms_output = sess.run(nms.get())
self.assertAllClose(nms_output, exp_nms)
def test_select_from_ten_identical_boxes(self):
corners = tf.constant(10 * [[0, 0, 1, 1]], tf.float32)
boxes = box_list.BoxList(corners)
boxes.add_field('scores', tf.constant(10 * [.9]))
iou_thresh = .5
max_output_size = 3
exp_nms = [[0, 0, 1, 1]]
nms = box_list_ops.non_max_suppression(
boxes, iou_thresh, max_output_size)
with self.test_session() as sess:
nms_output = sess.run(nms.get())
self.assertAllClose(nms_output, exp_nms)
def test_copy_extra_fields(self):
corners = tf.constant([[0, 0, 1, 1],
[0, 0.1, 1, 1.1]], tf.float32)
boxes = box_list.BoxList(corners)
tensor1 = np.array([[1], [4]])
tensor2 = np.array([[1, 1], [2, 2]])
boxes.add_field('tensor1', tf.constant(tensor1))
boxes.add_field('tensor2', tf.constant(tensor2))
new_boxes = box_list.BoxList(tf.constant([[0, 0, 10, 10],
[1, 3, 5, 5]], tf.float32))
new_boxes = box_list_ops._copy_extra_fields(new_boxes, boxes)
with self.test_session() as sess:
self.assertAllClose(tensor1, sess.run(new_boxes.get_field('tensor1')))
self.assertAllClose(tensor2, sess.run(new_boxes.get_field('tensor2')))
class CoordinatesConversionTest(tf.test.TestCase):
def test_to_normalized_coordinates(self):
coordinates = tf.constant([[0, 0, 100, 100],
[25, 25, 75, 75]], tf.float32)
img = tf.ones((128, 100, 100, 3))
boxlist = box_list.BoxList(coordinates)
normalized_boxlist = box_list_ops.to_normalized_coordinates(
boxlist, tf.shape(img)[1], tf.shape(img)[2])
expected_boxes = [[0, 0, 1, 1],
[0.25, 0.25, 0.75, 0.75]]
with self.test_session() as sess:
normalized_boxes = sess.run(normalized_boxlist.get())
self.assertAllClose(normalized_boxes, expected_boxes)
def test_to_normalized_coordinates_already_normalized(self):
coordinates = tf.constant([[0, 0, 1, 1],
[0.25, 0.25, 0.75, 0.75]], tf.float32)
img = tf.ones((128, 100, 100, 3))
boxlist = box_list.BoxList(coordinates)
normalized_boxlist = box_list_ops.to_normalized_coordinates(
boxlist, tf.shape(img)[1], tf.shape(img)[2])
with self.test_session() as sess:
with self.assertRaisesOpError('assertion failed'):
sess.run(normalized_boxlist.get())
def test_to_absolute_coordinates(self):
coordinates = tf.constant([[0, 0, 1, 1],
[0.25, 0.25, 0.75, 0.75]], tf.float32)
img = tf.ones((128, 100, 100, 3))
boxlist = box_list.BoxList(coordinates)
absolute_boxlist = box_list_ops.to_absolute_coordinates(boxlist,
tf.shape(img)[1],
tf.shape(img)[2])
expected_boxes = [[0, 0, 100, 100],
[25, 25, 75, 75]]
with self.test_session() as sess:
absolute_boxes = sess.run(absolute_boxlist.get())
self.assertAllClose(absolute_boxes, expected_boxes)
  def test_to_absolute_coordinates_already_absolute(self):
coordinates = tf.constant([[0, 0, 100, 100],
[25, 25, 75, 75]], tf.float32)
img = tf.ones((128, 100, 100, 3))
boxlist = box_list.BoxList(coordinates)
absolute_boxlist = box_list_ops.to_absolute_coordinates(boxlist,
tf.shape(img)[1],
tf.shape(img)[2])
with self.test_session() as sess:
with self.assertRaisesOpError('assertion failed'):
sess.run(absolute_boxlist.get())
def test_convert_to_normalized_and_back(self):
coordinates = np.random.uniform(size=(100, 4))
coordinates = np.round(np.sort(coordinates) * 200)
coordinates[:, 2:4] += 1
coordinates[99, :] = [0, 0, 201, 201]
img = tf.ones((128, 202, 202, 3))
boxlist = box_list.BoxList(tf.constant(coordinates, tf.float32))
boxlist = box_list_ops.to_normalized_coordinates(boxlist,
tf.shape(img)[1],
tf.shape(img)[2])
boxlist = box_list_ops.to_absolute_coordinates(boxlist,
tf.shape(img)[1],
tf.shape(img)[2])
with self.test_session() as sess:
out = sess.run(boxlist.get())
self.assertAllClose(out, coordinates)
def test_convert_to_absolute_and_back(self):
coordinates = np.random.uniform(size=(100, 4))
coordinates = np.sort(coordinates)
coordinates[99, :] = [0, 0, 1, 1]
img = tf.ones((128, 202, 202, 3))
boxlist = box_list.BoxList(tf.constant(coordinates, tf.float32))
boxlist = box_list_ops.to_absolute_coordinates(boxlist,
tf.shape(img)[1],
tf.shape(img)[2])
boxlist = box_list_ops.to_normalized_coordinates(boxlist,
tf.shape(img)[1],
tf.shape(img)[2])
with self.test_session() as sess:
out = sess.run(boxlist.get())
self.assertAllClose(out, coordinates)
def test_to_absolute_coordinates_maximum_coordinate_check(self):
coordinates = tf.constant([[0, 0, 1.2, 1.2],
[0.25, 0.25, 0.75, 0.75]], tf.float32)
img = tf.ones((128, 100, 100, 3))
boxlist = box_list.BoxList(coordinates)
absolute_boxlist = box_list_ops.to_absolute_coordinates(
boxlist,
tf.shape(img)[1],
tf.shape(img)[2],
maximum_normalized_coordinate=1.1)
with self.test_session() as sess:
with self.assertRaisesOpError('assertion failed'):
sess.run(absolute_boxlist.get())
class BoxRefinementTest(tf.test.TestCase):
def test_box_voting(self):
candidates = box_list.BoxList(
tf.constant([[0.1, 0.1, 0.4, 0.4], [0.6, 0.6, 0.8, 0.8]], tf.float32))
candidates.add_field('ExtraField', tf.constant([1, 2]))
pool = box_list.BoxList(
tf.constant([[0.1, 0.1, 0.4, 0.4], [0.1, 0.1, 0.5, 0.5],
[0.6, 0.6, 0.8, 0.8]], tf.float32))
pool.add_field('scores', tf.constant([0.75, 0.25, 0.3]))
averaged_boxes = box_list_ops.box_voting(candidates, pool)
expected_boxes = [[0.1, 0.1, 0.425, 0.425], [0.6, 0.6, 0.8, 0.8]]
expected_scores = [0.5, 0.3]
with self.test_session() as sess:
boxes_out, scores_out, extra_field_out = sess.run(
[averaged_boxes.get(), averaged_boxes.get_field('scores'),
averaged_boxes.get_field('ExtraField')])
self.assertAllClose(expected_boxes, boxes_out)
self.assertAllClose(expected_scores, scores_out)
self.assertAllEqual(extra_field_out, [1, 2])
def test_box_voting_fails_with_negative_scores(self):
candidates = box_list.BoxList(
tf.constant([[0.1, 0.1, 0.4, 0.4]], tf.float32))
pool = box_list.BoxList(tf.constant([[0.1, 0.1, 0.4, 0.4]], tf.float32))
pool.add_field('scores', tf.constant([-0.2]))
averaged_boxes = box_list_ops.box_voting(candidates, pool)
with self.test_session() as sess:
with self.assertRaisesOpError('Scores must be non negative'):
sess.run([averaged_boxes.get()])
def test_box_voting_fails_when_unmatched(self):
candidates = box_list.BoxList(
tf.constant([[0.1, 0.1, 0.4, 0.4]], tf.float32))
pool = box_list.BoxList(tf.constant([[0.6, 0.6, 0.8, 0.8]], tf.float32))
pool.add_field('scores', tf.constant([0.2]))
averaged_boxes = box_list_ops.box_voting(candidates, pool)
with self.test_session() as sess:
with self.assertRaisesOpError('Each box in selected_boxes must match '
'with at least one box in pool_boxes.'):
sess.run([averaged_boxes.get()])
def test_refine_boxes(self):
pool = box_list.BoxList(
tf.constant([[0.1, 0.1, 0.4, 0.4], [0.1, 0.1, 0.5, 0.5],
[0.6, 0.6, 0.8, 0.8]], tf.float32))
pool.add_field('ExtraField', tf.constant([1, 2, 3]))
pool.add_field('scores', tf.constant([0.75, 0.25, 0.3]))
refined_boxes = box_list_ops.refine_boxes(pool, 0.5, 10)
expected_boxes = [[0.1, 0.1, 0.425, 0.425], [0.6, 0.6, 0.8, 0.8]]
expected_scores = [0.5, 0.3]
with self.test_session() as sess:
boxes_out, scores_out, extra_field_out = sess.run(
[refined_boxes.get(), refined_boxes.get_field('scores'),
refined_boxes.get_field('ExtraField')])
self.assertAllClose(expected_boxes, boxes_out)
self.assertAllClose(expected_scores, scores_out)
self.assertAllEqual(extra_field_out, [1, 3])
def test_refine_boxes_multi_class(self):
pool = box_list.BoxList(
tf.constant([[0.1, 0.1, 0.4, 0.4], [0.1, 0.1, 0.5, 0.5],
[0.6, 0.6, 0.8, 0.8], [0.2, 0.2, 0.3, 0.3]], tf.float32))
pool.add_field('classes', tf.constant([0, 0, 1, 1]))
pool.add_field('scores', tf.constant([0.75, 0.25, 0.3, 0.2]))
refined_boxes = box_list_ops.refine_boxes_multi_class(pool, 3, 0.5, 10)
expected_boxes = [[0.1, 0.1, 0.425, 0.425], [0.6, 0.6, 0.8, 0.8],
[0.2, 0.2, 0.3, 0.3]]
expected_scores = [0.5, 0.3, 0.2]
with self.test_session() as sess:
boxes_out, scores_out, extra_field_out = sess.run(
[refined_boxes.get(), refined_boxes.get_field('scores'),
refined_boxes.get_field('classes')])
self.assertAllClose(expected_boxes, boxes_out)
self.assertAllClose(expected_scores, scores_out)
self.assertAllEqual(extra_field_out, [0, 1, 1])
def test_sample_boxes_by_jittering(self):
boxes = box_list.BoxList(
tf.constant([[0.1, 0.1, 0.4, 0.4],
[0.1, 0.1, 0.5, 0.5],
[0.6, 0.6, 0.8, 0.8],
[0.2, 0.2, 0.3, 0.3]], tf.float32))
sampled_boxes = box_list_ops.sample_boxes_by_jittering(
boxlist=boxes, num_boxes_to_sample=10)
iou = box_list_ops.iou(boxes, sampled_boxes)
iou_max = tf.reduce_max(iou, axis=0)
with self.test_session() as sess:
(np_sampled_boxes, np_iou_max) = sess.run([sampled_boxes.get(), iou_max])
self.assertAllEqual(np_sampled_boxes.shape, [10, 4])
self.assertAllGreater(np_iou_max, 0.5)
if __name__ == '__main__':
tf.test.main()
|
TensorFlow2/Segmentation/nnUNet/data_loading | data_loading | utils | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glob
import os
import numpy as np
from runtime.utils import get_task_code
from sklearn.model_selection import KFold
def get_split(data, idx):
return list(np.array(data)[idx])
def load_data(path, files_pattern):
return sorted(glob.glob(os.path.join(path, files_pattern)))
def get_path(args):
data_path = str(args.data)
if data_path != "/data":
return data_path
data_path = os.path.join(data_path, get_task_code(args))
if args.exec_mode == "predict" and not args.benchmark:
data_path = os.path.join(data_path, "test")
return data_path
def get_test_fnames(args, data_path, meta=None):
kfold = KFold(n_splits=args.nfolds, shuffle=True, random_state=12345)
test_imgs = load_data(data_path, "*_x.npy")
if args.exec_mode == "predict" and "val" in data_path:
_, val_idx = list(kfold.split(test_imgs))[args.fold]
test_imgs = sorted(get_split(test_imgs, val_idx))
if meta is not None:
meta = sorted(get_split(meta, val_idx))
return test_imgs, meta
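# Illustrative usage sketch (not part of the original module): only the attributes that
# get_test_fnames actually reads are supplied, and the values/path below are hypothetical.
#
#   from types import SimpleNamespace
#   args = SimpleNamespace(nfolds=5, fold=0, exec_mode="predict")
#   test_imgs, meta = get_test_fnames(args, "/data/01_3d/val", meta=None)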
|
PyTorch/LanguageModeling/BART/utils | utils | file_utils | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
Utilities for working with the local dataset cache.
This file is adapted from the AllenNLP library at https://github.com/allenai/allennlp
Copyright by the AllenNLP authors.
"""
import fnmatch
import json
import logging
import os
import re
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from dataclasses import fields
from functools import partial, wraps
from hashlib import sha256
from pathlib import Path
from typing import Any, Dict, Optional, Tuple, Union
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import numpy as np
from tqdm.auto import tqdm
import requests
from filelock import FileLock
__version__ = "3.0.2"
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
try:
USE_TF = os.environ.get("USE_TF", "AUTO").upper()
USE_TORCH = os.environ.get("USE_TORCH", "AUTO").upper()
if USE_TORCH in ("1", "ON", "YES", "AUTO") and USE_TF not in ("1", "ON", "YES"):
import torch
_torch_available = True # pylint: disable=invalid-name
logger.info("PyTorch version {} available.".format(torch.__version__))
else:
logger.info("Disabling PyTorch because USE_TF is set")
_torch_available = False
except ImportError:
_torch_available = False # pylint: disable=invalid-name
try:
USE_TF = os.environ.get("USE_TF", "AUTO").upper()
USE_TORCH = os.environ.get("USE_TORCH", "AUTO").upper()
if USE_TF in ("1", "ON", "YES", "AUTO") and USE_TORCH not in ("1", "ON", "YES"):
import tensorflow as tf
assert hasattr(tf, "__version__") and int(tf.__version__[0]) >= 2
_tf_available = True # pylint: disable=invalid-name
logger.info("TensorFlow version {} available.".format(tf.__version__))
else:
logger.info("Disabling Tensorflow because USE_TORCH is set")
_tf_available = False
except (ImportError, AssertionError):
_tf_available = False # pylint: disable=invalid-name
try:
import nlp # noqa: F401
_nlp_available = True
except ImportError:
_nlp_available = False
try:
from torch.hub import _get_torch_home
torch_cache_home = _get_torch_home()
except ImportError:
torch_cache_home = os.path.expanduser(
os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch"))
)
try:
import torch_xla.core.xla_model as xm # noqa: F401
if _torch_available:
        _torch_tpu_available = True  # pylint: disable=invalid-name
else:
_torch_tpu_available = False
except ImportError:
_torch_tpu_available = False
try:
import psutil # noqa: F401
_psutil_available = True
except ImportError:
_psutil_available = False
try:
import py3nvml # noqa: F401
_py3nvml_available = True
except ImportError:
_py3nvml_available = False
try:
from apex import amp # noqa: F401
_has_apex = True
except ImportError:
_has_apex = False
default_cache_path = os.path.join(torch_cache_home, "transformers")
PYTORCH_PRETRAINED_BERT_CACHE = os.getenv("PYTORCH_PRETRAINED_BERT_CACHE", default_cache_path)
PYTORCH_TRANSFORMERS_CACHE = os.getenv("PYTORCH_TRANSFORMERS_CACHE", PYTORCH_PRETRAINED_BERT_CACHE)
TRANSFORMERS_CACHE = os.getenv("TRANSFORMERS_CACHE", PYTORCH_TRANSFORMERS_CACHE)
WEIGHTS_NAME = "pytorch_model.bin"
TF2_WEIGHTS_NAME = "tf_model.h5"
TF_WEIGHTS_NAME = "model.ckpt"
CONFIG_NAME = "config.json"
MODEL_CARD_NAME = "modelcard.json"
MULTIPLE_CHOICE_DUMMY_INPUTS = [[[0], [1]], [[0], [1]]]
DUMMY_INPUTS = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
DUMMY_MASK = [[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]]
S3_BUCKET_PREFIX = "https://s3.amazonaws.com/models.huggingface.co/bert"
CLOUDFRONT_DISTRIB_PREFIX = "https://cdn.huggingface.co"
def is_torch_available():
return _torch_available
def is_tf_available():
return _tf_available
def is_torch_tpu_available():
return _torch_tpu_available
def is_nlp_available():
return _nlp_available
def is_psutil_available():
return _psutil_available
def is_py3nvml_available():
return _py3nvml_available
def is_apex_available():
return _has_apex
def add_start_docstrings(*docstr):
def docstring_decorator(fn):
fn.__doc__ = "".join(docstr) + (fn.__doc__ if fn.__doc__ is not None else "")
return fn
return docstring_decorator
def add_start_docstrings_to_model_forward(*docstr):
def docstring_decorator(fn):
class_name = ":class:`~transformers.{}`".format(fn.__qualname__.split(".")[0])
intro = " The {} forward method, overrides the :func:`__call__` special method.".format(class_name)
note = r"""
.. note::
Although the recipe for forward pass needs to be defined within this function, one should call the
:class:`Module` instance afterwards instead of this since the former takes care of running the pre and post
processing steps while the latter silently ignores them.
"""
fn.__doc__ = intro + note + "".join(docstr) + (fn.__doc__ if fn.__doc__ is not None else "")
return fn
return docstring_decorator
def add_end_docstrings(*docstr):
def docstring_decorator(fn):
fn.__doc__ = fn.__doc__ + "".join(docstr)
return fn
return docstring_decorator
PT_RETURN_INTRODUCTION = r"""
Returns:
:class:`~{full_output_type}` or :obj:`tuple(torch.FloatTensor)`:
A :class:`~{full_output_type}` (if ``return_dict=True`` is passed or when ``config.return_dict=True``) or a
tuple of :obj:`torch.FloatTensor` comprising various elements depending on the configuration
(:class:`~transformers.{config_class}`) and inputs.
"""
TF_RETURN_INTRODUCTION = r"""
Returns:
:class:`~{full_output_type}` or :obj:`tuple(tf.Tensor)`:
A :class:`~{full_output_type}` (if ``return_dict=True`` is passed or when ``config.return_dict=True``) or a
tuple of :obj:`tf.Tensor` comprising various elements depending on the configuration
(:class:`~transformers.{config_class}`) and inputs.
"""
def _get_indent(t):
"""Returns the indentation in the first line of t"""
search = re.search(r"^(\s*)\S", t)
return "" if search is None else search.groups()[0]
def _convert_output_args_doc(output_args_doc):
"""Convert output_args_doc to display properly."""
# Split output_arg_doc in blocks argument/description
indent = _get_indent(output_args_doc)
blocks = []
current_block = ""
for line in output_args_doc.split("\n"):
# If the indent is the same as the beginning, the line is the name of new arg.
if _get_indent(line) == indent:
if len(current_block) > 0:
blocks.append(current_block[:-1])
current_block = f"{line}\n"
else:
# Otherwise it's part of the description of the current arg.
            # We need to remove 2 spaces from the indentation.
current_block += f"{line[2:]}\n"
blocks.append(current_block[:-1])
# Format each block for proper rendering
for i in range(len(blocks)):
blocks[i] = re.sub(r"^(\s+)(\S+)(\s+)", r"\1- **\2**\3", blocks[i])
blocks[i] = re.sub(r":\s*\n\s*(\S)", r" -- \1", blocks[i])
return "\n".join(blocks)
def _prepare_output_docstrings(output_type, config_class):
"""
Prepares the return part of the docstring using `output_type`.
"""
docstrings = output_type.__doc__
# Remove the head of the docstring to keep the list of args only
lines = docstrings.split("\n")
i = 0
while i < len(lines) and re.search(r"^\s*(Args|Parameters):\s*$", lines[i]) is None:
i += 1
if i < len(lines):
docstrings = "\n".join(lines[(i + 1) :])
docstrings = _convert_output_args_doc(docstrings)
# Add the return introduction
full_output_type = f"{output_type.__module__}.{output_type.__name__}"
intro = TF_RETURN_INTRODUCTION if output_type.__name__.startswith("TF") else PT_RETURN_INTRODUCTION
intro = intro.format(full_output_type=full_output_type, config_class=config_class)
return intro + docstrings
PT_TOKEN_CLASSIFICATION_SAMPLE = r"""
Example::
>>> from transformers import {tokenizer_class}, {model_class}
>>> import torch
>>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}')
>>> model = {model_class}.from_pretrained('{checkpoint}', return_dict=True)
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
>>> labels = torch.tensor([1] * inputs["input_ids"].size(1)).unsqueeze(0) # Batch size 1
>>> outputs = model(**inputs, labels=labels)
>>> loss = outputs.loss
>>> logits = outputs.logits
"""
PT_QUESTION_ANSWERING_SAMPLE = r"""
Example::
>>> from transformers import {tokenizer_class}, {model_class}
>>> import torch
>>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}')
>>> model = {model_class}.from_pretrained('{checkpoint}', return_dict=True)
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
>>> start_positions = torch.tensor([1])
>>> end_positions = torch.tensor([3])
>>> outputs = model(**inputs, start_positions=start_positions, end_positions=end_positions)
>>> loss = outputs.loss
>>> start_scores = outputs.start_scores
>>> end_scores = outputs.end_scores
"""
PT_SEQUENCE_CLASSIFICATION_SAMPLE = r"""
Example::
>>> from transformers import {tokenizer_class}, {model_class}
>>> import torch
>>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}')
>>> model = {model_class}.from_pretrained('{checkpoint}', return_dict=True)
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
>>> labels = torch.tensor([1]).unsqueeze(0) # Batch size 1
>>> outputs = model(**inputs, labels=labels)
>>> loss = outputs.loss
>>> logits = outputs.logits
"""
PT_MASKED_LM_SAMPLE = r"""
Example::
>>> from transformers import {tokenizer_class}, {model_class}
>>> import torch
>>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}')
>>> model = {model_class}.from_pretrained('{checkpoint}', return_dict=True)
>>> input_ids = tokenizer("Hello, my dog is cute", return_tensors="pt")["input_ids"]
>>> outputs = model(input_ids, labels=input_ids)
>>> loss = outputs.loss
>>> prediction_logits = outputs.logits
"""
PT_BASE_MODEL_SAMPLE = r"""
Example::
>>> from transformers import {tokenizer_class}, {model_class}
>>> import torch
>>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}')
>>> model = {model_class}.from_pretrained('{checkpoint}', return_dict=True)
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
>>> outputs = model(**inputs)
>>> last_hidden_states = outputs.last_hidden_state
"""
PT_MULTIPLE_CHOICE_SAMPLE = r"""
Example::
>>> from transformers import {tokenizer_class}, {model_class}
>>> import torch
>>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}')
>>> model = {model_class}.from_pretrained('{checkpoint}', return_dict=True)
>>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced."
>>> choice0 = "It is eaten with a fork and a knife."
>>> choice1 = "It is eaten while held in the hand."
>>> labels = torch.tensor(0).unsqueeze(0) # choice0 is correct (according to Wikipedia ;)), batch size 1
>>> encoding = tokenizer([[prompt, prompt], [choice0, choice1]], return_tensors='pt', padding=True)
>>> outputs = model(**{{k: v.unsqueeze(0) for k,v in encoding.items()}}, labels=labels) # batch size is 1
>>> # the linear classifier still needs to be trained
>>> loss = outputs.loss
>>> logits = outputs.logits
"""
PT_CAUSAL_LM_SAMPLE = r"""
Example::
>>> import torch
>>> from transformers import {tokenizer_class}, {model_class}
>>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}')
>>> model = {model_class}.from_pretrained('{checkpoint}', return_dict=True)
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
>>> outputs = model(**inputs, labels=inputs["input_ids"])
>>> loss = outputs.loss
>>> logits = outputs.logits
"""
TF_TOKEN_CLASSIFICATION_SAMPLE = r"""
Example::
>>> from transformers import {tokenizer_class}, {model_class}
>>> import tensorflow as tf
>>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}')
>>> model = {model_class}.from_pretrained('{checkpoint}')
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="tf")
>>> input_ids = inputs["input_ids"]
>>> inputs["labels"] = tf.reshape(tf.constant([1] * tf.size(input_ids).numpy()), (-1, tf.size(input_ids))) # Batch size 1
>>> outputs = model(inputs)
>>> loss, scores = outputs[:2]
"""
TF_QUESTION_ANSWERING_SAMPLE = r"""
Example::
>>> from transformers import {tokenizer_class}, {model_class}
>>> import tensorflow as tf
>>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}')
>>> model = {model_class}.from_pretrained('{checkpoint}')
>>> question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet"
>>> input_dict = tokenizer(question, text, return_tensors='tf')
>>> start_scores, end_scores = model(input_dict)
>>> all_tokens = tokenizer.convert_ids_to_tokens(input_dict["input_ids"].numpy()[0])
>>> answer = ' '.join(all_tokens[tf.math.argmax(start_scores, 1)[0] : tf.math.argmax(end_scores, 1)[0]+1])
"""
TF_SEQUENCE_CLASSIFICATION_SAMPLE = r"""
Example::
>>> from transformers import {tokenizer_class}, {model_class}
>>> import tensorflow as tf
>>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}')
>>> model = {model_class}.from_pretrained('{checkpoint}')
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="tf")
>>> inputs["labels"] = tf.reshape(tf.constant(1), (-1, 1)) # Batch size 1
>>> outputs = model(inputs)
>>> loss, logits = outputs[:2]
"""
TF_MASKED_LM_SAMPLE = r"""
Example::
>>> from transformers import {tokenizer_class}, {model_class}
>>> import tensorflow as tf
>>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}')
>>> model = {model_class}.from_pretrained('{checkpoint}')
>>> input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True))[None, :] # Batch size 1
>>> outputs = model(input_ids)
>>> prediction_scores = outputs[0]
"""
TF_BASE_MODEL_SAMPLE = r"""
Example::
>>> from transformers import {tokenizer_class}, {model_class}
>>> import tensorflow as tf
>>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}')
>>> model = {model_class}.from_pretrained('{checkpoint}')
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="tf")
>>> outputs = model(inputs)
>>> last_hidden_states = outputs[0] # The last hidden-state is the first element of the output tuple
"""
TF_MULTIPLE_CHOICE_SAMPLE = r"""
Example::
>>> from transformers import {tokenizer_class}, {model_class}
>>> import tensorflow as tf
>>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}')
>>> model = {model_class}.from_pretrained('{checkpoint}')
>>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced."
>>> choice0 = "It is eaten with a fork and a knife."
>>> choice1 = "It is eaten while held in the hand."
>>> encoding = tokenizer([[prompt, prompt], [choice0, choice1]], return_tensors='tf', padding=True)
>>> inputs = {{k: tf.expand_dims(v, 0) for k, v in encoding.items()}}
>>> outputs = model(inputs) # batch size is 1
>>> # the linear classifier still needs to be trained
>>> logits = outputs[0]
"""
TF_CAUSAL_LM_SAMPLE = r"""
Example::
>>> from transformers import {tokenizer_class}, {model_class}
>>> import tensorflow as tf
>>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}')
>>> model = {model_class}.from_pretrained('{checkpoint}')
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="tf")
>>> outputs = model(inputs)
>>> logits = outputs[0]
"""
def add_code_sample_docstrings(*docstr, tokenizer_class=None, checkpoint=None, output_type=None, config_class=None):
def docstring_decorator(fn):
model_class = fn.__qualname__.split(".")[0]
is_tf_class = model_class[:2] == "TF"
if "SequenceClassification" in model_class:
code_sample = TF_SEQUENCE_CLASSIFICATION_SAMPLE if is_tf_class else PT_SEQUENCE_CLASSIFICATION_SAMPLE
elif "QuestionAnswering" in model_class:
code_sample = TF_QUESTION_ANSWERING_SAMPLE if is_tf_class else PT_QUESTION_ANSWERING_SAMPLE
elif "TokenClassification" in model_class:
code_sample = TF_TOKEN_CLASSIFICATION_SAMPLE if is_tf_class else PT_TOKEN_CLASSIFICATION_SAMPLE
elif "MultipleChoice" in model_class:
code_sample = TF_MULTIPLE_CHOICE_SAMPLE if is_tf_class else PT_MULTIPLE_CHOICE_SAMPLE
elif "MaskedLM" in model_class:
code_sample = TF_MASKED_LM_SAMPLE if is_tf_class else PT_MASKED_LM_SAMPLE
elif "LMHead" in model_class:
code_sample = TF_CAUSAL_LM_SAMPLE if is_tf_class else PT_CAUSAL_LM_SAMPLE
elif "Model" in model_class:
code_sample = TF_BASE_MODEL_SAMPLE if is_tf_class else PT_BASE_MODEL_SAMPLE
else:
raise ValueError(f"Docstring can't be built for model {model_class}")
output_doc = _prepare_output_docstrings(output_type, config_class) if output_type is not None else ""
built_doc = code_sample.format(model_class=model_class, tokenizer_class=tokenizer_class, checkpoint=checkpoint)
fn.__doc__ = (fn.__doc__ or "") + "".join(docstr) + output_doc + built_doc
return fn
return docstring_decorator
def replace_return_docstrings(output_type=None, config_class=None):
def docstring_decorator(fn):
docstrings = fn.__doc__
lines = docstrings.split("\n")
i = 0
while i < len(lines) and re.search(r"^\s*Returns?:\s*$", lines[i]) is None:
i += 1
if i < len(lines):
lines[i] = _prepare_output_docstrings(output_type, config_class)
docstrings = "\n".join(lines)
else:
raise ValueError(
f"The function {fn} should have an empty 'Return:' or 'Returns:' in its docstring as placeholder, current docstring is:\n{docstrings}"
)
fn.__doc__ = docstrings
return fn
return docstring_decorator
def is_remote_url(url_or_filename):
parsed = urlparse(url_or_filename)
return parsed.scheme in ("http", "https")
def hf_bucket_url(model_id: str, filename: str, use_cdn=True) -> str:
"""
Resolve a model identifier, and a file name, to a HF-hosted url
on either S3 or Cloudfront (a Content Delivery Network, or CDN).
Cloudfront is replicated over the globe so downloads are way faster
for the end user (and it also lowers our bandwidth costs). However, it
is more aggressively cached by default, so may not always reflect the
latest changes to the underlying file (default TTL is 24 hours).
In terms of client-side caching from this library, even though
Cloudfront relays the ETags from S3, using one or the other
(or switching from one to the other) will affect caching: cached files
are not shared between the two because the cached file's name contains
a hash of the url.
"""
endpoint = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
legacy_format = "/" not in model_id
if legacy_format:
return f"{endpoint}/{model_id}-{filename}"
else:
return f"{endpoint}/{model_id}/{filename}"
def url_to_filename(url, etag=None):
"""
Convert `url` into a hashed filename in a repeatable way.
If `etag` is specified, append its hash to the url's, delimited
by a period.
    If the url ends with .h5 (Keras HDF5 weights), '.h5' is appended to the name
    so that TF 2.0 can identify it as an HDF5 file
(see https://github.com/tensorflow/tensorflow/blob/00fad90125b18b80fe054de1055770cfb8fe4ba3/tensorflow/python/keras/engine/network.py#L1380)
"""
url_bytes = url.encode("utf-8")
url_hash = sha256(url_bytes)
filename = url_hash.hexdigest()
if etag:
etag_bytes = etag.encode("utf-8")
etag_hash = sha256(etag_bytes)
filename += "." + etag_hash.hexdigest()
if url.endswith(".h5"):
filename += ".h5"
return filename
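# Example (sketch): the cached filename is the sha256 hex digest of the url, optionally
# followed by "." and the digest of the etag, plus ".h5" for Keras weights.
#
#   url_to_filename("https://cdn.huggingface.co/bert-base-uncased-config.json", etag='"abc"')
#   # -> "<sha256(url)>.<sha256(etag)>"  (two hex digests joined by a period)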
def filename_to_url(filename, cache_dir=None):
"""
Return the url and etag (which may be ``None``) stored for `filename`.
Raise ``EnvironmentError`` if `filename` or its stored metadata do not exist.
"""
if cache_dir is None:
cache_dir = TRANSFORMERS_CACHE
if isinstance(cache_dir, Path):
cache_dir = str(cache_dir)
cache_path = os.path.join(cache_dir, filename)
if not os.path.exists(cache_path):
raise EnvironmentError("file {} not found".format(cache_path))
meta_path = cache_path + ".json"
if not os.path.exists(meta_path):
raise EnvironmentError("file {} not found".format(meta_path))
with open(meta_path, encoding="utf-8") as meta_file:
metadata = json.load(meta_file)
url = metadata["url"]
etag = metadata["etag"]
return url, etag
def cached_path(
url_or_filename,
cache_dir=None,
force_download=False,
proxies=None,
resume_download=False,
user_agent: Union[Dict, str, None] = None,
extract_compressed_file=False,
force_extract=False,
local_files_only=False,
) -> Optional[str]:
"""
Given something that might be a URL (or might be a local path),
determine which. If it's a URL, download the file and cache it, and
return the path to the cached file. If it's already a local path,
make sure the file exists and then return the path.
Args:
cache_dir: specify a cache directory to save the file to (overwrite the default cache dir).
        force_download: if True, re-download the file even if it's already cached in the cache dir.
        resume_download: if True, resume the download if an incompletely received file is found.
        user_agent: Optional string or dict that will be appended to the user-agent on remote requests.
        extract_compressed_file: if True and the path points to a zip or tar file, extract the compressed
            file in a folder along the archive.
        force_extract: if True when extract_compressed_file is True and the archive was already extracted,
            re-extract the archive and override the folder where it was extracted.
Return:
None in case of non-recoverable file (non-existent or inaccessible url + no cache on disk).
Local path (string) otherwise
"""
if cache_dir is None:
cache_dir = TRANSFORMERS_CACHE
if isinstance(url_or_filename, Path):
url_or_filename = str(url_or_filename)
if isinstance(cache_dir, Path):
cache_dir = str(cache_dir)
if is_remote_url(url_or_filename):
# URL, so get it from the cache (downloading if necessary)
output_path = get_from_cache(
url_or_filename,
cache_dir=cache_dir,
force_download=force_download,
proxies=proxies,
resume_download=resume_download,
user_agent=user_agent,
local_files_only=local_files_only,
)
elif os.path.exists(url_or_filename):
# File, and it exists.
output_path = url_or_filename
elif urlparse(url_or_filename).scheme == "":
# File, but it doesn't exist.
raise EnvironmentError("file {} not found".format(url_or_filename))
else:
# Something unknown
raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename))
if extract_compressed_file:
if not is_zipfile(output_path) and not tarfile.is_tarfile(output_path):
return output_path
# Path where we extract compressed archives
# We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
output_dir, output_file = os.path.split(output_path)
output_extract_dir_name = output_file.replace(".", "-") + "-extracted"
output_path_extracted = os.path.join(output_dir, output_extract_dir_name)
if os.path.isdir(output_path_extracted) and os.listdir(output_path_extracted) and not force_extract:
return output_path_extracted
# Prevent parallel extractions
lock_path = output_path + ".lock"
with FileLock(lock_path):
shutil.rmtree(output_path_extracted, ignore_errors=True)
os.makedirs(output_path_extracted)
if is_zipfile(output_path):
with ZipFile(output_path, "r") as zip_file:
zip_file.extractall(output_path_extracted)
zip_file.close()
elif tarfile.is_tarfile(output_path):
tar_file = tarfile.open(output_path)
tar_file.extractall(output_path_extracted)
tar_file.close()
else:
raise EnvironmentError("Archive format of {} could not be identified".format(output_path))
return output_path_extracted
return output_path
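# Illustrative usage sketch (the URL and cache directory below are placeholders, not real
# resources):
#
#   local_path = cached_path(
#       "https://example.com/files/archive.tar.gz",
#       cache_dir="/tmp/my_cache",         # overrides the default TRANSFORMERS_CACHE
#       extract_compressed_file=True,      # returns the "...-extracted" directory instead
#   )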
def http_get(url, temp_file, proxies=None, resume_size=0, user_agent: Union[Dict, str, None] = None):
ua = "transformers/{}; python/{}".format(__version__, sys.version.split()[0])
if is_torch_available():
ua += "; torch/{}".format(torch.__version__)
if is_tf_available():
ua += "; tensorflow/{}".format(tf.__version__)
if isinstance(user_agent, dict):
ua += "; " + "; ".join("{}/{}".format(k, v) for k, v in user_agent.items())
elif isinstance(user_agent, str):
ua += "; " + user_agent
headers = {"user-agent": ua}
if resume_size > 0:
headers["Range"] = "bytes=%d-" % (resume_size,)
response = requests.get(url, stream=True, proxies=proxies, headers=headers)
if response.status_code == 416: # Range not satisfiable
return
content_length = response.headers.get("Content-Length")
total = resume_size + int(content_length) if content_length is not None else None
progress = tqdm(
unit="B",
unit_scale=True,
total=total,
initial=resume_size,
desc="Downloading",
disable=bool(logger.getEffectiveLevel() == logging.NOTSET),
)
for chunk in response.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
progress.update(len(chunk))
temp_file.write(chunk)
progress.close()
def get_from_cache(
url,
cache_dir=None,
force_download=False,
proxies=None,
etag_timeout=10,
resume_download=False,
user_agent: Union[Dict, str, None] = None,
local_files_only=False,
) -> Optional[str]:
"""
Given a URL, look for the corresponding file in the local cache.
If it's not there, download it. Then return the path to the cached file.
Return:
None in case of non-recoverable file (non-existent or inaccessible url + no cache on disk).
Local path (string) otherwise
"""
if cache_dir is None:
cache_dir = TRANSFORMERS_CACHE
if isinstance(cache_dir, Path):
cache_dir = str(cache_dir)
os.makedirs(cache_dir, exist_ok=True)
etag = None
if not local_files_only:
try:
response = requests.head(url, allow_redirects=True, proxies=proxies, timeout=etag_timeout)
if response.status_code == 200:
etag = response.headers.get("ETag")
except (EnvironmentError, requests.exceptions.Timeout):
# etag is already None
pass
filename = url_to_filename(url, etag)
# get cache path to put the file
cache_path = os.path.join(cache_dir, filename)
# etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
# try to get the last downloaded one
if etag is None:
if os.path.exists(cache_path):
return cache_path
else:
matching_files = [
file
for file in fnmatch.filter(os.listdir(cache_dir), filename + ".*")
if not file.endswith(".json") and not file.endswith(".lock")
]
if len(matching_files) > 0:
return os.path.join(cache_dir, matching_files[-1])
else:
# If files cannot be found and local_files_only=True,
# the models might've been found if local_files_only=False
# Notify the user about that
if local_files_only:
raise ValueError(
"Cannot find the requested files in the cached path and outgoing traffic has been"
" disabled. To enable model look-ups and downloads online, set 'local_files_only'"
" to False."
)
return None
# From now on, etag is not None.
if os.path.exists(cache_path) and not force_download:
return cache_path
# Prevent parallel downloads of the same file with a lock.
lock_path = cache_path + ".lock"
with FileLock(lock_path):
# If the download just completed while the lock was activated.
if os.path.exists(cache_path) and not force_download:
# Even if returning early like here, the lock will be released.
return cache_path
if resume_download:
incomplete_path = cache_path + ".incomplete"
@contextmanager
def _resumable_file_manager():
with open(incomplete_path, "a+b") as f:
yield f
temp_file_manager = _resumable_file_manager
if os.path.exists(incomplete_path):
resume_size = os.stat(incomplete_path).st_size
else:
resume_size = 0
else:
temp_file_manager = partial(tempfile.NamedTemporaryFile, dir=cache_dir, delete=False)
resume_size = 0
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
with temp_file_manager() as temp_file:
logger.info("%s not found in cache or force_download set to True, downloading to %s", url, temp_file.name)
http_get(url, temp_file, proxies=proxies, resume_size=resume_size, user_agent=user_agent)
logger.info("storing %s in cache at %s", url, cache_path)
os.replace(temp_file.name, cache_path)
logger.info("creating metadata file for %s", cache_path)
meta = {"url": url, "etag": etag}
meta_path = cache_path + ".json"
with open(meta_path, "w") as meta_file:
json.dump(meta, meta_file)
return cache_path
class cached_property(property):
"""
Descriptor that mimics @property but caches output in member variable.
From tensorflow_datasets
Built-in in functools from Python 3.8.
"""
def __get__(self, obj, objtype=None):
# See docs.python.org/3/howto/descriptor.html#properties
if obj is None:
return self
if self.fget is None:
raise AttributeError("unreadable attribute")
attr = "__cached_" + self.fget.__name__
cached = getattr(obj, attr, None)
if cached is None:
cached = self.fget(obj)
setattr(obj, attr, cached)
return cached
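# Illustrative sketch of the caching behaviour (hypothetical class):
#
#   class Example:
#       @cached_property
#       def value(self):
#           print("computed once")
#           return 42
#
#   e = Example()
#   e.value  # prints "computed once" and stores the result in e.__cached_value
#   e.value  # returns 42 straight from the cached attribute, no recomputation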
def torch_required(func):
# Chose a different decorator name than in tests so it's clear they are not the same.
@wraps(func)
def wrapper(*args, **kwargs):
if is_torch_available():
return func(*args, **kwargs)
else:
raise ImportError(f"Method `{func.__name__}` requires PyTorch.")
return wrapper
def tf_required(func):
# Chose a different decorator name than in tests so it's clear they are not the same.
@wraps(func)
def wrapper(*args, **kwargs):
if is_tf_available():
return func(*args, **kwargs)
else:
raise ImportError(f"Method `{func.__name__}` requires TF.")
return wrapper
def is_tensor(x):
""" Tests if ``x`` is a :obj:`torch.Tensor`, :obj:`tf.Tensor` or :obj:`np.ndarray`. """
if is_torch_available():
import torch
if isinstance(x, torch.Tensor):
return True
if is_tf_available():
import tensorflow as tf
if isinstance(x, tf.Tensor):
return True
return isinstance(x, np.ndarray)
class ModelOutput(OrderedDict):
"""
Base class for all model outputs as dataclass. Has a ``__getitem__`` that allows indexing by integer or slice (like
    a tuple) or strings (like a dictionary) that will ignore the ``None`` attributes. Otherwise behaves like a
regular python dictionary.
.. warning::
You can't unpack a :obj:`ModelOutput` directly. Use the :meth:`~transformers.file_utils.ModelOutput.to_tuple`
method to convert it to a tuple before.
"""
def __post_init__(self):
class_fields = fields(self)
# Safety and consistency checks
assert len(class_fields), f"{self.__class__.__name__} has no fields."
assert all(
field.default is None for field in class_fields[1:]
), f"{self.__class__.__name__} should not have more than one required field."
first_field = getattr(self, class_fields[0].name)
other_fields_are_none = all(getattr(self, field.name) is None for field in class_fields[1:])
if other_fields_are_none and not is_tensor(first_field):
try:
iterator = iter(first_field)
first_field_iterator = True
except TypeError:
first_field_iterator = False
# if we provided an iterator as first field and the iterator is a (key, value) iterator
# set the associated fields
if first_field_iterator:
for element in iterator:
if (
not isinstance(element, (list, tuple))
or not len(element) == 2
or not isinstance(element[0], str)
):
break
setattr(self, element[0], element[1])
if element[1] is not None:
self[element[0]] = element[1]
else:
for field in class_fields:
v = getattr(self, field.name)
if v is not None:
self[field.name] = v
def __delitem__(self, *args, **kwargs):
raise Exception(f"You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.")
def setdefault(self, *args, **kwargs):
raise Exception(f"You cannot use ``setdefault`` on a {self.__class__.__name__} instance.")
def pop(self, *args, **kwargs):
raise Exception(f"You cannot use ``pop`` on a {self.__class__.__name__} instance.")
def update(self, *args, **kwargs):
raise Exception(f"You cannot use ``update`` on a {self.__class__.__name__} instance.")
def __getitem__(self, k):
if isinstance(k, str):
inner_dict = {k: v for (k, v) in self.items()}
return inner_dict[k]
else:
return self.to_tuple()[k]
def to_tuple(self) -> Tuple[Any]:
"""
Convert self to a tuple containing all the attributes/keys that are not ``None``.
"""
return tuple(self[k] for k in self.keys())
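# Illustrative sketch of the indexing behaviour described in the class docstring, using a
# hypothetical subclass (not one of the library's real output classes); assumes
# `from dataclasses import dataclass`.
#
#   @dataclass
#   class ToyOutput(ModelOutput):
#       logits: object = None
#       hidden_states: object = None
#
#   out = ToyOutput(logits=np.ones(2))
#   out["logits"] is out.logits  # string indexing works like a dict lookup
#   out.to_tuple()               # -> (out.logits,) -- the None hidden_states is dropped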
|
PyTorch/Detection/SSD | SSD | LICENSE | Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2018 NVIDIA Corporation
Copyright 2018 The MLPerf Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. |
PyTorch/Segmentation/MaskRCNN/pytorch/configs | configs | e2e_faster_rcnn_X_101_32x8d_FPN_1x | MODEL:
META_ARCHITECTURE: "GeneralizedRCNN"
WEIGHT: "catalog://ImageNetPretrained/FAIR/20171220/X-101-32x8d"
BACKBONE:
CONV_BODY: "R-101-FPN"
OUT_CHANNELS: 256
RPN:
USE_FPN: True
ANCHOR_STRIDE: (4, 8, 16, 32, 64)
PRE_NMS_TOP_N_TRAIN: 2000
PRE_NMS_TOP_N_TEST: 1000
POST_NMS_TOP_N_TEST: 1000
FPN_POST_NMS_TOP_N_TEST: 1000
ROI_HEADS:
USE_FPN: True
ROI_BOX_HEAD:
POOLER_RESOLUTION: 7
POOLER_SCALES: (0.25, 0.125, 0.0625, 0.03125)
POOLER_SAMPLING_RATIO: 2
FEATURE_EXTRACTOR: "FPN2MLPFeatureExtractor"
PREDICTOR: "FPNPredictor"
RESNETS:
STRIDE_IN_1X1: False
NUM_GROUPS: 32
WIDTH_PER_GROUP: 8
DATASETS:
TRAIN: ("coco_2014_train", "coco_2014_valminusminival")
TEST: ("coco_2014_minival",)
DATALOADER:
SIZE_DIVISIBILITY: 32
SOLVER:
BASE_LR: 0.01
WEIGHT_DECAY: 0.0001
STEPS: (120000, 160000)
MAX_ITER: 180000
IMS_PER_BATCH: 8
|
Tools/PyTorch/TimeSeriesPredictionPlatform/models/tft_pyt/triton/deployment_toolkit/bermuda | bermuda | tensorrt | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import sys
from pathlib import Path
from typing import Dict, NamedTuple, Optional, Union
import numpy as np
# pytype: disable=import-error
try:
import pycuda.autoinit
import pycuda.driver as cuda
except Exception as e:
logging.getLogger(__name__).warning(f"Problems with importing pycuda package; {e}")
# pytype: enable=import-error
import tensorrt as trt # pytype: disable=import-error
from ..core import BaseLoader, BaseRunner, BaseRunnerSession, Format, Model, TensorSpec
from ..extensions import loaders, runners
LOGGER = logging.getLogger(__name__)
TRT_LOGGER = trt.Logger(trt.Logger.INFO)
# documentation:
# https://docs.nvidia.com/deeplearning/tensorrt/api/python_api/index.html
# https://docs.nvidia.com/deeplearning/tensorrt/developer-guide/index.html#python_samples_section
_NP_DTYPE2TRT_DTYPE = {
np.dtype("float32"): trt.DataType.FLOAT,
np.dtype("float16"): trt.DataType.HALF,
np.dtype("int8"): trt.DataType.INT8,
np.dtype("int32"): trt.DataType.INT32,
np.dtype("bool"): trt.DataType.BOOL,
}
class TensorRTLoader(BaseLoader):
def load(self, model_path: Union[str, Path], **_) -> Model:
model_path = Path(model_path)
LOGGER.debug(f"Loading TensorRT engine from {model_path}")
engine = self._load_engine(model_path)
if engine is None:
LOGGER.debug("Unable to load engine without plugins. Loading plugins.")
trt.init_libnvinfer_plugins(logger=TRT_LOGGER, namespace="")
LOGGER.debug(f"Loading TensorRT engine with plugins from {model_path}")
engine = self._load_engine(model_path)
if engine is None:
raise RuntimeError(f"Could not load ICudaEngine from {model_path}")
inputs = {}
outputs = {}
for binding_idx in range(engine.num_bindings):
name = engine.get_binding_name(binding_idx)
is_input = engine.binding_is_input(binding_idx)
dtype = np.dtype(trt.nptype(engine.get_binding_dtype(binding_idx))).name
shape = engine.get_binding_shape(binding_idx)
if is_input:
inputs[name] = TensorSpec(name, dtype, shape)
else:
outputs[name] = TensorSpec(name, dtype, shape)
return Model(engine, None, inputs, outputs)
def _load_engine(self, model_path: Path):
with model_path.open("rb") as fh, trt.Runtime(TRT_LOGGER) as runtime:
engine = runtime.deserialize_cuda_engine(fh.read())
return engine
class TRTBuffers(NamedTuple):
x_host: Optional[Dict[str, object]]
x_dev: Dict[str, object]
y_pred_host: Dict[str, object]
y_pred_dev: Dict[str, object]
class TensorRTRunner(BaseRunner):
def __init__(self):
pass
def init_inference(self, model: Model):
return TensorRTRunnerSession(model=model)
class TensorRTRunnerSession(BaseRunnerSession):
def __init__(self, model: Model):
super().__init__(model)
assert isinstance(model.handle, trt.ICudaEngine)
self._model = model
self._has_dynamic_shapes = None
self._context = None
self._engine: trt.ICudaEngine = self._model.handle
self._cuda_context = pycuda.autoinit.context
self._input_names = None
self._output_names = None
self._buffers = None
def __enter__(self):
self._context = self._engine.create_execution_context()
self._context.__enter__()
self._input_names = [
self._engine[idx] for idx in range(self._engine.num_bindings) if self._engine.binding_is_input(idx)
]
self._output_names = [
self._engine[idx] for idx in range(self._engine.num_bindings) if not self._engine.binding_is_input(idx)
]
# all_binding_shapes_specified is True for models without dynamic shapes
# so initially this variable is False for models with dynamic shapes
self._has_dynamic_shapes = not self._context.all_binding_shapes_specified
return self
def __exit__(self, exc_type, exc_value, traceback):
self._context.__exit__(exc_type, exc_value, traceback)
self._input_names = None
self._output_names = None
        # TODO: are CUDA buffers deallocated automatically?
self._buffers = None
def __call__(self, x):
buffers = self._prepare_buffers_if_needed(x)
bindings = self._update_bindings(buffers)
for name in self._input_names:
cuda.memcpy_htod(buffers.x_dev[name], buffers.x_host[name])
self._cuda_context.push()
self._context.execute_v2(bindings=bindings)
self._cuda_context.pop()
for name in self._output_names:
cuda.memcpy_dtoh(buffers.y_pred_host[name], buffers.y_pred_dev[name])
return buffers.y_pred_host
def _update_bindings(self, buffers: TRTBuffers):
bindings = [None] * self._engine.num_bindings
for name in buffers.y_pred_dev:
binding_idx: int = self._engine[name]
bindings[binding_idx] = buffers.y_pred_dev[name]
for name in buffers.x_dev:
binding_idx: int = self._engine[name]
bindings[binding_idx] = buffers.x_dev[name]
return bindings
def _set_dynamic_input_shapes(self, x_host):
def _is_shape_dynamic(input_shape):
return any([dim is None or dim == -1 for dim in input_shape])
for name in self._input_names:
bindings_idx = self._engine[name]
data_shape = x_host[name].shape # pytype: disable=attribute-error
if self._engine.is_shape_binding(bindings_idx):
input_shape = self._context.get_shape(bindings_idx)
if _is_shape_dynamic(input_shape):
self._context.set_shape_input(bindings_idx, data_shape)
else:
input_shape = self._engine.get_binding_shape(bindings_idx)
if _is_shape_dynamic(input_shape):
self._context.set_binding_shape(bindings_idx, data_shape)
assert self._context.all_binding_shapes_specified and self._context.all_shape_inputs_specified
def _prepare_buffers_if_needed(self, x_host: Dict[str, object]):
# pytype: disable=attribute-error
new_batch_size = list(x_host.values())[0].shape[0]
current_batch_size = list(self._buffers.y_pred_host.values())[0].shape[0] if self._buffers else 0
# pytype: enable=attribute-error
if self._has_dynamic_shapes or new_batch_size != current_batch_size:
            # TODO: are CUDA buffers deallocated automatically?
self._set_dynamic_input_shapes(x_host)
y_pred_host = {}
for name in self._output_names:
shape = self._context.get_binding_shape(self._engine[name])
binding_idx: int = self._engine[name]
dtype_from_trt_binding = np.dtype(trt.nptype(self._engine.get_binding_dtype(binding_idx)))
dtype_from_model_spec = np.dtype(self._model.outputs[name].dtype)
assert dtype_from_model_spec == dtype_from_trt_binding
y_pred_host[name] = np.zeros(shape, dtype=dtype_from_model_spec)
y_pred_dev = {name: cuda.mem_alloc(data.nbytes) for name, data in y_pred_host.items()}
# cast host input into binding dtype
def _cast_input(name, data):
binding_idx: int = self._engine[name]
np_dtype = trt.nptype(self._engine.get_binding_dtype(binding_idx))
return data.astype(np_dtype)
x_host = {name: _cast_input(name, host_input) for name, host_input in x_host.items()}
x_dev = {
name: cuda.mem_alloc(host_input.nbytes)
for name, host_input in x_host.items()
if name in self._input_names # pytype: disable=attribute-error
}
self._buffers = TRTBuffers(None, x_dev, y_pred_host, y_pred_dev)
return self._buffers._replace(x_host=x_host)
if "pycuda.driver" in sys.modules:
loaders.register_extension(Format.TRT.value, TensorRTLoader)
runners.register_extension(Format.TRT.value, TensorRTRunner)
else:
LOGGER.warning("Do not register TensorRT extension due problems with importing pycuda.driver package.")
|
TensorFlow2/Segmentation/Contrib/UNet3P/callbacks | callbacks | timing_callback | import sys
from timeit import default_timer as timer
import tensorflow as tf
class TimingCallback(tf.keras.callbacks.Callback):
"""
Custom callback to note training time, latency and throughput
"""
def __init__(self, ):
super(TimingCallback, self).__init__()
self.train_start_time = None
self.train_end_time = None
self.batch_time = []
self.batch_start_time = None
def on_train_begin(self, logs: dict):
tf.print("Training starting time noted.", output_stream=sys.stdout)
self.train_start_time = timer()
def on_train_end(self, logs: dict):
tf.print("Training ending time noted.", output_stream=sys.stdout)
self.train_end_time = timer()
def on_train_batch_begin(self, batch: int, logs: dict):
self.batch_start_time = timer()
def on_train_batch_end(self, batch: int, logs: dict):
self.batch_time.append(timer() - self.batch_start_time)
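# Example usage (a sketch; `model`, `dataset`, and BATCH_SIZE are placeholders --
# the training script wires this callback up itself):
#
#   timing_cb = TimingCallback()
#   model.fit(dataset, epochs=1, callbacks=[timing_cb])
#   throughput = BATCH_SIZE * len(timing_cb.batch_time) / sum(timing_cb.batch_time)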
|
PyTorch/Translation/Transformer | Transformer | CONTRIBUTING | # Contributing to FAIR Sequence-to-Sequence Toolkit (PyTorch)
We want to make contributing to this project as easy and transparent as
possible.
## Pull Requests
We actively welcome your pull requests.
1. Fork the repo and create your branch from `master`.
2. If you've added code that should be tested, add tests.
3. If you've changed APIs, update the documentation.
4. Ensure the test suite passes.
5. Make sure your code lints.
6. If you haven't already, complete the Contributor License Agreement ("CLA").
## Contributor License Agreement ("CLA")
In order to accept your pull request, we need you to submit a CLA. You only need
to do this once to work on any of Facebook's open source projects.
Complete your CLA here: <https://code.facebook.com/cla>
## Issues
We use GitHub issues to track public bugs. Please ensure your description is
clear and has sufficient instructions to be able to reproduce the issue.
## Coding Style
We try to follow the PEP style guidelines and encourage you to as well.
## License
By contributing to FAIR Sequence-to-Sequence Toolkit, you agree that your contributions will be licensed
under the LICENSE file in the root directory of this source tree. |
CUDA-Optimized/FastSpeech/fastspeech/hparams | hparams | base | # Path
dataset_path: "/workspace/fastspeech/LJSpeech-1.1"
tacotron2_path: "/workspace/fastspeech/tacotron2_statedict.pt"
waveglow_path: "/workspace/fastspeech/nvidia_waveglow256pyt_fp16"
mels_path: "/workspace/fastspeech/mels_ljspeech1.1"
aligns_path: "/workspace/fastspeech/aligns_ljspeech1.1"
log_path: "/workspace/fastspeech/logs"
checkpoint_path: "/workspace/fastspeech/checkpoints"
# Audio
sr: 22050
n_fft: 1024
win_len: 1024
hop_len: 256
num_mels: 80
mel_fmin: 0.0
mel_fmax: 8000.0
# Text
text_cleaners: ['english_cleaners']
# Model
d_model: 384
phoneme_side_n_layer: 6
phoneme_side_head: 2
phoneme_side_conv1d_filter_size: 1536
max_seq_len: 2048 # 23s
phoneme_side_output_size: 384
mel_side_n_layer: 6
mel_side_head: 2
mel_side_conv1d_filter_size: 1536
mel_side_output_size: 384
fft_conv1d_kernel: 3
fft_conv1d_padding: 1
duration_predictor_filter_size: 256
duration_predictor_kernel_size: 3
dropout: 0.1
fused_layernorm: False |
PyTorch/LanguageModeling/BERT/data | data | GLUEDownloader | # Copyright (c) 2019-2021 NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import wget
from pathlib import Path
def mkdir(path):
Path(path).mkdir(parents=True, exist_ok=True)
class GLUEDownloader:
def __init__(self, save_path):
self.save_path = save_path + '/glue'
def download(self, task_name):
mkdir(self.save_path)
if task_name in {'mrpc', 'mnli'}:
task_name = task_name.upper()
elif task_name == 'cola':
task_name = 'CoLA'
else: # SST-2
assert task_name == 'sst-2'
task_name = 'SST'
wget.download(
'https://gist.githubusercontent.com/roclark/9ab385e980c5bdb9e15ecad5963848e0/raw/c9dcc44a6e1336d2411e3333c25bcfd507c39c81/download_glue_data.py',
out=self.save_path,
)
sys.path.append(self.save_path)
import download_glue_data
download_glue_data.main(
['--data_dir', self.save_path, '--tasks', task_name])
sys.path.pop()
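# Example usage (a sketch; the path below is a placeholder):
#   GLUEDownloader('/workspace/bert/download').download('mrpc')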
|
PyTorch/SpeechSynthesis/Tacotron2/trtis_cpp/src/trt/denoiser | denoiser | denoiserLoader | /*
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "denoiserLoader.h"
#include "denoiserBuilder.h"
#include "engineCache.h"
#include "jsonModelImporter.h"
#include "utils.h"
#include "NvInfer.h"
#include <stdexcept>
using namespace nvinfer1;
namespace tts
{
/******************************************************************************
* PUBLIC STATIC METHODS ******************************************************
*****************************************************************************/
std::shared_ptr<DenoiserInstance> DenoiserLoader::load(
EngineCache& cache, IBuilder& builder, const std::string& filename, const bool fp16, const int batchSize)
{
TRTPtr<ICudaEngine> engine;
if (Utils::hasExtension(filename, ".json")) {
DenoiserBuilder denoiserBuilder(2 << 13);
JSONModelImporter importer(filename);
engine = denoiserBuilder.build(importer, builder, batchSize, fp16);
// save generated engine
const std::string engFilename(filename + ".eng");
cache.save(*engine, engFilename);
}
else if (Utils::hasExtension(filename, ".eng"))
{
engine = cache.load(filename);
if (engine->getMaxBatchSize() < batchSize)
{
                throw std::runtime_error(
                    "Engine " + filename
                    + " does not support the requested batch size: "
                    + std::to_string(engine->getMaxBatchSize()) + " / "
                    + std::to_string(batchSize)
                    + ". Rebuild the engine with a larger batch size.");
}
}
else
{
throw std::runtime_error("Unknown model file type: " + filename);
}
return std::make_shared<DenoiserInstance>(std::move(engine));
}
} // namespace tts
|
TensorFlow/LanguageModeling/BERT/data | data | WikiDownloader | # Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import bz2
import os
import urllib.request
import sys
import subprocess
class WikiDownloader:
def __init__(self, language, save_path):
self.save_path = save_path + '/wikicorpus_' + language
if not os.path.exists(self.save_path):
os.makedirs(self.save_path)
self.language = language
self.download_urls = {
'en' : 'https://dumps.wikimedia.org/enwiki/latest/enwiki-latest-pages-articles.xml.bz2',
'zh' : 'https://dumps.wikimedia.org/zhwiki/latest/zhwiki-latest-pages-articles.xml.bz2'
}
self.output_files = {
'en' : 'wikicorpus_en.xml.bz2',
'zh' : 'wikicorpus_zh.xml.bz2'
}
def download(self):
if self.language in self.download_urls:
url = self.download_urls[self.language]
filename = self.output_files[self.language]
print('Downloading:', url)
if os.path.isfile(self.save_path + '/' + filename):
print('** Download file already exists, skipping download')
else:
cmd = ['wget', url, '--output-document={}'.format(self.save_path + '/' + filename)]
print('Running:', cmd)
status = subprocess.run(cmd)
if status.returncode != 0:
raise RuntimeError('Wiki download not successful')
# Always unzipping since this is relatively fast and will overwrite
print('Unzipping:', self.output_files[self.language])
subprocess.run('bzip2 -dk ' + self.save_path + '/' + filename, shell=True, check=True)
else:
assert False, 'WikiDownloader not implemented for this language yet.'
|
PyTorch/Segmentation/MaskRCNN/pytorch/maskrcnn_benchmark/modeling/roi_heads/mask_head | mask_head | roi_mask_predictors | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
from torch import nn
from torch.nn import functional as F
from maskrcnn_benchmark.layers import Conv2d
from maskrcnn_benchmark.layers import ConvTranspose2d
class MaskRCNNC4Predictor(nn.Module):
def __init__(self, cfg):
super(MaskRCNNC4Predictor, self).__init__()
num_classes = cfg.MODEL.ROI_BOX_HEAD.NUM_CLASSES
dim_reduced = cfg.MODEL.ROI_MASK_HEAD.CONV_LAYERS[-1]
if cfg.MODEL.ROI_HEADS.USE_FPN:
num_inputs = dim_reduced
else:
stage_index = 4
stage2_relative_factor = 2 ** (stage_index - 1)
res2_out_channels = cfg.MODEL.RESNETS.RES2_OUT_CHANNELS
num_inputs = res2_out_channels * stage2_relative_factor
self.conv5_mask = ConvTranspose2d(num_inputs, dim_reduced, 2, 2, 0)
self.mask_fcn_logits = Conv2d(dim_reduced, num_classes, 1, 1, 0)
for name, param in self.named_parameters():
if "bias" in name:
nn.init.constant_(param, 0)
elif "weight" in name:
# Caffe2 implementation uses MSRAFill, which in fact
# corresponds to kaiming_normal_ in PyTorch
nn.init.kaiming_normal_(param, mode="fan_out", nonlinearity="relu")
def forward(self, x):
x = F.relu(self.conv5_mask(x))
return self.mask_fcn_logits(x)
_ROI_MASK_PREDICTOR = {"MaskRCNNC4Predictor": MaskRCNNC4Predictor}
def make_roi_mask_predictor(cfg):
func = _ROI_MASK_PREDICTOR[cfg.MODEL.ROI_MASK_HEAD.PREDICTOR]
return func(cfg)
|
TensorFlow/Segmentation/UNet_Medical/examples | examples | unet_TRAIN_BENCHMARK_TF-AMP_1GPU | # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script launches U-Net run in FP16 on 1 GPU for training benchmarking. Usage:
# bash unet_TRAIN_BENCHMARK_TF-AMP_1GPU.sh <path to dataset> <path to results directory> <batch size>
horovodrun -np 1 python main.py --data_dir $1 --model_dir $2 --batch_size $3 --exec_mode train --augment --benchmark --warmup_steps 200 --max_steps 1000 --xla --amp |
TensorFlow/Detection/SSD/models/research/slim/datasets | datasets | download_and_convert_flowers | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Downloads and converts Flowers data to TFRecords of TF-Example protos.
This module downloads the Flowers data, uncompresses it, reads the files
that make up the Flowers data and creates two TFRecord datasets: one for train
and one for test. Each TFRecord dataset is comprised of a set of TF-Example
protocol buffers, each of which contain a single image and label.
The script should take about a minute to run.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import os
import random
import sys
import tensorflow as tf
from datasets import dataset_utils
# The URL where the Flowers data can be downloaded.
_DATA_URL = 'http://download.tensorflow.org/example_images/flower_photos.tgz'
# The number of images in the validation set.
_NUM_VALIDATION = 350
# Seed for repeatability.
_RANDOM_SEED = 0
# The number of shards per dataset split.
_NUM_SHARDS = 5
class ImageReader(object):
"""Helper class that provides TensorFlow image coding utilities."""
def __init__(self):
# Initializes function that decodes RGB JPEG data.
self._decode_jpeg_data = tf.placeholder(dtype=tf.string)
self._decode_jpeg = tf.image.decode_jpeg(self._decode_jpeg_data, channels=3)
def read_image_dims(self, sess, image_data):
image = self.decode_jpeg(sess, image_data)
return image.shape[0], image.shape[1]
def decode_jpeg(self, sess, image_data):
image = sess.run(self._decode_jpeg,
feed_dict={self._decode_jpeg_data: image_data})
assert len(image.shape) == 3
assert image.shape[2] == 3
return image
def _get_filenames_and_classes(dataset_dir):
"""Returns a list of filenames and inferred class names.
Args:
dataset_dir: A directory containing a set of subdirectories representing
class names. Each subdirectory should contain PNG or JPG encoded images.
Returns:
A list of image file paths, relative to `dataset_dir` and the list of
subdirectories, representing class names.
"""
flower_root = os.path.join(dataset_dir, 'flower_photos')
directories = []
class_names = []
for filename in os.listdir(flower_root):
path = os.path.join(flower_root, filename)
if os.path.isdir(path):
directories.append(path)
class_names.append(filename)
photo_filenames = []
for directory in directories:
for filename in os.listdir(directory):
path = os.path.join(directory, filename)
photo_filenames.append(path)
return photo_filenames, sorted(class_names)
def _get_dataset_filename(dataset_dir, split_name, shard_id):
output_filename = 'flowers_%s_%05d-of-%05d.tfrecord' % (
split_name, shard_id, _NUM_SHARDS)
return os.path.join(dataset_dir, output_filename)
def _convert_dataset(split_name, filenames, class_names_to_ids, dataset_dir):
"""Converts the given filenames to a TFRecord dataset.
Args:
split_name: The name of the dataset, either 'train' or 'validation'.
filenames: A list of absolute paths to png or jpg images.
class_names_to_ids: A dictionary from class names (strings) to ids
(integers).
dataset_dir: The directory where the converted datasets are stored.
"""
assert split_name in ['train', 'validation']
num_per_shard = int(math.ceil(len(filenames) / float(_NUM_SHARDS)))
with tf.Graph().as_default():
image_reader = ImageReader()
with tf.Session('') as sess:
for shard_id in range(_NUM_SHARDS):
output_filename = _get_dataset_filename(
dataset_dir, split_name, shard_id)
with tf.python_io.TFRecordWriter(output_filename) as tfrecord_writer:
start_ndx = shard_id * num_per_shard
end_ndx = min((shard_id+1) * num_per_shard, len(filenames))
for i in range(start_ndx, end_ndx):
sys.stdout.write('\r>> Converting image %d/%d shard %d' % (
i+1, len(filenames), shard_id))
sys.stdout.flush()
# Read the filename:
image_data = tf.gfile.FastGFile(filenames[i], 'rb').read()
height, width = image_reader.read_image_dims(sess, image_data)
class_name = os.path.basename(os.path.dirname(filenames[i]))
class_id = class_names_to_ids[class_name]
example = dataset_utils.image_to_tfexample(
image_data, b'jpg', height, width, class_id)
tfrecord_writer.write(example.SerializeToString())
sys.stdout.write('\n')
sys.stdout.flush()
def _clean_up_temporary_files(dataset_dir):
"""Removes temporary files used to create the dataset.
Args:
dataset_dir: The directory where the temporary files are stored.
"""
filename = _DATA_URL.split('/')[-1]
filepath = os.path.join(dataset_dir, filename)
tf.gfile.Remove(filepath)
tmp_dir = os.path.join(dataset_dir, 'flower_photos')
tf.gfile.DeleteRecursively(tmp_dir)
def _dataset_exists(dataset_dir):
for split_name in ['train', 'validation']:
for shard_id in range(_NUM_SHARDS):
output_filename = _get_dataset_filename(
dataset_dir, split_name, shard_id)
if not tf.gfile.Exists(output_filename):
return False
return True
def run(dataset_dir):
"""Runs the download and conversion operation.
Args:
dataset_dir: The dataset directory where the dataset is stored.
"""
if not tf.gfile.Exists(dataset_dir):
tf.gfile.MakeDirs(dataset_dir)
if _dataset_exists(dataset_dir):
print('Dataset files already exist. Exiting without re-creating them.')
return
dataset_utils.download_and_uncompress_tarball(_DATA_URL, dataset_dir)
photo_filenames, class_names = _get_filenames_and_classes(dataset_dir)
class_names_to_ids = dict(zip(class_names, range(len(class_names))))
# Divide into train and test:
random.seed(_RANDOM_SEED)
random.shuffle(photo_filenames)
training_filenames = photo_filenames[_NUM_VALIDATION:]
validation_filenames = photo_filenames[:_NUM_VALIDATION]
# First, convert the training and validation sets.
_convert_dataset('train', training_filenames, class_names_to_ids,
dataset_dir)
_convert_dataset('validation', validation_filenames, class_names_to_ids,
dataset_dir)
# Finally, write the labels file:
labels_to_class_names = dict(zip(range(len(class_names)), class_names))
dataset_utils.write_label_file(labels_to_class_names, dataset_dir)
_clean_up_temporary_files(dataset_dir)
print('\nFinished converting the Flowers dataset!')
|
PyTorch/SpeechSynthesis/Tacotron2/trtis_cpp/src/test | test | Taco2ProjectionLayerPlugin_test | /*
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "UnitTest.hpp"
#include "binding.h"
#include "cudaMemory.h"
#include "cudaUtils.h"
#include "logging.h"
#include "taco2ProjectionLayerPlugin.h"
#include "trtUtils.h"
#include "NvInfer.h"
#include <random>
#include <vector>
using namespace nvinfer1;
using namespace nvinfer1::plugin;
using namespace tts;
/******************************************************************************
* HELPER FUNCTIONS ***********************************************************
*****************************************************************************/
namespace
{
template <typename RNG>
std::vector<float> genVec(const size_t size, RNG& rng)
{
std::uniform_real_distribution<float> dist(-1.0, 1.0);
std::vector<float> vec(size);
for (size_t i = 0; i < size; ++i) {
vec[i] = dist(rng);
}
return vec;
}
} // namespace
/******************************************************************************
* UNIT TESTS *****************************************************************
*****************************************************************************/
TEST(CPUCompareTest)
{
std::mt19937 rng(0);
const int hiddenInputLength = 1024;
const int contextInputLength = 512;
const int numChannelDimensions = 80;
const int numGateDimensions = 1;
const int inputLength = hiddenInputLength + contextInputLength;
const int numDimensions = numChannelDimensions + numGateDimensions;
// weights
std::vector<float> weightChannel
= genVec(inputLength * numChannelDimensions, rng);
std::vector<float> weightGate = genVec(inputLength * numGateDimensions, rng);
std::vector<float> biasChannel = genVec(numChannelDimensions, rng);
std::vector<float> biasGate = genVec(numGateDimensions, rng);
Taco2ProjectionLayerPlugin layer(
TRTUtils::toWeights(weightChannel),
TRTUtils::toWeights(weightGate),
TRTUtils::toWeights(biasChannel),
TRTUtils::toWeights(biasGate),
hiddenInputLength,
contextInputLength,
numChannelDimensions,
numGateDimensions);
std::vector<float> inputHidden = genVec(hiddenInputLength, rng);
std::vector<float> inputContext = genVec(contextInputLength, rng);
CudaMemory<float> inputHiddenDevice(inputHidden);
CudaMemory<float> inputContextDevice(inputContext);
std::vector<Dims> inputDims{Dims3(1, 1, hiddenInputLength),
Dims3(1, 1, contextInputLength)};
const std::vector<Dims> outputDims{Dims3(1, 1, numDimensions)};
const std::vector<DataType> dataTypes(2, DataType::kFLOAT);
const std::vector<DynamicPluginTensorDesc> inDynDesc{
{{Dims3(-1, 1, hiddenInputLength),
DataType::kFLOAT,
TensorFormat::kLINEAR,
1.0f},
Dims3(1, 1, hiddenInputLength),
Dims3(1, 1, hiddenInputLength)},
{{Dims3(-1, 1, contextInputLength),
DataType::kFLOAT,
TensorFormat::kLINEAR,
1.0f},
Dims3(1, 1, contextInputLength),
Dims3(1, 1, contextInputLength)}};
const std::vector<DynamicPluginTensorDesc> outDynDesc{
{{Dims3(-1, 1, numDimensions),
DataType::kFLOAT,
TensorFormat::kLINEAR,
1.0f},
Dims3(1, 1, numDimensions),
Dims3(1, 1, numDimensions)}};
layer.configurePlugin(
inDynDesc.data(), inDynDesc.size(), outDynDesc.data(), outDynDesc.size());
layer.initialize();
std::vector<const float*> inputs{inputHiddenDevice.data(),
inputContextDevice.data()};
CudaMemory<float> outputDevice(numDimensions);
std::vector<float*> outputs{outputDevice.data()};
const std::vector<PluginTensorDesc> inDesc{
{Dims3(1, 1, hiddenInputLength),
DataType::kFLOAT,
TensorFormat::kLINEAR,
1.0f},
{Dims3(1, 1, contextInputLength),
DataType::kFLOAT,
TensorFormat::kLINEAR,
1.0f},
};
const std::vector<PluginTensorDesc> outDesc{{Dims3(1, 1, numDimensions),
DataType::kFLOAT,
TensorFormat::kLINEAR,
1.0f}};
CudaMemory<uint8_t> workspace(layer.getWorkspaceSize(
inDesc.data(),
static_cast<int>(inDesc.size()),
outDesc.data(),
static_cast<int>(outDesc.size())));
layer.enqueue(
inDesc.data(),
outDesc.data(),
reinterpret_cast<const void* const*>(inputs.data()),
reinterpret_cast<void**>(outputs.data()),
workspace.data(),
0);
CudaUtils::sync(0);
// perform operations on cpu
std::vector<float> expOutput(numDimensions);
for (int i = 0; i < numChannelDimensions; ++i) {
float v = 0.0f;
for (int j = 0; j < hiddenInputLength; ++j) {
v += inputHidden[j] * weightChannel[i * inputLength + j];
}
for (int j = 0; j < contextInputLength; ++j) {
v += inputContext[j]
* weightChannel[i * inputLength + j + hiddenInputLength];
}
expOutput[i] = v + biasChannel[i];
}
for (int i = 0; i < numGateDimensions; ++i) {
float v = 0.0f;
for (int j = 0; j < hiddenInputLength; ++j) {
v += inputHidden[j] * weightGate[i * inputLength + j];
}
for (int j = 0; j < contextInputLength; ++j) {
v += inputContext[j]
* weightGate[i * inputLength + j + hiddenInputLength];
}
expOutput[i + numChannelDimensions] = v + biasGate[i];
}
// match outputs
const std::vector<float> actOutput = outputDevice.toHost();
ASSERT_EQ(expOutput.size(), actOutput.size());
for (size_t i = 0; i < expOutput.size(); ++i) {
EXPECT_NEAR(expOutput[i], actOutput[i], 1e-4) << "i = " << i;
}
}
|
Tools/DGLPyTorch/SyntheticGraphGeneration/docker_scripts | docker_scripts | run_docker_notebook | if [ ! "$(ls | grep -c docker_scripts)" -eq 1 ]; then
echo "Run this script from root directory. Usage: bash ./docker_scripts/run_docker_notebook.sh"
exit 1
fi
IMG="${IMAGE:=graph_gen}"
CMD='cd /workspace && echo -e "\nOPEN http://<your_ip>:9916/ and copy token\n\n" && jupyter notebook --ip=0.0.0.0 --port=9916'
nvidia-docker run --rm -it \
--ipc=host \
--net=host \
-v "$(pwd)":/workspace \
${IMG} \
bash -c "${CMD}"
# OPEN http://<your_ip>:9916/
|
TensorFlow2/Segmentation/UNet_Medical/examples | examples | unet_INFER_BENCHMARK_TF-AMP | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script launches U-Net run in FP16 on 1 GPU for inference benchmarking. Usage:
# bash unet_INFER_BENCHMARK_TF-AMP.sh <path to dataset> <path to results directory> <batch size>
horovodrun -np 1 python main.py --data_dir $1 --model_dir $2 --batch_size $3 --exec_mode predict --benchmark --warmup_steps 200 --max_steps 600 --xla --amp --fold 0 |
PyTorch/LanguageModeling/BERT/lamb_amp_opt | lamb_amp_opt | main | import copy
import apex
import torch
import numpy as np
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import make_classification, make_regression
from fused_lamb import FusedLAMBAMP
N_SAMPLES = 10000
N_FEATURES = 32
BATCH_SIZE = 100
def print_param_diff(optimizer):
with torch.no_grad():
for i, (group, master_group) in enumerate(zip(optimizer.param_groups, optimizer.param_groups_fp32)):
for ii, (p, master_p) in enumerate(zip(group['params'], master_group['params'])):
diff = (p - master_p.half()).float().abs().mean().item()
print(f" {i}th group, {ii}th param diff: {diff}")
class TestMod(torch.nn.Module):
def __init__(self):
super().__init__()
self.layers = torch.nn.Sequential(
torch.nn.Linear(N_FEATURES, N_FEATURES // 2),
torch.nn.ReLU(),
torch.nn.Linear(N_FEATURES // 2, 2),
)
def forward(self, inputs) :
return self.layers(inputs)
def main() :
loss = torch.nn.CrossEntropyLoss(ignore_index=-1)
model = TestMod()
model.cuda()
model.half()
model.train()
model = torch.jit.script(model)
param_optimizer = list(model.named_parameters())
no_decay = ['bias', 'gamma', 'beta', 'LayerNorm']
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}]
grad_scaler = torch.cuda.amp.GradScaler(enabled=True)
optimizer = FusedLAMBAMP(optimizer_grouped_parameters)
x, y = make_classification(n_samples=N_SAMPLES, n_features=N_FEATURES, random_state=0)
x = StandardScaler().fit_transform(x)
inputs = torch.from_numpy(x).cuda().half()
targets = torch.from_numpy(y).cuda().long()
del x, y
dataset = torch.utils.data.TensorDataset(inputs, targets)
loader = torch.utils.data.DataLoader(dataset, batch_size=BATCH_SIZE)
for epoch in range(20):
loss_values = []
for i, (x, y) in enumerate(loader):
with torch.cuda.amp.autocast():
out1 = model(x)
# Might be better to run `CrossEntropyLoss` in
# `with torch.cuda.amp.autocast(enabled=False)` context.
out2 = loss(out1, y)
grad_scaler.scale(out2).backward()
grad_scaler.step(optimizer)
grad_scaler.update()
optimizer.zero_grad(set_to_none=True)
loss_values.append(out2.item())
print(f"Epoch: {epoch}, Loss avg: {np.mean(loss_values)}")
print_param_diff(optimizer)
print("state dict check")
optimizer.load_state_dict(optimizer.state_dict())
print_param_diff(optimizer)
if __name__ == '__main__':
main()
|
PyTorch/SpeechRecognition/QuartzNet/platform | platform | DGX2_QuartzNet_AMP_8GPU | #!/bin/bash
set -a
: ${NUM_GPUS:=8}
: ${GPU_BATCH_SIZE:=36}
: ${GRAD_ACCUMULATION:=4}
: ${AMP:=true}
bash scripts/train.sh "$@"
|
PyTorch/Segmentation/nnUNet | nnUNet | evaluate | # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glob
import os
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser
import nibabel
import numpy as np
from tqdm import tqdm
parser = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter)
parser.add_argument("--preds", type=str, required=True, help="Path to predictions")
parser.add_argument("--lbls", type=str, required=True, help="Path to labels")
def get_stats(pred, targ, class_idx):
tp = np.logical_and(pred == class_idx, targ == class_idx).sum()
fn = np.logical_and(pred != class_idx, targ == class_idx).sum()
fp = np.logical_and(pred == class_idx, targ != class_idx).sum()
return tp, fn, fp
if __name__ == "__main__":
args = parser.parse_args()
y_pred = sorted(glob.glob(os.path.join(args.preds, "*.npy")))
y_true = [os.path.join(args.lbls, os.path.basename(pred).replace("npy", "nii.gz")) for pred in y_pred]
assert len(y_pred) > 0
n_class = np.load(y_pred[0]).shape[0] - 1
dice = [[] for _ in range(n_class)]
for pr, lb in tqdm(zip(y_pred, y_true), total=len(y_pred)):
prd = np.transpose(np.argmax(np.load(pr), axis=0), (2, 1, 0))
lbl = nibabel.load(lb).get_fdata().astype(np.uint8)
for i in range(1, n_class + 1):
counts = np.count_nonzero(lbl == i) + np.count_nonzero(prd == i)
if counts == 0: # no foreground class
dice[i - 1].append(1)
else:
tp, fn, fp = get_stats(prd, lbl, i)
denum = 2 * tp + fp + fn
dice[i - 1].append(2 * tp / denum if denum != 0 else 0)
dice_score = np.mean(np.array(dice), axis=-1)
dice_cls = " ".join([f"L{i+1} {round(dice_score[i], 4)}" for i, dice in enumerate(dice_score)])
print(f"mean dice: {round(np.mean(dice_score), 4)} - {dice_cls}")
|
PyTorch/Forecasting/TFT | TFT | README | # Temporal Fusion Transformer For PyTorch
This repository provides a script and recipe to train the Temporal Fusion Transformer model to achieve state-of-the-art accuracy. The content of this repository is tested and maintained by NVIDIA.
## Table Of Contents
- [Model overview](#model-overview)
* [Model architecture](#model-architecture)
* [Default configuration](#default-configuration)
* [Feature support matrix](#feature-support-matrix)
* [Features](#features)
* [Mixed precision training](#mixed-precision-training)
* [Enabling mixed precision](#enabling-mixed-precision)
* [Enabling TF32](#enabling-tf32)
* [Glossary](#glossary)
- [Setup](#setup)
* [Requirements](#requirements)
- [Quick Start Guide](#quick-start-guide)
- [Advanced](#advanced)
* [Scripts and sample code](#scripts-and-sample-code)
* [Command-line options](#command-line-options)
* [Getting the data](#getting-the-data)
* [Dataset guidelines](#dataset-guidelines)
* [Multi-dataset](#multi-dataset)
* [Training process](#training-process)
* [Inference process](#inference-process)
* [Triton deployment](#triton-deployment)
- [Performance](#performance)
* [Benchmarking](#benchmarking)
* [Training performance benchmark](#training-performance-benchmark)
* [Inference performance benchmark](#inference-performance-benchmark)
* [Results](#results)
* [Training accuracy results](#training-accuracy-results)
* [Training accuracy: NVIDIA DGX A100 (8x A100 80GB)](#training-accuracy-nvidia-dgx-a100-8x-a100-80gb)
* [Training accuracy: NVIDIA DGX-1 (8x V100 16GB)](#training-accuracy-nvidia-dgx-1-8x-v100-16gb)
* [Training stability test](#training-stability-test)
* [Training performance results](#training-performance-results)
* [Training performance: NVIDIA DGX A100 (8x A100 80GB)](#training-performance-nvidia-dgx-a100-8x-a100-80gb)
* [Training performance: NVIDIA DGX-1 (8x V100 16GB)](#training-performance-nvidia-dgx-1-8x-v100-16gb)
* [Inference performance results](#inference-performance-results)
* [Inference Performance: NVIDIA DGX A100](#inference-performance-nvidia-dgx-a100)
* [Inference Performance: NVIDIA DGX-1 V100](#inference-performance-nvidia-dgx-1-v100)
- [Release notes](#release-notes)
* [Changelog](#changelog)
* [Known issues](#known-issues)
## Model overview
The Temporal Fusion Transformer [TFT](https://arxiv.org/abs/1912.09363) model is a state-of-the-art architecture for interpretable, multi-horizon time-series prediction. The model was first developed and [implemented by Google](https://github.com/google-research/google-research/tree/master/tft) in collaboration with the University of Oxford.
This implementation differs from the reference implementation in how it handles missing data, which is common in production datasets: missing values are either masked in the attention matrices or embedded as a special value in the latent space.
This model enables the prediction of confidence intervals for future values of time series for multiple future timesteps.
This model is trained with mixed precision using Tensor Cores on Volta, Turing, and the NVIDIA Ampere GPU architectures. Therefore, researchers can get results 1.45x faster than training without Tensor Cores while experiencing the benefits of mixed precision training. This model is tested against each NGC monthly container release to ensure consistent accuracy and performance over time.
### Model architecture
The TFT model is a hybrid architecture joining LSTM encoding of time series and the interpretability of transformer attention layers. Prediction is based on three types of variables: static (constant for a given time series), known (known in advance for the whole history and future), and observed (known only for historical data). All these variables come in two flavors: categorical and continuous. In addition to historical data, we feed the model with the historical values of the time series. All variables are embedded in a high-dimensional space by learning an embedding vector. Categorical variable embeddings are learned in the classical sense of embedding discrete values. For each continuous variable, the model learns a single vector, which is then scaled by this variable’s value for further processing. The next step is to filter variables through the Variable Selection Network (VSN), which assigns weights to the inputs in accordance with their relevance to the prediction. Static variables are used as a context for the variable selection of other variables and as the initial state of the LSTM encoders.
After encoding, variables are passed to multi-head attention layers (the decoder), which produce the final prediction. The whole architecture is interwoven with residual connections and gating mechanisms that allow it to adapt to various problems by skipping some parts of the network.
For the sake of explainability, the heads of the self-attention layers share value matrices. This allows interpreting self-attention as an ensemble of models predicting different temporal patterns over the same feature set. The other feature that helps us understand the model is the VSN activations, which tell us how relevant a given feature is to the prediction.

*image source: https://arxiv.org/abs/1912.09363*
### Default configuration
The specific configuration of the TFT model depends on the dataset used. Not only is the size of the model subject to change, but so are the data sampling and preprocessing strategies. During preprocessing, data is normalized per feature. For some of the datasets, we apply scaling per time series, which takes into account shifts in distribution between entities (e.g., a factory consumes more electricity than an average house). The model is trained with the quantile loss: <img src="https://render.githubusercontent.com/render/math?math=\Large\sum_{i=1}^N\sum_{q\in\mathcal{Q}}\sum_{t=1}^{t_{max}}\frac{QL(y_{it},\hat{y}_i(q,t),q)}{Nt_{max}}">
for the quantiles in [0.1, 0.5, 0.9]. The default configurations are tuned for distributed training on DGX-1 32GB with mixed precision. We use dynamic loss scaling. Specific values are provided in the table below.
| Dataset | Training samples | Validation samples | Test samples | History length | Forecast horizon | Dropout | Hidden size | #Heads | BS | LR | Gradient clipping |
| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
| Electricity | 450k | 50k | 53.5k | 168 | 24 | 0.1 | 128 | 4 | 8x1024 | 1e-3 | 0.0 |
| Traffic | 450k | 50k | 139.6k | 168 | 24 | 0.3 | 128 | 4 | 8x1024 | 1e-3 | 0.0
### Feature support matrix
The following features are supported by this model:
| Feature                    | TFT
|----------------------------|--------------------------
| Distributed data parallel  | Yes
| PyTorch AMP                | Yes
#### Features
[Automatic Mixed Precision](https://pytorch.org/docs/stable/amp.html)
provides an easy way to leverage Tensor Cores’ performance. It allows the execution of parts of a network in lower precision. Refer to [Mixed precision training](#mixed-precision-training) for more information.
[PyTorch
DistributedDataParallel](https://pytorch.org/docs/stable/nn.html#torch.nn.parallel.DistributedDataParallel) - a module
wrapper that enables easy multiprocess distributed data-parallel
training.
### Mixed precision training
Mixed precision is the combined use of different numerical precisions in a
computational method.
[Mixed precision](https://arxiv.org/abs/1710.03740) training offers significant
computational speedup by performing operations in half-precision format while
storing minimal information in single-precision to retain as much information
as possible in critical parts of the network. Since the introduction of [Tensor Cores](https://developer.nvidia.com/tensor-cores) in Volta, and following with
both the Turing and Ampere architectures, significant training speedups are
experienced by switching to
mixed precision -- up to 3x overall speedup on the most arithmetically intense
model architectures. Using mixed precision training previously required two
steps:
1. Porting the model to use the FP16 data type where appropriate.
2. Manually adding loss scaling to preserve small gradient values.
The ability to train deep learning networks with lower precision was introduced
in the Pascal architecture and first supported in [CUDA
8](https://devblogs.nvidia.com/parallelforall/tag/fp16/) in the NVIDIA Deep
Learning SDK.
For information about:
* How to train using mixed precision, refer to the [Mixed Precision
Training](https://arxiv.org/abs/1710.03740) paper and [Training With Mixed
Precision](https://docs.nvidia.com/deeplearning/sdk/mixed-precision-training/index.html)
documentation.
* Techniques used for mixed precision training, refer to the [Mixed-Precision
Training of Deep Neural
Networks](https://devblogs.nvidia.com/mixed-precision-training-deep-neural-networks/)
blog.
#### Enabling mixed precision
Mixed precision is enabled in PyTorch by using the Automatic Mixed Precision torch.cuda.amp module, which casts variables to half-precision upon retrieval while storing variables in single-precision format. Furthermore, to preserve small gradient magnitudes in backpropagation, a [loss scaling](https://docs.nvidia.com/deeplearning/sdk/mixed-precision-training/index.html#lossscaling) step must be included when applying gradients. In PyTorch, loss scaling can be applied automatically by the GradScaler class. All the necessary steps to implement AMP are verbosely described [here](https://pytorch.org/docs/stable/notes/amp_examples.html#amp-examples).
To enable mixed precision for TFT, simply add the `--use_amp` option to the training script.
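As a generic illustration of this pattern (this is not the TFT training code; the model, data, and loop below are placeholders), a minimal AMP training loop looks like this:
```python
import torch

device = "cuda"
model = torch.nn.Linear(16, 1).to(device)
optimizer = torch.optim.SGD(model.parameters(), lr=1e-2)
criterion = torch.nn.MSELoss()
scaler = torch.cuda.amp.GradScaler()        # handles dynamic loss scaling

x = torch.randn(32, 16, device=device)
y = torch.randn(32, 1, device=device)

for _ in range(10):
    optimizer.zero_grad(set_to_none=True)
    with torch.cuda.amp.autocast():         # forward pass runs in mixed precision
        loss = criterion(model(x), y)
    scaler.scale(loss).backward()           # scale the loss to avoid FP16 gradient underflow
    scaler.step(optimizer)                  # unscale gradients, then take the optimizer step
    scaler.update()                         # adapt the loss scale for the next iteration
```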
#### Enabling TF32
TensorFloat-32 (TF32) is the new math mode in [NVIDIA A100](https://www.nvidia.com/en-us/data-center/a100/) GPUs for handling the matrix math, also called tensor operations. TF32 running on Tensor Cores in A100 GPUs can provide up to 10x speedups compared to single-precision floating-point math (FP32) on Volta GPUs.
TF32 Tensor Cores can speed up networks using FP32, typically with no loss of accuracy. It is more robust than FP16 for models which require high dynamic range for weights or activations.
For more information, refer to the [TensorFloat-32 in the A100 GPU Accelerates AI Training, HPC up to 20x](https://blogs.nvidia.com/blog/2020/05/14/tensorfloat-32-precision-format/) blog post.
TF32 is supported in the NVIDIA Ampere GPU architecture and is enabled by default.
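If you want to verify or override this behavior in PyTorch, the standard backend flags below control TF32 usage for matrix multiplications and cuDNN. This is generic PyTorch, not part of the TFT scripts, and the defaults depend on the PyTorch version:
```python
import torch

# Inspect the current TF32 settings.
print(torch.backends.cuda.matmul.allow_tf32)
print(torch.backends.cudnn.allow_tf32)

# Force full FP32 matrix math instead of TF32 (for example, when debugging numerics).
torch.backends.cuda.matmul.allow_tf32 = False
torch.backends.cudnn.allow_tf32 = False
```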
### Glossary
**Multi horizon prediction**
Process of estimating values of a time series for multiple future time steps.
**Quantiles**
Cut points dividing the range of a probability distribution into intervals with equal probabilities.
**Time series**
Series of data points indexed and equally spaced in time.
**Transformer**
The paper [Attention Is All You Need](https://arxiv.org/abs/1706.03762) introduces a novel architecture called Transformer that uses an attention mechanism and transforms one sequence into another.
## Setup
The following section lists the requirements that you need to meet in order to start training the TFT model.
### Requirements
This repository contains Dockerfile, which extends the PyTorch NGC container and encapsulates some dependencies. Aside from these dependencies, ensure you have the following components:
- [NVIDIA Docker](https://github.com/NVIDIA/nvidia-docker)
- [PyTorch 22.11 NGC container](https://ngc.nvidia.com/catalog/containers/nvidia:pytorch)
- Supported GPUs:
- [NVIDIA Volta architecture](https://www.nvidia.com/en-us/data-center/volta-gpu-architecture/)
- [NVIDIA Turing architecture](https://www.nvidia.com/en-us/design-visualization/technologies/turing-architecture/)
- [NVIDIA Ampere architecture](https://www.nvidia.com/en-us/data-center/nvidia-ampere-gpu-architecture/)
For more information about how to get started with NGC containers, refer to the following sections from the NVIDIA GPU Cloud Documentation and the Deep Learning Documentation:
- [Getting Started Using NVIDIA GPU Cloud](https://docs.nvidia.com/ngc/ngc-getting-started-guide/index.html)
- [Accessing And Pulling From The NGC Container Registry](https://docs.nvidia.com/deeplearning/frameworks/user-guide/index.html#accessing_registry)
- Running [PyTorch](https://docs.nvidia.com/deeplearning/frameworks/pytorch-release-notes/running.html#running)
For those unable to use the PyTorch NGC container to set up the required environment or create your own container, refer to the versioned [NVIDIA Container Support Matrix](https://docs.nvidia.com/deeplearning/frameworks/support-matrix/index.html).
## Quick Start Guide
To train your model using mixed or TF32 precision with Tensor Cores, perform the following steps using the default parameters of the TFT model on any of the benchmark datasets. For the specifics concerning training and inference, refer to the [Advanced](#advanced) section.
1. Clone the repository.
```bash
git clone https://github.com/NVIDIA/DeepLearningExamples
cd DeepLearningExamples/PyTorch/Forecasting/TFT
```
2. Build the TFT PyTorch NGC container.
```bash
docker build --network=host -t tft .
```
3. Start an interactive session in the NGC container to run training/inference.
```bash
docker run -it --rm --ipc=host --network=host --gpus all -v /path/to/your/data:/data/ tft
```
Note: Make sure to mount your dataset using the -v flag to make it available for training inside the NVIDIA Docker container.
4. Download and preprocess datasets.
```bash
bash scripts/get_data.sh
```
5. Start training. Choose one of the scripts provided in the `scripts/` directory. Results are stored in the `/results` directory.
These scripts are tuned for DGX-1 32GB. If you have a different system, use the NGPU and BATCH_SIZE variables to adjust the parameters for your system.
```bash
bash scripts/run_electricity.sh
bash scripts/run_traffic.sh
```
6. Start validation/evaluation. The metric we use for evaluation is q-risk. We can compare it per-quantile in the Pareto sense or jointly as one number indicating accuracy.
```bash
python inference.py \
--checkpoint <your_checkpoint> \
--data /data/processed/<dataset>/test.csv \
--cat_encodings /data/processed/<dataset>/cat_encodings.bin \
--tgt_scalers /data/processed/<dataset>/tgt_scalers.bin
```
7. Start inference/predictions. Visualize and save predictions by running the following command.
```bash
python inference.py \
--checkpoint <your_checkpoint> \
--data /data/processed/<dataset>/test.csv \
--cat_encodings /data/processed/<dataset>/cat_encodings.bin \
--tgt_scalers /data/processed/<dataset>/tgt_scalers.bin \
--visualize \
--save_predictions
```
Now that you have your model trained and evaluated, you can choose to compare your training results with our [Training accuracy results](#training-accuracy-results). You can also choose to benchmark your performance to [Training performance benchmark](#training-performance-results). Following the steps in these sections will ensure that you achieve the same accuracy and performance results as stated in the [Results](#results) section.
## Advanced
The following sections provide more details about the dataset, running training and inference, and the training results.
### Scripts and sample code
In the root directory, the most important files are:
- `train.py`: Entry point for training
- `data_utils.py`: File containing the dataset implementation and preprocessing functions
- `modeling.py`: Definition of the model
- `configuration.py`: Contains configuration classes for various experiments
- `test.py`: Entry point for testing a trained model
- `Dockerfile`: Container definition
- `log_helper.py`: Contains helper functions for setting up dllogger
- `criterions.py`: Definitions of loss functions
The `scripts` directory contains scripts for default use cases:
- `run_electricity.sh`: trains the default model on the electricity dataset
- `run_traffic.sh`: trains the default model on the traffic dataset
### Command-line options
To view the full list of available options and their descriptions, use the `-h` or `--help` command-line option, for example:
`python train.py --help`.
The following example output is printed when running the model:
```
usage: train.py [-h] --data_path DATA_PATH --dataset {electricity,traffic} [--epochs EPOCHS] [--sample_data SAMPLE_DATA SAMPLE_DATA] [--batch_size BATCH_SIZE] [--lr LR] [--seed SEED] [--use_amp] [--clip_grad CLIP_GRAD]
[--early_stopping EARLY_STOPPING] [--results RESULTS] [--log_file LOG_FILE] [--distributed_world_size N] [--distributed_rank DISTRIBUTED_RANK] [--local_rank LOCAL_RANK] [--overwrite_config OVERWRITE_CONFIG]
optional arguments:
-h, --help show this help message and exit
--data_path DATA_PATH
--dataset {electricity,traffic}
--epochs EPOCHS
--sample_data SAMPLE_DATA SAMPLE_DATA
--batch_size BATCH_SIZE
--lr LR
--seed SEED
--use_amp Enable automatic mixed precision
--clip_grad CLIP_GRAD
--early_stopping EARLY_STOPPING
Stop training if validation loss does not improve for more than this number of epochs.
--results RESULTS
--log_file LOG_FILE
--distributed_world_size N
total number of GPUs across all nodes (default: all visible GPUs)
--distributed_rank DISTRIBUTED_RANK
rank of the current worker
--local_rank LOCAL_RANK
rank of the current worker
--overwrite_config OVERWRITE_CONFIG
JSON string used to overload config
```
### Getting the data
The TFT model was trained on the electricity and traffic benchmark datasets. This repository contains the `get_data.sh` download script, which, for the electricity and traffic datasets, will automatically download and preprocess the training, validation, and test datasets, and produce files that contain scalers.
#### Dataset guidelines
The `data_utils.py` file contains all functions that are used to preprocess the data. Initially, the data is loaded into a `pandas.DataFrame` and parsed into the common format which contains the features we will use for training. The data is then cleaned, normalized, encoded, and binarized.
This step does the following:
- Drops all the columns that are not marked in the configuration file as used for training or preprocessing
- Flattens indices in case time series are indexed by more than one column
- Splits the data into training, validation, and test splits
- Filters out all the time series shorter than the minimal example length
- Normalizes columns marked as continuous in the configuration file
- Encodes as integers the columns marked as categorical
- Saves the data in csv and binary formats
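For illustration, here is a minimal sketch of the normalization and encoding steps on a toy frame. The column names and scalers are hypothetical; the actual logic and column roles come from `data_utils.py` and `configuration.py`:
```python
import pandas as pd
from sklearn.preprocessing import LabelEncoder, StandardScaler

# Toy frame with one continuous and one categorical feature.
df = pd.DataFrame({
    "id": ["a", "a", "b", "b"],
    "power_usage": [1.2, 1.5, 3.4, 3.1],             # marked as continuous in the config
    "region": ["north", "north", "south", "south"],  # marked as categorical
})

# Normalize continuous columns and integer-encode categorical columns.
df["power_usage"] = StandardScaler().fit_transform(df[["power_usage"]]).ravel()
df["region"] = LabelEncoder().fit_transform(df["region"])
print(df)
```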
#### Multi-dataset
In order to use an alternate dataset, you have to write a function that parses your data into the common format. The format is as follows:
- There is at least one id column
- There is exactly one time column (that can also be used as a feature column)
- Each feature is in a separate column
- Each row represents a moment in time for only one time series
Additionally, you must specify a configuration of the network, including a data description. Refer to the example in the `configuration.py` file.
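For illustration, a minimal example of a frame in this common format (all column names are hypothetical):
```python
import pandas as pd

# Each row is one moment in time for exactly one time series.
df = pd.DataFrame({
    "id": ["meter_1", "meter_1", "meter_2", "meter_2"],                       # time-series id
    "timestamp": list(pd.date_range("2014-01-01", periods=2, freq="H")) * 2,  # time column
    "power_usage": [1.2, 1.5, 3.4, 3.1],                                      # observed target
    "day_of_week": [2, 2, 2, 2],                                              # known covariate
})
print(df.sort_values(["id", "timestamp"]))
```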
### Training process
The `train.py` script is an entry point for a training procedure. Refined recipes can be found in the `scripts` directory.
The model trains for at most `--epochs` epochs. If the `--early_stopping N` option is set, training ends early if the validation loss has not improved for N consecutive epochs.
The details of the architecture and the dataset configuration are encapsulated by the `--dataset` option. This option chooses one of the configurations stored in the `configuration.py` file. You can enable mixed precision training by providing the `--use_amp` option. The training script supports multi-GPU training with the APEX package. To enable distributed training, prepend the training command with `python -m torch.distributed.run --nproc_per_node=${NGPU}`.
Example command:
```
python -m torch.distributed.run --nproc_per_node=8 train.py \
--dataset electricity \
--data_path /data/processed/electricity_bin \
--batch_size=1024 \
--sample 450000 50000 \
--lr 1e-3 \
--epochs 25 \
--early_stopping 5 \
--seed 1 \
--use_amp \
--results /results/TFT_electricity_bs8x1024_lr1e-3/seed_1
```
The model is trained by optimizing the quantile loss <img src="https://render.githubusercontent.com/render/math?math=\Large\sum_{i=1}^N\sum_{q\in\mathcal{Q}}\sum_{t=1}^{t_{max}}\frac{QL(y_{it},\hat{y}_i(q,t),q)}{Nt_{max}}">.
After training, the checkpoint with the lowest validation loss is evaluated on the test split with the q-risk metric <img src="https://render.githubusercontent.com/render/math?math=\Large\frac{2\sum_{y\in\Omega}\sum_{t=1}^{t_{max}}QL(y_t,\hat{y}(q,t),q)}{\sum_{y\in\Omega}\sum_{t=1}^{t_{max}}|y_t|}">.
Results are by default stored in the `/results` directory. This can be changed by providing the `--results` option. At the end of the training, the results directory will contain the trained checkpoint which had the lowest validation loss, dllogger logs (in dictionary per line format), and TensorBoard logs.
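As a reference for the quantile loss and q-risk formulas above, here is a minimal NumPy sketch; the function names are illustrative, not the ones used in the repository:
```python
import numpy as np

def quantile_loss(y, y_hat, q):
    """Pinball loss QL(y, y_hat, q), element-wise."""
    diff = y - y_hat
    return np.maximum(q * diff, (q - 1) * diff)

def q_risk(y, y_hat, q):
    """Normalized quantile loss: 2 * sum(QL) / sum(|y|) over all series and time steps."""
    return 2 * quantile_loss(y, y_hat, q).sum() / np.abs(y).sum()

# Two example series with a two-step forecast horizon, evaluated at the 0.5 quantile.
y = np.array([[10.0, 12.0], [9.0, 11.0]])
y_hat = np.array([[11.0, 10.0], [9.5, 10.0]])
print(q_risk(y, y_hat, 0.5))
```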
### Inference process
Inference can be run by launching the `inference.py` script. The script requires a trained checkpoint to run. It is crucial to prepare the data in the same way as training data prior to running the inference. Example command:
```
python inference.py \
--checkpoint /results/checkpoint.pt \
--data /data/processed/electricity_bin/test.csv \
--tgt_scalers /data/processed/electricity_bin/tgt_scalers.bin \
--cat_encodings /data/processed/electricity_bin/cat_encodings.bin \
--batch_size 2048 \
--visualize \
--save_predictions \
--joint_visualization \
--results /results
```
In the default setting, the script evaluates the model on the specified dataset and prints the q-risk computed on it. To save the predictions, use the `--save_predictions` option; predictions are stored in csv format in the directory specified by the `--results` option. The `--joint_visualization` option plots graphs in TensorBoard format so you can inspect the results and compare them to the true values. With `--visualize`, you can save plots for each example in a separate file.
### Triton deployment
The [NVIDIA Triton Inference Server](https://github.com/triton-inference-server/server) provides a cloud inferencing solution optimized for NVIDIA GPUs. The server provides an inference service via an HTTP or GRPC endpoint, allowing remote clients to request inferencing for any model being managed by the server. More information on how to perform inference using NVIDIA Triton Inference Server can be found in [triton/README.md](https://github.com/NVIDIA/DeepLearningExamples/tree/master/PyTorch/Forecasting/TFT/triton).
## Performance
### Benchmarking
The following section shows how to run benchmarks measuring the model performance in training and inference modes. Note that the first 3 steps of each epoch are not used in the throughput or latency calculation, because nvFuser performs its optimizations on the 3rd step of the first epoch, causing a multi-second pause.
#### Training performance benchmark
In order to run training benchmarks, use the `scripts/benchmark.sh` script.
#### Inference performance benchmark
To benchmark the inference performance on a specific batch size and dataset, run the `inference.py` script.
### Results
The following sections provide details on how we achieved our performance and accuracy in training and inference.
#### Training accuracy results
We conducted an extensive hyperparameter search along with stability tests. The presented results are averages over hundreds of runs.
##### Training accuracy: NVIDIA DGX A100 (8x A100 80GB)
Our results were obtained by running the `train.sh` training script in the [PyTorch 22.11 NGC container](https://ngc.nvidia.com/catalog/containers/nvidia:pytorch) on NVIDIA A100 (8x A100 80GB) GPUs.
| Dataset | GPUs | Batch size / GPU | Accuracy - TF32 | Accuracy - mixed precision | Time to train - TF32 | Time to train - mixed precision | Time to train speedup (TF32 to mixed precision)
|-------------|---|------|-----------------------|-----------------------|-------|-------|-------
| Electricity | 8 | 1024 | 0.026 / 0.056 / 0.029 | 0.028 / 0.058 / 0.029 | 200s | 176s | 1.136x
| Traffic | 8 | 1024 | 0.044 / 0.108 / 0.078 | 0.044 / 0.109 / 0.079 | 140s | 129s | 1.085x
##### Training accuracy: NVIDIA DGX-1 (8x V100 16GB)
Our results were obtained by running the `train.sh` training script in the [PyTorch 22.11 NGC container](https://ngc.nvidia.com/catalog/containers/nvidia:pytorch) on NVIDIA DGX-1 with (8x V100 16GB) GPUs.
| Dataset | GPUs | Batch size / GPU | Accuracy - FP32 | Accuracy - mixed precision | Time to train - FP32 | Time to train - mixed precision | Time to train speedup (FP32 to mixed precision)
|-------------|---|------|-----------------------|-----------------------|-------|-------|-----------
| Electricity | 8 | 1024 | 0.028 / 0.057 / 0.028 | 0.027 / 0.059 / 0.030 | 371s | 269s | 1.379x
| Traffic | 8 | 1024 | 0.042 / 0.110 / 0.080 | 0.043 / 0.109 / 0.080 | 251s | 191s | 1.314x
##### Training stability test
In order to get a fuller picture of the model’s accuracy, we performed a hyperparameter search along with stability tests on 100 random seeds for each configuration. Then, for each benchmark dataset, we chose the architecture with the lowest mean test q-risk. The table below summarizes the best configurations.
| Dataset | #GPU | Hidden size | #Heads | Local BS | LR | Gradient clipping | Dropout | Mean q-risk | Std q-risk | Min q-risk | Max q-risk
|-------------|------|-------------|--------|----------|------|-------------------|---------|-------------|------------| -----------|------
| Electricity | 8 | 128 | 4 | 1024 | 1e-3 | 0.0 | 0.1 | 0.1129 | 0.0025 | 0.1074 | 0.1244
| Traffic | 8 | 128 | 4 | 1024 | 1e-3 | 0.0 | 0.3 | 0.2262 | 0.0027 | 0.2207 | 0.2331
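For reference, the q-risk values reported above are based on the quantile (pinball) loss. A minimal sketch is shown below; the normalization follows the definition in the TFT paper and is stated here as an assumption rather than a restatement of the training code:
```python
import numpy as np

def quantile_loss(y_true, y_pred, q):
    # Pinball loss for a single quantile q in (0, 1).
    diff = y_true - y_pred
    return np.maximum(q * diff, (q - 1) * diff)

def q_risk(y_true, y_pred, q):
    # Normalized quantile risk: twice the summed pinball loss over the summed target magnitude.
    return 2 * quantile_loss(y_true, y_pred, q).sum() / np.abs(y_true).sum()
```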
#### Training performance results
##### Training performance: NVIDIA DGX A100 (8x A100 80GB)
Our results were obtained by running the `train.sh` training script in the [PyTorch 22.11 NGC container](https://ngc.nvidia.com/catalog/containers/nvidia:pytorch) on NVIDIA A100 (8x A100 80GB) GPUs. Performance numbers (in items per second) were averaged over an entire training epoch.
| Dataset | GPUs | Batch size / GPU | Throughput - TF32 | Throughput - mixed precision | Throughput speedup (TF32 - mixed precision) | Weak scaling - TF32 | Weak scaling - mixed precision
|-------------|---|------|--------|--------|-------|-------|-----
| Electricity | 1 | 1024 | 12435 | 17608 | 1.42x | 1 | 1
| Electricity | 8 | 1024 | 94389 | 130769 | 1.39x | 7.59x | 7.42x
| Traffic | 1 | 1024 | 12509 | 17591 | 1.40x | 1 | 1
| Traffic | 8 | 1024 | 94476 | 130992 | 1.39x | 7.55x | 7.45x
To achieve these same results, follow the steps in the [Quick Start Guide](#quick-start-guide).
The performance metrics used were items per second.
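The derived columns are simple ratios of the measured throughputs; for example, using the Electricity rows above:
```python
tf32_1gpu, amp_1gpu = 12435, 17608
tf32_8gpu, amp_8gpu = 94389, 130769

speedup = amp_1gpu / tf32_1gpu              # ~1.42x (mixed precision vs. TF32)
weak_scaling_tf32 = tf32_8gpu / tf32_1gpu   # ~7.59x (8 GPUs vs. 1 GPU)
weak_scaling_amp = amp_8gpu / amp_1gpu      # ~7.42x
```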
##### Training performance: NVIDIA DGX-1 (8x V100 16GB)
Our results were obtained by running the `train.sh` training script in the [PyTorch 22.11 NGC container](https://ngc.nvidia.com/catalog/containers/nvidia:pytorch) on NVIDIA DGX-1 with (8x V100 16GB) GPUs. Performance numbers (in items per second) were averaged over an entire training epoch.
| Dataset | GPUs | Batch size / GPU | Throughput - FP32 | Throughput - mixed precision | Throughput speedup (FP32 - mixed precision) | Weak scaling - FP32 | Weak scaling - mixed precision
|-------------|---|------|-------|-------|-------|------|----
| Electricity | 1 | 1024 | 5932 | 10163 | 1.71x | 1 | 1
| Electricity | 8 | 1024 | 45566 | 75660 | 1.66x | 7.68x | 7.44x
| Traffic | 1 | 1024 | 5971 | 10166 | 1.70x | 1 | 1
| Traffic | 8 | 1024 | 45925 | 75640 | 1.64x | 7.69x | 7.44x
To achieve these same results, follow the steps in the [Quick Start Guide](#quick-start-guide).
The performance metrics used were items per second.
#### Inference performance results
##### Inference performance: NVIDIA DGX A100
Our results were obtained by running the `inference.py` script in the [PyTorch 22.11 NGC container](https://ngc.nvidia.com/catalog/containers/nvidia:pytorch) on NVIDIA DGX A100. Throughput is measured in items per second and latency is measured in milliseconds.
To benchmark the inference performance on a specific batch size and dataset, run the `inference.py` script.
| Dataset | GPUs | Batch size / GPU | Throughput - mixed precision (item/s) | Average Latency (ms) | Latency p90 (ms) | Latency p95 (ms) | Latency p99 (ms)
|-------------|--------|-----|---------------------------------|-----------------|-------------|-------------|------------
| Electricity | 1 | 1 | 272.43 | 3.67 | 3.70 | 3.87 | 4.18
| Electricity | 1 | 2 | 518.13 | 3.86 | 3.88 | 3.93 | 4.19
| Electricity | 1 | 4 | 1039.31 | 3.85 | 3.89 | 3.97 | 4.15
| Electricity | 1 | 8 | 2039.54 | 3.92 | 3.93 | 3.95 | 4.32
| Traffic | 1 | 1 | 269.59 | 3.71 | 3.74 | 3.79 | 4.30
| Traffic | 1 | 2 | 518.73 | 3.86 | 3.78 | 3.91 | 4.66
| Traffic | 1 | 4 | 1021.49 | 3.92 | 3.94 | 3.95 | 4.25
| Traffic | 1 | 8 | 2005.54 | 3.99 | 4.01 | 4.03 | 4.39
##### Inference performance: NVIDIA DGX-1 V100
Our results were obtained by running the `inference.py` script in the [PyTorch 22.11 NGC container](https://ngc.nvidia.com/catalog/containers/nvidia:pytorch) on NVIDIA DGX-1 V100. Throughput is measured in items per second and latency is measured in milliseconds.
To benchmark the inference performance on a specific batch size and dataset, run the `inference.py` script.
| Dataset | GPUs | Batch size / GPU | Throughput - mixed precision (item/s) | Average Latency (ms) | Latency p90 (ms) | Latency p95 (ms) | Latency p99 (ms)
|-------------|--------|-----|---------------------------------|-----------------|-------------|-------------|------------
| Electricity | 1 | 1 | 171.68 | 5.82 | 5.99 | 6.17 | 7.00
| Electricity | 1 | 2 | 318.92 | 6.27 | 6.43 | 6.60 | 7.51
| Electricity | 1 | 4 | 684.79 | 5.84 | 6.02 | 6.08 | 6.47
| Electricity | 1 | 8 | 1275.54 | 6.27 | 7.31 | 7.36 | 7.51
| Traffic | 1 | 1 | 183.39 | 5.45 | 5.64 | 5.86 | 6.73
| Traffic | 1 | 2 | 340.73 | 5.87 | 6.07 | 6.77 | 7.25
| Traffic | 1 | 4 | 647.33 | 6.18 | 6.35 | 7.99 | 8.07
| Traffic | 1 | 8 | 1364.39 | 5.86 | 6.07 | 6.40 | 7.31
## Release notes
The performance measurements in this document were conducted at the time of publication and may not reflect the performance achieved from NVIDIA’s latest software release. For the most up-to-date performance measurements, go to https://developer.nvidia.com/deep-learning-performance-training-inference.
### Changelog
March 2023
- 23.01 Container Update
- Switch from NVIDIA Apex AMP and NVIDIA Apex FusedLayerNorm to Native PyTorch AMP and Native PyTorch LayerNorm
- Acceleration using NvFuser
February 2022
- 21.12 Container Update
- Triton Inference Performance Numbers
November 2021
- Initial release
### Known issues
There are no known issues with this model.
|
PyTorch/Segmentation/MaskRCNN/pytorch/maskrcnn_benchmark/csrc/cuda | cuda | ROIPool_cuda | // Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
// Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#include <ATen/ATen.h>
#include <ATen/ceil_div.h>
#include <ATen/cuda/CUDAContext.h>
#include <THC/THC.h>
#include <THC/THCAtomics.cuh>
// TODO make it in a common file
#define CUDA_1D_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \
i += blockDim.x * gridDim.x)
template <typename T>
__global__ void RoIPoolFForward(const int nthreads, const T* bottom_data,
const T spatial_scale, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const T* bottom_rois, T* top_data, int* argmax_data) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
const T* offset_bottom_rois = bottom_rois + n * 5;
int roi_batch_ind = offset_bottom_rois[0];
int roi_start_w = round(offset_bottom_rois[1] * spatial_scale);
int roi_start_h = round(offset_bottom_rois[2] * spatial_scale);
int roi_end_w = round(offset_bottom_rois[3] * spatial_scale);
int roi_end_h = round(offset_bottom_rois[4] * spatial_scale);
// Force malformed ROIs to be 1x1
int roi_width = max(roi_end_w - roi_start_w + 1, 1);
int roi_height = max(roi_end_h - roi_start_h + 1, 1);
T bin_size_h = static_cast<T>(roi_height)
/ static_cast<T>(pooled_height);
T bin_size_w = static_cast<T>(roi_width)
/ static_cast<T>(pooled_width);
int hstart = static_cast<int>(floor(static_cast<T>(ph)
* bin_size_h));
int wstart = static_cast<int>(floor(static_cast<T>(pw)
* bin_size_w));
int hend = static_cast<int>(ceil(static_cast<T>(ph + 1)
* bin_size_h));
int wend = static_cast<int>(ceil(static_cast<T>(pw + 1)
* bin_size_w));
// Add roi offsets and clip to input boundaries
hstart = min(max(hstart + roi_start_h, 0), height);
hend = min(max(hend + roi_start_h, 0), height);
wstart = min(max(wstart + roi_start_w, 0), width);
wend = min(max(wend + roi_start_w, 0), width);
bool is_empty = (hend <= hstart) || (wend <= wstart);
// Define an empty pooling region to be zero
T maxval = is_empty ? 0 : -FLT_MAX;
// If nothing is pooled, argmax = -1 causes nothing to be backprop'd
int maxidx = -1;
const T* offset_bottom_data =
bottom_data + (roi_batch_ind * channels + c) * height * width;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
int bottom_index = h * width + w;
if (offset_bottom_data[bottom_index] > maxval) {
maxval = offset_bottom_data[bottom_index];
maxidx = bottom_index;
}
}
}
top_data[index] = maxval;
argmax_data[index] = maxidx;
}
}
template <typename T>
__global__ void RoIPoolFBackward(const int nthreads, const T* top_diff,
const int* argmax_data, const int num_rois, const T spatial_scale,
const int channels, const int height, const int width,
const int pooled_height, const int pooled_width, T* bottom_diff,
const T* bottom_rois) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
const T* offset_bottom_rois = bottom_rois + n * 5;
int roi_batch_ind = offset_bottom_rois[0];
int bottom_offset = (roi_batch_ind * channels + c) * height * width;
int top_offset = (n * channels + c) * pooled_height * pooled_width;
const T* offset_top_diff = top_diff + top_offset;
T* offset_bottom_diff = bottom_diff + bottom_offset;
const int* offset_argmax_data = argmax_data + top_offset;
int argmax = offset_argmax_data[ph * pooled_width + pw];
if (argmax != -1) {
atomicAdd(
offset_bottom_diff + argmax,
static_cast<T>(offset_top_diff[ph * pooled_width + pw]));
}
}
}
std::tuple<at::Tensor, at::Tensor> ROIPool_forward_cuda(const at::Tensor& input,
const at::Tensor& rois,
const float spatial_scale,
const int pooled_height,
const int pooled_width) {
AT_ASSERTM(input.is_cuda(), "input must be a CUDA tensor");
AT_ASSERTM(rois.is_cuda(), "rois must be a CUDA tensor");
auto num_rois = rois.size(0);
auto channels = input.size(1);
auto height = input.size(2);
auto width = input.size(3);
auto output = at::empty({num_rois, channels, pooled_height, pooled_width}, input.options());
auto output_size = num_rois * pooled_height * pooled_width * channels;
auto argmax = at::zeros({num_rois, channels, pooled_height, pooled_width}, input.options().dtype(at::kInt));
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
dim3 grid(std::min(at::ceil_div(output_size, 512L), 4096L));
dim3 block(512);
if (output.numel() == 0) {
C10_CUDA_CHECK(cudaGetLastError());
return std::make_tuple(output, argmax);
}
AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "ROIPool_forward", [&] {
RoIPoolFForward<scalar_t><<<grid, block, 0, stream>>>(
output_size,
input.contiguous().data_ptr<scalar_t>(),
spatial_scale,
channels,
height,
width,
pooled_height,
pooled_width,
rois.contiguous().data_ptr<scalar_t>(),
output.data_ptr<scalar_t>(),
argmax.data_ptr<int>());
});
C10_CUDA_CHECK(cudaGetLastError());
return std::make_tuple(output, argmax);
}
// TODO remove the dependency on input and use instead its sizes -> save memory
at::Tensor ROIPool_backward_cuda(const at::Tensor& grad,
const at::Tensor& input,
const at::Tensor& rois,
const at::Tensor& argmax,
const float spatial_scale,
const int pooled_height,
const int pooled_width,
const int batch_size,
const int channels,
const int height,
const int width) {
AT_ASSERTM(grad.is_cuda(), "grad must be a CUDA tensor");
AT_ASSERTM(rois.is_cuda(), "rois must be a CUDA tensor");
// TODO add more checks
auto num_rois = rois.size(0);
auto grad_input = at::zeros({batch_size, channels, height, width}, grad.options());
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
dim3 grid(std::min(at::ceil_div(grad.numel(), 512L), 4096L));
dim3 block(512);
// handle possibly empty gradients
if (grad.numel() == 0) {
C10_CUDA_CHECK(cudaGetLastError());
return grad_input;
}
AT_DISPATCH_FLOATING_TYPES(grad.scalar_type(), "ROIPool_backward", [&] {
RoIPoolFBackward<scalar_t><<<grid, block, 0, stream>>>(
grad.numel(),
grad.contiguous().data_ptr<scalar_t>(),
argmax.data_ptr<int>(),
num_rois,
spatial_scale,
channels,
height,
width,
pooled_height,
pooled_width,
grad_input.data_ptr<scalar_t>(),
rois.contiguous().data_ptr<scalar_t>());
});
C10_CUDA_CHECK(cudaGetLastError());
return grad_input;
}
|
TensorFlow/Classification/ConvNets | ConvNets | requirements | git+https://github.com/NVIDIA/dllogger#egg=dllogger
mpi4py |
TensorFlow/Segmentation/VNet | VNet | .gitignore | .idea/
*.tar
.ipynb_checkpoints
/_python_build
*.pyc
__pycache__
*.swp
/datasets
/results
|
PyTorch/Classification/GPUNet/triton/runner | runner | summary | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import csv
import json
import pathlib
from typing import Dict, List, Union
# method from PEP-366 to support relative import in executed modules
import yaml
if __name__ == "__main__" and __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
from ..deployment_toolkit.report import save_results, sort_results
from .logger import LOGGER
def save_summary(result_type: str, results: List, summary_dir: pathlib.Path) -> None:
"""
Create file with summary for results of given type
Args:
result_type: Type of results to dump
results: Results data
summary_dir: Path where results should be stored
Returns:
None
"""
if len(results) == 0:
LOGGER.warning(f"No {result_type} results found.")
return
results = sort_results(results=results)
kind_file = summary_dir / f"{result_type}_summary.csv"
save_results(filename=kind_file.as_posix(), data=results, formatted=True)
LOGGER.info(f"Summary for {result_type} stored in {kind_file}")
def load_results(*, results_path: Union[pathlib.Path, str], result_type: str, parameters: Dict) -> List:
"""
Update results
Args:
results_path: Path to file or directory from which data should be read
result_type: type of results
parameters: Parameters used in experiment which generated results
Returns:
List of result rows
"""
LOGGER.debug(f"Loading {result_type} from {results_path} for summary")
results_path = pathlib.Path(results_path)
if results_path.is_file():
files = [results_path]
elif results_path.is_dir():
files = list(results_path.iterdir())
else:
LOGGER.debug(f"Unable to load file: {results_path}. Generating empty rows.")
data = [{}]
return data
if any([file.name.endswith(".ckpt") for file in files]):
model_analyzer_metrics = results_path / "metrics-model-inference.csv"
files = [model_analyzer_metrics]
else:
files = [file for file in files if file.name.endswith(".csv")]
results = list()
parameters_cpy = {key: value for key, value in parameters.items() if key != "batch"}
for file in files:
if file.suffix == ".csv":
data = _generate_data_from_csv(file=file)
elif file.suffix == ".json":
data = _generate_data_from_json(file=file)
elif file.suffix == ".yaml":
data = _generate_data_from_yaml(file=file)
else:
raise ValueError(f"Unsupported file extension: {file.suffix}")
for item in data:
result = {**parameters_cpy, **item}
results.append(result)
LOGGER.debug(f"Loading done. Collected {len(results)} results.")
return results
def _normalize_key(*, key: str) -> str:
"""
Normalize key
Args:
key: Key to normalize
Returns:
Normalized string
"""
key = "_".join(key.split(sep=" "))
key = key.lower()
return key
def _normalize_keys(*, data: Dict) -> Dict:
"""
Normalize keys in dictionary
Args:
data: Dictionary to normalize
Returns:
Normalized dictionary
"""
keys = {_normalize_key(key=key): value for key, value in data.items()}
return keys
def _generate_data_from_csv(*, file: Union[pathlib.Path, str]) -> List[Dict]:
"""
Generate result rows from CSV file
Args:
file: CSV file path
Returns:
List of rows
"""
LOGGER.debug(f"Reading data from {file}")
filtered_rows: List[Dict] = []
with open(file, "r") as csvfile:
reader = csv.DictReader(csvfile)
for r in reader:
r = _normalize_keys(data=r)
filtered_row = {k: v for k, v in r.items()}
filtered_rows.append(filtered_row)
LOGGER.debug("done")
return filtered_rows
def _generate_data_from_json(file: pathlib.Path) -> List[Dict]:
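    """
    Generate result rows from JSON file
    Args:
        file: JSON file path
    Returns:
        List of rows
    """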
LOGGER.info(f"Reading data from {file}")
filtered_rows: List[Dict] = list()
with open(file, "r") as json_file:
file_data = json.load(json_file)
if not isinstance(file_data, list):
file_data = [file_data]
for r in file_data:
r = _normalize_keys(data=r)
filtered_row = {k: v for k, v in r.items()}
filtered_rows.append(filtered_row)
LOGGER.info("done")
return filtered_rows
def _generate_data_from_yaml(file: pathlib.Path) -> List[Dict]:
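    """
    Generate result rows from YAML file
    Args:
        file: YAML file path
    Returns:
        List of rows
    """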
LOGGER.info(f"Reading data from {file}")
filtered_rows: List[Dict] = list()
with open(file, "r") as yaml_file:
file_data = yaml.safe_load(yaml_file)
if not isinstance(file_data, list):
file_data = [file_data]
for r in file_data:
r = _normalize_keys(data=r)
filtered_row = {k: v for k, v in r.items()}
filtered_rows.append(filtered_row)
LOGGER.info("done")
return filtered_rows
|
TensorFlow2/Classification/ConvNets/model | model | common_modules | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Common modeling utilities."""
from __future__ import absolute_import
from __future__ import division
# from __future__ import google_type_annotations
from __future__ import print_function
import numpy as np
import math
import tensorflow as tf
from typing import Text, Optional
__all__ = ['count_params', 'load_weights', 'round_filters', 'round_repeats']
def count_params(model, trainable_only=True):
"""Returns the count of all model parameters, or just trainable ones."""
if not trainable_only:
return model.count_params()
else:
return int(np.sum([tf.keras.backend.count_params(p)
for p in model.trainable_weights]))
def load_weights(model: tf.keras.Model,
model_weights_path: Text,
weights_format: Text = 'saved_model'):
"""Load model weights from the given file path.
Args:
model: the model to load weights into
model_weights_path: the path of the model weights
weights_format: the model weights format. One of 'saved_model', 'h5',
or 'checkpoint'.
"""
if weights_format == 'saved_model':
loaded_model = tf.keras.models.load_model(model_weights_path)
# The weight values should be passed in the order they are created by the layer.
# Note that the layer's weights must be instantiated before calling this function, by calling the layer.
model.set_weights(loaded_model.get_weights()) # list to list assignment (order matters)
else:
model.load_weights(model_weights_path)
def round_filters(filters: int,
config: dict) -> int:
"""Round number of filters based on width coefficient."""
width_coefficient = config.mparams.width_coefficient
min_depth = config.mparams.min_depth
divisor = config.mparams.depth_divisor
orig_filters = filters
if not width_coefficient:
return filters
filters *= width_coefficient
min_depth = min_depth or divisor
new_filters = max(min_depth, int(filters + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_filters < 0.9 * filters:
new_filters += divisor
return int(new_filters)
def round_repeats(repeats: int, depth_coefficient: float) -> int:
"""Round number of repeats based on depth coefficient."""
return int(math.ceil(depth_coefficient * repeats))
|
PyTorch/SpeechRecognition/wav2vec2/scripts | scripts | finetune_base_10h | #!/usr/bin/env bash
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -a
# A100 80GiB FP16: UPDATE_FREQ=1
# A100 80GiB TF32: UPDATE_FREQ=1
# IO
: ${DATASET_DIR:="/datasets/LibriSpeech"}
: ${TRAIN_SUBSET:="train-10h"}
: ${OUTPUT_DIR:="results/finetune_base_10h"}
: ${PRETRAINED_MODEL:=results/pretrain_base/wav2vec2_update400000.pt}
# Batching
: ${NUM_GPUS:=8}
: ${MAX_TOKENS:=3200000}
: ${NUM_CONCAT_BATCHES:=1}
: ${UPDATE_FREQ:=1}
# Training
: ${LEARNING_RATE:=0.00005}
: ${FREEZE_FINETUNE_UPDATES:=10000}
: ${MAX_UPDATE:=20000}
: ${MASK_CHANNEL_PROB:=0.5}
: ${MASK_PROB:=0.65}
: ${LAYERDROP:=0.05}
bash scripts/finetune_vox_960h.sh "$@"
|
TensorFlow2/LanguageModeling/ELECTRA | ELECTRA | build_pretraining_dataset | # coding=utf-8
# Copyright 2020 The Google Research Authors.
# Copyright (c) 2020 NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Writes out text data as tfrecords that ELECTRA can be pre-trained on."""
import argparse
import multiprocessing
import os
import random
import time
import tensorflow as tf
import utils
from tokenization import ElectraTokenizer
def create_int_feature(values):
feature = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))
return feature
class ExampleBuilder(object):
"""Given a stream of input text, creates pretraining examples."""
def __init__(self, tokenizer, max_length):
self._tokenizer = tokenizer
self._current_sentences = []
self._current_length = 0
self._max_length = max_length
self._target_length = max_length
def add_line(self, line):
"""Adds a line of text to the current example being built."""
line = line.strip().replace("\n", " ")
if (not line) and self._current_length != 0: # empty lines separate docs
return self._create_example()
bert_tokens = self._tokenizer.tokenize(line)
bert_tokids = self._tokenizer.convert_tokens_to_ids(bert_tokens)
self._current_sentences.append(bert_tokids)
self._current_length += len(bert_tokids)
if self._current_length >= self._target_length:
return self._create_example()
return None
def _create_example(self):
"""Creates a pre-training example from the current list of sentences."""
# small chance to only have one segment as in classification tasks
if random.random() < 0.1:
first_segment_target_length = 100000
else:
# -3 due to not yet having [CLS]/[SEP] tokens in the input text
first_segment_target_length = (self._target_length - 3) // 2
first_segment = []
second_segment = []
for sentence in self._current_sentences:
# the sentence goes to the first segment if (1) the first segment is
# empty, (2) the sentence doesn't put the first segment over length or
# (3) 50% of the time when it does put the first segment over length
if (len(first_segment) == 0 or
len(first_segment) + len(sentence) < first_segment_target_length or
(len(second_segment) == 0 and
len(first_segment) < first_segment_target_length and
random.random() < 0.5)):
first_segment += sentence
else:
second_segment += sentence
# trim to max_length while accounting for not-yet-added [CLS]/[SEP] tokens
first_segment = first_segment[:self._max_length - 2]
second_segment = second_segment[:max(0, self._max_length -
len(first_segment) - 3)]
# prepare to start building the next example
self._current_sentences = []
self._current_length = 0
# small chance for random-length instead of max_length-length example
if random.random() < 0.05:
self._target_length = random.randint(5, self._max_length)
else:
self._target_length = self._max_length
return self._make_tf_example(first_segment, second_segment)
def _make_tf_example(self, first_segment, second_segment):
"""Converts two "segments" of text into a tf.train.Example."""
vocab = self._tokenizer.vocab
input_ids = [vocab["[CLS]"]] + first_segment + [vocab["[SEP]"]]
segment_ids = [0] * len(input_ids)
if second_segment:
input_ids += second_segment + [vocab["[SEP]"]]
segment_ids += [1] * (len(second_segment) + 1)
input_mask = [1] * len(input_ids)
input_ids += [0] * (self._max_length - len(input_ids))
input_mask += [0] * (self._max_length - len(input_mask))
segment_ids += [0] * (self._max_length - len(segment_ids))
tf_example = tf.train.Example(features=tf.train.Features(feature={
"input_ids": create_int_feature(input_ids),
"input_mask": create_int_feature(input_mask),
"segment_ids": create_int_feature(segment_ids)
}))
return tf_example
class ExampleWriter(object):
"""Writes pre-training examples to disk."""
def __init__(self, job_id, vocab_file, output_dir, max_seq_length,
num_jobs, blanks_separate_docs, do_lower_case,
num_out_files=1000):
self._blanks_separate_docs = blanks_separate_docs
tokenizer = ElectraTokenizer(
vocab_file=vocab_file,
do_lower_case=do_lower_case)
self._example_builder = ExampleBuilder(tokenizer, max_seq_length)
self._writers = []
for i in range(num_out_files):
if i % num_jobs == job_id:
output_fname = os.path.join(
output_dir, "pretrain_data.tfrecord-{:}-of-{:}".format(
i, num_out_files))
self._writers.append(tf.io.TFRecordWriter(output_fname))
self.n_written = 0
def write_examples(self, input_file):
"""Writes out examples from the provided input file."""
with tf.io.gfile.GFile(input_file) as f:
for line in f:
line = line.strip()
if line or self._blanks_separate_docs:
example = self._example_builder.add_line(line)
if example:
self._writers[self.n_written % len(self._writers)].write(
example.SerializeToString())
self.n_written += 1
example = self._example_builder.add_line("")
if example:
self._writers[self.n_written % len(self._writers)].write(
example.SerializeToString())
self.n_written += 1
def finish(self):
for writer in self._writers:
writer.close()
def write_examples(job_id, args):
"""A single process creating and writing out pre-processed examples."""
def log(*args):
msg = " ".join(map(str, args))
print("Job {}:".format(job_id), msg)
log("Creating example writer")
example_writer = ExampleWriter(
job_id=job_id,
vocab_file=args.vocab_file,
output_dir=args.output_dir,
max_seq_length=args.max_seq_length,
num_jobs=args.num_processes,
blanks_separate_docs=args.blanks_separate_docs,
do_lower_case=args.do_lower_case,
num_out_files=args.num_out_files,
)
log("Writing tf examples")
fnames = sorted(tf.io.gfile.listdir(args.corpus_dir))
fnames = [f for (i, f) in enumerate(fnames)
if i % args.num_processes == job_id]
random.shuffle(fnames)
start_time = time.time()
for file_no, fname in enumerate(fnames):
if file_no > 0:
elapsed = time.time() - start_time
log("processed {:}/{:} files ({:.1f}%), ELAPSED: {:}s, ETA: {:}s, "
"{:} examples written".format(
file_no, len(fnames), 100.0 * file_no / len(fnames), int(elapsed),
int((len(fnames) - file_no) / (file_no / elapsed)),
example_writer.n_written))
example_writer.write_examples(os.path.join(args.corpus_dir, fname))
example_writer.finish()
log("Done!")
# Example usage: python build_pretraining_dataset.py --corpus-dir <corpus_dir> --vocab-file <vocab.txt> --output-dir <output_dir>
def main():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("--corpus-dir", required=True,
help="Location of pre-training text files.")
parser.add_argument("--vocab-file", required=True,
help="Location of vocabulary file.")
parser.add_argument("--output-dir", required=True,
help="Where to write out the tfrecords.")
parser.add_argument("--max-seq-length", default=128, type=int,
help="Number of tokens per example.")
parser.add_argument("--num-processes", default=1, type=int,
help="Parallelize across multiple processes.")
parser.add_argument("--blanks-separate-docs", default=True, type=bool,
help="Whether blank lines indicate document boundaries.")
parser.add_argument("--do-lower-case", dest='do_lower_case',
action='store_true', help="Lower case input text.")
parser.add_argument("--no-lower-case", dest='do_lower_case',
action='store_false', help="Don't lower case input text.")
parser.add_argument("--num-out-files", default=1000, type=int,
help="Number of output files.")
parser.add_argument("--seed", default=1314, type=int)
args = parser.parse_args()
random.seed(args.seed)
utils.rmkdir(args.output_dir)
if args.num_processes == 1:
write_examples(0, args)
else:
jobs = []
for i in range(args.num_processes):
job = multiprocessing.Process(target=write_examples, args=(i, args))
jobs.append(job)
job.start()
for job in jobs:
job.join()
if __name__ == "__main__":
main()
|
TensorFlow/Detection/SSD/models/research/slim/nets | nets | nets_factory_test | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for slim.inception."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from nets import nets_factory
class NetworksTest(tf.test.TestCase):
def testGetNetworkFnFirstHalf(self):
batch_size = 5
num_classes = 1000
for net in list(nets_factory.networks_map.keys())[:10]:
with tf.Graph().as_default() as g, self.test_session(g):
net_fn = nets_factory.get_network_fn(net, num_classes=num_classes)
# Most networks use 224 as their default_image_size
image_size = getattr(net_fn, 'default_image_size', 224)
if net not in ['i3d', 's3dg']:
inputs = tf.random_uniform(
(batch_size, image_size, image_size, 3))
logits, end_points = net_fn(inputs)
self.assertTrue(isinstance(logits, tf.Tensor))
self.assertTrue(isinstance(end_points, dict))
self.assertEqual(logits.get_shape().as_list()[0], batch_size)
self.assertEqual(logits.get_shape().as_list()[-1], num_classes)
def testGetNetworkFnSecondHalf(self):
batch_size = 5
num_classes = 1000
for net in list(nets_factory.networks_map.keys())[10:]:
with tf.Graph().as_default() as g, self.test_session(g):
net_fn = nets_factory.get_network_fn(net, num_classes=num_classes)
# Most networks use 224 as their default_image_size
image_size = getattr(net_fn, 'default_image_size', 224)
if net not in ['i3d', 's3dg']:
inputs = tf.random_uniform(
(batch_size, image_size, image_size, 3))
logits, end_points = net_fn(inputs)
self.assertTrue(isinstance(logits, tf.Tensor))
self.assertTrue(isinstance(end_points, dict))
self.assertEqual(logits.get_shape().as_list()[0], batch_size)
self.assertEqual(logits.get_shape().as_list()[-1], num_classes)
def testGetNetworkFnVideoModels(self):
batch_size = 5
num_classes = 400
for net in ['i3d', 's3dg']:
with tf.Graph().as_default() as g, self.test_session(g):
net_fn = nets_factory.get_network_fn(net, num_classes=num_classes)
# Most networks use 224 as their default_image_size
image_size = getattr(net_fn, 'default_image_size', 224) // 2
inputs = tf.random_uniform(
(batch_size, 10, image_size, image_size, 3))
logits, end_points = net_fn(inputs)
self.assertTrue(isinstance(logits, tf.Tensor))
self.assertTrue(isinstance(end_points, dict))
self.assertEqual(logits.get_shape().as_list()[0], batch_size)
self.assertEqual(logits.get_shape().as_list()[-1], num_classes)
if __name__ == '__main__':
tf.test.main()
|
Tools/PyTorch/TimeSeriesPredictionPlatform/models/tft_pyt/triton/runner | runner | config_NVIDIA-DGX-A100-(1x-A100-80GB) | checkpoints:
- name: electricity_bin
url: https://api.ngc.nvidia.com/v2/models/nvidia/tft_pyt_ckpt_base_eletricity_amp/versions/21.06.0/zip
- name: traffic_bin
url: https://api.ngc.nvidia.com/v2/models/nvidia/tft_pyt_ckpt_base_traffic_amp/versions/21.06.0/zip
configurations:
- accelerator: none
batch_size:
- 1
- 2
- 4
- 8
- 16
- 32
- 64
- 128
- 256
- 512
- 1024
batch_sizes: 1 2 4 8 16 32 64 128 256 512 1024
capture_cuda_graph: 0
checkpoint_variant: electricity_bin
dataset: electricity_bin
device: gpu
export_format: onnx
export_precision: fp32
format: trt
max_batch_size: 1024
precision: fp16
request_count: 500
triton_gpu_engine_count: 2
triton_max_queue_delay: 1
triton_preferred_batch_sizes: 512 1024
- accelerator: none
batch_size:
- 1
- 2
- 4
- 8
- 16
- 32
- 64
- 128
- 256
- 512
- 1024
batch_sizes: 1 2 4 8 16 32 64 128 256 512 1024
capture_cuda_graph: 0
checkpoint_variant: traffic_bin
dataset: traffic_bin
device: gpu
export_format: onnx
export_precision: fp32
format: trt
max_batch_size: 1024
precision: fp16
request_count: 500
triton_gpu_engine_count: 2
triton_max_queue_delay: 1
triton_preferred_batch_sizes: 512 1024
- accelerator: none
batch_size:
- 1
- 2
- 4
- 8
- 16
- 32
- 64
- 128
- 256
- 512
- 1024
batch_sizes: 1 2 4 8 16 32 64 128 256 512 1024
capture_cuda_graph: 0
checkpoint_variant: electricity_bin
dataset: electricity_bin
device: gpu
export_format: ts-trace
export_precision: fp32
format: ts-trace
max_batch_size: 1024
precision: fp16
request_count: 500
triton_gpu_engine_count: 2
triton_max_queue_delay: 1
triton_preferred_batch_sizes: 512 1024
- accelerator: none
batch_size:
- 1
- 2
- 4
- 8
- 16
- 32
- 64
- 128
- 256
- 512
- 1024
batch_sizes: 1 2 4 8 16 32 64 128 256 512 1024
capture_cuda_graph: 0
checkpoint_variant: traffic_bin
dataset: traffic_bin
device: gpu
export_format: ts-trace
export_precision: fp32
format: ts-trace
max_batch_size: 1024
precision: fp16
request_count: 500
triton_gpu_engine_count: 2
triton_max_queue_delay: 1
triton_preferred_batch_sizes: 512 1024
container_version: '21.12'
datasets:
- name: electricity_bin
- name: traffic_bin
datasets_dir: datasets
framework: PyTorch
model_name: TFT
triton_container_image: null
triton_custom_operations: null
triton_dockerfile: null
triton_load_model_method: explicit
|
PyTorch/LanguageModeling/BERT/triton/deployment_toolkit/model_analyzer | model_analyzer | exceptions | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class ModelAnalyzerException(Exception):
def __init__(self, message: str):
self._message = message
def __str__(self):
"""
Get the exception string representation.
Returns
-------
str
The message associated with this exception, or None if no message.
"""
return self._message
@property
def message(self):
"""
Get the exception message.
Returns
-------
str
The message associated with this exception, or None if no message.
"""
return self._message
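
# Illustrative usage (the message text is hypothetical):
#   raise ModelAnalyzerException("profiling failed: model repository not found")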
|
PyTorch/Translation/Transformer | Transformer | README | # Transformer For PyTorch
This repository provides a script and recipe to train the Transformer model to achieve state of the art accuracy, and is tested and maintained by NVIDIA.
## Table Of Contents
* [Model overview](#model-overview)
* [Model architecture](#model-architecture)
* [Default configuration](#default-configuration)
* [Feature support matrix](#feature-support-matrix)
* [Features](#features)
* [Mixed precision training](#mixed-precision-training)
* [Enabling mixed precision](#enabling-mixed-precision)
* [Enabling TF32](#enabling-tf32)
* [Glossary](#glossary)
* [Setup](#setup)
* [Requirements](#requirements)
* [Quick Start Guide](#quick-start-guide)
* [Advanced](#advanced)
* [Scripts and sample code](#scripts-and-sample-code)
* [Parameters](#parameters)
* [Command-line options](#command-line-options)
* [Getting the data](#getting-the-data)
* [Dataset guidelines](#dataset-guidelines)
* [Multi-dataset](#multi-dataset)
* [Training process](#training-process)
* [Inference process](#inference-process)
* [Performance](#performance)
* [Benchmarking](#benchmarking)
* [Training performance benchmark](#training-performance-benchmark)
* [Inference performance benchmark](#inference-performance-benchmark)
* [Results](#results)
* [Training accuracy results](#training-accuracy-results)
* [Training accuracy: NVIDIA DGX A100 (8x A100 40GB)](#training-accuracy-nvidia-dgx-a100-8x-a100-40gb)
* [Training accuracy: NVIDIA DGX-1 (8x V100 16GB)](#training-accuracy-nvidia-dgx-1-8x-v100-16gb)
* [Training stability test](#training-stability-test)
* [Training performance results](#training-performance-results)
* [Training performance: NVIDIA DGX A100 (8x A100 40GB)](#training-performance-nvidia-dgx-a100-8x-a100-40gb)
* [Training performance: NVIDIA DGX-1 (8x V100 16GB)](#training-performance-nvidia-dgx-1-8x-v100-16gb)
* [Training performance: NVIDIA DGX-2 (16x V100 32GB)](#training-performance-nvidia-dgx-2-16x-v100-32gb)
* [Inference performance results](#inference-performance-results)
* [Inference performance: NVIDIA DGX A100 (1x A100 40GB)](#inference-performance-nvidia-dgx-a100-1x-a100-40gb)
* [Inference performance: NVIDIA DGX-1 (1x V100 16GB)](#inference-performance-nvidia-dgx-1-1x-v100-16gb)
* [Release notes](#release-notes)
* [Changelog](#changelog)
* [Known issues](#known-issues)
## Model overview
The Transformer is a Neural Machine Translation (NMT) model which uses an attention mechanism to boost training speed and overall accuracy. The Transformer model was introduced in [Attention Is All You Need](https://arxiv.org/abs/1706.03762) and improved in [Scaling Neural Machine Translation](https://arxiv.org/abs/1806.00187).
This implementation is based on the optimized implementation in [Facebook's Fairseq NLP toolkit](https://github.com/pytorch/fairseq), built on top of PyTorch.
This model is trained with mixed precision using Tensor Cores on NVIDIA Volta, Turing and Ampere GPU architectures. Therefore, researchers can get results 6.5x faster than training without Tensor Cores, while experiencing the benefits of mixed precision training. This model is tested against each NGC monthly container release to ensure consistent accuracy and performance over time.
### Model architecture
The Transformer model uses the standard NMT encoder-decoder architecture. Unlike other NMT models, it uses no recurrent connections and operates on a fixed-size context window.
The encoder stack is made up of N identical layers. Each layer is composed of the following sublayers:
1. Self-attention layer
2. Feedforward network (which is 2 fully-connected layers)
Like the encoder stack, the decoder stack is made up of N identical layers. Each layer is composed of the sublayers:
1. Self-attention layer
2. Multi-headed attention layer combining encoder outputs with results from
the previous self-attention layer.
3. Feedforward network (2 fully-connected layers)
The encoder uses self-attention to compute a representation of the input sequence. The decoder generates the output sequence one token at a time, taking the encoder output and the previously generated tokens as inputs.
The model also applies embeddings on the input and output tokens, and adds a constant positional encoding. The positional encoding adds information about the position of each token.
<p align="center">
<img width="50%" src="./transformer.png" />
<br>
Figure 1. The architecture of a Transformer model.
</p>
The complete description of the Transformer architecture can be found in [Attention Is All You Need](https://arxiv.org/abs/1706.03762) paper.
### Default configuration
The Transformer uses a Byte Pair Encoding (BPE) tokenization scheme on top of tokenization performed with the [Moses decoder](https://github.com/moses-smt/mosesdecoder). This is a lossy compression method (information about white space is dropped). Tokenization is applied over the whole [WMT14](http://statmt.org/wmt14/translation-task.html#Download) en-de dataset, including the test set. The default vocabulary size is 33708, excluding all special tokens. The encoder and decoder use shared embeddings.
We use 6 blocks in each of the encoder and decoder stacks. The self-attention layer computes its outputs according to the following formula: $`Attention(Q,K,V) = softmax(\frac{QK^T}{\sqrt{d_k}})V`$. At each attention step, the model computes 16 different attention representations (which we call attention heads) and concatenates them.
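For orientation, here is a minimal sketch of the scaled dot-product attention described by this formula (shapes and masking are simplified; the actual implementation lives in `fairseq/modules/`):
```python
import torch

def scaled_dot_product_attention(q, k, v):
    # q, k, v: (batch, heads, seq_len, d_k)
    d_k = q.size(-1)
    scores = torch.matmul(q, k.transpose(-2, -1)) / d_k ** 0.5
    return torch.matmul(torch.softmax(scores, dim=-1), v)

q = k = v = torch.randn(2, 16, 7, 64)        # 16 attention heads, as in the big model
out = scaled_dot_product_attention(q, k, v)  # head outputs are concatenated afterwards
```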
We trained the Transformer model using the Adam optimizer with betas `(0.9, 0.997)`, epsilon `1e-9`, and a learning rate of `6e-4`. We used the inverse square root learning rate schedule, preceded by a linear warmup of 4000 steps.
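The resulting learning rate schedule can be sketched as follows (a simplification of the `inverse_sqrt` scheduler; exact edge-case handling may differ):
```python
def inverse_sqrt_lr(step, peak_lr=6e-4, warmup_updates=4000, warmup_init_lr=0.0):
    # Linear warmup to peak_lr, then decay proportional to 1/sqrt(step).
    if step <= warmup_updates:
        return warmup_init_lr + (peak_lr - warmup_init_lr) * step / max(warmup_updates, 1)
    return peak_lr * (warmup_updates / step) ** 0.5
```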
The implementation supports training in mixed precision. We use dynamic loss scaling and a custom mixed precision optimizer. Distributed multi-GPU and multi-node training is implemented with the `torch.distributed` module using the NCCL backend.
For inference, we use beam search with a default beam size of 5. Model performance is evaluated with the BLEU4 metric. For clarity, we report the score from the internal (legacy) BLEU implementation as well as the external [SacreBLEU](https://github.com/mjpost/sacreBLEU) score.
### Feature support matrix
The following features are supported by this model.<br>
| Feature | Supported
|--------------------------|--------------------------
| Multi-GPU training with [Distributed Communication Package](https://pytorch.org/docs/stable/distributed.html) | Yes
| Nvidia APEX | Yes
| AMP | Yes
| TorchScript | Yes
#### Features
* Multi-GPU training with [Distributed Communication Package](https://pytorch.org/docs/stable/distributed.html): Our model uses the torch.distributed package to implement efficient multi-GPU training with NCCL.
To enable multi-GPU training with torch.distributed, you have to initialize your model identically in every process spawned by torch.distributed.launch. The distributed strategy is implemented with APEX's DistributedDataParallel.
For details, see the example sources in this repo or the [PyTorch tutorial](https://pytorch.org/docs/stable/distributed.html)
* Nvidia APEX: The purpose of APEX is to provide an easy and intuitive framework for distributed training and mixed precision training. For details, see the official [APEX repository](https://github.com/NVIDIA/apex).
* AMP: This implementation uses Apex's AMP to perform mixed precision training.
* TorchScript: The Transformer can be converted to the TorchScript format, which eases deployment on platforms without Python dependencies. For more information, see the official [TorchScript](https://pytorch.org/docs/stable/jit.html) documentation.
### Mixed precision training
Mixed precision is the combined use of different numerical precisions in a computational method. [Mixed precision](https://arxiv.org/abs/1710.03740) training offers significant computational speedup by performing operations in half-precision format, while storing minimal information in single-precision to retain as much information as possible in critical parts of the network. Since the introduction of [Tensor Cores](https://developer.nvidia.com/tensor-cores) in the Volta and Turing architecture, significant training speedups are experienced by switching to mixed precision -- up to 3x overall speedup on the most arithmetically intense model architectures. Using mixed precision training requires two steps:
1. Porting the model to use the FP16 data type where appropriate.
2. Adding loss scaling to preserve small gradient values.
The ability to train deep learning networks with lower precision was introduced in the Pascal architecture and first supported in [CUDA 8](https://devblogs.nvidia.com/parallelforall/tag/fp16/) in the NVIDIA Deep Learning SDK.
For information about:
- How to train using mixed precision, see the [Mixed Precision Training](https://arxiv.org/abs/1710.03740) paper and [Training With Mixed Precision](https://docs.nvidia.com/deeplearning/sdk/mixed-precision-training/index.html) documentation.
- Techniques used for mixed precision training, see the [Mixed-Precision Training of Deep Neural Networks](https://devblogs.nvidia.com/mixed-precision-training-deep-neural-networks/) blog.
- APEX tools for mixed precision training, see the [NVIDIA Apex: Tools for Easy Mixed-Precision Training in PyTorch](https://devblogs.nvidia.com/apex-pytorch-easy-mixed-precision-training/).
#### Enabling mixed precision
Mixed precision is enabled using the `--amp` option in the `train.py` script. The default optimization level is `O2`, but it can be overridden with the `--amp-level $LVL` option (for details, see the [amp documentation](https://nvidia.github.io/apex/amp.html)). The forward and backward passes are computed in FP16, except for the loss function, which is computed in FP32. The default optimization level keeps a copy of the model in higher precision in order to perform an accurate weight update. After the update, the FP32 weights are copied back to the FP16 model. We use dynamic loss scaling with an initial scale of 2^7, increasing it by a factor of 2 every 2000 successful iterations. Overflow is checked after reducing gradients from all of the workers; if infs or NaNs are encountered, the whole batch is dropped.
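For orientation, below is a generic sketch of how Apex AMP with the `O2` level is typically wired up. The training script enables this through the `--amp` flag and layers its own loss-scaling policy on top, so treat this as illustrative rather than a restatement of `train.py`:
```python
import torch
from apex import amp

model = torch.nn.Linear(1024, 1024).cuda()
optimizer = torch.optim.Adam(model.parameters(), lr=6e-4, betas=(0.9, 0.997), eps=1e-9)
model, optimizer = amp.initialize(model, optimizer, opt_level="O2", loss_scale="dynamic")

inputs = torch.randn(8, 1024, device="cuda")
loss = model(inputs).float().pow(2).mean()   # loss is computed in FP32
with amp.scale_loss(loss, optimizer) as scaled_loss:
    scaled_loss.backward()  # backward pass on the scaled loss, FP16 gradients
optimizer.step()            # FP32 master weights are updated, then cast back to FP16
```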
#### Enabling TF32
TensorFloat-32 (TF32) is the new math mode in [NVIDIA A100](https://www.nvidia.com/en-us/data-center/a100/) GPUs for handling the matrix math also called tensor operations. TF32 running on Tensor Cores in A100 GPUs can provide up to 10x speedups compared to single-precision floating-point math (FP32) on Volta GPUs.
TF32 Tensor Cores can speed up networks using FP32, typically with no loss of accuracy. It is more robust than FP16 for models which require high dynamic range for weights or activations.
For more information, refer to the [TensorFloat-32 in the A100 GPU Accelerates AI Training, HPC up to 20x](https://blogs.nvidia.com/blog/2020/05/14/tensorfloat-32-precision-format/) blog post.
TF32 is supported in the NVIDIA Ampere GPU architecture and is enabled by default.
### Glossary
Attention layer - Layer that computes which elements of the input sequence, or of its hidden representation, contribute the most to the currently considered output element.
Beam search - A heuristic search algorithm which, at each prediction step, keeps the N most probable outputs as a base for further prediction.
BPE - Byte Pair Encoding, a compression algorithm that finds the most common pair of symbols in the data and replaces it with a new symbol absent from the data.
EOS - End of a sentence.
Self attention layer - Attention layer that computes a hidden representation of the input using the same tensor as query, key, and value.
Token - A string that is representable within the model. We also refer to the token's position in the dictionary as a token. There are special non-string tokens: alphabet tokens (all characters in a dataset), EOS token, PAD token.
Tokenizer - Object that converts raw strings to sequences of tokens.
Vocabulary embedding - Layer that projects one-hot token representations to a high dimensional space which preserves some information about correlations between tokens.
## Setup
The following section lists the requirements in order to start training the Transformer model.
### Requirements
This repository contains Dockerfile which extends the PyTorch NGC container and encapsulates some dependencies. Aside from these dependencies, ensure you have the following components:
- [NVIDIA Docker](https://github.com/NVIDIA/nvidia-docker)
- [PyTorch 22.06-py3+ NGC container](https://ngc.nvidia.com/registry/nvidia-pytorch)
- GPU-based architecture:
- [NVIDIA Volta](https://www.nvidia.com/en-us/data-center/volta-gpu-architecture/)
- [NVIDIA Turing](https://www.nvidia.com/en-us/geforce/turing/)
- [NVIDIA Ampere architecture](https://www.nvidia.com/en-us/data-center/nvidia-ampere-gpu-architecture/)
For more information about how to get started with NGC containers, see the following sections from the NVIDIA GPU Cloud Documentation and the Deep Learning Documentation:
- [Getting Started Using NVIDIA GPU Cloud](https://docs.nvidia.com/ngc/ngc-getting-started-guide/index.html)
- [Accessing And Pulling From The NGC Container Registry](https://docs.nvidia.com/deeplearning/dgx/user-guide/index.html#accessing_registry)
- Running [PyTorch NGC container](https://ngc.nvidia.com/catalog/containers/nvidia:pytorch)
For those unable to use the PyTorch NGC container, to set up the required environment or create your own container, see the versioned [NVIDIA Container Support Matrix](https://docs.nvidia.com/deeplearning/frameworks/support-matrix/index.html).
## Quick Start Guide
To train your model using mixed or TF32 precision with Tensor Cores or using FP32, perform the following steps using the default parameters of the Transformer model on the [WMT14 English-German](http://statmt.org/wmt14/translation-task.html#Download) dataset. For the specifics concerning training and inference, see the [Advanced](#advanced) section.
1. Clone the repository
```
git clone https://github.com/NVIDIA/DeepLearningExamples.git
cd DeepLearningExamples/PyTorch/Translation/Transformer
```
2. Build and launch the Transformer PyTorch NGC container
```bash
docker build . -t your.repository:transformer
nvidia-docker run -it --rm --ipc=host your.repository:transformer bash
```
If you already have preprocessed data, use:
```bash
nvidia-docker run -it --rm --ipc=host -v <path to your preprocessed data>:/data/wmt14_en_de_joined_dict your.repository:transformer bash
```
If you already have data downloaded, but it has not yet been preprocessed, use:
```bash
nvidia-docker run -it --rm --ipc=host -v <path to your unprocessed data>:/workspace/translation/examples/translation/orig your.repository:transformer bash
```
3. Download and preprocess dataset: Download and preprocess the WMT14 English-German dataset.
```bash
scripts/run_preprocessing.sh
```
After running this command, data will be downloaded to the `/workspace/translation/examples/translation/orig` directory, then processed and placed in the `/data/wmt14_en_de_joined_dict` directory.
4. Start training
```bash
python -m torch.distributed.run --nproc_per_node 8 /workspace/translation/train.py /data/wmt14_en_de_joined_dict \
--arch transformer_wmt_en_de_big_t2t \
--share-all-embeddings \
--optimizer adam \
--adam-betas '(0.9, 0.997)' \
--adam-eps "1e-9" \
--clip-norm 0.0 \
--lr-scheduler inverse_sqrt \
--warmup-init-lr 0.0 \
--warmup-updates 4000 \
--lr 0.0006 \
--min-lr 0.0 \
--dropout 0.1 \
--weight-decay 0.0 \
--criterion label_smoothed_cross_entropy \
--label-smoothing 0.1 \
--max-tokens 5120 \
--seed 1 \
--fuse-layer-norm \
--amp \
--amp-level O2 \
--save-dir /workspace/checkpoints
```
The script saves checkpoints every epoch to the directory specified in the `--save-dir` option. In addition, the best performing checkpoint (in terms of loss) and the latest checkpoints are saved separately.
**WARNING**: If you don't have access to sufficient disk space, use the `--save-interval $N` option. Each checkpoint is ~3.4GB. For example, it takes the Transformer model about 30 epochs for the validation loss to plateau. The default is to save the last checkpoint, the best checkpoint, and a checkpoint for every epoch, which means (30+1+1)*3.4GB = 108.8GB of disk space. Specifying `--save-interval 10` reduces this to (30/10+1+1)*3.4GB = 17GB.
5. Start interactive inference
```bash
python inference.py \
--buffer-size 5000 \
--path /path/to/your/checkpoint.pt \
--max-tokens 10240 \
--fuse-dropout-add \
--remove-bpe \
--bpe-codes /path/to/bpe_code_file \
--fp16
```
where,
* `--path` option is the location of the checkpoint file.
* `--bpe-codes` option is the location of the `code` file. If the default training command mentioned above is used, this file can be found in the preprocessed data directory (i.e., `/data/wmt14_en_de_joined_dict`).
## Advanced
The following sections provide greater details of the dataset, running training and inference, and the training results.
### Scripts and sample code
The `preprocess.py` script performs binarization of the dataset obtained and tokenized by the `examples/translation/prepare-wmt14en2de.sh` script. The `train.py` script contains the training loop as well as the statistics-gathering code. The steps performed in a single training step can be found in `fairseq/ddp_trainer.py`. The model definition is placed in the file `fairseq/models/transformer.py`. Model-specific modules, including multiheaded attention and sinusoidal positional embedding, are inside the `fairseq/modules/` directory. Finally, the data wrappers are placed inside the `fairseq/data/` directory.
### Parameters
In this section we give a user-friendly description of the most common options used in the `train.py` script.
### Command-line options
`--arch` - select the specific configuration for the model. You can select between various predefined hyperparameter values, like the number of encoder/decoder blocks, the dropout value, or the size of the hidden state representation.<br/>
`--share-all-embeddings` - use the same set of weights for the encoder and decoder word embeddings.<br/>
`--optimizer` - choose optimization algorithm.<br/>
`--clip-norm` - set a value that gradients will be clipped to.<br/>
`--lr-scheduler` - choose learning rate change strategy.<br/>
`--warmup-init-lr` - start linear warmup with a learning rate at this value.<br/>
`--warmup-updates` - set number of optimization steps after which linear warmup will end.<br/>
`--lr` - set learning rate.<br/>
`--min-lr` - prevent the learning rate from falling below this value, regardless of the learning rate schedule.<br/>
`--dropout` - set dropout value.<br/>
`--weight-decay` - set weight decay value.<br/>
`--criterion` - select loss function.<br/>
`--label-smoothing` - distribute value of one-hot labels between all entries of a dictionary. Value set by this option will be a value subtracted from one-hot label.<br/>
`--max-tokens` - set batch size in terms of tokens.<br/>
`--max-sentences` - set batch size in terms of sentences. Note that the actual batch size will then vary much more than when using the `--max-tokens` option.<br/>
`--seed` - set random seed for NumPy and PyTorch RNGs.<br/>
`--max-epochs` - set the maximum number of epochs.<br/>
`--online-eval` - perform inference on test set and then compute BLEU score after every epoch.<br/>
`--target-bleu` - works like `--online-eval` and sets a BLEU score threshold which after being attained will cause training to stop.<br/>
`--amp` - use mixed precision.<br/>
`--save-dir` - set directory for saving checkpoints.<br/>
`--distributed-init-method` - method for initializing the torch.distributed package. You can either provide an address with the `tcp` method or use environment variable initialization with the `env` method.<br/>
`--update-freq` - use gradient accumulation. Set number of training steps across which gradient will be accumulated.<br/>
To see the full list of available options and their descriptions, use the `-h` or `--help` command line option, for example:
```
python train.py --help
```
The following (partial) output is printed when running the sample:
```
usage: train.py [-h] [--no-progress-bar] [--log-interval N]
[--log-format {json,none,simple,tqdm}] [--seed N] [--fp16]
[--task TASK] [--skip-invalid-size-inputs-valid-test] [--max-tokens N]
[--max-sentences N] [--sentencepiece] [--train-subset SPLIT]
[--valid-subset SPLIT] [--max-sentences-valid N]
[--gen-subset SPLIT] [--num-shards N] [--shard-id ID]
[--distributed-world-size N]
[--distributed-rank DISTRIBUTED_RANK]
[--local_rank LOCAL_RANK]
[--distributed-backend DISTRIBUTED_BACKEND]
[--distributed-init-method DISTRIBUTED_INIT_METHOD]
[--distributed-port DISTRIBUTED_PORT] [--device-id DEVICE_ID]
--arch ARCH [--criterion CRIT] [--max-epoch N]
[--max-update N] [--target-bleu TARGET] [--clip-norm NORM]
[--sentence-avg] [--update-freq N] [--optimizer OPT]
[--lr LR_1,LR_2,...,LR_N] [--momentum M] [--weight-decay WD]
[--lr-scheduler LR_SCHEDULER] [--lr-shrink LS] [--min-lr LR]
[--min-loss-scale D] [--enable-parallel-backward-allred-opt]
[--parallel-backward-allred-opt-threshold N]
[--enable-parallel-backward-allred-opt-correctness-check]
[--save-dir DIR] [--restore-file RESTORE_FILE]
[--save-interval N] [--save-interval-updates N]
[--keep-interval-updates N] [--no-save]
[--no-epoch-checkpoints] [--validate-interval N] [--path FILE]
[--remove-bpe [REMOVE_BPE]] [--cpu] [--quiet] [--beam N]
[--nbest N] [--max-len-a N] [--max-len-b N] [--min-len N]
[--no-early-stop] [--unnormalized] [--no-beamable-mm]
[--lenpen LENPEN] [--unkpen UNKPEN]
[--replace-unk [REPLACE_UNK]] [--score-reference]
[--prefix-size PS] [--sampling] [--sampling-topk PS]
[--sampling-temperature N] [--print-alignment]
[--model-overrides DICT] [--online-eval]
[--bpe-codes CODES] [--fuse-dropout-add] [--fuse-relu-dropout]
```
### Getting the data
The Transformer model was trained on the [WMT14 English-German](http://statmt.org/wmt14/translation-task.html#Download) dataset. The concatenation of the *commoncrawl*, *europarl* and *news-commentary* corpora is used as the training and validation dataset, and *newstest2014* is used as the test dataset.<br/>
This repository contains the `run_preprocessing.sh` script, which automatically downloads and preprocesses the training and test datasets. By default, data will be stored in the `/data/wmt14_en_de_joined_dict` directory.<br/>
Our download script utilizes [Moses decoder](https://github.com/moses-smt/mosesdecoder) to perform tokenization of the dataset and [subword-nmt](https://github.com/rsennrich/subword-nmt) to segment text into subword units (BPE). By default, the script builds a shared vocabulary of 33708 tokens, which is consistent with [Scaling Neural Machine Translation](https://arxiv.org/abs/1806.00187).
#### Dataset guidelines
The Transformer model works with a fixed-size vocabulary. Prior to training, we need to learn a data representation that allows us to store the entire dataset as a sequence of tokens. To achieve this we use Byte Pair Encoding (BPE). This algorithm builds a vocabulary by iterating over a dataset, looking for the most frequent pair of symbols and replacing them with a new symbol that is not yet present in the dataset. After producing the desired number of encodings (new symbols can also be merged together), it outputs a code file that is used as an input for the `Dictionary` class.
This approach does not minimize the length of the encoded dataset; as an alternative, [SentencePiece](https://github.com/google/sentencepiece/) can be used to tokenize the dataset with the unigram model, which tries to find an encoding that is close to the theoretical entropy limit.
Data is then sorted by length (in terms of tokens), and examples with similar lengths are batched together and padded if necessary.
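To make the merge operation described above concrete, here is a toy, self-contained sketch of a single BPE merge step. It is an illustration only, not the subword-nmt implementation used by the preprocessing script.
```python
# Toy illustration of one Byte Pair Encoding merge step.
from collections import Counter

def most_frequent_pair(corpus):
    """corpus: list of token sequences; return the most frequent adjacent symbol pair."""
    pairs = Counter()
    for tokens in corpus:
        for a, b in zip(tokens, tokens[1:]):
            pairs[(a, b)] += 1
    return pairs.most_common(1)[0][0]

def merge_pair(corpus, pair):
    """Replace every occurrence of `pair` with a single merged symbol."""
    merged = "".join(pair)
    new_corpus = []
    for tokens in corpus:
        out, i = [], 0
        while i < len(tokens):
            if i + 1 < len(tokens) and (tokens[i], tokens[i + 1]) == pair:
                out.append(merged)
                i += 2
            else:
                out.append(tokens[i])
                i += 1
        new_corpus.append(out)
    return new_corpus

corpus = [list("lower"), list("lowest"), list("low")]
pair = most_frequent_pair(corpus)   # one of the most frequent pairs, e.g. ('l', 'o')
corpus = merge_pair(corpus, pair)   # e.g. [['lo', 'w', 'e', 'r'], ['lo', 'w', 'e', 's', 't'], ['lo', 'w']]
```
Repeating this step until the desired vocabulary size is reached yields the list of merges stored in the BPE code file.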
#### Multi-dataset
The model has also been tested on the [WMT14 English-French](http://www.statmt.org/wmt14/translation-task.html) dataset, achieving a state-of-the-art accuracy of 41.4 BLEU.
### Training process
The default training configuration can be launched by running the `train.py` training script. By default, the script saves one checkpoint every epoch in addition to the latest and the best ones. The best checkpoint is considered the one with the lowest value of loss, not the one with the highest BLEU score. To override this behavior, use the `--save-interval $N` option to save epoch checkpoints every N epochs, or `--no-epoch-checkpoints` to disable them entirely (with this option the latest and the best checkpoints will still be saved). Specify the save directory with the `--save-dir` option.<br/>
In order to run multi-GPU training, launch the training script with `python -m torch.distributed.launch --nproc_per_node $N` prepended, where N is the number of GPUs.
We have tested training on up to 16 GPUs on a single node.<br/>
After each training epoch, the script runs a loss validation on the validation split of the dataset and outputs the validation loss. By default, the BLEU evaluation after each epoch is disabled. To enable it, use the `--online-eval` option; to use the BLEU score as the training stopping condition, use the `--target-bleu $TGT` option. The computed BLEU scores are case insensitive. BLEU is computed by the internal fairseq algorithm, whose implementation can be found in the `fairseq/bleu.py` script.<br/>
By default, the `train.py` script will launch FP32 training without Tensor Cores. To use mixed precision with Tensor Cores use the `--fp16` option.<br/>
To reach the BLEU score reported in [Scaling Neural Machine Translation](https://arxiv.org/abs/1806.00187) research paper, we used mixed precision training with a batch size of 5120 per GPU and learning rate of 6e-4 on a DGX-1V system with 8 Tesla V100s 16G. If you use a different setup, we recommend you scale your hyperparameters by applying the following rules:
1. To use FP32, reduce the batch size to 2560 and set the `--update-freq 2` option.
2. To train on fewer GPUs, multiply `--update-freq` by the reciprocal of the scaling factor.
For example, when training in FP32 mode on 4 GPUs, use the `--update-freq=4` option. A small helper that applies these rules is sketched below.
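The following sketch applies the two rules above to derive a per-GPU batch size and `--update-freq` value. It mirrors the numbers in the text and should be treated as a starting point, not a validated configuration.
```python
# Sketch of the hyperparameter scaling rules described above.
def scale_hyperparameters(num_gpus: int, use_amp: bool):
    base_gpus = 8                              # reference setup: 8x V100 16GB
    batch_per_gpu = 5120 if use_amp else 2560  # rule 1: halve the batch size for FP32
    update_freq = 1 if use_amp else 2          # rule 1: accumulate 2 steps for FP32
    update_freq *= base_gpus // num_gpus       # rule 2: scale by the GPU-count ratio
    return batch_per_gpu, update_freq

print(scale_hyperparameters(num_gpus=4, use_amp=False))  # (2560, 4), matching the example above
print(scale_hyperparameters(num_gpus=8, use_amp=True))   # (5120, 1), the reference recipe
```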
### Inference process
Inference on a raw input can be performed by piping file to be translated into the `inference.py` script. It requires a pre-trained model checkpoint, BPE codes file and dictionary file (both are produced by the `run_preprocessing.sh` script and can be found in the dataset directory).<br/>
In order to run interactive inference, run command:
```
python inference.py --path /path/to/your/checkpoint.pt --fuse-dropout-add --remove-bpe --bpe-codes /path/to/code/file
```
The `--buffer-size` option enables batching of the input sentences; each batch contains up to `--max-tokens` tokens.
To test model checkpoint accuracy on the WMT14 test set, run the following command:
```bash
sacrebleu -t wmt14/full -l en-de --echo src | python inference.py --buffer-size 5000 --path /path/to/your/checkpoint.pt --max-tokens 10240 --fuse-dropout-add --remove-bpe --bpe-codes /data/code --fp16 | sacrebleu -t wmt14/full -l en-de -lc
```
## Performance
### Benchmarking
The following section shows how to run benchmarks measuring the model performance in training and inference modes.
#### Training performance benchmark
To benchmark the training performance on a specific batch size, run the `train.py` training script. Performance in tokens/s will be printed to standard output every N iterations, as specified by the `--log-interval` option. Additionally, performance and loss values will be logged by [dllogger](https://github.com/NVIDIA/dllogger) to the file specified with the `--stat-file` option. Every line in the output file is a valid JSON string prepended with the `DLLL` prefix.
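Since each line of the log file is `DLLL` followed by a JSON payload, the statistics can be post-processed with a few lines of Python. The snippet below is a minimal sketch; the key names inside the payload are assumptions and may differ between dllogger versions.
```python
# Minimal sketch of reading a dllogger file produced with --stat-file.
import json

def read_dllogger_file(path):
    records = []
    with open(path) as f:
        for line in f:
            line = line.strip()
            if not line.startswith("DLLL"):
                continue
            records.append(json.loads(line[len("DLLL"):].strip()))
    return records

# Example (field names assumed for illustration):
# stats = read_dllogger_file("train_log.json")
# throughputs = [r["data"]["tokens/s"] for r in stats if "tokens/s" in r.get("data", {})]
```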
#### Inference performance benchmark
To benchmark the inference performance on a specific batch size, run the following command to start the benchmark:
```bash
for i in {1..10}; do sacrebleu -t wmt14/full -l en-de --echo src; done | python inference.py --buffer-size 5000 --path /path/to/your/checkpoint.pt --max-tokens 10240 --fuse-dropout-add --remove-bpe --bpe-codes /data/code --fp16 > /dev/null
```
Results will be printed to stderr.
### Results
The following sections provide details on how we achieved our performance and accuracy in training and inference.
#### Training accuracy results
Following the spirit of the paper [A Call for Clarity in Reporting BLEU Scores](https://arxiv.org/pdf/1804.08771.pdf), we decided to change the evaluation metric implemented in fairseq to the [SacreBLEU](https://github.com/mjpost/sacreBLEU) score. We have calculated that the new metric has an almost linear relationship with the old one. We ran linear regression on nearly 2000 checkpoints and found that the SacreBLEU score almost perfectly follows the formula: newScore = 0.978 * oldScore - 0.05.
<p align="center">
<img src="./bleu_relationship.png" />
<br>
Figure 2. Linear relationship between old and new BLEU metric.
</p>
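Expressed as code, the reported relationship is a one-line conversion helper:
```python
# Approximate SacreBLEU score from the legacy fairseq BLEU score,
# using the linear fit reported above.
def old_to_sacrebleu(old_score: float) -> float:
    return 0.978 * old_score - 0.05

print(round(old_to_sacrebleu(28.77), 2))  # ~28.09, consistent with the best checkpoint below
```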
To take into account the variability of the results, we computed basic statistics that help us verify whether a model trains correctly. Evaluating nearly 2000 checkpoints from 20 runs, the best score we achieved is 28.09 BLEU (which corresponds to a 28.77 old score). The variance of the best-performing model's score across those 20 runs is 0.011. Knowing that the max statistic is skewed toward higher values, we have also run studies that calculate the threshold beyond which validation loss is no longer correlated with the BLEU score.
Our hope is that the dev set's distribution is similar to the test set's distribution, so that when the validation loss drops, the BLEU score rises. But due to the finiteness of the validation and test sets, we expect that there is a loss value beyond which performance on the two sets becomes decoupled. To find this point we used the Pearson correlation coefficient as a metric. The results indicate that optimizing beyond a validation loss of 4.02 is no longer beneficial for the BLEU score. Further optimization does not cause overfitting, but the results become stochastic.
The mean BLEU score after reaching a validation loss of 4.02 is 27.38. We observe a variance of 0.08, which translates to nearly a 0.3 BLEU average difference between the mean score and the obtained score.
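A sketch of the kind of analysis described above is shown below: it computes the Pearson correlation between validation loss and BLEU for checkpoints whose loss is below a given threshold. The `checkpoints` list of `(validation_loss, bleu)` pairs is a hypothetical input gathered from training logs; the snippet is not the script we used.
```python
# Sketch: correlation between validation loss and BLEU below a loss threshold.
import numpy as np

def correlation_below_threshold(checkpoints, loss_threshold):
    selected = [(loss, bleu) for loss, bleu in checkpoints if loss <= loss_threshold]
    if len(selected) < 2:
        return float("nan")
    losses, bleus = zip(*selected)
    return np.corrcoef(losses, bleus)[0, 1]

# Scanning thresholds shows where loss stops being informative about BLEU:
# for t in np.arange(3.90, 4.30, 0.02):
#     print(round(t, 2), correlation_below_threshold(checkpoints, t))
```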
<p align="center">
<img src="./decorrelation_threshold.png" />
<br>
Figure 3. Validation loss vs BLEU score. Plots are trimmed to certain validation loss threshold.
</p>
##### Training accuracy: NVIDIA DGX A100 (8x A100 40GB)
Our results were obtained by running the `run_DGXA100_AMP_8GPU.sh` and `run_DGXA100_TF32_8GPU.sh` training scripts in the pytorch-22.06-py3 NGC container on NVIDIA DGX A100 (8x A100 40GB) GPUs. We report the average accuracy over 6 runs. We consider a model trained when it reaches its minimal validation loss. Time to train contains only training time, without validation. Depending on the configuration and the frequency of validation, validation can add up to an additional minute per epoch.
| GPUs | Batch size / GPU | Accuracy - TF32 | Accuracy - mixed precision | Time to train - TF32 | Time to train - mixed precision | Time to train speedup (TF32 to mixed precision)
|---------|---------------------|------------------|-----------------------------|-------------------------|----------------------------------|------------------------------------
| 8 | 10240 | 27.92 | 27.76 | 2.74 hours | 2.64 hours | x1.04
##### Training accuracy: NVIDIA DGX-1 (8x V100 16GB)
Our results were obtained by running the `run_DGX1_AMP_8GPU.sh` and `run_DGX1_FP32_8GPU.sh` training scripts in the pytorch-22.06-py3 NGC container on NVIDIA DGX-1 (8x V100 16GB) GPUs. We report the average accuracy over 6 runs. We consider a model trained when it reaches its minimal validation loss. Time to train contains only training time, without validation. Depending on the configuration and the frequency of validation, validation can add up to an additional minute per epoch. Using mixed precision we could fit a larger batch size in memory, further speeding up the training.
| GPUs | Batch size / GPU | Accuracy - FP32 | Accuracy - mixed precision | Time to train - FP32 | Time to train - mixed precision | Time to train speedup (FP32 to mixed precision)
|---------|---------------------|------------------|-----------------------------|-------------------------|----------------------------------|------------------------------------
| 8 | 5120/2560 | 27.66 | 27.82 | 11.8 hours | 4.5 hours | x2.62
#### Training performance results
##### Training performance: NVIDIA DGX A100 (8x A100 40GB)
Our results were obtained by running the `run_DGXA100_AMP_8GPU.sh` and `run_DGXA100_TF32_8GPU.sh` training scripts in the pytorch-22.06-py3 NGC container on NVIDIA DGX A100 (8x A100 40GB) GPUs. Performance numbers (in tokens per second) were averaged over an entire training epoch.
| GPUs | Batch size / GPU | Throughput - TF32 | Throughput - mixed precision | Throughput speedup (TF32 - mixed precision) | Weak scaling - TF32 | Weak scaling - mixed precision
|--------|--------------------|----------------------|---------------------------------|-----------------------------------------------|------------------------|-----
| 8 | 10240 | 347936 | 551599 | x1.59 | 6.81 | 6.72
| 4 | 10240 | 179245 | 286081 | x1.60 | 3.51 | 3.49
| 1 | 10240 | 51057 | 82059 | x1.60 | 1 | 1
To achieve these same results, follow the steps in the [Quick Start Guide](#quick-start-guide).
##### Training stability test
The following plot shows average validation loss curves for different configs. We can see that training with AMP O2 converges slightly slower than FP32 and TF32 training. To mitigate this, you can use the `--amp-level O1` option at the cost of a 20% performance drop compared to the default AMP setting.
<p align="center">
<img width="75%" height="75%" src="./average_valid_loss.png" />
<br>
Figure 4. Validation loss curves
</p>
##### Training performance: NVIDIA DGX-1 (8x V100 16GB)
Our results were obtained by running the `run_DGX1_AMP_8GPU.sh` and `run_DGX1_FP32_8GPU.sh` training scripts in the pytorch-22.06-py3 NGC container on NVIDIA DGX-1 with (8x V100 16GB) GPUs. Performance numbers (in tokens per second) were averaged over an entire training epoch. Using mixed precision we could fit a larger batch size in memory, further speeding up the training.
| GPUs | Batch size / GPU | Throughput - FP32 | Throughput - mixed precision | Throughput speedup (FP32 - mixed precision) | Weak scaling - FP32 | Weak scaling - mixed precision
|--------|--------------------|----------------------|---------------------------------|-----------------------------------------------|------------------------|-----
| 8 | 5120/2560 | 59316 | 214656 | x3.62 | 6.79 | 6.52
| 4 | 5120/2560 | 30204 | 109726 | x3.63 | 3.46 | 3.33
| 1 | 5120/2560 | 8742 | 32942 | x3.77 | 1 | 1
To achieve these same results, follow the steps in the [Quick Start Guide](#quick-start-guide).
##### Training performance: NVIDIA DGX-2 (16x V100 32GB)
Our results were obtained by running the `run_DGX1_AMP_8GPU.sh` and `run_DGX1_FP32_8GPU.sh` training scripts, with the number of GPUs set to 16, in the pytorch-22.06-py3 NGC container on NVIDIA DGX-2 with (16x V100 32GB) GPUs. Performance numbers (in tokens per second) were averaged over an entire training epoch. Using mixed precision we could fit a larger batch size in memory, further speeding up the training.
| GPUs | Batch size / GPU | Throughput - FP32 | Throughput - mixed precision | Throughput speedup (FP32 - mixed precision) | Weak scaling - FP32 | Weak scaling - mixed precision
|--------|--------------------|----------------------|---------------------------------|-----------------------------------------------|------------------------|-----
| 16 | 10240/5120 | 136253 | 517227 | x3.80 | 13.87 | 12.96
| 8 | 10240/5120 | 68929 | 267815 | x3.89 | 7.01 | 6.71
| 4 | 10240/5120 | 35216 | 137703 | x3.91 | 3.58 | 3.45
| 1 | 10240/5120 | 9827 | 39911 | x4.06 | 1 | 1
To achieve these same results, follow the steps in the [Quick Start Guide](#quick-start-guide).
#### Inference performance results
Our implementation of the Transformer has a dynamic batching algorithm, which batches sentences together in such a way that each batch contains no more than `N` tokens or no more than `M` sentences. In this benchmark we use the first option in order to get the most stable results.
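A simplified sketch of the token-capped variant is shown below: sentences sorted by length are grouped so that the padded batch size (number of sentences times the longest sentence) never exceeds the token budget. This is an illustration of the idea, not the repository's batching code.
```python
# Simplified sketch of token-capped dynamic batching.
def batch_by_tokens(sentences, max_tokens):
    batches, current, current_max_len = [], [], 0
    for sent in sorted(sentences, key=len):
        max_len = max(current_max_len, len(sent))
        if current and (len(current) + 1) * max_len > max_tokens:
            batches.append(current)
            current, current_max_len = [], 0
            max_len = len(sent)
        current.append(sent)
        current_max_len = max_len
    if current:
        batches.append(current)
    return batches

# Example: with max_tokens=8, [[1], [2, 3], [4, 5, 6]] splits into two batches,
# because padding all three sentences to length 3 would require 9 token slots.
print(batch_by_tokens([[1], [2, 3], [4, 5, 6]], max_tokens=8))
```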
##### Inference performance: NVIDIA DGX A100 (1x A100 40GB)
Our results were obtained by running the `inference.py` inferencing benchmarking script in the pytorch-22.06-py3 NGC container on NVIDIA DGX A100 (1x A100 40GB) GPU.
| Precision | Batch size | Throughput Avg | Latency Avg | Latency 90% |Latency 95% |Latency 99% |
|-----------|------------|----------------|-------------|-------------|------------|------------|
| TF32 | 10240 | 7105 | 1.22s | 1.67s | 1.67s | 1.67s |
| FP16 | 10240 | 7988 | 1.09s | 1.73s | 1.73s | 1.73s |
To achieve these same results, follow the steps in the [Quick Start Guide](#quick-start-guide).
##### Inference performance: NVIDIA DGX-1 (1x V100 16GB)
Our results were obtained by running the `inference.py` inferencing benchmarking script in the pytorch-22.06-py3 NGC container on NVIDIA DGX-1 with (1x V100 16GB) GPU.
| Precision | Batch size | Throughput Avg | Latency Avg | Latency 90% | Latency 95% | Latency 99% |
|-----------|------------|----------------|-------------|-------------|-------------|-------------|
| FP32 | 10240 | 3461 | 2.51s | 3.19 s | 3.19s | 3.19s |
| FP16 | 10240 | 5983 | 1.45s | 2.03 s | 2.03s | 2.03s |
To achieve these same results, follow the steps in the [Quick Start Guide](#quick-start-guide).
## Release notes
### Changelog
February 2022:
- Update deprecated calls in PyTorch CPP and Python API
- Update README with latest performance measurements
June 2020
- add TorchScript support
- Ampere support
March 2020
- remove language modeling from the repository
- one inference script for large chunks of data as well as for interactive demo
- change custom distributed strategy to APEX's DDP
- replace custom fp16 training with AMP
- major refactoring of the codebase
December 2019
- Change evaluation metric
August 2019
- add basic AMP support
July 2019
- Replace custom fused operators with jit functions
June 2019
- New README
March 2019
- Add mid-training [SacreBLEU](https://pypi.org/project/sacrebleu/1.2.10/) evaluation. Better handling of OOMs.
Initial commit, forked from [fairseq](https://github.com/pytorch/fairseq/commit/ac5fddfc691267285a84c81d39475411da5ed1c6)
## Known issues
- Using a batch size greater than 16k causes an indexing error in the strided_batched_gemm module
|
PaddlePaddle/Classification/RN50v1.5/scripts/training | training | train_resnet50_TF32_90E_DGXA100 | # Copyright (c) 2022 NVIDIA Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
python -m paddle.distributed.launch --gpus=0,1,2,3,4,5,6,7 train.py --epochs 90
|
PyTorch/Forecasting/TFT/triton/deployment_toolkit | deployment_toolkit | utils | # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import Tuple
LOGGER = logging.getLogger(__name__)
def parse_server_url(server_url: str) -> Tuple[str, str, int]:
DEFAULT_PORTS = {"http": 8000, "grpc": 8001}
# extract protocol
server_url_items = server_url.split("://")
if len(server_url_items) != 2:
raise ValueError("Prefix server_url with protocol ex.: grpc://127.0.0.1:8001")
requested_protocol, server_url = server_url_items
requested_protocol = requested_protocol.lower()
if requested_protocol not in DEFAULT_PORTS:
raise ValueError(f"Unsupported protocol: {requested_protocol}")
# extract host and port
default_port = DEFAULT_PORTS[requested_protocol]
server_url_items = server_url.split(":")
if len(server_url_items) == 1:
host, port = server_url, default_port
elif len(server_url_items) == 2:
host, port = server_url_items
port = int(port)
if port != default_port:
LOGGER.warning(
f"Current server URL is {server_url} while default {requested_protocol} port is {default_port}"
)
else:
raise ValueError(f"Could not parse {server_url}. Example of correct server URL: grpc://127.0.0.1:8001")
return requested_protocol, host, port
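# Illustrative usage only (not part of the original module); the URLs below are
# placeholders. The helper returns (protocol, host, port) and fills in the
# protocol's default port when none is given; a warning is logged for
# non-default ports.
if __name__ == "__main__":
    print(parse_server_url("grpc://127.0.0.1"))       # ('grpc', '127.0.0.1', 8001)
    print(parse_server_url("http://localhost:8080"))  # ('http', 'localhost', 8080)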
|
PyTorch/SpeechSynthesis/HiFiGAN/fastpitch | fastpitch | arg_parser | # *****************************************************************************
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
import argparse
def parse_fastpitch_args(parent, add_help=False):
"""
Parse commandline arguments.
"""
parser = argparse.ArgumentParser(parents=[parent], add_help=add_help,
allow_abbrev=False)
io = parser.add_argument_group('io parameters')
io.add_argument('--n-mel-channels', default=80, type=int,
help='Number of bins in mel-spectrograms')
io.add_argument('--max-seq-len', default=2048, type=int,
help='Maximum sequence length')
symbols = parser.add_argument_group('symbols parameters')
symbols.add_argument('--n-symbols', default=148, type=int,
help='Number of symbols in dictionary')
symbols.add_argument('--padding-idx', default=0, type=int,
help='Index of padding symbol in dictionary')
symbols.add_argument('--symbols-embedding-dim', default=384, type=int,
help='Input embedding dimension')
in_fft = parser.add_argument_group('input FFT parameters')
in_fft.add_argument('--in-fft-n-layers', default=6, type=int,
help='Number of FFT blocks')
in_fft.add_argument('--in-fft-n-heads', default=1, type=int,
help='Number of attention heads')
in_fft.add_argument('--in-fft-d-head', default=64, type=int,
help='Dim of attention heads')
in_fft.add_argument('--in-fft-conv1d-kernel-size', default=3, type=int,
help='Conv-1D kernel size')
in_fft.add_argument('--in-fft-conv1d-filter-size', default=1536, type=int,
help='Conv-1D filter size')
in_fft.add_argument('--in-fft-output-size', default=384, type=int,
help='Output dim')
in_fft.add_argument('--p-in-fft-dropout', default=0.1, type=float,
help='Dropout probability')
in_fft.add_argument('--p-in-fft-dropatt', default=0.1, type=float,
help='Multi-head attention dropout')
in_fft.add_argument('--p-in-fft-dropemb', default=0.0, type=float,
help='Dropout added to word+positional embeddings')
out_fft = parser.add_argument_group('output FFT parameters')
out_fft.add_argument('--out-fft-n-layers', default=6, type=int,
help='Number of FFT blocks')
out_fft.add_argument('--out-fft-n-heads', default=1, type=int,
help='Number of attention heads')
out_fft.add_argument('--out-fft-d-head', default=64, type=int,
help='Dim of attention head')
out_fft.add_argument('--out-fft-conv1d-kernel-size', default=3, type=int,
help='Conv-1D kernel size')
out_fft.add_argument('--out-fft-conv1d-filter-size', default=1536, type=int,
help='Conv-1D filter size')
out_fft.add_argument('--out-fft-output-size', default=384, type=int,
help='Output dim')
out_fft.add_argument('--p-out-fft-dropout', default=0.1, type=float,
help='Dropout probability for out_fft')
out_fft.add_argument('--p-out-fft-dropatt', default=0.1, type=float,
help='Multi-head attention dropout')
out_fft.add_argument('--p-out-fft-dropemb', default=0.0, type=float,
help='Dropout added to word+positional embeddings')
dur_pred = parser.add_argument_group('duration predictor parameters')
dur_pred.add_argument('--dur-predictor-kernel-size', default=3, type=int,
help='Duration predictor conv-1D kernel size')
dur_pred.add_argument('--dur-predictor-filter-size', default=256, type=int,
help='Duration predictor conv-1D filter size')
dur_pred.add_argument('--p-dur-predictor-dropout', default=0.1, type=float,
help='Dropout probability for duration predictor')
dur_pred.add_argument('--dur-predictor-n-layers', default=2, type=int,
help='Number of conv-1D layers')
pitch_pred = parser.add_argument_group('pitch predictor parameters')
pitch_pred.add_argument('--pitch-predictor-kernel-size', default=3, type=int,
help='Pitch predictor conv-1D kernel size')
pitch_pred.add_argument('--pitch-predictor-filter-size', default=256, type=int,
help='Pitch predictor conv-1D filter size')
pitch_pred.add_argument('--p-pitch-predictor-dropout', default=0.1, type=float,
help='Dropout probability for pitch predictor')
pitch_pred.add_argument('--pitch-predictor-n-layers', default=2, type=int,
help='Number of conv-1D layers')
energy_pred = parser.add_argument_group('energy predictor parameters')
energy_pred.add_argument('--energy-conditioning', action='store_true')
energy_pred.add_argument('--energy-predictor-kernel-size', default=3, type=int,
help='Energy predictor conv-1D kernel size')
energy_pred.add_argument('--energy-predictor-filter-size', default=256, type=int,
help='Energy predictor conv-1D filter size')
energy_pred.add_argument('--p-energy-predictor-dropout', default=0.1, type=float,
help='Dropout probability for energy predictor')
energy_pred.add_argument('--energy-predictor-n-layers', default=2, type=int,
help='Number of conv-1D layers')
cond = parser.add_argument_group('conditioning parameters')
cond.add_argument('--pitch-embedding-kernel-size', default=3, type=int,
help='Pitch embedding conv-1D kernel size')
cond.add_argument('--energy-embedding-kernel-size', default=3, type=int,
help='Energy embedding conv-1D kernel size')
cond.add_argument('--speaker-emb-weight', type=float, default=1.0,
help='Scale speaker embedding')
return parser
|
PyTorch/LanguageModeling/BART/utils | utils | data_utils | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import numpy as np
import torch
try:
from .utils import LegacySeq2SeqDataset
except ImportError:
from utils.utils import LegacySeq2SeqDataset
from torch.utils.data import DataLoader
import distributed_utils
class Seq2SeqDataLoader(DataLoader):
def __init__(self, type_path, data_dir, tokenizer, batch_size, device='cpu',
max_source_length=1024, max_target_length=1024, n_obs=None,
shuffle=False, sortish_sampler=False, num_workers=4):
"""
data -- list[LongTensor] -- there is no order among the LongTensors
"""
self.data_dir = data_dir
self.tokenizer = tokenizer
self.n_obs = n_obs
self.sortish_sampler = sortish_sampler
self.device = device
self.max_source_length = max_source_length
self.max_target_length = max_target_length
self.dataset = self.get_dataset(type_path)
# Partition data for DistributedDataParallel
world_size = distributed_utils.get_world_size()
rank = distributed_utils.get_rank()
sampler = None
if world_size > 1 and type_path == "train":
sampler =self.dataset.make_sortish_sampler(batch_size, distributed=True, rank=rank, num_replicas=world_size)
shuffle = False
super().__init__(
self.dataset,
batch_size=batch_size,
collate_fn=self.dataset.collate_fn,
shuffle=shuffle,
num_workers=num_workers,
sampler=sampler,
)
def get_dataset(self, type_path):
dataset = LegacySeq2SeqDataset(
data_dir=self.data_dir,
tokenizer=self.tokenizer,
type_path=type_path,
n_obs=self.n_obs,
max_source_length=self.max_source_length,
max_target_length=self.max_target_length,
src_lang="", tgt_lang=""
)
return dataset
|
PyTorch/SpeechSynthesis/Tacotron2/exports | exports | export_tacotron2_ts_config | # *****************************************************************************
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
import os
import argparse
def parse_args(parser):
"""
Parse commandline arguments.
"""
parser.add_argument("--trtis_model_name",
type=str,
default='tacotron2',
help="exports to appropriate directory for TRTIS")
parser.add_argument("--trtis_model_version",
type=int,
default=1,
help="exports to appropriate directory for TRTIS")
parser.add_argument("--trtis_max_batch_size",
type=int,
default=1,
help="Specifies the 'max_batch_size' in the TRTIS model config.\
See the TRTIS documentation for more info.")
parser.add_argument('--fp16', action='store_true',
help='inference with mixed precision')
return parser
def main():
parser = argparse.ArgumentParser(
description='PyTorch Tacotron 2 TRTIS config exporter')
parser = parse_args(parser)
args = parser.parse_args()
# prepare repository
model_folder = os.path.join('./trtis_repo', args.trtis_model_name)
version_folder = os.path.join(model_folder, str(args.trtis_model_version))
if not os.path.exists(version_folder):
os.makedirs(version_folder)
# build the config for TRTIS
config_filename = os.path.join(model_folder, "config.pbtxt")
config_template = r"""
name: "{model_name}"
platform: "pytorch_libtorch"
max_batch_size: {max_batch_size}
input [
{{
name: "sequence__0"
data_type: TYPE_INT64
dims: [-1]
}},
{{
name: "input_lengths__1"
data_type: TYPE_INT64
dims: [1]
reshape: {{ shape: [ ] }}
}}
]
output [
{{
name: "mel_outputs_postnet__0"
data_type: {fp_type}
dims: [80,-1]
}},
{{
name: "mel_lengths__1"
data_type: TYPE_INT32
dims: [1]
reshape: {{ shape: [ ] }}
}},
{{
name: "alignments__2"
data_type: {fp_type}
dims: [-1,-1]
}}
]
"""
config_values = {
"model_name": args.trtis_model_name,
"max_batch_size": args.trtis_max_batch_size,
"fp_type": "TYPE_FP16" if args.fp16 else "TYPE_FP32"
}
with open(model_folder + "/config.pbtxt", "w") as file:
final_config_str = config_template.format_map(config_values)
file.write(final_config_str)
if __name__ == '__main__':
main()
|
PyTorch/Detection/SSD | SSD | main | # Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
from argparse import ArgumentParser
import torch
import numpy as np
from torch.optim.lr_scheduler import MultiStepLR
import torch.utils.data.distributed
from ssd.model import SSD300, ResNet, Loss
from ssd.utils import dboxes300_coco, Encoder
from ssd.logger import Logger, BenchLogger
from ssd.evaluate import evaluate
from ssd.train import train_loop, tencent_trick, load_checkpoint, benchmark_train_loop, benchmark_inference_loop
from ssd.data import get_train_loader, get_val_dataset, get_val_dataloader, get_coco_ground_truth
import dllogger as DLLogger
# Apex imports
try:
from apex.parallel import DistributedDataParallel as DDP
except ImportError:
raise ImportError("Please install APEX from https://github.com/nvidia/apex")
def generate_mean_std(args):
mean_val = [0.485, 0.456, 0.406]
std_val = [0.229, 0.224, 0.225]
mean = torch.tensor(mean_val).cuda()
std = torch.tensor(std_val).cuda()
view = [1, len(mean_val), 1, 1]
mean = mean.view(*view)
std = std.view(*view)
return mean, std
def make_parser():
parser = ArgumentParser(description="Train Single Shot MultiBox Detector"
" on COCO")
parser.add_argument('--data', '-d', type=str, default='/coco', required=True,
help='path to test and training data files')
parser.add_argument('--epochs', '-e', type=int, default=65,
help='number of epochs for training')
parser.add_argument('--batch-size', '--bs', type=int, default=32,
help='number of examples for each iteration')
parser.add_argument('--eval-batch-size', '--ebs', type=int, default=32,
help='number of examples for each evaluation iteration')
parser.add_argument('--no-cuda', action='store_true',
help='do not use CUDA/GPUs even if available')
parser.add_argument('--seed', '-s', type=int,
help='manually set random seed for torch')
parser.add_argument('--checkpoint', type=str, default=None,
help='path to model checkpoint file')
parser.add_argument('--torchvision-weights-version', type=str, default="IMAGENET1K_V2",
choices=['IMAGENET1K_V1', 'IMAGENET1K_V2', 'DEFAULT'],
help='The torchvision weights version to use when --checkpoint is not specified')
parser.add_argument('--save', type=str, default=None,
help='save model checkpoints in the specified directory')
parser.add_argument('--mode', type=str, default='training',
choices=['training', 'evaluation', 'benchmark-training', 'benchmark-inference'])
parser.add_argument('--evaluation', nargs='*', type=int, default=[21, 31, 37, 42, 48, 53, 59, 64],
help='epochs at which to evaluate')
parser.add_argument('--multistep', nargs='*', type=int, default=[43, 54],
help='epochs at which to decay learning rate')
# Hyperparameters
parser.add_argument('--learning-rate', '--lr', type=float, default=2.6e-3,
help='learning rate')
parser.add_argument('--momentum', '-m', type=float, default=0.9,
help='momentum argument for SGD optimizer')
parser.add_argument('--weight-decay', '--wd', type=float, default=0.0005,
help='weight decay argument for SGD optimizer')
parser.add_argument('--warmup', type=int, default=None)
parser.add_argument('--benchmark-iterations', type=int, default=20, metavar='N',
help='Run N iterations while benchmarking (ignored during training and validation)')
parser.add_argument('--benchmark-warmup', type=int, default=20, metavar='N',
help='Number of warmup iterations for benchmarking')
parser.add_argument('--backbone', type=str, default='resnet50',
choices=['resnet18', 'resnet34', 'resnet50', 'resnet101', 'resnet152'])
parser.add_argument('--backbone-path', type=str, default=None,
help='Path to checkpointed backbone. It should match the'
' backbone model declared with the --backbone argument.'
' When it is not provided, pretrained model from torchvision'
' will be downloaded.')
parser.add_argument('--num-workers', type=int, default=8)
parser.add_argument("--amp", dest='amp', action="store_true",
help="Enable Automatic Mixed Precision (AMP).")
parser.add_argument("--no-amp", dest='amp', action="store_false",
help="Disable Automatic Mixed Precision (AMP).")
parser.set_defaults(amp=True)
parser.add_argument("--allow-tf32", dest='allow_tf32', action="store_true",
help="Allow TF32 computations on supported GPUs.")
parser.add_argument("--no-allow-tf32", dest='allow_tf32', action="store_false",
help="Disable TF32 computations.")
parser.set_defaults(allow_tf32=True)
parser.add_argument('--data-layout', default="channels_last", choices=['channels_first', 'channels_last'],
help="Model data layout. It's recommended to use channels_first with --no-amp")
parser.add_argument('--log-interval', type=int, default=20,
help='Logging interval.')
parser.add_argument('--json-summary', type=str, default=None,
help='If provided, the json summary will be written to'
'the specified file.')
# Distributed
parser.add_argument('--local_rank', default=os.getenv('LOCAL_RANK',0), type=int,
help='Used for multi-process training. Can either be manually set ' +
'or automatically set by using \'python -m multiproc\'.')
return parser
def train(train_loop_func, logger, args):
# Check that GPUs are actually available
use_cuda = not args.no_cuda
# Setup multi-GPU if necessary
args.distributed = False
if 'WORLD_SIZE' in os.environ:
args.distributed = int(os.environ['WORLD_SIZE']) > 1
if args.distributed:
torch.cuda.set_device(args.local_rank)
torch.distributed.init_process_group(backend='nccl', init_method='env://')
args.N_gpu = torch.distributed.get_world_size()
else:
args.N_gpu = 1
if args.seed is None:
args.seed = np.random.randint(1e4)
if args.distributed:
args.seed = (args.seed + torch.distributed.get_rank()) % 2**32
print("Using seed = {}".format(args.seed))
torch.manual_seed(args.seed)
np.random.seed(seed=args.seed)
# Setup data, defaults
dboxes = dboxes300_coco()
encoder = Encoder(dboxes)
cocoGt = get_coco_ground_truth(args)
train_loader = get_train_loader(args, args.seed - 2**31)
val_dataset = get_val_dataset(args)
val_dataloader = get_val_dataloader(val_dataset, args)
ssd300 = SSD300(backbone=ResNet(backbone=args.backbone,
backbone_path=args.backbone_path,
weights=args.torchvision_weights_version))
args.learning_rate = args.learning_rate * args.N_gpu * (args.batch_size / 32)
start_epoch = 0
iteration = 0
loss_func = Loss(dboxes)
if use_cuda:
ssd300.cuda()
loss_func.cuda()
optimizer = torch.optim.SGD(tencent_trick(ssd300), lr=args.learning_rate,
momentum=args.momentum, weight_decay=args.weight_decay)
scheduler = MultiStepLR(optimizer=optimizer, milestones=args.multistep, gamma=0.1)
if args.distributed:
ssd300 = DDP(ssd300)
if args.checkpoint is not None:
if os.path.isfile(args.checkpoint):
load_checkpoint(ssd300.module if args.distributed else ssd300, args.checkpoint)
checkpoint = torch.load(args.checkpoint,
map_location=lambda storage, loc: storage.cuda(torch.cuda.current_device()))
start_epoch = checkpoint['epoch']
iteration = checkpoint['iteration']
scheduler.load_state_dict(checkpoint['scheduler'])
optimizer.load_state_dict(checkpoint['optimizer'])
else:
print('Provided checkpoint is not path to a file')
return
inv_map = {v: k for k, v in val_dataset.label_map.items()}
total_time = 0
if args.mode == 'evaluation':
acc = evaluate(ssd300, val_dataloader, cocoGt, encoder, inv_map, args)
if args.local_rank == 0:
print('Model precision {} mAP'.format(acc))
return
scaler = torch.cuda.amp.GradScaler(enabled=args.amp)
mean, std = generate_mean_std(args)
for epoch in range(start_epoch, args.epochs):
start_epoch_time = time.time()
iteration = train_loop_func(ssd300, loss_func, scaler,
epoch, optimizer, train_loader, val_dataloader, encoder, iteration,
logger, args, mean, std)
if args.mode in ["training", "benchmark-training"]:
scheduler.step()
end_epoch_time = time.time() - start_epoch_time
total_time += end_epoch_time
if args.local_rank == 0:
logger.update_epoch_time(epoch, end_epoch_time)
if epoch in args.evaluation:
acc = evaluate(ssd300, val_dataloader, cocoGt, encoder, inv_map, args)
if args.local_rank == 0:
logger.update_epoch(epoch, acc)
if args.save and args.local_rank == 0:
print("saving model...")
obj = {'epoch': epoch + 1,
'iteration': iteration,
'optimizer': optimizer.state_dict(),
'scheduler': scheduler.state_dict(),
'label_map': val_dataset.label_info}
if args.distributed:
obj['model'] = ssd300.module.state_dict()
else:
obj['model'] = ssd300.state_dict()
os.makedirs(args.save, exist_ok=True)
save_path = os.path.join(args.save, f'epoch_{epoch}.pt')
torch.save(obj, save_path)
logger.log('model path', save_path)
train_loader.reset()
DLLogger.log((), { 'total time': total_time })
logger.log_summary()
def log_params(logger, args):
logger.log_params({
"dataset path": args.data,
"epochs": args.epochs,
"batch size": args.batch_size,
"eval batch size": args.eval_batch_size,
"no cuda": args.no_cuda,
"seed": args.seed,
"checkpoint path": args.checkpoint,
"mode": args.mode,
"eval on epochs": args.evaluation,
"lr decay epochs": args.multistep,
"learning rate": args.learning_rate,
"momentum": args.momentum,
"weight decay": args.weight_decay,
"lr warmup": args.warmup,
"backbone": args.backbone,
"backbone path": args.backbone_path,
"num workers": args.num_workers,
"AMP": args.amp,
"precision": 'amp' if args.amp else 'fp32',
})
if __name__ == "__main__":
parser = make_parser()
args = parser.parse_args()
args.local_rank = int(os.environ.get('LOCAL_RANK', args.local_rank))
if args.local_rank == 0:
os.makedirs('./models', exist_ok=True)
torch.backends.cuda.matmul.allow_tf32 = args.allow_tf32
torch.backends.cudnn.allow_tf32 = args.allow_tf32
torch.backends.cudnn.benchmark = True
# write json only on the main thread
args.json_summary = args.json_summary if args.local_rank == 0 else None
if args.mode == 'benchmark-training':
train_loop_func = benchmark_train_loop
logger = BenchLogger('Training benchmark', log_interval=args.log_interval,
json_output=args.json_summary)
args.epochs = 1
elif args.mode == 'benchmark-inference':
train_loop_func = benchmark_inference_loop
logger = BenchLogger('Inference benchmark', log_interval=args.log_interval,
json_output=args.json_summary)
args.epochs = 1
else:
train_loop_func = train_loop
logger = Logger('Training logger', log_interval=args.log_interval,
json_output=args.json_summary)
log_params(logger, args)
train(train_loop_func, logger, args)
|
PyTorch/LanguageModeling/Transformer-XL | Transformer-XL | README | # Transformer-XL For PyTorch
This repository provides a script and recipe to train the Transformer-XL model
to achieve state-of-the-art accuracy, and is tested and maintained by NVIDIA.
## Table Of Contents
<!-- TOC GFM -->
* [Model overview](#model-overview)
* [Model architecture](#model-architecture)
* [Default configuration](#default-configuration)
* [Feature support matrix](#feature-support-matrix)
* [Features](#features)
* [Mixed precision training](#mixed-precision-training)
* [Enabling mixed precision](#enabling-mixed-precision)
* [Enabling TF32](#enabling-tf32)
* [Setup](#setup)
* [Requirements](#requirements)
* [Quick Start Guide](#quick-start-guide)
* [Advanced](#advanced)
* [Scripts and sample code](#scripts-and-sample-code)
* [Parameters](#parameters)
* [Command-line options](#command-line-options)
* [Getting the data](#getting-the-data)
* [Dataset guidelines](#dataset-guidelines)
* [Multi-dataset](#multi-dataset)
* [Training process](#training-process)
* [Multi-node](#multi-node)
* [Inference process](#inference-process)
* [Performance](#performance)
* [Benchmarking](#benchmarking)
* [Training performance benchmark](#training-performance-benchmark)
* [Training performance benchmark for multi-node](#training-performance-benchmark-for-multi-node)
* [Inference performance benchmark](#inference-performance-benchmark)
* [Results](#results)
* [Training accuracy results](#training-accuracy-results)
* [Training accuracy: NVIDIA DGX A100 (8x A100 40GB)](#training-accuracy-nvidia-dgx-a100-8x-a100-40gb)
* [Base model](#base-model)
* [Large model](#large-model)
* [Training accuracy: NVIDIA DGX-1 (8x V100 16GB)](#training-accuracy-nvidia-dgx-1-8x-v100-16gb)
* [Base model](#base-model-1)
* [Large model](#large-model-1)
* [Training accuracy: NVIDIA DGX-2H (16x V100 32GB)](#training-accuracy-nvidia-dgx-2h-16x-v100-32gb)
* [Base model](#base-model-2)
* [Large model](#large-model-2)
* [Training accuracy: 8x NVIDIA DGX-2H (16x V100 32GB)](#training-accuracy-8x-nvidia-dgx-2h-16x-v100-32gb)
* [Large model](#large-model-3)
* [Training accuracy plots](#training-accuracy-plots)
* [Base model](#base-model-3)
* [Large model (single-node)](#large-model-single-node)
* [Large model (multi-node)](#large-model-multi-node)
* [Training stability test](#training-stability-test)
* [Base model](#base-model-4)
* [Large model (single-node)](#large-model-single-node-1)
* [Large model (multi-node)](#large-model-multi-node-1)
* [Training performance results](#training-performance-results)
* [Training performance: NVIDIA DGX A100 (8x A100 40GB)](#training-performance-nvidia-dgx-a100-8x-a100-40gb)
* [Base model](#base-model-5)
* [Large model](#large-model-4)
* [Training performance: NVIDIA DGX-1 (8x V100 16GB)](#training-performance-nvidia-dgx-1-8x-v100-16gb)
* [Base model](#base-model-6)
* [Large model](#large-model-5)
* [Training performance: NVIDIA DGX-2H (16x V100 32GB)](#training-performance-nvidia-dgx-2h-16x-v100-32gb)
* [Base model](#base-model-7)
* [Large model](#large-model-6)
* [Training performance: 8x NVIDIA DGX-2H (16x V100 32GB)](#training-performance-8x-nvidia-dgx-2h-16x-v100-32gb)
* [Large model](#large-model-7)
* [Inference performance results](#inference-performance-results)
* [Inference performance: NVIDIA DGX A100 (1x A100 40GB)](#inference-performance-nvidia-dgx-a100-1x-a100-40gb)
* [Base model](#base-model-8)
* [Large model](#large-model-8)
* [Inference performance: NVIDIA DGX-1 (1x V100 16GB)](#inference-performance-nvidia-dgx-1-1x-v100-16gb)
* [Base model](#base-model-9)
* [Large model](#large-model-9)
* [Inference performance: NVIDIA T4](#inference-performance-nvidia-t4)
* [Base model](#base-model-10)
* [Large model](#large-model-10)
* [Release notes](#release-notes)
* [Changelog](#changelog)
* [Known issues](#known-issues)
<!-- /TOC -->
## Model overview
This repository provides an implementation of the Transformer-XL model in
[PyTorch](https://pytorch.org) from the paper [Transformer-XL: Attentive
Language Models Beyond a Fixed-Length
Context](https://arxiv.org/abs/1901.02860). Transformer-XL is a
transformer-based language model with a segment-level recurrence and a novel
relative positional encoding. Enhancements introduced in Transformer-XL help
capture better long-term dependencies by attending to tokens from multiple
previous segments.
Our implementation is based on the
[codebase](https://github.com/kimiyoung/transformer-xl) published by the
authors of the Transformer-XL paper.
Our implementation uses a modified model architecture. Our
modifications were made to achieve better hardware utilization and to take
advantage of Tensor Cores. Similar modifications were also proposed in an
implementation available from
[github.com/cybertronai/transformer-xl](https://github.com/cybertronai/transformer-xl).
Refer to the [Model architecture](#model-architecture) section for more
details.
This model is trained with mixed precision using Tensor Cores on NVIDIA Volta
and the NVIDIA Ampere GPU architectures and evaluated on Volta, Turing and the
NVIDIA Ampere GPU architectures.
Therefore, researchers can get results up to 2.5x faster than training without
Tensor Cores, while experiencing the benefits of mixed precision training. This
model is tested against each NGC monthly container release to ensure consistent
accuracy and performance over time.
### Model architecture
The Transformer-XL "base" model for WikiText-103 dataset available in this
repository was modified to use the following hyperparameter values:
|**Hyperparameter**|**Description**|**Original setting for the base model**|**Our modification for the base model**|
|------------------|---------------|--------------------------------------:|--------------------------------------:|
| `d_model` | hidden size | 410 | 512 |
| `n_head` | number of attention heads | 10 | 8 |
| `d_head` | size of each attention head | 41 | 64 |
| `d_inner` | hidden size in fully-connected layers | 2100 | 2048 |
| `tgt_len` | number of tokens to predict during training | 150 | 192 |
| `mem_len` | number of tokens cached from previous iterations during training | 150 | 192 |
The changes described above align certain hyperparameters with powers of two;
with this modification, the model is able to achieve better hardware
utilization and, therefore, higher training throughput.
The Transformer-XL "large" model for WikiText-103 dataset available in this
repository uses the original hyperparameters from the [reference
implementation](https://github.com/kimiyoung/transformer-xl).
The following table lists the hyperparameters for the large and the base
Transformer-XL models for WikiText-103 dataset available in this repository.
| **Hyperparameter** | **Description** | **Base model** | **Large model** |
| ------------------ | ---------------------------------------------------------------- | -------------: | ---------------: |
| `n_layer` | number of layers | 16 | 18 |
| `d_model` | hidden size | 512 | 1024 |
| `n_head` | number of attention heads | 8 | 16 |
| `d_head` | size of each attention head | 64 | 64 |
| `d_inner` | inner hidden size in fully-connected layers | 2048 | 4096 |
| `dropout` | dropout | 0.1 | 0.2 |
| `dropatt` | dropout after softmax in the attention | 0.0 | 0.2 |
| `lr` | base learning rate | 0.01 | 0.01 |
| `eta_min` | minimum learning rate (for cosine decay) | 0.001 | 0.0001 |
| `max_step` | number of training steps | 40,000 | 100,000 |
| `warmup_step` | number of learning rate warmup steps | 1,000 | 16,000 |
| `batch_size` | training batch size | 256 | 128 |
| `tgt_len` | number of tokens to predict during training | 192 | 384 |
| `mem_len` | number of tokens cached from previous iterations during training | 192 | 384 |
The Transformer-XL model addresses the limitations of vanilla transformer-based
language models, which are only able to use relatively short context, bounded
by the segment length. The Transformer-XL introduces a recurrence mechanism,
which is able to use a cached hidden state from previous segments. During
training, the context consists of a concatenation of current segment's hidden
state and cached states from previous iterations. Gradients are backpropagated
only through the current segment, although the model is able to take advantage
of the extra information stored in the cache and therefore is able to model
long-term dependencies.
An illustration of the recurrence mechanism taken from the [Transformer-XL
paper](https://arxiv.org/abs/1901.02860) is shown below.

### Default configuration
The following features were implemented in this model:
* general
* single-node or multi-node, data-parallel multi-GPU training
* training and inference with mixed precision using Tensor Cores
* mixed precision training implemented using
[Apex AMP](https://nvidia.github.io/apex/amp.html), with `O2` optimization
level and with a dynamic loss scaling
* model
* 16-layer base Transformer-XL model with hidden size 512, 8 attention heads,
each head with hidden size 64
* 18-layer large Transformer-XL model with hidden size 1024, 16 attention
heads, each head with hidden size 64
* the model trained on
[WikiText-103](https://blog.einstein.ai/the-wikitext-long-term-dependency-language-modeling-dataset/)
dataset, using word-level vocabulary and
adaptive softmax
* embedding weights are tied with weights in the classifier
* training
* training with [LAMB](https://arxiv.org/abs/1904.00962) optimizer, the
implementation of the optimizer uses
[TorchScript](https://pytorch.org/docs/stable/jit.html), which enables
the fusion of elementwise operations and accelerates the training
* support for training with a gradient accumulation
* base model:
* linear learning rate warmup for 1,000 iterations, followed by the cosine
learning rate schedule; the initial learning rate is set to 0.01, and the final
learning rate is set to 0.001 (a sketch of this schedule is given after this list)
* training for 40,000 steps, using a batch size of 256
* large model:
* single node:
* linear learning rate warmup for 16,000 iterations, followed by the cosine
learning rate schedule, the initial learning rate is set to 0.01, and the final
learning rate is set to 0.0001
* training for 100,000 steps, using a batch size of 128
* multi node:
* linear learning rate warmup for 16,000 iterations, followed by the cosine
learning rate schedule, the initial learning rate is set to 0.02, and the final
learning rate is set to 0.0002
* training for 25,000 steps, using a batch size of 512
* inference
* support for multi-gpu inference
* support for [TorchScript](https://pytorch.org/docs/stable/jit.html) and
pure Python inference
* each token uses the same amount of context from previous time steps.
* base model:
* target length is set to 64, length of memory is set to 640
* positional embeddings are clamped after 400 time steps
* large model:
* target length is set to 128, length of memory is set to 1,600
* positional embeddings are clamped after 1,000 time steps
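As referenced in the base model configuration above, the learning rate schedule (linear warmup followed by cosine decay to `eta_min`) can be sketched as follows. The numbers mirror the base model recipe; the sketch is illustrative and is not the training script's implementation.
```python
# Sketch of linear warmup followed by cosine decay, using the base model's values.
import math

def lr_at_step(step, base_lr=0.01, eta_min=0.001, warmup_step=1000, max_step=40000):
    if step < warmup_step:
        return base_lr * step / warmup_step
    progress = (step - warmup_step) / (max_step - warmup_step)
    return eta_min + 0.5 * (base_lr - eta_min) * (1 + math.cos(math.pi * progress))

print(lr_at_step(500))    # 0.005: halfway through warmup
print(lr_at_step(1000))   # 0.01:  peak learning rate
print(lr_at_step(40000))  # 0.001: eta_min at the end of training
```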
### Feature support matrix
The following features are supported by this model:
| **Feature** | **Transformer-XL** |
|:------------|-------------------:|
|[Apex AMP](https://nvidia.github.io/apex/amp.html) | Yes |
|[PyTorch DistributedDataParallel](https://pytorch.org/docs/stable/nn.html#torch.nn.parallel.DistributedDataParallel) | Yes |
|[LAMB](https://arxiv.org/abs/1904.00962v3) | Yes |
| Inference with [TorchScript](https://pytorch.org/docs/stable/jit.html) | Yes |
| Multi-node training | Yes |
#### Features
[Apex AMP](https://nvidia.github.io/apex/amp.html) - a tool that enables Tensor
Core-accelerated training. Refer to the [Enabling mixed
precision](#enabling-mixed-precision) section for more details.
[PyTorch
DistributedDataParallel](https://pytorch.org/docs/stable/nn.html#torch.nn.parallel.DistributedDataParallel) - a module
wrapper that enables easy multiprocess distributed data-parallel
training.
[LAMB](https://arxiv.org/abs/1904.00962v3) - stands for Layerwise Adaptive
Moments Based optimizer, is a large batch optimization technique that helps
accelerate training of deep neural networks using large minibatches.
[TorchScript](https://pytorch.org/docs/stable/jit.html) - is a way to create
serializable and optimizable models from PyTorch code. Any TorchScript program
can be saved from a Python process and loaded in a process where there is no
Python dependency.
### Mixed precision training
Mixed precision is the combined use of different numerical precisions in a
computational method.
[Mixed precision](https://arxiv.org/abs/1710.03740) training offers significant
computational speedup by performing operations in half-precision format while
storing minimal information in single-precision to retain as much information
as possible in critical parts of the network. Since the introduction of [Tensor
Cores](https://developer.nvidia.com/tensor-cores) in Volta, and following with
both the Turing and Ampere architectures, significant training speedups are
experienced by switching to
mixed precision -- up to 3x overall speedup on the most arithmetically intense
model architectures. Using mixed precision training previously required two
steps:
1. Porting the model to use the FP16 data type where appropriate.
2. Manually adding loss scaling to preserve small gradient values.
The ability to train deep learning networks with lower precision was introduced
in the Pascal architecture and first supported in [CUDA
8](https://devblogs.nvidia.com/parallelforall/tag/fp16/) in the NVIDIA Deep
Learning SDK.
For information about:
* How to train using mixed precision, see the [Mixed Precision
Training](https://arxiv.org/abs/1710.03740) paper and [Training With Mixed
Precision](https://docs.nvidia.com/deeplearning/sdk/mixed-precision-training/index.html)
documentation.
* Techniques used for mixed precision training, see the [Mixed-Precision
Training of Deep Neural
Networks](https://devblogs.nvidia.com/mixed-precision-training-deep-neural-networks/)
blog.
* APEX tools for mixed precision training, see the [NVIDIA Apex: Tools for Easy
Mixed-Precision Training in
PyTorch](https://devblogs.nvidia.com/apex-pytorch-easy-mixed-precision-training/)
.
#### Enabling mixed precision
The `pytorch/train.py` training script launches mixed precision training
with Tensor Cores if the flag `--fp16` is set.
Mixed precision is enabled in PyTorch by using the Automatic Mixed Precision
(AMP) library from [APEX](https://github.com/NVIDIA/apex), which casts variables
to half-precision upon retrieval while storing variables in single-precision
format. Furthermore, to preserve small gradient magnitudes in backpropagation,
a [loss
scaling](https://docs.nvidia.com/deeplearning/sdk/mixed-precision-training/index.html#lossscaling)
step must be included when applying gradients. In PyTorch, loss scaling can be
easily applied by using the `scale_loss()` method provided by AMP. The scaling
value can be
[dynamic](https://nvidia.github.io/apex/amp.html#apex.amp.initialize) or fixed.
For an in-depth walkthrough of AMP, check out the sample usage
[here](https://nvidia.github.io/apex/amp.html#).
[APEX](https://github.com/NVIDIA/apex) is a PyTorch extension that contains
utility libraries, such as AMP, which require minimal network code changes to
leverage Tensor Cores performance.
The following steps were needed to enable mixed precision training in
Transformer-XL:
1. Import AMP from APEX:
```
from apex import amp
```
2. Initialize AMP and wrap the model and the optimizer before starting the
training:
```
model, optimizer = amp.initialize(
model,
optimizer,
opt_level='O2',
)
```
3. Apply `scale_loss` context manager:
```
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
```
4. Apply gradient clipping on single precision master weights:
```
torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.clip)
```
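Taken together, the steps above amount to a training-loop pattern like the following minimal sketch (a toy model and an illustrative clipping threshold, assuming the NGC container with APEX installed and a CUDA device; this is not the actual `train.py` code):
```
import torch
from apex import amp

model = torch.nn.Linear(8, 1).cuda()
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
criterion = torch.nn.MSELoss()

# Step 2: wrap the model and optimizer; O2 keeps FP32 master weights.
model, optimizer = amp.initialize(model, optimizer, opt_level='O2')

inputs = torch.randn(4, 8).cuda()
targets = torch.randn(4, 1).cuda()

loss = criterion(model(inputs), targets)

# Step 3: scale the loss to preserve small gradients in FP16.
with amp.scale_loss(loss, optimizer) as scaled_loss:
    scaled_loss.backward()

# Step 4: clip gradients on the FP32 master weights (threshold illustrative).
torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), max_norm=0.25)

optimizer.step()
optimizer.zero_grad()
```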
#### Enabling TF32
TensorFloat-32 (TF32) is the new math mode in [NVIDIA
A100](https://www.nvidia.com/en-us/data-center/a100/) GPUs for handling the
matrix math also called tensor operations. TF32 running on Tensor Cores in A100
GPUs can provide up to 10x speedups compared to single-precision floating-point
math (FP32) on Volta GPUs.
TF32 Tensor Cores can speed up networks using FP32, typically with no loss of
accuracy. It is more robust than FP16 for models which require high dynamic
range for weights or activations.
For more information, refer to the [TensorFloat-32 in the A100 GPU Accelerates
AI Training, HPC up to
20x](https://blogs.nvidia.com/blog/2020/05/14/tensorfloat-32-precision-format/)
blog post.
TF32 is supported in the NVIDIA Ampere GPU architecture and is enabled by
default.
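No code changes are required to use TF32. If you want to inspect or toggle it explicitly from Python, recent PyTorch releases expose global switches (their default values may vary with the PyTorch version):
```
import torch

# Global switches controlling TF32 usage (defaults vary by PyTorch version).
torch.backends.cuda.matmul.allow_tf32 = True   # matrix multiplications on Tensor Cores
torch.backends.cudnn.allow_tf32 = True         # cuDNN convolutions

print(torch.backends.cuda.matmul.allow_tf32, torch.backends.cudnn.allow_tf32)
```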
## Setup
The following section lists the requirements that you need to meet in order to
start training the Transformer-XL model.
### Requirements
This repository contains a `Dockerfile` which extends the PyTorch NGC container
and encapsulates some dependencies. Aside from these dependencies, ensure you
have the following components:
* [NVIDIA Docker](https://github.com/NVIDIA/nvidia-docker)
* [PyTorch 20.06-py3 NGC container](https://ngc.nvidia.com/registry/nvidia-pytorch)
* GPU architecture:
* [NVIDIA Volta](https://www.nvidia.com/en-us/data-center/volta-gpu-architecture/)
* [NVIDIA Turing](https://www.nvidia.com/en-us/geforce/turing/)
* [NVIDIA Ampere architecture](https://www.nvidia.com/en-us/data-center/nvidia-ampere-gpu-architecture/)
For more information about how to get started with NGC containers, see the
following sections from the NVIDIA GPU Cloud Documentation and the Deep
Learning DGX Documentation:
* [Getting Started Using NVIDIA GPU Cloud](https://docs.nvidia.com/ngc/ngc-getting-started-guide/index.html),
* [Accessing And Pulling From The NGC container registry](https://docs.nvidia.com/deeplearning/dgx/user-guide/index.html#accessing_registry),
* [Running PyTorch](https://docs.nvidia.com/deeplearning/dgx/pytorch-release-notes/running.html#running).
For those unable to use the PyTorch NGC container, to set up the required
environment or create your own container, see the versioned [NVIDIA Container
Support
Matrix](https://docs.nvidia.com/deeplearning/frameworks/support-matrix/index.html).
For multi-node, the sample provided in this repository requires
[Enroot](https://github.com/NVIDIA/enroot) and
[Pyxis](https://github.com/NVIDIA/pyxis) set up on a
[SLURM](https://slurm.schedmd.com) cluster.
## Quick Start Guide
To train your model using mixed or TF32 precision with Tensor Cores or using
FP32, perform the following steps using the default parameters of the
Transformer-XL base model on the
[WikiText-103](https://blog.einstein.ai/the-wikitext-long-term-dependency-language-modeling-dataset/)
dataset.
For the specifics concerning training
and inference, see the [Advanced](#advanced) section.
1. Clone the repository.
```
git clone https://github.com/NVIDIA/DeepLearningExamples
cd DeepLearningExamples/PyTorch/LanguageModeling/Transformer-XL
```
2. Download and preprocess the dataset.
```
bash getdata.sh
```
3. Build the Transformer-XL PyTorch NGC container.
```
bash pytorch/scripts/docker/build.sh
```
4. Start an interactive session in the NGC container to run training/inference.
```
bash pytorch/scripts/docker/interactive.sh
```
5. Start training.
This repository contains a number of predefined configurations to run the
training on NVIDIA DGX-1, NVIDIA DGX-2H or NVIDIA DGX A100 nodes.
To start the training on NVIDIA DGX-1 or NVIDIA DGX-2H, run:
```
bash run_wt103_{base,large}.sh train <#GPUs> --config {dgx1,dgx2}_<#GPUs>gpu_{fp16,fp32}
```
To start the training on NVIDIA DGX A100, run:
```
bash run_wt103_{base,large}.sh train <#GPUs> --config dgxa100_<#GPUs>gpu_{fp16,tf32}
```
* use the `run_wt103_base.sh` script to train the base model, and use the
`run_wt103_large.sh` script to train the large model
* the training is executed on `<#GPUs>` GPUs, supported values for `<#GPUs>`
for NVIDIA DGX-1 and NVIDIA DGX A100 are: 1, 2, 4, 8 and for NVIDIA DGX-2H:
1, 2, 4, 8, 16
* use configs with the `dgx1` prefix to run on a NVIDIA DGX-1, configs with the
`dgx2` prefix to run on a NVIDIA DGX-2H and configs with the `dgxa100` prefix
to run on a NVIDIA DGX A100
* configs with the `fp16` suffix are launching mixed precision training,
configs with the `fp32` suffix are launching FP32 training, configs with the
`tf32` suffix are launching TF32 training
Examples:
To launch TF32 training of the base Transformer-XL model on a NVIDIA DGX A100
using 8 GPUs, run:
```
bash run_wt103_base.sh train 8 --config dgxa100_8gpu_tf32
```
To launch FP32 training of the base Transformer-XL model on a NVIDIA DGX-1
using 8 GPUs, run:
```
bash run_wt103_base.sh train 8 --config dgx1_8gpu_fp32
```
To launch mixed precision training of the large Transformer-XL model on a
NVIDIA DGX-2H using 16 GPUs, run:
```
bash run_wt103_large.sh train 16 --config dgx2_16gpu_fp16
```
To launch mixed precision training of the large Transformer-XL model on a
NVIDIA DGX A100 using 8 GPUs, run:
```
bash run_wt103_large.sh train 8 --config dgxa100_8gpu_fp16
```
To run on multiple nodes, see the [Multi-node](#multi-node) section.
For more information on the available options, and for an explanation of what
happens at the end of training, refer to the [Training
process](#training-process) section.
6. Start evaluation.
To start inference on the test set using `<#GPUs>` GPUs, run:
```
bash run_wt103_{base,large}.sh eval <#GPUs> [--fp16] [--type {pytorch, torchscript}]
```
Select `run_wt103_base.sh` for the base Transformer-XL model and
`run_wt103_large.sh` for the large Transformer-XL model.
The `--fp16` flag is optional; however, if it's specified, then the script
launches mixed precision inference with Tensor Cores. If the flag is not
present, then the script launches FP32 inference on NVIDIA Volta and NVIDIA
Turing GPUs and TF32 inference on NVIDIA Ampere GPUs.
By default, the script loads the checkpoint from
`LM-TFM/checkpoint_best.pt`, which contains the model corresponding to the
lowest value of the validation loss from the previous training run. The path to
the checkpoint can be customized by setting the `--model` flag.
Inference can use either pure Python execution or TorchScript, selected with the
`--type` flag.
Supported values for `<#GPUs>` are: 1, 2, 4, 8 for NVIDIA DGX-1 and NVIDIA DGX
A100 and 1, 2, 4, 8, 16 for NVIDIA DGX-2H.
Additionally, one can pass the input text directly from the command-line using
the `--manual` flag. This mode of operation supports only 1 GPU and batch size
of 1. The script outputs average loss and perplexity for the provided input
text.
Examples:
```
bash run_wt103_base.sh eval 1 \
--model LM-TFM/checkpoint_best.pt \
--fp16 \
--manual "recognize speech"
===============================================================================
| test loss 6.20 | test ppl 494.291
===============================================================================
```
```
bash run_wt103_base.sh eval 1 \
--model LM-TFM/checkpoint_best.pt \
--fp16 \
--manual "wreck a nice beach"
===============================================================================
| test loss 8.04 | test ppl 3099.706
===============================================================================
```
For more information on the available options, refer to the [Inference
process](#inference-process) section.
## Advanced
The following sections provide greater details of the dataset, running training
and inference, and the training results.
### Scripts and sample code
In the root directory, the most important files are:
* `Dockerfile`: container with the basic set of dependencies to run
Transformer-XL
* `requirements.txt`: set of extra requirements for running Transformer-XL
* `getdata.sh`: script for downloading datasets
In the `pytorch` directory, the most important files are:
* `data_utils.py`: data loading utilities
* `eval.py`: serves as the entry point to launch the evaluation and inference
* `lamb.py`: implementation of [LAMB](https://arxiv.org/abs/1904.00962)
optimizer
* `mem_transformer.py`: implementation of the Transformer-XL model
* `train.py`: serves as the entry point to launch the training
* `run.sub`: Slurm batch script for launching multi-node training
The `pytorch/utils` directory contains the following additional modules:
* `adaptive_softmax.py`: implementation of adaptive softmax
* `data_parallel.py`: implementation of `BalancedDataParallel` class
* `distributed.py`: utility functions for running distributed training
* `exp_utils.py`: utility functions for running training and benchmarking
* `log_uniform_sampler.py`: implementation of log-uniform sampler
* `proj_adaptive_softmax.py`: implementation of projected adaptive softmax
* `vocabulary.py`: implementation of word-level vocabulary and BPE-based
vocabulary
The `pytorch/inference` directory contains modules optimized for running
inference with TorchScript:
* `mem_transformer_jit.py`: implementation of TorchScript-compatible
Transformer-XL model
* `proj_adaptive_softmax_jit.py`: implementation of TorchScript-compatible
projected adaptive softmax
### Parameters
**Training**
The complete list of available parameters for the `pytorch/train.py` training
script contains:
```
general setup:
--work_dir WORK_DIR Directory for the results
--append_dataset Automatically append dataset name to work_dir
--append_time Automatically append current time to work_dir
--cuda Run training on a GPU using CUDA
--fp16 Run training in fp16/mixed precision
--restart RESTART Restart training from the saved checkpoint
--debug Run in debug mode (do not create exp dir)
--log_all_ranks Enable logging from all distributed ranks
--dllog_file DLLOG_FILE
Name of the DLLogger output file
--txtlog_file TXTLOG_FILE
Name of the txt log file
--save_all Save all checkpoints
--no_env Do not print info on execution env
--no_eval Disable model evaluation
--log_interval LOG_INTERVAL
Report interval
--target_throughput TARGET_THROUGHPUT
Target training throughput (for benchmarking)
--target_perplexity TARGET_PERPLEXITY
Target validation perplexity (for benchmarking)
--amp_mode {O0,O1,O2,O3}
Optimization level for apex amp
dataset setup:
--data DATA Location of the data corpus
--dataset {wt103,lm1b,enwik8,text8}
Dataset name
--vocab {word,bpe} Type of vocabulary
model setup:
--n_layer N_LAYER Number of total layers
--n_head N_HEAD Number of heads
--d_head D_HEAD Head dimension
--d_embed D_EMBED Embedding dimension
--d_model D_MODEL Model dimension
--d_inner D_INNER Inner dimension in feedforward layer
--dropout DROPOUT Global dropout rate
--dropatt DROPATT Attention probability dropout rate
--pre_lnorm Apply LayerNorm to the input instead of the output
--attn_type ATTN_TYPE
Attention type. 0 for ours, 1 for Shaw et al,2 for
Vaswani et al, 3 for Al Rfou et al.
--not_tied Do not tie the word embedding and softmax weights
--clamp_len CLAMP_LEN
Use the same pos embeddings after clamp_len
--adaptive Use adaptive softmax
--div_val DIV_VAL Dividend value for adaptive input and softmax
--sample_softmax SAMPLE_SOFTMAX
Number of samples in sampled softmax
--init INIT Parameter initializer to use
--emb_init EMB_INIT Parameter initializer to use
--init_range INIT_RANGE
Parameters initialized by U(-init_range, init_range)
--emb_init_range EMB_INIT_RANGE
Parameters initialized by U(-init_range, init_range)
--init_std INIT_STD Parameters initialized by N(0, init_std)
--proj_init_std PROJ_INIT_STD
Parameters initialized by N(0, init_std)
optimizer setup:
--optim {adam,sgd,adagrad,lamb,jitlamb}
Optimizer to use
--lr LR Initial learning rate
--mom MOM Momentum for sgd
--scheduler {cosine,inv_sqrt,dev_perf,constant}
LR scheduler to use
--max_step_scheduler MAX_STEP_SCHEDULER
Max number of training steps for LR scheduler
--warmup_step WARMUP_STEP
Number of iterations for LR warmup
--decay_rate DECAY_RATE
Decay factor when ReduceLROnPlateau is used
--lr_min LR_MIN Minimum learning rate during annealing
--clip CLIP Gradient clipping
--weight_decay WEIGHT_DECAY
Weight decay for adam|lamb
--clip_nonemb Only clip the gradient of non-embedding params
--patience PATIENCE Patience
--eta_min ETA_MIN Min learning rate for cosine scheduler
training setup:
--max_step MAX_STEP Max number of training steps
--batch_size BATCH_SIZE
Global batch size
--local_batch_size LOCAL_BATCH_SIZE
Local (per-device) batch size, this setting overrides
global --batch_size and sets batch_size to
local_batch_size * world_size
--batch_chunk BATCH_CHUNK
Split batch into chunks and train with gradient
accumulation
--roll Enable random shifts within each data stream
--tgt_len TGT_LEN Number of tokens to predict
--ext_len EXT_LEN Length of the extended context
--mem_len MEM_LEN Length of the retained previous heads
--seed SEED Random seed
--multi_gpu {ddp,dp} Use multiple GPU
--gpu0_bsz GPU0_BSZ Batch size on gpu 0 (for "dp" backend)
--same_length Use the same attn length for all tokens
--varlen Use variable length
validation setup:
--eval_tgt_len EVAL_TGT_LEN
Number of tokens to predict for evaluation
--eval_batch_size EVAL_BATCH_SIZE
Eval batch size
--eval_max_steps EVAL_MAX_STEPS
Max eval steps
--eval_interval EVAL_INTERVAL
Evaluation interval
```
**Inference**
The complete list of available parameters for the `eval.py` inference
script contains:
```
--work_dir WORK_DIR experiment directory
--debug run in debug mode (do not create exp dir)
--data DATA location of the data corpus
--manual MANUAL [MANUAL ...]
run model on raw input data
--dataset {wt103,lm1b,enwik8,text8}
dataset name
--split {all,valid,test}
which split to evaluate
--type {pytorch,torchscript}
type of runtime to use
--batch_size BATCH_SIZE
batch size
--tgt_len TGT_LEN number of tokens to predict
--ext_len EXT_LEN length of the extended context
--mem_len MEM_LEN length of the retained previous heads
--seed SEED Random seed
--clamp_len CLAMP_LEN
max positional embedding index
--cuda Run evaluation on a GPU using CUDA
--model MODEL path to the checkpoint
--manual_config MANUAL_CONFIG
Manually specify config for the model
--manual_vocab {word,bpe}
Manually specify type of vocabulary
--fp16 Run training in fp16/mixed precision
--log_all_ranks Enable logging for all distributed ranks
--dllog_file DLLOG_FILE
Name of the DLLogger output file
--same_length set same length attention with masking
--no_env Do not print info on execution env
--log_interval LOG_INTERVAL
Report interval
--target_perplexity TARGET_PERPLEXITY
target perplexity
--target_throughput TARGET_THROUGHPUT
target throughput
--save_data save latency and throughput data to a file
--repeat REPEAT loop over the dataset REPEAT times
--max_size MAX_SIZE run inference on up to MAX_SIZE batches
--percentiles PERCENTILES [PERCENTILES ...]
percentiles for latency confidence intervals
--save_torchscript SAVE_TORCHSCRIPT
save torchscript model to a file
--load_torchscript LOAD_TORCHSCRIPT
load torchscript model from a file
```
### Command-line options
To see the full list of available options and their descriptions, use the `-h`
or `--help` command-line option. For example, for training:
```
python3 train.py --help
usage: train.py [-h] [--work_dir WORK_DIR] [--append_dataset] [--append_time]
[--cuda] [--fp16] [--restart RESTART] [--debug]
[--log_all_ranks] [--dllog_file DLLOG_FILE]
[--txtlog_file TXTLOG_FILE] [--save_all] [--no_env]
[--no_eval] [--log_interval LOG_INTERVAL]
[--target_throughput TARGET_THROUGHPUT]
[--target_perplexity TARGET_PERPLEXITY]
[--amp_mode {O0,O1,O2,O3}] [--data DATA]
[--dataset {wt103,lm1b,enwik8,text8}] [--vocab {word,bpe}]
[--n_layer N_LAYER] [--n_head N_HEAD] [--d_head D_HEAD]
[--d_embed D_EMBED] [--d_model D_MODEL] [--d_inner D_INNER]
[--dropout DROPOUT] [--dropatt DROPATT] [--pre_lnorm]
[--attn_type ATTN_TYPE] [--not_tied] [--clamp_len CLAMP_LEN]
[--adaptive] [--div_val DIV_VAL]
[--sample_softmax SAMPLE_SOFTMAX] [--init INIT]
[--emb_init EMB_INIT] [--init_range INIT_RANGE]
[--emb_init_range EMB_INIT_RANGE] [--init_std INIT_STD]
[--proj_init_std PROJ_INIT_STD]
[--optim {adam,sgd,adagrad,lamb,jitlamb}] [--lr LR]
[--mom MOM] [--scheduler {cosine,inv_sqrt,dev_perf,constant}]
[--max_step_scheduler MAX_STEP_SCHEDULER]
[--warmup_step WARMUP_STEP] [--decay_rate DECAY_RATE]
[--lr_min LR_MIN] [--clip CLIP] [--weight_decay WEIGHT_DECAY]
[--clip_nonemb] [--patience PATIENCE] [--eta_min ETA_MIN]
[--max_step MAX_STEP] [--batch_size BATCH_SIZE]
[--local_batch_size LOCAL_BATCH_SIZE]
[--batch_chunk BATCH_CHUNK] [--roll] [--tgt_len TGT_LEN]
[--ext_len EXT_LEN] [--mem_len MEM_LEN] [--seed SEED]
[--multi_gpu {ddp,dp}] [--gpu0_bsz GPU0_BSZ] [--same_length]
[--varlen] [--eval_tgt_len EVAL_TGT_LEN]
[--eval_batch_size EVAL_BATCH_SIZE]
[--eval_max_steps EVAL_MAX_STEPS]
[--eval_interval EVAL_INTERVAL] [--local_rank LOCAL_RANK]
```
For example, for inference:
```
python3 eval.py --help
usage: eval.py [-h] [--work_dir WORK_DIR] [--debug] [--data DATA]
[--manual MANUAL [MANUAL ...]]
[--dataset {wt103,lm1b,enwik8,text8}]
[--split {all,valid,test}] [--type {pytorch,torchscript}]
[--batch_size BATCH_SIZE] [--tgt_len TGT_LEN]
[--ext_len EXT_LEN] [--mem_len MEM_LEN] [--seed SEED]
[--clamp_len CLAMP_LEN] [--cuda] [--model MODEL]
[--manual_config MANUAL_CONFIG] [--manual_vocab {word,bpe}]
[--fp16] [--log_all_ranks] [--dllog_file DLLOG_FILE]
[--same_length] [--no_env] [--log_interval LOG_INTERVAL]
[--target_perplexity TARGET_PERPLEXITY]
[--target_throughput TARGET_THROUGHPUT] [--save_data]
[--repeat REPEAT] [--max_size MAX_SIZE]
[--percentiles PERCENTILES [PERCENTILES ...]]
[--save_torchscript SAVE_TORCHSCRIPT]
[--load_torchscript LOAD_TORCHSCRIPT] [--local_rank LOCAL_RANK]
```
### Getting the data
The Transformer-XL model was trained on the
[WikiText-103](https://blog.einstein.ai/the-wikitext-long-term-dependency-language-modeling-dataset/)
dataset. The WikiText-103 dataset is a collection of over 100 million tokens
extracted from the set of verified
[Good](https://en.wikipedia.org/wiki/Wikipedia:Good_articles) and
[Featured](https://en.wikipedia.org/wiki/Wikipedia:Featured_articles) articles
on Wikipedia.
This repository contains the `getdata.sh` download script which
automatically downloads and extracts the training, validation and test
datasets. By default, data is downloaded to the `data` directory.
In order to test with other datasets, the script needs to be customized
accordingly.
#### Dataset guidelines
The WikiText-103 dataset was already pre-tokenized with word-level tokens. The
dataset features a large vocabulary of 267,735 tokens and retains the original
case, punctuation and numbers.
The `getdata.sh` script downloads the data, extracts the archive and renames
the training, validation, and test set to `train.txt`, `valid.txt`, `test.txt`
respectively.
#### Multi-dataset
Using other datasets requires changes in the following files:
* `pytorch/train.py`:
* the name of the new dataset should be added to the `dataset` argument in
the `parse_args()` function
* desired values of cutoffs for adaptive softmax should be added in the
`main()` function, after the section which builds train/valid/test data
iterators
* `pytorch/data_utils.py`:
* the support for the new dataset needs to be added to the `Corpus` class:
names of files containing training, validation and test data, options for
the tokenizer, and dataset iterator
The current codebase supports training with a word-level vocabulary
(automatically generated based on the provided dataset) and with a BPE vocabulary
(using the pre-built vocabulary from the pretrained GPT2 model imported from
[github.com/huggingface/transformers](https://github.com/huggingface/transformers)).
Additionally, using other datasets may require changes in some hyperparameters
(for example, batch size, learning rate, number of training steps,
and the configuration of learning rate scheduler).
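As a rough sketch of the first change (the dataset name `mycorpus` below is hypothetical, and this is not the repository's actual code), the new name simply becomes an allowed value of the existing `--dataset` argument:
```
import argparse

# Sketch of the change in parse_args(): add the hypothetical dataset name
# 'mycorpus' to the existing --dataset choices.
parser = argparse.ArgumentParser()
parser.add_argument('--dataset', default='wt103',
                    choices=['wt103', 'lm1b', 'enwik8', 'text8', 'mycorpus'],
                    help='Dataset name')
args = parser.parse_args(['--dataset', 'mycorpus'])
print(args.dataset)
```
The corresponding change in `pytorch/data_utils.py` adds an analogous branch to the `Corpus` class that points at the new training, validation, and test files and selects the appropriate tokenizer options.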
### Training process
The default training configuration can be launched by running the
`run_wt103_base.sh` or the `run_wt103_large.sh` script with the first argument
set to `train`. By default, the training results are saved to the `LM-TFM`
directory; this can be customized by setting the `--work_dir` parameter.
The training script launches single-node data-parallel training with a fixed
global batch size of 256, optionally with gradient accumulation to allow
training on configurations with fewer than 8 GPUs. Logs from the training are
automatically saved to the `LM-TFM/train_log.log` file.
**Command-line**
You can launch training of the Transformer-XL base/large model on the
WikiText-103 dataset with the word-based vocabulary and adaptive softmax using
`<#GPUs>` GPUs. For example:
```
bash run_wt103_base.sh train <#GPUs> [--fp16] [--batch_chunk CHUNK]
```
and
```
bash run_wt103_large.sh train <#GPUs> [--fp16] [--batch_chunk CHUNK]
```
The `--fp16` flag is optional; however, if it's specified, then the script
launches mixed precision training with Tensor Cores. If the flag is not
present, then the script launches FP32 training on NVIDIA Volta GPUs and TF32
training on NVIDIA Ampere GPUs.
The `--batch_chunk CHUNK` parameter controls gradient accumulation. With
gradient accumulation, the batch size is split into `CHUNK` chunks of equal
size; the training script executes the forward and backward pass for each
chunk and then executes the optimizer step using the accumulated gradients.
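Conceptually, gradient accumulation over `CHUNK` chunks looks like the sketch below (a generic toy example, not the repository's implementation):
```
import torch

model = torch.nn.Linear(8, 1)
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
criterion = torch.nn.MSELoss()

batch_chunk = 2                      # value of --batch_chunk
inputs = torch.randn(16, 8)          # one local batch
targets = torch.randn(16, 1)

optimizer.zero_grad()
# Split the batch into equal chunks and accumulate gradients over them.
for x, y in zip(inputs.chunk(batch_chunk), targets.chunk(batch_chunk)):
    loss = criterion(model(x), y) / batch_chunk  # average over chunks
    loss.backward()                              # gradients accumulate in .grad
optimizer.step()                                 # one optimizer step per batch
```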
**Examples**
You can launch mixed precision training of the Transformer-XL base model on the
WikiText-103 dataset using 16 GPUs. For example:
```
bash run_wt103_base.sh train 16 --fp16 --batch_chunk 1
```
The batch size per GPU is equal to the default global batch size of 256 divided
by the product of the number of GPUs times the number of chunks, in this case
batch size per GPU is equal to `256 / (16 * 1) = 16`.
You can launch FP32 training using 8 GPUs; the batch size per GPU is equal to
16 (`--batch_chunk` was set to `2` because a local batch size of 32 runs out of
memory on a NVIDIA DGX-1 with Tesla V100 16GB in FP32 training). For example:
```
bash run_wt103_base.sh train 8 --batch_chunk 2
```
A summary of the training progress is printed after every 10 training
iterations; this can be customized by setting the `--log_interval` parameter.
The summary is printed in the following format:
```
| epoch 18 step 36000 | batches 283 / 2101 | lr 1.220e-03 | ms/batch 185.1 | tok/s 265585 | loss 3.12 | ppl 22.71
```
which contains information about the current training epoch, current training
step, number of batches processed within the current epoch, current learning
rate, execution time in milliseconds per batch, throughput in tokens per
second, current training loss and training perplexity.
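The reported perplexity is the exponential of the reported (natural-log) cross-entropy loss, so the two columns are consistent up to rounding:
```
import math

loss = 3.12            # 'loss' column from the log line above
print(math.exp(loss))  # ~22.65, consistent with 'ppl 22.71' up to rounding
```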
The script saves two checkpoints: `checkpoint_best.pt` which contains the model
corresponding to the lowest value of the validation loss and
`checkpoint_last.pt` which contains the model corresponding to the last
execution of the validation step. By default, the validation is executed every
5000 training steps, this can be customized by setting the `--eval_interval`
parameter. The summary of results on the validation dataset is printed in the
following format:
```
| Eval 7 at step 35000 | time: 1.37s | valid loss 3.14 | valid ppl 23.132
```
which contains information about the current epoch, current training step, time
needed to execute the validation, current validation loss, and validation
perplexity.
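Both checkpoints are regular PyTorch files. If you want to inspect one outside of the provided scripts, a minimal sketch is shown below (the exact keys stored in the checkpoint are specific to the training script and are not listed here):
```
import torch

# Load on CPU so no GPU is required just to inspect the file.
checkpoint = torch.load('LM-TFM/checkpoint_best.pt', map_location='cpu')
print(type(checkpoint))
if isinstance(checkpoint, dict):
    print(list(checkpoint.keys()))  # see which entries the training script saved
```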
At the end of the training, the training script automatically runs evaluation
on the test dataset. This automatic evaluation is executed with values of
`mem_len` and `tgt_len` hyperparameters inherited from the training setup.
Evaluation (inference) benefits from longer attention sequences; therefore, to
reproduce the perplexity values reported in the [Transformer-XL
paper](https://arxiv.org/abs/1901.02860), it's necessary to run the final
evaluation with a dedicated inference script. Refer to the [Inference
process](#inference-process) section for more details.
#### Multi-node
Multi-node runs can be launched on a pyxis/enroot Slurm cluster (see
[Requirements](#requirements)). To launch a multi-node run, issue the
`run.sub` script with the following command for an 8-node DGX-2H training, for
example:
```
sbatch run.sub all
```
This repository contains a number of predefined configurations to run the
multi-node training on DGX-2H nodes. By default, `run.sub` launches 8-node
training.
To launch multi-node training on `<NODES>` DGX-2H nodes, run:
```
CONFIG=<NODES>dgx2_16gpu_{fp16,fp32} sbatch -N <NODES> run.sub all
```
* supported values for `<NODES>` parameter are: 1, 2, 4, 8
* configs with `fp16` suffix launch mixed precision training, configs with
`fp32` suffix launch FP32 training
Examples:
To launch 4-node mixed-precision training, run:
```
CONFIG=4dgx2_16gpu_fp16 sbatch -N 4 run.sub all
```
To launch 2-node FP32 training, run:
```
CONFIG=2dgx2_16gpu_fp32 sbatch -N 2 run.sub all
```
Note that the `run.sub` script is a starting point that has to be adapted
depending on the environment. In particular, variables such as `WORK_DIR`
handle the location of the workspace in the file system. The variable `CONT`
should point to the location of the Transformer-XL Docker container. It's
assumed that the Docker container built with the `scripts/docker/build.sh`
script was pushed to a Docker registry accessible from all compute nodes.
Refer to the contents of the file to see the full list of variables to adjust
for your system.
### Inference process
Inference can be run by launching the `run_wt103_base.sh` or the
`run_wt103_large.sh` script with the first argument set to `eval`. Running
inference requires a pre-trained model checkpoint.
The script supports single-node multi-GPU inference; each batch is split
equally among all GPUs running the inference, and the loss is averaged over the
global batch. Logs from the inference are automatically saved to the
`LM-TFM/eval_log.log` file.
**Command-line**
You can launch inference of the Transformer-XL base/large model on the
WikiText-103 dataset with the word-based vocabulary and adaptive softmax using
`<#GPUs>` GPUs. For example:
```
bash run_wt103_base.sh eval <#GPUs> --model <PATH TO THE CHECKPOINT> [--fp16] [--type {pytorch, torchscript}]
```
and
```
bash run_wt103_large.sh eval <#GPUs> --model <PATH TO THE CHECKPOINT> [--fp16] [--type {pytorch, torchscript}]
```
The `--fp16` flag is optional; however, if it's specified, then the script
launches inference with Tensor Cores. If the flag is not present, then the
script launches FP32 inference on NVIDIA Volta and NVIDIA Turing GPUs and TF32
inference on NVIDIA Ampere GPUs.
The `--type` flag selects between pure Python PyTorch execution and TorchScript
execution.
Supported values for `<#GPUs>` are: 1, 2, 4, 8 for NVIDIA DGX-1 and NVIDIA DGX
A100 and 1, 2, 4, 8, 16 for NVIDIA DGX-2H.
**Examples**
To launch TorchScript mixed precision inference on 8 GPUs using a checkpoint
loaded from `LM-TFM/checkpoint_best.pt`, run:
```
bash run_wt103_base.sh eval 8 --model LM-TFM/checkpoint_best.pt --fp16 --type torchscript
```
To launch pure Python TF32/FP32 inference on a single GPU using a checkpoint loaded
from `LM-TFM/checkpoint_best.pt`, run:
```
bash run_wt103_base.sh eval 1 --model LM-TFM/checkpoint_best.pt --type pytorch
```
After the execution, the script prints a summary in the following format:
```
Evaluating with math fp16 type torchscript bsz 16 tgt_len 64 ext_len 0 mem_len 640 clamp_len 400
Time : 5.29s, 22.05ms/segment
====================================================================================================
| test loss 3.15 | test ppl 23.304
====================================================================================================
```
which contains information about runtime parameters, execution time, loss and
perplexity on the test dataset.
## Performance
The performance measurements in this document were conducted at the time of
publication and may not reflect the performance achieved from NVIDIA’s latest
software release. For the most up-to-date performance measurements, go to
[NVIDIA Data Center Deep Learning Product
Performance](https://developer.nvidia.com/deep-learning-performance-training-inference).
### Benchmarking
The following section shows how to run benchmarks measuring the model
performance in training and inference modes.
#### Training performance benchmark
To benchmark the training performance for a specific local (per-GPU) batch size
`<LBS>`, with a specific number of GPUs `<#GPUs>` for a specific number of
training iterations `<ITER>`, run:
```
bash run_wt103_{base,large}.sh train <#GPUs> --config trainbench --local_batch_size <LBS> --max_step <ITER> [--fp16]
```
* use the `run_wt103_base.sh` script to run the benchmark for the base model,
and use the `run_wt103_large.sh` script to run the benchmark for the large
model
* it's recommended to launch at least 500 training steps to get a reliable
estimate of training performance.
* the `--fp16` flag is optional; however, if it's specified, then the script
launches mixed precision training with Tensor Cores. If the flag is not
present, then the script launches FP32 training on NVIDIA Volta GPUs and TF32
training on NVIDIA Ampere GPUs.
For more information about the available options, refer to the [Training
process](#training-process) section.
The training script prints information in the following format:
```
(...)
| epoch 1 step 499 | batches 499 / 16802 | lr 4.990e-03 | ms/batch 219.9 | tok/s 27947 | loss 6.43 | ppl 620.80
| epoch 1 step 500 | batches 500 / 16802 | lr 5.000e-03 | ms/batch 221.4 | tok/s 27747 | loss 6.42 | ppl 611.70
-------------------------------------------------------------------------------
(...)
Training time: 1.81 minutes
Training throughput: 28508.91 tok/s
```
The last two lines contain information on the total training time and on the
average training throughput measured in tokens per second.
##### Training performance benchmark for multi-node
To benchmark the multi-node training performance of the large model on a
specific number of DGX-2H nodes `<NODES>` and a specific local batch size
`<LBS>`, run:
For mixed precision:
```
FP16=1 LOCAL_BATCH_SIZE=<LBS> CONFIG=trainbench_multinode sbatch -N <NODES> run.sub train
```
For FP32:
```
LOCAL_BATCH_SIZE=<LBS> CONFIG=trainbench_multinode sbatch -N <NODES> run.sub train
```
#### Inference performance benchmark
The inference performance and accuracy benchmarks require a checkpoint from a
trained model.
To benchmark the inference performance on a specific global batch size `<BS>`
with a specific number of GPUs `<#GPUs>`, run:
For the base model:
```
bash run_wt103_base.sh eval <#GPUs> --model <CHECKPOINT> --batch_size <BS> --save_data [--fp16] [--type {pytorch, torchscript}]
```
For the large model:
```
bash run_wt103_large.sh eval <#GPUs> --model <CHECKPOINT> --batch_size <BS> --save_data [--fp16] [--type {pytorch, torchscript}]
```
The inference script prints information in the following format:
```
Evaluating with math fp16 type torchscript bsz 16 tgt_len 64 ext_len 0 mem_len 640 clamp_len 400
Time : 5.25s, 21.88ms/segment
====================================================================================================
| test loss 3.15 | test ppl 23.304
====================================================================================================
Throughput Avg: 46316.64 tok/s
Latency Avg: 22.09 ms
Latency 90%: 22.22 ms
Latency 95%: 22.25 ms
Latency 99%: 22.37 ms
====================================================================================================
```
The output contains information on the achieved test loss and test perplexity,
average inference throughput (measured in tokens per second), average inference
latency, and the 90th, 95th, and 99th percentile latencies (measured in
milliseconds).
The `scripts/inference_benchmark.sh` benchmarking script is provided for
convenience; it automatically launches TF32/FP32 and FP16 inference for various
batch sizes.
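The reported percentiles correspond to the `--percentiles` option of `eval.py`. As a small illustration of how such statistics are computed from per-segment latencies (the numbers below are synthetic):
```
import numpy as np

# Synthetic per-segment latencies in milliseconds, for illustration only.
latencies_ms = np.random.normal(loc=22.1, scale=0.1, size=1000)

print('Avg:', latencies_ms.mean())
for p in (90, 95, 99):
    print(f'{p}%:', np.percentile(latencies_ms, p))
```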
### Results
The following sections provide details on how we achieved our performance and
accuracy in training and inference.
#### Training accuracy results
##### Training accuracy: NVIDIA DGX A100 (8x A100 40GB)
###### Base model
Our results were obtained by running the `pytorch/run_wt103_base.sh`
training script in the pytorch-20.06-py3 NGC container on NVIDIA DGX A100
with 8x A100 40GB GPUs.
|**GPUs**|**Batch Size / GPU**|**Accuracy - TF32 (perplexity)**|**Accuracy - Mixed precision (perplexity)**|**Time to Train - TF32 (minutes)**|**Time to Train - Mixed precision (minutes)**|**Time to Train Speedup (TF32 to Mixed precision)**|
|-------:|-------------------:|-------------------------------:|------------------------------------------:|---------------------------------:|--------------------------------------------:|--------------------------------------------------:|
| 8 | 32 | 23.24 | 23.24 | 110 | 76 | 1.45 |
###### Large model
Our results were obtained by running the `pytorch/run_wt103_large.sh`
training script in the pytorch-20.06-py3 NGC container on NVIDIA DGX A100
with 8x A100 40GB GPUs.
|**GPUs**|**Batch Size / GPU**|**Accuracy - TF32 (perplexity)**|**Accuracy - Mixed precision (perplexity)**|**Time to Train - TF32 (minutes)**|**Time to Train - Mixed precision (minutes)**|**Time to Train Speedup (TF32 to Mixed precision)**|
|-------:|-------------------:|-------------------------------:|------------------------------------------:|---------------------------------:|--------------------------------------------:|--------------------------------------------------:|
| 8 | 8 | 18.18 | 18.18 | 735 | 477 | 1.54 |
| 8 | 16 | N/A | 18.19 | N/A | 430 | 1.71 |
##### Training accuracy: NVIDIA DGX-1 (8x V100 16GB)
###### Base model
Our results were obtained by running the `pytorch/run_wt103_base.sh`
training script in the pytorch-20.06-py3 NGC container on NVIDIA DGX-1
with 8x V100 16GB GPUs.
|**GPUs**|**Batch Size / GPU**|**Accuracy - FP32 (perplexity)**|**Accuracy - Mixed precision (perplexity)**|**Time to Train - FP32 (minutes)**|**Time to Train - Mixed precision (minutes)**|**Time to Train Speedup (FP32 to Mixed precision)**|
|-------:|-------------------:|-------------------------------:|------------------------------------------:|---------------------------------:|--------------------------------------------:|--------------------------------------------------:|
| 1 | 16 | 23.12 | 23.13 | 2146 | 960 | 2.24 |
| 8 | 16 | 23.17 | 23.14 | 316 | 167 | 1.89 |
| 1 | 32 | N/A | 23.15 | N/A | 766 | 2.80 |
| 8 | 32 | N/A | 23.18 | N/A | 121 | 2.61 |
###### Large model
Our results were obtained by running the `pytorch/run_wt103_large.sh`
training script in the pytorch-20.06-py3 NGC container on NVIDIA DGX-1
with 8x V100 16GB GPUs.
|**GPUs**|**Batch Size / GPU**|**Accuracy - FP32 (perplexity)**|**Accuracy - Mixed precision (perplexity)**|**Time to Train - FP32 (minutes)**|**Time to Train - Mixed precision (minutes)**|**Time to Train Speedup (FP32 to Mixed precision)**|
|-------:|-------------------:|-------------------------------:|------------------------------------------:|---------------------------------:|--------------------------------------------:|--------------------------------------------------:|
| 8 | 2 | 18.22 | 18.20 | 2983 | 1480 | 2.01 |
| 8 | 4 | N/A | 18.17 | N/A | 984 | 3.03 |
##### Training accuracy: NVIDIA DGX-2H (16x V100 32GB)
###### Base model
Our results were obtained by running the `pytorch/run_wt103_base.sh`
training script in the pytorch-20.06-py3 NGC container on NVIDIA DGX-2H
with 16x V100 32GB GPUs.
|**GPUs**|**Batch Size / GPU**|**Accuracy - FP32 (perplexity)**|**Accuracy - Mixed precision (perplexity)**|**Time to Train - FP32 (minutes)**|**Time to Train - Mixed precision (minutes)**|**Time to Train Speedup (FP32 to Mixed precision)**|
|-------:|-------------------:|-------------------------------:|------------------------------------------:|---------------------------------:|--------------------------------------------:|--------------------------------------------------:|
| 16 | 16 | 23.22 | 23.22 | 149 | 80 | 1.86 |
###### Large model
Our results were obtained by running the `pytorch/run_wt103_large.sh`
training script in the pytorch-20.06-py3 NGC container on NVIDIA DGX-2H
with 16x V100 32GB GPUs.
|**GPUs**|**Batch Size / GPU**|**Accuracy - FP32 (perplexity)**|**Accuracy - Mixed precision (perplexity)**|**Time to Train - FP32 (minutes)**|**Time to Train - Mixed precision (minutes)**|**Time to Train Speedup (FP32 to Mixed precision)**|
|-------:|-------------------:|-------------------------------:|------------------------------------------:|---------------------------------:|--------------------------------------------:|--------------------------------------------------:|
| 16 | 8 | 18.21 | 18.20 | 1075 | 394 | 2.73 |
##### Training accuracy: 8x NVIDIA DGX-2H (16x V100 32GB)
###### Large model
Our results were obtained by running the `pytorch/run.sub`
training script in the pytorch-20.06-py3 NGC container on 8x NVIDIA DGX-2H
with 16x V100 32GB GPUs.
|**DGX System**|**Nodes**|**Batch Size / GPU**|**Accuracy - FP32 (perplexity)**|**Accuracy - Mixed precision (perplexity)**|**Time to Train - FP32 (minutes)**|**Time to Train - Mixed precision (minutes)**|**Time to Train Speedup (FP32 to Mixed precision)**|
|-------------:|--------:|-------------------:|-------------------------------:|------------------------------------------:|---------------------------------:|--------------------------------------------:|--------------------------------------------------:|
| DGX-2H | 8 | 4 | 18.27 | 18.28 | 156 | 74 | 2.11 |
##### Training accuracy plots
###### Base model

###### Large model (single-node)

###### Large model (multi-node)

##### Training stability test
###### Base model
The Transformer-XL base model was trained for 40,000 training steps, starting
from 16 different initial random seeds. After every 5,000 training steps, the
model was evaluated on the validation dataset and validation perplexity was
recorded. The training was performed in the pytorch-20.06-py3 NGC container on
NVIDIA DGX A100 with 8x A100 40GB GPUs. The following table summarizes the
perplexity of our validation dataset.
|**Training step**|**Average perplexity**|**Standard deviation**|**Minimum**|**Maximum**|**Median**|
|----------------:|----------:|---------------------:|----------:|----------:|---------:|
| 5000 | 42.62 | 0.27311 | 42.01 | 43.09 | 42.67 |
| 10000 | 32.31 | 0.12814 | 32.10 | 32.59 | 32.31 |
| 15000 | 28.38 | 0.10764 | 28.23 | 28.57 | 28.35 |
| 20000 | 26.14 | 0.10218 | 25.96 | 26.36 | 26.14 |
| 25000 | 24.59 | 0.09060 | 24.42 | 24.81 | 24.60 |
| 30000 | 23.71 | 0.07259 | 23.61 | 23.84 | 23.71 |
| 35000 | 23.15 | 0.04781 | 23.05 | 23.26 | 23.15 |
| 40000 | 22.93 | 0.05593 | 22.83 | 23.04 | 22.94 |
After training, the models were evaluated on the test dataset. The following
table summarizes the final perplexity on the test set.
|**Average perplexity**|**Standard deviation**|**Minimum**|**Maximum**|**Median**|
|----------:|---------------------:|----------:|----------:|---------:|
| 23.24| 0.07794| 23.11| 23.38| 23.25|
###### Large model (single-node)
The Transformer-XL large model was trained for 100,000 training steps, starting
from 16 different initial random seeds. After every 10,000 training steps, the
model was evaluated on the validation dataset and validation perplexity was
recorded. The training was performed in the pytorch-20.06-py3 NGC container on
NVIDIA DGX A100 with 8x A100 40GB GPUs. The following table summarizes the
perplexity of our validation dataset.
|**Training step**|**Average perplexity**|**Standard deviation**|**Minimum**|**Maximum**|**Median**|
|----------------:|----------:|---------------------:|----------:|----------:|---------:|
| 10000 | 32.63 | 0.20432 | 32.34 | 33.05 | 32.62 |
| 20000 | 24.08 | 0.10980 | 23.90 | 24.28 | 24.10 |
| 30000 | 21.52 | 0.09069 | 21.36 | 21.66 | 21.52 |
| 40000 | 20.17 | 0.06922 | 20.06 | 20.27 | 20.17 |
| 50000 | 19.23 | 0.05975 | 19.11 | 19.33 | 19.24 |
| 60000 | 18.57 | 0.06008 | 18.47 | 18.72 | 18.56 |
| 70000 | 18.17 | 0.06473 | 18.08 | 18.32 | 18.15 |
| 80000 | 17.95 | 0.06506 | 17.82 | 18.08 | 17.94 |
| 90000 | 17.80 | 0.04350 | 17.71 | 17.90 | 17.80 |
| 100000 | 17.80 | 0.03592 | 17.74 | 17.86 | 17.81 |
After training, the models were evaluated on the test dataset. The following
table summarizes the final perplexity on the test set.
|**Average perplexity**|**Standard deviation**|**Minimum**|**Maximum**|**Median**|
|---------------------:|---------------------:|----------:|----------:|---------:|
| 18.17 | 0.04016 | 18.09 | 18.24 | 18.17 |
###### Large model (multi-node)
The Transformer-XL large model was trained for 25,000 training steps, starting
from 10 different initial random seeds. After every 1,000 training steps, the
model was evaluated on the validation dataset and validation perplexity was
recorded. The training was performed in the pytorch-20.06-py3 NGC container on
8x NVIDIA DGX-2H with 16x V100 32GB GPUs. The following table summarizes the
perplexity of our validation dataset.
|**Training step**|**Average perplexity**|**Standard deviation**|**Minimum**|**Maximum**|**Median**|
|----------------:|----------:|---------------------:|----------:|----------:|---------:|
| 1000 | 608.09 | 3.80116 | 600.65 | 613.73 | 609.40 |
| 2000 | 142.75 | 0.94452 | 141.21 | 143.84 | 143.07 |
| 3000 | 62.19 | 0.44544 | 61.38 | 63.01 | 62.18 |
| 4000 | 40.22 | 0.16397 | 39.93 | 40.54 | 40.20 |
| 5000 | 32.00 | 0.15850 | 31.61 | 32.19 | 32.02 |
| 6000 | 28.05 | 0.17854 | 27.81 | 28.41 | 28.05 |
| 7000 | 25.65 | 0.10946 | 25.51 | 25.87 | 25.65 |
| 8000 | 24.20 | 0.11385 | 23.98 | 24.36 | 24.20 |
| 9000 | 23.18 | 0.14936 | 22.84 | 23.37 | 23.20 |
| 10000 | 22.88 | 0.22752 | 22.54 | 23.33 | 22.94 |
| 11000 | 21.99 | 0.16232 | 21.73 | 22.29 | 21.97 |
| 12000 | 21.69 | 0.10824 | 21.46 | 21.81 | 21.73 |
| 13000 | 21.42 | 0.09154 | 21.25 | 21.57 | 21.44 |
| 14000 | 21.33 | 0.13821 | 21.15 | 21.55 | 21.27 |
| 15000 | 21.24 | 0.15526 | 20.95 | 21.57 | 21.20 |
| 16000 | 21.19 | 0.10521 | 21.01 | 21.44 | 21.18 |
| 17000 | 20.89 | 0.18239 | 20.69 | 21.18 | 20.82 |
| 18000 | 20.36 | 0.10715 | 20.21 | 20.53 | 20.34 |
| 19000 | 19.74 | 0.12803 | 19.45 | 19.92 | 19.75 |
| 20000 | 19.18 | 0.10020 | 19.05 | 19.39 | 19.15 |
| 21000 | 18.49 | 0.06319 | 18.36 | 18.60 | 18.49 |
| 22000 | 18.17 | 0.03674 | 18.11 | 18.22 | 18.16 |
| 23000 | 17.98 | 0.03682 | 17.90 | 18.04 | 17.99 |
| 24000 | 17.88 | 0.02880 | 17.84 | 17.92 | 17.89 |
| 25000 | 17.85 | 0.02793 | 17.80 | 17.90 | 17.86 |
After training, the models were evaluated on the test dataset. The following
table summarizes the final perplexity on the test set.
|**Average perplexity**|**Standard deviation**|**Minimum**|**Maximum**|**Median**|
|----------:|---------------------:|----------:|----------:|---------:|
| 18.30 | 0.02747 | 18.24 | 18.33 | 18.30 |
#### Training performance results
##### Training performance: NVIDIA DGX A100 (8x A100 40GB)
###### Base model
Our results were obtained by running the `pytorch/run_wt103_base.sh` training
script in the pytorch-20.06-py3 NGC container on NVIDIA DGX A100 with 8x A100
40GB GPUs. Performance numbers (in tokens per second) were averaged over 500
training iterations.
|**GPUs**|**Batch Size / GPU**|**Throughput - TF32 (tok/s)**|**Throughput - Mixed precision (tok/s)**|**Throughput speedup (TF32 to Mixed precision)**|**Weak Scaling - TF32**|**Weak Scaling - Mixed precision**|
|-------:|-------------------:|----------------------------:|---------------------------------------:|-----------------------------------------------:|----------------------:|---------------------------------:|
| 1 | 32 | 41,527 | 59,961 | 1.444 | 1.000 | 1.000 |
| 2 | 32 | 77,625 | 113,238 | 1.459 | 1.869 | 1.889 |
| 4 | 32 | 153,945 | 225,609 | 1.466 | 3.707 | 3.763 |
| 8 | 32 | 305,933 | 449,890 | 1.471 | 7.367 | 7.503 |
To achieve these same results, follow the steps in the
[Quick Start Guide](#quick-start-guide) to download the dataset and set up
the container, and then proceed to the
[Training performance benchmark](#training-performance-benchmark) section for
instructions on how to launch the benchmark.
###### Large model
Our results were obtained by running the `pytorch/run_wt103_large.sh` training
script in the pytorch-20.06-py3 NGC container on NVIDIA DGX A100 with 8x A100
40GB GPUs. Performance numbers (in tokens per second) were averaged over 500
training iterations.
|**GPUs**|**Batch Size / GPU**|**Throughput - TF32 (tok/s)**|**Throughput - Mixed precision (tok/s)**|**Throughput speedup (TF32 to Mixed precision)**|**Weak Scaling - TF32**|**Weak Scaling - Mixed precision**|
|-------:|-------------------:|----------------------------:|---------------------------------------:|-----------------------------------------------:|----------------------:|---------------------------------:|
| 1 | 8 | 14,497 | 21,554 | 1.487 | 1.000 | 1.000 |
| 2 | 8 | 27,304 | 40,222 | 1.473 | 1.883 | 1.866 |
| 4 | 8 | 53,756 | 80,226 | 1.492 | 3.708 | 3.722 |
| 8 | 8 | 106,651 | 159,185 | 1.493 | 7.357 | 7.385 |
| 1 | 16 | N/A | 25,084 | 1.730 | N/A | 1.000 |
| 2 | 16 | N/A | 48,562 | 1.779 | N/A | 1.936 |
| 4 | 16 | N/A | 95,997 | 1.786 | N/A | 3.827 |
| 8 | 16 | N/A | 191,148 | 1.792 | N/A | 7.620 |
To achieve these same results, follow the steps in the
[Quick Start Guide](#quick-start-guide) to download the dataset and set up
the container, and then proceed to the
[Training performance benchmark](#training-performance-benchmark) section for
instructions on how to launch the benchmark.
##### Training performance: NVIDIA DGX-1 (8x V100 16GB)
###### Base model
Our results were obtained by running the `pytorch/run_wt103_base.sh` training
script in the pytorch-20.06-py3 NGC container on NVIDIA DGX-1 with 8x V100 16GB
GPUs. Performance numbers (in tokens per second) were averaged over 500
training iterations.
|**GPUs**|**Batch Size / GPU**|**Throughput - FP32 (tok/s)**|**Throughput - Mixed precision (tok/s)**|**Throughput speedup (FP32 to Mixed precision)**|**Weak Scaling - FP32**|**Weak Scaling - Mixed precision**|
|-------:|-------------------:|----------------------------:|---------------------------------------:|-----------------------------------------------:|----------------------:|---------------------------------:|
| 1 | 16 | 13,981 | 26,639 | 1.905 | 1.000 | 1.000 |
| 2 | 16 | 23,163 | 45,299 | 1.956 | 1.657 | 1.700 |
| 4 | 16 | 48,893 | 92,618 | 1.894 | 3.497 | 3.477 |
| 8 | 16 | 97,005 | 170,532 | 1.758 | 6.938 | 6.402 |
| 1 | 32 | N/A | 36,692 | 2.624 | N/A | 1.000 |
| 2 | 32 | N/A | 65,889 | 2.845 | N/A | 1.796 |
| 4 | 32 | N/A | 133,838 | 2.737 | N/A | 3.648 |
| 8 | 32 | N/A | 258,648 | 2.666 | N/A | 7.049 |
To achieve these same results, follow the steps in the
[Quick Start Guide](#quick-start-guide) to download the dataset and set up
the container, and then proceed to the
[Training performance benchmark](#training-performance-benchmark) section for
instructions on how to launch the benchmark.
###### Large model
Our results were obtained by running the `pytorch/run_wt103_large.sh` training
script in the pytorch-20.06-py3 NGC container on NVIDIA DGX-1 with 8x V100 16GB
GPUs. Performance numbers (in tokens per second) were averaged over 500
training iterations.
|**GPUs**|**Batch Size / GPU**|**Throughput - FP32 (tok/s)**|**Throughput - Mixed precision (tok/s)**|**Throughput speedup (FP32 to Mixed precision)**|**Weak Scaling - FP32**|**Weak Scaling - Mixed precision**|
|-------:|-------------------:|----------------------------:|---------------------------------------:|-----------------------------------------------:|----------------------:|---------------------------------:|
| 1 | 2 | 3,558 | 6,907 | 1.941 | 1.000 | 1.000 |
| 2 | 2 | 6,153 | 11,272 | 1.832 | 1.729 | 1.632 |
| 4 | 2 | 12,492 | 22,530 | 1.804 | 3.511 | 3.262 |
| 8 | 2 | 24,595 | 40,920 | 1.664 | 6.913 | 5.925 |
| 1 | 4 | N/A | 10,210 | 2.870 | N/A | 1.000 |
| 2 | 4 | N/A | 17,984 | 2.923 | N/A | 1.761 |
| 4 | 4 | N/A | 36,340 | 2.909 | N/A | 3.559 |
| 8 | 4 | N/A | 66,716 | 2.713 | N/A | 6.535 |
To achieve these same results, follow the steps in the
[Quick Start Guide](#quick-start-guide) to download the dataset and set up
the container, and then proceed to the
[Training performance benchmark](#training-performance-benchmark) section for
instructions on how to launch the benchmark.
##### Training performance: NVIDIA DGX-2H (16x V100 32GB)
###### Base model
Our results were obtained by running the `pytorch/run_wt103_base.sh` training
script in the pytorch-20.06-py3 NGC container on NVIDIA DGX-2H with 16x V100
32GB GPUs. Performance numbers (in tokens per second) were averaged over 500
training iterations.
|**GPUs**|**Batch Size / GPU**|**Throughput - FP32 (tok/s)**|**Throughput - Mixed precision (tok/s)**|**Throughput speedup (FP32 to Mixed precision)**|**Weak Scaling - FP32**|**Weak Scaling - Mixed precision**|
|-------:|-------------------:|----------------------------:|---------------------------------------:|-----------------------------------------------:|----------------------:|---------------------------------:|
| 1 | 16 | 16,150 | 32,875 | 2.036 | 1.000 | 1.000 |
| 2 | 16 | 29,712 | 59,058 | 1.988 | 1.840 | 1.796 |
| 4 | 16 | 58,011 | 113,985 | 1.965 | 3.592 | 3.467 |
| 8 | 16 | 114,655 | 223,907 | 1.953 | 7.099 | 6.811 |
| 16 | 16 | 222,920 | 414,994 | 1.862 | 13.803 | 12.623 |
To achieve these same results, follow the steps in the
[Quick Start Guide](#quick-start-guide) to download the dataset and set up
the container, and then proceed to the
[Training performance benchmark](#training-performance-benchmark) section for
instructions on how to launch the benchmark.
###### Large model
Our results were obtained by running the `pytorch/run_wt103_large.sh` training
script in the pytorch-20.06-py3 NGC container on NVIDIA DGX-2H with 16x V100
32GB GPUs. Performance numbers (in tokens per second) were averaged over 500
training iterations.
|**GPUs**|**Batch Size / GPU**|**Throughput - FP32 (tok/s)**|**Throughput - Mixed precision (tok/s)**|**Throughput speedup (FP32 to Mixed precision)**|**Weak Scaling - FP32**|**Weak Scaling - Mixed precision**|
|-------:|-------------------:|----------------------------:|---------------------------------------:|-----------------------------------------------:|----------------------:|---------------------------------:|
| 1 | 8 | 5,169 | 14,787 | 2.861 | 1.000 | 1.000 |
| 2 | 8 | 9,977 | 27,710 | 2.777 | 1.930 | 1.874 |
| 4 | 8 | 19,691 | 54,207 | 2.753 | 3.810 | 3.666 |
| 8 | 8 | 39,157 | 107,073 | 2.734 | 7.576 | 7.241 |
| 16 | 8 | 77,568 | 211,387 | 2.725 | 15.008 | 14.296 |
To achieve these same results, follow the steps in the
[Quick Start Guide](#quick-start-guide) to download the dataset and set up
the container, and then proceed to the
[Training performance benchmark](#training-performance-benchmark) section for
instructions on how to launch the benchmark.
##### Training performance: 8x NVIDIA DGX-2H (16x V100 32GB)
Our results were obtained by running the `pytorch/run.sub` training script in
the pytorch-20.06-py3 NGC container. Performance numbers (in tokens per second)
were averaged over 500 training iterations.
###### Large model
|**DGX System**|**Nodes**|**Batch Size / GPU**|**Throughput - FP32 (tok/s)**|**Throughput - Mixed precision (tok/s)**|**Throughput speedup (FP32 to Mixed precision)**|**Weak Scaling - FP32**|**Weak scaling - Mixed precision**|
|-------------:|--------:|-------------------:|-------------------------------:|------------------------------------------:|---------------------------------:|--------------------------------------------:|--------------------------------------------------:|
| DGX-2H | 1 | 4 | 69,070 | 154,950 | 2.24 | 1.00 | 1.00 |
| DGX-2H | 2 | 4 | 136,960 | 307,520 | 2.25 | 1.98 | 1.98 |
| DGX-2H | 4 | 4 | 270,120 | 605,530 | 2.24 | 3.91 | 3.91 |
| DGX-2H | 8 | 4 | 514,500 | 1,189,700 | 2.31 | 7.45 | 7.68 |
To achieve these same results, follow the steps in the
[Quick Start Guide](#quick-start-guide) to download the dataset and then
proceed to the
[Training performance benchmark for
multi-node](#training-performance-benchmark-for-multi-node) section for
instructions on how to launch the multi-node performance benchmark. The numbers
presented above were obtained with `LOCAL_BATCH_SIZE=4`.
#### Inference performance results
##### Inference performance: NVIDIA DGX A100 (1x A100 40GB)
###### Base model
Our results were obtained by running the
`pytorch/scripts/inference_benchmark.sh` inference benchmarking script in the
pytorch-20.06-py3 NGC container on NVIDIA DGX A100 with 1x A100 40GB GPU.
The command to launch the inference performance benchmark is provided in the
[Inference performance benchmark](#inference-performance-benchmark) section.
**FP16, pure Python**
|**Batch size**|**Sequence length**|**Memory length**|**Throughput Avg (tok/s)**|**Latency Avg (ms)**|**Latency 90% (ms)**|**Latency 95% (ms)**|**Latency 99% (ms)**|
|-------------:|------------------:|----------------:|-------------------------:|-------------------:|-------------------:|-------------------:|-------------------:|
| 1 | 64 | 640 | 4,163.7 | 15.38 | 15.58 | 15.66 | 16.12 |
| 2 | 64 | 640 | 7,915.4 | 16.17 | 16.36 | 16.42 | 17.19 |
| 4 | 64 | 640 | 15,710.2 | 16.29 | 16.45 | 16.49 | 17.38 |
| 8 | 64 | 640 | 32,712.1 | 15.64 | 15.77 | 15.82 | 16.65 |
| 16 | 64 | 640 | 59,378.6 | 17.23 | 17.32 | 17.36 | 18.39 |
| 32 | 64 | 640 | 91,654.2 | 22.33 | 22.39 | 22.53 | 23.63 |
**FP16, TorchScript**
|**Batch size**|**Sequence length**|**Memory length**|**Throughput Avg (tok/s)**|**Latency Avg (ms)**|**Latency 90% (ms)**|**Latency 95% (ms)**|**Latency 99% (ms)**|
|-------------:|------------------:|----------------:|-------------------------:|-------------------:|-------------------:|-------------------:|-------------------:|
| 1 | 64 | 640 | 6,935.9 | 9.231 | 9.388 | 9.445 | 9.534 |
| 2 | 64 | 640 | 12,649.4 | 10.120 | 10.253 | 10.294 | 10.945 |
| 4 | 64 | 640 | 25,029.5 | 10.223 | 10.346 | 10.381 | 10.475 |
| 8 | 64 | 640 | 52,666.3 | 9.716 | 9.808 | 9.851 | 10.540 |
| 16 | 64 | 640 | 90,767.8 | 11.274 | 11.321 | 11.334 | 11.800 |
| 32 | 64 | 640 | 107,082.4 | 19.109 | 19.138 | 19.162 | 19.608 |
**TF32, pure Python**
|**Batch size**|**Sequence length**|**Memory length**|**Throughput Avg (tok/s)**|**Latency Avg (ms)**|**Latency 90% (ms)**|**Latency 95% (ms)**|**Latency 99% (ms)**|
|-------------:|------------------:|----------------:|-------------------------:|-------------------:|-------------------:|-------------------:|-------------------:|
| 1 | 64 | 640 | 4,003.8 | 15.99 | 16.26 | 16.36 | 16.58 |
| 2 | 64 | 640 | 7,499.2 | 17.07 | 17.32 | 17.39 | 17.86 |
| 4 | 64 | 640 | 14,835.4 | 17.25 | 17.46 | 17.50 | 18.34 |
| 8 | 64 | 640 | 30,001.5 | 17.06 | 17.22 | 17.28 | 18.40 |
| 16 | 64 | 640 | 50,189.7 | 20.39 | 20.48 | 20.52 | 21.41 |
| 32 | 64 | 640 | 63,660.5 | 32.14 | 32.17 | 32.29 | 33.19 |
**TF32, TorchScript**
|**Batch size**|**Sequence length**|**Memory length**|**Throughput Avg (tok/s)**|**Latency Avg (ms)**|**Latency 90% (ms)**|**Latency 95% (ms)**|**Latency 99% (ms)**|
|-------------:|------------------:|----------------:|-------------------------:|-------------------:|-------------------:|-------------------:|-------------------:|
| 1 | 64 | 640 | 6,084.5 | 10.52 | 10.74 | 10.84 | 10.95 |
| 2 | 64 | 640 | 11,680.6 | 10.96 | 11.17 | 11.22 | 11.76 |
| 4 | 64 | 640 | 22,867.3 | 11.19 | 11.35 | 11.40 | 12.07 |
| 8 | 64 | 640 | 45,165.5 | 11.33 | 11.46 | 11.49 | 12.03 |
| 16 | 64 | 640 | 61,042.0 | 16.76 | 16.84 | 16.86 | 17.13 |
| 32 | 64 | 640 | 71,124.1 | 28.77 | 28.81 | 28.84 | 28.86 |
To achieve these same results, follow the steps in the
[Quick Start Guide](#quick-start-guide) to download the dataset and set up
the container, and then proceed to the
[Inference performance benchmark](#inference-performance-benchmark) section for
instructions on how to launch the benchmark.
###### Large model
Our results were obtained by running the
`pytorch/scripts/inference_benchmark.sh` inferencing benchmarking script in the
pytorch-20.06-py3 NGC container on NVIDIA DGX A100 with 1x A100 40GB GPU.
The command to launch the inference performance benchmark is provided in the
[Inference performance benchmark](#inference-performance-benchmark) section.
**FP16, pure Python**
|**Batch size**|**Sequence length**|**Memory length**|**Throughput Avg (tok/s)**|**Latency Avg (ms)**|**Latency 90% (ms)**|**Latency 95% (ms)**|**Latency 99% (ms)**|
|-------------:|------------------:|----------------:|-------------------------:|-------------------:|-------------------:|-------------------:|-------------------:|
| 1 | 128 | 1,600 | 7,033.0 | 18.20 | 18.57 | 18.64 | 18.93 |
| 2 | 128 | 1,600 | 12,832.5 | 19.94 | 20.23 | 20.29 | 21.07 |
| 4 | 128 | 1,600 | 21,500.2 | 23.80 | 23.99 | 24.07 | 25.09 |
| 8 | 128 | 1,600 | 25,797.1 | 39.66 | 39.74 | 39.91 | 41.00 |
| 16 | 128 | 1,600 | 28,143.5 | 72.71 | 72.74 | 73.12 | 74.00 |
| 32 | 128 | 1,600 | 28,533.6 | 143.44 | 143.30 | 143.48 | 149.07 |
**FP16, TorchScript**
|**Batch size**|**Sequence length**|**Memory length**|**Throughput Avg (tok/s)**|**Latency Avg (ms)**|**Latency 90% (ms)**|**Latency 95% (ms)**|**Latency 99% (ms)**|
|-------------:|------------------:|----------------:|-------------------------:|-------------------:|-------------------:|-------------------:|-------------------:|
| 1 | 128 | 1,600 | 11,068.2 | 11.57 | 11.83 | 11.88 | 12.42 |
| 2 | 128 | 1,600 | 19,847.0 | 12.89 | 13.09 | 13.11 | 13.27 |
| 4 | 128 | 1,600 | 24,450.7 | 20.92 | 21.08 | 21.10 | 21.15 |
| 8 | 128 | 1,600 | 27,938.4 | 36.62 | 36.72 | 36.75 | 36.86 |
| 16 | 128 | 1,600 | 30,783.0 | 66.48 | 66.54 | 66.59 | 66.98 |
| 32 | 128 | 1,600 | 32,161.6 | 127.26 | 127.19 | 127.34 | 131.64 |
**TF32, pure Python**
|**Batch size**|**Sequence length**|**Memory length**|**Throughput Avg (tok/s)**|**Latency Avg (ms)**|**Latency 90% (ms)**|**Latency 95% (ms)**|**Latency 99% (ms)**|
|-------------:|------------------:|----------------:|-------------------------:|-------------------:|-------------------:|-------------------:|-------------------:|
| 1 | 128 | 1,600 | 6,558.8 | 19.52 | 19.87 | 19.95 | 20.44 |
| 2 | 128 | 1,600 | 10,658.4 | 24.00 | 24.28 | 24.36 | 25.17 |
| 4 | 128 | 1,600 | 14,769.6 | 34.64 | 34.82 | 34.89 | 35.74 |
| 8 | 128 | 1,600 | 16,852.6 | 60.71 | 60.82 | 61.05 | 62.17 |
| 16 | 128 | 1,600 | 18,071.8 | 113.23 | 113.28 | 113.37 | 114.64 |
| 32 | 128 | 1,600 | 17,619.2 | 234.04 | 229.98 | 239.30 | 328.15 |
**TF32, TorchScript**
|**Batch size**|**Sequence length**|**Memory length**|**Throughput Avg (tok/s)**|**Latency Avg (ms)**|**Latency 90% (ms)**|**Latency 95% (ms)**|**Latency 99% (ms)**|
|-------------:|------------------:|----------------:|-------------------------:|-------------------:|-------------------:|-------------------:|-------------------:|
| 1 | 128 | 1,600 | 9,084.4 | 14.09 | 14.37 | 14.40 | 14.46 |
| 2 | 128 | 1,600 | 12,839.4 | 19.92 | 20.15 | 20.17 | 20.25 |
| 4 | 128 | 1,600 | 15,582.4 | 32.83 | 33.00 | 33.02 | 33.28 |
| 8 | 128 | 1,600 | 17,825.0 | 57.40 | 57.55 | 57.59 | 57.94 |
| 16 | 128 | 1,600 | 19,419.2 | 105.38 | 105.49 | 105.54 | 105.91 |
| 32 | 128 | 1,600 | 20,079.4 | 203.81 | 203.77 | 203.84 | 207.47 |
To achieve these same results, follow the steps in the
[Quick Start Guide](#quick-start-guide) to download the dataset and set up
the container, and then proceed to the
[Inference performance benchmark](#inference-performance-benchmark) section for
instructions on how to launch the benchmark.
##### Inference performance: NVIDIA DGX-1 (1x V100 16GB)
###### Base model
Our results were obtained by running the
`pytorch/scripts/inference_benchmark.sh` inferencing benchmarking script in the
pytorch-20.06-py3 NGC container on NVIDIA DGX-1 with 1x V100 16GB GPU.
The command to launch the inference performance benchmark is provided in the
[Inference performance benchmark](#inference-performance-benchmark) section.
**FP16, pure Python**
|**Batch size**|**Sequence length**|**Memory length**|**Throughput Avg (tok/s)**|**Latency Avg (ms)**|**Latency 90% (ms)**|**Latency 95% (ms)**|**Latency 99% (ms)**|
|-------------:|------------------:|----------------:|-------------------------:|-------------------:|-------------------:|-------------------:|-------------------:|
| 1 | 64 | 640 | 2,999.6 | 21.36 | 21.72 | 21.90 | 24.86 |
| 2 | 64 | 640 | 5,738.5 | 22.32 | 22.64 | 22.89 | 25.97 |
| 4 | 64 | 640 | 11,773.5 | 21.73 | 21.92 | 22.06 | 22.68 |
| 8 | 64 | 640 | 22,604.7 | 22.63 | 22.92 | 23.08 | 23.56 |
| 16 | 64 | 640 | 41,481.6 | 24.67 | 24.83 | 24.99 | 25.73 |
| 32 | 64 | 640 | 58,556.9 | 34.95 | 35.13 | 35.24 | 35.85 |
**FP16, TorchScript**
|**Batch size**|**Sequence length**|**Memory length**|**Throughput Avg (tok/s)**|**Latency Avg (ms)**|**Latency 90% (ms)**|**Latency 95% (ms)**|**Latency 99% (ms)**|
|-------------:|------------------:|----------------:|-------------------------:|-------------------:|-------------------:|-------------------:|-------------------:|
| 1 | 64 | 640 | 5,199.9 | 12.31 | 12.59 | 12.65 | 12.98 |
| 2 | 64 | 640 | 9,802.5 | 13.06 | 13.30 | 13.42 | 13.82 |
| 4 | 64 | 640 | 19,609.4 | 13.05 | 13.17 | 13.24 | 13.94 |
| 8 | 64 | 640 | 37,598.7 | 13.61 | 13.71 | 13.77 | 14.62 |
| 16 | 64 | 640 | 57,840.2 | 17.69 | 17.73 | 17.76 | 18.36 |
| 32 | 64 | 640 | 66,955.9 | 30.57 | 30.78 | 30.86 | 30.96 |
**FP32, pure Python**
|**Batch size**|**Sequence length**|**Memory length**|**Throughput Avg (tok/s)**|**Latency Avg (ms)**|**Latency 90% (ms)**|**Latency 95% (ms)**|**Latency 99% (ms)**|
|-------------:|------------------:|----------------:|-------------------------:|-------------------:|-------------------:|-------------------:|-------------------:|
| 1 | 64 | 640 | 2,940.0 | 21.79 | 22.23 | 22.42 | 25.52 |
| 2 | 64 | 640 | 5,652.0 | 22.66 | 23.00 | 23.20 | 26.86 |
| 4 | 64 | 640 | 10,526.0 | 24.30 | 24.62 | 24.72 | 25.03 |
| 8 | 64 | 640 | 15,767.2 | 32.45 | 32.67 | 32.78 | 33.32 |
| 16 | 64 | 640 | 20,303.2 | 50.39 | 50.82 | 50.89 | 51.07 |
| 32 | 64 | 640 | 21,707.1 | 94.26 | 94.76 | 94.94 | 95.26 |
**FP32, TorchScript**
|**Batch size**|**Sequence length**|**Memory length**|**Throughput Avg (tok/s)**|**Latency Avg (ms)**|**Latency 90% (ms)**|**Latency 95% (ms)**|**Latency 99% (ms)**|
|-------------:|------------------:|----------------:|-------------------------:|-------------------:|-------------------:|-------------------:|-------------------:|
| 1 | 64 | 640 | 4,974.1 | 12.88 | 13.25 | 13.37 | 13.69 |
| 2 | 64 | 640 | 9,625.3 | 13.30 | 13.58 | 13.72 | 14.15 |
| 4 | 64 | 640 | 15,069.9 | 16.98 | 17.27 | 17.35 | 17.54 |
| 8 | 64 | 640 | 18,269.8 | 28.00 | 28.23 | 28.28 | 28.37 |
| 16 | 64 | 640 | 20,884.5 | 48.99 | 49.46 | 49.50 | 49.63 |
| 32 | 64 | 640 | 22,289.2 | 91.80 | 92.25 | 92.56 | 92.67 |
To achieve these same results, follow the steps in the
[Quick Start Guide](#quick-start-guide) to download the dataset and set up
the container, and then proceed to the
[Inference performance benchmark](#inference-performance-benchmark) section for
instructions on how to launch the benchmark.
###### Large model
Our results were obtained by running the
`pytorch/scripts/inference_benchmark.sh` inferencing benchmarking script in the
pytorch-20.06-py3 NGC container on NVIDIA DGX-1 with 1x V100 16GB GPU.
The command to launch the inference performance benchmark is provided in the
[Inference performance benchmark](#inference-performance-benchmark) section.
**FP16, pure Python**
|**Batch size**|**Sequence length**|**Memory length**|**Throughput Avg (tok/s)**|**Latency Avg (ms)**|**Latency 90% (ms)**|**Latency 95% (ms)**|**Latency 99% (ms)**|
|-------------:|------------------:|----------------:|-------------------------:|-------------------:|-------------------:|-------------------:|-------------------:|
| 1 | 128 | 1,600 | 5,119.6 | 25.00 | 25.47 | 25.66 | 26.12 |
| 2 | 128 | 1,600 | 8,676.1 | 29.49 | 29.81 | 29.94 | 30.88 |
| 4 | 128 | 1,600 | 12,960.9 | 39.47 | 39.84 | 39.91 | 40.69 |
| 8 | 128 | 1,600 | 14,870.6 | 68.81 | 69.28 | 69.42 | 69.76 |
| 16 | 128 | 1,600 | 15,528.5 | 131.78 | 132.74 | 132.86 | 133.07 |
| 32 | 128 | 1,600 | 15,649.4 | 261.54 | 262.45 | 262.99 | 271.10 |
**FP16, TorchScript**
|**Batch size**|**Sequence length**|**Memory length**|**Throughput Avg (tok/s)**|**Latency Avg (ms)**|**Latency 90% (ms)**|**Latency 95% (ms)**|**Latency 99% (ms)**|
|-------------:|------------------:|----------------:|-------------------------:|-------------------:|-------------------:|-------------------:|-------------------:|
| 1 | 128 | 1,600 | 8,718.2 | 14.68 | 15.01 | 15.07 | 15.50 |
| 2 | 128 | 1,600 | 12,157.8 | 21.04 | 21.29 | 21.31 | 21.38 |
| 4 | 128 | 1,600 | 14,534.8 | 35.20 | 35.48 | 35.53 | 35.93 |
| 8 | 128 | 1,600 | 15,863.8 | 64.50 | 64.90 | 65.15 | 65.31 |
| 16 | 128 | 1,600 | 16,674.0 | 122.73 | 123.34 | 123.66 | 123.92 |
| 32 | 128 | 1,600 | 17,154.1 | 238.60 | 239.48 | 239.73 | 247.48 |
**FP32, pure Python**
|**Batch size**|**Sequence length**|**Memory length**|**Throughput Avg (tok/s)**|**Latency Avg (ms)**|**Latency 90% (ms)**|**Latency 95% (ms)**|**Latency 99% (ms)**|
|-------------:|------------------:|----------------:|-------------------------:|-------------------:|-------------------:|-------------------:|-------------------:|
| 1 | 128 | 1,600 | 3,009.8 | 42.52 | 43.01 | 43.09 | 43.53 |
| 2 | 128 | 1,600 | 3,838.4 | 66.64 | 67.24 | 67.45 | 67.83 |
| 4 | 128 | 1,600 | 4,265.3 | 119.94 | 120.87 | 121.00 | 121.39 |
| 8 | 128 | 1,600 | 4,646.5 | 220.19 | 221.30 | 221.50 | 221.68 |
| 16 | 128 | 1,600 | 4,805.4 | 426.39 | 426.25 | 426.47 | 427.25 |
| 32 | 128 | 1,600 | 4,787.4 | 855.09 | 854.95 | 855.46 | 912.05 |
**FP32, TorchScript**
|**Batch size**|**Sequence length**|**Memory length**|**Throughput Avg (tok/s)**|**Latency Avg (ms)**|**Latency 90% (ms)**|**Latency 95% (ms)**|**Latency 99% (ms)**|
|-------------:|------------------:|----------------:|-------------------------:|-------------------:|-------------------:|-------------------:|-------------------:|
| 1 | 128 | 1,600 | 3,319.0 | 38.56 | 38.91 | 39.01 | 39.19 |
| 2 | 128 | 1,600 | 3,925.2 | 65.16 | 65.74 | 65.89 | 66.12 |
| 4 | 128 | 1,600 | 4,344.1 | 117.76 | 118.46 | 118.55 | 118.69 |
| 8 | 128 | 1,600 | 4,716.2 | 216.94 | 217.99 | 218.27 | 218.69 |
| 16 | 128 | 1,600 | 4,922.1 | 415.72 | 417.16 | 417.32 | 417.59 |
| 32 | 128 | 1,600 | 4,965.2 | 824.98 | 821.79 | 831.71 | 952.47 |
To achieve these same results, follow the steps in the
[Quick Start Guide](#quick-start-guide) to download the dataset and set up
the container, and then proceed to the
[Inference performance benchmark](#inference-performance-benchmark) section for
instructions on how to launch the benchmark.
##### Inference performance: NVIDIA T4
###### Base model
Our results were obtained by running the
`pytorch/scripts/inference_benchmark.sh` inferencing benchmarking script in the
pytorch-20.06-py3 NGC container on NVIDIA T4.
The command to launch the inference performance benchmark is provided in the
[Inference performance benchmark](#inference-performance-benchmark) section.
**FP16, pure Python**
|**Batch size**|**Sequence length**|**Memory length**|**Throughput Avg (tok/s)**|**Latency Avg (ms)**|**Latency 90% (ms)**|**Latency 95% (ms)**|**Latency 99% (ms)**|
|-------------:|------------------:|----------------:|-------------------------:|-------------------:|-------------------:|-------------------:|-------------------:|
| 1 | 64 | 640 | 3,775.3 | 16.97 | 17.51 | 17.84 | 18.18 |
| 2 | 64 | 640 | 6,417.4 | 19.96 | 20.49 | 20.56 | 21.52 |
| 4 | 64 | 640 | 9,988.6 | 25.64 | 26.07 | 26.14 | 27.32 |
| 8 | 64 | 640 | 11,878.9 | 43.07 | 43.42 | 43.46 | 44.24 |
| 16 | 64 | 640 | 13,630.0 | 75.07 | 75.26 | 75.32 | 76.07 |
| 32 | 64 | 640 | 14,511.2 | 141.01 | 141.38 | 141.41 | 142.16 |
**FP16, TorchScript**
|**Batch size**|**Sequence length**|**Memory length**|**Throughput Avg (tok/s)**|**Latency Avg (ms)**|**Latency 90% (ms)**|**Latency 95% (ms)**|**Latency 99% (ms)**|
|-------------:|------------------:|----------------:|-------------------------:|-------------------:|-------------------:|-------------------:|-------------------:|
| 1 | 64 | 640 | 6,132.5 | 10.47 | 10.93 | 11.31 | 11.45 |
| 2 | 64 | 640 | 8,319.4 | 15.39 | 15.89 | 15.92 | 16.10 |
| 4 | 64 | 640 | 11,259.1 | 22.74 | 23.16 | 23.23 | 23.30 |
| 8 | 64 | 640 | 13,120.3 | 38.99 | 39.35 | 39.37 | 39.42 |
| 16 | 64 | 640 | 15,120.0 | 67.67 | 67.90 | 67.94 | 68.06 |
| 32 | 64 | 640 | 16,158.1 | 126.65 | 126.97 | 127.03 | 127.18 |
**FP32, pure Python**
|**Batch size**|**Sequence length**|**Memory length**|**Throughput Avg (tok/s)**|**Latency Avg (ms)**|**Latency 90% (ms)**|**Latency 95% (ms)**|**Latency 99% (ms)**|
|-------------:|------------------:|----------------:|-------------------------:|-------------------:|-------------------:|-------------------:|-------------------:|
| 1 | 64 | 640 | 2,323.1 | 27.59 | 29.39 | 29.56 | 29.86 |
| 2 | 64 | 640 | 3,094.8 | 41.39 | 42.49 | 42.78 | 43.47 |
| 4 | 64 | 640 | 3,889.8 | 65.82 | 66.60 | 66.71 | 67.57 |
| 8 | 64 | 640 | 4,270.1 | 119.80 | 120.61 | 120.68 | 120.89 |
| 16 | 64 | 640 | 4,765.7 | 214.68 | 215.87 | 216.01 | 216.14 |
| 32 | 64 | 640 | 4,985.2 | 410.43 | 413.58 | 413.67 | 413.92 |
**FP32, TorchScript**
|**Batch size**|**Sequence length**|**Memory length**|**Throughput Avg (tok/s)**|**Latency Avg (ms)**|**Latency 90% (ms)**|**Latency 95% (ms)**|**Latency 99% (ms)**|
|-------------:|------------------:|----------------:|-------------------------:|-------------------:|-------------------:|-------------------:|-------------------:|
| 1 | 64 | 640 | 2,486.3 | 25.78 | 27.52 | 27.66 | 27.92 |
| 2 | 64 | 640 | 3,260.7 | 39.28 | 40.32 | 40.49 | 40.84 |
| 4 | 64 | 640 | 4,033.3 | 63.48 | 64.28 | 64.35 | 64.56 |
| 8 | 64 | 640 | 4,411.4 | 115.96 | 116.74 | 116.85 | 116.89 |
| 16 | 64 | 640 | 4,924.9 | 207.74 | 208.91 | 209.04 | 209.21 |
| 32 | 64 | 640 | 5,163.1 | 396.29 | 399.42 | 399.50 | 399.70 |
To achieve these same results, follow the steps in the
[Quick Start Guide](#quick-start-guide) to download the dataset and set up
the container, and then proceed to the
[Inference performance benchmark](#inference-performance-benchmark) section for
instructions on how to launch the benchmark.
###### Large model
Our results were obtained by running the
`pytorch/scripts/inference_benchmark.sh` inferencing benchmarking script in the
pytorch-20.06-py3 NGC container on NVIDIA T4.
The command to launch the inference performance benchmark is provided in the
[Inference performance benchmark](#inference-performance-benchmark) section.
**FP16, pure Python**
|**Batch size**|**Sequence length**|**Memory length**|**Throughput Avg (tok/s)**|**Latency Avg (ms)**|**Latency 90% (ms)**|**Latency 95% (ms)**|**Latency 99% (ms)**|
|-------------:|------------------:|----------------:|-------------------------:|-------------------:|-------------------:|-------------------:|-------------------:|
| 1 | 128 | 1,600 | 2,978.0 | 42.99 | 43.40 | 43.44 | 44.40 |
| 2 | 128 | 1,600 | 3,161.4 | 80.98 | 81.38 | 81.45 | 81.75 |
| 4 | 128 | 1,600 | 3,459.3 | 147.89 | 148.11 | 148.14 | 148.49 |
| 8 | 128 | 1,600 | 3,657.8 | 279.74 | 279.82 | 279.86 | 280.48 |
| 16 | 128 | 1,600 | 3,762.9 | 543.92 | 543.48 | 543.55 | 544.43 |
| 32 | 128 | 1,600 | 3,794.4 | 1079.15 | 1076.23 | 1076.37 | 1158.93 |
**FP16, TorchScript**
|**Batch size**|**Sequence length**|**Memory length**|**Throughput Avg (tok/s)**|**Latency Avg (ms)**|**Latency 90% (ms)**|**Latency 95% (ms)**|**Latency 99% (ms)**|
|-------------:|------------------:|----------------:|-------------------------:|-------------------:|-------------------:|-------------------:|-------------------:|
| 1 | 128 | 1,600 | 3,066.4 | 41.74 | 42.08 | 42.12 | 42.19 |
| 2 | 128 | 1,600 | 3,399.2 | 75.31 | 75.54 | 75.57 | 75.64 |
| 4 | 128 | 1,600 | 3,721.5 | 137.47 | 137.65 | 137.70 | 137.82 |
| 8 | 128 | 1,600 | 3,932.9 | 260.19 | 260.23 | 260.29 | 260.50 |
| 16 | 128 | 1,600 | 4,057.9 | 504.43 | 503.97 | 504.01 | 504.14 |
| 32 | 128 | 1,600 | 4,117.8 | 994.54 | 991.40 | 991.46 | 1079.17 |
**FP32, pure Python**
|**Batch size**|**Sequence length**|**Memory length**|**Throughput Avg (tok/s)**|**Latency Avg (ms)**|**Latency 90% (ms)**|**Latency 95% (ms)**|**Latency 99% (ms)**|
|-------------:|------------------:|----------------:|-------------------------:|-------------------:|-------------------:|-------------------:|-------------------:|
| 1 | 128 | 1,600 | 786.9 | 162.7 | 163.2 | 163.3 | 163.9 |
| 2 | 128 | 1,600 | 889.6 | 287.8 | 288.1 | 288.2 | 288.4 |
| 4 | 128 | 1,600 | 992.1 | 515.6 | 516.0 | 516.0 | 516.5 |
| 8 | 128 | 1,600 | 1,047.0 | 977.2 | 977.6 | 977.6 | 977.8 |
| 16 | 128 | 1,600 | 1,069.3 | 1913.5 | 1914.7 | 1914.7 | 1915.0 |
| 32 | 128 | 1,600 | 1,069.5 | 3826.3 | 3823.7 | 3823.8 | 3915.8 |
**FP32, TorchScript**
|**Batch size**|**Sequence length**|**Memory length**|**Throughput Avg (tok/s)**|**Latency Avg (ms)**|**Latency 90% (ms)**|**Latency 95% (ms)**|**Latency 99% (ms)**|
|-------------:|------------------:|----------------:|-------------------------:|-------------------:|-------------------:|-------------------:|-------------------:|
| 1 | 128 | 1,600 | 792.5 | 161.5 | 161.9 | 162.0 | 162.2 |
| 2 | 128 | 1,600 | 904.7 | 283.0 | 283.3 | 283.3 | 283.4 |
| 4 | 128 | 1,600 | 1,009.0 | 507.0 | 507.3 | 507.4 | 507.5 |
| 8 | 128 | 1,600 | 1,065.0 | 960.7 | 961.1 | 961.1 | 961.2 |
| 16 | 128 | 1,600 | 1,088.6 | 1879.7 | 1880.9 | 1881.0 | 1881.1 |
| 32 | 128 | 1,600 | 1,102.0 | 3713.7 | 3710.0 | 3718.1 | 3819.0 |
To achieve these same results, follow the steps in the
[Quick Start Guide](#quick-start-guide) to download the dataset and set up
the container, and then proceed to the
[Inference performance benchmark](#inference-performance-benchmark) section for
instructions on how to launch the benchmark.
## Release notes
### Changelog
* June 2020
* Added support for NVIDIA DGX A100
* Updated default NGC container to pytorch-20.06-py3
* December 2019
  * Added support for the large Transformer-XL model trained on the
    WikiText-103 dataset; the large model was trained on NVIDIA DGX-1,
    NVIDIA DGX-2, and 8x NVIDIA DGX-2H (multi-node training)
* Updated default NGC container to pytorch-19.11-py3
* Added support for inference with TorchScript
* October 2019
* Initial release
* Support for FP32 and mixed precision training on NVIDIA
DGX-1, NVIDIA DGX-2, and inference on NVIDIA Tesla V100 16GB
and NVIDIA T4
### Known issues
There are no known issues with this model.
|
PyTorch/Classification/GPUNet/triton/deployment_toolkit/triton_performance_runner/perf_analyzer | perf_analyzer | __init__ | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .runner import PerfAnalyzerRunner # noqa: F401
from .warmup import PerfAnalyzerWarmupRunner # noqa: F401
|
TensorFlow/Detection/SSD/models/research/object_detection | object_detection | model_lib | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
#
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Constructs model, inputs, and training environment."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import functools
import os
import tensorflow as tf
import horovod.tensorflow as hvd
from object_detection import eval_util
from object_detection import exporter as exporter_lib
from object_detection import inputs
from object_detection.builders import graph_rewriter_builder
from object_detection.builders import model_builder
from object_detection.builders import optimizer_builder
from object_detection.core import standard_fields as fields
from object_detection.utils import config_util
from object_detection.utils import label_map_util
from object_detection.utils import shape_utils
from object_detection.utils import variables_helper
from object_detection.utils import visualization_utils as vis_utils
# A map of names to methods that help build the model.
MODEL_BUILD_UTIL_MAP = {
'get_configs_from_pipeline_file':
config_util.get_configs_from_pipeline_file,
'create_pipeline_proto_from_configs':
config_util.create_pipeline_proto_from_configs,
'merge_external_params_with_configs':
config_util.merge_external_params_with_configs,
'create_train_input_fn':
inputs.create_train_input_fn,
'create_eval_input_fn':
inputs.create_eval_input_fn,
'create_predict_input_fn':
inputs.create_predict_input_fn,
}
def _prepare_groundtruth_for_eval(detection_model, class_agnostic,
max_number_of_boxes):
"""Extracts groundtruth data from detection_model and prepares it for eval.
Args:
detection_model: A `DetectionModel` object.
class_agnostic: Whether the detections are class_agnostic.
max_number_of_boxes: Max number of groundtruth boxes.
Returns:
    groundtruth: A dictionary with the following fields:
'groundtruth_boxes': [batch_size, num_boxes, 4] float32 tensor of boxes,
in normalized coordinates.
'groundtruth_classes': [batch_size, num_boxes] int64 tensor of 1-indexed
classes.
'groundtruth_masks': 4D float32 tensor of instance masks (if provided in
groundtruth)
'groundtruth_is_crowd': [batch_size, num_boxes] bool tensor indicating
is_crowd annotations (if provided in groundtruth).
'num_groundtruth_boxes': [batch_size] tensor containing the maximum number
        of groundtruth boxes per image.
"""
input_data_fields = fields.InputDataFields()
groundtruth_boxes = tf.stack(
detection_model.groundtruth_lists(fields.BoxListFields.boxes))
groundtruth_boxes_shape = tf.shape(groundtruth_boxes)
# For class-agnostic models, groundtruth one-hot encodings collapse to all
# ones.
if class_agnostic:
groundtruth_classes_one_hot = tf.ones(
[groundtruth_boxes_shape[0], groundtruth_boxes_shape[1], 1])
else:
groundtruth_classes_one_hot = tf.stack(
detection_model.groundtruth_lists(fields.BoxListFields.classes))
label_id_offset = 1 # Applying label id offset (b/63711816)
groundtruth_classes = (
tf.argmax(groundtruth_classes_one_hot, axis=2) + label_id_offset)
groundtruth = {
input_data_fields.groundtruth_boxes: groundtruth_boxes,
input_data_fields.groundtruth_classes: groundtruth_classes
}
if detection_model.groundtruth_has_field(fields.BoxListFields.masks):
groundtruth[input_data_fields.groundtruth_instance_masks] = tf.stack(
detection_model.groundtruth_lists(fields.BoxListFields.masks))
if detection_model.groundtruth_has_field(fields.BoxListFields.is_crowd):
groundtruth[input_data_fields.groundtruth_is_crowd] = tf.stack(
detection_model.groundtruth_lists(fields.BoxListFields.is_crowd))
groundtruth[input_data_fields.num_groundtruth_boxes] = (
tf.tile([max_number_of_boxes], multiples=[groundtruth_boxes_shape[0]]))
return groundtruth
def unstack_batch(tensor_dict, unpad_groundtruth_tensors=True):
"""Unstacks all tensors in `tensor_dict` along 0th dimension.
Unstacks tensor from the tensor dict along 0th dimension and returns a
tensor_dict containing values that are lists of unstacked, unpadded tensors.
Tensors in the `tensor_dict` are expected to be of one of the three shapes:
1. [batch_size]
2. [batch_size, height, width, channels]
3. [batch_size, num_boxes, d1, d2, ... dn]
When unpad_groundtruth_tensors is set to true, unstacked tensors of form 3
above are sliced along the `num_boxes` dimension using the value in tensor
field.InputDataFields.num_groundtruth_boxes.
Note that this function has a static list of input data fields and has to be
kept in sync with the InputDataFields defined in core/standard_fields.py
Args:
tensor_dict: A dictionary of batched groundtruth tensors.
unpad_groundtruth_tensors: Whether to remove padding along `num_boxes`
dimension of the groundtruth tensors.
Returns:
A dictionary where the keys are from fields.InputDataFields and values are
a list of unstacked (optionally unpadded) tensors.
Raises:
ValueError: If unpad_tensors is True and `tensor_dict` does not contain
`num_groundtruth_boxes` tensor.
"""
unbatched_tensor_dict = {
key: tf.unstack(tensor) for key, tensor in tensor_dict.items()
}
if unpad_groundtruth_tensors:
if (fields.InputDataFields.num_groundtruth_boxes not in
unbatched_tensor_dict):
raise ValueError('`num_groundtruth_boxes` not found in tensor_dict. '
'Keys available: {}'.format(
unbatched_tensor_dict.keys()))
unbatched_unpadded_tensor_dict = {}
unpad_keys = set([
# List of input data fields that are padded along the num_boxes
# dimension. This list has to be kept in sync with InputDataFields in
# standard_fields.py.
fields.InputDataFields.groundtruth_instance_masks,
fields.InputDataFields.groundtruth_classes,
fields.InputDataFields.groundtruth_boxes,
fields.InputDataFields.groundtruth_keypoints,
fields.InputDataFields.groundtruth_group_of,
fields.InputDataFields.groundtruth_difficult,
fields.InputDataFields.groundtruth_is_crowd,
fields.InputDataFields.groundtruth_area,
fields.InputDataFields.groundtruth_weights
]).intersection(set(unbatched_tensor_dict.keys()))
for key in unpad_keys:
unpadded_tensor_list = []
for num_gt, padded_tensor in zip(
unbatched_tensor_dict[fields.InputDataFields.num_groundtruth_boxes],
unbatched_tensor_dict[key]):
tensor_shape = shape_utils.combined_static_and_dynamic_shape(
padded_tensor)
slice_begin = tf.zeros([len(tensor_shape)], dtype=tf.int32)
slice_size = tf.stack(
[num_gt] + [-1 if dim is None else dim for dim in tensor_shape[1:]])
unpadded_tensor = tf.slice(padded_tensor, slice_begin, slice_size)
unpadded_tensor_list.append(unpadded_tensor)
unbatched_unpadded_tensor_dict[key] = unpadded_tensor_list
unbatched_tensor_dict.update(unbatched_unpadded_tensor_dict)
return unbatched_tensor_dict
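# Illustrative usage sketch for `unstack_batch` (kept as a comment so it has no
# import-time side effects). With a batch of two images, each padded to three
# groundtruth boxes:
#
#   tensor_dict = {
#       fields.InputDataFields.num_groundtruth_boxes: tf.constant([1, 2]),
#       fields.InputDataFields.groundtruth_boxes:
#           tf.zeros([2, 3, 4], dtype=tf.float32),
#       fields.InputDataFields.groundtruth_classes:
#           tf.zeros([2, 3, 90], dtype=tf.float32),
#   }
#   unbatched = unstack_batch(tensor_dict, unpad_groundtruth_tensors=True)
#   # unbatched[fields.InputDataFields.groundtruth_boxes] is a list of two
#   # tensors with shapes [1, 4] and [2, 4]; the padding rows are sliced off.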
def create_model_fn(detection_model_fn, configs, hparams, use_tpu=False):
"""Creates a model function for `Estimator`.
Args:
detection_model_fn: Function that returns a `DetectionModel` instance.
configs: Dictionary of pipeline config objects.
hparams: `HParams` object.
use_tpu: Boolean indicating whether model should be constructed for
use on TPU.
Returns:
`model_fn` for `Estimator`.
"""
train_config = configs['train_config']
eval_input_config = configs['eval_input_config']
eval_config = configs['eval_config']
def model_fn(features, labels, mode, params=None):
"""Constructs the object detection model.
Args:
features: Dictionary of feature tensors, returned from `input_fn`.
labels: Dictionary of groundtruth tensors if mode is TRAIN or EVAL,
otherwise None.
mode: Mode key from tf.estimator.ModeKeys.
params: Parameter dictionary passed from the estimator.
Returns:
An `EstimatorSpec` that encapsulates the model and its serving
configurations.
"""
params = params or {}
total_loss, train_op, detections, export_outputs = None, None, None, None
is_training = mode == tf.estimator.ModeKeys.TRAIN
# Make sure to set the Keras learning phase. True during training,
# False for inference.
tf.keras.backend.set_learning_phase(is_training)
detection_model = detection_model_fn(
is_training=is_training, add_summaries=(not use_tpu))
scaffold_fn = None
if mode == tf.estimator.ModeKeys.TRAIN:
labels = unstack_batch(
labels,
unpad_groundtruth_tensors=train_config.unpad_groundtruth_tensors)
elif mode == tf.estimator.ModeKeys.EVAL:
      # For evaluating on train data, it is necessary to check whether groundtruth
# must be unpadded.
boxes_shape = (
labels[fields.InputDataFields.groundtruth_boxes].get_shape()
.as_list())
unpad_groundtruth_tensors = boxes_shape[1] is not None and not use_tpu
labels = unstack_batch(
labels, unpad_groundtruth_tensors=unpad_groundtruth_tensors)
if mode in (tf.estimator.ModeKeys.TRAIN, tf.estimator.ModeKeys.EVAL):
gt_boxes_list = labels[fields.InputDataFields.groundtruth_boxes]
gt_classes_list = labels[fields.InputDataFields.groundtruth_classes]
gt_masks_list = None
if fields.InputDataFields.groundtruth_instance_masks in labels:
gt_masks_list = labels[
fields.InputDataFields.groundtruth_instance_masks]
gt_keypoints_list = None
if fields.InputDataFields.groundtruth_keypoints in labels:
gt_keypoints_list = labels[fields.InputDataFields.groundtruth_keypoints]
gt_weights_list = None
if fields.InputDataFields.groundtruth_weights in labels:
gt_weights_list = labels[fields.InputDataFields.groundtruth_weights]
gt_confidences_list = None
if fields.InputDataFields.groundtruth_confidences in labels:
gt_confidences_list = labels[
fields.InputDataFields.groundtruth_confidences]
gt_is_crowd_list = None
if fields.InputDataFields.groundtruth_is_crowd in labels:
gt_is_crowd_list = labels[fields.InputDataFields.groundtruth_is_crowd]
detection_model.provide_groundtruth(
groundtruth_boxes_list=gt_boxes_list,
groundtruth_classes_list=gt_classes_list,
groundtruth_confidences_list=gt_confidences_list,
groundtruth_masks_list=gt_masks_list,
groundtruth_keypoints_list=gt_keypoints_list,
groundtruth_weights_list=gt_weights_list,
groundtruth_is_crowd_list=gt_is_crowd_list)
preprocessed_images = features[fields.InputDataFields.image]
if use_tpu and train_config.use_bfloat16:
with tf.contrib.tpu.bfloat16_scope():
prediction_dict = detection_model.predict(
preprocessed_images,
features[fields.InputDataFields.true_image_shape])
for k, v in prediction_dict.items():
if v.dtype == tf.bfloat16:
prediction_dict[k] = tf.cast(v, tf.float32)
else:
prediction_dict = detection_model.predict(
preprocessed_images,
features[fields.InputDataFields.true_image_shape])
if mode in (tf.estimator.ModeKeys.EVAL, tf.estimator.ModeKeys.PREDICT):
detections = detection_model.postprocess(
prediction_dict, features[fields.InputDataFields.true_image_shape])
if mode == tf.estimator.ModeKeys.TRAIN:
if train_config.fine_tune_checkpoint and hparams.load_pretrained:
if not train_config.fine_tune_checkpoint_type:
# train_config.from_detection_checkpoint field is deprecated. For
# backward compatibility, set train_config.fine_tune_checkpoint_type
# based on train_config.from_detection_checkpoint.
if train_config.from_detection_checkpoint:
train_config.fine_tune_checkpoint_type = 'detection'
else:
train_config.fine_tune_checkpoint_type = 'classification'
asg_map = detection_model.restore_map(
fine_tune_checkpoint_type=train_config.fine_tune_checkpoint_type,
load_all_detection_checkpoint_vars=(
train_config.load_all_detection_checkpoint_vars))
available_var_map = (
variables_helper.get_variables_available_in_checkpoint(
asg_map,
train_config.fine_tune_checkpoint,
include_global_step=False))
if use_tpu:
def tpu_scaffold():
tf.train.init_from_checkpoint(train_config.fine_tune_checkpoint,
available_var_map)
return tf.train.Scaffold()
scaffold_fn = tpu_scaffold
else:
tf.train.init_from_checkpoint(train_config.fine_tune_checkpoint,
available_var_map)
if mode in (tf.estimator.ModeKeys.TRAIN, tf.estimator.ModeKeys.EVAL):
losses_dict = detection_model.loss(
prediction_dict, features[fields.InputDataFields.true_image_shape])
losses = [loss_tensor for loss_tensor in losses_dict.values()]
if train_config.add_regularization_loss:
regularization_losses = detection_model.regularization_losses()
if regularization_losses:
regularization_loss = tf.add_n(
regularization_losses, name='regularization_loss')
losses.append(regularization_loss)
losses_dict['Loss/regularization_loss'] = regularization_loss
total_loss = tf.add_n(losses, name='total_loss')
losses_dict['Loss/total_loss'] = total_loss
if 'graph_rewriter_config' in configs:
graph_rewriter_fn = graph_rewriter_builder.build(
configs['graph_rewriter_config'], is_training=is_training)
graph_rewriter_fn()
# TODO(rathodv): Stop creating optimizer summary vars in EVAL mode once we
# can write learning rate summaries on TPU without host calls.
global_step = tf.train.get_or_create_global_step()
training_optimizer, optimizer_summary_vars = optimizer_builder.build(
train_config.optimizer)
if mode == tf.estimator.ModeKeys.TRAIN:
if use_tpu:
training_optimizer = tf.contrib.tpu.CrossShardOptimizer(
training_optimizer)
# Optionally freeze some layers by setting their gradients to be zero.
trainable_variables = None
include_variables = (
train_config.update_trainable_variables
if train_config.update_trainable_variables else None)
exclude_variables = (
train_config.freeze_variables
if train_config.freeze_variables else None)
trainable_variables = tf.contrib.framework.filter_variables(
tf.trainable_variables(),
include_patterns=include_variables,
exclude_patterns=exclude_variables)
clip_gradients_value = None
if train_config.gradient_clipping_by_norm > 0:
clip_gradients_value = train_config.gradient_clipping_by_norm
if not use_tpu:
for var in optimizer_summary_vars:
tf.summary.scalar(var.op.name, var)
summaries = [] if use_tpu else None
if train_config.summarize_gradients:
summaries = ['gradients', 'gradient_norm', 'global_gradient_norm']
train_op = tf.contrib.layers.optimize_loss(
loss=total_loss,
global_step=global_step,
learning_rate=None,
clip_gradients=clip_gradients_value,
optimizer=training_optimizer,
update_ops=detection_model.updates(),
variables=trainable_variables,
summaries=summaries,
name='') # Preventing scope prefix on all variables.
if mode == tf.estimator.ModeKeys.PREDICT:
exported_output = exporter_lib.add_output_tensor_nodes(detections)
export_outputs = {
tf.saved_model.signature_constants.PREDICT_METHOD_NAME:
tf.estimator.export.PredictOutput(exported_output)
}
eval_metric_ops = None
scaffold = None
if mode == tf.estimator.ModeKeys.EVAL:
class_agnostic = (
fields.DetectionResultFields.detection_classes not in detections)
groundtruth = _prepare_groundtruth_for_eval(
detection_model, class_agnostic,
eval_input_config.max_number_of_boxes)
use_original_images = fields.InputDataFields.original_image in features
if use_original_images:
eval_images = features[fields.InputDataFields.original_image]
true_image_shapes = tf.slice(
features[fields.InputDataFields.true_image_shape], [0, 0], [-1, 3])
original_image_spatial_shapes = features[fields.InputDataFields
.original_image_spatial_shape]
else:
eval_images = features[fields.InputDataFields.image]
true_image_shapes = None
original_image_spatial_shapes = None
eval_dict = eval_util.result_dict_for_batched_example(
eval_images,
features[inputs.HASH_KEY],
detections,
groundtruth,
class_agnostic=class_agnostic,
scale_to_absolute=True,
original_image_spatial_shapes=original_image_spatial_shapes,
true_image_shapes=true_image_shapes)
if class_agnostic:
category_index = label_map_util.create_class_agnostic_category_index()
else:
category_index = label_map_util.create_category_index_from_labelmap(
eval_input_config.label_map_path)
vis_metric_ops = None
if not use_tpu and use_original_images:
eval_metric_op_vis = vis_utils.VisualizeSingleFrameDetections(
category_index,
max_examples_to_draw=eval_config.num_visualizations,
max_boxes_to_draw=eval_config.max_num_boxes_to_visualize,
min_score_thresh=eval_config.min_score_threshold,
use_normalized_coordinates=False)
vis_metric_ops = eval_metric_op_vis.get_estimator_eval_metric_ops(
eval_dict)
# Eval metrics on a single example.
eval_metric_ops = eval_util.get_eval_metric_ops_for_evaluators(
eval_config, list(category_index.values()), eval_dict)
for loss_key, loss_tensor in iter(losses_dict.items()):
eval_metric_ops[loss_key] = tf.metrics.mean(loss_tensor)
for var in optimizer_summary_vars:
eval_metric_ops[var.op.name] = (var, tf.no_op())
if vis_metric_ops is not None:
eval_metric_ops.update(vis_metric_ops)
eval_metric_ops = {str(k): v for k, v in eval_metric_ops.items()}
if eval_config.use_moving_averages:
variable_averages = tf.train.ExponentialMovingAverage(0.0)
variables_to_restore = variable_averages.variables_to_restore()
keep_checkpoint_every_n_hours = (
train_config.keep_checkpoint_every_n_hours)
saver = tf.train.Saver(
variables_to_restore,
keep_checkpoint_every_n_hours=keep_checkpoint_every_n_hours)
scaffold = tf.train.Scaffold(saver=saver)
# EVAL executes on CPU, so use regular non-TPU EstimatorSpec.
if use_tpu and mode != tf.estimator.ModeKeys.EVAL:
return tf.contrib.tpu.TPUEstimatorSpec(
mode=mode,
scaffold_fn=scaffold_fn,
predictions=detections,
loss=total_loss,
train_op=train_op,
eval_metrics=eval_metric_ops,
export_outputs=export_outputs)
else:
if scaffold is None:
keep_checkpoint_every_n_hours = (
train_config.keep_checkpoint_every_n_hours)
saver = tf.train.Saver(
sharded=True,
keep_checkpoint_every_n_hours=keep_checkpoint_every_n_hours,
save_relative_paths=True)
tf.add_to_collection(tf.GraphKeys.SAVERS, saver)
scaffold = tf.train.Scaffold(saver=saver)
return tf.estimator.EstimatorSpec(
mode=mode,
predictions=detections,
loss=total_loss,
train_op=train_op,
eval_metric_ops=eval_metric_ops,
export_outputs=export_outputs,
scaffold=scaffold)
return model_fn
def create_estimator_and_inputs(run_config,
hparams,
pipeline_config_path,
eval_count=1,
config_override=None,
train_steps=None,
sample_1_of_n_eval_examples=1,
sample_1_of_n_eval_on_train_examples=1,
model_fn_creator=create_model_fn,
use_tpu_estimator=False,
use_tpu=False,
num_shards=1,
params=None,
override_eval_num_epochs=True,
save_final_config=False,
**kwargs):
"""Creates `Estimator`, input functions, and steps.
Args:
run_config: A `RunConfig`.
hparams: A `HParams`.
    pipeline_config_path: A path to a pipeline config file.
    eval_count: Number of evaluations to run over the course of training;
      checkpoints are saved every `train_steps // eval_count` steps.
config_override: A pipeline_pb2.TrainEvalPipelineConfig text proto to
override the config from `pipeline_config_path`.
train_steps: Number of training steps. If None, the number of training steps
is set from the `TrainConfig` proto.
sample_1_of_n_eval_examples: Integer representing how often an eval example
should be sampled. If 1, will sample all examples.
sample_1_of_n_eval_on_train_examples: Similar to
`sample_1_of_n_eval_examples`, except controls the sampling of training
data for evaluation.
model_fn_creator: A function that creates a `model_fn` for `Estimator`.
Follows the signature:
* Args:
* `detection_model_fn`: Function that returns `DetectionModel` instance.
* `configs`: Dictionary of pipeline config objects.
* `hparams`: `HParams` object.
* Returns:
`model_fn` for `Estimator`.
use_tpu_estimator: Whether a `TPUEstimator` should be returned. If False,
an `Estimator` will be returned.
use_tpu: Boolean, whether training and evaluation should run on TPU. Only
used if `use_tpu_estimator` is True.
num_shards: Number of shards (TPU cores). Only used if `use_tpu_estimator`
is True.
params: Parameter dictionary passed from the estimator. Only used if
`use_tpu_estimator` is True.
override_eval_num_epochs: Whether to overwrite the number of epochs to
1 for eval_input.
save_final_config: Whether to save final config (obtained after applying
overrides) to `estimator.model_dir`.
**kwargs: Additional keyword arguments for configuration override.
Returns:
A dictionary with the following fields:
'estimator': An `Estimator` or `TPUEstimator`.
'train_input_fn': A training input function.
'eval_input_fns': A list of all evaluation input functions.
'eval_input_names': A list of names for each evaluation input.
'eval_on_train_input_fn': An evaluation-on-train input function.
'predict_input_fn': A prediction input function.
'train_steps': Number of training steps. Either directly from input or from
configuration.
'train_batch_size': train batch size per GPU
"""
get_configs_from_pipeline_file = MODEL_BUILD_UTIL_MAP[
'get_configs_from_pipeline_file']
merge_external_params_with_configs = MODEL_BUILD_UTIL_MAP[
'merge_external_params_with_configs']
create_pipeline_proto_from_configs = MODEL_BUILD_UTIL_MAP[
'create_pipeline_proto_from_configs']
create_train_input_fn = MODEL_BUILD_UTIL_MAP['create_train_input_fn']
create_eval_input_fn = MODEL_BUILD_UTIL_MAP['create_eval_input_fn']
create_predict_input_fn = MODEL_BUILD_UTIL_MAP['create_predict_input_fn']
configs = get_configs_from_pipeline_file(pipeline_config_path,
config_override=config_override)
kwargs.update({
'train_steps': train_steps,
'sample_1_of_n_eval_examples': sample_1_of_n_eval_examples
})
if override_eval_num_epochs:
kwargs.update({'eval_num_epochs': 1})
tf.logging.warning(
'Forced number of epochs for all eval validations to be 1.')
configs = merge_external_params_with_configs(
configs, hparams, kwargs_dict=kwargs)
model_config = configs['model']
train_config = configs['train_config']
train_input_config = configs['train_input_config']
eval_config = configs['eval_config']
eval_input_configs = configs['eval_input_configs']
eval_on_train_input_config = copy.deepcopy(train_input_config)
eval_on_train_input_config.sample_1_of_n_examples = (
sample_1_of_n_eval_on_train_examples)
if override_eval_num_epochs and eval_on_train_input_config.num_epochs != 1:
tf.logging.warning('Expected number of evaluation epochs is 1, but '
'instead encountered `eval_on_train_input_config'
'.num_epochs` = '
'{}. Overwriting `num_epochs` to 1.'.format(
eval_on_train_input_config.num_epochs))
eval_on_train_input_config.num_epochs = 1
# update train_steps from config but only when non-zero value is provided
if train_steps is None and train_config.num_steps != 0:
train_steps = train_config.num_steps
detection_model_fn = functools.partial(
model_builder.build, model_config=model_config)
# Create the input functions for TRAIN/EVAL/PREDICT.
train_input_fn = create_train_input_fn(
train_config=train_config,
train_input_config=train_input_config,
model_config=model_config)
eval_input_fns = [
create_eval_input_fn(
eval_config=eval_config,
eval_input_config=eval_input_config,
model_config=model_config) for eval_input_config in eval_input_configs
]
eval_input_names = [
eval_input_config.name for eval_input_config in eval_input_configs
]
eval_on_train_input_fn = create_eval_input_fn(
eval_config=eval_config,
eval_input_config=eval_on_train_input_config,
model_config=model_config)
predict_input_fn = create_predict_input_fn(
model_config=model_config, predict_input_config=eval_input_configs[0])
export_to_tpu = hparams.get('export_to_tpu', False)
tf.logging.info('create_estimator_and_inputs: use_tpu %s, export_to_tpu %s',
use_tpu, export_to_tpu)
model_fn = model_fn_creator(detection_model_fn, configs, hparams, use_tpu)
run_config = tf.estimator.RunConfig(model_dir=run_config.model_dir,
session_config=run_config.session_config,
save_checkpoints_steps=train_steps // eval_count)
if use_tpu_estimator:
estimator = tf.contrib.tpu.TPUEstimator(
model_fn=model_fn,
train_batch_size=train_config.batch_size,
# For each core, only batch size 1 is supported for eval.
eval_batch_size=num_shards * 1 if use_tpu else 1,
use_tpu=use_tpu,
config=run_config,
# TODO(lzc): Remove conditional after CMLE moves to TF 1.9
params=params if params else {})
else:
estimator = tf.estimator.Estimator(model_fn=model_fn, config=run_config)
# Write the as-run pipeline config to disk.
if run_config.is_chief and save_final_config:
pipeline_config_final = create_pipeline_proto_from_configs(configs)
config_util.save_pipeline_config(pipeline_config_final, estimator.model_dir)
return dict(
estimator=estimator,
train_input_fn=train_input_fn,
eval_input_fns=eval_input_fns,
eval_input_names=eval_input_names,
eval_on_train_input_fn=eval_on_train_input_fn,
predict_input_fn=predict_input_fn,
train_steps=train_steps,
train_batch_size=train_config.batch_size)
def create_train_and_eval_specs(train_input_fn,
eval_input_fns,
eval_on_train_input_fn,
predict_input_fn,
train_steps,
eval_on_train_data=False,
final_exporter_name='Servo',
eval_spec_names=None):
"""Creates a `TrainSpec` and `EvalSpec`s.
Args:
train_input_fn: Function that produces features and labels on train data.
eval_input_fns: A list of functions that produce features and labels on eval
data.
eval_on_train_input_fn: Function that produces features and labels for
evaluation on train data.
predict_input_fn: Function that produces features for inference.
train_steps: Number of training steps.
eval_on_train_data: Whether to evaluate model on training data. Default is
False.
final_exporter_name: String name given to `FinalExporter`.
eval_spec_names: A list of string names for each `EvalSpec`.
Returns:
Tuple of `TrainSpec` and list of `EvalSpecs`. If `eval_on_train_data` is
True, the last `EvalSpec` in the list will correspond to training data. The
    remaining `EvalSpec`s in the list correspond to the evaluation datasets.
"""
train_spec = tf.estimator.TrainSpec(
input_fn=train_input_fn,
      max_steps=train_steps // hvd.size(),  # `TrainSpec` has no `steps` argument; only `max_steps` is available.
hooks=[hvd.BroadcastGlobalVariablesHook(0)])
if eval_spec_names is None:
eval_spec_names = [str(i) for i in range(len(eval_input_fns))]
eval_specs = []
for index, (eval_spec_name, eval_input_fn) in enumerate(
zip(eval_spec_names, eval_input_fns)):
# Uses final_exporter_name as exporter_name for the first eval spec for
# backward compatibility.
if index == 0:
exporter_name = final_exporter_name
else:
exporter_name = '{}_{}'.format(final_exporter_name, eval_spec_name)
exporter = tf.estimator.FinalExporter(
name=exporter_name, serving_input_receiver_fn=predict_input_fn)
eval_specs.append(
tf.estimator.EvalSpec(
name=eval_spec_name,
input_fn=eval_input_fn,
steps=None,
exporters=exporter))
if eval_on_train_data:
eval_specs.append(
tf.estimator.EvalSpec(
name='eval_on_train', input_fn=eval_on_train_input_fn, steps=None))
return train_spec, eval_specs
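# Illustrative wiring sketch (comment only), roughly mirroring model_main.py:
# build the estimator and input functions, derive the specs, and hand them to
# `tf.estimator.train_and_evaluate`:
#
#   train_and_eval_dict = create_estimator_and_inputs(
#       run_config=run_config,
#       hparams=hparams,
#       pipeline_config_path=pipeline_config_path,
#       train_steps=train_steps,
#       save_final_config=True)
#   train_spec, eval_specs = create_train_and_eval_specs(
#       train_and_eval_dict['train_input_fn'],
#       train_and_eval_dict['eval_input_fns'],
#       train_and_eval_dict['eval_on_train_input_fn'],
#       train_and_eval_dict['predict_input_fn'],
#       train_and_eval_dict['train_steps'],
#       eval_on_train_data=False)
#   tf.estimator.train_and_evaluate(
#       train_and_eval_dict['estimator'], train_spec, eval_specs[0])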
def continuous_eval(estimator, model_dir, input_fn, train_steps, name):
"""Perform continuous evaluation on checkpoints written to a model directory.
Args:
estimator: Estimator object to use for evaluation.
model_dir: Model directory to read checkpoints for continuous evaluation.
input_fn: Input function to use for evaluation.
train_steps: Number of training steps. This is used to infer the last
checkpoint and stop evaluation loop.
name: Namescope for eval summary.
"""
def terminate_eval():
tf.logging.info('Terminating eval after 180 seconds of no checkpoints')
return True
for ckpt in tf.contrib.training.checkpoints_iterator(
model_dir, min_interval_secs=180, timeout=None,
timeout_fn=terminate_eval):
tf.logging.info('Starting Evaluation.')
try:
eval_results = estimator.evaluate(
input_fn=input_fn, steps=None, checkpoint_path=ckpt, name=name)
tf.logging.info('Eval results: %s' % eval_results)
# Terminate eval job when final checkpoint is reached
current_step = int(os.path.basename(ckpt).split('-')[1])
if current_step >= train_steps:
tf.logging.info(
'Evaluation finished after training step %d' % current_step)
break
except tf.errors.NotFoundError:
tf.logging.info(
'Checkpoint %s no longer exists, skipping checkpoint' % ckpt)
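# Illustrative call sketch (comment only) for an evaluation-only job that
# watches a training directory for new checkpoints:
#
#   continuous_eval(estimator=train_and_eval_dict['estimator'],
#                   model_dir=checkpoint_dir,
#                   input_fn=train_and_eval_dict['eval_input_fns'][0],
#                   train_steps=train_and_eval_dict['train_steps'],
#                   name='validation_data')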
def populate_experiment(run_config,
hparams,
pipeline_config_path,
train_steps=None,
eval_steps=None,
model_fn_creator=create_model_fn,
**kwargs):
"""Populates an `Experiment` object.
EXPERIMENT CLASS IS DEPRECATED. Please switch to
tf.estimator.train_and_evaluate. As an example, see model_main.py.
Args:
run_config: A `RunConfig`.
hparams: A `HParams`.
pipeline_config_path: A path to a pipeline config file.
train_steps: Number of training steps. If None, the number of training steps
is set from the `TrainConfig` proto.
eval_steps: Number of evaluation steps per evaluation cycle. If None, the
number of evaluation steps is set from the `EvalConfig` proto.
model_fn_creator: A function that creates a `model_fn` for `Estimator`.
Follows the signature:
* Args:
* `detection_model_fn`: Function that returns `DetectionModel` instance.
* `configs`: Dictionary of pipeline config objects.
* `hparams`: `HParams` object.
* Returns:
`model_fn` for `Estimator`.
**kwargs: Additional keyword arguments for configuration override.
Returns:
An `Experiment` that defines all aspects of training, evaluation, and
export.
"""
tf.logging.warning('Experiment is being deprecated. Please use '
'tf.estimator.train_and_evaluate(). See model_main.py for '
'an example.')
train_and_eval_dict = create_estimator_and_inputs(
run_config,
hparams,
pipeline_config_path,
train_steps=train_steps,
eval_steps=eval_steps,
model_fn_creator=model_fn_creator,
save_final_config=True,
**kwargs)
estimator = train_and_eval_dict['estimator']
train_input_fn = train_and_eval_dict['train_input_fn']
eval_input_fns = train_and_eval_dict['eval_input_fns']
predict_input_fn = train_and_eval_dict['predict_input_fn']
train_steps = train_and_eval_dict['train_steps']
export_strategies = [
tf.contrib.learn.utils.saved_model_export_utils.make_export_strategy(
serving_input_fn=predict_input_fn)
]
return tf.contrib.learn.Experiment(
estimator=estimator,
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fns[0],
train_steps=train_steps,
eval_steps=None,
export_strategies=export_strategies,
eval_delay_secs=120,
)
|
TensorFlow2/LanguageModeling/BERT/official/nlp/modeling/layers | layers | transformer | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras-based transformer block layer."""
from __future__ import absolute_import
from __future__ import division
# from __future__ import google_type_annotations
from __future__ import print_function
import tensorflow as tf
from official.nlp.modeling.layers import attention
from official.nlp.modeling.layers import dense_einsum
# @tf.keras.utils.register_keras_serializable(package="Text")
class Transformer(tf.keras.layers.Layer):
"""Transformer layer.
This layer implements the Transformer from "Attention Is All You Need".
(https://arxiv.org/abs/1706.03762).
Attributes:
num_attention_heads: Number of attention heads.
intermediate_size: Size of the intermediate layer.
intermediate_activation: Activation for the intermediate layer.
dropout_rate: Dropout probability for the post-attention and output dropout.
attention_dropout_rate: Dropout probability for within the attention layer.
kernel_initializer: Initializer for dense layer kernels.
bias_initializer: Initializer for dense layer biases.
kernel_regularizer: Regularizer for dense layer kernels.
bias_regularizer: Regularizer for dense layer biases.
activity_regularizer: Regularizer for dense layer activity.
kernel_constraint: Constraint for dense layer kernels.
bias_constraint: Constraint for dense layer kernels.
"""
def __init__(self,
num_attention_heads,
intermediate_size,
intermediate_activation,
dropout_rate=0.0,
attention_dropout_rate=0.0,
kernel_initializer="glorot_uniform",
bias_initializer="zeros",
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
**kwargs):
super(Transformer, self).__init__(**kwargs)
self._num_heads = num_attention_heads
self._intermediate_size = intermediate_size
self._intermediate_activation = intermediate_activation
self._attention_dropout_rate = attention_dropout_rate
self._dropout_rate = dropout_rate
self._kernel_initializer = tf.keras.initializers.get(kernel_initializer)
self._bias_initializer = tf.keras.initializers.get(bias_initializer)
self._kernel_regularizer = tf.keras.regularizers.get(kernel_regularizer)
    self._bias_regularizer = tf.keras.regularizers.get(bias_regularizer)
    # Store the activity regularizer explicitly so that `build` and
    # `get_config` see the value passed by the caller.
    self._activity_regularizer = tf.keras.regularizers.get(activity_regularizer)
self._kernel_constraint = tf.keras.constraints.get(kernel_constraint)
self._bias_constraint = tf.keras.constraints.get(bias_constraint)
def build(self, input_shape):
input_tensor = input_shape[0] if len(input_shape) == 2 else input_shape
input_tensor_shape = tf.TensorShape(input_tensor)
if len(input_tensor_shape) != 3:
raise ValueError("TransformerLayer expects a three-dimensional input of "
"shape [batch, sequence, width].")
batch_size, sequence_length, hidden_size = input_tensor_shape
if len(input_shape) == 2:
mask_tensor_shape = tf.TensorShape(input_shape[1])
expected_mask_tensor_shape = tf.TensorShape(
[batch_size, sequence_length, sequence_length])
if not expected_mask_tensor_shape.is_compatible_with(mask_tensor_shape):
raise ValueError("When passing a mask tensor to TransformerLayer, the "
"mask tensor must be of shape [batch, "
"sequence_length, sequence_length] (here %s). Got a "
"mask tensor of shape %s." %
(expected_mask_tensor_shape, mask_tensor_shape))
if hidden_size % self._num_heads != 0:
raise ValueError(
"The input size (%d) is not a multiple of the number of attention "
"heads (%d)" % (hidden_size, self._num_heads))
self._attention_head_size = int(hidden_size // self._num_heads)
self._attention_layer = attention.Attention(
num_heads=self._num_heads,
head_size=self._attention_head_size,
dropout_rate=self._attention_dropout_rate,
kernel_initializer=self._kernel_initializer,
bias_initializer=self._bias_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer,
activity_regularizer=self._activity_regularizer,
kernel_constraint=self._kernel_constraint,
bias_constraint=self._bias_constraint,
name="self_attention")
self._attention_output_dense = dense_einsum.DenseEinsum(
output_shape=hidden_size,
num_summed_dimensions=2,
kernel_initializer=self._kernel_initializer,
bias_initializer=self._bias_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer,
activity_regularizer=self._activity_regularizer,
kernel_constraint=self._kernel_constraint,
bias_constraint=self._bias_constraint,
name="self_attention_output")
self._attention_dropout = tf.keras.layers.Dropout(rate=self._dropout_rate)
self._attention_layer_norm = (
tf.keras.layers.LayerNormalization(
name="self_attention_layer_norm",
axis=-1,
epsilon=1e-12,
dtype=tf.float32))
self._intermediate_dense = dense_einsum.DenseEinsum(
output_shape=self._intermediate_size,
activation=None,
kernel_initializer=self._kernel_initializer,
bias_initializer=self._bias_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer,
activity_regularizer=self._activity_regularizer,
kernel_constraint=self._kernel_constraint,
bias_constraint=self._bias_constraint,
name="intermediate")
self._intermediate_activation_layer = tf.keras.layers.Activation(
self._intermediate_activation)
self._output_dense = dense_einsum.DenseEinsum(
output_shape=hidden_size,
kernel_initializer=self._kernel_initializer,
bias_initializer=self._bias_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer,
activity_regularizer=self._activity_regularizer,
kernel_constraint=self._kernel_constraint,
bias_constraint=self._bias_constraint,
name="output")
self._output_dropout = tf.keras.layers.Dropout(rate=self._dropout_rate)
self._output_layer_norm = tf.keras.layers.LayerNormalization(
name="output_layer_norm", axis=-1, epsilon=1e-12, dtype=tf.float32)
super(Transformer, self).build(input_shape)
def get_config(self):
config = {
"num_attention_heads":
self._num_heads,
"intermediate_size":
self._intermediate_size,
"intermediate_activation":
self._intermediate_activation,
"dropout_rate":
self._dropout_rate,
"attention_dropout_rate":
self._attention_dropout_rate,
"kernel_initializer":
tf.keras.initializers.serialize(self._kernel_initializer),
"bias_initializer":
tf.keras.initializers.serialize(self._bias_initializer),
"kernel_regularizer":
tf.keras.regularizers.serialize(self._kernel_regularizer),
"bias_regularizer":
tf.keras.regularizers.serialize(self._bias_regularizer),
"activity_regularizer":
tf.keras.regularizers.serialize(self._activity_regularizer),
"kernel_constraint":
tf.keras.constraints.serialize(self._kernel_constraint),
"bias_constraint":
tf.keras.constraints.serialize(self._bias_constraint)
}
base_config = super(Transformer, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def call(self, inputs):
if isinstance(inputs, (list, tuple)) and len(inputs) == 2:
input_tensor, attention_mask = inputs
else:
input_tensor, attention_mask = (inputs, None)
attention_inputs = [input_tensor, input_tensor]
if attention_mask is not None:
attention_inputs.append(attention_mask)
attention_output = self._attention_layer(attention_inputs)
attention_output = self._attention_output_dense(attention_output)
attention_output = self._attention_dropout(attention_output)
# Use float32 in keras layer norm and the gelu activation in the
# intermediate dense layer for numeric stability
if self.dtype == tf.float16:
input_tensor = tf.cast(input_tensor, tf.float32)
attention_output = tf.cast(attention_output, tf.float32)
attention_output = self._attention_layer_norm(input_tensor +
attention_output)
intermediate_output = self._intermediate_dense(attention_output)
if self.dtype == tf.float16:
# Casts to float32 so that activation is done in float32.
intermediate_output = tf.cast(intermediate_output, tf.float32)
intermediate_output = self._intermediate_activation_layer(
intermediate_output)
intermediate_output = tf.cast(intermediate_output, tf.float16)
else:
intermediate_output = self._intermediate_activation_layer(
intermediate_output)
layer_output = self._output_dense(intermediate_output)
layer_output = self._output_dropout(layer_output)
# Use float32 in keras layer norm for numeric stability
if self.dtype == tf.float16:
layer_output = tf.cast(layer_output, tf.float32)
layer_output = self._output_layer_norm(layer_output + attention_output)
if self.dtype == tf.float16:
layer_output = tf.cast(layer_output, tf.float16)
return layer_output
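# A minimal usage sketch (not part of the original module): it assumes TensorFlow
# and the companion `attention` / `dense_einsum` modules imported at the top of
# this file are available, and uses illustrative shapes only.
if __name__ == "__main__":
  demo_layer = Transformer(
      num_attention_heads=4,
      intermediate_size=512,
      intermediate_activation="gelu")
  hidden_states = tf.random.uniform([2, 16, 128])  # [batch, sequence, width]
  attention_mask = tf.ones([2, 16, 16])            # [batch, sequence, sequence]
  outputs = demo_layer([hidden_states, attention_mask])
  print(outputs.shape)                             # (2, 16, 128), same shape as the input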
|
MxNet/Classification/RN50v1.5 | RN50v1.5 | requirements | git+https://github.com/NVIDIA/dllogger@v0.1.0#egg=dllogger |
PyTorch/SpeechSynthesis/Tacotron2/trtis_cpp/src/trt/waveglow | waveglow | waveGlowStreamingInstance | /*
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef TT2I_WAVEGLOWSTREAMINGINSTANCE_H
#define TT2I_WAVEGLOWSTREAMINGINSTANCE_H
#include "binding.h"
#include "engineDriver.h"
#include "normalDistribution.h"
#include "timedObject.h"
#include "trtPtr.h"
namespace nvinfer1
{
class ICudaEngine;
}
namespace tts
{
class WaveGlowStreamingInstance : public TimedObject, public EngineDriver
{
public:
/**
   * @brief Create a new WaveGlowStreamingInstance from a deserialized engine.
*
* @param engine The deserialized engine.
*/
WaveGlowStreamingInstance(TRTPtr<nvinfer1::ICudaEngine>&& engine);
// disable copying
WaveGlowStreamingInstance(const WaveGlowStreamingInstance& other) = delete;
WaveGlowStreamingInstance& operator=(const WaveGlowStreamingInstance& other)
= delete;
/**
   * @brief Initialize for a new round of inference. This method must be called
   * before calls to `inferNext()`; however, the stream does not need to be
   * synchronized on in between.
*
* @param stream The stream to initialize on.
*/
void startInference(int batchSize, cudaStream_t stream);
/**
   * @brief Perform inference on a chunk of mel-scale spectrograms. The stream
   * must be synchronized on before reading the output or modifying the input.
*
* @param stream The stream to perform inference on.
* @param batchSize The number of items in the batch.
* @param melsDevice The mel-scale spectrograms of all batch items.
* @param numMels The number of mel-scale spectrograms in each item.
* @param samplesDevice The output waveform for all items. This should be of
* size equal to the result of `getRequiredOutputBufferSize(batchSize)`.
* @param numSamples The number of samples per item generated.
*/
void inferNext(
cudaStream_t stream,
const float* melsDevice,
const int* numMels,
float* samplesDevice,
int* numSamples);
/**
* @brief Get the spacing between the start of each input item in terms of
* mel-spectrograms. This also serves as the maximum input length.
*
* @return The number of mel-spectrogram frames.
*/
int getMelSpacing() const;
/**
* @brief Get the maximum number of useful samples that will be produced.
* The space allocated for the output vector may need to be longer than this.
   * `getRequiredOutputBufferSize()` should be used for determining the amount of
* space to allocate for output.
*
* @return The maximum number.
*/
int getMaxOutputLength() const;
/**
* @brief Get the required size of the output buffer that will be given to
* `inferNext()`.
*
* @param batchSize The number of items in the batch.
*
* @return The required size in number of samples (floats).
*/
int getRequiredOutputBufferSize(const int batchSize) const;
/**
* @brief Get the number of samples that will be generated per mel-scale
* spectrogram.
*
* @return The number of samples.
*/
int getNumberOfSamplesPerFrame() const;
/**
* @brief Get the number of mel-scale spectrogram channels expected per frame
* of the input.
*
* @return The number of channels.
*/
int getNumMelChannels() const;
/**
   * @brief Get the spacing between the start of each item in the batch in the
   * output.
*
* @return The spacing.
*/
int getOutputSpacing() const;
private:
int mChunkSize;
int mSamplesPerFrame;
int mChunkSampleSize;
int mTruncatedChunkSampleSize;
int mInputChannels;
int mZChannels;
int mBatchSize;
Binding mBinding;
TRTPtr<nvinfer1::IExecutionContext> mContext;
NormalDistribution mRand;
CudaMemory<float> mZ;
};
} // namespace tts
#endif
|
TensorFlow/Detection/SSD/models/research/slim | slim | export_inference_graph_test | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for export_inference_graph."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tensorflow as tf
from tensorflow.python.platform import gfile
import export_inference_graph
class ExportInferenceGraphTest(tf.test.TestCase):
def testExportInferenceGraph(self):
tmpdir = self.get_temp_dir()
output_file = os.path.join(tmpdir, 'inception_v3.pb')
flags = tf.app.flags.FLAGS
flags.output_file = output_file
flags.model_name = 'inception_v3'
flags.dataset_dir = tmpdir
export_inference_graph.main(None)
self.assertTrue(gfile.Exists(output_file))
if __name__ == '__main__':
tf.test.main()
|
TensorFlow2/LanguageModeling/ELECTRA/data | data | BookscorpusTextFormatting | # Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glob
import os
class BookscorpusTextFormatting:
def __init__(self, books_path, output_filename, recursive = False):
self.books_path = books_path
self.recursive = recursive
self.output_filename = output_filename
# This puts one book per line
def merge(self):
with open(self.output_filename, mode='w', newline='\n') as ofile:
            pattern = os.path.join(self.books_path, '**', '*.txt') if self.recursive else os.path.join(self.books_path, '*.txt')
            for filename in glob.glob(pattern, recursive=self.recursive):
with open(filename, mode='r', encoding='utf-8-sig', newline='\n') as file:
for line in file:
if line.strip() != '':
ofile.write(line.strip() + ' ')
ofile.write("\n\n") |
Tools/PyTorch/TimeSeriesPredictionPlatform/models/tft_pyt/triton/runner/maintainer/docker | docker | __init__ | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
PyTorch/Translation/Transformer/fairseq | fairseq | options | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
#
#-------------------------------------------------------------------------
#
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import torch
from fairseq.models import ARCH_MODEL_REGISTRY, ARCH_CONFIG_REGISTRY
from fairseq.criterions import CRITERION_REGISTRY
from fairseq.optim import OPTIMIZER_REGISTRY
from fairseq.optim.lr_scheduler import LR_SCHEDULER_REGISTRY
def get_training_parser():
parser = get_parser('Trainer')
add_dataset_args(parser, train=True, gen=True)
add_model_args(parser)
add_optimization_args(parser)
add_checkpoint_args(parser)
add_inference_args(parser)
add_perf_args(parser)
return parser
def get_inference_parser():
parser = get_parser('Generation')
add_dataset_args(parser, gen=True)
add_inference_args(parser)
add_perf_args(parser)
return parser
def parse_args_and_arch(parser, input_args=None, parse_known=False):
# The parser doesn't know about model/criterion/optimizer-specific args, so
# we parse twice. First we parse the model/criterion/optimizer, then we
# parse a second time after adding the *-specific arguments.
# If input_args is given, we will parse those args instead of sys.argv.
args, _ = parser.parse_known_args(input_args)
# Add model-specific args to parser.
if hasattr(args, 'arch'):
model_specific_group = parser.add_argument_group(
'Model-specific configuration',
# Only include attributes which are explicitly given as command-line
# arguments or which have default values.
argument_default=argparse.SUPPRESS,
)
ARCH_MODEL_REGISTRY[args.arch].add_args(model_specific_group)
# Add *-specific args to parser.
if hasattr(args, 'optimizer'):
OPTIMIZER_REGISTRY[args.optimizer].add_args(parser)
if hasattr(args, 'lr_scheduler'):
LR_SCHEDULER_REGISTRY[args.lr_scheduler].add_args(parser)
# Parse a second time.
if parse_known:
args, extra = parser.parse_known_args(input_args)
else:
args = parser.parse_args(input_args)
extra = None
# Post-process args.
if hasattr(args, 'max_sentences_valid') and args.max_sentences_valid is None:
args.max_sentences_valid = args.max_sentences
args.max_positions = (args.max_source_positions, args.max_target_positions)
if hasattr(args, 'target_bleu') and (args.online_eval or args.target_bleu) and not args.remove_bpe:
args.remove_bpe = '@@ '
# Apply architecture configuration.
if hasattr(args, 'arch'):
ARCH_CONFIG_REGISTRY[args.arch](args)
if parse_known:
return args, extra
else:
return args
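# A minimal sketch of the two-pass flow above (hypothetical arguments; it assumes
# the fairseq registries are populated by importing this package):
#
#     parser = get_training_parser()
#     args = parse_args_and_arch(parser, input_args=[
#         '/path/to/data', '--arch', 'transformer_wmt_en_de',
#         '--optimizer', 'adam', '--adam-betas', '(0.9, 0.98)',
#     ])
#     # the second pass accepts --adam-betas because the adam optimizer
#     # registered it after the first pass discovered --optimizer=adam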
def get_parser(desc):
parser = argparse.ArgumentParser(
description='Facebook AI Research Sequence-to-Sequence Toolkit -- ' + desc)
parser.add_argument('--log-interval', type=int, default=500, metavar='N',
help='print aggregated stats and flush json log every N iteration')
parser.add_argument('--seed', default=1, type=int, metavar='N',
help='pseudo random number generator seed')
parser.add_argument('--amp', action='store_true',
help='use Automatic Mixed Precision')
parser.add_argument('--stat-file', type=str, default='run_log.json',
help='Name of the file containing DLLogger output')
parser.add_argument('--save-dir', metavar='DIR', default='results',
help='path to save checkpoints and logs')
parser.add_argument('--do-sanity-check', action='store_true',
help='Perform evaluation on test set before running the training')
return parser
def add_dataset_args(parser, train=False, gen=False):
group = parser.add_argument_group('Dataset and data loading')
group.add_argument('--max-tokens', type=int, metavar='N',
help='maximum number of tokens in a batch')
group.add_argument('--max-sentences', '--batch-size', type=int, metavar='N',
help='maximum number of sentences in a batch')
parser.add_argument('-s', '--source-lang', default=None, metavar='SRC',
help='source language')
parser.add_argument('-t', '--target-lang', default=None, metavar='TARGET',
help='target language')
parser.add_argument('--raw-text', action='store_true',
help='load raw text dataset')
parser.add_argument('--left-pad-source', default=True, type=bool, metavar='BOOL',
help='pad the source on the left (default: True)')
parser.add_argument('--left-pad-target', default=False, type=bool, metavar='BOOL',
help='pad the target on the left (default: False)')
parser.add_argument('--max-source-positions', default=1024, type=int, metavar='N',
help='max number of tokens in the source sequence')
parser.add_argument('--max-target-positions', default=1024, type=int, metavar='N',
help='max number of tokens in the target sequence')
parser.add_argument('--pad-sequence', default=1, type=int, metavar='N',
help='Pad sequences to a multiple of N')
if train:
parser.add_argument('data', metavar='DIR', help='path to data directory')
group.add_argument('--train-subset', default='train', metavar='SPLIT',
choices=['train', 'valid', 'test'],
help='data subset to use for training (train, valid, test)')
group.add_argument('--valid-subset', default='valid', metavar='SPLIT',
help='comma separated list of data subsets to use for validation'
' (train, valid, valid1, test, test1)')
group.add_argument('--max-sentences-valid', type=int, metavar='N',
help='maximum number of sentences in a validation batch'
' (defaults to --max-sentences)')
if gen:
group.add_argument('--gen-subset', default='test', metavar='SPLIT',
help='data subset to generate (train, valid, test)')
group.add_argument('--num-shards', default=1, type=int, metavar='N',
help='shard generation over N shards')
group.add_argument('--shard-id', default=0, type=int, metavar='ID',
help='id of the shard to generate (id < num_shards)')
return group
def add_optimization_args(parser):
group = parser.add_argument_group('Optimization')
group.add_argument('--max-epoch', '--me', default=0, type=int, metavar='N',
help='force stop training at specified epoch')
group.add_argument('--max-update', '--mu', default=0, type=int, metavar='N',
help='force stop training at specified update')
group.add_argument('--target-bleu', default=0.0, type=float, metavar='TARGET',
help='force stop training after reaching target bleu')
group.add_argument('--clip-norm', default=25, type=float, metavar='NORM',
help='clip threshold of gradients')
group.add_argument('--update-freq', default=[1], nargs='+', type=int,
help='update parameters every N_i batches, when in epoch i')
# Optimizer definitions can be found under fairseq/optim/
group.add_argument('--optimizer', default='nag', metavar='OPT',
choices=OPTIMIZER_REGISTRY.keys(),
help='optimizer: {} (default: nag)'.format(', '.join(OPTIMIZER_REGISTRY.keys())))
group.add_argument('--lr', '--learning-rate', default=[0.25], nargs='+', type=float,
help='learning rate for the first N epochs; all epochs >N using LR_N'
' (note: this may be interpreted differently depending on --lr-scheduler)')
group.add_argument('--momentum', default=0.99, type=float, metavar='M',
help='momentum factor')
group.add_argument('--weight-decay', '--wd', default=0.0, type=float, metavar='WD',
help='weight decay')
# Learning rate schedulers can be found under fairseq/optim/lr_scheduler/
group.add_argument('--lr-scheduler', default='reduce_lr_on_plateau',
help='learning rate scheduler: {} (default: reduce_lr_on_plateau)'.format(
', '.join(LR_SCHEDULER_REGISTRY.keys())))
group.add_argument('--lr-shrink', default=0.1, type=float, metavar='LS',
help='learning rate shrink factor for annealing, lr_new = (lr * lr_shrink)')
group.add_argument('--min-lr', default=1e-5, type=float, metavar='LR',
help='minimum learning rate')
# Criterion args
parser.add_argument('--label-smoothing', default=0., type=float, metavar='D',
help='epsilon for label smoothing, 0 means no label smoothing')
return group
def add_checkpoint_args(parser):
group = parser.add_argument_group('Checkpointing')
group.add_argument('--restore-file', default='checkpoint_last.pt',
help='filename in save-dir from which to load checkpoint')
group.add_argument('--save-interval', type=int, default=1, metavar='N',
help='save a checkpoint every N epochs')
group.add_argument('--no-save', action='store_true',
help='don\'t save models or checkpoints')
group.add_argument('--no-epoch-checkpoints', action='store_true',
help='only store last and best checkpoints')
group.add_argument('--validate-interval', type=int, default=1, metavar='N',
help='validate every N epochs')
return group
def add_common_eval_args(group):
group.add_argument('--path', metavar='FILE',
help='path(s) to model file(s), colon separated')
group.add_argument('--file', metavar='FILE', default=None, type=str,
help='path to a file with input data for inference')
group.add_argument('--remove-bpe', nargs='?', const='@@ ', default=None,
help='remove BPE tokens before scoring')
group.add_argument('--cpu', action='store_true', help='generate on CPU')
group.add_argument('--quiet', action='store_true',
help='only print final scores')
def add_inference_args(parser):
group = parser.add_argument_group('Generation')
add_common_eval_args(group)
group.add_argument('--beam', default=4, type=int, metavar='N',
help='beam size')
group.add_argument('--nbest', default=1, type=int, metavar='N',
help='number of hypotheses to output')
group.add_argument('--max-len-a', default=0, type=float, metavar='N',
help=('generate sequences of maximum length ax + b, '
'where x is the source length'))
group.add_argument('--max-len-b', default=200, type=int, metavar='N',
help=('generate sequences of maximum length ax + b, '
'where x is the source length'))
group.add_argument('--min-len', default=1, type=float, metavar='N',
help=('minimum generation length'))
group.add_argument('--no-early-stop', action='store_true',
help=('continue searching even after finalizing k=beam '
'hypotheses; this is more correct, but increases '
'generation time by 50%%'))
group.add_argument('--unnormalized', action='store_true',
help='compare unnormalized hypothesis scores')
group.add_argument('--no-beamable-mm', action='store_true',
help='don\'t use BeamableMM in attention layers')
group.add_argument('--lenpen', default=1, type=float,
help='length penalty: <1.0 favors shorter, >1.0 favors longer sentences')
group.add_argument('--unkpen', default=0, type=float,
help='unknown word penalty: <0 produces more unks, >0 produces fewer')
group.add_argument('--replace-unk', nargs='?', const=True, default=None,
help='perform unknown replacement (optionally with alignment dictionary)')
group.add_argument('--prefix-size', default=0, type=int, metavar='PS',
help='initialize generation by target prefix of given length')
group.add_argument('--sampling', action='store_true',
help='sample hypotheses instead of using beam search')
group.add_argument('--sampling-topk', default=-1, type=int, metavar='PS',
help='sample from top K likely next words instead of all words')
group.add_argument('--sampling-temperature', default=1, type=float, metavar='N',
help='temperature for random sampling')
group.add_argument('--print-alignment', action='store_true',
help='if set, uses attention feedback to compute and print alignment to source tokens')
group.add_argument('--online-eval', action='store_true',
help='score model at the end of epoch')
group.add_argument('--save-predictions', action='store_true',
help='Save predictions produced with online evaluation')
group.add_argument('--test-cased-bleu', action='store_true',
help='Use cased bleu for online eval')
group.add_argument('--bpe-codes', default=None, type=str, metavar='CODES',
help='file with bpe codes')
group.add_argument('--buffer-size', default=64, type=int, metavar='N',
help='read this many sentences into a buffer before processing them')
group.add_argument('--fp16', action='store_true', help='use fp16 precision')
return group
def add_model_args(parser):
group = parser.add_argument_group('Model configuration')
# Model definitions can be found under fairseq/models/
#
# The model architecture can be specified in several ways.
# In increasing order of priority:
# 1) model defaults (lowest priority)
# 2) --arch argument
group.add_argument(
'--arch', '-a', default='fconv', metavar='ARCH', required=True,
choices=ARCH_MODEL_REGISTRY.keys(),
help='model architecture: {} (default: fconv)'.format(
', '.join(ARCH_MODEL_REGISTRY.keys())),
)
# Criterion definitions can be found under fairseq/criterions/
group.add_argument(
'--criterion', default='cross_entropy', metavar='CRIT',
choices=CRITERION_REGISTRY.keys(),
help='training criterion: {} (default: cross_entropy)'.format(
', '.join(CRITERION_REGISTRY.keys())),
)
return group
def add_perf_args(parser):
group = parser.add_argument_group('Performance')
group.add_argument('--fuse-dropout-add', action='store_true',
help='Fuse dropout and residual adds.')
group.add_argument('--fuse-relu-dropout', action='store_true',
help='Fuse Relu and Dropout.')
group.add_argument('--fuse-layer-norm', action='store_true',
help='Use APEX\'s FusedLayerNorm instead of torch.nn.LayerNorm')
return group
|
Tools/DGLPyTorch/SyntheticGraphGeneration/syngen | syngen | __init__ | # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
TensorFlow/Segmentation/UNet_Medical/utils | utils | cmd_util | # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command line argument parsing"""
import argparse
from munch import Munch
PARSER = argparse.ArgumentParser(description="UNet-medical")
PARSER.add_argument('--exec_mode',
choices=['train', 'train_and_predict', 'predict', 'evaluate', 'train_and_evaluate'],
type=str,
default='train_and_evaluate',
help="""Execution mode of running the model""")
PARSER.add_argument('--model_dir',
type=str,
default='./results',
help="""Output directory for information related to the model""")
PARSER.add_argument('--data_dir',
type=str,
required=True,
help="""Input directory containing the dataset for training the model""")
PARSER.add_argument('--log_dir',
type=str,
default=None,
help="""Output directory for training logs""")
PARSER.add_argument('--batch_size',
type=int,
default=1,
help="""Size of each minibatch per GPU""")
PARSER.add_argument('--learning_rate',
type=float,
default=0.0001,
help="""Learning rate coefficient for AdamOptimizer""")
PARSER.add_argument('--crossvalidation_idx',
type=int,
default=None,
help="""Chosen fold for cross-validation. Use None to disable cross-validation""")
PARSER.add_argument('--max_steps',
type=int,
default=1000,
help="""Maximum number of steps (batches) used for training""")
PARSER.add_argument('--weight_decay',
type=float,
default=0.0005,
help="""Weight decay coefficient""")
PARSER.add_argument('--log_every',
type=int,
default=100,
help="""Log performance every n steps""")
PARSER.add_argument('--warmup_steps',
type=int,
default=200,
help="""Number of warmup steps""")
PARSER.add_argument('--seed',
type=int,
default=0,
help="""Random seed""")
PARSER.add_argument('--augment', dest='augment', action='store_true',
help="""Perform data augmentation during training""")
PARSER.add_argument('--benchmark', dest='benchmark', action='store_true',
help="""Collect performance metrics during training""")
PARSER.add_argument('--use_amp', '--amp', dest='use_amp', action='store_true',
help="""Train using TF-AMP""")
PARSER.add_argument('--use_xla', '--xla', dest='use_xla', action='store_true',
help="""Train using XLA""")
PARSER.add_argument('--use_trt', dest='use_trt', action='store_true',
help="""Use TF-TRT""")
PARSER.add_argument('--resume_training', dest='resume_training', action='store_true',
help="""Resume training from a checkpoint""")
def parse_args(flags):
return Munch({
'exec_mode': flags.exec_mode,
'model_dir': flags.model_dir,
'data_dir': flags.data_dir,
'log_dir': flags.log_dir,
'batch_size': flags.batch_size,
'learning_rate': flags.learning_rate,
'crossvalidation_idx': flags.crossvalidation_idx,
'max_steps': flags.max_steps,
'weight_decay': flags.weight_decay,
'log_every': flags.log_every,
'warmup_steps': flags.warmup_steps,
'augment': flags.augment,
'benchmark': flags.benchmark,
'seed': flags.seed,
'use_amp': flags.use_amp,
'use_trt': flags.use_trt,
'use_xla': flags.use_xla,
'resume_training': flags.resume_training,
})
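# A hedged usage sketch: parse a minimal command line and convert the flags to a
# Munch, which allows attribute-style access to the hyperparameters.
#
#     flags = PARSER.parse_args(['--data_dir', '/data', '--exec_mode', 'train', '--amp'])
#     params = parse_args(flags)
#     print(params.exec_mode, params.use_amp)   # -> train True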
|
PyTorch/SpeechRecognition/Jasper/platform | platform | DGX2_Jasper_FP32_8GPU | #!/bin/bash
NUM_GPUS=8 AMP=false BATCH_SIZE=64 GRAD_ACCUMULATION_STEPS=2 bash scripts/train.sh "$@"
|
CUDA-Optimized/FastSpeech/fastspeech/inferencer | inferencer | fastspeech_inferencer | # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from fastspeech.inferencer.inferencer import Inferencer
from fastspeech.utils.logging import tprint
from fastspeech.utils.tensorboard import imshow_to_buf
from fastspeech.utils.pytorch import to_device_async, to_cpu_numpy
from torch.nn import functional as F
class FastSpeechInferencer(Inferencer):
def __init__(self, model_name, model, data_loader, ckpt_path=None, ckpt_file=None, log_path=None, device='cuda', use_fp16=False, seed=None):
super(FastSpeechInferencer, self).__init__(model_name, model, data_loader, ckpt_path, ckpt_file, log_path, device, use_fp16, seed)
def infer(self, acts=None, seq_input_len=None, seq_output_len=None):
inputs = next(self.data_loader_iter)
text_encoded = inputs["text_encoded"]
text_pos = inputs["text_pos"]
if seq_input_len:
text_encoded = F.pad(text_encoded, pad=(0, seq_input_len - text_encoded.size(1))) # (b, t)
text_pos = F.pad(text_pos, pad=(0, seq_input_len - text_pos.size(1))) # (b, t)
text_encoded = to_device_async(text_encoded, self.device)
text_pos = to_device_async(text_pos, self.device)
mel, mel_mask, _ = self.model(
seq=text_encoded,
pos=text_pos,
seq_output_len=seq_output_len,
use_fp16=self.use_fp16,
acts=acts
)
# (B,T,H) => (B,H,T)
mel = mel.transpose(1, 2)
mel_mask = mel_mask.squeeze(2)
outputs = dict()
outputs['mel'] = mel
outputs['mel_mask'] = mel_mask
outputs['text'] = inputs["text_norm"]
if "mel" in inputs:
outputs['mel_tgt'] = inputs["mel"]
if "wav" in inputs:
outputs['wav_tgt'] = inputs["wav"]
if "sr" in inputs:
outputs['sr'] = inputs["sr"]
return outputs
def console_log(self, tag, output):
# console logging
msg = ""
for key, value in sorted(output.items()):
msg += ',\t{}: {}'.format(key, value)
tprint(msg)
# TODO generalize
def tensorboard_log(self, tag, output_tensor):
self.tbwriter.add_image('{}/{}'.format(tag, "mel"), imshow_to_buf(output_tensor['mel']), global_step=self.step)
self.tbwriter.add_image('{}/{}'.format(tag, "mel_tgt"), imshow_to_buf(output_tensor['mel_tgt']), global_step=self.step)
self.tbwriter.add_audio('{}/{}'.format(tag, "wav_tgt"), output_tensor['wav_tgt'], global_step=self.step, sample_rate=int(output_tensor['sr']))
self.tbwriter.add_text('{}/{}'.format(tag, "text"), output_tensor['text'], global_step=self.step) |
TensorFlow/Classification/ConvNets/se-resnext101-32x4d/training | training | DGX1_SE-RNxt101-32x4d_AMP_90E | #!/bin/bash
# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
WORKSPACE=${1:-"/workspace/rn50v15_tf"}
DATA_DIR=${2:-"/data"}
OTHER=${@:3}
if [[ ! -z "${BIND_TO_SOCKET}" ]]; then
BIND_TO_SOCKET="--bind-to socket"
fi
mpiexec --allow-run-as-root ${BIND_TO_SOCKET} -np 8 python3 main.py --arch=se-resnext101-32x4d \
--mode=train_and_evaluate --iter_unit=epoch --num_iter=90 \
--batch_size=96 --warmup_steps=100 --cosine_lr --label_smoothing 0.1 \
--lr_init=0.256 --lr_warmup_epochs=8 --momentum=0.875 --weight_decay=6.103515625e-05 \
--amp --static_loss_scale 128 \
--data_dir=${DATA_DIR}/tfrecords --data_idx_dir=${DATA_DIR}/dali_idx \
--results_dir=${WORKSPACE}/results --weight_init=fan_in ${OTHER}
|
TensorFlow2/Recommendation/WideAndDeep/tests/feature_specs | feature_specs | less_multihot | channel_spec:
label:
- clicked
map: []
multihot_categorical:
- topic_id_list
numerical:
- document_id_document_id_promo_sim_categories
- document_id_document_id_promo_sim_topics
- document_id_document_id_promo_sim_entities
- document_id_promo_ctr
- publisher_id_promo_ctr
- source_id_promo_ctr
- document_id_promo_count
- publish_time_days_since_published
- ad_id_ctr
- advertiser_id_ctr
- campaign_id_ctr
- ad_id_count
- publish_time_promo_days_since_published
onehot_categorical:
- ad_id
- document_id
- platform
- document_id_promo
- campaign_id
- advertiser_id
- source_id
- geo_location
- geo_location_country
- geo_location_state
- publisher_id
- source_id_promo
- publisher_id_promo
feature_spec:
ad_id:
cardinality: 250000
ad_id_count: {}
ad_id_ctr: {}
advertiser_id:
cardinality: 2500
advertiser_id_ctr: {}
campaign_id:
cardinality: 5000
campaign_id_ctr: {}
clicked: {}
document_id:
cardinality: 300000
document_id_document_id_promo_sim_categories: {}
document_id_document_id_promo_sim_entities: {}
document_id_document_id_promo_sim_topics: {}
document_id_promo:
cardinality: 100000
document_id_promo_count: {}
document_id_promo_ctr: {}
geo_location:
cardinality: 2500
geo_location_country:
cardinality: 300
geo_location_state:
cardinality: 2000
platform:
cardinality: 4
publish_time_days_since_published: {}
publish_time_promo_days_since_published: {}
publisher_id:
cardinality: 1000
publisher_id_promo:
cardinality: 1000
publisher_id_promo_ctr: {}
source_id:
cardinality: 4000
source_id_promo:
cardinality: 4000
source_id_promo_ctr: {}
topic_id_list:
cardinality: 350
max_hotness: 3
metadata: {}
source_spec:
test:
- features:
- clicked
- ad_id
- document_id
- platform
- document_id_promo
- campaign_id
- advertiser_id
- source_id
- geo_location
- geo_location_country
- geo_location_state
- publisher_id
- source_id_promo
- publisher_id_promo
- topic_id_list
- document_id_document_id_promo_sim_categories
- document_id_document_id_promo_sim_topics
- document_id_document_id_promo_sim_entities
- document_id_promo_ctr
- publisher_id_promo_ctr
- source_id_promo_ctr
- document_id_promo_count
- publish_time_days_since_published
- ad_id_ctr
- advertiser_id_ctr
- campaign_id_ctr
- ad_id_count
- publish_time_promo_days_since_published
files:
- valid.csv
type: csv
train:
- features:
- clicked
- ad_id
- document_id
- platform
- document_id_promo
- campaign_id
- advertiser_id
- source_id
- geo_location
- geo_location_country
- geo_location_state
- publisher_id
- source_id_promo
- publisher_id_promo
- topic_id_list
- document_id_document_id_promo_sim_categories
- document_id_document_id_promo_sim_topics
- document_id_document_id_promo_sim_entities
- document_id_promo_ctr
- publisher_id_promo_ctr
- source_id_promo_ctr
- document_id_promo_count
- publish_time_days_since_published
- ad_id_ctr
- advertiser_id_ctr
- campaign_id_ctr
- ad_id_count
- publish_time_promo_days_since_published
files:
- train.csv
type: csv
|
TensorFlow2/Classification/ConvNets/efficientnet_v1/B0/training/TF32 | TF32 | convergence_1xA100-80G | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
horovodrun -np 1 bash ./scripts/bind.sh --cpu=exclusive --ib=single -- python3 main.py \
--cfg config/efficientnet_v1/b0_cfg.py \
--mode train_and_eval \
--use_xla \
--model_dir ./output \
--data_dir /data \
--log_steps 100 \
--max_epochs 500 \
--save_checkpoint_freq 5 \
--train_batch_size 512 \
--eval_batch_size 512 \
--augmenter_name autoaugment \
--lr_decay cosine \
--memory_limit 81000 \
--defer_img_mixing \
--moving_average_decay 0.9999 \
--lr_init 0.005
|
PyTorch/Detection/Efficientdet/utils | utils | model_ema | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from copy import deepcopy
import logging
import logging.handlers
from collections import OrderedDict
_logger = logging.getLogger(__name__)
class ModelEma:
""" Model Exponential Moving Average
Keep a moving average of everything in the model state_dict (parameters and buffers).
This is intended to allow functionality like
https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage
A smoothed version of the weights is necessary for some training schemes to perform well.
    E.g. Google's hyper-params for training MNASNet, MobileNet-V3, EfficientNet, etc. that use
    RMSprop with a short 2.4-3 epoch decay period and slow LR decay rate of .96-.99 require EMA
smoothing of weights to match results. Pay attention to the decay constant you are using
relative to your update count per epoch.
To keep EMA from using GPU resources, set device='cpu'. This will save a bit of memory but
disable validation of the EMA weights. Validation will have to be done manually in a separate
process, or after the training stops converging.
    This class is sensitive to where it is initialized in the sequence of model init,
GPU assignment and distributed training wrappers.
I've tested with the sequence in my own train.py for torch.DataParallel, apex.DDP, and single-GPU.
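    A minimal usage sketch (illustrative only; `model`, `loader`, `criterion`,
    `optimizer` and `validate` are assumed to be defined by the caller)::

        ema = ModelEma(model, decay=0.9998)   # pass device='cpu' to keep the copy off the GPU
        for input, target in loader:
            loss = criterion(model(input), target)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            ema.update(model)                 # ema = decay * ema + (1 - decay) * model
        validate(ema.ema)                     # evaluate with the smoothed weights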
"""
def __init__(self, model, decay=0.9999, device='', resume='', remove_params=[]):
# make a copy of the model for accumulating moving average of weights
self.ema = deepcopy(model)
self.ema.eval()
self.decay = decay
self.remove_params = remove_params
self.device = device # perform ema on different device from model if set
if device:
self.ema.to(device=device)
self.ema_has_module = hasattr(self.ema, 'module')
if resume:
self._load_checkpoint(resume)
for p in self.ema.parameters():
p.requires_grad_(False)
def _load_checkpoint(self, checkpoint_path):
checkpoint = torch.load(checkpoint_path, map_location='cpu')
assert isinstance(checkpoint, dict)
if 'state_dict_ema' in checkpoint:
new_state_dict = OrderedDict()
for k, v in checkpoint['state_dict_ema'].items():
# Check if key k is in the remove_params list
if any(remove_str in k for remove_str in self.remove_params):
continue
# ema model may have been wrapped by DataParallel, and need module prefix
if self.ema_has_module:
name = 'module.' + k if not k.startswith('module') else k
else:
name = k
new_state_dict[name] = v
if len(self.remove_params) > 0:
this_dict = self.ema.state_dict()
this_dict.update(new_state_dict)
self.ema.load_state_dict(this_dict)
else:
self.ema.load_state_dict(new_state_dict)
_logger.info("Loaded state_dict_ema")
else:
_logger.warning("Failed to find state_dict_ema, starting from loaded model weights")
def update(self, model):
x = []
y = []
needs_module = hasattr(model, 'module') and not self.ema_has_module
with torch.no_grad():
for ema_v, model_v in zip(self.ema.state_dict().values(), model.state_dict().values()):
x.append(ema_v.type(torch.float32))
if self.device:
model_v = model_v.detach().to(device=self.device)
y.append(model_v.type(torch.float32))
torch._foreach_mul_(x, self.decay)
torch._foreach_add_(x, y, alpha=1.-self.decay)
for ind, ema_v in enumerate(self.ema.state_dict().values()):
ema_v.copy_(x[ind]) |
CUDA-Optimized/FastSpeech/fastspeech/hparams | hparams | trt_fp16_multi_engine | parent_yaml: 'trt_multi_engine.yaml'
use_fp16: True
# TRT
trt_file_path_list: [
"/fastspeech/preprocessed/v0.2.0/fastspeech.fp16.i32.o256.trt",
"/fastspeech/preprocessed/v0.2.0/fastspeech.fp16.i64.o512.trt",
"/fastspeech/preprocessed/v0.2.0/fastspeech.fp16.i96.o768.trt",
"/fastspeech/preprocessed/v0.2.0/fastspeech.fp16.i128.o1024.trt",
] |
PaddlePaddle/Classification/RN50v1.5/utils | utils | save_load | # Copyright (c) 2022 NVIDIA Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import errno
import os
import re
import shutil
import tempfile
import logging
import paddle
_PDOPT_SUFFIX = '.pdopt'
_PDPARAMS_SUFFIX = '.pdparams'
def _mkdir_if_not_exist(path):
"""
    Make the directory if it does not exist, ignoring the exception raised when multiple processes create it simultaneously.
"""
if not os.path.exists(path):
try:
os.makedirs(path)
except OSError as e:
if e.errno == errno.EEXIST and os.path.isdir(path):
logging.warning(
'be happy if some process has already created %s', path)
else:
raise OSError(f'Failed to mkdir {path}')
def _load_state(path):
"""
Load model parameters from .pdparams file.
Args:
path(str): Path to .pdparams file.
Returns:
state(dict): Dict of parameters loaded from file.
"""
if os.path.exists(path + _PDOPT_SUFFIX):
tmp = tempfile.mkdtemp()
dst = os.path.join(tmp, os.path.basename(os.path.normpath(path)))
shutil.copy(path + _PDPARAMS_SUFFIX, dst + _PDPARAMS_SUFFIX)
state = paddle.static.load_program_state(dst)
shutil.rmtree(tmp)
else:
state = paddle.static.load_program_state(path)
return state
def load_params(prog, path, ignore_params=None):
"""
Load model from the given path.
Args:
prog (paddle.static.Program): Load weight to which Program object.
path (string): Model path.
ignore_params (list): Ignore variable to load when finetuning.
"""
if not (os.path.isdir(path) or os.path.exists(path + _PDPARAMS_SUFFIX)):
raise ValueError(f"Model pretrain path {path} does not exists.")
logging.info("Loading parameters from %s...", path)
ignore_set = set()
state = _load_state(path)
    # ignore parameters whose shape does not match
    # between the model and the pretrained weights.
all_var_shape = {}
for block in prog.blocks:
for param in block.all_parameters():
all_var_shape[param.name] = param.shape
ignore_set.update([
name for name, shape in all_var_shape.items()
if name in state and shape != state[name].shape
])
if ignore_params:
all_var_names = [var.name for var in prog.list_vars()]
ignore_list = filter(
lambda var: any([re.match(name, var) for name in ignore_params]),
all_var_names)
ignore_set.update(list(ignore_list))
if len(ignore_set) > 0:
for k in ignore_set:
if k in state:
logging.warning(
'variable %s is already excluded automatically', k)
del state[k]
paddle.static.set_program_state(prog, state)
def init_ckpt(path_to_ckpt, program, exe):
"""
Init from checkpoints or pretrained model in given path.
Args:
path_to_ckpt(str): The path to files of checkpoints,
including '.pdparams' and '.pdopt'.
program(paddle.static.Program): The program to init model.
exe(paddle.static.Executor): The executor to run program.
"""
paddle.static.load(program, path_to_ckpt, exe)
logging.info("Finish initalizing the checkpoint from %s", path_to_ckpt)
def init_pretrained(path_to_pretrained, program):
"""
Init from checkpoints or pretrained model in given path.
Args:
path_to_pretrained(str): The path to file of pretrained model.
program(paddle.static.Program): The program to init model.
"""
    if not isinstance(path_to_pretrained, list):
        pretrained_model = [path_to_pretrained]
    else:
        pretrained_model = path_to_pretrained
for pretrain in pretrained_model:
load_params(program, pretrain)
logging.info("Finish initalizing pretrained parameters from %s",
pretrained_model)
def init_program(args, program, exe):
"""
Init from given checkpoint or pretrained parameters .
Args:
args(Namespace): Arguments obtained from ArgumentParser.
program(paddle.static.Program): The program to init model.
exe(paddle.static.Executor): The executor to run program.
"""
if args.from_checkpoint is not None:
init_ckpt(args.from_checkpoint, program, exe)
logging.info("Training will start at the %d-th epoch",
args.start_epoch)
elif args.from_pretrained_params is not None:
init_pretrained(args.from_pretrained_params, program)
def save_model(program, model_path, epoch_id, prefix):
"""
Save a model to given path.
Args:
program(paddle.static.Program): The program to be saved.
model_path(str): The path to save model.
epoch_id(int): The current epoch id.
"""
if paddle.distributed.get_rank() != 0:
return
model_path = os.path.join(model_path, str(epoch_id))
_mkdir_if_not_exist(model_path)
model_prefix = os.path.join(model_path, prefix)
paddle.static.save(program, model_prefix)
logging.info("Already save model in %s", model_path)
|
PyTorch/Classification/GPUNet | GPUNet | eval | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Evaluating the latency and accuracy of GPUNet
--------Configurations of GPUNet--------
## Without distillation
# GPUNet-2
modelJSON, cpkPath = get_configs(batch=1, latency="1.75ms", gpuType="GV100")
# GPUNet-1
modelJSON, cpkPath = get_configs(batch=1, latency="0.85ms", gpuType="GV100")
# GPUNet-0
modelJSON, cpkPath = get_configs(batch=1, latency="0.65ms", gpuType="GV100")
## With distillation
# GPUNet-D2
modelJSON, cpkPath = get_configs(batch=1, latency="2.25ms-D", gpuType="GV100")
# GPUNet-D1
modelJSON, cpkPath = get_configs(batch=1, latency="1.25ms-D", gpuType="GV100")
# GPUNet-P0
modelJSON, cpkPath = get_configs(batch=1, latency="0.5ms-D", gpuType="GV100")
# GPUNet-P1
modelJSON, cpkPath = get_configs(batch=1, latency="0.8ms-D", gpuType="GV100")
----------------------------------------
What can you do?
1. Test GPUNet accuracy.
2. Benchmark the latency:
Export GPUNet to ONNX, then 'trtexec --onnx=gpunet.onnx --fp16'.
We reported the median GPU compute time. Here is an example,
GPU Compute Time: ..., median = 0.752686 ms, ...
"""
from configs.model_hub import get_configs, get_model_list
from models.gpunet_builder import GPUNet_Builder
modelJSON, cpkPath = get_configs(batch=1, latency="0.65ms", gpuType="GV100")
print(get_model_list(1))
builder = GPUNet_Builder()
model = builder.get_model(modelJSON)
builder.export_onnx(model)
print(model, model.imgRes)
builder.test_model(
model,
testBatch=200,
checkpoint=cpkPath,
imgRes=(3, model.imgRes, model.imgRes),
dtype="fp16",
crop_pct=1,
val_path="/root/data/imagenet/val",
)
|
Tools/PyTorch/TimeSeriesPredictionPlatform/conf/trainer/optimizer | optimizer | ASGD | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
_target_: torch.optim.ASGD
lr: 0.01
lambd: 0.0001
alpha: 0.75
t0: 1000000.0
weight_decay: 0.0
|
PyTorch/SpeechSynthesis/Tacotron2/trtis_cpp/src/trt/util | util | hostMemory | /*
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef TT2I_HOSTMEMORY_H
#define TT2I_HOSTMEMORY_H
#include "cudaUtils.h"
#include <cassert>
#include <cstddef>
#include <cstdlib>
#include <new>
#include <stdexcept>
#include <vector>
namespace tts
{
template <typename T>
class HostMemory
{
public:
HostMemory() : m_ptr(nullptr), m_size(0)
{
// do nothing
}
HostMemory(const size_t size) : HostMemory()
{
m_size = size;
CudaUtils::allocHost(&m_ptr, m_size);
}
HostMemory(const std::vector<T>& data) : HostMemory(data.size())
{
memcpy(m_ptr, data.data(), data.size() * sizeof(*m_ptr));
}
HostMemory(HostMemory&& other) : m_ptr(other.m_ptr), m_size(other.m_size)
{
other.m_ptr = nullptr;
other.m_size = 0;
}
HostMemory& operator=(HostMemory&& other)
{
std::swap(m_ptr, other.m_ptr);
std::swap(m_size, other.m_size);
other.clear();
return *this;
}
// deleted constructors
HostMemory(const HostMemory& other) = delete;
HostMemory& operator=(const HostMemory& other) = delete;
~HostMemory()
{
clear();
}
const T* operator+(const size_t offset) const
{
return m_ptr + offset;
}
T* operator+(const size_t offset)
{
return m_ptr + offset;
}
const T* operator+(const int offset) const
{
assert(offset >= 0);
return m_ptr + offset;
}
T* operator+(const int offset)
{
assert(offset >= 0);
return m_ptr + offset;
}
operator T*()
{
return m_ptr;
}
operator const T*() const
{
return m_ptr;
}
operator bool() const
{
return m_ptr != nullptr;
}
T& operator[](const size_t index)
{
return m_ptr[index];
}
const T& operator[](const size_t index) const
{
return m_ptr[index];
}
T* data()
{
return m_ptr;
}
const T* data() const
{
return m_ptr;
}
size_t size() const
{
return m_size;
}
void zero()
{
memset(m_ptr, 0, sizeof(*m_ptr) * m_size);
}
void clear()
{
if (m_ptr) {
cudaFreeHost(m_ptr);
m_ptr = nullptr;
}
m_size = 0;
}
private:
T* m_ptr;
size_t m_size;
};
} // namespace tts
#endif
|
PyTorch/LanguageModeling/Transformer-XL/pytorch | pytorch | data_utils | # Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glob
import logging
import os
import re
import numpy as np
import sacremoses
import torch
import utils
from utils.vocabulary import OpenAIVocab
from utils.vocabulary import Vocab
class LMOrderedIterator(object):
def __init__(self, data, bsz, bptt, device='cpu', mem_len=None, ext_len=None, warmup=True):
"""
data -- LongTensor -- the LongTensor is strictly ordered
"""
self.bsz = bsz
self.bptt = bptt
self.ext_len = ext_len if ext_len is not None else 0
self.mem_len = mem_len
self.warmup = warmup
self.device = device
# Work out how cleanly we can divide the dataset into bsz parts.
n_step = data.size(0) // bsz
# Trim off any extra elements that wouldn't cleanly fit (remainders).
data = data[:n_step * bsz]
# Evenly divide the data across the bsz batches.
self.data = data.view(bsz, -1).t().contiguous().pin_memory()
if mem_len and warmup:
self.warmup_batches = (mem_len + bptt - 1) // bptt
self.warmup_elems = self.warmup_batches * bptt
warmup_data = self.data.roll((self.warmup_elems, 1), (0, 1))[:self.warmup_elems]
self.data = torch.cat((warmup_data, self.data))
# Partition data for DistributedDataParallel
world_size = utils.distributed.get_world_size()
rank = utils.distributed.get_rank()
self.data = self.data.chunk(world_size, dim=1)[rank]
# Number of mini-batches
self.n_batch = (self.data.size(0) + self.bptt - 1) // self.bptt
self.last_iter = None
def roll(self, seed):
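        """Cyclically shift each data column by a random offset (per-epoch shuffling)."""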
rng = torch.Generator()
rng.manual_seed(seed)
for i in range(self.data.size(1)):
row = self.data[:, i]
shift = torch.randint(0, self.data.size(0), (1,), generator=rng)
row = torch.cat((row[shift:], row[:shift]))
self.data[:, i] = row
def get_batch(self, i, bptt=None):
if bptt is None:
bptt = self.bptt
seq_len = min(bptt, self.data.size(0) - 1 - i)
end_idx = i + seq_len
beg_idx = max(0, i - self.ext_len)
data = self.data[beg_idx:end_idx].to(self.device, non_blocking=True)
target = self.data[i+1:i+1+seq_len].to(self.device, non_blocking=True)
if self.mem_len and self.warmup:
warm = i >= self.warmup_elems
else:
warm = True
return data, target, seq_len, warm
def get_fixlen_iter(self, start=0):
if start != 0:
start += self.bptt
for i in range(start, self.data.size(0) - 1, self.bptt):
self.last_iter = i
yield self.get_batch(i)
def get_varlen_iter(self, start=0, std=5, min_len=5, max_deviation=3):
max_len = self.bptt + max_deviation * std
i = start
while True:
bptt = self.bptt if np.random.random() < 0.95 else self.bptt / 2.
bptt = min(max_len, max(min_len, int(np.random.normal(bptt, std))))
            data, target, seq_len, warm = self.get_batch(i, bptt)
            i += seq_len
            yield data, target, seq_len, warm
if i >= self.data.size(0) - 2:
break
def __iter__(self):
return self.get_fixlen_iter()
class LMShuffledIterator(object):
def __init__(self, data, bsz, bptt, device='cpu', ext_len=None, shuffle=False):
"""
data -- list[LongTensor] -- there is no order among the LongTensors
"""
self.data = data
self.bsz = bsz
self.bptt = bptt
self.ext_len = ext_len if ext_len is not None else 0
self.device = device
self.shuffle = shuffle
def get_sent_stream(self):
# index iterator
epoch_indices = np.random.permutation(len(self.data)) if self.shuffle \
else np.array(range(len(self.data)))
# sentence iterator
for idx in epoch_indices:
yield self.data[idx]
def stream_iterator(self, sent_stream):
# streams for each data in the batch
streams = [None] * self.bsz
data = torch.LongTensor(self.bptt, self.bsz)
target = torch.LongTensor(self.bptt, self.bsz)
n_retain = 0
while True:
# data : [n_retain+bptt x bsz]
# target : [bptt x bsz]
data[n_retain:].fill_(-1)
target.fill_(-1)
valid_batch = True
for i in range(self.bsz):
n_filled = 0
try:
while n_filled < self.bptt:
if streams[i] is None or len(streams[i]) <= 1:
streams[i] = next(sent_stream)
# number of new tokens to fill in
n_new = min(len(streams[i]) - 1, self.bptt - n_filled)
# first n_retain tokens are retained from last batch
data[n_retain+n_filled:n_retain+n_filled+n_new, i] = \
streams[i][:n_new]
target[n_filled:n_filled+n_new, i] = \
streams[i][1:n_new+1]
streams[i] = streams[i][n_new:]
n_filled += n_new
except StopIteration:
valid_batch = False
break
if not valid_batch:
return
data = data.to(self.device)
target = target.to(self.device)
yield data, target, self.bptt
n_retain = min(data.size(0), self.ext_len)
if n_retain > 0:
data[:n_retain] = data[-n_retain:]
data.resize_(n_retain + self.bptt, data.size(1))
def __iter__(self):
# sent_stream is an iterator
sent_stream = self.get_sent_stream()
for batch in self.stream_iterator(sent_stream):
yield batch
class LMMultiFileIterator(LMShuffledIterator):
def __init__(self, paths, vocab, bsz, bptt, device='cpu', ext_len=None,
shuffle=False):
self.paths = paths
self.vocab = vocab
self.bsz = bsz
self.bptt = bptt
self.ext_len = ext_len if ext_len is not None else 0
self.device = device
self.shuffle = shuffle
def get_sent_stream(self, path):
sents = self.vocab.encode_file(path, add_double_eos=True)
if self.shuffle:
np.random.shuffle(sents)
sent_stream = iter(sents)
return sent_stream
def __iter__(self):
if self.shuffle:
np.random.shuffle(self.paths)
for path in self.paths:
# sent_stream is an iterator
sent_stream = self.get_sent_stream(path)
for batch in self.stream_iterator(sent_stream):
yield batch
class Corpus(object):
def __init__(self, path, dataset, vocab, *args, **kwargs):
self.dataset = dataset
if vocab == 'word':
self.vocab = Vocab(*args, **kwargs)
elif vocab == 'bpe':
self.vocab = OpenAIVocab()
else:
raise RuntimeError('Unsupported vocab')
if self.dataset in ['ptb', 'wt2', 'enwik8', 'text8']:
self.vocab.count_file(os.path.join(path, 'train.txt'))
self.vocab.count_file(os.path.join(path, 'valid.txt'))
self.vocab.count_file(os.path.join(path, 'test.txt'))
elif self.dataset == 'wt103':
self.vocab.count_file(os.path.join(path, 'train.txt'))
elif self.dataset == 'lm1b':
train_path_pattern = os.path.join(
path, '1-billion-word-language-modeling-benchmark-r13output',
'training-monolingual.tokenized.shuffled', 'news.en-*')
train_paths = glob.glob(train_path_pattern)
# the vocab will load from file when build_vocab() is called
self.vocab.build_vocab()
if self.dataset in ['ptb', 'wt2', 'wt103']:
self.train = self.vocab.encode_file(
os.path.join(path, 'train.txt'), ordered=True)
self.valid = self.vocab.encode_file(
os.path.join(path, 'valid.txt'), ordered=True)
self.test = self.vocab.encode_file(
os.path.join(path, 'test.txt'), ordered=True)
elif self.dataset in ['enwik8', 'text8']:
self.train = self.vocab.encode_file(
os.path.join(path, 'train.txt'), ordered=True, add_eos=False)
self.valid = self.vocab.encode_file(
os.path.join(path, 'valid.txt'), ordered=True, add_eos=False)
self.test = self.vocab.encode_file(
os.path.join(path, 'test.txt'), ordered=True, add_eos=False)
elif self.dataset == 'lm1b':
self.train = train_paths
self.valid = self.vocab.encode_file(
os.path.join(path, 'valid.txt'), ordered=False, add_double_eos=True)
self.test = self.vocab.encode_file(
os.path.join(path, 'test.txt'), ordered=False, add_double_eos=True)
def get_iterator(self, split, *args, **kwargs):
if split == 'train':
if self.dataset in ['ptb', 'wt2', 'wt103', 'enwik8', 'text8']:
data_iter = LMOrderedIterator(self.train, *args, **kwargs)
elif self.dataset == 'lm1b':
kwargs['shuffle'] = True
data_iter = LMMultiFileIterator(self.train, self.vocab, *args, **kwargs)
elif split in ['valid', 'test']:
data = self.valid if split == 'valid' else self.test
if self.dataset in ['ptb', 'wt2', 'wt103', 'enwik8', 'text8']:
data_iter = LMOrderedIterator(data, *args, **kwargs)
elif self.dataset == 'lm1b':
data_iter = LMShuffledIterator(data, *args, **kwargs)
return data_iter
def get_lm_corpus(datadir, dataset, vocab):
if vocab == 'word':
fn = os.path.join(datadir, 'cache.pt')
elif vocab == 'bpe':
fn = os.path.join(datadir, 'cache.pt.bpe')
else:
raise RuntimeError('Unsupported vocab')
if os.path.exists(fn):
logging.info('Loading cached dataset...')
corpus = torch.load(fn)
else:
logging.info('Producing dataset {}...'.format(dataset))
kwargs = {}
if dataset in ['wt103', 'wt2']:
kwargs['special'] = ['<eos>']
kwargs['lower_case'] = False
elif dataset == 'ptb':
kwargs['special'] = ['<eos>']
kwargs['lower_case'] = True
elif dataset == 'lm1b':
kwargs['special'] = []
kwargs['lower_case'] = False
kwargs['vocab_file'] = os.path.join(datadir, '1b_word_vocab.txt')
elif dataset in ['enwik8', 'text8']:
pass
corpus = Corpus(datadir, dataset, vocab, **kwargs)
with utils.distributed.sync_workers() as rank:
if rank == 0:
torch.save(corpus, fn)
return corpus
def tokenize_raw(text, lang='en'):
mt = sacremoses.MosesTokenizer(lang)
text = mt.tokenize(text, return_str=True)
    text = re.sub(r'&quot;', '"', text)
    text = re.sub(r'&apos;', "'", text)
text = re.sub(r'(\d)\.(\d)', r'\1 @.@ \2', text)
text = re.sub(r'(\d),(\d)', r'\1 @,@ \2', text)
text = re.sub(r'(\w)-(\w)', r'\1 @-@ \2', text)
return text
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description='unit test')
parser.add_argument('--datadir', type=str, default='../data/text8',
help='location of the data corpus')
parser.add_argument('--dataset', type=str, default='text8',
choices=['ptb', 'wt2', 'wt103', 'lm1b', 'enwik8', 'text8'],
help='dataset name')
args = parser.parse_args()
logging.basicConfig(level=logging.INFO)
corpus = get_lm_corpus(args.datadir, args.dataset, vocab='word')
logging.info('Vocab size : {}'.format(len(corpus.vocab.idx2sym)))
|
TensorFlow/Translation/GNMT/utils | utils | math_utils | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Generally useful utility functions."""
from __future__ import print_function
import collections
import six
import os
import tensorflow as tf
from tensorflow.python.framework import function
from tensorflow.python.ops import gen_nn_ops
def sparse_softmax_crossent_with_logits(logits=None, labels=None, name=None):
"""docstring."""
# TODO(jamesqin): merge with tf.nn.sparse_softmax_cross_entropy_with_logits
# Basically forks the tf lib function, only that the result isn't casted
# back to tf.float16 if the input is tf.float16
# TODO(jamesqin): implement a fused kernel to reduce memory footprint.
# Reshape logits and labels to rank 2.
with tf.name_scope(name, "SparseSoftmaxCrossEntropyWithLogits",
[labels, logits]):
labels = tf.convert_to_tensor(labels)
logits = tf.convert_to_tensor(logits)
precise_logits = tf.cast(logits, tf.float32) if (tf.as_dtype(
logits.dtype) == tf.float16) else logits
# Store label shape for result later.
labels_static_shape = labels.get_shape()
labels_shape = tf.shape(labels)
static_shapes_fully_defined = (
labels_static_shape.is_fully_defined() and
logits.get_shape()[:-1].is_fully_defined())
if logits.get_shape().ndims is not None and logits.get_shape().ndims == 0:
raise ValueError(
"Logits cannot be scalars - received shape %s." % logits.get_shape())
if logits.get_shape().ndims is not None and (
labels_static_shape.ndims is not None and
labels_static_shape.ndims != logits.get_shape().ndims - 1):
raise ValueError("Rank mismatch: Rank of labels (received %s) should "
"equal rank of logits minus 1 (received %s)." %
(labels_static_shape.ndims, logits.get_shape().ndims))
if (static_shapes_fully_defined and
labels_static_shape != logits.get_shape()[:-1]):
raise ValueError("Shape mismatch: The shape of labels (received %s) "
"should equal the shape of logits except for the last "
"dimension (received %s)." % (labels_static_shape,
logits.get_shape()))
# Check if no reshapes are required.
if logits.get_shape().ndims == 2:
cost, _ = gen_nn_ops.sparse_softmax_cross_entropy_with_logits(
precise_logits, labels, name=name)
# cost.dtype is always fp32
return cost
# Perform a check of the dynamic shapes if the static shapes are not fully
# defined.
shape_checks = []
if not static_shapes_fully_defined:
xla_compile = (os.environ["xla_compile"] == "true")
use_xla = (os.environ["use_xla"] == "true")
if not (xla_compile or use_xla):
# Assert isn't registered w/ GPU, not working w/ xla.compile()
shape_checks.append(
tf.assert_equal(
tf.shape(labels),
tf.shape(logits)[:-1]))
with tf.control_dependencies(shape_checks):
# Reshape logits to 2 dim, labels to 1 dim.
num_classes = tf.shape(logits)[tf.rank(logits) - 1]
precise_logits = tf.reshape(precise_logits, [-1, num_classes])
labels = tf.reshape(labels, [-1])
# The second output tensor contains the gradients. We use it in
# _CrossEntropyGrad() in nn_grad but not here.
cost, _ = gen_nn_ops.sparse_softmax_cross_entropy_with_logits(
precise_logits, labels, name=name)
cost = tf.reshape(cost, labels_shape)
cost.set_shape(labels_static_shape)
# cost is always fp32
return cost
def clip_by_global_norm(t_list, clip_norm, use_norm=None, name=None):
"""Custom version of tf.clip_by_global_norm that doesn't check numerics."""
if (not isinstance(t_list, collections.Sequence)
or isinstance(t_list, six.string_types)):
raise TypeError("t_list should be a sequence")
t_list = list(t_list)
if use_norm is None:
use_norm = tf.global_norm(t_list, name)
with tf.name_scope(name, "clip_by_global_norm", t_list + [clip_norm]) as name:
# Calculate L2-norm, clip elements by ratio of clip_norm to L2-norm
scale = clip_norm * tf.minimum(
1.0 / use_norm,
tf.constant(1.0, dtype=use_norm.dtype) / clip_norm)
values = [
tf.convert_to_tensor(
t.values if isinstance(t, tf.IndexedSlices) else t,
name="t_%d" % i)
if t is not None else t
for i, t in enumerate(t_list)]
values_clipped = []
for i, v in enumerate(values):
if v is None:
values_clipped.append(None)
else:
with tf.colocate_with(v):
values_clipped.append(
tf.identity(v * scale, name="%s_%d" % (name, i)))
list_clipped = [
tf.IndexedSlices(c_v, t.indices, t.dense_shape)
if isinstance(t, tf.IndexedSlices)
else c_v
for (c_v, t) in zip(values_clipped, t_list)]
return list_clipped, use_norm
def BatchMatMul(a, b):
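  """Batched matmul; optionally computed in fp32 and cast back (env-controlled)."""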
use_fp32_batch_matmul = (os.environ["use_fp32_batch_matmul"] == "true")
xla_compile = (os.environ["xla_compile"] == "true")
if use_fp32_batch_matmul:
def DoFn(a, b):
dtype = a.dtype
a = tf.to_float(a)
b = tf.to_float(b)
return tf.cast(tf.matmul(a, b), dtype)
    # If using xla_compile, the fwd and bwd per tower are wrapped in xla.compile
if not xla_compile:
DoFn = function.Defun(noinline=True)(DoFn)
res = DoFn(a, b)
res.set_shape((None, None, b.shape[-1].value))
else:
# If xla_compile, leave to xla to handle the casts.
res = DoFn(a, b)
else:
res = tf.matmul(a, b)
return res
|
Tools/PyTorch/TimeSeriesPredictionPlatform/models/tft_pyt/triton/runner | runner | config_NVIDIA-T4 | checkpoints:
- name: electricity_bin
url: https://api.ngc.nvidia.com/v2/models/nvidia/tft_pyt_ckpt_base_eletricity_amp/versions/21.06.0/zip
- name: traffic_bin
url: https://api.ngc.nvidia.com/v2/models/nvidia/tft_pyt_ckpt_base_traffic_amp/versions/21.06.0/zip
configurations:
- accelerator: none
batch_size:
- 1
- 2
- 4
- 8
- 16
- 32
- 64
- 128
- 256
- 512
- 1024
batch_sizes: 1 2 4 8 16 32 64 128 256 512 1024
capture_cuda_graph: 0
checkpoint_variant: electricity_bin
dataset: electricity_bin
device: gpu
export_format: onnx
export_precision: fp32
format: trt
max_batch_size: 1024
precision: fp16
request_count: 500
triton_gpu_engine_count: 2
triton_max_queue_delay: 1
triton_preferred_batch_sizes: 512 1024
- accelerator: none
batch_size:
- 1
- 2
- 4
- 8
- 16
- 32
- 64
- 128
- 256
- 512
- 1024
batch_sizes: 1 2 4 8 16 32 64 128 256 512 1024
capture_cuda_graph: 0
checkpoint_variant: traffic_bin
dataset: traffic_bin
device: gpu
export_format: onnx
export_precision: fp32
format: trt
max_batch_size: 1024
precision: fp16
request_count: 500
triton_gpu_engine_count: 2
triton_max_queue_delay: 1
triton_preferred_batch_sizes: 512 1024
- accelerator: none
batch_size:
- 1
- 2
- 4
- 8
- 16
- 32
- 64
- 128
- 256
- 512
- 1024
batch_sizes: 1 2 4 8 16 32 64 128 256 512 1024
capture_cuda_graph: 0
checkpoint_variant: electricity_bin
dataset: electricity_bin
device: gpu
export_format: ts-trace
export_precision: fp32
format: ts-trace
max_batch_size: 1024
precision: fp16
request_count: 500
triton_gpu_engine_count: 2
triton_max_queue_delay: 1
triton_preferred_batch_sizes: 512 1024
- accelerator: none
batch_size:
- 1
- 2
- 4
- 8
- 16
- 32
- 64
- 128
- 256
- 512
- 1024
batch_sizes: 1 2 4 8 16 32 64 128 256 512 1024
capture_cuda_graph: 0
checkpoint_variant: traffic_bin
dataset: traffic_bin
device: gpu
export_format: ts-trace
export_precision: fp32
format: ts-trace
max_batch_size: 1024
precision: fp16
request_count: 500
triton_gpu_engine_count: 2
triton_max_queue_delay: 1
triton_preferred_batch_sizes: 512 1024
container_version: '21.12'
datasets:
- name: electricity_bin
- name: traffic_bin
datasets_dir: datasets
framework: PyTorch
model_name: TFT
triton_container_image: null
triton_custom_operations: null
triton_dockerfile: null
triton_load_model_method: explicit
|
TensorFlow2/LanguageModeling/BERT/official/nlp/transformer | transformer | beam_search_v1 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Beam search to find the translated sequence with the highest probability.
Source implementation from Tensor2Tensor:
https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/beam_search.py
"""
import numpy as np
import tensorflow.compat.v1 as tf
from tensorflow.python.util import nest
def inf(dtype):
"""Returns a value close to infinity, but is still finite in `dtype`.
This is useful to get a very large value that is still zero when multiplied by
zero. The floating-point "Inf" value is NaN when multiplied by zero.
Args:
dtype: A dtype. The returned value will be finite when casted to this dtype.
Returns:
A very large value.
"""
if dtype == "float32" or dtype == "bfloat16":
return 1e7
elif dtype == "float16":
# Disable no-member lint error, as the linter thinks np.float16 does not
# exist for some reason.
return np.finfo(np.float16).max # pylint: disable=no-member
else:
raise AssertionError('Invalid dtype: %s' % dtype)
class _StateKeys(object):
"""Keys to dictionary storing the state of the beam search loop."""
# Variable storing the loop index.
CUR_INDEX = "CUR_INDEX"
# Top sequences that are alive for each batch item. Alive sequences are ones
# that have not generated an EOS token. Sequences that reach EOS are marked as
# finished and moved to the FINISHED_SEQ tensor.
# Has shape [batch_size, beam_size, CUR_INDEX + 1]
ALIVE_SEQ = "ALIVE_SEQ"
# Log probabilities of each alive sequence. Shape [batch_size, beam_size]
ALIVE_LOG_PROBS = "ALIVE_LOG_PROBS"
# Dictionary of cached values for each alive sequence. The cache stores
# the encoder output, attention bias, and the decoder attention output from
# the previous iteration.
ALIVE_CACHE = "ALIVE_CACHE"
# Top finished sequences for each batch item.
# Has shape [batch_size, beam_size, CUR_INDEX + 1]. Sequences that are
# shorter than CUR_INDEX + 1 are padded with 0s.
FINISHED_SEQ = "FINISHED_SEQ"
# Scores for each finished sequence. Score = log probability / length norm
# Shape [batch_size, beam_size]
FINISHED_SCORES = "FINISHED_SCORES"
# Flags indicating which sequences in the finished sequences are finished.
# At the beginning, all of the sequences in FINISHED_SEQ are filler values.
# True -> finished sequence, False -> filler. Shape [batch_size, beam_size]
FINISHED_FLAGS = "FINISHED_FLAGS"
class SequenceBeamSearch(object):
"""Implementation of beam search loop."""
def __init__(self,
symbols_to_logits_fn,
vocab_size,
batch_size,
beam_size,
alpha,
max_decode_length,
eos_id,
padded_decode,
dtype=tf.float32):
"""Initialize sequence beam search.
Args:
symbols_to_logits_fn: A function to provide logits, which is the
interface to the Transformer model. The passed in arguments are:
ids -> A tensor with shape [batch_size * beam_size, index].
index -> A scalar.
cache -> A nested dictionary of tensors [batch_size * beam_size, ...].
The function must return a tuple of logits and the updated cache:
logits -> A tensor with shape [batch * beam_size, vocab_size].
updated cache -> A nested dictionary with the same structure as the
input cache.
vocab_size: An integer, the size of the vocabulary, used for topk
computation.
batch_size: An integer, the decode batch size.
beam_size: An integer, number of beams for beam search.
alpha: A float, defining the strength of length normalization.
max_decode_length: An integer, the maximum number of steps to decode
a sequence.
eos_id: An integer. ID of end of sentence token.
padded_decode: A bool, indicating if max_sequence_length padding is used
for beam search.
dtype: A tensorflow data type used for score computation. The default is
tf.float32.
"""
self.symbols_to_logits_fn = symbols_to_logits_fn
self.vocab_size = vocab_size
self.batch_size = batch_size
self.beam_size = beam_size
self.alpha = alpha
self.max_decode_length = max_decode_length
self.eos_id = eos_id
self.padded_decode = padded_decode
self.dtype = tf.as_dtype(dtype)
def search(self, initial_ids, initial_cache):
"""Beam search for sequences with highest scores."""
state, state_shapes = self._create_initial_state(initial_ids, initial_cache)
finished_state = tf.while_loop(
self._continue_search, self._search_step, loop_vars=[state],
shape_invariants=[state_shapes], parallel_iterations=1, back_prop=False)
finished_state = finished_state[0]
alive_seq = finished_state[_StateKeys.ALIVE_SEQ]
alive_log_probs = finished_state[_StateKeys.ALIVE_LOG_PROBS]
finished_seq = finished_state[_StateKeys.FINISHED_SEQ]
finished_scores = finished_state[_StateKeys.FINISHED_SCORES]
finished_flags = finished_state[_StateKeys.FINISHED_FLAGS]
# Account for corner case where there are no finished sequences for a
# particular batch item. In that case, return alive sequences for that batch
# item.
finished_seq = tf.where(
tf.reduce_any(finished_flags, 1), finished_seq, alive_seq)
finished_scores = tf.where(
tf.reduce_any(finished_flags, 1), finished_scores, alive_log_probs)
return finished_seq, finished_scores
def _create_initial_state(self, initial_ids, initial_cache):
"""Return initial state dictionary and its shape invariants.
Args:
initial_ids: initial ids to pass into the symbols_to_logits_fn.
int tensor with shape [batch_size, 1]
initial_cache: dictionary storing values to be passed into the
symbols_to_logits_fn.
Returns:
state and shape invariant dictionaries with keys from _StateKeys
"""
for key, value in initial_cache.items():
for inner_value in nest.flatten(value):
if inner_value.dtype != self.dtype:
raise TypeError(
"initial_cache element for key '%s' has dtype %s that does not "
"match SequenceBeamSearch's dtype of %s. Value: %s" %
              (key, inner_value.dtype.name, self.dtype.name, inner_value))
# Current loop index (starts at 0)
cur_index = tf.constant(0)
# Create alive sequence with shape [batch_size, beam_size, 1]
alive_seq = _expand_to_beam_size(initial_ids, self.beam_size)
alive_seq = tf.expand_dims(alive_seq, axis=2)
if self.padded_decode:
alive_seq = tf.tile(alive_seq, [1, 1, self.max_decode_length + 1])
# Create tensor for storing initial log probabilities.
# Assume initial_ids are prob 1.0
initial_log_probs = tf.constant(
[[0.] + [-float("inf")] * (self.beam_size - 1)], dtype=self.dtype)
alive_log_probs = tf.tile(initial_log_probs, [self.batch_size, 1])
# Expand all values stored in the dictionary to the beam size, so that each
# beam has a separate cache.
alive_cache = nest.map_structure(
lambda t: _expand_to_beam_size(t, self.beam_size), initial_cache)
# Initialize tensor storing finished sequences with filler values.
finished_seq = tf.zeros(tf.shape(alive_seq), tf.int32)
# Set scores of the initial finished seqs to negative infinity.
finished_scores = tf.ones([self.batch_size, self.beam_size],
dtype=self.dtype) * -inf(self.dtype)
# Initialize finished flags with all False values.
finished_flags = tf.zeros([self.batch_size, self.beam_size], tf.bool)
# Create state dictionary
state = {
_StateKeys.CUR_INDEX: cur_index,
_StateKeys.ALIVE_SEQ: alive_seq,
_StateKeys.ALIVE_LOG_PROBS: alive_log_probs,
_StateKeys.ALIVE_CACHE: alive_cache,
_StateKeys.FINISHED_SEQ: finished_seq,
_StateKeys.FINISHED_SCORES: finished_scores,
_StateKeys.FINISHED_FLAGS: finished_flags
}
# Create state invariants for each value in the state dictionary. Each
# dimension must be a constant or None. A None dimension means either:
# 1) the dimension's value is a tensor that remains the same but may
# depend on the input sequence to the model (e.g. batch size).
# 2) the dimension may have different values on different iterations.
if self.padded_decode:
state_shape_invariants = {
_StateKeys.CUR_INDEX:
tf.TensorShape([]),
_StateKeys.ALIVE_SEQ:
tf.TensorShape(
[self.batch_size, self.beam_size,
self.max_decode_length + 1]),
_StateKeys.ALIVE_LOG_PROBS:
tf.TensorShape([self.batch_size, self.beam_size]),
_StateKeys.ALIVE_CACHE:
nest.map_structure(_get_shape, alive_cache),
_StateKeys.FINISHED_SEQ:
tf.TensorShape(
[self.batch_size, self.beam_size,
self.max_decode_length + 1]),
_StateKeys.FINISHED_SCORES:
tf.TensorShape([self.batch_size, self.beam_size]),
_StateKeys.FINISHED_FLAGS:
tf.TensorShape([self.batch_size, self.beam_size])
}
else:
state_shape_invariants = {
_StateKeys.CUR_INDEX:
tf.TensorShape([]),
_StateKeys.ALIVE_SEQ:
tf.TensorShape([None, self.beam_size, None]),
_StateKeys.ALIVE_LOG_PROBS:
tf.TensorShape([None, self.beam_size]),
_StateKeys.ALIVE_CACHE:
nest.map_structure(_get_shape_keep_last_dim, alive_cache),
_StateKeys.FINISHED_SEQ:
tf.TensorShape([None, self.beam_size, None]),
_StateKeys.FINISHED_SCORES:
tf.TensorShape([None, self.beam_size]),
_StateKeys.FINISHED_FLAGS:
tf.TensorShape([None, self.beam_size])
}
return state, state_shape_invariants
def _continue_search(self, state):
"""Return whether to continue the search loop.
The loops should terminate when
1) when decode length has been reached, or
2) when the worst score in the finished sequences is better than the best
score in the alive sequences (i.e. the finished sequences are provably
unchanging)
Args:
state: A dictionary with the current loop state.
Returns:
Bool tensor with value True if loop should continue, False if loop should
terminate.
"""
i = state[_StateKeys.CUR_INDEX]
alive_log_probs = state[_StateKeys.ALIVE_LOG_PROBS]
finished_scores = state[_StateKeys.FINISHED_SCORES]
finished_flags = state[_StateKeys.FINISHED_FLAGS]
not_at_max_decode_length = tf.less(i, self.max_decode_length)
# Calculate largest length penalty (the larger penalty, the better score).
max_length_norm = _length_normalization(self.alpha, self.max_decode_length,
dtype=self.dtype)
# Get the best possible scores from alive sequences.
best_alive_scores = alive_log_probs[:, 0] / max_length_norm
# Compute worst score in finished sequences for each batch element
finished_scores *= tf.cast(finished_flags,
self.dtype) # set filler scores to zero
lowest_finished_scores = tf.reduce_min(finished_scores, axis=1)
# If there are no finished sequences in a batch element, then set the lowest
# finished score to -INF for that element.
finished_batches = tf.reduce_any(finished_flags, 1)
lowest_finished_scores += ((1.0 -
tf.cast(finished_batches, self.dtype)) *
-inf(self.dtype))
worst_finished_score_better_than_best_alive_score = tf.reduce_all(
tf.greater(lowest_finished_scores, best_alive_scores)
)
return tf.logical_and(
not_at_max_decode_length,
tf.logical_not(worst_finished_score_better_than_best_alive_score)
)
def _search_step(self, state):
"""Beam search loop body.
Grow alive sequences by a single ID. Sequences that have reached the EOS
token are marked as finished. The alive and finished sequences with the
highest log probabilities and scores are returned.
    A sequence's finished score is calculated by dividing the log probability
by the length normalization factor. Without length normalization, the
search is more likely to return shorter sequences.
Args:
state: A dictionary with the current loop state.
Returns:
new state dictionary.
"""
# Grow alive sequences by one token.
new_seq, new_log_probs, topk_ids, new_cache = self._grow_alive_seq(state)
new_finished_flags = tf.equal(topk_ids, self.eos_id)
# Collect top beam_size alive sequences
alive_state = self._get_new_alive_state(new_seq, new_log_probs,
new_finished_flags, new_cache)
# Combine newly finished sequences with existing finished sequences, and
# collect the top k scoring sequences.
finished_state = self._get_new_finished_state(state, new_seq, new_log_probs,
new_finished_flags)
# Increment loop index and create new state dictionary
new_state = {_StateKeys.CUR_INDEX: state[_StateKeys.CUR_INDEX] + 1}
new_state.update(alive_state)
new_state.update(finished_state)
return [new_state]
def _grow_alive_seq(self, state):
"""Grow alive sequences by one token, and collect top 2*beam_size sequences.
2*beam_size sequences are collected because some sequences may have reached
the EOS token. 2*beam_size ensures that at least beam_size sequences are
still alive.
Args:
state: A dictionary with the current loop state.
Returns:
Tuple of
(Top 2*beam_size sequences [batch_size, 2 * beam_size, cur_index + 1],
Scores of returned sequences [batch_size, 2 * beam_size],
New alive cache, for each of the 2 * beam_size sequences)
"""
i = state[_StateKeys.CUR_INDEX]
alive_seq = state[_StateKeys.ALIVE_SEQ]
alive_log_probs = state[_StateKeys.ALIVE_LOG_PROBS]
alive_cache = state[_StateKeys.ALIVE_CACHE]
beams_to_keep = 2 * self.beam_size
# Get logits for the next candidate IDs for the alive sequences. Get the new
# cache values at the same time.
if self.padded_decode:
flat_ids = tf.reshape(
tf.slice(alive_seq, [0, 0, i], [self.batch_size, self.beam_size, 1]),
[self.batch_size * self.beam_size, -1])
else:
flat_ids = _flatten_beam_dim(alive_seq) # [batch_size * beam_size]
flat_cache = nest.map_structure(_flatten_beam_dim, alive_cache)
flat_logits, flat_cache = self.symbols_to_logits_fn(flat_ids, i, flat_cache)
# Unflatten logits to shape [batch_size, beam_size, vocab_size]
logits = _unflatten_beam_dim(flat_logits, self.batch_size, self.beam_size)
new_cache = nest.map_structure(
lambda t: _unflatten_beam_dim(t, self.batch_size, self.beam_size),
flat_cache)
# Convert logits to normalized log probs
candidate_log_probs = _log_prob_from_logits(logits)
# Calculate new log probabilities if each of the alive sequences were
    # extended by the candidate IDs.
# Shape [batch_size, beam_size, vocab_size]
log_probs = candidate_log_probs + tf.expand_dims(alive_log_probs, axis=2)
# Each batch item has beam_size * vocab_size candidate sequences. For each
# batch item, get the k candidates with the highest log probabilities.
flat_log_probs = tf.reshape(log_probs,
[-1, self.beam_size * self.vocab_size])
topk_log_probs, topk_indices = tf.nn.top_k(flat_log_probs, k=beams_to_keep)
# Extract the alive sequences that generate the highest log probabilities
# after being extended.
topk_beam_indices = topk_indices // self.vocab_size
topk_seq, new_cache = _gather_beams(
[alive_seq, new_cache], topk_beam_indices, self.batch_size,
beams_to_keep)
# Append the most probable IDs to the topk sequences
topk_ids = topk_indices % self.vocab_size
if self.padded_decode:
topk_seq = tf.transpose(topk_seq, perm=[2, 0, 1])
# TODO(b/145533236, hongkuny): Reverts once TF fix the validation.
topk_seq = tf.tensor_scatter_nd_update(topk_seq, [[i + 1]],
tf.expand_dims(topk_ids, axis=0))
topk_seq = tf.transpose(topk_seq, perm=[1, 2, 0])
else:
topk_seq = tf.concat([topk_seq, tf.expand_dims(topk_ids, axis=2)], axis=2)
return topk_seq, topk_log_probs, topk_ids, new_cache
def _get_new_alive_state(self, new_seq, new_log_probs, new_finished_flags,
new_cache):
"""Gather the top k sequences that are still alive.
Args:
new_seq: New sequences generated by growing the current alive sequences
int32 tensor with shape [batch_size, 2 * beam_size, cur_index + 1]
new_log_probs: Log probabilities of new sequences float32 tensor with
shape [batch_size, beam_size]
new_finished_flags: A boolean Tensor indicates which sequences are live
inside the beam.
new_cache: Dict of cached values for each sequence.
Returns:
Dictionary with alive keys from _StateKeys:
{Top beam_size sequences that are still alive (don't end with eos_id)
Log probabilities of top alive sequences
Dict cache storing decoder states for top alive sequences}
"""
# To prevent finished sequences from being considered, set log probs to -inf
new_log_probs += tf.cast(new_finished_flags, self.dtype) * -inf(self.dtype)
top_alive_seq, top_alive_log_probs, top_alive_cache = _gather_topk_beams(
[new_seq, new_log_probs, new_cache], new_log_probs, self.batch_size,
self.beam_size)
return {
_StateKeys.ALIVE_SEQ: top_alive_seq,
_StateKeys.ALIVE_LOG_PROBS: top_alive_log_probs,
_StateKeys.ALIVE_CACHE: top_alive_cache
}
def _get_new_finished_state(self, state, new_seq, new_log_probs,
new_finished_flags):
"""Combine new and old finished sequences, and gather the top k sequences.
Args:
state: A dictionary with the current loop state.
new_seq: New sequences generated by growing the current alive sequences
int32 tensor with shape [batch_size, beam_size, i + 1]
new_log_probs: Log probabilities of new sequences float32 tensor with
shape [batch_size, beam_size]
new_finished_flags: A boolean Tensor indicates which sequences are live
inside the beam.
Returns:
Dictionary with finished keys from _StateKeys:
{Top beam_size finished sequences based on score,
Scores of finished sequences,
Finished flags of finished sequences}
"""
i = state[_StateKeys.CUR_INDEX]
finished_seq = state[_StateKeys.FINISHED_SEQ]
finished_scores = state[_StateKeys.FINISHED_SCORES]
finished_flags = state[_StateKeys.FINISHED_FLAGS]
# First append a column of 0-ids to finished_seq to increment the length.
# New shape of finished_seq: [batch_size, beam_size, i + 1]
if not self.padded_decode:
finished_seq = tf.concat([
finished_seq,
tf.zeros([self.batch_size, self.beam_size, 1], tf.int32)
],
axis=2)
# Calculate new seq scores from log probabilities.
length_norm = _length_normalization(self.alpha, i + 1, dtype=self.dtype)
new_scores = new_log_probs / length_norm
# Set the scores of the still-alive seq in new_seq to large negative values.
new_scores += ((1. - tf.cast(new_finished_flags, self.dtype)) *
-inf(self.dtype))
# Combine sequences, scores, and flags.
finished_seq = tf.concat([finished_seq, new_seq], axis=1)
finished_scores = tf.concat([finished_scores, new_scores], axis=1)
finished_flags = tf.concat([finished_flags, new_finished_flags], axis=1)
# Return the finished sequences with the best scores.
top_finished_seq, top_finished_scores, top_finished_flags = (
_gather_topk_beams([finished_seq, finished_scores, finished_flags],
finished_scores, self.batch_size, self.beam_size))
return {
_StateKeys.FINISHED_SEQ: top_finished_seq,
_StateKeys.FINISHED_SCORES: top_finished_scores,
_StateKeys.FINISHED_FLAGS: top_finished_flags
}
def sequence_beam_search(
symbols_to_logits_fn, initial_ids, initial_cache, vocab_size, beam_size,
alpha, max_decode_length, eos_id, padded_decode=False):
"""Search for sequence of subtoken ids with the largest probability.
Args:
symbols_to_logits_fn: A function that takes in ids, index, and cache as
arguments. The passed in arguments will have shape:
ids -> A tensor with shape [batch_size * beam_size, index].
index -> A scalar.
cache -> A nested dictionary of tensors [batch_size * beam_size, ...].
The function must return a tuple of logits and new cache:
logits -> A tensor with shape [batch * beam_size, vocab_size].
new cache -> A nested dictionary with the same shape/structure as the
inputted cache.
initial_ids: An int32 tensor with shape [batch_size]. Starting ids for
each batch item.
initial_cache: A dictionary, containing starting decoder variables
information.
vocab_size: An integer, the size of the vocabulary, used for topk
computation.
beam_size: An integer, the number of beams.
alpha: A float, defining the strength of length normalization.
max_decode_length: An integer, the maximum length to decoded a sequence.
eos_id: An integer, ID of eos token, used to determine when a sequence has
finished.
padded_decode: A bool, indicating if max_sequence_length padding is used
for beam search.
Returns:
Top decoded sequences [batch_size, beam_size, max_decode_length]
sequence scores [batch_size, beam_size]
"""
batch_size = (
initial_ids.shape.as_list()[0] if padded_decode else
tf.shape(initial_ids)[0])
sbs = SequenceBeamSearch(symbols_to_logits_fn, vocab_size, batch_size,
beam_size, alpha, max_decode_length, eos_id,
padded_decode)
return sbs.search(initial_ids, initial_cache)
def _log_prob_from_logits(logits):
return logits - tf.reduce_logsumexp(logits, axis=2, keepdims=True)
def _length_normalization(alpha, length, dtype=tf.float32):
"""Return length normalization factor."""
return tf.pow(((5. + tf.cast(length, dtype)) / 6.), alpha)
def _expand_to_beam_size(tensor, beam_size):
"""Tiles a given tensor by beam_size.
Args:
tensor: tensor to tile [batch_size, ...]
beam_size: How much to tile the tensor by.
Returns:
Tiled tensor [batch_size, beam_size, ...]
"""
tensor = tf.expand_dims(tensor, axis=1)
tile_dims = [1] * tensor.shape.ndims
tile_dims[1] = beam_size
return tf.tile(tensor, tile_dims)
def _shape_list(tensor):
"""Return a list of the tensor's shape, and ensure no None values in list."""
# Get statically known shape (may contain None's for unknown dimensions)
shape = tensor.get_shape().as_list()
# Ensure that the shape values are not None
dynamic_shape = tf.shape(tensor)
for i in range(len(shape)): # pylint: disable=consider-using-enumerate
if shape[i] is None:
shape[i] = dynamic_shape[i]
return shape
def _get_shape_keep_last_dim(tensor):
shape_list = _shape_list(tensor)
  # Set all dimensions except the last to None so they may change between loop
  # iterations; the last dimension is also set to None if it is dynamic.
for i in range(len(shape_list) - 1):
shape_list[i] = None
if isinstance(shape_list[-1], tf.Tensor):
shape_list[-1] = None
return tf.TensorShape(shape_list)
def _get_shape(tensor):
"""Return the shape of the input tensor."""
return tf.TensorShape(_shape_list(tensor))
def _flatten_beam_dim(tensor):
"""Reshapes first two dimensions in to single dimension.
Args:
tensor: Tensor to reshape of shape [A, B, ...]
Returns:
Reshaped tensor of shape [A*B, ...]
"""
shape = _shape_list(tensor)
shape[0] *= shape[1]
shape.pop(1) # Remove beam dim
return tf.reshape(tensor, shape)
def _unflatten_beam_dim(tensor, batch_size, beam_size):
"""Reshapes first dimension back to [batch_size, beam_size].
Args:
tensor: Tensor to reshape of shape [batch_size*beam_size, ...]
batch_size: Tensor, original batch size.
beam_size: int, original beam size.
Returns:
Reshaped tensor of shape [batch_size, beam_size, ...]
"""
shape = _shape_list(tensor)
new_shape = [batch_size, beam_size] + shape[1:]
return tf.reshape(tensor, new_shape)
def _gather_beams(nested, beam_indices, batch_size, new_beam_size):
"""Gather beams from nested structure of tensors.
Each tensor in nested represents a batch of beams, where beam refers to a
single search state (beam search involves searching through multiple states
in parallel).
This function is used to gather the top beams, specified by
beam_indices, from the nested tensors.
Args:
nested: Nested structure (tensor, list, tuple or dict) containing tensors
with shape [batch_size, beam_size, ...].
beam_indices: int32 tensor with shape [batch_size, new_beam_size]. Each
value in beam_indices must be between [0, beam_size), and are not
necessarily unique.
batch_size: int size of batch
new_beam_size: int number of beams to be pulled from the nested tensors.
Returns:
Nested structure containing tensors with shape
[batch_size, new_beam_size, ...]
"""
  # Computes the i'th coordinate that contains the batch index for gather_nd.
# Batch pos is a tensor like [[0,0,0,0,],[1,1,1,1],..].
batch_pos = tf.range(batch_size * new_beam_size) // new_beam_size
batch_pos = tf.reshape(batch_pos, [batch_size, new_beam_size])
# Create coordinates to be passed to tf.gather_nd. Stacking creates a tensor
# with shape [batch_size, beam_size, 2], where the last dimension contains
# the (i, j) gathering coordinates.
coordinates = tf.stack([batch_pos, beam_indices], axis=2)
return nest.map_structure(
lambda state: tf.gather_nd(state, coordinates), nested)
def _gather_topk_beams(nested, score_or_log_prob, batch_size, beam_size):
"""Gather top beams from nested structure."""
_, topk_indexes = tf.nn.top_k(score_or_log_prob, k=beam_size)
return _gather_beams(nested, topk_indexes, batch_size, beam_size)
|
TensorFlow/Detection/SSD/models/research/object_detection/g3doc | g3doc | evaluation_protocols | # Supported object detection evaluation protocols
The Tensorflow Object Detection API currently supports several evaluation
protocols, which can be configured in `EvalConfig` by setting `metrics_set` to
the corresponding value.
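As a minimal illustration (a sketch, not part of the official protocol descriptions below), the same choice can also be made programmatically on an `EvalConfig` proto; this assumes the compiled `object_detection` protos are importable:

```python
# Minimal sketch: request an evaluation protocol on an EvalConfig proto.
from object_detection.protos import eval_pb2

eval_config = eval_pb2.EvalConfig()
# metrics_set is a repeated field, so several protocols can be requested at once.
eval_config.metrics_set.append('coco_detection_metrics')
```

In a pipeline configuration file, the same choice is expressed as a `metrics_set` entry inside the `eval_config` block.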
## PASCAL VOC 2010 detection metric
`EvalConfig.metrics_set='pascal_voc_detection_metrics'`
The commonly used mAP metric for evaluating the quality of object detectors,
computed according to the protocol of the PASCAL VOC Challenge 2010-2012. The
protocol is available
[here](http://host.robots.ox.ac.uk/pascal/VOC/voc2010/devkit_doc_08-May-2010.pdf).
## Weighted PASCAL VOC detection metric
`EvalConfig.metrics_set='weighted_pascal_voc_detection_metrics'`
The weighted PASCAL metric computes the mean average precision as the average
precision when treating all classes as a single class. In comparison,
the PASCAL metric computes the mean average precision as the mean of the
per-class average precisions.
For example, suppose the test set consists of two classes, "cat" and "dog", and
there are ten times more boxes of "cat" than of "dog". According to the PASCAL
VOC 2010 metric, performance on each of the two classes contributes equally
towards the final mAP value, while for the Weighted PASCAL VOC metric the final
mAP value is influenced by the frequency of each class.
## PASCAL VOC 2010 instance segmentation metric
`EvalConfig.metrics_set='pascal_voc_instance_segmentation_metrics'`
Similar to the PASCAL VOC 2010 detection metric, but computes the intersection over
union based on the object masks instead of object boxes.
## Weighted PASCAL VOC instance segmentation metric
`EvalConfig.metrics_set='weighted_pascal_voc_instance_segmentation_metrics'`
Similar to the Weighted PASCAL VOC 2010 detection metric, but computes the
intersection over union based on the object masks instead of object boxes.
## COCO detection metrics
`EvalConfig.metrics_set='coco_detection_metrics'`
The COCO metrics are the official detection metrics used to score the
[COCO competition](http://cocodataset.org/) and are similar to Pascal VOC
metrics but have a slightly different implementation and report additional
statistics such as mAP at IOU thresholds of .5:.95, and precision/recall
statistics for small, medium, and large objects.
See the
[pycocotools](https://github.com/cocodataset/cocoapi/tree/master/PythonAPI)
repository for more details.
## COCO mask metrics
`EvalConfig.metrics_set='coco_mask_metrics'`
Similar to the COCO detection metrics, but computes the
intersection over union based on the object masks instead of object boxes.
## Open Images V2 detection metric
`EvalConfig.metrics_set='oid_V2_detection_metrics'`
This metric is defined originally for evaluating detector performance on [Open
Images V2 dataset](https://github.com/openimages/dataset) and is fairly similar
to the PASCAL VOC 2010 metric mentioned above. It computes interpolated average
precision (AP) for each class and averages it among all classes (mAP).
The difference to the PASCAL VOC 2010 metric is the following: Open Images
annotations contain `group-of` ground-truth boxes (see [Open Images data
description](https://github.com/openimages/dataset#annotations-human-bboxcsv)),
that are treated differently for the purpose of deciding whether detections are
"true positives", "ignored", "false positives". Here we define these three
cases:
A detection is a "true positive" if there is a non-group-of ground-truth box,
such that:
* The detection box and the ground-truth box are of the same class, and
intersection-over-union (IoU) between the detection box and the ground-truth
box is greater than the IoU threshold (default value 0.5). \
Illustration of handling non-group-of boxes: \

* yellow box - ground-truth box;
* green box - true positive;
* red boxes - false positives.
* This is the highest scoring detection for this ground truth box that
satisfies the criteria above.
A detection is "ignored" if it is not a true positive, and there is a `group-of`
ground-truth box such that:
* The detection box and the ground-truth box are of the same class, and the
area of intersection between the detection box and the ground-truth box
divided by the area of the detection is greater than 0.5. This is intended
to measure whether the detection box is approximately inside the group-of
ground-truth box. \
Illustration of handling `group-of` boxes: \

* yellow box - ground-truth box;
* grey boxes - two detections on cars, that are ignored;
* red box - false positive.
A detection is a "false positive" if it is neither a "true positive" nor
"ignored".
Precision and recall are defined as:
* Precision = number-of-true-positives/(number-of-true-positives + number-of-false-positives)
* Recall = number-of-true-positives/number-of-non-group-of-boxes
Note that detections ignored as firing on a `group-of` ground-truth box do not
contribute to the number of true positives.
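As a small worked illustration with made-up counts (the numbers below are hypothetical, not taken from the protocol): suppose an evaluation produces 6 true positives, 2 ignored detections (matched to `group-of` boxes), 2 false positives, and the ground truth contains 8 non-group-of boxes.

```python
# Hypothetical counts, used only to illustrate the definitions above.
num_true_positives = 6
num_false_positives = 2       # ignored detections are excluded from this count
num_non_group_of_boxes = 8

precision = num_true_positives / (num_true_positives + num_false_positives)  # 0.75
recall = num_true_positives / num_non_group_of_boxes                         # 0.75
```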
The labels in Open Images are organized in a
[hierarchy](https://storage.googleapis.com/openimages/2017_07/bbox_labels_vis/bbox_labels_vis.html).
Ground-truth bounding-boxes are annotated with the most specific class available
in the hierarchy. For example, "car" has two children "limousine" and "van". Any
other kind of car is annotated as "car" (for example, a sedan). Given this
convention, the evaluation software treats all classes independently, ignoring
the hierarchy. To achieve high performance values, object detectors should
output bounding-boxes labelled in the same manner.
The old metric name is DEPRECATED.
`EvalConfig.metrics_set='open_images_V2_detection_metrics'`
## OID Challenge Object Detection Metric 2018
`EvalConfig.metrics_set='oid_challenge_detection_metrics'`
The metric for the OID Challenge Object Detection Metric 2018, Object Detection
track. The description is provided on the [Open Images Challenge
website](https://storage.googleapis.com/openimages/web/challenge.html).
The old metric name is DEPRECATED.
`EvalConfig.metrics_set='oid_challenge_object_detection_metrics'`
## OID Challenge Visual Relationship Detection Metric 2018
The metric for the OID Challenge Visual Relationship Detection Metric 2018, Visual
Relationship Detection track. The description is provided on the [Open Images
Challenge
website](https://storage.googleapis.com/openimages/web/challenge.html). Note:
this is currently a stand-alone metric that can be used only through the
`metrics/oid_vrd_challenge_evaluation.py` util.
|
TensorFlow/Detection/SSD/models/research/object_detection/core | core | minibatch_sampler_test | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for google3.research.vale.object_detection.minibatch_sampler."""
import numpy as np
import tensorflow as tf
from object_detection.core import minibatch_sampler
class MinibatchSamplerTest(tf.test.TestCase):
def test_subsample_indicator_when_more_true_elements_than_num_samples(self):
np_indicator = [True, False, True, False, True, True, False]
indicator = tf.constant(np_indicator)
samples = minibatch_sampler.MinibatchSampler.subsample_indicator(
indicator, 3)
with self.test_session() as sess:
samples_out = sess.run(samples)
      self.assertEqual(np.sum(samples_out), 3)
self.assertAllEqual(samples_out,
np.logical_and(samples_out, np_indicator))
def test_subsample_when_more_true_elements_than_num_samples_no_shape(self):
np_indicator = [True, False, True, False, True, True, False]
indicator = tf.placeholder(tf.bool)
feed_dict = {indicator: np_indicator}
samples = minibatch_sampler.MinibatchSampler.subsample_indicator(
indicator, 3)
with self.test_session() as sess:
samples_out = sess.run(samples, feed_dict=feed_dict)
      self.assertEqual(np.sum(samples_out), 3)
self.assertAllEqual(samples_out,
np.logical_and(samples_out, np_indicator))
def test_subsample_indicator_when_less_true_elements_than_num_samples(self):
np_indicator = [True, False, True, False, True, True, False]
indicator = tf.constant(np_indicator)
samples = minibatch_sampler.MinibatchSampler.subsample_indicator(
indicator, 5)
with self.test_session() as sess:
samples_out = sess.run(samples)
      self.assertEqual(np.sum(samples_out), 4)
self.assertAllEqual(samples_out,
np.logical_and(samples_out, np_indicator))
def test_subsample_indicator_when_num_samples_is_zero(self):
np_indicator = [True, False, True, False, True, True, False]
indicator = tf.constant(np_indicator)
samples_none = minibatch_sampler.MinibatchSampler.subsample_indicator(
indicator, 0)
with self.test_session() as sess:
samples_none_out = sess.run(samples_none)
self.assertAllEqual(
np.zeros_like(samples_none_out, dtype=bool),
samples_none_out)
def test_subsample_indicator_when_indicator_all_false(self):
indicator_empty = tf.zeros([0], dtype=tf.bool)
samples_empty = minibatch_sampler.MinibatchSampler.subsample_indicator(
indicator_empty, 4)
with self.test_session() as sess:
samples_empty_out = sess.run(samples_empty)
self.assertEqual(0, samples_empty_out.size)
if __name__ == '__main__':
tf.test.main()
|
TensorFlow/Detection/SSD/models/research/object_detection/utils | utils | learning_schedules | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Library of common learning rate schedules."""
import numpy as np
import tensorflow as tf
def exponential_decay_with_burnin(global_step,
learning_rate_base,
learning_rate_decay_steps,
learning_rate_decay_factor,
burnin_learning_rate=0.0,
burnin_steps=0,
min_learning_rate=0.0,
staircase=True):
"""Exponential decay schedule with burn-in period.
In this schedule, learning rate is fixed at burnin_learning_rate
for a fixed period, before transitioning to a regular exponential
decay schedule.
Args:
global_step: int tensor representing global step.
learning_rate_base: base learning rate.
learning_rate_decay_steps: steps to take between decaying the learning rate.
Note that this includes the number of burn-in steps.
learning_rate_decay_factor: multiplicative factor by which to decay
learning rate.
burnin_learning_rate: initial learning rate during burn-in period. If
0.0 (which is the default), then the burn-in learning rate is simply
set to learning_rate_base.
burnin_steps: number of steps to use burnin learning rate.
min_learning_rate: the minimum learning rate.
staircase: whether use staircase decay.
Returns:
a (scalar) float tensor representing learning rate
"""
if burnin_learning_rate == 0:
burnin_learning_rate = learning_rate_base
post_burnin_learning_rate = tf.train.exponential_decay(
learning_rate_base,
global_step - burnin_steps,
learning_rate_decay_steps,
learning_rate_decay_factor,
staircase=staircase)
return tf.maximum(tf.where(
tf.less(tf.cast(global_step, tf.int32), tf.constant(burnin_steps)),
tf.constant(burnin_learning_rate),
post_burnin_learning_rate), min_learning_rate, name='learning_rate')
def cosine_decay_with_warmup(global_step,
learning_rate_base,
total_steps,
warmup_learning_rate=0.0,
warmup_steps=0,
hold_base_rate_steps=0):
"""Cosine decay schedule with warm up period.
Cosine annealing learning rate as described in:
Loshchilov and Hutter, SGDR: Stochastic Gradient Descent with Warm Restarts.
ICLR 2017. https://arxiv.org/abs/1608.03983
In this schedule, the learning rate grows linearly from warmup_learning_rate
to learning_rate_base for warmup_steps, then transitions to a cosine decay
schedule.
Args:
global_step: int64 (scalar) tensor representing global step.
learning_rate_base: base learning rate.
total_steps: total number of training steps.
warmup_learning_rate: initial learning rate for warm up.
warmup_steps: number of warmup steps.
hold_base_rate_steps: Optional number of steps to hold base learning rate
before decaying.
Returns:
a (scalar) float tensor representing learning rate.
Raises:
ValueError: if warmup_learning_rate is larger than learning_rate_base,
or if warmup_steps is larger than total_steps.
"""
if total_steps < warmup_steps:
raise ValueError('total_steps must be larger or equal to '
'warmup_steps.')
learning_rate = 0.5 * learning_rate_base * (1 + tf.cos(
np.pi *
(tf.cast(global_step, tf.float32) - warmup_steps - hold_base_rate_steps
) / float(total_steps - warmup_steps - hold_base_rate_steps)))
if hold_base_rate_steps > 0:
learning_rate = tf.where(global_step > warmup_steps + hold_base_rate_steps,
learning_rate, learning_rate_base)
if warmup_steps > 0:
if learning_rate_base < warmup_learning_rate:
raise ValueError('learning_rate_base must be larger or equal to '
'warmup_learning_rate.')
slope = (learning_rate_base - warmup_learning_rate) / warmup_steps
warmup_rate = slope * tf.cast(global_step,
tf.float32) + warmup_learning_rate
learning_rate = tf.where(global_step < warmup_steps, warmup_rate,
learning_rate)
return tf.where(global_step > total_steps, 0.0, learning_rate,
name='learning_rate')
def manual_stepping(global_step, boundaries, rates, warmup=False):
"""Manually stepped learning rate schedule.
This function provides fine grained control over learning rates. One must
specify a sequence of learning rates as well as a set of integer steps
at which the current learning rate must transition to the next. For example,
if boundaries = [5, 10] and rates = [.1, .01, .001], then the learning
rate returned by this function is .1 for global_step=0,...,4, .01 for
global_step=5...9, and .001 for global_step=10 and onward.
Args:
global_step: int64 (scalar) tensor representing global step.
boundaries: a list of global steps at which to switch learning
rates. This list is assumed to consist of increasing positive integers.
rates: a list of (float) learning rates corresponding to intervals between
the boundaries. The length of this list must be exactly
len(boundaries) + 1.
warmup: Whether to linearly interpolate learning rate for steps in
[0, boundaries[0]].
Returns:
a (scalar) float tensor representing learning rate
Raises:
ValueError: if one of the following checks fails:
1. boundaries is a strictly increasing list of positive integers
2. len(rates) == len(boundaries) + 1
3. boundaries[0] != 0
"""
if any([b < 0 for b in boundaries]) or any(
[not isinstance(b, int) for b in boundaries]):
raise ValueError('boundaries must be a list of positive integers')
if any([bnext <= b for bnext, b in zip(boundaries[1:], boundaries[:-1])]):
raise ValueError('Entries in boundaries must be strictly increasing.')
if any([not isinstance(r, float) for r in rates]):
raise ValueError('Learning rates must be floats')
if len(rates) != len(boundaries) + 1:
raise ValueError('Number of provided learning rates must exceed '
'number of boundary points by exactly 1.')
if boundaries and boundaries[0] == 0:
raise ValueError('First step cannot be zero.')
if warmup and boundaries:
slope = (rates[1] - rates[0]) * 1.0 / boundaries[0]
    warmup_steps = list(range(boundaries[0]))
warmup_rates = [rates[0] + slope * step for step in warmup_steps]
boundaries = warmup_steps + boundaries
rates = warmup_rates + rates[1:]
else:
boundaries = [0] + boundaries
num_boundaries = len(boundaries)
rate_index = tf.reduce_max(tf.where(tf.greater_equal(global_step, boundaries),
list(range(num_boundaries)),
[0] * num_boundaries))
return tf.reduce_sum(rates * tf.one_hot(rate_index, depth=num_boundaries),
name='learning_rate')
|
TensorFlow2/Recommendation/DLRM_and_DCNv2/preproc | preproc | run_spark | #!/bin/bash
# Copyright (c) 2020 NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#########################################################################
# File Name: run_spark.sh
echo "Input mode option: $1"
if [ "$1" = "CPU" ]
then
echo "Run with CPU.";
shift
./run_spark_cpu.sh ${@}
elif [ "$1" = "DGX2" ]
then
echo "Run with GPU.";
shift
./run_spark_gpu.sh ${@} DGX2
else
echo "Please choose mode (CPU/DGX2).";
fi
|
PyTorch/LanguageModeling/BERT/bert_configs | bert_configs | base | {
"attention_probs_dropout_prob": 0.1,
"hidden_act": "gelu",
"hidden_dropout_prob": 0.1,
"hidden_size": 768,
"initializer_range": 0.02,
"intermediate_size": 3072,
"max_position_embeddings": 512,
"num_attention_heads": 12,
"num_hidden_layers": 12,
"type_vocab_size": 2,
"vocab_size": 30528
} |
Tools/PyTorch/TimeSeriesPredictionPlatform/conf | conf | inference_triton_config | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
defaults:
- inference: triton
|
TensorFlow/Detection/SSD/models/research/slim/nets | nets | inception_utils | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains common code shared by all inception models.
Usage of arg scope:
with slim.arg_scope(inception_arg_scope()):
logits, end_points = inception.inception_v3(images, num_classes,
is_training=is_training)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
slim = tf.contrib.slim
def inception_arg_scope(weight_decay=0.00004,
use_batch_norm=True,
batch_norm_decay=0.9997,
batch_norm_epsilon=0.001,
activation_fn=tf.nn.relu,
batch_norm_updates_collections=tf.GraphKeys.UPDATE_OPS,
batch_norm_scale=False):
"""Defines the default arg scope for inception models.
Args:
weight_decay: The weight decay to use for regularizing the model.
use_batch_norm: "If `True`, batch_norm is applied after each convolution.
batch_norm_decay: Decay for batch norm moving average.
batch_norm_epsilon: Small float added to variance to avoid dividing by zero
in batch norm.
activation_fn: Activation function for conv2d.
batch_norm_updates_collections: Collection for the update ops for
batch norm.
batch_norm_scale: If True, uses an explicit `gamma` multiplier to scale the
activations in the batch normalization layer.
Returns:
An `arg_scope` to use for the inception models.
"""
batch_norm_params = {
# Decay for the moving averages.
'decay': batch_norm_decay,
# epsilon to prevent 0s in variance.
'epsilon': batch_norm_epsilon,
# collection containing update_ops.
'updates_collections': batch_norm_updates_collections,
# use fused batch norm if possible.
'fused': None,
'scale': batch_norm_scale,
}
if use_batch_norm:
normalizer_fn = slim.batch_norm
normalizer_params = batch_norm_params
else:
normalizer_fn = None
normalizer_params = {}
# Set weight_decay for weights in Conv and FC layers.
with slim.arg_scope([slim.conv2d, slim.fully_connected],
weights_regularizer=slim.l2_regularizer(weight_decay)):
with slim.arg_scope(
[slim.conv2d],
weights_initializer=slim.variance_scaling_initializer(),
activation_fn=activation_fn,
normalizer_fn=normalizer_fn,
normalizer_params=normalizer_params) as sc:
return sc
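if __name__ == '__main__':
  # Minimal usage sketch (added for illustration; not part of the original
  # file). Inside the arg_scope, every slim.conv2d picks up the weight
  # regularizer, initializer and batch-norm settings defined above; the
  # input size and weight_decay value are arbitrary example values.
  images = tf.placeholder(tf.float32, [None, 32, 32, 3])
  with slim.arg_scope(inception_arg_scope(weight_decay=1e-4)):
    net = slim.conv2d(images, 8, [3, 3], scope='demo_conv')
  print(net.shape)  # (?, 32, 32, 8)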
|
PaddlePaddle/Classification/RN50v1.5/utils | utils | mode | # Copyright (c) 2022 NVIDIA Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from enum import Enum
class Mode(Enum):
TRAIN = 'Train'
EVAL = 'Eval'
class RunScope(Enum):
TRAIN_ONLY = 'train_only'
EVAL_ONLY = 'eval_only'
TRAIN_EVAL = 'train_eval'
|
TensorFlow/Detection/SSD/models/research/slim/nets | nets | inception_v1 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains the definition for inception v1 classification network."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from nets import inception_utils
slim = tf.contrib.slim
trunc_normal = lambda stddev: tf.truncated_normal_initializer(0.0, stddev)
def inception_v1_base(inputs,
final_endpoint='Mixed_5c',
scope='InceptionV1'):
"""Defines the Inception V1 base architecture.
This architecture is defined in:
Going deeper with convolutions
Christian Szegedy, Wei Liu, Yangqing Jia, Pierre Sermanet, Scott Reed,
Dragomir Anguelov, Dumitru Erhan, Vincent Vanhoucke, Andrew Rabinovich.
http://arxiv.org/pdf/1409.4842v1.pdf.
Args:
inputs: a tensor of size [batch_size, height, width, channels].
final_endpoint: specifies the endpoint to construct the network up to. It
can be one of ['Conv2d_1a_7x7', 'MaxPool_2a_3x3', 'Conv2d_2b_1x1',
'Conv2d_2c_3x3', 'MaxPool_3a_3x3', 'Mixed_3b', 'Mixed_3c',
'MaxPool_4a_3x3', 'Mixed_4b', 'Mixed_4c', 'Mixed_4d', 'Mixed_4e',
'Mixed_4f', 'MaxPool_5a_2x2', 'Mixed_5b', 'Mixed_5c']
scope: Optional variable_scope.
Returns:
A dictionary from components of the network to the corresponding activation.
Raises:
ValueError: if final_endpoint is not set to one of the predefined values.
"""
end_points = {}
with tf.variable_scope(scope, 'InceptionV1', [inputs]):
with slim.arg_scope(
[slim.conv2d, slim.fully_connected],
weights_initializer=trunc_normal(0.01)):
with slim.arg_scope([slim.conv2d, slim.max_pool2d],
stride=1, padding='SAME'):
end_point = 'Conv2d_1a_7x7'
net = slim.conv2d(inputs, 64, [7, 7], stride=2, scope=end_point)
end_points[end_point] = net
if final_endpoint == end_point: return net, end_points
end_point = 'MaxPool_2a_3x3'
net = slim.max_pool2d(net, [3, 3], stride=2, scope=end_point)
end_points[end_point] = net
if final_endpoint == end_point: return net, end_points
end_point = 'Conv2d_2b_1x1'
net = slim.conv2d(net, 64, [1, 1], scope=end_point)
end_points[end_point] = net
if final_endpoint == end_point: return net, end_points
end_point = 'Conv2d_2c_3x3'
net = slim.conv2d(net, 192, [3, 3], scope=end_point)
end_points[end_point] = net
if final_endpoint == end_point: return net, end_points
end_point = 'MaxPool_3a_3x3'
net = slim.max_pool2d(net, [3, 3], stride=2, scope=end_point)
end_points[end_point] = net
if final_endpoint == end_point: return net, end_points
end_point = 'Mixed_3b'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, 64, [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, 96, [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, 128, [3, 3], scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(net, 16, [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, 32, [3, 3], scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_3'):
branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3')
branch_3 = slim.conv2d(branch_3, 32, [1, 1], scope='Conv2d_0b_1x1')
net = tf.concat(
axis=3, values=[branch_0, branch_1, branch_2, branch_3])
end_points[end_point] = net
if final_endpoint == end_point: return net, end_points
end_point = 'Mixed_3c'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, 128, [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, 128, [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, 192, [3, 3], scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(net, 32, [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, 96, [3, 3], scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_3'):
branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3')
branch_3 = slim.conv2d(branch_3, 64, [1, 1], scope='Conv2d_0b_1x1')
net = tf.concat(
axis=3, values=[branch_0, branch_1, branch_2, branch_3])
end_points[end_point] = net
if final_endpoint == end_point: return net, end_points
end_point = 'MaxPool_4a_3x3'
net = slim.max_pool2d(net, [3, 3], stride=2, scope=end_point)
end_points[end_point] = net
if final_endpoint == end_point: return net, end_points
end_point = 'Mixed_4b'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, 192, [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, 96, [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, 208, [3, 3], scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(net, 16, [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, 48, [3, 3], scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_3'):
branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3')
branch_3 = slim.conv2d(branch_3, 64, [1, 1], scope='Conv2d_0b_1x1')
net = tf.concat(
axis=3, values=[branch_0, branch_1, branch_2, branch_3])
end_points[end_point] = net
if final_endpoint == end_point: return net, end_points
end_point = 'Mixed_4c'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, 160, [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, 112, [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, 224, [3, 3], scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(net, 24, [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, 64, [3, 3], scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_3'):
branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3')
branch_3 = slim.conv2d(branch_3, 64, [1, 1], scope='Conv2d_0b_1x1')
net = tf.concat(
axis=3, values=[branch_0, branch_1, branch_2, branch_3])
end_points[end_point] = net
if final_endpoint == end_point: return net, end_points
end_point = 'Mixed_4d'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, 128, [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, 128, [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, 256, [3, 3], scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(net, 24, [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, 64, [3, 3], scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_3'):
branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3')
branch_3 = slim.conv2d(branch_3, 64, [1, 1], scope='Conv2d_0b_1x1')
net = tf.concat(
axis=3, values=[branch_0, branch_1, branch_2, branch_3])
end_points[end_point] = net
if final_endpoint == end_point: return net, end_points
end_point = 'Mixed_4e'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, 112, [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, 144, [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, 288, [3, 3], scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(net, 32, [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, 64, [3, 3], scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_3'):
branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3')
branch_3 = slim.conv2d(branch_3, 64, [1, 1], scope='Conv2d_0b_1x1')
net = tf.concat(
axis=3, values=[branch_0, branch_1, branch_2, branch_3])
end_points[end_point] = net
if final_endpoint == end_point: return net, end_points
end_point = 'Mixed_4f'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, 256, [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, 160, [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, 320, [3, 3], scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(net, 32, [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, 128, [3, 3], scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_3'):
branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3')
branch_3 = slim.conv2d(branch_3, 128, [1, 1], scope='Conv2d_0b_1x1')
net = tf.concat(
axis=3, values=[branch_0, branch_1, branch_2, branch_3])
end_points[end_point] = net
if final_endpoint == end_point: return net, end_points
end_point = 'MaxPool_5a_2x2'
net = slim.max_pool2d(net, [2, 2], stride=2, scope=end_point)
end_points[end_point] = net
if final_endpoint == end_point: return net, end_points
end_point = 'Mixed_5b'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, 256, [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, 160, [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, 320, [3, 3], scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(net, 32, [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, 128, [3, 3], scope='Conv2d_0a_3x3')
with tf.variable_scope('Branch_3'):
branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3')
branch_3 = slim.conv2d(branch_3, 128, [1, 1], scope='Conv2d_0b_1x1')
net = tf.concat(
axis=3, values=[branch_0, branch_1, branch_2, branch_3])
end_points[end_point] = net
if final_endpoint == end_point: return net, end_points
end_point = 'Mixed_5c'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, 384, [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, 192, [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, 384, [3, 3], scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(net, 48, [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, 128, [3, 3], scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_3'):
branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3')
branch_3 = slim.conv2d(branch_3, 128, [1, 1], scope='Conv2d_0b_1x1')
net = tf.concat(
axis=3, values=[branch_0, branch_1, branch_2, branch_3])
end_points[end_point] = net
if final_endpoint == end_point: return net, end_points
raise ValueError('Unknown final endpoint %s' % final_endpoint)
def inception_v1(inputs,
num_classes=1000,
is_training=True,
dropout_keep_prob=0.8,
prediction_fn=slim.softmax,
spatial_squeeze=True,
reuse=None,
scope='InceptionV1',
global_pool=False):
"""Defines the Inception V1 architecture.
This architecture is defined in:
Going deeper with convolutions
Christian Szegedy, Wei Liu, Yangqing Jia, Pierre Sermanet, Scott Reed,
Dragomir Anguelov, Dumitru Erhan, Vincent Vanhoucke, Andrew Rabinovich.
http://arxiv.org/pdf/1409.4842v1.pdf.
The default image size used to train this network is 224x224.
Args:
inputs: a tensor of size [batch_size, height, width, channels].
num_classes: number of predicted classes. If 0 or None, the logits layer
is omitted and the input features to the logits layer (before dropout)
are returned instead.
is_training: whether is training or not.
dropout_keep_prob: the percentage of activation values that are retained.
prediction_fn: a function to get predictions out of logits.
spatial_squeeze: if True, logits is of shape [B, C], if false logits is of
shape [B, 1, 1, C], where B is batch_size and C is number of classes.
reuse: whether or not the network and its variables should be reused. To be
able to reuse 'scope' must be given.
scope: Optional variable_scope.
global_pool: Optional boolean flag to control the avgpooling before the
logits layer. If false or unset, pooling is done with a fixed window
that reduces default-sized inputs to 1x1, while larger inputs lead to
larger outputs. If true, any input size is pooled down to 1x1.
Returns:
net: a Tensor with the logits (pre-softmax activations) if num_classes
is a non-zero integer, or the non-dropped-out input to the logits layer
if num_classes is 0 or None.
end_points: a dictionary from components of the network to the corresponding
activation.
"""
# Final pooling and prediction
with tf.variable_scope(scope, 'InceptionV1', [inputs], reuse=reuse) as scope:
with slim.arg_scope([slim.batch_norm, slim.dropout],
is_training=is_training):
net, end_points = inception_v1_base(inputs, scope=scope)
with tf.variable_scope('Logits'):
if global_pool:
# Global average pooling.
net = tf.reduce_mean(net, [1, 2], keep_dims=True, name='global_pool')
end_points['global_pool'] = net
else:
# Pooling with a fixed kernel size.
net = slim.avg_pool2d(net, [7, 7], stride=1, scope='AvgPool_0a_7x7')
end_points['AvgPool_0a_7x7'] = net
if not num_classes:
return net, end_points
net = slim.dropout(net, dropout_keep_prob, scope='Dropout_0b')
logits = slim.conv2d(net, num_classes, [1, 1], activation_fn=None,
normalizer_fn=None, scope='Conv2d_0c_1x1')
if spatial_squeeze:
logits = tf.squeeze(logits, [1, 2], name='SpatialSqueeze')
end_points['Logits'] = logits
end_points['Predictions'] = prediction_fn(logits, scope='Predictions')
return logits, end_points
inception_v1.default_image_size = 224
inception_v1_arg_scope = inception_utils.inception_arg_scope
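if __name__ == '__main__':
  # Minimal usage sketch (added for illustration; not part of the original
  # file). Builds the inference graph for a batch of 224x224 RGB images;
  # the number of classes is an arbitrary example value.
  images = tf.placeholder(
      tf.float32,
      [None, inception_v1.default_image_size,
       inception_v1.default_image_size, 3])
  with slim.arg_scope(inception_v1_arg_scope()):
    logits, end_points = inception_v1(images, num_classes=1000,
                                      is_training=False)
  print(logits.shape)               # (?, 1000)
  print(sorted(end_points.keys()))  # Conv2d_1a_7x7 ... Mixed_5c, Logits, Predictions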
|
TensorFlow2/Classification/ConvNets/efficientnet_v1/B0/training/AMP | AMP | train_benchmark_8xA100-80G | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
horovodrun -np 8 bash ./scripts/bind.sh --cpu=exclusive --ib=single -- python3 main.py \
--cfg config/efficientnet_v1/b0_cfg.py \
--mode train_and_eval \
--use_amp \
--use_xla \
--model_dir ./output \
--data_dir /data \
--log_steps 100 \
--max_epochs 3 \
--save_checkpoint_freq 5 \
--train_batch_size 1024 \
--eval_batch_size 1024 \
--augmenter_name autoaugment \
--lr_decay cosine \
--memory_limit 81000 \
--defer_img_mixing \
--moving_average_decay 0.9999 \
--lr_init 0.005 |
TensorFlow2/LanguageModeling/BERT/official/nlp/modeling/networks | networks | albert_transformer_encoder | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""ALBERT (https://arxiv.org/abs/1810.04805) text encoder network."""
from __future__ import absolute_import
from __future__ import division
# from __future__ import google_type_annotations
from __future__ import print_function
import tensorflow as tf
from official.modeling import activations
from official.nlp.modeling import layers
@tf.keras.utils.register_keras_serializable(package='Text')
class AlbertTransformerEncoder(tf.keras.Model):
"""ALBERT (https://arxiv.org/abs/1810.04805) text encoder network.
This network implements the encoder described in the paper "ALBERT: A Lite
BERT for Self-supervised Learning of Language Representations"
(https://arxiv.org/abs/1909.11942).
  Compared with BERT (https://arxiv.org/abs/1810.04805), ALBERT factorizes
embedding parameters into two smaller matrices and shares parameters
across layers.
The default values for this object are taken from the ALBERT-Base
implementation described in the paper.
Attributes:
vocab_size: The size of the token vocabulary.
embedding_width: The width of the word embeddings. If the embedding width
is not equal to hidden size, embedding parameters will be factorized into
two matrices in the shape of ['vocab_size', 'embedding_width'] and
['embedding_width', 'hidden_size'] ('embedding_width' is usually much
smaller than 'hidden_size').
hidden_size: The size of the transformer hidden layers.
num_layers: The number of transformer layers.
num_attention_heads: The number of attention heads for each transformer. The
hidden size must be divisible by the number of attention heads.
sequence_length: The sequence length that this encoder expects. If None, the
sequence length is dynamic; if an integer, the encoder will require
sequences padded to this length.
max_sequence_length: The maximum sequence length that this encoder can
consume. If None, max_sequence_length uses the value from sequence length.
This determines the variable shape for positional embeddings.
type_vocab_size: The number of types that the 'type_ids' input can take.
intermediate_size: The intermediate size for the transformer layers.
activation: The activation to use for the transformer layers.
dropout_rate: The dropout rate to use for the transformer layers.
attention_dropout_rate: The dropout rate to use for the attention layers
within the transformer layers.
    initializer: The initializer to use for all weights in this encoder.
float_dtype: The dtype of this encoder. Can be 'float32' or 'float16'.
"""
def __init__(self,
vocab_size,
embedding_width=128,
hidden_size=768,
num_layers=12,
num_attention_heads=12,
sequence_length=512,
max_sequence_length=None,
type_vocab_size=16,
intermediate_size=3072,
activation=activations.gelu,
dropout_rate=0.1,
attention_dropout_rate=0.1,
initializer=tf.keras.initializers.TruncatedNormal(stddev=0.02),
float_dtype='float32',
**kwargs):
activation = tf.keras.activations.get(activation)
initializer = tf.keras.initializers.get(initializer)
if not max_sequence_length:
max_sequence_length = sequence_length
self._self_setattr_tracking = False
self._config_dict = {
'vocab_size': vocab_size,
'embedding_width': embedding_width,
'hidden_size': hidden_size,
'num_layers': num_layers,
'num_attention_heads': num_attention_heads,
'sequence_length': sequence_length,
'max_sequence_length': max_sequence_length,
'type_vocab_size': type_vocab_size,
'intermediate_size': intermediate_size,
'activation': tf.keras.activations.serialize(activation),
'dropout_rate': dropout_rate,
'attention_dropout_rate': attention_dropout_rate,
'initializer': tf.keras.initializers.serialize(initializer),
'float_dtype': float_dtype,
}
word_ids = tf.keras.layers.Input(
shape=(sequence_length,), dtype=tf.int32, name='input_word_ids')
mask = tf.keras.layers.Input(
shape=(sequence_length,), dtype=tf.int32, name='input_mask')
type_ids = tf.keras.layers.Input(
shape=(sequence_length,), dtype=tf.int32, name='input_type_ids')
self._embedding_layer = layers.OnDeviceEmbedding(
vocab_size=vocab_size,
embedding_width=embedding_width,
initializer=initializer,
dtype=float_dtype,
name='word_embeddings')
word_embeddings = self._embedding_layer(word_ids)
# Always uses dynamic slicing for simplicity.
self._position_embedding_layer = layers.PositionEmbedding(
initializer=initializer,
use_dynamic_slicing=True,
max_sequence_length=max_sequence_length,
dtype=float_dtype)
position_embeddings = self._position_embedding_layer(word_embeddings)
type_embeddings = (
layers.OnDeviceEmbedding(
vocab_size=type_vocab_size,
embedding_width=embedding_width,
initializer=initializer,
use_one_hot=True,
dtype=float_dtype,
name='type_embeddings')(type_ids))
embeddings = tf.keras.layers.Add()(
[word_embeddings, position_embeddings, type_embeddings])
embeddings = (
tf.keras.layers.LayerNormalization(
name='embeddings/layer_norm',
axis=-1,
epsilon=1e-12,
dtype=float_dtype)(embeddings))
embeddings = (
tf.keras.layers.Dropout(rate=dropout_rate,
dtype=tf.float32)(embeddings))
# We project the 'embedding' output to 'hidden_size' if it is not already
# 'hidden_size'.
if embedding_width != hidden_size:
embeddings = layers.DenseEinsum(
output_shape=hidden_size,
kernel_initializer=initializer,
name='embedding_projection')(
embeddings)
if float_dtype == 'float16':
embeddings = tf.cast(embeddings, tf.float16)
data = embeddings
attention_mask = layers.SelfAttentionMask()([data, mask])
shared_layer = layers.Transformer(
num_attention_heads=num_attention_heads,
intermediate_size=intermediate_size,
intermediate_activation=activation,
dropout_rate=dropout_rate,
attention_dropout_rate=attention_dropout_rate,
kernel_initializer=initializer,
dtype=float_dtype,
name='transformer')
for _ in range(num_layers):
data = shared_layer([data, attention_mask])
first_token_tensor = (
tf.keras.layers.Lambda(lambda x: tf.squeeze(x[:, 0:1, :], axis=1))(data)
)
cls_output = tf.keras.layers.Dense(
units=hidden_size,
activation='tanh',
kernel_initializer=initializer,
dtype=float_dtype,
name='pooler_transform')(
first_token_tensor)
super(AlbertTransformerEncoder, self).__init__(
inputs=[word_ids, mask, type_ids],
outputs=[data, cls_output],
**kwargs)
def get_embedding_table(self):
return self._embedding_layer.embeddings
def get_config(self):
return self._config_dict
@classmethod
def from_config(cls, config):
return cls(**config)
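if __name__ == '__main__':
  # Minimal usage sketch (added for illustration; not part of the original
  # file). The sizes below are tiny, made-up values rather than a real
  # ALBERT configuration.
  import numpy as np
  encoder = AlbertTransformerEncoder(
      vocab_size=100,
      embedding_width=8,
      hidden_size=16,
      num_layers=2,
      num_attention_heads=2,
      sequence_length=10,
      type_vocab_size=2,
      intermediate_size=32)
  word_ids = np.random.randint(0, 100, size=(2, 10)).astype(np.int32)
  mask = np.ones((2, 10), dtype=np.int32)
  type_ids = np.zeros((2, 10), dtype=np.int32)
  sequence_output, cls_output = encoder([word_ids, mask, type_ids])
  print(sequence_output.shape, cls_output.shape)  # (2, 10, 16) (2, 16)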
|
TensorFlow/Classification/ConvNets/se-resnext101-32x4d/training | training | DGX1_SE-RNxt101-32x4d_FP32_90E | #!/bin/bash
# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
WORKSPACE=${1:-"/workspace/rn50v15_tf"}
DATA_DIR=${2:-"/data"}
OTHER=${@:3}
if [[ ! -z "${BIND_TO_SOCKET}" ]]; then
BIND_TO_SOCKET="--bind-to socket"
fi
mpiexec --allow-run-as-root ${BIND_TO_SOCKET} -np 8 python3 main.py --arch=se-resnext101-32x4d \
--mode=train_and_evaluate --iter_unit=epoch --num_iter=90 \
--batch_size=64 --warmup_steps=100 --cosine_lr --label_smoothing 0.1 \
--lr_init=0.256 --lr_warmup_epochs=8 --momentum=0.875 --weight_decay=6.103515625e-05 \
--data_dir=${DATA_DIR}/tfrecords --data_idx_dir=${DATA_DIR}/dali_idx \
--results_dir=${WORKSPACE}/results --weight_init=fan_in ${OTHER}
|
CUDA-Optimized/FastSpeech/fastspeech/trainer | trainer | trainer | # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import abc
import glob
import pathlib
import numpy as np
import torch
from tensorboardX import SummaryWriter
import time
import os
import matplotlib.pyplot as plt
from torch import nn
from fastspeech.utils.logging import tprint
from fastspeech.utils.pytorch import to_device_async
from fastspeech.utils.nvtx import Nvtx
from fastspeech.utils.fp16 import cast_model_to_half
import torch.cuda.profiler as profiler
from fastspeech.utils.logging import tprint
from fastspeech.utils.time import TimeElapsed
plt.switch_backend('Agg')
class Trainer(object):
"""
set seed
set n_epochs, n_steps
save/load model
validation
logging
distributed
"""
    def __init__(self, data_loader, model_name, model, optimizer_fn,
                 final_steps, lr_scheduler_fn=None, step=0, ckpt_path=None,
                 log_path=None, n_epochs=None, save_steps=None, log_steps=10,
                 device='cuda', use_amp=False, nvprof_iter_start=None,
                 nvprof_iter_end=None, pyprof_enabled=False,
                 detect_anomaly=False, seed=None):
self.data_loader = data_loader
self.model_name = model_name
self.model = model
self.n_epochs = n_epochs
self.save_steps = save_steps
self.log_steps = log_steps
self.ckpt_path = ckpt_path
self.log_path = log_path
self.final_steps = final_steps
self.step = step
self.device = device
self.use_amp = use_amp
self.nvprof_iter_start = nvprof_iter_start
self.nvprof_iter_end = nvprof_iter_end
self.pyprof_enabled = pyprof_enabled
self.detect_anomaly = detect_anomaly
# model
self.model.train()
to_device_async(self.model, self.device)
num_param = sum(param.numel() for param in model.parameters())
tprint('The number of {} parameters: {}'.format(
self.model_name, num_param))
# optimizer
self.optimizer = optimizer_fn(model)
# lr scheduler
if lr_scheduler_fn:
self.lr_scheduler = lr_scheduler_fn(self.optimizer)
else:
self.lr_scheduler = None
# automatic mixed precision
if self.use_amp:
from apex import amp
self.model, self.optimizer = amp.initialize(self.model,
self.optimizer,
opt_level='O1')
# profile
        if (nvprof_iter_start is not None and nvprof_iter_end is not None
                and pyprof_enabled):
from apex import pyprof
pyprof.nvtx.init()
# data parallel
self.model = nn.DataParallel(self.model)
# set seed
if seed is None:
seed = np.random.randint(2**16)
np.random.seed(seed)
torch.manual_seed(seed)
# data loader
self.data_loader_iter = self.repeat(self.data_loader, n_epochs)
# logging
if log_path:
# tensorboard log path : {log_path}/YYYYMMDD-HHMMMSS
log_path = os.path.join(log_path, time.strftime('%Y%m%d-%H%M%S'))
self.tbwriter = SummaryWriter(log_dir=log_path, flush_secs=10)
# checkpoint path
if self.ckpt_path:
self.ckpt_path = os.path.join(self.ckpt_path, self.model_name)
pathlib.Path(self.ckpt_path).mkdir(parents=True, exist_ok=True)
# load checkpoint
self.load()
def train(self):
try:
with torch.autograd.profiler.emit_nvtx(enabled=self.pyprof_enabled):
for i in range(self.step+1, self.final_steps + 1):
self.step = i
tprint("------------- TRAIN step : {} -------------".format(i))
if self.nvprof_iter_start and i == self.nvprof_iter_start:
profiler.start()
timer = TimeElapsed(name="Training time during profiling", format=":.6f")
timer.start()
with Nvtx("step #{}".format(self.step)):
loss, meta = self.do_step()
if self.nvprof_iter_end and i == self.nvprof_iter_end:
profiler.stop()
timer.end()
if self.lr_scheduler:
for param_group in self.optimizer.param_groups:
tprint("lr: {:06f}".format(param_group['lr']))
self.lr_scheduler.step(self.step)
if self.step % self.log_steps == 0:
self.log(loss, meta)
if self.ckpt_path and self.save_steps and i % self.save_steps == 0:
self.save()
tprint("Training has been done.")
except StopIteration: # done by n_epochs
tprint("Training has been done. (by n_epochs)")
except KeyboardInterrupt:
tprint("Training has been canceled.")
@abc.abstractmethod
def loss(self, inputs, model):
        raise NotImplementedError
def do_step(self):
with Nvtx("data load", enabled=False):
data = next(self.data_loader_iter)
with torch.autograd.set_detect_anomaly(mode=self.detect_anomaly):
with Nvtx("forward"):
loss, meta = self.loss(data, self.model)
self.optimizer.zero_grad()
with Nvtx("backward"):
if self.use_amp:
from apex import amp
with amp.scale_loss(loss, self.optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
with Nvtx("weight update"):
self.optimizer.step()
return loss, meta
def log(self, loss, meta):
self.console_log('train', loss, meta)
if self.log_path:
self.tensorboard_log('train', loss)
def save(self):
state_dict = {
'step': self.step,
'model': self.model.state_dict(),
'optim': self.optimizer.state_dict(),
}
torch.save(state_dict, self.ckpt_path +
'/checkpoint_{:06d}.pt'.format(self.step))
tprint('[Save] Model "{}". Step={}.'.format(
self.model_name, self.step))
def load(self, load_optim=True):
files_exist = glob.glob(os.path.join(self.ckpt_path, '*'))
if files_exist:
# load the latest created file.
latest_file = max(files_exist, key=os.path.getctime)
state_dict = torch.load(latest_file)
self.step = state_dict['step']
self.model.load_state_dict(state_dict['model'])
if load_optim:
self.optimizer.load_state_dict(state_dict['optim'])
tprint('[Load] Checkpoint \'{}\'. Step={}'.format(
latest_file, self.step))
else:
tprint('No checkpoints in {}. Load skipped.'.format(self.ckpt_path))
def console_log(self, tag, loss, meta):
# console logging
msg = 'loss: {:.6f}'.format(loss)
for key, value in meta.items():
msg += ',\t{}: {:.4f}'.format(key, value)
tprint(msg)
def tensorboard_log(self, tag, loss):
self.tbwriter.add_scalar(
'{}/loss'.format(tag), loss, global_step=self.step)
@staticmethod
def repeat(iterable, n_repeat=None):
cnt = 0
while n_repeat is None or cnt < n_repeat:
for x in iterable:
yield x
cnt += 1
        # When the loop ends (n_epochs reached), the generator simply returns;
        # the next call to next() then raises StopIteration, which train() catches.
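# --- Illustrative subclass sketch (added for clarity; not part of the original
# module). Trainer leaves `loss` abstract: a concrete trainer only has to
# return (loss, meta) for one batch. The model, optimizer and data below are
# hypothetical stand-ins, not the FastSpeech ones.
class L2ExampleTrainer(Trainer):
    def loss(self, inputs, model):
        x, target = inputs
        x = x.to(self.device, non_blocking=True)
        target = target.to(self.device, non_blocking=True)
        pred = model(x)
        loss = torch.nn.functional.mse_loss(pred, target)
        meta = {'mse': loss.item()}
        return loss, meta
# A hypothetical construction could look like:
#   trainer = L2ExampleTrainer(data_loader, 'toy_model', model,
#                              optimizer_fn=lambda m: torch.optim.Adam(m.parameters()),
#                              final_steps=1000)
#   trainer.train()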
|
TensorFlow/Detection/SSD/models/research/object_detection/core | core | box_coder | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base box coder.
Box coders convert between coordinate frames, namely image-centric
(with (0,0) on the top left of image) and anchor-centric (with (0,0) being
defined by a specific anchor).
Users of a BoxCoder can call two methods:
encode: which encodes a box with respect to a given anchor
(or rather, a tensor of boxes wrt a corresponding tensor of anchors) and
decode: which inverts this encoding with a decode operation.
In both cases, the arguments are assumed to be in 1-1 correspondence already;
it is not the job of a BoxCoder to perform matching.
"""
from abc import ABCMeta
from abc import abstractmethod
from abc import abstractproperty
import tensorflow as tf
# Box coder types.
FASTER_RCNN = 'faster_rcnn'
KEYPOINT = 'keypoint'
MEAN_STDDEV = 'mean_stddev'
SQUARE = 'square'
class BoxCoder(object, metaclass=ABCMeta):
  """Abstract base class for box coder."""
@abstractproperty
def code_size(self):
"""Return the size of each code.
This number is a constant and should agree with the output of the `encode`
op (e.g. if rel_codes is the output of self.encode(...), then it should have
shape [N, code_size()]). This abstractproperty should be overridden by
implementations.
Returns:
an integer constant
"""
pass
def encode(self, boxes, anchors):
"""Encode a box list relative to an anchor collection.
Args:
boxes: BoxList holding N boxes to be encoded
anchors: BoxList of N anchors
Returns:
a tensor representing N relative-encoded boxes
"""
with tf.name_scope('Encode'):
return self._encode(boxes, anchors)
def decode(self, rel_codes, anchors):
"""Decode boxes that are encoded relative to an anchor collection.
Args:
rel_codes: a tensor representing N relative-encoded boxes
anchors: BoxList of anchors
Returns:
boxlist: BoxList holding N boxes encoded in the ordinary way (i.e.,
with corners y_min, x_min, y_max, x_max)
"""
with tf.name_scope('Decode'):
return self._decode(rel_codes, anchors)
@abstractmethod
def _encode(self, boxes, anchors):
"""Method to be overriden by implementations.
Args:
boxes: BoxList holding N boxes to be encoded
anchors: BoxList of N anchors
Returns:
a tensor representing N relative-encoded boxes
"""
pass
@abstractmethod
def _decode(self, rel_codes, anchors):
"""Method to be overriden by implementations.
Args:
rel_codes: a tensor representing N relative-encoded boxes
anchors: BoxList of anchors
Returns:
boxlist: BoxList holding N boxes encoded in the ordinary way (i.e.,
with corners y_min, x_min, y_max, x_max)
"""
pass
def batch_decode(encoded_boxes, box_coder, anchors):
"""Decode a batch of encoded boxes.
This op takes a batch of encoded bounding boxes and transforms
them to a batch of bounding boxes specified by their corners in
the order of [y_min, x_min, y_max, x_max].
Args:
encoded_boxes: a float32 tensor of shape [batch_size, num_anchors,
code_size] representing the location of the objects.
box_coder: a BoxCoder object.
anchors: a BoxList of anchors used to encode `encoded_boxes`.
Returns:
decoded_boxes: a float32 tensor of shape [batch_size, num_anchors,
coder_size] representing the corners of the objects in the order
of [y_min, x_min, y_max, x_max].
Raises:
ValueError: if batch sizes of the inputs are inconsistent, or if
the number of anchors inferred from encoded_boxes and anchors are
inconsistent.
"""
encoded_boxes.get_shape().assert_has_rank(3)
if encoded_boxes.get_shape()[1].value != anchors.num_boxes_static():
raise ValueError('The number of anchors inferred from encoded_boxes'
' and anchors are inconsistent: shape[1] of encoded_boxes'
' %s should be equal to the number of anchors: %s.' %
(encoded_boxes.get_shape()[1].value,
anchors.num_boxes_static()))
decoded_boxes = tf.stack([
box_coder.decode(boxes, anchors).get()
for boxes in tf.unstack(encoded_boxes)
])
return decoded_boxes
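# --- Illustrative sketch (added for clarity; not part of the original module).
# A toy BoxCoder that encodes a box as the raw corner-wise difference from its
# anchor. It only demonstrates the encode/decode contract; the real coders
# (faster_rcnn, keypoint, mean_stddev, square) are implemented elsewhere in
# the code base.
class OffsetBoxCoder(BoxCoder):
  """Toy coder: rel_codes = boxes - anchors, corner-wise."""

  @property
  def code_size(self):
    return 4

  def _encode(self, boxes, anchors):
    return boxes.get() - anchors.get()

  def _decode(self, rel_codes, anchors):
    # Imported locally so this sketch does not alter the module's imports.
    from object_detection.core import box_list
    return box_list.BoxList(rel_codes + anchors.get())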
|
PyTorch/Classification/ConvNets/triton/deployment_toolkit | deployment_toolkit | report | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import csv
import re
from typing import Dict, List
from natsort import natsorted
from tabulate import tabulate
def sort_results(results: List):
results = natsorted(results, key=lambda item: [item[key] for key in item.keys()])
return results
def save_results(filename: str, data: List, formatted: bool = False):
data = format_data(data=data) if formatted else data
with open(filename, "a") as csvfile:
fieldnames = data[0].keys()
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writeheader()
for row in data:
writer.writerow(row)
def format_data(data: List[Dict]) -> List[Dict]:
formatted_data = list()
for item in data:
formatted_item = format_keys(data=item)
formatted_data.append(formatted_item)
return formatted_data
def format_keys(data: Dict) -> Dict:
keys = {format_key(key=key): value for key, value in data.items()}
return keys
def format_key(key: str) -> str:
key = " ".join([k.capitalize() for k in re.split("_| ", key)])
return key
def show_results(results: List[Dict]):
headers = list(results[0].keys())
summary = map(lambda x: list(map(lambda item: item[1], x.items())), results)
print(tabulate(summary, headers=headers))
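if __name__ == "__main__":
    # Minimal usage sketch (added for illustration; not part of the original
    # module). The metric names, values and the output filename are made up.
    results = [
        {"batch_size": 8, "throughput_img_s": 1200.0},
        {"batch_size": 1, "throughput_img_s": 300.0},
    ]
    results = sort_results(results)      # natural sort by each item's values
    show_results(format_data(results))   # table with headers "Batch Size", ...
    # Note: save_results appends to the file and writes a header row each call.
    save_results("results.csv", results, formatted=True)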
|