text (string, 96–319k chars) | id (string, 14–178 chars) | metadata (dict)
---|---|---|
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass, field
from typing import Optional
from datasets import load_dataset
from huggingface_hub import ModelCard
from transformers import HfArgumentParser
@dataclass
class ScriptArguments:
r"""
Arguments for the script.
Args:
push_to_hub (`bool`, *optional*, defaults to `False`):
Whether to push the dataset to the Hugging Face Hub.
repo_id (`str`, *optional*, defaults to `"trl-lib/tldr-preference"`):
Hugging Face repository ID to push the dataset to.
dataset_num_proc (`int` or `None`, *optional*, defaults to `None`):
Number of workers to use for dataset processing.
"""
push_to_hub: bool = field(
default=False,
metadata={"help": "Whether to push the dataset to the Hugging Face Hub."},
)
repo_id: str = field(
default="trl-lib/tldr-preference",
metadata={"help": "Hugging Face repository ID to push the dataset to."},
)
dataset_num_proc: Optional[int] = field(
default=None,
metadata={"help": "Number of workers to use for dataset processing."},
)
def to_preference(example):
info = example["info"]
if example["batch"] in ["batch0_cnndm", "cnndm0", "cnndm2"]: # CNN Daily Mail batches
article = info["article"].replace("\n\n", "\n")
prompt = f"TITLE: {info['title']}\n\n{article}\n\nTL;DR:"
elif example["batch"] in [f"batch{i}" for i in range(3, 23)] + ["edit_b2_eval_test"]: # Reddit batches
post = info["post"].replace("\n\n", "\n")
prompt = f"SUBREDDIT: r/{info['subreddit']}\n\nTITLE: {info['title']}\n\nPOST: {post}\n\nTL;DR:"
else:
raise ValueError(f"Unknown batch: {example['batch']}")
chosen_idx = example["choice"]
rejected_idx = 1 - chosen_idx
chosen = example["summaries"][chosen_idx]["text"]
rejected = example["summaries"][rejected_idx]["text"]
return {"prompt": prompt, "chosen": chosen, "rejected": rejected}
model_card = ModelCard("""
---
tags: [trl]
---
# TL;DR Dataset for Preference Learning
## Summary
The TL;DR dataset is a processed version of Reddit posts, specifically curated to train models using the [TRL library](https://github.com/huggingface/trl) for preference learning and Reinforcement Learning from Human Feedback (RLHF) tasks. It leverages the common practice on Reddit where users append "TL;DR" (Too Long; Didn't Read) summaries to lengthy posts, providing a rich source of paired text data for training models to understand and generate concise summaries.
## Data Structure
- **Format**: [Standard](https://huggingface.co/docs/trl/main/dataset_formats#standard)
- **Type**: [Preference](https://huggingface.co/docs/trl/main/dataset_formats#preference)
Columns:
- `"prompt"`: The formatted source text (a Reddit post with its subreddit and title, or a CNN/DailyMail article with its title), ending with a "TL;DR:" cue.
- `"chosen"`: The summary that human annotators preferred for the given prompt.
- `"rejected"`: The alternative summary that was not selected.
This structure enables models to learn the relationship between detailed content and its abbreviated form, enhancing their summarization capabilities.
## Generation script
The script used to generate this dataset can be found [here](https://github.com/huggingface/trl/blob/main/examples/datasets/tldr_preference.py).
""")
if __name__ == "__main__":
parser = HfArgumentParser(ScriptArguments)
script_args = parser.parse_args_into_dataclasses()[0]
dataset = load_dataset("openai/summarize_from_feedback", "comparisons")
dataset = dataset.map(
to_preference,
num_proc=script_args.dataset_num_proc,
remove_columns=["info", "summaries", "choice", "worker", "batch", "split", "extra"],
)
if script_args.push_to_hub:
dataset.push_to_hub(script_args.repo_id)
model_card.push_to_hub(script_args.repo_id, repo_type="dataset")
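# Example invocation (a sketch; the repo ID below is illustrative and requires write access on the Hub):
#   python examples/datasets/tldr_preference.py --push_to_hub --repo_id <your-org>/tldr-preference --dataset_num_proc 4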
| trl/examples/datasets/tldr_preference.py/0 | {
"file_path": "trl/examples/datasets/tldr_preference.py",
"repo_id": "trl",
"token_count": 1547
} |
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Fine-Tune Llama2-7b on SE paired dataset
import os
from dataclasses import dataclass, field
from typing import Optional
import torch
from accelerate import Accelerator
from datasets import load_dataset
from peft import AutoPeftModelForCausalLM, LoraConfig
from tqdm import tqdm
from transformers import (
AutoModelForCausalLM,
AutoTokenizer,
BitsAndBytesConfig,
HfArgumentParser,
is_torch_npu_available,
is_torch_xpu_available,
set_seed,
)
from trl import SFTConfig, SFTTrainer
from trl.trainer import ConstantLengthDataset
@dataclass
class ScriptArguments:
model_name: Optional[str] = field(default="meta-llama/Llama-2-7b-hf", metadata={"help": "the model name"})
dataset_name: Optional[str] = field(default="lvwerra/stack-exchange-paired", metadata={"help": "the dataset name"})
subset: Optional[str] = field(default="data/finetune", metadata={"help": "the subset to use"})
split: Optional[str] = field(default="train", metadata={"help": "the split to use"})
size_valid_set: Optional[int] = field(default=4000, metadata={"help": "the size of the validation set"})
streaming: Optional[bool] = field(default=True, metadata={"help": "whether to stream the dataset"})
shuffle_buffer: Optional[int] = field(default=5000, metadata={"help": "the shuffle buffer size"})
seq_length: Optional[int] = field(default=1024, metadata={"help": "the sequence length"})
num_workers: Optional[int] = field(default=4, metadata={"help": "the number of workers"})
use_bnb: Optional[bool] = field(default=True, metadata={"help": "whether to use BitsAndBytes"})
# LoraConfig
lora_alpha: Optional[float] = field(default=16, metadata={"help": "the lora alpha parameter"})
lora_dropout: Optional[float] = field(default=0.05, metadata={"help": "the lora dropout parameter"})
lora_r: Optional[int] = field(default=8, metadata={"help": "the lora r parameter"})
parser = HfArgumentParser((ScriptArguments, SFTConfig))
script_args, training_args = parser.parse_args_into_dataclasses()
peft_config = LoraConfig(
r=script_args.lora_r,
lora_alpha=script_args.lora_alpha,
lora_dropout=script_args.lora_dropout,
target_modules=["q_proj", "v_proj"],
bias="none",
task_type="CAUSAL_LM",
)
if training_args.group_by_length and training_args.packing:
raise ValueError("Cannot use both packing and group by length")
# `gradient_checkpointing` was True by default until `1f3314`, but it's actually not used.
# `gradient_checkpointing=True` will cause an error in `Variable._execution_engine.run_backward`.
if training_args.gradient_checkpointing:
raise ValueError("gradient_checkpointing not supported")
set_seed(training_args.seed)
def chars_token_ratio(dataset, tokenizer, nb_examples=400):
"""
Estimate the average number of characters per token in the dataset.
"""
total_characters, total_tokens = 0, 0
for _, example in tqdm(zip(range(nb_examples), iter(dataset)), total=nb_examples):
text = prepare_sample_text(example)
total_characters += len(text)
if tokenizer.is_fast:
total_tokens += len(tokenizer(text).tokens())
else:
total_tokens += len(tokenizer.tokenize(text))
return total_characters / total_tokens
def print_trainable_parameters(model):
"""
Prints the number of trainable parameters in the model.
"""
trainable_params = 0
all_param = 0
for _, param in model.named_parameters():
all_param += param.numel()
if param.requires_grad:
trainable_params += param.numel()
print(
f"trainable params: {trainable_params} || all params: {all_param} || trainable%: {100 * trainable_params / all_param}"
)
def prepare_sample_text(example):
"""Prepare the text from a sample of the dataset."""
text = f"Question: {example['question']}\n\nAnswer: {example['response_j']}"
return text
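# A brief sketch (not part of the original script) of the formatting applied to each sample;
# the field values are illustrative:
#   prepare_sample_text({"question": "How do I sort a list?", "response_j": "Use sorted(my_list)."})
#   # -> "Question: How do I sort a list?\n\nAnswer: Use sorted(my_list)."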
def create_datasets(tokenizer, args, seed=None):
dataset = load_dataset(
args.dataset_name,
data_dir=args.subset,
split=args.split,
use_auth_token=True,
num_proc=args.num_workers if not args.streaming else None,
streaming=args.streaming,
)
if args.streaming:
print("Loading the dataset in streaming mode")
valid_data = dataset.take(args.size_valid_set)
train_data = dataset.skip(args.size_valid_set)
train_data = train_data.shuffle(buffer_size=args.shuffle_buffer, seed=seed)
else:
dataset = dataset.train_test_split(test_size=0.005, seed=seed)
train_data = dataset["train"]
valid_data = dataset["test"]
print(f"Size of the train set: {len(train_data)}. Size of the validation set: {len(valid_data)}")
chars_per_token = chars_token_ratio(train_data, tokenizer)
print(f"The character to token ratio of the dataset is: {chars_per_token:.2f}")
train_dataset = ConstantLengthDataset(
tokenizer,
train_data,
formatting_func=prepare_sample_text,
infinite=True,
seq_length=args.seq_length,
chars_per_token=chars_per_token,
)
valid_dataset = ConstantLengthDataset(
tokenizer,
valid_data,
formatting_func=prepare_sample_text,
infinite=False,
seq_length=args.seq_length,
chars_per_token=chars_per_token,
)
return train_dataset, valid_dataset
bnb_config = None
if script_args.use_bnb:
bnb_config = BitsAndBytesConfig(
load_in_4bit=True,
bnb_4bit_quant_type="nf4",
bnb_4bit_compute_dtype=torch.bfloat16,
)
base_model = AutoModelForCausalLM.from_pretrained(
script_args.model_name,
quantization_config=bnb_config,
device_map={"": Accelerator().local_process_index},
trust_remote_code=True,
use_auth_token=True,
)
base_model.config.use_cache = False
tokenizer = AutoTokenizer.from_pretrained(script_args.model_name, trust_remote_code=True)
tokenizer.pad_token = tokenizer.eos_token
tokenizer.padding_side = "right" # Fix weird overflow issue with fp16 training
train_dataset, eval_dataset = create_datasets(tokenizer, script_args, seed=training_args.seed)
trainer = SFTTrainer(
model=base_model,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
peft_config=peft_config,
max_seq_length=None,
formatting_func=prepare_sample_text,
processing_class=tokenizer,
args=training_args,
)
trainer.train()
trainer.save_model(training_args.output_dir)
output_dir = os.path.join(training_args.output_dir, "final_checkpoint")
trainer.model.save_pretrained(output_dir)
# Free memory for merging weights
del base_model
if is_torch_xpu_available():
torch.xpu.empty_cache()
elif is_torch_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
model = AutoPeftModelForCausalLM.from_pretrained(output_dir, device_map="auto", torch_dtype=torch.bfloat16)
model = model.merge_and_unload()
output_merged_dir = os.path.join(training_args.output_dir, "final_merged_checkpoint")
model.save_pretrained(output_merged_dir, safe_serialization=True)
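# Example launch (a sketch, not from the original script; flag values are illustrative and combine the
# ScriptArguments defaults above with standard SFTConfig/TrainingArguments options):
#   accelerate launch examples/research_projects/stack_llama_2/scripts/sft_llama2.py \
#       --output_dir ./sft_llama2 \
#       --max_steps 500 \
#       --logging_steps 10 \
#       --per_device_train_batch_size 4 \
#       --gradient_accumulation_steps 2 \
#       --learning_rate 1e-4 \
#       --bf16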
| trl/examples/research_projects/stack_llama_2/scripts/sft_llama2.py/0 | {
"file_path": "trl/examples/research_projects/stack_llama_2/scripts/sft_llama2.py",
"repo_id": "trl",
"token_count": 2893
} |
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
# Full training:
python examples/scripts/gkd.py \
--model_name_or_path Qwen/Qwen2-0.5B-Instruct \
--teacher_model_name_or_path Qwen/Qwen2-1.5B-Instruct \
--dataset_name trl-lib/chatbot_arena_completions \
--learning_rate 2e-5 \
--per_device_train_batch_size 4 \
--gradient_accumulation_steps 8 \
--output_dir gkd-model \
--logging_steps 10 \
--num_train_epochs 1 \
--push_to_hub \
--gradient_checkpointing
# LoRA:
python examples/scripts/gkd.py \
--model_name_or_path Qwen/Qwen2-0.5B-Instruct \
--teacher_model_name_or_path Qwen/Qwen2-1.5B-Instruct \
--dataset_name trl-lib/chatbot_arena_completions \
--learning_rate 2e-4 \
--per_device_train_batch_size 4 \
--gradient_accumulation_steps 8 \
--output_dir gkd-model \
--logging_steps 10 \
--num_train_epochs 1 \
--push_to_hub \
--gradient_checkpointing \
--use_peft \
--lora_r 64 \
--lora_alpha 16
"""
from accelerate import PartialState
from datasets import load_dataset
from transformers import AutoTokenizer, GenerationConfig
from trl import (
GKDConfig,
GKDTrainer,
LogCompletionsCallback,
ModelConfig,
ScriptArguments,
TrlParser,
get_kbit_device_map,
get_peft_config,
get_quantization_config,
)
if __name__ == "__main__":
parser = TrlParser((ScriptArguments, GKDConfig, ModelConfig))
script_args, training_args, model_args = parser.parse_args_and_config()
################
# Model & Tokenizer
################
quantization_config = get_quantization_config(model_args)
model_kwargs = dict(
revision=model_args.model_revision,
trust_remote_code=model_args.trust_remote_code,
attn_implementation=model_args.attn_implementation,
torch_dtype=model_args.torch_dtype,
use_cache=False if training_args.gradient_checkpointing else True,
device_map=get_kbit_device_map() if quantization_config is not None else None,
quantization_config=quantization_config,
)
training_args.model_init_kwargs = model_kwargs
teacher_model_kwargs = dict(
revision=model_args.model_revision,
trust_remote_code=model_args.trust_remote_code,
attn_implementation=model_args.attn_implementation,
torch_dtype=model_args.torch_dtype,
use_cache=True,
device_map=get_kbit_device_map() if quantization_config is not None else None,
quantization_config=quantization_config,
)
training_args.teacher_model_init_kwargs = teacher_model_kwargs
tokenizer = AutoTokenizer.from_pretrained(
model_args.model_name_or_path,
revision=model_args.model_revision,
trust_remote_code=model_args.trust_remote_code,
padding_side="left",
)
if tokenizer.pad_token is None:
tokenizer.pad_token = tokenizer.eos_token
################
# Dataset
################
dataset = load_dataset(script_args.dataset_name, name=script_args.dataset_config)
with PartialState().local_main_process_first():
dataset = dataset.map(
lambda x: {
"prompt": tokenizer.apply_chat_template(x["prompt"], tokenize=False, add_generation_prompt=True)
},
num_proc=training_args.dataset_num_proc,
)
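    # A rough sketch (not part of the original script) of what the mapping above produces. With a
    # Qwen2-style chat template, a conversational prompt such as
    #   [{"role": "user", "content": "What color is the sky?"}]
    # becomes a single string along the lines of
    #   "<|im_start|>user\nWhat color is the sky?<|im_end|>\n<|im_start|>assistant\n"
    # (the exact markers depend on the tokenizer's chat template).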
################
# Training
################
trainer = GKDTrainer(
model=model_args.model_name_or_path,
teacher_model=training_args.teacher_model_name_or_path,
args=training_args,
train_dataset=dataset[script_args.dataset_train_split],
eval_dataset=dataset[script_args.dataset_test_split] if training_args.eval_strategy != "no" else None,
processing_class=tokenizer,
peft_config=get_peft_config(model_args),
)
if training_args.eval_strategy != "no":
generation_config = GenerationConfig(
max_new_tokens=training_args.max_new_tokens, do_sample=True, temperature=training_args.temperature
)
completions_callback = LogCompletionsCallback(trainer, generation_config, num_prompts=8)
trainer.add_callback(completions_callback)
trainer.train()
# Save and push to hub
trainer.save_model(training_args.output_dir)
if training_args.push_to_hub:
trainer.push_to_hub(dataset_name=script_args.dataset_name)
| trl/examples/scripts/gkd.py/0 | {
"file_path": "trl/examples/scripts/gkd.py",
"repo_id": "trl",
"token_count": 1996
} |
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import sys
import tempfile
import unittest
from datasets import load_dataset
from transformers import AutoModelForCausalLM, AutoTokenizer, GenerationConfig, Trainer, TrainingArguments
from transformers.testing_utils import require_peft, require_wandb
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import is_peft_available
from tests.testing_utils import require_comet, require_mergekit
from trl import BasePairwiseJudge, DPOConfig, DPOTrainer, LogCompletionsCallback, MergeModelCallback, WinRateCallback
from trl.mergekit_utils import MergeConfig
if is_peft_available():
from peft import LoraConfig
class HalfPairwiseJudge(BasePairwiseJudge):
    """Naive pairwise judge that always returns [1, 0] for a batch of two prompts, or [0.3, 0.9] when `return_scores=True`"""
def judge(self, prompts, completions, shuffle_order=True, return_scores=False):
# just check that the batch size is 2
assert len(prompts) == 2
if return_scores:
return [0.3, 0.9]
return [1, 0]
class TrainerWithRefModel(Trainer):
# This is a dummy class to test the callback. Compared to the Trainer class, it only has an additional
# ref_model attribute
def __init__(self, model, ref_model, args, train_dataset, eval_dataset, processing_class):
super().__init__(
model=model,
args=args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
processing_class=processing_class,
)
self.ref_model = ref_model
class WinRateCallbackTester(unittest.TestCase):
def setUp(self):
self.model = AutoModelForCausalLM.from_pretrained("trl-internal-testing/tiny-Qwen2ForCausalLM-2.5")
self.ref_model = AutoModelForCausalLM.from_pretrained("trl-internal-testing/tiny-Qwen2ForCausalLM-2.5")
self.tokenizer = AutoTokenizer.from_pretrained("trl-internal-testing/tiny-Qwen2ForCausalLM-2.5")
self.tokenizer.pad_token = self.tokenizer.eos_token
dataset = load_dataset("trl-internal-testing/zen", "standard_prompt_only")
dataset["train"] = dataset["train"].select(range(8))
self.expected_winrates = [
{"eval_win_rate": 0.5, "epoch": 0.0, "step": 0},
{"eval_win_rate": 0.5, "epoch": 0.5, "step": 2},
{"eval_win_rate": 0.5, "epoch": 1.0, "step": 4},
{"eval_win_rate": 0.5, "epoch": 1.5, "step": 6},
{"eval_win_rate": 0.5, "epoch": 2.0, "step": 8},
{"eval_win_rate": 0.5, "epoch": 2.5, "step": 10},
{"eval_win_rate": 0.5, "epoch": 3.0, "step": 12},
]
def tokenize_function(examples):
out = self.tokenizer(examples["prompt"], padding="max_length", max_length=16, truncation=True)
out["labels"] = out["input_ids"].copy()
return out
self.dataset = dataset.map(tokenize_function, batched=True)
self.generation_config = GenerationConfig(max_length=32)
self.judge = HalfPairwiseJudge()
def test_basic(self):
with tempfile.TemporaryDirectory() as tmp_dir:
training_args = TrainingArguments(
output_dir=tmp_dir,
eval_strategy="steps",
eval_steps=2, # evaluate every 2 steps
per_device_train_batch_size=2, # 8 samples in total so 4 batches of 2 per epoch
per_device_eval_batch_size=2,
report_to="none",
)
trainer = TrainerWithRefModel(
model=self.model,
ref_model=self.ref_model,
args=training_args,
train_dataset=self.dataset["train"],
eval_dataset=self.dataset["test"],
processing_class=self.tokenizer,
)
win_rate_callback = WinRateCallback(
judge=self.judge, trainer=trainer, generation_config=self.generation_config
)
trainer.add_callback(win_rate_callback)
trainer.train()
winrate_history = [h for h in trainer.state.log_history if "eval_win_rate" in h]
self.assertListEqual(winrate_history, self.expected_winrates)
def test_without_ref_model(self):
# Same as before, but without the ref_model attribute. It should use the model attribute instead
with tempfile.TemporaryDirectory() as tmp_dir:
training_args = TrainingArguments(
output_dir=tmp_dir,
eval_strategy="steps",
eval_steps=2, # evaluate every 2 steps
per_device_train_batch_size=2, # 8 samples in total so 4 batches of 2 per epoch
per_device_eval_batch_size=2,
report_to="none",
)
trainer = Trainer(
model=self.model,
args=training_args,
train_dataset=self.dataset["train"],
eval_dataset=self.dataset["test"],
processing_class=self.tokenizer,
)
win_rate_callback = WinRateCallback(
judge=self.judge, trainer=trainer, generation_config=self.generation_config
)
trainer.add_callback(win_rate_callback)
trainer.train()
winrate_history = [h for h in trainer.state.log_history if "eval_win_rate" in h]
self.assertListEqual(winrate_history, self.expected_winrates)
def test_soft_judge(self):
"""Test that the soft judge functionality works correctly"""
with tempfile.TemporaryDirectory() as tmp_dir:
training_args = TrainingArguments(
output_dir=tmp_dir,
eval_strategy="steps",
eval_steps=2, # evaluate every 2 steps
per_device_train_batch_size=2, # 8 samples in total so 4 batches of 2 per epoch
per_device_eval_batch_size=2,
report_to="none",
)
trainer = TrainerWithRefModel(
model=self.model,
ref_model=self.ref_model,
args=training_args,
train_dataset=self.dataset["train"],
eval_dataset=self.dataset["test"],
processing_class=self.tokenizer,
)
win_rate_callback = WinRateCallback(
judge=self.judge, trainer=trainer, generation_config=self.generation_config, use_soft_judge=True
)
trainer.add_callback(win_rate_callback)
trainer.train()
# Expected values based on judge returning [0.3, 0.9] for each pair
expected_soft_winrates = [
{"eval_avg_win_prob": 0.4, "eval_win_rate": 0.5, "epoch": 0.0, "step": 0},
{"eval_avg_win_prob": 0.4, "eval_win_rate": 0.5, "epoch": 0.5, "step": 2},
{"eval_avg_win_prob": 0.4, "eval_win_rate": 0.5, "epoch": 1.0, "step": 4},
{"eval_avg_win_prob": 0.4, "eval_win_rate": 0.5, "epoch": 1.5, "step": 6},
{"eval_avg_win_prob": 0.4, "eval_win_rate": 0.5, "epoch": 2.0, "step": 8},
{"eval_avg_win_prob": 0.4, "eval_win_rate": 0.5, "epoch": 2.5, "step": 10},
{"eval_avg_win_prob": 0.4, "eval_win_rate": 0.5, "epoch": 3.0, "step": 12},
]
winrate_history = [
{k: h[k] for k in ["eval_avg_win_prob", "eval_win_rate", "epoch", "step"]}
for h in trainer.state.log_history
if "eval_avg_win_prob" in h
]
self.assertListEqual(winrate_history, expected_soft_winrates)
@require_peft
def test_lora(self):
with tempfile.TemporaryDirectory() as tmp_dir:
peft_config = LoraConfig(
r=16,
lora_alpha=32,
lora_dropout=0.05,
bias="none",
task_type="CAUSAL_LM",
)
self.model.add_adapter(peft_config)
training_args = TrainingArguments(
output_dir=tmp_dir,
eval_strategy="steps",
eval_steps=2, # evaluate every 2 steps
per_device_train_batch_size=2, # 8 samples in total so 4 batches of 2 per epoch
per_device_eval_batch_size=2,
report_to="none",
)
trainer = Trainer(
model=self.model,
args=training_args,
train_dataset=self.dataset["train"],
eval_dataset=self.dataset["test"],
processing_class=self.tokenizer,
)
win_rate_callback = WinRateCallback(
judge=self.judge, trainer=trainer, generation_config=self.generation_config
)
trainer.add_callback(win_rate_callback)
trainer.train()
winrate_history = [h for h in trainer.state.log_history if "eval_win_rate" in h]
self.assertListEqual(winrate_history, self.expected_winrates)
class LogCompletionsCallbackTester(unittest.TestCase):
def setUp(self):
self.model = AutoModelForCausalLM.from_pretrained("trl-internal-testing/tiny-Qwen2ForCausalLM-2.5")
self.tokenizer = AutoTokenizer.from_pretrained("trl-internal-testing/tiny-Qwen2ForCausalLM-2.5")
self.tokenizer.pad_token = self.tokenizer.eos_token
dataset = load_dataset("trl-internal-testing/zen", "standard_prompt_only")
dataset["train"] = dataset["train"].select(range(8))
def tokenize_function(examples):
out = self.tokenizer(examples["prompt"], padding="max_length", max_length=16, truncation=True)
out["labels"] = out["input_ids"].copy()
return out
self.dataset = dataset.map(tokenize_function, batched=True)
self.generation_config = GenerationConfig(max_length=32)
@require_wandb
def test_basic_wandb(self):
import wandb
with tempfile.TemporaryDirectory() as tmp_dir:
training_args = TrainingArguments(
output_dir=tmp_dir,
eval_strategy="steps",
eval_steps=2, # evaluate every 2 steps
per_device_train_batch_size=2, # 8 samples in total so 4 batches of 2 per epoch
per_device_eval_batch_size=2,
report_to="wandb",
)
trainer = Trainer(
model=self.model,
args=training_args,
train_dataset=self.dataset["train"],
eval_dataset=self.dataset["test"],
processing_class=self.tokenizer,
)
completions_callback = LogCompletionsCallback(trainer, self.generation_config, num_prompts=2)
trainer.add_callback(completions_callback)
trainer.train()
# Get the current run
completions_path = wandb.run.summary.completions["path"]
json_path = os.path.join(wandb.run.dir, completions_path)
with open(json_path) as f:
completions = json.load(f)
# Check that the columns are correct
self.assertIn("step", completions["columns"])
self.assertIn("prompt", completions["columns"])
self.assertIn("completion", completions["columns"])
# Check that the prompt is in the log
self.assertIn(self.dataset["test"][0]["prompt"], completions["data"][0])
@require_comet
def test_basic_comet(self):
import comet_ml
with tempfile.TemporaryDirectory() as tmp_dir:
training_args = TrainingArguments(
output_dir=tmp_dir,
eval_strategy="steps",
eval_steps=2, # evaluate every 2 steps
per_device_train_batch_size=2, # 8 samples in total so 4 batches of 2 per epoch
per_device_eval_batch_size=2,
report_to="comet_ml",
)
trainer = Trainer(
model=self.model,
args=training_args,
train_dataset=self.dataset["train"],
eval_dataset=self.dataset["test"],
processing_class=self.tokenizer,
)
completions_callback = LogCompletionsCallback(trainer, self.generation_config, num_prompts=2)
trainer.add_callback(completions_callback)
trainer.train()
# close experiment to make sure all pending data are flushed
experiment = comet_ml.get_running_experiment()
assert experiment is not None
experiment.end()
            # get experiment assets and check that all required tables were logged
steps = len(self.dataset["train"]) + len(self.dataset["test"])
tables_logged = int(steps / 2) + 1 # +1 to include zero step
api_experiment = comet_ml.APIExperiment(previous_experiment=experiment.id)
tables = api_experiment.get_asset_list("dataframe")
assert tables is not None
assert len(tables) == tables_logged
assert all(table["fileName"] == "completions.csv" for table in tables)
# On Windows, temporary directory cleanup fails when using the MergeModelCallback.
# This is not an issue with the functionality of the code itself, but it can cause the test to fail
# due to unhandled cleanup errors. Python 3.10 introduces the `ignore_cleanup_errors` argument to
# mitigate this. As a result, this test is skipped for Python versions below 3.10.
@require_mergekit
@unittest.skipIf(
    sys.version_info < (3, 10),
    "Test fails on Python versions lower than 3.10, but it's only related to cleanup errors with the temp dir.",
)
class MergeModelCallbackTester(unittest.TestCase):
def setUp(self):
self.model = AutoModelForCausalLM.from_pretrained("trl-internal-testing/tiny-Qwen2ForCausalLM-2.5")
self.tokenizer = AutoTokenizer.from_pretrained("trl-internal-testing/tiny-Qwen2ForCausalLM-2.5")
self.dataset = load_dataset("trl-internal-testing/zen", "standard_preference", split="train")
def test_callback(self):
with tempfile.TemporaryDirectory(ignore_cleanup_errors=True) as tmp_dir:
training_args = DPOConfig(
output_dir=tmp_dir,
num_train_epochs=1,
report_to="none",
save_strategy="steps",
save_steps=1,
)
config = MergeConfig()
merge_callback = MergeModelCallback(config)
trainer = DPOTrainer(
model=self.model,
args=training_args,
train_dataset=self.dataset,
tokenizer=self.tokenizer,
callbacks=[merge_callback],
)
trainer.train()
last_checkpoint = get_last_checkpoint(tmp_dir)
merged_path = os.path.join(last_checkpoint, "merged")
self.assertTrue(os.path.isdir(merged_path), "Merged folder does not exist in the last checkpoint.")
def test_every_checkpoint(self):
with tempfile.TemporaryDirectory(ignore_cleanup_errors=True) as tmp_dir:
training_args = DPOConfig(
output_dir=tmp_dir,
num_train_epochs=1,
report_to="none",
save_strategy="steps",
save_steps=1,
)
config = MergeConfig()
merge_callback = MergeModelCallback(config, merge_at_every_checkpoint=True)
trainer = DPOTrainer(
model=self.model,
args=training_args,
train_dataset=self.dataset,
tokenizer=self.tokenizer,
callbacks=[merge_callback],
)
trainer.train()
checkpoints = sorted(
[os.path.join(tmp_dir, cp) for cp in os.listdir(tmp_dir) if cp.startswith("checkpoint-")]
)
for checkpoint in checkpoints:
merged_path = os.path.join(checkpoint, "merged")
self.assertTrue(
os.path.isdir(merged_path), f"Merged folder does not exist in checkpoint {checkpoint}."
)
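# To run only the tests in this module (a sketch; the wandb/comet/mergekit extras are required for the
# correspondingly decorated tests):
#   python -m pytest tests/test_callbacks.py -k "WinRate or LogCompletions or MergeModel"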
| trl/tests/test_callbacks.py/0 | {
"file_path": "trl/tests/test_callbacks.py",
"repo_id": "trl",
"token_count": 7904
} |
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tempfile
import unittest
import torch
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoModelForCausalLM, AutoModelForSeq2SeqLM, AutoTokenizer
from transformers.testing_utils import require_peft
from trl import KTOConfig, KTOTrainer
from trl.trainer.kto_trainer import _get_kl_dataset, _process_tokens, _tokenize
from .testing_utils import require_no_wandb
class KTOTrainerTester(unittest.TestCase):
def setUp(self):
self.model_id = "trl-internal-testing/tiny-Qwen2ForCausalLM-2.5"
self.model = AutoModelForCausalLM.from_pretrained(self.model_id)
self.ref_model = AutoModelForCausalLM.from_pretrained(self.model_id)
self.tokenizer = AutoTokenizer.from_pretrained(self.model_id)
self.tokenizer.pad_token = self.tokenizer.eos_token
# get t5 as seq2seq example:
model_id = "trl-internal-testing/tiny-T5ForConditionalGeneration"
self.t5_model = AutoModelForSeq2SeqLM.from_pretrained(model_id)
self.t5_ref_model = AutoModelForSeq2SeqLM.from_pretrained(model_id)
self.t5_tokenizer = AutoTokenizer.from_pretrained(model_id)
@parameterized.expand(
[
("qwen", "standard_preference", "kto", True, True),
# ("t5", "standard_implicit_prompt_preference", "kto", True, False), # KTO broken for enc-dec
("qwen", "standard_unpaired_preference", "kto", False, True),
# ("t5", "conversational_preference", "kto", False, False),
("qwen", "conversational_implicit_prompt_preference", "apo_zero_unpaired", True, True),
# ("t5", "conversational_unpaired_preference", "apo_zero_unpaired", True, False),
("qwen", "standard_unpaired_preference", "apo_zero_unpaired", False, True),
# ("t5", "conversational_unpaired_preference", "apo_zero_unpaired", False, False),
]
)
def test_kto_trainer(self, name, config_name, loss_type, pre_compute, eval_dataset):
with tempfile.TemporaryDirectory() as tmp_dir:
training_args = KTOConfig(
output_dir=tmp_dir,
per_device_train_batch_size=2,
max_steps=3,
remove_unused_columns=False,
gradient_accumulation_steps=1,
learning_rate=9e-1,
eval_strategy="steps" if eval_dataset else "no",
beta=0.1,
precompute_ref_log_probs=pre_compute,
loss_type=loss_type,
report_to="none",
)
dummy_dataset = load_dataset("trl-internal-testing/zen", config_name)
if name == "qwen":
model = self.model
ref_model = self.ref_model
tokenizer = self.tokenizer
elif name == "t5":
model = self.t5_model
ref_model = self.t5_ref_model
tokenizer = self.t5_tokenizer
trainer = KTOTrainer(
model=model,
ref_model=ref_model,
args=training_args,
processing_class=tokenizer,
train_dataset=dummy_dataset["train"],
eval_dataset=dummy_dataset["test"] if eval_dataset else None,
)
previous_trainable_params = {n: param.clone() for n, param in trainer.model.named_parameters()}
trainer.train()
self.assertIsNotNone(trainer.state.log_history[-1]["train_loss"])
# Check that the parameters have changed
for n, param in previous_trainable_params.items():
new_param = trainer.model.get_parameter(n)
if param.sum() != 0: # ignore 0 biases
self.assertFalse(torch.equal(param, new_param))
def test_kto_trainer_with_ref_model_is_model(self):
with tempfile.TemporaryDirectory() as tmp_dir:
training_args = KTOConfig(
output_dir=tmp_dir,
per_device_train_batch_size=2,
max_steps=3,
report_to="none",
)
dummy_dataset = load_dataset("trl-internal-testing/zen", "standard_unpaired_preference")
with self.assertRaises(ValueError):
KTOTrainer(
model=self.model,
ref_model=self.model, # ref_model can't be the same as model
args=training_args,
processing_class=self.tokenizer,
train_dataset=dummy_dataset["train"],
)
def test_tokenize_and_process_tokens(self):
with tempfile.TemporaryDirectory() as tmp_dir:
training_args = KTOConfig(
output_dir=tmp_dir,
per_device_train_batch_size=2,
max_steps=3,
remove_unused_columns=False,
gradient_accumulation_steps=1,
learning_rate=9e-1,
eval_strategy="steps",
beta=0.1,
report_to="none",
)
dummy_dataset = load_dataset("trl-internal-testing/zen", "standard_unpaired_preference")
trainer = KTOTrainer(
model=self.model,
ref_model=self.ref_model,
args=training_args,
processing_class=self.tokenizer,
train_dataset=dummy_dataset["train"],
eval_dataset=dummy_dataset["test"],
)
train_dataset = dummy_dataset["train"]
tokenized_dataset = train_dataset.map(
_tokenize,
fn_kwargs={"tokenizer": trainer.tokenizer},
batched=True,
batch_size=2,
)
self.assertListEqual(tokenized_dataset["prompt"], train_dataset["prompt"])
self.assertListEqual(tokenized_dataset["completion"], train_dataset["completion"])
self.assertListEqual(tokenized_dataset["label"], train_dataset["label"])
self.assertListEqual(tokenized_dataset["prompt_input_ids"][0], [46518, 374, 2664, 1091])
self.assertListEqual(tokenized_dataset["prompt_attention_mask"][0], [1, 1, 1, 1])
self.assertListEqual(tokenized_dataset["answer_input_ids"][0], [27261, 13])
self.assertListEqual(tokenized_dataset["answer_attention_mask"][0], [1, 1])
# Test corruption of (prompt, completion) pairs for KL dataset
for batch_size in [2, 3]:
tokenized_kl_dataset = tokenized_dataset.map(_get_kl_dataset, batched=True, batch_size=batch_size)
# Verify that the "answer_input_ids" have been modified, meaning the new "answer_input_ids" differ
# from the original ones. However, when the length of the dataset modulo batch_size equals 1,
# the last batch remains unaltered. This is a rare scenario that does not impact the training
# process, so we exclude it from testing by iterating only up to len - 1.
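                # A minimal sketch of the mismatching idea (assumed behaviour: `_get_kl_dataset` shifts the
                # completions by one position within each batch so that prompts are paired with completions
                # from a different example):
                #   prompts:            [p0, p1, p2]
                #   original answers:   [a0, a1, a2]
                #   mismatched answers: [a2, a0, a1]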
for i in range(len(tokenized_kl_dataset["answer_input_ids"]) - 1):
self.assertListEqual(
tokenized_dataset["prompt_input_ids"][i],
tokenized_kl_dataset["prompt_input_ids"][i],
)
self.assertListEqual(
tokenized_dataset["prompt_attention_mask"][i],
tokenized_kl_dataset["prompt_attention_mask"][i],
)
self.assertNotEqual(
tokenized_dataset["answer_input_ids"][i],
tokenized_kl_dataset["answer_input_ids"][i],
)
fn_kwargs = {
"prefix": "",
"is_encoder_decoder": trainer.is_encoder_decoder,
"tokenizer": trainer.tokenizer,
"max_length": trainer.max_length,
"truncation_mode": trainer.truncation_mode,
"label_pad_token_id": trainer.label_pad_token_id,
"max_prompt_length": trainer.max_prompt_length,
}
processed_dataset = tokenized_dataset.map(_process_tokens, fn_kwargs=fn_kwargs, num_proc=2)
self.assertListEqual(processed_dataset["prompt"], train_dataset["prompt"])
self.assertListEqual(processed_dataset["completion"], train_dataset["completion"])
self.assertListEqual(processed_dataset["label"], train_dataset["label"])
self.assertListEqual(processed_dataset["prompt_input_ids"][0], [46518, 374, 2664, 1091])
self.assertListEqual(processed_dataset["prompt_attention_mask"][0], [1, 1, 1, 1])
self.assertListEqual(
processed_dataset["completion_input_ids"][0], [46518, 374, 2664, 1091, 27261, 13, 151645]
)
self.assertListEqual(processed_dataset["completion_attention_mask"][0], [1, 1, 1, 1, 1, 1, 1])
self.assertListEqual(
processed_dataset["completion_labels"][0], [-100, -100, -100, -100, 27261, 13, 151645]
)
def test_kto_trainer_without_providing_ref_model(self):
with tempfile.TemporaryDirectory() as tmp_dir:
training_args = KTOConfig(
output_dir=tmp_dir,
per_device_train_batch_size=2,
max_steps=3,
remove_unused_columns=False,
gradient_accumulation_steps=4,
learning_rate=9e-1,
eval_strategy="steps",
beta=0.1,
report_to="none",
)
dummy_dataset = load_dataset("trl-internal-testing/zen", "standard_unpaired_preference")
trainer = KTOTrainer(
model=self.model,
ref_model=None,
args=training_args,
processing_class=self.tokenizer,
train_dataset=dummy_dataset["train"],
eval_dataset=dummy_dataset["test"],
)
previous_trainable_params = {n: param.clone() for n, param in trainer.model.named_parameters()}
trainer.train()
self.assertIsNotNone(trainer.state.log_history[-1]["train_loss"])
# Check that the parameters have changed
for n, param in previous_trainable_params.items():
new_param = trainer.model.get_parameter(n)
if param.sum() != 0: # ignore 0 biases
self.assertFalse(torch.equal(param, new_param))
@require_peft
def test_kto_trainer_without_providing_ref_model_with_lora(self):
from peft import LoraConfig
lora_config = LoraConfig(
r=16,
lora_alpha=32,
lora_dropout=0.05,
bias="none",
task_type="CAUSAL_LM",
)
with tempfile.TemporaryDirectory() as tmp_dir:
training_args = KTOConfig(
output_dir=tmp_dir,
per_device_train_batch_size=2,
max_steps=3,
remove_unused_columns=False,
gradient_accumulation_steps=4,
learning_rate=9e-1,
eval_strategy="steps",
beta=0.1,
report_to="none",
)
dummy_dataset = load_dataset("trl-internal-testing/zen", "standard_unpaired_preference")
trainer = KTOTrainer(
model=self.model,
ref_model=None,
args=training_args,
processing_class=self.tokenizer,
train_dataset=dummy_dataset["train"],
eval_dataset=dummy_dataset["test"],
peft_config=lora_config,
)
previous_trainable_params = {n: param.clone() for n, param in trainer.model.named_parameters()}
trainer.train()
self.assertIsNotNone(trainer.state.log_history[-1]["train_loss"])
# Check that the parameters have changed
for n, param in previous_trainable_params.items():
if "lora" in n:
new_param = trainer.model.get_parameter(n)
if param.sum() != 0: # ignore 0 biases
self.assertFalse(torch.equal(param, new_param))
@require_no_wandb
def test_kto_trainer_generate_during_eval_no_wandb(self):
with tempfile.TemporaryDirectory() as tmp_dir:
training_args = KTOConfig(
output_dir=tmp_dir,
per_device_train_batch_size=2,
max_steps=3,
remove_unused_columns=False,
gradient_accumulation_steps=1,
learning_rate=9e-1,
eval_strategy="steps",
beta=0.1,
generate_during_eval=True,
report_to="none",
)
dummy_dataset = load_dataset("trl-internal-testing/zen", "standard_unpaired_preference")
with self.assertRaisesRegex(
ValueError,
expected_regex="`generate_during_eval=True` requires Weights and Biases or Comet to be installed."
" Please install `wandb` or `comet-ml` to resolve.",
):
KTOTrainer(
model=self.model,
ref_model=None,
args=training_args,
processing_class=self.tokenizer,
train_dataset=dummy_dataset["train"],
eval_dataset=dummy_dataset["test"],
)
@require_peft
def test_kto_lora_save(self):
from peft import LoraConfig, get_peft_model
lora_config = LoraConfig(
r=16,
lora_alpha=32,
lora_dropout=0.05,
bias="none",
task_type="CAUSAL_LM",
)
# lora model
model = AutoModelForCausalLM.from_pretrained(self.model_id)
model_peft = get_peft_model(model, lora_config)
with tempfile.TemporaryDirectory() as tmp_dir:
training_args = KTOConfig(
output_dir=tmp_dir,
per_device_train_batch_size=2,
max_steps=3,
remove_unused_columns=False,
gradient_accumulation_steps=4,
learning_rate=9e-1,
eval_strategy="steps",
beta=0.1,
report_to="none",
)
dummy_dataset = load_dataset("trl-internal-testing/zen", "standard_unpaired_preference")
# kto train lora model with a lora config
trainer = KTOTrainer(
model=model_peft,
ref_model=None,
args=training_args,
processing_class=self.tokenizer,
train_dataset=dummy_dataset["train"],
eval_dataset=dummy_dataset["test"],
peft_config=lora_config,
)
# train the model
trainer.train()
# save peft adapter
trainer.save_model()
# assert that the model is loaded without giving OSError
try:
AutoModelForCausalLM.from_pretrained(tmp_dir)
except OSError:
self.fail("Loading the saved peft adapter failed")
| trl/tests/test_kto_trainer.py/0 | {
"file_path": "trl/tests/test_kto_trainer.py",
"repo_id": "trl",
"token_count": 8212
} |
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import os
import random
import warnings
from dataclasses import dataclass
from typing import Any, Callable, Optional, Union
import numpy as np
import torch
import torch.utils.checkpoint as checkpoint
from diffusers import DDIMScheduler, StableDiffusionPipeline, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import rescale_noise_cfg
from transformers.utils import is_peft_available
from ..core import randn_tensor
from .sd_utils import convert_state_dict_to_diffusers
if is_peft_available():
from peft import LoraConfig
from peft.utils import get_peft_model_state_dict
@dataclass
class DDPOPipelineOutput:
"""
Output class for the diffusers pipeline to be finetuned with the DDPO trainer
Args:
images (`torch.Tensor`):
The generated images.
latents (`list[torch.Tensor]`):
The latents used to generate the images.
log_probs (`list[torch.Tensor]`):
The log probabilities of the latents.
"""
images: torch.Tensor
latents: torch.Tensor
log_probs: torch.Tensor
@dataclass
class DDPOSchedulerOutput:
"""
Output class for the diffusers scheduler to be finetuned with the DDPO trainer
Args:
latents (`torch.Tensor`):
Predicted sample at the previous timestep. Shape: `(batch_size, num_channels, height, width)`
log_probs (`torch.Tensor`):
Log probability of the above mentioned sample. Shape: `(batch_size)`
"""
latents: torch.Tensor
log_probs: torch.Tensor
class DDPOStableDiffusionPipeline:
"""
Main class for the diffusers pipeline to be finetuned with the DDPO trainer
"""
def __call__(self, *args, **kwargs) -> DDPOPipelineOutput:
raise NotImplementedError
def scheduler_step(self, *args, **kwargs) -> DDPOSchedulerOutput:
raise NotImplementedError
@property
def unet(self):
"""
Returns the 2d U-Net model used for diffusion.
"""
raise NotImplementedError
@property
def vae(self):
"""
        Returns the Variational Autoencoder model used for mapping images to and from the latent space
"""
raise NotImplementedError
@property
def tokenizer(self):
"""
Returns the tokenizer used for tokenizing text inputs
"""
raise NotImplementedError
@property
def scheduler(self):
"""
Returns the scheduler associated with the pipeline used for the diffusion process
"""
raise NotImplementedError
@property
def text_encoder(self):
"""
Returns the text encoder used for encoding text inputs
"""
raise NotImplementedError
@property
def autocast(self):
"""
Returns the autocast context manager
"""
raise NotImplementedError
def set_progress_bar_config(self, *args, **kwargs):
"""
Sets the progress bar config for the pipeline
"""
raise NotImplementedError
def save_pretrained(self, *args, **kwargs):
"""
Saves all of the model weights
"""
raise NotImplementedError
def get_trainable_layers(self, *args, **kwargs):
"""
Returns the trainable parameters of the pipeline
"""
raise NotImplementedError
def save_checkpoint(self, *args, **kwargs):
"""
Light wrapper around accelerate's register_save_state_pre_hook which is run before saving state
"""
raise NotImplementedError
def load_checkpoint(self, *args, **kwargs):
"""
        Light wrapper around accelerate's register_load_state_pre_hook which is run before loading state
"""
raise NotImplementedError
def _left_broadcast(input_tensor, shape):
"""
As opposed to the default direction of broadcasting (right to left), this function broadcasts
from left to right
Args:
input_tensor (`torch.FloatTensor`): is the tensor to broadcast
shape (`tuple[int]`): is the shape to broadcast to
"""
input_ndim = input_tensor.ndim
if input_ndim > len(shape):
raise ValueError(
"The number of dimensions of the tensor to broadcast cannot be greater than the length of the shape to broadcast to"
)
return input_tensor.reshape(input_tensor.shape + (1,) * (len(shape) - input_ndim)).broadcast_to(shape)
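# Example (a sketch, not part of the original module): broadcasting one scalar per batch element to a
# latent-shaped tensor.
#   t = torch.tensor([0.1, 0.2])                  # shape (2,)
#   _left_broadcast(t, (2, 4, 64, 64)).shape      # -> torch.Size([2, 4, 64, 64])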
def _get_variance(self, timestep, prev_timestep):
alpha_prod_t = torch.gather(self.alphas_cumprod, 0, timestep.cpu()).to(timestep.device)
alpha_prod_t_prev = torch.where(
prev_timestep.cpu() >= 0,
self.alphas_cumprod.gather(0, prev_timestep.cpu()),
self.final_alpha_cumprod,
).to(timestep.device)
beta_prod_t = 1 - alpha_prod_t
beta_prod_t_prev = 1 - alpha_prod_t_prev
variance = (beta_prod_t_prev / beta_prod_t) * (1 - alpha_prod_t / alpha_prod_t_prev)
return variance
def scheduler_step(
self,
model_output: torch.FloatTensor,
timestep: int,
sample: torch.FloatTensor,
eta: float = 0.0,
use_clipped_model_output: bool = False,
generator=None,
prev_sample: Optional[torch.FloatTensor] = None,
) -> DDPOSchedulerOutput:
"""
Predict the sample at the previous timestep by reversing the SDE. Core function to propagate the diffusion
process from the learned model outputs (most often the predicted noise).
Args:
model_output (`torch.FloatTensor`): direct output from learned diffusion model.
timestep (`int`): current discrete timestep in the diffusion chain.
sample (`torch.FloatTensor`):
current instance of sample being created by diffusion process.
eta (`float`): weight of noise for added noise in diffusion step.
use_clipped_model_output (`bool`): if `True`, compute "corrected" `model_output` from the clipped
predicted original sample. Necessary because predicted original sample is clipped to [-1, 1] when
`self.config.clip_sample` is `True`. If no clipping has happened, "corrected" `model_output` would
            coincide with the one provided as input and `use_clipped_model_output` will have no effect.
generator: random number generator.
        prev_sample (`torch.FloatTensor`, *optional*): the sample at the previous timestep. If provided, no
            new noise is drawn and the log probability is computed for this sample instead. This is useful
            when re-evaluating the log probabilities of previously generated samples (as done in DDPO training).
Returns:
`DDPOSchedulerOutput`: the predicted sample at the previous timestep and the log probability of the sample
"""
if self.num_inference_steps is None:
raise ValueError(
"Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
)
# See formulas (12) and (16) of DDIM paper https://huggingface.co/papers/2010.02502
    # Ideally, read the DDIM paper in detail for a full understanding.
    # Notation (<variable name> -> <name in paper>):
# - pred_noise_t -> e_theta(x_t, t)
# - pred_original_sample -> f_theta(x_t, t) or x_0
# - std_dev_t -> sigma_t
# - eta -> η
# - pred_sample_direction -> "direction pointing to x_t"
# - pred_prev_sample -> "x_t-1"
# 1. get previous step value (=t-1)
prev_timestep = timestep - self.config.num_train_timesteps // self.num_inference_steps
# to prevent OOB on gather
prev_timestep = torch.clamp(prev_timestep, 0, self.config.num_train_timesteps - 1)
# 2. compute alphas, betas
alpha_prod_t = self.alphas_cumprod.gather(0, timestep.cpu())
alpha_prod_t_prev = torch.where(
prev_timestep.cpu() >= 0,
self.alphas_cumprod.gather(0, prev_timestep.cpu()),
self.final_alpha_cumprod,
)
alpha_prod_t = _left_broadcast(alpha_prod_t, sample.shape).to(sample.device)
alpha_prod_t_prev = _left_broadcast(alpha_prod_t_prev, sample.shape).to(sample.device)
beta_prod_t = 1 - alpha_prod_t
# 3. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://huggingface.co/papers/2010.02502
if self.config.prediction_type == "epsilon":
pred_original_sample = (sample - beta_prod_t ** (0.5) * model_output) / alpha_prod_t ** (0.5)
pred_epsilon = model_output
elif self.config.prediction_type == "sample":
pred_original_sample = model_output
pred_epsilon = (sample - alpha_prod_t ** (0.5) * pred_original_sample) / beta_prod_t ** (0.5)
elif self.config.prediction_type == "v_prediction":
pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
pred_epsilon = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample
else:
raise ValueError(
f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or"
" `v_prediction`"
)
# 4. Clip or threshold "predicted x_0"
if self.config.thresholding:
pred_original_sample = self._threshold_sample(pred_original_sample)
elif self.config.clip_sample:
pred_original_sample = pred_original_sample.clamp(
-self.config.clip_sample_range, self.config.clip_sample_range
)
# 5. compute variance: "sigma_t(η)" -> see formula (16)
# σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
variance = _get_variance(self, timestep, prev_timestep)
std_dev_t = eta * variance ** (0.5)
std_dev_t = _left_broadcast(std_dev_t, sample.shape).to(sample.device)
if use_clipped_model_output:
# the pred_epsilon is always re-derived from the clipped x_0 in Glide
pred_epsilon = (sample - alpha_prod_t ** (0.5) * pred_original_sample) / beta_prod_t ** (0.5)
# 6. compute "direction pointing to x_t" of formula (12) from https://huggingface.co/papers/2010.02502
pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** (0.5) * pred_epsilon
# 7. compute x_t without "random noise" of formula (12) from https://huggingface.co/papers/2010.02502
prev_sample_mean = alpha_prod_t_prev ** (0.5) * pred_original_sample + pred_sample_direction
if prev_sample is not None and generator is not None:
raise ValueError(
"Cannot pass both generator and prev_sample. Please make sure that either `generator` or"
" `prev_sample` stays `None`."
)
if prev_sample is None:
variance_noise = randn_tensor(
model_output.shape,
generator=generator,
device=model_output.device,
dtype=model_output.dtype,
)
prev_sample = prev_sample_mean + std_dev_t * variance_noise
# log prob of prev_sample given prev_sample_mean and std_dev_t
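    # i.e. the elementwise log-density of a Gaussian N(prev_sample; prev_sample_mean, std_dev_t**2):
    #   log p(x) = -(x - mu)**2 / (2 * sigma**2) - log(sigma) - log(sqrt(2 * pi))
    # which is then averaged over all non-batch dimensions below.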
log_prob = (
-((prev_sample.detach() - prev_sample_mean) ** 2) / (2 * (std_dev_t**2))
- torch.log(std_dev_t)
- torch.log(torch.sqrt(2 * torch.as_tensor(np.pi)))
)
# mean along all but batch dimension
log_prob = log_prob.mean(dim=tuple(range(1, log_prob.ndim)))
return DDPOSchedulerOutput(prev_sample.type(sample.dtype), log_prob)
# 1. The output type for call is different as the logprobs are now returned
# 2. An extra method called `scheduler_step` is added which is used to constrain the scheduler output
@torch.no_grad()
def pipeline_step(
self,
prompt: Optional[Union[str, list[str]]] = None,
height: Optional[int] = None,
width: Optional[int] = None,
num_inference_steps: int = 50,
guidance_scale: float = 7.5,
negative_prompt: Optional[Union[str, list[str]]] = None,
num_images_per_prompt: Optional[int] = 1,
eta: float = 0.0,
generator: Optional[Union[torch.Generator, list[torch.Generator]]] = None,
latents: Optional[torch.FloatTensor] = None,
prompt_embeds: Optional[torch.FloatTensor] = None,
negative_prompt_embeds: Optional[torch.FloatTensor] = None,
output_type: Optional[str] = "pil",
return_dict: bool = True,
callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
callback_steps: int = 1,
cross_attention_kwargs: Optional[dict[str, Any]] = None,
guidance_rescale: float = 0.0,
):
r"""
    Function invoked when calling the pipeline for generation.
    Args:
        prompt (`str` or `list[str]`, *optional*):
            The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`
            instead.
        height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
            The height in pixels of the generated image.
width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
The width in pixels of the generated image.
num_inference_steps (`int`, *optional*, defaults to 50):
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
expense of slower inference.
guidance_scale (`float`, *optional*, defaults to 7.5):
Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://huggingface.co/papers/2207.12598).
`guidance_scale` is defined as `w` of equation 2. of [Imagen
Paper](https://huggingface.co/papers/2205.11487). Guidance scale is enabled by setting `guidance_scale >
1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
usually at the expense of lower image quality.
negative_prompt (`str` or `list[str]`, *optional*):
The prompt or prompts not to guide the image generation. If not defined, one has to pass
`negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
less than `1`).
num_images_per_prompt (`int`, *optional*, defaults to 1):
The number of images to generate per prompt.
eta (`float`, *optional*, defaults to 0.0):
Corresponds to parameter eta (η) in the DDIM paper: https://huggingface.co/papers/2010.02502. Only applies to
[`schedulers.DDIMScheduler`], will be ignored for others.
generator (`torch.Generator` or `list[torch.Generator]`, *optional*):
One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
to make generation deterministic.
latents (`torch.FloatTensor`, *optional*):
Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
            tensor will be generated by sampling using the supplied random `generator`.
prompt_embeds (`torch.FloatTensor`, *optional*):
Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
provided, text embeddings will be generated from `prompt` input argument.
negative_prompt_embeds (`torch.FloatTensor`, *optional*):
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
argument.
output_type (`str`, *optional*, defaults to `"pil"`):
            The output format of the generated image. Choose between
[PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
return_dict (`bool`, *optional*, defaults to `True`):
Whether to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
plain tuple.
callback (`Callable`, *optional*):
A function that will be called every `callback_steps` steps during inference. The function will be
called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
callback_steps (`int`, *optional*, defaults to 1):
The frequency at which the `callback` function will be called. If not specified, the callback will be
called at every step.
cross_attention_kwargs (`dict`, *optional*):
A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
`self.processor` in
[diffusers.cross_attention](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/cross_attention.py).
        guidance_rescale (`float`, *optional*, defaults to 0.0):
Guidance rescale factor proposed by [Common Diffusion Noise Schedules and Sample Steps are
            Flawed](https://huggingface.co/papers/2305.08891). `guidance_rescale` is defined as `φ` in equation 16 of
[Common Diffusion Noise Schedules and Sample Steps are Flawed](https://huggingface.co/papers/2305.08891).
Guidance rescale factor should fix overexposure when using zero terminal SNR.
Examples:
Returns:
`DDPOPipelineOutput`: The generated image, the predicted latents used to generate the image and the associated log probabilities
"""
# 0. Default height and width to unet
height = height or self.unet.config.sample_size * self.vae_scale_factor
width = width or self.unet.config.sample_size * self.vae_scale_factor
# 1. Check inputs. Raise error if not correct
self.check_inputs(
prompt,
height,
width,
callback_steps,
negative_prompt,
prompt_embeds,
negative_prompt_embeds,
)
# 2. Define call parameters
if prompt is not None and isinstance(prompt, str):
batch_size = 1
elif prompt is not None and isinstance(prompt, list):
batch_size = len(prompt)
else:
batch_size = prompt_embeds.shape[0]
device = self._execution_device
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://huggingface.co/papers/2205.11487 . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
do_classifier_free_guidance = guidance_scale > 1.0
# 3. Encode input prompt
text_encoder_lora_scale = cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None
prompt_embeds = self._encode_prompt(
prompt,
device,
num_images_per_prompt,
do_classifier_free_guidance,
negative_prompt,
prompt_embeds=prompt_embeds,
negative_prompt_embeds=negative_prompt_embeds,
lora_scale=text_encoder_lora_scale,
)
# 4. Prepare timesteps
self.scheduler.set_timesteps(num_inference_steps, device=device)
timesteps = self.scheduler.timesteps
# 5. Prepare latent variables
num_channels_latents = self.unet.config.in_channels
latents = self.prepare_latents(
batch_size * num_images_per_prompt,
num_channels_latents,
height,
width,
prompt_embeds.dtype,
device,
generator,
latents,
)
# 6. Denoising loop
num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
all_latents = [latents]
all_log_probs = []
with self.progress_bar(total=num_inference_steps) as progress_bar:
for i, t in enumerate(timesteps):
# expand the latents if we are doing classifier free guidance
latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
# predict the noise residual
noise_pred = self.unet(
latent_model_input,
t,
encoder_hidden_states=prompt_embeds,
cross_attention_kwargs=cross_attention_kwargs,
return_dict=False,
)[0]
# perform guidance
if do_classifier_free_guidance:
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
if do_classifier_free_guidance and guidance_rescale > 0.0:
# Based on 3.4. in https://huggingface.co/papers/2305.08891
noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=guidance_rescale)
# compute the previous noisy sample x_t -> x_t-1
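            # scheduler_step returns both the denoised latents and the log-prob of sampling that transition,
            # which DDPO later uses as the per-step policy log-probability.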
scheduler_output = scheduler_step(self.scheduler, noise_pred, t, latents, eta)
latents = scheduler_output.latents
log_prob = scheduler_output.log_probs
all_latents.append(latents)
all_log_probs.append(log_prob)
# call the callback, if provided
if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
progress_bar.update()
if callback is not None and i % callback_steps == 0:
callback(i, t, latents)
if not output_type == "latent":
image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]
image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)
else:
image = latents
has_nsfw_concept = None
if has_nsfw_concept is None:
do_denormalize = [True] * image.shape[0]
else:
do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]
image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)
# Offload last model to CPU
if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
self.final_offload_hook.offload()
return DDPOPipelineOutput(image, all_latents, all_log_probs)
def pipeline_step_with_grad(
pipeline,
prompt: Optional[Union[str, list[str]]] = None,
height: Optional[int] = None,
width: Optional[int] = None,
num_inference_steps: int = 50,
guidance_scale: float = 7.5,
truncated_backprop: bool = True,
truncated_backprop_rand: bool = True,
gradient_checkpoint: bool = True,
truncated_backprop_timestep: int = 49,
truncated_rand_backprop_minmax: tuple = (0, 50),
negative_prompt: Optional[Union[str, list[str]]] = None,
num_images_per_prompt: Optional[int] = 1,
eta: float = 0.0,
generator: Optional[Union[torch.Generator, list[torch.Generator]]] = None,
latents: Optional[torch.FloatTensor] = None,
prompt_embeds: Optional[torch.FloatTensor] = None,
negative_prompt_embeds: Optional[torch.FloatTensor] = None,
output_type: Optional[str] = "pil",
return_dict: bool = True,
callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
callback_steps: int = 1,
cross_attention_kwargs: Optional[dict[str, Any]] = None,
guidance_rescale: float = 0.0,
):
r"""
Function to get RGB image with gradients attached to the model weights.
Args:
prompt (`str` or `list[str]`, *optional*, defaults to `None`):
The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds` instead.
height (`int`, *optional*, defaults to `pipeline.unet.config.sample_size * pipeline.vae_scale_factor`):
The height in pixels of the generated image.
width (`int`, *optional*, defaults to `pipeline.unet.config.sample_size * pipeline.vae_scale_factor`):
The width in pixels of the generated image.
num_inference_steps (`int`, *optional*, defaults to `50`):
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
expense of slower inference.
guidance_scale (`float`, *optional*, defaults to `7.5`):
Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://huggingface.co/papers/2207.12598).
            `guidance_scale` is defined as `w` of equation 2 of the [Imagen
            Paper](https://huggingface.co/papers/2205.11487). Guidance scale is enabled by setting `guidance_scale >
            1`. A higher guidance scale encourages the model to generate images closely linked to the text `prompt`,
            usually at the expense of lower image quality.
        truncated_backprop (`bool`, *optional*, defaults to True):
            Whether to truncate backpropagation to a fixed timestep; this helps prevent collapse during diffusion reward training, as shown in AlignProp (https://huggingface.co/papers/2310.03739).
        truncated_backprop_rand (`bool`, *optional*, defaults to True):
            Whether to randomize the truncation timestep across diffusion timesteps; this also helps prevent collapse during diffusion reward training, as shown in AlignProp (https://huggingface.co/papers/2310.03739).
            Enabling `truncated_backprop_rand` allows earlier diffusion timesteps to be adapted without causing a collapse.
        gradient_checkpoint (`bool`, *optional*, defaults to True):
            Whether to add gradient checkpointing to the UNet forward pass. Reduces GPU memory consumption while slightly increasing training time.
        truncated_backprop_timestep (`int`, *optional*, defaults to 49):
            Denoising-step index at which gradient truncation happens when `truncated_backprop_rand` is `False`: steps before this index are detached, so gradients only flow through the later steps.
            A higher value reduces memory usage and the chance of collapse, while a lower value allows more semantic changes in the generations, since earlier diffusion timesteps are updated,
            but also increases the chance of collapse.
        truncated_rand_backprop_minmax (`Tuple`, *optional*, defaults to (0, 50)):
            Range from which the randomized truncation timestep is drawn. The value at index 0 is the earliest diffusion timestep to update (closer to noise), while the value
            at index 1 is the latest diffusion timestep to update.
negative_prompt (`str` or `list[str]`, *optional*):
The prompt or prompts not to guide the image generation. If not defined, one has to pass
`negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
less than `1`).
num_images_per_prompt (`int`, *optional*, defaults to 1):
The number of images to generate per prompt.
eta (`float`, *optional*, defaults to 0.0):
            Corresponds to parameter eta (η) in the DDIM paper: https://huggingface.co/papers/2010.02502. Only applies to
            [`schedulers.DDIMScheduler`]; it is ignored for other schedulers.
generator (`torch.Generator` or `list[torch.Generator]`, *optional*):
One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
to make generation deterministic.
latents (`torch.FloatTensor`, *optional*):
Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
            tensor will be generated by sampling using the supplied random `generator`.
prompt_embeds (`torch.FloatTensor`, *optional*):
Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
provided, text embeddings will be generated from `prompt` input argument.
negative_prompt_embeds (`torch.FloatTensor`, *optional*):
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
argument.
output_type (`str`, *optional*, defaults to `"pil"`):
            The output format of the generated image. Choose between
[PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
return_dict (`bool`, *optional*, defaults to `True`):
Whether to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
plain tuple.
callback (`Callable`, *optional*):
A function that will be called every `callback_steps` steps during inference. The function will be
called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
callback_steps (`int`, *optional*, defaults to 1):
The frequency at which the `callback` function will be called. If not specified, the callback will be
called at every step.
cross_attention_kwargs (`dict`, *optional*):
A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
`pipeline.processor` in
[diffusers.cross_attention](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/cross_attention.py).
        guidance_rescale (`float`, *optional*, defaults to 0.0):
            Guidance rescale factor proposed by [Common Diffusion Noise Schedules and Sample Steps are
            Flawed](https://huggingface.co/papers/2305.08891). `guidance_rescale` is defined as `φ` in equation 16 of
            that paper. Guidance rescale factor should fix overexposure when using zero terminal SNR.
Examples:
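        A minimal sketch, assuming a [`DefaultDDPOStableDiffusionPipeline`] instance with trainable (e.g. LoRA)
        parameters; the checkpoint name is only a placeholder:
        ```python
        from trl import DefaultDDPOStableDiffusionPipeline
        sd_pipeline = DefaultDDPOStableDiffusionPipeline("stable-diffusion-v1-5/stable-diffusion-v1-5", use_lora=True)
        sd_pipeline.get_trainable_layers()  # attaches LoRA adapters so gradients can flow through the UNet
        # `rgb_with_grad` forwards to `pipeline_step_with_grad`; `output_type="pt"` keeps the image as a tensor
        # so that gradients from a reward model can be backpropagated into the pipeline weights.
        output = sd_pipeline.rgb_with_grad(
            "a photo of an astronaut riding a horse",
            num_inference_steps=50,
            output_type="pt",
        )
        ```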
Returns:
`DDPOPipelineOutput`: The generated image, the predicted latents used to generate the image and the associated log probabilities
"""
# 0. Default height and width to unet
height = height or pipeline.unet.config.sample_size * pipeline.vae_scale_factor
width = width or pipeline.unet.config.sample_size * pipeline.vae_scale_factor
with torch.no_grad():
# 1. Check inputs. Raise error if not correct
pipeline.check_inputs(
prompt,
height,
width,
callback_steps,
negative_prompt,
prompt_embeds,
negative_prompt_embeds,
)
# 2. Define call parameters
if prompt is not None and isinstance(prompt, str):
batch_size = 1
elif prompt is not None and isinstance(prompt, list):
batch_size = len(prompt)
else:
batch_size = prompt_embeds.shape[0]
device = pipeline._execution_device
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://huggingface.co/papers/2205.11487 . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
do_classifier_free_guidance = guidance_scale > 1.0
# 3. Encode input prompt
text_encoder_lora_scale = (
cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None
)
prompt_embeds = pipeline._encode_prompt(
prompt,
device,
num_images_per_prompt,
do_classifier_free_guidance,
negative_prompt,
prompt_embeds=prompt_embeds,
negative_prompt_embeds=negative_prompt_embeds,
lora_scale=text_encoder_lora_scale,
)
# 4. Prepare timesteps
pipeline.scheduler.set_timesteps(num_inference_steps, device=device)
timesteps = pipeline.scheduler.timesteps
# 5. Prepare latent variables
num_channels_latents = pipeline.unet.config.in_channels
latents = pipeline.prepare_latents(
batch_size * num_images_per_prompt,
num_channels_latents,
height,
width,
prompt_embeds.dtype,
device,
generator,
latents,
)
# 6. Denoising loop
num_warmup_steps = len(timesteps) - num_inference_steps * pipeline.scheduler.order
all_latents = [latents]
all_log_probs = []
with pipeline.progress_bar(total=num_inference_steps) as progress_bar:
for i, t in enumerate(timesteps):
# expand the latents if we are doing classifier free guidance
latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
latent_model_input = pipeline.scheduler.scale_model_input(latent_model_input, t)
# predict the noise residual
if gradient_checkpoint:
noise_pred = checkpoint.checkpoint(
pipeline.unet,
latent_model_input,
t,
prompt_embeds,
cross_attention_kwargs=cross_attention_kwargs,
use_reentrant=False,
)[0]
else:
noise_pred = pipeline.unet(
latent_model_input,
t,
encoder_hidden_states=prompt_embeds,
cross_attention_kwargs=cross_attention_kwargs,
return_dict=False,
)[0]
# truncating backpropagation is critical for preventing overoptimization (https://huggingface.co/papers/2304.05977).
if truncated_backprop:
# Randomized truncation randomizes the truncation process (https://huggingface.co/papers/2310.03739)
# the range of truncation is defined by truncated_rand_backprop_minmax
                # Setting truncated_rand_backprop_minmax[0] to a low value allows the model to update earlier timesteps in the diffusion chain, while setting it high reduces memory usage.
if truncated_backprop_rand:
rand_timestep = random.randint(
truncated_rand_backprop_minmax[0], truncated_rand_backprop_minmax[1]
)
if i < rand_timestep:
noise_pred = noise_pred.detach()
else:
# fixed truncation process
if i < truncated_backprop_timestep:
noise_pred = noise_pred.detach()
# perform guidance
if do_classifier_free_guidance:
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
if do_classifier_free_guidance and guidance_rescale > 0.0:
# Based on 3.4. in https://huggingface.co/papers/2305.08891
noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=guidance_rescale)
# compute the previous noisy sample x_t -> x_t-1
scheduler_output = scheduler_step(pipeline.scheduler, noise_pred, t, latents, eta)
latents = scheduler_output.latents
log_prob = scheduler_output.log_probs
all_latents.append(latents)
all_log_probs.append(log_prob)
# call the callback, if provided
if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % pipeline.scheduler.order == 0):
progress_bar.update()
if callback is not None and i % callback_steps == 0:
callback(i, t, latents)
if not output_type == "latent":
image = pipeline.vae.decode(latents / pipeline.vae.config.scaling_factor, return_dict=False)[0]
image, has_nsfw_concept = pipeline.run_safety_checker(image, device, prompt_embeds.dtype)
else:
image = latents
has_nsfw_concept = None
if has_nsfw_concept is None:
do_denormalize = [True] * image.shape[0]
else:
do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]
image = pipeline.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)
# Offload last model to CPU
if hasattr(pipeline, "final_offload_hook") and pipeline.final_offload_hook is not None:
pipeline.final_offload_hook.offload()
return DDPOPipelineOutput(image, all_latents, all_log_probs)
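# Convenience wrapper that exposes a `StableDiffusionPipeline` through the `DDPOStableDiffusionPipeline`
# interface used by the DDPO trainer: sampling (`__call__`), sampling with gradients (`rgb_with_grad`),
# scheduler stepping, checkpointing and optional LoRA-based fine-tuning.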
class DefaultDDPOStableDiffusionPipeline(DDPOStableDiffusionPipeline):
def __init__(self, pretrained_model_name: str, *, pretrained_model_revision: str = "main", use_lora: bool = True):
self.sd_pipeline = StableDiffusionPipeline.from_pretrained(
pretrained_model_name, revision=pretrained_model_revision
)
self.use_lora = use_lora
self.pretrained_model = pretrained_model_name
self.pretrained_revision = pretrained_model_revision
try:
self.sd_pipeline.load_lora_weights(
pretrained_model_name,
weight_name="pytorch_lora_weights.safetensors",
revision=pretrained_model_revision,
)
self.use_lora = True
except OSError:
if use_lora:
warnings.warn(
"Trying to load LoRA weights but no LoRA weights found. Set `use_lora=False` or check that "
"`pytorch_lora_weights.safetensors` exists in the model folder.",
UserWarning,
)
self.sd_pipeline.scheduler = DDIMScheduler.from_config(self.sd_pipeline.scheduler.config)
self.sd_pipeline.safety_checker = None
# memory optimization
self.sd_pipeline.vae.requires_grad_(False)
self.sd_pipeline.text_encoder.requires_grad_(False)
self.sd_pipeline.unet.requires_grad_(not self.use_lora)
def __call__(self, *args, **kwargs) -> DDPOPipelineOutput:
return pipeline_step(self.sd_pipeline, *args, **kwargs)
def rgb_with_grad(self, *args, **kwargs) -> DDPOPipelineOutput:
return pipeline_step_with_grad(self.sd_pipeline, *args, **kwargs)
def scheduler_step(self, *args, **kwargs) -> DDPOSchedulerOutput:
return scheduler_step(self.sd_pipeline.scheduler, *args, **kwargs)
@property
def unet(self):
return self.sd_pipeline.unet
@property
def vae(self):
return self.sd_pipeline.vae
@property
def tokenizer(self):
return self.sd_pipeline.tokenizer
@property
def scheduler(self):
return self.sd_pipeline.scheduler
@property
def text_encoder(self):
return self.sd_pipeline.text_encoder
@property
def autocast(self):
return contextlib.nullcontext if self.use_lora else None
def save_pretrained(self, output_dir):
if self.use_lora:
state_dict = convert_state_dict_to_diffusers(get_peft_model_state_dict(self.sd_pipeline.unet))
self.sd_pipeline.save_lora_weights(save_directory=output_dir, unet_lora_layers=state_dict)
self.sd_pipeline.save_pretrained(output_dir)
def set_progress_bar_config(self, *args, **kwargs):
self.sd_pipeline.set_progress_bar_config(*args, **kwargs)
def get_trainable_layers(self):
if self.use_lora:
lora_config = LoraConfig(
r=4,
lora_alpha=4,
init_lora_weights="gaussian",
target_modules=["to_k", "to_q", "to_v", "to_out.0"],
)
self.sd_pipeline.unet.add_adapter(lora_config)
# To avoid accelerate unscaling problems in FP16.
for param in self.sd_pipeline.unet.parameters():
# only upcast trainable parameters (LoRA) into fp32
if param.requires_grad:
param.data = param.to(torch.float32)
return self.sd_pipeline.unet
else:
return self.sd_pipeline.unet
def save_checkpoint(self, models, weights, output_dir):
if len(models) != 1:
raise ValueError("Given how the trainable params were set, this should be of length 1")
if self.use_lora and hasattr(models[0], "peft_config") and getattr(models[0], "peft_config", None) is not None:
state_dict = convert_state_dict_to_diffusers(get_peft_model_state_dict(models[0]))
self.sd_pipeline.save_lora_weights(save_directory=output_dir, unet_lora_layers=state_dict)
elif not self.use_lora and isinstance(models[0], UNet2DConditionModel):
models[0].save_pretrained(os.path.join(output_dir, "unet"))
else:
raise ValueError(f"Unknown model type {type(models[0])}")
def load_checkpoint(self, models, input_dir):
if len(models) != 1:
raise ValueError("Given how the trainable params were set, this should be of length 1")
if self.use_lora:
lora_state_dict, network_alphas = self.sd_pipeline.lora_state_dict(
input_dir, weight_name="pytorch_lora_weights.safetensors"
)
self.sd_pipeline.load_lora_into_unet(lora_state_dict, network_alphas=network_alphas, unet=models[0])
elif not self.use_lora and isinstance(models[0], UNet2DConditionModel):
load_model = UNet2DConditionModel.from_pretrained(input_dir, subfolder="unet")
models[0].register_to_config(**load_model.config)
models[0].load_state_dict(load_model.state_dict())
del load_model
else:
raise ValueError(f"Unknown model type {type(models[0])}")
| trl/trl/models/modeling_sd_base.py/0 | {
"file_path": "trl/trl/models/modeling_sd_base.py",
"repo_id": "trl",
"token_count": 17406
} |
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass, field
from typing import Any, Optional
from transformers import TrainingArguments
@dataclass
class BCOConfig(TrainingArguments):
r"""
Configuration class for the [`BCOTrainer`].
Using [`~transformers.HfArgumentParser`] we can turn this class into
[argparse](https://docs.python.org/3/library/argparse#module-argparse) arguments that can be specified on the
command line.
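    A minimal usage sketch (the output directory name is only a placeholder):
    ```python
    from transformers import HfArgumentParser
    from trl import BCOConfig
    parser = HfArgumentParser(BCOConfig)
    (config,) = parser.parse_args_into_dataclasses(["--output_dir", "bco-model", "--beta", "0.1"])
    ```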
Parameters:
max_length (`int` or `None`, *optional*, defaults to `1024`):
Maximum length of the sequences (prompt + completion) in the batch. This argument is required if you want
to use the default data collator.
max_prompt_length (`int` or `None`, *optional*, defaults to `512`):
Maximum length of the prompt. This argument is required if you want to use the default data collator.
max_completion_length (`int` or `None`, *optional*, defaults to `None`):
Maximum length of the completion. This argument is required if you want to use the default data collator
and your model is an encoder-decoder.
beta (`float`, *optional*, defaults to `0.1`):
Parameter controlling the deviation from the reference model. Higher β means less deviation from the
reference model.
label_pad_token_id (`int`, *optional*, defaults to `-100`):
Label pad token id. This argument is required if you want to use the default data collator.
padding_value (`int` or `None`, *optional*, defaults to `None`):
Padding value to use. If `None`, the padding value of the tokenizer is used.
truncation_mode (`str`, *optional*, defaults to `"keep_end"`):
Truncation mode to use when the prompt is too long. Possible values are `"keep_end"` or `"keep_start"`.
This argument is required if you want to use the default data collator.
disable_dropout (`bool`, *optional*, defaults to `True`):
Whether to disable dropout in the model and reference model.
generate_during_eval (`bool`, *optional*, defaults to `False`):
If `True`, generates and logs completions from both the model and the reference model to W&B or Comet during
evaluation.
is_encoder_decoder (`bool` or `None`, *optional*, defaults to `None`):
When using the `model_init` argument (callable) to instantiate the model instead of the `model` argument,
you need to specify if the model returned by the callable is an encoder-decoder model.
precompute_ref_log_probs (`bool`, *optional*, defaults to `False`):
Whether to precompute reference model log probabilities for training and evaluation datasets. This is
useful when training without the reference model to reduce the total GPU memory needed.
model_init_kwargs (`dict[str, Any]` or `None`, *optional*, defaults to `None`):
Keyword arguments to pass to `AutoModelForCausalLM.from_pretrained` when instantiating the model from a
string.
ref_model_init_kwargs (`dict[str, Any]` or `None`, *optional*, defaults to `None`):
Keyword arguments to pass to `AutoModelForCausalLM.from_pretrained` when instantiating the reference model
from a string.
dataset_num_proc (`int` or `None`, *optional*, defaults to `None`):
Number of processes to use for processing the dataset.
prompt_sample_size (`int`, *optional*, defaults to `1024`):
Number of prompts that are fed to density ratio classifier.
min_density_ratio (`float`, *optional*, defaults to `0.5`):
Minimum value of the density ratio. The estimated density ratio is clamped to this value.
max_density_ratio (`float`, *optional*, defaults to `10.0`):
Maximum value of the density ratio. The estimated density ratio is clamped to this value.
"""
max_length: Optional[int] = field(
default=1024,
metadata={
"help": "Maximum length of the sequences (prompt + completion) in the batch. "
"This argument is required if you want to use the default data collator."
},
)
max_prompt_length: Optional[int] = field(
default=512,
metadata={
"help": "Maximum length of the prompt. "
"This argument is required if you want to use the default data collator."
},
)
max_completion_length: Optional[int] = field(
default=None,
metadata={
"help": "Maximum length of the completion. This argument is required if you want to use the "
"default data collator and your model is an encoder-decoder."
},
)
beta: float = field(
default=0.1,
metadata={
"help": "Parameter controlling the deviation from the reference model. "
"Higher β means less deviation from the reference model."
},
)
label_pad_token_id: int = field(
default=-100,
metadata={
"help": "Label pad token id. This argument is required if you want to use the default data collator."
},
)
padding_value: Optional[int] = field(
default=None,
metadata={"help": "Padding value to use. If `None`, the padding value of the tokenizer is used."},
)
truncation_mode: str = field(
default="keep_end",
metadata={
"help": "Truncation mode to use when the prompt is too long. Possible values are "
"`keep_end` or `keep_start`. This argument is required if you want to use the "
"default data collator."
},
)
disable_dropout: bool = field(
default=True,
metadata={"help": "Whether to disable dropout in the model and reference model."},
)
generate_during_eval: bool = field(
default=False,
metadata={
"help": "If `True`, generates and logs completions from both the model and the reference model "
"to W&B during evaluation."
},
)
is_encoder_decoder: Optional[bool] = field(
default=None,
metadata={
"help": "When using the `model_init` argument (callable) to instantiate the model instead of the "
"`model` argument, you need to specify if the model returned by the callable is an "
"encoder-decoder model."
},
)
precompute_ref_log_probs: bool = field(
default=False,
metadata={
"help": "Whether to precompute reference model log probabilities for training and evaluation datasets. "
"This is useful when training without the reference model to reduce the total GPU memory "
"needed."
},
)
model_init_kwargs: Optional[dict[str, Any]] = field(
default=None,
metadata={
"help": "Keyword arguments to pass to `AutoModelForCausalLM.from_pretrained` when instantiating the "
"model from a string."
},
)
ref_model_init_kwargs: Optional[dict[str, Any]] = field(
default=None,
metadata={
"help": "Keyword arguments to pass to `AutoModelForCausalLM.from_pretrained` when instantiating the "
"reference model from a string."
},
)
dataset_num_proc: Optional[int] = field(
default=None,
metadata={"help": "Number of processes to use for processing the dataset."},
)
prompt_sample_size: int = field(
default=1024,
metadata={"help": "Number of prompts that are fed to density ratio classifier."},
)
min_density_ratio: float = field(
default=0.5,
metadata={"help": "Minimum value of the density ratio. The estimated density ratio is clamped to this value."},
)
max_density_ratio: float = field(
default=10.0,
metadata={"help": "Maximum value of the density ratio. The estimated density ratio is clamped to this value."},
)
| trl/trl/trainer/bco_config.py/0 | {
"file_path": "trl/trl/trainer/bco_config.py",
"repo_id": "trl",
"token_count": 3178
} |
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import os
import random
import textwrap
import warnings
from collections import defaultdict
from contextlib import contextmanager, nullcontext
from copy import deepcopy
from operator import itemgetter
from typing import TYPE_CHECKING, Any, Callable, Literal, Optional, Union
import numpy as np
import pandas as pd
import torch
import torch.amp as amp
import torch.nn as nn
import torch.nn.functional as F
import transformers
from accelerate import PartialState
from accelerate.utils import is_deepspeed_available, tqdm
from datasets import Dataset, concatenate_datasets
from packaging import version
from torch.utils.data import DataLoader, SequentialSampler
from transformers import (
AutoModelForCausalLM,
BaseImageProcessor,
DataCollator,
FeatureExtractionMixin,
PreTrainedModel,
PreTrainedTokenizerBase,
ProcessorMixin,
Trainer,
TrainerCallback,
TrainingArguments,
is_comet_available,
is_wandb_available,
)
from transformers.trainer_utils import EvalLoopOutput, has_length
from transformers.utils import is_peft_available
from ..data_utils import maybe_apply_chat_template, maybe_extract_prompt, maybe_unpair_preference_dataset
from ..models import PreTrainedModelWrapper, create_reference_model
from .kto_config import KTOConfig
from .utils import (
DPODataCollatorWithPadding,
disable_dropout_in_model,
generate_model_card,
get_comet_experiment_url,
log_table_to_comet_experiment,
pad_to_length,
peft_module_casting_to_bf16,
selective_log_softmax,
)
if is_peft_available():
from peft import PeftModel, get_peft_model, prepare_model_for_kbit_training
if is_wandb_available():
import wandb
if is_deepspeed_available():
import deepspeed
if TYPE_CHECKING:
from transformers import PreTrainedModel, PreTrainedTokenizer
RUNNING_NAME = "running.pt"
def _get_kl_dataset(batch: dict[str, list[Any]]) -> dict[str, list[Any]]:
"""
Creates mismatched pairs of prompts and completions for the KL dataset by adding a +1 offset to the order of completions.
For best results, the mismatched outputs y' used to estimate the KL term for a batch should be the same set as the matched
outputs y used to estimate the rewards in that batch, just paired with different x.
"""
batch["answer_input_ids"] = [batch["answer_input_ids"][-1]] + batch["answer_input_ids"][:-1]
batch["answer_attention_mask"] = [batch["answer_attention_mask"][-1]] + batch["answer_attention_mask"][:-1]
return batch
def _tokenize(
batch: dict[str, list[Any]],
tokenizer: "PreTrainedTokenizer",
) -> dict[str, list[Any]]:
"""Tokenize a batch from a KTO specific dataset."""
prompt_tokenized = tokenizer(batch["prompt"], add_special_tokens=False)
prompt_input_ids = prompt_tokenized["input_ids"]
prompt_attention_mask = prompt_tokenized["attention_mask"]
prompt_and_completion = [prompt + completion for prompt, completion in zip(batch["prompt"], batch["completion"])]
full_tokenized = tokenizer(prompt_and_completion, add_special_tokens=False)
full_input_ids = full_tokenized["input_ids"]
full_attention_mask = full_tokenized["attention_mask"]
answer_input_ids = [f[len(p) :] for f, p in zip(full_input_ids, prompt_input_ids)]
answer_attention_mask = [f[len(p) :] for f, p in zip(full_attention_mask, prompt_attention_mask)]
# Concat tokens to form `enc(a) + enc(a + b)[len(enc(a)):]`
full_concat_input_ids = [np.concatenate([p, a]) for p, a in zip(prompt_input_ids, answer_input_ids)]
# Prepare input tokens for token by token comparison
full_input_ids = [np.array(f) for f in full_input_ids]
for full, concat in zip(full_input_ids, full_concat_input_ids):
if len(full) != len(concat):
raise ValueError(
"The elements in 'full_input_ids' and 'full_concat_input_ids' must have the same pairwise length."
)
# On some tokenizers, like Llama-2 tokenizer, there are occasions where tokens
    # can be merged together when tokenizing prompt+answer. This could result
    # in the last token from the prompt being different when tokenized on its own
# vs when done as prompt+answer.
response_token_ids_start_idx = [len(p) for p in prompt_input_ids]
# If tokenized prompt is different than both prompt+answer, then it means the
# last token has changed due to merging.
for idx, (p, f, r) in enumerate(zip(prompt_input_ids, full_input_ids, response_token_ids_start_idx)):
if not np.array_equal(p, f[:r]):
response_token_ids_start_idx[idx] -= 1
prompt_input_ids = [f[:r] for f, r in zip(full_input_ids, response_token_ids_start_idx)]
prompt_attention_mask = [f[:r] for f, r in zip(full_attention_mask, response_token_ids_start_idx)]
for p, m in zip(prompt_input_ids, prompt_attention_mask):
if len(p) != len(m):
raise ValueError("Prompt input ids and attention mask should have the same length.")
answer_input_ids = [f[r:] for f, r in zip(full_input_ids, response_token_ids_start_idx)]
answer_attention_mask = [f[r:] for f, r in zip(full_attention_mask, response_token_ids_start_idx)]
output = dict(
prompt_input_ids=prompt_input_ids,
prompt_attention_mask=prompt_attention_mask,
answer_input_ids=answer_input_ids,
answer_attention_mask=answer_attention_mask,
)
return output
def _process_tokens(example: dict[str, Any], model: "PreTrainedModel" = None, **kwargs) -> dict:
"""Process tokens of a KTO specific dataset.
    At this stage, we don't convert to PyTorch tensors yet; we just handle the truncation
    in case the prompt + completion is too long. First
    we truncate the prompt; if it is still too long, we truncate the completion.
We also create the labels for the completion responses, which are of length equal to
the sum of the length of the prompt and the completion response, with
label_pad_token_id for the prompt tokens.
"""
prompt = example["prompt"]
completion = example["completion"]
batch = {
f"{kwargs['prefix']}prompt": prompt,
f"{kwargs['prefix']}completion": completion,
f"{kwargs['prefix']}label": example["label"],
}
if not kwargs["is_encoder_decoder"]:
# Check issues below for more details
# 1. https://github.com/huggingface/trl/issues/907
# 2. https://github.com/EleutherAI/lm-evaluation-harness/pull/531#issuecomment-1595586257
# 3. https://github.com/LianjiaTech/BELLE/issues/337
        if not isinstance(prompt, str):
            raise ValueError(f"prompt should be a str but got {type(prompt)}")
        if not isinstance(completion, str):
            raise ValueError(f"completion should be a str but got {type(completion)}")
# keys of format prompt_* refers to just the prompt and answer_* refers to just the answer
all_tokens = {
"prompt_input_ids": example["prompt_input_ids"],
"prompt_attention_mask": example["prompt_attention_mask"],
"answer_input_ids": example["answer_input_ids"],
"answer_attention_mask": example["answer_attention_mask"],
}
# calculate max length by checking if BOS/EOS is already there
max_length = kwargs["max_length"]
bos_token_id = kwargs["tokenizer"].bos_token_id
eos_token_id = kwargs["tokenizer"].eos_token_id
if len(all_tokens["prompt_input_ids"]) > 0 and bos_token_id != all_tokens["prompt_input_ids"][0]:
max_length -= 1
if len(all_tokens["answer_input_ids"]) > 0 and eos_token_id != all_tokens["answer_input_ids"][-1]:
max_length -= 1
# if combined sequence is too long (> max_length - 1 for BOS token - 1 for EOS), truncate the prompt
if len(all_tokens["prompt_input_ids"]) + len(all_tokens["answer_input_ids"]) > max_length:
for k in ["prompt_input_ids", "prompt_attention_mask"]:
if kwargs["truncation_mode"] == "keep_start":
all_tokens[k] = all_tokens[k][: kwargs["max_prompt_length"]]
elif kwargs["truncation_mode"] == "keep_end":
all_tokens[k] = all_tokens[k][-kwargs["max_prompt_length"] :]
else:
raise ValueError(f"Unknown truncation mode: {kwargs['truncation_mode']}")
# if that's still too long, truncate the response
if len(all_tokens["prompt_input_ids"]) + len(all_tokens["answer_input_ids"]) > max_length:
for k in ["answer_input_ids", "answer_attention_mask"]:
all_tokens[k] = all_tokens[k][: max_length - kwargs["max_prompt_length"]]
# all input_ids and attention mask as is. We then check if we need to add BOS/EOS tokens
batch[f"{kwargs['prefix']}prompt_input_ids"] = all_tokens["prompt_input_ids"]
batch[f"{kwargs['prefix']}prompt_attention_mask"] = all_tokens["prompt_attention_mask"]
batch[f"{kwargs['prefix']}completion_input_ids"] = (
all_tokens["prompt_input_ids"] + all_tokens["answer_input_ids"]
)
batch[f"{kwargs['prefix']}completion_attention_mask"] = (
all_tokens["prompt_attention_mask"] + all_tokens["answer_attention_mask"]
)
# add BOS, which affects both prompt and the full completion
if bos_token_id is not None:
if len(all_tokens["prompt_input_ids"]) == 0 or bos_token_id != all_tokens["prompt_input_ids"][0]:
batch[f"{kwargs['prefix']}prompt_input_ids"] = [bos_token_id] + batch[
f"{kwargs['prefix']}prompt_input_ids"
]
batch[f"{kwargs['prefix']}prompt_attention_mask"] = [1] + batch[
f"{kwargs['prefix']}prompt_attention_mask"
]
batch[f"{kwargs['prefix']}completion_input_ids"] = [bos_token_id] + batch[
f"{kwargs['prefix']}completion_input_ids"
]
batch[f"{kwargs['prefix']}completion_attention_mask"] = [1] + batch[
f"{kwargs['prefix']}completion_attention_mask"
]
# add EOS, which affects only the full completion
if len(all_tokens["answer_input_ids"]) == 0 or eos_token_id != all_tokens["answer_input_ids"][-1]:
batch[f"{kwargs['prefix']}completion_input_ids"] = batch[f"{kwargs['prefix']}completion_input_ids"] + [
eos_token_id
]
batch[f"{kwargs['prefix']}completion_attention_mask"] = batch[
f"{kwargs['prefix']}completion_attention_mask"
] + [1]
batch[f"{kwargs['prefix']}completion_labels"] = batch[f"{kwargs['prefix']}completion_input_ids"][:]
batch[f"{kwargs['prefix']}completion_labels"][: len(batch[f"{kwargs['prefix']}prompt_input_ids"])] = [
kwargs["label_pad_token_id"]
] * len(batch[f"{kwargs['prefix']}prompt_input_ids"])
else:
completion_tokens = kwargs["tokenizer"](
completion, truncation=True, max_length=kwargs["max_completion_length"], add_special_tokens=True
)
prompt_tokens = kwargs["tokenizer"](
prompt, truncation=True, max_length=kwargs["max_prompt_length"], add_special_tokens=True
)
batch[f"{kwargs['prefix']}prompt_input_ids"] = prompt_tokens["input_ids"]
batch[f"{kwargs['prefix']}prompt_attention_mask"] = prompt_tokens["attention_mask"]
batch[f"{kwargs['prefix']}completion_labels"] = completion_tokens["input_ids"]
batch[f"{kwargs['prefix']}completion_attention_mask"] = completion_tokens["attention_mask"]
if model is not None and hasattr(model, "prepare_decoder_input_ids_from_labels"):
batch[f"{kwargs['prefix']}completion_decoder_input_ids"] = model.prepare_decoder_input_ids_from_labels(
labels=torch.tensor(batch["completion_labels"])
)
return batch
class KTOTrainer(Trainer):
r"""
Initialize KTOTrainer.
Args:
model (`transformers.PreTrainedModel`):
            The model to train, preferably an `AutoModelForCausalLM`.
ref_model (`PreTrainedModelWrapper`):
            Hugging Face transformer model with a causal language modeling head. Used for implicit reward computation and loss. If no
reference model is provided, the trainer will create a reference model with the same architecture as the model to be optimized.
args (`KTOConfig`):
The arguments to use for training.
train_dataset (`datasets.Dataset`):
The dataset to use for training.
eval_dataset (`datasets.Dataset`):
The dataset to use for evaluation.
processing_class (`PreTrainedTokenizerBase` or `BaseImageProcessor` or `FeatureExtractionMixin` or `ProcessorMixin`, *optional*):
Processing class used to process the data. If provided, will be used to automatically process the inputs
for the model, and it will be saved along the model to make it easier to rerun an interrupted training or
reuse the fine-tuned model.
data_collator (`transformers.DataCollator`, *optional*, defaults to `None`):
The data collator to use for training. If None is specified, the default data collator (`DPODataCollatorWithPadding`) will be used
which will pad the sequences to the maximum length of the sequences in the batch, given a dataset of paired sequences.
model_init (`Callable[[], transformers.PreTrainedModel]`):
The model initializer to use for training. If None is specified, the default model initializer will be used.
callbacks (`list[transformers.TrainerCallback]`):
The callbacks to use for training.
optimizers (`tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR]`):
The optimizer and scheduler to use for training.
preprocess_logits_for_metrics (`Callable[[torch.Tensor, torch.Tensor], torch.Tensor]`):
The function to use to preprocess the logits before computing the metrics.
peft_config (`dict`, defaults to `None`):
The PEFT configuration to use for training. If you pass a PEFT configuration, the model will be wrapped in a PEFT model.
compute_metrics (`Callable[[EvalPrediction], dict]`, *optional*):
The function to use to compute the metrics. Must take a `EvalPrediction` and return
a dictionary string to metric values.
model_adapter_name (`str`, defaults to `None`):
Name of the train target PEFT adapter, when using LoRA with multiple adapters.
ref_adapter_name (`str`, defaults to `None`):
Name of the reference PEFT adapter, when using LoRA with multiple adapters.
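    Example (illustrative sketch; the model and dataset names are placeholders):
    ```python
    from datasets import load_dataset
    from transformers import AutoModelForCausalLM, AutoTokenizer
    from trl import KTOConfig, KTOTrainer
    model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen2-0.5B-Instruct")
    tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2-0.5B-Instruct")
    train_dataset = load_dataset("trl-lib/kto-mix-14k", split="train")
    trainer = KTOTrainer(
        model=model,
        args=KTOConfig(output_dir="Qwen2-0.5B-KTO"),
        processing_class=tokenizer,
        train_dataset=train_dataset,
    )
    trainer.train()
    ```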
"""
_tag_names = ["trl", "kto"]
def __init__(
self,
model: Union[PreTrainedModel, nn.Module, str] = None,
ref_model: Optional[Union[PreTrainedModel, nn.Module, str]] = None,
args: KTOConfig = None,
train_dataset: Optional[Dataset] = None,
eval_dataset: Optional[Union[Dataset, dict[str, Dataset]]] = None,
processing_class: Optional[
Union[PreTrainedTokenizerBase, BaseImageProcessor, FeatureExtractionMixin, ProcessorMixin]
] = None,
data_collator: Optional[DataCollator] = None,
model_init: Optional[Callable[[], PreTrainedModel]] = None,
callbacks: Optional[list[TrainerCallback]] = None,
optimizers: tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR] = (None, None),
preprocess_logits_for_metrics: Optional[Callable[[torch.Tensor, torch.Tensor], torch.Tensor]] = None,
peft_config: Optional[dict] = None,
compute_metrics: Optional[Callable[[EvalLoopOutput], dict]] = None,
model_adapter_name: Optional[str] = None,
ref_adapter_name: Optional[str] = None,
):
if type(args) is TrainingArguments:
raise ValueError("Please use `KTOConfig` instead TrainingArguments.")
if not isinstance(model, str) and ref_model is model:
raise ValueError(
"`model` and `ref_model` cannot be the same object. If you want `ref_model` to be the "
"same as `model`, you must mass a copy of it, or `None` if you use peft."
)
if args.model_init_kwargs is None:
model_init_kwargs = {}
elif not isinstance(model, str):
raise ValueError("You passed model_kwargs to the KTOTrainer. But your model is already instantiated.")
else:
model_init_kwargs = args.model_init_kwargs
torch_dtype = model_init_kwargs.get("torch_dtype")
if torch_dtype is not None:
# Convert to `torch.dtype` if an str is passed
if isinstance(torch_dtype, str) and torch_dtype != "auto":
torch_dtype = getattr(torch, torch_dtype)
if torch_dtype != "auto" and not isinstance(torch_dtype, torch.dtype):
raise ValueError(
f"Invalid `torch_dtype` passed to the KTOConfig. Expected a string with either `torch.dtype` or 'auto', but got {torch_dtype}."
)
model_init_kwargs["torch_dtype"] = torch_dtype
if args.ref_model_init_kwargs is None:
ref_model_init_kwargs = {}
elif not isinstance(ref_model, str):
raise ValueError(
"You passed ref_model_kwargs to the KTOTrainer. But your ref_model is already instantiated."
)
else:
ref_model_init_kwargs = args.ref_model_init_kwargs
torch_dtype = ref_model_init_kwargs.get("torch_dtype")
if torch_dtype is not None:
# Convert to `torch.dtype` if an str is passed
if isinstance(torch_dtype, str) and torch_dtype != "auto":
torch_dtype = getattr(torch, torch_dtype)
if torch_dtype != "auto" and not isinstance(torch_dtype, torch.dtype):
raise ValueError(
f"Invalid `torch_dtype` passed to the KTOConfig. Expected a string with either `torch.dtype` or 'auto', but got {torch_dtype}."
)
ref_model_init_kwargs["torch_dtype"] = torch_dtype
if isinstance(model, str):
model = AutoModelForCausalLM.from_pretrained(model, **model_init_kwargs)
if isinstance(ref_model, str):
ref_model = AutoModelForCausalLM.from_pretrained(ref_model, **ref_model_init_kwargs)
# Initialize this variable to False. This helps tracking the case when `peft_module_casting_to_bf16`
# has been called in order to properly call autocast if needed.
self._peft_has_been_casted_to_bf16 = False
if not is_peft_available() and peft_config is not None:
raise ValueError(
"PEFT is not installed and you passed a `peft_config` in the trainer's kwargs, please install it with `pip install peft` to use the PEFT models"
)
elif is_peft_available() and peft_config is not None:
# if model is a peft model and we have a peft_config, we merge and unload it first
if isinstance(model, PeftModel):
model = model.merge_and_unload()
if getattr(model, "is_loaded_in_8bit", False) or getattr(model, "is_loaded_in_4bit", False):
_support_gc_kwargs = hasattr(
args, "gradient_checkpointing_kwargs"
) and "gradient_checkpointing_kwargs" in list(
inspect.signature(prepare_model_for_kbit_training).parameters
)
prepare_model_kwargs = {"use_gradient_checkpointing": args.gradient_checkpointing}
if _support_gc_kwargs:
prepare_model_kwargs["gradient_checkpointing_kwargs"] = args.gradient_checkpointing_kwargs
model = prepare_model_for_kbit_training(model, **prepare_model_kwargs)
elif getattr(args, "gradient_checkpointing", False):
# For backward compatibility with older versions of transformers
if hasattr(model, "enable_input_require_grads"):
model.enable_input_require_grads()
else:
def make_inputs_require_grad(module, input, output):
output.requires_grad_(True)
model.get_input_embeddings().register_forward_hook(make_inputs_require_grad)
# get peft model with the given config
model = get_peft_model(model, peft_config)
if args.bf16 and getattr(model, "is_loaded_in_4bit", False):
peft_module_casting_to_bf16(model)
# If args.bf16 we need to explicitly call `generate` with torch amp autocast context manager
self._peft_has_been_casted_to_bf16 = True
# For models that use gradient_checkpointing, we need to attach a hook that enables input
# to explicitly have `requires_grad=True`, otherwise training will either silently
# fail or completely fail.
elif getattr(args, "gradient_checkpointing", False):
# For backward compatibility with older versions of transformers
if hasattr(model, "enable_input_require_grads"):
model.enable_input_require_grads()
else:
def make_inputs_require_grad(module, input, output):
output.requires_grad_(True)
model.get_input_embeddings().register_forward_hook(make_inputs_require_grad)
if args.generate_during_eval and not (is_wandb_available() or is_comet_available()):
raise ValueError(
"`generate_during_eval=True` requires Weights and Biases or Comet to be installed."
" Please install `wandb` or `comet-ml` to resolve."
)
if model is not None:
self.is_encoder_decoder = model.config.is_encoder_decoder
elif args.is_encoder_decoder is None:
raise ValueError("When no model is provided, you need to pass the parameter is_encoder_decoder.")
else:
self.is_encoder_decoder = args.is_encoder_decoder
self.is_peft_model = is_peft_available() and isinstance(model, PeftModel)
self.model_adapter_name = model_adapter_name
self.ref_adapter_name = ref_adapter_name
if ref_model:
self.ref_model = ref_model
elif self.is_peft_model or args.precompute_ref_log_probs:
# The `model` with adapters turned off will be used as the reference model
self.ref_model = None
else:
self.ref_model = create_reference_model(model)
if processing_class is None:
raise ValueError(
"max_length or a processing_class must be specified when using the default DPODataCollatorWithPadding"
)
if args.max_length is None:
warnings.warn(
"When using DPODataCollatorWithPadding, you should set `max_length` in the KTOTrainer's init"
" it will be set to `512` by default, but you should do it yourself in the future.",
UserWarning,
)
max_length = 512
if args.max_length is not None:
max_length = args.max_length
if args.max_prompt_length is None:
warnings.warn(
"When using DPODataCollatorWithPadding, you should set `max_prompt_length` in the KTOTrainer's init"
" it will be set to `128` by default, but you should do it yourself in the future.",
UserWarning,
)
max_prompt_length = 128
if args.max_prompt_length is not None:
max_prompt_length = args.max_prompt_length
max_completion_length = None
if args.max_completion_length is None and self.is_encoder_decoder:
warnings.warn(
"When using DPODataCollatorWithPadding with an encoder decoder architecture, you should set `max_completion_length` in the KTOTrainer's init"
" it will be set to `128` by default, but you should do it yourself in the future.",
UserWarning,
)
max_completion_length = 128
if args.max_completion_length is not None and self.is_encoder_decoder:
max_completion_length = args.max_completion_length
if data_collator is None:
data_collator = DPODataCollatorWithPadding(
pad_token_id=processing_class.pad_token_id,
label_pad_token_id=args.label_pad_token_id,
is_encoder_decoder=self.is_encoder_decoder,
)
if args.remove_unused_columns:
args.remove_unused_columns = False
# warn users
warnings.warn(
"When using DPODataCollatorWithPadding, you should set `remove_unused_columns=False` in your KTOConfig"
" we have set it for you, but you should do it yourself in the future.",
UserWarning,
)
self.use_dpo_data_collator = True
else:
self.use_dpo_data_collator = False
# Disable dropout in the model and reference model
if args.disable_dropout:
disable_dropout_in_model(model)
if self.ref_model is not None:
disable_dropout_in_model(self.ref_model)
self.loss_type = args.loss_type
self.max_length = max_length
self.generate_during_eval = args.generate_during_eval
self.label_pad_token_id = args.label_pad_token_id
self.padding_value = args.padding_value if args.padding_value is not None else processing_class.pad_token_id
self.max_prompt_length = max_prompt_length
self.truncation_mode = args.truncation_mode
self.max_completion_length = max_completion_length
self.processing_class = processing_class
self.precompute_ref_log_probs = args.precompute_ref_log_probs
# Not all losses require a KL calculation
self.calculate_KL = True
if self.loss_type in ["apo_zero_unpaired"]:
self.calculate_KL = False
# Since ref_logs are precomputed on the first call to get_train/eval_dataloader
# keep track of first called to avoid computation of future calls
self._precomputed_train_ref_log_probs = False
self._precomputed_eval_ref_log_probs = False
# metric
self._stored_metrics = defaultdict(lambda: defaultdict(list))
# KTO parameter
self.beta = args.beta
self.desirable_weight = args.desirable_weight
self.undesirable_weight = args.undesirable_weight
self.aux_loss_enabled = getattr(model.config, "output_router_logits", False)
self.aux_loss_coef = getattr(model.config, "router_aux_loss_coef", 0.0)
if self.aux_loss_enabled and self.aux_loss_coef == 0.0:
warnings.warn(
"You set `output_router_logits` to `True` in the model config, but `router_aux_loss_coef` is set to "
"`0.0`, meaning the auxiliary loss will not be used. Either set `router_aux_loss_coef` to a value "
"greater than `0.0`, or set `output_router_logits` to `False` if you don't want to use the auxiliary "
"loss.",
UserWarning,
)
# The trainer estimates the number of FLOPs (floating-point operations) using the number of elements in the
# input tensor associated with the key "input_ids". However, in KTO, the sampled data does not include the
# "input_ids" key. Instead, the available keys are "prompt_input_ids" and "completion_input_ids". As a result,
# the trainer issues the warning: "Could not estimate the number of tokens of the input, floating-point
# operations will not be computed." To suppress this warning, we set the "estimate_tokens" key in the model's
# "warnings_issued" dictionary to True. This acts as a flag to indicate that the warning has already been
# issued.
model.warnings_issued["estimate_tokens"] = True
# Compute that only on the main process for faster data processing.
# see: https://github.com/huggingface/trl/pull/1255
with PartialState().local_main_process_first():
# Extract the prompt if needed
train_dataset = train_dataset.map(
maybe_extract_prompt, num_proc=args.dataset_num_proc, desc="Extracting prompt from train dataset"
)
# Unpair the dataset if needed
train_dataset = maybe_unpair_preference_dataset(
train_dataset, args.dataset_num_proc, desc="Unpairing train dataset"
)
# Apply the chat template if needed
train_dataset = train_dataset.map(
maybe_apply_chat_template,
fn_kwargs={"tokenizer": processing_class},
num_proc=args.dataset_num_proc,
desc="Applying chat template to train dataset",
)
if eval_dataset is not None:
eval_dataset = eval_dataset.map(
maybe_extract_prompt, num_proc=args.dataset_num_proc, desc="Extracting prompt from eval dataset"
)
eval_dataset = maybe_unpair_preference_dataset(
eval_dataset, args.dataset_num_proc, desc="Unpairing eval dataset"
)
eval_dataset = eval_dataset.map(
maybe_apply_chat_template,
fn_kwargs={"tokenizer": processing_class},
num_proc=args.dataset_num_proc,
desc="Applying chat template to eval dataset",
)
# Tokenize and prepare the training datasets
train_dataset = train_dataset.map(
_tokenize,
batched=True,
fn_kwargs={"tokenizer": self.processing_class},
num_proc=args.dataset_num_proc,
desc="Tokenizing train dataset",
)
fn_kwargs = {
"prefix": "",
"is_encoder_decoder": self.is_encoder_decoder,
"tokenizer": self.processing_class,
"max_length": self.max_length,
"truncation_mode": self.truncation_mode,
"label_pad_token_id": self.label_pad_token_id,
"max_prompt_length": self.max_prompt_length,
"max_completion_length": self.max_completion_length,
}
train_dataset = train_dataset.map(
_process_tokens,
fn_kwargs=fn_kwargs,
num_proc=args.dataset_num_proc,
desc="Processing tokenized train dataset",
)
# Tokenize and prepare the eval datasets
if eval_dataset is not None:
eval_dataset = eval_dataset.map(
_tokenize,
fn_kwargs={"tokenizer": self.processing_class},
batched=True,
num_proc=args.dataset_num_proc,
desc="Tokenizing eval dataset",
)
eval_dataset = eval_dataset.map(
_process_tokens,
fn_kwargs=fn_kwargs,
num_proc=args.dataset_num_proc,
desc="Processing tokenized eval dataset",
)
# Get KL datasets if needed
if self.calculate_KL:
if args.per_device_train_batch_size <= 1:
raise ValueError(
"Actual (not effective) batch size must be > 1. KTO will not work properly because the KL term will be equivalent to the implied reward."
)
# create pairs for estimating the KL term by flipping the matched pairs in each batch of size total_batch_size
# i.e., (x_1, y_1), ..., (x_n, y_n) --> (x_1, y_n), ..., (x_n, y_1) = (x'_1, y'_1), ..., (x'_n, y'_n)
train_kl_dataset = train_dataset.map(
_get_kl_dataset,
batched=True,
batch_size=args.per_device_train_batch_size,
num_proc=args.dataset_num_proc,
desc="Extracting KL train dataset",
)
fn_kwargs["prefix"] = "KL_"
train_kl_dataset = train_kl_dataset.map(
_process_tokens,
fn_kwargs=fn_kwargs,
num_proc=args.dataset_num_proc,
remove_columns=[c for c in train_kl_dataset.column_names if c in train_dataset.column_names],
desc="Processing tokenized train KL dataset",
)
# merge the datasets
train_dataset = concatenate_datasets([train_dataset, train_kl_dataset], axis=1)
if eval_dataset is not None:
# Get KL dataset
eval_kl_dataset = eval_dataset.map(
_get_kl_dataset,
batched=True,
batch_size=args.per_device_train_batch_size,
num_proc=args.dataset_num_proc,
desc="Extracting eval KL dataset",
)
eval_kl_dataset = eval_kl_dataset.map(
_process_tokens,
fn_kwargs=fn_kwargs,
num_proc=args.dataset_num_proc,
remove_columns=[c for c in eval_kl_dataset.column_names if c in eval_dataset.column_names],
desc="Processing tokenized eval KL dataset",
)
# merge the datasets
eval_dataset = concatenate_datasets([eval_dataset, eval_kl_dataset], axis=1)
# calculate dataset desirability balance
num_desirable = max(sum(train_dataset["label"]), 1)
num_undesirable = max(len(train_dataset["label"]) - num_desirable, 1) # "label" is binary
if num_desirable != num_undesirable:
# The lower and upper bounds come from Eq. (8) of https://huggingface.co/papers/2402.01306
des_weight_lower_bound = round((num_undesirable * self.undesirable_weight / num_desirable) * 1, 2)
des_weight_upper_bound = round((num_undesirable * self.undesirable_weight / num_desirable) * 1.33, 2)
und_weight_lower_bound = round((num_desirable * self.desirable_weight / num_undesirable) / 1.33, 2)
und_weight_upper_bound = round((num_desirable * self.desirable_weight / num_undesirable) / 1, 2)
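            # e.g. with 300 desirable and 100 undesirable examples and undesirable_weight=1.0, the
            # recommended desirable_weight range works out to roughly [0.33, 0.44].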
des_weight_in_range = des_weight_lower_bound <= self.desirable_weight <= des_weight_upper_bound
und_weight_in_range = und_weight_lower_bound <= self.undesirable_weight <= und_weight_upper_bound
if not (des_weight_in_range or und_weight_in_range):
warnings.warn(
"You have different amounts of desirable/positive and undesirable/negative examples but the "
"weights on the desirable and undesirable losses don't seem to be in an ideal range. Based "
f"on your data, we recommend EITHER "
f"desirable_weight in [{des_weight_lower_bound}, {des_weight_upper_bound}] or "
f"undesirable_weight in [{und_weight_lower_bound}, {und_weight_upper_bound}] (but NOT BOTH). "
"See the documentation on how to optimally set these weights.",
UserWarning,
)
super().__init__(
model=model,
args=args,
data_collator=data_collator,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
processing_class=processing_class,
model_init=model_init,
compute_metrics=compute_metrics,
callbacks=callbacks,
optimizers=optimizers,
preprocess_logits_for_metrics=preprocess_logits_for_metrics,
)
# Gradient accumulation requires scaled loss. Normally, loss scaling in the parent class depends on whether the
# model accepts loss-related kwargs. Since we compute our own loss, this check is irrelevant. We set
# self.model_accepts_loss_kwargs to False to enable scaling.
self.model_accepts_loss_kwargs = False
# Add tags for models that have been loaded with the correct transformers version
if hasattr(self.model, "add_model_tags"):
self.model.add_model_tags(self._tag_names)
if not hasattr(self, "accelerator"):
raise AttributeError(
"Your `Trainer` does not have an `accelerator` object. Consider upgrading `transformers`."
)
# Deepspeed Zero-3 does not support precompute_ref_log_probs
if self.is_deepspeed_enabled:
if self.accelerator.state.deepspeed_plugin.zero_stage == 3 and self.precompute_ref_log_probs:
raise ValueError(
"You cannot use `precompute_ref_log_probs=True` with Deepspeed ZeRO-3. Please set `precompute_ref_log_probs=False`."
)
if self.ref_model is None:
if not (self.is_peft_model or self.precompute_ref_log_probs):
raise ValueError(
"No reference model and model is not a Peft model. Try setting `precompute_ref_log_probs=True`"
)
else:
if self.is_deepspeed_enabled:
self.ref_model = self._prepare_deepspeed(self.ref_model)
else:
self.ref_model = self.accelerator.prepare_model(self.ref_model, evaluation_mode=True)
def _prepare_deepspeed(self, model: PreTrainedModelWrapper):
# Adapted from accelerate: https://github.com/huggingface/accelerate/blob/739b135f8367becb67ffaada12fe76e3aa60fefd/src/accelerate/accelerator.py#L1473
deepspeed_plugin = self.accelerator.state.deepspeed_plugin
config_kwargs = deepcopy(deepspeed_plugin.deepspeed_config)
if model is not None:
if hasattr(model, "config"):
hidden_size = (
max(model.config.hidden_sizes)
if getattr(model.config, "hidden_sizes", None)
else getattr(model.config, "hidden_size", None)
)
if hidden_size is not None and config_kwargs["zero_optimization"]["stage"] == 3:
# Note that `stage3_prefetch_bucket_size` can produce DeepSpeed messages like: `Invalidate trace cache @ step 0: expected module 1, but got module 0`
# This is expected and is not an error, see: https://github.com/microsoft/DeepSpeed/discussions/4081
config_kwargs.update(
{
"zero_optimization.reduce_bucket_size": hidden_size * hidden_size,
"zero_optimization.stage3_param_persistence_threshold": 10 * hidden_size,
"zero_optimization.stage3_prefetch_bucket_size": 0.9 * hidden_size * hidden_size,
}
)
# If ZeRO-3 is used, we shard both the active and reference model.
# Otherwise, we assume the reference model fits in memory and is initialized on each device with ZeRO disabled (stage 0)
if config_kwargs["zero_optimization"]["stage"] != 3:
config_kwargs["zero_optimization"]["stage"] = 0
model, *_ = deepspeed.initialize(model=model, config=config_kwargs)
model.eval()
return model
@contextmanager
def null_ref_context(self):
"""Context manager for handling null reference model (that is, peft adapter manipulation)."""
with (
self.accelerator.unwrap_model(self.model).disable_adapter()
if self.is_peft_model and not self.ref_adapter_name
else nullcontext()
):
if self.ref_adapter_name:
self.model.set_adapter(self.ref_adapter_name)
yield
if self.ref_adapter_name:
self.model.set_adapter(self.model_adapter_name or "default")
def get_train_dataloader(self) -> DataLoader:
"""
Returns the training [`~torch.utils.data.DataLoader`].
        Subclass of `transformers.Trainer.get_train_dataloader` to precompute `ref_log_probs`.
"""
if self.precompute_ref_log_probs and not self._precomputed_train_ref_log_probs:
dataloader_params = {
"batch_size": self.args.per_device_train_batch_size,
"collate_fn": self.data_collator,
"num_workers": self.args.dataloader_num_workers,
"pin_memory": self.args.dataloader_pin_memory,
"shuffle": False,
}
# prepare dataloader
data_loader = self.accelerator.prepare(DataLoader(self.train_dataset, **dataloader_params))
reference_completion_logps = []
reference_KL_logps = []
for padded_batch in tqdm(iterable=data_loader, desc="Train dataset reference log probs"):
reference_completion_logp, reference_KL_logp = self.compute_reference_log_probs(padded_batch)
reference_completion_logp = self.accelerator.gather_for_metrics(reference_completion_logp)
reference_completion_logps.append(reference_completion_logp.cpu())
if self.calculate_KL:
reference_KL_logp = self.accelerator.gather_for_metrics(reference_KL_logp)
reference_KL_logps.append(reference_KL_logp.cpu())
self.train_dataset = self.train_dataset.add_column(
name="reference_logps", column=torch.cat(reference_completion_logps).float().numpy()
)
if self.calculate_KL:
self.train_dataset = self.train_dataset.add_column(
name="reference_KL_logps", column=torch.cat(reference_KL_logps).float().numpy()
)
self._precomputed_train_ref_log_probs = True
return super().get_train_dataloader()
def get_eval_dataloader(self, eval_dataset: Optional[Dataset] = None) -> DataLoader:
"""
Returns the evaluation [`~torch.utils.data.DataLoader`].
        Subclass of `transformers.Trainer.get_eval_dataloader` to precompute `ref_log_probs`.
Args:
eval_dataset (`torch.utils.data.Dataset`, *optional*):
If provided, will override `self.eval_dataset`. If it is a [`~datasets.Dataset`], columns not accepted
by the `model.forward()` method are automatically removed. It must implement `__len__`.
"""
if eval_dataset is None and self.eval_dataset is None:
raise ValueError("Trainer: evaluation requires an eval_dataset.")
eval_dataset = eval_dataset if eval_dataset is not None else self.eval_dataset
if self.precompute_ref_log_probs and not self._precomputed_eval_ref_log_probs:
dataloader_params = {
"batch_size": self.args.per_device_eval_batch_size,
"collate_fn": self.data_collator,
"num_workers": self.args.dataloader_num_workers,
"pin_memory": self.args.dataloader_pin_memory,
"shuffle": False,
}
# prepare dataloader
data_loader = self.accelerator.prepare(DataLoader(eval_dataset, **dataloader_params))
reference_completion_logps = []
reference_KL_logps = []
for padded_batch in tqdm(iterable=data_loader, desc="Eval dataset reference log probs"):
reference_completion_logp, reference_KL_logp = self.compute_reference_log_probs(padded_batch)
reference_completion_logp = self.accelerator.gather_for_metrics(reference_completion_logp)
reference_completion_logps.append(reference_completion_logp.cpu())
if self.calculate_KL:
reference_KL_logp = self.accelerator.gather_for_metrics(reference_KL_logp)
reference_KL_logps.append(reference_KL_logp.cpu())
eval_dataset = eval_dataset.add_column(
name="reference_logps", column=torch.cat(reference_completion_logps).float().numpy()
)
if self.calculate_KL:
eval_dataset = eval_dataset.add_column(
name="reference_KL_logps", column=torch.cat(reference_KL_logps).float().numpy()
)
# Save calculated reference_chosen_logps and reference_rejected_logps to the eval_dataset for subsequent runs
if self.eval_dataset is not None:
self.eval_dataset = eval_dataset
self._precomputed_eval_ref_log_probs = True
return super().get_eval_dataloader(eval_dataset=eval_dataset)
    def compute_reference_log_probs(self, padded_batch: dict) -> tuple[torch.FloatTensor, Optional[torch.FloatTensor]]:
"""Computes log probabilities of the reference model for a single padded batch of a KTO specific dataset."""
with torch.no_grad():
if self.ref_model is None:
with self.null_ref_context():
if self.is_encoder_decoder:
completion_logits = self.model(
padded_batch["prompt_input_ids"],
attention_mask=padded_batch["prompt_attention_mask"],
decoder_input_ids=padded_batch.get("completion_decoder_input_ids"),
labels=padded_batch["completion_labels"],
).logits
if self.calculate_KL:
KL_logits = self.model(
padded_batch["KL_prompt_input_ids"],
attention_mask=padded_batch["KL_prompt_attention_mask"],
decoder_input_ids=padded_batch.get("KL_completion_decoder_input_ids"),
labels=padded_batch["KL_completion_labels"],
).logits
else:
completion_logits = self.model(
padded_batch["completion_input_ids"],
attention_mask=padded_batch["completion_attention_mask"],
).logits
if self.calculate_KL:
KL_logits = self.model(
padded_batch["KL_completion_input_ids"],
attention_mask=padded_batch["KL_completion_attention_mask"],
).logits
else:
if self.is_encoder_decoder:
completion_logits = self.ref_model(
padded_batch["prompt_input_ids"],
attention_mask=padded_batch["prompt_attention_mask"],
decoder_input_ids=padded_batch.get("completion_decoder_input_ids"),
labels=padded_batch["completion_labels"],
).logits
if self.calculate_KL:
KL_logits = self.ref_model(
padded_batch["KL_prompt_input_ids"],
attention_mask=padded_batch["KL_prompt_attention_mask"],
decoder_input_ids=padded_batch.get("KL_completion_decoder_input_ids"),
labels=padded_batch["KL_completion_labels"],
).logits
else:
completion_logits = self.ref_model(
padded_batch["completion_input_ids"], attention_mask=padded_batch["completion_attention_mask"]
).logits
if self.calculate_KL:
KL_logits = self.ref_model(
padded_batch["KL_completion_input_ids"],
attention_mask=padded_batch["KL_completion_attention_mask"],
).logits
completion_logps = self.get_batch_logps(
completion_logits,
padded_batch["completion_labels"],
average_log_prob=False,
is_encoder_decoder=self.is_encoder_decoder,
label_pad_token_id=self.label_pad_token_id,
)
if self.calculate_KL:
KL_logps = self.get_batch_logps(
KL_logits,
padded_batch["KL_completion_labels"],
average_log_prob=False,
is_encoder_decoder=self.is_encoder_decoder,
label_pad_token_id=self.label_pad_token_id,
)
else:
KL_logps = None
return completion_logps, KL_logps
@staticmethod
def get_batch_logps(
logits: torch.FloatTensor,
labels: torch.LongTensor,
average_log_prob: bool = False,
label_pad_token_id: int = -100,
is_encoder_decoder: bool = False,
) -> torch.FloatTensor:
"""Compute the log probabilities of the given labels under the given logits.
Args:
logits: Logits of the model (unnormalized). Shape: (batch_size, sequence_length, vocab_size)
labels: Labels for which to compute the log probabilities. Label tokens with a value of label_pad_token_id are ignored. Shape: (batch_size, sequence_length)
average_log_prob: If True, return the average log probability per (non-masked) token. Otherwise, return the sum of the log probabilities of the (non-masked) tokens.
Returns:
A tensor of shape (batch_size,) containing the average/sum log probabilities of the given labels under the given logits.
"""
if logits.shape[:-1] != labels.shape:
raise ValueError("Logits (batch and sequence length dim) and labels must have the same shape.")
if not is_encoder_decoder:
labels = labels[:, 1:].clone()
logits = logits[:, :-1, :]
else:
            # Fixes an encoder-decoder RuntimeError
labels = labels.clone()
loss_mask = labels != label_pad_token_id
# dummy token; we'll ignore the losses on these tokens later
labels[labels == label_pad_token_id] = 0
per_token_logps = selective_log_softmax(logits, labels)
if average_log_prob:
return (per_token_logps * loss_mask).sum(-1) / loss_mask.sum(-1)
else:
return (per_token_logps * loss_mask).sum(-1)
def forward(
self, model: nn.Module, batch: dict[str, Union[list, torch.LongTensor]]
) -> tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor, torch.FloatTensor]:
if self.calculate_KL:
KL_logps = None
KL_model_kwargs = (
{
"input_ids": batch["KL_prompt_input_ids"],
"attention_mask": batch["KL_prompt_attention_mask"],
"labels": batch["KL_completion_labels"],
"decoder_input_ids": batch.get("KL_completion_decoder_input_ids"),
}
if self.is_encoder_decoder
else {
"input_ids": batch["KL_completion_input_ids"],
"attention_mask": batch["KL_completion_attention_mask"],
}
)
with torch.no_grad():
KL_logits = model(
**KL_model_kwargs,
).logits
KL_logps = self.get_batch_logps(
KL_logits,
batch["KL_completion_labels"],
average_log_prob=False,
is_encoder_decoder=self.is_encoder_decoder,
label_pad_token_id=self.label_pad_token_id,
)
else:
KL_logps = None
model_kwargs = (
{
"labels": batch["completion_labels"],
"decoder_input_ids": batch.get("completion_decoder_input_ids"),
}
if self.is_encoder_decoder
else {}
)
if self.aux_loss_enabled:
model_kwargs["output_router_logits"] = True
outputs = model(
batch["completion_input_ids"],
attention_mask=batch["completion_attention_mask"],
**model_kwargs,
)
completion_logits = outputs.logits
completion_logps = self.get_batch_logps(
completion_logits,
batch["completion_labels"],
average_log_prob=False,
is_encoder_decoder=self.is_encoder_decoder,
label_pad_token_id=self.label_pad_token_id,
)
if completion_logps.shape[0] != len(batch["label"]):
raise ValueError(
"There is a mismatch between the number of examples in this batch and the number of "
"examples for which an output sequence was predicted."
)
chosen_idx = [i for i in range(completion_logps.shape[0]) if batch["label"][i] is True]
rejected_idx = [i for i in range(completion_logps.shape[0]) if batch["label"][i] is False]
chosen_logps = completion_logps[chosen_idx, ...]
rejected_logps = completion_logps[rejected_idx, ...]
chosen_logits = completion_logits[chosen_idx, ...]
rejected_logits = completion_logits[rejected_idx, ...]
if self.aux_loss_enabled:
return (chosen_logps, rejected_logps, chosen_logits, rejected_logits, KL_logps, outputs.aux_loss)
else:
return (chosen_logps, rejected_logps, chosen_logits, rejected_logits, KL_logps)
def kto_loss(
self,
policy_chosen_logps: torch.FloatTensor,
policy_rejected_logps: torch.FloatTensor,
policy_KL_logps: torch.FloatTensor,
reference_chosen_logps: torch.FloatTensor,
reference_rejected_logps: torch.FloatTensor,
reference_KL_logps: torch.FloatTensor,
) -> tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor, torch.FloatTensor]:
"""Compute the KTO loss for a batch of policy and reference model log probabilities.
Args:
policy_chosen_logps: Log probabilities of the policy model for the chosen responses. Shape: (num(chosen) in batch_size,)
policy_rejected_logps: Log probabilities of the policy model for the rejected responses. Shape: (num(rejected) in batch_size,)
policy_KL_logps: Log probabilities of the policy model for the KL responses. Shape: (batch_size,)
reference_chosen_logps: Log probabilities of the reference model for the chosen responses. Shape: (num(chosen) in batch_size,)
reference_rejected_logps: Log probabilities of the reference model for the rejected responses. Shape: (num(rejected) in batch_size,)
reference_KL_logps: Log probabilities of the reference model for the KL responses. Shape: (batch_size,)
Returns:
A tuple of four tensors: (losses, chosen_rewards, rejected_rewards, KL).
The losses tensor contains the KTO loss for each example in the batch.
The chosen_rewards and rejected_rewards tensors contain the rewards for the chosen and rejected responses, respectively.
The KL tensor contains the detached KL divergence estimate between the policy and reference models.
"""
if self.calculate_KL:
kl = (policy_KL_logps - reference_KL_logps).mean().detach()
kl = self.accelerator.gather_for_metrics(kl).mean().clamp(min=0)
else:
kl = torch.zeros(1).to(policy_chosen_logps.device)
# Chosen losses
if policy_chosen_logps.shape[0] != 0 or reference_chosen_logps.shape[0] != 0:
chosen_logratios = policy_chosen_logps - reference_chosen_logps
if self.loss_type == "kto":
# Eqn (7) of the KTO paper (https://huggingface.co/papers/2402.01306)
chosen_losses = 1 - F.sigmoid(self.beta * (chosen_logratios - kl))
elif self.loss_type == "apo_zero_unpaired":
# Unpaired variant of Eqn (7) of the APO paper (https://huggingface.co/papers/2408.06266)
# Use this loss when you believe the chosen outputs are better than your model's default output
chosen_losses = 1 - F.sigmoid(self.beta * chosen_logratios)
chosen_rewards = self.beta * chosen_logratios.detach()
else:
# lists can't be empty -- if they are, then accelerate.gather will hang
chosen_losses = torch.Tensor([]).to(self.accelerator.device)
chosen_rewards = torch.Tensor([]).to(self.accelerator.device)
# Rejected losses
if policy_rejected_logps.shape[0] != 0 or reference_rejected_logps.shape[0] != 0:
rejected_logratios = policy_rejected_logps - reference_rejected_logps
if self.loss_type == "kto":
rejected_losses = 1 - F.sigmoid(self.beta * (kl - rejected_logratios))
elif self.loss_type == "apo_zero_unpaired":
rejected_losses = F.sigmoid(self.beta * rejected_logratios)
rejected_rewards = self.beta * rejected_logratios.detach()
else:
# lists can't be empty -- if they are, then accelerate.gather will hang
rejected_losses = torch.Tensor([]).to(self.accelerator.device)
rejected_rewards = torch.Tensor([]).to(self.accelerator.device)
losses = torch.cat(
(self.desirable_weight * chosen_losses, self.undesirable_weight * rejected_losses),
0,
)
return losses, chosen_rewards, rejected_rewards, kl
def get_batch_loss_metrics(
self,
model,
batch: dict[str, Union[list, torch.LongTensor]],
):
"""Compute the KTO loss and other metrics for the given batch of inputs for train or test."""
metrics = {}
batch = {k: (v.to(self.accelerator.device) if isinstance(v, torch.Tensor) else v) for k, v in batch.items()}
forward_output = self.forward(model, batch)
(
policy_chosen_logps,
policy_rejected_logps,
policy_chosen_logits,
policy_rejected_logits,
policy_KL_logps,
) = forward_output[:5]
if self.aux_loss_enabled:
aux_loss = forward_output[5]
# if reference_logps in batch use them, otherwise use the reference model
if "reference_logps" in batch:
chosen_idx = [i for i in range(batch["reference_logps"].shape[0]) if batch["label"][i] is True]
rejected_idx = [i for i in range(batch["reference_logps"].shape[0]) if batch["label"][i] is False]
reference_chosen_logps = batch["reference_logps"][chosen_idx, ...]
reference_rejected_logps = batch["reference_logps"][rejected_idx, ...]
if self.calculate_KL:
reference_KL_logps = batch["reference_KL_logps"]
else:
reference_KL_logps = None
else:
with torch.no_grad():
if self.ref_model is None:
with self.null_ref_context():
(
reference_chosen_logps,
reference_rejected_logps,
_,
_,
reference_KL_logps,
) = self.forward(self.model, batch)[:5]
else:
(
reference_chosen_logps,
reference_rejected_logps,
_,
_,
reference_KL_logps,
) = self.forward(self.ref_model, batch)[:5]
losses, chosen_rewards, rejected_rewards, kl = self.kto_loss(
policy_chosen_logps,
policy_rejected_logps,
policy_KL_logps,
reference_chosen_logps,
reference_rejected_logps,
reference_KL_logps,
)
metrics["kl"] = kl.item()
num_chosen = torch.Tensor([len(chosen_rewards)]).to(self.accelerator.device)
num_rejected = torch.Tensor([len(rejected_rewards)]).to(self.accelerator.device)
all_num_chosen = self.accelerator.gather_for_metrics(num_chosen).sum().item()
all_num_rejected = self.accelerator.gather_for_metrics(num_rejected).sum().item()
if all_num_chosen > 0:
metrics["rewards/chosen_sum"] = (
self.accelerator.gather_for_metrics(chosen_rewards.nansum()).nansum().item()
)
metrics["logps/chosen_sum"] = (
self.accelerator.gather_for_metrics(policy_chosen_logps.nansum()).nansum().item()
)
metrics["logits/chosen_sum"] = (
self.accelerator.gather_for_metrics(policy_chosen_logits.nansum()).nansum().item()
)
metrics["count/chosen"] = all_num_chosen
if all_num_rejected > 0:
metrics["rewards/rejected_sum"] = (
self.accelerator.gather_for_metrics(rejected_rewards.nansum()).nansum().item()
)
metrics["logps/rejected_sum"] = (
self.accelerator.gather_for_metrics(policy_rejected_logps.nansum()).nansum().item()
)
metrics["logits/rejected_sum"] = (
self.accelerator.gather_for_metrics(policy_rejected_logits.nansum()).nansum().item()
)
metrics["count/rejected"] = all_num_rejected
loss = losses.nanmean()
if self.aux_loss_enabled:
loss += self.aux_loss_coef * aux_loss
return loss, metrics
def compute_loss(
self,
model: Union[PreTrainedModel, nn.Module],
inputs: dict[str, Union[torch.Tensor, Any]],
return_outputs=False,
num_items_in_batch=None,
) -> Union[torch.Tensor, tuple[torch.Tensor, dict[str, torch.Tensor]]]:
compute_loss_context_manager = amp.autocast("cuda") if self._peft_has_been_casted_to_bf16 else nullcontext()
with compute_loss_context_manager:
loss, metrics = self.get_batch_loss_metrics(model, inputs)
            # Make sure to move the loss to the same device as the accumulated loss held by the parent `Trainer` class:
loss = loss.to(self.args.device)
# force log the metrics
if self.accelerator.is_main_process:
self.store_metrics(metrics, train_eval="train")
if return_outputs:
return (loss, metrics)
return loss
def store_metrics(self, metrics: dict[str, float], train_eval: Literal["train", "eval"] = "train") -> None:
for key, value in metrics.items():
self._stored_metrics[train_eval][key].append(value)
def _get_train_sampler(self) -> Optional[torch.utils.data.Sampler]:
if self.train_dataset is None or not has_length(self.train_dataset):
return None
return SequentialSampler(self.train_dataset)
def generate_from_model_and_ref(self, model, batch: dict[str, torch.LongTensor]) -> tuple[str, str]:
"""Generate samples from the model and reference model for the given batch of inputs."""
# If one uses `generate_during_eval` with peft + bf16, we need to explicitly call generate with
# the torch cuda amp context manager as some hidden states are silently casted to full precision.
generate_context_manager = amp.autocast("cuda") if self._peft_has_been_casted_to_bf16 else nullcontext()
with generate_context_manager:
policy_output = model.generate(
input_ids=batch["prompt_input_ids"],
attention_mask=batch["prompt_attention_mask"],
max_length=self.max_length,
do_sample=True,
pad_token_id=self.processing_class.pad_token_id,
)
# if reference_output in batch use that otherwise use the reference model
if "reference_output" in batch:
reference_output = batch["reference_output"]
else:
if self.ref_model is None:
with self.null_ref_context():
reference_output = self.model.generate(
input_ids=batch["prompt_input_ids"],
attention_mask=batch["prompt_attention_mask"],
max_length=self.max_length,
do_sample=True,
pad_token_id=self.processing_class.pad_token_id,
)
else:
reference_output = self.ref_model.generate(
input_ids=batch["prompt_input_ids"],
attention_mask=batch["prompt_attention_mask"],
max_length=self.max_length,
do_sample=True,
pad_token_id=self.processing_class.pad_token_id,
)
policy_output = pad_to_length(policy_output, self.max_length, self.processing_class.pad_token_id)
policy_output_decoded = self.processing_class.batch_decode(policy_output, skip_special_tokens=True)
reference_output = pad_to_length(reference_output, self.max_length, self.processing_class.pad_token_id)
reference_output_decoded = self.processing_class.batch_decode(reference_output, skip_special_tokens=True)
return policy_output_decoded, reference_output_decoded
def prediction_step(
self,
model: Union[PreTrainedModel, nn.Module],
inputs: dict[str, Union[torch.Tensor, Any]],
prediction_loss_only: bool,
ignore_keys: Optional[list[str]] = None,
):
if ignore_keys is None:
if hasattr(model, "config"):
ignore_keys = getattr(model.config, "keys_to_ignore_at_inference", [])
else:
ignore_keys = []
prediction_context_manager = amp.autocast("cuda") if self._peft_has_been_casted_to_bf16 else nullcontext()
with torch.no_grad(), prediction_context_manager:
loss, metrics = self.get_batch_loss_metrics(model, inputs)
# force log the metrics
if self.accelerator.is_main_process:
self.store_metrics(metrics, train_eval="eval")
if prediction_loss_only:
return (loss.detach(), None, None)
# logits for the chosen and rejected samples from model
logits_dict = {
"eval_logits/chosen": metrics["logits/chosen"],
"eval_logits/rejected": metrics["logits/rejected"],
}
logits = torch.tensor(
[v for k, v in logits_dict.items() if k not in ignore_keys], device=self.accelerator.device
)
labels = torch.zeros(logits.shape[0], device=self.accelerator.device)
return (loss.detach(), logits, labels)
def evaluation_loop(
self,
dataloader: DataLoader,
description: str,
prediction_loss_only: Optional[bool] = None,
ignore_keys: Optional[list[str]] = None,
metric_key_prefix: str = "eval",
) -> EvalLoopOutput:
"""
Overriding built-in evaluation loop to store metrics for each batch.
Prediction/evaluation loop, shared by `Trainer.evaluate()` and `Trainer.predict()`.
Works both with or without labels.
"""
# Sample and save to game log if requested (for one batch to save time)
if self.generate_during_eval:
# Generate random indices within the range of the total number of samples
num_samples = len(dataloader.dataset)
random_indices = random.sample(range(num_samples), k=self.args.eval_batch_size)
# Use dataloader.dataset.select to get the random batch without iterating over the DataLoader
random_batch_dataset = dataloader.dataset.select(random_indices)
random_batch = self.data_collator(random_batch_dataset)
random_batch = self._prepare_inputs(random_batch)
            target_indices = [i for i in range(len(random_batch["label"])) if random_batch["label"][i] is False]
            target_batch = {
                "prompt_input_ids": random_batch["prompt_input_ids"][target_indices],
                "prompt_attention_mask": random_batch["prompt_attention_mask"][target_indices],
                "prompt": itemgetter(*target_indices)(random_batch["prompt"]),
}
policy_output_decoded, ref_output_decoded = self.generate_from_model_and_ref(self.model, target_batch)
table = pd.DataFrame(
columns=["Prompt", "Policy", "Ref Model"],
data=[
[prompt, pol[len(prompt) :], ref[len(prompt) :]]
for prompt, pol, ref in zip(target_batch["prompt"], policy_output_decoded, ref_output_decoded)
],
)
if "wandb" in self.args.report_to:
wandb.log({"game_log": wandb.Table(data=table)})
if "comet_ml" in self.args.report_to:
log_table_to_comet_experiment(
name="game_log.csv",
table=table,
)
# Base evaluation
initial_output = super().evaluation_loop(
dataloader, description, prediction_loss_only, ignore_keys, metric_key_prefix
)
return initial_output
def log(self, logs: dict[str, float], start_time: Optional[float] = None) -> None:
"""
Log `logs` on the various objects watching training, including stored metrics.
Args:
logs (`dict[str, float]`):
The values to log.
start_time (`float` or `None`, *optional*, defaults to `None`):
Start time of the training.
"""
# logs either has 'loss' or 'eval_loss'
train_eval = "train" if "loss" in logs else "eval"
# train metrics should have no prefix, eval should have 'eval_'
prefix = "eval_" if train_eval == "eval" else ""
# accumulate average metrics from sums and lengths
for split in ["chosen", "rejected"]:
if f"count/{split}" in self._stored_metrics[train_eval]:
count_sum = torch.Tensor(self._stored_metrics[train_eval][f"count/{split}"]).sum().item()
for metric in ["rewards", "logps", "logits"]:
logs[f"{prefix}{metric}/{split}"] = (
torch.Tensor(self._stored_metrics[train_eval][f"{metric}/{split}_sum"]).sum().item()
/ count_sum
)
# delete obsolete metric
del self._stored_metrics[train_eval][f"{metric}/{split}_sum"]
del self._stored_metrics[train_eval][f"count/{split}"]
# calculate reward margin
if f"{prefix}rewards/chosen" in logs and f"{prefix}rewards/rejected" in logs:
logs[f"{prefix}rewards/margins"] = logs[f"{prefix}rewards/chosen"] - logs[f"{prefix}rewards/rejected"]
# Add averaged stored metrics to logs
for key, metrics in self._stored_metrics[train_eval].items():
logs[f"{prefix}{key}"] = torch.Tensor(metrics).mean().item()
del self._stored_metrics[train_eval]
if version.parse(transformers.__version__) >= version.parse("4.47.0.dev0"):
return super().log(logs, start_time)
else: # transformers<=4.46
return super().log(logs)
def create_model_card(
self,
model_name: Optional[str] = None,
dataset_name: Optional[str] = None,
tags: Union[str, list[str], None] = None,
):
"""
Creates a draft of a model card using the information available to the `Trainer`.
Args:
model_name (`str` or `None`, *optional*, defaults to `None`):
Name of the model.
dataset_name (`str` or `None`, *optional*, defaults to `None`):
Name of the dataset used for training.
tags (`str`, `list[str]` or `None`, *optional*, defaults to `None`):
Tags to be associated with the model card.
"""
if not self.is_world_process_zero():
return
if hasattr(self.model.config, "_name_or_path") and not os.path.isdir(self.model.config._name_or_path):
base_model = self.model.config._name_or_path
else:
base_model = None
tags = tags or []
if isinstance(tags, str):
tags = [tags]
if hasattr(self.model.config, "unsloth_version"):
tags.append("unsloth")
citation = textwrap.dedent("""\
@article{ethayarajh2024kto,
title = {{KTO: Model Alignment as Prospect Theoretic Optimization}},
author = {Kawin Ethayarajh and Winnie Xu and Niklas Muennighoff and Dan Jurafsky and Douwe Kiela},
year = 2024,
eprint = {arXiv:2402.01306},
}""")
model_card = generate_model_card(
base_model=base_model,
model_name=model_name,
hub_model_id=self.hub_model_id,
dataset_name=dataset_name,
tags=tags,
wandb_url=wandb.run.get_url() if is_wandb_available() and wandb.run is not None else None,
comet_url=get_comet_experiment_url(),
trainer_name="KTO",
trainer_citation=citation,
paper_title="KTO: Model Alignment as Prospect Theoretic Optimization",
paper_id="2402.01306",
)
model_card.save(os.path.join(self.args.output_dir, "README.md"))
| trl/trl/trainer/kto_trainer.py/0 | {
"file_path": "trl/trl/trainer/kto_trainer.py",
"repo_id": "trl",
"token_count": 34887
} |
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from dataclasses import dataclass, field
from typing import Any, Optional
from transformers import TrainingArguments
@dataclass
class SFTConfig(TrainingArguments):
r"""
Configuration class for the [`SFTTrainer`].
Only the parameters specific to SFT training are listed here. For details on other parameters, refer to the
[`~transformers.TrainingArguments`] documentation.
Using [`~transformers.HfArgumentParser`] we can turn this class into
[argparse](https://docs.python.org/3/library/argparse#module-argparse) arguments that can be specified on the
command line.
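
    For example (an illustrative sketch; the resulting object is a regular `SFTConfig` instance):

    ```python
    from transformers import HfArgumentParser

    from trl import SFTConfig

    parser = HfArgumentParser(SFTConfig)
    (sft_config,) = parser.parse_args_into_dataclasses()
    ```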
Parameters:
> Parameters that control the model
model_init_kwargs (`dict[str, Any]` or `None`, *optional*, defaults to `None`):
Keyword arguments for [`~transformers.AutoModelForCausalLM.from_pretrained`], used when the `model`
argument of the [`SFTTrainer`] is provided as a string.
use_liger (`bool`, *optional*, defaults to `False`):
Monkey patch the model with Liger kernels to increase throughput and reduce memory usage.
> Parameters that control the data preprocessing
dataset_text_field (`str`, *optional*, defaults to `"text"`):
Name of the column that contains text data in the dataset.
dataset_kwargs (`dict[str, Any]` or `None`, *optional*, defaults to `None`):
Dictionary of optional keyword arguments for the dataset preparation. The only supported key is
`skip_prepare_dataset`.
dataset_num_proc (`int` or `None`, *optional*, defaults to `None`):
Number of processes to use for processing the dataset.
max_seq_length (`int` or `None`, *optional*, defaults to `1024`):
Maximum length of the tokenized sequence. Sequences longer than `max_seq_length` are truncated from the
right.
If `None`, no truncation is applied. When packing is enabled, this value sets the sequence length.
packing (`bool`, *optional*, defaults to `False`):
Whether to pack multiple sequences into a fixed-length format. Uses `max_seq_length` to define sequence
length.
eval_packing (`bool` or `None`, *optional*, defaults to `None`):
Whether to pack the eval dataset. If `None`, uses the same value as `packing`.
> Parameters that control the training
learning_rate (`float`, *optional*, defaults to `2e-5`):
Initial learning rate for [`AdamW`] optimizer. The default value replaces that of
[`~transformers.TrainingArguments`].
"""
# Parameters that control the model
model_init_kwargs: Optional[dict[str, Any]] = field(
default=None,
metadata={
"help": "Keyword arguments for `AutoModelForCausalLM.from_pretrained`, used when the `model` argument of "
"the `SFTTrainer` is provided as a string."
},
)
use_liger: bool = field(
default=False,
metadata={"help": "Monkey patch the model with Liger kernels to increase throughput and reduce memory usage."},
)
# Parameters that control the data preprocessing
dataset_text_field: str = field(
default="text",
metadata={"help": "Name of the column that contains text data in the dataset."},
)
dataset_kwargs: Optional[dict[str, Any]] = field(
default=None,
metadata={
"help": "Dictionary of optional keyword arguments for the dataset preparation. The only supported key is "
"`skip_prepare_dataset`."
},
)
dataset_num_proc: Optional[int] = field(
default=None,
metadata={"help": "Number of processes to use for processing the dataset."},
)
max_seq_length: Optional[int] = field(
default=1024,
metadata={
"help": "Maximum length of the tokenized sequence. Sequences longer than `max_seq_length` are truncated "
"from the right. If `None`, no truncation is applied. When packing is enabled, this value sets the "
"sequence length."
},
)
packing: bool = field(
default=False,
metadata={
"help": "Whether to pack multiple sequences into a fixed-length format. Uses `max_seq_length` to "
"define sequence length."
},
)
eval_packing: Optional[bool] = field(
default=None,
metadata={"help": "Whether to pack the eval dataset. If `None`, uses the same value as `packing`."},
)
# Parameters that control the training
learning_rate: float = field(
default=2.0e-5,
metadata={
"help": "Initial learning rate for `AdamW` optimizer. The default value replaces that of "
"`TrainingArguments`."
},
)
# Deprecated parameters
    dataset_batch_size: Optional[int] = field(
default=None,
metadata={"help": "Deprecated. You can safely remove this parameter from your configuration."},
)
    num_of_sequences: Optional[int] = field(
default=None,
metadata={
"help": "Deprecated. Use `max_seq_length` instead, which specifies the maximum length of the tokenized "
"sequence, unlike `num_of_sequences`, which referred to string sequences."
},
)
    chars_per_token: Optional[float] = field(
default=None,
metadata={"help": "Deprecated. If you want to customize the packing length, use `max_seq_length`."},
)
def __post_init__(self):
super().__post_init__()
if self.dataset_batch_size is not None:
warnings.warn(
"`dataset_batch_size` is deprecated and will be remove in version 0.18.0. You can safely remove this "
"parameter from your configuration.",
DeprecationWarning,
)
if self.num_of_sequences is not None:
warnings.warn(
"`num_of_sequences` is deprecated and will be remove in version 0.18.0. Use `max_seq_length` instead, "
"which specifies the maximum length of the tokenized sequence, unlike `num_of_sequences`, which r"
"eferred to string sequences.",
DeprecationWarning,
)
if self.chars_per_token is not None:
warnings.warn(
"`chars_per_token` is deprecated and will be remove in version 0.18.0. If you want to customize the "
"packing length, use `max_seq_length`.",
DeprecationWarning,
)
| trl/trl/trainer/sft_config.py/0 | {
"file_path": "trl/trl/trainer/sft_config.py",
"repo_id": "trl",
"token_count": 2682
} |
FROM ghcr.io/azure/msamp
RUN pip install transformers evaluate datasets
RUN git clone https://github.com/huggingface/accelerate
RUN cd accelerate && \
pip install -e . && \
cd benchmarks/fp8
CMD ["bash"]
| accelerate/benchmarks/fp8/ms_amp/Dockerfile/0 | {
"file_path": "accelerate/benchmarks/fp8/ms_amp/Dockerfile",
"repo_id": "accelerate",
"token_count": 78
} |
<!--Copyright 2022 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# Intel® Extension for PyTorch
[IPEX](https://github.com/intel/intel-extension-for-pytorch) is optimized for CPUs with AVX-512 or above, and functionally works for CPUs with only AVX2. It is therefore expected to bring a performance benefit on Intel CPU generations with AVX-512 or above, while CPUs with only AVX2 (e.g., AMD CPUs or older Intel CPUs) may also see improved performance under IPEX, but this is not guaranteed. IPEX provides performance optimizations for CPU training with both Float32 and BFloat16. The usage of BFloat16 is the main focus of the following sections.
The low precision data type BFloat16 has been natively supported on 3rd Generation Xeon® Scalable Processors (aka Cooper Lake) with the AVX512 instruction set, and will be supported on the next generation of Intel® Xeon® Scalable Processors with the Intel® Advanced Matrix Extensions (Intel® AMX) instruction set, bringing further performance gains. Auto Mixed Precision for the CPU backend has been enabled since PyTorch 1.10. At the same time, support for Auto Mixed Precision with BFloat16 on CPU and BFloat16 optimization of operators has been extensively enabled in Intel® Extension for PyTorch, and partially upstreamed to the PyTorch master branch. Users can get better performance and user experience with IPEX Auto Mixed Precision.
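
For reference, this is roughly what IPEX BFloat16 auto mixed precision looks like when used directly, outside of Accelerate (a minimal sketch; `model`, `optimizer`, and `batch` — a dict of tensors for a Hugging Face-style model — are assumed to already exist):

```python
import torch
import intel_extension_for_pytorch as ipex

# ipex.optimize returns an optimized (model, optimizer) pair when an optimizer is passed
model, optimizer = ipex.optimize(model, optimizer=optimizer, dtype=torch.bfloat16)

with torch.cpu.amp.autocast(dtype=torch.bfloat16):
    loss = model(**batch).loss
loss.backward()
optimizer.step()
```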
## IPEX installation:
IPEX releases follow PyTorch versions. To install via pip:
| PyTorch Version | IPEX version |
| :---------------: | :----------: |
| 2.0 | 2.0.0 |
| 1.13 | 1.13.0 |
| 1.12 | 1.12.300 |
| 1.11 | 1.11.200 |
| 1.10 | 1.10.100 |
```
pip install intel_extension_for_pytorch==<version_name> -f https://developer.intel.com/ipex-whl-stable-cpu
```
Check more approaches for [IPEX installation](https://intel.github.io/intel-extension-for-pytorch/cpu/latest/tutorials/installation.html).
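
After installation, a quick sanity check (illustrative, not part of the official installation guide) is to import the extension and print its version:

```python
import torch
import intel_extension_for_pytorch as ipex

print(torch.__version__, ipex.__version__)
```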
## How It Works For Training Optimization on CPU
Accelerate has integrated [IPEX](https://github.com/intel/intel-extension-for-pytorch); all you need to do is enable it through the config.
**Scenario 1**: Acceleration of non-distributed CPU training
Run <u>accelerate config</u> on your machine:
```bash
$ accelerate config
-----------------------------------------------------------------------------------------------------------------------------------------------------------
In which compute environment are you running?
This machine
-----------------------------------------------------------------------------------------------------------------------------------------------------------
Which type of machine are you using?
No distributed training
Do you want to run your training on CPU only (even if a GPU / Apple Silicon device is available)? [yes/NO]:yes
Do you want to use Intel PyTorch Extension (IPEX) to speed up training on CPU? [yes/NO]:yes
Do you wish to optimize your script with torch dynamo?[yes/NO]:NO
Do you want to use DeepSpeed? [yes/NO]: NO
-----------------------------------------------------------------------------------------------------------------------------------------------------------
Do you wish to use FP16 or BF16 (mixed precision)?
bf16
```
This will generate a config file that will be used automatically to properly set the
default options when doing
```bash
accelerate launch my_script.py --args_to_my_script
```
For instance, here is how you would run the NLP example `examples/nlp_example.py` (from the root of the repo) with IPEX enabled.
Below is the `default_config.yaml` that is generated after running `accelerate config`:
```bash
compute_environment: LOCAL_MACHINE
distributed_type: 'NO'
downcast_bf16: 'no'
ipex_config:
ipex: true
machine_rank: 0
main_training_function: main
mixed_precision: bf16
num_machines: 1
num_processes: 1
rdzv_backend: static
same_network: true
tpu_env: []
tpu_use_cluster: false
tpu_use_sudo: false
use_cpu: true
```
```bash
accelerate launch examples/nlp_example.py
```
**Scenario 2**: Acceleration of distributed CPU training
We use Intel oneCCL for communication, combined with the Intel® MPI library, to deliver flexible, efficient, scalable cluster messaging on Intel® architecture. You can refer to [this guide](https://huggingface.co/docs/transformers/perf_train_cpu_many) for installation instructions.
Run <u>accelerate config</u> on your machine (node0):
```bash
$ accelerate config
-----------------------------------------------------------------------------------------------------------------------------------------------------------
In which compute environment are you running?
This machine
-----------------------------------------------------------------------------------------------------------------------------------------------------------
Which type of machine are you using?
multi-CPU
How many different machines will you use (use more than 1 for multi-node training)? [1]: 4
-----------------------------------------------------------------------------------------------------------------------------------------------------------
What is the rank of this machine?
0
What is the IP address of the machine that will host the main process? 36.112.23.24
What is the port you will use to communicate with the main process? 29500
Are all the machines on the same local network? Answer `no` if nodes are on the cloud and/or on different network hosts [YES/no]: yes
Do you want to use Intel PyTorch Extension (IPEX) to speed up training on CPU? [yes/NO]:yes
Do you want accelerate to launch mpirun? [yes/NO]: yes
Please enter the path to the hostfile to use with mpirun [~/hostfile]: ~/hostfile
Enter the number of oneCCL worker threads [1]: 1
Do you wish to optimize your script with torch dynamo?[yes/NO]:NO
How many processes should be used for distributed training? [1]:16
-----------------------------------------------------------------------------------------------------------------------------------------------------------
Do you wish to use FP16 or BF16 (mixed precision)?
bf16
```
For instance, here is how you would run the NLP example `examples/nlp_example.py` (from the root of the repo) with IPEX enabled for distributed CPU training.
Below is the `default_config.yaml` that is generated after running `accelerate config`:
```bash
compute_environment: LOCAL_MACHINE
distributed_type: MULTI_CPU
downcast_bf16: 'no'
ipex_config:
ipex: true
machine_rank: 0
main_process_ip: 36.112.23.24
main_process_port: 29500
main_training_function: main
mixed_precision: bf16
mpirun_config:
mpirun_ccl: '1'
mpirun_hostfile: /home/user/hostfile
num_machines: 4
num_processes: 16
rdzv_backend: static
same_network: true
tpu_env: []
tpu_use_cluster: false
tpu_use_sudo: false
use_cpu: true
```
Set the following environment variables and use Intel MPI to launch the training.
On node0, you need to create a configuration file that contains the IP addresses of each node (for example, a hostfile) and pass that configuration file path as an argument.
If you selected to have Accelerate launch `mpirun`, ensure that the location of your hostfile matches the path in the config.
```bash
$ cat hostfile
xxx.xxx.xxx.xxx #node0 ip
xxx.xxx.xxx.xxx #node1 ip
xxx.xxx.xxx.xxx #node2 ip
xxx.xxx.xxx.xxx #node3 ip
```
When Accelerate is launching `mpirun`, source the oneCCL bindings setvars.sh to get your Intel MPI environment, and then
run your script using `accelerate launch`. Note that the Python script and environment need to exist on all of the
machines being used for multi-CPU training.
```bash
oneccl_bindings_for_pytorch_path=$(python -c "from oneccl_bindings_for_pytorch import cwd; print(cwd)")
source $oneccl_bindings_for_pytorch_path/env/setvars.sh
accelerate launch examples/nlp_example.py
```
Otherwise, if you selected not to have Accelerate launch `mpirun`, run the following command on node0 and **16DDP** will
be enabled on node0, node1, node2, and node3 with BF16 mixed precision. When using this method, the Python script, Python
environment, and Accelerate config file need to be present on all of the machines used for multi-CPU training.
```bash
oneccl_bindings_for_pytorch_path=$(python -c "from oneccl_bindings_for_pytorch import cwd; print(cwd)")
source $oneccl_bindings_for_pytorch_path/env/setvars.sh
export CCL_WORKER_COUNT=1
export MASTER_ADDR=xxx.xxx.xxx.xxx #node0 ip
export CCL_ATL_TRANSPORT=ofi
mpirun -f hostfile -n 16 -ppn 4 accelerate launch examples/nlp_example.py
```
## Related Resources
- [Project's github](https://github.com/intel/intel-extension-for-pytorch)
- [API docs](https://intel.github.io/intel-extension-for-pytorch/cpu/latest/tutorials/api_doc.html)
- [Tuning guide](https://intel.github.io/intel-extension-for-pytorch/cpu/latest/tutorials/performance_tuning/tuning_guide.html)
- [Blogs & Publications](https://intel.github.io/intel-extension-for-pytorch/cpu/latest/tutorials/blogs_publications.html)
| accelerate/docs/source/usage_guides/ipex.md/0 | {
"file_path": "accelerate/docs/source/usage_guides/ipex.md",
"repo_id": "accelerate",
"token_count": 2634
} |
# Similar to FSDP, we set the distributed type as DEEPSPEED
distributed_type: DEEPSPEED
# With DeepSpeed, we utilize a deepspeed config file for the entire configuration
deepspeed_config:
# Can also be any of the config json's in accelerate/examples/deepspeed_config_templates
deepspeed_config_file: ../deepspeed_config_templates/zero_stage1_config.json
# If using ZeRO-3 and wanting to load big models in, this should be set to `true` so
# `transformers` uses the right `init` function
zero3_init_flag: false # true
# Finally we need to specify the number of GPUs to use
num_processes: 2
# Optionally we can set the mixed precision now instead of in the deepspeed config file,
# however this requires the `fp16` and `bf16` options to be set to `auto` in the deepspeed config file
# mixed_precision: "bf16"
| accelerate/examples/config_yaml_templates/deepspeed.yaml/0 | {
"file_path": "accelerate/examples/config_yaml_templates/deepspeed.yaml",
"repo_id": "accelerate",
"token_count": 239
} |
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import pathlib
import queue
from concurrent.futures import ThreadPoolExecutor
from functools import partial
from typing import Union
import fire
import torch
import webdataset as wds
from huggingface_hub.utils import insecure_hashlib
from PIL import Image
from tqdm import tqdm
from transformers import AutoModelForCausalLM, AutoProcessor
from accelerate import PartialState
"""
Additional requirements: flash_attn einops timm webdataset fire tqdm huggingface_hub
pip install flash_attn einops timm webdataset fire tqdm huggingface_hub
Example:
accelerate launch --num_processes=2 florence2.py --data_path "https://huggingface.co/datasets/pixparse/cc3m-wds/resolve/main/cc3m-train-0000.tar" --output_path outputs --batch_size 12 --num_workers 1 --prompt "<CAPTION>"
"""
def main(
data_path: str,
output_path: str,
batch_size: int,
num_workers: int,
prompt: str = "<MORE_DETAILED_CAPTION>",
model_name: str = "microsoft/Florence-2-large",
max_new_tokens: int = 1024,
num_beams: int = 3,
):
output_dir = pathlib.Path(output_path)
distributed_state = PartialState()
if distributed_state.is_main_process:
output_dir.mkdir(exist_ok=True)
model = AutoModelForCausalLM.from_pretrained(
model_name,
device_map=distributed_state.device,
torch_dtype=torch.float16,
trust_remote_code=True,
)
processor = AutoProcessor.from_pretrained(model_name, trust_remote_code=True, clean_up_tokenization_spaces=True)
class ExistsFilter:
def __init__(self, output_dir: Union[pathlib.Path, str]):
current_training_img_hashes = [f.split(".jpg")[0] for f in os.listdir(output_dir) if f.endswith(".jpg")]
self.current_training_img_hashes = set(current_training_img_hashes)
if distributed_state.is_main_process:
print(f"Existing images found: {len(self.current_training_img_hashes)}.")
def __call__(self, x):
if len(self.current_training_img_hashes) > 0:
if x["img_hash"] in self.current_training_img_hashes:
return False
else:
return True
else:
return True
def preprocess_fn(sample, processor):
image: Image.Image = sample["jpg"].convert("RGB")
img_hash = insecure_hashlib.sha1(image.tobytes()).hexdigest()
inputs = processor(
text=prompt,
images=image,
return_tensors="pt",
)
return {
"input_ids": inputs["input_ids"],
"pixel_values": inputs["pixel_values"],
"image": image,
"img_hash": img_hash,
"original_caption": sample["txt"],
}
def collate_fn(examples):
input_ids = torch.cat([example["input_ids"] for example in examples])
pixel_values = torch.cat([example["pixel_values"] for example in examples])
images = [example["image"] for example in examples]
img_hashes = [example["img_hash"] for example in examples]
captions = [example["original_caption"] for example in examples]
return {
"input_ids": input_ids,
"pixel_values": pixel_values,
"images": images,
"img_hashes": img_hashes,
"original_captions": captions,
}
exist_filter = ExistsFilter(output_dir)
dataset = (
wds.WebDataset(
data_path,
handler=wds.warn_and_continue,
nodesplitter=None,
shardshuffle=False,
empty_check=False,
)
.decode("pil", handler=wds.warn_and_continue)
.map(partial(preprocess_fn, processor=processor), handler=wds.warn_and_continue)
)
if len(exist_filter.current_training_img_hashes) > 0:
dataset = dataset.select(exist_filter)
dataset = dataset.batched(
batch_size,
partial=False,
collation_fn=collate_fn,
)
dataloader = wds.WebLoader(
dataset,
batch_size=None,
num_workers=num_workers,
pin_memory=True,
persistent_workers=True,
)
def save_results(output_queue: queue.Queue, output_dir: pathlib.Path, processor):
while True:
try:
item = output_queue.get(timeout=5)
if item is None:
break
original_captions, predictions, images, img_hashes = item
predicted_captions = processor.batch_decode(
predictions,
skip_special_tokens=False,
)
for caption, pred_caption, image, img_hash in zip(
original_captions, predicted_captions, images, img_hashes
):
processed_caption = processor.post_process_generation(
pred_caption, task=prompt, image_size=(image.width, image.height)
)[prompt]
img_path = output_dir.joinpath(f"{img_hash}.jpg")
image.save(img_path)
caption_dict = {"original": caption, "predicted": processed_caption}
with output_dir.joinpath(f"{img_hash}_caption.json").open("w") as f:
json.dump(caption_dict, f, indent=4)
except queue.Empty:
continue
output_queue = queue.Queue()
save_thread = ThreadPoolExecutor(max_workers=num_workers)
save_future = save_thread.submit(save_results, output_queue, output_dir, processor)
try:
for _, batch_raw in tqdm(
enumerate(dataloader),
disable=not distributed_state.is_main_process,
):
with distributed_state.split_between_processes(batch_raw) as batch:
outputs = model.generate(
input_ids=batch["input_ids"].to(distributed_state.device),
pixel_values=batch["pixel_values"].to(distributed_state.device, model.dtype),
max_new_tokens=max_new_tokens,
num_beams=num_beams,
)
output_queue.put(
(
batch["original_captions"],
outputs,
batch["images"],
batch["img_hashes"],
)
)
finally:
output_queue.put(None)
save_thread.shutdown(wait=True)
save_future.result()
if __name__ == "__main__":
fire.Fire(main)
| accelerate/examples/inference/distributed/florence2.py/0 | {
"file_path": "accelerate/examples/inference/distributed/florence2.py",
"repo_id": "accelerate",
"token_count": 3297
} |
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__version__ = "1.4.0.dev0"
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .inference import prepare_pippy
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
AutocastKwargs,
DataLoaderConfiguration,
DDPCommunicationHookType,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
ProfileKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
| accelerate/src/accelerate/__init__.py/0 | {
"file_path": "accelerate/src/accelerate/__init__.py",
"repo_id": "accelerate",
"token_count": 500
} |
#!/usr/bin/env python
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import importlib
import logging
import os
import subprocess
import sys
from pathlib import Path
import psutil
import torch
from accelerate.commands.config import default_config_file, load_config_from_file
from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.commands.config.config_utils import DYNAMO_BACKENDS
from accelerate.commands.utils import CustomArgumentParser
from accelerate.state import get_int_from_env
from accelerate.utils import (
ComputeEnvironment,
DistributedType,
PrepareForLaunch,
_filter_args,
check_cuda_p2p_ib_support,
convert_dict_to_env_variables,
is_bf16_available,
is_deepspeed_available,
is_mlu_available,
is_musa_available,
is_npu_available,
is_rich_available,
is_sagemaker_available,
is_torch_xla_available,
is_xpu_available,
patch_environment,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
str_to_bool,
)
from accelerate.utils.constants import DEEPSPEED_MULTINODE_LAUNCHERS, TORCH_DYNAMO_MODES
if is_rich_available():
from rich import get_console
from rich.logging import RichHandler
FORMAT = "%(message)s"
logging.basicConfig(format=FORMAT, datefmt="[%X]", handlers=[RichHandler()])
logger = logging.getLogger(__name__)
options_to_group = {
"multi_gpu": "Distributed GPUs",
"tpu": "TPU",
"use_deepspeed": "DeepSpeed Arguments",
"use_fsdp": "FSDP Arguments",
"use_tp": "PyTorch TP Arguments",
"use_megatron_lm": "Megatron-LM Arguments",
"fp8_backend": "FP8 Arguments",
}
def clean_option(option):
"Finds all cases of - after the first two characters and changes them to _"
if "fp8_backend" in option:
option = "--fp8_backend"
if option.startswith("--"):
return option[2:].replace("-", "_")
class CustomHelpFormatter(argparse.HelpFormatter):
"""
This is a custom help formatter that will hide all arguments that are not used in the command line when the help is
called. This is useful for the case where the user is using a specific platform and only wants to see the arguments
for that platform.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.titles = [
"Hardware Selection Arguments",
"Resource Selection Arguments",
"Training Paradigm Arguments",
"positional arguments",
"optional arguments",
]
def add_argument(self, action: argparse.Action):
if "accelerate" in sys.argv[0] and "launch" in sys.argv[1:]:
args = sys.argv[2:]
else:
args = sys.argv[1:]
if len(args) > 1:
args = list(map(clean_option, args))
used_platforms = [arg for arg in args if arg in options_to_group.keys()]
used_titles = [options_to_group[o] for o in used_platforms]
if action.container.title not in self.titles + used_titles:
action.help = argparse.SUPPRESS
elif action.container.title == "Hardware Selection Arguments":
if set(action.option_strings).isdisjoint(set(args)):
action.help = argparse.SUPPRESS
else:
action.help = action.help + " (currently selected)"
elif action.container.title == "Training Paradigm Arguments":
if set(action.option_strings).isdisjoint(set(args)):
action.help = argparse.SUPPRESS
else:
action.help = action.help + " (currently selected)"
action.option_strings = [s for s in action.option_strings if "-" not in s[2:]]
super().add_argument(action)
def end_section(self):
if len(self._current_section.items) < 2:
self._current_section.items = []
self._current_section.heading = ""
super().end_section()
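# Illustrative effect of the formatter above: running `accelerate launch --use_deepspeed -h` should keep
# the base sections plus the "DeepSpeed Arguments" group visible, while the FSDP, TPU, Megatron-LM and
# other platform-specific groups are suppressed from the help output.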
def launch_command_parser(subparsers=None):
description = "Launch a python script in a distributed scenario. Arguments can be passed in with either hyphens (`--num-processes=2`) or underscores (`--num_processes=2`)"
if subparsers is not None:
parser = subparsers.add_parser(
"launch", description=description, add_help=False, allow_abbrev=False, formatter_class=CustomHelpFormatter
)
else:
parser = CustomArgumentParser(
"Accelerate launch command",
description=description,
add_help=False,
allow_abbrev=False,
formatter_class=CustomHelpFormatter,
)
parser.add_argument("-h", "--help", action="help", help="Show this help message and exit.")
parser.add_argument(
"--config_file",
default=None,
help="The config file to use for the default values in the launching script.",
)
parser.add_argument(
"--quiet",
"-q",
action="store_true",
help="Silence subprocess errors from the launch stack trace and only show the relevant tracebacks. (Only applicable to DeepSpeed and single-process configurations)",
)
# Hardware selection arguments
hardware_args = parser.add_argument_group(
"Hardware Selection Arguments", "Arguments for selecting the hardware to be used."
)
hardware_args.add_argument(
"--cpu", default=False, action="store_true", help="Whether or not to force the training on the CPU."
)
hardware_args.add_argument(
"--multi_gpu",
default=False,
action="store_true",
help="Whether or not this should launch a distributed GPU training.",
)
hardware_args.add_argument(
"--tpu", default=False, action="store_true", help="Whether or not this should launch a TPU training."
)
hardware_args.add_argument(
"--ipex",
default=False,
action="store_true",
help="Whether or not this should launch a Intel PyTorch Extension (IPEX) training.",
)
# Resource selection arguments
resource_args = parser.add_argument_group(
"Resource Selection Arguments", "Arguments for fine-tuning how available hardware should be used."
)
resource_args.add_argument(
"--mixed_precision",
type=str,
choices=["no", "fp16", "bf16", "fp8"],
help="Whether or not to use mixed precision training. "
"Choose between FP16 and BF16 (bfloat16) training. "
"BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.",
)
resource_args.add_argument(
"--num_processes", type=int, default=None, help="The total number of processes to be launched in parallel."
)
resource_args.add_argument(
"--num_machines", type=int, default=None, help="The total number of machines used in this training."
)
resource_args.add_argument(
"--num_cpu_threads_per_process",
type=int,
default=None,
help="The number of CPU threads per process. Can be tuned for optimal performance.",
)
resource_args.add_argument(
"--enable_cpu_affinity",
default=False,
action="store_true",
help="Whether or not CPU affinity and balancing should be enabled. Currently only supported on NVIDIA hardware.",
)
# Dynamo arguments
resource_args.add_argument(
"--dynamo_backend",
type=str,
choices=["no"] + [b.lower() for b in DYNAMO_BACKENDS],
help="Choose a backend to optimize your training with dynamo, see more at "
"https://github.com/pytorch/torchdynamo.",
)
resource_args.add_argument(
"--dynamo_mode",
type=str,
default="default",
choices=TORCH_DYNAMO_MODES,
help="Choose a mode to optimize your training with dynamo.",
)
resource_args.add_argument(
"--dynamo_use_fullgraph",
default=False,
action="store_true",
help="Whether to use full graph mode for dynamo or it is ok to break model into several subgraphs",
)
resource_args.add_argument(
"--dynamo_use_dynamic",
default=False,
action="store_true",
help="Whether to enable dynamic shape tracing.",
)
# Training Paradigm arguments
paradigm_args = parser.add_argument_group(
"Training Paradigm Arguments", "Arguments for selecting which training paradigm to be used."
)
paradigm_args.add_argument(
"--use_deepspeed",
default=False,
action="store_true",
help="Whether to use deepspeed.",
)
paradigm_args.add_argument(
"--use_fsdp",
default=False,
action="store_true",
help="Whether to use fsdp.",
)
paradigm_args.add_argument(
"--use_tp",
default=False,
action="store_true",
help="Whether to use PyTorch TP.",
)
paradigm_args.add_argument(
"--use_megatron_lm",
default=False,
action="store_true",
help="Whether to use Megatron-LM.",
)
paradigm_args.add_argument(
"--use_xpu",
default=False,
action="store_true",
help="Whether to use IPEX plugin to speed up training on XPU specifically.",
)
# distributed GPU training arguments
distributed_args = parser.add_argument_group("Distributed GPUs", "Arguments related to distributed GPU training.")
distributed_args.add_argument(
"--gpu_ids",
default=None,
help="What GPUs (by id) should be used for training on this machine as a comma-seperated list",
)
distributed_args.add_argument(
"--same_network",
default=False,
action="store_true",
help="Whether all machines used for multinode training exist on the same local network.",
)
distributed_args.add_argument(
"--machine_rank", type=int, default=None, help="The rank of the machine on which this script is launched."
)
distributed_args.add_argument(
"--main_process_ip", type=str, default=None, help="The IP address of the machine of rank 0."
)
distributed_args.add_argument(
"--main_process_port",
type=int,
default=None,
help="The port to use to communicate with the machine of rank 0.",
)
distributed_args.add_argument(
"-t",
"--tee",
default="0",
type=str,
help="Tee std streams into a log file and also to console.",
)
distributed_args.add_argument(
"--log_dir",
type=str,
default=None,
help=(
"Base directory to use for log files when using torchrun/torch.distributed.run as launcher. "
"Use with --tee to redirect std streams info log files."
),
)
distributed_args.add_argument(
"--role",
type=str,
default="default",
help="User-defined role for the workers.",
)
# Rendezvous related arguments
distributed_args.add_argument(
"--rdzv_backend",
type=str,
default="static",
help="The rendezvous method to use, such as 'static' (the default) or 'c10d'",
)
distributed_args.add_argument(
"--rdzv_conf",
type=str,
default="",
help="Additional rendezvous configuration (<key1>=<value1>,<key2>=<value2>,...).",
)
distributed_args.add_argument(
"--max_restarts",
type=int,
default=0,
help="Maximum number of worker group restarts before failing.",
)
distributed_args.add_argument(
"--monitor_interval",
type=float,
default=0.1,
help="Interval, in seconds, to monitor the state of workers.",
)
parser.add_argument(
"-m",
"--module",
action="store_true",
help="Change each process to interpret the launch script as a Python module, executing with the same behavior as 'python -m'.",
)
parser.add_argument(
"--no_python",
action="store_true",
help="Skip prepending the training script with 'python' - just execute it directly. Useful when the script is not a Python script.",
)
# TPU arguments
tpu_args = parser.add_argument_group("TPU", "Arguments related to TPU.")
tpu_args.add_argument(
"--tpu_cluster",
action="store_true",
dest="tpu_use_cluster",
help="Whether to use a GCP TPU pod for training.",
)
tpu_args.add_argument(
"--no_tpu_cluster",
action="store_false",
dest="tpu_use_cluster",
help="Should not be passed explicitly, this is for internal use only.",
)
tpu_args.add_argument(
"--tpu_use_sudo",
action="store_true",
help="Whether to use `sudo` when running the TPU training script in each pod.",
)
tpu_args.add_argument(
"--vm",
type=str,
action="append",
help=(
"List of single Compute VM instance names. "
"If not provided we assume usage of instance groups. For TPU pods."
),
)
tpu_args.add_argument(
"--env",
type=str,
action="append",
help="List of environment variables to set on the Compute VM instances. For TPU pods.",
)
tpu_args.add_argument(
"--main_training_function",
type=str,
default=None,
help="The name of the main function to be executed in your script (only for TPU training).",
)
tpu_args.add_argument(
"--downcast_bf16",
action="store_true",
help="Whether when using bf16 precision on TPUs if both float and double tensors are cast to bfloat16 or if double tensors remain as float32.",
)
# DeepSpeed arguments
deepspeed_args = parser.add_argument_group("DeepSpeed Arguments", "Arguments related to DeepSpeed.")
deepspeed_args.add_argument(
"--deepspeed_config_file",
default=None,
type=str,
help="DeepSpeed config file.",
)
deepspeed_args.add_argument(
"--zero_stage",
default=None,
type=int,
help="DeepSpeed's ZeRO optimization stage (useful only when `use_deepspeed` flag is passed). "
"If unspecified, will default to `2`.",
)
deepspeed_args.add_argument(
"--offload_optimizer_device",
default=None,
type=str,
help="Decides where (none|cpu|nvme) to offload optimizer states (useful only when `use_deepspeed` flag is passed). "
"If unspecified, will default to 'none'.",
)
deepspeed_args.add_argument(
"--offload_param_device",
default=None,
type=str,
help="Decides where (none|cpu|nvme) to offload parameters (useful only when `use_deepspeed` flag is passed). "
"If unspecified, will default to 'none'.",
)
deepspeed_args.add_argument(
"--offload_optimizer_nvme_path",
default=None,
type=str,
help="Decides Nvme Path to offload optimizer states (useful only when `use_deepspeed` flag is passed). "
"If unspecified, will default to 'none'.",
)
deepspeed_args.add_argument(
"--offload_param_nvme_path",
default=None,
type=str,
help="Decides Nvme Path to offload parameters (useful only when `use_deepspeed` flag is passed). "
"If unspecified, will default to 'none'.",
)
deepspeed_args.add_argument(
"--gradient_accumulation_steps",
default=None,
type=int,
help="No of gradient_accumulation_steps used in your training script (useful only when `use_deepspeed` flag is passed). "
"If unspecified, will default to `1`.",
)
deepspeed_args.add_argument(
"--gradient_clipping",
default=None,
type=float,
help="gradient clipping value used in your training script (useful only when `use_deepspeed` flag is passed). "
"If unspecified, will default to `1.0`.",
)
deepspeed_args.add_argument(
"--zero3_init_flag",
default=None,
type=str,
help="Decides Whether (true|false) to enable `deepspeed.zero.Init` for constructing massive models. "
"Only applicable with DeepSpeed ZeRO Stage-3. If unspecified, will default to `true`.",
)
deepspeed_args.add_argument(
"--zero3_save_16bit_model",
default=None,
type=str,
help="Decides Whether (true|false) to save 16-bit model weights when using ZeRO Stage-3. "
"Only applicable with DeepSpeed ZeRO Stage-3. If unspecified, will default to `false`.",
)
deepspeed_args.add_argument(
"--deepspeed_hostfile",
default=None,
type=str,
help="DeepSpeed hostfile for configuring multi-node compute resources.",
)
deepspeed_args.add_argument(
"--deepspeed_exclusion_filter",
default=None,
type=str,
help="DeepSpeed exclusion filter string when using mutli-node setup.",
)
deepspeed_args.add_argument(
"--deepspeed_inclusion_filter",
default=None,
type=str,
help="DeepSpeed inclusion filter string when using mutli-node setup.",
)
deepspeed_args.add_argument(
"--deepspeed_multinode_launcher",
default=None,
type=str,
help="DeepSpeed multi-node launcher to use. If unspecified, will default to `pdsh`.",
)
deepspeed_args.add_argument(
"--deepspeed_moe_layer_cls_names",
default=None,
type=str,
help="comma-separated list of transformer MoE layer class names (case-sensitive) to wrap ,e.g, `MixtralSparseMoeBlock`, `Qwen2MoeSparseMoeBlock`, `JetMoEAttention,JetMoEBlock` ..."
" (useful only when `use_deepspeed` flag is passed).",
)
# fsdp arguments
    fsdp_args = parser.add_argument_group("FSDP Arguments", "Arguments related to Fully Sharded Data Parallelism.")
fsdp_args.add_argument(
"--fsdp_offload_params",
default="false",
type=str,
help="Decides Whether (true|false) to offload parameters and gradients to CPU. (useful only when `use_fsdp` flag is passed).",
)
fsdp_args.add_argument(
"--fsdp_min_num_params",
type=int,
default=1e8,
help="FSDP's minimum number of parameters for Default Auto Wrapping. (useful only when `use_fsdp` flag is passed).",
)
fsdp_args.add_argument(
"--fsdp_sharding_strategy",
type=str,
default="FULL_SHARD",
help="FSDP's Sharding Strategy. (useful only when `use_fsdp` flag is passed).",
)
fsdp_args.add_argument(
"--fsdp_auto_wrap_policy",
type=str,
default=None,
help="FSDP's auto wrap policy. (useful only when `use_fsdp` flag is passed).",
)
fsdp_args.add_argument(
"--fsdp_transformer_layer_cls_to_wrap",
default=None,
type=str,
help="Transformer layer class name (case-sensitive) to wrap ,e.g, `BertLayer`, `GPTJBlock`, `T5Block` .... "
"(useful only when `use_fsdp` flag is passed).",
)
fsdp_args.add_argument(
"--fsdp_backward_prefetch",
default=None,
type=str,
help="FSDP's backward prefetch policy. (useful only when `use_fsdp` flag is passed).",
)
fsdp_args.add_argument(
"--fsdp_state_dict_type",
default=None,
type=str,
help="FSDP's state dict type. (useful only when `use_fsdp` flag is passed).",
)
fsdp_args.add_argument(
"--fsdp_forward_prefetch",
default="false",
type=str,
help="If True, then FSDP explicitly prefetches the next upcoming "
"all-gather while executing in the forward pass (useful only when `use_fsdp` flag is passed).",
)
fsdp_args.add_argument(
"--fsdp_use_orig_params",
default="true",
type=str,
help="If True, allows non-uniform `requires_grad` during init, which means support for interspersed frozen and trainable paramteres."
" (useful only when `use_fsdp` flag is passed).",
)
fsdp_args.add_argument(
"--fsdp_cpu_ram_efficient_loading",
default="true",
type=str,
help="If True, only the first process loads the pretrained model checkoint while all other processes have empty weights. "
"Only applicable for 🤗 Transformers. When using this, `--fsdp_sync_module_states` needs to True. "
"(useful only when `use_fsdp` flag is passed).",
)
fsdp_args.add_argument(
"--fsdp_sync_module_states",
default="true",
type=str,
help="If True, each individually wrapped FSDP unit will broadcast module parameters from rank 0."
" (useful only when `use_fsdp` flag is passed).",
)
fsdp_args.add_argument(
"--fsdp_activation_checkpointing",
default="false",
type=str,
help="Decides Whether (true|false) intermediate activations are freed during the forward pass, and a checkpoint is left as a placeholder. (useful only when `use_fsdp` flag is passed).",
)
# tp args
    tp_args = parser.add_argument_group("TP Arguments", "Arguments related to Tensor Parallelism using PyTorch.")
tp_args.add_argument(
"--tp_size",
default=1,
type=int,
help="PyTorch Tensor Parallelism (TP) degree. Set a value greater than 1 to activate. (useful only when `use_tp` flag is passed)",
)
# megatron_lm args
megatron_lm_args = parser.add_argument_group("Megatron-LM Arguments", "Arguments related to Megatron-LM.")
megatron_lm_args.add_argument(
"--megatron_lm_tp_degree",
type=int,
default=1,
help="Megatron-LM's Tensor Parallelism (TP) degree. (useful only when `use_megatron_lm` flag is passed).",
)
megatron_lm_args.add_argument(
"--megatron_lm_pp_degree",
type=int,
default=1,
help="Megatron-LM's Pipeline Parallelism (PP) degree. (useful only when `use_megatron_lm` flag is passed).",
)
megatron_lm_args.add_argument(
"--megatron_lm_num_micro_batches",
type=int,
default=None,
help="Megatron-LM's number of micro batches when PP degree > 1. (useful only when `use_megatron_lm` flag is passed).",
)
megatron_lm_args.add_argument(
"--megatron_lm_sequence_parallelism",
default=None,
type=str,
help="Decides Whether (true|false) to enable Sequence Parallelism when TP degree > 1. "
"(useful only when `use_megatron_lm` flag is passed).",
)
megatron_lm_args.add_argument(
"--megatron_lm_recompute_activations",
default=None,
type=str,
help="Decides Whether (true|false) to enable Selective Activation Recomputation. "
"(useful only when `use_megatron_lm` flag is passed).",
)
megatron_lm_args.add_argument(
"--megatron_lm_use_distributed_optimizer",
default=None,
type=str,
help="Decides Whether (true|false) to use distributed optimizer "
"which shards optimizer state and gradients across Data Pralellel (DP) ranks. "
"(useful only when `use_megatron_lm` flag is passed).",
)
megatron_lm_args.add_argument(
"--megatron_lm_gradient_clipping",
default=1.0,
type=float,
help="Megatron-LM's gradient clipping value based on global L2 Norm (0 to disable). "
"(useful only when `use_megatron_lm` flag is passed).",
)
# FP8 arguments
fp8_args = parser.add_argument_group(
"FP8 Arguments", "Arguments related to FP8 training (requires `--mixed_precision=fp8`)"
)
fp8_args.add_argument(
"--fp8_backend",
type=str,
choices=["te", "msamp"],
help="Choose a backend to train with FP8 (te: TransformerEngine, msamp: MS-AMP)",
)
fp8_args.add_argument(
"--fp8_use_autocast_during_eval",
default=False,
action="store_true",
help="Whether to use FP8 autocast during eval mode (useful only when `--fp8_backend=te` is passed). Generally better metrics are found when this is not passed.",
)
fp8_args.add_argument(
"--fp8_margin",
type=int,
default=0,
help="The margin to use for the gradient scaling (useful only when `--fp8_backend=te` is passed).",
)
fp8_args.add_argument(
"--fp8_interval",
type=int,
default=1,
help="The interval to use for how often the scaling factor is recomputed (useful only when `--fp8_backend=te` is passed).",
)
fp8_args.add_argument(
"--fp8_format",
type=str,
default="E4M3",
choices=["E4M3", "HYBRID"],
help="The format to use for the FP8 recipe (useful only when `--fp8_backend=te` is passed).",
)
fp8_args.add_argument(
"--fp8_amax_history_len",
type=int,
default=1024,
help="The length of the history to use for the scaling factor computation (useful only when `--fp8_backend=te` is passed).",
)
fp8_args.add_argument(
"--fp8_amax_compute_algo",
type=str,
default="most_recent",
choices=["max", "most_recent"],
help="The algorithm to use for the scaling factor computation. (useful only when `--fp8_backend=te` is passed).",
)
fp8_args.add_argument(
"--fp8_override_linear_precision",
type=lambda x: tuple(map(str_to_bool, x.split(","))),
default=(False, False, False),
help="Whether or not to execute `fprop`, `dgrad`, and `wgrad` GEMMS in higher precision. Should be passed in a comma-seperated string of booleans (useful only when `--fp8_backend=te` is passed).",
)
fp8_args.add_argument(
"--fp8_opt_level",
type=str,
default="O2",
choices=["O1", "O2"],
help="What level of 8-bit collective communication should be used with MS-AMP (useful only when `--fp8_backend=msamp` is passed).",
)
# AWS arguments
aws_args = parser.add_argument_group("AWS Arguments", "Arguments related to AWS.")
aws_args.add_argument(
"--aws_access_key_id",
type=str,
default=None,
help="The AWS_ACCESS_KEY_ID used to launch the Amazon SageMaker training job",
)
aws_args.add_argument(
"--aws_secret_access_key",
type=str,
default=None,
help="The AWS_SECRET_ACCESS_KEY used to launch the Amazon SageMaker training job.",
)
parser.add_argument(
"--debug",
action="store_true",
help="Whether to print out the torch.distributed stack trace when something fails.",
)
parser.add_argument(
"training_script",
type=str,
help=(
"The full path to the script to be launched in parallel, followed by all the arguments for the training "
"script."
),
)
# MPI arguments
mpirun_args = parser.add_argument_group("MPI Arguments", "Arguments related to mpirun for Multi-CPU")
mpirun_args.add_argument(
"--mpirun_hostfile",
type=str,
default=None,
help="Location for a hostfile for using Accelerate to launch a multi-CPU training job with mpirun. This will "
"get passed to the MPI --hostfile or -f parameter, depending on which MPI program is installed.",
)
mpirun_args.add_argument(
"--mpirun_ccl",
type=int,
default=1,
help="The number of oneCCL worker threads when using Accelerate to launch multi-CPU training with mpirun.",
)
# Other arguments of the training scripts
parser.add_argument("training_script_args", nargs=argparse.REMAINDER, help="Arguments of the training script.")
if subparsers is not None:
parser.set_defaults(func=launch_command)
return parser
def simple_launcher(args):
cmd, current_env = prepare_simple_launcher_cmd_env(args)
process = subprocess.Popen(cmd, env=current_env)
process.wait()
if process.returncode != 0:
if not args.quiet:
raise subprocess.CalledProcessError(returncode=process.returncode, cmd=cmd)
else:
sys.exit(1)
def multi_gpu_launcher(args):
import torch.distributed.run as distrib_run
current_env = prepare_multi_gpu_env(args)
if not check_cuda_p2p_ib_support():
message = "Using RTX 4000 series which doesn't support faster communication speedups. Ensuring P2P and IB communications are disabled."
warn = False
if "NCCL_P2P_DISABLE" not in current_env:
current_env["NCCL_P2P_DISABLE"] = "1"
warn = True
if "NCCL_IB_DISABLE" not in current_env:
current_env["NCCL_IB_DISABLE"] = "1"
warn = True
if warn:
logger.warning(message)
debug = getattr(args, "debug", False)
args = _filter_args(
args,
distrib_run.get_args_parser(),
["--training_script", args.training_script, "--training_script_args", args.training_script_args],
)
with patch_environment(**current_env):
try:
distrib_run.run(args)
except Exception:
if is_rich_available() and debug:
console = get_console()
console.print("\n[bold red]Using --debug, `torch.distributed` Stack Trace:[/bold red]")
console.print_exception(suppress=[__file__], show_locals=False)
else:
raise
def deepspeed_launcher(args):
import torch.distributed.run as distrib_run
if not is_deepspeed_available():
raise ImportError("DeepSpeed is not installed => run `pip3 install deepspeed` or build it from source.")
else:
from deepspeed.launcher.runner import DEEPSPEED_ENVIRONMENT_NAME
cmd, current_env = prepare_deepspeed_cmd_env(args)
if not check_cuda_p2p_ib_support():
message = "Using RTX 4000 series which doesn't support faster communication speedups. Ensuring P2P and IB communications are disabled."
warn = False
if "NCCL_P2P_DISABLE" not in current_env:
current_env["NCCL_P2P_DISABLE"] = "1"
warn = True
if "NCCL_IB_DISABLE" not in current_env:
current_env["NCCL_IB_DISABLE"] = "1"
warn = True
if warn:
logger.warning(message)
if args.num_machines > 1 and args.deepspeed_multinode_launcher != DEEPSPEED_MULTINODE_LAUNCHERS[1]:
with open(DEEPSPEED_ENVIRONMENT_NAME, "a") as f:
valid_env_items = convert_dict_to_env_variables(current_env)
if len(valid_env_items) > 1:
f.writelines(valid_env_items)
process = subprocess.Popen(cmd, env=current_env)
process.wait()
if process.returncode != 0:
if not args.quiet:
raise subprocess.CalledProcessError(returncode=process.returncode, cmd=cmd)
else:
sys.exit(1)
else:
debug = getattr(args, "debug", False)
args = _filter_args(
args,
distrib_run.get_args_parser(),
["--training_script", args.training_script, "--training_script_args", args.training_script_args],
)
with patch_environment(**current_env):
try:
distrib_run.run(args)
except Exception:
if is_rich_available() and debug:
console = get_console()
console.print("\n[bold red]Using --debug, `torch.distributed` Stack Trace:[/bold red]")
console.print_exception(suppress=[__file__], show_locals=False)
else:
raise
def tpu_launcher(args):
import torch_xla.distributed.xla_multiprocessing as xmp
from torch_xla import device_count
if args.no_python:
raise ValueError("--no_python cannot be used with TPU launcher")
args, current_env = prepare_tpu(args, {})
if args.module:
mod_name = args.training_script
else:
# Import training_script as a module
script_path = Path(args.training_script)
sys.path.append(str(script_path.parent.resolve()))
mod_name = script_path.stem
mod = importlib.import_module(mod_name)
if not hasattr(mod, args.main_training_function):
raise ValueError(
f"Your training script should have a function named {args.main_training_function}, or you should pass a "
"different value to `--main_training_function`."
)
if args.num_processes and args.num_processes != device_count():
raise ValueError(
f"Number of processes ({args.num_processes}) must match the number of TPU devices ({device_count()})"
)
# Patch sys.argv
sys.argv = [mod.__file__] + args.training_script_args
main_function = getattr(mod, args.main_training_function)
with patch_environment(**current_env):
xmp.spawn(PrepareForLaunch(main_function), args=())
def tpu_pod_launcher(args):
from torch_xla.distributed import xla_dist
current_env = {}
args, current_env = prepare_tpu(args, current_env, True)
debug = getattr(args, "debug", False)
training_script = args.training_script
training_script_args = args.training_script_args
new_args = _filter_args(
args, xla_dist.get_args_parser(), ["--tpu", args.tpu_name, "--positional", "", "--restart-tpuvm-pod-server"]
)
if args.tpu_use_sudo:
new_cmd = ["sudo"]
else:
new_cmd = []
new_cmd += [
"accelerate-launch",
"--tpu",
"--no_tpu_cluster",
"--num_machines",
"1",
"--mixed_precision",
"no",
"--dynamo_backend",
"no",
"--num_processes",
str(args.num_processes),
"--main_training_function",
str(args.main_training_function),
training_script,
] + training_script_args
new_args.positional = new_cmd
bad_flags = ""
for arg in vars(new_args):
if arg.startswith("docker_"):
value = getattr(new_args, arg)
if value != "" and value is not None:
bad_flags += f'{arg}="{value}"\n'
if bad_flags != "":
raise ValueError(
f"Docker containers are not supported for TPU pod launcher currently, please remove the following flags:\n{bad_flags}"
)
new_args.env = [f"{k}={v}" for k, v in current_env.items()]
new_args.env.append("ACCELERATE_IN_TPU_POD=1")
try:
xla_dist.resolve_and_execute(new_args)
except Exception:
if is_rich_available() and debug:
console = get_console()
console.print("\n[bold red]Using --debug, `torch_xla.xla_dist` Stack Trace:[/bold red]")
console.print_exception(suppress=[__file__], show_locals=False)
else:
raise
def sagemaker_launcher(sagemaker_config: SageMakerConfig, args):
if not is_sagemaker_available():
raise ImportError(
"Please install sagemaker to be able to launch training on Amazon SageMaker with `pip install accelerate[sagemaker]`"
)
if args.module or args.no_python:
raise ValueError(
"SageMaker requires a python training script file and cannot be used with --module or --no_python"
)
from sagemaker.huggingface import HuggingFace
args, sagemaker_inputs = prepare_sagemager_args_inputs(sagemaker_config, args)
huggingface_estimator = HuggingFace(**args)
huggingface_estimator.fit(inputs=sagemaker_inputs)
print(f"You can find your model data at: {huggingface_estimator.model_data}")
def _validate_launch_command(args):
# Sanity checks
if sum([args.multi_gpu, args.cpu, args.tpu, args.use_deepspeed, args.use_fsdp, args.use_tp]) > 1:
raise ValueError(
"You can only use one of `--cpu`, `--multi_gpu`, `--tpu`, `--use_deepspeed`, `--use_fsdp`, `--use_tp` at a time."
)
if args.multi_gpu and (args.num_processes is not None) and (args.num_processes < 2):
raise ValueError("You need to use at least 2 processes to use `--multi_gpu`.")
defaults = None
warned = []
mp_from_config_flag = False
# Get the default from the config file.
if args.config_file is not None or os.path.isfile(default_config_file) and not args.cpu:
defaults = load_config_from_file(args.config_file)
if (
not args.multi_gpu
and not args.tpu
and not args.tpu_use_cluster
and not args.use_deepspeed
and not args.use_fsdp
and not args.use_tp
and not args.use_megatron_lm
):
args.use_deepspeed = defaults.distributed_type == DistributedType.DEEPSPEED
args.multi_gpu = (
True
if defaults.distributed_type
in (
DistributedType.MULTI_GPU,
DistributedType.MULTI_NPU,
DistributedType.MULTI_MLU,
DistributedType.MULTI_MUSA,
DistributedType.MULTI_XPU,
)
else False
)
args.tpu = defaults.distributed_type == DistributedType.XLA
args.use_fsdp = defaults.distributed_type == DistributedType.FSDP
args.use_tp = defaults.distributed_type == DistributedType.TP
args.use_megatron_lm = defaults.distributed_type == DistributedType.MEGATRON_LM
args.tpu_use_cluster = defaults.tpu_use_cluster if args.tpu else False
if args.gpu_ids is None:
if defaults.gpu_ids is not None:
args.gpu_ids = defaults.gpu_ids
else:
args.gpu_ids = "all"
if args.multi_gpu and args.num_machines is None:
args.num_machines = defaults.num_machines
if len(args.gpu_ids.split(",")) < 2 and (args.gpu_ids != "all") and args.multi_gpu and args.num_machines <= 1:
raise ValueError(
"Less than two GPU ids were configured and tried to run on on multiple GPUs. "
"Please ensure at least two are specified for `--gpu_ids`, or use `--gpu_ids='all'`."
)
if defaults.compute_environment == ComputeEnvironment.LOCAL_MACHINE:
# Update args with the defaults
for name, attr in defaults.__dict__.items():
if isinstance(attr, dict):
for k in defaults.deepspeed_config:
setattr(args, k, defaults.deepspeed_config[k])
for k in defaults.fsdp_config:
arg_to_set = k
if "fsdp" not in arg_to_set:
arg_to_set = "fsdp_" + arg_to_set
setattr(args, arg_to_set, defaults.fsdp_config[k])
for k in defaults.tp_config:
setattr(args, k, defaults.tp_config[k])
for k in defaults.megatron_lm_config:
setattr(args, k, defaults.megatron_lm_config[k])
for k in defaults.dynamo_config:
setattr(args, k, defaults.dynamo_config[k])
for k in defaults.ipex_config:
setattr(args, k, defaults.ipex_config[k])
for k in defaults.mpirun_config:
setattr(args, k, defaults.mpirun_config[k])
continue
# Those args are handled separately
if (
name not in ["compute_environment", "mixed_precision", "distributed_type"]
and getattr(args, name, None) is None
):
setattr(args, name, attr)
if not args.debug:
args.debug = defaults.debug
if not args.mixed_precision:
if defaults.mixed_precision is None:
args.mixed_precision = "no"
else:
args.mixed_precision = defaults.mixed_precision
mp_from_config_flag = True
else:
if args.use_cpu or (args.use_xpu and torch.xpu.is_available()):
native_amp = True
else:
native_amp = is_bf16_available(True)
if (
args.mixed_precision == "bf16"
and not native_amp
and not (args.tpu and is_torch_xla_available(check_is_tpu=True))
):
raise ValueError("bf16 mixed precision requires PyTorch >= 1.10 and a supported device.")
# Silently set the default here
if args.dynamo_backend is None:
args.dynamo_backend = "no"
if args.num_processes == -1:
raise ValueError("You need to manually pass in `--num_processes` using this config yaml.")
else:
if args.num_processes is None:
if args.use_xpu and is_xpu_available():
args.num_processes = torch.xpu.device_count()
elif is_mlu_available():
args.num_processes = torch.mlu.device_count()
elif is_musa_available():
args.num_processes = torch.musa.device_count()
elif is_npu_available():
args.num_processes = torch.npu.device_count()
else:
args.num_processes = torch.cuda.device_count()
warned.append(f"\t`--num_processes` was set to a value of `{args.num_processes}`")
if args.debug is None:
args.debug = False
if (
not args.multi_gpu
and args.num_processes > 1
and (
(args.use_xpu and is_xpu_available() and torch.xpu.device_count() > 1)
or (is_mlu_available() and torch.mlu.device_count() > 1)
or (is_musa_available() and torch.musa.device_count() > 1)
or (is_npu_available() and torch.npu.device_count() > 1)
or (torch.cuda.device_count() > 1)
)
):
warned.append(
"\t\tMore than one GPU was found, enabling multi-GPU training.\n"
"\t\tIf this was unintended please pass in `--num_processes=1`."
)
args.multi_gpu = True
if args.num_machines is None:
warned.append("\t`--num_machines` was set to a value of `1`")
args.num_machines = 1
if args.mixed_precision is None:
warned.append("\t`--mixed_precision` was set to a value of `'no'`")
args.mixed_precision = "no"
if not hasattr(args, "use_cpu"):
args.use_cpu = args.cpu
if args.dynamo_backend is None:
warned.append("\t`--dynamo_backend` was set to a value of `'no'`")
args.dynamo_backend = "no"
if args.debug:
logger.debug("Running script in debug mode, expect distributed operations to be slightly slower.")
is_aws_env_disabled = defaults is None or (
defaults is not None and defaults.compute_environment != ComputeEnvironment.AMAZON_SAGEMAKER
)
if is_aws_env_disabled and args.num_cpu_threads_per_process is None:
args.num_cpu_threads_per_process = get_int_from_env(["OMP_NUM_THREADS"], 1)
if args.use_cpu and args.num_processes >= 1 and get_int_from_env(["OMP_NUM_THREADS"], 0) == 0:
local_size = get_int_from_env(
["MPI_LOCALNRANKS", "OMPI_COMM_WORLD_LOCAL_SIZE", "MV2_COMM_WORLD_LOCAL_SIZE"],
max(int(args.num_processes / args.num_machines), 1),
)
threads_per_process = int(psutil.cpu_count(logical=False) / local_size)
if threads_per_process > 1:
args.num_cpu_threads_per_process = threads_per_process
warned.append(
f"\t`--num_cpu_threads_per_process` was set to `{args.num_cpu_threads_per_process}` to improve out-of-box performance when training on CPUs"
)
if any(warned):
message = "The following values were not passed to `accelerate launch` and had defaults used instead:\n"
message += "\n".join(warned)
message += (
"\nTo avoid this warning pass in values for each of the problematic parameters or run `accelerate config`."
)
logger.warning(message)
return args, defaults, mp_from_config_flag
def launch_command(args):
args, defaults, mp_from_config_flag = _validate_launch_command(args)
# Use the proper launcher
if args.use_deepspeed and not args.cpu:
args.deepspeed_fields_from_accelerate_config = list(defaults.deepspeed_config.keys()) if defaults else []
if mp_from_config_flag:
args.deepspeed_fields_from_accelerate_config.append("mixed_precision")
args.deepspeed_fields_from_accelerate_config = ",".join(args.deepspeed_fields_from_accelerate_config)
deepspeed_launcher(args)
elif args.use_fsdp and not args.cpu:
multi_gpu_launcher(args)
elif args.use_tp and not args.cpu:
multi_gpu_launcher(args)
elif args.use_megatron_lm and not args.cpu:
multi_gpu_launcher(args)
elif args.multi_gpu and not args.cpu:
multi_gpu_launcher(args)
elif args.tpu and not args.cpu:
if args.tpu_use_cluster:
tpu_pod_launcher(args)
else:
tpu_launcher(args)
elif defaults is not None and defaults.compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
sagemaker_launcher(defaults, args)
else:
simple_launcher(args)
def main():
parser = launch_command_parser()
args = parser.parse_args()
launch_command(args)
if __name__ == "__main__":
main()
| accelerate/src/accelerate/commands/launch.py/0 | {
"file_path": "accelerate/src/accelerate/commands/launch.py",
"repo_id": "accelerate",
"token_count": 20243
} |
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import logging
import os
from .state import PartialState
class MultiProcessAdapter(logging.LoggerAdapter):
"""
An adapter to assist with logging in multiprocess.
`log` takes in an additional `main_process_only` kwarg, which dictates whether it should be called on all processes
or only on the main process. Defaults to `main_process_only=True`.
Does not require an `Accelerator` object to be created first.
"""
@staticmethod
def _should_log(main_process_only):
"Check if log should be performed"
state = PartialState()
return not main_process_only or (main_process_only and state.is_main_process)
def log(self, level, msg, *args, **kwargs):
"""
Delegates logger call after checking if we should log.
Accepts a new kwarg of `main_process_only`, which will dictate whether it will be logged across all processes
or only on the main process. Defaults to `True` if not passed.
Also accepts "in_order", which if `True` makes the processes log one by one, in order. This is much easier to
read, but comes at the cost of sometimes needing to wait for the other processes. Default is `False` to not
break with the previous behavior.
`in_order` is ignored if `main_process_only` is passed.
"""
if PartialState._shared_state == {}:
raise RuntimeError(
"You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility."
)
main_process_only = kwargs.pop("main_process_only", True)
in_order = kwargs.pop("in_order", False)
# set `stacklevel` to exclude ourself in `Logger.findCaller()` while respecting user's choice
kwargs.setdefault("stacklevel", 2)
if self.isEnabledFor(level):
if self._should_log(main_process_only):
msg, kwargs = self.process(msg, kwargs)
self.logger.log(level, msg, *args, **kwargs)
elif in_order:
state = PartialState()
for i in range(state.num_processes):
if i == state.process_index:
msg, kwargs = self.process(msg, kwargs)
self.logger.log(level, msg, *args, **kwargs)
state.wait_for_everyone()
@functools.lru_cache(None)
def warning_once(self, *args, **kwargs):
"""
This method is identical to `logger.warning()`, but will emit the warning with the same message only once
Note: The cache is for the function arguments, so 2 different callers using the same arguments will hit the
cache. The assumption here is that all warning messages are unique across the code. If they aren't, we need to
switch to another type of cache that includes the caller frame information in the hashing function.
"""
self.warning(*args, **kwargs)
def get_logger(name: str, log_level: str = None):
"""
Returns a `logging.Logger` for `name` that can handle multiprocessing.
If a log should be called on all processes, pass `main_process_only=False`. If a log should be called on all
processes and in order, also pass `in_order=True`
Args:
name (`str`):
The name for the logger, such as `__file__`
log_level (`str`, *optional*):
The log level to use. If not passed, will default to the `ACCELERATE_LOG_LEVEL` environment variable; if that is also unset, the logger's existing level is kept.
Example:
```python
>>> from accelerate.logging import get_logger
>>> from accelerate import Accelerator
>>> logger = get_logger(__name__)
>>> accelerator = Accelerator()
>>> logger.info("My log", main_process_only=False)
>>> logger.debug("My log", main_process_only=True)
>>> logger = get_logger(__name__, log_level="DEBUG")
>>> logger.info("My log")
>>> logger.debug("My second log")
>>> array = ["a", "b", "c", "d"]
>>> letter_at_rank = array[accelerator.process_index]
>>> logger.info(letter_at_rank, in_order=True)
```
"""
if log_level is None:
log_level = os.environ.get("ACCELERATE_LOG_LEVEL", None)
logger = logging.getLogger(name)
if log_level is not None:
logger.setLevel(log_level.upper())
logger.root.setLevel(log_level.upper())
return MultiProcessAdapter(logger, {})
| accelerate/src/accelerate/logging.py/0 | {
"file_path": "accelerate/src/accelerate/logging.py",
"repo_id": "accelerate",
"token_count": 1842
} |
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import math
import os
import platform
import subprocess
import sys
from contextlib import contextmanager
from dataclasses import dataclass, field
from functools import lru_cache, wraps
from shutil import which
from typing import List, Optional
import torch
from packaging.version import parse
logger = logging.getLogger(__name__)
def convert_dict_to_env_variables(current_env: dict):
"""
Verifies that no key or value in `current_env` contains illegal characters, and returns a list of
strings as the result.
Example:
```python
>>> from accelerate.utils.environment import verify_env
>>> env = {"ACCELERATE_DEBUG_MODE": "1", "BAD_ENV_NAME": "<mything", "OTHER_ENV": "2"}
>>> valid_env_items = verify_env(env)
>>> print(valid_env_items)
["ACCELERATE_DEBUG_MODE=1\n", "OTHER_ENV=2\n"]
```
"""
forbidden_chars = [";", "\n", "<", ">", " "]
valid_env_items = []
for key, value in current_env.items():
if all(char not in (key + value) for char in forbidden_chars) and len(key) >= 1 and len(value) >= 1:
valid_env_items.append(f"{key}={value}\n")
else:
logger.warning(f"WARNING: Skipping {key}={value} as it contains forbidden characters or missing values.")
return valid_env_items
def str_to_bool(value) -> int:
"""
Converts a string representation of truth to `True` (1) or `False` (0).
True values are `y`, `yes`, `t`, `true`, `on`, and `1`; False values are `n`, `no`, `f`, `false`, `off`, and `0`;
"""
value = value.lower()
if value in ("y", "yes", "t", "true", "on", "1"):
return 1
elif value in ("n", "no", "f", "false", "off", "0"):
return 0
else:
raise ValueError(f"invalid truth value {value}")
def get_int_from_env(env_keys, default):
"""Returns the first positive env value found in the `env_keys` list or the default."""
for e in env_keys:
val = int(os.environ.get(e, -1))
if val >= 0:
return val
return default
def parse_flag_from_env(key, default=False):
"""Returns truthy value for `key` from the env if available else the default."""
value = os.environ.get(key, str(default))
return str_to_bool(value) == 1 # As its name indicates `str_to_bool` actually returns an int...
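# Illustrative usage (hypothetical environment): with ACCELERATE_DEBUG_MODE=1 exported,
# parse_flag_from_env("ACCELERATE_DEBUG_MODE") returns True; when the variable is unset,
# the default (False) is stringified and passed through str_to_bool, returning False.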
def parse_choice_from_env(key, default="no"):
value = os.environ.get(key, str(default))
return value
def are_libraries_initialized(*library_names: str) -> List[str]:
"""
Checks if any of `library_names` are imported in the environment. Will return any names that are.
"""
return [lib_name for lib_name in library_names if lib_name in sys.modules.keys()]
def _nvidia_smi():
"""
Returns the right nvidia-smi command based on the system.
"""
if platform.system() == "Windows":
# If platform is Windows and nvidia-smi can't be found in path
        # try from system drive with default installation path
command = which("nvidia-smi")
if command is None:
command = f"{os.environ['systemdrive']}\\Program Files\\NVIDIA Corporation\\NVSMI\\nvidia-smi.exe"
else:
command = "nvidia-smi"
return command
def get_gpu_info():
"""
Gets GPU count and names using `nvidia-smi` instead of torch to not initialize CUDA.
Largely based on the `gputil` library.
"""
# Returns as list of `n` GPUs and their names
output = subprocess.check_output(
[_nvidia_smi(), "--query-gpu=count,name", "--format=csv,noheader"], universal_newlines=True
)
output = output.strip()
gpus = output.split(os.linesep)
# Get names from output
gpu_count = len(gpus)
gpu_names = [gpu.split(",")[1].strip() for gpu in gpus]
return gpu_names, gpu_count
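# Illustrative return value on a hypothetical two-GPU machine (names depend on hardware):
# (["NVIDIA A100-SXM4-80GB", "NVIDIA A100-SXM4-80GB"], 2)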
def get_driver_version():
"""
Returns the driver version
In the case of multiple GPUs, will return the first.
"""
output = subprocess.check_output(
[_nvidia_smi(), "--query-gpu=driver_version", "--format=csv,noheader"], universal_newlines=True
)
output = output.strip()
return output.split(os.linesep)[0]
def check_cuda_p2p_ib_support():
"""
Checks if the devices being used have issues with P2P and IB communications, namely any consumer GPU hardware after
the 3090.
Notably uses `nvidia-smi` instead of torch to not initialize CUDA.
"""
try:
device_names, device_count = get_gpu_info()
        # As new consumer GPUs get released, add them to `unsupported_devices`
unsupported_devices = {"RTX 40"}
if device_count > 1:
if any(
unsupported_device in device_name
for device_name in device_names
for unsupported_device in unsupported_devices
):
# Check if they have the right driver version
acceptable_driver_version = "550.40.07"
current_driver_version = get_driver_version()
if parse(current_driver_version) < parse(acceptable_driver_version):
return False
return True
except Exception:
pass
return True
def check_fp8_capability():
"""
Checks if all the current GPUs available support FP8.
Notably must initialize `torch.cuda` to check.
"""
cuda_device_capacity = torch.cuda.get_device_capability()
return cuda_device_capacity >= (8, 9)
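# For context: compute capability (8, 9) corresponds to Ada Lovelace GPUs (e.g. RTX 4090, L40);
# Hopper (9, 0) and newer also satisfy this check.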
@dataclass
class CPUInformation:
"""
Stores information about the CPU in a distributed environment. It contains the following attributes:
- rank: The rank of the current process.
- world_size: The total number of processes in the world.
- local_rank: The rank of the current process on the local node.
- local_world_size: The total number of processes on the local node.
"""
rank: int = field(default=0, metadata={"help": "The rank of the current process."})
world_size: int = field(default=1, metadata={"help": "The total number of processes in the world."})
local_rank: int = field(default=0, metadata={"help": "The rank of the current process on the local node."})
local_world_size: int = field(default=1, metadata={"help": "The total number of processes on the local node."})
def get_cpu_distributed_information() -> CPUInformation:
"""
Returns various information about the environment in relation to CPU distributed training as a `CPUInformation`
dataclass.
"""
information = {}
information["rank"] = get_int_from_env(["RANK", "PMI_RANK", "OMPI_COMM_WORLD_RANK", "MV2_COMM_WORLD_RANK"], 0)
information["world_size"] = get_int_from_env(
["WORLD_SIZE", "PMI_SIZE", "OMPI_COMM_WORLD_SIZE", "MV2_COMM_WORLD_SIZE"], 1
)
information["local_rank"] = get_int_from_env(
["LOCAL_RANK", "MPI_LOCALRANKID", "OMPI_COMM_WORLD_LOCAL_RANK", "MV2_COMM_WORLD_LOCAL_RANK"], 0
)
information["local_world_size"] = get_int_from_env(
["LOCAL_WORLD_SIZE", "MPI_LOCALNRANKS", "OMPI_COMM_WORLD_LOCAL_SIZE", "MV2_COMM_WORLD_LOCAL_SIZE"],
1,
)
return CPUInformation(**information)
def override_numa_affinity(local_process_index: int, verbose: Optional[bool] = None) -> None:
"""
Overrides whatever NUMA affinity is set for the current process. This is very taxing and requires recalculating the
affinity to set; ideally you should use `utils.environment.set_numa_affinity` instead.
Args:
local_process_index (int):
The index of the current process on the current server.
verbose (bool, *optional*):
Whether to log out the assignment of each CPU. If `ACCELERATE_DEBUG_MODE` is enabled, will default to True.
"""
if verbose is None:
verbose = parse_flag_from_env("ACCELERATE_DEBUG_MODE", False)
if torch.cuda.is_available():
from accelerate.utils import is_pynvml_available
if not is_pynvml_available():
raise ImportError(
"To set CPU affinity on CUDA GPUs the `pynvml` package must be available. (`pip install pynvml`)"
)
import pynvml as nvml
# The below code is based on https://github.com/NVIDIA/DeepLearningExamples/blob/master/TensorFlow2/LanguageModeling/BERT/gpu_affinity.py
nvml.nvmlInit()
num_elements = math.ceil(os.cpu_count() / 64)
handle = nvml.nvmlDeviceGetHandleByIndex(local_process_index)
affinity_string = ""
for j in nvml.nvmlDeviceGetCpuAffinity(handle, num_elements):
# assume nvml returns list of 64 bit ints
affinity_string = f"{j:064b}{affinity_string}"
affinity_list = [int(x) for x in affinity_string]
affinity_list.reverse() # so core 0 is the 0th element
affinity_to_set = [i for i, e in enumerate(affinity_list) if e != 0]
os.sched_setaffinity(0, affinity_to_set)
if verbose:
cpu_cores = os.sched_getaffinity(0)
logger.info(f"Assigning {len(cpu_cores)} cpu cores to process {local_process_index}: {cpu_cores}")
@lru_cache
def set_numa_affinity(local_process_index: int, verbose: Optional[bool] = None) -> None:
"""
Assigns the current process to a specific NUMA node. This is most efficient when there are at least 2 CPUs per node.
This result is cached between calls. If you want to override it, please use
`accelerate.utils.environment.override_numa_affinity`.
Args:
local_process_index (int):
The index of the current process on the current server.
verbose (bool, *optional*):
Whether to print the new cpu cores assignment for each process. If `ACCELERATE_DEBUG_MODE` is enabled, will
default to True.
"""
override_numa_affinity(local_process_index=local_process_index, verbose=verbose)
@contextmanager
def clear_environment():
"""
A context manager that will temporarily clear environment variables.
When this context exits, the previous environment variables will be back.
Example:
```python
>>> import os
>>> from accelerate.utils import clear_environment
>>> os.environ["FOO"] = "bar"
>>> with clear_environment():
... print(os.environ)
... os.environ["FOO"] = "new_bar"
... print(os.environ["FOO"])
{}
new_bar
>>> print(os.environ["FOO"])
bar
```
"""
_old_os_environ = os.environ.copy()
os.environ.clear()
try:
yield
finally:
os.environ.clear() # clear any added keys,
os.environ.update(_old_os_environ) # then restore previous environment
@contextmanager
def patch_environment(**kwargs):
"""
A context manager that will add each keyword argument passed to `os.environ` and remove them when exiting.
Will convert the values in `kwargs` to strings and upper-case all the keys.
Example:
```python
>>> import os
>>> from accelerate.utils import patch_environment
>>> with patch_environment(FOO="bar"):
... print(os.environ["FOO"]) # prints "bar"
>>> print(os.environ["FOO"]) # raises KeyError
```
"""
existing_vars = {}
for key, value in kwargs.items():
key = key.upper()
if key in os.environ:
existing_vars[key] = os.environ[key]
os.environ[key] = str(value)
try:
yield
finally:
for key in kwargs:
key = key.upper()
if key in existing_vars:
# restore previous value
os.environ[key] = existing_vars[key]
else:
os.environ.pop(key, None)
def purge_accelerate_environment(func_or_cls):
"""Decorator to clean up accelerate environment variables set by the decorated class or function.
In some circumstances, calling certain classes or functions can result in accelerate env vars being set and not
being cleaned up afterwards. As an example, when calling:
TrainingArguments(fp16=True, ...)
The following env var will be set:
ACCELERATE_MIXED_PRECISION=fp16
This can affect subsequent code, since the env var takes precedence over TrainingArguments(fp16=False). This is
especially relevant for unit testing, where we want to avoid the individual tests to have side effects on one
another. Decorate the unit test function or whole class with this decorator to ensure that after each test, the env
vars are cleaned up. This works for both unittest.TestCase and normal classes (pytest); it also works when
decorating the parent class.
"""
prefix = "ACCELERATE_"
@contextmanager
def env_var_context():
# Store existing accelerate env vars
existing_vars = {k: v for k, v in os.environ.items() if k.startswith(prefix)}
try:
yield
finally:
# Restore original env vars or remove new ones
for key in [k for k in os.environ if k.startswith(prefix)]:
if key in existing_vars:
os.environ[key] = existing_vars[key]
else:
os.environ.pop(key, None)
def wrap_function(func):
@wraps(func)
def wrapper(*args, **kwargs):
with env_var_context():
return func(*args, **kwargs)
wrapper._accelerate_is_purged_environment_wrapped = True
return wrapper
if not isinstance(func_or_cls, type):
return wrap_function(func_or_cls)
# Handle classes by wrapping test methods
def wrap_test_methods(test_class_instance):
for name in dir(test_class_instance):
if name.startswith("test"):
method = getattr(test_class_instance, name)
if callable(method) and not hasattr(method, "_accelerate_is_purged_environment_wrapped"):
setattr(test_class_instance, name, wrap_function(method))
return test_class_instance
# Handle inheritance
wrap_test_methods(func_or_cls)
func_or_cls.__init_subclass__ = classmethod(lambda cls, **kw: wrap_test_methods(cls))
return func_or_cls
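# Minimal usage sketch (hypothetical test class, for illustration only):
#
# @purge_accelerate_environment
# class MyTrainingTests(unittest.TestCase):
#     def test_fp16_args(self):
#         TrainingArguments(fp16=True)  # sets ACCELERATE_MIXED_PRECISION=fp16
#
# After each test method runs, any ACCELERATE_* variables it introduced are removed and
# pre-existing ones are restored, so tests do not leak state into one another.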
| accelerate/src/accelerate/utils/environment.py/0 | {
"file_path": "accelerate/src/accelerate/utils/environment.py",
"repo_id": "accelerate",
"token_count": 5668
} |
{
"fp16": {
"enabled": "auto",
"loss_scale": 0,
"loss_scale_window": 1000,
"initial_scale_power": 16,
"hysteresis": 2,
"min_loss_scale": 1
},
"bf16": {
"enabled": "auto"
},
"optimizer": {
"type": "AdamW",
"params": {
"lr": "auto",
"weight_decay": "auto",
"torch_adam": true,
"adam_w_mode": true
}
},
"scheduler": {
"type": "WarmupLR",
"params": {
"warmup_min_lr": "auto",
"warmup_max_lr": "auto",
"warmup_num_steps": "auto"
}
},
"zero_optimization": {
"stage": 2,
"offload_optimizer": {
"device": "cpu",
"pin_memory": true
},
"allgather_partitions": true,
"allgather_bucket_size": 2e8,
"overlap_comm": true,
"reduce_scatter": true,
"reduce_bucket_size": "auto",
"contiguous_gradients": true
},
"gradient_accumulation_steps": 1,
"gradient_clipping": "auto",
"steps_per_print": 2000,
"train_batch_size": "auto",
"train_micro_batch_size_per_gpu": "auto",
"wall_clock_breakdown": false
} | accelerate/tests/deepspeed/ds_config_zero2.json/0 | {
"file_path": "accelerate/tests/deepspeed/ds_config_zero2.json",
"repo_id": "accelerate",
"token_count": 680
} |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import tempfile
import unittest
import torch
import torch.nn as nn
from accelerate import Accelerator, init_empty_weights
from accelerate.test_utils import (
require_bnb,
require_cuda_or_xpu,
require_huggingface_suite,
require_multi_device,
require_non_torch_xla,
slow,
)
from accelerate.utils.bnb import load_and_quantize_model
from accelerate.utils.dataclasses import BnbQuantizationConfig
from accelerate.utils.memory import clear_device_cache
class BitsAndBytesConfigIntegration(unittest.TestCase):
def test_BnbQuantizationConfig(self):
with self.assertRaises(ValueError):
BnbQuantizationConfig(load_in_8bit=True, load_in_4bit=True)
@require_non_torch_xla
@slow
@require_cuda_or_xpu
@require_bnb
@require_huggingface_suite
class MixedInt8EmptyModelTest(unittest.TestCase):
# We keep the constants inside the init function and model loading inside setUp function
    # We need to test on relatively large models (i.e. >1b parameters), otherwise the quantization may not work as expected
    # Therefore here we use only bloom-1b7 to test our module
model_name = "marcsun13/bloom-1b7_with_lm_head"
# Constant values
# This was obtained on a Quadro RTX 8000 so the number might slightly change
EXPECTED_RELATIVE_DIFFERENCE = 1.540025
input_text = "Hello my name is"
EXPECTED_OUTPUT = "Hello my name is John.\nI am a friend of the family.\n"
MAX_NEW_TOKENS = 10
def setUp(self):
"""
Setup quantized model from empty model
"""
from huggingface_hub import hf_hub_download
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer
# Models and tokenizer
self.model_fp16 = AutoModelForCausalLM.from_pretrained(
self.model_name, torch_dtype=torch.float16, device_map="auto"
)
# create model on meta device
with init_empty_weights():
self.model_8bit = AutoModelForCausalLM.from_config(AutoConfig.from_pretrained(self.model_name))
self.model_8bit.tie_weights()
self.weights_location = hf_hub_download(self.model_name, "pytorch_model.bin")
self.bnb_quantization_config = BnbQuantizationConfig(load_in_8bit=True)
self.model_8bit = load_and_quantize_model(
self.model_8bit,
self.bnb_quantization_config,
weights_location=self.weights_location,
device_map={"": 0},
no_split_module_classes=["BloomBlock"],
)
self.tokenizer = AutoTokenizer.from_pretrained("bigscience/bloom-1b7")
self.accelerate = Accelerator()
def tearDown(self):
r"""
TearDown function needs to be called at the end of each test to free the GPU memory and cache, also to
avoid unexpected behaviors. Please see: https://discuss.pytorch.org/t/how-can-we-release-gpu-memory-cache/14530/27
"""
del self.model_fp16
del self.model_8bit
clear_device_cache(garbage_collection=True)
def test_memory_footprint(self):
r"""
A simple test to check if the model conversion has been done correctly by checking on the
memory footprint of the converted model and the class type of the linear layers of the converted models
"""
from bitsandbytes.nn import Int8Params
mem_fp16 = self.model_fp16.get_memory_footprint()
mem_8bit = self.model_8bit.get_memory_footprint()
assert round((mem_fp16 / mem_8bit) - self.EXPECTED_RELATIVE_DIFFERENCE, 7) >= 0
assert self.model_8bit.transformer.h[0].mlp.dense_4h_to_h.weight.__class__ == Int8Params
def test_linear_are_8bit(self):
r"""
        A simple test to check if the model conversion has been done correctly by checking that the
        weights of the converted linear layers (except the skipped and fp32 modules) are in `int8`
"""
self.model_fp16.get_memory_footprint()
self.model_8bit.get_memory_footprint()
for name, module in self.model_8bit.named_modules():
if isinstance(module, torch.nn.Linear):
modules_not_converted = (
self.bnb_quantization_config.keep_in_fp32_modules + self.bnb_quantization_config.skip_modules
)
if name not in modules_not_converted:
assert module.weight.dtype == torch.int8
def test_llm_skip(self):
r"""
A simple test to check if `llm_int8_skip_modules` works as expected
"""
import bitsandbytes as bnb
from transformers import AutoConfig, AutoModelForCausalLM
bnb_quantization_config = BnbQuantizationConfig(
load_in_8bit=True, skip_modules=["lm_head", "transformer.word_embeddings"]
)
with init_empty_weights():
model = AutoModelForCausalLM.from_config(AutoConfig.from_pretrained(self.model_name))
model.tie_weights()
model = load_and_quantize_model(
model,
bnb_quantization_config,
weights_location=self.weights_location,
device_map="auto",
no_split_module_classes=["BloomBlock"],
)
assert model.transformer.h[1].mlp.dense_4h_to_h.weight.dtype == torch.int8
assert isinstance(model.transformer.h[1].mlp.dense_4h_to_h, bnb.nn.Linear8bitLt)
assert isinstance(model.lm_head, nn.Linear)
assert model.lm_head.weight.dtype != torch.int8
def check_inference_correctness(self, model):
r"""
Test the generation quality of the quantized model and see that we are matching the expected output.
Given that we are operating on small numbers + the testing model is relatively small, we might not get
        the same output across GPUs. So we'll generate a few tokens (5-10) and check their output.
"""
# Check that inference pass works on the model
encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
# Check the exactness of the results
output_parallel = model.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)
# Get the generation
output_text = self.tokenizer.decode(output_parallel[0], skip_special_tokens=True)
assert output_text == self.EXPECTED_OUTPUT
def test_generate_quality(self):
self.check_inference_correctness(self.model_8bit)
def test_fp32_8bit_conversion(self):
r"""
Test whether it is possible to mix both `8bit` and `fp32` weights when using `keep_in_fp32_modules` correctly.
"""
from transformers import AutoConfig, AutoModelForCausalLM
bnb_quantization_config = BnbQuantizationConfig(load_in_8bit=True, keep_in_fp32_modules=["lm_head"])
with init_empty_weights():
model = AutoModelForCausalLM.from_config(AutoConfig.from_pretrained(self.model_name))
model.tie_weights()
model = load_and_quantize_model(
model,
bnb_quantization_config,
weights_location=self.weights_location,
device_map="auto",
no_split_module_classes=["BloomBlock"],
)
assert model.lm_head.weight.dtype == torch.float32
@require_multi_device
def test_cpu_gpu_loading_custom_device_map(self):
from bitsandbytes.nn import Int8Params
from transformers import AutoConfig, AutoModelForCausalLM
r"""
        A test to check if dispatching a model on CPU & GPU works correctly using a custom `device_map`.
"""
device_map = {
"transformer.word_embeddings": "cpu",
"transformer.word_embeddings_layernorm": 0,
"lm_head": "cpu",
"transformer.h.0": "cpu",
"transformer.h.1": "cpu",
"transformer.h.2": "cpu",
"transformer.h.3": 0,
"transformer.h.4": 0,
"transformer.h.5": 0,
"transformer.h.6": 0,
"transformer.h.7": 0,
"transformer.h.8": 0,
"transformer.h.9": 1,
"transformer.h.10": 0,
"transformer.h.11": 1,
"transformer.h.12": 0,
"transformer.h.13": 0,
"transformer.h.14": 1,
"transformer.h.15": 0,
"transformer.h.16": 0,
"transformer.h.17": 1,
"transformer.h.18": 1,
"transformer.h.19": 0,
"transformer.h.20": 1,
"transformer.h.21": 1,
"transformer.h.22": 0,
"transformer.h.23": 0,
"transformer.ln_f": 1,
}
bnb_quantization_config = BnbQuantizationConfig(load_in_8bit=True)
with init_empty_weights():
model_8bit = AutoModelForCausalLM.from_config(AutoConfig.from_pretrained(self.model_name))
model_8bit.tie_weights()
model_8bit = load_and_quantize_model(
model_8bit,
bnb_quantization_config,
weights_location=self.weights_location,
device_map=device_map,
no_split_module_classes=["BloomBlock"],
)
assert model_8bit.transformer.h[0].mlp.dense_4h_to_h.weight.__class__ == Int8Params
assert model_8bit.transformer.h[1].mlp.dense_4h_to_h.weight.__class__ == Int8Params
self.check_inference_correctness(model_8bit)
@require_multi_device
def test_cpu_gpu_loading_custom_device_map_offload_state_dict(self):
from bitsandbytes.nn import Int8Params
from transformers import AutoConfig, AutoModelForCausalLM
r"""
        A test to check if dispatching a model on CPU & GPU works correctly using a custom `device_map` and `offload_state_dict=True`.
"""
device_map = {
"transformer.word_embeddings": "cpu",
"transformer.word_embeddings_layernorm": 0,
"lm_head": "cpu",
"transformer.h.0": "cpu",
"transformer.h.1": "cpu",
"transformer.h.2": "cpu",
"transformer.h.3": 0,
"transformer.h.4": 0,
"transformer.h.5": 0,
"transformer.h.6": 0,
"transformer.h.7": 0,
"transformer.h.8": 0,
"transformer.h.9": 1,
"transformer.h.10": 0,
"transformer.h.11": 1,
"transformer.h.12": 0,
"transformer.h.13": 0,
"transformer.h.14": 1,
"transformer.h.15": 0,
"transformer.h.16": 0,
"transformer.h.17": 1,
"transformer.h.18": 1,
"transformer.h.19": 0,
"transformer.h.20": 1,
"transformer.h.21": 1,
"transformer.h.22": 0,
"transformer.h.23": 0,
"transformer.ln_f": 1,
}
bnb_quantization_config = BnbQuantizationConfig(load_in_8bit=True)
with init_empty_weights():
model_8bit = AutoModelForCausalLM.from_config(AutoConfig.from_pretrained(self.model_name))
model_8bit.tie_weights()
model_8bit = load_and_quantize_model(
model_8bit,
bnb_quantization_config,
weights_location=self.weights_location,
device_map=device_map,
no_split_module_classes=["BloomBlock"],
offload_state_dict=True,
)
assert model_8bit.transformer.h[0].mlp.dense_4h_to_h.weight.__class__ == Int8Params
assert model_8bit.transformer.h[1].mlp.dense_4h_to_h.weight.__class__ == Int8Params
self.check_inference_correctness(model_8bit)
@require_multi_device
def test_cpu_gpu_disk_loading_custom_device_map_kwargs(self):
from bitsandbytes.nn import Int8Params
from transformers import AutoConfig, AutoModelForCausalLM
r"""
        A test to check if dispatching a model on CPU & GPU works correctly using a custom `device_map`.
        This time we also add `disk` to the `device_map` - using the kwargs directly instead of the quantization config.
"""
device_map = {
"transformer.word_embeddings": "cpu",
"transformer.word_embeddings_layernorm": 0,
"lm_head": "cpu",
"transformer.h.0": "cpu",
"transformer.h.1": "cpu",
"transformer.h.2": "cpu",
"transformer.h.3": "disk",
"transformer.h.4": "disk",
"transformer.h.5": "disk",
"transformer.h.6": 0,
"transformer.h.7": 0,
"transformer.h.8": 0,
"transformer.h.9": 1,
"transformer.h.10": 0,
"transformer.h.11": 1,
"transformer.h.12": 0,
"transformer.h.13": 0,
"transformer.h.14": 1,
"transformer.h.15": 0,
"transformer.h.16": 0,
"transformer.h.17": 1,
"transformer.h.18": 1,
"transformer.h.19": 0,
"transformer.h.20": 1,
"transformer.h.21": 1,
"transformer.h.22": 0,
"transformer.h.23": 0,
"transformer.ln_f": 1,
}
bnb_quantization_config = BnbQuantizationConfig(load_in_8bit=True)
with init_empty_weights():
model_8bit = AutoModelForCausalLM.from_config(AutoConfig.from_pretrained(self.model_name))
model_8bit.tie_weights()
with tempfile.TemporaryDirectory() as tmpdirname:
model_8bit = load_and_quantize_model(
model_8bit,
bnb_quantization_config,
weights_location=self.weights_location,
device_map=device_map,
no_split_module_classes=["BloomBlock"],
offload_folder=tmpdirname,
offload_state_dict=True,
)
assert model_8bit.transformer.h[4].mlp.dense_4h_to_h.weight.__class__ == Int8Params
assert model_8bit.transformer.h[5].mlp.dense_4h_to_h.weight.__class__ == Int8Params
self.check_inference_correctness(model_8bit)
def test_int8_serialization(self):
r"""
Test whether it is possible to serialize a model in 8-bit.
"""
from bitsandbytes.nn import Int8Params
from transformers import AutoConfig, AutoModelForCausalLM
with tempfile.TemporaryDirectory() as tmpdirname:
            # saving the state dict for now but will save the config and other files in the future
self.accelerate.save_model(self.model_8bit, tmpdirname)
with init_empty_weights():
# let's suppose that we can get the right config
model_8bit_from_saved = AutoModelForCausalLM.from_config(AutoConfig.from_pretrained(self.model_name))
model_8bit_from_saved.tie_weights()
bnb_quantization_config = BnbQuantizationConfig(load_in_8bit=True)
model_8bit_from_saved = load_and_quantize_model(
model_8bit_from_saved,
bnb_quantization_config,
weights_location=tmpdirname,
device_map="auto",
no_split_module_classes=["BloomBlock"],
)
assert model_8bit_from_saved.transformer.h[0].mlp.dense_4h_to_h.weight.__class__ == Int8Params
assert hasattr(model_8bit_from_saved.transformer.h[0].mlp.dense_4h_to_h.weight, "SCB")
assert hasattr(model_8bit_from_saved.transformer.h[0].mlp.dense_4h_to_h.weight, "CB")
self.check_inference_correctness(model_8bit_from_saved)
@require_multi_device
def test_int8_serialization_offload(self):
r"""
Test whether it is possible to serialize a model in 8-bit and offload weights to cpu/disk
"""
from bitsandbytes.nn import Int8Params
from transformers import AutoConfig, AutoModelForCausalLM
with tempfile.TemporaryDirectory() as tmpdirname:
            # saving the state dict for now but will save the config and other files in the future
self.accelerate.save_model(self.model_8bit, tmpdirname)
with init_empty_weights():
# let's suppose that we can get the right config
model_8bit_from_saved = AutoModelForCausalLM.from_config(AutoConfig.from_pretrained(self.model_name))
model_8bit_from_saved.tie_weights()
bnb_quantization_config = BnbQuantizationConfig(load_in_8bit=True)
device_map = {
"transformer.word_embeddings": "cpu",
"transformer.word_embeddings_layernorm": 0,
"lm_head": "cpu",
"transformer.h.0": "cpu",
"transformer.h.1": "cpu",
"transformer.h.2": "cpu",
"transformer.h.3": "disk",
"transformer.h.4": "disk",
"transformer.h.5": "disk",
"transformer.h.6": 0,
"transformer.h.7": 0,
"transformer.h.8": 0,
"transformer.h.9": 1,
"transformer.h.10": 0,
"transformer.h.11": 1,
"transformer.h.12": 0,
"transformer.h.13": 0,
"transformer.h.14": 1,
"transformer.h.15": 0,
"transformer.h.16": 0,
"transformer.h.17": 1,
"transformer.h.18": 1,
"transformer.h.19": 0,
"transformer.h.20": 1,
"transformer.h.21": 1,
"transformer.h.22": 0,
"transformer.h.23": 0,
"transformer.ln_f": 1,
}
model_8bit_from_saved = load_and_quantize_model(
model_8bit_from_saved,
bnb_quantization_config,
weights_location=tmpdirname,
device_map=device_map,
no_split_module_classes=["BloomBlock"],
offload_folder=tmpdirname + "/tmp",
offload_state_dict=True,
)
assert model_8bit_from_saved.transformer.h[4].mlp.dense_4h_to_h.weight.__class__ == Int8Params
assert model_8bit_from_saved.transformer.h[5].mlp.dense_4h_to_h.weight.__class__ == Int8Params
self.check_inference_correctness(model_8bit_from_saved)
def test_int8_serialization_shard(self):
r"""
Test whether it is possible to serialize a model in 8-bit.
"""
from bitsandbytes.nn import Int8Params
from transformers import AutoConfig, AutoModelForCausalLM
with tempfile.TemporaryDirectory() as tmpdirname:
            # saving the state dict for now but will save the config and other files in the future
self.accelerate.save_model(self.model_8bit, tmpdirname, max_shard_size="1GB")
with init_empty_weights():
# let's suppose that we can get the right config
model_8bit_from_saved = AutoModelForCausalLM.from_config(AutoConfig.from_pretrained(self.model_name))
model_8bit_from_saved.tie_weights()
bnb_quantization_config = BnbQuantizationConfig(load_in_8bit=True)
model_8bit_from_saved = load_and_quantize_model(
model_8bit_from_saved,
bnb_quantization_config,
weights_location=tmpdirname,
device_map="auto",
no_split_module_classes=["BloomBlock"],
)
assert model_8bit_from_saved.transformer.h[0].mlp.dense_4h_to_h.weight.__class__ == Int8Params
assert hasattr(model_8bit_from_saved.transformer.h[0].mlp.dense_4h_to_h.weight, "SCB")
assert hasattr(model_8bit_from_saved.transformer.h[0].mlp.dense_4h_to_h.weight, "CB")
self.check_inference_correctness(model_8bit_from_saved)
@require_non_torch_xla
@slow
@require_cuda_or_xpu
@require_bnb
@require_huggingface_suite
class MixedInt8LoadedModelTest(unittest.TestCase):
    # We keep the constants as class attributes and do the model loading inside the setUp function.
    # We need to test on relatively large models (i.e. >1b parameters), otherwise the quantization may not work as expected.
    # Therefore here we use only bloom-1b7 to test our module.
model_name = "marcsun13/bloom-1b7_with_lm_head"
# Constant values
# This was obtained on a Quadro RTX 8000 so the number might slightly change
EXPECTED_RELATIVE_DIFFERENCE = 1.540025
input_text = "Hello my name is"
EXPECTED_OUTPUT = "Hello my name is John.\nI am a friend of the family.\n"
MAX_NEW_TOKENS = 10
def setUp(self):
"""
Setup quantized model from loaded model
"""
from transformers import AutoModelForCausalLM, AutoTokenizer
# Models and tokenizer
self.model_fp16 = AutoModelForCausalLM.from_pretrained(
self.model_name, torch_dtype=torch.float16, device_map="auto"
)
self.bnb_quantization_config = BnbQuantizationConfig(load_in_8bit=True)
self.model_8bit = AutoModelForCausalLM.from_pretrained(self.model_name, torch_dtype=torch.float16)
self.model_8bit = load_and_quantize_model(self.model_8bit, self.bnb_quantization_config)
self.tokenizer = AutoTokenizer.from_pretrained("bigscience/bloom-1b7")
def tearDown(self):
r"""
TearDown function needs to be called at the end of each test to free the GPU memory and cache, also to
avoid unexpected behaviors. Please see: https://discuss.pytorch.org/t/how-can-we-release-gpu-memory-cache/14530/27
"""
del self.model_fp16
del self.model_8bit
gc.collect()
torch.cuda.empty_cache()
def test_memory_footprint(self):
r"""
A simple test to check if the model conversion has been done correctly by checking on the
memory footprint of the converted model and the class type of the linear layers of the converted models
"""
from bitsandbytes.nn import Int8Params
mem_fp16 = self.model_fp16.get_memory_footprint()
mem_8bit = self.model_8bit.get_memory_footprint()
assert round((mem_fp16 / mem_8bit) - self.EXPECTED_RELATIVE_DIFFERENCE, 7) >= 0
assert self.model_8bit.transformer.h[0].mlp.dense_4h_to_h.weight.__class__ == Int8Params
def test_linear_are_8bit(self):
r"""
        A simple test to check if the model conversion has been done correctly by checking that the
        weights of the converted linear layers (except the skipped and fp32 modules) are in `int8`
"""
self.model_fp16.get_memory_footprint()
self.model_8bit.get_memory_footprint()
for name, module in self.model_8bit.named_modules():
if isinstance(module, torch.nn.Linear):
modules_not_converted = (
self.bnb_quantization_config.keep_in_fp32_modules + self.bnb_quantization_config.skip_modules
)
if name not in modules_not_converted:
assert module.weight.dtype == torch.int8
def test_generate_quality(self):
r"""
Test the generation quality of the quantized model and see that we are matching the expected output.
Given that we are operating on small numbers + the testing model is relatively small, we might not get
        the same output across GPUs. So we'll generate a few tokens (5-10) and check their output.
"""
encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
output_sequences = self.model_8bit.generate(
input_ids=encoded_input["input_ids"].to(self.model_8bit.device), max_new_tokens=10
)
assert self.tokenizer.decode(output_sequences[0], skip_special_tokens=True) == self.EXPECTED_OUTPUT
def test_fp32_8bit_conversion(self):
r"""
Test whether it is possible to mix both `8bit` and `fp32` weights when using `keep_in_fp32_modules` correctly.
"""
from transformers import AutoModelForCausalLM
bnb_quantization_config = BnbQuantizationConfig(load_in_8bit=True, keep_in_fp32_modules=["lm_head"])
model = AutoModelForCausalLM.from_pretrained(self.model_name, torch_dtype=torch.float16)
model = load_and_quantize_model(model, bnb_quantization_config)
assert model.lm_head.weight.dtype == torch.float32
@require_non_torch_xla
@slow
@require_cuda_or_xpu
@require_bnb
@require_huggingface_suite
class Bnb4BitEmptyModelTest(unittest.TestCase):
    # We keep the constants as class attributes and do the model loading inside the setUp function.
    # We need to test on relatively large models (i.e. >1b parameters), otherwise the quantization may not work as expected.
    # Therefore here we use only bloom-1b7 to test our module.
model_name = "marcsun13/bloom-1b7_with_lm_head"
# Constant values
# This was obtained on a RTX Titan so the number might slightly change
EXPECTED_RELATIVE_DIFFERENCE = 2.109659552692574
input_text = "Hello my name is"
EXPECTED_OUTPUTS = set()
EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I")
EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n")
MAX_NEW_TOKENS = 10
def setUp(self):
from huggingface_hub import hf_hub_download
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer
super().setUp()
# Models and tokenizer
self.model_fp16 = AutoModelForCausalLM.from_pretrained(
self.model_name, torch_dtype=torch.float16, device_map="auto"
)
# create model on meta device
with init_empty_weights():
self.model_4bit = AutoModelForCausalLM.from_config(AutoConfig.from_pretrained(self.model_name))
self.model_4bit.tie_weights()
self.weights_location = hf_hub_download(self.model_name, "pytorch_model.bin")
self.bnb_quantization_config = BnbQuantizationConfig(load_in_4bit=True)
self.model_4bit = load_and_quantize_model(
self.model_4bit,
self.bnb_quantization_config,
weights_location=self.weights_location,
device_map={"": 0},
no_split_module_classes=["BloomBlock"],
)
self.tokenizer = AutoTokenizer.from_pretrained("bigscience/bloom-1b7")
def tearDown(self):
"""
TearDown function needs to be called at the end of each test to free the GPU memory and cache, also to
avoid unexpected behaviors. Please see: https://discuss.pytorch.org/t/how-can-we-release-gpu-memory-cache/14530/27
"""
super().tearDown()
del self.model_fp16
del self.model_4bit
gc.collect()
torch.cuda.empty_cache()
def test_memory_footprint(self):
r"""
A simple test to check if the model conversion has been done correctly by checking on the
memory footprint of the converted model and the class type of the linear layers of the converted models
"""
from bitsandbytes.nn import Params4bit
mem_fp16 = self.model_fp16.get_memory_footprint()
mem_4bit = self.model_4bit.get_memory_footprint()
assert round((mem_fp16 / mem_4bit) - self.EXPECTED_RELATIVE_DIFFERENCE, 7) >= 0
assert self.model_4bit.transformer.h[0].mlp.dense_4h_to_h.weight.__class__ == Params4bit
def check_inference_correctness(self, model):
r"""
Test the generation quality of the quantized model and see that we are matching the expected output.
Given that we are operating on small numbers + the testing model is relatively small, we might not get
        the same output across GPUs. So we'll generate a few tokens (5-10) and check their output.
"""
# Check that inference pass works on the model
encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
# Check the exactness of the results
output_sequences = model.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)
assert self.tokenizer.decode(output_sequences[0], skip_special_tokens=True) in self.EXPECTED_OUTPUTS
def test_generate_quality(self):
self.check_inference_correctness(self.model_4bit)
def test_linear_are_4bit(self):
r"""
        A simple test to check if the model conversion has been done correctly by checking that the
        weights of the converted linear layers (except the skipped and fp32 modules) are packed in `uint8`
"""
self.model_fp16.get_memory_footprint()
self.model_4bit.get_memory_footprint()
for name, module in self.model_4bit.named_modules():
if isinstance(module, torch.nn.Linear):
if (
name
not in self.bnb_quantization_config.keep_in_fp32_modules
+ self.bnb_quantization_config.skip_modules
):
# 4-bit parameters are packed in uint8 variables
assert module.weight.dtype == torch.uint8
def test_fp32_4bit_conversion(self):
r"""
Test whether it is possible to mix both `4bit` and `fp32` weights when using `keep_in_fp32_modules` correctly.
"""
from transformers import AutoConfig, AutoModelForCausalLM
bnb_quantization_config = BnbQuantizationConfig(load_in_4bit=True, keep_in_fp32_modules=["lm_head"])
with init_empty_weights():
model = AutoModelForCausalLM.from_config(AutoConfig.from_pretrained(self.model_name))
model.tie_weights()
model = load_and_quantize_model(
model,
bnb_quantization_config,
weights_location=self.weights_location,
device_map="auto",
no_split_module_classes=["BloomBlock"],
)
assert model.lm_head.weight.dtype == torch.float32
@require_multi_device
def test_cpu_gpu_loading_random_device_map(self):
from transformers import AutoConfig, AutoModelForCausalLM
r"""
        A test to check if dispatching a model on CPU & GPU works correctly using a random `device_map`.
"""
device_map = {
"transformer.word_embeddings": "cpu",
"transformer.word_embeddings_layernorm": 0,
"lm_head": "cpu",
"transformer.h.0": 0,
"transformer.h.1": 0,
"transformer.h.2": 0,
"transformer.h.3": 0,
"transformer.h.4": 0,
"transformer.h.5": 0,
"transformer.h.6": 0,
"transformer.h.7": 0,
"transformer.h.8": 0,
"transformer.h.9": 1,
"transformer.h.10": 0,
"transformer.h.11": 1,
"transformer.h.12": 0,
"transformer.h.13": 0,
"transformer.h.14": 1,
"transformer.h.15": 0,
"transformer.h.16": 0,
"transformer.h.17": 1,
"transformer.h.18": 1,
"transformer.h.19": 0,
"transformer.h.20": 1,
"transformer.h.21": 1,
"transformer.h.22": 0,
"transformer.h.23": 0,
"transformer.ln_f": 1,
}
bnb_quantization_config = BnbQuantizationConfig(load_in_4bit=True)
with init_empty_weights():
model_4bit = AutoModelForCausalLM.from_config(AutoConfig.from_pretrained(self.model_name))
model_4bit.tie_weights()
model_4bit = load_and_quantize_model(
model_4bit,
bnb_quantization_config,
weights_location=self.weights_location,
device_map=device_map,
no_split_module_classes=["BloomBlock"],
)
self.check_inference_correctness(model_4bit)
@require_multi_device
def test_cpu_gpu_loading_custom_device_map(self):
from transformers import AutoConfig, AutoModelForCausalLM
r"""
        A test to check if dispatching a model on CPU & GPU works correctly using a custom `device_map`.
"""
device_map = {
"transformer.word_embeddings": "cpu",
"transformer.word_embeddings_layernorm": "cpu",
"lm_head": "cpu",
"transformer.h": 0,
"transformer.ln_f": 1,
}
bnb_quantization_config = BnbQuantizationConfig(load_in_4bit=True)
with init_empty_weights():
model_4bit = AutoModelForCausalLM.from_config(AutoConfig.from_pretrained(self.model_name))
model_4bit.tie_weights()
model_4bit = load_and_quantize_model(
model_4bit,
bnb_quantization_config,
weights_location=self.weights_location,
device_map=device_map,
no_split_module_classes=["BloomBlock"],
)
self.check_inference_correctness(model_4bit)
@require_multi_device
def test_cpu_gpu_disk_loading_custom_device_map_kwargs(self):
from transformers import AutoConfig, AutoModelForCausalLM
r"""
        A test to check if dispatching a model on CPU & GPU works correctly using a custom `device_map`.
        This time we also add `disk` to the `device_map` - using the kwargs directly instead of the quantization config.
"""
device_map = {
"transformer.word_embeddings": 0,
"transformer.word_embeddings_layernorm": "disk",
"lm_head": 0,
"transformer.h": 1,
"transformer.ln_f": "cpu",
}
bnb_quantization_config = BnbQuantizationConfig(load_in_4bit=True)
with init_empty_weights():
model_4bit = AutoModelForCausalLM.from_config(AutoConfig.from_pretrained(self.model_name))
model_4bit.tie_weights()
with tempfile.TemporaryDirectory() as tmpdirname:
model_4bit = load_and_quantize_model(
model_4bit,
bnb_quantization_config,
weights_location=self.weights_location,
device_map=device_map,
no_split_module_classes=["BloomBlock"],
offload_folder=tmpdirname,
offload_state_dict=True,
)
self.check_inference_correctness(model_4bit)
@require_non_torch_xla
@slow
@require_cuda_or_xpu
@require_bnb
@require_huggingface_suite
class Bnb4BitTestLoadedModel(unittest.TestCase):
    # We keep the constants as class attributes and do the model loading inside the setUp function.
    # We need to test on relatively large models (i.e. >1b parameters), otherwise the quantization may not work as expected.
    # Therefore here we use only bloom-1b7 to test our module.
model_name = "marcsun13/bloom-1b7_with_lm_head"
# Constant values
# This was obtained on a RTX Titan so the number might slightly change
EXPECTED_RELATIVE_DIFFERENCE = 2.109659552692574
input_text = "Hello my name is"
EXPECTED_OUTPUTS = set()
EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I")
EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n")
MAX_NEW_TOKENS = 10
def setUp(self):
"""
Setup quantized model from loaded model
"""
from transformers import AutoModelForCausalLM, AutoTokenizer
super().setUp()
# Models and tokenizer
self.model_fp16 = AutoModelForCausalLM.from_pretrained(
self.model_name, torch_dtype=torch.float16, device_map="auto"
)
self.bnb_quantization_config = BnbQuantizationConfig(load_in_4bit=True)
self.model_4bit = AutoModelForCausalLM.from_pretrained(self.model_name, torch_dtype=torch.float16)
self.model_4bit = load_and_quantize_model(self.model_4bit, self.bnb_quantization_config)
self.tokenizer = AutoTokenizer.from_pretrained("bigscience/bloom-1b7")
def tearDown(self):
"""
TearDown function needs to be called at the end of each test to free the GPU memory and cache, also to
avoid unexpected behaviors. Please see: https://discuss.pytorch.org/t/how-can-we-release-gpu-memory-cache/14530/27
"""
super().tearDown()
del self.model_fp16
del self.model_4bit
clear_device_cache(garbage_collection=True)
def test_memory_footprint(self):
r"""
A simple test to check if the model conversion has been done correctly by checking on the
memory footprint of the converted model and the class type of the linear layers of the converted models
"""
from bitsandbytes.nn import Params4bit
mem_fp16 = self.model_fp16.get_memory_footprint()
mem_4bit = self.model_4bit.get_memory_footprint()
assert round((mem_fp16 / mem_4bit) - self.EXPECTED_RELATIVE_DIFFERENCE, 7) >= 0
assert self.model_4bit.transformer.h[0].mlp.dense_4h_to_h.weight.__class__ == Params4bit
def test_linear_are_4bit(self):
r"""
        A simple test to check if the model conversion has been done correctly by checking that the
        weights of the converted linear layers (except the skipped and fp32 modules) are packed in `uint8`
"""
self.model_fp16.get_memory_footprint()
self.model_4bit.get_memory_footprint()
for name, module in self.model_4bit.named_modules():
if isinstance(module, torch.nn.Linear):
if (
name
not in self.bnb_quantization_config.keep_in_fp32_modules
+ self.bnb_quantization_config.skip_modules
):
# 4-bit parameters are packed in uint8 variables
assert module.weight.dtype == torch.uint8
def test_generate_quality(self):
r"""
Test the generation quality of the quantized model and see that we are matching the expected output.
Given that we are operating on small numbers + the testing model is relatively small, we might not get
        the same output across GPUs. So we'll generate a few tokens (5-10) and check their output.
"""
encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
output_sequences = self.model_4bit.generate(
input_ids=encoded_input["input_ids"].to(self.model_4bit.device), max_new_tokens=10
)
assert self.tokenizer.decode(output_sequences[0], skip_special_tokens=True) in self.EXPECTED_OUTPUTS
def test_fp32_4bit_conversion(self):
r"""
Test whether it is possible to mix both `4bit` and `fp32` weights when using `keep_in_fp32_modules` correctly.
"""
from transformers import AutoModelForCausalLM
bnb_quantization_config = BnbQuantizationConfig(load_in_4bit=True, keep_in_fp32_modules=["lm_head"])
model = AutoModelForCausalLM.from_pretrained(self.model_name, torch_dtype=torch.float16)
model = load_and_quantize_model(model, bnb_quantization_config)
assert model.lm_head.weight.dtype == torch.float32
| accelerate/tests/test_quantization.py/0 | {
"file_path": "accelerate/tests/test_quantization.py",
"repo_id": "accelerate",
"token_count": 17702
} |
# Summary
[Introduction](README.md)
# User Guide
- [Installation](guide/installation.md)
- [Hello World - MNIST](guide/hello_world.md)
- [PyTorch cheatsheet](guide/cheatsheet.md)
# Reference Guide
- [Running a model](inference/inference.md)
- [Using the hub](inference/hub.md)
- [Error management](error_manage.md)
- [Training](training/training.md)
- [Simplified](training/simplified.md)
- [MNIST](training/mnist.md)
- [Fine-tuning]()
- [Serialization]()
- [Advanced Cuda usage]()
- [Writing a custom kernel]()
- [Porting a custom kernel]()
- [Using MKL]()
- [Creating apps]()
- [Creating a WASM app]()
- [Creating a REST api webserver]()
- [Creating a desktop Tauri app]()
| candle/candle-book/src/SUMMARY.md/0 | {
"file_path": "candle/candle-book/src/SUMMARY.md",
"repo_id": "candle",
"token_count": 274
} |
use crate::benchmarks::{BenchDevice, BenchDeviceHandler};
use candle_core::{DType, Device, Tensor};
use criterion::{black_box, criterion_group, Criterion, Throughput};
use std::time::Instant;
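// Multiplies `a` by the transpose of `b`; with the shapes used in `run_bench` below this is a (1, 1, k) x (k, n) product.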
fn run(a: &Tensor, b: &Tensor) {
a.matmul(&b.t().unwrap()).unwrap();
}
fn run_bench(c: &mut Criterion, device: &Device) {
let b = 1;
let m = 1;
let n = 2048;
let k = 2048;
let dtype = DType::F32;
let lhs = Tensor::zeros((b, m, k), dtype, device).unwrap();
let rhs = Tensor::zeros((b, n, k), dtype, device).unwrap();
let flops = b * m * n * k;
let mut group = c.benchmark_group(device.bench_name("matmul"));
group.throughput(Throughput::Bytes(flops as u64));
group.bench_function("iter", move |b| {
b.iter_custom(|iters| {
let start = Instant::now();
for _i in 0..iters {
run(black_box(&lhs), black_box(&rhs));
}
device.sync().unwrap();
start.elapsed()
})
});
group.finish();
}
fn criterion_benchmark(c: &mut Criterion) {
let handler = BenchDeviceHandler::new().unwrap();
for device in handler.devices {
run_bench(c, &device);
}
}
criterion_group!(benches, criterion_benchmark);
| candle/candle-core/benches/benchmarks/matmul.rs/0 | {
"file_path": "candle/candle-core/benches/benchmarks/matmul.rs",
"repo_id": "candle",
"token_count": 551
} |
use super::{Cpu, CpuF16};
#[cfg(target_arch = "x86")]
use core::arch::x86::*;
#[cfg(target_arch = "x86_64")]
use core::arch::x86_64::*;
use half::f16;
pub struct CurrentCpu {}
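// Each unrolled step handles STEP f32 lanes, split across ARR accumulator registers of EPR lanes each (a 256-bit __m256 holds 8 f32 values).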
const STEP: usize = 32;
const EPR: usize = 8;
const ARR: usize = STEP / EPR;
impl Cpu<ARR> for CurrentCpu {
type Unit = __m256;
type Array = [__m256; ARR];
const STEP: usize = STEP;
const EPR: usize = EPR;
fn n() -> usize {
ARR
}
unsafe fn zero() -> Self::Unit {
_mm256_setzero_ps()
}
unsafe fn zero_array() -> Self::Array {
[Self::zero(); ARR]
}
unsafe fn from_f32(v: f32) -> Self::Unit {
_mm256_set1_ps(v)
}
unsafe fn load(mem_addr: *const f32) -> Self::Unit {
_mm256_loadu_ps(mem_addr)
}
unsafe fn vec_add(a: Self::Unit, b: Self::Unit) -> Self::Unit {
_mm256_add_ps(a, b)
}
unsafe fn vec_fma(a: Self::Unit, b: Self::Unit, c: Self::Unit) -> Self::Unit {
_mm256_add_ps(_mm256_mul_ps(b, c), a)
}
unsafe fn vec_store(mem_addr: *mut f32, a: Self::Unit) {
_mm256_storeu_ps(mem_addr, a);
}
unsafe fn vec_reduce(mut x: Self::Array, y: *mut f32) {
for i in 0..ARR / 2 {
x[2 * i] = _mm256_add_ps(x[2 * i], x[2 * i + 1]);
}
for i in 0..ARR / 4 {
x[4 * i] = _mm256_add_ps(x[4 * i], x[4 * i + 2]);
}
#[allow(clippy::reversed_empty_ranges)]
for i in 0..ARR / 8 {
x[8 * i] = _mm256_add_ps(x[8 * i], x[8 * i + 4]);
}
let t0 = _mm_add_ps(_mm256_castps256_ps128(x[0]), _mm256_extractf128_ps(x[0], 1));
let t1 = _mm_hadd_ps(t0, t0);
*y = _mm_cvtss_f32(_mm_hadd_ps(t1, t1));
}
}
pub struct CurrentCpuF16 {}
impl CpuF16<ARR> for CurrentCpuF16 {
type Unit = __m256;
type Array = [__m256; ARR];
const STEP: usize = STEP;
const EPR: usize = EPR;
fn n() -> usize {
ARR
}
unsafe fn zero() -> Self::Unit {
_mm256_setzero_ps()
}
unsafe fn zero_array() -> Self::Array {
[Self::zero(); ARR]
}
unsafe fn from_f32(v: f32) -> Self::Unit {
_mm256_set1_ps(v)
}
#[cfg(target_feature = "f16c")]
unsafe fn load(mem_addr: *const f16) -> Self::Unit {
_mm256_cvtph_ps(_mm_loadu_si128(mem_addr as *const __m128i))
}
#[cfg(not(target_feature = "f16c"))]
unsafe fn load(mem_addr: *const f16) -> Self::Unit {
let mut tmp = [0.0f32; 8];
for i in 0..8 {
tmp[i] = (*mem_addr.add(i)).to_f32();
}
_mm256_loadu_ps(tmp.as_ptr())
}
unsafe fn vec_add(a: Self::Unit, b: Self::Unit) -> Self::Unit {
_mm256_add_ps(a, b)
}
unsafe fn vec_fma(a: Self::Unit, b: Self::Unit, c: Self::Unit) -> Self::Unit {
_mm256_add_ps(_mm256_mul_ps(b, c), a)
}
#[cfg(target_feature = "f16c")]
unsafe fn vec_store(mem_addr: *mut f16, a: Self::Unit) {
_mm_storeu_si128(mem_addr as *mut __m128i, _mm256_cvtps_ph(a, 0))
}
#[cfg(not(target_feature = "f16c"))]
unsafe fn vec_store(mem_addr: *mut f16, a: Self::Unit) {
let mut tmp = [0.0f32; 8];
_mm256_storeu_ps(tmp.as_mut_ptr(), a);
for i in 0..8 {
*mem_addr.add(i) = f16::from_f32(tmp[i]);
}
}
unsafe fn vec_reduce(mut x: Self::Array, y: *mut f32) {
let mut offset = ARR >> 1;
for i in 0..offset {
x[i] = _mm256_add_ps(x[i], x[offset + i]);
}
offset >>= 1;
for i in 0..offset {
x[i] = _mm256_add_ps(x[i], x[offset + i]);
}
offset >>= 1;
for i in 0..offset {
x[i] = _mm256_add_ps(x[i], x[offset + i]);
}
let t0 = _mm_add_ps(_mm256_castps256_ps128(x[0]), _mm256_extractf128_ps(x[0], 1));
let t1 = _mm_hadd_ps(t0, t0);
*y = _mm_cvtss_f32(_mm_hadd_ps(t1, t1));
}
}
| candle/candle-core/src/cpu/avx.rs/0 | {
"file_path": "candle/candle-core/src/cpu/avx.rs",
"repo_id": "candle",
"token_count": 2094
} |
//! Types for elements that can be stored and manipulated using tensors.
#![allow(clippy::redundant_closure_call)]
use crate::backend::BackendStorage;
use crate::{CpuStorage, CpuStorageRef, Error, Result};
/// The different types of elements allowed in tensors.
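///
/// For example, a `DType` can be parsed from its string name and reports its element size:
///
/// ```rust
/// use candle_core::DType;
///
/// assert_eq!("bf16".parse::<DType>().unwrap(), DType::BF16);
/// assert_eq!(DType::BF16.size_in_bytes(), 2);
/// ```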
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub enum DType {
    // Unsigned 8-bit integer.
    U8,
    // Unsigned 32-bit integer.
    U32,
    // Signed 64-bit integer.
    I64,
// Brain floating-point using half precision (16 bits).
BF16,
// Floating-point using half precision (16 bits).
F16,
// Floating-point using single precision (32 bits).
F32,
// Floating-point using double precision (64 bits).
F64,
}
#[derive(Debug, PartialEq, Eq)]
pub struct DTypeParseError(String);
impl std::fmt::Display for DTypeParseError {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "cannot parse '{}' as a dtype", self.0)
}
}
impl std::error::Error for DTypeParseError {}
impl std::str::FromStr for DType {
type Err = DTypeParseError;
fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
match s {
"u8" => Ok(Self::U8),
"u32" => Ok(Self::U32),
"i64" => Ok(Self::I64),
"bf16" => Ok(Self::BF16),
"f16" => Ok(Self::F16),
"f32" => Ok(Self::F32),
"f64" => Ok(Self::F64),
_ => Err(DTypeParseError(s.to_string())),
}
}
}
impl DType {
/// String representation for dtypes.
pub fn as_str(&self) -> &'static str {
match self {
Self::U8 => "u8",
Self::U32 => "u32",
Self::I64 => "i64",
Self::BF16 => "bf16",
Self::F16 => "f16",
Self::F32 => "f32",
Self::F64 => "f64",
}
}
/// The size used by each element in bytes, i.e. 1 for `U8`, 4 for `F32`.
pub fn size_in_bytes(&self) -> usize {
match self {
Self::U8 => 1,
Self::U32 => 4,
Self::I64 => 8,
Self::BF16 => 2,
Self::F16 => 2,
Self::F32 => 4,
Self::F64 => 8,
}
}
pub fn is_int(&self) -> bool {
match self {
Self::U8 | Self::U32 | Self::I64 => true,
Self::BF16 | Self::F16 | Self::F32 | Self::F64 => false,
}
}
pub fn is_float(&self) -> bool {
match self {
Self::U8 | Self::U32 | Self::I64 => false,
Self::BF16 | Self::F16 | Self::F32 | Self::F64 => true,
}
}
}
pub trait WithDType:
Sized
+ Copy
+ num_traits::NumAssign
+ std::cmp::PartialOrd
+ std::fmt::Display
+ 'static
+ Send
+ Sync
+ std::any::Any
+ crate::cpu::kernels::VecOps
{
const DTYPE: DType;
fn from_f64(v: f64) -> Self;
fn to_f64(self) -> f64;
fn cpu_storage_ref(data: &[Self]) -> CpuStorageRef<'_>;
fn to_cpu_storage_owned(data: Vec<Self>) -> CpuStorage;
fn to_cpu_storage(data: &[Self]) -> CpuStorage {
Self::to_cpu_storage_owned(data.to_vec())
}
fn cpu_storage_as_slice(s: &CpuStorage) -> Result<&[Self]>;
fn cpu_storage_data(s: CpuStorage) -> Result<Vec<Self>>;
}
macro_rules! with_dtype {
($ty:ty, $dtype:ident, $from_f64:expr, $to_f64:expr) => {
impl WithDType for $ty {
const DTYPE: DType = DType::$dtype;
fn from_f64(v: f64) -> Self {
$from_f64(v)
}
fn to_f64(self) -> f64 {
$to_f64(self)
}
fn cpu_storage_ref(data: &[Self]) -> CpuStorageRef<'_> {
CpuStorageRef::$dtype(data)
}
fn to_cpu_storage_owned(data: Vec<Self>) -> CpuStorage {
CpuStorage::$dtype(data)
}
fn cpu_storage_data(s: CpuStorage) -> Result<Vec<Self>> {
match s {
CpuStorage::$dtype(data) => Ok(data),
_ => Err(Error::UnexpectedDType {
expected: DType::$dtype,
got: s.dtype(),
msg: "unexpected dtype",
}
.bt()),
}
}
fn cpu_storage_as_slice(s: &CpuStorage) -> Result<&[Self]> {
match s {
CpuStorage::$dtype(data) => Ok(data),
_ => Err(Error::UnexpectedDType {
expected: DType::$dtype,
got: s.dtype(),
msg: "unexpected dtype",
}
.bt()),
}
}
}
};
}
use half::{bf16, f16};
with_dtype!(u8, U8, |v: f64| v as u8, |v: u8| v as f64);
with_dtype!(u32, U32, |v: f64| v as u32, |v: u32| v as f64);
with_dtype!(i64, I64, |v: f64| v as i64, |v: i64| v as f64);
with_dtype!(f16, F16, f16::from_f64, f16::to_f64);
with_dtype!(bf16, BF16, bf16::from_f64, bf16::to_f64);
with_dtype!(f32, F32, |v: f64| v as f32, |v: f32| v as f64);
with_dtype!(f64, F64, |v: f64| v, |v: f64| v);
pub trait IntDType: WithDType {
fn is_true(&self) -> bool;
fn as_usize(&self) -> usize;
}
impl IntDType for i64 {
fn is_true(&self) -> bool {
*self != 0
}
fn as_usize(&self) -> usize {
*self as usize
}
}
impl IntDType for u32 {
fn is_true(&self) -> bool {
*self != 0
}
fn as_usize(&self) -> usize {
*self as usize
}
}
impl IntDType for u8 {
fn is_true(&self) -> bool {
*self != 0
}
fn as_usize(&self) -> usize {
*self as usize
}
}
pub trait FloatDType: WithDType {}
impl FloatDType for f16 {}
impl FloatDType for bf16 {}
impl FloatDType for f32 {}
impl FloatDType for f64 {}
| candle/candle-core/src/dtype.rs/0 | {
"file_path": "candle/candle-core/src/dtype.rs",
"repo_id": "candle",
"token_count": 3099
} |
#![allow(unused)]
use super::GgmlDType;
use crate::{Error, MetalDevice, MetalStorage, Result};
pub struct QMetalStorage {
dtype: GgmlDType,
device: MetalDevice,
}
impl QMetalStorage {
pub fn zeros(_: &MetalDevice, _: usize, _: GgmlDType) -> Result<Self> {
Err(Error::NotCompiledWithMetalSupport)
}
pub fn dtype(&self) -> GgmlDType {
self.dtype
}
pub fn device(&self) -> &MetalDevice {
&self.device
}
pub fn dequantize(&self, _elem_count: usize) -> Result<MetalStorage> {
Err(Error::NotCompiledWithMetalSupport)
}
pub fn quantize(&mut self, _src: &MetalStorage) -> Result<()> {
Err(Error::NotCompiledWithMetalSupport)
}
pub fn storage_size_in_bytes(&self) -> usize {
0
}
pub fn fwd(
&self,
_self_shape: &crate::Shape,
_storage: &MetalStorage,
_layout: &crate::Layout,
) -> Result<(MetalStorage, crate::Shape)> {
Err(Error::NotCompiledWithMetalSupport)
}
}
pub fn load_quantized<T: super::GgmlType + Send + Sync + 'static>(
_device: &MetalDevice,
_data: &[T],
) -> Result<super::QStorage> {
Err(Error::NotCompiledWithMetalSupport)
}
| candle/candle-core/src/quantized/dummy_metal.rs/0 | {
"file_path": "candle/candle-core/src/quantized/dummy_metal.rs",
"repo_id": "candle",
"token_count": 522
} |
//! Tensors are N-dimensional matrices of elements using a single data type.
#![allow(clippy::redundant_closure_call)]
use crate::backend::{BackendDevice, BackendStorage};
use crate::op::{BackpropOp, BinaryOp, CmpOp, Op, ReduceOp, UnaryOp};
use crate::scalar::TensorOrScalar;
use crate::shape::{Dim, Dims};
use crate::{bail, storage::Storage, DType, Device, Error, Layout, Result, Shape};
use std::sync::{Arc, RwLock};
/// Unique identifier for tensors.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
pub struct TensorId(usize);
impl TensorId {
fn new() -> Self {
// https://users.rust-lang.org/t/idiomatic-rust-way-to-generate-unique-id/33805
use std::sync::atomic;
static COUNTER: atomic::AtomicUsize = atomic::AtomicUsize::new(1);
Self(COUNTER.fetch_add(1, atomic::Ordering::Relaxed))
}
}
pub struct Tensor_ {
id: TensorId,
    // As we provide inner mutability on the tensor content, the alternatives are:
    // - Using a mutex, this would have the highest cost when retrieving the storage but would
    //   prevent errors when concurrent access takes place. A mutex would also be subject to
    //   deadlocks, for example with the current code if the same tensor is used twice by a single
    //   binary op.
    // - Using a RefCell, this would have some intermediate cost: borrow checking would be
    //   verified dynamically, but the resulting tensors would not be Send or Sync.
    // - Using an UnsafeCell, this would have the lowest cost but undefined behavior on concurrent
    //   accesses.
    // Ideally, we would use Arc<Storage> for tensors on which we don't plan on modifying the data
    // and Arc<Mutex<Storage>> for tensors where the data could be modified, e.g. variables, but
    // that's tricky to encode in the current setup.
storage: Arc<RwLock<Storage>>,
layout: Layout,
op: BackpropOp,
is_variable: bool,
dtype: DType,
device: Device,
}
impl AsRef<Tensor> for Tensor {
fn as_ref(&self) -> &Tensor {
self
}
}
// Tensors are refcounted so that cloning is cheap when building the op graph.
// Storages are also refcounted independently so that it's possible to avoid
// copying the storage for operations that only modify the shape or stride.
#[derive(Clone)]
/// The core struct for manipulating tensors.
///
/// ```rust
/// use candle_core::{Tensor, DType, Device};
///
/// let a = Tensor::arange(0f32, 6f32, &Device::Cpu)?.reshape((2, 3))?;
/// let b = Tensor::arange(0f32, 12f32, &Device::Cpu)?.reshape((3, 4))?;
///
/// let c = a.matmul(&b)?;
/// # Ok::<(), candle_core::Error>(())
/// ```
///
/// Tensors are reference counted with [`Arc`] so cloning them is cheap.
pub struct Tensor(Arc<Tensor_>);
impl std::ops::Deref for Tensor {
type Target = Tensor_;
fn deref(&self) -> &Self::Target {
self.0.as_ref()
}
}
macro_rules! unary_op {
($fn_name:ident, $op_name:ident) => {
pub fn $fn_name(&self) -> Result<Self> {
let shape = self.shape();
if shape.elem_count() == 0 {
return Ok(self.clone());
}
let storage = self
.storage()
.unary_impl::<crate::op::$op_name>(self.layout())?;
let op = BackpropOp::new1(self, |s| Op::Unary(s, UnaryOp::$op_name));
Ok(from_storage(storage, shape.clone(), op, false))
}
};
}
macro_rules! binary_op {
($fn_name:ident, $op_name:ident) => {
pub fn $fn_name(&self, rhs: &Self) -> Result<Self> {
let shape = self.same_shape_binary_op(rhs, stringify!($fn_name))?;
if shape.elem_count() == 0 {
return Ok(self.clone());
}
let storage = self.storage().binary_impl::<crate::op::$op_name>(
&*rhs.storage(),
self.layout(),
rhs.layout(),
)?;
let op = BackpropOp::new2(self, rhs, |t1, t2| Op::Binary(t1, t2, BinaryOp::$op_name));
Ok(from_storage(storage, shape.clone(), op, false))
}
};
}
macro_rules! binary_op_scalar {
($fn_name:ident, $op_name:ident) => {
pub fn $fn_name<T: TensorOrScalar>(&self, rhs: T) -> Result<Self> {
let rhs = match rhs.to_tensor_scalar()? {
crate::scalar::TensorScalar::Tensor(rhs) => rhs,
crate::scalar::TensorScalar::Scalar(rhs) => rhs
.to_dtype(self.dtype())?
.to_device(self.device())?
.broadcast_as(self.shape())?,
};
let shape = self.same_shape_binary_op(&rhs, stringify!($fn_name))?;
if self.elem_count() == 0 {
return Ok(self.clone());
}
let storage = self.storage().binary_impl::<crate::op::$op_name>(
&*rhs.storage(),
self.layout(),
rhs.layout(),
)?;
let op = BackpropOp::new2(self, &rhs, |t1, t2| Op::Binary(t1, t2, BinaryOp::$op_name));
Ok(from_storage(storage, shape.clone(), op, false))
}
};
}
macro_rules! broadcast_binary_op {
($fn_name:ident, $inner_fn_name:ident) => {
pub fn $fn_name(&self, rhs: &Self) -> Result<Self> {
let lhs = self;
let shape = lhs
.shape()
.broadcast_shape_binary_op(rhs.shape(), stringify!($fn_name))?;
let l_broadcast = shape != *lhs.shape();
let r_broadcast = shape != *rhs.shape();
match (l_broadcast, r_broadcast) {
(true, true) => lhs
.broadcast_as(&shape)?
.$inner_fn_name(&rhs.broadcast_as(&shape)?),
(false, true) => lhs.$inner_fn_name(&rhs.broadcast_as(&shape)?),
(true, false) => lhs.broadcast_as(&shape)?.$inner_fn_name(rhs),
(false, false) => lhs.$inner_fn_name(rhs),
}
}
};
}
/// Creates a fresh tensor structure based on a storage and a shape; this uses contiguous strides.
pub(crate) fn from_storage<S: Into<Shape>>(
storage: Storage,
shape: S,
op: BackpropOp,
is_variable: bool,
) -> Tensor {
let dtype = storage.dtype();
let device = storage.device();
let tensor_ = Tensor_ {
id: TensorId::new(),
storage: Arc::new(RwLock::new(storage)),
layout: Layout::contiguous(shape),
op,
is_variable,
dtype,
device,
};
Tensor(Arc::new(tensor_))
}
impl Tensor {
pub(crate) fn ones_impl<S: Into<Shape>>(
shape: S,
dtype: DType,
device: &Device,
is_variable: bool,
) -> Result<Self> {
let none = BackpropOp::none();
let shape = shape.into();
let storage = device.ones(&shape, dtype)?;
Ok(from_storage(storage, shape, none, is_variable))
}
/// Creates a new tensor filled with ones.
///
/// ```rust
/// use candle_core::{Tensor, DType, Device};
/// let a = Tensor::ones((2, 3), DType::F32, &Device::Cpu)?;
/// let b = Tensor::from_slice(&[1.0f32, 1.0, 1.0, 1.0, 1.0, 1.0], (2, 3), &Device::Cpu)?;
/// // a == b
/// # Ok::<(), candle_core::Error>(())
/// ```
pub fn ones<S: Into<Shape>>(shape: S, dtype: DType, device: &Device) -> Result<Self> {
Self::ones_impl(shape, dtype, device, false)
}
/// Creates a new tensor filled with ones with same shape, dtype, and device as the other tensor.
///
/// ```rust
/// use candle_core::{Tensor, DType, Device};
/// let a = Tensor::zeros((2, 3), DType::F32, &Device::Cpu)?;
/// let b = a.ones_like()?;
/// // b == a + 1
/// # Ok::<(), candle_core::Error>(())
/// ```
pub fn ones_like(&self) -> Result<Self> {
Tensor::ones(self.shape(), self.dtype(), self.device())
}
// Do not expose outside of the crate, the `is_variable=true` case should only be accessed from
// the variable module.
pub(crate) fn zeros_impl<S: Into<Shape>>(
shape: S,
dtype: DType,
device: &Device,
is_variable: bool,
) -> Result<Self> {
let none = BackpropOp::none();
let shape = shape.into();
let storage = device.zeros(&shape, dtype)?;
Ok(from_storage(storage, shape, none, is_variable))
}
/// Creates a new tensor filled with zeros.
///
/// ```rust
/// use candle_core::{Tensor, DType, Device};
/// let a = Tensor::zeros((2, 3), DType::F32, &Device::Cpu)?;
/// let b = Tensor::from_slice(&[0.0f32, 0.0, 0.0, 0.0, 0.0, 0.0], (2, 3), &Device::Cpu)?;
/// // a == b
/// # Ok::<(), candle_core::Error>(())
/// ```
pub fn zeros<S: Into<Shape>>(shape: S, dtype: DType, device: &Device) -> Result<Self> {
Self::zeros_impl(shape, dtype, device, false)
}
/// Creates a new tensor filled with zeros with same shape, dtype, and device as the other
/// tensor.
///
/// ```rust
/// use candle_core::{Tensor, DType, Device};
/// let a = Tensor::zeros((2, 3), DType::F32, &Device::Cpu)?;
/// let b = a.zeros_like()?;
/// // b is on CPU f32.
/// # Ok::<(), candle_core::Error>(())
/// ```
pub fn zeros_like(&self) -> Result<Self> {
Tensor::zeros(self.shape(), self.dtype(), self.device())
}
pub(crate) fn rand_impl<S: Into<Shape>, T: crate::FloatDType>(
lo: T,
up: T,
s: S,
device: &Device,
is_variable: bool,
) -> Result<Self> {
let s = s.into();
let storage = device.rand_uniform(lo, up, &s)?;
let none = BackpropOp::none();
Ok(from_storage(storage, s, none, is_variable))
}
pub(crate) fn rand_f64_impl<S: Into<Shape>>(
lo: f64,
up: f64,
s: S,
dtype: DType,
device: &Device,
is_variable: bool,
) -> Result<Self> {
let s = s.into();
let storage = device.rand_uniform_f64(lo, up, &s, dtype)?;
let none = BackpropOp::none();
Ok(from_storage(storage, s, none, is_variable))
}
/// Creates a new tensor initialized with values sampled uniformly between `lo` and `up`.
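    ///
    /// The values are random, so the example below only checks the shape:
    /// ```rust
    /// use candle_core::{Tensor, Device};
    /// let a = Tensor::rand(0f32, 1f32, (2, 3), &Device::Cpu)?;
    ///
    /// assert_eq!(a.dims(), &[2, 3]);
    /// # Ok::<(), candle_core::Error>(())
    /// ```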
pub fn rand<S: Into<Shape>, T: crate::FloatDType>(
lo: T,
up: T,
s: S,
device: &Device,
) -> Result<Self> {
Self::rand_impl(lo, up, s, device, false)
}
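    /// Creates a new tensor with the same shape, dtype, and device as `self`, initialized with
    /// values sampled uniformly between `lo` and `up`.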
pub fn rand_like(&self, lo: f64, up: f64) -> Result<Self> {
Tensor::rand_f64_impl(lo, up, self.shape(), self.dtype(), self.device(), false)
}
pub(crate) fn randn_impl<S: Into<Shape>, T: crate::FloatDType>(
mean: T,
std: T,
s: S,
device: &Device,
is_variable: bool,
) -> Result<Self> {
let s = s.into();
let storage = device.rand_normal(mean, std, &s)?;
let none = BackpropOp::none();
Ok(from_storage(storage, s, none, is_variable))
}
pub(crate) fn randn_f64_impl<S: Into<Shape>>(
mean: f64,
std: f64,
s: S,
dtype: DType,
device: &Device,
is_variable: bool,
) -> Result<Self> {
let s = s.into();
let storage = device.rand_normal_f64(mean, std, &s, dtype)?;
let none = BackpropOp::none();
Ok(from_storage(storage, s, none, is_variable))
}
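    /// Creates a new tensor with the same shape, dtype, and device as `self`, initialized with
    /// values sampled from a normal distribution with mean `mean` and standard deviation `stdev`.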
pub fn randn_like(&self, mean: f64, stdev: f64) -> Result<Self> {
Tensor::randn_f64_impl(
mean,
stdev,
self.shape(),
self.dtype(),
self.device(),
false,
)
}
/// Creates a new tensor initialized with values sampled from a normal distribution with the
/// specified `mean` and standard deviation `std`.
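    ///
    /// The values are random, so the example below only checks the shape:
    /// ```rust
    /// use candle_core::{Tensor, Device};
    /// let a = Tensor::randn(0f32, 1f32, (2, 3), &Device::Cpu)?;
    ///
    /// assert_eq!(a.dims(), &[2, 3]);
    /// # Ok::<(), candle_core::Error>(())
    /// ```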
pub fn randn<S: Into<Shape>, T: crate::FloatDType>(
mean: T,
std: T,
s: S,
device: &Device,
) -> Result<Self> {
Self::randn_impl(mean, std, s, device, false)
}
pub(crate) fn new_impl<A: crate::device::NdArray>(
array: A,
shape: Shape,
device: &Device,
is_variable: bool,
) -> Result<Self> {
let n: usize = shape.elem_count();
let buffer_size: usize = array.shape()?.elem_count();
if buffer_size != n {
return Err(Error::ShapeMismatch { buffer_size, shape }.bt());
}
let storage = device.storage(array)?;
let none = BackpropOp::none();
Ok(from_storage(storage, shape, none, is_variable))
}
/// Creates a new tensor on the specified device using the content and shape of the input.
pub fn new<A: crate::device::NdArray>(array: A, device: &Device) -> Result<Self> {
let shape = array.shape()?;
Self::new_impl(array, shape, device, false)
}
/// Returns a new tensor with all the elements having the same specified value. Note that
/// the tensor is not contiguous so you would have to call `.contiguous()` on it if needed.
///```rust
/// use candle_core::{Tensor, Device};
/// let a = Tensor::full(3.5, (2, 4), &Device::Cpu)?;
///
/// assert_eq!(a.to_vec2::<f64>()?, &[
/// [3.5, 3.5, 3.5, 3.5],
/// [3.5, 3.5, 3.5, 3.5],
/// ]);
    /// # Ok::<(), candle_core::Error>(())
    /// ```
pub fn full<D: crate::WithDType, S: Into<Shape>>(
value: D,
shape: S,
device: &Device,
) -> Result<Self> {
Self::from_vec_impl(vec![value], (), device, false)?.broadcast_as(shape)
}
/// Creates a new 1D tensor from an iterator.
///```rust
/// use candle_core::{Tensor, Device};
/// let a = Tensor::from_iter( [1.0, 2.0, 3.0, 4.0].into_iter(), &Device::Cpu)?;
///
/// assert_eq!(a.to_vec1::<f64>()?, &[1.0, 2.0, 3.0, 4.0]);
/// # Ok::<(), candle_core::Error>(())
/// ```
pub fn from_iter<D: crate::WithDType>(
iter: impl IntoIterator<Item = D>,
device: &Device,
) -> Result<Self> {
let data = iter.into_iter().collect::<Vec<_>>();
let len = data.len();
Self::from_vec_impl(data, len, device, false)
}
/// Creates a new 1D tensor with values from the interval `[start, end)` taken with a common
/// difference `1` from `start`.
///```rust
/// use candle_core::{Tensor, Device};
/// let a = Tensor::arange(2., 5., &Device::Cpu)?;
///
/// assert_eq!(a.to_vec1::<f64>()?, &[2., 3., 4.]);
/// # Ok::<(), candle_core::Error>(())
/// ```
pub fn arange<D: crate::WithDType>(start: D, end: D, device: &Device) -> Result<Self> {
Self::arange_step(start, end, D::one(), device)
}
/// Creates a new 1D tensor with values from the interval `[start, end)` taken with a common
/// difference `step` from `start`.
///```rust
/// use candle_core::{Tensor, Device};
/// let a = Tensor::arange_step(2.0, 4.0, 0.5, &Device::Cpu)?;
///
/// assert_eq!(a.to_vec1::<f64>()?, &[2.0, 2.5, 3.0, 3.5]);
/// # Ok::<(), candle_core::Error>(())
/// ```
pub fn arange_step<D: crate::WithDType>(
start: D,
end: D,
step: D,
device: &Device,
) -> Result<Self> {
if D::is_zero(&step) {
bail!("step cannot be zero")
}
let mut data = vec![];
let mut current = start;
if step >= D::zero() {
while current < end {
data.push(current);
current += step;
}
} else {
while current > end {
data.push(current);
current += step;
}
}
let len = data.len();
Self::from_vec_impl(data, len, device, false)
}
pub(crate) fn from_vec_impl<S: Into<Shape>, D: crate::WithDType>(
data: Vec<D>,
shape: S,
device: &Device,
is_variable: bool,
) -> Result<Self> {
let shape = shape.into();
let buffer_size = data.len();
if buffer_size != shape.elem_count() {
return Err(Error::ShapeMismatch { buffer_size, shape }.bt());
}
let storage = device.storage_owned(data)?;
let none = BackpropOp::none();
Ok(from_storage(storage, shape, none, is_variable))
}
/// Creates a new tensor initialized with values from the input vector. The number of elements
/// in this vector must be the same as the number of elements defined by the shape.
/// If the device is cpu, no data copy is made.
///```rust
/// use candle_core::{Tensor, Device};
/// let a = Tensor::from_vec(vec!{1., 2., 3., 4., 5., 6.}, (2, 3), &Device::Cpu)?;
///
/// assert_eq!(a.to_vec2::<f64>()?, &[
/// [1., 2., 3.],
/// [4., 5., 6.]
/// ]);
/// # Ok::<(), candle_core::Error>(())
/// ```
pub fn from_vec<S: Into<Shape>, D: crate::WithDType>(
data: Vec<D>,
shape: S,
device: &Device,
) -> Result<Self> {
Self::from_vec_impl(data, shape, device, false)
}
/// Creates a new tensor initialized with values from the input slice. The number of elements
/// in this vector must be the same as the number of elements defined by the shape.
///```rust
/// use candle_core::{Tensor, Device};
/// let values = vec![1., 2., 3., 4., 5., 6., 7., 8.];
/// let a = Tensor::from_slice(&values[1..7], (2, 3), &Device::Cpu)?;
///
/// assert_eq!(a.to_vec2::<f64>()?, &[
/// [2., 3., 4.],
/// [5., 6., 7.]
/// ]);
/// # Ok::<(), candle_core::Error>(())
/// ```
pub fn from_slice<S: Into<Shape>, D: crate::WithDType>(
array: &[D],
shape: S,
device: &Device,
) -> Result<Self> {
let shape = shape.into();
let n: usize = shape.elem_count();
let buffer_size: usize = array.len();
if buffer_size != n {
return Err(Error::ShapeMismatch { buffer_size, shape }.bt());
}
let storage = device.storage_from_slice(array)?;
let none = BackpropOp::none();
Ok(from_storage(storage, shape, none, false))
}
pub(crate) fn same_shape_binary_op(&self, rhs: &Self, op: &'static str) -> Result<&Shape> {
let lhs = self.shape();
let rhs = rhs.shape();
if lhs != rhs {
Err(Error::ShapeMismatchBinaryOp {
lhs: lhs.clone(),
rhs: rhs.clone(),
op,
}
.bt())
} else {
Ok(lhs)
}
}
/// Returns true if the computation graph should track this op, that is if it is
/// a variable or if it has some variable as dependencies.
pub fn track_op(&self) -> bool {
self.is_variable || self.op.is_some()
}
// TODO: Also make an inplace version or a pre-allocated? This could be tricky
// if this can create cycles in the compute graph.
binary_op!(add, Add);
binary_op!(mul, Mul);
binary_op!(sub, Sub);
binary_op!(div, Div);
binary_op_scalar!(maximum, Maximum);
binary_op_scalar!(minimum, Minimum);
broadcast_binary_op!(broadcast_add, add);
broadcast_binary_op!(broadcast_mul, mul);
broadcast_binary_op!(broadcast_sub, sub);
broadcast_binary_op!(broadcast_div, div);
broadcast_binary_op!(broadcast_maximum, maximum);
broadcast_binary_op!(broadcast_minimum, minimum);
broadcast_binary_op!(broadcast_eq, eq);
broadcast_binary_op!(broadcast_ne, ne);
broadcast_binary_op!(broadcast_lt, lt);
broadcast_binary_op!(broadcast_le, le);
broadcast_binary_op!(broadcast_gt, gt);
broadcast_binary_op!(broadcast_ge, ge);
unary_op!(recip, Recip);
unary_op!(neg, Neg);
unary_op!(exp, Exp);
unary_op!(log, Log);
unary_op!(sin, Sin);
unary_op!(cos, Cos);
unary_op!(tanh, Tanh);
unary_op!(abs, Abs);
unary_op!(sqr, Sqr);
unary_op!(sqrt, Sqrt);
unary_op!(gelu, Gelu);
unary_op!(gelu_erf, GeluErf);
unary_op!(erf, Erf);
unary_op!(relu, Relu);
unary_op!(silu, Silu);
unary_op!(ceil, Ceil);
unary_op!(floor, Floor);
unary_op!(round, Round);
unary_op!(sign, Sign);
    /// Rounds each element of the input tensor to the given number of decimals.
///
/// If the number of decimals is negative, it specifies the number of positions to the left of
/// the decimal point.
pub fn round_to(&self, decimals: i32) -> Result<Self> {
let mult = 10f64.powi(decimals);
(self * mult)?.round()? * (1f64 / mult)
}
/// Retrieves the single scalar value held in the tensor. If the tensor has one or more
/// dimensions, an error is returned instead.
pub fn to_scalar<S: crate::WithDType>(&self) -> Result<S> {
if self.rank() != 0 {
Err(Error::UnexpectedNumberOfDims {
expected: 0,
got: self.rank(),
shape: self.shape().clone(),
}
.bt())?
}
let from_cpu_storage = |cpu_storage: &crate::CpuStorage| {
let data = S::cpu_storage_as_slice(cpu_storage)?;
Ok::<_, Error>(data[self.layout().start_offset()])
};
match &*self.storage() {
Storage::Cpu(cpu_storage) => from_cpu_storage(cpu_storage),
Storage::Cuda(storage) => from_cpu_storage(&storage.to_cpu_storage()?),
Storage::Metal(storage) => from_cpu_storage(&storage.to_cpu_storage()?),
}
}
/// An alias for `to_scalar`.
pub fn to_vec0<S: crate::WithDType>(&self) -> Result<S> {
self.to_scalar::<S>()
}
/// Repeat this tensor along the specified dimensions.
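///
/// For example, repeating twice along the first dimension should give:
/// ```rust
/// use candle_core::{Tensor, Device};
/// let a = Tensor::new(&[[0f32, 1.], [2., 3.]], &Device::Cpu)?;
/// let b = a.repeat((2, 1))?;
/// assert_eq!(b.to_vec2::<f32>()?, &[[0., 1.], [2., 3.], [0., 1.], [2., 3.]]);
/// # Ok::<(), candle_core::Error>(())
/// ```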
pub fn repeat<S: Into<Shape>>(&self, shape: S) -> Result<Tensor> {
// Similar to PyTorch, we extend the number of dimensions of self if needed.
let repeats = shape.into();
let repeats = repeats.dims();
let mut inp = if self.rank() < repeats.len() {
let shape = [vec![1; repeats.len() - self.rank()], self.dims().to_vec()].concat();
self.reshape(shape)?
} else {
self.clone()
};
for (idx, &repeat) in repeats.iter().enumerate() {
if repeat > 1 {
inp = Tensor::cat(&vec![&inp; repeat], idx)?
}
}
Ok(inp)
}
/// Creates grids of coordinates specified by the 1D inputs.
///
/// # Arguments
///
/// * `args` - A slice of 1D tensors.
/// * `xy_indexing` - Whether to use xy indexing or ij indexing. If xy is selected, the
/// first dimension corresponds to the cardinality of the second input and the second
/// dimension corresponds to the cardinality of the first input. If ij is selected, the
/// dimensions are in the same order as the cardinality of the inputs.
///
/// # Examples
///
/// ```rust
/// use candle_core::{Tensor, Device, Shape};
/// let x = Tensor::new(&[1f32, 2., 3.], &Device::Cpu)?;
/// let y = Tensor::new(&[4f32, 5., 6.], &Device::Cpu)?;
///
/// let grids_xy = Tensor::meshgrid(&[&x, &y], true)?;
///
/// assert_eq!(grids_xy.len(), 2);
/// assert_eq!(grids_xy[0].dims(), &[3, 3]);
///
/// assert_eq!(grids_xy[0].to_vec2::<f32>()?, &[[1., 2., 3.], [1., 2., 3.], [1., 2., 3.]]);
/// assert_eq!(grids_xy[1].to_vec2::<f32>()?, &[[4., 4., 4.], [5., 5., 5.], [6., 6., 6.]]);
///
/// let grids_ij = Tensor::meshgrid(&[&x, &y], false)?;
///
/// assert_eq!(grids_ij[0].to_vec2::<f32>()?, &[[1., 1., 1.], [2., 2., 2.], [3., 3., 3.]]);
/// assert_eq!(grids_ij[1].to_vec2::<f32>()?, &[[4., 5., 6.], [4., 5., 6.], [4., 5., 6.]]);
/// # Ok::<(), candle_core::Error>(())
/// ```
///
/// # Errors
///
/// * Will return `Err` if `args` contains fewer than 2 tensors.
///
pub fn meshgrid<A: AsRef<Tensor>>(args: &[A], xy_indexing: bool) -> Result<Vec<Self>> {
if args.len() <= 1 {
Err(Error::OpRequiresAtLeastTwoTensors { op: "meshgrid" }.bt())?
}
let args: Vec<_> = if xy_indexing {
args.iter().rev().collect()
} else {
args.iter().collect()
};
let mut shape = Vec::with_capacity(args.len());
for arg in args.iter() {
shape.push(arg.as_ref().dims1()?)
}
let mut grids = Vec::with_capacity(args.len());
for idx in 0..args.len() {
let mut ones = vec![1usize; args.len()];
ones[idx] = shape[idx];
let arg = args[idx].as_ref().reshape(ones)?;
let mut repeats = shape.clone();
repeats[idx] = 1;
let repeated_tensor = arg.repeat(repeats)?;
grids.push(repeated_tensor);
}
if xy_indexing {
grids.reverse();
}
Ok(grids)
}
/// This operation multiplies the input tensor by `mul` then adds `add` and returns the result.
/// The input values `mul` and `add` are cast to the appropriate type so some rounding might
/// be performed.
///
/// ```rust
/// use candle_core::{Tensor, Device};
/// let a = Tensor::new(&[[0f32, 1.], [2., 3.]], &Device::Cpu)?;
/// let a = a.affine(4., -2.)?;
/// assert_eq!(a.to_vec2::<f32>()?, &[[-2.0, 2.0], [6.0, 10.0]]);
/// # Ok::<(), candle_core::Error>(())
/// ```
pub fn affine(&self, mul: f64, add: f64) -> Result<Self> {
if self.elem_count() == 0 {
return Ok(self.clone());
}
let storage = self.storage().affine(self.layout(), mul, add)?;
let op = BackpropOp::new1(self, |arg| Op::Affine { arg, mul, add });
Ok(from_storage(storage, self.shape(), op, false))
}
/// Applies the Exponential Linear Unit (ELU) function on each element of the input tensor.
pub fn elu(&self, alpha: f64) -> Result<Self> {
if self.elem_count() == 0 {
return Ok(self.clone());
}
let storage = self.storage().elu(self.layout(), alpha)?;
let op = BackpropOp::new1(self, |t| Op::Elu(t, alpha));
Ok(from_storage(storage, self.shape(), op, false))
}
/// Raise the tensor to some float exponent `e`.
pub fn powf(&self, e: f64) -> Result<Self> {
if self.elem_count() == 0 {
return Ok(self.clone());
}
let storage = self.storage().powf(self.layout(), e)?;
let op = BackpropOp::new1(self, |t| Op::Powf(t, e));
Ok(from_storage(storage, self.shape(), op, false))
}
pub(crate) fn check_dim(&self, dim: usize, op: &'static str) -> Result<()> {
if dim >= self.dims().len() {
Err(Error::DimOutOfRange {
shape: self.shape().clone(),
dim: dim as i32,
op,
}
.bt())?
} else {
Ok(())
}
}
/// Splits a tensor into the specified number of chunks; this may return fewer chunks than
/// specified.
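///
/// For example, splitting five elements into two chunks should put the extra element in the
/// first chunk:
/// ```rust
/// use candle_core::{Tensor, Device};
/// let t = Tensor::new(&[1f32, 2., 3., 4., 5.], &Device::Cpu)?;
/// let chunks = t.chunk(2, 0)?;
/// assert_eq!(chunks.len(), 2);
/// assert_eq!(chunks[0].to_vec1::<f32>()?, &[1., 2., 3.]);
/// assert_eq!(chunks[1].to_vec1::<f32>()?, &[4., 5.]);
/// # Ok::<(), candle_core::Error>(())
/// ```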
pub fn chunk<D: Dim>(&self, chunks: usize, dim: D) -> Result<Vec<Self>> {
let dim = dim.to_index(self.shape(), "chunk")?;
let size = self.dim(dim)?;
if size < chunks {
(0..size).map(|i| self.narrow(dim, i, 1)).collect()
} else {
let chunk_size = size / chunks;
let cnt_additional = size % chunks;
let mut tensors = vec![];
let mut sum_chunk_size = 0;
for i in 0..chunks {
let chunk_size = if i < cnt_additional {
chunk_size + 1
} else {
chunk_size
};
let tensor = self.narrow(dim, sum_chunk_size, chunk_size)?;
tensors.push(tensor);
sum_chunk_size += chunk_size
}
Ok(tensors)
}
}
/// Returns a new tensor that is a narrowed version of the input, the dimension `dim`
/// ranges from `start` to `start + len`.
/// ```
/// use candle_core::{Tensor, Device};
/// let a = Tensor::new(&[
/// [0f32, 1., 2.],
/// [3. , 4., 5.],
/// [6. , 7., 8.]
/// ], &Device::Cpu)?;
///
/// let b = a.narrow(0, 1, 2)?;
/// assert_eq!(b.shape().dims(), &[2, 3]);
/// assert_eq!(b.to_vec2::<f32>()?, &[
/// [3., 4., 5.],
/// [6., 7., 8.]
/// ]);
///
/// let c = a.narrow(1, 1, 1)?;
/// assert_eq!(c.shape().dims(), &[3, 1]);
/// assert_eq!(c.to_vec2::<f32>()?, &[
/// [1.],
/// [4.],
/// [7.]
/// ]);
/// # Ok::<(), candle_core::Error>(())
/// ```
pub fn narrow<D: Dim>(&self, dim: D, start: usize, len: usize) -> Result<Self> {
let dims = self.dims();
let dim = dim.to_index(self.shape(), "narrow")?;
let err = |msg| {
Err::<(), _>(
Error::NarrowInvalidArgs {
shape: self.shape().clone(),
dim,
start,
len,
msg,
}
.bt(),
)
};
if start > dims[dim] {
err("start > dim_len")?
}
if start.saturating_add(len) > dims[dim] {
err("start + len > dim_len")?
}
if start == 0 && dims[dim] == len {
Ok(self.clone())
} else {
let op = BackpropOp::new1(self, |t| Op::Narrow(t, dim, start, len));
let layout = self.layout().narrow(dim, start, len)?;
let tensor_ = Tensor_ {
id: TensorId::new(),
storage: self.storage.clone(),
layout,
op,
is_variable: false,
dtype: self.dtype,
device: self.device.clone(),
};
Ok(Tensor(Arc::new(tensor_)))
}
}
fn squeeze_dims(self, dims: &[usize]) -> Result<Self> {
match dims {
[] => Ok(self),
[i] => self.squeeze(*i),
dims => {
let dims = self
.dims()
.iter()
.enumerate()
.filter_map(|(dim_idx, &v)| {
if dims.contains(&dim_idx) {
None
} else {
Some(v)
}
})
.collect::<Vec<_>>();
self.reshape(dims)
}
}
}
fn reduce_impl<D: Dim>(&self, dim: D, keepdim: bool, op: ReduceOp) -> Result<Self> {
let dim = dim.to_index(self.shape(), op.name())?;
let storage = self.storage().reduce_op(op, self.layout(), &[dim])?;
let mut dims = self.dims().to_vec();
dims[dim] = 1;
let op = match op {
ReduceOp::Sum | ReduceOp::Min | ReduceOp::Max => {
BackpropOp::new1(self, |arg| Op::Reduce(arg, op, dims.to_vec()))
}
ReduceOp::ArgMin | ReduceOp::ArgMax => BackpropOp::none(),
};
let res = from_storage(storage, dims, op, false);
if keepdim {
Ok(res)
} else {
res.squeeze_dims(&[dim])
}
}
fn sum_impl<D: Dims>(&self, sum_dims: D, keepdim: bool) -> Result<Self> {
let sum_dims = sum_dims.to_indexes(self.shape(), "sum")?;
let storage = self
.storage()
.reduce_op(ReduceOp::Sum, self.layout(), &sum_dims)?;
let mut dims = self.dims().to_vec();
for &sum_dim in sum_dims.iter() {
dims[sum_dim] = 1
}
let op = BackpropOp::new1(self, |a| Op::Reduce(a, ReduceOp::Sum, dims.to_vec()));
let sum = from_storage(storage, dims, op, false);
if keepdim {
Ok(sum)
} else {
sum.squeeze_dims(&sum_dims)
}
}
/// Rolls the input tensor along the given dimension.
/// Elements that are shifted beyond the last position are re-introduced at the first position.
///
/// ```rust
/// # use candle_core::{Tensor, Device};
/// let tensor = Tensor::new(&[[0f32, 1.], [2., 3.], [4., 5.]], &Device::Cpu)?;
/// let tensor = tensor.roll(1, 0)?;
/// assert_eq!(tensor.to_vec2::<f32>()?, &[[4., 5.], [0., 1.], [2., 3.]]);
/// let tensor = Tensor::new(&[[0f32, 1.], [2., 3.], [4., 5.]], &Device::Cpu)?;
/// let tensor = tensor.roll(-1, 0)?;
/// assert_eq!(tensor.to_vec2::<f32>()?, &[[2., 3.], [4., 5.], [0., 1.]]);
/// # Ok::<(), candle_core::Error>(())
/// ```
pub fn roll<D>(&self, shift: i32, dim: D) -> Result<Self>
where
D: Dim + Clone,
{
let dim = dim.to_index(self.shape(), "roll")?;
let dim_size = self.dim(dim)?;
let shift = shift.rem_euclid(dim_size as i32) as usize;
if shift == 0 {
Ok(self.clone())
} else {
let a = self.narrow(dim, 0, dim_size - shift)?;
let b = self.narrow(dim, dim_size - shift, shift)?;
Tensor::cat(&[&b, &a], dim)
}
}
/// Returns the sum of the elements in the input tensor, summed over the dimensions specified
/// in `sum_dims`.
///
/// The resulting tensor has a shape that is similar to the shape of the input tensor, except
/// that the number of elements for each dimension index in `sum_dims` is 1.
///
/// ```rust
/// use candle_core::{Tensor, Device};
/// let a = Tensor::new(&[[0f32, 1.], [2., 3.]], &Device::Cpu)?;
/// let s = a.sum_keepdim(0)?;
/// assert_eq!(s.to_vec2::<f32>()?, &[[2., 4.]]);
/// let s = a.sum_keepdim(1)?;
/// assert_eq!(s.to_vec2::<f32>()?, &[[1.], [5.]]);
/// let s = a.sum_keepdim((0, 1))?;
/// assert_eq!(s.to_vec2::<f32>()?, &[[6.]]);
/// # Ok::<(), candle_core::Error>(())
/// ```
pub fn sum_keepdim<D: Dims>(&self, sum_dims: D) -> Result<Self> {
self.sum_impl(sum_dims, true)
}
/// Returns the sum of the elements in the input tensor, summed over the dimensions specified
/// in `sum_dims`; compared to `sum_keepdim`, these dimensions are squeezed rather than
/// kept.
pub fn sum<D: Dims>(&self, sum_dims: D) -> Result<Self> {
self.sum_impl(sum_dims, false)
}
/// Returns the mean of the elements in the input tensor, averaged over the dimensions
/// specified in `mean_dims`.
///
/// The resulting tensor has a shape that is similar to the shape of the input tensor, except
/// that the number of elements for each dimension index in `mean_dims` is 1.
///
/// ```rust
/// use candle_core::{Tensor, Device};
/// let a = Tensor::new(&[[0f32, 1.], [2., 3.]], &Device::Cpu)?;
/// let s = a.mean_keepdim(0)?;
/// assert_eq!(s.to_vec2::<f32>()?, &[[1., 2.]]);
/// let s = a.mean_keepdim(1)?;
/// assert_eq!(s.to_vec2::<f32>()?, &[[0.5], [2.5]]);
/// let s = a.mean_keepdim((0, 1))?;
/// assert_eq!(s.to_vec2::<f32>()?, &[[1.5]]);
/// # Ok::<(), candle_core::Error>(())
/// ```
pub fn mean_keepdim<D: Dims>(&self, mean_dims: D) -> Result<Self> {
let mean_dims = mean_dims.to_indexes(self.shape(), "mean-keepdim")?;
let reduced_dim: usize = mean_dims.iter().map(|i| self.dims()[*i]).product();
let scale = 1f64 / (reduced_dim as f64);
self.sum_impl(mean_dims, true)? * scale
}
/// Returns the mean of the elements in the input tensor, averaged over the dimensions
/// specified in `mean_dims`; compared to `mean_keepdim`, these dimensions are squeezed rather
/// than kept.
pub fn mean<D: Dims>(&self, mean_dims: D) -> Result<Self> {
let mean_dims = mean_dims.to_indexes(self.shape(), "mean")?;
let reduced_dim: usize = mean_dims.iter().map(|i| self.dims()[*i]).product();
let scale = 1f64 / (reduced_dim as f64);
self.sum_impl(mean_dims, false)? * scale
}
/// Returns the unbiased variance over the selected dimension.
pub fn var_keepdim<D: Dim>(&self, dim: D) -> Result<Self> {
let dim = dim.to_index(self.shape(), "var")?;
let mean = self.mean_keepdim(dim)?;
let squares = self.broadcast_sub(&mean)?.sqr()?;
squares.sum_impl(dim, true)? / (self.dim(dim)? - 1) as f64
}
/// Returns the unbiased variance over the selected dimension.
pub fn var<D: Dim>(&self, dim: D) -> Result<Self> {
let dim = dim.to_index(self.shape(), "var")?;
self.var_keepdim(dim)?.squeeze(dim)
}
/// Gathers the maximum value across the selected dimension. The resulting shape has the same
/// number of dimensions as the original tensor and the selected dimension has a single element.
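///
/// For example, taking the maximum over the first dimension should give:
/// ```rust
/// use candle_core::{Tensor, Device};
/// let a = Tensor::new(&[[0f32, 1.], [2., 3.], [4., 5.]], &Device::Cpu)?;
/// let m = a.max_keepdim(0)?;
/// assert_eq!(m.to_vec2::<f32>()?, &[[4., 5.]]);
/// # Ok::<(), candle_core::Error>(())
/// ```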
pub fn max_keepdim<D: Dim>(&self, dim: D) -> Result<Self> {
self.reduce_impl(dim, true, ReduceOp::Max)
}
/// Similar to `max_keepdim` but the target dimension is squeezed.
pub fn max<D: Dim>(&self, dim: D) -> Result<Self> {
self.reduce_impl(dim, false, ReduceOp::Max)
}
/// Gathers the minimum value across the selected dimension. The resulting shape has the same
/// number of dimensions as the original tensor and the selected dimension has a single element.
pub fn min_keepdim<D: Dim>(&self, dim: D) -> Result<Self> {
self.reduce_impl(dim, true, ReduceOp::Min)
}
/// Similar to `min_keepdim` but the target dimension is squeezed.
pub fn min<D: Dim>(&self, dim: D) -> Result<Self> {
self.reduce_impl(dim, false, ReduceOp::Min)
}
pub fn argmax_keepdim<D: Dim>(&self, dim: D) -> Result<Self> {
self.reduce_impl(dim, true, ReduceOp::ArgMax)
}
/// Similar to `argmax_keepdim` but the target dimension is squeezed.
pub fn argmax<D: Dim>(&self, dim: D) -> Result<Self> {
self.reduce_impl(dim, false, ReduceOp::ArgMax)
}
pub fn argmin_keepdim<D: Dim>(&self, dim: D) -> Result<Self> {
self.reduce_impl(dim, true, ReduceOp::ArgMin)
}
/// Similar to `argmin_keepdim` but the target dimension is squeezed.
pub fn argmin<D: Dim>(&self, dim: D) -> Result<Self> {
self.reduce_impl(dim, false, ReduceOp::ArgMin)
}
/// Element-wise comparison between two tensors, e.g. equality, greater than, ... The actual
/// comparison operation is specified by the `op` argument.
///
/// The returned tensor has the same shape as the original tensors and uses `u8` elements.
pub fn cmp<T: TensorOrScalar>(&self, rhs: T, op: CmpOp) -> Result<Self> {
let rhs = match rhs.to_tensor_scalar()? {
crate::scalar::TensorScalar::Tensor(rhs) => rhs,
crate::scalar::TensorScalar::Scalar(rhs) => rhs
.to_dtype(self.dtype())?
.to_device(self.device())?
.broadcast_as(self.shape())?,
};
let shape = self.same_shape_binary_op(&rhs, "cmp")?;
let storage = self
.storage()
.cmp(op, &rhs.storage(), self.layout(), rhs.layout())?;
let op = BackpropOp::new1(self, |a| Op::Cmp(a, op));
Ok(from_storage(storage, shape.dims(), op, false))
}
/// Element-wise equality.
pub fn eq<T: TensorOrScalar>(&self, rhs: T) -> Result<Self> {
self.cmp(rhs, CmpOp::Eq)
}
/// Element-wise non-equality.
pub fn ne<T: TensorOrScalar>(&self, rhs: T) -> Result<Self> {
self.cmp(rhs, CmpOp::Ne)
}
/// Element-wise comparison with lower-than, the returned tensor uses value 1 where `self <
/// rhs` and 0 otherwise.
pub fn lt<T: TensorOrScalar>(&self, rhs: T) -> Result<Self> {
self.cmp(rhs, CmpOp::Lt)
}
/// Element-wise comparison with greater-than, the returned tensor uses value 1 where `self >
/// rhs` and 0 otherwise.
pub fn gt<T: TensorOrScalar>(&self, rhs: T) -> Result<Self> {
self.cmp(rhs, CmpOp::Gt)
}
/// Element-wise comparison with greater-equal, the returned tensor uses value 1 where `self >=
/// rhs` and 0 otherwise.
pub fn ge<T: TensorOrScalar>(&self, rhs: T) -> Result<Self> {
self.cmp(rhs, CmpOp::Ge)
}
/// Element-wise comparison with lower-equal, the returned tensor uses value 1 where `self <=
/// rhs` and 0 otherwise.
pub fn le<T: TensorOrScalar>(&self, rhs: T) -> Result<Self> {
self.cmp(rhs, CmpOp::Le)
}
/// Clamp the tensor values to be between `min` and `max`.
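///
/// For example, clamping to the `[0., 3.]` range should give:
/// ```rust
/// use candle_core::{Tensor, Device};
/// let t = Tensor::new(&[-1f32, 0., 2., 5.], &Device::Cpu)?;
/// let t = t.clamp(0f32, 3f32)?;
/// assert_eq!(t.to_vec1::<f32>()?, &[0., 0., 2., 3.]);
/// # Ok::<(), candle_core::Error>(())
/// ```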
pub fn clamp<T1: TensorOrScalar, T2: TensorOrScalar>(&self, min: T1, max: T2) -> Result<Self> {
self.maximum(min)?.minimum(max)
}
/// Interpolate the input tensor to size `target_size`, taking the value of the nearest element.
///
/// The input tensor should have three dimensions, `(batch, channels, l)`, the returned
/// tensor also has three dimensions, `(batch, channels, target_size)`.
pub fn interpolate1d(&self, target_size: usize) -> Result<Self> {
let (n, c, _l) = self.dims3()?;
let op = BackpropOp::new1(self, |arg| Op::UpsampleNearest1D { arg, target_size });
let storage = self
.storage()
.upsample_nearest1d(self.layout(), target_size)?;
Ok(from_storage(storage, (n, c, target_size), op, false))
}
/// Alias for `interpolate1d`.
pub fn upsample_nearest1d(&self, target_size: usize) -> Result<Self> {
self.interpolate1d(target_size)
}
/// Interpolate the input tensor to the `(target_h, target_w)` size, taking the value of the
/// nearest element.
///
/// The input tensor should have four dimensions, `(batch, channels, h, w)`, the returned
/// tensor also has four dimensions, `(batch, channels, target_h, target_w)`.
pub fn interpolate2d(&self, target_h: usize, target_w: usize) -> Result<Self> {
let (n, c, _h, _w) = self.dims4()?;
let op = BackpropOp::new1(self, |arg| Op::UpsampleNearest2D {
arg,
target_h,
target_w,
});
let storage = self
.storage()
.upsample_nearest2d(self.layout(), target_h, target_w)?;
Ok(from_storage(storage, (n, c, target_h, target_w), op, false))
}
/// Alias for `interpolate2d`.
pub fn upsample_nearest2d(&self, target_h: usize, target_w: usize) -> Result<Self> {
self.interpolate2d(target_h, target_w)
}
/// 2D average pooling over an input tensor with multiple channels.
///
/// The input tensor should have four dimensions, `(batch, channels, h, w)`, the returned
/// tensor also has four dimensions, `(batch, channels, h', w')`. The pooling is performed on
/// the two last dimensions using a kernel of size `sz`. The returned element is the average
/// value over the kernel window.
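///
/// For example, averaging over a single 2x2 window should give:
/// ```rust
/// use candle_core::{Tensor, Device};
/// let t = Tensor::new(&[[1f32, 2.], [3., 4.]], &Device::Cpu)?.reshape((1, 1, 2, 2))?;
/// let p = t.avg_pool2d(2)?;
/// assert_eq!(p.dims(), &[1, 1, 1, 1]);
/// assert_eq!(p.flatten_all()?.to_vec1::<f32>()?, &[2.5]);
/// # Ok::<(), candle_core::Error>(())
/// ```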
pub fn avg_pool2d<T: crate::ToUsize2>(&self, sz: T) -> Result<Self> {
let sz = sz.to_usize2();
self.avg_pool2d_with_stride(sz, sz)
}
/// Same as `avg_pool2d` but with a `stride` that can be set to a value different from the
/// kernel size.
pub fn avg_pool2d_with_stride<T: crate::ToUsize2>(
&self,
kernel_size: T,
stride: T,
) -> Result<Self> {
let kernel_size = kernel_size.to_usize2();
let stride = stride.to_usize2();
let (n, c, h, w) = self.dims4()?;
if h < kernel_size.0 || w < kernel_size.1 {
bail!("kernel-size {kernel_size:?} is larger than the input size {h},{w}")
}
// https://pytorch.org/docs/stable/generated/torch.nn.AvgPool2d.html#torch.nn.AvgPool2d
let h_out = (h - kernel_size.0) / stride.0 + 1;
let w_out = (w - kernel_size.1) / stride.1 + 1;
let op = BackpropOp::new1(self, |arg| Op::AvgPool2D {
arg,
kernel_size,
stride,
});
let storage = self
.storage()
.avg_pool2d(self.layout(), kernel_size, stride)?;
Ok(from_storage(storage, (n, c, h_out, w_out), op, false))
}
/// 2D max pooling over an input tensor with multiple channels.
///
/// The input tensor should have four dimensions, `(batch, channels, h, w)`, the returned
/// tensor also has four dimensions, `(batch, channels, h', w')`. The pooling is performed on
/// the two last dimensions using a kernel of size `sz`, the returned element is the maximum
/// value over the kernel window.
pub fn max_pool2d<T: crate::ToUsize2>(&self, sz: T) -> Result<Self> {
let sz = sz.to_usize2();
self.max_pool2d_with_stride(sz, sz)
}
/// Same as `max_pool2d` but with a `stride` that can be set to a value different from the
/// kernel size.
pub fn max_pool2d_with_stride<T: crate::ToUsize2>(
&self,
kernel_size: T,
stride: T,
) -> Result<Self> {
let kernel_size = kernel_size.to_usize2();
let stride = stride.to_usize2();
let (n, c, h, w) = self.dims4()?;
if h < kernel_size.0 || w < kernel_size.1 {
bail!("kernel-size {kernel_size:?} is larger than the input size {h},{w}")
}
// https://pytorch.org/docs/stable/generated/torch.nn.MaxPool2d.html#torch.nn.MaxPool2d
let h_out = (h - kernel_size.0) / stride.0 + 1;
let w_out = (w - kernel_size.1) / stride.1 + 1;
let op = BackpropOp::new1(self, |arg| Op::MaxPool2D {
arg,
kernel_size,
stride,
});
let storage = self
.storage()
.max_pool2d(self.layout(), kernel_size, stride)?;
Ok(from_storage(storage, (n, c, h_out, w_out), op, false))
}
/// Returns the matrix-multiplication of the input tensor with the other provided tensor.
///
/// # Arguments
///
/// * `self` - A tensor with dimensions `b1, b2, ..., bi, m, k`.
/// * `rhs` - A tensor with dimensions `b1, b2, ..., bi, k, n`.
///
/// The resulting tensor has dimensions `b1, b2, ..., bi, m, n`.
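///
/// For example, a 2x2 by 2x2 product should give:
/// ```rust
/// use candle_core::{Tensor, Device};
/// let a = Tensor::new(&[[1f32, 2.], [3., 4.]], &Device::Cpu)?;
/// let b = Tensor::new(&[[5f32, 6.], [7., 8.]], &Device::Cpu)?;
/// let c = a.matmul(&b)?;
/// assert_eq!(c.to_vec2::<f32>()?, &[[19., 22.], [43., 50.]]);
/// # Ok::<(), candle_core::Error>(())
/// ```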
pub fn matmul(&self, rhs: &Self) -> Result<Self> {
let a_dims = self.shape().dims();
let b_dims = rhs.shape().dims();
let dim = a_dims.len();
if dim < 2 || b_dims.len() != dim {
Err(Error::ShapeMismatchBinaryOp {
lhs: self.shape().clone(),
rhs: rhs.shape().clone(),
op: "matmul",
}
.bt())?
}
let m = a_dims[dim - 2];
let k = a_dims[dim - 1];
let k2 = b_dims[dim - 2];
let n = b_dims[dim - 1];
let c_shape = Shape::from(&a_dims[..dim - 2]).extend(&[m, n]);
if c_shape.elem_count() == 0 || k == 0 {
return Tensor::zeros(c_shape, self.dtype(), self.device());
}
let batching: usize = a_dims[..dim - 2].iter().product();
let batching_b: usize = b_dims[..dim - 2].iter().product();
if k != k2 || batching != batching_b {
Err(Error::ShapeMismatchBinaryOp {
lhs: self.shape().clone(),
rhs: rhs.shape().clone(),
op: "matmul",
}
.bt())?
}
let storage = self.storage().matmul(
&rhs.storage(),
(batching, m, n, k),
self.layout(),
rhs.layout(),
)?;
let op = BackpropOp::new2(self, rhs, Op::Matmul);
Ok(from_storage(storage, c_shape, op, false))
}
/// Matrix-multiplication with broadcasting support.
///
/// Compared to `matmul`, the two matrices are allowed to have different dimensions as long as
/// they are compatible for broadcast. E.g. if `self` has shape `(j, 1, n, k)` and `rhs` has
/// shape `(l, k, m)`, the output will have shape `(j, l, n, m)`.
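///
/// For example, following the shapes described above, the result should be:
/// ```rust
/// use candle_core::{Tensor, DType, Device};
/// let lhs = Tensor::zeros((2, 1, 4, 3), DType::F32, &Device::Cpu)?;
/// let rhs = Tensor::zeros((5, 3, 6), DType::F32, &Device::Cpu)?;
/// let out = lhs.broadcast_matmul(&rhs)?;
/// assert_eq!(out.dims(), &[2, 5, 4, 6]);
/// # Ok::<(), candle_core::Error>(())
/// ```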
pub fn broadcast_matmul(&self, rhs: &Self) -> Result<Self> {
let lhs = self;
let (l_shape, r_shape) = lhs.shape().broadcast_shape_matmul(rhs.shape())?;
let l_broadcast = l_shape != *lhs.shape();
let r_broadcast = r_shape != *rhs.shape();
// TODO: Avoid concretising the broadcasted matrices via contiguous.
match (l_broadcast, r_broadcast) {
(true, true) => lhs
.broadcast_as(&l_shape)?
.contiguous()?
.matmul(&rhs.broadcast_as(&r_shape)?.contiguous()?),
(false, true) => lhs.matmul(&rhs.broadcast_as(&r_shape)?.contiguous()?),
(true, false) => lhs.broadcast_as(&l_shape)?.contiguous()?.matmul(rhs),
(false, false) => lhs.matmul(rhs),
}
}
/// Returns a tensor with the same shape as the input tensor, the values are taken from
/// `on_true` if the input tensor value is not zero, and `on_false` at the positions where the
/// input tensor is equal to zero.
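///
/// For example, with a `u8` mask (as produced by the comparison ops) this should select:
/// ```rust
/// use candle_core::{Tensor, Device};
/// let cond = Tensor::new(&[1u8, 0, 1], &Device::Cpu)?;
/// let on_true = Tensor::new(&[1f32, 2., 3.], &Device::Cpu)?;
/// let on_false = Tensor::new(&[4f32, 5., 6.], &Device::Cpu)?;
/// let t = cond.where_cond(&on_true, &on_false)?;
/// assert_eq!(t.to_vec1::<f32>()?, &[1., 5., 3.]);
/// # Ok::<(), candle_core::Error>(())
/// ```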
pub fn where_cond(&self, on_true: &Self, on_false: &Self) -> Result<Self> {
let _shap = self.same_shape_binary_op(on_true, "where_cond")?;
let shape = self.same_shape_binary_op(on_false, "where_cond")?;
let storage = self.storage().where_cond(
self.layout(),
&on_true.storage(),
on_true.layout(),
&on_false.storage(),
on_false.layout(),
)?;
let op = BackpropOp::new3(self, on_true, on_false, Op::WhereCond);
Ok(from_storage(storage, shape, op, false))
}
/// Returns a tensor with the values from the `self` tensor at the indexes corresponding to the
/// values held in the `ids` tensor.
///
/// # Arguments
///
/// * `self` - A tensor with dimensions `v, h`.
/// * `ids` - A tensor with dimensions `s` and with integer values between 0 and v (exclusive).
///
/// The resulting tensor has dimensions `s, h`. `s` is called the sequence length, `v` the
/// vocabulary size, and `h` the hidden size.
///
/// ```rust
/// use candle_core::{Tensor, Device};
/// let values = Tensor::new(&[[0f32, 1.], [2., 3.], [4., 5.]], &Device::Cpu)?;
/// let ids = Tensor::new(&[2u32, 1u32, 2u32], &Device::Cpu)?;
/// let emb = values.embedding(&ids)?;
/// assert_eq!(emb.to_vec2::<f32>()?, &[[4., 5.], [2., 3.], [4., 5.]]);
/// # Ok::<(), candle_core::Error>(())
/// ```
pub fn embedding(&self, ids: &Self) -> Result<Self> {
if self.rank() != 2 || ids.rank() != 1 {
Err(Error::ShapeMismatchBinaryOp {
lhs: self.shape().clone(),
rhs: ids.shape().clone(),
op: "embedding",
}
.bt())?
}
self.index_select(ids, 0)
}
pub fn scatter_add<D: Dim>(&self, indexes: &Self, source: &Self, dim: D) -> Result<Self> {
let dim = dim.to_index(self.shape(), "scatter-add")?;
let source_dims = source.dims();
let self_dims = self.dims();
let mismatch = if source_dims.len() != self_dims.len() {
true
} else {
let mut mismatch = false;
for (i, (&d1, &d2)) in self_dims.iter().zip(source_dims.iter()).enumerate() {
if i != dim && d1 != d2 {
mismatch = true;
break;
}
}
mismatch
};
if mismatch {
Err(Error::ShapeMismatchBinaryOp {
op: "scatter-add (self, src)",
lhs: self.shape().clone(),
rhs: source.shape().clone(),
}
.bt())?
}
if indexes.dims() != source.dims() {
Err(Error::ShapeMismatchBinaryOp {
op: "scatter-add (indexes, src)",
lhs: indexes.shape().clone(),
rhs: source.shape().clone(),
}
.bt())?
}
let storage = self.storage().scatter_add(
self.layout(),
&indexes.storage(),
indexes.layout(),
&source.storage(),
source.layout(),
dim,
)?;
let op = BackpropOp::new3(self, indexes, source, |t1, t2, t3| {
Op::ScatterAdd(t1, t2, t3, dim)
});
Ok(from_storage(storage, self.shape(), op, false))
}
/// Embeds the values of the `src` tensor into the `self` tensor on the specified dimension.
pub fn slice_scatter<D: Dim>(&self, src: &Self, dim: D, start: usize) -> Result<Self> {
let dim = dim.to_index(self.shape(), "slice-scatter")?;
if dim == 0 {
self.slice_scatter0(src, start)
} else {
// TODO: Maybe we want to add a more efficient implementation at some point.
self.transpose(0, dim)?
.slice_scatter0(&src.transpose(0, dim)?, start)?
.transpose(0, dim)
}
}
/// Embeds the values of the `src` tensor into the `self` tensor on the first dimension.
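///
/// For example, scattering a single row at offset 1 should give:
/// ```rust
/// use candle_core::{Tensor, DType, Device};
/// let a = Tensor::zeros((3, 2), DType::F32, &Device::Cpu)?;
/// let b = Tensor::ones((1, 2), DType::F32, &Device::Cpu)?;
/// let c = a.slice_scatter0(&b, 1)?;
/// assert_eq!(c.to_vec2::<f32>()?, &[[0., 0.], [1., 1.], [0., 0.]]);
/// # Ok::<(), candle_core::Error>(())
/// ```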
pub fn slice_scatter0(&self, src: &Self, start: usize) -> Result<Self> {
if self.dtype() != src.dtype() {
Err(Error::DTypeMismatchBinaryOp {
lhs: self.dtype(),
rhs: src.dtype(),
op: "slice-scatter",
}
.bt())?
}
if self.device().location() != src.device.location() {
Err(Error::DeviceMismatchBinaryOp {
lhs: self.device().location(),
rhs: src.device().location(),
op: "slice-scatter",
}
.bt())?
}
if self.rank() != src.rank() {
Err(Error::UnexpectedNumberOfDims {
expected: self.rank(),
got: src.rank(),
shape: src.shape().clone(),
}
.bt())?
}
let shape_ok =
self.dims()
.iter()
.zip(src.dims().iter())
.enumerate()
.all(|(dim_idx, (&d1, &d2))| {
if 0 == dim_idx {
d2 + start <= d1
} else {
d1 == d2
}
});
if !shape_ok {
Err(Error::ShapeMismatchBinaryOp {
op: "slice-scatter (self, src)",
lhs: self.shape().clone(),
rhs: src.shape().clone(),
}
.bt())?
}
let mut storage = unsafe { self.device().alloc_uninit(self.shape(), self.dtype())? };
self.storage()
.copy_strided_src(&mut storage, 0, self.layout())?;
let offset = start * src.dims()[1..].iter().product::<usize>();
src.storage()
.copy_strided_src(&mut storage, offset, src.layout())?;
let op = BackpropOp::new2(self, src, |t1, t2| Op::SliceScatter0(t1, t2, start));
Ok(from_storage(storage, self.shape(), op, false))
}
/// Accumulate elements from `source` at the positions specified by `indexes` and add them to `self`.
pub fn index_add<D: Dim>(&self, indexes: &Self, source: &Self, dim: D) -> Result<Self> {
let dim = dim.to_index(self.shape(), "index-add")?;
let source_dims = source.dims();
let self_dims = self.dims();
let mismatch = if source_dims.len() != self_dims.len() {
true
} else {
let mut mismatch = false;
for (i, (&d1, &d2)) in self_dims.iter().zip(source_dims.iter()).enumerate() {
if i != dim && d1 != d2 {
mismatch = true;
break;
}
}
mismatch
};
if mismatch {
Err(Error::ShapeMismatchBinaryOp {
op: "index-add (self, source)",
lhs: self.shape().clone(),
rhs: source.shape().clone(),
}
.bt())?
}
// The number of elements in `indexes` must match the size of the dimension on which the add
// is performed on the source tensor (and the index values from `indexes` address positions in
// the target tensor `self`).
let indexes_len = indexes.dims1()?;
if source_dims[dim] != indexes_len {
Err(Error::ShapeMismatchBinaryOp {
op: "index-add (ids, source))",
lhs: indexes.shape().clone(),
rhs: source.shape().clone(),
}
.bt())?
}
let storage = self.storage().index_add(
self.layout(),
&indexes.storage(),
indexes.layout(),
&source.storage(),
source.layout(),
dim,
)?;
let op = BackpropOp::new3(self, indexes, source, |t1, t2, t3| {
Op::IndexAdd(t1, t2, t3, dim)
});
Ok(from_storage(storage, self.shape(), op, false))
}
/// Gather values across the target dimension.
///
/// # Arguments
///
/// * `self` - The input tensor.
/// * `indexes` - The indices of elements to gather; this should have the same number of dimensions as `self`
///   and `indexes.dims()[d] <= self.dims()[d]` for all dimensions `d != dim`
/// * `dim` - the target dimension.
///
/// The resulting tensor has the same shape as `indexes` and uses values from `self` indexed on
/// dimension `dim` by the values in `indexes`.
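///
/// For example, gathering along the last dimension should give:
/// ```rust
/// use candle_core::{Tensor, Device};
/// let values = Tensor::new(&[[1f32, 2.], [3., 4.]], &Device::Cpu)?;
/// let ids = Tensor::new(&[[0u32], [1u32]], &Device::Cpu)?;
/// let gathered = values.gather(&ids, 1)?;
/// assert_eq!(gathered.to_vec2::<f32>()?, &[[1.], [4.]]);
/// # Ok::<(), candle_core::Error>(())
/// ```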
pub fn gather<D: Dim>(&self, indexes: &Self, dim: D) -> Result<Self> {
let dim = dim.to_index(self.shape(), "gather")?;
let self_dims = self.dims();
let indexes_dims = indexes.dims();
let mismatch = if indexes_dims.len() != self_dims.len() {
true
} else {
let mut mismatch = false;
for (i, (&d1, &d2)) in self_dims.iter().zip(indexes_dims.iter()).enumerate() {
if i != dim && d1 < d2 {
mismatch = true;
break;
}
}
mismatch
};
if mismatch {
Err(Error::ShapeMismatchBinaryOp {
op: "gather",
lhs: self.shape().clone(),
rhs: indexes.shape().clone(),
}
.bt())?
}
let storage =
self.storage()
.gather(self.layout(), &indexes.storage(), indexes.layout(), dim)?;
let op = BackpropOp::new2(self, indexes, |t1, t2| Op::Gather(t1, t2, dim));
Ok(from_storage(storage, indexes.shape(), op, false))
}
/// Select values for the input tensor at the target indexes across the specified dimension.
///
/// The `indexes` argument is an int tensor with a single dimension.
/// The output has the same number of dimensions as the `self` input. The target dimension of
/// the output has the same length as `indexes` and the values are taken from `self` using
/// the indices from `indexes`. Other dimensions have the same number of elements as the input
/// tensor.
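///
/// For example, selecting rows 0 and 2 should give:
/// ```rust
/// use candle_core::{Tensor, Device};
/// let values = Tensor::new(&[[0f32, 1.], [2., 3.], [4., 5.]], &Device::Cpu)?;
/// let ids = Tensor::new(&[0u32, 2u32], &Device::Cpu)?;
/// let selected = values.index_select(&ids, 0)?;
/// assert_eq!(selected.to_vec2::<f32>()?, &[[0., 1.], [4., 5.]]);
/// # Ok::<(), candle_core::Error>(())
/// ```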
pub fn index_select<D: Dim>(&self, indexes: &Self, dim: D) -> Result<Self> {
let dim = dim.to_index(self.shape(), "index-select")?;
let indexes_len = match indexes.dims() {
[l] => *l,
_ => Err(Error::ShapeMismatchBinaryOp {
lhs: self.shape().clone(),
rhs: indexes.shape().clone(),
op: "index-select",
}
.bt())?,
};
let storage = self.storage().index_select(
&indexes.storage(),
self.layout(),
indexes.layout(),
dim,
)?;
let mut dims = self.dims().to_vec();
dims[dim] = indexes_len;
let op = BackpropOp::new2(self, indexes, |t1, t2| Op::IndexSelect(t1, t2, dim));
Ok(from_storage(storage, dims, op, false))
}
/// Returns an iterator over the positions of the elements in the storage when ranging over the
/// index tuples in lexicographic order.
pub fn strided_index(&self) -> crate::StridedIndex {
self.layout.strided_index()
}
/// Similar to `strided_index` but returns the position of the start of each contiguous block
/// as well as the length of the contiguous blocks. For a contiguous tensor, the index iterator
/// will only return the start offset and the size is the number of elements in the
/// tensor.
pub fn strided_blocks(&self) -> crate::StridedBlocks {
self.layout.strided_blocks()
}
/// Returns the data contained in a 1D tensor as a vector of scalar values.
pub fn to_vec1<S: crate::WithDType>(&self) -> Result<Vec<S>> {
if self.rank() != 1 {
Err(Error::UnexpectedNumberOfDims {
expected: 1,
got: self.rank(),
shape: self.shape().clone(),
}
.bt())?
}
let from_cpu_storage = |cpu_storage: &crate::CpuStorage| {
let data = S::cpu_storage_as_slice(cpu_storage)?;
let data = match self.layout.contiguous_offsets() {
Some((o1, o2)) => data[o1..o2].to_vec(),
None => self.strided_index().map(|i| data[i]).collect(),
};
Ok::<Vec<_>, Error>(data)
};
match &*self.storage() {
Storage::Cpu(storage) => from_cpu_storage(storage),
Storage::Cuda(storage) => from_cpu_storage(&storage.to_cpu_storage()?),
Storage::Metal(storage) => from_cpu_storage(&storage.to_cpu_storage()?),
}
}
/// Returns the data contained in a 2D tensor as a vector of vector of scalar values.
pub fn to_vec2<S: crate::WithDType>(&self) -> Result<Vec<Vec<S>>> {
let (dim1, dim2) = self.dims2()?;
let from_cpu_storage = |cpu_storage: &crate::CpuStorage| {
let data = S::cpu_storage_as_slice(cpu_storage)?;
let mut rows = vec![];
match self.layout.contiguous_offsets() {
Some((o1, o2)) => {
let data = &data[o1..o2];
for idx_row in 0..dim1 {
rows.push(data[idx_row * dim2..(idx_row + 1) * dim2].to_vec())
}
}
None => {
let mut src_index = self.strided_index();
for _idx_row in 0..dim1 {
let row = (0..dim2).map(|_| data[src_index.next().unwrap()]).collect();
rows.push(row)
}
assert!(src_index.next().is_none());
}
}
Ok(rows)
};
match &*self.storage() {
Storage::Cpu(storage) => from_cpu_storage(storage),
Storage::Cuda(storage) => from_cpu_storage(&storage.to_cpu_storage()?),
Storage::Metal(storage) => from_cpu_storage(&storage.to_cpu_storage()?),
}
}
/// Returns the data contained in a 3D tensor.
pub fn to_vec3<S: crate::WithDType>(&self) -> Result<Vec<Vec<Vec<S>>>> {
let (dim1, dim2, dim3) = self.dims3()?;
let from_cpu_storage = |cpu_storage: &crate::CpuStorage| {
let data = S::cpu_storage_as_slice(cpu_storage)?;
let mut top_rows = vec![];
match self.layout.contiguous_offsets() {
Some((o1, o2)) => {
let data = &data[o1..o2];
let dim23 = dim2 * dim3;
for idx1 in 0..dim1 {
let data = &data[idx1 * dim23..(idx1 + 1) * dim23];
let mut rows = vec![];
for idx2 in 0..dim2 {
rows.push(data[idx2 * dim3..(idx2 + 1) * dim3].to_vec())
}
top_rows.push(rows);
}
}
None => {
let mut src_index = self.strided_index();
for _idx in 0..dim1 {
let mut rows = vec![];
for _jdx in 0..dim2 {
let row = (0..dim3).map(|_| data[src_index.next().unwrap()]).collect();
rows.push(row)
}
top_rows.push(rows);
}
assert!(src_index.next().is_none());
}
}
Ok(top_rows)
};
match &*self.storage() {
Storage::Cpu(storage) => from_cpu_storage(storage),
Storage::Cuda(storage) => from_cpu_storage(&storage.to_cpu_storage()?),
Storage::Metal(storage) => from_cpu_storage(&storage.to_cpu_storage()?),
}
}
/// The dtype for the elements stored in the input tensor.
pub fn dtype(&self) -> DType {
self.dtype
}
/// The device on which the input tensor is located.
pub fn device(&self) -> &Device {
&self.device
}
/// The tensor shape, i.e. dimension sizes on each axis.
pub fn shape(&self) -> &Shape {
self.layout().shape()
}
/// The dimension size for this tensor on each axis.
pub fn dims(&self) -> &[usize] {
self.shape().dims()
}
/// The dimension size for a specified dimension index.
pub fn dim<D: Dim>(&self, dim: D) -> Result<usize> {
let dim = dim.to_index(self.shape(), "dim")?;
Ok(self.dims()[dim])
}
/// The layout of the input tensor, this stores both the shape of the tensor as well as the
/// strides and the start offset to apply to the underlying storage.
pub fn layout(&self) -> &Layout {
&self.layout
}
pub fn stride(&self) -> &[usize] {
self.layout.stride()
}
/// The number of dimensions for this tensor, 0 for a scalar tensor, 1 for a 1D tensor, etc.
pub fn rank(&self) -> usize {
self.shape().rank()
}
/// The number of elements stored in this tensor.
pub fn elem_count(&self) -> usize {
self.shape().elem_count()
}
/// The unique identifier for this tensor.
pub fn id(&self) -> TensorId {
self.id
}
/// Whether this tensor is a variable or not. A variable is a tensor for which gradients are
/// tracked and on which backpropagation can be performed.
pub fn is_variable(&self) -> bool {
self.is_variable
}
pub(crate) fn op(&self) -> &Option<Op> {
&self.op
}
/// Computes the max of all the elements in this tensor and returns a tensor holding this
/// scalar with zero dimensions.
///
/// ```rust
/// use candle_core::{Tensor, Device};
/// let tensor = Tensor::new(&[[0f32, 1.], [2., 3.], [4., 5.]], &Device::Cpu)?;
/// let tensor = tensor.max_all()?;
/// assert_eq!(tensor.to_scalar::<f32>()?, 5.);
/// # Ok::<(), candle_core::Error>(())
/// ```
pub fn max_all(&self) -> Result<Tensor> {
if self.rank() == 0 {
Ok(self.clone())
} else {
self.flatten_all()?.max(0)
}
}
/// Computes the min of all the elements in this tensor and returns a tensor holding this
/// scalar with zero dimensions.
///
/// ```rust
/// use candle_core::{Tensor, Device};
/// let tensor = Tensor::new(&[[0f32, 1.], [2., 3.], [4., 5.]], &Device::Cpu)?;
/// let tensor = tensor.min_all()?;
/// assert_eq!(tensor.to_scalar::<f32>()?, 0.);
/// # Ok::<(), candle_core::Error>(())
/// ```
pub fn min_all(&self) -> Result<Tensor> {
if self.rank() == 0 {
Ok(self.clone())
} else {
self.flatten_all()?.min(0)
}
}
/// Computes the sum of all the elements in this tensor and returns a tensor holding this
/// scalar with zero dimensions.
///
/// ```rust
/// use candle_core::{Tensor, Device};
/// let tensor = Tensor::new(&[[0f32, 1.], [2., 3.], [4., 5.]], &Device::Cpu)?;
/// let tensor = tensor.sum_all()?;
/// assert_eq!(tensor.to_scalar::<f32>()?, 15.);
/// # Ok::<(), candle_core::Error>(())
/// ```
pub fn sum_all(&self) -> Result<Tensor> {
let dims: Vec<_> = (0..self.rank()).collect();
self.sum(dims)
}
pub fn mean_all(&self) -> Result<Tensor> {
self.sum_all()? / self.elem_count() as f64
}
fn flatten_<D1: Dim, D2: Dim>(
&self,
start_dim: Option<D1>,
end_dim: Option<D2>,
) -> Result<Tensor> {
if self.rank() == 0 {
self.reshape(1)
} else {
let start_dim = match start_dim {
None => 0,
Some(dim) => dim.to_index(self.shape(), "flatten")?,
};
let end_dim = match end_dim {
None => self.rank() - 1,
Some(dim) => dim.to_index(self.shape(), "flatten")?,
};
if start_dim < end_dim {
let dims = self.dims();
let mut dst_dims = dims[..start_dim].to_vec();
dst_dims.push(dims[start_dim..end_dim + 1].iter().product::<usize>());
if end_dim + 1 < dims.len() {
dst_dims.extend(&dims[end_dim + 1..]);
}
self.reshape(dst_dims)
} else {
Ok(self.clone())
}
}
}
/// Flattens the input tensor on the dimension indexes from `start_dim` to `end_dim` (both
/// inclusive).
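///
/// For example, flattening the first two dimensions should give:
/// ```rust
/// use candle_core::{Tensor, DType, Device};
/// let a = Tensor::zeros((2, 3, 4), DType::F32, &Device::Cpu)?;
/// let b = a.flatten(0, 1)?;
/// assert_eq!(b.dims(), &[6, 4]);
/// # Ok::<(), candle_core::Error>(())
/// ```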
pub fn flatten<D1: Dim, D2: Dim>(&self, start_dim: D1, end_dim: D2) -> Result<Tensor> {
self.flatten_(Some(start_dim), Some(end_dim))
}
/// Flattens the input tensor on the dimension indexes from `0` to `end_dim` (inclusive).
pub fn flatten_to<D: Dim>(&self, end_dim: D) -> Result<Tensor> {
self.flatten_(None::<usize>, Some(end_dim))
}
/// Flattens the input tensor on the dimension indexes from `start_dim` (inclusive) to the last
/// dimension.
pub fn flatten_from<D: Dim>(&self, start_dim: D) -> Result<Tensor> {
self.flatten_(Some(start_dim), None::<usize>)
}
/// Flattens the input tensor by reshaping it into a one dimension tensor.
///
/// ```rust
/// use candle_core::{Tensor, Device};
/// let tensor = Tensor::new(&[[0f32, 1.], [2., 3.], [4., 5.]], &Device::Cpu)?;
/// let tensor = tensor.flatten_all()?;
/// assert_eq!(tensor.to_vec1::<f32>()?, &[0., 1., 2., 3., 4., 5.]);
/// # Ok::<(), candle_core::Error>(())
/// ```
pub fn flatten_all(&self) -> Result<Tensor> {
self.flatten_(None::<usize>, None::<usize>)
}
/// Returns the sub-tensor fixing the index at `i` on the first dimension.
///
/// ```rust
/// use candle_core::{Tensor, Device};
/// let tensor = Tensor::new(&[[0f32, 1.], [2., 3.], [4., 5.]], &Device::Cpu)?;
/// let t = tensor.get(0)?;
/// assert_eq!(t.to_vec1::<f32>()?, &[0., 1.]);
/// let t = tensor.get(1)?;
/// assert_eq!(t.to_vec1::<f32>()?, &[2., 3.]);
/// # Ok::<(), candle_core::Error>(())
/// ```
pub fn get(&self, i: usize) -> Result<Tensor> {
let dims = self.dims();
if dims.is_empty() {
Ok(self.clone())
} else {
self.narrow(0, i, 1)?.reshape(&dims[1..])
}
}
/// Returns the sub-tensor fixing the index at `index` on the dimension `dim`.
///
/// ```rust
/// use candle_core::{Tensor, Device};
/// let tensor = Tensor::new(&[[0f32, 1.], [2., 3.], [4., 5.]], &Device::Cpu)?;
/// let t = tensor.get_on_dim(1, 0)?;
/// assert_eq!(t.to_vec1::<f32>()?, &[0., 2., 4.]);
/// let t = tensor.get_on_dim(1, 1)?;
/// assert_eq!(t.to_vec1::<f32>()?, &[1., 3., 5.]);
/// let t = tensor.get_on_dim(0, 1)?;
/// assert_eq!(t.to_vec1::<f32>()?, &[2., 3.]);
/// # Ok::<(), candle_core::Error>(())
/// ```
pub fn get_on_dim<D: Dim>(&self, dim: D, index: usize) -> Result<Tensor> {
let dim = dim.to_index(self.shape(), "get_on_dim")?;
self.narrow(dim, index, 1)?.squeeze(dim)
}
/// Returns a tensor that is a transposed version of the input, the last two dimensions of the
/// input are swapped.
///
/// ```rust
/// use candle_core::{Tensor, Device};
/// let tensor = Tensor::new(&[[0f32, 1.], [2., 3.], [4., 5.]], &Device::Cpu)?;
/// let tensor = tensor.t()?;
/// assert_eq!(tensor.to_vec2::<f32>()?, &[[0.0, 2.0, 4.0], [1.0, 3.0, 5.0]]);
/// # Ok::<(), candle_core::Error>(())
/// ```
pub fn t(&self) -> Result<Tensor> {
let rank = self.rank();
if rank < 2 {
Err(Error::UnexpectedNumberOfDims {
expected: 2,
got: rank,
shape: self.shape().clone(),
}
.bt())?
}
self.transpose(rank - 2, rank - 1)
}
/// Returns a tensor that is a transposed version of the input, the given dimensions are
/// swapped.
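///
/// For example, swapping the first and last dimensions should give:
/// ```rust
/// use candle_core::{Tensor, Device};
/// let a = Tensor::arange(0u32, 24u32, &Device::Cpu)?.reshape((2, 3, 4))?;
/// let b = a.transpose(0, 2)?;
/// assert_eq!(b.dims(), &[4, 3, 2]);
/// # Ok::<(), candle_core::Error>(())
/// ```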
pub fn transpose<D1: Dim, D2: Dim>(&self, dim1: D1, dim2: D2) -> Result<Tensor> {
let dim1 = dim1.to_index(self.shape(), "transpose")?;
let dim2 = dim2.to_index(self.shape(), "transpose")?;
if dim1 == dim2 {
return Ok(self.clone());
}
let op = BackpropOp::new1(self, |t| Op::Transpose(t, dim1, dim2));
let tensor_ = Tensor_ {
id: TensorId::new(),
storage: self.storage.clone(),
layout: self.layout.transpose(dim1, dim2)?,
op,
is_variable: false,
dtype: self.dtype,
device: self.device.clone(),
};
Ok(Tensor(Arc::new(tensor_)))
}
/// Returns a tensor with the same data as the input where the dimensions have been permuted.
/// dims must be a permutation, i.e. include each dimension index exactly once.
///
/// ```rust
/// use candle_core::{Tensor, Device};
/// let tensor = Tensor::arange(0u32, 120u32, &Device::Cpu)?.reshape((2, 3, 4, 5))?;
/// assert_eq!(tensor.dims(), &[2, 3, 4, 5]);
/// let tensor = tensor.permute((2, 3, 1, 0))?;
/// assert_eq!(tensor.dims(), &[4, 5, 3, 2]);
/// # Ok::<(), candle_core::Error>(())
/// ```
pub fn permute<D: Dims>(&self, dims: D) -> Result<Tensor> {
let dims = dims.to_indexes(self.shape(), "permute")?;
// O(n^2) permutation check but these arrays are small.
let is_permutation =
dims.len() == self.rank() && (0..dims.len()).all(|i| dims.contains(&i));
if !is_permutation {
bail!(
"dimension mismatch in permute, tensor {:?}, dims: {:?}",
self.dims(),
dims
)
}
let op = BackpropOp::new1(self, |t| Op::Permute(t, dims.clone()));
let tensor_ = Tensor_ {
id: TensorId::new(),
storage: self.storage.clone(),
layout: self.layout.permute(&dims)?,
op,
is_variable: false,
dtype: self.dtype,
device: self.device.clone(),
};
Ok(Tensor(Arc::new(tensor_)))
}
/// Returns true if the data is stored in a C contiguous (aka row major) way.
pub fn is_contiguous(&self) -> bool {
self.layout.is_contiguous()
}
/// Returns true if the data is stored in a Fortran contiguous (aka column major) way.
pub fn is_fortran_contiguous(&self) -> bool {
self.layout.is_fortran_contiguous()
}
/// Compared to clone, this copies the actual storage but may fail because of running out of
/// memory.
pub fn copy(&self) -> Result<Tensor> {
let op = BackpropOp::new1(self, Op::Copy);
let tensor_ = Tensor_ {
id: TensorId::new(),
storage: Arc::new(RwLock::new(self.storage().try_clone(self.layout())?)),
layout: self.layout.clone(),
op,
is_variable: false,
dtype: self.dtype,
device: self.device.clone(),
};
Ok(Tensor(Arc::new(tensor_)))
}
/// Returns a new tensor detached from the current graph, gradients are not propagated through
/// this new node. The storage of this tensor is shared with the initial tensor.
///
/// If the tensor is already detached from the computation graph, the same tensor is returned.
pub fn detach(&self) -> Tensor {
if self.op.is_none() && !self.is_variable {
self.clone()
} else {
let tensor_ = Tensor_ {
id: TensorId::new(),
storage: self.storage.clone(),
layout: self.layout.clone(),
op: BackpropOp::none(),
is_variable: false,
dtype: self.dtype,
device: self.device.clone(),
};
Tensor(Arc::new(tensor_))
}
}
/// If the target device is the same as the tensor device, only a shallow copy is performed.
pub fn to_device(&self, device: &Device) -> Result<Tensor> {
if self.device().same_device(device) {
Ok(self.clone())
} else {
let storage = match (&*self.storage(), device) {
(Storage::Cpu(storage), Device::Cuda(cuda)) => {
Storage::Cuda(cuda.storage_from_cpu_storage(storage)?)
}
(Storage::Cpu(storage), Device::Metal(metal)) => {
Storage::Metal(metal.storage_from_cpu_storage(storage)?)
}
(Storage::Cuda(storage), Device::Cpu) => Storage::Cpu(storage.to_cpu_storage()?),
(Storage::Metal(storage), Device::Cpu) => Storage::Cpu(storage.to_cpu_storage()?),
(Storage::Cuda(storage), Device::Cuda(cuda)) => {
// TODO: Avoid passing through the cpu storage here, especially if the gpu ids
// are the same.
let cpu_storage = storage.to_cpu_storage()?;
Storage::Cuda(cuda.storage_from_cpu_storage(&cpu_storage)?)
}
(Storage::Cpu(storage), Device::Cpu) => Storage::Cpu(storage.clone()),
_ => {
bail!(
"not implemented yet, self.device: {:?}, device: {:?}",
self.device(),
device
)
}
};
let op = BackpropOp::new1(self, Op::ToDevice);
let tensor_ = Tensor_ {
id: TensorId::new(),
storage: Arc::new(RwLock::new(storage)),
layout: self.layout.clone(),
op,
is_variable: false,
dtype: self.dtype,
device: device.clone(),
};
Ok(Tensor(Arc::new(tensor_)))
}
}
/// Returns a new tensor duplicating data from the original tensor. New dimensions are inserted
/// on the left.
pub fn broadcast_left<S: Into<Shape>>(&self, left_shape: S) -> Result<Self> {
let left_shape = left_shape.into();
let mut dims = left_shape.into_dims();
dims.extend(self.dims());
self.broadcast_as(dims)
}
/// Broadcast the input tensor to the target shape. This returns an error if the input shape is
/// not compatible with the target shape.
///
/// If the input shape is `i_1, i_2, ... i_k`, the target shape has to have `k` dimensions or
/// more and shape `j_1, ..., j_l, t_1, t_2, ..., t_k`. The dimensions `j_1` to `j_l` can have
/// any value, the dimension `t_a` must be equal to `i_a` if `i_a` is different from 1. If
/// `i_a` is equal to 1, any value can be used.
pub fn broadcast_as<S: Into<Shape>>(&self, shape: S) -> Result<Self> {
let tensor_ = Tensor_ {
id: TensorId::new(),
storage: self.storage.clone(),
layout: self.layout.broadcast_as(shape)?,
op: BackpropOp::new1(self, Op::Broadcast),
is_variable: false,
dtype: self.dtype,
device: self.device.clone(),
};
Ok(Tensor(Arc::new(tensor_)))
}
/// An alias for broadcast_as.
pub fn expand<S: Into<Shape>>(&self, shape: S) -> Result<Self> {
self.broadcast_as(shape)
}
/// Casts the input tensor to the target `dtype`.
///
/// ```rust
/// use candle_core::{Tensor, Device};
/// let tensor = Tensor::new(3.14159265358979f64, &Device::Cpu)?;
/// assert_eq!(tensor.to_scalar::<f64>()?, 3.14159265358979);
/// let tensor = tensor.to_dtype(candle_core::DType::F32)?;
/// assert_eq!(tensor.to_scalar::<f32>()?, 3.1415927);
/// # Ok::<(), candle_core::Error>(())
/// ```
pub fn to_dtype(&self, dtype: DType) -> Result<Self> {
if self.dtype() == dtype {
Ok(self.clone())
} else {
let shape = self.shape();
let storage = self.storage().to_dtype(self.layout(), dtype)?;
let op = BackpropOp::new1(self, Op::ToDType);
Ok(from_storage(storage, shape.clone(), op, false))
}
}
/// Returns a tensor that is in row major order. This is the same as the original tensor if it
/// was already contiguous, otherwise a copy is triggered.
pub fn contiguous(&self) -> Result<Tensor> {
if self.is_contiguous() {
Ok(self.clone())
} else {
let shape = self.shape();
let mut storage = unsafe { self.device().alloc_uninit(shape, self.dtype())? };
self.storage()
.copy_strided_src(&mut storage, 0, self.layout())?;
let op = BackpropOp::new1(self, Op::Copy);
Ok(from_storage(storage, shape.clone(), op, false))
}
}
/// Returns a tensor that is in row major order. This always makes a copy.
pub fn force_contiguous(&self) -> Result<Tensor> {
let shape = self.shape();
let mut storage = unsafe { self.device().alloc_uninit(shape, self.dtype())? };
self.storage()
.copy_strided_src(&mut storage, 0, self.layout())?;
let op = BackpropOp::new1(self, Op::Copy);
Ok(from_storage(storage, shape.clone(), op, false))
}
/// Create a variable based on the values currently stored in a tensor. The storage is always
/// copied.
pub(crate) fn make_var(&self) -> Result<Tensor> {
let shape = self.shape().clone();
let mut storage = unsafe { self.device().alloc_uninit(&shape, self.dtype())? };
self.storage()
.copy_strided_src(&mut storage, 0, self.layout())?;
Ok(from_storage(storage, shape, BackpropOp::none(), true))
}
/// Reshape returns a tensor with the target shape provided that the number of elements of the
/// original tensor is the same.
/// If the input tensor is contiguous, this is a view on the original data. Otherwise this uses
/// a new storage and copies the data over, the returned tensor is always contiguous.
///
/// The shape can be specified using a tuple of `usize` and at most one `()` in which case
/// the behavior is the same as when using `-1` in PyTorch: this dimension size is adjusted so
/// as to match the number of elements in the tensor.
///
/// ```rust
/// # use candle_core::{Tensor, DType, Device, D};
/// let a = Tensor::zeros((2, 3), DType::F32, &Device::Cpu)?;
///
/// let c = a.reshape((1, 6))?;
/// assert_eq!(c.shape().dims(), &[1, 6]);
///
/// let c = a.reshape((3, 2))?;
/// assert_eq!(c.shape().dims(), &[3, 2]);
///
/// let c = a.reshape((2, (), 1))?;
/// assert_eq!(c.shape().dims(), &[2, 3, 1]);
///
/// # Ok::<(), candle_core::Error>(())
/// ```
pub fn reshape<S: crate::shape::ShapeWithOneHole>(&self, s: S) -> Result<Tensor> {
let shape = s.into_shape(self.elem_count())?;
if shape.elem_count() != self.elem_count() {
return Err(Error::ShapeMismatchBinaryOp {
lhs: self.shape().clone(),
rhs: shape,
op: "reshape",
}
.bt());
}
let op = BackpropOp::new1(self, Op::Reshape);
if self.is_contiguous() {
let tensor_ = Tensor_ {
id: TensorId::new(),
storage: self.storage.clone(),
layout: Layout::contiguous_with_offset(shape, self.layout.start_offset()),
op,
is_variable: false,
dtype: self.dtype,
device: self.device.clone(),
};
Ok(Tensor(Arc::new(tensor_)))
} else {
let mut storage = unsafe { self.device().alloc_uninit(&shape, self.dtype())? };
self.storage()
.copy_strided_src(&mut storage, 0, self.layout())?;
Ok(from_storage(storage, shape, op, false))
}
}
/// Creates a new tensor with the specified dimension removed if its size was one.
///
/// ```rust
/// # use candle_core::{Tensor, DType, Device, D};
/// let a = Tensor::zeros((2, 3, 1), DType::F32, &Device::Cpu)?;
///
/// let c = a.squeeze(2)?;
/// assert_eq!(c.shape().dims(), &[2, 3]);
///
/// let c = a.squeeze(D::Minus1)?;
/// assert_eq!(c.shape().dims(), &[2, 3]);
/// # Ok::<(), candle_core::Error>(())
/// ```
pub fn squeeze<D: Dim>(&self, dim: D) -> Result<Self> {
// The PyTorch semantics are to return the same tensor if the target dimension
// does not have a size of 1.
let dims = self.dims();
let dim = dim.to_index(self.shape(), "squeeze")?;
if dims[dim] == 1 {
let mut dims = dims.to_vec();
let mut strides = self.stride().to_vec();
dims.remove(dim);
strides.remove(dim);
let tensor_ = Tensor_ {
id: TensorId::new(),
storage: self.storage.clone(),
layout: Layout::new(dims.into(), strides, self.layout.start_offset()),
op: BackpropOp::new1(self, Op::Reshape),
is_variable: false,
dtype: self.dtype,
device: self.device.clone(),
};
Ok(Tensor(Arc::new(tensor_)))
} else {
Ok(self.clone())
}
}
/// Creates a new tensor with a dimension of size one inserted at the specified position.
///
/// ```rust
/// # use candle_core::{Tensor, DType, Device, D};
/// let a = Tensor::zeros((2, 3), DType::F32, &Device::Cpu)?;
///
/// let c = a.unsqueeze(0)?;
/// assert_eq!(c.shape().dims(), &[1, 2, 3]);
///
/// let c = a.unsqueeze(D::Minus1)?;
/// assert_eq!(c.shape().dims(), &[2, 3, 1]);
/// # Ok::<(), candle_core::Error>(())
/// ```
pub fn unsqueeze<D: Dim>(&self, dim: D) -> Result<Self> {
let mut dims = self.dims().to_vec();
let mut strides = self.stride().to_vec();
let dim = dim.to_index_plus_one(self.shape(), "unsqueeze")?;
// Cannot panic because to_index_plus_one already checks dimensions
dims.insert(dim, 1);
// Any stride would work here, but we pick one so as to maximize the probability of remaining
// C contiguous.
let stride = if dim < strides.len() { strides[dim] } else { 1 };
strides.insert(dim, stride);
let tensor_ = Tensor_ {
id: TensorId::new(),
storage: self.storage.clone(),
layout: Layout::new(dims.into(), strides, self.layout.start_offset()),
op: BackpropOp::new1(self, Op::Reshape),
is_variable: false,
dtype: self.dtype,
device: self.device.clone(),
};
Ok(Tensor(Arc::new(tensor_)))
}
/// Stacks two or more tensors along a particular dimension.
///
/// All tensors must have the same rank, and the output has one additional dimension over the inputs.
///
/// ```rust
/// # use candle_core::{Tensor, DType, Device};
/// let a = Tensor::zeros((2, 3), DType::F32, &Device::Cpu)?;
/// let b = Tensor::zeros((2, 3), DType::F32, &Device::Cpu)?;
///
/// let c = Tensor::stack(&[&a, &b], 0)?;
/// assert_eq!(c.shape().dims(), &[2, 2, 3]);
///
/// let c = Tensor::stack(&[&a, &b], 2)?;
/// assert_eq!(c.shape().dims(), &[2, 3, 2]);
/// # Ok::<(), candle_core::Error>(())
/// ```
pub fn stack<A: AsRef<Tensor>, D: Dim>(args: &[A], dim: D) -> Result<Self> {
if args.is_empty() {
Err(Error::OpRequiresAtLeastOneTensor { op: "stack" }.bt())?
}
let dim = dim.to_index_plus_one(args[0].as_ref().shape(), "stack")?;
let args = args
.iter()
.map(|t| t.as_ref().unsqueeze(dim))
.collect::<Result<Vec<_>>>()?;
Self::cat(&args, dim)
}
/// Pad the input tensor using 0s along dimension `dim`. This adds `left` elements before the
/// input tensor values and `right` elements after.
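///
/// For example, padding with one zero on each side of the last dimension should give:
/// ```rust
/// use candle_core::{Tensor, Device};
/// let t = Tensor::new(&[[1f32, 2.], [3., 4.]], &Device::Cpu)?;
/// let t = t.pad_with_zeros(1, 1, 1)?;
/// assert_eq!(t.to_vec2::<f32>()?, &[[0., 1., 2., 0.], [0., 3., 4., 0.]]);
/// # Ok::<(), candle_core::Error>(())
/// ```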
pub fn pad_with_zeros<D: Dim>(&self, dim: D, left: usize, right: usize) -> Result<Self> {
if left == 0 && right == 0 {
Ok(self.clone())
} else if left == 0 {
let dim = dim.to_index(self.shape(), "pad_with_zeros")?;
let mut dims = self.dims().to_vec();
dims[dim] = right;
let right = Tensor::zeros(dims.as_slice(), self.dtype, self.device())?;
Tensor::cat(&[self, &right], dim)
} else if right == 0 {
let dim = dim.to_index(self.shape(), "pad_with_zeros")?;
let mut dims = self.dims().to_vec();
dims[dim] = left;
let left = Tensor::zeros(dims.as_slice(), self.dtype, self.device())?;
Tensor::cat(&[&left, self], dim)
} else {
let dim = dim.to_index(self.shape(), "pad_with_zeros")?;
let mut dims = self.dims().to_vec();
dims[dim] = left;
let left = Tensor::zeros(dims.as_slice(), self.dtype, self.device())?;
dims[dim] = right;
let right = Tensor::zeros(dims.as_slice(), self.dtype, self.device())?;
Tensor::cat(&[&left, self, &right], dim)
}
}
/// Pad the input tensor using the edge values along dimension `dim` (replication padding). This
/// adds `left` elements before the input tensor values and `right` elements after.
pub fn pad_with_same<D: Dim>(&self, dim: D, left: usize, right: usize) -> Result<Self> {
if left == 0 && right == 0 {
Ok(self.clone())
} else if self.elem_count() == 0 {
bail!("cannot use pad_with_same on an empty tensor")
} else if left == 0 {
let dim = dim.to_index(self.shape(), "pad_with_same")?;
let r = self.narrow(dim, self.dim(dim)? - 1, 1)?;
let mut v = vec![self];
for _ in 0..right {
v.push(&r)
}
Tensor::cat(&v, dim)
} else if right == 0 {
let dim = dim.to_index(self.shape(), "pad_with_same")?;
let l = self.narrow(dim, 0, 1)?;
let mut v = vec![];
for _ in 0..left {
v.push(&l)
}
v.push(self);
Tensor::cat(&v, dim)
} else {
let dim = dim.to_index(self.shape(), "pad_with_same")?;
let l = self.narrow(dim, 0, 1)?;
let r = self.narrow(dim, self.dim(dim)? - 1, 1)?;
let mut v = vec![];
for _ in 0..left {
v.push(&l)
}
v.push(self);
for _ in 0..right {
v.push(&r)
}
Tensor::cat(&v, dim)
}
}
/// Run the `forward` method of `m` on `self`.
pub fn apply<M: crate::Module>(&self, m: &M) -> Result<Self> {
m.forward(self)
}
/// Run the `forward` method of `m` on `self`.
pub fn apply_t<M: crate::ModuleT>(&self, m: &M, train: bool) -> Result<Self> {
m.forward_t(self, train)
}
pub(crate) fn storage(&self) -> std::sync::RwLockReadGuard<'_, Storage> {
self.storage.read().unwrap()
}
pub(crate) fn storage_mut(&self) -> std::sync::RwLockWriteGuard<'_, Storage> {
self.storage.write().unwrap()
}
// If we extend the visibility of this function to be usable outside of this crate, we should
// make it unsafe.
pub(crate) fn storage_mut_and_layout(
&self,
) -> (std::sync::RwLockWriteGuard<'_, Storage>, &Layout) {
let storage = self.storage.write().unwrap();
(storage, &self.layout)
}
/// The storage used by this tensor, together with the layout to use to access it safely.
pub fn storage_and_layout(&self) -> (std::sync::RwLockReadGuard<'_, Storage>, &Layout) {
let storage = self.storage.read().unwrap();
(storage, &self.layout)
}
pub(crate) fn same_storage(&self, rhs: &Self) -> bool {
let lhs: &RwLock<Storage> = self.storage.as_ref();
let rhs: &RwLock<Storage> = rhs.storage.as_ref();
std::ptr::eq(lhs, rhs)
}
/// Normalize a 'relative' axis value: positive values are kept, negative
/// values means counting the dimensions from the back.
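///
/// A small usage sketch (added for illustration):
///
/// ```rust
/// # use candle_core::{Tensor, DType, Device};
/// let t = Tensor::zeros((2, 3), DType::F32, &Device::Cpu)?;
/// assert_eq!(t.normalize_axis(0)?, 0);
/// assert_eq!(t.normalize_axis(-1)?, 1);
/// # Ok::<(), candle_core::Error>(())
/// ```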
pub fn normalize_axis(&self, axis: i64) -> Result<usize> {
let rank = self.rank() as i64;
if rank <= axis {
bail!("axis {axis} is too large, tensor rank {rank}")
} else if 0 <= axis {
Ok(axis as usize)
} else {
let naxis = rank + axis;
if naxis < 0 {
bail!("axis {axis} is too small, tensor rank {rank}")
}
Ok(naxis as usize)
}
}
/// Returns a lower triangular matrix of ones of size n by n.
pub fn tril2(n: usize, dtype: DType, device: &Device) -> Result<Self> {
let t = Tensor::arange(0u32, n as u32, device)?;
let t1 = t.reshape((1, n))?.broadcast_as((n, n))?;
let t2 = t.reshape((n, 1))?.broadcast_as((n, n))?;
t1.le(&t2)?.to_dtype(dtype)
}
/// Returns an upper triangular matrix of ones of size n by n.
pub fn triu2(n: usize, dtype: DType, device: &Device) -> Result<Self> {
let t = Tensor::arange(0u32, n as u32, device)?;
let t1 = t.reshape((1, n))?.broadcast_as((n, n))?;
let t2 = t.reshape((n, 1))?.broadcast_as((n, n))?;
t1.ge(&t2)?.to_dtype(dtype)
}
/// Returns a matrix with a diagonal of ones of size n by n.
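///
/// A small usage sketch (added for illustration):
///
/// ```rust
/// # use candle_core::{Tensor, DType, Device};
/// let t = Tensor::eye(3, DType::F32, &Device::Cpu)?;
/// assert_eq!(t.to_vec2::<f32>()?, [[1f32, 0., 0.], [0., 1., 0.], [0., 0., 1.]]);
/// # Ok::<(), candle_core::Error>(())
/// ```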
pub fn eye(n: usize, dtype: DType, device: &Device) -> Result<Self> {
let t = Tensor::arange(0u32, n as u32, device)?;
let t1 = t.reshape((1, n))?.broadcast_as((n, n))?;
let t2 = t.reshape((n, 1))?.broadcast_as((n, n))?;
t1.eq(&t2)?.to_dtype(dtype)
}
/// Returns the cumulative sum of elements of the input tensor summed over the specified
/// dimension.
///
/// This operation is most efficient when dim is the last dimension of the tensor.
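///
/// A small usage sketch (added for illustration):
///
/// ```rust
/// # use candle_core::{Tensor, Device};
/// let t = Tensor::new(&[1f32, 2., 3., 4.], &Device::Cpu)?;
/// assert_eq!(t.cumsum(0)?.to_vec1::<f32>()?, [1f32, 3., 6., 10.]);
/// # Ok::<(), candle_core::Error>(())
/// ```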
pub fn cumsum<D: Dim>(&self, dim: D) -> Result<Self> {
let dim = dim.to_index(self.shape(), "cumsum")?;
let rank = self.rank();
if rank == 0 {
return Ok(self.clone());
}
let n_axis = self.dim(dim)?;
let triu = Tensor::triu2(n_axis, self.dtype(), self.device())?;
if rank == 1 {
self.unsqueeze(0)?.matmul(&triu)?.squeeze(0)
} else {
let last = rank - 1;
let t = self.transpose(dim, last)?;
let t = t.broadcast_matmul(&triu)?;
t.transpose(dim, last)
}
}
/// Returns a copy of `self` where the values within `ranges` have been replaced with the
/// content of `src`.
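///
/// A minimal usage sketch (added for illustration):
///
/// ```rust
/// # use candle_core::{Tensor, DType, Device};
/// let a = Tensor::zeros((2, 4), DType::F32, &Device::Cpu)?;
/// let b = Tensor::ones((1, 2), DType::F32, &Device::Cpu)?;
/// let c = a.slice_assign(&[0..1, 2..4], &b)?;
/// assert_eq!(c.to_vec2::<f32>()?, [[0f32, 0., 1., 1.], [0., 0., 0., 0.]]);
/// # Ok::<(), candle_core::Error>(())
/// ```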
pub fn slice_assign<D: std::ops::RangeBounds<usize>>(
&self,
ranges: &[D],
src: &Tensor,
) -> Result<Self> {
let src_dims = src.dims();
let self_dims = self.dims();
if self_dims.len() != src_dims.len() {
bail!(
"slice-assign requires input with the same rank {} <> {}",
self_dims.len(),
src_dims.len()
)
}
if self_dims.len() != ranges.len() {
bail!(
"slice-assign requires input with the same rank as there are ranges {} <> {}",
self_dims.len(),
ranges.len()
)
}
let mut src = src.clone();
let mut mask = Self::ones(src.shape(), DType::U8, src.device())?;
for (i, range) in ranges.iter().enumerate() {
let start_included = match range.start_bound() {
std::ops::Bound::Unbounded => 0,
std::ops::Bound::Included(v) => *v,
std::ops::Bound::Excluded(v) => *v + 1,
};
let end_excluded = match range.end_bound() {
std::ops::Bound::Unbounded => self_dims[i],
std::ops::Bound::Included(v) => *v + 1,
std::ops::Bound::Excluded(v) => *v,
};
if end_excluded <= start_included {
bail!("slice-assign: empty range for dim {i}, {start_included} {end_excluded}")
}
if self_dims[i] < end_excluded {
bail!(
"slice-assign: upper bound is out of range for dim {i}, {end_excluded} {}",
self_dims[i]
)
}
if end_excluded - start_included != src_dims[i] {
bail!(
"slice-assign: the range for dim {i} ({start_included}..{end_excluded}) does not match the size of src {}", src_dims[i]
)
}
src = src.pad_with_zeros(i, start_included, self_dims[i] - end_excluded)?;
mask = mask.pad_with_zeros(i, start_included, self_dims[i] - end_excluded)?
}
mask.where_cond(/* on_true= */ &src, /* on_false= */ self)
}
/// Returns log(sum(exp(tensor), dim)).
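///
/// A small usage sketch (added for illustration):
///
/// ```rust
/// # use candle_core::{Tensor, DType, Device};
/// let t = Tensor::zeros((2, 3), DType::F32, &Device::Cpu)?;
/// // Each resulting entry equals ln(3) since exp(0) is summed three times.
/// let l = t.log_sum_exp(1)?;
/// assert_eq!(l.dims(), &[2]);
/// # Ok::<(), candle_core::Error>(())
/// ```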
pub fn log_sum_exp<D: Dims>(&self, sum_dims: D) -> Result<Self> {
let sum_dims = sum_dims.to_indexes(self.shape(), "log-sum-exp")?;
if sum_dims.is_empty() {
return Ok(self.clone());
}
let max = sum_dims[1..]
.iter()
.try_fold(self.max_keepdim(sum_dims[0])?, |max, &dim| {
max.max_keepdim(dim)
})?;
let exp = self.broadcast_sub(&max)?.exp()?;
let sum = exp.sum(sum_dims.clone())?;
sum.log()? + max.squeeze_dims(&sum_dims)
}
/// Pointwise pow operation.
pub fn pow(&self, rhs: &Tensor) -> Result<Self> {
rhs.mul(&self.log()?)?.exp()
}
/// Broadcasting version of `pow`.
pub fn broadcast_pow(&self, rhs: &Tensor) -> Result<Self> {
rhs.broadcast_mul(&self.log()?)?.exp()
}
}
macro_rules! bin_trait {
($trait:ident, $fn1:ident, $mul:expr, $add:expr) => {
impl<B: std::borrow::Borrow<Tensor>> std::ops::$trait<B> for Tensor {
type Output = Result<Tensor>;
fn $fn1(self, rhs: B) -> Self::Output {
Tensor::$fn1(&self, rhs.borrow())
}
}
impl<B: std::borrow::Borrow<Tensor>> std::ops::$trait<B> for &Tensor {
type Output = Result<Tensor>;
fn $fn1(self, rhs: B) -> Self::Output {
Tensor::$fn1(&self, rhs.borrow())
}
}
impl<B: std::borrow::Borrow<Tensor>> std::ops::$trait<Tensor> for Result<B> {
type Output = Result<Tensor>;
fn $fn1(self, rhs: Tensor) -> Self::Output {
Tensor::$fn1(self?.borrow(), &rhs)
}
}
impl<B: std::borrow::Borrow<Tensor>> std::ops::$trait<&Tensor> for Result<B> {
type Output = Result<Tensor>;
fn $fn1(self, rhs: &Tensor) -> Self::Output {
Tensor::$fn1(self?.borrow(), rhs)
}
}
impl<B: std::borrow::Borrow<Tensor>> std::ops::$trait<Result<B>> for Tensor {
type Output = Result<Tensor>;
fn $fn1(self, rhs: Result<B>) -> Self::Output {
Tensor::$fn1(&self, rhs?.borrow())
}
}
impl<B: std::borrow::Borrow<Tensor>> std::ops::$trait<Result<B>> for &Tensor {
type Output = Result<Tensor>;
fn $fn1(self, rhs: Result<B>) -> Self::Output {
Tensor::$fn1(&self, rhs?.borrow())
}
}
impl std::ops::$trait<f64> for Tensor {
type Output = Result<Tensor>;
fn $fn1(self, rhs: f64) -> Self::Output {
self.affine($mul(rhs), $add(rhs))
}
}
impl std::ops::$trait<f64> for &Tensor {
type Output = Result<Tensor>;
fn $fn1(self, rhs: f64) -> Self::Output {
self.affine($mul(rhs), $add(rhs))
}
}
};
}
bin_trait!(Add, add, |_| 1., |v| v);
bin_trait!(Sub, sub, |_| 1., |v: f64| -v);
bin_trait!(Mul, mul, |v| v, |_| 0.);
bin_trait!(Div, div, |v| 1. / v, |_| 0.);
impl std::ops::Add<Tensor> for f64 {
type Output = Result<Tensor>;
fn add(self, rhs: Tensor) -> Self::Output {
rhs + self
}
}
impl std::ops::Add<&Tensor> for f64 {
type Output = Result<Tensor>;
fn add(self, rhs: &Tensor) -> Self::Output {
rhs + self
}
}
impl std::ops::Mul<Tensor> for f64 {
type Output = Result<Tensor>;
fn mul(self, rhs: Tensor) -> Self::Output {
rhs * self
}
}
impl std::ops::Mul<&Tensor> for f64 {
type Output = Result<Tensor>;
fn mul(self, rhs: &Tensor) -> Self::Output {
rhs * self
}
}
impl std::ops::Sub<Tensor> for f64 {
type Output = Result<Tensor>;
fn sub(self, rhs: Tensor) -> Self::Output {
rhs.affine(-1., self)
}
}
impl std::ops::Sub<&Tensor> for f64 {
type Output = Result<Tensor>;
fn sub(self, rhs: &Tensor) -> Self::Output {
rhs.affine(-1., self)
}
}
impl std::ops::Div<Tensor> for f64 {
type Output = Result<Tensor>;
#[allow(clippy::suspicious_arithmetic_impl)]
fn div(self, rhs: Tensor) -> Self::Output {
rhs.recip()? * self
}
}
impl std::ops::Div<&Tensor> for f64 {
type Output = Result<Tensor>;
#[allow(clippy::suspicious_arithmetic_impl)]
fn div(self, rhs: &Tensor) -> Self::Output {
rhs.recip()? * self
}
}
| candle/candle-core/src/tensor.rs/0 | {
"file_path": "candle/candle-core/src/tensor.rs",
"repo_id": "candle",
"token_count": 48959
} |
/// Regression test for pth files not loading on Windows.
#[test]
fn test_pth() {
let tensors = candle_core::pickle::PthTensors::new("tests/test.pt", None).unwrap();
tensors.get("test").unwrap().unwrap();
}
#[test]
fn test_pth_with_key() {
let tensors =
candle_core::pickle::PthTensors::new("tests/test_with_key.pt", Some("model_state_dict"))
.unwrap();
tensors.get("test").unwrap().unwrap();
}
#[test]
fn test_pth_fortran_contiguous() {
let tensors =
candle_core::pickle::PthTensors::new("tests/fortran_tensor_3d.pth", None).unwrap();
let tensor = tensors.get("tensor_fortran").unwrap().unwrap();
assert_eq!(tensor.dims3().unwrap(), (2, 3, 4));
assert_eq!(
tensor.to_vec3::<i64>().unwrap(),
[
[[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]],
[[13, 14, 15, 16], [17, 18, 19, 20], [21, 22, 23, 24]]
]
);
}
| candle/candle-core/tests/pth_tests.rs/0 | {
"file_path": "candle/candle-core/tests/pth_tests.rs",
"repo_id": "candle",
"token_count": 440
} |
//! The MNIST hand-written digit dataset.
//!
//! The files can be obtained from the following link:
//! <http://yann.lecun.com/exdb/mnist/>
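//!
//! A minimal usage sketch (illustrative; it assumes the four uncompressed `*-ubyte`
//! files are present in a local `data/` directory):
//!
//! ```ignore
//! let dataset = candle_datasets::vision::mnist::load_dir("data")?;
//! println!("train images: {:?}", dataset.train_images.shape());
//! println!("train labels: {:?}", dataset.train_labels.shape());
//! ```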
use candle::{DType, Device, Error, Result, Tensor};
use hf_hub::{api::sync::Api, Repo, RepoType};
use parquet::file::reader::{FileReader, SerializedFileReader};
use std::fs::File;
use std::io::{self, BufReader, Read};
fn read_u32<T: Read>(reader: &mut T) -> std::io::Result<u32> {
use byteorder::ReadBytesExt;
reader.read_u32::<byteorder::BigEndian>()
}
fn check_magic_number<T: Read>(reader: &mut T, expected: u32) -> Result<()> {
let magic_number = read_u32(reader)?;
if magic_number != expected {
Err(io::Error::new(
io::ErrorKind::Other,
format!("incorrect magic number {magic_number} != {expected}"),
))?;
}
Ok(())
}
fn read_labels(filename: &std::path::Path) -> Result<Tensor> {
let mut buf_reader = BufReader::new(File::open(filename)?);
check_magic_number(&mut buf_reader, 2049)?;
let samples = read_u32(&mut buf_reader)?;
let mut data = vec![0u8; samples as usize];
buf_reader.read_exact(&mut data)?;
let samples = data.len();
Tensor::from_vec(data, samples, &Device::Cpu)
}
fn read_images(filename: &std::path::Path) -> Result<Tensor> {
let mut buf_reader = BufReader::new(File::open(filename)?);
check_magic_number(&mut buf_reader, 2051)?;
let samples = read_u32(&mut buf_reader)? as usize;
let rows = read_u32(&mut buf_reader)? as usize;
let cols = read_u32(&mut buf_reader)? as usize;
let data_len = samples * rows * cols;
let mut data = vec![0u8; data_len];
buf_reader.read_exact(&mut data)?;
let tensor = Tensor::from_vec(data, (samples, rows * cols), &Device::Cpu)?;
tensor.to_dtype(DType::F32)? / 255.
}
pub fn load_dir<T: AsRef<std::path::Path>>(dir: T) -> Result<crate::vision::Dataset> {
let dir = dir.as_ref();
let train_images = read_images(&dir.join("train-images-idx3-ubyte"))?;
let train_labels = read_labels(&dir.join("train-labels-idx1-ubyte"))?;
let test_images = read_images(&dir.join("t10k-images-idx3-ubyte"))?;
let test_labels = read_labels(&dir.join("t10k-labels-idx1-ubyte"))?;
Ok(crate::vision::Dataset {
train_images,
train_labels,
test_images,
test_labels,
labels: 10,
})
}
fn load_parquet(parquet: SerializedFileReader<std::fs::File>) -> Result<(Tensor, Tensor)> {
let samples = parquet.metadata().file_metadata().num_rows() as usize;
let mut buffer_images: Vec<u8> = Vec::with_capacity(samples * 784);
let mut buffer_labels: Vec<u8> = Vec::with_capacity(samples);
for row in parquet.into_iter().flatten() {
for (_name, field) in row.get_column_iter() {
if let parquet::record::Field::Group(subrow) = field {
for (_name, field) in subrow.get_column_iter() {
if let parquet::record::Field::Bytes(value) = field {
let image = image::load_from_memory(value.data()).unwrap();
buffer_images.extend(image.to_luma8().as_raw());
}
}
} else if let parquet::record::Field::Long(label) = field {
buffer_labels.push(*label as u8);
}
}
}
let images = (Tensor::from_vec(buffer_images, (samples, 784), &Device::Cpu)?
.to_dtype(DType::F32)?
/ 255.)?;
let labels = Tensor::from_vec(buffer_labels, (samples,), &Device::Cpu)?;
Ok((images, labels))
}
pub fn load() -> Result<crate::vision::Dataset> {
let api = Api::new().map_err(|e| Error::Msg(format!("Api error: {e}")))?;
let dataset_id = "ylecun/mnist".to_string();
let repo = Repo::with_revision(
dataset_id,
RepoType::Dataset,
"refs/convert/parquet".to_string(),
);
let repo = api.repo(repo);
let test_parquet_filename = repo
.get("mnist/test/0000.parquet")
.map_err(|e| Error::Msg(format!("Api error: {e}")))?;
let train_parquet_filename = repo
.get("mnist/train/0000.parquet")
.map_err(|e| Error::Msg(format!("Api error: {e}")))?;
let test_parquet = SerializedFileReader::new(std::fs::File::open(test_parquet_filename)?)
.map_err(|e| Error::Msg(format!("Parquet error: {e}")))?;
let train_parquet = SerializedFileReader::new(std::fs::File::open(train_parquet_filename)?)
.map_err(|e| Error::Msg(format!("Parquet error: {e}")))?;
let (test_images, test_labels) = load_parquet(test_parquet)?;
let (train_images, train_labels) = load_parquet(train_parquet)?;
Ok(crate::vision::Dataset {
train_images,
train_labels,
test_images,
test_labels,
labels: 10,
})
}
| candle/candle-datasets/src/vision/mnist.rs/0 | {
"file_path": "candle/candle-datasets/src/vision/mnist.rs",
"repo_id": "candle",
"token_count": 2133
} |
#[cfg(feature = "mkl")]
extern crate intel_mkl_src;
#[cfg(feature = "accelerate")]
extern crate accelerate_src;
use candle::{DType, Device, Tensor};
use candle_nn as nn;
use candle_transformers::models::chinese_clip::{ChineseClipConfig, ChineseClipModel};
use clap::Parser;
use tokenizers::Tokenizer;
#[derive(Parser)]
struct Args {
#[arg(long)]
model: Option<String>,
#[arg(long)]
tokenizer: Option<String>,
#[arg(long, use_value_delimiter = true)]
images: Option<Vec<String>>,
#[arg(long)]
cpu: bool,
#[arg(long, use_value_delimiter = true)]
sequences: Option<Vec<String>>,
}
fn main() -> anyhow::Result<()> {
let args = Args::parse();
tracing_subscriber::fmt::init();
let device = candle_examples::device(args.cpu)?;
let var = load_weights(args.model, &device)?;
let clip_model = ChineseClipModel::new(var, &ChineseClipConfig::clip_vit_base_patch16())?;
tracing::info!("Transformer loaded. ");
let (pixel_values, vec_imgs) = load_images(args.images, &device)?;
tracing::info!("Images loaded. ");
let tokenizer = load_tokenizer()?;
let (input_ids, type_ids, attention_mask, text_sequences) =
tokenize_sequences(args.sequences, &tokenizer, &device)?;
tracing::info!("Computing ... ");
let (_logits_per_text, logits_per_image) = clip_model.forward(
&pixel_values,
&input_ids,
Some(&type_ids),
Some(&attention_mask),
)?;
let softmax_image = nn::ops::softmax(&logits_per_image, 1)?;
let softmax_image_vec = softmax_image.flatten_all()?.to_vec1::<f32>()?;
let probability_vec = softmax_image_vec
.iter()
.map(|v| v * 100.0)
.collect::<Vec<f32>>();
let probability_per_image = probability_vec.len() / vec_imgs.len();
for (i, img) in vec_imgs.iter().enumerate() {
let start = i * probability_per_image;
let end = start + probability_per_image;
let prob = &probability_vec[start..end];
tracing::info!("\n\nResults for image: {}\n", img);
for (i, p) in prob.iter().enumerate() {
tracing::info!("Probability: {:.4}% Text: {} ", p, text_sequences[i]);
}
}
Ok(())
}
pub fn load_weights(model: Option<String>, device: &Device) -> anyhow::Result<nn::VarBuilder> {
let model_file = match model {
None => {
let api = hf_hub::api::sync::Api::new()?;
let repo = hf_hub::Repo::with_revision(
"OFA-Sys/chinese-clip-vit-base-patch16".to_string(),
hf_hub::RepoType::Model,
"refs/pr/3".to_string(),
);
let api = api.repo(repo);
api.get("model.safetensors")?
}
Some(model) => model.into(),
};
Ok(unsafe { nn::VarBuilder::from_mmaped_safetensors(&[model_file], DType::F32, device)? })
}
pub fn load_tokenizer() -> anyhow::Result<Tokenizer> {
let tokenizer_file = {
let api = hf_hub::api::sync::Api::new()?;
let repo = hf_hub::Repo::with_revision(
"OFA-Sys/chinese-clip-vit-base-patch16".to_string(),
hf_hub::RepoType::Model,
"refs/pr/3".to_string(),
);
let api = api.repo(repo);
api.get("tokenizer.json")?
};
Tokenizer::from_file(tokenizer_file).map_err(anyhow::Error::msg)
}
pub fn tokenize_sequences(
sequences: Option<Vec<String>>,
tokenizer: &Tokenizer,
device: &Device,
) -> anyhow::Result<(Tensor, Tensor, Tensor, Vec<String>)> {
let vec_seq = match sequences {
Some(seq) => seq,
None => vec![
"自行车比赛".to_string(),
"两只猫咪".to_string(),
"拿着蜡烛的机器人".to_string(),
],
};
let mut input_ids = vec![];
let mut type_ids = vec![];
let mut attention_mask = vec![];
let mut max_len = 0;
for seq in vec_seq.clone() {
let encoding = tokenizer.encode(seq, true).map_err(anyhow::Error::msg)?;
input_ids.push(encoding.get_ids().to_vec());
type_ids.push(encoding.get_type_ids().to_vec());
attention_mask.push(encoding.get_attention_mask().to_vec());
if encoding.get_ids().len() > max_len {
max_len = encoding.get_ids().len();
}
}
let pad_id = *tokenizer
.get_vocab(true)
.get("[PAD]")
.ok_or(anyhow::Error::msg("No pad token"))?;
let input_ids: Vec<Vec<u32>> = input_ids
.iter_mut()
.map(|item| {
item.extend(vec![pad_id; max_len - item.len()]);
item.to_vec()
})
.collect();
let type_ids: Vec<Vec<u32>> = type_ids
.iter_mut()
.map(|item| {
item.extend(vec![0; max_len - item.len()]);
item.to_vec()
})
.collect();
let attention_mask: Vec<Vec<u32>> = attention_mask
.iter_mut()
.map(|item| {
item.extend(vec![0; max_len - item.len()]);
item.to_vec()
})
.collect();
let input_ids = Tensor::new(input_ids, device)?;
let type_ids = Tensor::new(type_ids, device)?;
let attention_mask = Tensor::new(attention_mask, device)?;
Ok((input_ids, type_ids, attention_mask, vec_seq))
}
pub fn load_images(
images: Option<Vec<String>>,
device: &Device,
) -> anyhow::Result<(Tensor, Vec<String>)> {
let vec_imgs = match images {
Some(imgs) => imgs,
None => vec![
"candle-examples/examples/stable-diffusion/assets/stable-diffusion-xl.jpg".to_string(),
"candle-examples/examples/yolo-v8/assets/bike.jpg".to_string(),
],
};
let mut images = vec![];
for path in vec_imgs.iter() {
let tensor = load_image(path, 224, device)?;
images.push(tensor);
}
let images = Tensor::stack(&images, 0)?.to_device(device)?;
Ok((images, vec_imgs))
}
fn load_image<T: AsRef<std::path::Path>>(
path: T,
image_size: usize,
device: &Device,
) -> anyhow::Result<Tensor> {
let img = image::ImageReader::open(path)?.decode()?;
let (height, width) = (image_size, image_size);
let img = img.resize_to_fill(
width as u32,
height as u32,
image::imageops::FilterType::Triangle,
);
let img = img.to_rgb8().into_raw();
let img = Tensor::from_vec(img, (height, width, 3), device)?.permute((2, 0, 1))?;
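    // Per-channel normalization constants used by CLIP-style image preprocessing.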
let mean = Tensor::new(&[0.48145466f32, 0.4578275, 0.40821073], device)?.reshape((3, 1, 1))?;
let std =
Tensor::new(&[0.26862954f32, 0.261_302_6, 0.275_777_1], device)?.reshape((3, 1, 1))?;
let img = (img.to_dtype(DType::F32)? / 255.)?
.broadcast_sub(&mean)?
.broadcast_div(&std)?;
Ok(img)
}
| candle/candle-examples/examples/chinese_clip/main.rs/0 | {
"file_path": "candle/candle-examples/examples/chinese_clip/main.rs",
"repo_id": "candle",
"token_count": 3194
} |
# candle-depth-anything-v2
[Depth Anything V2](https://github.com/DepthAnything/Depth-Anything-V2) is a model for Monocular Depth Estimation (MDE, i.e. depth estimation from a single image) which
builds on the [DINOv2](https://github.com/facebookresearch/dinov2) vision transformer.
This example first instantiates the DINOv2 model and then proceeds to create DepthAnythingV2 and run it.
## Running an example with color map and CUDA
```bash
cargo run --features cuda,depth_anything_v2 --package candle-examples --example depth_anything_v2 -- --color-map --image candle-examples/examples/yolo-v8/assets/bike.jpg
```
| candle/candle-examples/examples/depth_anything_v2/README.md/0 | {
"file_path": "candle/candle-examples/examples/depth_anything_v2/README.md",
"repo_id": "candle",
"token_count": 168
} |
# candle-eva2
[EVA-02](https://arxiv.org/abs/2303.11331) is a computer vision model.
In this example, it is used as an ImageNet classifier: the model returns the
probability for the image to belong to each of the 1000 ImageNet categories.
## Running an example
```bash
cargo run --example eva2 --release -- --image candle-examples/examples/yolo-v8/assets/bike.jpg
> mountain bike, all-terrain bike, off-roader: 37.09%
> maillot : 8.30%
> alp : 2.13%
> bicycle-built-for-two, tandem bicycle, tandem: 0.84%
> crash helmet : 0.73%
```

| candle/candle-examples/examples/eva2/README.md/0 | {
"file_path": "candle/candle-examples/examples/eva2/README.md",
"repo_id": "candle",
"token_count": 264
} |
# gte-Qwen1.5-7B-instruct
gte-Qwen1.5-7B-instruct is a model in the GTE (General Text Embeddings) family.
- [Model card](https://huggingface.co/Alibaba-NLP/gte-Qwen1.5-7B-instruct) on the HuggingFace Hub.
- [Technical report](https://arxiv.org/abs/2308.03281) *Towards General Text Embeddings with Multi-stage Contrastive Learning*
## Running the example
Automatically download the model from the HuggingFace hub:
```bash
$ cargo run --example gte-qwen --release
```
or, load the model from a local directory:
```bash
cargo run --example gte-qwen --release --features cuda -- --local-repo /path/to/gte_Qwen1.5-7B-instruct/
```
| candle/candle-examples/examples/gte-qwen/README.md/0 | {
"file_path": "candle/candle-examples/examples/gte-qwen/README.md",
"repo_id": "candle",
"token_count": 229
} |
pub mod constants;
pub mod conversation;
pub mod image_processor;
use candle_transformers::generation::{LogitsProcessor, Sampling};
use candle_transformers::models::llama::Cache;
use anyhow::{bail, Error as E, Result};
use candle::{DType, Device, IndexOp, Tensor};
use candle_nn::VarBuilder;
use candle_transformers::models::llava::config::{
HFGenerationConfig, HFLLaVAConfig, HFPreProcessorConfig,
};
use candle_transformers::models::llava::{config::LLaVAConfig, LLaVA};
use clap::Parser;
use constants::*;
use conversation::Conversation;
use hf_hub::api::sync::Api;
use image_processor::{process_image, ImageProcessor};
use std::io::Write;
use tokenizers::Tokenizer;
#[derive(Parser, Debug)]
#[command(author, version, about,long_about=None)]
struct Args {
#[arg(long, default_value = "llava-hf/llava-v1.6-vicuna-7b-hf")]
model_path: String,
#[arg(long, default_value = "tokenizer/tokenizer.json")]
tokenizer_path: String,
#[arg(long)]
model_base: Option<String>,
#[arg(long)]
image_file: String, // Required
#[arg(long)]
conv_mode: Option<String>,
#[arg(long, default_value_t = 0.2)]
temperature: f32,
#[arg(long, default_value_t = 512)]
max_new_tokens: usize,
#[arg(long, action)]
hf: bool,
#[arg(long, action)]
cpu: bool,
#[arg(long, action)]
no_kv_cache: bool,
#[arg(long)]
prompt: String,
    /// The seed to use when generating random samples. Copied from the candle llama example; this flag does not exist in the Python LLaVA implementation.
#[arg(long, default_value_t = 299792458)]
seed: u64,
}
//from https://github.com/huggingface/candle/blob/main/candle-examples/examples/clip/main.rs
fn load_image<T: AsRef<std::path::Path>>(
path: T,
processor: &ImageProcessor,
llava_config: &LLaVAConfig,
dtype: DType,
) -> Result<((u32, u32), Tensor)> {
let img = image::ImageReader::open(path)?.decode()?;
let img_tensor = process_image(&img, processor, llava_config)?;
Ok(((img.width(), img.height()), img_tensor.to_dtype(dtype)?))
}
fn get_model_name_from_path(model_path: &str) -> String {
let model_paths: Vec<String> = model_path
.trim_matches('/')
.split('/')
.map(|s| s.to_string())
.collect();
if model_paths.last().unwrap().starts_with("checkpoint-") {
format!(
"{}_{}",
model_paths[model_paths.len() - 2],
model_paths.last().unwrap()
)
} else {
model_paths.last().unwrap().to_string()
}
}
fn duplicate_vec<T>(vec: &[T], n: usize) -> Vec<T>
where
T: Clone,
{
let mut res = Vec::new();
for _ in 0..n {
res.extend(vec.to_owned());
}
res
}
fn insert_separator<T>(x: Vec<Vec<T>>, sep: Vec<T>) -> Vec<Vec<T>>
where
T: Clone,
{
let sep = vec![sep];
let sep = duplicate_vec(&sep, x.len());
let mut res = x
.iter()
.zip(sep.iter())
.flat_map(|(x, y)| vec![x.clone(), y.clone()])
.collect::<Vec<Vec<T>>>();
res.pop();
res
}
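// Tokenize a prompt that may contain "<image>" placeholders: the text chunks are encoded
// separately and each placeholder position is filled with `image_token_index`, keeping a
// leading BOS token if one is present.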
fn tokenizer_image_token(
prompt: &str,
tokenizer: &Tokenizer,
image_token_index: i64,
llava_config: &LLaVAConfig,
) -> Result<Tensor> {
let prompt_chunks = prompt
.split("<image>")
.map(|s| {
tokenizer
.encode(s, true)
.unwrap()
.get_ids()
.to_vec()
.iter()
.map(|x| *x as i64)
.collect()
})
.collect::<Vec<Vec<i64>>>();
let mut input_ids = Vec::new();
let mut offset = 0;
if !prompt_chunks.is_empty()
&& !prompt_chunks[0].is_empty()
&& prompt_chunks[0][0] == llava_config.bos_token_id as i64
{
offset = 1;
input_ids.push(prompt_chunks[0][0]);
}
for x in insert_separator(
prompt_chunks,
duplicate_vec(&[image_token_index], offset + 1),
)
.iter()
{
input_ids.extend(x[1..].to_vec())
}
let input_len = input_ids.len();
Tensor::from_vec(input_ids, (1, input_len), &Device::Cpu).map_err(E::msg)
}
fn main() -> Result<()> {
let mut args = Args::parse();
let device = candle_examples::device(args.cpu)?;
println!("Start loading model");
let api = Api::new()?;
let api = api.model(args.model_path.clone());
let (llava_config, tokenizer, clip_vision_config, image_processor) = if args.hf {
let config_filename = api.get("config.json")?;
let hf_llava_config: HFLLaVAConfig =
serde_json::from_slice(&std::fs::read(config_filename)?)?;
let generation_config_filename = api.get("generation_config.json")?;
let generation_config: HFGenerationConfig =
serde_json::from_slice(&std::fs::read(generation_config_filename)?)?;
let preprocessor_config_filename = api.get("preprocessor_config.json")?;
let preprocessor_config: HFPreProcessorConfig =
serde_json::from_slice(&std::fs::read(preprocessor_config_filename)?)?;
let llava_config =
hf_llava_config.to_llava_config(&generation_config, &preprocessor_config);
let tokenizer_filename = api.get("tokenizer.json")?;
let tokenizer = Tokenizer::from_file(tokenizer_filename).map_err(E::msg)?;
let clip_vision_config = hf_llava_config.to_clip_vision_config();
(
llava_config,
tokenizer,
Some(clip_vision_config),
ImageProcessor::from_hf_preprocessor_config(&preprocessor_config),
)
} else {
let config_filename = api.get("config.json")?;
let llava_config: LLaVAConfig = serde_json::from_slice(&std::fs::read(config_filename)?)?;
let tokenizer = Tokenizer::from_file(&args.tokenizer_path)
.map_err(|e| E::msg(format!("Error loading {}: {}", &args.tokenizer_path, e)))?;
(
llava_config.clone(),
tokenizer,
None,
ImageProcessor::from_pretrained(&llava_config.mm_vision_tower.unwrap())?,
)
};
let llama_config = llava_config.to_llama_config();
let dtype: DType = match llava_config.torch_dtype.as_str() {
"float16" => DType::F16,
"bfloat16" => DType::BF16,
_ => bail!("unsupported dtype"),
};
let eos_token_id = llava_config.eos_token_id;
println!("setting kv cache");
let mut cache = Cache::new(!args.no_kv_cache, dtype, &llama_config, &device)?;
println!("loading model weights");
let weight_filenames =
candle_examples::hub_load_safetensors(&api, "model.safetensors.index.json")?;
let vb = unsafe { VarBuilder::from_mmaped_safetensors(&weight_filenames, dtype, &device)? };
let llava: LLaVA = LLaVA::load(vb, &llava_config, clip_vision_config)?;
println!("generating conv template");
let image_token_se = format!(
"{}{}{}",
DEFAULT_IM_START_TOKEN, DEFAULT_IMAGE_TOKEN, DEFAULT_IM_END_TOKEN
);
let qs = if args.prompt.contains(IMAGE_PLACEHOLDER) {
if llava_config.mm_use_im_start_end {
args.prompt.replace(IMAGE_PLACEHOLDER, &image_token_se)
} else {
args.prompt.replace(IMAGE_PLACEHOLDER, DEFAULT_IMAGE_TOKEN)
}
} else if llava_config.mm_use_im_start_end {
format!("{}\n{}", image_token_se, args.prompt)
} else {
format!("{}\n{}", DEFAULT_IMAGE_TOKEN, args.prompt)
};
let model_name = get_model_name_from_path(&args.model_path).to_lowercase();
let conv_mode = if model_name.contains("llama-2") {
"llava_llama_2"
} else if model_name.contains("mistral") {
"mistral_instruct"
} else if model_name.contains("v1.6-34b") {
"chatml_direct"
} else if model_name.contains("v1") {
"llava_v1"
} else if model_name.contains("mpt") {
"mpt"
} else {
"llava_v0"
};
if args.conv_mode.is_some() && args.conv_mode.as_deref() != Some(conv_mode) {
println!(
"Warning: the model is trained with {}, but you are using {}",
conv_mode,
args.conv_mode.as_deref().unwrap()
);
} else {
args.conv_mode = Some(conv_mode.to_string());
}
let mut conv = match args.conv_mode {
Some(conv_mode) => match conv_mode.as_str() {
"chatml_direct" => Conversation::conv_chatml_direct(),
"llava_v1" => Conversation::conv_llava_v1(),
_ => todo!("not implement yet"),
},
None => bail!("conv_mode is required"),
};
conv.append_user_message(Some(&qs));
conv.append_assistant_message(None);
let prompt = conv.get_prompt();
println!("loading image");
let (image_size, image_tensor) =
load_image(&args.image_file, &image_processor, &llava_config, dtype)
.map_err(|e| E::msg(format!("Error loading {}: {}", &args.image_file, e)))?;
let image_tensor = image_tensor.to_device(&device)?;
let mut logits_processor = {
let temperature = f64::from(args.temperature);
let sampling = if temperature <= 0. {
Sampling::ArgMax
} else {
Sampling::All { temperature }
};
LogitsProcessor::from_sampling(args.seed, sampling)
};
// get input tokens
let tokens = tokenizer_image_token(
&prompt,
&tokenizer,
llava_config.image_token_index as i64,
&llava_config,
)?;
let mut input_embeds =
llava.prepare_inputs_labels_for_multimodal(&tokens, &[image_tensor], &[image_size])?;
//inference loop, based on https://github.com/huggingface/candle/blob/main/candle-examples/examples/llama/main.rs
let mut tokenizer = candle_examples::token_output_stream::TokenOutputStream::new(tokenizer);
let mut index_pos = 0;
for index in 0..args.max_new_tokens {
let (_, input_embeds_len, _) = input_embeds.dims3()?;
let (context_size, context_index) = if cache.use_kv_cache && index > 0 {
(1, index_pos)
} else {
(input_embeds_len, 0)
};
let input = input_embeds.i((.., input_embeds_len.saturating_sub(context_size).., ..))?;
let logits = llava.forward(&input, context_index, &mut cache)?; //[1,32000]
let logits = logits.squeeze(0)?;
let (_, input_len, _) = input.dims3()?;
index_pos += input_len;
let next_token = logits_processor.sample(&logits)?;
let next_token_tensor = Tensor::from_vec(vec![next_token], 1, &device)?;
let next_embeds = llava.llama.embed(&next_token_tensor)?.unsqueeze(0)?;
input_embeds = Tensor::cat(&[input_embeds, next_embeds], 1)?;
if next_token == eos_token_id as u32 {
break;
}
if let Some(t) = tokenizer.next_token(next_token)? {
print!("{t}");
std::io::stdout().flush()?;
}
}
if let Some(rest) = tokenizer.decode_rest().map_err(E::msg)? {
print!("{rest}");
}
Ok(())
}
| candle/candle-examples/examples/llava/main.rs/0 | {
"file_path": "candle/candle-examples/examples/llava/main.rs",
"repo_id": "candle",
"token_count": 5097
} |
# NV-Embed-v2
Candle implementation (inference only) of [NV-Embed-v2](https://huggingface.co/nvidia/NV-Embed-v2), a text embedding model that ranks No. 1 (as of Nov 25 2024) on the [MTEB](https://huggingface.co/spaces/mteb/leaderboard) benchmark with a score of 72.31 across 56 text embedding tasks.
## Running an example: Retrieval
```bash
cargo run --example nvembed_v2 --release
> scores: [[87.4269, 0.4629],
> [ 0.9653, 86.0372]]
> Tensor[[2, 2], f32]
```
In this example, we have two queries and two passages (the corresponding answers). The output tensor represents the similarity scores between each query-passage pair. The scores are computed by taking the dot product of the query and passage embeddings and scaling the result by 100.
```rust
let queries = [
"are judo throws allowed in wrestling?",
"how to become a radiology technician in michigan?",
];
let query_instruction =
"Instruct: Given a question, retrieve passages that answer the question\nQuery: "
.to_string();
let passages = [
"Since you're reading this, you are probably someone from a judo background or someone who is just wondering how judo techniques can be applied under wrestling rules. So without further ado, let's get to the question. Are Judo throws allowed in wrestling? Yes, judo throws are allowed in freestyle and folkstyle wrestling. You only need to be careful to follow the slam rules when executing judo throws. In wrestling, a slam is lifting and returning an opponent to the mat with unnecessary force.",
"Below are the basic steps to becoming a radiologic technologist in Michigan:Earn a high school diploma. As with most careers in health care, a high school education is the first step to finding entry-level employment. Taking classes in math and science, such as anatomy, biology, chemistry, physiology, and physics, can help prepare students for their college studies and future careers.Earn an associate degree. Entry-level radiologic positions typically require at least an Associate of Applied Science. Before enrolling in one of these degree programs, students should make sure it has been properly accredited by the Joint Review Committee on Education in Radiologic Technology (JRCERT).Get licensed or certified in the state of Michigan."
];
let passage_instruction = "".to_string();
```
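For intuition, the scoring described above amounts to a scaled matrix product of the two embedding sets. Below is a rough sketch, not taken from the example source; `query_embeddings` and `passage_embeddings` are hypothetical names for already L2-normalized candle `Tensor`s:
```rust
// Hypothetical sketch: query_embeddings is (n_queries, d), passage_embeddings is (n_passages, d).
// The (i, j) entry of `scores` is 100 * <query_i, passage_j>.
let scores = (query_embeddings.matmul(&passage_embeddings.t())? * 100.)?;
```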
If you already have the model and tokenizer files, you can use the `--tokenizer` and `--model-files` options to specify their full paths, instead of downloading them from the hub.
## Running an example: Sentence embedding
```bash
cargo run --example nvembed_v2 --release -- --prompt "Here is a test sentence"
> Embedding: [[ 0.0066, -0.0048, 0.0066, ..., -0.0096, 0.0119, -0.0052]]
> Tensor[[1, 4096], f32]
```
In this example, we pass a prompt to the model and it outputs the vector encoding of the prompt.
## Hardware Requirements
29.25GB at fp32
## License
CC-BY-NC-4.0. This model should not be used for any commercial purpose. Refer to the [license](https://spdx.org/licenses/CC-BY-NC-4.0) for the detailed terms.
| candle/candle-examples/examples/nvembed_v2/README.md/0 | {
"file_path": "candle/candle-examples/examples/nvembed_v2/README.md",
"repo_id": "candle",
"token_count": 851
} |
#[cfg(feature = "mkl")]
extern crate intel_mkl_src;
#[cfg(feature = "accelerate")]
extern crate accelerate_src;
use clap::{Parser, ValueEnum};
use std::io::Write;
use tokenizers::Tokenizer;
use candle::quantized::gguf_file;
use candle::Tensor;
use candle_transformers::generation::{LogitsProcessor, Sampling};
use candle_examples::token_output_stream::TokenOutputStream;
use candle_transformers::models::quantized_llama::ModelWeights as Phi3b;
use candle_transformers::models::quantized_phi::ModelWeights as Phi2;
use candle_transformers::models::quantized_phi3::ModelWeights as Phi3;
const DEFAULT_PROMPT: &str = "Write a function to count prime numbers up to N. ";
#[derive(Clone, Debug, Copy, PartialEq, Eq, ValueEnum)]
enum Which {
#[value(name = "phi-2")]
Phi2,
#[value(name = "phi-3")]
Phi3,
/// Alternative implementation of phi-3, based on llama.
#[value(name = "phi-3b")]
Phi3b,
#[value(name = "phi-4")]
Phi4,
}
#[derive(Parser, Debug)]
#[command(author, version, about, long_about = None)]
struct Args {
/// GGUF file to load, typically a .gguf file generated by the quantize command from llama.cpp
#[arg(long)]
model: Option<String>,
/// The initial prompt, use 'interactive' for entering multiple prompts in an interactive way
/// and 'chat' for an interactive model where history of previous prompts and generated tokens
/// is preserved.
#[arg(long)]
prompt: Option<String>,
/// The length of the sample to generate (in tokens).
#[arg(short = 'n', long, default_value_t = 1000)]
sample_len: usize,
/// The tokenizer config in json format.
#[arg(long)]
tokenizer: Option<String>,
/// The temperature used to generate samples, use 0 for greedy sampling.
#[arg(long, default_value_t = 0.8)]
temperature: f64,
/// Nucleus sampling probability cutoff.
#[arg(long)]
top_p: Option<f64>,
/// Only sample among the top K samples.
#[arg(long)]
top_k: Option<usize>,
/// The seed to use when generating random samples.
#[arg(long, default_value_t = 299792458)]
seed: u64,
/// Enable tracing (generates a trace-timestamp.json file).
#[arg(long)]
tracing: bool,
/// Process prompt elements separately.
#[arg(long)]
split_prompt: bool,
/// Run on CPU rather than GPU even if a GPU is available.
#[arg(long)]
cpu: bool,
/// Penalty to be applied for repeating tokens, 1. means no penalty.
#[arg(long, default_value_t = 1.1)]
repeat_penalty: f32,
/// The context size to consider for the repeat penalty.
#[arg(long, default_value_t = 64)]
repeat_last_n: usize,
/// The model size to use.
#[arg(long, default_value = "phi-3b")]
which: Which,
#[arg(long)]
use_flash_attn: bool,
}
impl Args {
fn tokenizer(&self) -> anyhow::Result<Tokenizer> {
let tokenizer_path = match &self.tokenizer {
Some(config) => std::path::PathBuf::from(config),
None => {
let api = hf_hub::api::sync::Api::new()?;
let repo = match self.which {
Which::Phi2 => "microsoft/phi-2",
Which::Phi3 | Which::Phi3b => "microsoft/Phi-3-mini-4k-instruct",
Which::Phi4 => "microsoft/phi-4",
};
let api = api.model(repo.to_string());
api.get("tokenizer.json")?
}
};
Tokenizer::from_file(tokenizer_path).map_err(anyhow::Error::msg)
}
fn model(&self) -> anyhow::Result<std::path::PathBuf> {
let model_path = match &self.model {
Some(config) => std::path::PathBuf::from(config),
None => {
let (repo, filename, revision) = match self.which {
Which::Phi2 => ("TheBloke/phi-2-GGUF", "phi-2.Q4_K_M.gguf", "main"),
Which::Phi3 => (
"microsoft/Phi-3-mini-4k-instruct-gguf",
"Phi-3-mini-4k-instruct-q4.gguf",
"main",
),
Which::Phi3b => (
"microsoft/Phi-3-mini-4k-instruct-gguf",
"Phi-3-mini-4k-instruct-q4.gguf",
"5eef2ce24766d31909c0b269fe90c817a8f263fb",
),
Which::Phi4 => ("microsoft/phi-4-gguf", "phi-4-q4.gguf", "main"),
};
let api = hf_hub::api::sync::Api::new()?;
api.repo(hf_hub::Repo::with_revision(
repo.to_string(),
hf_hub::RepoType::Model,
revision.to_string(),
))
.get(filename)?
}
};
Ok(model_path)
}
}
fn format_size(size_in_bytes: usize) -> String {
if size_in_bytes < 1_000 {
format!("{}B", size_in_bytes)
} else if size_in_bytes < 1_000_000 {
format!("{:.2}KB", size_in_bytes as f64 / 1e3)
} else if size_in_bytes < 1_000_000_000 {
format!("{:.2}MB", size_in_bytes as f64 / 1e6)
} else {
format!("{:.2}GB", size_in_bytes as f64 / 1e9)
}
}
enum Model {
Phi2(Phi2),
Phi3(Phi3),
Phi3b(Phi3b),
}
impl Model {
fn forward(&mut self, xs: &Tensor, pos: usize) -> candle::Result<Tensor> {
match self {
Self::Phi2(m) => m.forward(xs, pos),
Self::Phi3(m) => m.forward(xs, pos),
Self::Phi3b(m) => m.forward(xs, pos),
}
}
}
fn main() -> anyhow::Result<()> {
use tracing_chrome::ChromeLayerBuilder;
use tracing_subscriber::prelude::*;
let args = Args::parse();
let _guard = if args.tracing {
let (chrome_layer, guard) = ChromeLayerBuilder::new().build();
tracing_subscriber::registry().with(chrome_layer).init();
Some(guard)
} else {
None
};
println!(
"avx: {}, neon: {}, simd128: {}, f16c: {}",
candle::utils::with_avx(),
candle::utils::with_neon(),
candle::utils::with_simd128(),
candle::utils::with_f16c()
);
println!(
"temp: {:.2} repeat-penalty: {:.2} repeat-last-n: {}",
args.temperature, args.repeat_penalty, args.repeat_last_n
);
let model_path = args.model()?;
let mut file = std::fs::File::open(&model_path)?;
let start = std::time::Instant::now();
let device = candle_examples::device(args.cpu)?;
let mut model = {
let model = gguf_file::Content::read(&mut file).map_err(|e| e.with_path(model_path))?;
let mut total_size_in_bytes = 0;
for (_, tensor) in model.tensor_infos.iter() {
let elem_count = tensor.shape.elem_count();
total_size_in_bytes +=
elem_count * tensor.ggml_dtype.type_size() / tensor.ggml_dtype.block_size();
}
println!(
"loaded {:?} tensors ({}) in {:.2}s",
model.tensor_infos.len(),
&format_size(total_size_in_bytes),
start.elapsed().as_secs_f32(),
);
match args.which {
Which::Phi2 => Model::Phi2(Phi2::from_gguf(model, &mut file, &device)?),
Which::Phi3 | Which::Phi4 => Model::Phi3(Phi3::from_gguf(
args.use_flash_attn,
model,
&mut file,
&device,
)?),
Which::Phi3b => Model::Phi3b(Phi3b::from_gguf(model, &mut file, &device)?),
}
};
println!("model built");
let tokenizer = args.tokenizer()?;
let mut tos = TokenOutputStream::new(tokenizer);
let prompt_str = args.prompt.unwrap_or_else(|| DEFAULT_PROMPT.to_string());
print!("{}", &prompt_str);
let tokens = tos
.tokenizer()
.encode(prompt_str, true)
.map_err(anyhow::Error::msg)?;
let tokens = tokens.get_ids();
let to_sample = args.sample_len.saturating_sub(1);
let mut all_tokens = vec![];
let mut logits_processor = {
let temperature = args.temperature;
let sampling = if temperature <= 0. {
Sampling::ArgMax
} else {
match (args.top_k, args.top_p) {
(None, None) => Sampling::All { temperature },
(Some(k), None) => Sampling::TopK { k, temperature },
(None, Some(p)) => Sampling::TopP { p, temperature },
(Some(k), Some(p)) => Sampling::TopKThenTopP { k, p, temperature },
}
};
LogitsProcessor::from_sampling(args.seed, sampling)
};
let start_prompt_processing = std::time::Instant::now();
let mut next_token = if !args.split_prompt {
let input = Tensor::new(tokens, &device)?.unsqueeze(0)?;
let logits = model.forward(&input, 0)?;
let logits = logits.squeeze(0)?;
logits_processor.sample(&logits)?
} else {
let mut next_token = 0;
for (pos, token) in tokens.iter().enumerate() {
let input = Tensor::new(&[*token], &device)?.unsqueeze(0)?;
let logits = model.forward(&input, pos)?;
let logits = logits.squeeze(0)?;
next_token = logits_processor.sample(&logits)?
}
next_token
};
let prompt_dt = start_prompt_processing.elapsed();
all_tokens.push(next_token);
if let Some(t) = tos.next_token(next_token)? {
print!("{t}");
std::io::stdout().flush()?;
}
let eos_token = *tos
.tokenizer()
.get_vocab(true)
.get("<|endoftext|>")
.unwrap();
let start_post_prompt = std::time::Instant::now();
let mut sampled = 0;
for index in 0..to_sample {
let input = Tensor::new(&[next_token], &device)?.unsqueeze(0)?;
let logits = model.forward(&input, tokens.len() + index)?;
let logits = logits.squeeze(0)?;
let logits = if args.repeat_penalty == 1. {
logits
} else {
let start_at = all_tokens.len().saturating_sub(args.repeat_last_n);
candle_transformers::utils::apply_repeat_penalty(
&logits,
args.repeat_penalty,
&all_tokens[start_at..],
)?
};
next_token = logits_processor.sample(&logits)?;
all_tokens.push(next_token);
if let Some(t) = tos.next_token(next_token)? {
print!("{t}");
std::io::stdout().flush()?;
}
sampled += 1;
if next_token == eos_token {
break;
};
}
if let Some(rest) = tos.decode_rest().map_err(candle::Error::msg)? {
print!("{rest}");
}
std::io::stdout().flush()?;
let dt = start_post_prompt.elapsed();
println!(
"\n\n{:4} prompt tokens processed: {:.2} token/s",
tokens.len(),
tokens.len() as f64 / prompt_dt.as_secs_f64(),
);
println!(
"{sampled:4} tokens generated: {:.2} token/s",
sampled as f64 / dt.as_secs_f64(),
);
Ok(())
}
| candle/candle-examples/examples/quantized-phi/main.rs/0 | {
"file_path": "candle/candle-examples/examples/quantized-phi/main.rs",
"repo_id": "candle",
"token_count": 5342
} |
//! Wrappers around the Python API of Gymnasium (the new version of OpenAI gym)
use candle::{Device, Result, Tensor};
use pyo3::prelude::*;
use pyo3::types::PyDict;
/// The return value for a step.
#[derive(Debug)]
pub struct Step<A> {
pub state: Tensor,
pub action: A,
pub reward: f64,
pub terminated: bool,
pub truncated: bool,
}
impl<A: Copy> Step<A> {
/// Returns a copy of this step changing the observation tensor.
pub fn copy_with_obs(&self, state: &Tensor) -> Step<A> {
Step {
state: state.clone(),
action: self.action,
reward: self.reward,
terminated: self.terminated,
truncated: self.truncated,
}
}
}
/// A Gymnasium (OpenAI Gym) session.
pub struct GymEnv {
env: PyObject,
action_space: usize,
observation_space: Vec<usize>,
}
fn w(res: PyErr) -> candle::Error {
candle::Error::wrap(res)
}
impl GymEnv {
/// Creates a new session of the specified OpenAI Gym environment.
pub fn new(name: &str) -> Result<GymEnv> {
Python::with_gil(|py| {
let gym = py.import_bound("gymnasium")?;
let make = gym.getattr("make")?;
let env = make.call1((name,))?;
let action_space = env.getattr("action_space")?;
let action_space = if let Ok(val) = action_space.getattr("n") {
val.extract()?
} else {
let action_space: Vec<usize> = action_space.getattr("shape")?.extract()?;
action_space[0]
};
let observation_space = env.getattr("observation_space")?;
let observation_space = observation_space.getattr("shape")?.extract()?;
Ok(GymEnv {
env: env.into(),
action_space,
observation_space,
})
})
.map_err(w)
}
/// Resets the environment, returning the observation tensor.
pub fn reset(&self, seed: u64) -> Result<Tensor> {
let state: Vec<f32> = Python::with_gil(|py| {
let kwargs = PyDict::new_bound(py);
kwargs.set_item("seed", seed)?;
let state = self.env.call_method_bound(py, "reset", (), Some(&kwargs))?;
state.bind(py).get_item(0)?.extract()
})
.map_err(w)?;
Tensor::new(state, &Device::Cpu)
}
/// Applies an environment step using the specified action.
pub fn step<A: pyo3::IntoPy<pyo3::Py<pyo3::PyAny>> + Clone>(
&self,
action: A,
) -> Result<Step<A>> {
let (state, reward, terminated, truncated) = Python::with_gil(|py| {
let step = self
.env
.call_method_bound(py, "step", (action.clone(),), None)?;
let step = step.bind(py);
let state: Vec<f32> = step.get_item(0)?.extract()?;
let reward: f64 = step.get_item(1)?.extract()?;
let terminated: bool = step.get_item(2)?.extract()?;
let truncated: bool = step.get_item(3)?.extract()?;
Ok((state, reward, terminated, truncated))
})
.map_err(w)?;
let state = Tensor::new(state, &Device::Cpu)?;
Ok(Step {
state,
action,
reward,
terminated,
truncated,
})
}
/// Returns the number of allowed actions for this environment.
pub fn action_space(&self) -> usize {
self.action_space
}
/// Returns the shape of the observation tensors.
pub fn observation_space(&self) -> &[usize] {
&self.observation_space
}
}
| candle/candle-examples/examples/reinforcement-learning/gym_env.rs/0 | {
"file_path": "candle/candle-examples/examples/reinforcement-learning/gym_env.rs",
"repo_id": "candle",
"token_count": 1743
} |
# candle-segment-anything: Segment-Anything Model
This example is based on Meta AI [Segment-Anything
Model](https://github.com/facebookresearch/segment-anything). This model
provides a robust and fast image segmentation pipeline that can be tweaked via
some prompting (requesting some points to be in the target mask, requesting some
points to be part of the background so _not_ in the target mask, specifying some
bounding box).
The default backbone can be replaced by the smaller and faster TinyViT model
based on [MobileSAM](https://github.com/ChaoningZhang/MobileSAM).
## Running an example
```bash
cargo run --example segment-anything --release -- \
  --image candle-examples/examples/yolo-v8/assets/bike.jpg \
  --use-tiny \
--point 0.6,0.6 --point 0.6,0.55
```
Running this command generates a `sam_merged.jpg` file containing the original
image with a blue overlay of the selected mask. The red dots represent the prompt
specified by `--point 0.6,0.6 --point 0.6,0.55`; these points are assumed to be part
of the target mask.
The values used for `--point` should be a comma delimited pair of float values.
They are proportional to the image dimension, i.e. use 0.5 for the image center.
Original image:

Segment results by prompting with a single point `--point 0.6,0.55`:

Segment results by prompting with multiple points `--point 0.6,0.6 --point 0.6,0.55`:

### Command-line flags
- `--use-tiny`: use the TinyViT based MobileSAM backbone rather than the default
one.
- `--point`: specifies the location of the target points.
- `--threshold`: sets the threshold value to be part of the mask, a negative
value results in a larger mask and can be specified via `--threshold=-1.2`.
| candle/candle-examples/examples/segment-anything/README.md/0 | {
"file_path": "candle/candle-examples/examples/segment-anything/README.md",
"repo_id": "candle",
"token_count": 573
} |
use anyhow::{Ok, Result};
use candle_transformers::models::stable_diffusion::vae;
pub fn build_sd3_vae_autoencoder(vb: candle_nn::VarBuilder) -> Result<vae::AutoEncoderKL> {
let config = vae::AutoEncoderKLConfig {
block_out_channels: vec![128, 256, 512, 512],
layers_per_block: 2,
latent_channels: 16,
norm_num_groups: 32,
use_quant_conv: false,
use_post_quant_conv: false,
};
Ok(vae::AutoEncoderKL::new(vb, 3, 3, config)?)
}
pub fn sd3_vae_vb_rename(name: &str) -> String {
let parts: Vec<&str> = name.split('.').collect();
let mut result = Vec::new();
let mut i = 0;
while i < parts.len() {
match parts[i] {
"down_blocks" => {
result.push("down");
}
"mid_block" => {
result.push("mid");
}
"up_blocks" => {
result.push("up");
match parts[i + 1] {
// Reverse the order of up_blocks.
"0" => result.push("3"),
"1" => result.push("2"),
"2" => result.push("1"),
"3" => result.push("0"),
_ => {}
}
i += 1; // Skip the number after up_blocks.
}
"resnets" => {
if i > 0 && parts[i - 1] == "mid_block" {
match parts[i + 1] {
"0" => result.push("block_1"),
"1" => result.push("block_2"),
_ => {}
}
i += 1; // Skip the number after resnets.
} else {
result.push("block");
}
}
"downsamplers" => {
result.push("downsample");
i += 1; // Skip the 0 after downsamplers.
}
"conv_shortcut" => {
result.push("nin_shortcut");
}
"attentions" => {
if parts[i + 1] == "0" {
result.push("attn_1")
}
i += 1; // Skip the number after attentions.
}
"group_norm" => {
result.push("norm");
}
"query" => {
result.push("q");
}
"key" => {
result.push("k");
}
"value" => {
result.push("v");
}
"proj_attn" => {
result.push("proj_out");
}
"conv_norm_out" => {
result.push("norm_out");
}
"upsamplers" => {
result.push("upsample");
i += 1; // Skip the 0 after upsamplers.
}
part => result.push(part),
}
i += 1;
}
result.join(".")
}
| candle/candle-examples/examples/stable-diffusion-3/vae.rs/0 | {
"file_path": "candle/candle-examples/examples/stable-diffusion-3/vae.rs",
"repo_id": "candle",
"token_count": 1772
} |
## VGG Model Implementation
This example demonstrates the implementation of VGG models (VGG13, VGG16, VGG19) using the Candle library.
The VGG models are defined in `candle-transformers/src/models/vgg.rs`. The main function in `candle-examples/examples/vgg/main.rs` loads an image, selects the VGG model based on the provided argument, and applies the model to the loaded image.
You can run the example with the following command:
```bash
cargo run --example vgg --release -- --image ../yolo-v8/assets/bike.jpg --which vgg13
```
In the command above, `--image` specifies the path to the image file and `--which` specifies the VGG model to use (vgg13, vgg16, or vgg19).
| candle/candle-examples/examples/vgg/README.md/0 | {
"file_path": "candle/candle-examples/examples/vgg/README.md",
"repo_id": "candle",
"token_count": 200
} |
# candle-xlm-roberta
This example demonstrates how to use XLM-RoBERTa models in Candle; these models are especially known for their use in reranking. It supports a `fill-mask` task that generates a word for a masked token, and a `reranker` task that reranks a list of documents for a given query.
## Usage
Fill Mask:
```bash
cargo run --example xlm-roberta --release -- --task fill-mask --model xlm-roberta-base
```
```markdown
Sentence: 0 : Hello I'm a fashion model.
Sentence: 1 : I'm a little boy.
Sentence: 2 : I'm living in berlin.
```
Reranker:
```bash
cargo run --example xlm-roberta --release -- --task reranker --model bge-reranker-base
```
```markdown
Ranking Results:
--------------------------------------------------------------------------------
> Rank #4 | Score: 0.0001 | South Korea is a country in East Asia.
> Rank #5 | Score: 0.0000 | There are forests in the mountains.
> Rank #2 | Score: 0.7314 | Pandas look like bears.
> Rank #3 | Score: 0.6948 | There are some animals with black and white fur.
> Rank #1 | Score: 0.9990 | The giant panda (Ailuropoda melanoleuca), sometimes called a panda bear or simply panda, is a bear species endemic to China.
--------------------------------------------------------------------------------
```
| candle/candle-examples/examples/xlm-roberta/Readme.md/0 | {
"file_path": "candle/candle-examples/examples/xlm-roberta/Readme.md",
"repo_id": "candle",
"token_count": 367
} |
// Copied from https://github.com/ruuda/bs1770/blob/master/src/lib.rs
// BS1770 -- Loudness analysis library conforming to ITU-R BS.1770
// Copyright 2020 Ruud van Asseldonk
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// A copy of the License has been included in the root of the repository.
//! Loudness analysis conforming to [ITU-R BS.1770-4][bs17704].
//!
//! This library offers the building blocks to perform BS.1770 loudness
//! measurements, but you need to put the pieces together yourself.
//!
//! [bs17704]: https://www.itu.int/rec/R-REC-BS.1770-4-201510-I/en
//!
//! # Stereo integrated loudness example
//!
//! ```ignore
//! # fn load_stereo_audio() -> [Vec<i16>; 2] {
//! # [vec![0; 48_000], vec![0; 48_000]]
//! # }
//! #
//! let sample_rate_hz = 44_100;
//! let bits_per_sample = 16;
//! let channel_samples: [Vec<i16>; 2] = load_stereo_audio();
//!
//! // When converting integer samples to float, note that the maximum amplitude
//! // is `1 << (bits_per_sample - 1)`, one bit is the sign bit.
//! let normalizer = 1.0 / (1_u64 << (bits_per_sample - 1)) as f32;
//!
//! let channel_power: Vec<_> = channel_samples.iter().map(|samples| {
//! let mut meter = bs1770::ChannelLoudnessMeter::new(sample_rate_hz);
//! meter.push(samples.iter().map(|&s| s as f32 * normalizer));
//! meter.into_100ms_windows()
//! }).collect();
//!
//! let stereo_power = bs1770::reduce_stereo(
//! channel_power[0].as_ref(),
//! channel_power[1].as_ref(),
//! );
//!
//! let gated_power = bs1770::gated_mean(
//! stereo_power.as_ref()
//! ).unwrap_or(bs1770::Power(0.0));
//! println!("Integrated loudness: {:.1} LUFS", gated_power.loudness_lkfs());
//! ```
use std::f32;
/// Coefficients for a 2nd-degree infinite impulse response filter.
///
/// Coefficient a0 is implicitly 1.0.
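///
/// The filter implements the difference equation
/// `y[n] = b0*x[n] + b1*x[n-1] + b2*x[n-2] - a1*y[n-1] - a2*y[n-2]`,
/// with the past two inputs and outputs kept in `x1`, `x2`, `y1`, `y2`.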
#[derive(Clone)]
struct Filter {
a1: f32,
a2: f32,
b0: f32,
b1: f32,
b2: f32,
// The past two input and output samples.
x1: f32,
x2: f32,
y1: f32,
y2: f32,
}
impl Filter {
    /// Stage 1 of the BS.1770-4 pre-filter.
pub fn high_shelf(sample_rate_hz: f32) -> Filter {
// Coefficients taken from https://github.com/csteinmetz1/pyloudnorm/blob/
// 6baa64d59b7794bc812e124438692e7fd2e65c0c/pyloudnorm/meter.py#L135-L136.
let gain_db = 3.999_843_8;
let q = 0.707_175_25;
let center_hz = 1_681.974_5;
// Formula taken from https://github.com/csteinmetz1/pyloudnorm/blob/
// 6baa64d59b7794bc812e124438692e7fd2e65c0c/pyloudnorm/iirfilter.py#L134-L143.
let k = (f32::consts::PI * center_hz / sample_rate_hz).tan();
let vh = 10.0_f32.powf(gain_db / 20.0);
let vb = vh.powf(0.499_666_78);
let a0 = 1.0 + k / q + k * k;
Filter {
b0: (vh + vb * k / q + k * k) / a0,
b1: 2.0 * (k * k - vh) / a0,
b2: (vh - vb * k / q + k * k) / a0,
a1: 2.0 * (k * k - 1.0) / a0,
a2: (1.0 - k / q + k * k) / a0,
x1: 0.0,
x2: 0.0,
y1: 0.0,
y2: 0.0,
}
}
/// Stage 2 of th BS.1770-4 pre-filter.
pub fn high_pass(sample_rate_hz: f32) -> Filter {
// Coefficients taken from https://github.com/csteinmetz1/pyloudnorm/blob/
// 6baa64d59b7794bc812e124438692e7fd2e65c0c/pyloudnorm/meter.py#L135-L136.
let q = 0.500_327_05;
let center_hz = 38.135_47;
// Formula taken from https://github.com/csteinmetz1/pyloudnorm/blob/
// 6baa64d59b7794bc812e124438692e7fd2e65c0c/pyloudnorm/iirfilter.py#L145-L151
let k = (f32::consts::PI * center_hz / sample_rate_hz).tan();
Filter {
a1: 2.0 * (k * k - 1.0) / (1.0 + k / q + k * k),
a2: (1.0 - k / q + k * k) / (1.0 + k / q + k * k),
b0: 1.0,
b1: -2.0,
b2: 1.0,
x1: 0.0,
x2: 0.0,
y1: 0.0,
y2: 0.0,
}
}
/// Feed the next input sample, get the next output sample.
#[inline(always)]
pub fn apply(&mut self, x0: f32) -> f32 {
let y0 = 0.0 + self.b0 * x0 + self.b1 * self.x1 + self.b2 * self.x2
- self.a1 * self.y1
- self.a2 * self.y2;
self.x2 = self.x1;
self.x1 = x0;
self.y2 = self.y1;
self.y1 = y0;
y0
}
}
/// Compensated sum, for summing many values of different orders of magnitude
/// accurately.
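///
/// This is Kahan-style compensated summation: `residue` accumulates the low-order
/// bits that are lost when a small `x` is added to a much larger running `sum`,
/// and feeds them back into subsequent additions.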
#[derive(Copy, Clone, PartialEq)]
struct Sum {
sum: f32,
residue: f32,
}
impl Sum {
#[inline(always)]
fn zero() -> Sum {
Sum {
sum: 0.0,
residue: 0.0,
}
}
#[inline(always)]
fn add(&mut self, x: f32) {
let sum = self.sum + (self.residue + x);
self.residue = (self.residue + x) - (sum - self.sum);
self.sum = sum;
}
}
/// The mean of the squares of the K-weighted samples in a window of time.
///
/// K-weighted power is equivalent to K-weighted loudness, the only difference
/// is one of scale: power is quadratic in sample amplitudes, whereas loudness
/// units are logarithmic. `loudness_lkfs` and `from_lkfs` convert between power,
/// and K-weighted Loudness Units relative to nominal Full Scale (LKFS).
///
/// The term “LKFS” (Loudness Units, K-Weighted, relative to nominal Full Scale)
/// is used in BS.1770-4 to emphasize K-weighting, but the term is otherwise
/// interchangeable with the more widespread term “LUFS” (Loudness Units,
/// relative to Full Scale). Loudness units are related to decibels in the
/// following sense: boosting a signal that has a loudness of
/// -<var>L<sub>K</sub></var> LUFS by <var>L<sub>K</sub></var> dB (by
/// multiplying the amplitude by 10<sup><var>L<sub>K</sub></var>/20</sup>) will
/// bring the loudness to 0 LUFS.
///
/// K-weighting refers to a high-shelf and a high-pass filter that model the
/// fact that humans perceive a given amount of power in low frequencies as
/// less loud than the same amount of power in higher frequencies. In this
/// library the `Power` type always refers to power after K-weighting has been applied.
///
/// The nominal “full scale” is the range [-1.0, 1.0]. Because the power is the
/// mean square of the samples, if no input samples exceeded the full scale, the
/// power will be in the range [0.0, 1.0]. However, the power delivered by
/// multiple channels, which is a weighted sum over individual channel powers,
/// can exceed this range, because the weighted sum is not normalized.
#[derive(Copy, Clone, PartialEq, PartialOrd)]
pub struct Power(pub f32);
impl Power {
/// Convert Loudness Units relative to Full Scale into a squared sample amplitude.
///
/// This is the inverse of `loudness_lkfs`.
pub fn from_lkfs(lkfs: f32) -> Power {
// The inverse of the formula below.
Power(10.0_f32.powf((lkfs + 0.691) * 0.1))
}
/// Return the loudness of this window in Loudness Units, K-weighted, relative to Full Scale.
///
/// This is the inverse of `from_lkfs`.
pub fn loudness_lkfs(&self) -> f32 {
// Equation 2 (p.5) of BS.1770-4.
-0.691 + 10.0 * self.0.log10()
}
}
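// Illustrative sketch (not part of the original bs1770 code): `from_lkfs` and
// `loudness_lkfs` are exact inverses up to floating point rounding.
#[allow(dead_code)]
fn lkfs_round_trip_sketch() {
let power = Power::from_lkfs(-23.0);
// -0.691 + 10 * log10(10^((-23.0 + 0.691) / 10)) == -23.0
debug_assert!((power.loudness_lkfs() + 23.0).abs() < 1e-3);
}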
/// A `T` value for non-overlapping windows of audio, 100ms in length.
///
/// The `ChannelLoudnessMeter` applies K-weighting and then produces the power
/// for non-overlapping windows of 100ms duration.
///
/// These non-overlapping 100ms windows can later be combined into overlapping
/// windows of 400ms, spaced 100ms apart, to compute instantaneous loudness or
/// to perform a gated measurement, or they can be combined into even larger
/// windows for a momentary loudness measurement.
#[derive(Copy, Clone, Debug)]
pub struct Windows100ms<T> {
pub inner: T,
}
impl<T> Windows100ms<T> {
/// Wrap a new empty vector.
pub fn new() -> Windows100ms<Vec<T>> {
Windows100ms { inner: Vec::new() }
}
/// Apply `as_ref` to the inner value.
pub fn as_ref(&self) -> Windows100ms<&[Power]>
where
T: AsRef<[Power]>,
{
Windows100ms {
inner: self.inner.as_ref(),
}
}
/// Apply `as_mut` to the inner value.
pub fn as_mut(&mut self) -> Windows100ms<&mut [Power]>
where
T: AsMut<[Power]>,
{
Windows100ms {
inner: self.inner.as_mut(),
}
}
#[allow(clippy::len_without_is_empty)]
/// Apply `len` to the inner value.
pub fn len(&self) -> usize
where
T: AsRef<[Power]>,
{
self.inner.as_ref().len()
}
}
/// Measures K-weighted power of non-overlapping 100ms windows of a single channel of audio.
///
/// # Output
///
/// The output of the meter is an intermediate result in the form of power for
/// 100ms non-overlapping windows. The windows need to be processed further to
/// get one of the instantaneous, momentary, and integrated loudness
/// measurements defined in BS.1770.
///
/// The windows can also be inspected directly; the data is meaningful
/// on its own (the K-weighted power delivered in that window of time), but it
/// is not something that BS.1770 defines a term for.
///
/// # Multichannel audio
///
/// To perform a loudness measurement of multichannel audio, construct a
/// `ChannelLoudnessMeter` per channel, and later combine the measured power
/// with e.g. `reduce_stereo`.
///
/// # Instantaneous loudness
///
/// The instantaneous loudness is the power over a 400ms window, so you can
/// average four 100ms windows. No special functionality is implemented to help
/// with that at this time. ([Pull requests would be accepted.][contribute])
///
/// # Momentary loudness
///
/// The momentary loudness is the power over a 3-second window, so you can
/// average thirty 100ms windows. No special functionality is implemented to
/// help with that at this time. ([Pull requests would be accepted.][contribute])
///
/// # Integrated loudness
///
/// Use `gated_mean` to perform an integrated loudness measurement:
///
/// ```ignore
/// # use std::iter;
/// # use bs1770::{ChannelLoudnessMeter, gated_mean};
/// # let sample_rate_hz = 44_100;
/// # let samples_per_100ms = sample_rate_hz / 10;
/// # let mut meter = ChannelLoudnessMeter::new(sample_rate_hz);
/// # meter.push((0..44_100).map(|i| (i as f32 * 0.01).sin()));
/// let integrated_loudness_lkfs = gated_mean(meter.as_100ms_windows())
/// .unwrap_or(bs1770::Power(0.0))
/// .loudness_lkfs();
/// ```
///
/// [contribute]: https://github.com/ruuda/bs1770/blob/master/CONTRIBUTING.md
#[derive(Clone)]
pub struct ChannelLoudnessMeter {
/// The number of samples that fit in 100ms of audio.
samples_per_100ms: u32,
/// Stage 1 filter (head effects, high shelf).
filter_stage1: Filter,
/// Stage 2 filter (high-pass).
filter_stage2: Filter,
/// Sum of the squares over non-overlapping windows of 100ms.
windows: Windows100ms<Vec<Power>>,
/// The number of samples in the current unfinished window.
count: u32,
/// The sum of the squares of the samples in the current unfinished window.
square_sum: Sum,
}
impl ChannelLoudnessMeter {
/// Construct a new loudness meter for the given sample rate.
pub fn new(sample_rate_hz: u32) -> ChannelLoudnessMeter {
ChannelLoudnessMeter {
samples_per_100ms: sample_rate_hz / 10,
filter_stage1: Filter::high_shelf(sample_rate_hz as f32),
filter_stage2: Filter::high_pass(sample_rate_hz as f32),
windows: Windows100ms::new(),
count: 0,
square_sum: Sum::zero(),
}
}
/// Feed input samples for loudness analysis.
///
/// # Full scale
///
/// Full scale for the input samples is the interval [-1.0, 1.0]. If your
/// input consists of signed integer samples, you can convert as follows:
///
/// ```ignore
/// # let mut meter = bs1770::ChannelLoudnessMeter::new(44_100);
/// # let bits_per_sample = 16_usize;
/// # let samples = &[0_i16];
/// // Note that the maximum amplitude is `1 << (bits_per_sample - 1)`,
/// // one bit is the sign bit.
/// let normalizer = 1.0 / (1_u64 << (bits_per_sample - 1)) as f32;
/// meter.push(samples.iter().map(|&s| s as f32 * normalizer));
/// ```
///
/// # Repeated calls
///
/// You can call `push` multiple times to feed multiple batches of samples.
/// This is equivalent to feeding a single chained iterator. Leftover samples
/// that did not fill a full 100ms window are not discarded:
///
/// ```ignore
/// # use std::iter;
/// # use bs1770::ChannelLoudnessMeter;
/// let sample_rate_hz = 44_100;
/// let samples_per_100ms = sample_rate_hz / 10;
/// let mut meter = ChannelLoudnessMeter::new(sample_rate_hz);
///
/// meter.push(iter::repeat(0.0).take(samples_per_100ms as usize - 1));
/// assert_eq!(meter.as_100ms_windows().len(), 0);
///
/// meter.push(iter::once(0.0));
/// assert_eq!(meter.as_100ms_windows().len(), 1);
/// ```
pub fn push<I: Iterator<Item = f32>>(&mut self, samples: I) {
let normalizer = 1.0 / self.samples_per_100ms as f32;
// LLVM, if you could go ahead and inline those apply calls, and then
// unroll and vectorize the loop, that'd be terrific.
for x in samples {
let y = self.filter_stage1.apply(x);
let z = self.filter_stage2.apply(y);
self.square_sum.add(z * z);
self.count += 1;
// TODO: Should this branch be marked cold?
if self.count == self.samples_per_100ms {
let mean_squares = Power(self.square_sum.sum * normalizer);
self.windows.inner.push(mean_squares);
// We intentionally do not reset the residue. That way, leftover
// energy from this window is not lost, so for the file overall,
// the sum remains more accurate.
self.square_sum.sum = 0.0;
self.count = 0;
}
}
}
/// Return a reference to the 100ms windows analyzed so far.
pub fn as_100ms_windows(&self) -> Windows100ms<&[Power]> {
self.windows.as_ref()
}
/// Return all 100ms windows analyzed so far.
pub fn into_100ms_windows(self) -> Windows100ms<Vec<Power>> {
self.windows
}
}
/// Combine power for multiple channels by taking a weighted sum.
///
/// Note that BS.1770-4 defines power for a multi-channel signal as a weighted
/// sum over channels which is not normalized. This means that a stereo signal
/// is inherently louder than a mono signal. For a mono signal played back on
/// stereo speakers, you should therefore still apply `reduce_stereo`, passing
/// in the same signal for both channels.
pub fn reduce_stereo(
left: Windows100ms<&[Power]>,
right: Windows100ms<&[Power]>,
) -> Windows100ms<Vec<Power>> {
assert_eq!(
left.len(),
right.len(),
"Channels must have the same length."
);
let mut result = Vec::with_capacity(left.len());
for (l, r) in left.inner.iter().zip(right.inner) {
result.push(Power(l.0 + r.0));
}
Windows100ms { inner: result }
}
/// In-place version of `reduce_stereo` that stores the result in the former left channel.
pub fn reduce_stereo_in_place(left: Windows100ms<&mut [Power]>, right: Windows100ms<&[Power]>) {
assert_eq!(
left.len(),
right.len(),
"Channels must have the same length."
);
for (l, r) in left.inner.iter_mut().zip(right.inner) {
l.0 += r.0;
}
}
/// Perform gating and averaging for a BS.1770-4 integrated loudness measurement.
///
/// The integrated loudness measurement is not just the average power over the
/// entire signal. BS.1770-4 defines two stages of gating that exclude
/// parts of the signal, to ensure that silent parts do not contribute to the
/// loudness measurement. This function performs that gating, and returns the
/// average power over the windows that were not excluded.
///
/// The result of this function is the integrated loudness measurement.
///
/// When no signal remains after applying the gate, this function returns
/// `None`. In particular, this happens when all of the signal is softer than
/// -70 LKFS, including a signal that consists of pure silence.
pub fn gated_mean(windows_100ms: Windows100ms<&[Power]>) -> Option<Power> {
let mut gating_blocks = Vec::with_capacity(windows_100ms.len());
// Stage 1: an absolute threshold of -70 LKFS. (Equation 6, p.6.)
let absolute_threshold = Power::from_lkfs(-70.0);
// Iterate over all 400ms windows.
for window in windows_100ms.inner.windows(4) {
// Note that the sum over channels has already been performed at this point.
let gating_block_power = Power(0.25 * window.iter().map(|mean| mean.0).sum::<f32>());
if gating_block_power > absolute_threshold {
gating_blocks.push(gating_block_power);
}
}
if gating_blocks.is_empty() {
return None;
}
// Compute the loudness after applying the absolute gate, in order to
// determine the threshold for the relative gate.
let mut sum_power = Sum::zero();
for &gating_block_power in &gating_blocks {
sum_power.add(gating_block_power.0);
}
let absolute_gated_power = Power(sum_power.sum / (gating_blocks.len() as f32));
// Stage 2: Apply the relative gate.
let relative_threshold = Power::from_lkfs(absolute_gated_power.loudness_lkfs() - 10.0);
let mut sum_power = Sum::zero();
let mut n_blocks = 0_usize;
for &gating_block_power in &gating_blocks {
if gating_block_power > relative_threshold {
sum_power.add(gating_block_power.0);
n_blocks += 1;
}
}
if n_blocks == 0 {
return None;
}
let relative_gated_power = Power(sum_power.sum / n_blocks as f32);
Some(relative_gated_power)
}
| candle/candle-examples/src/bs1770.rs/0 | {
"file_path": "candle/candle-examples/src/bs1770.rs",
"repo_id": "candle",
"token_count": 7220
} |
/******************************************************************************
* Copyright (c) 2023, Tri Dao.
******************************************************************************/
#pragma once
// #include <c10/cuda/CUDAException.h> // For C10_CUDA_CHECK and C10_CUDA_KERNEL_LAUNCH_CHECK
#include "error.h"
#include "static_switch.h"
#include "hardware_info.h"
#include "flash.h"
#include "flash_fwd_kernel.h"
// Determine if the architecture supports FLASH and define a macro to handle parameter modifiers
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 800
#define ARCH_SUPPORTS_FLASH
#define KERNEL_PARAM_MODIFIER __grid_constant__
#else
#define KERNEL_PARAM_MODIFIER
#endif
// Define a macro for unsupported architecture handling to centralize the error message
#define FLASH_UNSUPPORTED_ARCH printf("FATAL: FlashAttention requires building with sm version sm80-sm90, but was built for < 8.0!");
// Use a macro to clean up kernel definitions
#define DEFINE_FLASH_FORWARD_KERNEL(kernelName, ...) \
template<typename Kernel_traits, __VA_ARGS__> \
__global__ void kernelName(KERNEL_PARAM_MODIFIER const Flash_fwd_params params)
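// For example, DEFINE_FLASH_FORWARD_KERNEL(flash_fwd_kernel, bool Is_causal) expands to
//   template<typename Kernel_traits, bool Is_causal>
//   __global__ void flash_fwd_kernel(KERNEL_PARAM_MODIFIER const Flash_fwd_params params)
// so every kernel below is templated on Kernel_traits plus the listed boolean switches.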
DEFINE_FLASH_FORWARD_KERNEL(flash_fwd_kernel, bool Is_dropout, bool Is_causal, bool Is_local, bool Has_alibi, bool Is_even_MN, bool Is_even_K, bool Is_softcap, bool Return_softmax) {
#if defined(ARCH_SUPPORTS_FLASH)
static_assert(!(Is_causal && Is_local)); // Enforce constraints
flash::compute_attn<Kernel_traits, Is_dropout, Is_causal, Is_local, Has_alibi, Is_even_MN, Is_even_K, Is_softcap, Return_softmax>(params);
#else
FLASH_UNSUPPORTED_ARCH
#endif
}
DEFINE_FLASH_FORWARD_KERNEL(flash_fwd_splitkv_kernel, bool Is_causal, bool Is_local, bool Has_alibi, bool Is_even_MN, bool Is_even_K, bool Is_softcap, bool Split, bool Append_KV) {
#if defined(ARCH_SUPPORTS_FLASH)
flash::compute_attn_splitkv<Kernel_traits, Is_causal, Is_local, Has_alibi, Is_even_MN, Is_even_K, Is_softcap, Split, Append_KV>(params);
#else
FLASH_UNSUPPORTED_ARCH
#endif
}
DEFINE_FLASH_FORWARD_KERNEL(flash_fwd_splitkv_combine_kernel, int kBlockM, int Log_max_splits, bool Is_even_K) {
static_assert(Log_max_splits >= 1);
flash::combine_attn_seqk_parallel<Kernel_traits, kBlockM, Log_max_splits, Is_even_K>(params);
}
template<typename Kernel_traits, bool Is_dropout, bool Is_causal>
void run_flash_fwd(Flash_fwd_params ¶ms, cudaStream_t stream) {
constexpr size_t smem_size = Kernel_traits::kSmemSize;
// printf("smem_size = %d\n", smem_size);
// Work-around for gcc 7. It doesn't like nested BOOL_SWITCH.
// https://github.com/kokkos/kokkos-kernels/issues/349
// https://github.com/HazyResearch/flash-attention/issues/21
const int num_m_block = (params.seqlen_q + Kernel_traits::kBlockM - 1) / Kernel_traits::kBlockM;
dim3 grid(num_m_block, params.b, params.h);
const bool is_even_MN = params.cu_seqlens_q == nullptr && params.cu_seqlens_k == nullptr && params.seqlen_k % Kernel_traits::kBlockN == 0 && params.seqlen_q % Kernel_traits::kBlockM == 0;
const bool is_even_K = params.d == Kernel_traits::kHeadDim;
const bool return_softmax = params.p_ptr != nullptr;
BOOL_SWITCH(is_even_MN, IsEvenMNConst, [&] {
EVENK_SWITCH(is_even_K, IsEvenKConst, [&] {
LOCAL_SWITCH((params.window_size_left >= 0 || params.window_size_right >= 0) && !Is_causal, Is_local, [&] {
BOOL_SWITCH(return_softmax, ReturnSoftmaxConst, [&] {
ALIBI_SWITCH(params.alibi_slopes_ptr != nullptr, Has_alibi, [&] {
SOFTCAP_SWITCH(params.softcap > 0.0, Is_softcap, [&] {
// Will only return softmax if dropout, to reduce compilation time.
// If not IsEvenKConst, we also set IsEvenMNConst to false to reduce number of templates.
// If return_softmax, set IsEvenMNConst to false to reduce number of templates
// If head dim > 128, set IsEvenMNConst to false to reduce number of templates
// If Is_local, set Is_causal to false
auto kernel = &flash_fwd_kernel<Kernel_traits, Is_dropout && !Is_softcap, Is_causal, Is_local && !Is_causal, Has_alibi, IsEvenMNConst && IsEvenKConst && !Is_local && !ReturnSoftmaxConst && Kernel_traits::kHeadDim <= 128, IsEvenKConst, Is_softcap, ReturnSoftmaxConst && Is_dropout && !Is_softcap>;
// auto kernel = &flash_fwd_kernel<Kernel_traits, false, Is_causal, false, false, true, true, false>;
// printf("IsEvenMNConst = %d, IsEvenKConst = %d, Is_local = %d, Is_causal = %d, ReturnSoftmaxConst = %d, Is_dropout = %d\n", int(IsEvenMNConst), int(IsEvenKConst), int(Is_local), int(Is_causal), int(ReturnSoftmaxConst), int(Is_dropout));
// auto kernel = &flash_fwd_kernel<Kernel_traits, false, Is_causal, false, true, true, false>;
if (smem_size >= 48 * 1024) {
C10_CUDA_CHECK(cudaFuncSetAttribute(
kernel, cudaFuncAttributeMaxDynamicSharedMemorySize, smem_size));
}
// int ctas_per_sm;
// cudaError status_ = cudaOccupancyMaxActiveBlocksPerMultiprocessor(
// &ctas_per_sm, kernel, Kernel_traits::kNThreads, smem_size);
// printf("smem_size = %d, CTAs per SM = %d\n", int(smem_size), ctas_per_sm);
kernel<<<grid, Kernel_traits::kNThreads, smem_size, stream>>>(params);
C10_CUDA_KERNEL_LAUNCH_CHECK();
});
});
});
});
});
});
}
template<typename Kernel_traits, bool Is_causal>
void run_flash_splitkv_fwd(Flash_fwd_params ¶ms, cudaStream_t stream) {
static_assert(!Kernel_traits::Is_Q_in_regs, "SplitKV implementation does not support Is_Q_in_regs");
static_assert(!Kernel_traits::Share_Q_K_smem, "SplitKV implementation does not support Share_Q_K_smem");
constexpr size_t smem_size = Kernel_traits::kSmemSize;
const int num_m_block = (params.seqlen_q + Kernel_traits::kBlockM - 1) / Kernel_traits::kBlockM;
dim3 grid(num_m_block, params.num_splits > 1 ? params.num_splits : params.b, params.num_splits > 1 ? params.b * params.h : params.h);
const bool is_even_MN = params.cu_seqlens_q == nullptr && params.cu_seqlens_k == nullptr && params.seqlen_k % Kernel_traits::kBlockN == 0 && params.seqlen_q % Kernel_traits::kBlockM == 0;
const bool is_even_K = params.d == Kernel_traits::kHeadDim;
BOOL_SWITCH(is_even_MN, IsEvenMNConst, [&] {
EVENK_SWITCH(is_even_K, IsEvenKConst, [&] {
LOCAL_SWITCH((params.window_size_left >= 0 || params.window_size_right >= 0) && !Is_causal, Is_local, [&] {
BOOL_SWITCH(params.num_splits > 1, Split, [&] {
BOOL_SWITCH(params.knew_ptr != nullptr, Append_KV, [&] {
ALIBI_SWITCH(params.alibi_slopes_ptr != nullptr, Has_alibi, [&] {
SOFTCAP_SWITCH(params.softcap > 0.0, Is_softcap, [&] {
// If Append_KV, then we must have seqlen_offsets, which means cu_seqlens_k != nullptr.
// If not IsEvenKConst, we also set IsEvenMNConst to false to reduce number of templates.
// If Is_local, set Is_causal to false
auto kernel = &flash_fwd_splitkv_kernel<Kernel_traits, Is_causal, Is_local && !Is_causal, Has_alibi, IsEvenMNConst && !Append_KV && IsEvenKConst && !Is_local && Kernel_traits::kHeadDim <= 128, IsEvenKConst, Is_softcap, Split, Append_KV>;
// auto kernel = &flash_fwd_splitkv_kernel<Kernel_traits, Is_causal, false, true, Split, Append_KV>;
// auto kernel = &flash_fwd_splitkv_kernel<Kernel_traits, Is_causal, false, IsEvenKConst>;
if (smem_size >= 48 * 1024) {
C10_CUDA_CHECK(cudaFuncSetAttribute(
kernel, cudaFuncAttributeMaxDynamicSharedMemorySize, smem_size));
}
kernel<<<grid, Kernel_traits::kNThreads, smem_size, stream>>>(params);
C10_CUDA_KERNEL_LAUNCH_CHECK();
});
});
});
});
});
});
});
if (params.num_splits > 1) {
// We want kBlockM to be as small as possible for more parallelism.
// With 128 threads we can load 512 elements at a time, so if headdim is divisible by 128, kBlockM = 4.
// If headdim is divisible by 64, then we set kBlockM = 8, etc.
constexpr static int kBlockM = Kernel_traits::kHeadDim % 128 == 0 ? 4 : (Kernel_traits::kHeadDim % 64 == 0 ? 8 : 16);
dim3 grid_combine((params.b * params.h * params.seqlen_q + kBlockM - 1) / kBlockM);
EVENK_SWITCH(is_even_K, IsEvenKConst, [&] {
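            // The second template argument is ceil(log2(num_splits)): each branch picks the
            // smallest Log_max_splits such that (1 << Log_max_splits) covers params.num_splits.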
if (params.num_splits <= 2) {
flash_fwd_splitkv_combine_kernel<Kernel_traits, kBlockM, 1, IsEvenKConst><<<grid_combine, Kernel_traits::kNThreads, 0, stream>>>(params);
} else if (params.num_splits <= 4) {
flash_fwd_splitkv_combine_kernel<Kernel_traits, kBlockM, 2, IsEvenKConst><<<grid_combine, Kernel_traits::kNThreads, 0, stream>>>(params);
} else if (params.num_splits <= 8) {
flash_fwd_splitkv_combine_kernel<Kernel_traits, kBlockM, 3, IsEvenKConst><<<grid_combine, Kernel_traits::kNThreads, 0, stream>>>(params);
} else if (params.num_splits <= 16) {
flash_fwd_splitkv_combine_kernel<Kernel_traits, kBlockM, 4, IsEvenKConst><<<grid_combine, Kernel_traits::kNThreads, 0, stream>>>(params);
} else if (params.num_splits <= 32) {
flash_fwd_splitkv_combine_kernel<Kernel_traits, kBlockM, 5, IsEvenKConst><<<grid_combine, Kernel_traits::kNThreads, 0, stream>>>(params);
} else if (params.num_splits <= 64) {
flash_fwd_splitkv_combine_kernel<Kernel_traits, kBlockM, 6, IsEvenKConst><<<grid_combine, Kernel_traits::kNThreads, 0, stream>>>(params);
} else if (params.num_splits <= 128) {
flash_fwd_splitkv_combine_kernel<Kernel_traits, kBlockM, 7, IsEvenKConst><<<grid_combine, Kernel_traits::kNThreads, 0, stream>>>(params);
}
C10_CUDA_KERNEL_LAUNCH_CHECK();
});
}
}
template<typename T, int Headdim, bool Is_causal>
void run_mha_fwd_splitkv_dispatch(Flash_fwd_params ¶ms, cudaStream_t stream) {
constexpr static int kBlockM = 64; // Fixed for all head dimensions
// TD [2023-08-28]: nvcc segfaults for headdim 96 with block size 64 x 256,
// and for headdim 192 with block size 64 x 128.
// Also for headdim 160 with block size 64 x 128 after the rotary addition.
constexpr static int kBlockN = Headdim <= 64 ? 256 : (Headdim <= 128 ? 128 : 64);
run_flash_splitkv_fwd<Flash_fwd_kernel_traits<Headdim, kBlockM, kBlockN, 4, false, false, T>, Is_causal>(params, stream);
}
template<typename T, bool Is_causal>
void run_mha_fwd_hdim32(Flash_fwd_params ¶ms, cudaStream_t stream) {
constexpr static int Headdim = 32;
DROPOUT_SWITCH(params.p_dropout < 1.f, Is_dropout, [&] {
run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 128, 4, false, false, T>, Is_dropout, Is_causal>(params, stream);
});
}
template<typename T, bool Is_causal>
void run_mha_fwd_hdim64(Flash_fwd_params ¶ms, cudaStream_t stream) {
constexpr static int Headdim = 64;
DROPOUT_SWITCH(params.p_dropout < 1.f, Is_dropout, [&] {
if constexpr(!Is_dropout) {
// Using 8 warps is 18% slower for seqlen=2k, 2 warps is 5% slower
// Using block size (64 x 256) is 27% slower for seqlen=2k
// Using block size (256 x 64) is 85% slower for seqlen=2k, because of register spilling
run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 128, 4, false, false, T>, Is_dropout, Is_causal>(params, stream);
// run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 64, 4, true, false, T>, Is_dropout, Is_causal>(params, stream);
// run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 64, 4, true, true, T>, Is_dropout, Is_causal>(params, stream);
} else {
run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 64, 4, false, false, T>, Is_dropout, Is_causal>(params, stream);
// run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 64, 4, true, true, T>, Is_dropout, Is_causal>(params, stream);
// run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 64, 4, true, false, T>, Is_dropout, Is_causal>(params, stream);
// run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 128, 4, false, false, T>, Is_dropout, Is_causal>(params, stream);
}
});
}
inline bool cuda_is_sm8x() {
// dprops = at::cuda::getCurrentDeviceProperties();
// return dprops->major == 8 && dprops->minor > 0;
return false;
}
template<typename T, bool Is_causal>
void run_mha_fwd_hdim96(Flash_fwd_params ¶ms, cudaStream_t stream) {
constexpr static int Headdim = 96;
auto [cc_major, cc_minor] = get_compute_capability(get_current_device());
bool is_sm8x = cc_major == 8 && cc_minor > 0;
DROPOUT_SWITCH(params.p_dropout < 1.f, Is_dropout, [&] {
// For sm86 or sm89, 64 x 64 is the fastest for causal (because it's square),
if (is_sm8x) {
if constexpr(!Is_causal) {
run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 64, 4, false, false, T>, Is_dropout, Is_causal>(params, stream);
} else {
run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 64, 64, 4, false, false, T>, Is_dropout, Is_causal>(params, stream);
}
} else {
run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 64, 4, false, false, T>, Is_dropout, Is_causal>(params, stream);
}
// run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 64, 4, true, false, T>, Is_dropout, Is_causal>(params, stream);
// run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 64, 4, true, true, T>, Is_dropout, Is_causal>(params, stream);
// These two are always slower
// run_flash_fwd<Flash_fwd_kernel_traits<96, 128, 128, 4, true, T>>(params, stream);
// run_flash_fwd<Flash_fwd_kernel_traits<96, 64, 128, 4, true, T>>(params, stream);
});
}
template<typename T, bool Is_causal>
void run_mha_fwd_hdim128(Flash_fwd_params ¶ms, cudaStream_t stream) {
constexpr static int Headdim = 128;
auto [cc_major, cc_minor] = get_compute_capability(get_current_device());
bool is_sm8x = cc_major == 8 && cc_minor > 0;
DROPOUT_SWITCH(params.p_dropout < 1.f, Is_dropout, [&] {
if constexpr(!Is_dropout) {
// For sm86 or sm89, 64 x 64 is the fastest for causal (because it's square),
// and 128 x 32 (48 KB smem) is the fastest for non-causal since we get 2 CTAs per SM.
if (is_sm8x) {
if constexpr(!Is_causal) {
run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 32, 4, false, false, T>, Is_dropout, Is_causal>(params, stream);
} else {
run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 64, 64, 4, false, false, T>, Is_dropout, Is_causal>(params, stream);
}
} else {
run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 64, 4, false, false, T>, Is_dropout, Is_causal>(params, stream);
}
// run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 64, 4, true, false, T>, Is_dropout, Is_causal>(params, stream);
// run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 64, 4, true, true, T>, Is_dropout, Is_causal>(params, stream);
// run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 64, 128, 4, false, false, T>, Is_dropout, Is_causal>(params, stream);
// Using 8 warps (128 x 128 and 256 x 64) is 28% slower for seqlen=2k
// run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 128, 8, false, false, T>, Is_dropout, Is_causal>(params, stream);
// run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 64, 8, false, false, T>, Is_dropout, Is_causal>(params, stream);
// 1st ones are good for H100, A100
// 2nd one is good for A6000 bc we get slightly better occupancy
} else {
run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 32, 4, false, false, T>, Is_dropout, Is_causal>(params, stream);
// run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 64, 64, 4, false, false, T>, Is_dropout, Is_causal>(params, stream);
// run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 32, 4, true, false, T>, Is_dropout, Is_causal>(params, stream);
// run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 32, 4, true, true, T>, Is_dropout, Is_causal>(params, stream);
}
});
}
template<typename T, bool Is_causal>
void run_mha_fwd_hdim160(Flash_fwd_params ¶ms, cudaStream_t stream) {
constexpr static int Headdim = 160;
auto [cc_major, cc_minor] = get_compute_capability(get_current_device());
bool is_sm8x = cc_major == 8 && cc_minor > 0;
DROPOUT_SWITCH(params.p_dropout < 1.f, Is_dropout, [&] {
// For A100, H100, 128 x 32 is the fastest.
// For sm86 or sm89, 64 x 64 is the fastest for causal (because it's square),
// and 128 x 64 with 8 warps is the fastest for non-causal.
if (is_sm8x) {
if constexpr(!Is_causal) {
run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 64, 8, false, false, T>, Is_dropout, Is_causal>(params, stream);
} else {
run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 64, 64, 4, false, false, T>, Is_dropout, Is_causal>(params, stream);
}
} else {
run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 32, 4, false, false, T>, Is_dropout, Is_causal>(params, stream);
}
// run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 32, 4, false, true, T>, Is_dropout, Is_causal>(params, stream);
// run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 64, 4, false, false, T>, Is_dropout, Is_causal>(params, stream);
// run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 64, 4, false, T>>(params, stream);
// run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 64, 128, 4, false, T>>(params, stream);
// run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 64, 64, 4, false, T>>(params, stream);
// run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 64, 8, false, T>>(params, stream);
// run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 128, 8, false, T>>(params, stream);
});
}
template<typename T, bool Is_causal>
void run_mha_fwd_hdim192(Flash_fwd_params ¶ms, cudaStream_t stream) {
constexpr static int Headdim = 192;
DROPOUT_SWITCH(params.p_dropout < 1.f, Is_dropout, [&] {
if constexpr(!Is_dropout) {
run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 64, 8, false, false, T>, Is_dropout, Is_causal>(params, stream);
} else {
run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 64, 64, 4, false, false, T>, Is_dropout, Is_causal>(params, stream);
}
// run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 64, 32, 4, false, false, T>, Is_dropout, Is_causal>(params, stream);
// run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 32, 8, false, false, T>, Is_dropout, Is_causal>(params, stream);
// run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 64, 4, false, T>>(params, stream);
// run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 64, 128, 4, false, T>>(params, stream);
// run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 128, 8, false, T>>(params, stream);
});
}
template<typename T, bool Is_causal>
void run_mha_fwd_hdim224(Flash_fwd_params ¶ms, cudaStream_t stream) {
constexpr static int Headdim = 224;
int device;
cudaGetDevice(&device);
int max_smem_per_block;
cudaError status_ = cudaDeviceGetAttribute(
&max_smem_per_block, cudaDevAttrMaxSharedMemoryPerBlockOptin, device);
if (status_ != cudaSuccess) {
C10_CUDA_CHECK(status_);
}
// printf("max_smem_per_block = %d\n", max_smem_per_block);
DROPOUT_SWITCH(params.p_dropout < 1.f, Is_dropout, [&] {
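        // The threshold 2 * Headdim * (128 + 2 * 64) is 114688 bytes (112 KB); it plausibly
        // corresponds to half-precision Q (128 x Headdim) plus K and V (64 x Headdim each) tiles.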
if (max_smem_per_block >= 2 * Headdim * (128 + 2 * 64)) { // 112 KB
run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 64, 8, false, false, T>, Is_dropout, Is_causal>(params, stream);
} else {
run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 64, 64, 4, false, false, T>, Is_dropout, Is_causal>(params, stream);
}
// run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 32, 4, false, false, T>, Is_dropout, Is_causal>(params, stream);
// run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 64, 32, 4, false, false, T>, Is_dropout, Is_causal>(params, stream);
// We can't do 128 x 32 with 8 warps because with headdim 224, kBlockKSmem = 32.
// If we have N = 32, there are only 1024 elements to load at once, where each load
// is 8 elements. This means we can only use 128 threads and not 256 threads.
// run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 32, 8, false, false, T>, Is_dropout, Is_causal>(params, stream);
});
}
template<typename T, bool Is_causal>
void run_mha_fwd_hdim256(Flash_fwd_params ¶ms, cudaStream_t stream) {
constexpr static int Headdim = 256;
int device;
cudaGetDevice(&device);
int max_smem_per_sm, max_smem_per_block;
cudaError status_ = cudaDeviceGetAttribute(
&max_smem_per_sm, cudaDevAttrMaxSharedMemoryPerMultiprocessor, device);
status_ = cudaDeviceGetAttribute(
&max_smem_per_block, cudaDevAttrMaxSharedMemoryPerBlockOptin, device);
if (status_ != cudaSuccess) {
C10_CUDA_CHECK(status_);
}
// printf("max_smem_per_sm = %d, max_smem_per_block = %d\n", max_smem_per_sm, max_smem_per_block);
DROPOUT_SWITCH(params.p_dropout < 1.f, Is_dropout, [&] {
// For A100, we want to run with 128 x 64 (128KB smem).
// For H100 we want to run with 64 x 64 (96KB smem) since then we can get 2 CTAs per SM.
if (max_smem_per_block >= 2 * Headdim * (128 + 2 * 64) && max_smem_per_sm < 4 * Headdim * (64 + 2 * 64)) {
run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 64, 8, false, false, T>, Is_dropout, Is_causal>(params, stream);
} else {
run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 64, 64, 4, false, false, T>, Is_dropout, Is_causal>(params, stream);
}
// 64 KB
// run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 64, 32, 4, false, false, T>, Is_dropout, Is_causal>(params, stream);
// 96 KB
// run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 32, 8, false, false, T>, Is_dropout, Is_causal>(params, stream);
});
}
| candle/candle-flash-attn/kernels/flash_fwd_launch_template.h/0 | {
"file_path": "candle/candle-flash-attn/kernels/flash_fwd_launch_template.h",
"repo_id": "candle",
"token_count": 10705
} |
#define _USE_MATH_DEFINES
#include<math.h>
#include<stdint.h>
#include "cuda_utils.cuh"
#define UNARY_OP(TYPENAME, FN_NAME, FUNC) \
extern "C" __global__ void FN_NAME( \
const size_t numel, \
const size_t num_dims, \
const size_t *info, \
const TYPENAME *inp, \
TYPENAME *out \
) { \
const size_t *dims = info; \
const size_t *strides = info + num_dims; \
if (info == nullptr || is_contiguous(num_dims, dims, strides)) { \
for (unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; i < numel; i += blockDim.x * gridDim.x) { \
TYPENAME x = inp ? inp[i] : out[i]; \
out[i] = FUNC; \
} \
} \
else { \
for (unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; i < numel; i += blockDim.x * gridDim.x) { \
unsigned strided_i = get_strided_index(i, num_dims, dims, strides); \
TYPENAME x = inp ? inp[strided_i] : out[i]; \
out[i] = FUNC; \
} \
} \
} \
template<typename T>
__device__ __forceinline__ T gelu_erf_fwd(T x) {
return x * normcdfg(x);
}
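// Tanh approximation of GELU: 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x^3))).
// Note M_2_SQRTPI * M_SQRT1_2 == (2/sqrt(pi)) * (1/sqrt(2)) == sqrt(2/pi).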
template<typename T>
__device__ __forceinline__ T gelu_fwd(T x) {
T x_sq = x * x;
T x_cube = x_sq * x;
T alpha = x + static_cast<T>(0.044715) * x_cube;
return static_cast<T>(0.5) * x * (static_cast<T>(1.0) + tanhg(static_cast<T>(M_2_SQRTPI * M_SQRT1_2) * alpha));
}
template<typename T>
__device__ __forceinline__ T elu_fwd(T x, T alpha) {
if (x > static_cast<T>(0)) {
return x;
}
return alpha * (expg(x) - static_cast<T>(1));
}
template<typename T>
__device__ __forceinline__ T relu_fwd(T x) {
T zero = 0.;
return maxg(x, zero);
}
template<typename T>
__device__ __forceinline__ T silu_fwd(T x) {
return x / (static_cast<T>(1) + expg(-x));
}
template<typename T>
__device__ __forceinline__ T sigmoid_fwd(T x) {
return recipg(static_cast<T>(1) + expg(-x));
}
#define UNARY_OP1(TYPENAME, FN_NAME, FUNC) \
extern "C" __global__ void FN_NAME( \
const size_t numel, \
const size_t num_dims, \
const size_t *info, \
const TYPENAME param, \
const TYPENAME *inp, \
TYPENAME *out \
) { \
const size_t *dims = info; \
const size_t *strides = info + num_dims; \
if (info == nullptr || is_contiguous(num_dims, dims, strides)) { \
for (unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; i < numel; i += blockDim.x * gridDim.x) { \
TYPENAME x = inp ? inp[i] : out[i]; \
out[i] = FUNC; \
} \
} \
else { \
for (unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; i < numel; i += blockDim.x * gridDim.x) { \
unsigned strided_i = get_strided_index(i, num_dims, dims, strides); \
TYPENAME x = inp ? inp[strided_i] : out[i]; \
out[i] = FUNC; \
} \
} \
} \
template<typename T>
__device__ T sign_(T t) {
return static_cast<T>(t > static_cast<T>(0)) - static_cast<T>(t < static_cast<T>(0));
}
#if __CUDA_ARCH__ >= 800
UNARY_OP(__nv_bfloat16, ucopy_bf16, x)
UNARY_OP(__nv_bfloat16, uneg_bf16, -x)
UNARY_OP(__nv_bfloat16, urecip_bf16, recipg(x))
UNARY_OP(__nv_bfloat16, uexp_bf16, expg(x))
UNARY_OP(__nv_bfloat16, ulog_bf16, logg(x))
UNARY_OP(__nv_bfloat16, usin_bf16, sing(x))
UNARY_OP(__nv_bfloat16, ucos_bf16, cosg(x))
UNARY_OP(__nv_bfloat16, utanh_bf16, tanhg(x))
UNARY_OP(__nv_bfloat16, uerf_bf16, erfg(x))
UNARY_OP(__nv_bfloat16, uceil_bf16, ceilg(x))
UNARY_OP(__nv_bfloat16, ufloor_bf16, floorg(x))
UNARY_OP(__nv_bfloat16, uround_bf16, roundg(x))
UNARY_OP(__nv_bfloat16, unormcdf_bf16, normcdfg(x))
UNARY_OP(__nv_bfloat16, uabs_bf16, absg(x))
UNARY_OP(__nv_bfloat16, usqr_bf16, x*x)
UNARY_OP(__nv_bfloat16, usqrt_bf16, sqrtg(x))
UNARY_OP(__nv_bfloat16, ugelu_bf16, gelu_fwd(x))
UNARY_OP(__nv_bfloat16, ugelu_erf_bf16, gelu_erf_fwd(x))
UNARY_OP(__nv_bfloat16, urelu_bf16, relu_fwd(x))
UNARY_OP1(__nv_bfloat16, uelu_bf16, elu_fwd(x, param))
UNARY_OP(__nv_bfloat16, usilu_bf16, silu_fwd(x))
UNARY_OP1(__nv_bfloat16, upowf_bf16, powg(x, param))
UNARY_OP(__nv_bfloat16, usign_bf16, sign_(x))
UNARY_OP(__nv_bfloat16, usigmoid_bf16, sigmoid_fwd(x))
#endif
#if __CUDA_ARCH__ >= 530
UNARY_OP(__half, ucopy_f16, x)
UNARY_OP(__half, uneg_f16, -x)
UNARY_OP(__half, urecip_f16, recipg(x))
UNARY_OP(__half, uexp_f16, expg(x))
UNARY_OP(__half, ulog_f16, logg(x))
UNARY_OP(__half, usin_f16, sing(x))
UNARY_OP(__half, ucos_f16, cosg(x))
UNARY_OP(__half, utanh_f16, tanhg(x))
UNARY_OP(__half, uerf_f16, erfg(x))
UNARY_OP(__half, uceil_f16, ceilg(x))
UNARY_OP(__half, ufloor_f16, floorg(x))
UNARY_OP(__half, uround_f16, roundg(x))
UNARY_OP(__half, unormcdf_f16, normcdfg(x))
UNARY_OP(__half, uabs_f16, absg(x))
UNARY_OP(__half, usqr_f16, x*x)
UNARY_OP(__half, usqrt_f16, sqrtg(x))
UNARY_OP(__half, ugelu_f16, gelu_fwd(x))
UNARY_OP(__half, ugelu_erf_f16, gelu_erf_fwd(x))
UNARY_OP(__half, urelu_f16, relu_fwd(x))
UNARY_OP1(__half, uelu_f16, elu_fwd(x, param))
UNARY_OP(__half, usilu_f16, silu_fwd(x))
UNARY_OP1(__half, upowf_f16, powg(x, param))
UNARY_OP(__half, usign_f16, sign_(x))
UNARY_OP(__half, usigmoid_f16, sigmoid_fwd(x))
#endif
UNARY_OP(uint8_t, ucopy_u8, x)
UNARY_OP(uint32_t, ucopy_u32, x)
UNARY_OP(int64_t, ucopy_i64, x)
UNARY_OP(float, ucopy_f32, x)
UNARY_OP(double, ucopy_f64, x)
UNARY_OP(float, uneg_f32, -x)
UNARY_OP(double, uneg_f64, -x)
UNARY_OP(float, urecip_f32, recipg(x))
UNARY_OP(double, urecip_f64, recipg(x))
UNARY_OP(float, uexp_f32, expg(x))
UNARY_OP(double, uexp_f64, expg(x))
UNARY_OP(float, ulog_f32, logg(x))
UNARY_OP(double, ulog_f64, logg(x))
UNARY_OP(float, usin_f32, sing(x))
UNARY_OP(double, usin_f64, sing(x))
UNARY_OP(float, ucos_f32, cosg(x))
UNARY_OP(double, ucos_f64, cosg(x))
UNARY_OP(float, utanh_f32, tanhg(x))
UNARY_OP(double, utanh_f64, tanhg(x))
UNARY_OP(float, uerf_f32, erfg(x))
UNARY_OP(double, uerf_f64, erfg(x))
UNARY_OP(float, uceil_f32, ceilg(x))
UNARY_OP(double, uceil_f64, ceilg(x))
UNARY_OP(float, ufloor_f32, floorg(x))
UNARY_OP(double, ufloor_f64, floorg(x))
UNARY_OP(float, uround_f32, roundg(x))
UNARY_OP(double, uround_f64, roundg(x))
UNARY_OP(float, unormcdf_f32, normcdfg(x))
UNARY_OP(double, unormcdf_f64, normcdfg(x))
UNARY_OP(float, uabs_f32, absg(x))
UNARY_OP(double, uabs_f64, absg(x))
UNARY_OP(float, usqr_f32, x*x)
UNARY_OP(double, usqr_f64, x*x)
UNARY_OP(float, usqrt_f32, sqrtg(x))
UNARY_OP(double, usqrt_f64, sqrtg(x))
UNARY_OP(float, ugelu_f32, gelu_fwd(x))
UNARY_OP(double, ugelu_f64, gelu_fwd(x))
UNARY_OP(float, ugelu_erf_f32, gelu_erf_fwd(x))
UNARY_OP(double, ugelu_erf_f64, gelu_erf_fwd(x))
UNARY_OP(float, urelu_f32, relu_fwd(x))
UNARY_OP(double, urelu_f64, relu_fwd(x))
UNARY_OP1(float, uelu_f32, elu_fwd(x, param))
UNARY_OP1(double, uelu_f64, elu_fwd(x, param))
UNARY_OP(float, usilu_f32, silu_fwd(x))
UNARY_OP(double, usilu_f64, silu_fwd(x))
UNARY_OP1(float, upowf_f32, powg(x, param))
UNARY_OP1(double, upowf_f64, powg(x, param))
UNARY_OP(float, usign_f32, sign_(x))
UNARY_OP(double, usign_f64, sign_(x))
UNARY_OP(float, usigmoid_f32, sigmoid_fwd(x))
UNARY_OP(double, usigmoid_f64, sigmoid_fwd(x))
| candle/candle-kernels/src/unary.cu/0 | {
"file_path": "candle/candle-kernels/src/unary.cu",
"repo_id": "candle",
"token_count": 3677
} |
#include <metal_stdlib>
#include <metal_limits>
using namespace metal;
METAL_FUNC uint nonzero(uint n) {
return n == 0 ? 1 : n;
}
template<uint N>
constexpr uint nonzero() {
return N == 0 ? 1 : N;
}
template<typename T>
constexpr ushort granularity() {
return nonzero<vec_elements<T>::value>();
}
METAL_FUNC uint next_p2(uint x) {
return 1 << (32 - clz(x - 1));
}
METAL_FUNC uint prev_p2(uint x) {
return 1 << (31 - clz(x));
}
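// Threadgroup memory budget: max_shared_mem returns the largest power-of-two element
// count of type T that fits in MAX_SHARED_MEM bytes, capped at the requested n.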
constant uint MAX_SHARED_MEM = 32767;
template<typename T>
METAL_FUNC uint max_shared_mem(uint n) {
return min(n, prev_p2(MAX_SHARED_MEM / sizeof(T)));
}
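// Maps a linear element index to a storage offset for a possibly non-contiguous layout.
// Example: with dims = [2, 3, 4] and contiguous strides = [12, 4, 1], linear index 17
// decomposes into coordinates (1, 1, 1) and maps to 1*12 + 1*4 + 1*1 = 17; permuted
// strides yield the corresponding permuted offset for the same coordinates.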
METAL_FUNC uint get_strided_index(
uint idx,
constant const uint &num_dims,
constant const size_t *dims,
constant const size_t *strides
) {
uint strided_i = 0;
for (uint d = 0; d < num_dims; d++) {
uint dim_idx = num_dims - 1 - d;
strided_i += (idx % dims[dim_idx]) * strides[dim_idx];
idx /= dims[dim_idx];
}
return strided_i;
}
struct Divide {
template<typename T>
METAL_FUNC T operator()(T a, T b) { return a / b; }
METAL_FUNC float operator()(float a, float b) { return fast::divide(a, b); }
METAL_FUNC half operator()(half a, half b) { return divide(a, b); }
#if defined(__HAVE_BFLOAT__)
METAL_FUNC bfloat operator()(bfloat a, bfloat b) { return static_cast<bfloat>(fast::divide(a, b)); }
#endif
};
struct Exp {
template<typename T>
METAL_FUNC T operator()(T a) { return fast::exp(a); }
METAL_FUNC float operator()(float a) { return fast::exp(a); }
METAL_FUNC half operator()(half a) { return exp(a); }
#if defined(__HAVE_BFLOAT__)
METAL_FUNC bfloat operator()(bfloat a) { return static_cast<bfloat>(fast::exp(a)); }
#endif
};
// Keeps track of the index of the value in the reduction operation (argmin, argmax, etc.)
// and the value itself. The index is also used to break ties in the reduction operation.
template <typename T>
struct indexed {
uint i;
T val;
constexpr indexed<T>() threadgroup = default;
};
template <typename T>
struct is_indexed_type {
static constant constexpr bool value = false;
};
template <typename T>
constexpr constant bool is_indexed_t = is_indexed_type<T>::value;
template <typename T>
struct is_indexed_type<indexed<T>> {
static constant constexpr bool value = true;
};
template <typename T>
constexpr constant bool not_indexed_t = !is_indexed_t<T>;
template<typename T>
constexpr METAL_FUNC bool operator<(indexed<T> lhs, indexed<T> rhs) {
return lhs.val < rhs.val || (lhs.val == rhs.val && lhs.i < rhs.i);
}
template<typename T>
constexpr METAL_FUNC bool operator>(indexed<T> lhs, indexed<T> rhs) {
return lhs.val > rhs.val || (lhs.val == rhs.val && lhs.i < rhs.i);
}
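// Both comparisons break ties on equal values by preferring the smaller index, so the
// arg-reductions below return the first occurrence of the minimum or maximum.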
template<typename T>
struct _numeric_limits_impl<indexed<T>> {
static constexpr METAL_FUNC indexed<T> lowest() {
return indexed<T>{ 0, numeric_limits<T>::lowest() };
}
static constexpr METAL_FUNC indexed<T> max() {
return indexed<T>{ 0, numeric_limits<T>::max() };
}
};
#if __METAL_VERSION__ >= 220
METAL_FUNC int64_t simd_shuffle_down(int64_t data, uint16_t delta) {
return as_type<int64_t>(simd_shuffle_down(as_type<uint2>(data), delta));
}
#endif
#if defined(__HAVE_BFLOAT__)
// Metal does not have simd_shuffle_down for bfloat16
METAL_FUNC bfloat simd_shuffle_down(bfloat value, ushort delta) {
return as_type<bfloat>(simd_shuffle_down(as_type<ushort>(value), delta));
}
#endif
template <typename T>
METAL_FUNC indexed<T> simd_shuffle_down(indexed<T> iv, ushort delta) {
return indexed<T> {
simd_shuffle_down(iv.i, delta),
simd_shuffle_down(iv.val, delta)
};
}
template<typename T>
struct Sum {
static constexpr METAL_FUNC T init() {
return 0;
}
static METAL_FUNC T simd_op(T a) {
return simd_sum(a);
}
template<typename V>
METAL_FUNC V operator()(V a, V b) {
return a + b;
}
};
template<typename T>
struct Mul {
static constexpr METAL_FUNC T init() {
return 1;
}
static METAL_FUNC T simd_op(T a) {
return simd_product(a);
}
template<typename V>
METAL_FUNC V operator()(V a, V b) {
return a * b;
}
};
template<typename T>
struct Min {
static constexpr METAL_FUNC T init() {
return numeric_limits<T>::max();
}
static METAL_FUNC T simd_op(T a) {
return simd_min(a);
}
template<typename V>
METAL_FUNC V operator()(V a, V b) { return a < b ? a : b; }
METAL_FUNC float operator()(float a, float b) { return fast::min(a, b); }
METAL_FUNC half operator()(half a, half b) { return min(a, b); }
METAL_FUNC uint operator()(uint a, uint b) { return min(a, b); }
METAL_FUNC uchar operator()(uchar a, uchar b) { return min(a, b); }
#if __METAL_VERSION__ >= 220
METAL_FUNC long operator()(long a, long b) { return min(a, b); }
#endif
#if defined(__HAVE_BFLOAT__)
METAL_FUNC bfloat operator()(bfloat a, bfloat b) { return static_cast<bfloat>(fast::min(static_cast<float>(a), static_cast<float>(b))); }
#endif
};
template<typename T>
struct Max {
static constexpr METAL_FUNC T init() {
return numeric_limits<T>::lowest();
}
static METAL_FUNC T simd_op(T a) {
return simd_max(a);
}
template<typename V>
METAL_FUNC V operator()(V a, V b) { return a > b ? a : b; }
METAL_FUNC float operator()(float a, float b) { return fast::max(a, b); }
METAL_FUNC half operator()(half a, half b) { return max(a, b); }
METAL_FUNC uint operator()(uint a, uint b) { return max(a, b); }
METAL_FUNC uchar operator()(uchar a, uchar b) { return max(a, b); }
#if __METAL_VERSION__ >= 220
METAL_FUNC long operator()(long a, long b) { return max(a, b); }
#endif
#if defined(__HAVE_BFLOAT__)
METAL_FUNC bfloat operator()(bfloat a, bfloat b) { return static_cast<bfloat>(fast::max(static_cast<float>(a), static_cast<float>(b))); }
#endif
};
template <typename T>
constexpr constant bool is_simd_t = __is_valid_simdgroup_type<T>::value;
template <typename T, typename _E = void>
struct is_valid_simd_type {
static constant constexpr bool value = false;
};
template <typename T>
constexpr constant bool is_valid_simd_t = is_valid_simd_type<T>::value;
template <typename T>
struct is_valid_simd_type<T, typename metal::enable_if_t<is_simd_t<T>>> {
static constant constexpr bool value = true;
};
template <typename T>
struct is_valid_simd_type<indexed<T>, typename metal::enable_if_t<is_valid_simd_t<T>>> {
static constant constexpr bool value = true;
};
#if __METAL_VERSION__ >= 220
template <>
struct is_valid_simd_type<int64_t> {
static constant constexpr bool value = true;
};
#endif
#if defined(__HAVE_BFLOAT__)
template <>
struct is_valid_simd_type<bfloat> {
static constant constexpr bool value = true;
};
#endif
template <typename T, typename _E = void>
struct is_simd_op {
static constant constexpr bool value = false;
};
template <typename T>
struct is_simd_op<Sum<T>, typename metal::enable_if_t<is_simd_t<T>>> {
static constant constexpr bool value = true;
};
template <typename T>
struct is_simd_op<Mul<T>, typename metal::enable_if_t<is_simd_t<T>>> {
static constant constexpr bool value = true;
};
template <typename T>
struct is_simd_op<Min<T>, typename metal::enable_if_t<is_simd_t<T>>> {
static constant constexpr bool value = true;
};
template <typename T>
struct is_simd_op<Max<T>, typename metal::enable_if_t<is_simd_t<T>>> {
static constant constexpr bool value = true;
};
// Helper struct for applying operators.
// The overloaded operator() function is used to apply an operator to two values.
template<typename OP, typename T>
struct operation;
// Specialization for scalar values.
template<typename OP, typename T>
struct operation {
OP op;
METAL_FUNC T operator()(T a, T b) {
return op(a, b);
}
};
// Specialization for indexed values.
template<typename OP, typename T>
struct operation<OP, indexed<T>> {
OP op;
METAL_FUNC indexed<T> operator()(indexed<T> a, indexed<T> b) {
return op(a, b);
}
METAL_FUNC indexed<T> operator()(indexed<T> a, T b, uint idx) {
return this->operator()(a, indexed<T>{ idx, b });
}
};
// Load elements from global memory, reducing them into a per-thread partial result.
// Handles both indexed and non-indexed types by using operate.
template<
typename T,
typename R,
typename OP,
ushort BLOCKSIZE,
bool STRIDED = false,
typename _E = void
>
struct loader;
// Contiguous
template<
typename T,
typename R,
typename OP,
ushort BLOCKSIZE
>
struct loader<T, R, OP, BLOCKSIZE, false, typename metal::enable_if_t<not_indexed_t<R>>> {
operation<OP, R> operate;
METAL_FUNC R operator()(
R value,
constant uint &src_numel,
constant uint &el_per_block,
device const T *src,
const uint offset,
const uint tid
) {
uint idx = tid + offset;
const uint stop_idx = min(el_per_block + offset, src_numel);
#pragma clang loop unroll(full)
for (uint i = idx; i < stop_idx; i += BLOCKSIZE) {
value = operate(value, src[i]);
}
return value;
}
METAL_FUNC R operator()(
R value,
constant uint &src_numel,
constant uint &num_dims,
constant size_t *dims,
constant size_t *strides,
constant uint &el_per_block,
device const T *src,
const uint offset,
const uint tid
) {
return this->operator()(value, src_numel, el_per_block, src, offset, tid);
}
};
// Strided
template<
typename T,
typename R,
typename OP,
ushort BLOCKSIZE
>
struct loader<T, R, OP, BLOCKSIZE, true, typename metal::enable_if_t<not_indexed_t<R>>> {
operation<OP, R> operate;
METAL_FUNC R operator()(
R value,
constant uint &src_numel,
constant uint &num_dims,
constant size_t *dims,
constant size_t *strides,
constant uint &el_per_block,
device const T *src,
const uint offset,
const uint tid
) {
const uint idx = tid + offset;
const uint stop_idx = min(el_per_block + offset, src_numel);
#pragma clang loop unroll(full)
for (uint i = idx; i < stop_idx; i += BLOCKSIZE) {
value = operate(value, src[get_strided_index(i, num_dims, dims, strides)]);
}
return value;
}
};
// Indexed contiguous
template<
typename T,
typename R,
typename OP,
ushort BLOCKSIZE
>
struct loader<T, R, OP, BLOCKSIZE, false, typename metal::enable_if_t<is_indexed_t<R>>> {
operation<OP, R> operate;
METAL_FUNC R operator()(
R value,
constant uint &src_numel,
constant uint &num_dims,
constant size_t *dims,
constant size_t *strides,
constant uint &el_per_block,
device const T *src,
const uint offset,
const uint tid
) {
const uint thread_id = tid + offset;
const uint stop_idx = min(el_per_block + offset, src_numel);
#pragma clang loop unroll(full)
for (uint i = thread_id; i < stop_idx; i += BLOCKSIZE) {
value = operate(value, src[i], i % dims[num_dims - 1]);
}
return value;
}
};
// Indexed strided
template<
typename T,
typename R,
typename OP,
ushort BLOCKSIZE
>
struct loader<T, R, OP, BLOCKSIZE, true, typename metal::enable_if_t<is_indexed_t<R>>> {
operation<OP, R> operate;
METAL_FUNC R operator()(
R value,
constant uint &src_numel,
constant uint &num_dims,
constant size_t *dims,
constant size_t *strides,
constant uint &el_per_block,
device const T *src,
const uint offset,
const uint tid
) {
const uint thread_id = tid + offset;
const uint stop_idx = min(el_per_block + offset, src_numel);
#pragma clang loop unroll(full)
for (uint i = thread_id; i < stop_idx; i += BLOCKSIZE) {
value = operate(value, src[get_strided_index(i, num_dims, dims, strides)], i % dims[num_dims - 1]);
}
return value;
}
};
template<
typename OP,
ushort BLOCKSIZE,
typename T,
typename _E = void
>
struct simdgroup_reducer;
// Specialization for built-in simd operations.
template<typename OP, ushort BLOCKSIZE, typename T>
struct simdgroup_reducer<OP, BLOCKSIZE, T, typename metal::enable_if_t<is_simd_op<OP>::value && is_valid_simd_t<T>>> {
METAL_FUNC T operator()(T value) {
return OP::simd_op(value);
}
};
// Specialization for custom (non-built-in) simd operations.
template<typename OP, ushort BLOCKSIZE, typename T>
struct simdgroup_reducer<OP, BLOCKSIZE, T, typename metal::enable_if_t<!is_simd_op<OP>::value && is_valid_simd_t<T>>> {
operation<OP, T> op;
METAL_FUNC T operator()(T value) {
if (BLOCKSIZE >= 32) value = op(value, simd_shuffle_down(value, 16));
if (BLOCKSIZE >= 16) value = op(value, simd_shuffle_down(value, 8));
if (BLOCKSIZE >= 8) value = op(value, simd_shuffle_down(value, 4));
if (BLOCKSIZE >= 4) value = op(value, simd_shuffle_down(value, 2));
if (BLOCKSIZE >= 2) value = op(value, simd_shuffle_down(value, 1));
return value;
}
};
template<typename T, typename OP, ushort BLOCKSIZE>
struct block_reducer {
simdgroup_reducer<OP, BLOCKSIZE, T> simd_reduce;
operation<OP, T> operate;
threadgroup T *shared;
block_reducer(threadgroup T shared[BLOCKSIZE]) {
this->shared = shared;
}
METAL_FUNC T operator()(T value, const uint tid) {
if (BLOCKSIZE >= 64) {
// Only store in threadgroup shared memory if needed.
shared[tid] = value;
// Threadgroup barrier is needed to ensure that all threads have written to shared memory
threadgroup_barrier(mem_flags::mem_none);
}
#pragma clang loop unroll(full)
for (ushort s = BLOCKSIZE / 2; s >= 64; s >>= 1) {
if (tid < s) shared[tid] = operate(shared[tid], shared[tid + s]);
threadgroup_barrier(mem_flags::mem_none);
}
if (tid < 32) {
// Last shared memory reduce can be done without tid < s check.
if (BLOCKSIZE >= 64) {
value = operate(shared[tid], shared[tid + 32]);
simdgroup_barrier(mem_flags::mem_none);
}
// Remaining 32 threads can be reduced with simdgroup_reduce.
value = simd_reduce(value);
}
return value;
}
};
// Inspired by "Optimizing Parallel Reduction in CUDA" by Mark Harris
template<
typename T,
typename R,
typename OP,
ushort BLOCKSIZE,
bool STRIDED = false
>
METAL_FUNC void reduce(
constant uint &src_numel,
constant uint &num_dims,
constant size_t *dims,
constant size_t *strides,
constant uint &el_per_block,
device const T *src,
device R *dst,
threadgroup R shared[BLOCKSIZE],
uint tid [[ thread_index_in_threadgroup ]],
uint dst_id [[ threadgroup_position_in_grid ]]
) {
loader<T, R, OP, BLOCKSIZE, STRIDED> load;
block_reducer<T, OP, BLOCKSIZE> reduce(shared);
// Calculate the offset for the threadgroup of the current thread
const uint offset = dst_id * el_per_block;
// Load from global memory, reducing into a per-thread partial value
auto value = load(
OP::init(),
src_numel,
num_dims,
dims,
strides,
el_per_block,
src,
offset,
tid
);
// Complete reduction
R result = reduce(value, tid);
if (tid == 0) dst[dst_id] = result;
}
#define reduce_case(OP, T, R, N) \
case N: { \
threadgroup R shared[N]; \
reduce<T, R, OP<R>, N, STRIDED>( \
src_numel, \
num_dims, \
dims, \
strides, \
el_per_block, \
src, \
dst, \
shared, \
tid, \
dst_id); \
break; \
}
#define ARG(...) __VA_ARGS__
#define impl_reduce_inner(OP, NAME, T) \
kernel void NAME( \
constant uint &src_numel, \
constant uint &num_dims, \
constant size_t *dims, \
constant uint &el_per_block, \
device const T *src, \
device T *dst, \
uint tid [[ thread_index_in_threadgroup ]], \
uint dst_id [[ threadgroup_position_in_grid ]], \
uint block_dim [[ threads_per_threadgroup ]] \
) { \
constant size_t *strides = {}; \
const bool STRIDED = false; \
switch (max_shared_mem<T>(block_dim)) { \
reduce_case(OP, ARG(T), ARG(T), 2048); \
reduce_case(OP, ARG(T), ARG(T), 1024); \
reduce_case(OP, ARG(T), ARG(T), 512); \
reduce_case(OP, ARG(T), ARG(T), 256); \
reduce_case(OP, ARG(T), ARG(T), 128); \
reduce_case(OP, ARG(T), ARG(T), 64); \
reduce_case(OP, ARG(T), ARG(T), 32); \
reduce_case(OP, ARG(T), ARG(T), 16); \
reduce_case(OP, ARG(T), ARG(T), 8); \
reduce_case(OP, ARG(T), ARG(T), 4); \
reduce_case(OP, ARG(T), ARG(T), 2); \
reduce_case(OP, ARG(T), ARG(T), 1); \
} \
}
#define impl_reduce_strided(OP, NAME, T) \
kernel void NAME##_strided( \
constant uint &src_numel, \
constant uint &num_dims, \
constant size_t *dims, \
constant size_t *strides, \
constant uint &el_per_block, \
device const T *src, \
device T *dst, \
uint tid [[ thread_index_in_threadgroup ]], \
uint dst_id [[ threadgroup_position_in_grid ]], \
uint block_dim [[ threads_per_threadgroup ]] \
) { \
const bool STRIDED = true; \
switch (max_shared_mem<T>(block_dim)) { \
reduce_case(OP, ARG(T), ARG(T), 2048); \
reduce_case(OP, ARG(T), ARG(T), 1024); \
reduce_case(OP, ARG(T), ARG(T), 512); \
reduce_case(OP, ARG(T), ARG(T), 256); \
reduce_case(OP, ARG(T), ARG(T), 128); \
reduce_case(OP, ARG(T), ARG(T), 64); \
reduce_case(OP, ARG(T), ARG(T), 32); \
reduce_case(OP, ARG(T), ARG(T), 16); \
reduce_case(OP, ARG(T), ARG(T), 8); \
reduce_case(OP, ARG(T), ARG(T), 4); \
reduce_case(OP, ARG(T), ARG(T), 2); \
reduce_case(OP, ARG(T), ARG(T), 1); \
} \
}
#define impl_reduce(OP, NAME, T) \
impl_reduce_inner(OP, NAME, T) \
impl_reduce_strided(OP, NAME, T) \
template<
typename T,
typename ReductionOp,
ushort BLOCKSIZE,
bool STRIDED = false
>
METAL_FUNC void reduce(
constant uint &src_numel,
constant uint &num_dims,
constant size_t *dims,
constant size_t *strides,
constant uint &el_per_block,
device const T *src,
device uint *dst,
threadgroup indexed<T> shared[BLOCKSIZE],
uint tid [[ thread_index_in_threadgroup ]],
uint dst_id [[ threadgroup_position_in_grid ]]
) {
using I = indexed<T>;
loader<T, indexed<T>, ReductionOp, BLOCKSIZE, STRIDED> load;
block_reducer<I, ReductionOp, BLOCKSIZE> reduce(shared);
// Calculate the offset for the threadgroup of the current thread
const uint offset = dst_id * el_per_block;
// Load from global memory, reducing into a per-thread partial value
indexed<T> value = load(
ReductionOp::init(),
src_numel,
num_dims,
dims,
strides,
el_per_block,
src,
offset,
tid
);
// Complete reduction
I result = reduce(value, tid);
// Return index of reduce result
if (tid == 0) dst[dst_id] = result.i;
}
#define arg_reduce_case(OP, T, N) \
case N: { \
using I = indexed<T>; \
threadgroup I shared[N]; \
reduce<T, OP<I>, N, STRIDED>( \
src_numel, \
num_dims, \
dims, \
strides, \
el_per_block, \
src, \
dst, \
shared, \
tid, \
dst_id); \
break; \
}
#define impl_arg_reduce_inner(OP, NAME, T) \
kernel void NAME( \
constant uint &src_numel, \
constant uint &num_dims, \
constant size_t *dims, \
constant uint &el_per_block, \
device const T *src, \
device uint *dst, \
uint tid [[ thread_index_in_threadgroup ]], \
uint dst_id [[ threadgroup_position_in_grid ]], \
uint block_dim [[ threads_per_threadgroup ]] \
) { \
constant size_t *strides = {}; \
const bool STRIDED = false; \
switch (max_shared_mem<indexed<T>>(block_dim)) { \
arg_reduce_case(OP, ARG(T), 1024); \
arg_reduce_case(OP, ARG(T), 512); \
arg_reduce_case(OP, ARG(T), 256); \
arg_reduce_case(OP, ARG(T), 128); \
arg_reduce_case(OP, ARG(T), 64); \
arg_reduce_case(OP, ARG(T), 32); \
arg_reduce_case(OP, ARG(T), 16); \
arg_reduce_case(OP, ARG(T), 8); \
arg_reduce_case(OP, ARG(T), 4); \
arg_reduce_case(OP, ARG(T), 2); \
arg_reduce_case(OP, ARG(T), 1); \
} \
} \
#define impl_arg_reduce_strided(OP, NAME, T) \
kernel void NAME##_strided( \
constant uint &src_numel, \
constant uint &num_dims, \
constant size_t *dims, \
constant size_t *strides, \
constant uint &el_per_block, \
device const T *src, \
device uint *dst, \
uint tid [[ thread_index_in_threadgroup ]], \
uint dst_id [[ threadgroup_position_in_grid ]], \
uint block_dim [[ threads_per_threadgroup ]] \
) { \
const bool STRIDED = true; \
const bool INDEXED = true; \
switch (max_shared_mem<indexed<T>>(block_dim)) { \
arg_reduce_case(OP, ARG(T), 1024); \
arg_reduce_case(OP, ARG(T), 512); \
arg_reduce_case(OP, ARG(T), 256); \
arg_reduce_case(OP, ARG(T), 128); \
arg_reduce_case(OP, ARG(T), 64); \
arg_reduce_case(OP, ARG(T), 32); \
arg_reduce_case(OP, ARG(T), 16); \
arg_reduce_case(OP, ARG(T), 8); \
arg_reduce_case(OP, ARG(T), 4); \
arg_reduce_case(OP, ARG(T), 2); \
arg_reduce_case(OP, ARG(T), 1); \
} \
}
#define impl_arg_reduce(OP, NAME, T) \
impl_arg_reduce_inner(OP, NAME, T) \
impl_arg_reduce_strided(OP, NAME, T) \
// Contains the intermediate results for the online softmax calculation.
// m: max
// d: sum of the exponentials
template <typename T>
struct MD {
T m;
float d;
constexpr MD<T>() = default;
constexpr MD<T>() threadgroup = default;
};
// Enable operations for softmax MD
template<typename OP, typename T>
struct operation<OP, MD<T>> {
OP op;
METAL_FUNC MD<T> operator()(MD<T> a, MD<T> b) {
return op(a, b);
}
METAL_FUNC MD<T> operator()(MD<T> a, T b) {
return this->operator()(a, MD<T>{ b, static_cast<T>(1.0) });
}
};
template <typename T>
METAL_FUNC MD<T> simd_shuffle_down(MD<T> md, ushort delta) {
return MD<T> {
simd_shuffle_down(md.m, delta),
simd_shuffle_down(md.d, delta)
};
}
// Enable simd_shuffle_down for softmax MD
template <typename T>
struct is_valid_simd_type<MD<T>, typename metal::enable_if_t<is_valid_simd_t<T>>> {
static constant constexpr bool value = true;
};
template<typename T>
struct MDReduceOp {
Exp fast_exp;
static constexpr METAL_FUNC MD<T> init() {
return MD<T>{ numeric_limits<T>::lowest(), 0 };
}
METAL_FUNC MD<T> operator()(MD<T> a, MD<T> b) {
bool a_bigger = a.m > b.m;
MD<T> bigger_m = a_bigger ? a : b;
MD<T> smaller_m = a_bigger ? b : a;
MD<T> res;
res.d = bigger_m.d + smaller_m.d * fast_exp(smaller_m.m - bigger_m.m);
res.m = bigger_m.m;
return res;
}
};
template<typename T, ushort BLOCKSIZE>
struct finalize_softmax {
Divide fast_divide;
Exp fast_exp;
METAL_FUNC void operator()(
device const T *src,
device T *dst,
threadgroup MD<T> &md_total,
const uint thread_id,
const uint stop_idx
) {
const float d_total_inverse = fast_divide(1.0, md_total.d);
for (uint idx = thread_id; idx < stop_idx; idx += BLOCKSIZE) {
dst[idx] = static_cast<T>(fast_exp(src[idx] - md_total.m) * d_total_inverse);
}
}
};
// Welford's algorithm approach for an online softmax implementation.
// Same as the Online normalizer calculation for softmax: https://arxiv.org/pdf/1805.02867.pdf
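// When two partial states (m_a, d_a) and (m_b, d_b) are merged, the running
// maximum becomes m = max(m_a, m_b) and the denominator is rescaled as
// d = d_a * exp(m_a - m) + d_b * exp(m_b - m); MDReduceOp only rescales the
// term whose maximum is smaller, since exp(0) = 1 for the other one.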
template<typename T, ushort BLOCKSIZE>
METAL_FUNC void softmax(
constant uint &src_numel,
constant uint &el_per_block,
device const T *src,
device T *dst,
threadgroup MD<T> shared[BLOCKSIZE],
threadgroup MD<T> &md_total,
uint tid [[ thread_index_in_threadgroup ]],
uint dst_id [[ threadgroup_position_in_grid ]]
) {
using MDReduceOp = MDReduceOp<T>;
loader<T, MD<T>, MDReduceOp, BLOCKSIZE> load;
block_reducer<MD<T>, MDReduceOp, BLOCKSIZE> reduce(shared);
finalize_softmax<T, BLOCKSIZE> softmax_finalize;
    // Calculate the offset for the current thread's threadgroup
const uint offset = dst_id * el_per_block;
// Calculate partial result for current thread
MD<T> md_partial = MD<T> { numeric_limits<T>::lowest(), 0 };
md_partial = load(
md_partial,
src_numel,
el_per_block,
src,
offset,
tid
);
// Reduce in shared memory
MD<T> md = reduce(md_partial, tid);
if (tid == 0) md_total = md;
threadgroup_barrier(mem_flags::mem_none);
// Finalize softmax
const uint thread_id = tid + offset;
const uint stop_idx = min(el_per_block + offset, src_numel);
softmax_finalize(src, dst, md_total, thread_id, stop_idx);
}
#define softmax_case(T, N) \
case N: { \
threadgroup MD<T> shared[N]; \
threadgroup MD<T> md_total; \
softmax<T, N>( \
src_numel, \
el_per_block, \
src, \
dst, \
shared, \
md_total, \
tid, \
dst_id); \
break; \
}
#define impl_softmax(NAME, T) \
kernel void NAME( \
constant uint &src_numel, \
constant uint &el_per_block, \
device const T *src, \
device T *dst, \
uint tid [[ thread_index_in_threadgroup ]], \
uint dst_id [[ threadgroup_position_in_grid ]], \
uint block_dim [[ threads_per_threadgroup ]] \
) { \
switch (max_shared_mem<T>(block_dim)) { \
softmax_case(T, 1024); \
softmax_case(T, 512); \
softmax_case(T, 256); \
softmax_case(T, 128); \
softmax_case(T, 64); \
softmax_case(T, 32); \
softmax_case(T, 16); \
softmax_case(T, 8); \
softmax_case(T, 4); \
softmax_case(T, 2); \
softmax_case(T, 1); \
} \
}
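// RMS normalization over each block of `el_to_sum_per_block` elements:
// y = x / sqrt(mean(x^2) + eps), optionally scaled element-wise by `alpha`.
// The per-thread sum of squares is accumulated first and then tree-reduced
// in threadgroup shared memory.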
template<typename T>
METAL_FUNC void rmsnorm(
constant size_t & src_numel,
constant size_t & el_to_sum_per_block,
device const T * src,
device T * dst,
device const T * alpha,
constant float & eps,
uint id,
uint tid,
uint dst_id,
uint block_dim,
threadgroup float * shared_memory
) {
size_t start_idx = dst_id * el_to_sum_per_block;
size_t stop_idx = min(start_idx + el_to_sum_per_block, src_numel);
size_t idx = start_idx + tid;
float tmp = 0;
while (idx < stop_idx) {
tmp = tmp + float(src[idx]) * float(src[idx]);
idx += block_dim;
}
shared_memory[tid] = tmp;
threadgroup_barrier(mem_flags::mem_threadgroup);
for (uint s = block_dim / 2; s > 0; s >>= 1) {
if (tid < s) {
shared_memory[tid] = shared_memory[tid] + shared_memory[tid + s];
}
threadgroup_barrier(mem_flags::mem_threadgroup);
}
/* wait for shared_memory[0] to be filled */
threadgroup_barrier(mem_flags::mem_threadgroup);
float norm = sqrt(shared_memory[0] / float(el_to_sum_per_block) + eps);
float inv_norm = 1.0f / norm;
idx = start_idx + tid;
while (idx < stop_idx) {
float val = float(src[idx]) * inv_norm;
if (alpha != nullptr) {
val *= float(alpha[idx - start_idx]);
}
dst[idx] = T(val);
idx += block_dim;
}
}
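// Layer normalization over each block of `el_to_sum_per_block` elements:
// y = (x - mean) / sqrt(var + eps) * alpha + beta. Both the sum and the sum of
// squares are reduced in shared memory (the upper half of the buffer holds the
// squared terms), so the shared allocation must hold at least 2 * block_dim floats.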
template<typename T>
METAL_FUNC void layernorm(
constant size_t & src_numel,
constant size_t & el_to_sum_per_block,
device const T * src,
device T * dst,
device const T * alpha,
device const T * beta,
constant float & eps,
uint id,
uint tid,
uint dst_id,
uint block_dim,
threadgroup float * shared_memory
) {
size_t start_idx = dst_id * el_to_sum_per_block;
size_t stop_idx = min(start_idx + el_to_sum_per_block, src_numel);
size_t idx = start_idx + tid;
float tmp1 = 0;
float tmp2 = 0;
while (idx < stop_idx) {
tmp1 += float(src[idx]);
tmp2 += float(src[idx]) * float(src[idx]);
idx += block_dim;
}
shared_memory[tid] = tmp1;
shared_memory[tid + block_dim] = tmp2;
threadgroup_barrier(mem_flags::mem_threadgroup);
for (uint s = block_dim / 2; s > 0; s >>= 1) {
if (tid < s) {
shared_memory[tid] = shared_memory[tid] + shared_memory[tid + s];
shared_memory[block_dim + tid] = shared_memory[block_dim + tid] + shared_memory[block_dim + tid + s];
}
threadgroup_barrier(mem_flags::mem_threadgroup);
}
/* wait for shared_memory[0] to be filled */
threadgroup_barrier(mem_flags::mem_threadgroup);
float mean = shared_memory[0] / float(el_to_sum_per_block);
float var = shared_memory[block_dim] / float(el_to_sum_per_block) - mean * mean;
float inv_norm = 1.0f / sqrt(var + eps);
idx = start_idx + tid;
while (idx < stop_idx) {
float val = (float(src[idx]) - mean) * inv_norm;
if (alpha != nullptr) {
val *= float(alpha[idx - start_idx]);
}
if (beta != nullptr) {
val += float(beta[idx - start_idx]);
}
dst[idx] = T(val);
idx += block_dim;
}
}
constant int THREADGROUP_SIZE = 2048;
#define RMSNORM(NAME, T) \
kernel void NAME( \
constant size_t &src_numel, \
constant size_t &el_to_sum_per_block, \
device const T *src, \
device T *dst, \
device const T *alpha, \
constant float &eps, \
uint id [[ thread_position_in_grid ]], \
uint tid [[ thread_index_in_threadgroup ]], \
uint dst_id [[ threadgroup_position_in_grid ]], \
uint block_dim [[ threads_per_threadgroup ]] \
) { \
threadgroup float shared_memory[THREADGROUP_SIZE]; \
shared_memory[tid] = 0; \
rmsnorm<T>(src_numel, el_to_sum_per_block, src, dst, alpha, eps, id, tid, dst_id, block_dim, shared_memory); \
} \
#define LAYERNORM(NAME, T) \
kernel void NAME( \
constant size_t &src_numel, \
constant size_t &el_to_sum_per_block, \
device const T *src, \
device T *dst, \
device const T *alpha, \
device const T *beta, \
constant float &eps, \
uint id [[ thread_position_in_grid ]], \
uint tid [[ thread_index_in_threadgroup ]], \
uint dst_id [[ threadgroup_position_in_grid ]], \
uint block_dim [[ threads_per_threadgroup ]] \
) { \
threadgroup float shared_memory[THREADGROUP_SIZE]; \
shared_memory[tid] = 0; \
layernorm<T>(src_numel, el_to_sum_per_block, src, dst, alpha, beta, eps, id, tid, dst_id, block_dim, shared_memory); \
} \
template<typename T>
METAL_FUNC void ropei(
constant size_t &bh,
constant size_t &td,
device const T *src,
device const T *cos,
device const T *sin,
device T *dst,
uint tid
) {
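    // Interleaved RoPE: each consecutive pair (src[2*tid], src[2*tid + 1]) is
    // rotated by the angle stored at `rope_idx` in the cos/sin tables.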
if (2 * tid >= bh * td) {
return;
}
size_t rope_idx = tid % (td / 2);
T c = cos[rope_idx];
T s = sin[rope_idx];
dst[2 * tid] = src[2 * tid] * c - src[2 * tid + 1] * s;
dst[2 * tid + 1] = src[2 * tid] * s + src[2 * tid + 1] * c;
}
template<typename T>
METAL_FUNC void rope(
constant size_t &bh,
constant size_t &td,
constant size_t &d,
device const T *src,
device const T *cos,
device const T *sin,
device T *dst,
uint idx
) {
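    // Non-interleaved RoPE: the rotated pair is (src[i1], src[i1 + d/2]), i.e.
    // the two halves of each head dimension, with the cos/sin angle indexed by
    // the position in the sequence and the dimension index.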
if (2 * idx >= bh * td) {
return;
}
size_t i_bh = idx / (td / 2);
size_t i_td = idx - (td / 2) * i_bh;
size_t i_t = i_td / (d / 2);
size_t i_d = i_td - (d / 2) * i_t;
size_t i1 = i_bh * td + i_t * d + i_d;
size_t i2 = i1 + d / 2;
size_t i_cs = i_t * (d / 2) + i_d;
T c = cos[i_cs];
T s = sin[i_cs];
dst[i1] = src[i1] * c - src[i2] * s;
dst[i2] = src[i1] * s + src[i2] * c;
}
template<typename T>
METAL_FUNC void rope_thd(
constant size_t &b,
constant size_t &t,
constant size_t &h,
constant size_t &d,
device const T *src,
device const T *cos,
device const T *sin,
device T *dst,
uint idx
) {
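    // Same rotation as `rope` but for tensors laid out as (b, t, h, d), so the
    // cos/sin index is derived from the time step of the flattened (b, t, h) index.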
if (2 * idx >= b * t * h * d) {
return;
}
const size_t i_bth = idx / (d / 2);
const size_t i_d = idx - (d / 2) * i_bth;
const size_t i_t = (i_bth / h) % t;
const size_t i1 = i_bth * d + i_d;
const size_t i2 = i1 + d / 2;
const size_t i_cs = i_t * (d / 2) + i_d;
T c = cos[i_cs];
T s = sin[i_cs];
dst[i1] = src[i1] * c - src[i2] * s;
dst[i2] = src[i1] * s + src[i2] * c;
}
#define ROPE(FN_NAME, FN_NAME_I, FN_NAME_THD, TYPENAME) \
kernel void FN_NAME_I( \
constant size_t &bh, \
constant size_t &td, \
device const TYPENAME *src, \
device const TYPENAME *cos, \
device const TYPENAME *sin, \
device TYPENAME *dst, \
uint tid [[ thread_position_in_grid ]] \
) { \
ropei<TYPENAME>(bh, td, src, cos, sin, dst, tid); \
}\
kernel void FN_NAME( \
constant size_t &bh, \
constant size_t &td, \
constant size_t &d, \
device const TYPENAME *src, \
device const TYPENAME *cos, \
device const TYPENAME *sin, \
device TYPENAME *dst, \
uint idx [[ thread_position_in_grid ]] \
) { \
rope<TYPENAME>(bh, td, d, src, cos, sin, dst, idx); \
}\
kernel void FN_NAME_THD( \
constant size_t &b, \
constant size_t &t, \
constant size_t &h, \
constant size_t &d, \
device const TYPENAME *src, \
device const TYPENAME *cos, \
device const TYPENAME *sin, \
device TYPENAME *dst, \
uint idx [[ thread_position_in_grid ]] \
) { \
rope_thd<TYPENAME>(b, t, h, d, src, cos, sin, dst, idx); \
}\
RMSNORM(rmsnorm_f32, float)
RMSNORM(rmsnorm_f16, half)
LAYERNORM(layernorm_f32, float)
LAYERNORM(layernorm_f16, half)
ROPE(rope_f32, rope_i_f32, rope_thd_f32, float)
ROPE(rope_f16, rope_i_f16, rope_thd_f16, half)
impl_reduce(Sum, fast_sum_f32, float)
impl_reduce(Sum, fast_sum_u32, uint)
impl_reduce(Sum, fast_sum_f16, half)
impl_reduce(Sum, fast_sum_u8, uint8_t)
impl_reduce(Mul, fast_mul_f32, float)
impl_reduce(Mul, fast_mul_u32, uint)
impl_reduce(Mul, fast_mul_f16, half)
impl_reduce(Mul, fast_mul_u8, uint8_t)
impl_reduce(Max, fast_max_f32, float)
impl_reduce(Max, fast_max_u32, uint)
impl_reduce(Max, fast_max_f16, half)
impl_reduce(Max, fast_max_u8, uint8_t)
impl_reduce(Min, fast_min_f32, float)
impl_reduce(Min, fast_min_u32, uint)
impl_reduce(Min, fast_min_f16, half)
impl_reduce(Min, fast_min_u8, uint8_t)
impl_arg_reduce(Min, fast_argmin_f32, float)
impl_arg_reduce(Min, fast_argmin_f16, half)
impl_arg_reduce(Min, fast_argmin_u32, uint)
impl_arg_reduce(Min, fast_argmin_u8, uint8_t)
impl_arg_reduce(Max, fast_argmax_f32, float)
impl_arg_reduce(Max, fast_argmax_f16, half)
impl_arg_reduce(Max, fast_argmax_u32, uint)
impl_arg_reduce(Max, fast_argmax_u8, uint8_t)
impl_softmax(softmax_f32, float)
impl_softmax(softmax_f16, half)
#if __METAL_VERSION__ >= 220
impl_reduce(Sum, fast_sum_i64, int64_t)
impl_reduce(Mul, fast_mul_i64, int64_t)
impl_reduce(Min, fast_min_i64, int64_t)
impl_reduce(Max, fast_max_i64, int64_t)
impl_arg_reduce(Min, fast_argmin_i64, int64_t)
impl_arg_reduce(Max, fast_argmax_i64, int64_t)
#endif
#if defined(__HAVE_BFLOAT__)
impl_reduce(Sum, fast_sum_bf16, bfloat)
impl_reduce(Mul, fast_mul_bf16, bfloat)
impl_reduce(Max, fast_max_bf16, bfloat)
impl_reduce(Min, fast_min_bf16, bfloat)
impl_arg_reduce(Min, fast_argmin_bf16, bfloat)
impl_arg_reduce(Max, fast_argmax_bf16, bfloat)
impl_softmax(softmax_bf16, bfloat)
RMSNORM(rmsnorm_bf16, bfloat)
LAYERNORM(layernorm_bf16, bfloat)
ROPE(rope_bf16, rope_i_bf16, rope_thd_bf16, bfloat)
#endif
| candle/candle-metal-kernels/src/reduce.metal/0 | {
"file_path": "candle/candle-metal-kernels/src/reduce.metal",
"repo_id": "candle",
"token_count": 21154
} |
use crate::benchmarks::{BenchDevice, BenchDeviceHandler};
use candle::{DType, Device, Module, Tensor};
use candle_nn::{Conv2d, Conv2dConfig};
use criterion::{black_box, criterion_group, Criterion};
use std::time::Instant;
const B: usize = 1;
const C: usize = 1;
const M: usize = 128;
const K: usize = 128;
const K_SIZE: usize = 3;
fn run(input: Tensor, weight: Tensor, bias: Tensor, config: Conv2dConfig) {
Conv2d::new(weight, Some(bias), config)
.forward(&input)
.unwrap();
}
fn run_conv2d_benchmark(c: &mut Criterion, device: &Device, dtype: DType, name: &str) {
let weight = Tensor::ones((1, 1, K_SIZE, K_SIZE), dtype, device)
.unwrap()
.to_dtype(dtype)
.unwrap();
let bias = Tensor::zeros(K, dtype, device).unwrap();
let input = Tensor::ones((B, C, M, K), dtype, device).unwrap();
let mut group = c.benchmark_group(device.bench_name(name));
group.bench_function("iter", move |b| {
b.iter_custom(|iters| {
let start = Instant::now();
for _i in 0..iters {
run(
black_box(input.clone()),
black_box(weight.clone()),
black_box(bias.clone()),
Default::default(),
);
}
device.sync().unwrap();
start.elapsed()
})
});
group.finish();
}
fn criterion_benchmark(c: &mut Criterion) {
let device = BenchDeviceHandler::new().unwrap();
for d in device.devices {
run_conv2d_benchmark(c, &d, DType::F32, "conv2d_f32");
run_conv2d_benchmark(c, &d, DType::F16, "conv2d_f16");
}
}
criterion_group!(benches, criterion_benchmark);
| candle/candle-nn/benches/benchmarks/conv.rs/0 | {
"file_path": "candle/candle-nn/benches/benchmarks/conv.rs",
"repo_id": "candle",
"token_count": 808
} |
//! candle-nn
//!
//! ## Other Crates
//!
//! Candle consists of a number of crates. This crate holds structs and functions
//! that allow you to build and train neural nets. You may wish
//! to look at the docs for the other crates which can be found here:
//!
//! - [candle-core](https://docs.rs/candle-core/). Core Datastructures and DataTypes.
//! - [candle-nn](https://docs.rs/candle-nn/). Building blocks for Neural Nets.
//! - [candle-datasets](https://docs.rs/candle-datasets/). Rust access to commonly used Datasets like MNIST.
//! - [candle-examples](https://docs.rs/candle-examples/). Examples of Candle in Use.
//! - [candle-onnx](https://docs.rs/candle-onnx/). Loading and using ONNX models.
//! - [candle-pyo3](https://docs.rs/candle-pyo3/). Access to Candle from Python.
//! - [candle-transformers](https://docs.rs/candle-transformers/). Candle implementation of many published transformer models.
//!
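//! ## Example
//!
//! A minimal sketch of building and running a linear layer from a fresh
//! `VarMap` (illustrative only; error handling and device selection are kept
//! to the bare minimum):
//!
//! ```ignore
//! use candle::{DType, Device, Tensor};
//! use candle_nn::{linear, Module, VarBuilder, VarMap};
//!
//! fn run() -> candle::Result<()> {
//!     let dev = Device::Cpu;
//!     let varmap = VarMap::new();
//!     let vb = VarBuilder::from_varmap(&varmap, DType::F32, &dev);
//!     // A 4 -> 2 linear layer with freshly initialized weights.
//!     let layer = linear(4, 2, vb.pp("lin"))?;
//!     let xs = Tensor::zeros((1, 4), DType::F32, &dev)?;
//!     let ys = layer.forward(&xs)?;
//!     println!("{ys}");
//!     Ok(())
//! }
//! ```
//!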
pub mod activation;
pub mod batch_norm;
pub mod conv;
pub mod embedding;
pub mod encoding;
pub mod func;
pub mod group_norm;
pub mod init;
pub mod kv_cache;
pub mod layer_norm;
pub mod linear;
pub mod loss;
pub mod ops;
pub mod optim;
pub mod rnn;
pub mod rotary_emb;
pub mod sequential;
pub mod var_builder;
pub mod var_map;
pub use activation::{prelu, Activation, PReLU};
pub use batch_norm::{batch_norm, BatchNorm, BatchNormConfig};
pub use conv::{
conv1d, conv1d_no_bias, conv2d, conv2d_no_bias, conv_transpose1d, conv_transpose1d_no_bias,
conv_transpose2d, conv_transpose2d_no_bias, Conv1d, Conv1dConfig, Conv2d, Conv2dConfig,
ConvTranspose1d, ConvTranspose1dConfig, ConvTranspose2d, ConvTranspose2dConfig,
};
pub use embedding::{embedding, Embedding};
pub use func::{func, func_t, Func, FuncT};
pub use group_norm::{group_norm, GroupNorm};
pub use init::Init;
pub use layer_norm::{
layer_norm, layer_norm_no_bias, rms_norm, LayerNorm, LayerNormConfig, RmsNorm,
};
pub use linear::{linear, linear_b, linear_no_bias, Linear};
pub use ops::Dropout;
pub use optim::{AdamW, Optimizer, ParamsAdamW, SGD};
pub use rnn::{gru, lstm, GRUConfig, LSTMConfig, GRU, LSTM, RNN};
pub use sequential::{seq, Sequential};
pub use var_builder::VarBuilder;
pub use var_map::VarMap;
pub use candle::{Module, ModuleT};
| candle/candle-nn/src/lib.rs/0 | {
"file_path": "candle/candle-nn/src/lib.rs",
"repo_id": "candle",
"token_count": 807
} |
#[cfg(feature = "mkl")]
extern crate intel_mkl_src;
#[cfg(feature = "accelerate")]
extern crate accelerate_src;
use candle::{test_device, test_utils::to_vec3_round, Device, Result, Tensor};
fn softmax(device: &Device) -> Result<()> {
let data = &[[[3f32, 1., 4.], [1., 5., 9.]], [[2., 1., 7.], [8., 2., 8.]]];
let tensor = Tensor::new(data, device)?;
let t0 = candle_nn::ops::softmax(&tensor.log()?, 0)?;
let t1 = candle_nn::ops::softmax(&tensor.log()?, 1)?;
let t2 = candle_nn::ops::softmax(&tensor.log()?, 2)?;
assert_eq!(
to_vec3_round(&t0, 4)?,
&[
// 3/5, 1/2, 4/11
[[0.6, 0.5, 0.3636], [0.1111, 0.7143, 0.5294]],
// 2/5, 1/2, 7/11
[[0.4, 0.5, 0.6364], [0.8889, 0.2857, 0.4706]]
]
);
assert_eq!(
to_vec3_round(&t1, 4)?,
&[
// 3/4, 1/6, 4/13
[[0.75, 0.1667, 0.3077], [0.25, 0.8333, 0.6923]],
// 2/10, 1/3, 7/15
[[0.2, 0.3333, 0.4667], [0.8, 0.6667, 0.5333]]
]
);
assert_eq!(
to_vec3_round(&t2, 4)?,
&[
// (3, 1, 4) / 8, (1, 5, 9) / 15
[[0.375, 0.125, 0.5], [0.0667, 0.3333, 0.6]],
// (2, 1, 7) / 10, (8, 2, 8) / 18
[[0.2, 0.1, 0.7], [0.4444, 0.1111, 0.4444]]
]
);
let t2 = candle_nn::ops::softmax_last_dim(&tensor.log()?)?;
assert_eq!(
to_vec3_round(&t2, 4)?,
&[
// (3, 1, 4) / 8, (1, 5, 9) / 15
[[0.375, 0.125, 0.5], [0.0667, 0.3333, 0.6]],
// (2, 1, 7) / 10, (8, 2, 8) / 18
[[0.2, 0.1, 0.7], [0.4444, 0.1111, 0.4444]]
]
);
Ok(())
}
fn rms_norm(device: &Device) -> Result<()> {
let data = &[[[3f32, 1., 4.], [1., 5., 9.]], [[2., 1., 7.], [8., 2., 8.]]];
let tensor = Tensor::new(data, device)?;
let alpha = Tensor::new(&[1f32, 2f32, 3f32], device)?;
let t = candle_nn::ops::rms_norm(&tensor, &alpha, 1e-5)?;
assert_eq!(
to_vec3_round(&t, 4)?,
&[
[[1.019, 0.6794, 4.0762], [0.1674, 1.6744, 4.521]],
[[0.4714, 0.4714, 4.9497], [1.206, 0.603, 3.6181]]
]
);
let t2 = candle_nn::ops::rms_norm_slow(&tensor, &alpha, 1e-5)?;
assert_eq!(
to_vec3_round(&t2, 4)?,
&[
[[1.019, 0.6794, 4.0762], [0.1674, 1.6744, 4.521]],
[[0.4714, 0.4714, 4.9497], [1.206, 0.603, 3.6181]]
]
);
let diff = (t - t2)?.abs()?.sum_all()?.to_vec0::<f32>()?;
assert!(diff < 1e-5);
Ok(())
}
fn rms_norml(device: &Device) -> Result<()> {
use rand::{rngs::StdRng, Rng, SeedableRng};
let (b_size, seq_len, head_dim) = (24, 70, 64);
let el_count = b_size * seq_len * head_dim;
let mut rng = StdRng::seed_from_u64(299792458);
let src: Vec<f32> = (0..el_count).map(|_| rng.gen::<f32>()).collect();
let tensor = Tensor::new(src, device)?.reshape((b_size, seq_len, head_dim))?;
let alpha = Tensor::ones(head_dim, candle::DType::F32, device)?;
let t = candle_nn::ops::rms_norm(&tensor, &alpha, 1e-5)?;
let t2 = candle_nn::ops::rms_norm_slow(&tensor, &alpha, 1e-5)?;
let diff = (t - t2)?
.abs()?
.flatten_all()?
.max(0)?
.reshape(())?
.to_vec0::<f32>()?;
assert!(diff < 1e-5);
Ok(())
}
fn layer_norm(device: &Device) -> Result<()> {
let data = &[[[3f32, 1., 4.], [1., 5., 9.]], [[2., 1., 7.], [8., 2., 8.]]];
let tensor = Tensor::new(data, device)?;
let alpha = Tensor::new(&[1f32, 2f32, 3f32], device)?;
let beta = Tensor::new(&[0.5f32, 0f32, -0.2f32], device)?;
let t = candle_nn::ops::layer_norm(&tensor, &alpha, &beta, 1e-5)?;
assert_eq!(
to_vec3_round(&t, 4)?,
&[
[[0.7673, -2.6726, 3.0071], [-0.7247, 0.0, 3.4742]],
[[-0.008, -1.778, 3.991], [1.2071, -2.8284, 1.9213]]
]
);
let t2 = candle_nn::ops::layer_norm_slow(&tensor, &alpha, &beta, 1e-5)?;
assert_eq!(
to_vec3_round(&t2, 4)?,
&[
[[0.7673, -2.6726, 3.0071], [-0.7247, 0.0, 3.4742]],
[[-0.008, -1.778, 3.991], [1.2071, -2.8284, 1.9213]]
]
);
let diff = (t - t2)?.abs()?.sum_all()?.to_vec0::<f32>()?;
assert!(diff < 1e-5);
Ok(())
}
fn layer_norml(device: &Device) -> Result<()> {
use rand::{rngs::StdRng, Rng, SeedableRng};
let (b_size, seq_len, head_dim) = (24, 70, 64);
let el_count = b_size * seq_len * head_dim;
let mut rng = StdRng::seed_from_u64(299792458);
let src: Vec<f32> = (0..el_count).map(|_| rng.gen::<f32>()).collect();
let tensor = Tensor::new(src, device)?.reshape((b_size, seq_len, head_dim))?;
let alpha = Tensor::ones(head_dim, candle::DType::F32, device)?;
let beta = Tensor::zeros(head_dim, candle::DType::F32, device)?;
let t = candle_nn::ops::layer_norm(&tensor, &alpha, &beta, 1e-5)?;
let t2 = candle_nn::ops::layer_norm_slow(&tensor, &alpha, &beta, 1e-5)?;
let diff = (t - t2)?
.abs()?
.flatten_all()?
.max(0)?
.reshape(())?
.to_vec0::<f32>()?;
assert!(diff < 1e-5);
Ok(())
}
#[test]
fn softmax_numerical_stability() -> Result<()> {
let dev = &Device::Cpu;
let xs = Tensor::new(&[1234f32, 0.], dev)?;
let softmax = candle_nn::ops::softmax(&xs, 0)?;
assert_eq!(softmax.to_vec1::<f32>()?, &[1f32, 0.]);
Ok(())
}
fn ropei(device: &Device) -> Result<()> {
use rand::{rngs::StdRng, Rng, SeedableRng};
let (b_size, num_head, seq_len, head_dim) = (2, 5, 10, 16);
let el_count = b_size * num_head * seq_len * head_dim;
let mut rng = StdRng::seed_from_u64(299792458);
let src: Vec<f32> = (0..el_count).map(|_| rng.gen::<f32>()).collect();
let cos: Vec<f32> = (0..seq_len * head_dim / 2)
.map(|_| rng.gen::<f32>())
.collect();
let sin: Vec<f32> = (0..seq_len * head_dim / 2)
.map(|_| rng.gen::<f32>())
.collect();
let src = Tensor::from_vec(src, (b_size, num_head, seq_len, head_dim), device)?;
let cos = Tensor::from_vec(cos, (seq_len, head_dim / 2), device)?;
let sin = Tensor::from_vec(sin, (seq_len, head_dim / 2), device)?;
let rope1 = candle_nn::rotary_emb::rope_i(&src, &cos, &sin)?;
let rope2 = candle_nn::rotary_emb::rope_i_slow(&src, &cos, &sin)?;
let sum_diff = (rope1 - rope2)?.abs()?.sum_all()?.to_vec0::<f32>()?;
if device.is_cpu() {
assert_eq!(sum_diff, 0.);
} else {
assert!(sum_diff < 1e-4);
}
Ok(())
}
fn rope(device: &Device) -> Result<()> {
use rand::{rngs::StdRng, Rng, SeedableRng};
let (b_size, num_head, seq_len, head_dim) = (2, 5, 10, 16);
let el_count = b_size * num_head * seq_len * head_dim;
let mut rng = StdRng::seed_from_u64(299792458);
let src: Vec<f32> = (0..el_count).map(|_| rng.gen::<f32>()).collect();
let cos: Vec<f32> = (0..seq_len * head_dim / 2)
.map(|_| rng.gen::<f32>())
.collect();
let sin: Vec<f32> = (0..seq_len * head_dim / 2)
.map(|_| rng.gen::<f32>())
.collect();
let src = Tensor::from_vec(src, (b_size, num_head, seq_len, head_dim), device)?;
let cos = Tensor::from_vec(cos, (seq_len, head_dim / 2), device)?;
let sin = Tensor::from_vec(sin, (seq_len, head_dim / 2), device)?;
let rope1 = candle_nn::rotary_emb::rope(&src, &cos, &sin)?;
let rope2 = candle_nn::rotary_emb::rope_slow(&src, &cos, &sin)?;
let sum_diff = (rope1 - rope2)?.abs()?.sum_all()?.to_vec0::<f32>()?;
if device.is_cpu() {
assert_eq!(sum_diff, 0.);
} else {
assert!(sum_diff < 1e-4);
}
Ok(())
}
fn rope_thd(device: &Device) -> Result<()> {
use rand::{rngs::StdRng, Rng, SeedableRng};
let (b_size, num_head, seq_len, head_dim) = (2, 5, 10, 16);
let el_count = b_size * num_head * seq_len * head_dim;
let mut rng = StdRng::seed_from_u64(299792458);
let src: Vec<f32> = (0..el_count).map(|_| rng.gen::<f32>()).collect();
let cos: Vec<f32> = (0..seq_len * head_dim / 2)
.map(|_| rng.gen::<f32>())
.collect();
let sin: Vec<f32> = (0..seq_len * head_dim / 2)
.map(|_| rng.gen::<f32>())
.collect();
let src = Tensor::from_vec(src, (b_size, num_head, seq_len, head_dim), device)?;
let cos = Tensor::from_vec(cos, (seq_len, head_dim / 2), device)?;
let sin = Tensor::from_vec(sin, (seq_len, head_dim / 2), device)?;
let rope1 = {
let src = src.transpose(1, 2)?.contiguous()?;
candle_nn::rotary_emb::rope_thd(&src, &cos, &sin)?.transpose(1, 2)?
};
let rope2 = candle_nn::rotary_emb::rope_slow(&src, &cos, &sin)?;
let sum_diff = (rope1 - rope2)?.abs()?.sum_all()?.to_vec0::<f32>()?;
if device.is_cpu() {
assert_eq!(sum_diff, 0.);
} else {
assert!(sum_diff < 1e-4);
}
Ok(())
}
fn sigmoid(device: &Device) -> Result<()> {
let data = &[[[3f32, 1., 4.], [1., 5., 9.]], [[2., 1., 7.], [8., 2., 8.]]];
let tensor = Tensor::new(data, device)?;
let s1 = candle_nn::ops::sigmoid(&tensor)?;
let s2 = (1. / (1. + tensor.neg()?.exp()?)?)?;
let diff = (s1 - s2)?.abs()?.sum_all()?.to_vec0::<f32>()?;
assert_eq!(diff, 0.);
Ok(())
}
test_device!(ropei, ropei_cpu, ropei_gpu, ropei_metal);
test_device!(rope, rope_cpu, rope_gpu, rope_metal);
test_device!(rope_thd, rope_thd_cpu, rope_thd_gpu, rope_thd_metal);
test_device!(softmax, softmax_cpu, softmax_gpu, softmax_metal);
test_device!(rms_norm, rms_norm_cpu, rms_norm_gpu, rms_norm_metal);
test_device!(rms_norml, rms_norml_cpu, rms_norml_gpu, rms_norml_metal);
test_device!(layer_norm, ln_cpu, ln_gpu, ln_metal);
test_device!(layer_norml, lnl_cpu, lnl_gpu, lnl_metal);
test_device!(sigmoid, sigmoid_cpu, sigmoid_gpu, sigmoid_metal);
| candle/candle-nn/tests/ops.rs/0 | {
"file_path": "candle/candle-nn/tests/ops.rs",
"repo_id": "candle",
"token_count": 5198
} |
# Generated content DO NOT EDIT
from typing import Any, Callable, Dict, List, Optional, Tuple, Union, Sequence
from os import PathLike
from candle.typing import _ArrayLike, Device, Scalar, Index, Shape
from candle import Tensor, DType, QTensor
class ONNXModel:
"""
A wrapper around an ONNX model.
"""
def __init__(self, path: str):
pass
@property
def doc_string(self) -> str:
"""
The doc string of the model.
"""
pass
@property
def domain(self) -> str:
"""
The domain of the operator set of the model.
"""
pass
def initializers(self) -> Dict[str, Tensor]:
"""
Get the weights of the model.
"""
pass
@property
def inputs(self) -> Optional[Dict[str, ONNXTensorDescription]]:
"""
The inputs of the model.
"""
pass
@property
def ir_version(self) -> int:
"""
The version of the IR this model targets.
"""
pass
@property
def model_version(self) -> int:
"""
The version of the model.
"""
pass
@property
def outputs(self) -> Optional[Dict[str, ONNXTensorDescription]]:
"""
The outputs of the model.
"""
pass
@property
def producer_name(self) -> str:
"""
The producer of the model.
"""
pass
@property
def producer_version(self) -> str:
"""
The version of the producer of the model.
"""
pass
def run(self, inputs: Dict[str, Tensor]) -> Dict[str, Tensor]:
"""
Run the model on the given inputs.
"""
pass
class ONNXTensorDescription:
"""
A wrapper around an ONNX tensor description.
"""
@property
def dtype(self) -> DType:
"""
The data type of the tensor.
"""
pass
@property
def shape(self) -> Tuple[Union[int, str, Any]]:
"""
The shape of the tensor.
"""
pass
| candle/candle-pyo3/py_src/candle/onnx/__init__.pyi/0 | {
"file_path": "candle/candle-pyo3/py_src/candle/onnx/__init__.pyi",
"repo_id": "candle",
"token_count": 939
} |
import candle
from candle import Tensor, QTensor
from candle.nn import Module, Linear
from candle.utils import cuda_is_available
import pytest
def test_module_can_be_constructed():
class A(Module):
pass
a = A()
assert a is not None
assert len(list(a.buffers())) == 0
def test_module_registers_tensors():
class A(Module):
def __init__(self):
super().__init__()
self.t = Tensor(42.0)
a = A()
named_buffers = dict(a.named_buffers())
assert len(named_buffers) == 1
assert "t" in named_buffers
def test_module_registers_submodules():
class A(Module):
def __init__(self):
super().__init__()
self.linear = Linear(10, 20)
a = A()
named_modules = dict(a.named_modules())
named_buffers = dict(a.named_buffers())
assert len(named_buffers) == 2
assert "linear" in named_modules
assert "linear.weight" in named_buffers
assert "linear.bias" in named_buffers
def test_module_can_dump_statedict():
class A(Module):
def __init__(self):
super().__init__()
self.linear = Linear(10, 20)
self.t = Tensor(42.0)
a = A()
state_dict = a.state_dict()
assert hasattr(state_dict, "_metadata")
assert "t" in state_dict
assert "linear.weight" in state_dict
assert "linear.bias" in state_dict
assert len(state_dict) == 3
def test_module_can_load_statedict():
class A(Module):
def __init__(self):
super().__init__()
self.linear = Linear(10, 20)
self.t = Tensor(42.0)
statedict = {
"linear.weight": candle.ones((20, 10)),
"linear.bias": candle.zeros((20,)),
"t": Tensor(42.0),
}
a = A()
a.load_state_dict(statedict)
def test_module_throws_on_shape_mismatch():
class A(Module):
def __init__(self):
super().__init__()
self.t = Tensor(42.0)
statedict = {
"t": candle.ones((20,)),
}
a = A()
with pytest.raises(RuntimeError) as excinfo:
a.load_state_dict(statedict)
assert "size mismatch" in str(excinfo.value)
def test_module_throws_on_missing_key():
class A(Module):
def __init__(self):
super().__init__()
self.t = Tensor(42.0)
statedict = {
"not_t": Tensor(42.0),
}
a = A()
with pytest.raises(RuntimeError) as excinfo:
a.load_state_dict(statedict)
assert 'Missing key(s) in state_dict: "t".' in str(excinfo.value)
def test_module_can_load_quantized_tensors():
class A(Module):
def __init__(self):
super().__init__()
self.t = candle.randn((16, 256))
self._quantizable_buffers.add("t")
statedict = {
"t": candle.ones((16, 256)).quantize("q4_0"),
}
a = A()
a.load_state_dict(statedict)
assert isinstance(a.t, QTensor)
assert a.t.ggml_dtype == "Q4_0"
def test_module_dequantizes_tensors_automatically():
class A(Module):
def __init__(self):
super().__init__()
self.t = candle.randn((16, 256))
statedict = {
"t": candle.ones((16, 256)).quantize("q4_0"),
}
a = A()
a.load_state_dict(statedict)
assert isinstance(a.t, Tensor)
@pytest.mark.skipif(not cuda_is_available(), reason="CUDA is not available")
def test_module_can_be_moved_to_cuda():
class A(Module):
def __init__(self):
super().__init__()
self.t = candle.randn((16, 256))
a = A()
a.cuda()
assert a.t.device == "cuda"
@pytest.mark.skipif(not cuda_is_available(), reason="CUDA is not available")
def test_module_can_be_moved_from_cuda_to_cpu():
class A(Module):
def __init__(self):
super().__init__()
self.t = candle.randn((16, 256))
a = A()
a.cuda()
assert a.t.device == "cuda"
a.cpu()
assert a.t.device == "cpu"
| candle/candle-pyo3/tests/bindings/test_module.py/0 | {
"file_path": "candle/candle-pyo3/tests/bindings/test_module.py",
"repo_id": "candle",
"token_count": 1853
} |
//! Chinese contrastive Language-Image Pre-Training
//!
//! Chinese contrastive Language-Image Pre-Training (CLIP) is an architecture trained on
//! pairs of images with related texts.
//!
//! - 💻 [GH Link](https://github.com/OFA-Sys/Chinese-CLIP)
//! - 💻 Transformers Python [reference implementation](https://github.com/huggingface/transformers/blob/5af7d41e49bbfc8319f462eb45253dcb3863dfb7/src/transformers/models/chinese_clip/modeling_chinese_clip.py)
//!
use candle::{Module, Result, Tensor, D};
use candle_nn as nn;
use text_model::ChineseClipTextTransformer;
use vision_model::ChineseClipVisionTransformer;
pub mod text_model;
pub mod vision_model;
#[derive(Debug, Clone, Copy)]
pub enum Activation {
QuickGelu,
Gelu,
GeluNew,
Relu,
}
impl From<String> for Activation {
fn from(value: String) -> Self {
match value.as_str() {
"quick_gelu" => Activation::QuickGelu,
"gelu" => Activation::Gelu,
"gelu_new" => Activation::GeluNew,
"relu" => Activation::Relu,
_ => panic!("Invalid activation function: {}", value),
}
}
}
impl Module for Activation {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
match self {
Activation::QuickGelu => xs * nn::ops::sigmoid(&(xs * 1.702f64)?)?,
Activation::Gelu => xs.gelu_erf(),
Activation::GeluNew => xs.gelu(),
Activation::Relu => xs.relu(),
}
}
}
#[derive(Clone, Debug)]
pub struct ChineseClipConfig {
pub text_config: text_model::ChineseClipTextConfig,
pub vision_config: vision_model::ChineseClipVisionConfig,
pub projection_dim: usize,
pub logit_scale_init_value: f32,
pub image_size: usize,
}
impl ChineseClipConfig {
    /// Reference: https://huggingface.co/OFA-Sys/chinese-clip-vit-base-patch16/blob/main/config.json
pub fn clip_vit_base_patch16() -> Self {
let text_config = text_model::ChineseClipTextConfig::clip_vit_base_patch16();
let vision_config = vision_model::ChineseClipVisionConfig::clip_vit_base_patch16();
Self {
text_config,
vision_config,
projection_dim: 512,
logit_scale_init_value: 2.6592,
image_size: 512,
}
}
}
#[derive(Clone, Debug)]
pub enum EncoderConfig {
Text(text_model::ChineseClipTextConfig),
Vision(vision_model::ChineseClipVisionConfig),
}
impl EncoderConfig {
pub fn embed_dim(&self) -> usize {
match self {
Self::Text(c) => c.hidden_size,
Self::Vision(c) => c.hidden_size,
}
}
pub fn num_attention_heads(&self) -> usize {
match self {
Self::Text(c) => c.num_attention_heads,
Self::Vision(c) => c.num_attention_heads,
}
}
pub fn intermediate_size(&self) -> usize {
match self {
Self::Text(c) => c.intermediate_size,
Self::Vision(c) => c.intermediate_size,
}
}
pub fn num_hidden_layers(&self) -> usize {
match self {
Self::Text(c) => c.num_hidden_layers,
Self::Vision(c) => c.num_hidden_layers,
}
}
pub fn activation(&self) -> Activation {
match self {
Self::Text(c) => c.hidden_act,
Self::Vision(c) => c.hidden_act,
}
}
pub fn layer_norm_eps(&self) -> f64 {
match self {
Self::Text(c) => c.layer_norm_eps,
Self::Vision(c) => c.layer_norm_eps,
}
}
}
#[derive(Clone, Debug)]
pub struct ChineseClipModel {
text_model: ChineseClipTextTransformer,
vision_model: ChineseClipVisionTransformer,
visual_projection: nn::Linear,
text_projection: nn::Linear,
logit_scale: Tensor,
}
impl ChineseClipModel {
pub fn new(vs: nn::VarBuilder, c: &ChineseClipConfig) -> Result<Self> {
let text_model = ChineseClipTextTransformer::new(vs.pp("text_model"), &c.text_config)?;
let vision_model =
ChineseClipVisionTransformer::new(vs.pp("vision_model"), &c.vision_config)?;
let vision_embed_dim = c.vision_config.hidden_size;
let vision_projection = nn::linear_no_bias(
vision_embed_dim,
c.projection_dim,
vs.pp("visual_projection"),
)?;
let text_embed_dim = c.text_config.hidden_size;
let text_projection =
nn::linear_no_bias(text_embed_dim, c.projection_dim, vs.pp("text_projection"))?;
let logit_scale = if vs.contains_tensor("logit_scale") {
vs.get(&[], "logit_scale")?
} else {
Tensor::new(&[c.logit_scale_init_value], vs.device())?
};
Ok(Self {
text_model,
vision_model,
visual_projection: vision_projection,
text_projection,
logit_scale,
})
}
pub fn get_text_features(
&self,
input_ids: &Tensor,
token_type_ids: Option<&Tensor>,
attention_mask: Option<&Tensor>,
) -> Result<Tensor> {
let output = self
.text_model
.forward(input_ids, token_type_ids, attention_mask)?
.contiguous()?;
self.text_projection.forward(&output)
}
pub fn get_image_features(&self, pixel_values: &Tensor) -> Result<Tensor> {
pixel_values
.apply(&self.vision_model)?
.apply(&self.visual_projection)
}
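    /// Compute the `(logits_per_text, logits_per_image)` pair: text and image
    /// features are L2-normalized, multiplied together and scaled by
    /// `exp(logit_scale)`, with `logits_per_image` being the transpose of
    /// `logits_per_text`.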
pub fn forward(
&self,
pixel_values: &Tensor,
input_ids: &Tensor,
token_type_ids: Option<&Tensor>,
attention_mask: Option<&Tensor>,
) -> Result<(Tensor, Tensor)> {
let image_features = self.get_image_features(pixel_values)?;
let text_features = self.get_text_features(input_ids, token_type_ids, attention_mask)?;
let image_features_normalized = div_l2_norm(&image_features)?;
let text_features_normalized = div_l2_norm(&text_features)?;
let logits_per_text = text_features_normalized.matmul(&image_features_normalized.t()?)?;
let logit_scale = self.logit_scale.exp()?;
let logits_per_text = logits_per_text.broadcast_mul(&logit_scale)?;
let logits_per_image = logits_per_text.t()?;
Ok((logits_per_text, logits_per_image))
}
}
pub fn div_l2_norm(v: &Tensor) -> Result<Tensor> {
let l2_norm = v.sqr()?.sum_keepdim(D::Minus1)?.sqrt()?;
v.broadcast_div(&l2_norm)
}
| candle/candle-transformers/src/models/chinese_clip/mod.rs/0 | {
"file_path": "candle/candle-transformers/src/models/chinese_clip/mod.rs",
"repo_id": "candle",
"token_count": 3001
} |
//! Implementation of EfficientNet, a family of convolutional networks for
//! image classification obtained by compound scaling of depth, width and resolution.
//!
//! See:
//! - ["EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks"](https://arxiv.org/abs/1905.11946)
//!
use candle::{Context, Result, Tensor, D};
use candle_nn as nn;
use nn::{Module, VarBuilder};
// Based on the Python version from torchvision.
// https://github.com/pytorch/vision/blob/0d75d9e5516f446c9c0ef93bd4ed9fea13992d06/torchvision/models/efficientnet.py#L47
#[derive(Debug, Clone, Copy)]
pub struct MBConvConfig {
expand_ratio: f64,
kernel: usize,
stride: usize,
input_channels: usize,
out_channels: usize,
num_layers: usize,
}
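// Round `v` to a multiple of `divisor` (never below `divisor` itself), bumping
// the result up by one extra `divisor` if rounding would drop below 90% of the
// original value. This is the usual channel-rounding rule for EfficientNet-style models.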
fn make_divisible(v: f64, divisor: usize) -> usize {
let min_value = divisor;
let new_v = usize::max(
min_value,
(v + divisor as f64 * 0.5) as usize / divisor * divisor,
);
if (new_v as f64) < 0.9 * v {
new_v + divisor
} else {
new_v
}
}
fn bneck_confs(width_mult: f64, depth_mult: f64) -> Vec<MBConvConfig> {
let bneck_conf = |e, k, s, i, o, n| {
let input_channels = make_divisible(i as f64 * width_mult, 8);
let out_channels = make_divisible(o as f64 * width_mult, 8);
let num_layers = (n as f64 * depth_mult).ceil() as usize;
MBConvConfig {
expand_ratio: e,
kernel: k,
stride: s,
input_channels,
out_channels,
num_layers,
}
};
vec![
bneck_conf(1., 3, 1, 32, 16, 1),
bneck_conf(6., 3, 2, 16, 24, 2),
bneck_conf(6., 5, 2, 24, 40, 2),
bneck_conf(6., 3, 2, 40, 80, 3),
bneck_conf(6., 5, 1, 80, 112, 3),
bneck_conf(6., 5, 2, 112, 192, 4),
bneck_conf(6., 3, 1, 192, 320, 1),
]
}
impl MBConvConfig {
pub fn b0() -> Vec<Self> {
bneck_confs(1.0, 1.0)
}
pub fn b1() -> Vec<Self> {
bneck_confs(1.0, 1.1)
}
pub fn b2() -> Vec<Self> {
bneck_confs(1.1, 1.2)
}
pub fn b3() -> Vec<Self> {
bneck_confs(1.2, 1.4)
}
pub fn b4() -> Vec<Self> {
bneck_confs(1.4, 1.8)
}
pub fn b5() -> Vec<Self> {
bneck_confs(1.6, 2.2)
}
pub fn b6() -> Vec<Self> {
bneck_confs(1.8, 2.6)
}
pub fn b7() -> Vec<Self> {
bneck_confs(2.0, 3.1)
}
}
/// Conv2D with same padding.
#[derive(Debug)]
struct Conv2DSame {
conv2d: nn::Conv2d,
s: usize,
k: usize,
}
impl Conv2DSame {
fn new(
vb: VarBuilder,
i: usize,
o: usize,
k: usize,
stride: usize,
groups: usize,
bias: bool,
) -> Result<Self> {
let conv_config = nn::Conv2dConfig {
stride,
groups,
..Default::default()
};
let conv2d = if bias {
nn::conv2d(i, o, k, conv_config, vb)?
} else {
nn::conv2d_no_bias(i, o, k, conv_config, vb)?
};
Ok(Self {
conv2d,
s: stride,
k,
})
}
}
impl Module for Conv2DSame {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let s = self.s;
let k = self.k;
let (_, _, ih, iw) = xs.dims4()?;
let oh = ih.div_ceil(s);
let ow = iw.div_ceil(s);
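        // TensorFlow-style "same" padding: the total padding is chosen so the
        // output size is ceil(input / stride), then split as evenly as possible
        // between the two sides, with any extra pixel going to the trailing side.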
let pad_h = usize::max((oh - 1) * s + k - ih, 0);
let pad_w = usize::max((ow - 1) * s + k - iw, 0);
if pad_h > 0 || pad_w > 0 {
let xs = xs.pad_with_zeros(2, pad_h / 2, pad_h - pad_h / 2)?;
let xs = xs.pad_with_zeros(3, pad_w / 2, pad_w - pad_w / 2)?;
self.conv2d.forward(&xs)
} else {
self.conv2d.forward(xs)
}
}
}
#[derive(Debug)]
struct ConvNormActivation {
conv2d: Conv2DSame,
bn2d: nn::BatchNorm,
activation: bool,
}
impl ConvNormActivation {
fn new(
vb: VarBuilder,
i: usize,
o: usize,
k: usize,
stride: usize,
groups: usize,
) -> Result<Self> {
let conv2d = Conv2DSame::new(vb.pp("0"), i, o, k, stride, groups, false)?;
let bn2d = nn::batch_norm(o, 1e-3, vb.pp("1"))?;
Ok(Self {
conv2d,
bn2d,
activation: true,
})
}
fn no_activation(self) -> Self {
Self {
activation: false,
..self
}
}
}
impl Module for ConvNormActivation {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let xs = self.conv2d.forward(xs)?.apply_t(&self.bn2d, false)?;
if self.activation {
swish(&xs)
} else {
Ok(xs)
}
}
}
#[derive(Debug)]
struct SqueezeExcitation {
fc1: Conv2DSame,
fc2: Conv2DSame,
}
impl SqueezeExcitation {
fn new(vb: VarBuilder, in_channels: usize, squeeze_channels: usize) -> Result<Self> {
let fc1 = Conv2DSame::new(vb.pp("fc1"), in_channels, squeeze_channels, 1, 1, 1, true)?;
let fc2 = Conv2DSame::new(vb.pp("fc2"), squeeze_channels, in_channels, 1, 1, 1, true)?;
Ok(Self { fc1, fc2 })
}
}
impl Module for SqueezeExcitation {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let residual = xs;
// equivalent to adaptive_avg_pool2d([1, 1])
let xs = xs.mean_keepdim(D::Minus2)?.mean_keepdim(D::Minus1)?;
let xs = self.fc1.forward(&xs)?;
let xs = swish(&xs)?;
let xs = self.fc2.forward(&xs)?;
let xs = nn::ops::sigmoid(&xs)?;
residual.broadcast_mul(&xs)
}
}
#[derive(Debug)]
struct MBConv {
expand_cna: Option<ConvNormActivation>,
depthwise_cna: ConvNormActivation,
squeeze_excitation: SqueezeExcitation,
project_cna: ConvNormActivation,
config: MBConvConfig,
}
impl MBConv {
fn new(vb: VarBuilder, c: MBConvConfig) -> Result<Self> {
let vb = vb.pp("block");
let exp = make_divisible(c.input_channels as f64 * c.expand_ratio, 8);
let expand_cna = if exp != c.input_channels {
Some(ConvNormActivation::new(
vb.pp("0"),
c.input_channels,
exp,
1,
1,
1,
)?)
} else {
None
};
let start_index = if expand_cna.is_some() { 1 } else { 0 };
let depthwise_cna =
ConvNormActivation::new(vb.pp(start_index), exp, exp, c.kernel, c.stride, exp)?;
let squeeze_channels = usize::max(1, c.input_channels / 4);
let squeeze_excitation =
SqueezeExcitation::new(vb.pp(start_index + 1), exp, squeeze_channels)?;
let project_cna =
ConvNormActivation::new(vb.pp(start_index + 2), exp, c.out_channels, 1, 1, 1)?
.no_activation();
Ok(Self {
expand_cna,
depthwise_cna,
squeeze_excitation,
project_cna,
config: c,
})
}
}
impl Module for MBConv {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let use_res_connect =
self.config.stride == 1 && self.config.input_channels == self.config.out_channels;
let ys = match &self.expand_cna {
Some(expand_cna) => expand_cna.forward(xs)?,
None => xs.clone(),
};
let ys = self.depthwise_cna.forward(&ys)?;
let ys = self.squeeze_excitation.forward(&ys)?;
let ys = self.project_cna.forward(&ys)?;
if use_res_connect {
ys + xs
} else {
Ok(ys)
}
}
}
fn swish(s: &Tensor) -> Result<Tensor> {
s * nn::ops::sigmoid(s)?
}
#[derive(Debug)]
pub struct EfficientNet {
init_cna: ConvNormActivation,
blocks: Vec<MBConv>,
final_cna: ConvNormActivation,
classifier: nn::Linear,
}
impl EfficientNet {
pub fn new(p: VarBuilder, configs: Vec<MBConvConfig>, nclasses: usize) -> Result<Self> {
let f_p = p.pp("features");
let first_in_c = configs[0].input_channels;
let last_out_c = configs.last().context("no last")?.out_channels;
let final_out_c = 4 * last_out_c;
let init_cna = ConvNormActivation::new(f_p.pp(0), 3, first_in_c, 3, 2, 1)?;
let nconfigs = configs.len();
let mut blocks = vec![];
for (index, cnf) in configs.into_iter().enumerate() {
let f_p = f_p.pp(index + 1);
for r_index in 0..cnf.num_layers {
let cnf = if r_index == 0 {
cnf
} else {
MBConvConfig {
input_channels: cnf.out_channels,
stride: 1,
..cnf
}
};
blocks.push(MBConv::new(f_p.pp(r_index), cnf)?)
}
}
let final_cna =
ConvNormActivation::new(f_p.pp(nconfigs + 1), last_out_c, final_out_c, 1, 1, 1)?;
let classifier = nn::linear(final_out_c, nclasses, p.pp("classifier.1"))?;
Ok(Self {
init_cna,
blocks,
final_cna,
classifier,
})
}
}
impl Module for EfficientNet {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let mut xs = self.init_cna.forward(xs)?;
for block in self.blocks.iter() {
xs = block.forward(&xs)?
}
let xs = self.final_cna.forward(&xs)?;
// Equivalent to adaptive_avg_pool2d([1, 1]) -> squeeze(-1) -> squeeze(-1)
let xs = xs.mean(D::Minus1)?.mean(D::Minus1)?;
self.classifier.forward(&xs)
}
}
| candle/candle-transformers/src/models/efficientnet.rs/0 | {
"file_path": "candle/candle-transformers/src/models/efficientnet.rs",
"repo_id": "candle",
"token_count": 5204
} |
//! Hiera inference implementation based on timm.
//!
//!
//! - 💻 [Hiera](https://github.com/huggingface/pytorch-image-models/blob/main/timm/models/hiera.py)
//! - 📝 [Paper](https://arxiv.org/abs/2306.00989). Hiera: A Hierarchical Vision Transformer without the Bells-and-Whistles
use candle::{Result, D};
use candle_nn::{conv2d, layer_norm, linear, ops::softmax, Conv2dConfig, Func, VarBuilder};
#[derive(Debug, Clone, serde::Deserialize)]
pub struct Config {
channels: usize,
heads: usize,
stages: [usize; 4],
}
impl Config {
pub fn tiny() -> Self {
Self {
channels: 96,
heads: 1,
stages: [1, 2, 7, 2],
}
}
pub fn small() -> Self {
Self {
channels: 96,
heads: 1,
stages: [1, 2, 11, 2],
}
}
pub fn base() -> Self {
Self {
channels: 96,
heads: 1,
stages: [2, 3, 16, 3],
}
}
pub fn base_plus() -> Self {
Self {
channels: 112,
heads: 2,
stages: [2, 3, 16, 3],
}
}
pub fn large() -> Self {
Self {
channels: 144,
heads: 2,
stages: [2, 6, 36, 4],
}
}
pub fn huge() -> Self {
Self {
channels: 256,
heads: 4,
stages: [2, 6, 36, 4],
}
}
}
const NUM_TOKENS: usize = 56 * 56;
fn hiera_embeddings(channels: usize, vb: VarBuilder) -> Result<Func<'static>> {
let conv_cfg = Conv2dConfig {
stride: 4,
padding: 3,
..Default::default()
};
let proj = conv2d(3, channels, 7, conv_cfg, vb.pp("patch_embed.proj"))?;
let pos_embed = vb.get((1, NUM_TOKENS, channels), "pos_embed")?;
Ok(Func::new(move |xs| {
let xs = xs.apply(&proj)?;
let (b, c, _, _) = xs.dims4()?;
let xs = xs.reshape((b, c, ()))?.transpose(1, 2)?;
let xs = xs.broadcast_add(&pos_embed)?;
Ok(xs)
}))
}
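// Reorder the 56x56 token grid so that, for each of the three 2x2 pooling
// stages applied later, the four tokens of every 2x2 spatial window become
// contiguous along the batch dimension. The pooling blocks can then reduce
// with a plain reshape + max instead of spatial gathers.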
fn hiera_unroll() -> Result<Func<'static>> {
Ok(Func::new(move |xs| {
let mut xs = xs.clone();
let (mut b, _, c) = xs.dims3()?;
let mut size = 56;
xs = xs.reshape((b, size, size, c))?;
for _ in 0..3 {
size /= 2;
let new_shape = &[b, size, 2, size, 2, c];
xs = xs.reshape(new_shape)?;
xs = xs.permute((0, 2, 4, 1, 3, 5))?;
xs = xs.flatten(0, 2)?;
b *= 4;
}
xs = xs.reshape(((), NUM_TOKENS, c))?;
Ok(xs)
}))
}
fn hiera_mlp(in_channels: usize, out_channels: usize, vb: VarBuilder) -> Result<Func<'static>> {
let fc1 = linear(in_channels, out_channels, vb.pp("fc1"))?;
let fc2 = linear(out_channels, in_channels, vb.pp("fc2"))?;
Ok(Func::new(move |xs| {
let xs = xs.apply(&fc1)?.gelu()?.apply(&fc2)?;
Ok(xs)
}))
}
fn hiera_attention(
in_channels: usize,
out_channels: usize,
heads: usize,
q_stride: usize,
window_size: usize,
use_mask_attention: bool,
vb: VarBuilder,
) -> Result<Func<'static>> {
let head_dim = out_channels / heads;
let scale = (head_dim as f64).powf(-0.5);
let proj = linear(out_channels, out_channels, vb.pp("proj"))?;
let qkv = linear(in_channels, out_channels * 3, vb.pp("qkv"))?;
Ok(Func::new(move |xs| {
let (b, n, _) = xs.dims3()?;
let num_windows = if use_mask_attention {
n / (q_stride * window_size)
} else {
1
};
let qkv = xs.apply(&qkv)?;
let ec = qkv.elem_count();
let s = ec / (b * num_windows * 3 * heads * head_dim);
let qkv = qkv
.reshape((b, s, num_windows, 3, heads, head_dim))?
.permute((3, 0, 4, 2, 1, 5))?;
let mut q = qkv.get(0)?;
let k = qkv.get(1)?;
let v = qkv.get(2)?;
if q_stride > 1 {
let ec = q.elem_count();
let s = ec / (b * num_windows * q_stride * heads * head_dim);
q = q
.reshape((b, heads, num_windows, q_stride, s, head_dim))?
.max(3)?;
}
let q = (q * scale)?;
// Q, K and V are 6 dimensional with the first dimension being 1.
// Squeeze them for the attention calculation since 6 dimensional matmuls are not supported.
let att = q
.squeeze(0)?
.matmul(&k.squeeze(0)?.transpose(D::Minus2, D::Minus1)?)?;
let att = softmax(&att, D::Minus1)?;
let xs = att.matmul(&v.squeeze(0)?)?.unsqueeze(0)?;
let xs = xs.transpose(1, 3)?.reshape((b, (), out_channels))?;
let xs = xs.apply(&proj)?;
Ok(xs)
}))
}
fn hiera_block(
heads: usize,
in_channels: usize,
out_channels: usize,
q_stride: usize,
window_size: usize,
use_mask_attention: bool,
vb: VarBuilder,
) -> Result<Func<'static>> {
let norm1 = layer_norm(in_channels, 1e-6, vb.pp("norm1"))?;
let norm2 = layer_norm(out_channels, 1e-6, vb.pp("norm2"))?;
let proj = linear(in_channels, out_channels, vb.pp("proj"));
let stride = 4;
let mlp = hiera_mlp(out_channels, out_channels * 4, vb.pp("mlp"))?;
let attn = hiera_attention(
in_channels,
out_channels,
heads,
q_stride,
window_size,
use_mask_attention,
vb.pp("attn"),
)?;
Ok(Func::new(move |xs| {
let mut xs = xs.clone();
let xs_norm = xs.apply_t(&norm1, false)?;
if let Ok(p) = &proj {
xs = xs_norm.apply(p)?;
let (a, _, d) = xs.dims3()?;
xs = xs.reshape((a, stride, (), d))?.max(1)?;
}
let xs = (xs + &xs_norm.apply(&attn)?)?;
let xs = (&xs + &xs.apply_t(&norm2, false)?.apply(&mlp)?)?;
Ok(xs)
}))
}
fn hiera_blocks(cfg: &Config, vb: VarBuilder) -> Result<Func<'static>> {
let nblocks = cfg.stages.iter().sum();
let mut blocks = Vec::with_capacity(nblocks);
let mut out_channels = cfg.channels;
let mut in_channels = out_channels;
let mut heads = cfg.heads;
let mut b = 0;
let mut q_stride = 1;
let mut window_size = 64;
for s in 0..4 {
let use_mask_attention = s < 2;
for _ in 0..cfg.stages[s] {
blocks.push(hiera_block(
heads,
in_channels,
out_channels,
q_stride,
window_size,
use_mask_attention,
vb.pp(b),
)?);
b += 1;
in_channels = out_channels;
q_stride = 1;
}
q_stride = 4;
out_channels *= 2;
heads *= 2;
window_size /= 4;
}
Ok(Func::new(move |xs| {
let mut xs = xs.clone();
for block in blocks.iter() {
xs = xs.apply(block)?
}
Ok(xs)
}))
}
fn hiera_head(outputs: usize, nclasses: usize, vb: VarBuilder) -> Result<Func<'static>> {
let norm = layer_norm(outputs, 1e-6, vb.pp("norm"))?;
let linear = linear(outputs, nclasses, vb.pp("fc"))?;
Ok(Func::new(move |xs| {
xs.apply_t(&norm, false)?.apply(&linear)
}))
}
// Build a hiera model for a given configuration.
fn hiera_model(cfg: &Config, nclasses: Option<usize>, vb: VarBuilder) -> Result<Func<'static>> {
let cls = match nclasses {
None => None,
Some(nclasses) => {
let outputs = cfg.channels * 8;
let head = hiera_head(outputs, nclasses, vb.pp("head"))?;
Some(head)
}
};
let embeddings = hiera_embeddings(cfg.channels, vb.clone())?;
let unroll = hiera_unroll()?;
let blocks = hiera_blocks(cfg, vb.pp("blocks"))?;
Ok(Func::new(move |xs| {
let xs = xs
.apply(&embeddings)?
.apply(&unroll)?
.apply(&blocks)?
.mean(1)?;
match &cls {
None => Ok(xs),
Some(cls) => xs.apply(cls),
}
}))
}
pub fn hiera(cfg: &Config, nclasses: usize, vb: VarBuilder) -> Result<Func<'static>> {
hiera_model(cfg, Some(nclasses), vb)
}
pub fn hiera_no_final_layer(cfg: &Config, vb: VarBuilder) -> Result<Func<'static>> {
hiera_model(cfg, None, vb)
}
| candle/candle-transformers/src/models/hiera.rs/0 | {
"file_path": "candle/candle-transformers/src/models/hiera.rs",
"repo_id": "candle",
"token_count": 4445
} |
// Copyright (c) Kyutai, all rights reserved.
// This source code is licensed under the license found in the
// LICENSE file in the root directory of this source tree.
use candle::{DType, Device, IndexOp, Module, Result, StreamTensor, StreamingModule, Tensor, D};
use candle_nn::{linear_no_bias, Linear, VarBuilder};
use std::sync::Arc;
fn linear(in_d: usize, out_d: usize, bias: bool, vb: VarBuilder) -> Result<Linear> {
if bias {
candle_nn::linear(in_d, out_d, vb)
} else {
linear_no_bias(in_d, out_d, vb)
}
}
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum PositionalEmbedding {
Rope,
Sin,
None,
}
#[derive(Debug, Clone)]
pub struct Config {
pub d_model: usize,
pub num_heads: usize,
pub num_layers: usize,
pub causal: bool,
pub norm_first: bool,
pub bias_ff: bool,
pub bias_attn: bool,
pub layer_scale: Option<f64>,
pub positional_embedding: PositionalEmbedding,
pub use_conv_block: bool,
pub cross_attention: bool,
pub conv_kernel_size: usize,
pub use_conv_bias: bool,
pub gating: Option<candle_nn::Activation>,
pub norm: super::NormType,
pub context: usize,
pub max_period: usize,
pub max_seq_len: usize,
pub kv_repeat: usize,
pub dim_feedforward: usize,
pub conv_layout: bool,
}
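// Precomputed rotary embedding tables: `sin` and `cos` both have shape
// (max_seq_len, dim / 2) and are sliced at `seqlen_offset` before being
// applied with the interleaved `rope_i` kernel.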
#[derive(Debug, Clone)]
pub struct RotaryEmbedding {
sin: Tensor,
cos: Tensor,
span: tracing::Span,
}
impl RotaryEmbedding {
pub fn new(dim: usize, max_seq_len: usize, theta: f32, dev: &Device) -> Result<Self> {
let inv_freq: Vec<_> = (0..dim)
.step_by(2)
.map(|i| 1f32 / theta.powf(i as f32 / dim as f32))
.collect();
let inv_freq_len = inv_freq.len();
let inv_freq = Tensor::from_vec(inv_freq, (1, inv_freq_len), dev)?;
let t = Tensor::arange(0u32, max_seq_len as u32, dev)?
.to_dtype(DType::F32)?
.reshape((max_seq_len, 1))?;
let freqs = t.matmul(&inv_freq)?;
Ok(Self {
sin: freqs.sin()?,
cos: freqs.cos()?,
span: tracing::span!(tracing::Level::TRACE, "rot"),
})
}
pub fn apply_rotary_emb(&self, qk: &Tensor, seqlen_offset: usize) -> Result<Tensor> {
let _enter = self.span.enter();
let (_b_size, _nheads, seqlen, _headdim) = qk.dims4()?;
let qk_dtype = qk.dtype();
let c = self.cos.narrow(0, seqlen_offset, seqlen)?;
let s = self.sin.narrow(0, seqlen_offset, seqlen)?;
candle_nn::rotary_emb::rope_i(&qk.to_dtype(DType::F32)?, &c, &s)?.to_dtype(qk_dtype)
}
}
#[derive(Debug, Clone)]
pub struct LayerScale {
scale: Tensor,
}
impl LayerScale {
pub fn new(d_model: usize, _init: f64, vb: VarBuilder) -> Result<Self> {
let scale = vb.get(d_model, "scale")?;
Ok(Self { scale })
}
}
impl Module for LayerScale {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
xs.broadcast_mul(&self.scale)
}
}
#[derive(Debug, Clone)]
pub struct StreamingMultiheadAttention {
q_proj: Linear,
k_proj: Linear,
v_proj: Linear,
out_proj: Linear,
kv_repeat: usize,
num_heads: usize,
context: usize,
neg_inf: Tensor,
rope: Option<Arc<RotaryEmbedding>>,
kv_cache: candle_nn::kv_cache::RotatingKvCache,
pos: usize,
use_flash_attn: bool,
span: tracing::Span,
}
impl StreamingMultiheadAttention {
pub fn new(rope: &Option<Arc<RotaryEmbedding>>, cfg: &Config, vb: VarBuilder) -> Result<Self> {
let embed_dim = cfg.d_model;
let num_kv = cfg.num_heads / cfg.kv_repeat;
let kv_dim = num_kv * (embed_dim / cfg.num_heads);
let q_proj = linear(embed_dim, embed_dim, cfg.bias_attn, vb.pp("q_proj"))?;
let k_proj = linear(embed_dim, kv_dim, cfg.bias_attn, vb.pp("k_proj"))?;
let v_proj = linear(embed_dim, kv_dim, cfg.bias_attn, vb.pp("v_proj"))?;
let out_proj = linear(embed_dim, embed_dim, cfg.bias_attn, vb.pp("o_proj"))?;
let neg_inf = Tensor::new(f32::NEG_INFINITY, vb.device())?.to_dtype(vb.dtype())?;
Ok(Self {
q_proj,
k_proj,
v_proj,
out_proj,
rope: rope.clone(),
kv_repeat: cfg.kv_repeat,
num_heads: cfg.num_heads,
context: cfg.context,
neg_inf,
kv_cache: candle_nn::kv_cache::RotatingKvCache::new(2, cfg.context),
pos: 0,
use_flash_attn: false,
span: tracing::span!(tracing::Level::TRACE, "mha"),
})
}
pub fn forward(&mut self, xs: &Tensor, mask: Option<&Tensor>) -> Result<Tensor> {
let _enter = self.span.enter();
if self.kv_repeat != 1 {
candle::bail!("only kv-repeat = 1 is supported")
}
let (b, t, hd) = xs.dims3()?;
let head_dim = hd / self.num_heads;
let q = xs
.apply(&self.q_proj)?
.reshape((b, t, self.num_heads, head_dim))?;
let k = xs
.apply(&self.k_proj)?
.reshape((b, t, self.num_heads, head_dim))?;
let v = xs
.apply(&self.v_proj)?
.reshape((b, t, self.num_heads, head_dim))?;
// qk_layer_norm = None
// kv_repeat = 1, otherwise we would need repeat_kv
let mut q = q.transpose(1, 2)?.contiguous()?; // b,h,t,d
let mut k = k.transpose(1, 2)?.contiguous()?; // b,h,k,d
let v = v.transpose(1, 2)?.contiguous()?; // b,h,k,d
if let Some(rope) = &self.rope {
q = rope.apply_rotary_emb(&q, self.pos)?;
k = rope.apply_rotary_emb(&k, self.pos)?;
}
let (k, v) = {
self.pos += k.dim(2)?;
self.kv_cache.append(&k.contiguous()?, &v.contiguous()?)?
};
        // The KV cache currently keeps all the data; trim the part that comes
        // from the cache down to at most `context` entries so that it stays
        // coherent with the shape of the mask we provide.
let k_len = k.dim(2)?;
let k_target_len = t + usize::min(self.context, k_len - t);
let (k, v) = if k_target_len < k_len {
let k = k.narrow(2, k_len - k_target_len, k_target_len)?;
let v = v.narrow(2, k_len - k_target_len, k_target_len)?;
(k, v)
} else {
(k.clone(), v.clone())
};
let xs = if q.dtype() == DType::BF16 && self.use_flash_attn {
let q = q.transpose(1, 2)?;
let k = k.transpose(1, 2)?;
let v = v.transpose(1, 2)?;
let softmax_scale = 1f32 / (head_dim as f32).sqrt();
flash_attn(&q, &k, &v, softmax_scale, t > 1)?.transpose(1, 2)?
} else {
let pre_ws = q.matmul(&k.t()?)?; // b,h,t,k
let pre_ws = (pre_ws * (head_dim as f64).powf(-0.5))?;
let pre_ws = match mask {
None => pre_ws,
Some(mask) => {
let mask = mask.broadcast_left((b, self.num_heads))?;
let neg_inf = self.neg_inf.broadcast_as(pre_ws.shape())?;
mask.where_cond(&neg_inf, &pre_ws)?
}
};
let ws = candle_nn::ops::softmax_last_dim(&pre_ws)?; // b,h,t,k
ws.matmul(&v)? // b,h,t,d
};
let xs = xs
.transpose(1, 2)? // b,t,h,d
.reshape((b, t, hd))?
.apply(&self.out_proj)?;
Ok(xs)
}
pub fn reset_kv_cache(&mut self) {
self.kv_cache.reset()
}
pub fn set_kv_cache(&mut self, kv_cache: candle_nn::kv_cache::RotatingKvCache) {
self.kv_cache = kv_cache
}
}
#[derive(Debug, Clone)]
pub struct StreamingMultiheadCrossAttention {
in_proj_q: Linear,
in_proj_k: Linear,
in_proj_v: Linear,
out_proj: Linear,
kv_repeat: usize,
num_heads: usize,
neg_inf: Tensor,
span: tracing::Span,
}
impl StreamingMultiheadCrossAttention {
pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let embed_dim = cfg.d_model;
let num_kv = cfg.num_heads / cfg.kv_repeat;
let kv_dim = num_kv * (embed_dim / cfg.num_heads);
let out_dim = embed_dim + 2 * kv_dim;
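        // The weights are stored as a single packed in_proj tensor of shape
        // (embed_dim + 2 * kv_dim, embed_dim): the first embed_dim rows project
        // the queries, followed by kv_dim rows each for the keys and values.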
let in_proj_weight = vb.get((out_dim, embed_dim), "in_proj_weight")?;
let in_proj_weight_q = in_proj_weight.narrow(0, 0, embed_dim)?;
let in_proj_weight_k = in_proj_weight.narrow(0, embed_dim, kv_dim)?;
let in_proj_weight_v = in_proj_weight.narrow(0, embed_dim + kv_dim, kv_dim)?;
let (in_proj_bias_q, in_proj_bias_k, in_proj_bias_v) = if cfg.bias_attn {
let b = vb.get(out_dim, "in_proj_bias")?;
let q = b.narrow(0, 0, embed_dim)?;
let k = b.narrow(0, embed_dim, kv_dim)?;
let v = b.narrow(0, embed_dim + kv_dim, kv_dim)?;
(Some(q), Some(k), Some(v))
} else {
(None, None, None)
};
let in_proj_q = Linear::new(in_proj_weight_q, in_proj_bias_q);
let in_proj_k = Linear::new(in_proj_weight_k, in_proj_bias_k);
let in_proj_v = Linear::new(in_proj_weight_v, in_proj_bias_v);
let out_proj = linear(embed_dim, embed_dim, cfg.bias_attn, vb.pp("out_proj"))?;
let neg_inf = Tensor::new(f32::NEG_INFINITY, vb.device())?.to_dtype(vb.dtype())?;
Ok(Self {
in_proj_q,
in_proj_k,
in_proj_v,
out_proj,
kv_repeat: cfg.kv_repeat,
num_heads: cfg.num_heads,
neg_inf,
span: tracing::span!(tracing::Level::TRACE, "mhca"),
})
}
pub fn forward(&self, xs: &Tensor, ca_src: &Tensor, mask: Option<&Tensor>) -> Result<Tensor> {
let _enter = self.span.enter();
if self.kv_repeat != 1 {
candle::bail!("only kv-repeat = 1 is supported")
}
let (b, t, hd) = xs.dims3()?;
let head_dim = hd / self.num_heads;
// time_dim = 1, layout: b,t,h,d
let q = xs.apply(&self.in_proj_q)?;
let k = ca_src.apply(&self.in_proj_k)?;
let v = ca_src.apply(&self.in_proj_v)?;
let (ca_b, ca_t, ca_dim) = k.dims3()?;
let q = q.reshape((b, t, self.num_heads, head_dim))?;
let k = k.reshape((ca_b, ca_t, ca_dim / head_dim, head_dim))?;
let v = v.reshape((ca_b, ca_t, ca_dim / head_dim, head_dim))?;
// qk_layer_norm = None
// kv_repeat = 1, otherwise we would need repeat_kv
let q = q.transpose(1, 2)?.contiguous()?; // b,h,t,d
let k = k.transpose(1, 2)?.contiguous()?; // b,h,k,d
let v = v.transpose(1, 2)?.contiguous()?; // b,h,k,d
let pre_ws = q.matmul(&k.t()?)?; // b,h,t,k
let pre_ws = (pre_ws * (head_dim as f64).powf(-0.5))?;
let pre_ws = match mask {
None => pre_ws,
Some(mask) => {
let mask = mask.broadcast_left((b, self.num_heads))?;
let neg_inf = self.neg_inf.broadcast_as(pre_ws.shape())?;
mask.where_cond(&neg_inf, &pre_ws)?
}
};
let ws = candle_nn::ops::softmax_last_dim(&pre_ws)?; // b,h,t,k
let xs = ws.matmul(&v)?; // b,h,t,d
let xs = xs
.transpose(1, 2)? // b,t,h,d
.reshape((b, t, hd))?
.apply(&self.out_proj)?;
Ok(xs)
}
}
#[derive(Debug, Clone)]
pub enum Mlp {
NoGating {
span1: tracing::Span,
linear1: Linear,
span2: tracing::Span,
linear2: Linear,
span: tracing::Span,
},
Gating {
linear_in: Linear,
linear_out: Linear,
activation: candle_nn::Activation,
span: tracing::Span,
},
}
impl Mlp {
pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let d_model = cfg.d_model;
let span = tracing::span!(tracing::Level::TRACE, "mlp");
match cfg.gating {
None => {
let span1 = tracing::span!(tracing::Level::TRACE, "lin1");
let span2 = tracing::span!(tracing::Level::TRACE, "lin2");
let linear1 = linear(d_model, cfg.dim_feedforward, cfg.bias_ff, vb.pp("mlp.fc1"))?;
let linear2 = linear(cfg.dim_feedforward, d_model, cfg.bias_ff, vb.pp("mlp.fc2"))?;
Ok(Self::NoGating {
linear1,
linear2,
span,
span1,
span2,
})
}
Some(activation) => {
let vb = vb.pp("gating");
let hidden = if cfg.dim_feedforward == 4 * d_model {
11 * d_model / 4
} else {
2 * cfg.dim_feedforward / 3
};
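                // Gated MLP sizing: the hidden dim is shrunk (11/4 * d_model for
                // the common dim_feedforward == 4 * d_model case, 2/3 of
                // dim_feedforward otherwise) so that the parameter count stays
                // close to the ungated MLP even though linear_in produces
                // 2 * hidden outputs.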
// TODO: Maybe use bias_ff here?
let linear_in = linear(d_model, 2 * hidden, false, vb.pp("linear_in"))?;
let linear_out = linear(hidden, d_model, false, vb.pp("linear_out"))?;
Ok(Self::Gating {
linear_in,
linear_out,
activation,
span,
})
}
}
}
}
impl Module for Mlp {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
match self {
Self::NoGating {
linear1,
linear2,
span,
span1,
span2,
} => {
let _enter = span.enter();
let xs = {
let _enter = span1.enter();
xs.apply(linear1)?
};
let xs = xs.gelu_erf()?;
{
let _enter = span2.enter();
xs.apply(linear2)
}
}
Self::Gating {
linear_in,
linear_out,
activation,
span,
} => {
let _enter = span.enter();
let xs = xs.apply(linear_in)?;
let (b, t, _) = xs.dims3()?;
let xs = xs.reshape((b, t, 2, ()))?;
let xs = (xs.i((.., .., 0))?.apply(activation)? * xs.i((.., .., 1))?)?;
xs.apply(linear_out)
}
}
}
}
#[derive(Debug, Clone)]
pub struct RmsNorm {
pub(crate) alpha: Tensor,
pub(crate) eps: f32,
}
impl RmsNorm {
pub fn new(d_model: usize, eps: f32, vb: VarBuilder) -> Result<Self> {
let alpha = vb.get((1, 1, d_model), "alpha")?.reshape(d_model)?;
Ok(Self { alpha, eps })
}
}
impl Module for RmsNorm {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
candle_nn::ops::rms_norm(xs, &self.alpha, self.eps)
}
}
#[derive(Debug, Clone)]
pub enum Norm {
LayerNorm(candle_nn::LayerNorm),
RmsNorm(RmsNorm),
}
impl Norm {
pub fn new(d_model: usize, cfg: &Config, vb: VarBuilder) -> Result<Self> {
let norm = match cfg.norm {
super::NormType::LayerNorm => {
let norm = candle_nn::layer_norm(d_model, 1e-5, vb)?;
Self::LayerNorm(norm)
}
super::NormType::RmsNorm => {
let norm = RmsNorm::new(d_model, 1e-8, vb)?;
Self::RmsNorm(norm)
}
};
Ok(norm)
}
}
impl Module for Norm {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
match self {
Self::LayerNorm(m) => m.forward(xs),
Self::RmsNorm(m) => m.forward(xs),
}
}
}
#[derive(Debug, Clone)]
pub struct StreamingTransformerLayer {
self_attn: StreamingMultiheadAttention,
mlp: Mlp,
norm1: Norm,
norm2: Norm,
layer_scale_1: Option<LayerScale>,
layer_scale_2: Option<LayerScale>,
cross_attn: Option<(candle_nn::LayerNorm, StreamingMultiheadCrossAttention)>,
norm_first: bool,
span: tracing::Span,
}
impl StreamingTransformerLayer {
pub fn new(rope: &Option<Arc<RotaryEmbedding>>, cfg: &Config, vb: VarBuilder) -> Result<Self> {
if cfg.use_conv_block {
candle::bail!("conv-block is not supported")
}
let d_model = cfg.d_model;
let mlp = Mlp::new(cfg, vb.clone())?;
let (norm1, norm2) = match cfg.norm {
super::NormType::LayerNorm => {
let norm1 = candle_nn::layer_norm(d_model, 1e-5, vb.pp("input_layernorm"))?;
let norm2 =
candle_nn::layer_norm(d_model, 1e-5, vb.pp("post_attention_layernorm"))?;
(Norm::LayerNorm(norm1), Norm::LayerNorm(norm2))
}
super::NormType::RmsNorm => {
let norm1 = RmsNorm::new(d_model, 1e-8, vb.pp("input_rmsnorm"))?;
let norm2 = RmsNorm::new(d_model, 1e-8, vb.pp("post_attention_rmsnorm"))?;
(Norm::RmsNorm(norm1), Norm::RmsNorm(norm2))
}
};
let layer_scale_1 = match cfg.layer_scale {
None => None,
Some(ls) => {
let ls = LayerScale::new(d_model, ls, vb.pp("self_attn_layer_scale"))?;
Some(ls)
}
};
let layer_scale_2 = match cfg.layer_scale {
None => None,
Some(ls) => {
let ls = LayerScale::new(d_model, ls, vb.pp("mlp_layer_scale"))?;
Some(ls)
}
};
let self_attn = StreamingMultiheadAttention::new(rope, cfg, vb.pp("self_attn"))?;
let cross_attn = if cfg.cross_attention {
let norm_cross = candle_nn::layer_norm(cfg.d_model, 1e-5, vb.pp("norm_cross"))?;
let cross_attn = StreamingMultiheadCrossAttention::new(cfg, vb.pp("cross_attention"))?;
Some((norm_cross, cross_attn))
} else {
None
};
Ok(Self {
self_attn,
mlp,
norm1,
norm2,
layer_scale_1,
layer_scale_2,
cross_attn,
norm_first: cfg.norm_first,
span: tracing::span!(tracing::Level::TRACE, "transformer-layer"),
})
}
pub fn forward(
&mut self,
xs: &Tensor,
ca_src: Option<&Tensor>,
mask: Option<&Tensor>,
) -> Result<Tensor> {
let _enter = self.span.enter();
if !self.norm_first {
candle::bail!("only norm_first = true is supported")
}
let norm1 = xs.apply(&self.norm1)?;
let xs = (xs
+ self
.self_attn
.forward(&norm1, mask)?
.apply(&self.layer_scale_1.as_ref())?)?;
let xs = match (&self.cross_attn, ca_src) {
(Some((norm_cross, cross_attn)), Some(ca_src)) => {
let residual = &xs;
let xs = xs.apply(norm_cross)?;
(residual + cross_attn.forward(&xs, ca_src, None)?)?
}
_ => xs,
};
let xs = (&xs
+ xs.apply(&self.norm2)?
.apply(&self.mlp)?
.apply(&self.layer_scale_2.as_ref()))?;
Ok(xs)
}
pub fn reset_kv_cache(&mut self) {
self.self_attn.reset_kv_cache()
}
pub fn set_kv_cache(&mut self, kv_cache: candle_nn::kv_cache::RotatingKvCache) {
self.self_attn.set_kv_cache(kv_cache)
}
}
#[derive(Debug, Clone)]
pub struct StreamingTransformer {
layers: Vec<StreamingTransformerLayer>,
positional_embedding: PositionalEmbedding,
max_period: usize,
}
impl StreamingTransformer {
pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let vb_l = vb.pp("layers");
let rope = match cfg.positional_embedding {
PositionalEmbedding::Rope => {
let rope = RotaryEmbedding::new(
cfg.d_model / cfg.num_heads,
cfg.max_seq_len,
cfg.max_period as f32,
vb.device(),
)?;
Some(Arc::new(rope))
}
PositionalEmbedding::Sin | PositionalEmbedding::None => None,
};
let mut layers = Vec::with_capacity(cfg.num_layers);
for layer_idx in 0..cfg.num_layers {
let layer = StreamingTransformerLayer::new(&rope, cfg, vb_l.pp(layer_idx))?;
layers.push(layer)
}
Ok(Self {
layers,
positional_embedding: cfg.positional_embedding,
max_period: cfg.max_period,
})
}
pub fn forward(&mut self, xs: &Tensor) -> Result<Tensor> {
self.forward_ca(xs, None)
}
pub fn forward_ca(&mut self, xs: &Tensor, ca_src: Option<&Tensor>) -> Result<Tensor> {
let (_b, t, c) = xs.dims3()?;
let pos = self.layers[0].self_attn.kv_cache.current_seq_len();
let mask = self.layers[0]
.self_attn
.kv_cache
.attn_mask(t, xs.device())?;
let mut xs = match self.positional_embedding {
PositionalEmbedding::Rope | PositionalEmbedding::None => xs.clone(),
PositionalEmbedding::Sin => {
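                // Standard sinusoidal embedding: for absolute position p and
                // channel i in 0..c/2, freq_i = 1 / theta^(i / (c/2 - 1)), and the
                // embedding concatenates cos(p * freq_i) and sin(p * freq_i)
                // before being added to the activations.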
let dev = xs.device();
let theta = self.max_period as f32;
let half_dim = c / 2;
let positions = Tensor::arange(pos as u32, (pos + t) as u32, dev)?
.unsqueeze(1)?
.to_dtype(DType::F32)?;
let inv_freq: Vec<_> = (0..half_dim)
.map(|i| 1f32 / theta.powf(i as f32 / (half_dim - 1) as f32))
.collect();
let inv_freq_len = inv_freq.len();
let inv_freq = Tensor::from_vec(inv_freq, (1, inv_freq_len), dev)?;
let freqs = positions.broadcast_mul(&inv_freq)?;
let pos_emb =
Tensor::cat(&[freqs.cos()?, freqs.sin()?], D::Minus1)?.to_dtype(xs.dtype())?;
xs.broadcast_add(&pos_emb)?
}
};
for layer in self.layers.iter_mut() {
xs = layer.forward(&xs, ca_src, mask.as_ref())?;
}
Ok(xs)
}
pub fn copy_state(&mut self, from: &Self) -> Result<()> {
if self.layers.len() != from.layers.len() {
candle::bail!("cannot copy kv-caches as the transformers have different depths")
}
self.layers
.iter_mut()
.zip(from.layers.iter())
.for_each(|(v, w)| v.set_kv_cache(w.self_attn.kv_cache.clone()));
Ok(())
}
}
impl StreamingModule for StreamingTransformer {
fn reset_state(&mut self) {
self.layers.iter_mut().for_each(|v| v.reset_kv_cache())
}
fn step(&mut self, xs: &StreamTensor) -> Result<StreamTensor> {
match xs.as_option() {
None => Ok(StreamTensor::empty()),
Some(xs) => Ok(StreamTensor::from_tensor(self.forward(xs)?)),
}
}
}
#[derive(Debug, Clone)]
pub struct ProjectedTransformer {
transformer: StreamingTransformer,
input_proj: Option<Linear>,
output_projs: Vec<Option<Linear>>,
conv_layout: bool,
span: tracing::Span,
}
impl ProjectedTransformer {
pub fn new(
input_dim: usize,
output_dims: &[usize],
cfg: &Config,
vb: VarBuilder,
) -> Result<Self> {
let transformer = StreamingTransformer::new(cfg, vb.clone())?;
let input_proj = if input_dim == cfg.d_model {
None
} else {
let l = linear_no_bias(input_dim, cfg.d_model, vb.pp("input_proj"))?;
Some(l)
};
let mut output_projs = Vec::with_capacity(output_dims.len());
let vb_o = vb.pp("output_projs");
for (i, &output_dim) in output_dims.iter().enumerate() {
let output_proj = if output_dim == cfg.d_model {
None
} else {
let l = linear_no_bias(cfg.d_model, output_dim, vb_o.pp(i))?;
Some(l)
};
output_projs.push(output_proj)
}
Ok(Self {
transformer,
input_proj,
output_projs,
conv_layout: cfg.conv_layout,
span: tracing::span!(tracing::Level::TRACE, "proj-transformer"),
})
}
pub fn forward(&mut self, xs: &Tensor) -> Result<Vec<Tensor>> {
let _enter = self.span.enter();
let xs = if self.conv_layout {
xs.transpose(1, 2)?
} else {
xs.clone()
};
let xs = xs.apply(&self.input_proj.as_ref())?;
let xs = self.transformer.forward(&xs)?;
let mut ys = Vec::with_capacity(self.output_projs.len());
for output_proj in self.output_projs.iter() {
let ys_ = xs.apply(&output_proj.as_ref())?;
let ys_ = if self.conv_layout {
ys_.transpose(1, 2)?
} else {
ys_
};
ys.push(ys_)
}
Ok(ys)
}
}
impl StreamingModule for ProjectedTransformer {
fn reset_state(&mut self) {
self.transformer.reset_state()
}
fn step(&mut self, xs: &StreamTensor) -> Result<StreamTensor> {
let xs = xs.apply(&|x: &Tensor| {
if self.conv_layout {
x.transpose(1, 2)
} else {
Ok(x.clone())
}
})?;
let xs = xs.apply(&self.input_proj.as_ref())?;
let xs = self.transformer.step(&xs)?;
let ys = xs.apply(&self.output_projs[0].as_ref())?;
ys.apply(&|y: &Tensor| {
if self.conv_layout {
y.transpose(1, 2)
} else {
Ok(y.clone())
}
})
}
}
#[cfg(feature = "flash-attn")]
fn flash_attn(
q: &Tensor,
k: &Tensor,
v: &Tensor,
softmax_scale: f32,
causal: bool,
) -> Result<Tensor> {
candle_flash_attn::flash_attn(q, k, v, softmax_scale, causal)
}
#[cfg(not(feature = "flash-attn"))]
fn flash_attn(_: &Tensor, _: &Tensor, _: &Tensor, _: f32, _: bool) -> Result<Tensor> {
unimplemented!("compile with '--features flash-attn'")
}
| candle/candle-transformers/src/models/mimi/transformer.rs/0 | {
"file_path": "candle/candle-transformers/src/models/mimi/transformer.rs",
"repo_id": "candle",
"token_count": 14210
} |
/// Mistral LLM, https://github.com/mistralai/mistral-src
use crate::models::{
mistral::Config,
with_tracing::{linear_no_bias, Linear, RmsNorm},
};
use crate::utils::repeat_kv;
use candle::{DType, Device, Module, Result, Tensor};
use candle_nn::{Activation, VarBuilder};
use std::sync::Arc;
#[derive(Debug, Clone)]
struct RotaryEmbedding {
sin: Tensor,
cos: Tensor,
}
impl RotaryEmbedding {
fn new(dtype: DType, cfg: &Config, dev: &Device) -> Result<Self> {
let rope_theta = cfg.rope_theta as f32;
let dim = cfg.hidden_size / cfg.num_attention_heads;
let max_seq_len = cfg.max_position_embeddings;
let inv_freq: Vec<_> = (0..dim)
.step_by(2)
.map(|i| 1f32 / rope_theta.powf(i as f32 / dim as f32))
.collect();
let inv_freq_len = inv_freq.len();
let inv_freq = Tensor::from_vec(inv_freq, (1, inv_freq_len), dev)?.to_dtype(dtype)?;
let t = Tensor::arange(0u32, max_seq_len as u32, dev)?
.to_dtype(dtype)?
.reshape((max_seq_len, 1))?;
let freqs = t.matmul(&inv_freq)?;
Ok(Self {
sin: freqs.sin()?,
cos: freqs.cos()?,
})
}
fn apply_rotary_emb_qkv(
&self,
q: &Tensor,
k: &Tensor,
seqlen_offset: usize,
) -> Result<(Tensor, Tensor)> {
let (_b_sz, _h, seq_len, _n_embd) = q.dims4()?;
let cos = self.cos.narrow(0, seqlen_offset, seq_len)?;
let sin = self.sin.narrow(0, seqlen_offset, seq_len)?;
let q_embed = candle_nn::rotary_emb::rope(q, &cos, &sin)?;
let k_embed = candle_nn::rotary_emb::rope(k, &cos, &sin)?;
Ok((q_embed, k_embed))
}
}
#[derive(Debug, Clone)]
#[allow(clippy::upper_case_acronyms)]
struct MLP {
gate_proj: Linear,
up_proj: Linear,
down_proj: Linear,
act_fn: Activation,
}
impl MLP {
fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let hidden_sz = cfg.hidden_size;
let intermediate_sz = cfg.intermediate_size;
let gate_proj = linear_no_bias(hidden_sz, intermediate_sz, vb.pp("gate_proj"))?;
let up_proj = linear_no_bias(hidden_sz, intermediate_sz, vb.pp("up_proj"))?;
let down_proj = linear_no_bias(intermediate_sz, hidden_sz, vb.pp("down_proj"))?;
Ok(Self {
gate_proj,
up_proj,
down_proj,
act_fn: cfg.hidden_act,
})
}
}
impl Module for MLP {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let lhs = xs.apply(&self.gate_proj)?.apply(&self.act_fn)?;
let rhs = xs.apply(&self.up_proj)?;
(lhs * rhs)?.apply(&self.down_proj)
}
}
#[derive(Debug, Clone)]
struct Attention {
q_proj: Linear,
k_proj: Linear,
v_proj: Linear,
o_proj: Linear,
num_heads: usize,
num_kv_heads: usize,
num_kv_groups: usize,
head_dim: usize,
hidden_size: usize,
rotary_emb: Arc<RotaryEmbedding>,
}
impl Attention {
fn new(rotary_emb: Arc<RotaryEmbedding>, cfg: &Config, vb: VarBuilder) -> Result<Self> {
let hidden_sz = cfg.hidden_size;
let num_heads = cfg.num_attention_heads;
let num_kv_heads = cfg.num_key_value_heads;
let num_kv_groups = num_heads / num_kv_heads;
let head_dim = hidden_sz / num_heads;
let q_proj = linear_no_bias(hidden_sz, num_heads * head_dim, vb.pp("q_proj"))?;
let k_proj = linear_no_bias(hidden_sz, num_kv_heads * head_dim, vb.pp("k_proj"))?;
let v_proj = linear_no_bias(hidden_sz, num_kv_heads * head_dim, vb.pp("v_proj"))?;
let o_proj = linear_no_bias(num_heads * head_dim, hidden_sz, vb.pp("o_proj"))?;
Ok(Self {
q_proj,
k_proj,
v_proj,
o_proj,
num_heads,
num_kv_heads,
num_kv_groups,
head_dim,
hidden_size: hidden_sz,
rotary_emb,
})
}
fn forward(
&mut self,
xs: &Tensor,
attention_mask: Option<&Tensor>,
seqlen_offset: usize,
) -> Result<Tensor> {
let (b_sz, q_len, _) = xs.dims3()?;
let query_states = self.q_proj.forward(xs)?;
let key_states = self.k_proj.forward(xs)?;
let value_states = self.v_proj.forward(xs)?;
let query_states = query_states
.reshape((b_sz, q_len, self.num_heads, self.head_dim))?
.transpose(1, 2)?
.contiguous()?;
let key_states = key_states
.reshape((b_sz, q_len, self.num_kv_heads, self.head_dim))?
.transpose(1, 2)?
.contiguous()?;
let value_states = value_states
.reshape((b_sz, q_len, self.num_kv_heads, self.head_dim))?
.transpose(1, 2)?;
let (query_states, key_states) =
self.rotary_emb
.apply_rotary_emb_qkv(&query_states, &key_states, seqlen_offset)?;
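        // Grouped-query attention: each key/value head is shared by
        // num_kv_groups query heads, so K and V are tiled up to num_heads
        // before the attention matmuls.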
let key_states = repeat_kv(key_states, self.num_kv_groups)?;
let value_states = repeat_kv(value_states, self.num_kv_groups)?;
let scale = 1f64 / f64::sqrt(self.head_dim as f64);
let attn_weights = (query_states.matmul(&key_states.transpose(2, 3)?)? * scale)?;
let attn_weights = match attention_mask {
None => attn_weights,
Some(mask) => attn_weights.broadcast_add(mask)?,
};
let attn_weights = candle_nn::ops::softmax_last_dim(&attn_weights)?;
let attn_output = attn_weights.matmul(&value_states)?;
attn_output
.transpose(1, 2)?
.reshape((b_sz, q_len, self.hidden_size))?
.apply(&self.o_proj)
}
}
#[derive(Debug, Clone)]
struct DecoderLayer {
self_attn: Attention,
mlp: MLP,
input_layernorm: RmsNorm,
post_attention_layernorm: RmsNorm,
}
impl DecoderLayer {
fn new(rotary_emb: Arc<RotaryEmbedding>, cfg: &Config, vb: VarBuilder) -> Result<Self> {
let self_attn = Attention::new(rotary_emb, cfg, vb.pp("self_attn"))?;
let mlp = MLP::new(cfg, vb.pp("mlp"))?;
let input_layernorm =
RmsNorm::new(cfg.hidden_size, cfg.rms_norm_eps, vb.pp("input_layernorm"))?;
let post_attention_layernorm = RmsNorm::new(
cfg.hidden_size,
cfg.rms_norm_eps,
vb.pp("post_attention_layernorm"),
)?;
Ok(Self {
self_attn,
mlp,
input_layernorm,
post_attention_layernorm,
})
}
fn forward(
&mut self,
xs: &Tensor,
attention_mask: Option<&Tensor>,
seqlen_offset: usize,
) -> Result<Tensor> {
let residual = xs;
let xs = self.input_layernorm.forward(xs)?;
let xs = self.self_attn.forward(&xs, attention_mask, seqlen_offset)?;
let xs = (xs + residual)?;
let residual = &xs;
let xs = xs.apply(&self.post_attention_layernorm)?.apply(&self.mlp)?;
residual + xs
}
}
#[derive(Debug, Clone)]
pub struct Model {
embed_tokens: candle_nn::Embedding,
layers: Vec<DecoderLayer>,
norm: RmsNorm,
pub cfg: Config,
}
impl Model {
pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let embed_tokens =
candle_nn::embedding(cfg.vocab_size, cfg.hidden_size, vb.pp("embed_tokens"))?;
let rotary_emb = Arc::new(RotaryEmbedding::new(vb.dtype(), cfg, vb.device())?);
let mut layers = Vec::with_capacity(cfg.num_hidden_layers);
let vb_l = vb.pp("layers");
for layer_idx in 0..cfg.num_hidden_layers {
let layer = DecoderLayer::new(rotary_emb.clone(), cfg, vb_l.pp(layer_idx))?;
layers.push(layer)
}
let norm = RmsNorm::new(cfg.hidden_size, cfg.rms_norm_eps, vb.pp("norm"))?;
Ok(Self {
embed_tokens,
layers,
norm,
cfg: cfg.clone(),
})
}
// Attn mask used to mask out padding tokens
pub fn forward(
&mut self,
attn_mask: &Tensor,
input_ids: &Tensor,
dtype: DType,
) -> Result<Tensor> {
let mut xs = self.embed_tokens.forward(input_ids)?;
// Expand to 4d mask for sdpa
let attn_mask = prepare_4d_attention_mask(attn_mask, dtype, None)?;
for layer in self.layers.iter_mut() {
xs = layer.forward(&xs, Some(&attn_mask), 0)?;
}
// Return hiddens instead of logits
xs.apply(&self.norm)
}
}
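// Expands a (bsz, src_len) padding mask of 1s (attend) and 0s (ignore) into a
// broadcastable (bsz, 1, tgt_len, src_len) additive mask where the ignored
// positions are set to the dtype's minimum value.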
fn prepare_4d_attention_mask(
mask: &Tensor,
dtype: DType,
tgt_len: Option<usize>,
) -> Result<Tensor> {
let bsz = mask.dims()[0];
let src_len = mask.dims()[1];
let tgt_len = tgt_len.unwrap_or(src_len);
let expanded_mask = mask
.unsqueeze(1)?
.unsqueeze(2)?
.expand((bsz, 1, tgt_len, src_len))?
.to_dtype(dtype)?;
let inverted_mask = (1.0 - expanded_mask)?;
(inverted_mask * get_dtype_min_val(dtype))?.to_dtype(dtype)
}
fn get_dtype_min_val(dtype: DType) -> f64 {
match dtype {
DType::F32 => f32::MIN as f64,
DType::F64 => f64::MIN,
_ => panic!("Unsupported data type"),
}
}
| candle/candle-transformers/src/models/nvembed_v2/embedding.rs/0 | {
"file_path": "candle/candle-transformers/src/models/nvembed_v2/embedding.rs",
"repo_id": "candle",
"token_count": 4768
} |
//! Quantized llama model implementation.
//!
//! This provides a quantized implementation of the llama language model architecture.
//! The model uses parameter-efficient quantization to reduce memory usage
//! while maintaining model quality.
//!
//! Key characteristics:
//! - Transformer decoder architecture
//! - Support for 2/3/4/8-bit quantization
//! - Optimized memory usage through quantization
//! - Configurable model sizes and parameter counts
//!
//! - 💻 [GH Link](https://github.com/facebookresearch/llama)
//! - 📝 [Paper](https://arxiv.org/abs/2302.13971)
//!
//! 
//!
use std::collections::HashMap;
use crate::quantized_nn::RmsNorm;
use candle::quantized::QTensor;
use candle::quantized::{ggml_file, gguf_file};
use candle::{DType, Device, IndexOp, Result, Tensor};
use candle_nn::{Embedding, Module};
pub const MAX_SEQ_LEN: usize = 4096;
// QMatMul wrapper adding some tracing.
#[derive(Debug, Clone)]
struct QMatMul {
inner: candle::quantized::QMatMul,
span: tracing::Span,
}
impl QMatMul {
fn from_qtensor(qtensor: QTensor) -> Result<Self> {
let inner = candle::quantized::QMatMul::from_qtensor(qtensor)?;
let span = tracing::span!(tracing::Level::TRACE, "qmatmul");
Ok(Self { inner, span })
}
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let _enter = self.span.enter();
self.inner.forward(xs)
}
}
#[derive(Debug, Clone)]
struct Mlp {
feed_forward_w1: QMatMul,
feed_forward_w2: QMatMul,
feed_forward_w3: QMatMul,
}
impl Module for Mlp {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let w1 = self.feed_forward_w1.forward(xs)?;
let w3 = self.feed_forward_w3.forward(xs)?;
self.feed_forward_w2
.forward(&(candle_nn::ops::silu(&w1)? * w3)?)
}
}
#[derive(Debug, Clone)]
enum MlpOrMoe {
Mlp(Mlp),
MoE {
n_expert_used: usize,
feed_forward_gate_inp: QMatMul,
experts: Vec<Mlp>,
},
}
impl Module for MlpOrMoe {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
match self {
Self::MoE {
feed_forward_gate_inp,
experts,
n_expert_used,
} => {
let (b_size, seq_len, hidden_dim) = xs.dims3()?;
let xs = xs.reshape(((), hidden_dim))?;
let router_logits = feed_forward_gate_inp.forward(&xs)?;
let routing_weights = candle_nn::ops::softmax_last_dim(&router_logits)?;
// In order to extract topk, we extract the data from the tensor and manipulate it
// directly. Maybe we will want to use some custom ops instead at some point.
let routing_weights = routing_weights.to_dtype(DType::F32)?.to_vec2::<f32>()?;
// routing_weights, selected_experts = torch.topk(routing_weights, self.top_k, dim=-1)
// top_x contains the row indexes to evaluate for each expert.
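                // e.g. with n_expert_used = 2, a token whose two largest routing
                // weights belong to experts 3 and 7 adds its row index to
                // top_x[3] and top_x[7], and its weights (normalized by their
                // sum) to selected_rws[3] and selected_rws[7].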
let mut top_x = vec![vec![]; experts.len()];
let mut selected_rws = vec![vec![]; experts.len()];
for (row_idx, rw) in routing_weights.iter().enumerate() {
let mut dst = (0..rw.len() as u32).collect::<Vec<u32>>();
dst.sort_by(|&i, &j| rw[j as usize].total_cmp(&rw[i as usize]));
let mut sum_routing_weights = 0f32;
for &expert_idx in dst.iter().take(*n_expert_used) {
let expert_idx = expert_idx as usize;
let routing_weight = rw[expert_idx];
sum_routing_weights += routing_weight;
top_x[expert_idx].push(row_idx as u32);
}
for &expert_idx in dst.iter().take(*n_expert_used) {
let expert_idx = expert_idx as usize;
let routing_weight = rw[expert_idx];
selected_rws[expert_idx].push(routing_weight / sum_routing_weights)
}
}
// routing_weights /= routing_weights.sum(dim=-1, keepdim=True)
// expert_mask = torch.nn.functional.one_hot(selected_experts, num_classes=self.num_experts).permute(2, 1, 0)
let mut ys = xs.zeros_like()?;
for (expert_idx, expert_layer) in experts.iter().enumerate() {
let top_x = &top_x[expert_idx];
if top_x.is_empty() {
continue;
}
let top_x = Tensor::new(top_x.as_slice(), xs.device())?;
let selected_rws =
Tensor::new(selected_rws[expert_idx].as_slice(), xs.device())?
.reshape(((), 1))?;
// Index the correct hidden states and compute the expert hidden state for
// the current expert. We need to make sure to multiply the output hidden
// states by `routing_weights` on the corresponding tokens (top-1 and top-2)
let current_state = xs.index_select(&top_x, 0)?.reshape(((), hidden_dim))?;
// current_hidden_states = expert_layer(current_state, routing_weights[top_x_list, idx_list, None])
                    let current_hidden_states = expert_layer.forward(&current_state)?;
                    let current_hidden_states =
                        current_hidden_states.broadcast_mul(&selected_rws)?;
                    ys = ys.index_add(&top_x, &current_hidden_states, 0)?;
}
let ys = ys.reshape((b_size, seq_len, hidden_dim))?;
Ok(ys)
}
Self::Mlp(mlp) => mlp.forward(xs),
}
}
}
#[derive(Debug, Clone)]
struct LayerWeights {
attention_wq: QMatMul,
attention_wk: QMatMul,
attention_wv: QMatMul,
attention_wo: QMatMul,
attention_norm: RmsNorm,
mlp_or_moe: MlpOrMoe,
ffn_norm: RmsNorm,
n_head: usize,
n_kv_head: usize,
head_dim: usize,
cos: Tensor,
sin: Tensor,
neg_inf: Tensor,
kv_cache: Option<(Tensor, Tensor)>,
span_attn: tracing::Span,
span_rot: tracing::Span,
span_mlp: tracing::Span,
}
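// Replaces the entries of `on_false` by `on_true` wherever `mask` is non-zero;
// used below to set future positions to -inf before the softmax.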
fn masked_fill(on_false: &Tensor, mask: &Tensor, on_true: &Tensor) -> Result<Tensor> {
let shape = mask.shape();
let m = mask.where_cond(&on_true.broadcast_as(shape.dims())?, on_false)?;
Ok(m)
}
impl LayerWeights {
fn apply_rotary_emb(&self, x: &Tensor, index_pos: usize) -> Result<Tensor> {
let _enter = self.span_rot.enter();
let (_b_sz, _n_head, seq_len, _n_embd) = x.dims4()?;
let cos = self.cos.narrow(0, index_pos, seq_len)?;
let sin = self.sin.narrow(0, index_pos, seq_len)?;
// The call to contiguous below is only necessary when processing the prompt.
// When the seq_len is 1 in the inference loop, this is a no-op.
candle_nn::rotary_emb::rope_i(&x.contiguous()?, &cos, &sin)
}
fn forward_attn(
&mut self,
x: &Tensor,
mask: Option<&Tensor>,
index_pos: usize,
) -> Result<Tensor> {
let _enter = self.span_attn.enter();
let (b_sz, seq_len, n_embd) = x.dims3()?;
let q = self.attention_wq.forward(x)?;
let k = self.attention_wk.forward(x)?;
let v = self.attention_wv.forward(x)?;
let q = q
.reshape((b_sz, seq_len, self.n_head, self.head_dim))?
.transpose(1, 2)?;
let k = k
.reshape((b_sz, seq_len, self.n_kv_head, self.head_dim))?
.transpose(1, 2)?;
let v = v
.reshape((b_sz, seq_len, self.n_kv_head, self.head_dim))?
.transpose(1, 2)?
// This call to contiguous ensures that the fast kernel can be called below. It's
// actually a no-op except when processing the initial prompt so has no significant
// impact on performance.
.contiguous()?;
let q = self.apply_rotary_emb(&q, index_pos)?;
let k = self.apply_rotary_emb(&k, index_pos)?;
let (k, v) = match &self.kv_cache {
None => (k, v),
Some((k_cache, v_cache)) => {
if index_pos == 0 {
(k, v)
} else {
let k = Tensor::cat(&[k_cache, &k], 2)?;
let v = Tensor::cat(&[v_cache, &v], 2)?;
(k, v)
}
}
};
self.kv_cache = Some((k.clone(), v.clone()));
let y = if q.device().is_metal() && seq_len == 1 {
// SDPA will do MQA for us
candle_nn::ops::sdpa(&q, &k, &v, 1. / (self.head_dim as f32).sqrt(), 1.)?
} else {
            // Support for MQA, useful for 70B models and Mistral.
let k = crate::utils::repeat_kv(k, self.n_head / self.n_kv_head)?;
let v = crate::utils::repeat_kv(v, self.n_head / self.n_kv_head)?;
let att = (q.matmul(&k.t()?)? / (self.head_dim as f64).sqrt())?;
let att = match mask {
None => att,
Some(mask) => {
let mask = mask.broadcast_as(att.shape())?;
masked_fill(&att, &mask, &self.neg_inf)?
}
};
let att = candle_nn::ops::softmax_last_dim(&att)?;
            // Convert to contiguous as matmul doesn't support strided value tensors for now.
att.matmul(&v.contiguous()?)?
};
let y = y.transpose(1, 2)?.reshape(&[b_sz, seq_len, n_embd])?;
let y = self.attention_wo.forward(&y)?;
Ok(y)
}
}
#[derive(Debug, Clone)]
pub struct ModelWeights {
tok_embeddings: Embedding,
layers: Vec<LayerWeights>,
norm: RmsNorm,
output: QMatMul,
masks: HashMap<usize, Tensor>,
span: tracing::Span,
span_output: tracing::Span,
}
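// Precomputes the rotary embedding tables: for each even channel index i,
// theta_i = 1 / freq_base^(i / head_dim), and row p of the returned (cos, sin)
// tensors holds cos(p * theta_i) / sin(p * theta_i) for p in 0..MAX_SEQ_LEN.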
fn precomput_freqs_cis(
head_dim: usize,
freq_base: f32,
device: &Device,
) -> Result<(Tensor, Tensor)> {
let theta: Vec<_> = (0..head_dim)
.step_by(2)
.map(|i| 1f32 / freq_base.powf(i as f32 / head_dim as f32))
.collect();
let theta = Tensor::new(theta.as_slice(), device)?;
let idx_theta = Tensor::arange(0, MAX_SEQ_LEN as u32, device)?
.to_dtype(DType::F32)?
.reshape((MAX_SEQ_LEN, 1))?
.matmul(&theta.reshape((1, theta.elem_count()))?)?;
let cos = idx_theta.cos()?;
let sin = idx_theta.sin()?;
Ok((cos, sin))
}
impl ModelWeights {
pub fn from_ggml(mut ct: ggml_file::Content, gqa: usize) -> Result<Self> {
let head_dim = (ct.hparams.n_embd / ct.hparams.n_head) as usize;
let (cos, sin) = precomput_freqs_cis(head_dim, 10000., &ct.device)?;
let neg_inf = Tensor::new(f32::NEG_INFINITY, &ct.device)?;
let tok_embeddings = ct.remove("tok_embeddings.weight")?;
let tok_embeddings = tok_embeddings.dequantize(&ct.device)?;
let norm = RmsNorm::from_qtensor(ct.remove("norm.weight")?, 1e-5)?;
let output = ct.remove("output.weight")?;
let mut layers = Vec::with_capacity(ct.hparams.n_layer as usize);
for layer_idx in 0..ct.hparams.n_layer {
let prefix = format!("layers.{layer_idx}");
let attention_wq = ct.remove(&format!("{prefix}.attention.wq.weight"))?;
let attention_wk = ct.remove(&format!("{prefix}.attention.wk.weight"))?;
let attention_wv = ct.remove(&format!("{prefix}.attention.wv.weight"))?;
let attention_wo = ct.remove(&format!("{prefix}.attention.wo.weight"))?;
let mlp_or_moe = {
let feed_forward_w1 = ct.remove(&format!("{prefix}.feed_forward.w1.weight"))?;
let feed_forward_w2 = ct.remove(&format!("{prefix}.feed_forward.w2.weight"))?;
let feed_forward_w3 = ct.remove(&format!("{prefix}.feed_forward.w3.weight"))?;
MlpOrMoe::Mlp(Mlp {
feed_forward_w1: QMatMul::from_qtensor(feed_forward_w1)?,
feed_forward_w2: QMatMul::from_qtensor(feed_forward_w2)?,
feed_forward_w3: QMatMul::from_qtensor(feed_forward_w3)?,
})
};
let attention_norm = ct.remove(&format!("{prefix}.attention_norm.weight"))?;
let ffn_norm = ct.remove(&format!("{prefix}.ffn_norm.weight"))?;
let span_attn = tracing::span!(tracing::Level::TRACE, "attn");
let span_rot = tracing::span!(tracing::Level::TRACE, "attn-rot");
let span_mlp = tracing::span!(tracing::Level::TRACE, "attn-mlp");
layers.push(LayerWeights {
attention_wq: QMatMul::from_qtensor(attention_wq)?,
attention_wk: QMatMul::from_qtensor(attention_wk)?,
attention_wv: QMatMul::from_qtensor(attention_wv)?,
attention_wo: QMatMul::from_qtensor(attention_wo)?,
attention_norm: RmsNorm::from_qtensor(attention_norm, 1e-5)?,
mlp_or_moe,
ffn_norm: RmsNorm::from_qtensor(ffn_norm, 1e-5)?,
n_head: ct.hparams.n_head as usize,
n_kv_head: ct.hparams.n_head as usize / gqa,
head_dim: (ct.hparams.n_embd / ct.hparams.n_head) as usize,
cos: cos.clone(),
sin: sin.clone(),
neg_inf: neg_inf.clone(),
kv_cache: None,
span_attn,
span_rot,
span_mlp,
})
}
let span = tracing::span!(tracing::Level::TRACE, "model");
let span_output = tracing::span!(tracing::Level::TRACE, "output");
Ok(Self {
tok_embeddings: Embedding::new(tok_embeddings, ct.hparams.n_embd as usize),
layers,
norm,
output: QMatMul::from_qtensor(output)?,
masks: HashMap::new(),
span,
span_output,
})
}
pub fn from_gguf<R: std::io::Seek + std::io::Read>(
ct: gguf_file::Content,
reader: &mut R,
device: &Device,
) -> Result<Self> {
let md_get = |s: &str| match ct.metadata.get(s) {
None => candle::bail!("cannot find {s} in metadata"),
Some(v) => Ok(v),
};
// Parameter extraction from metadata.
let n_expert = md_get("llama.expert_count")
.and_then(|v| v.to_u32())
.unwrap_or(0) as usize;
let n_expert_used = md_get("llama.expert_used_count")
.and_then(|v| v.to_u32())
.unwrap_or(0) as usize;
let head_count = md_get("llama.attention.head_count")?.to_u32()? as usize;
let head_count_kv = md_get("llama.attention.head_count_kv")?.to_u32()? as usize;
let block_count = md_get("llama.block_count")?.to_u32()? as usize;
let embedding_length = md_get("llama.embedding_length")?.to_u32()? as usize;
let rope_dim = md_get("llama.rope.dimension_count")?.to_u32()? as usize;
        // Strangely this value is generally 1e-6 in GGUF files but used to be 1e-5 by default.
let rms_norm_eps = md_get("llama.attention.layer_norm_rms_epsilon")?.to_f32()? as f64;
let rope_freq_base = md_get("llama.rope.freq_base")
.and_then(|m| m.to_f32())
.unwrap_or(10000f32);
let (cos, sin) = precomput_freqs_cis(rope_dim, rope_freq_base, device)?;
let neg_inf = Tensor::new(f32::NEG_INFINITY, device)?;
let tok_embeddings_q = ct.tensor(reader, "token_embd.weight", device)?;
let tok_embeddings = tok_embeddings_q.dequantize(device)?;
let norm = RmsNorm::from_qtensor(
ct.tensor(reader, "output_norm.weight", device)?,
rms_norm_eps,
)?;
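        // Some GGUF files do not ship a separate output.weight (e.g. models with
        // tied embeddings); fall back to reusing the token embedding tensor as
        // the output projection in that case.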
let output = match ct.tensor(reader, "output.weight", device) {
Ok(tensor) => tensor,
Err(_) => tok_embeddings_q,
};
let mut layers = Vec::with_capacity(block_count);
for layer_idx in 0..block_count {
let prefix = format!("blk.{layer_idx}");
let attention_wq = ct.tensor(reader, &format!("{prefix}.attn_q.weight"), device)?;
let attention_wk = ct.tensor(reader, &format!("{prefix}.attn_k.weight"), device)?;
let attention_wv = ct.tensor(reader, &format!("{prefix}.attn_v.weight"), device)?;
let attention_wo =
ct.tensor(reader, &format!("{prefix}.attn_output.weight"), device)?;
let mlp_or_moe = if n_expert <= 1 {
let feed_forward_w1 =
ct.tensor(reader, &format!("{prefix}.ffn_gate.weight"), device)?;
let feed_forward_w2 =
ct.tensor(reader, &format!("{prefix}.ffn_down.weight"), device)?;
let feed_forward_w3 =
ct.tensor(reader, &format!("{prefix}.ffn_up.weight"), device)?;
MlpOrMoe::Mlp(Mlp {
feed_forward_w1: QMatMul::from_qtensor(feed_forward_w1)?,
feed_forward_w2: QMatMul::from_qtensor(feed_forward_w2)?,
feed_forward_w3: QMatMul::from_qtensor(feed_forward_w3)?,
})
} else {
let feed_forward_gate_inp =
ct.tensor(reader, &format!("{prefix}.ffn_gate_inp.weight"), device)?;
let mut experts = Vec::with_capacity(n_expert);
for i in 0..n_expert {
let feed_forward_w1 =
ct.tensor(reader, &format!("{prefix}.ffn_gate.{i}.weight"), device)?;
let feed_forward_w2 =
ct.tensor(reader, &format!("{prefix}.ffn_down.{i}.weight"), device)?;
let feed_forward_w3 =
ct.tensor(reader, &format!("{prefix}.ffn_up.{i}.weight"), device)?;
experts.push(Mlp {
feed_forward_w1: QMatMul::from_qtensor(feed_forward_w1)?,
feed_forward_w2: QMatMul::from_qtensor(feed_forward_w2)?,
feed_forward_w3: QMatMul::from_qtensor(feed_forward_w3)?,
})
}
MlpOrMoe::MoE {
n_expert_used,
feed_forward_gate_inp: QMatMul::from_qtensor(feed_forward_gate_inp)?,
experts,
}
};
let attention_norm =
ct.tensor(reader, &format!("{prefix}.attn_norm.weight"), device)?;
let ffn_norm = ct.tensor(reader, &format!("{prefix}.ffn_norm.weight"), device)?;
let span_attn = tracing::span!(tracing::Level::TRACE, "attn");
let span_rot = tracing::span!(tracing::Level::TRACE, "attn-rot");
let span_mlp = tracing::span!(tracing::Level::TRACE, "attn-mlp");
layers.push(LayerWeights {
attention_wq: QMatMul::from_qtensor(attention_wq)?,
attention_wk: QMatMul::from_qtensor(attention_wk)?,
attention_wv: QMatMul::from_qtensor(attention_wv)?,
attention_wo: QMatMul::from_qtensor(attention_wo)?,
attention_norm: RmsNorm::from_qtensor(attention_norm, rms_norm_eps)?,
mlp_or_moe,
ffn_norm: RmsNorm::from_qtensor(ffn_norm, rms_norm_eps)?,
n_head: head_count,
n_kv_head: head_count_kv,
head_dim: embedding_length / head_count,
cos: cos.clone(),
sin: sin.clone(),
neg_inf: neg_inf.clone(),
kv_cache: None,
span_attn,
span_rot,
span_mlp,
})
}
let span = tracing::span!(tracing::Level::TRACE, "model");
let span_output = tracing::span!(tracing::Level::TRACE, "output");
Ok(Self {
tok_embeddings: Embedding::new(tok_embeddings, embedding_length),
layers,
norm,
output: QMatMul::from_qtensor(output)?,
masks: HashMap::new(),
span,
span_output,
})
}
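    // Builds (and caches per sequence length) a causal mask where entry (i, j)
    // is 1 when j > i, e.g. for t = 3:
    //   0 1 1
    //   0 0 1
    //   0 0 0
    // The 1 entries are later turned into -inf scores by `masked_fill`.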
fn mask(&mut self, t: usize, device: &Device) -> Result<Tensor> {
if let Some(mask) = self.masks.get(&t) {
Ok(mask.clone())
} else {
let mask: Vec<_> = (0..t)
.flat_map(|i| (0..t).map(move |j| u8::from(j > i)))
.collect();
let mask = Tensor::from_slice(&mask, (t, t), device)?;
self.masks.insert(t, mask.clone());
Ok(mask)
}
}
pub fn forward(&mut self, x: &Tensor, index_pos: usize) -> Result<Tensor> {
let (_b_sz, seq_len) = x.dims2()?;
let mask = if seq_len == 1 {
None
} else {
Some(self.mask(seq_len, x.device())?)
};
let _enter = self.span.enter();
let mut layer_in = self.tok_embeddings.forward(x)?;
for layer in self.layers.iter_mut() {
let x = layer_in;
let residual = &x;
let x = layer.attention_norm.forward(&x)?;
let attn = layer.forward_attn(&x, mask.as_ref(), index_pos)?;
let x = (attn + residual)?;
// MLP
let _enter = layer.span_mlp.enter();
let residual = &x;
let x = layer.ffn_norm.forward(&x)?;
let x = layer.mlp_or_moe.forward(&x)?;
let x = (x + residual)?;
layer_in = x
}
let x = self.norm.forward(&layer_in)?;
let x = x.i((.., seq_len - 1, ..))?;
let _enter = self.span_output.enter();
self.output.forward(&x)
}
}
| candle/candle-transformers/src/models/quantized_llama.rs/0 | {
"file_path": "candle/candle-transformers/src/models/quantized_llama.rs",
"repo_id": "candle",
"token_count": 11486
} |
//! Qwen2 model implementation with Mixture of Experts support.
//!
//! Qwen2 is a large language model using sparse Mixture of Experts (MoE).
//! This implementation provides support for sparsely activated MoE layers.
//!
//! Key characteristics:
//! - Mixture of Experts architecture
//! - Sparse expert activation
//! - Shared expert routing mechanism
//! - Grouped query attention (GQA)
//! - RMSNorm for layer normalization
//! - Rotary positional embeddings (RoPE)
//!
//! References:
//! - [Qwen2 Paper](https://arxiv.org/abs/2401.08985)
//! - [Model Card](https://huggingface.co/Qwen/Qwen2-7B-beta)
//!
use crate::models::with_tracing::{linear, linear_no_bias, Linear, RmsNorm};
use candle::{DType, Device, Module, Result, Tensor, D};
use candle_nn::{Activation, VarBuilder};
use std::sync::Arc;
#[derive(Debug, Clone, PartialEq, serde::Deserialize)]
pub struct Config {
pub vocab_size: usize,
pub hidden_size: usize,
pub intermediate_size: usize,
pub num_hidden_layers: usize,
pub num_attention_heads: usize,
pub num_key_value_heads: usize,
pub max_position_embeddings: usize,
pub sliding_window: usize,
pub max_window_layers: usize,
pub tie_word_embeddings: bool,
pub rope_theta: f64,
pub rms_norm_eps: f64,
pub use_sliding_window: bool,
pub hidden_act: Activation,
pub decoder_sparse_step: usize,
pub moe_intermediate_size: usize,
pub shared_expert_intermediate_size: usize,
pub num_experts_per_tok: usize,
pub num_experts: usize,
pub norm_topk_prob: bool,
}
#[derive(Debug, Clone)]
struct RotaryEmbedding {
sin: Tensor,
cos: Tensor,
}
impl RotaryEmbedding {
fn new(dtype: DType, cfg: &Config, dev: &Device) -> Result<Self> {
let dim = cfg.hidden_size / cfg.num_attention_heads;
let max_seq_len = cfg.max_position_embeddings;
let inv_freq: Vec<_> = (0..dim)
.step_by(2)
.map(|i| 1f32 / cfg.rope_theta.powf(i as f64 / dim as f64) as f32)
.collect();
let inv_freq_len = inv_freq.len();
let inv_freq = Tensor::from_vec(inv_freq, (1, inv_freq_len), dev)?.to_dtype(dtype)?;
let t = Tensor::arange(0u32, max_seq_len as u32, dev)?
.to_dtype(dtype)?
.reshape((max_seq_len, 1))?;
let freqs = t.matmul(&inv_freq)?;
Ok(Self {
sin: freqs.sin()?,
cos: freqs.cos()?,
})
}
fn apply_rotary_emb_qkv(
&self,
q: &Tensor,
k: &Tensor,
seqlen_offset: usize,
) -> Result<(Tensor, Tensor)> {
let (_b_sz, _h, seq_len, _n_embd) = q.dims4()?;
let cos = self.cos.narrow(0, seqlen_offset, seq_len)?;
let sin = self.sin.narrow(0, seqlen_offset, seq_len)?;
let q_embed = candle_nn::rotary_emb::rope(&q.contiguous()?, &cos, &sin)?;
let k_embed = candle_nn::rotary_emb::rope(&k.contiguous()?, &cos, &sin)?;
Ok((q_embed, k_embed))
}
}
#[derive(Debug, Clone)]
#[allow(clippy::upper_case_acronyms)]
struct MLP {
gate_proj: Linear,
up_proj: Linear,
down_proj: Linear,
act_fn: Activation,
}
impl MLP {
fn new(intermediate_sz: usize, cfg: &Config, vb: VarBuilder) -> Result<Self> {
let hidden_sz = cfg.hidden_size;
let gate_proj = linear_no_bias(hidden_sz, intermediate_sz, vb.pp("gate_proj"))?;
let up_proj = linear_no_bias(hidden_sz, intermediate_sz, vb.pp("up_proj"))?;
let down_proj = linear_no_bias(intermediate_sz, hidden_sz, vb.pp("down_proj"))?;
Ok(Self {
gate_proj,
up_proj,
down_proj,
act_fn: cfg.hidden_act,
})
}
}
impl Module for MLP {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let lhs = xs.apply(&self.gate_proj)?.apply(&self.act_fn)?;
let rhs = xs.apply(&self.up_proj)?;
(lhs * rhs)?.apply(&self.down_proj)
}
}
#[derive(Debug, Clone)]
struct Attention {
q_proj: Linear,
k_proj: Linear,
v_proj: Linear,
o_proj: Linear,
num_heads: usize,
num_kv_heads: usize,
num_kv_groups: usize,
head_dim: usize,
hidden_size: usize,
rotary_emb: Arc<RotaryEmbedding>,
kv_cache: Option<(Tensor, Tensor)>,
}
impl Attention {
fn new(rotary_emb: Arc<RotaryEmbedding>, cfg: &Config, vb: VarBuilder) -> Result<Self> {
let hidden_sz = cfg.hidden_size;
let num_heads = cfg.num_attention_heads;
let num_kv_heads = cfg.num_key_value_heads;
let num_kv_groups = num_heads / num_kv_heads;
let head_dim = hidden_sz / num_heads;
let q_proj = linear(hidden_sz, num_heads * head_dim, vb.pp("q_proj"))?;
let k_proj = linear(hidden_sz, num_kv_heads * head_dim, vb.pp("k_proj"))?;
let v_proj = linear(hidden_sz, num_kv_heads * head_dim, vb.pp("v_proj"))?;
let o_proj = linear_no_bias(num_heads * head_dim, hidden_sz, vb.pp("o_proj"))?;
Ok(Self {
q_proj,
k_proj,
v_proj,
o_proj,
num_heads,
num_kv_heads,
num_kv_groups,
head_dim,
hidden_size: hidden_sz,
rotary_emb,
kv_cache: None,
})
}
fn forward(
&mut self,
xs: &Tensor,
attention_mask: Option<&Tensor>,
seqlen_offset: usize,
) -> Result<Tensor> {
let (b_sz, q_len, _) = xs.dims3()?;
let query_states = self.q_proj.forward(xs)?;
let key_states = self.k_proj.forward(xs)?;
let value_states = self.v_proj.forward(xs)?;
let query_states = query_states
.reshape((b_sz, q_len, self.num_heads, self.head_dim))?
.transpose(1, 2)?;
let key_states = key_states
.reshape((b_sz, q_len, self.num_kv_heads, self.head_dim))?
.transpose(1, 2)?;
let value_states = value_states
.reshape((b_sz, q_len, self.num_kv_heads, self.head_dim))?
.transpose(1, 2)?;
let (query_states, key_states) =
self.rotary_emb
.apply_rotary_emb_qkv(&query_states, &key_states, seqlen_offset)?;
let (key_states, value_states) = match &self.kv_cache {
None => (key_states, value_states),
Some((prev_k, prev_v)) => {
let key_states = Tensor::cat(&[prev_k, &key_states], 2)?;
let value_states = Tensor::cat(&[prev_v, &value_states], 2)?;
(key_states, value_states)
}
};
self.kv_cache = Some((key_states.clone(), value_states.clone()));
let key_states = crate::utils::repeat_kv(key_states, self.num_kv_groups)?.contiguous()?;
let value_states =
crate::utils::repeat_kv(value_states, self.num_kv_groups)?.contiguous()?;
let attn_output = {
let scale = 1f64 / f64::sqrt(self.head_dim as f64);
let attn_weights = (query_states.matmul(&key_states.transpose(2, 3)?)? * scale)?;
let attn_weights = match attention_mask {
None => attn_weights,
Some(mask) => attn_weights.broadcast_add(mask)?,
};
let attn_weights = candle_nn::ops::softmax_last_dim(&attn_weights)?;
attn_weights.matmul(&value_states)?
};
attn_output
.transpose(1, 2)?
.reshape((b_sz, q_len, self.hidden_size))?
.apply(&self.o_proj)
}
fn clear_kv_cache(&mut self) {
self.kv_cache = None
}
}
// https://github.com/huggingface/transformers/blob/536ea2aca234fb48c5c69769431d643b0d93b233/src/transformers/models/qwen2_moe/modeling_qwen2_moe.py#L800
#[derive(Debug, Clone)]
struct SparseMoeBlock {
gate: Linear,
experts: Vec<MLP>,
shared_expert: MLP,
shared_expert_gate: Linear,
norm_topk_prob: bool,
num_experts_per_tok: usize,
}
impl SparseMoeBlock {
fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let gate = linear_no_bias(cfg.hidden_size, cfg.num_experts, vb.pp("gate"))?;
let mut experts = Vec::with_capacity(cfg.num_experts);
let vb_e = vb.pp("experts");
for idx in 0..cfg.num_experts {
let expert = MLP::new(cfg.moe_intermediate_size, cfg, vb_e.pp(idx))?;
experts.push(expert)
}
let shared_expert = MLP::new(
cfg.shared_expert_intermediate_size,
cfg,
vb.pp("shared_expert"),
)?;
let shared_expert_gate = linear_no_bias(cfg.hidden_size, 1, vb.pp("shared_expert_gate"))?;
Ok(Self {
gate,
experts,
shared_expert,
shared_expert_gate,
norm_topk_prob: cfg.norm_topk_prob,
num_experts_per_tok: cfg.num_experts_per_tok,
})
}
}
impl Module for SparseMoeBlock {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let (b_size, seq_len, hidden_dim) = xs.dims3()?;
let xs = xs.reshape(((), hidden_dim))?;
let router_logits = xs.apply(&self.gate)?;
let routing_weights = candle_nn::ops::softmax_last_dim(&router_logits)?;
// In order to extract topk, we extract the data from the tensor and manipulate it
// directly. Maybe we will want to use some custom ops instead at some point.
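        // arg_sort_last_dim(false) sorts each row in descending order, so the
        // first num_experts_per_tok columns are the indices of the top-k experts
        // per token; `gather` then picks out the matching routing weights.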
let experts_per_tok = routing_weights
.arg_sort_last_dim(false)?
.narrow(D::Minus1, 0, self.num_experts_per_tok)?
.contiguous()?;
let routing_weights = routing_weights.gather(&experts_per_tok, D::Minus1)?;
// routing_weights, selected_experts = torch.topk(routing_weights, self.top_k, dim=-1)
// top_x contains the row indexes to evaluate for each expert.
let routing_weights = routing_weights.to_dtype(DType::F32)?.to_vec2::<f32>()?;
let experts_per_tok = experts_per_tok.to_vec2::<u32>()?;
let mut top_x = vec![vec![]; self.experts.len()];
let mut selected_experts = vec![vec![]; self.experts.len()];
for (row_idx, (rw, expert_idxs)) in routing_weights
.iter()
.zip(experts_per_tok.iter())
.enumerate()
{
let sum_rw = rw.iter().sum::<f32>();
for (&rw, &expert_idx) in rw.iter().zip(expert_idxs.iter()) {
top_x[expert_idx as usize].push(row_idx as u32);
let rw = if self.norm_topk_prob { rw / sum_rw } else { rw };
selected_experts[expert_idx as usize].push(rw)
}
}
let mut ys = xs.zeros_like()?;
for (expert_idx, expert_layer) in self.experts.iter().enumerate() {
let top_x = &top_x[expert_idx];
if top_x.is_empty() {
continue;
}
let top_x = Tensor::new(top_x.as_slice(), xs.device())?;
let selected_experts =
Tensor::new(selected_experts[expert_idx].as_slice(), xs.device())?
.reshape(((), 1))?
.to_dtype(xs.dtype())?;
// Index the correct hidden states and compute the expert hidden state for
// the current expert. We need to make sure to multiply the output hidden
// states by `routing_weights` on the corresponding tokens (top-1 and top-2)
let current_state = xs.index_select(&top_x, 0)?.reshape(((), hidden_dim))?;
// current_hidden_states = expert_layer(current_state, routing_weights[top_x_list, idx_list, None])
            let current_hidden_states = expert_layer.forward(&current_state)?;
            let current_hidden_states = current_hidden_states.broadcast_mul(&selected_experts)?;
            ys = ys.index_add(&top_x, &current_hidden_states, 0)?;
}
let shared_expert_output = xs.apply(&self.shared_expert)?;
let shared_expert_output = shared_expert_output.broadcast_mul(&candle_nn::ops::sigmoid(
&xs.apply(&self.shared_expert_gate)?,
)?)?;
let ys = (ys + shared_expert_output)?;
let ys = ys.reshape((b_size, seq_len, hidden_dim))?;
Ok(ys)
}
}
#[derive(Debug, Clone)]
enum MlpOrMoeBlock {
Mlp(MLP),
MoeBlock(SparseMoeBlock),
}
impl Module for MlpOrMoeBlock {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
match self {
Self::MoeBlock(m) => m.forward(xs),
Self::Mlp(m) => m.forward(xs),
}
}
}
#[derive(Debug, Clone)]
struct DecoderLayer {
self_attn: Attention,
mlp: MlpOrMoeBlock,
input_layernorm: RmsNorm,
post_attention_layernorm: RmsNorm,
}
impl DecoderLayer {
fn new(
layer_idx: usize,
rotary_emb: Arc<RotaryEmbedding>,
cfg: &Config,
vb: VarBuilder,
) -> Result<Self> {
let self_attn = Attention::new(rotary_emb, cfg, vb.pp("self_attn"))?;
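        // When experts are configured, every `decoder_sparse_step`-th layer uses
        // the sparse MoE block and the remaining layers use a dense MLP.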
let mlp = if cfg.num_experts > 0 && (layer_idx + 1) % cfg.decoder_sparse_step == 0 {
MlpOrMoeBlock::MoeBlock(SparseMoeBlock::new(cfg, vb.pp("mlp"))?)
} else {
MlpOrMoeBlock::Mlp(MLP::new(cfg.intermediate_size, cfg, vb.pp("mlp"))?)
};
let input_layernorm =
RmsNorm::new(cfg.hidden_size, cfg.rms_norm_eps, vb.pp("input_layernorm"))?;
let post_attention_layernorm = RmsNorm::new(
cfg.hidden_size,
cfg.rms_norm_eps,
vb.pp("post_attention_layernorm"),
)?;
Ok(Self {
self_attn,
mlp,
input_layernorm,
post_attention_layernorm,
})
}
fn forward(
&mut self,
xs: &Tensor,
attention_mask: Option<&Tensor>,
seqlen_offset: usize,
) -> Result<Tensor> {
let residual = xs;
let xs = self.input_layernorm.forward(xs)?;
let xs = self.self_attn.forward(&xs, attention_mask, seqlen_offset)?;
let xs = (xs + residual)?;
let residual = &xs;
let xs = xs.apply(&self.post_attention_layernorm)?.apply(&self.mlp)?;
residual + xs
}
fn clear_kv_cache(&mut self) {
self.self_attn.clear_kv_cache()
}
}
#[derive(Debug, Clone)]
pub struct Model {
embed_tokens: candle_nn::Embedding,
layers: Vec<DecoderLayer>,
norm: RmsNorm,
lm_head: Linear,
sliding_window: usize,
device: Device,
dtype: DType,
}
impl Model {
pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let vb_m = vb.pp("model");
let embed_tokens =
candle_nn::embedding(cfg.vocab_size, cfg.hidden_size, vb_m.pp("embed_tokens"))?;
let rotary_emb = Arc::new(RotaryEmbedding::new(vb.dtype(), cfg, vb_m.device())?);
let mut layers = Vec::with_capacity(cfg.num_hidden_layers);
let vb_l = vb_m.pp("layers");
for layer_idx in 0..cfg.num_hidden_layers {
let layer = DecoderLayer::new(layer_idx, rotary_emb.clone(), cfg, vb_l.pp(layer_idx))?;
layers.push(layer)
}
let norm = RmsNorm::new(cfg.hidden_size, cfg.rms_norm_eps, vb_m.pp("norm"))?;
let lm_head = linear_no_bias(cfg.hidden_size, cfg.vocab_size, vb.pp("lm_head"))?;
Ok(Self {
embed_tokens,
layers,
norm,
lm_head,
sliding_window: cfg.sliding_window,
device: vb.device().clone(),
dtype: vb.dtype(),
})
}
fn prepare_decoder_attention_mask(
&self,
b_size: usize,
tgt_len: usize,
seqlen_offset: usize,
) -> Result<Tensor> {
// Sliding window mask?
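        // Entry (i, j) is 0 when token i may attend to token j, i.e. when
        // i - sliding_window <= j <= i, and -inf otherwise; the seqlen_offset
        // positions coming from the KV cache are always visible.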
let mask: Vec<_> = (0..tgt_len)
.flat_map(|i| {
(0..tgt_len).map(move |j| {
if i < j || j + self.sliding_window < i {
f32::NEG_INFINITY
} else {
0.
}
})
})
.collect();
let mask = Tensor::from_slice(&mask, (tgt_len, tgt_len), &self.device)?;
let mask = if seqlen_offset > 0 {
let mask0 = Tensor::zeros((tgt_len, seqlen_offset), DType::F32, &self.device)?;
Tensor::cat(&[&mask0, &mask], D::Minus1)?
} else {
mask
};
mask.expand((b_size, 1, tgt_len, tgt_len + seqlen_offset))?
.to_dtype(self.dtype)
}
pub fn forward(&mut self, input_ids: &Tensor, seqlen_offset: usize) -> Result<Tensor> {
let (b_size, seq_len) = input_ids.dims2()?;
let attention_mask = if seq_len <= 1 {
None
} else {
let mask = self.prepare_decoder_attention_mask(b_size, seq_len, seqlen_offset)?;
Some(mask)
};
let mut xs = self.embed_tokens.forward(input_ids)?;
for layer in self.layers.iter_mut() {
xs = layer.forward(&xs, attention_mask.as_ref(), seqlen_offset)?
}
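        // Only the last position is needed to produce next-token logits, so
        // narrow to it before applying the final norm and lm_head.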
xs.narrow(1, seq_len - 1, 1)?
.apply(&self.norm)?
.apply(&self.lm_head)
}
pub fn clear_kv_cache(&mut self) {
for layer in self.layers.iter_mut() {
layer.clear_kv_cache()
}
}
}
| candle/candle-transformers/src/models/qwen2_moe.rs/0 | {
"file_path": "candle/candle-transformers/src/models/qwen2_moe.rs",
"repo_id": "candle",
"token_count": 8732
} |
//! Contrastive Language-Image Pre-Training
//!
//! Contrastive Language-Image Pre-Training (CLIP) is an architecture trained on
//! pairs of images with related texts.
//!
//! - [CLIP](https://github.com/openai/CLIP)
use candle::{DType, Device, Result, Tensor, D};
use candle_nn as nn;
use candle_nn::Module;
#[derive(Debug, Clone, Copy)]
pub enum Activation {
QuickGelu,
Gelu,
GeluErf,
}
impl Module for Activation {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
match self {
Activation::QuickGelu => xs * nn::ops::sigmoid(&(xs * 1.702f64)?)?,
Activation::Gelu => xs.gelu(),
Activation::GeluErf => xs.gelu_erf(),
}
}
}
#[derive(Debug, Clone)]
pub struct Config {
vocab_size: usize,
embed_dim: usize, // aka config.hidden_size
activation: Activation, // aka config.hidden_act
intermediate_size: usize,
pub max_position_embeddings: usize,
// The character to use for padding, use EOS when not set.
pub pad_with: Option<String>,
num_hidden_layers: usize,
num_attention_heads: usize,
#[allow(dead_code)]
projection_dim: usize,
}
impl Config {
// The config details can be found in the "text_config" section of this json file:
// https://huggingface.co/openai/clip-vit-large-patch14/blob/main/config.json
pub fn v1_5() -> Self {
Self {
vocab_size: 49408,
embed_dim: 768,
intermediate_size: 3072,
max_position_embeddings: 77,
pad_with: None,
num_hidden_layers: 12,
num_attention_heads: 12,
projection_dim: 768,
activation: Activation::QuickGelu,
}
}
// https://huggingface.co/stabilityai/stable-diffusion-2-1/blob/main/text_encoder/config.json
pub fn v2_1() -> Self {
Self {
vocab_size: 49408,
embed_dim: 1024,
intermediate_size: 4096,
max_position_embeddings: 77,
pad_with: Some("!".to_string()),
num_hidden_layers: 23,
num_attention_heads: 16,
projection_dim: 512,
activation: Activation::Gelu,
}
}
// https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/blob/main/text_encoder/config.json
pub fn sdxl() -> Self {
Self {
vocab_size: 49408,
embed_dim: 768,
intermediate_size: 3072,
max_position_embeddings: 77,
pad_with: Some("!".to_string()),
num_hidden_layers: 12,
num_attention_heads: 12,
projection_dim: 768,
activation: Activation::QuickGelu,
}
}
// https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/blob/main/text_encoder_2/config.json
pub fn sdxl2() -> Self {
Self {
vocab_size: 49408,
embed_dim: 1280,
intermediate_size: 5120,
max_position_embeddings: 77,
pad_with: Some("!".to_string()),
num_hidden_layers: 32,
num_attention_heads: 20,
projection_dim: 1280,
activation: Activation::Gelu,
}
}
pub fn ssd1b() -> Self {
Self::sdxl()
}
pub fn ssd1b2() -> Self {
Self::sdxl2()
}
// https://huggingface.co/warp-ai/wuerstchen/blob/main/text_encoder/config.json
pub fn wuerstchen() -> Self {
Self {
vocab_size: 49408,
embed_dim: 1024,
intermediate_size: 4096,
max_position_embeddings: 77,
pad_with: None,
num_hidden_layers: 24,
num_attention_heads: 16,
projection_dim: 1024,
activation: Activation::GeluErf,
}
}
// https://huggingface.co/warp-ai/wuerstchen-prior/blob/main/text_encoder/config.json
pub fn wuerstchen_prior() -> Self {
Self {
vocab_size: 49408,
embed_dim: 1280,
intermediate_size: 5120,
max_position_embeddings: 77,
pad_with: None,
num_hidden_layers: 32,
num_attention_heads: 20,
projection_dim: 512,
activation: Activation::GeluErf,
}
}
}
// CLIP Text Model
// https://github.com/huggingface/transformers/blob/674f750a57431222fa2832503a108df3badf1564/src/transformers/models/clip/modeling_clip.py
#[derive(Debug)]
struct ClipTextEmbeddings {
token_embedding: candle_nn::Embedding,
position_embedding: candle_nn::Embedding,
position_ids: Tensor,
}
impl ClipTextEmbeddings {
fn new(vs: candle_nn::VarBuilder, c: &Config) -> Result<Self> {
let token_embedding =
candle_nn::embedding(c.vocab_size, c.embed_dim, vs.pp("token_embedding"))?;
let position_embedding = candle_nn::embedding(
c.max_position_embeddings,
c.embed_dim,
vs.pp("position_embedding"),
)?;
let position_ids =
Tensor::arange(0u32, c.max_position_embeddings as u32, vs.device())?.unsqueeze(0)?;
Ok(ClipTextEmbeddings {
token_embedding,
position_embedding,
position_ids,
})
}
}
impl Module for ClipTextEmbeddings {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let token_embedding = self.token_embedding.forward(xs)?;
let position_embedding = self.position_embedding.forward(&self.position_ids)?;
token_embedding.broadcast_add(&position_embedding)
}
}
#[derive(Debug)]
struct ClipAttention {
k_proj: candle_nn::Linear,
v_proj: candle_nn::Linear,
q_proj: candle_nn::Linear,
out_proj: candle_nn::Linear,
head_dim: usize,
scale: f64,
num_attention_heads: usize,
}
impl ClipAttention {
fn new(vs: candle_nn::VarBuilder, c: &Config) -> Result<Self> {
let embed_dim = c.embed_dim;
let num_attention_heads = c.num_attention_heads;
let k_proj = candle_nn::linear(embed_dim, embed_dim, vs.pp("k_proj"))?;
let v_proj = candle_nn::linear(embed_dim, embed_dim, vs.pp("v_proj"))?;
let q_proj = candle_nn::linear(embed_dim, embed_dim, vs.pp("q_proj"))?;
let out_proj = candle_nn::linear(embed_dim, embed_dim, vs.pp("out_proj"))?;
let head_dim = embed_dim / num_attention_heads;
let scale = (head_dim as f64).powf(-0.5);
Ok(ClipAttention {
k_proj,
v_proj,
q_proj,
out_proj,
head_dim,
scale,
num_attention_heads,
})
}
fn shape(&self, xs: &Tensor, seq_len: usize, bsz: usize) -> Result<Tensor> {
xs.reshape((bsz, seq_len, self.num_attention_heads, self.head_dim))?
.transpose(1, 2)?
.contiguous()
}
fn forward(&self, xs: &Tensor, causal_attention_mask: &Tensor) -> Result<Tensor> {
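        // Fold the attention heads into the batch dimension, run scaled dot-product attention
        // in f32 with the causal mask added to the logits, then merge the heads back and apply
        // the output projection.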
let in_dtype = xs.dtype();
let (bsz, seq_len, embed_dim) = xs.dims3()?;
let query_states = (self.q_proj.forward(xs)? * self.scale)?;
let proj_shape = (bsz * self.num_attention_heads, seq_len, self.head_dim);
let query_states = self
.shape(&query_states, seq_len, bsz)?
.reshape(proj_shape)?
.to_dtype(DType::F32)?;
let key_states = self
.shape(&self.k_proj.forward(xs)?, seq_len, bsz)?
.reshape(proj_shape)?
.to_dtype(DType::F32)?;
let value_states = self
.shape(&self.v_proj.forward(xs)?, seq_len, bsz)?
.reshape(proj_shape)?
.to_dtype(DType::F32)?;
let attn_weights = query_states.matmul(&key_states.transpose(1, 2)?)?;
let src_len = key_states.dim(1)?;
let attn_weights = attn_weights
.reshape((bsz, self.num_attention_heads, seq_len, src_len))?
.broadcast_add(causal_attention_mask)?;
let attn_weights =
attn_weights.reshape((bsz * self.num_attention_heads, seq_len, src_len))?;
let attn_weights = candle_nn::ops::softmax(&attn_weights, D::Minus1)?;
let attn_output = attn_weights.matmul(&value_states)?.to_dtype(in_dtype)?;
let attn_output = attn_output
.reshape((bsz, self.num_attention_heads, seq_len, self.head_dim))?
.transpose(1, 2)?
.reshape((bsz, seq_len, embed_dim))?;
self.out_proj.forward(&attn_output)
}
}
#[derive(Debug)]
struct ClipMlp {
fc1: candle_nn::Linear,
fc2: candle_nn::Linear,
activation: Activation,
}
impl ClipMlp {
fn new(vs: candle_nn::VarBuilder, c: &Config) -> Result<Self> {
let fc1 = candle_nn::linear(c.embed_dim, c.intermediate_size, vs.pp("fc1"))?;
let fc2 = candle_nn::linear(c.intermediate_size, c.embed_dim, vs.pp("fc2"))?;
Ok(ClipMlp {
fc1,
fc2,
activation: c.activation,
})
}
}
impl ClipMlp {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let xs = self.fc1.forward(xs)?;
self.fc2.forward(&self.activation.forward(&xs)?)
}
}
#[derive(Debug)]
struct ClipEncoderLayer {
self_attn: ClipAttention,
layer_norm1: candle_nn::LayerNorm,
mlp: ClipMlp,
layer_norm2: candle_nn::LayerNorm,
}
impl ClipEncoderLayer {
fn new(vs: candle_nn::VarBuilder, c: &Config) -> Result<Self> {
let self_attn = ClipAttention::new(vs.pp("self_attn"), c)?;
let layer_norm1 = candle_nn::layer_norm(c.embed_dim, 1e-5, vs.pp("layer_norm1"))?;
let mlp = ClipMlp::new(vs.pp("mlp"), c)?;
let layer_norm2 = candle_nn::layer_norm(c.embed_dim, 1e-5, vs.pp("layer_norm2"))?;
Ok(ClipEncoderLayer {
self_attn,
layer_norm1,
mlp,
layer_norm2,
})
}
fn forward(&self, xs: &Tensor, causal_attention_mask: &Tensor) -> Result<Tensor> {
let residual = xs;
let xs = self.layer_norm1.forward(xs)?;
let xs = self.self_attn.forward(&xs, causal_attention_mask)?;
let xs = (xs + residual)?;
let residual = &xs;
let xs = self.layer_norm2.forward(&xs)?;
let xs = self.mlp.forward(&xs)?;
xs + residual
}
}
#[derive(Debug)]
struct ClipEncoder {
layers: Vec<ClipEncoderLayer>,
}
impl ClipEncoder {
fn new(vs: candle_nn::VarBuilder, c: &Config) -> Result<Self> {
let vs = vs.pp("layers");
let mut layers: Vec<ClipEncoderLayer> = Vec::new();
for index in 0..c.num_hidden_layers {
let layer = ClipEncoderLayer::new(vs.pp(index.to_string()), c)?;
layers.push(layer)
}
Ok(ClipEncoder { layers })
}
fn forward(&self, xs: &Tensor, causal_attention_mask: &Tensor) -> Result<Tensor> {
let mut xs = xs.clone();
for layer in self.layers.iter() {
xs = layer.forward(&xs, causal_attention_mask)?;
}
Ok(xs)
}
}
/// A CLIP transformer based model.
#[derive(Debug)]
pub struct ClipTextTransformer {
embeddings: ClipTextEmbeddings,
encoder: ClipEncoder,
final_layer_norm: candle_nn::LayerNorm,
}
impl ClipTextTransformer {
pub fn new(vs: candle_nn::VarBuilder, c: &Config) -> Result<Self> {
let vs = vs.pp("text_model");
let embeddings = ClipTextEmbeddings::new(vs.pp("embeddings"), c)?;
let encoder = ClipEncoder::new(vs.pp("encoder"), c)?;
let final_layer_norm = candle_nn::layer_norm(c.embed_dim, 1e-5, vs.pp("final_layer_norm"))?;
Ok(ClipTextTransformer {
embeddings,
encoder,
final_layer_norm,
})
}
// https://github.com/huggingface/transformers/blob/674f750a57431222fa2832503a108df3badf1564/src/transformers/models/clip/modeling_clip.py#L678
fn build_causal_attention_mask(
bsz: usize,
seq_len: usize,
mask_after: usize,
device: &Device,
) -> Result<Tensor> {
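        // Additive mask: positions that look ahead (j > i) or lie beyond `mask_after` get
        // f32::MIN, all other positions stay at 0.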
let mask: Vec<_> = (0..seq_len)
.flat_map(|i| {
(0..seq_len).map(move |j| {
if j > i || j > mask_after {
f32::MIN
} else {
0.
}
})
})
.collect();
let mask = Tensor::from_slice(&mask, (seq_len, seq_len), device)?;
mask.broadcast_as((bsz, seq_len, seq_len))
}
pub fn forward_with_mask(&self, xs: &Tensor, mask_after: usize) -> Result<Tensor> {
let (bsz, seq_len) = xs.dims2()?;
let xs = self.embeddings.forward(xs)?;
let causal_attention_mask =
Self::build_causal_attention_mask(bsz, seq_len, mask_after, xs.device())?;
let xs = self.encoder.forward(&xs, &causal_attention_mask)?;
self.final_layer_norm.forward(&xs)
}
pub fn forward_until_encoder_layer(
&self,
xs: &Tensor,
mask_after: usize,
until_layer: isize,
) -> Result<(Tensor, Tensor)> {
let (bsz, seq_len) = xs.dims2()?;
let xs = self.embeddings.forward(xs)?;
let causal_attention_mask =
Self::build_causal_attention_mask(bsz, seq_len, mask_after, xs.device())?;
let mut xs = xs.clone();
let mut intermediate = xs.clone();
// Modified encoder.forward that returns the intermediate tensor along with final output.
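        // A negative `until_layer` indexes from the end of the layer stack, Python-style.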
let until_layer = if until_layer < 0 {
self.encoder.layers.len() as isize + until_layer
} else {
until_layer
} as usize;
for (layer_id, layer) in self.encoder.layers.iter().enumerate() {
xs = layer.forward(&xs, &causal_attention_mask)?;
if layer_id == until_layer {
intermediate = xs.clone();
}
}
Ok((self.final_layer_norm.forward(&xs)?, intermediate))
}
}
impl Module for ClipTextTransformer {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
self.forward_with_mask(xs, usize::MAX)
}
}
| candle/candle-transformers/src/models/stable_diffusion/clip.rs/0 | {
"file_path": "candle/candle-transformers/src/models/stable_diffusion/clip.rs",
"repo_id": "candle",
"token_count": 6983
} |
use crate::models::with_tracing::{linear, Linear};
use candle::{DType, Module, Result, Tensor};
use candle_nn::{
embedding, layer_norm, ops::softmax_last_dim, Activation, Embedding, LayerNorm, VarBuilder,
};
#[derive(Debug, Clone, serde::Deserialize)]
pub struct Config {
pub hidden_size: usize,
pub layer_norm_eps: f64,
pub attention_probs_dropout_prob: f32,
pub hidden_dropout_prob: f32,
pub num_attention_heads: usize,
pub position_embedding_type: String,
pub intermediate_size: usize,
pub hidden_act: Activation,
pub num_hidden_layers: usize,
pub vocab_size: usize,
pub max_position_embeddings: usize,
pub type_vocab_size: usize,
pub pad_token_id: u32,
}
struct XLMRobertaEmbeddings {
word_embeddings: Embedding,
position_embeddings: Option<Embedding>,
token_type_embeddings: Embedding,
layer_norm: LayerNorm,
padding_idx: u32,
span: tracing::Span,
}
impl XLMRobertaEmbeddings {
fn load(vb: VarBuilder, config: &Config) -> Result<Self> {
let word_embeddings = embedding(
config.vocab_size,
config.hidden_size,
vb.pp("word_embeddings"),
)?;
let position_embeddings = embedding(
config.max_position_embeddings,
config.hidden_size,
vb.pp("position_embeddings"),
)?;
let token_type_embeddings = embedding(
config.type_vocab_size,
config.hidden_size,
vb.pp("token_type_embeddings"),
)?;
let layer_norm = layer_norm(
config.hidden_size,
config.layer_norm_eps,
vb.pp("LayerNorm"),
)?;
Ok(Self {
word_embeddings,
position_embeddings: Some(position_embeddings),
token_type_embeddings,
layer_norm,
padding_idx: config.pad_token_id,
span: tracing::span!(tracing::Level::TRACE, "embeddings"),
})
}
fn forward(&self, input_ids: &Tensor, token_type_ids: &Tensor) -> Result<Tensor> {
let _enter = self.span.enter();
let (_bsize, _) = input_ids.dims2()?;
let input_embeddings = self.word_embeddings.forward(input_ids)?;
let token_type_embeddings = self.token_type_embeddings.forward(token_type_ids)?;
let mut embeddings = (&input_embeddings + token_type_embeddings)?;
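        // Position ids are derived from the running count of non-padding tokens, offset by
        // `padding_idx`, so every padding position maps to the padding position id.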
if let Some(position_embeddings) = &self.position_embeddings {
let mask = input_ids
.ne(self.padding_idx)?
.to_dtype(input_embeddings.dtype())?;
let cumsum = mask.cumsum(1)?;
let position_ids = (cumsum * mask)?
.broadcast_add(
&Tensor::try_from(self.padding_idx)?
.to_dtype(input_embeddings.dtype())?
.to_device(input_embeddings.device())?,
)?
.to_dtype(candle::DType::U32)?;
embeddings = embeddings.broadcast_add(&position_embeddings.forward(&position_ids)?)?;
}
let embeddings = self.layer_norm.forward(&embeddings)?;
Ok(embeddings)
}
}
struct XLMRobertaSelfAttention {
num_attention_heads: usize,
attention_head_size: usize,
all_head_size: usize,
query: Linear,
key: Linear,
value: Linear,
}
impl XLMRobertaSelfAttention {
fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let attention_head_size = cfg.hidden_size / cfg.num_attention_heads;
let all_head_size = cfg.num_attention_heads * attention_head_size;
Ok(Self {
num_attention_heads: cfg.num_attention_heads,
attention_head_size,
all_head_size,
query: linear(cfg.hidden_size, all_head_size, vb.pp("query"))?,
key: linear(cfg.hidden_size, all_head_size, vb.pp("key"))?,
value: linear(cfg.hidden_size, all_head_size, vb.pp("value"))?,
})
}
fn transpose_for_scores(&self, x: &Tensor) -> Result<Tensor> {
let mut new_x_shape = x.dims().to_vec();
new_x_shape[2] = self.num_attention_heads;
new_x_shape.push(self.attention_head_size);
let x = x.reshape(new_x_shape)?;
x.permute((0, 2, 1, 3))?.contiguous()
}
fn forward(
&self,
hidden_states: &Tensor,
encoder_hidden_states: Option<&Tensor>,
attention_mask: &Tensor,
past_key_value: Option<(&Tensor, &Tensor)>,
encoder_attention_mask: Option<&Tensor>,
) -> Result<Tensor> {
let mixed_query_layer = self.query.forward(hidden_states)?;
let is_cross_attention = encoder_hidden_states.is_some();
let (key_layer, value_layer, attention_mask) = if is_cross_attention
&& past_key_value.is_some()
{
let key_layer = past_key_value.unwrap().0.clone();
let value_layer = past_key_value.unwrap().1.clone();
let attention_mask = encoder_attention_mask.unwrap().clone();
(key_layer, value_layer, Some(attention_mask))
} else if is_cross_attention {
let key_layer =
self.transpose_for_scores(&self.key.forward(encoder_hidden_states.unwrap())?)?;
let value_layer =
self.transpose_for_scores(&self.value.forward(encoder_hidden_states.unwrap())?)?;
let attention_mask = encoder_attention_mask.unwrap();
(key_layer, value_layer, Some(attention_mask.clone()))
} else if past_key_value.is_some() {
let mut key_layer = self.transpose_for_scores(&self.key.forward(hidden_states)?)?;
let mut value_layer = self.transpose_for_scores(&self.value.forward(hidden_states)?)?;
key_layer = Tensor::cat(
&[
past_key_value.clone().as_ref().unwrap().0.clone(),
key_layer,
],
2,
)?;
value_layer = Tensor::cat(
&[past_key_value.as_ref().unwrap().1.clone(), value_layer],
2,
)?;
(key_layer, value_layer, Some(attention_mask.clone()))
} else {
let key_layer = self.transpose_for_scores(&self.key.forward(hidden_states)?)?;
let value_layer = self.transpose_for_scores(&self.value.forward(hidden_states)?)?;
(key_layer, value_layer, Some(attention_mask.clone()))
};
let query_layer = self.transpose_for_scores(&mixed_query_layer)?;
let mut attention_scores = query_layer.matmul(&key_layer.transpose(2, 3)?)?;
let scale = 1f64 / f64::sqrt(self.attention_head_size as f64);
attention_scores = (attention_scores * scale)?;
attention_scores = match attention_mask {
None => attention_scores,
Some(mask) => {
attention_scores.broadcast_add(&mask.to_dtype(attention_scores.dtype())?)?
}
};
let attention_probs = softmax_last_dim(&attention_scores)?;
let context_layer = attention_probs
.matmul(&value_layer)?
.permute((0, 2, 1, 3))?
.contiguous()?;
let mut new_context_layer_shape =
context_layer.dims()[..context_layer.dims().len() - 2].to_vec();
new_context_layer_shape.push(self.all_head_size);
let context_layer = context_layer.reshape(new_context_layer_shape)?;
Ok(context_layer)
}
}
struct XLMRobertaSelfOutput {
dense: Linear,
layernorm: LayerNorm,
}
impl XLMRobertaSelfOutput {
fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let dense = linear(cfg.hidden_size, cfg.hidden_size, vb.pp("dense"))?;
let layernorm =
candle_nn::layer_norm(cfg.hidden_size, cfg.layer_norm_eps, vb.pp("LayerNorm"))?;
Ok(Self { dense, layernorm })
}
fn forward(&self, hidden_states: &Tensor, input_tensor: &Tensor) -> Result<Tensor> {
let hidden_states = self.dense.forward(hidden_states)?;
let hidden_states = self.layernorm.forward(&(hidden_states + input_tensor)?)?;
Ok(hidden_states)
}
}
struct XLMRobertaAttention {
output: XLMRobertaSelfOutput,
self_attention: XLMRobertaSelfAttention,
}
impl XLMRobertaAttention {
fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let output = XLMRobertaSelfOutput::new(cfg, vb.pp("output"))?;
let self_attention = XLMRobertaSelfAttention::new(cfg, vb.pp("self"))?;
Ok(Self {
output,
self_attention,
})
}
fn forward(
&self,
hidden_states: &Tensor,
attention_mask: &Tensor,
encoder_hidden_states: Option<&Tensor>,
encoder_attention_mask: Option<&Tensor>,
past_key_value: Option<(&Tensor, &Tensor)>,
) -> Result<(Tensor, Tensor)> {
let self_outputs = self.self_attention.forward(
hidden_states,
encoder_hidden_states,
attention_mask,
past_key_value,
encoder_attention_mask,
)?;
let attention_output = self.output.forward(&self_outputs, hidden_states)?;
Ok((attention_output, self_outputs))
}
}
struct XLMRobertaOutput {
dense: Linear,
layernorm: LayerNorm,
}
impl XLMRobertaOutput {
fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let dense = linear(cfg.intermediate_size, cfg.hidden_size, vb.pp("dense"))?;
let layernorm =
candle_nn::layer_norm(cfg.hidden_size, cfg.layer_norm_eps, vb.pp("LayerNorm"))?;
Ok(Self { dense, layernorm })
}
fn forward(&self, hidden_states: &Tensor, input_tensor: &Tensor) -> Result<Tensor> {
let hidden_states = self.dense.forward(hidden_states)?;
let hidden_states = self.layernorm.forward(&(hidden_states + input_tensor)?)?;
Ok(hidden_states)
}
}
struct XLMRobertaIntermediate {
dense: Linear,
intermediate_act_fn: Activation,
}
impl XLMRobertaIntermediate {
fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let dense = linear(cfg.hidden_size, cfg.intermediate_size, vb.pp("dense"))?;
let intermediate_act_fn = cfg.hidden_act;
Ok(Self {
dense,
intermediate_act_fn,
})
}
fn forward(&self, hidden_states: &Tensor) -> Result<Tensor> {
let hidden_states = self.dense.forward(hidden_states)?;
let hidden_states = self.intermediate_act_fn.forward(&hidden_states)?;
Ok(hidden_states)
}
}
struct XLMRobertaLayer {
attention: XLMRobertaAttention,
intermediate: XLMRobertaIntermediate,
output: XLMRobertaOutput,
}
impl XLMRobertaLayer {
fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let attention = XLMRobertaAttention::new(cfg, vb.pp("attention"))?;
let intermediate = XLMRobertaIntermediate::new(cfg, vb.pp("intermediate"))?;
let output = XLMRobertaOutput::new(cfg, vb.pp("output"))?;
Ok(Self {
attention,
intermediate,
output,
})
}
fn forward(
&self,
hidden_states: &Tensor,
attention_mask: &Tensor,
encoder_hidden_states: Option<&Tensor>,
encoder_attention_mask: Option<&Tensor>,
past_key_value: Option<(&Tensor, &Tensor)>,
) -> Result<(Tensor, Tensor)> {
let self_attention_outputs = self.attention.forward(
hidden_states,
attention_mask,
encoder_hidden_states,
encoder_attention_mask,
past_key_value,
)?;
let attention_output = self_attention_outputs.0;
let outputs = self_attention_outputs.1;
let intermediate_output = self.intermediate.forward(&attention_output)?;
let layer_output = self
.output
.forward(&intermediate_output, &attention_output)?;
Ok((layer_output, outputs))
}
}
struct XLMRobertaEncoder {
layers: Vec<XLMRobertaLayer>,
}
impl XLMRobertaEncoder {
fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let layers = (0..cfg.num_hidden_layers)
.map(|i| XLMRobertaLayer::new(cfg, vb.pp(format!("layer.{}", i))))
.collect::<Result<Vec<_>>>()?;
Ok(Self { layers })
}
fn forward(
&self,
hidden_states: &Tensor,
attention_mask: &Tensor,
encoder_hidden_states: Option<&Tensor>,
encoder_attention_mask: Option<&Tensor>,
past_key_value: Option<(&Tensor, &Tensor)>,
) -> Result<Tensor> {
let mut hidden_states = hidden_states.clone();
for layer_module in self.layers.iter() {
let layer_outputs = layer_module.forward(
&hidden_states,
attention_mask,
encoder_hidden_states,
encoder_attention_mask,
past_key_value,
)?;
hidden_states = layer_outputs.0;
}
Ok(hidden_states)
}
}
pub struct XLMRobertaModel {
encoder: XLMRobertaEncoder,
embeddings: XLMRobertaEmbeddings,
}
impl XLMRobertaModel {
pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let encoder = XLMRobertaEncoder::new(cfg, vb.pp("encoder"))?;
let embeddings = XLMRobertaEmbeddings::load(vb.pp("embeddings"), cfg)?;
Ok(Self {
encoder,
embeddings,
})
}
pub fn forward(
&self,
input_ids: &Tensor,
attention_mask: &Tensor,
token_type_ids: &Tensor,
past_key_value: Option<(&Tensor, &Tensor)>,
encoder_hidden_states: Option<&Tensor>,
encoder_attention_mask: Option<&Tensor>,
) -> Result<Tensor> {
let hidden_states = self.embeddings.forward(input_ids, token_type_ids)?;
let attention_mask = prepare_4d_attention_mask(attention_mask, DType::F32, None)?
.to_device(hidden_states.device())?;
let hidden_states = self.encoder.forward(
&hidden_states,
&attention_mask,
encoder_hidden_states,
encoder_attention_mask,
past_key_value,
)?;
Ok(hidden_states)
}
}
struct XLMRobertaLMHead {
dense: Linear,
layer_norm: LayerNorm,
}
impl XLMRobertaLMHead {
fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let dense = linear(cfg.hidden_size, cfg.hidden_size, vb.pp("dense"))?;
let layer_norm =
candle_nn::layer_norm(cfg.hidden_size, cfg.layer_norm_eps, vb.pp("layer_norm"))?;
Ok(Self { dense, layer_norm })
}
fn forward(&self, hidden_states: &Tensor, shared_embeddings: &Tensor) -> Result<Tensor> {
let hidden_states = self.dense.forward(hidden_states)?;
let hidden_states = candle_nn::Activation::Gelu.forward(&hidden_states)?;
let hidden_states = self.layer_norm.forward(&hidden_states)?;
let hidden_states = hidden_states.broadcast_matmul(shared_embeddings)?;
Ok(hidden_states)
}
}
pub struct XLMRobertaForMaskedLM {
roberta: XLMRobertaModel,
lm_head: XLMRobertaLMHead,
}
impl XLMRobertaForMaskedLM {
pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let roberta = XLMRobertaModel::new(cfg, vb.pp("roberta"))?;
let lm_head = XLMRobertaLMHead::new(cfg, vb.pp("lm_head"))?;
Ok(Self { roberta, lm_head })
}
pub fn forward(
&self,
input_ids: &Tensor,
attention_mask: &Tensor,
token_type_ids: &Tensor,
past_key_value: Option<(&Tensor, &Tensor)>,
encoder_hidden_states: Option<&Tensor>,
encoder_attention_mask: Option<&Tensor>,
) -> Result<Tensor> {
let hidden_states = self.roberta.forward(
input_ids,
attention_mask,
token_type_ids,
past_key_value,
encoder_hidden_states,
encoder_attention_mask,
)?;
let lm_logits = self.lm_head.forward(
&hidden_states,
&self
.roberta
.embeddings
.word_embeddings
.embeddings()
.t()?
.unsqueeze(0)?,
)?;
Ok(lm_logits)
}
}
struct XLMRobertaClassificationHead {
dense: Linear,
out_proj: Linear,
}
impl XLMRobertaClassificationHead {
fn new(num_labels: usize, cfg: &Config, vb: VarBuilder) -> Result<Self> {
let dense = linear(cfg.hidden_size, cfg.hidden_size, vb.pp("dense"))?;
let out_proj = linear(cfg.hidden_size, num_labels, vb.pp("out_proj"))?;
Ok(Self { dense, out_proj })
}
fn forward(&self, hidden_states: &Tensor) -> Result<Tensor> {
let cls_states = hidden_states.get_on_dim(1, 0)?.contiguous()?;
let hidden_states = self.dense.forward(&cls_states)?;
let hidden_states = candle_nn::Activation::GeluPytorchTanh.forward(&hidden_states)?;
let hidden_states = self.out_proj.forward(&hidden_states)?;
Ok(hidden_states)
}
}
pub struct XLMRobertaForSequenceClassification {
roberta: XLMRobertaModel,
classifier: XLMRobertaClassificationHead,
}
impl XLMRobertaForSequenceClassification {
pub fn new(num_labels: usize, cfg: &Config, vb: VarBuilder) -> Result<Self> {
let roberta = XLMRobertaModel::new(cfg, vb.pp("roberta"))?;
let classifier = XLMRobertaClassificationHead::new(num_labels, cfg, vb.pp("classifier"))?;
Ok(Self {
roberta,
classifier,
})
}
pub fn forward(
&self,
input_ids: &Tensor,
attention_mask: &Tensor,
token_type_ids: &Tensor,
) -> Result<Tensor> {
let hidden_states =
self.roberta
.forward(input_ids, attention_mask, token_type_ids, None, None, None)?;
self.classifier.forward(&hidden_states)
}
}
fn prepare_4d_attention_mask(
mask: &Tensor,
dtype: DType,
tgt_len: Option<usize>,
) -> Result<Tensor> {
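    // Expand a (bsz, src_len) padding mask to (bsz, 1, tgt_len, src_len) and turn the
    // masked-out positions into a large negative additive bias for the attention scores.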
let bsz = mask.dim(0)?;
let src_len = mask.dim(1)?;
let tgt_len = tgt_len.unwrap_or(src_len);
let expanded_mask = mask
.unsqueeze(1)?
.unsqueeze(2)?
.expand((bsz, 1, tgt_len, src_len))?
.to_dtype(dtype)?;
let inverted_mask = (1.0 - expanded_mask)?;
(inverted_mask * get_dtype_min_val(dtype))?.to_dtype(dtype)
}
fn get_dtype_min_val(dtype: DType) -> f64 {
match dtype {
DType::F32 => f32::MIN as f64,
DType::F64 => f64::MIN,
_ => panic!("Unsupported data type"),
}
}
| candle/candle-transformers/src/models/xlm_roberta.rs/0 | {
"file_path": "candle/candle-transformers/src/models/xlm_roberta.rs",
"repo_id": "candle",
"token_count": 8935
} |
use candle_transformers::models::bert;
use wasm_bindgen::prelude::*;
pub use bert::{BertModel, Config, DTYPE};
pub use tokenizers::{PaddingParams, Tokenizer};
#[wasm_bindgen]
extern "C" {
// Use `js_namespace` here to bind `console.log(..)` instead of just
// `log(..)`
#[wasm_bindgen(js_namespace = console)]
pub fn log(s: &str);
}
#[macro_export]
macro_rules! console_log {
    // Note that this uses the `log` function imported above via `js_namespace`.
($($t:tt)*) => ($crate::log(&format_args!($($t)*).to_string()))
}
| candle/candle-wasm-examples/bert/src/lib.rs/0 | {
"file_path": "candle/candle-wasm-examples/bert/src/lib.rs",
"repo_id": "candle",
"token_count": 226
} |
use crate::console_log;
use crate::worker::{ModelData, Worker, WorkerInput, WorkerOutput};
use std::str::FromStr;
use wasm_bindgen::prelude::*;
use wasm_bindgen_futures::JsFuture;
use yew::{html, Component, Context, Html};
use yew_agent::{Bridge, Bridged};
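// Fetch a binary resource through the browser Fetch API and return its raw bytes.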
async fn fetch_url(url: &str) -> Result<Vec<u8>, JsValue> {
use web_sys::{Request, RequestCache, RequestInit, RequestMode, Response};
let window = web_sys::window().ok_or("window")?;
let opts = RequestInit::new();
opts.set_method("GET");
opts.set_mode(RequestMode::Cors);
opts.set_cache(RequestCache::NoCache);
let request = Request::new_with_str_and_init(url, &opts)?;
let resp_value = JsFuture::from(window.fetch_with_request(&request)).await?;
// `resp_value` is a `Response` object.
assert!(resp_value.is_instance_of::<Response>());
let resp: Response = resp_value.dyn_into()?;
let data = JsFuture::from(resp.blob()?).await?;
let blob = web_sys::Blob::from(data);
let array_buffer = JsFuture::from(blob.array_buffer()).await?;
let data = js_sys::Uint8Array::new(&array_buffer).to_vec();
Ok(data)
}
pub enum Msg {
Refresh,
Run,
UpdateStatus(String),
SetModel(ModelData),
WorkerIn(WorkerInput),
WorkerOut(Result<WorkerOutput, String>),
}
pub struct CurrentDecode {
start_time: Option<f64>,
}
pub struct App {
status: String,
loaded: bool,
temperature: std::rc::Rc<std::cell::RefCell<f64>>,
top_p: std::rc::Rc<std::cell::RefCell<f64>>,
prompt: std::rc::Rc<std::cell::RefCell<String>>,
generated: String,
n_tokens: usize,
current_decode: Option<CurrentDecode>,
worker: Box<dyn Bridge<Worker>>,
}
async fn model_data_load() -> Result<ModelData, JsValue> {
let tokenizer = fetch_url("tokenizer.json").await?;
let model = fetch_url("model.bin").await?;
console_log!("{}", model.len());
Ok(ModelData { tokenizer, model })
}
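// Current time in seconds, taken from the browser Performance API.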
fn performance_now() -> Option<f64> {
let window = web_sys::window()?;
let performance = window.performance()?;
Some(performance.now() / 1000.)
}
impl Component for App {
type Message = Msg;
type Properties = ();
fn create(ctx: &Context<Self>) -> Self {
let status = "loading weights".to_string();
let cb = {
let link = ctx.link().clone();
move |e| link.send_message(Self::Message::WorkerOut(e))
};
let worker = Worker::bridge(std::rc::Rc::new(cb));
Self {
status,
n_tokens: 0,
temperature: std::rc::Rc::new(std::cell::RefCell::new(0.)),
top_p: std::rc::Rc::new(std::cell::RefCell::new(1.0)),
prompt: std::rc::Rc::new(std::cell::RefCell::new("".to_string())),
generated: String::new(),
current_decode: None,
worker,
loaded: false,
}
}
fn rendered(&mut self, ctx: &Context<Self>, first_render: bool) {
if first_render {
ctx.link().send_future(async {
match model_data_load().await {
Err(err) => {
let status = format!("{err:?}");
Msg::UpdateStatus(status)
}
Ok(model_data) => Msg::SetModel(model_data),
}
});
}
}
fn update(&mut self, ctx: &Context<Self>, msg: Self::Message) -> bool {
match msg {
Msg::SetModel(md) => {
self.status = "weights loaded successfully!".to_string();
self.loaded = true;
console_log!("loaded weights");
self.worker.send(WorkerInput::ModelData(md));
true
}
Msg::Run => {
if self.current_decode.is_some() {
self.status = "already generating some sample at the moment".to_string()
} else {
let start_time = performance_now();
self.current_decode = Some(CurrentDecode { start_time });
self.status = "generating...".to_string();
self.n_tokens = 0;
self.generated.clear();
let temp = *self.temperature.borrow();
let top_p = *self.top_p.borrow();
let prompt = self.prompt.borrow().clone();
console_log!("temp: {}, top_p: {}, prompt: {}", temp, top_p, prompt);
ctx.link()
.send_message(Msg::WorkerIn(WorkerInput::Run(temp, top_p, prompt)))
}
true
}
Msg::WorkerOut(output) => {
match output {
Ok(WorkerOutput::WeightsLoaded) => self.status = "weights loaded!".to_string(),
Ok(WorkerOutput::GenerationDone(Err(err))) => {
self.status = format!("error in worker process: {err}");
self.current_decode = None
}
Ok(WorkerOutput::GenerationDone(Ok(()))) => {
let dt = self.current_decode.as_ref().and_then(|current_decode| {
current_decode.start_time.and_then(|start_time| {
performance_now().map(|stop_time| stop_time - start_time)
})
});
self.status = match dt {
None => "generation succeeded!".to_string(),
Some(dt) => format!(
"generation succeeded in {:.2}s ({:.1} ms/token)",
dt,
dt * 1000.0 / (self.n_tokens as f64)
),
};
self.current_decode = None
}
Ok(WorkerOutput::Generated(token)) => {
self.n_tokens += 1;
self.generated.push_str(&token)
}
Err(err) => {
self.status = format!("error in worker {err:?}");
}
}
true
}
Msg::WorkerIn(inp) => {
self.worker.send(inp);
true
}
Msg::UpdateStatus(status) => {
self.status = status;
true
}
Msg::Refresh => true,
}
}
fn view(&self, ctx: &Context<Self>) -> Html {
use yew::TargetCast;
let temperature = self.temperature.clone();
let oninput_temperature = ctx.link().callback(move |e: yew::InputEvent| {
let input: web_sys::HtmlInputElement = e.target_unchecked_into();
if let Ok(temp) = f64::from_str(&input.value()) {
*temperature.borrow_mut() = temp
}
Msg::Refresh
});
let top_p = self.top_p.clone();
let oninput_top_p = ctx.link().callback(move |e: yew::InputEvent| {
let input: web_sys::HtmlInputElement = e.target_unchecked_into();
if let Ok(top_p_input) = f64::from_str(&input.value()) {
*top_p.borrow_mut() = top_p_input
}
Msg::Refresh
});
let prompt = self.prompt.clone();
let oninput_prompt = ctx.link().callback(move |e: yew::InputEvent| {
let input: web_sys::HtmlInputElement = e.target_unchecked_into();
*prompt.borrow_mut() = input.value();
Msg::Refresh
});
html! {
<div style="margin: 2%;">
<div><p>{"Running "}
<a href="https://github.com/karpathy/llama2.c" target="_blank">{"llama2.c"}</a>
{" in the browser using rust/wasm with "}
<a href="https://github.com/huggingface/candle" target="_blank">{"candle!"}</a>
</p>
<p>{"Once the weights have loaded, click on the run button to start generating content."}
</p>
</div>
{"temperature \u{00a0} "}
<input type="range" min="0." max="1.2" step="0.1" value={self.temperature.borrow().to_string()} oninput={oninput_temperature} id="temp"/>
{format!(" \u{00a0} {}", self.temperature.borrow())}
<br/ >
{"top_p \u{00a0} "}
<input type="range" min="0." max="1.0" step="0.05" value={self.top_p.borrow().to_string()} oninput={oninput_top_p} id="top_p"/>
{format!(" \u{00a0} {}", self.top_p.borrow())}
<br/ >
{"prompt: "}<input type="text" value={self.prompt.borrow().to_string()} oninput={oninput_prompt} id="prompt"/>
<br/ >
{
if self.loaded{
html!(<button class="button" onclick={ctx.link().callback(move |_| Msg::Run)}> { "run" }</button>)
}else{
html! { <progress id="progress-bar" aria-label="Loading weights..."></progress> }
}
}
<br/ >
<h3>
{&self.status}
</h3>
{
if self.current_decode.is_some() {
html! { <progress id="progress-bar" aria-label="generating…"></progress> }
} else {
html! {}
}
}
<blockquote>
<p> { self.generated.chars().map(|c|
if c == '\r' || c == '\n' {
html! { <br/> }
} else {
html! { {c} }
}).collect::<Html>()
} </p>
</blockquote>
</div>
}
}
}
| candle/candle-wasm-examples/llama2-c/src/app.rs/0 | {
"file_path": "candle/candle-wasm-examples/llama2-c/src/app.rs",
"repo_id": "candle",
"token_count": 5448
} |
// load the Candle T5 encoder wasm module
let init, ModelEncoder;
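// Fetch a URL as bytes, serving from the Cache API when possible so model artifacts are only downloaded once.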
async function fetchArrayBuffer(url) {
const cacheName = "t5-candle-cache";
const cache = await caches.open(cacheName);
const cachedResponse = await cache.match(url);
if (cachedResponse) {
const data = await cachedResponse.arrayBuffer();
return new Uint8Array(data);
}
const res = await fetch(url, { cache: "force-cache" });
cache.put(url, res.clone());
return new Uint8Array(await res.arrayBuffer());
}
class Encoder {
static instance = {};
static async getInstance(weightsURL, tokenizerURL, configURL, modelID) {
if (modelID.includes("quantized")) {
({ default: init, ModelEncoder } = await import(
"./build/m-quantized.js"
));
} else {
({ default: init, ModelEncoder } = await import("./build/m.js"));
}
if (!this.instance[modelID]) {
await init();
self.postMessage({ status: "loading", message: "Loading Model" });
const [weightsArrayU8, tokenizerArrayU8, configArrayU8] =
await Promise.all([
fetchArrayBuffer(weightsURL),
fetchArrayBuffer(tokenizerURL),
fetchArrayBuffer(configURL),
]);
this.instance[modelID] = new ModelEncoder(
weightsArrayU8,
tokenizerArrayU8,
configArrayU8
);
} else {
self.postMessage({ status: "ready", message: "Model Already Loaded" });
}
return this.instance[modelID];
}
}
self.addEventListener("message", async (event) => {
const {
weightsURL,
tokenizerURL,
configURL,
modelID,
sentences,
normalize_embeddings,
} = event.data;
try {
self.postMessage({ status: "ready", message: "Starting T5 Encoder" });
const model = await Encoder.getInstance(
weightsURL,
tokenizerURL,
configURL,
modelID
);
self.postMessage({
status: "encoding",
message: "Encoding Sentences",
});
const output = model.decode({
sentences: sentences,
      normalize_embeddings: normalize_embeddings ?? true, // default to normalized embeddings unless explicitly disabled
});
self.postMessage({
status: "complete",
message: "complete",
output: output,
});
} catch (e) {
self.postMessage({ error: e });
}
});
| candle/candle-wasm-examples/t5/T5ModelEncoderWorker.js/0 | {
"file_path": "candle/candle-wasm-examples/t5/T5ModelEncoderWorker.js",
"repo_id": "candle",
"token_count": 873
} |
use candle_wasm_example_whisper::worker::{Decoder as D, ModelData};
use wasm_bindgen::prelude::*;
#[wasm_bindgen]
pub struct Decoder {
decoder: D,
}
#[wasm_bindgen]
impl Decoder {
#[wasm_bindgen(constructor)]
#[allow(clippy::too_many_arguments)]
pub fn new(
weights: Vec<u8>,
tokenizer: Vec<u8>,
mel_filters: Vec<u8>,
config: Vec<u8>,
quantized: bool,
is_multilingual: bool,
timestamps: bool,
task: Option<String>,
language: Option<String>,
) -> Result<Decoder, JsError> {
let decoder = D::load(ModelData {
tokenizer,
mel_filters,
config,
quantized,
weights,
is_multilingual,
timestamps,
task,
language,
});
match decoder {
Ok(decoder) => Ok(Self { decoder }),
Err(e) => Err(JsError::new(&e.to_string())),
}
}
#[wasm_bindgen]
pub fn decode(&mut self, wav_input: Vec<u8>) -> Result<String, JsError> {
let segments = self
.decoder
.convert_and_run(&wav_input)
.map_err(|e| JsError::new(&e.to_string()))?;
let json = serde_json::to_string(&segments)?;
Ok(json)
}
}
fn main() {}
| candle/candle-wasm-examples/whisper/src/bin/m.rs/0 | {
"file_path": "candle/candle-wasm-examples/whisper/src/bin/m.rs",
"repo_id": "candle",
"token_count": 694
} |
module.exports = {
root: true,
parser: "@typescript-eslint/parser",
extends: [
"eslint:recommended",
"plugin:@typescript-eslint/recommended",
"plugin:svelte/recommended",
"prettier",
],
plugins: ["@typescript-eslint"],
ignorePatterns: ["*.cjs"],
overrides: [
{
files: ["*.svelte"],
parser: "svelte-eslint-parser",
parserOptions: {
parser: "@typescript-eslint/parser",
},
},
],
parserOptions: {
sourceType: "module",
ecmaVersion: 2020,
extraFileExtensions: [".svelte"],
},
rules: {
"require-yield": "off",
"@typescript-eslint/no-explicit-any": "error",
"@typescript-eslint/no-non-null-assertion": "error",
"@typescript-eslint/no-unused-vars": [
// prevent variables with a _ prefix from being marked as unused
"error",
{
argsIgnorePattern: "^_",
},
],
"object-shorthand": ["error", "always"],
},
env: {
browser: true,
es2017: true,
node: true,
},
};
| chat-ui/.eslintrc.cjs/0 | {
"file_path": "chat-ui/.eslintrc.cjs",
"repo_id": "chat-ui",
"token_count": 420
} |
# syntax=docker/dockerfile:1
ARG INCLUDE_DB=false
FROM node:20-slim AS base
ENV PLAYWRIGHT_SKIP_BROWSER_GC=1
# install dotenv-cli
RUN npm install -g dotenv-cli
# switch to a user that works for spaces
RUN userdel -r node
RUN useradd -m -u 1000 user
USER user
ENV HOME=/home/user \
PATH=/home/user/.local/bin:$PATH
WORKDIR /app
# add a .env.local if the user doesn't bind a volume to it
RUN touch /app/.env.local
RUN npm i --no-package-lock --no-save playwright@1.47.0
USER root
RUN apt-get update
RUN apt-get install gnupg curl -y
RUN npx playwright install --with-deps chromium
RUN chown -R 1000:1000 /home/user/.npm
USER user
COPY --chown=1000 .env /app/.env
COPY --chown=1000 entrypoint.sh /app/entrypoint.sh
COPY --chown=1000 gcp-*.json /app/
COPY --chown=1000 package.json /app/package.json
COPY --chown=1000 package-lock.json /app/package-lock.json
RUN chmod +x /app/entrypoint.sh
FROM node:20 AS builder
WORKDIR /app
COPY --link --chown=1000 package-lock.json package.json ./
ARG APP_BASE=
ARG PUBLIC_APP_COLOR=blue
ENV BODY_SIZE_LIMIT=15728640
RUN --mount=type=cache,target=/app/.npm \
npm set cache /app/.npm && \
npm ci
COPY --link --chown=1000 . .
RUN git config --global --add safe.directory /app && \
npm run build
# mongo image
FROM mongo:7 AS mongo
# image to be used if INCLUDE_DB is false
FROM base AS local_db_false
# image to be used if INCLUDE_DB is true
FROM base AS local_db_true
# copy mongo from the other stage
COPY --from=mongo /usr/bin/mongo* /usr/bin/
ENV MONGODB_URL=mongodb://localhost:27017
USER root
RUN mkdir -p /data/db
RUN chown -R 1000:1000 /data/db
USER user
# final image
FROM local_db_${INCLUDE_DB} AS final
# build arg to determine if the database should be included
ARG INCLUDE_DB=false
ENV INCLUDE_DB=${INCLUDE_DB}
# svelte requires APP_BASE at build time so it must be passed as a build arg
ARG APP_BASE=
# tailwind requires the primary theme to be known at build time so it must be passed as a build arg
ARG PUBLIC_APP_COLOR=blue
ARG PUBLIC_COMMIT_SHA=
ENV PUBLIC_COMMIT_SHA=${PUBLIC_COMMIT_SHA}
ENV BODY_SIZE_LIMIT=15728640
#import the build & dependencies
COPY --from=builder --chown=1000 /app/build /app/build
COPY --from=builder --chown=1000 /app/node_modules /app/node_modules
CMD ["/bin/bash", "-c", "/app/entrypoint.sh"]
| chat-ui/Dockerfile/0 | {
"file_path": "chat-ui/Dockerfile",
"repo_id": "chat-ui",
"token_count": 901
} |
apiVersion: v1
kind: Service
metadata:
name: "{{ include "name" . }}"
annotations: {{ toYaml .Values.service.annotations | nindent 4 }}
namespace: {{ .Release.Namespace }}
labels: {{ include "labels.standard" . | nindent 4 }}
spec:
ports:
- name: http
port: 80
protocol: TCP
targetPort: http
{{- if $.Values.monitoring.enabled }}
- name: metrics
port: 5565
protocol: TCP
targetPort: metrics
{{- end }}
selector: {{ include "labels.standard" . | nindent 4 }}
type: {{.Values.service.type}}
| chat-ui/chart/templates/service.yaml/0 | {
"file_path": "chat-ui/chart/templates/service.yaml",
"repo_id": "chat-ui",
"token_count": 192
} |
# OpenAI
| Feature | Available |
| --------------------------- | --------- |
| [Tools](../tools) | No |
| [Multimodal](../multimodal) | No |
Chat UI can be used with any API server that supports OpenAI API compatibility, for example [text-generation-webui](https://github.com/oobabooga/text-generation-webui/tree/main/extensions/openai), [LocalAI](https://github.com/go-skynet/LocalAI), [FastChat](https://github.com/lm-sys/FastChat/blob/main/docs/openai_api.md), [llama-cpp-python](https://github.com/abetlen/llama-cpp-python), and [ialacol](https://github.com/chenhunghan/ialacol) and [vllm](https://docs.vllm.ai/en/latest/serving/openai_compatible_server.html).
The following example config makes Chat UI work with [text-generation-webui](https://github.com/oobabooga/text-generation-webui/tree/main/extensions/openai). `endpoint.baseURL` is the URL of the OpenAI-API-compatible server and overrides the base URL used by the OpenAI client. `endpoint.completion` determines which endpoint is used: the default is `chat_completions`, which calls `/chat/completions`; set it to `completions` to use the `/completions` endpoint instead.
```ini
MODELS=`[
{
"name": "text-generation-webui",
"id": "text-generation-webui",
"parameters": {
"temperature": 0.9,
"top_p": 0.95,
"repetition_penalty": 1.2,
"top_k": 50,
"truncate": 1000,
"max_new_tokens": 1024,
"stop": []
},
"endpoints": [{
"type" : "openai",
"baseURL": "http://localhost:8000/v1"
}]
}
]`
```
The `openai` type includes official OpenAI models. You can add, for example, GPT-4 and GPT-3.5 Turbo as "openai" models:
```ini
OPENAI_API_KEY=#your openai api key here
MODELS=`[{
"name": "gpt-4",
"displayName": "GPT 4",
"endpoints" : [{
"type": "openai",
"apiKey": "or your openai api key here"
}]
},{
"name": "gpt-3.5-turbo",
"displayName": "GPT 3.5 Turbo",
"endpoints" : [{
"type": "openai",
"apiKey": "or your openai api key here"
}]
}]`
```
We also support models in the `o1` family, which need a few more options in the config. Here is an example for `o1-mini`:
```ini
MODELS=`[
{
"name": "o1-mini",
"description": "ChatGPT o1-mini",
"systemRoleSupported": false,
"parameters": {
"max_new_tokens": 2048,
},
"endpoints" : [{
"type": "openai",
"useCompletionTokens": true,
}]
}
]`
```
You may also consume any model provider that exposes a compatible OpenAI API endpoint. For example, you can self-host a [Portkey](https://github.com/Portkey-AI/gateway) gateway and experiment with Claude or the GPT models offered by Azure OpenAI. Example for Claude from Anthropic:
```ini
MODELS=`[{
"name": "claude-2.1",
"displayName": "Claude 2.1",
"description": "Anthropic has been founded by former OpenAI researchers...",
"parameters": {
"temperature": 0.5,
"max_new_tokens": 4096,
},
"endpoints": [
{
"type": "openai",
"baseURL": "https://gateway.example.com/v1",
"defaultHeaders": {
"x-portkey-config": '{"provider":"anthropic","api_key":"sk-ant-abc...xyz"}'
}
}
]
}]`
```
Example for GPT 4 deployed on Azure OpenAI:
```ini
MODELS=`[{
"id": "gpt-4-1106-preview",
"name": "gpt-4-1106-preview",
"displayName": "gpt-4-1106-preview",
"parameters": {
"temperature": 0.5,
"max_new_tokens": 4096,
},
"endpoints": [
{
"type": "openai",
"baseURL": "https://{resource-name}.openai.azure.com/openai/deployments/{deployment-id}",
"defaultHeaders": {
"api-key": "{api-key}"
},
"defaultQuery": {
"api-version": "2023-05-15"
}
}
]
}]`
```
## DeepInfra
Or try Mistral from [Deepinfra](https://deepinfra.com/mistralai/Mistral-7B-Instruct-v0.1/api?example=openai-http):
> Note: `apiKey` can be set per endpoint or globally via the `OPENAI_API_KEY` variable.
```ini
MODELS=`[{
"name": "mistral-7b",
"displayName": "Mistral 7B",
"description": "A 7B dense Transformer, fast-deployed and easily customisable. Small, yet powerful for a variety of use cases. Supports English and code, and a 8k context window.",
"parameters": {
"temperature": 0.5,
"max_new_tokens": 4096,
},
"endpoints": [
{
"type": "openai",
"baseURL": "https://api.deepinfra.com/v1/openai",
"apiKey": "abc...xyz"
}
]
}]`
```
## Other
Some other providers and their `baseURL` values, for reference (see the example config below for how to plug one in):

- [Groq](https://groq.com/): https://api.groq.com/openai/v1
- [Fireworks](https://fireworks.ai/): https://api.fireworks.ai/inference/v1
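For instance, a Groq setup could look like the following sketch; the model name, display name, and API key below are placeholders to replace with values from your own account:

```ini
MODELS=`[{
  "name": "llama-3.1-8b-instant",
  "displayName": "Llama 3.1 8B Instant",
  "parameters": {
    "temperature": 0.5,
    "max_new_tokens": 2048,
  },
  "endpoints": [
    {
      "type": "openai",
      "baseURL": "https://api.groq.com/openai/v1",
      "apiKey": "gsk_...your Groq api key here"
    }
  ]
}]`
```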
| chat-ui/docs/source/configuration/models/providers/openai.md/0 | {
"file_path": "chat-ui/docs/source/configuration/models/providers/openai.md",
"repo_id": "chat-ui",
"token_count": 1942
} |
{
"name": "chat-ui",
"version": "0.9.4",
"private": true,
"packageManager": "npm@9.5.0",
"scripts": {
"dev": "vite dev",
"build": "vite build",
"preview": "vite preview",
"check": "svelte-kit sync && svelte-check --tsconfig ./tsconfig.json",
"check:watch": "svelte-kit sync && svelte-check --tsconfig ./tsconfig.json --watch",
"lint": "prettier --check . && eslint .",
"format": "prettier --write .",
"test": "vitest",
"updateLocalEnv": "node --loader ts-node/esm scripts/updateLocalEnv.ts",
"populate": "vite-node --options.transformMode.ssr='/.*/' scripts/populate.ts",
"prepare": "husky"
},
"devDependencies": {
"@faker-js/faker": "^8.4.1",
"@iconify-json/carbon": "^1.1.16",
"@iconify-json/eos-icons": "^1.1.6",
"@sveltejs/adapter-node": "^5.2.0",
"@sveltejs/kit": "^2.17.1",
"@sveltejs/vite-plugin-svelte": "^5.0.3",
"@tailwindcss/typography": "^0.5.9",
"@types/dompurify": "^3.0.5",
"@types/express": "^4.17.21",
"@types/js-yaml": "^4.0.9",
"@types/jsdom": "^21.1.1",
"@types/jsonpath": "^0.2.4",
"@types/katex": "^0.16.7",
"@types/mime-types": "^2.1.4",
"@types/minimist": "^1.2.5",
"@types/node": "^22.1.0",
"@types/parquetjs": "^0.10.3",
"@types/sbd": "^1.0.5",
"@types/uuid": "^9.0.8",
"@typescript-eslint/eslint-plugin": "^6.x",
"@typescript-eslint/parser": "^6.x",
"dompurify": "^3.1.6",
"eslint": "^8.28.0",
"eslint-config-prettier": "^8.5.0",
"eslint-plugin-svelte": "^2.45.1",
"isomorphic-dompurify": "^2.13.0",
"js-yaml": "^4.1.0",
"minimist": "^1.2.8",
"mongodb-memory-server": "^10.1.2",
"prettier": "^3.1.0",
"prettier-plugin-svelte": "^3.2.6",
"prettier-plugin-tailwindcss": "^0.6.11",
"prom-client": "^15.1.2",
"svelte": "^5.0.0",
"svelte-check": "^4.0.0",
"ts-node": "^10.9.1",
"tslib": "^2.4.1",
"typescript": "^5.5.0",
"unplugin-icons": "^0.16.1",
"vite": "^6.1.0",
"vite-node": "^1.3.1",
"vitest": "^2.1.9"
},
"type": "module",
"dependencies": {
"@aws-sdk/credential-providers": "^3.592.0",
"@cliqz/adblocker-playwright": "^1.27.2",
"@gradio/client": "^1.8.0",
"@huggingface/hub": "^0.5.1",
"@huggingface/inference": "^2.8.1",
"@huggingface/transformers": "^3.1.1",
"@iconify-json/bi": "^1.1.21",
"@playwright/browser-chromium": "^1.43.1",
"@resvg/resvg-js": "^2.6.2",
"autoprefixer": "^10.4.14",
"aws-sigv4-fetch": "^4.0.1",
"aws4": "^1.13.0",
"browser-image-resizer": "^2.4.1",
"date-fns": "^2.29.3",
"dotenv": "^16.0.3",
"express": "^4.21.2",
"file-type": "^19.4.1",
"google-auth-library": "^9.13.0",
"handlebars": "^4.7.8",
"highlight.js": "^11.7.0",
"husky": "^9.0.11",
"image-size": "^1.0.2",
"ip-address": "^9.0.5",
"jose": "^5.3.0",
"jsdom": "^22.0.0",
"json5": "^2.2.3",
"jsonpath": "^1.1.1",
"katex": "^0.16.21",
"lint-staged": "^15.2.7",
"marked": "^12.0.1",
"mongodb": "^5.8.0",
"nanoid": "^5.0.9",
"openid-client": "^5.4.2",
"parquetjs": "^0.11.2",
"pino": "^9.0.0",
"pino-pretty": "^11.0.0",
"playwright": "^1.44.1",
"postcss": "^8.4.31",
"saslprep": "^1.0.3",
"satori": "^0.10.11",
"satori-html": "^0.3.2",
"sbd": "^1.0.19",
"serpapi": "^1.1.1",
"sharp": "^0.33.4",
"tailwind-scrollbar": "^3.0.0",
"tailwindcss": "^3.4.0",
"uuid": "^10.0.0",
"zod": "^3.22.3"
},
"optionalDependencies": {
"@anthropic-ai/sdk": "^0.32.1",
"@anthropic-ai/vertex-sdk": "^0.4.1",
"@aws-sdk/client-bedrock-runtime": "^3.631.0",
"@google-cloud/vertexai": "^1.1.0",
"@google/generative-ai": "^0.14.1",
"aws4fetch": "^1.0.17",
"cohere-ai": "^7.9.0",
"openai": "^4.44.0"
}
}
| chat-ui/package.json/0 | {
"file_path": "chat-ui/package.json",
"repo_id": "chat-ui",
"token_count": 2082
} |
<script lang="ts">
import CarbonContinue from "~icons/carbon/continue";
interface Props {
classNames?: string;
onClick?: () => void;
}
let { classNames = "", onClick }: Props = $props();
</script>
<button
type="button"
onclick={onClick}
class="btn flex h-8 rounded-lg border bg-white px-3 py-1 text-gray-500 shadow-sm transition-all hover:bg-gray-100 dark:border-gray-600 dark:bg-gray-700 dark:text-gray-300 dark:hover:bg-gray-600 {classNames}"
>
<CarbonContinue class="mr-2 text-xs " /> Continue
</button>
| chat-ui/src/lib/components/ContinueBtn.svelte/0 | {
"file_path": "chat-ui/src/lib/components/ContinueBtn.svelte",
"repo_id": "chat-ui",
"token_count": 190
} |
<script lang="ts">
import CarbonRotate360 from "~icons/carbon/rotate-360";
interface Props {
classNames?: string;
onClick?: () => void;
}
let { classNames = "", onClick }: Props = $props();
</script>
<button
type="button"
onclick={onClick}
class="btn flex h-8 rounded-lg border bg-white px-3 py-1 text-gray-500 shadow-sm transition-all hover:bg-gray-100 dark:border-gray-600 dark:bg-gray-700 dark:text-gray-300 dark:hover:bg-gray-600 {classNames}"
>
<CarbonRotate360 class="mr-2 text-xs " /> Retry
</button>
| chat-ui/src/lib/components/RetryBtn.svelte/0 | {
"file_path": "chat-ui/src/lib/components/RetryBtn.svelte",
"repo_id": "chat-ui",
"token_count": 198
} |
<script lang="ts">
import { browser } from "$app/environment";
import { createEventDispatcher, onMount } from "svelte";
import HoverTooltip from "$lib/components/HoverTooltip.svelte";
import IconInternet from "$lib/components/icons/IconInternet.svelte";
import IconImageGen from "$lib/components/icons/IconImageGen.svelte";
import IconPaperclip from "$lib/components/icons/IconPaperclip.svelte";
import { useSettingsStore } from "$lib/stores/settings";
import { webSearchParameters } from "$lib/stores/webSearchParameters";
import {
documentParserToolId,
fetchUrlToolId,
imageGenToolId,
webSearchToolId,
} from "$lib/utils/toolIds";
import type { Assistant } from "$lib/types/Assistant";
import { page } from "$app/state";
import type { ToolFront } from "$lib/types/Tool";
import ToolLogo from "../ToolLogo.svelte";
import { goto } from "$app/navigation";
import { base } from "$app/paths";
import IconAdd from "~icons/carbon/add";
import { captureScreen } from "$lib/utils/screenshot";
import IconScreenshot from "../icons/IconScreenshot.svelte";
import { loginModalOpen } from "$lib/stores/loginModal";
interface Props {
files?: File[];
mimeTypes?: string[];
value?: string;
placeholder?: string;
loading?: boolean;
disabled?: boolean;
assistant?: Assistant | undefined;
modelHasTools?: boolean;
modelIsMultimodal?: boolean;
children?: import("svelte").Snippet;
onPaste?: (e: ClipboardEvent) => void;
}
let {
files = $bindable([]),
mimeTypes = [],
value = $bindable(""),
placeholder = "",
loading = false,
disabled = false,
assistant = undefined,
modelHasTools = false,
modelIsMultimodal = false,
children,
onPaste,
}: Props = $props();
const onFileChange = async (e: Event) => {
if (!e.target) return;
const target = e.target as HTMLInputElement;
files = [...files, ...(target.files ?? [])];
if (files.some((file) => file.type.startsWith("application/"))) {
await settings.instantSet({
tools: [...($settings.tools ?? []), documentParserToolId],
});
}
};
let textareaElement: HTMLTextAreaElement | undefined = $state();
let isCompositionOn = $state(false);
const dispatch = createEventDispatcher<{ submit: void }>();
onMount(() => {
if (!isVirtualKeyboard()) {
textareaElement?.focus();
}
function onFormSubmit() {
adjustTextareaHeight();
}
const formEl = textareaElement?.closest("form");
formEl?.addEventListener("submit", onFormSubmit);
return () => {
formEl?.removeEventListener("submit", onFormSubmit);
};
});
function isVirtualKeyboard(): boolean {
if (!browser) return false;
// Check for touch capability
if (navigator.maxTouchPoints > 0) return true;
// Check for touch events
if ("ontouchstart" in window) return true;
// Fallback to user agent string check
const userAgent = navigator.userAgent.toLowerCase();
return /android|webos|iphone|ipad|ipod|blackberry|iemobile|opera mini/i.test(userAgent);
}
function adjustTextareaHeight() {
if (!textareaElement) {
return;
}
textareaElement.style.height = "auto";
textareaElement.style.height = `${textareaElement.scrollHeight}px`;
if (textareaElement.selectionStart === textareaElement.value.length) {
textareaElement.scrollTop = textareaElement.scrollHeight;
}
}
function handleKeydown(event: KeyboardEvent) {
if (
event.key === "Enter" &&
!event.shiftKey &&
!isCompositionOn &&
!isVirtualKeyboard() &&
value.trim() !== ""
) {
event.preventDefault();
dispatch("submit");
}
}
const settings = useSettingsStore();
// tool section
let webSearchIsOn = $derived(
modelHasTools
? ($settings.tools?.includes(webSearchToolId) ?? false) ||
($settings.tools?.includes(fetchUrlToolId) ?? false)
: $webSearchParameters.useSearch
);
let imageGenIsOn = $derived($settings.tools?.includes(imageGenToolId) ?? false);
let documentParserIsOn = $derived(
modelHasTools && files.length > 0 && files.some((file) => file.type.startsWith("application/"))
);
let extraTools = $derived(
page.data.tools
.filter((t: ToolFront) => $settings.tools?.includes(t._id))
.filter(
(t: ToolFront) =>
![documentParserToolId, imageGenToolId, webSearchToolId, fetchUrlToolId].includes(t._id)
) satisfies ToolFront[]
);
</script>
<div class="flex min-h-full flex-1 flex-col" onpaste={onPaste}>
<textarea
rows="1"
tabindex="0"
inputmode="text"
class="scrollbar-custom max-h-[4lh] w-full resize-none overflow-y-auto overflow-x-hidden border-0 bg-transparent px-2.5 py-2.5 outline-none focus:ring-0 focus-visible:ring-0 max-sm:text-[16px] sm:px-3"
class:text-gray-400={disabled}
bind:value
bind:this={textareaElement}
onkeydown={handleKeydown}
oncompositionstart={() => (isCompositionOn = true)}
oncompositionend={() => (isCompositionOn = false)}
oninput={adjustTextareaHeight}
onbeforeinput={(ev) => {
if (page.data.loginRequired) {
ev.preventDefault();
$loginModalOpen = true;
}
}}
{placeholder}
{disabled}
></textarea>
{#if !assistant}
<div
class="scrollbar-custom -ml-0.5 flex max-w-[calc(100%-40px)] flex-wrap items-center justify-start gap-2.5 px-3 pb-2.5 pt-1.5 text-gray-500 dark:text-gray-400 max-md:flex-nowrap max-md:overflow-x-auto sm:gap-2"
>
<HoverTooltip
label="Search the web"
position="top"
TooltipClassNames="text-xs !text-left !w-auto whitespace-nowrap !py-1 !mb-0 max-sm:hidden {webSearchIsOn
? 'hidden'
: ''}"
>
<button
class="base-tool"
class:active-tool={webSearchIsOn}
disabled={loading}
onclick={async (e) => {
e.preventDefault();
if (modelHasTools) {
if (webSearchIsOn) {
await settings.instantSet({
tools: ($settings.tools ?? []).filter(
(t) => t !== webSearchToolId && t !== fetchUrlToolId
),
});
} else {
await settings.instantSet({
tools: [...($settings.tools ?? []), webSearchToolId, fetchUrlToolId],
});
}
} else {
$webSearchParameters.useSearch = !webSearchIsOn;
}
}}
>
<IconInternet classNames="text-xl" />
{#if webSearchIsOn}
Search
{/if}
</button>
</HoverTooltip>
{#if modelHasTools}
<HoverTooltip
label="Generate images"
position="top"
TooltipClassNames="text-xs !text-left !w-auto whitespace-nowrap !py-1 !mb-0 max-sm:hidden {imageGenIsOn
? 'hidden'
: ''}"
>
<button
class="base-tool"
class:active-tool={imageGenIsOn}
disabled={loading}
onclick={async (e) => {
e.preventDefault();
if (modelHasTools) {
if (imageGenIsOn) {
await settings.instantSet({
tools: ($settings.tools ?? []).filter((t) => t !== imageGenToolId),
});
} else {
await settings.instantSet({
tools: [...($settings.tools ?? []), imageGenToolId],
});
}
}
}}
>
<IconImageGen classNames="text-xl" />
{#if imageGenIsOn}
Image Gen
{/if}
</button>
</HoverTooltip>
{/if}
{#if modelIsMultimodal || modelHasTools}
{@const mimeTypesString = mimeTypes
.map((m) => {
// if the mime type ends in *, grab the first part so image/* becomes image
if (m.endsWith("*")) {
return m.split("/")[0];
}
// otherwise, return the second part for example application/pdf becomes pdf
return m.split("/")[1];
})
.join(", ")}
<div class="flex items-center">
<HoverTooltip
label={mimeTypesString.includes("*")
? "Upload any file"
: `Upload ${mimeTypesString} files`}
position="top"
TooltipClassNames="text-xs !text-left !w-auto whitespace-nowrap !py-1 !mb-0 max-sm:hidden"
>
<label class="base-tool relative" class:active-tool={documentParserIsOn}>
<input
disabled={loading}
class="absolute hidden size-0"
aria-label="Upload file"
type="file"
onchange={onFileChange}
accept={mimeTypes.join(",")}
/>
<IconPaperclip classNames="text-xl" />
{#if documentParserIsOn}
Document Parser
{/if}
</label>
</HoverTooltip>
</div>
{#if mimeTypes.includes("image/*")}
<HoverTooltip
label="Capture screenshot"
position="top"
TooltipClassNames="text-xs !text-left !w-auto whitespace-nowrap !py-1 !mb-0 max-sm:hidden"
>
<button
class="base-tool"
onclick={async (e) => {
e.preventDefault();
const screenshot = await captureScreen();
// Convert base64 to blob
const base64Response = await fetch(screenshot);
const blob = await base64Response.blob();
// Create a File object from the blob
const file = new File([blob], "screenshot.png", { type: "image/png" });
files = [...files, file];
}}
>
<IconScreenshot classNames="text-xl" />
</button>
</HoverTooltip>
{/if}
{/if}
{#if modelHasTools}
{#each extraTools as tool}
<button
class="active-tool base-tool"
disabled={loading}
onclick={async (e) => {
e.preventDefault();
goto(`${base}/tools/${tool._id}`);
}}
>
{#key tool.icon + tool.color}
<ToolLogo icon={tool.icon} color={tool.color} size="xs" />
{/key}
{tool.displayName}
</button>
{/each}
{/if}
{#if modelHasTools}
<HoverTooltip
label="Browse more tools"
position="right"
TooltipClassNames="text-xs !text-left !w-auto whitespace-nowrap !py-1 max-sm:hidden"
>
<a
class="base-tool flex !size-[20px] items-center justify-center rounded-full border !border-gray-200 !bg-white !transition-none dark:!border-gray-500 dark:!bg-transparent"
href={`${base}/tools`}
title="Browse more tools"
>
<IconAdd class="text-sm" />
</a>
</HoverTooltip>
{/if}
</div>
{/if}
{@render children?.()}
</div>
<style lang="postcss">
:global(pre),
:global(textarea) {
font-family: inherit;
box-sizing: border-box;
line-height: 1.5;
}
</style>
| chat-ui/src/lib/components/chat/ChatInput.svelte/0 | {
"file_path": "chat-ui/src/lib/components/chat/ChatInput.svelte",
"repo_id": "chat-ui",
"token_count": 4361
} |
<script lang="ts">
interface Props {
classNames?: string;
}
let { classNames = "" }: Props = $props();
</script>
<div class={"inline-flex h-8 flex-none items-center gap-1 " + classNames}>
<div
class="h-1 w-1 flex-none animate-bounce rounded-full bg-gray-500 dark:bg-gray-400"
style="animation-delay: 0.25s;"
></div>
<div
class="h-1 w-1 flex-none animate-bounce rounded-full bg-gray-500 dark:bg-gray-400"
style="animation-delay: 0.5s;"
></div>
<div
class="h-1 w-1 flex-none animate-bounce rounded-full bg-gray-500 dark:bg-gray-400"
style="animation-delay: 0.75s;"
></div>
</div>
| chat-ui/src/lib/components/icons/IconLoading.svelte/0 | {
"file_path": "chat-ui/src/lib/components/icons/IconLoading.svelte",
"repo_id": "chat-ui",
"token_count": 254
} |
import type { Migration } from ".";
import { collections } from "$lib/server/database";
import { ObjectId } from "mongodb";
const updateAssistantsModels: Migration = {
_id: new ObjectId("5f9f3f3f3f3f3f3f3f3f3f3f"),
name: "Update deprecated models in assistants with the default model",
up: async () => {
const models = (await import("$lib/server/models")).models;
const oldModels = (await import("$lib/server/models")).oldModels;
const { assistants } = collections;
const modelIds = models.map((el) => el.id);
const defaultModelId = models[0].id;
// Find all assistants whose modelId is not in modelIds, and update it
const bulkOps = await assistants
.find({ modelId: { $nin: modelIds } })
.map((assistant) => {
// has an old model
let newModelId = defaultModelId;
const oldModel = oldModels.find((m) => m.id === assistant.modelId);
if (oldModel && oldModel.transferTo && !!models.find((m) => m.id === oldModel.transferTo)) {
newModelId = oldModel.transferTo;
}
return {
updateOne: {
filter: { _id: assistant._id },
update: { $set: { modelId: newModelId } },
},
};
})
.toArray();
if (bulkOps.length > 0) {
await assistants.bulkWrite(bulkOps);
}
return true;
},
runEveryTime: true,
runForHuggingChat: "only",
};
export default updateAssistantsModels;
| chat-ui/src/lib/migrations/routines/02-update-assistants-models.ts/0 | {
"file_path": "chat-ui/src/lib/migrations/routines/02-update-assistants-models.ts",
"repo_id": "chat-ui",
"token_count": 519
} |
import { z } from "zod";
import type { EmbeddingEndpoint, Embedding } from "../embeddingEndpoints";
import { chunk } from "$lib/utils/chunk";
import { env } from "$env/dynamic/private";
import { logger } from "$lib/server/logger";
export const embeddingEndpointTeiParametersSchema = z.object({
weight: z.number().int().positive().default(1),
model: z.any(),
type: z.literal("tei"),
url: z.string().url(),
authorization: z
.string()
.optional()
.transform((v) => (!v && env.HF_TOKEN ? "Bearer " + env.HF_TOKEN : v)), // if the header is not set but HF_TOKEN is, use it as the authorization header
});
const getModelInfoByUrl = async (url: string, authorization?: string) => {
const { origin } = new URL(url);
const response = await fetch(`${origin}/info`, {
headers: {
Accept: "application/json",
"Content-Type": "application/json",
...(authorization ? { Authorization: authorization } : {}),
},
});
try {
const json = await response.json();
return { max_client_batch_size: 32, max_batch_tokens: 16384, ...json };
} catch {
logger.debug("Could not get info from TEI embedding endpoint. Using defaults.");
return { max_client_batch_size: 32, max_batch_tokens: 16384 };
}
};
export async function embeddingEndpointTei(
input: z.input<typeof embeddingEndpointTeiParametersSchema>
): Promise<EmbeddingEndpoint> {
const { url, model, authorization } = embeddingEndpointTeiParametersSchema.parse(input);
	const { max_client_batch_size, max_batch_tokens } = await getModelInfoByUrl(url, authorization);
const maxBatchSize = Math.min(
max_client_batch_size,
Math.floor(max_batch_tokens / model.chunkCharLength)
);
return async ({ inputs }) => {
const { origin } = new URL(url);
const batchesInputs = chunk(inputs, maxBatchSize);
const batchesResults = await Promise.all(
batchesInputs.map(async (batchInputs) => {
const response = await fetch(`${origin}/embed`, {
method: "POST",
headers: {
Accept: "application/json",
"Content-Type": "application/json",
...(authorization ? { Authorization: authorization } : {}),
},
body: JSON.stringify({ inputs: batchInputs, normalize: true, truncate: true }),
});
const embeddings: Embedding[] = await response.json();
return embeddings;
})
);
const flatAllEmbeddings = batchesResults.flat();
return flatAllEmbeddings;
};
}
| chat-ui/src/lib/server/embeddingEndpoints/tei/embeddingEndpoints.ts/0 | {
"file_path": "chat-ui/src/lib/server/embeddingEndpoints/tei/embeddingEndpoints.ts",
"repo_id": "chat-ui",
"token_count": 838
} |
import { env } from "$env/dynamic/private";
import { buildPrompt } from "$lib/buildPrompt";
import type { TextGenerationStreamOutput } from "@huggingface/inference";
import type { Endpoint } from "../endpoints";
import { z } from "zod";
import { logger } from "$lib/server/logger";
export const endpointLlamacppParametersSchema = z.object({
weight: z.number().int().positive().default(1),
model: z.any(),
type: z.literal("llamacpp"),
url: z.string().url().default("http://127.0.0.1:8080"), // legacy, feel free to remove in breaking change update
baseURL: z.string().url().optional(),
accessToken: z.string().default(env.HF_TOKEN ?? env.HF_ACCESS_TOKEN),
});
export function endpointLlamacpp(
input: z.input<typeof endpointLlamacppParametersSchema>
): Endpoint {
const { baseURL, url, model } = endpointLlamacppParametersSchema.parse(input);
return async ({ messages, preprompt, continueMessage, generateSettings }) => {
const prompt = await buildPrompt({
messages,
continueMessage,
preprompt,
model,
});
const parameters = { ...model.parameters, ...generateSettings };
const r = await fetch(`${baseURL ?? url}/completion`, {
method: "POST",
headers: {
"Content-Type": "application/json",
},
body: JSON.stringify({
prompt,
stream: true,
temperature: parameters.temperature,
top_p: parameters.top_p,
top_k: parameters.top_k,
stop: parameters.stop,
repeat_penalty: parameters.repetition_penalty,
n_predict: parameters.max_new_tokens,
cache_prompt: true,
}),
});
if (!r.ok) {
throw new Error(`Failed to generate text: ${await r.text()}`);
}
const encoder = new TextDecoderStream();
const reader = r.body?.pipeThrough(encoder).getReader();
return (async function* () {
let stop = false;
let generatedText = "";
let tokenId = 0;
let accumulatedData = ""; // Buffer to accumulate data chunks
while (!stop) {
// Read the stream and log the outputs to console
const out = (await reader?.read()) ?? { done: false, value: undefined };
// If it's done, we cancel
if (out.done) {
reader?.cancel();
return;
}
if (!out.value) {
return;
}
// Accumulate the data chunk
accumulatedData += out.value;
// Process each complete JSON object in the accumulated data
while (accumulatedData.includes("\n")) {
// Assuming each JSON object ends with a newline
const endIndex = accumulatedData.indexOf("\n");
let jsonString = accumulatedData.substring(0, endIndex).trim();
// Remove the processed part from the buffer
accumulatedData = accumulatedData.substring(endIndex + 1);
if (jsonString.startsWith("data: ")) {
jsonString = jsonString.slice(6);
let data = null;
try {
data = JSON.parse(jsonString);
} catch (e) {
logger.error(e, "Failed to parse JSON");
logger.error(jsonString, "Problematic JSON string:");
continue; // Skip this iteration and try the next chunk
}
// Handle the parsed data
if (data.content || data.stop) {
generatedText += data.content ?? "";
const output: TextGenerationStreamOutput = {
token: {
id: tokenId++,
text: data.content ?? "",
logprob: 0,
special: false,
},
generated_text: data.stop ? generatedText : null,
details: null,
};
if (data.stop) {
stop = true;
output.token.special = true;
reader?.cancel();
}
yield output;
}
}
}
}
})();
};
}
export default endpointLlamacpp;
| chat-ui/src/lib/server/endpoints/llamacpp/endpointLlamacpp.ts/0 | {
"file_path": "chat-ui/src/lib/server/endpoints/llamacpp/endpointLlamacpp.ts",
"repo_id": "chat-ui",
"token_count": 1447
} |
import { ToolResultStatus, type ToolCall, type Tool, type ToolResult } from "$lib/types/Tool";
import { v4 as uuidV4 } from "uuid";
import { getCallMethod, toolFromConfigs, type BackendToolContext } from "../tools";
import {
MessageToolUpdateType,
MessageUpdateStatus,
MessageUpdateType,
type MessageUpdate,
} from "$lib/types/MessageUpdate";
import type { TextGenerationContext } from "./types";
import directlyAnswer from "../tools/directlyAnswer";
import websearch from "../tools/web/search";
import { z } from "zod";
import { logger } from "../logger";
import { extractJson, toolHasName } from "../tools/utils";
import { mergeAsyncGenerators } from "$lib/utils/mergeAsyncGenerators";
import { MetricsServer } from "../metrics";
import { stringifyError } from "$lib/utils/stringifyError";
import { collections } from "../database";
import { ObjectId } from "mongodb";
import type { Message } from "$lib/types/Message";
import type { Assistant } from "$lib/types/Assistant";
import { assistantHasWebSearch } from "./assistant";
export async function getTools(
toolsPreference: Array<string>,
assistant: Pick<Assistant, "rag" | "tools"> | undefined
): Promise<Tool[]> {
let preferences = toolsPreference;
if (assistant) {
if (assistant?.tools?.length) {
preferences = assistant.tools;
if (assistantHasWebSearch(assistant)) {
preferences.push(websearch._id.toString());
}
} else {
if (assistantHasWebSearch(assistant)) {
return [websearch, directlyAnswer];
}
return [directlyAnswer];
}
}
// filter based on tool preferences, add the tools that are on by default
const activeConfigTools = toolFromConfigs.filter((el) => {
if (el.isLocked && el.isOnByDefault && !assistant) return true;
return preferences?.includes(el._id.toString()) ?? (el.isOnByDefault && !assistant);
});
// find tool where the id is in preferences
const activeCommunityTools = await collections.tools
.find({
_id: { $in: preferences.map((el) => new ObjectId(el)) },
})
.toArray()
.then((el) => el.map((el) => ({ ...el, call: getCallMethod(el) })));
return [...activeConfigTools, ...activeCommunityTools];
}
async function* callTool(
ctx: BackendToolContext,
tools: Tool[],
call: ToolCall
): AsyncGenerator<MessageUpdate, ToolResult | undefined, undefined> {
const uuid = uuidV4();
const tool = tools.find((el) => toolHasName(call.name, el));
if (!tool) {
return { call, status: ToolResultStatus.Error, message: `Could not find tool "${call.name}"` };
}
// Special case for directly_answer tool where we ignore
if (toolHasName(directlyAnswer.name, tool)) return;
const startTime = Date.now();
MetricsServer.getMetrics().tool.toolUseCount.inc({ tool: call.name });
yield {
type: MessageUpdateType.Tool,
subtype: MessageToolUpdateType.Call,
uuid,
call,
};
try {
const toolResult = yield* tool.call(call.parameters, ctx, uuid);
yield {
type: MessageUpdateType.Tool,
subtype: MessageToolUpdateType.Result,
uuid,
result: { ...toolResult, call, status: ToolResultStatus.Success },
};
MetricsServer.getMetrics().tool.toolUseDuration.observe(
{ tool: call.name },
Date.now() - startTime
);
await collections.tools.findOneAndUpdate({ _id: tool._id }, { $inc: { useCount: 1 } });
return { ...toolResult, call, status: ToolResultStatus.Success };
} catch (error) {
MetricsServer.getMetrics().tool.toolUseCountError.inc({ tool: call.name });
logger.error(error, `Failed while running tool ${call.name}. ${stringifyError(error)}`);
yield {
type: MessageUpdateType.Tool,
subtype: MessageToolUpdateType.Error,
uuid,
message:
"An error occurred while calling the tool " + call.name + ": " + stringifyError(error),
};
return {
call,
status: ToolResultStatus.Error,
message:
"An error occurred while calling the tool " + call.name + ": " + stringifyError(error),
};
}
}
export async function* runTools(
ctx: TextGenerationContext,
tools: Tool[],
preprompt?: string
): AsyncGenerator<MessageUpdate, ToolResult[], undefined> {
const { endpoint, conv, messages, assistant, ip, username } = ctx;
const calls: ToolCall[] = [];
const pickToolStartTime = Date.now();
// append a message with the list of all available files
const files = messages.reduce((acc, curr, idx) => {
if (curr.files) {
const prefix = (curr.from === "user" ? "input" : "output") + "_" + idx;
acc.push(
...curr.files.map(
(file, fileIdx) => `${prefix}_${fileIdx}.${file?.name?.split(".")?.pop()?.toLowerCase()}`
)
);
}
return acc;
}, [] as string[]);
let formattedMessages = messages.map((message, msgIdx) => {
let content = message.content;
if (message.files && message.files.length > 0) {
content +=
"\n\nAdded files: \n - " +
message.files
.map((file, fileIdx) => {
const prefix = message.from === "user" ? "input" : "output";
const fileName = file.name.split(".").pop()?.toLowerCase();
return `${prefix}_${msgIdx}_${fileIdx}.${fileName}`;
})
.join("\n - ");
}
return {
...message,
content,
} satisfies Message;
});
const fileMsg = {
id: crypto.randomUUID(),
from: "system",
content:
"Here is the list of available filenames that can be used as input for tools. Use the filenames that are in this list. \n The filename structure is as follows : {input for user|output for tool}_{message index in the conversation}_{file index in the list of files}.{file extension} \n - " +
files.join("\n - ") +
"\n\n\n",
} satisfies Message;
// put fileMsg before last if files.length > 0
formattedMessages = files.length
? [...formattedMessages.slice(0, -1), fileMsg, ...formattedMessages.slice(-1)]
: messages;
let rawText = "";
const mappedTools = tools.map((tool) => ({
...tool,
inputs: tool.inputs.map((input) => ({
...input,
type: input.type === "file" ? "str" : input.type,
})),
}));
// do the function calling bits here
for await (const output of await endpoint({
messages: formattedMessages,
preprompt,
generateSettings: { temperature: 0.1, ...assistant?.generateSettings },
tools: mappedTools,
conversationId: conv._id,
})) {
// model natively supports tool calls
if (output.token.toolCalls) {
calls.push(...output.token.toolCalls);
continue;
}
if (output.token.text) {
rawText += output.token.text;
}
// if we don't see a tool call within the first 100 characters, something is going wrong and we abort
if (rawText.length > 100 && !(rawText.includes("```json") || rawText.includes("{"))) {
return [];
}
// if we see a directly_answer tool call, we skip the rest
if (
rawText.includes("directly_answer") ||
rawText.includes("directlyAnswer") ||
rawText.includes("directly-answer")
) {
return [];
}
// look for a code blocks of ```json and parse them
// if they're valid json, add them to the calls array
if (output.generated_text) {
try {
const rawCalls = await extractJson(output.generated_text);
const newCalls = rawCalls
.map((call) => externalToToolCall(call, tools))
.filter((call) => call !== undefined) as ToolCall[];
calls.push(...newCalls);
} catch (e) {
logger.warn({ rawCall: output.generated_text, error: e }, "Error while parsing tool calls");
// error parsing the calls
yield {
type: MessageUpdateType.Status,
status: MessageUpdateStatus.Error,
message: "Error while parsing tool calls.",
};
}
}
}
MetricsServer.getMetrics().tool.timeToChooseTools.observe(
{ model: conv.model },
Date.now() - pickToolStartTime
);
const toolContext: BackendToolContext = { conv, messages, preprompt, assistant, ip, username };
const toolResults: (ToolResult | undefined)[] = yield* mergeAsyncGenerators(
calls.map((call) => callTool(toolContext, tools, call))
);
return toolResults.filter((result): result is ToolResult => result !== undefined);
}
function externalToToolCall(call: unknown, tools: Tool[]): ToolCall | undefined {
// Early return if invalid input
if (!isValidCallObject(call)) {
return undefined;
}
const parsedCall = parseExternalCall(call);
if (!parsedCall) return undefined;
const tool = tools.find((tool) => toolHasName(parsedCall.tool_name, tool));
if (!tool) {
logger.debug(
`Model requested tool that does not exist: "${parsedCall.tool_name}". Skipping tool...`
);
return undefined;
}
const parametersWithDefaults: Record<string, string> = {};
for (const input of tool.inputs) {
const value = parsedCall.parameters[input.name];
// Required so ensure it's there, otherwise return undefined
if (input.paramType === "required") {
if (value === undefined) {
logger.debug(
`Model requested tool "${parsedCall.tool_name}" but was missing required parameter "${input.name}". Skipping tool...`
);
return;
}
parametersWithDefaults[input.name] = value;
continue;
}
// Optional so use default if not there
parametersWithDefaults[input.name] = value;
if (input.paramType === "optional") {
parametersWithDefaults[input.name] ??= input.default.toString();
}
}
return {
name: parsedCall.tool_name,
parameters: parametersWithDefaults,
};
}
// Helper functions
function isValidCallObject(call: unknown): call is Record<string, unknown> {
return typeof call === "object" && call !== null;
}
function parseExternalCall(callObj: Record<string, unknown>) {
const nameFields = ["tool_name", "name"] as const;
const parametersFields = ["parameters", "arguments", "parameter_definitions"] as const;
const groupedCall = {
tool_name: "" as string,
parameters: undefined as Record<string, string> | undefined,
};
for (const name of nameFields) {
if (callObj[name]) {
groupedCall.tool_name = callObj[name] as string;
}
}
for (const name of parametersFields) {
if (callObj[name]) {
groupedCall.parameters = callObj[name] as Record<string, string>;
}
}
return z
.object({
tool_name: z.string(),
parameters: z.record(z.any()),
})
.parse(groupedCall);
}
| chat-ui/src/lib/server/textGeneration/tools.ts/0 | {
"file_path": "chat-ui/src/lib/server/textGeneration/tools.ts",
"repo_id": "chat-ui",
"token_count": 3544
} |
import { sentences as splitBySentences } from "sbd";
import { MarkdownElementType, type MarkdownElement } from "../types";
export function chunkElements(elements: MarkdownElement[], maxLength: number): MarkdownElement[] {
return elements.flatMap((elem) => {
// Can't split headers because it would break the tree, and this situation should be rare
// so we just cut off the end
if (elem.type === MarkdownElementType.Header) {
return { ...elem, content: elem.content.slice(0, maxLength) };
}
const contentChunks = enforceMaxLength(elem.content, maxLength);
return contentChunks.map<MarkdownElement>((content) => ({ ...elem, content }));
});
}
const delimitersByPriority = ["?", "!", ".", ";", ":", ",", "|", " - ", " ", "-"];
function enforceMaxLength(text: string, maxLength: number): string[] {
if (text.length <= maxLength) return [text].filter(Boolean);
return splitBySentences(text)
.flatMap((sentence) => {
if (sentence.length <= maxLength) return sentence;
// Discover all necessary split points to fit the sentence within the max length
const indices: [number, number][] = [];
while ((indices.at(-1)?.[1] ?? 0) < sentence.length) {
const prevIndex = indices.at(-1)?.[1] ?? 0;
// Remaining text fits within maxLength
if (prevIndex + maxLength >= sentence.length) {
indices.push([prevIndex, sentence.length]);
continue;
}
const bestDelimiter = delimitersByPriority.find(
(delimiter) => sentence.lastIndexOf(delimiter, prevIndex + maxLength) !== -1
);
// Fallback in the unusual case that no delimiter is found
if (!bestDelimiter) {
indices.push([prevIndex, prevIndex + maxLength]);
continue;
}
const closestDelimiter = sentence.lastIndexOf(bestDelimiter, prevIndex + maxLength);
indices.push([prevIndex, Math.max(prevIndex + 1, closestDelimiter)]);
}
return indices.map((sliceIndices) => sentence.slice(...sliceIndices));
})
.reduce<string[]>(
(chunks, sentence) => {
const lastChunk = chunks[chunks.length - 1];
if (lastChunk.length + sentence.length <= maxLength) {
return [...chunks.slice(0, -1), lastChunk + sentence];
}
return [...chunks, sentence];
},
[""]
)
.filter(Boolean);
}
| chat-ui/src/lib/server/websearch/markdown/utils/chunk.ts/0 | {
"file_path": "chat-ui/src/lib/server/websearch/markdown/utils/chunk.ts",
"repo_id": "chat-ui",
"token_count": 801
} |
import { env } from "$env/dynamic/private";
import { isURL } from "$lib/utils/isUrl";
import type { WebSearchSource } from "$lib/types/WebSearch";
interface YouWebSearch {
hits: YouSearchHit[];
latency: number;
}
interface YouSearchHit {
url: string;
title: string;
description: string;
snippets: string[];
}
export default async function searchWebYouApi(query: string): Promise<WebSearchSource[]> {
const response = await fetch(`https://api.ydc-index.io/search?query=${encodeURIComponent(query)}`, {
method: "GET",
headers: {
"X-API-Key": env.YDC_API_KEY,
"Content-type": "application/json; charset=UTF-8",
},
});
if (!response.ok) {
throw new Error(`You.com API returned error code ${response.status} - ${response.statusText}`);
}
const data = (await response.json()) as YouWebSearch;
const formattedResultsWithSnippets = data.hits
.filter(({ url }) => isURL(url))
.map(({ title, url, snippets }) => ({
title,
link: url,
text: snippets?.join("\n") || "",
}))
.sort((a, b) => b.text.length - a.text.length); // desc order by text length
return formattedResultsWithSnippets;
}
| chat-ui/src/lib/server/websearch/search/endpoints/youApi.ts/0 | {
"file_path": "chat-ui/src/lib/server/websearch/search/endpoints/youApi.ts",
"repo_id": "chat-ui",
"token_count": 402
} |
import type { Conversation } from "$lib/types/Conversation";
import { sha256 } from "./sha256";
export async function hashConv(conv: Conversation) {
// messages contains the conversation message but only the immutable part
const messages = conv.messages.map((message) => {
return (({ from, id, content, webSearchId }) => ({ from, id, content, webSearchId }))(message);
});
const hash = await sha256(JSON.stringify(messages));
return hash;
}
| chat-ui/src/lib/utils/hashConv.ts/0 | {
"file_path": "chat-ui/src/lib/utils/hashConv.ts",
"repo_id": "chat-ui",
"token_count": 132
} |
<script lang="ts">
import { run } from "svelte/legacy";
import "../styles/main.css";
import { onDestroy, onMount, untrack } from "svelte";
import { goto } from "$app/navigation";
import { base } from "$app/paths";
import { page } from "$app/stores";
import { env as envPublic } from "$env/dynamic/public";
import { error } from "$lib/stores/errors";
import { createSettingsStore } from "$lib/stores/settings";
import { shareConversation } from "$lib/shareConversation";
import Toast from "$lib/components/Toast.svelte";
import NavMenu from "$lib/components/NavMenu.svelte";
import MobileNav from "$lib/components/MobileNav.svelte";
import titleUpdate from "$lib/stores/titleUpdate";
import DisclaimerModal from "$lib/components/DisclaimerModal.svelte";
import ExpandNavigation from "$lib/components/ExpandNavigation.svelte";
import { loginModalOpen } from "$lib/stores/loginModal";
import LoginModal from "$lib/components/LoginModal.svelte";
let { data = $bindable(), children } = $props();
let conversations = $state(data.conversations);
$effect(() => {
data.conversations && untrack(() => (conversations = data.conversations));
});
let isNavOpen = $state(false);
let isNavCollapsed = $state(false);
let errorToastTimeout: ReturnType<typeof setTimeout>;
let currentError: string | undefined = $state();
async function onError() {
// If a new different error comes, wait for the current error to hide first
if ($error && currentError && $error !== currentError) {
clearTimeout(errorToastTimeout);
currentError = undefined;
await new Promise((resolve) => setTimeout(resolve, 300));
}
currentError = $error;
errorToastTimeout = setTimeout(() => {
$error = undefined;
currentError = undefined;
}, 10000);
}
async function deleteConversation(id: string) {
try {
const res = await fetch(`${base}/conversation/${id}`, {
method: "DELETE",
headers: {
"Content-Type": "application/json",
},
});
if (!res.ok) {
$error = "Error while deleting conversation, try again.";
return;
}
conversations = conversations.filter((conv) => conv.id !== id);
if ($page.params.id === id) {
await goto(`${base}/`, { invalidateAll: true });
}
} catch (err) {
console.error(err);
$error = String(err);
}
}
async function editConversationTitle(id: string, title: string) {
try {
const res = await fetch(`${base}/conversation/${id}`, {
method: "PATCH",
headers: {
"Content-Type": "application/json",
},
body: JSON.stringify({ title }),
});
if (!res.ok) {
$error = "Error while editing title, try again.";
return;
}
conversations = conversations.map((conv) => (conv.id === id ? { ...conv, title } : conv));
} catch (err) {
console.error(err);
$error = String(err);
}
}
onDestroy(() => {
clearTimeout(errorToastTimeout);
});
run(() => {
if ($error) onError();
});
run(() => {
if ($titleUpdate) {
const convIdx = conversations.findIndex(({ id }) => id === $titleUpdate?.convId);
if (convIdx != -1) {
conversations[convIdx].title = $titleUpdate?.title ?? conversations[convIdx].title;
}
$titleUpdate = null;
}
});
const settings = createSettingsStore(data.settings);
onMount(async () => {
if ($page.url.searchParams.has("model")) {
await settings
.instantSet({
activeModel: $page.url.searchParams.get("model") ?? $settings.activeModel,
})
.then(async () => {
const query = new URLSearchParams($page.url.searchParams.toString());
query.delete("model");
await goto(`${base}/?${query.toString()}`, {
invalidateAll: true,
});
});
}
if ($page.url.searchParams.has("tools")) {
const tools = $page.url.searchParams.get("tools")?.split(",");
await settings
.instantSet({
tools: [...($settings.tools ?? []), ...(tools ?? [])],
})
.then(async () => {
const query = new URLSearchParams($page.url.searchParams.toString());
query.delete("tools");
await goto(`${base}/?${query.toString()}`, {
invalidateAll: true,
});
});
}
});
let mobileNavTitle = $derived(
["/models", "/assistants", "/privacy", "/tools"].includes($page.route.id ?? "")
? ""
: conversations.find((conv) => conv.id === $page.params.id)?.title
);
let showDisclaimer = $derived(
!$settings.ethicsModalAccepted &&
$page.url.pathname !== `${base}/privacy` &&
envPublic.PUBLIC_APP_DISCLAIMER === "1" &&
!($page.data.shared === true)
);
</script>
<svelte:head>
<title>{envPublic.PUBLIC_APP_NAME}</title>
<meta name="description" content="The first open source alternative to ChatGPT. 💪" />
<meta name="twitter:card" content="summary_large_image" />
<meta name="twitter:site" content="@huggingface" />
<!-- use those meta tags everywhere except on the share assistant page -->
<!-- feel free to refacto if there's a better way -->
{#if !$page.url.pathname.includes("/assistant/") && $page.route.id !== "/assistants" && !$page.url.pathname.includes("/models/") && !$page.url.pathname.includes("/tools")}
<meta property="og:title" content={envPublic.PUBLIC_APP_NAME} />
<meta property="og:type" content="website" />
<meta property="og:url" content="{envPublic.PUBLIC_ORIGIN || $page.url.origin}{base}" />
<meta
property="og:image"
content="{envPublic.PUBLIC_ORIGIN ||
$page.url.origin}{base}/{envPublic.PUBLIC_APP_ASSETS}/thumbnail.png"
/>
<meta property="og:description" content={envPublic.PUBLIC_APP_DESCRIPTION} />
{/if}
<link
rel="icon"
href="{envPublic.PUBLIC_ORIGIN ||
$page.url.origin}{base}/{envPublic.PUBLIC_APP_ASSETS}/favicon.ico"
sizes="32x32"
/>
<link
rel="icon"
href="{envPublic.PUBLIC_ORIGIN ||
$page.url.origin}{base}/{envPublic.PUBLIC_APP_ASSETS}/icon.svg"
type="image/svg+xml"
/>
<link
rel="apple-touch-icon"
href="{envPublic.PUBLIC_ORIGIN ||
$page.url.origin}{base}/{envPublic.PUBLIC_APP_ASSETS}/apple-touch-icon.png"
/>
<link
rel="manifest"
href="{envPublic.PUBLIC_ORIGIN ||
$page.url.origin}{base}/{envPublic.PUBLIC_APP_ASSETS}/manifest.json"
/>
{#if envPublic.PUBLIC_PLAUSIBLE_SCRIPT_URL && envPublic.PUBLIC_ORIGIN}
<script
defer
data-domain={new URL(envPublic.PUBLIC_ORIGIN).hostname}
src={envPublic.PUBLIC_PLAUSIBLE_SCRIPT_URL}
></script>
{/if}
{#if envPublic.PUBLIC_APPLE_APP_ID}
<meta name="apple-itunes-app" content={`app-id=${envPublic.PUBLIC_APPLE_APP_ID}`} />
{/if}
</svelte:head>
{#if showDisclaimer}
<DisclaimerModal on:close={() => ($settings.ethicsModalAccepted = true)} />
{/if}
{#if $loginModalOpen}
<LoginModal
on:close={() => {
$loginModalOpen = false;
}}
/>
{/if}
<div
class="fixed grid h-full w-screen grid-cols-1 grid-rows-[auto,1fr] overflow-hidden text-smd {!isNavCollapsed
? 'md:grid-cols-[290px,1fr]'
: 'md:grid-cols-[0px,1fr]'} transition-[300ms] [transition-property:grid-template-columns] dark:text-gray-300 md:grid-rows-[1fr]"
>
<ExpandNavigation
isCollapsed={isNavCollapsed}
onClick={() => (isNavCollapsed = !isNavCollapsed)}
classNames="absolute inset-y-0 z-10 my-auto {!isNavCollapsed
? 'left-[290px]'
: 'left-0'} *:transition-transform"
/>
<MobileNav isOpen={isNavOpen} on:toggle={(ev) => (isNavOpen = ev.detail)} title={mobileNavTitle}>
<NavMenu
{conversations}
user={data.user}
canLogin={data.user === undefined && data.loginEnabled}
on:shareConversation={(ev) => shareConversation(ev.detail.id, ev.detail.title)}
on:deleteConversation={(ev) => deleteConversation(ev.detail)}
on:editConversationTitle={(ev) => editConversationTitle(ev.detail.id, ev.detail.title)}
/>
</MobileNav>
<nav
class="grid max-h-screen grid-cols-1 grid-rows-[auto,1fr,auto] overflow-hidden *:w-[290px] max-md:hidden"
>
<NavMenu
{conversations}
user={data.user}
canLogin={data.user === undefined && data.loginEnabled}
on:shareConversation={(ev) => shareConversation(ev.detail.id, ev.detail.title)}
on:deleteConversation={(ev) => deleteConversation(ev.detail)}
on:editConversationTitle={(ev) => editConversationTitle(ev.detail.id, ev.detail.title)}
/>
</nav>
{#if currentError}
<Toast message={currentError} />
{/if}
{@render children?.()}
</div>
| chat-ui/src/routes/+layout.svelte/0 | {
"file_path": "chat-ui/src/routes/+layout.svelte",
"repo_id": "chat-ui",
"token_count": 3266
} |
import ChatThumbnail from "./ChatThumbnail.svelte";
import { collections } from "$lib/server/database";
import { error, type RequestHandler } from "@sveltejs/kit";
import { ObjectId } from "mongodb";
import type { SvelteComponent } from "svelte";
import { Resvg } from "@resvg/resvg-js";
import satori from "satori";
import { html } from "satori-html";
import InterRegular from "$lib/server/fonts/Inter-Regular.ttf";
import InterBold from "$lib/server/fonts/Inter-Bold.ttf";
import sharp from "sharp";
export const GET: RequestHandler = (async ({ params }) => {
const assistant = await collections.assistants.findOne({
_id: new ObjectId(params.assistantId),
});
if (!assistant) {
error(404, "Assistant not found.");
}
let avatar = "";
const fileId = collections.bucket.find({ filename: assistant._id.toString() });
const file = await fileId.next();
if (file) {
avatar = await (async () => {
const fileStream = collections.bucket.openDownloadStream(file?._id);
const fileBuffer = await new Promise<Buffer>((resolve, reject) => {
const chunks: Uint8Array[] = [];
fileStream.on("data", (chunk) => chunks.push(chunk));
fileStream.on("error", reject);
fileStream.on("end", () => resolve(Buffer.concat(chunks)));
});
return fileBuffer;
})()
.then(async (buf) => sharp(buf).jpeg().toBuffer()) // convert to jpeg bc satori png is really slow
.then(async (buf) => "data:image/jpeg;base64," + buf.toString("base64"));
}
const renderedComponent = (ChatThumbnail as unknown as SvelteComponent).render({
name: assistant.name,
description: assistant.description,
createdByName: assistant.createdByName,
avatar,
});
const reactLike = html(
"<style>" + renderedComponent.css.code + "</style>" + renderedComponent.html
);
const svg = await satori(reactLike, {
width: 1200,
height: 648,
fonts: [
{
name: "Inter",
data: InterRegular as unknown as ArrayBuffer,
weight: 500,
},
{
name: "Inter",
data: InterBold as unknown as ArrayBuffer,
weight: 700,
},
],
});
const png = new Resvg(svg, {
fitTo: { mode: "original" },
})
.render()
.asPng();
return new Response(png, {
headers: {
"Content-Type": "image/png",
},
});
}) satisfies RequestHandler;
| chat-ui/src/routes/assistant/[assistantId]/thumbnail.png/+server.ts/0 | {
"file_path": "chat-ui/src/routes/assistant/[assistantId]/thumbnail.png/+server.ts",
"repo_id": "chat-ui",
"token_count": 826
} |
import { redirect, error } from "@sveltejs/kit";
import { getOIDCUserData, validateAndParseCsrfToken } from "$lib/server/auth";
import { z } from "zod";
import { base } from "$app/paths";
import { updateUser } from "./updateUser";
import { env } from "$env/dynamic/private";
import JSON5 from "json5";
const allowedUserEmails = z
.array(z.string().email())
.optional()
.default([])
.parse(JSON5.parse(env.ALLOWED_USER_EMAILS));
const allowedUserDomains = z
.array(z.string().regex(/\.\w+$/)) // Contains at least a dot
.optional()
.default([])
.parse(JSON5.parse(env.ALLOWED_USER_DOMAINS));
export async function load({ url, locals, cookies, request, getClientAddress }) {
const { error: errorName, error_description: errorDescription } = z
.object({
error: z.string().optional(),
error_description: z.string().optional(),
})
.parse(Object.fromEntries(url.searchParams.entries()));
if (errorName) {
error(400, errorName + (errorDescription ? ": " + errorDescription : ""));
}
const { code, state, iss } = z
.object({
code: z.string(),
state: z.string(),
iss: z.string().optional(),
})
.parse(Object.fromEntries(url.searchParams.entries()));
const csrfToken = Buffer.from(state, "base64").toString("utf-8");
const validatedToken = await validateAndParseCsrfToken(csrfToken, locals.sessionId);
if (!validatedToken) {
error(403, "Invalid or expired CSRF token");
}
const { userData } = await getOIDCUserData(
{ redirectURI: validatedToken.redirectUrl },
code,
iss
);
// Filter by allowed user emails or domains
if (allowedUserEmails.length > 0 || allowedUserDomains.length > 0) {
if (!userData.email) {
error(403, "User not allowed: email not returned");
}
const emailVerified = userData.email_verified ?? true;
if (!emailVerified) {
error(403, "User not allowed: email not verified");
}
const emailDomain = userData.email.split("@")[1];
const isEmailAllowed = allowedUserEmails.includes(userData.email);
const isDomainAllowed = allowedUserDomains.includes(emailDomain);
if (!isEmailAllowed && !isDomainAllowed) {
error(403, "User not allowed");
}
}
await updateUser({
userData,
locals,
cookies,
userAgent: request.headers.get("user-agent") ?? undefined,
ip: getClientAddress(),
});
redirect(302, `${base}/`);
}
| chat-ui/src/routes/login/callback/+page.server.ts/0 | {
"file_path": "chat-ui/src/routes/login/callback/+page.server.ts",
"repo_id": "chat-ui",
"token_count": 827
} |
import { collections } from "$lib/server/database";
import { type Actions, fail, redirect } from "@sveltejs/kit";
import { ObjectId } from "mongodb";
import { authCondition } from "$lib/server/auth";
import { base } from "$app/paths";
import { env as envPublic } from "$env/dynamic/public";
import { env } from "$env/dynamic/private";
import { z } from "zod";
import type { Assistant } from "$lib/types/Assistant";
import { ReviewStatus } from "$lib/types/Review";
import { sendSlack } from "$lib/server/sendSlack";
async function assistantOnlyIfAuthor(locals: App.Locals, assistantId?: string) {
const assistant = await collections.assistants.findOne({ _id: new ObjectId(assistantId) });
if (!assistant) {
throw Error("Assistant not found");
}
if (
assistant.createdById.toString() !== (locals.user?._id ?? locals.sessionId).toString() &&
!locals.user?.isAdmin
) {
throw Error("You are not the author of this assistant");
}
return assistant;
}
export const actions: Actions = {
delete: async ({ params, locals }) => {
let assistant;
try {
assistant = await assistantOnlyIfAuthor(locals, params.assistantId);
} catch (e) {
return fail(400, { error: true, message: (e as Error).message });
}
await collections.assistants.deleteOne({ _id: assistant._id });
// and remove it from all users settings
await collections.settings.updateMany(
{
assistants: { $in: [assistant._id] },
},
{
$pull: { assistants: assistant._id },
}
);
// and delete all avatars
const fileCursor = collections.bucket.find({ filename: assistant._id.toString() });
// Step 2: Delete the existing file if it exists
let fileId = await fileCursor.next();
while (fileId) {
await collections.bucket.delete(fileId._id);
fileId = await fileCursor.next();
}
redirect(302, `${base}/settings`);
},
report: async ({ request, params, locals, url }) => {
// is there already a report from this user for this model ?
const report = await collections.reports.findOne({
createdBy: locals.user?._id ?? locals.sessionId,
object: "assistant",
contentId: new ObjectId(params.assistantId),
});
if (report) {
return fail(400, { error: true, message: "Already reported" });
}
const formData = await request.formData();
const result = z.string().min(1).max(128).safeParse(formData?.get("reportReason"));
if (!result.success) {
return fail(400, { error: true, message: "Invalid report reason" });
}
const { acknowledged } = await collections.reports.insertOne({
_id: new ObjectId(),
contentId: new ObjectId(params.assistantId),
object: "assistant",
createdBy: locals.user?._id ?? locals.sessionId,
createdAt: new Date(),
updatedAt: new Date(),
reason: result.data,
});
if (!acknowledged) {
return fail(500, { error: true, message: "Failed to report assistant" });
}
if (env.WEBHOOK_URL_REPORT_ASSISTANT) {
const prefixUrl =
envPublic.PUBLIC_SHARE_PREFIX || `${envPublic.PUBLIC_ORIGIN || url.origin}${base}`;
const assistantUrl = `${prefixUrl}/assistant/${params.assistantId}`;
const assistant = await collections.assistants.findOne<Pick<Assistant, "name">>(
{ _id: new ObjectId(params.assistantId) },
{ projection: { name: 1 } }
);
const username = locals.user?.username;
await sendSlack(
`🔴 Assistant <${assistantUrl}|${assistant?.name}> reported by ${
username ? `<http://hf.co/${username}|${username}>` : "non-logged in user"
}.\n\n> ${result.data}`
);
}
return { from: "report", ok: true, message: "Assistant reported" };
},
subscribe: async ({ params, locals }) => {
const assistant = await collections.assistants.findOne({
_id: new ObjectId(params.assistantId),
});
if (!assistant) {
return fail(404, { error: true, message: "Assistant not found" });
}
// don't push if it's already there
const settings = await collections.settings.findOne(authCondition(locals));
if (settings?.assistants?.includes(assistant._id)) {
return fail(400, { error: true, message: "Already subscribed" });
}
const result = await collections.settings.updateOne(authCondition(locals), {
$addToSet: { assistants: assistant._id },
});
// reduce count only if push succeeded
if (result.modifiedCount > 0) {
await collections.assistants.updateOne({ _id: assistant._id }, { $inc: { userCount: 1 } });
}
return { from: "subscribe", ok: true, message: "Assistant added" };
},
unsubscribe: async ({ params, locals }) => {
const assistant = await collections.assistants.findOne({
_id: new ObjectId(params.assistantId),
});
if (!assistant) {
return fail(404, { error: true, message: "Assistant not found" });
}
const result = await collections.settings.updateOne(authCondition(locals), {
$pull: { assistants: assistant._id },
});
// reduce count only if pull succeeded
if (result.modifiedCount > 0) {
await collections.assistants.updateOne({ _id: assistant._id }, { $inc: { userCount: -1 } });
}
redirect(302, `${base}/settings`);
},
deny: async ({ params, locals, url }) => {
return await setReviewStatus({
assistantId: params.assistantId,
locals,
status: ReviewStatus.DENIED,
url,
});
},
approve: async ({ params, locals, url }) => {
return await setReviewStatus({
assistantId: params.assistantId,
locals,
status: ReviewStatus.APPROVED,
url,
});
},
request: async ({ params, locals, url }) => {
return await setReviewStatus({
assistantId: params.assistantId,
locals,
status: ReviewStatus.PENDING,
url,
});
},
unrequest: async ({ params, locals, url }) => {
return await setReviewStatus({
assistantId: params.assistantId,
locals,
status: ReviewStatus.PRIVATE,
url,
});
},
};
async function setReviewStatus({
locals,
assistantId,
status,
url,
}: {
locals: App.Locals;
assistantId?: string;
status: ReviewStatus;
url: URL;
}) {
if (!assistantId) {
return fail(400, { error: true, message: "Assistant ID is required" });
}
const assistant = await collections.assistants.findOne({
_id: new ObjectId(assistantId),
});
if (!assistant) {
return fail(404, { error: true, message: "Assistant not found" });
}
if (
!locals.user ||
(!locals.user.isAdmin && assistant.createdById.toString() !== locals.user._id.toString())
) {
return fail(403, { error: true, message: "Permission denied" });
}
// only admins can set the status to APPROVED or DENIED
// if the status is already APPROVED or DENIED, only admins can change it
if (
(status === ReviewStatus.APPROVED ||
status === ReviewStatus.DENIED ||
assistant.review === ReviewStatus.APPROVED ||
assistant.review === ReviewStatus.DENIED) &&
!locals.user?.isAdmin
) {
return fail(403, { error: true, message: "Permission denied" });
}
const result = await collections.assistants.updateOne(
{ _id: assistant._id },
{ $set: { review: status } }
);
if (result.modifiedCount === 0) {
return fail(500, { error: true, message: "Failed to update review status" });
}
if (status === ReviewStatus.PENDING) {
const prefixUrl =
envPublic.PUBLIC_SHARE_PREFIX || `${envPublic.PUBLIC_ORIGIN || url.origin}${base}`;
const assistantUrl = `${prefixUrl}/assistant/${assistantId}`;
const username = locals.user?.username;
await sendSlack(
`🟢 Assistant <${assistantUrl}|${assistant?.name}> requested to be featured by ${
username ? `<http://hf.co/${username}|${username}>` : "non-logged in user"
}.`
);
}
return { from: "setReviewStatus", ok: true, message: "Review status updated" };
}
| chat-ui/src/routes/settings/(nav)/assistants/[assistantId]/+page.server.ts/0 | {
"file_path": "chat-ui/src/routes/settings/(nav)/assistants/[assistantId]/+page.server.ts",
"repo_id": "chat-ui",
"token_count": 2712
} |
<script lang="ts">
import { run } from "svelte/legacy";
interface Props {
type: string;
value: string | boolean | number;
disabled?: boolean;
}
let { type, value = $bindable(), disabled = false }: Props = $props();
let innerValue: string | boolean | number = $state(
(() => {
if (type === "bool") {
return Boolean(value) || false;
} else if (type === "int" || type === "float") {
return Number(value) || 0;
} else {
return value || "";
}
})()
);
let previousValue: string | boolean | number = $state("");
$effect(() => {
previousValue = innerValue;
});
run(() => {
value = typeof innerValue === "string" ? innerValue : innerValue.toString();
});
</script>
{#if type === "str" && typeof innerValue === "string"}
<input
type="text"
class="w-full rounded-lg border-2 border-gray-200 bg-gray-100 p-2"
bind:value={innerValue}
{disabled}
/>
{:else if type === "int" && typeof innerValue === "number"}
<input
type="number"
step="1"
class="w-full rounded-lg border-2 border-gray-200 bg-gray-100 p-2"
{disabled}
oninput={(e) => {
const value = e.currentTarget.value;
if (value === "" || isNaN(parseInt(value))) {
innerValue = previousValue;
e.currentTarget.value = previousValue.toString();
return;
} else {
innerValue = parseInt(value);
previousValue = innerValue;
}
}}
value={innerValue}
/>
{:else if type === "float" && typeof innerValue === "number"}
<input
type="number"
step="0.001"
class="w-full rounded-lg border-2 border-gray-200 bg-gray-100 p-2"
{disabled}
oninput={(e) => {
const value = e.currentTarget.value;
if (value === "" || isNaN(parseFloat(value))) {
innerValue = previousValue;
e.currentTarget.value = previousValue.toString();
return;
} else {
innerValue = parseFloat(value);
previousValue = innerValue;
}
}}
value={innerValue}
/>
{:else if type === "bool" && typeof innerValue === "boolean"}
<input
type="checkbox"
class="peer my-auto mr-4 size-6 rounded-lg border-2 border-gray-200 bg-gray-100 p-1"
bind:checked={innerValue}
/>
<!-- Literal['bigvgan_24khz_100band', 'bigvgan_base_24khz_100band', 'bigvgan_22khz_80band', 'bigvgan_base_22khz_80band', 'bigvgan_v2_22khz_80band_256x', 'bigvgan_v2_22khz_80band_fmax8k_256x', 'bigvgan_v2_24khz_100band_256x', 'bigvgan_v2_44khz_128band_256x', 'bigvgan_v2_44khz_128band_512x'] -->
{:else if type.startsWith("Literal[") && typeof innerValue === "string"}
{@const options = type
.slice(8, -1)
.split(",")
.map((option) => option.trim().replaceAll("'", ""))}
<select
class="w-full rounded-lg border-2 border-gray-200 bg-gray-100 p-2"
bind:value={innerValue}
{disabled}
>
{#each options as option}
<option value={option}>{option}</option>
{/each}
</select>
{:else}
<span class="font-mono text-red-800">{innerValue}-{typeof innerValue}</span>
{/if}
| chat-ui/src/routes/tools/ToolInputComponent.svelte/0 | {
"file_path": "chat-ui/src/routes/tools/ToolInputComponent.svelte",
"repo_id": "chat-ui",
"token_count": 1200
} |
.PHONY: quality style test
check_dirs := tests src benchmarks utils
# Check that source code meets quality standards
quality:
ruff check $(check_dirs) setup.py # linter
ruff format --check $(check_dirs) setup.py # formatter
# Format source code automatically
style:
ruff check --fix $(check_dirs) setup.py # linter
ruff format $(check_dirs) setup.py # formatter
# Run tests for the library
test:
python -m pytest -n auto --dist=loadfile -s -v ./tests/
| datasets/Makefile/0 | {
"file_path": "datasets/Makefile",
"repo_id": "datasets",
"token_count": 148
} |
# Create a dataset
Sometimes, you may need to create a dataset if you're working with your own data. Creating a dataset with 🤗 Datasets confers all the advantages of the library to your dataset: fast loading and processing, [streaming enormous datasets](stream), [memory-mapping](https://huggingface.co/course/chapter5/4?fw=pt#the-magic-of-memory-mapping), and more. You can easily and rapidly create a dataset with 🤗 Datasets' low-code approaches, reducing the time it takes to start training a model. In many cases, it is as easy as [dragging and dropping](upload_dataset#upload-with-the-hub-ui) your data files into a dataset repository on the Hub.
In this tutorial, you'll learn how to use 🤗 Datasets low-code methods for creating all types of datasets:
* Folder-based builders for quickly creating an image or audio dataset
* `from_` methods for creating datasets from in-memory data such as Python dictionaries and generators
## File-based builders
🤗 Datasets supports many common formats such as `csv`, `json/jsonl`, `parquet`, `txt`.
For example, it can read a dataset made up of one or several CSV files (in this case, pass your CSV files as a list):
```py
>>> from datasets import load_dataset
>>> dataset = load_dataset("csv", data_files="my_file.csv")
```
To get the list of supported formats and code examples, follow this guide [here](https://huggingface.co/docs/datasets/loading#local-and-remote-files).
## Folder-based builders
There are two folder-based builders, [`ImageFolder`] and [`AudioFolder`]. These are low-code methods for quickly creating an image or audio dataset with several thousand examples. They are great for rapidly prototyping computer vision and speech models before scaling to a larger dataset. Folder-based builders take your data and automatically generate the dataset's features, splits, and labels. Under the hood:
* [`ImageFolder`] uses the [`~datasets.Image`] feature to decode an image file. Many image extensions are supported, such as jpg and png. You can check the complete [list](https://github.com/huggingface/datasets/blob/b5672a956d5de864e6f5550e493527d962d6ae55/src/datasets/packaged_modules/imagefolder/imagefolder.py#L39) of supported image extensions.
* [`AudioFolder`] uses the [`~datasets.Audio`] feature to decode an audio file. Audio extensions such as wav and mp3 are supported, and you can check the complete [list](https://github.com/huggingface/datasets/blob/b5672a956d5de864e6f5550e493527d962d6ae55/src/datasets/packaged_modules/audiofolder/audiofolder.py#L39) of supported audio extensions.
The dataset splits are generated from the repository structure, and the label names are automatically inferred from the directory name.
For example, if your image dataset (it is the same for an audio dataset) is stored like this:
```
pokemon/train/grass/bulbasaur.png
pokemon/train/fire/charmander.png
pokemon/train/water/squirtle.png
pokemon/test/grass/ivysaur.png
pokemon/test/fire/charmeleon.png
pokemon/test/water/wartortle.png
```
Then this is how the folder-based builder generates an example:
<div class="flex justify-center">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets/folder-based-builder.png"/>
</div>
Create the image dataset by specifying `imagefolder` in [`load_dataset`]:
```py
>>> from datasets import load_dataset
>>> dataset = load_dataset("imagefolder", data_dir="/path/to/pokemon")
```
An audio dataset is created in the same way, except you specify `audiofolder` in [`load_dataset`] instead:
```py
>>> from datasets import load_dataset
>>> dataset = load_dataset("audiofolder", data_dir="/path/to/folder")
```
Any additional information about your dataset, such as text captions or transcriptions, can be included with a `metadata.csv` file in the folder containing your dataset. The metadata file needs to have a `file_name` column that links the image or audio file to its corresponding metadata:
```
file_name, text
bulbasaur.png, There is a plant seed on its back right from the day this Pokémon is born.
charmander.png, It has a preference for hot things.
squirtle.png, When it retracts its long neck into its shell, it squirts out water with vigorous force.
```
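With the metadata file in place, the same `imagefolder` (or `audiofolder`) loader picks up the captions automatically. As a minimal sketch (the path is a placeholder, and the caption is expected to land in a `text` column alongside the decoded image):

```py
>>> from datasets import load_dataset
>>> dataset = load_dataset("imagefolder", data_dir="/path/to/folder", split="train")
>>> dataset[0]  # contains the decoded "image" and the "text" caption from metadata.csv
```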
To learn more about each of these folder-based builders, check out the <a href="https://huggingface.co/docs/datasets/image_dataset#imagefolder"><span class="underline decoration-yellow-400 decoration-2 font-semibold">ImageFolder</span></a> or <a href="https://huggingface.co/docs/datasets/audio_dataset#audiofolder"><span class="underline decoration-pink-400 decoration-2 font-semibold">AudioFolder</span></a> guides.
## From Python dictionaries
You can also create a dataset from data in Python dictionaries. There are two ways you can create a dataset using the `from_` methods:
* The [`~Dataset.from_generator`] method is the most memory-efficient way to create a dataset from a [generator](https://wiki.python.org/moin/Generators) due to a generator's iterative behavior. This is especially useful when you're working with a really large dataset that may not fit in memory, since the dataset is generated on disk progressively and then memory-mapped.
```py
>>> from datasets import Dataset
>>> def gen():
... yield {"pokemon": "bulbasaur", "type": "grass"}
... yield {"pokemon": "squirtle", "type": "water"}
>>> ds = Dataset.from_generator(gen)
>>> ds[0]
{"pokemon": "bulbasaur", "type": "grass"}
```
A generator-based [`IterableDataset`] needs to be iterated over with a `for` loop for example:
```py
>>> from datasets import IterableDataset
>>> ds = IterableDataset.from_generator(gen)
>>> for example in ds:
... print(example)
{"pokemon": "bulbasaur", "type": "grass"}
{"pokemon": "squirtle", "type": "water"}
```
* The [`~Dataset.from_dict`] method is a straightforward way to create a dataset from a dictionary:
```py
>>> from datasets import Dataset
>>> ds = Dataset.from_dict({"pokemon": ["bulbasaur", "squirtle"], "type": ["grass", "water"]})
>>> ds[0]
{"pokemon": "bulbasaur", "type": "grass"}
```
To create an image or audio dataset, chain the [`~Dataset.cast_column`] method with [`~Dataset.from_dict`] and specify the column and feature type. For example, to create an audio dataset:
```py
>>> audio_dataset = Dataset.from_dict({"audio": ["path/to/audio_1", ..., "path/to/audio_n"]}).cast_column("audio", Audio())
```
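A similar sketch works for an image dataset, assuming local image paths and the [`~datasets.Image`] feature:

```py
>>> from datasets import Dataset, Image
>>> image_dataset = Dataset.from_dict({"image": ["path/to/image_1", "path/to/image_2"]}).cast_column("image", Image())
```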
Now that you know how to create a dataset, consider sharing it on the Hub so the community can also benefit from your work! Go on to the next section to learn how to share your dataset.
| datasets/docs/source/create_dataset.mdx/0 | {
"file_path": "datasets/docs/source/create_dataset.mdx",
"repo_id": "datasets",
"token_count": 2035
} |
# Load
Your data can be stored in various places: on your local machine's disk, in a GitHub repository, or in in-memory data structures like Python dictionaries and Pandas DataFrames. Wherever a dataset is stored, 🤗 Datasets can help you load it.
This guide will show you how to load a dataset from:
- The Hugging Face Hub
- Local files
- In-memory data
- Offline
- A specific slice of a split
- Local loading script (legacy)
For more details specific to loading other dataset modalities, take a look at the <a class="underline decoration-pink-400 decoration-2 font-semibold" href="./audio_load">load audio dataset guide</a>, the <a class="underline decoration-yellow-400 decoration-2 font-semibold" href="./image_load">load image dataset guide</a>, the <a class="underline decoration-blue-400 decoration-2 font-semibold" href="./video_load">load video dataset guide</a>, or the <a class="underline decoration-green-400 decoration-2 font-semibold" href="./nlp_load">load text dataset guide</a>.
<a id='load-from-the-hub'></a>
## Hugging Face Hub
Datasets are loaded from a dataset loading script that downloads and generates the dataset. However, you can also load a dataset from any dataset repository on the Hub without a loading script! Begin by [creating a dataset repository](share#create-the-repository) and upload your data files. Now you can use the [`load_dataset`] function to load the dataset.
For example, try loading the files from this [demo repository](https://huggingface.co/datasets/lhoestq/demo1) by providing the repository namespace and dataset name. This dataset repository contains CSV files, and the code below loads the dataset from the CSV files:
```py
>>> from datasets import load_dataset
>>> dataset = load_dataset("lhoestq/demo1")
```
Some datasets may have more than one version based on Git tags, branches, or commits. Use the `revision` parameter to specify the dataset version you want to load:
```py
>>> dataset = load_dataset(
... "lhoestq/custom_squad",
... revision="main" # tag name, or branch name, or commit hash
... )
```
<Tip>
Refer to the [Upload a dataset to the Hub](./upload_dataset) tutorial for more details on how to create a dataset repository on the Hub, and how to upload your data files.
</Tip>
A dataset without a loading script by default loads all the data into the `train` split. Use the `data_files` parameter to map data files to splits like `train`, `validation` and `test`:
```py
>>> data_files = {"train": "train.csv", "test": "test.csv"}
>>> dataset = load_dataset("namespace/your_dataset_name", data_files=data_files)
```
<Tip warning={true}>
If you don't specify which data files to use, [`load_dataset`] will return all the data files. This can take a long time if you load a large dataset like C4, which is approximately 13TB of data.
</Tip>
You can also load a specific subset of the files with the `data_files` or `data_dir` parameter. These parameters can accept a relative path which resolves to the base path corresponding to where the dataset is loaded from.
```py
>>> from datasets import load_dataset
# load files that match the grep pattern
>>> c4_subset = load_dataset("allenai/c4", data_files="en/c4-train.0000*-of-01024.json.gz")
# load dataset from the en directory on the Hub
>>> c4_subset = load_dataset("allenai/c4", data_dir="en")
```
The `split` parameter can also map a data file to a specific split:
```py
>>> data_files = {"validation": "en/c4-validation.*.json.gz"}
>>> c4_validation = load_dataset("allenai/c4", data_files=data_files, split="validation")
```
## Local and remote files
Datasets can be loaded from local files stored on your computer and from remote files. The datasets are most likely stored as a `csv`, `json`, `txt` or `parquet` file. The [`load_dataset`] function can load each of these file types.
### CSV
🤗 Datasets can read a dataset made up of one or several CSV files (in this case, pass your CSV files as a list):
```py
>>> from datasets import load_dataset
>>> dataset = load_dataset("csv", data_files="my_file.csv")
```
<Tip>
For more details, check out the [how to load tabular datasets from CSV files](tabular_load#csv-files) guide.
</Tip>
### JSON
JSON files are loaded directly with [`load_dataset`] as shown below:
```py
>>> from datasets import load_dataset
>>> dataset = load_dataset("json", data_files="my_file.json")
```
JSON files have diverse formats, but we think the most efficient format is JSON Lines: multiple JSON objects, where each line represents an individual row of data. For example:
```json
{"a": 1, "b": 2.0, "c": "foo", "d": false}
{"a": 4, "b": -5.5, "c": null, "d": true}
```
Another JSON format you may encounter is a nested field, in which case you'll need to specify the `field` argument as shown in the following:
```py
{"version": "0.1.0",
"data": [{"a": 1, "b": 2.0, "c": "foo", "d": false},
{"a": 4, "b": -5.5, "c": null, "d": true}]
}
>>> from datasets import load_dataset
>>> dataset = load_dataset("json", data_files="my_file.json", field="data")
```
To load remote JSON files via HTTP, pass the URLs instead:
```py
>>> base_url = "https://rajpurkar.github.io/SQuAD-explorer/dataset/"
>>> dataset = load_dataset("json", data_files={"train": base_url + "train-v1.1.json", "validation": base_url + "dev-v1.1.json"}, field="data")
```
While these are the most common JSON formats, you'll see other datasets that are formatted differently. 🤗 Datasets recognizes these other formats and will fall back to the Python JSON loading methods to handle them.
### Parquet
Parquet files are stored in a columnar format, unlike row-based files like a CSV. Large datasets may be stored in a Parquet file because it is more efficient and faster at returning your query.
To load a Parquet file:
```py
>>> from datasets import load_dataset
>>> dataset = load_dataset("parquet", data_files={'train': 'train.parquet', 'test': 'test.parquet'})
```
To load remote Parquet files via HTTP, pass the URLs instead:
```py
>>> base_url = "https://storage.googleapis.com/huggingface-nlp/cache/datasets/wikipedia/20200501.en/1.0.0/"
>>> data_files = {"train": base_url + "wikipedia-train.parquet"}
>>> wiki = load_dataset("parquet", data_files=data_files, split="train")
```
### Arrow
Arrow files are stored in an uncompressed, in-memory columnar format, unlike row-based formats like CSV and compressed columnar formats like Parquet.
To load an Arrow file:
```py
>>> from datasets import load_dataset
>>> dataset = load_dataset("arrow", data_files={'train': 'train.arrow', 'test': 'test.arrow'})
```
To load remote Arrow files via HTTP, pass the URLs instead:
```py
>>> base_url = "https://storage.googleapis.com/huggingface-nlp/cache/datasets/wikipedia/20200501.en/1.0.0/"
>>> data_files = {"train": base_url + "wikipedia-train.arrow"}
>>> wiki = load_dataset("arrow", data_files=data_files, split="train")
```
Arrow is the file format used by 🤗 Datasets under the hood, therefore you can load a local Arrow file using [`Dataset.from_file`] directly:
```py
>>> from datasets import Dataset
>>> dataset = Dataset.from_file("data.arrow")
```
Unlike [`load_dataset`], [`Dataset.from_file`] memory maps the Arrow file without preparing the dataset in the cache, saving you disk space.
The cache directory to store intermediate processing results will be the Arrow file directory in that case.
For now only the Arrow streaming format is supported. The Arrow IPC file format (also known as Feather V2) is not supported.
### SQL
Read database contents with [`~datasets.Dataset.from_sql`] by specifying the URI to connect to your database. You can read both table names and queries:
```py
>>> from datasets import Dataset
# load entire table
>>> dataset = Dataset.from_sql("data_table_name", con="sqlite:///sqlite_file.db")
# load from query
>>> dataset = Dataset.from_sql("SELECT text FROM table WHERE length(text) > 100 LIMIT 10", con="sqlite:///sqlite_file.db")
```
<Tip>
For more details, check out the [how to load tabular datasets from SQL databases](tabular_load#databases) guide.
</Tip>
### WebDataset
The [WebDataset](https://github.com/webdataset/webdataset) format is based on TAR archives and is suitable for big image datasets.
Because of their size, WebDatasets are generally loaded in streaming mode (using `streaming=True`).
You can load a WebDataset like this:
```python
>>> from datasets import load_dataset
>>>
>>> path = "path/to/train/*.tar"
>>> dataset = load_dataset("webdataset", data_files={"train": path}, split="train", streaming=True)
```
To load remote WebDatasets via HTTP, pass the URLs instead:
```python
>>> from datasets import load_dataset
>>>
>>> base_url = "https://huggingface.co/datasets/lhoestq/small-publaynet-wds/resolve/main/publaynet-train-{i:06d}.tar"
>>> urls = [base_url.format(i=i) for i in range(4)]
>>> dataset = load_dataset("webdataset", data_files={"train": urls}, split="train", streaming=True)
```
## Multiprocessing
When a dataset is made of several files (that we call "shards"), it is possible to significantly speed up the dataset downloading and preparation step.
You can choose how many processes you'd like to use to prepare a dataset in parallel using `num_proc`.
In this case, each process is given a subset of shards to prepare:
```python
from datasets import load_dataset
imagenet = load_dataset("imagenet-1k", num_proc=8)
ml_librispeech_spanish = load_dataset("facebook/multilingual_librispeech", "spanish", num_proc=8)
```
## In-memory data
🤗 Datasets will also allow you to create a [`Dataset`] directly from in-memory data structures like Python dictionaries and Pandas DataFrames.
### Python dictionary
Load Python dictionaries with [`~Dataset.from_dict`]:
```py
>>> from datasets import Dataset
>>> my_dict = {"a": [1, 2, 3]}
>>> dataset = Dataset.from_dict(my_dict)
```
### Python list of dictionaries
Load a list of Python dictionaries with [`~Dataset.from_list`]:
```py
>>> from datasets import Dataset
>>> my_list = [{"a": 1}, {"a": 2}, {"a": 3}]
>>> dataset = Dataset.from_list(my_list)
```
### Python generator
Create a dataset from a Python generator with [`~Dataset.from_generator`]:
```py
>>> from datasets import Dataset
>>> def my_gen():
... for i in range(1, 4):
... yield {"a": i}
...
>>> dataset = Dataset.from_generator(my_gen)
```
This approach supports loading data larger than available memory.
You can also define a sharded dataset by passing lists to `gen_kwargs`:
```py
>>> from datasets import IterableDataset
>>> def gen(shards):
... for shard in shards:
... with open(shard) as f:
... for line in f:
... yield {"line": line}
...
>>> shards = [f"data{i}.txt" for i in range(32)]
>>> ds = IterableDataset.from_generator(gen, gen_kwargs={"shards": shards})
>>> ds = ds.shuffle(seed=42, buffer_size=10_000) # shuffles the shards order + uses a shuffle buffer
>>> from torch.utils.data import DataLoader
>>> dataloader = DataLoader(ds.with_format("torch"), num_workers=4) # give each worker a subset of 32/4=8 shards
```
### Pandas DataFrame
Load Pandas DataFrames with [`~Dataset.from_pandas`]:
```py
>>> from datasets import Dataset
>>> import pandas as pd
>>> df = pd.DataFrame({"a": [1, 2, 3]})
>>> dataset = Dataset.from_pandas(df)
```
<Tip>
For more details, check out the [how to load tabular datasets from Pandas DataFrames](tabular_load#pandas-dataframes) guide.
</Tip>
## Offline
Even if you don't have an internet connection, it is still possible to load a dataset. As long as you've downloaded a dataset from the Hub repository before, it should be cached. This means you can reload the dataset from the cache and use it offline.
If you know you won't have internet access, you can run 🤗 Datasets in full offline mode. This saves time because instead of waiting for the Dataset builder download to time out, 🤗 Datasets will look directly in the cache. Set the environment variable `HF_HUB_OFFLINE` to `1` to enable full offline mode.
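For example, here is a minimal sketch of enabling full offline mode from Python, assuming the dataset was downloaded and cached beforehand (the environment variable must be set before importing 🤗 Datasets):
```py
>>> import os
>>> os.environ["HF_HUB_OFFLINE"] = "1"  # must be set before importing datasets
>>> from datasets import load_dataset
>>> dataset = load_dataset("rotten_tomatoes")  # loads from the local cache, no network calls
```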
## Slice splits
You can also choose only to load specific slices of a split. There are two options for slicing a split: using strings or the [`ReadInstruction`] API. Strings are more compact and readable for simple cases, while [`ReadInstruction`] is easier to use with variable slicing parameters.
Concatenate a `train` and `test` split by:
```py
>>> train_test_ds = datasets.load_dataset("bookcorpus", split="train+test")
===STRINGAPI-READINSTRUCTION-SPLIT===
>>> ri = datasets.ReadInstruction("train") + datasets.ReadInstruction("test")
>>> train_test_ds = datasets.load_dataset("bookcorpus", split=ri)
```
Select specific rows of the `train` split:
```py
>>> train_10_20_ds = datasets.load_dataset("bookcorpus", split="train[10:20]")
===STRINGAPI-READINSTRUCTION-SPLIT===
>>> train_10_20_ds = datasets.load_dataset("bookcorpu", split=datasets.ReadInstruction("train", from_=10, to=20, unit="abs"))
```
Or select a percentage of a split with:
```py
>>> train_10pct_ds = datasets.load_dataset("bookcorpus", split="train[:10%]")
===STRINGAPI-READINSTRUCTION-SPLIT===
>>> train_10_20_ds = datasets.load_dataset("bookcorpus", split=datasets.ReadInstruction("train", to=10, unit="%"))
```
Select a combination of percentages from each split:
```py
>>> train_10_80pct_ds = datasets.load_dataset("bookcorpus", split="train[:10%]+train[-80%:]")
===STRINGAPI-READINSTRUCTION-SPLIT===
>>> ri = (datasets.ReadInstruction("train", to=10, unit="%") + datasets.ReadInstruction("train", from_=-80, unit="%"))
>>> train_10_80pct_ds = datasets.load_dataset("bookcorpus", split=ri)
```
Finally, you can even create cross-validated splits. The example below creates 10-fold cross-validated splits. Each validation dataset is a 10% chunk, and the training dataset makes up the remaining complementary 90% chunk:
```py
>>> val_ds = datasets.load_dataset("bookcorpus", split=[f"train[{k}%:{k+10}%]" for k in range(0, 100, 10)])
>>> train_ds = datasets.load_dataset("bookcorpus", split=[f"train[:{k}%]+train[{k+10}%:]" for k in range(0, 100, 10)])
===STRINGAPI-READINSTRUCTION-SPLIT===
>>> val_ds = datasets.load_dataset("bookcorpus", [datasets.ReadInstruction("train", from_=k, to=k+10, unit="%") for k in range(0, 100, 10)])
>>> train_ds = datasets.load_dataset("bookcorpus", [(datasets.ReadInstruction("train", to=k, unit="%") + datasets.ReadInstruction("train", from_=k+10, unit="%")) for k in range(0, 100, 10)])
```
### Percent slicing and rounding
The default behavior is to round the boundaries to the nearest integer for datasets where the requested slice boundaries do not divide evenly by 100. As shown below, some slices may contain more examples than others. For instance, if the following train split includes 999 records, then:
```py
# 19 records, from 500 (included) to 519 (excluded).
>>> train_50_52_ds = datasets.load_dataset("bookcorpus", split="train[50%:52%]")
# 20 records, from 519 (included) to 539 (excluded).
>>> train_52_54_ds = datasets.load_dataset("bookcorpus", split="train[52%:54%]")
```
If you want equal sized splits, use `pct1_dropremainder` rounding instead. This treats the specified percentage boundaries as multiples of 1%.
```py
# 18 records, from 450 (included) to 468 (excluded).
>>> train_50_52pct1_ds = datasets.load_dataset("bookcorpus", split=datasets.ReadInstruction("train", from_=50, to=52, unit="%", rounding="pct1_dropremainder"))
# 18 records, from 468 (included) to 486 (excluded).
>>> train_52_54pct1_ds = datasets.load_dataset("bookcorpus", split=datasets.ReadInstruction("train",from_=52, to=54, unit="%", rounding="pct1_dropremainder"))
# Or equivalently:
>>> train_50_52pct1_ds = datasets.load_dataset("bookcorpus", split="train[50%:52%](pct1_dropremainder)")
>>> train_52_54pct1_ds = datasets.load_dataset("bookcorpus", split="train[52%:54%](pct1_dropremainder)")
```
<Tip warning={true}>
`pct1_dropremainder` rounding may truncate the last examples in a dataset if the number of examples in your dataset doesn't divide evenly by 100.
</Tip>
<a id='troubleshoot'></a>
## Troubleshooting
Sometimes, you may get unexpected results when you load a dataset. Two of the most common issues you may encounter are manually downloading a dataset and specifying features of a dataset.
### Manual download
Certain datasets require you to manually download the dataset files due to licensing incompatibility or if the files are hidden behind a login page. This causes [`load_dataset`] to throw an `AssertionError`. But 🤗 Datasets provides detailed instructions for downloading the missing files. After you've downloaded the files, use the `data_dir` argument to specify the path to the files you just downloaded.
For example, if you try to download a configuration from the [MATINF](https://huggingface.co/datasets/matinf) dataset:
```py
>>> dataset = load_dataset("matinf", "summarization")
Downloading and preparing dataset matinf/summarization (download: Unknown size, generated: 246.89 MiB, post-processed: Unknown size, total: 246.89 MiB) to /root/.cache/huggingface/datasets/matinf/summarization/1.0.0/82eee5e71c3ceaf20d909bca36ff237452b4e4ab195d3be7ee1c78b53e6f540e...
AssertionError: The dataset matinf with config summarization requires manual data.
Please follow the manual download instructions: To use MATINF you have to download it manually. Please fill this google form (https://forms.gle/nkH4LVE4iNQeDzsc9). You will receive a download link and a password once you complete the form. Please extract all files in one folder and load the dataset with: *datasets.load_dataset('matinf', data_dir='path/to/folder/folder_name')*.
Manual data can be loaded with `datasets.load_dataset(matinf, data_dir='<path/to/manual/data>')
```
If you've already downloaded a dataset from the *Hub with a loading script* to your computer, then you need to pass an absolute path to the `data_dir` or `data_files` parameter to load that dataset. Otherwise, if you pass a relative path, [`load_dataset`] will load the directory from the repository on the Hub instead of the local directory.
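For instance, here is a minimal sketch of loading the manually downloaded MATINF files; the folder path is a placeholder you should replace with the absolute path to your own download:
```py
>>> from datasets import load_dataset
>>> # "/absolute/path/to/matinf" is a placeholder for the folder containing the manually downloaded files
>>> dataset = load_dataset("matinf", "summarization", data_dir="/absolute/path/to/matinf")
```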
### Specify features
When you create a dataset from local files, the [`Features`] are automatically inferred by [Apache Arrow](https://arrow.apache.org/docs/). However, the dataset's features may not always align with your expectations, or you may want to define the features yourself. The following example shows how you can add custom labels with the [`ClassLabel`] feature.
Start by defining your own labels with the [`Features`] class:
```py
>>> class_names = ["sadness", "joy", "love", "anger", "fear", "surprise"]
>>> emotion_features = Features({'text': Value('string'), 'label': ClassLabel(names=class_names)})
```
Next, specify the `features` parameter in [`load_dataset`] with the features you just created:
```py
>>> dataset = load_dataset('csv', data_files=file_dict, delimiter=';', column_names=['text', 'label'], features=emotion_features)
```
Now when you look at your dataset features, you can see it uses the custom labels you defined:
```py
>>> dataset['train'].features
{'text': Value(dtype='string', id=None),
'label': ClassLabel(names=['sadness', 'joy', 'love', 'anger', 'fear', 'surprise'], id=None)}
```
## (Legacy) Local loading script
You may have a 🤗 Datasets loading script locally on your computer. In this case, load the dataset by passing one of the following paths to [`load_dataset`]:
- The local path to the loading script file.
- The local path to the directory containing the loading script file (only if the script file has the same name as the directory).
Pass `trust_remote_code=True` to allow 🤗 Datasets to execute the loading script:
```py
>>> dataset = load_dataset("path/to/local/loading_script/loading_script.py", split="train", trust_remote_code=True)
>>> dataset = load_dataset("path/to/local/loading_script", split="train", trust_remote_code=True) # equivalent because the file has the same name as the directory
```
| datasets/docs/source/loading.mdx/0 | {
"file_path": "datasets/docs/source/loading.mdx",
"repo_id": "datasets",
"token_count": 6273
} |
# Troubleshooting
This guide aims to provide you the tools and knowledge required to navigate some common issues. If the suggestions listed
in this guide do not cover your situation, please refer to the [Asking for Help](#asking-for-help) section to learn where to
find help with your specific issue.
## Issues when uploading datasets with `push_to_hub`
### Authentication issues
If you are experiencing authentication issues when sharing a dataset on 🤗 Hub using [`Dataset.push_to_hub`] and a Hugging Face
access token:
* Make sure that the Hugging Face token you're using to authenticate yourself is a token with **write** permission.
* On OSX, it may help to clean up all the huggingface.co passwords on your keychain access, as well as reconfigure `git config --global credential.helper osxkeychain`, before using `huggingface-cli login`.
Alternatively, you can use SSH keys to authenticate yourself - read more in the [🤗 Hub documentation](https://huggingface.co/docs/hub/security-git-ssh).
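For example, here is a minimal sketch of pushing a dataset while passing a **write** token explicitly; the repository ID and token value are placeholders:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", split="train")
>>> ds.push_to_hub("your-username/your-dataset", token="hf_xxx")  # the token must have write permission
```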
### Lost connection on large dataset upload
When uploading large datasets to the Hub, if the number of dataset shards is large, it can create too many commits for the Hub in a
short period. This will result in a connection error.
The connection error can also be caused by an HTTP 500 error returned by the AWS S3 bucket that the Hub uses internally.
In either situation, you can re-run [`Dataset.push_to_hub`] to proceed with the dataset upload. The Hub will check the SHAs
of already uploaded shards to avoid reuploading them.
We are working on making the upload process more robust to transient errors, so updating to the latest library version is
always a good idea.
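As an illustrative sketch, simply calling `push_to_hub` again resumes the upload; you can also reduce the number of shards (and therefore commits) with the `max_shard_size` or `num_shards` arguments. The repository ID below is a placeholder:
```py
>>> # re-running the call resumes the upload; already uploaded shards are skipped
>>> ds.push_to_hub("your-username/your-dataset", max_shard_size="500MB")
```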
### `Too Many Requests`
Uploading large datasets via `push_to_hub()` can result in an error:
```bash
HfHubHTTPError: 429 Client Error: Too Many Requests for url: ...
You have exceeded our hourly quotas for action: commit. We invite you to retry later.
```
If you encounter this issue, you need to upgrade the `datasets` library to the latest version (or at least `2.15.0`).
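You can check the installed version from Python:
```py
>>> import datasets
>>> print(datasets.__version__)  # should be >= 2.15.0
```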
## Issues when creating datasets from custom data
### Loading images and audio from a folder
When creating a dataset from a folder, one of the most common issues is that the file structure does not follow the
expected format, or there's an issue with the metadata file.
Learn more about required folder structure in corresponding documentation pages:
* [AudioFolder](https://huggingface.co/docs/datasets/audio_dataset#audiofolder)
* [ImageFolder](https://huggingface.co/docs/datasets/image_dataset#imagefolder)
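As a quick sanity check, here is a minimal sketch of loading an image dataset once the folder follows the expected structure; the directory path and class names are placeholders:
```py
>>> from datasets import load_dataset
>>> # expects subfolders like path/to/images/train/<class_name>/*.jpg
>>> dataset = load_dataset("imagefolder", data_dir="path/to/images")
```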
### Pickling issues
#### Pickling issues when using `Dataset.from_generator`
When creating a dataset, [`IterableDataset.from_generator`] and [`Dataset.from_generator`] expect a "picklable" generator function.
This is required to hash the function using [`pickle`](https://docs.python.org/3/library/pickle.html) to be able to cache the dataset on disk.
While generator functions are generally "picklable", note that generator objects are not. So if you're using a generator object,
you will encounter a `TypeError` like this:
```bash
TypeError: cannot pickle 'generator' object
```
This error can also occur when using a generator function that uses a global object that is not "picklable", such as a
DB connection, for example. If that's the case, you can initialize such an object directly inside the generator function to
avoid this error, as in the sketch below.
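Here is a minimal sketch, assuming a SQLite database at a placeholder path with a placeholder table; the connection is created inside the generator function so the function itself stays picklable:
```py
>>> import sqlite3
>>> from datasets import Dataset
>>> def gen():
...     # create the connection inside the generator function, not at module level
...     con = sqlite3.connect("my_database.db")  # placeholder path
...     for (text,) in con.execute("SELECT text FROM my_table"):  # placeholder table
...         yield {"text": text}
...     con.close()
>>> ds = Dataset.from_generator(gen)
```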
#### Pickling issues with `Dataset.map`
Pickling errors can also happen in the multiprocess [`Dataset.map`] - objects are pickled to be passed to child processes.
If the objects used in the transformation are not picklable, it's not possible to cache the result of `map`, which leads to an error being raised.
Here are some ways to address this issue:
* A universal solution to pickle issues is to make sure the objects (or generator classes) are picklable manually by implementing `__getstate__` / `__setstate__` / `__reduce__`.
* You can also provide your own unique hash in `map` with the `new_fingerprint` argument (see the sketch after this list).
* You can also disable caching by calling `datasets.disable_caching()`, however, this is undesirable - [read more about the importance of the cache](cache)
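For example, here is a minimal sketch of passing your own fingerprint to `map` when the mapped function closes over an unpicklable object; the database path, table name, and fingerprint string are placeholders, and the fingerprint should be changed whenever the transformation changes:
```py
>>> import sqlite3
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", split="train")
>>> con = sqlite3.connect("my_database.db")  # placeholder path; connections can't be pickled
>>> def add_db_label(example):
...     row = con.execute("SELECT label FROM labels WHERE text = ?", (example["text"],)).fetchone()
...     return {"db_label": row[0] if row else -1}
>>> ds = ds.map(add_db_label, new_fingerprint="db-label-lookup-v1")
```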
## Asking for help
If the above troubleshooting advice did not help you resolve your issue, reach out for help to the community and the team.
### Forums
Ask for help on the Hugging Face forums - post your question in the [🤗Datasets category](https://discuss.huggingface.co/c/datasets/10)
Make sure to write a descriptive post with relevant context about your setup and reproducible code to maximize the likelihood that your problem is solved!
### Discord
Post a question on [Discord](http://hf.co/join/discord), and let the team and the community help you.
### Community Discussions on 🤗 Hub
If you are facing issues creating a custom dataset with a script on Hub, you can ask the Hugging Face team for help by opening
a discussion in the Community tab of your dataset with this message:
```text
# Dataset review request for <Dataset name>
## Description
<brief description of the dataset>
## Files to review
- file1
- file2
- ...
cc @lhoestq @polinaeterna @mariosasko @albertvillanova
```
### GitHub Issues
Finally, if you suspect you have found a bug related to the library itself, create an Issue on the 🤗 Datasets
[GitHub repository](https://github.com/huggingface/datasets/issues). Include context regarding the bug: code snippet to reproduce,
details about your environment and data, etc. to help us figure out what's wrong and how we can fix it.
| datasets/docs/source/troubleshoot.mdx/0 | {
"file_path": "datasets/docs/source/troubleshoot.mdx",
"repo_id": "datasets",
"token_count": 1470
} |
# Lint as: python3
"""HuggingFace/Datasets is an open library of datasets.
Note:
VERSION needs to be formatted following the MAJOR.MINOR.PATCH convention
(we need to follow this convention to be able to retrieve versioned scripts)
Simple check list for release from AllenNLP repo: https://github.com/allenai/allennlp/blob/master/setup.py
Steps to make a release:
0. Prerequisites:
- Dependencies:
- twine: `pip install twine`
- Create an account in (and join the 'datasets' project):
- PyPI: https://pypi.org/
- Test PyPI: https://test.pypi.org/
- Don't break `transformers`: run the `transformers` CI using the `main` branch and make sure it's green.
- In `transformers`, use `datasets @ git+https://github.com/huggingface/datasets@main#egg=datasets`
Add a step to install `datasets@main` after `save_cache` in .circleci/create_circleci_config.py:
```
steps.append({"run": {"name": "Install `datasets@main`", "command": 'pip uninstall datasets -y && pip install "datasets @ git+https://github.com/huggingface/datasets@main#egg=datasets"'}})
```
- and then run the CI
1. Create the release branch from main branch:
```
git checkout main
git pull upstream main
git checkout -b release-VERSION
```
2. Change the version to the release VERSION in:
- __init__.py
- setup.py
3. Commit these changes, push and create a Pull Request:
```
git add -u
git commit -m "Release: VERSION"
git push upstream release-VERSION
```
- Go to: https://github.com/huggingface/datasets/pull/new/release-VERSION
- Create pull request
4. From your local release branch, build both the sources and the wheel. Do not change anything in setup.py between
creating the wheel and the source distribution (obviously).
- First, delete any building directories that may exist from previous builds:
- build
- dist
- From the top level directory, build the wheel and the sources:
```
python setup.py bdist_wheel
python setup.py sdist
```
- You should now have a /dist directory with both .whl and .tar.gz source versions.
5. Check that everything looks correct by uploading the package to the test PyPI server:
```
twine upload dist/* -r testpypi
```
Check that you can install it in a virtualenv/notebook by running:
```
pip install huggingface-hub fsspec aiohttp
pip install -U tqdm pyarrow
pip install -i https://testpypi.python.org/pypi datasets
```
6. Upload the final version to the actual PyPI:
```
twine upload dist/* -r pypi
```
7. Make the release on GitHub once everything is looking hunky-dory:
- Merge the release Pull Request
- Create a new release: https://github.com/huggingface/datasets/releases/new
- Choose a tag: Introduce the new VERSION as tag, that will be created when you publish the release
- Create new tag VERSION on publish
- Release title: Introduce the new VERSION as well
- Describe the release
- Use "Generate release notes" button for automatic generation
- Publish release
8. Set the dev version
- Create the dev-version branch from the main branch:
```
git checkout main
git pull upstream main
git branch -D dev-version
git checkout -b dev-version
```
- Change the version to X.X.X+1.dev0 (e.g. VERSION=1.18.3 -> 1.18.4.dev0) in:
- __init__.py
- setup.py
- Commit these changes, push and create a Pull Request:
```
git add -u
git commit -m "Set dev version"
git push upstream dev-version
```
- Go to: https://github.com/huggingface/datasets/pull/new/dev-version
- Create pull request
- Merge the dev version Pull Request
"""
from setuptools import find_packages, setup
REQUIRED_PKGS = [
# For file locking
"filelock",
# We use numpy>=1.17 to have np.random.Generator (Dataset shuffling)
"numpy>=1.17",
# Backend and serialization.
# Minimum 15.0.0 to be able to cast dictionary types to their underlying types
"pyarrow>=15.0.0",
# For smart caching dataset processing
"dill>=0.3.0,<0.3.9", # tmp pin until dill has official support for determinism see https://github.com/uqfoundation/dill/issues/19
# For performance gains with apache arrow
"pandas",
# for downloading datasets over HTTPS
"requests>=2.32.2",
# progress bars in download and scripts
"tqdm>=4.66.3",
# for fast hashing
"xxhash",
# for better multiprocessing
"multiprocess<0.70.17", # to align with dill<0.3.9 (see above)
# to save datasets locally or on any filesystem
# minimum 2023.1.0 to support protocol=kwargs in fsspec's `open`, `get_fs_token_paths`, etc.: see https://github.com/fsspec/filesystem_spec/pull/1143
"fsspec[http]>=2023.1.0,<=2024.12.0",
# for data streaming via http
"aiohttp",
# To get datasets from the Datasets Hub on huggingface.co
"huggingface-hub>=0.24.0",
# Utilities from PyPA to e.g., compare versions
"packaging",
# To parse YAML metadata from dataset cards
"pyyaml>=5.1",
]
AUDIO_REQUIRE = [
"soundfile>=0.12.1",
"librosa",
"soxr>=0.4.0; python_version>='3.9'", # Supports numpy-2
]
VISION_REQUIRE = [
"Pillow>=9.4.0", # When PIL.Image.ExifTags was introduced
]
BENCHMARKS_REQUIRE = [
"tensorflow==2.12.0",
"torch==2.0.1",
"transformers==4.30.1",
]
TESTS_REQUIRE = [
# test dependencies
"absl-py",
"decorator",
"joblib<1.3.0", # joblibspark doesn't support recent joblib versions
"joblibspark",
"pytest",
"pytest-datadir",
"pytest-xdist",
# optional dependencies
"elasticsearch>=7.17.12,<8.0.0", # 8.0 asks users to provide hosts or cloud_id when instantiating ElasticSearch(); 7.9.1 has legacy numpy.float_ which was fixed in https://github.com/elastic/elasticsearch-py/pull/2551.
"faiss-cpu>=1.8.0.post1", # Pins numpy < 2
"jax>=0.3.14; sys_platform != 'win32'",
"jaxlib>=0.3.14; sys_platform != 'win32'",
"lz4",
"moto[server]",
"pyspark>=3.4", # https://issues.apache.org/jira/browse/SPARK-40991 fixed in 3.4.0
"py7zr",
"rarfile>=4.0",
"sqlalchemy",
"s3fs>=2021.11.1", # aligned with fsspec[http]>=2021.11.1; test only on python 3.7 for now
"protobuf<4.0.0", # 4.0.0 breaks compatibility with tensorflow<2.12
"tensorflow>=2.6.0; python_version<'3.10'", # numpy-2 is not supported for Python < 3.10
"tensorflow>=2.16.0; python_version>='3.10'", # Pins numpy < 2
"tiktoken",
"torch>=2.0.0",
"torchdata",
"soundfile>=0.12.1",
"transformers>=4.42.0", # Pins numpy < 2
"zstandard",
"polars[timezone]>=0.20.0",
"decord==0.6.0",
]
TESTS_REQUIRE.extend(VISION_REQUIRE)
TESTS_REQUIRE.extend(AUDIO_REQUIRE)
NUMPY2_INCOMPATIBLE_LIBRARIES = [
"faiss-cpu",
"librosa", # librosa -> numba-0.60.0 requires numpy < 2.1 (see GH-7111)
"tensorflow",
]
TESTS_NUMPY2_REQUIRE = [
library for library in TESTS_REQUIRE if library.partition(">")[0] not in NUMPY2_INCOMPATIBLE_LIBRARIES
]
QUALITY_REQUIRE = ["ruff>=0.3.0"]
DOCS_REQUIRE = [
# Might need to add doc-builder and some specific deps in the future
"s3fs",
# Following dependencies are required for the Python reference to be built properly
"transformers",
"torch",
"tensorflow>=2.6.0",
]
EXTRAS_REQUIRE = {
"audio": AUDIO_REQUIRE,
"vision": VISION_REQUIRE,
"tensorflow": [
"tensorflow>=2.6.0",
],
"tensorflow_gpu": ["tensorflow>=2.6.0"],
"torch": ["torch"],
"jax": ["jax>=0.3.14", "jaxlib>=0.3.14"],
"s3": ["s3fs"],
"streaming": [], # for backward compatibility
"dev": TESTS_REQUIRE + QUALITY_REQUIRE + DOCS_REQUIRE,
"tests": TESTS_REQUIRE,
"tests_numpy2": TESTS_NUMPY2_REQUIRE,
"quality": QUALITY_REQUIRE,
"benchmarks": BENCHMARKS_REQUIRE,
"docs": DOCS_REQUIRE,
}
setup(
name="datasets",
version="3.2.1.dev0", # expected format is one of x.y.z.dev0, or x.y.z.rc1 or x.y.z (no to dashes, yes to dots)
description="HuggingFace community-driven open-source library of datasets",
long_description=open("README.md", encoding="utf-8").read(),
long_description_content_type="text/markdown",
author="HuggingFace Inc.",
author_email="thomas@huggingface.co",
url="https://github.com/huggingface/datasets",
download_url="https://github.com/huggingface/datasets/tags",
license="Apache 2.0",
package_dir={"": "src"},
packages=find_packages("src"),
package_data={
"datasets": ["py.typed"],
"datasets.utils.resources": ["*.json", "*.yaml", "*.tsv"],
},
entry_points={"console_scripts": ["datasets-cli=datasets.commands.datasets_cli:main"]},
python_requires=">=3.9.0",
install_requires=REQUIRED_PKGS,
extras_require=EXTRAS_REQUIRE,
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"Intended Audience :: Education",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3.11",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
],
keywords="datasets machine learning datasets",
zip_safe=False, # Required for mypy to find the py.typed file
)
| datasets/setup.py/0 | {
"file_path": "datasets/setup.py",
"repo_id": "datasets",
"token_count": 3874
} |
import contextlib
import copy
import fnmatch
import json
import math
import posixpath
import re
from io import BytesIO
from pathlib import Path
from typing import Callable, Dict, List, Optional, Sequence, Tuple, Union
import fsspec
import numpy as np
from fsspec.core import url_to_fs
from huggingface_hub import (
CommitInfo,
CommitOperationAdd,
CommitOperationDelete,
DatasetCard,
DatasetCardData,
HfApi,
)
from huggingface_hub.hf_api import RepoFile
from . import config
from .arrow_dataset import PUSH_TO_HUB_WITHOUT_METADATA_CONFIGS_SPLIT_PATTERN_SHARDED, Dataset
from .features import Features
from .features.features import FeatureType
from .info import DatasetInfo, DatasetInfosDict
from .naming import _split_re
from .splits import NamedSplit, Split, SplitDict, SplitInfo
from .table import Table
from .utils import logging
from .utils.doc_utils import is_documented_by
from .utils.metadata import MetadataConfigs
from .utils.py_utils import asdict, glob_pattern_to_regex, string_to_dict
from .utils.typing import PathLike
logger = logging.get_logger(__name__)
class DatasetDict(dict):
"""A dictionary (dict of str: datasets.Dataset) with dataset transforms methods (map, filter, etc.)"""
def _check_values_type(self):
for dataset in self.values():
if not isinstance(dataset, Dataset):
raise TypeError(f"Values in `DatasetDict` should be of type `Dataset` but got type '{type(dataset)}'")
def _check_values_features(self):
items = list(self.items())
for item_a, item_b in zip(items[:-1], items[1:]):
if item_a[1].features != item_b[1].features:
raise ValueError(
f"All datasets in `DatasetDict` should have the same features but features for '{item_a[0]}' and '{item_b[0]}' don't match: {item_a[1].features} != {item_b[1].features}"
)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
# Here `del` is used to del the pyarrow tables. This properly closes the files used for memory mapped tables
for dataset in self.values():
if hasattr(dataset, "_data"):
del dataset._data
if hasattr(dataset, "_indices"):
del dataset._indices
def __getitem__(self, k) -> Dataset:
if isinstance(k, (str, NamedSplit)) or len(self) == 0:
return super().__getitem__(k)
else:
available_suggested_splits = [
split for split in (Split.TRAIN, Split.TEST, Split.VALIDATION) if split in self
]
suggested_split = available_suggested_splits[0] if available_suggested_splits else list(self)[0]
raise KeyError(
f"Invalid key: {k}. Please first select a split. For example: "
f"`my_dataset_dictionary['{suggested_split}'][{k}]`. "
f"Available splits: {sorted(self)}"
)
@property
def data(self) -> Dict[str, Table]:
"""The Apache Arrow tables backing each split.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes")
>>> ds.data
```
"""
self._check_values_type()
return {k: dataset.data for k, dataset in self.items()}
@property
def cache_files(self) -> Dict[str, Dict]:
"""The cache files containing the Apache Arrow table backing each split.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes")
>>> ds.cache_files
{'test': [{'filename': '/root/.cache/huggingface/datasets/rotten_tomatoes_movie_review/default/1.0.0/40d411e45a6ce3484deed7cc15b82a53dad9a72aafd9f86f8f227134bec5ca46/rotten_tomatoes_movie_review-test.arrow'}],
'train': [{'filename': '/root/.cache/huggingface/datasets/rotten_tomatoes_movie_review/default/1.0.0/40d411e45a6ce3484deed7cc15b82a53dad9a72aafd9f86f8f227134bec5ca46/rotten_tomatoes_movie_review-train.arrow'}],
'validation': [{'filename': '/root/.cache/huggingface/datasets/rotten_tomatoes_movie_review/default/1.0.0/40d411e45a6ce3484deed7cc15b82a53dad9a72aafd9f86f8f227134bec5ca46/rotten_tomatoes_movie_review-validation.arrow'}]}
```
"""
self._check_values_type()
return {k: dataset.cache_files for k, dataset in self.items()}
@property
def num_columns(self) -> Dict[str, int]:
"""Number of columns in each split of the dataset.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes")
>>> ds.num_columns
{'test': 2, 'train': 2, 'validation': 2}
```
"""
self._check_values_type()
return {k: dataset.num_columns for k, dataset in self.items()}
@property
def num_rows(self) -> Dict[str, int]:
"""Number of rows in each split of the dataset.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes")
>>> ds.num_rows
{'test': 1066, 'train': 8530, 'validation': 1066}
```
"""
self._check_values_type()
return {k: dataset.num_rows for k, dataset in self.items()}
@property
def column_names(self) -> Dict[str, List[str]]:
"""Names of the columns in each split of the dataset.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes")
>>> ds.column_names
{'test': ['text', 'label'],
'train': ['text', 'label'],
'validation': ['text', 'label']}
```
"""
self._check_values_type()
return {k: dataset.column_names for k, dataset in self.items()}
@property
def shape(self) -> Dict[str, Tuple[int]]:
"""Shape of each split of the dataset (number of rows, number of columns).
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes")
>>> ds.shape
{'test': (1066, 2), 'train': (8530, 2), 'validation': (1066, 2)}
```
"""
self._check_values_type()
return {k: dataset.shape for k, dataset in self.items()}
def flatten(self, max_depth=16) -> "DatasetDict":
"""Flatten the Apache Arrow Table of each split (nested features are flatten).
Each column with a struct type is flattened into one column per struct field.
Other columns are left unchanged.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("squad")
>>> ds["train"].features
{'answers': Sequence(feature={'text': Value(dtype='string', id=None), 'answer_start': Value(dtype='int32', id=None)}, length=-1, id=None),
'context': Value(dtype='string', id=None),
'id': Value(dtype='string', id=None),
'question': Value(dtype='string', id=None),
'title': Value(dtype='string', id=None)}
>>> ds.flatten()
DatasetDict({
train: Dataset({
features: ['id', 'title', 'context', 'question', 'answers.text', 'answers.answer_start'],
num_rows: 87599
})
validation: Dataset({
features: ['id', 'title', 'context', 'question', 'answers.text', 'answers.answer_start'],
num_rows: 10570
})
})
```
"""
self._check_values_type()
return DatasetDict({k: dataset.flatten(max_depth=max_depth) for k, dataset in self.items()})
def unique(self, column: str) -> Dict[str, List]:
"""Return a list of the unique elements in a column for each split.
This is implemented in the low-level backend and as such, very fast.
Args:
column (`str`):
column name (list all the column names with [`~datasets.DatasetDict.column_names`])
Returns:
Dict[`str`, `list`]: Dictionary of unique elements in the given column.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes")
>>> ds.unique("label")
{'test': [1, 0], 'train': [1, 0], 'validation': [1, 0]}
```
"""
self._check_values_type()
return {k: dataset.unique(column) for k, dataset in self.items()}
def cleanup_cache_files(self) -> Dict[str, int]:
"""Clean up all cache files in the dataset cache directory, excepted the currently used cache file if there is one.
Be careful when running this command that no other process is currently using other cache files.
Return:
`Dict` with the number of removed files for each split
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes")
>>> ds.cleanup_cache_files()
{'test': 0, 'train': 0, 'validation': 0}
```
"""
self._check_values_type()
return {k: dataset.cleanup_cache_files() for k, dataset in self.items()}
def __repr__(self):
repr = "\n".join([f"{k}: {v}" for k, v in self.items()])
repr = re.sub(r"^", " " * 4, repr, 0, re.M)
return f"DatasetDict({{\n{repr}\n}})"
def cast(self, features: Features) -> "DatasetDict":
"""
Cast the dataset to a new set of features.
The transformation is applied to all the datasets of the dataset dictionary.
Args:
features ([`Features`]):
New features to cast the dataset to.
The name and order of the fields in the features must match the current column names.
The type of the data must also be convertible from one type to the other.
For non-trivial conversion, e.g. `string` <-> `ClassLabel` you should use [`~DatasetDict.map`] to update the dataset.
Example:
```py
>>> from datasets import load_dataset, ClassLabel, Value
>>> ds = load_dataset("rotten_tomatoes")
>>> ds["train"].features
{'label': ClassLabel(names=['neg', 'pos'], id=None),
'text': Value(dtype='string', id=None)}
>>> new_features = ds["train"].features.copy()
>>> new_features['label'] = ClassLabel(names=['bad', 'good'])
>>> new_features['text'] = Value('large_string')
>>> ds = ds.cast(new_features)
>>> ds["train"].features
{'label': ClassLabel(names=['bad', 'good'], id=None),
'text': Value(dtype='large_string', id=None)}
```
"""
self._check_values_type()
return DatasetDict({k: dataset.cast(features=features) for k, dataset in self.items()})
def cast_column(self, column: str, feature) -> "DatasetDict":
"""Cast column to feature for decoding.
Args:
column (`str`):
Column name.
feature ([`Feature`]):
Target feature.
Returns:
[`DatasetDict`]
Example:
```py
>>> from datasets import load_dataset, ClassLabel
>>> ds = load_dataset("rotten_tomatoes")
>>> ds["train"].features
{'label': ClassLabel(names=['neg', 'pos'], id=None),
'text': Value(dtype='string', id=None)}
>>> ds = ds.cast_column('label', ClassLabel(names=['bad', 'good']))
>>> ds["train"].features
{'label': ClassLabel(names=['bad', 'good'], id=None),
'text': Value(dtype='string', id=None)}
```
"""
self._check_values_type()
return DatasetDict({k: dataset.cast_column(column=column, feature=feature) for k, dataset in self.items()})
def remove_columns(self, column_names: Union[str, List[str]]) -> "DatasetDict":
"""
Remove one or several column(s) from each split in the dataset
and the features associated to the column(s).
The transformation is applied to all the splits of the dataset dictionary.
You can also remove a column using [`~DatasetDict.map`] with `remove_columns` but the present method
doesn't copy the data of the remaining columns and is thus faster.
Args:
column_names (`Union[str, List[str]]`):
Name of the column(s) to remove.
Returns:
[`DatasetDict`]: A copy of the dataset object without the columns to remove.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes")
>>> ds = ds.remove_columns("label")
DatasetDict({
train: Dataset({
features: ['text'],
num_rows: 8530
})
validation: Dataset({
features: ['text'],
num_rows: 1066
})
test: Dataset({
features: ['text'],
num_rows: 1066
})
})
```
"""
self._check_values_type()
return DatasetDict({k: dataset.remove_columns(column_names=column_names) for k, dataset in self.items()})
def rename_column(self, original_column_name: str, new_column_name: str) -> "DatasetDict":
"""
Rename a column in the dataset and move the features associated to the original column under the new column name.
The transformation is applied to all the datasets of the dataset dictionary.
You can also rename a column using [`~DatasetDict.map`] with `remove_columns` but the present method:
- takes care of moving the original features under the new column name.
- doesn't copy the data to a new dataset and is thus much faster.
Args:
original_column_name (`str`):
Name of the column to rename.
new_column_name (`str`):
New name for the column.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes")
>>> ds = ds.rename_column("label", "label_new")
DatasetDict({
train: Dataset({
features: ['text', 'label_new'],
num_rows: 8530
})
validation: Dataset({
features: ['text', 'label_new'],
num_rows: 1066
})
test: Dataset({
features: ['text', 'label_new'],
num_rows: 1066
})
})
```
"""
self._check_values_type()
return DatasetDict(
{
k: dataset.rename_column(original_column_name=original_column_name, new_column_name=new_column_name)
for k, dataset in self.items()
}
)
def rename_columns(self, column_mapping: Dict[str, str]) -> "DatasetDict":
"""
Rename several columns in the dataset, and move the features associated to the original columns under
the new column names.
The transformation is applied to all the datasets of the dataset dictionary.
Args:
column_mapping (`Dict[str, str]`):
A mapping of columns to rename to their new names.
Returns:
[`DatasetDict`]: A copy of the dataset with renamed columns.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes")
>>> ds.rename_columns({'text': 'text_new', 'label': 'label_new'})
DatasetDict({
train: Dataset({
features: ['text_new', 'label_new'],
num_rows: 8530
})
validation: Dataset({
features: ['text_new', 'label_new'],
num_rows: 1066
})
test: Dataset({
features: ['text_new', 'label_new'],
num_rows: 1066
})
})
```
"""
self._check_values_type()
return DatasetDict({k: dataset.rename_columns(column_mapping=column_mapping) for k, dataset in self.items()})
def select_columns(self, column_names: Union[str, List[str]]) -> "DatasetDict":
"""Select one or several column(s) from each split in the dataset and
the features associated to the column(s).
The transformation is applied to all the splits of the dataset
dictionary.
Args:
column_names (`Union[str, List[str]]`):
Name of the column(s) to keep.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes")
>>> ds.select_columns("text")
DatasetDict({
train: Dataset({
features: ['text'],
num_rows: 8530
})
validation: Dataset({
features: ['text'],
num_rows: 1066
})
test: Dataset({
features: ['text'],
num_rows: 1066
})
})
```
"""
self._check_values_type()
return DatasetDict({k: dataset.select_columns(column_names=column_names) for k, dataset in self.items()})
def class_encode_column(self, column: str, include_nulls: bool = False) -> "DatasetDict":
"""Casts the given column as [`~datasets.features.ClassLabel`] and updates the tables.
Args:
column (`str`):
The name of the column to cast.
include_nulls (`bool`, defaults to `False`):
Whether to include null values in the class labels. If `True`, the null values will be encoded as the `"None"` class label.
<Added version="1.14.2"/>
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("boolq")
>>> ds["train"].features
{'answer': Value(dtype='bool', id=None),
'passage': Value(dtype='string', id=None),
'question': Value(dtype='string', id=None)}
>>> ds = ds.class_encode_column("answer")
>>> ds["train"].features
{'answer': ClassLabel(num_classes=2, names=['False', 'True'], id=None),
'passage': Value(dtype='string', id=None),
'question': Value(dtype='string', id=None)}
```
"""
self._check_values_type()
return DatasetDict(
{k: dataset.class_encode_column(column=column, include_nulls=include_nulls) for k, dataset in self.items()}
)
@contextlib.contextmanager
def formatted_as(
self,
type: Optional[str] = None,
columns: Optional[List] = None,
output_all_columns: bool = False,
**format_kwargs,
):
"""To be used in a `with` statement. Set `__getitem__` return format (type and columns).
The transformation is applied to all the datasets of the dataset dictionary.
Args:
type (`str`, *optional*):
Either output type selected in `[None, 'numpy', 'torch', 'tensorflow', 'jax', 'arrow', 'pandas', 'polars']`.
`None` means `__getitem__` returns python objects (default).
columns (`List[str]`, *optional*):
Columns to format in the output.
`None` means `__getitem__` returns all columns (default).
output_all_columns (`bool`, defaults to False):
Keep un-formatted columns as well in the output (as python objects).
**format_kwargs (additional keyword arguments):
Keywords arguments passed to the convert function like `np.array`, `torch.tensor` or `tensorflow.ragged.constant`.
"""
self._check_values_type()
old_format_type = {k: dataset._format_type for k, dataset in self.items()}
old_format_kwargs = {k: dataset._format_kwargs for k, dataset in self.items()}
old_format_columns = {k: dataset._format_columns for k, dataset in self.items()}
old_output_all_columns = {k: dataset._output_all_columns for k, dataset in self.items()}
try:
self.set_format(type, columns, output_all_columns, **format_kwargs)
yield
finally:
for k, dataset in self.items():
dataset.set_format(
old_format_type[k], old_format_columns[k], old_output_all_columns[k], **old_format_kwargs[k]
)
def set_format(
self,
type: Optional[str] = None,
columns: Optional[List] = None,
output_all_columns: bool = False,
**format_kwargs,
):
"""Set `__getitem__` return format (type and columns).
The format is set for every dataset in the dataset dictionary.
Args:
type (`str`, *optional*):
Either output type selected in `[None, 'numpy', 'torch', 'tensorflow', 'jax', 'arrow', 'pandas', 'polars']`.
`None` means `__getitem__` returns python objects (default).
columns (`List[str]`, *optional*):
Columns to format in the output.
`None` means `__getitem__` returns all columns (default).
output_all_columns (`bool`, defaults to False):
Keep un-formatted columns as well in the output (as python objects),
**format_kwargs (additional keyword arguments):
Keywords arguments passed to the convert function like `np.array`, `torch.tensor` or `tensorflow.ragged.constant`.
It is possible to call `map` after calling `set_format`. Since `map` may add new columns, the list of formatted columns
gets updated. In this case, if you apply `map` on a dataset to add a new column, then this column will be formatted:
`new formatted columns = (all columns - previously unformatted columns)`
Example:
```py
>>> from datasets import load_dataset
>>> from transformers import AutoTokenizer
>>> tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
>>> ds = ds.map(lambda x: tokenizer(x["text"], truncation=True, padding=True), batched=True)
>>> ds.set_format(type="numpy", columns=['input_ids', 'token_type_ids', 'attention_mask', 'label'])
>>> ds["train"].format
{'columns': ['input_ids', 'token_type_ids', 'attention_mask', 'label'],
'format_kwargs': {},
'output_all_columns': False,
'type': 'numpy'}
```
"""
self._check_values_type()
for dataset in self.values():
dataset.set_format(type=type, columns=columns, output_all_columns=output_all_columns, **format_kwargs)
def reset_format(self):
"""Reset `__getitem__` return format to python objects and all columns.
The transformation is applied to all the datasets of the dataset dictionary.
Same as `self.set_format()`
Example:
```py
>>> from datasets import load_dataset
>>> from transformers import AutoTokenizer
>>> ds = load_dataset("rotten_tomatoes")
>>> tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
>>> ds = ds.map(lambda x: tokenizer(x["text"], truncation=True, padding=True), batched=True)
>>> ds.set_format(type="numpy", columns=['input_ids', 'token_type_ids', 'attention_mask', 'label'])
>>> ds["train"].format
{'columns': ['input_ids', 'token_type_ids', 'attention_mask', 'label'],
'format_kwargs': {},
'output_all_columns': False,
'type': 'numpy'}
>>> ds.reset_format()
>>> ds["train"].format
{'columns': ['text', 'label', 'input_ids', 'token_type_ids', 'attention_mask'],
'format_kwargs': {},
'output_all_columns': False,
'type': None}
```
"""
self._check_values_type()
for dataset in self.values():
dataset.set_format()
def set_transform(
self,
transform: Optional[Callable],
columns: Optional[List] = None,
output_all_columns: bool = False,
):
"""Set ``__getitem__`` return format using this transform. The transform is applied on-the-fly on batches when ``__getitem__`` is called.
The transform is set for every dataset in the dataset dictionary
As :func:`datasets.Dataset.set_format`, this can be reset using :func:`datasets.Dataset.reset_format`
Args:
transform (`Callable`, optional): user-defined formatting transform, replaces the format defined by :func:`datasets.Dataset.set_format`
A formatting function is a callable that takes a batch (as a dict) as input and returns a batch.
This function is applied right before returning the objects in ``__getitem__``.
columns (`List[str]`, optional): columns to format in the output
If specified, then the input batch of the transform only contains those columns.
output_all_columns (`bool`, default to False): keep un-formatted columns as well in the output (as python objects)
If set to True, then the other un-formatted columns are kept with the output of the transform.
"""
self._check_values_type()
for dataset in self.values():
dataset.set_format("custom", columns=columns, output_all_columns=output_all_columns, transform=transform)
def with_format(
self,
type: Optional[str] = None,
columns: Optional[List] = None,
output_all_columns: bool = False,
**format_kwargs,
) -> "DatasetDict":
"""Set `__getitem__` return format (type and columns). The data formatting is applied on-the-fly.
The format `type` (for example "numpy") is used to format batches when using `__getitem__`.
The format is set for every dataset in the dataset dictionary.
It's also possible to use custom transforms for formatting using [`~datasets.Dataset.with_transform`].
Contrary to [`~datasets.DatasetDict.set_format`], `with_format` returns a new [`DatasetDict`] object with new [`Dataset`] objects.
Args:
type (`str`, *optional*):
Either output type selected in `[None, 'numpy', 'torch', 'tensorflow', 'jax', 'arrow', 'pandas', 'polars']`.
`None` means `__getitem__` returns python objects (default).
columns (`List[str]`, *optional*):
Columns to format in the output.
`None` means `__getitem__` returns all columns (default).
output_all_columns (`bool`, defaults to `False`):
Keep un-formatted columns as well in the output (as python objects).
**format_kwargs (additional keyword arguments):
Keywords arguments passed to the convert function like `np.array`, `torch.tensor` or `tensorflow.ragged.constant`.
Example:
```py
>>> from datasets import load_dataset
>>> from transformers import AutoTokenizer
>>> ds = load_dataset("rotten_tomatoes")
>>> tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
>>> ds = ds.map(lambda x: tokenizer(x['text'], truncation=True, padding=True), batched=True)
>>> ds["train"].format
{'columns': ['text', 'label', 'input_ids', 'token_type_ids', 'attention_mask'],
'format_kwargs': {},
'output_all_columns': False,
'type': None}
>>> ds = ds.with_format("torch")
>>> ds["train"].format
{'columns': ['text', 'label', 'input_ids', 'token_type_ids', 'attention_mask'],
'format_kwargs': {},
'output_all_columns': False,
'type': 'torch'}
>>> ds["train"][0]
{'text': 'compassionately explores the seemingly irreconcilable situation between conservative christian parents and their estranged gay and lesbian children .',
'label': tensor(1),
'input_ids': tensor([ 101, 18027, 16310, 16001, 1103, 9321, 178, 11604, 7235, 6617,
1742, 2165, 2820, 1206, 6588, 22572, 12937, 1811, 2153, 1105,
1147, 12890, 19587, 6463, 1105, 15026, 1482, 119, 102, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0]),
'token_type_ids': tensor([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]),
'attention_mask': tensor([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])}
```
"""
dataset = copy.deepcopy(self)
dataset.set_format(type=type, columns=columns, output_all_columns=output_all_columns, **format_kwargs)
return dataset
def with_transform(
self,
transform: Optional[Callable],
columns: Optional[List] = None,
output_all_columns: bool = False,
) -> "DatasetDict":
"""Set `__getitem__` return format using this transform. The transform is applied on-the-fly on batches when `__getitem__` is called.
The transform is set for every dataset in the dataset dictionary
As [`~datasets.Dataset.set_format`], this can be reset using [`~datasets.Dataset.reset_format`].
Contrary to [`~datasets.DatasetDict.set_transform`], `with_transform` returns a new [`DatasetDict`] object with new [`Dataset`] objects.
Args:
transform (`Callable`, *optional*):
User-defined formatting transform, replaces the format defined by [`~datasets.Dataset.set_format`].
A formatting function is a callable that takes a batch (as a dict) as input and returns a batch.
This function is applied right before returning the objects in `__getitem__`.
columns (`List[str]`, *optional*):
Columns to format in the output.
If specified, then the input batch of the transform only contains those columns.
output_all_columns (`bool`, defaults to False):
Keep un-formatted columns as well in the output (as python objects).
If set to `True`, then the other un-formatted columns are kept with the output of the transform.
Example:
```py
>>> from datasets import load_dataset
>>> from transformers import AutoTokenizer
>>> ds = load_dataset("rotten_tomatoes")
>>> tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
>>> def encode(example):
... return tokenizer(example['text'], truncation=True, padding=True, return_tensors="pt")
>>> ds = ds.with_transform(encode)
>>> ds["train"][0]
{'attention_mask': tensor([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1]),
'input_ids': tensor([ 101, 1103, 2067, 1110, 17348, 1106, 1129, 1103, 6880, 1432,
112, 188, 1207, 107, 14255, 1389, 107, 1105, 1115, 1119,
112, 188, 1280, 1106, 1294, 170, 24194, 1256, 3407, 1190,
170, 11791, 5253, 188, 1732, 7200, 10947, 12606, 2895, 117,
179, 7766, 118, 172, 15554, 1181, 3498, 6961, 3263, 1137,
188, 1566, 7912, 14516, 6997, 119, 102]),
'token_type_ids': tensor([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0])}
```
"""
dataset = copy.deepcopy(self)
dataset.set_transform(transform=transform, columns=columns, output_all_columns=output_all_columns)
return dataset
def map(
self,
function: Optional[Callable] = None,
with_indices: bool = False,
with_rank: bool = False,
input_columns: Optional[Union[str, List[str]]] = None,
batched: bool = False,
batch_size: Optional[int] = 1000,
drop_last_batch: bool = False,
remove_columns: Optional[Union[str, List[str]]] = None,
keep_in_memory: bool = False,
load_from_cache_file: Optional[bool] = None,
cache_file_names: Optional[Dict[str, Optional[str]]] = None,
writer_batch_size: Optional[int] = 1000,
features: Optional[Features] = None,
disable_nullable: bool = False,
fn_kwargs: Optional[dict] = None,
num_proc: Optional[int] = None,
desc: Optional[str] = None,
) -> "DatasetDict":
"""Apply a function to all the elements in the table (individually or in batches)
and update the table (if function does updated examples).
The transformation is applied to all the datasets of the dataset dictionary.
Args:
function (`callable`): with one of the following signature:
- `function(example: Dict[str, Any]) -> Dict[str, Any]` if `batched=False` and `with_indices=False`
- `function(example: Dict[str, Any], indices: int) -> Dict[str, Any]` if `batched=False` and `with_indices=True`
- `function(batch: Dict[str, List]) -> Dict[str, List]` if `batched=True` and `with_indices=False`
- `function(batch: Dict[str, List], indices: List[int]) -> Dict[str, List]` if `batched=True` and `with_indices=True`
For advanced usage, the function can also return a `pyarrow.Table`.
Moreover if your function returns nothing (`None`), then `map` will run your function and return the dataset unchanged.
with_indices (`bool`, defaults to `False`):
Provide example indices to `function`. Note that in this case the signature of `function` should be `def function(example, idx): ...`.
with_rank (`bool`, defaults to `False`):
Provide process rank to `function`. Note that in this case the
signature of `function` should be `def function(example[, idx], rank): ...`.
input_columns (`[Union[str, List[str]]]`, *optional*, defaults to `None`):
The columns to be passed into `function` as
positional arguments. If `None`, a dict mapping to all formatted columns is passed as one argument.
batched (`bool`, defaults to `False`):
Provide batch of examples to `function`.
batch_size (`int`, *optional*, defaults to `1000`):
Number of examples per batch provided to `function` if `batched=True`.
If `batch_size <= 0` or `batch_size == None`, provide the full dataset as a single batch to `function`.
drop_last_batch (`bool`, defaults to `False`):
Whether a last batch smaller than the batch_size should be
dropped instead of being processed by the function.
remove_columns (`[Union[str, List[str]]]`, *optional*, defaults to `None`):
Remove a selection of columns while doing the mapping.
Columns will be removed before updating the examples with the output of `function`, i.e. if `function` is adding
columns with names in `remove_columns`, these columns will be kept.
keep_in_memory (`bool`, defaults to `False`):
Keep the dataset in memory instead of writing it to a cache file.
load_from_cache_file (`Optional[bool]`, defaults to `True` if caching is enabled):
If a cache file storing the current computation from `function`
can be identified, use it instead of recomputing.
cache_file_names (`[Dict[str, str]]`, *optional*, defaults to `None`):
Provide the name of a path for the cache file. It is used to store the
results of the computation instead of the automatically generated cache file name.
You have to provide one `cache_file_name` per dataset in the dataset dictionary.
writer_batch_size (`int`, default `1000`):
Number of rows per write operation for the cache file writer.
This value is a good trade-off between memory usage during the processing, and processing speed.
A higher value makes the processing do fewer lookups, a lower value consumes less temporary memory while running `map`.
features (`[datasets.Features]`, *optional*, defaults to `None`):
Use a specific [`Features`] to store the cache file
instead of the automatically generated one.
disable_nullable (`bool`, defaults to `False`):
Disallow null values in the table.
fn_kwargs (`Dict`, *optional*, defaults to `None`):
Keyword arguments to be passed to `function`
num_proc (`int`, *optional*, defaults to `None`):
Number of processes for multiprocessing. By default it doesn't
use multiprocessing.
desc (`str`, *optional*, defaults to `None`):
Meaningful description to be displayed alongside with the progress bar while mapping examples.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes")
>>> def add_prefix(example):
... example["text"] = "Review: " + example["text"]
... return example
>>> ds = ds.map(add_prefix)
>>> ds["train"][0:3]["text"]
['Review: the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .',
'Review: the gorgeously elaborate continuation of " the lord of the rings " trilogy is so huge that a column of words cannot adequately describe co-writer/director peter jackson\'s expanded vision of j . r . r . tolkien\'s middle-earth .',
'Review: effective but too-tepid biopic']
# process a batch of examples
>>> ds = ds.map(lambda example: tokenizer(example["text"]), batched=True)
# set number of processors
>>> ds = ds.map(add_prefix, num_proc=4)
```
"""
self._check_values_type()
if cache_file_names is None:
cache_file_names = {k: None for k in self}
return DatasetDict(
{
k: dataset.map(
function=function,
with_indices=with_indices,
with_rank=with_rank,
input_columns=input_columns,
batched=batched,
batch_size=batch_size,
drop_last_batch=drop_last_batch,
remove_columns=remove_columns,
keep_in_memory=keep_in_memory,
load_from_cache_file=load_from_cache_file,
cache_file_name=cache_file_names[k],
writer_batch_size=writer_batch_size,
features=features,
disable_nullable=disable_nullable,
fn_kwargs=fn_kwargs,
num_proc=num_proc,
desc=desc,
)
for k, dataset in self.items()
}
)
def filter(
self,
function: Optional[Callable] = None,
with_indices: bool = False,
with_rank: bool = False,
input_columns: Optional[Union[str, List[str]]] = None,
batched: bool = False,
batch_size: Optional[int] = 1000,
keep_in_memory: bool = False,
load_from_cache_file: Optional[bool] = None,
cache_file_names: Optional[Dict[str, Optional[str]]] = None,
writer_batch_size: Optional[int] = 1000,
fn_kwargs: Optional[dict] = None,
num_proc: Optional[int] = None,
desc: Optional[str] = None,
) -> "DatasetDict":
"""Apply a filter function to all the elements in the table in batches
and update the table so that the dataset only includes examples according to the filter function.
The transformation is applied to all the datasets of the dataset dictionary.
Args:
function (`Callable`): Callable with one of the following signatures:
- `function(example: Dict[str, Any]) -> bool` if `batched=False` and `with_indices=False` and `with_rank=False`
- `function(example: Dict[str, Any], *extra_args) -> bool` if `batched=False` and `with_indices=True` and/or `with_rank=True` (one extra arg for each)
- `function(batch: Dict[str, List]) -> List[bool]` if `batched=True` and `with_indices=False` and `with_rank=False`
- `function(batch: Dict[str, List], *extra_args) -> List[bool]` if `batched=True` and `with_indices=True` and/or `with_rank=True` (one extra arg for each)
If no function is provided, defaults to an always `True` function: `lambda x: True`.
with_indices (`bool`, defaults to `False`):
Provide example indices to `function`. Note that in this case the
signature of `function` should be `def function(example, idx[, rank]): ...`.
with_rank (`bool`, defaults to `False`):
Provide process rank to `function`. Note that in this case the
signature of `function` should be `def function(example[, idx], rank): ...`.
input_columns (`[Union[str, List[str]]]`, *optional*, defaults to `None`):
The columns to be passed into `function` as
positional arguments. If `None`, a dict mapping to all formatted columns is passed as one argument.
batched (`bool`, defaults to `False`):
Provide batch of examples to `function`.
batch_size (`int`, *optional*, defaults to `1000`):
Number of examples per batch provided to `function` if `batched=True`. If `batch_size <= 0` or `batch_size == None`,
the full dataset is provided as a single batch to `function`.
keep_in_memory (`bool`, defaults to `False`):
Keep the dataset in memory instead of writing it to a cache file.
load_from_cache_file (`Optional[bool]`, defaults to `True` if caching is enabled):
If a cache file storing the current computation from `function`
can be identified, use it instead of recomputing.
cache_file_names (`[Dict[str, str]]`, *optional*, defaults to `None`):
Provide the name of a path for the cache file. It is used to store the
results of the computation instead of the automatically generated cache file name.
You have to provide one `cache_file_name` per dataset in the dataset dictionary.
writer_batch_size (`int`, defaults to `1000`):
Number of rows per write operation for the cache file writer.
This value is a good trade-off between memory usage during the processing, and processing speed.
A higher value makes the processing do fewer lookups, a lower value consumes less temporary memory while running `map`.
fn_kwargs (`Dict`, *optional*, defaults to `None`):
Keyword arguments to be passed to `function`
num_proc (`int`, *optional*, defaults to `None`):
Number of processes for multiprocessing. By default it doesn't
use multiprocessing.
desc (`str`, *optional*, defaults to `None`):
Meaningful description to be displayed alongside with the progress bar while filtering examples.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes")
>>> ds.filter(lambda x: x["label"] == 1)
DatasetDict({
train: Dataset({
features: ['text', 'label'],
num_rows: 4265
})
validation: Dataset({
features: ['text', 'label'],
num_rows: 533
})
test: Dataset({
features: ['text', 'label'],
num_rows: 533
})
})
```
"""
self._check_values_type()
if cache_file_names is None:
cache_file_names = {k: None for k in self}
return DatasetDict(
{
k: dataset.filter(
function=function,
with_indices=with_indices,
with_rank=with_rank,
input_columns=input_columns,
batched=batched,
batch_size=batch_size,
keep_in_memory=keep_in_memory,
load_from_cache_file=load_from_cache_file,
cache_file_name=cache_file_names[k],
writer_batch_size=writer_batch_size,
fn_kwargs=fn_kwargs,
num_proc=num_proc,
desc=desc,
)
for k, dataset in self.items()
}
)
def flatten_indices(
self,
keep_in_memory: bool = False,
cache_file_names: Optional[Dict[str, Optional[str]]] = None,
writer_batch_size: Optional[int] = 1000,
features: Optional[Features] = None,
disable_nullable: bool = False,
num_proc: Optional[int] = None,
new_fingerprint: Optional[str] = None,
) -> "DatasetDict":
"""Create and cache a new Dataset by flattening the indices mapping.
Args:
keep_in_memory (`bool`, defaults to `False`):
Keep the dataset in memory instead of writing it to a cache file.
cache_file_names (`Dict[str, str]`, *optional*, default `None`):
Provide the name of a path for the cache file. It is used to store the
results of the computation instead of the automatically generated cache file name.
You have to provide one `cache_file_name` per dataset in the dataset dictionary.
writer_batch_size (`int`, defaults to `1000`):
Number of rows per write operation for the cache file writer.
This value is a good trade-off between memory usage during the processing, and processing speed.
A higher value makes the processing do fewer lookups, a lower value consumes less temporary memory while running `map`.
features (`Optional[datasets.Features]`, defaults to `None`):
Use a specific [`Features`] to store the cache file
instead of the automatically generated one.
disable_nullable (`bool`, defaults to `False`):
Disallow null values in the table.
num_proc (`int`, *optional*, defaults to `None`):
Max number of processes when generating the cache. Already cached shards are loaded sequentially.
new_fingerprint (`str`, *optional*, defaults to `None`):
The new fingerprint of the dataset after transform.
If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments
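Example (an illustrative sketch, not part of the original docstring; it reuses the `"rotten_tomatoes"` dataset from the other examples):
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes")
>>> ds = ds.shuffle(seed=42)  # adds an indices mapping on top of the Arrow tables
>>> ds = ds.flatten_indices()  # rewrites the rows in shuffled order and drops the mapping
```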
"""
self._check_values_type()
if cache_file_names is None:
cache_file_names = {k: None for k in self}
return DatasetDict(
{
k: dataset.flatten_indices(
keep_in_memory=keep_in_memory,
cache_file_name=cache_file_names[k],
writer_batch_size=writer_batch_size,
features=features,
disable_nullable=disable_nullable,
num_proc=num_proc,
new_fingerprint=new_fingerprint,
)
for k, dataset in self.items()
}
)
def sort(
self,
column_names: Union[str, Sequence[str]],
reverse: Union[bool, Sequence[bool]] = False,
null_placement: str = "at_end",
keep_in_memory: bool = False,
load_from_cache_file: Optional[bool] = None,
indices_cache_file_names: Optional[Dict[str, Optional[str]]] = None,
writer_batch_size: Optional[int] = 1000,
) -> "DatasetDict":
"""Create a new dataset sorted according to a single or multiple columns.
Args:
column_names (`Union[str, Sequence[str]]`):
Column name(s) to sort by.
reverse (`Union[bool, Sequence[bool]]`, defaults to `False`):
If `True`, sort by descending order rather than ascending. If a single bool is provided,
the value is applied to the sorting of all column names. Otherwise a list of bools with the
same length and order as column_names must be provided.
null_placement (`str`, defaults to `at_end`):
Put `None` values at the beginning if `at_start` or `first`, or at the end if `at_end` or `last`.
keep_in_memory (`bool`, defaults to `False`):
Keep the sorted indices in memory instead of writing it to a cache file.
load_from_cache_file (`Optional[bool]`, defaults to `True` if caching is enabled):
If a cache file storing the sorted indices
can be identified, use it instead of recomputing.
indices_cache_file_names (`[Dict[str, str]]`, *optional*, defaults to `None`):
Provide the name of a path for the cache file. It is used to store the
indices mapping instead of the automatically generated cache file name.
You have to provide one `cache_file_name` per dataset in the dataset dictionary.
writer_batch_size (`int`, defaults to `1000`):
Number of rows per write operation for the cache file writer.
A higher value gives smaller cache files, a lower value consumes less temporary memory.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset('rotten_tomatoes')
>>> ds['train']['label'][:10]
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
>>> sorted_ds = ds.sort('label')
>>> sorted_ds['train']['label'][:10]
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
>>> another_sorted_ds = ds.sort(['label', 'text'], reverse=[True, False])
>>> another_sorted_ds['train']['label'][:10]
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
```
"""
self._check_values_type()
if indices_cache_file_names is None:
indices_cache_file_names = {k: None for k in self}
return DatasetDict(
{
k: dataset.sort(
column_names=column_names,
reverse=reverse,
null_placement=null_placement,
keep_in_memory=keep_in_memory,
load_from_cache_file=load_from_cache_file,
indices_cache_file_name=indices_cache_file_names[k],
writer_batch_size=writer_batch_size,
)
for k, dataset in self.items()
}
)
def shuffle(
self,
seeds: Optional[Union[int, Dict[str, Optional[int]]]] = None,
seed: Optional[int] = None,
generators: Optional[Dict[str, np.random.Generator]] = None,
keep_in_memory: bool = False,
load_from_cache_file: Optional[bool] = None,
indices_cache_file_names: Optional[Dict[str, Optional[str]]] = None,
writer_batch_size: Optional[int] = 1000,
) -> "DatasetDict":
"""Create a new Dataset where the rows are shuffled.
The transformation is applied to all the datasets of the dataset dictionary.
Currently shuffling uses numpy random generators.
You can either supply a NumPy BitGenerator to use, or a seed to initiate NumPy's default random generator (PCG64).
Args:
seeds (`Dict[str, int]` or `int`, *optional*):
A seed to initialize the default BitGenerator if `generator=None`.
If `None`, then fresh, unpredictable entropy will be pulled from the OS.
If an `int` or `array_like[ints]` is passed, then it will be passed to SeedSequence to derive the initial BitGenerator state.
You can provide one `seed` per dataset in the dataset dictionary.
seed (`int`, *optional*):
A seed to initialize the default BitGenerator if `generator=None`. Alias for seeds (a `ValueError` is raised if both are provided).
generators (`Dict[str, np.random.Generator]`, *optional*):
Numpy random Generator to use to compute the permutation of the dataset rows.
If `generator=None` (default), uses `np.random.default_rng` (the default BitGenerator (PCG64) of NumPy).
You have to provide one `generator` per dataset in the dataset dictionary.
keep_in_memory (`bool`, defaults to `False`):
Keep the dataset in memory instead of writing it to a cache file.
load_from_cache_file (`Optional[bool]`, defaults to `True` if caching is enabled):
If a cache file storing the current computation from `function`
can be identified, use it instead of recomputing.
indices_cache_file_names (`Dict[str, str]`, *optional*):
Provide the name of a path for the cache file. It is used to store the
indices mappings instead of the automatically generated cache file name.
You have to provide one `cache_file_name` per dataset in the dataset dictionary.
writer_batch_size (`int`, defaults to `1000`):
Number of rows per write operation for the cache file writer.
This value is a good trade-off between memory usage during the processing, and processing speed.
A higher value makes the processing do fewer lookups, a lower value consumes less temporary memory while running `map`.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes")
>>> ds["train"]["label"][:10]
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
# set a seed
>>> shuffled_ds = ds.shuffle(seed=42)
>>> shuffled_ds["train"]["label"][:10]
[0, 1, 0, 1, 0, 0, 0, 0, 0, 0]
```
"""
self._check_values_type()
if seed is not None and seeds is not None:
raise ValueError("Please specify seed or seeds, but not both")
seeds = seed if seed is not None else seeds
if seeds is None:
seeds = {k: None for k in self}
elif not isinstance(seeds, dict):
seeds = {k: seeds for k in self}
if generators is None:
generators = {k: None for k in self}
if indices_cache_file_names is None:
indices_cache_file_names = {k: None for k in self}
return DatasetDict(
{
k: dataset.shuffle(
seed=seeds[k],
generator=generators[k],
keep_in_memory=keep_in_memory,
load_from_cache_file=load_from_cache_file,
indices_cache_file_name=indices_cache_file_names[k],
writer_batch_size=writer_batch_size,
)
for k, dataset in self.items()
}
)
def save_to_disk(
self,
dataset_dict_path: PathLike,
max_shard_size: Optional[Union[str, int]] = None,
num_shards: Optional[Dict[str, int]] = None,
num_proc: Optional[int] = None,
storage_options: Optional[dict] = None,
):
"""
Saves a dataset dict to a filesystem using `fsspec.spec.AbstractFileSystem`.
For [`Image`], [`Audio`] and [`Video`] data:
All the Image(), Audio() and Video() data are stored in the arrow files.
If you want to store paths or urls, please use the Value("string") type.
Args:
dataset_dict_path (`path-like`):
Path (e.g. `dataset/train`) or remote URI (e.g. `s3://my-bucket/dataset/train`)
of the dataset dict directory where the dataset dict will be saved to.
max_shard_size (`int` or `str`, *optional*, defaults to `"500MB"`):
The maximum size of the dataset shards to be saved to the filesystem. If expressed as a string, needs to be digits followed by a unit
(like `"50MB"`).
num_shards (`Dict[str, int]`, *optional*):
Number of shards to write. By default the number of shards depends on `max_shard_size` and `num_proc`.
You need to provide the number of shards for each dataset in the dataset dictionary.
Use a dictionary to define a different num_shards for each split.
<Added version="2.8.0"/>
num_proc (`int`, *optional*, default `None`):
Number of processes used to save the dataset shards locally.
Multiprocessing is disabled by default.
<Added version="2.8.0"/>
storage_options (`dict`, *optional*):
Key/value pairs to be passed on to the file-system backend, if any.
<Added version="2.8.0"/>
Example:
```python
>>> dataset_dict.save_to_disk("path/to/dataset/directory")
>>> dataset_dict.save_to_disk("path/to/dataset/directory", max_shard_size="1GB")
>>> dataset_dict.save_to_disk("path/to/dataset/directory", num_shards={"train": 1024, "test": 8})
```
"""
fs: fsspec.AbstractFileSystem
fs, _ = url_to_fs(dataset_dict_path, **(storage_options or {}))
if num_shards is None:
num_shards = {k: None for k in self}
elif not isinstance(num_shards, dict):
raise ValueError(
"Please provide one `num_shards` per dataset in the dataset dictionary, e.g. {{'train': 128, 'test': 4}}"
)
fs.makedirs(dataset_dict_path, exist_ok=True)
with fs.open(posixpath.join(dataset_dict_path, config.DATASETDICT_JSON_FILENAME), "w", encoding="utf-8") as f:
json.dump({"splits": list(self)}, f)
for k, dataset in self.items():
dataset.save_to_disk(
posixpath.join(dataset_dict_path, k),
num_shards=num_shards.get(k),
max_shard_size=max_shard_size,
num_proc=num_proc,
storage_options=storage_options,
)
@staticmethod
def load_from_disk(
dataset_dict_path: PathLike,
keep_in_memory: Optional[bool] = None,
storage_options: Optional[dict] = None,
) -> "DatasetDict":
"""
Load a dataset that was previously saved using [`save_to_disk`] from a filesystem using `fsspec.spec.AbstractFileSystem`.
Args:
dataset_dict_path (`path-like`):
Path (e.g. `"dataset/train"`) or remote URI (e.g. `"s3//my-bucket/dataset/train"`)
of the dataset dict directory where the dataset dict will be loaded from.
keep_in_memory (`bool`, defaults to `None`):
Whether to copy the dataset in-memory. If `None`, the
dataset will not be copied in-memory unless explicitly enabled by setting
`datasets.config.IN_MEMORY_MAX_SIZE` to nonzero. See more details in the
[improve performance](../cache#improve-performance) section.
storage_options (`dict`, *optional*):
Key/value pairs to be passed on to the file-system backend, if any.
<Added version="2.8.0"/>
Returns:
[`DatasetDict`]
Example:
```py
>>> ds = load_from_disk('path/to/dataset/directory')
```
"""
fs: fsspec.AbstractFileSystem
fs, dataset_dict_path = url_to_fs(dataset_dict_path, **(storage_options or {}))
dataset_dict_json_path = posixpath.join(dataset_dict_path, config.DATASETDICT_JSON_FILENAME)
dataset_state_json_path = posixpath.join(dataset_dict_path, config.DATASET_STATE_JSON_FILENAME)
dataset_info_path = posixpath.join(dataset_dict_path, config.DATASET_INFO_FILENAME)
if not fs.isfile(dataset_dict_json_path):
if fs.isfile(dataset_info_path) and fs.isfile(dataset_state_json_path):
raise FileNotFoundError(
f"No such file: '{dataset_dict_json_path}'. Expected to load a `DatasetDict` object, but got a `Dataset`. Please use either `datasets.load_from_disk` or `Dataset.load_from_disk` instead."
)
raise FileNotFoundError(
f"No such file: '{dataset_dict_json_path}'. Expected to load a `DatasetDict` object, but provided path is not a `DatasetDict`."
)
with fs.open(dataset_dict_json_path, "r", encoding="utf-8") as f:
splits = json.load(f)["splits"]
dataset_dict = DatasetDict()
for k in splits:
dataset_dict_split_path = posixpath.join(fs.unstrip_protocol(dataset_dict_path), k)
dataset_dict[k] = Dataset.load_from_disk(
dataset_dict_split_path, keep_in_memory=keep_in_memory, storage_options=storage_options
)
return dataset_dict
@staticmethod
def from_csv(
path_or_paths: Dict[str, PathLike],
features: Optional[Features] = None,
cache_dir: str = None,
keep_in_memory: bool = False,
**kwargs,
) -> "DatasetDict":
"""Create [`DatasetDict`] from CSV file(s).
Args:
path_or_paths (`dict` of path-like):
Path(s) of the CSV file(s).
features ([`Features`], *optional*):
Dataset features.
cache_dir (str, *optional*, defaults to `"~/.cache/huggingface/datasets"`):
Directory to cache data.
keep_in_memory (`bool`, defaults to `False`):
Whether to copy the data in-memory.
**kwargs (additional keyword arguments):
Keyword arguments to be passed to [`pandas.read_csv`].
Returns:
[`DatasetDict`]
Example:
```py
>>> from datasets import DatasetDict
>>> ds = DatasetDict.from_csv({'train': 'path/to/dataset.csv'})
```
"""
# Dynamic import to avoid circular dependency
from .io.csv import CsvDatasetReader
return CsvDatasetReader(
path_or_paths, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs
).read()
@staticmethod
def from_json(
path_or_paths: Dict[str, PathLike],
features: Optional[Features] = None,
cache_dir: str = None,
keep_in_memory: bool = False,
**kwargs,
) -> "DatasetDict":
"""Create [`DatasetDict`] from JSON Lines file(s).
Args:
path_or_paths (`dict` of path-like):
Path(s) of the JSON Lines file(s).
features ([`Features`], *optional*):
Dataset features.
cache_dir (str, *optional*, defaults to `"~/.cache/huggingface/datasets"`):
Directory to cache data.
keep_in_memory (`bool`, defaults to `False`):
Whether to copy the data in-memory.
**kwargs (additional keyword arguments):
Keyword arguments to be passed to [`JsonConfig`].
Returns:
[`DatasetDict`]
Example:
```py
>>> from datasets import DatasetDict
>>> ds = DatasetDict.from_json({'train': 'path/to/dataset.json'})
```
"""
# Dynamic import to avoid circular dependency
from .io.json import JsonDatasetReader
return JsonDatasetReader(
path_or_paths, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs
).read()
@staticmethod
def from_parquet(
path_or_paths: Dict[str, PathLike],
features: Optional[Features] = None,
cache_dir: str = None,
keep_in_memory: bool = False,
columns: Optional[List[str]] = None,
**kwargs,
) -> "DatasetDict":
"""Create [`DatasetDict`] from Parquet file(s).
Args:
path_or_paths (`dict` of path-like):
Path(s) of the Parquet file(s).
features ([`Features`], *optional*):
Dataset features.
cache_dir (`str`, *optional*, defaults to `"~/.cache/huggingface/datasets"`):
Directory to cache data.
keep_in_memory (`bool`, defaults to `False`):
Whether to copy the data in-memory.
columns (`List[str]`, *optional*):
If not `None`, only these columns will be read from the file.
A column name may be a prefix of a nested field, e.g. 'a' will select
'a.b', 'a.c', and 'a.d.e'.
**kwargs (additional keyword arguments):
Keyword arguments to be passed to [`ParquetConfig`].
Returns:
[`DatasetDict`]
Example:
```py
>>> from datasets import DatasetDict
>>> ds = DatasetDict.from_parquet({'train': 'path/to/dataset/parquet'})
```
"""
# Dynamic import to avoid circular dependency
from .io.parquet import ParquetDatasetReader
return ParquetDatasetReader(
path_or_paths,
features=features,
cache_dir=cache_dir,
keep_in_memory=keep_in_memory,
columns=columns,
**kwargs,
).read()
@staticmethod
def from_text(
path_or_paths: Dict[str, PathLike],
features: Optional[Features] = None,
cache_dir: str = None,
keep_in_memory: bool = False,
**kwargs,
) -> "DatasetDict":
"""Create [`DatasetDict`] from text file(s).
Args:
path_or_paths (`dict` of path-like):
Path(s) of the text file(s).
features ([`Features`], *optional*):
Dataset features.
cache_dir (`str`, *optional*, defaults to `"~/.cache/huggingface/datasets"`):
Directory to cache data.
keep_in_memory (`bool`, defaults to `False`):
Whether to copy the data in-memory.
**kwargs (additional keyword arguments):
Keyword arguments to be passed to [`TextConfig`].
Returns:
[`DatasetDict`]
Example:
```py
>>> from datasets import DatasetDict
>>> ds = DatasetDict.from_text({'train': 'path/to/dataset.txt'})
```
"""
# Dynamic import to avoid circular dependency
from .io.text import TextDatasetReader
return TextDatasetReader(
path_or_paths, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs
).read()
@is_documented_by(Dataset.align_labels_with_mapping)
def align_labels_with_mapping(self, label2id: Dict, label_column: str) -> "DatasetDict":
self._check_values_type()
return DatasetDict(
{
k: dataset.align_labels_with_mapping(label2id=label2id, label_column=label_column)
for k, dataset in self.items()
}
)
def push_to_hub(
self,
repo_id,
config_name: str = "default",
set_default: Optional[bool] = None,
data_dir: Optional[str] = None,
commit_message: Optional[str] = None,
commit_description: Optional[str] = None,
private: Optional[bool] = None,
token: Optional[str] = None,
revision: Optional[str] = None,
create_pr: Optional[bool] = False,
max_shard_size: Optional[Union[int, str]] = None,
num_shards: Optional[Dict[str, int]] = None,
embed_external_files: bool = True,
) -> CommitInfo:
"""Pushes the [`DatasetDict`] to the hub as a Parquet dataset.
The [`DatasetDict`] is pushed using HTTP requests and does not require git or git-lfs to be installed.
Each dataset split will be pushed independently. The pushed dataset will keep the original split names.
The resulting Parquet files are self-contained by default: if your dataset contains [`Image`] or [`Audio`]
data, the Parquet files will store the bytes of your images or audio files.
You can disable this by setting `embed_external_files` to False.
Args:
repo_id (`str`):
The ID of the repository to push to in the following format: `<user>/<dataset_name>` or
`<org>/<dataset_name>`. Also accepts `<dataset_name>`, which will default to the namespace
of the logged-in user.
config_name (`str`):
Configuration name of a dataset. Defaults to "default".
set_default (`bool`, *optional*):
Whether to set this configuration as the default one. Otherwise, the default configuration is the one
named "default".
data_dir (`str`, *optional*):
Directory name that will contain the uploaded data files. Defaults to the `config_name` if different
from "default", else "data".
<Added version="2.17.0"/>
commit_message (`str`, *optional*):
Message to commit while pushing. Will default to `"Upload dataset"`.
commit_description (`str`, *optional*):
Description of the commit that will be created.
Additionally, description of the PR if a PR is created (`create_pr` is True).
<Added version="2.16.0"/>
private (`bool`, *optional*):
Whether to make the repo private. If `None` (default), the repo will be public unless the
organization's default is private. This value is ignored if the repo already exists.
token (`str`, *optional*):
An optional authentication token for the Hugging Face Hub. If no token is passed, will default
to the token saved locally when logging in with `huggingface-cli login`. Will raise an error
if no token is passed and the user is not logged-in.
revision (`str`, *optional*):
Branch to push the uploaded files to. Defaults to the `"main"` branch.
<Added version="2.15.0"/>
create_pr (`bool`, *optional*, defaults to `False`):
Whether to create a PR with the uploaded files or directly commit.
<Added version="2.15.0"/>
max_shard_size (`int` or `str`, *optional*, defaults to `"500MB"`):
The maximum size of the dataset shards to be uploaded to the hub. If expressed as a string, needs to be digits followed by a unit
(like `"500MB"` or `"1GB"`).
num_shards (`Dict[str, int]`, *optional*):
Number of shards to write. By default, the number of shards depends on `max_shard_size`.
Use a dictionary to define a different num_shards for each split.
<Added version="2.8.0"/>
embed_external_files (`bool`, defaults to `True`):
Whether to embed file bytes in the shards.
In particular, this will do the following before the push for the fields of type:
- [`Audio`] and [`Image`]: remove local path information and embed the file content in the Parquet files.
Return:
huggingface_hub.CommitInfo
Example:
```python
>>> dataset_dict.push_to_hub("<organization>/<dataset_id>")
>>> dataset_dict.push_to_hub("<organization>/<dataset_id>", private=True)
>>> dataset_dict.push_to_hub("<organization>/<dataset_id>", max_shard_size="1GB")
>>> dataset_dict.push_to_hub("<organization>/<dataset_id>", num_shards={"train": 1024, "test": 8})
```
If you want to add a new configuration (or subset) to a dataset (e.g. if the dataset has multiple tasks/versions/languages):
```python
>>> english_dataset.push_to_hub("<organization>/<dataset_id>", "en")
>>> french_dataset.push_to_hub("<organization>/<dataset_id>", "fr")
>>> # later
>>> english_dataset = load_dataset("<organization>/<dataset_id>", "en")
>>> french_dataset = load_dataset("<organization>/<dataset_id>", "fr")
```
"""
if num_shards is None:
num_shards = {k: None for k in self}
elif not isinstance(num_shards, dict):
raise ValueError(
"Please provide one `num_shards` per dataset in the dataset dictionary, e.g. {{'train': 128, 'test': 4}}"
)
self._check_values_type()
self._check_values_features()
total_uploaded_size = 0
total_dataset_nbytes = 0
info_to_dump: DatasetInfo = next(iter(self.values())).info.copy()
info_to_dump.config_name = config_name
info_to_dump.splits = SplitDict()
for split in self.keys():
if not re.match(_split_re, split):
raise ValueError(f"Split name should match '{_split_re}' but got '{split}'.")
api = HfApi(endpoint=config.HF_ENDPOINT, token=token)
repo_url = api.create_repo(
repo_id,
token=token,
repo_type="dataset",
private=private,
exist_ok=True,
)
repo_id = repo_url.repo_id
if revision is not None and not revision.startswith("refs/pr/"):
# We do not call create_branch for a PR reference: 400 Bad Request
api.create_branch(repo_id, branch=revision, token=token, repo_type="dataset", exist_ok=True)
if not data_dir:
data_dir = config_name if config_name != "default" else "data" # for backward compatibility
additions = []
for split in self.keys():
logger.info(f"Pushing split {split} to the Hub.")
# The split=key needs to be removed before merging
split_additions, uploaded_size, dataset_nbytes = self[split]._push_parquet_shards_to_hub(
repo_id,
data_dir=data_dir,
split=split,
token=token,
revision=revision,
create_pr=create_pr,
max_shard_size=max_shard_size,
num_shards=num_shards.get(split),
embed_external_files=embed_external_files,
)
additions += split_additions
total_uploaded_size += uploaded_size
total_dataset_nbytes += dataset_nbytes
info_to_dump.splits[split] = SplitInfo(str(split), num_bytes=dataset_nbytes, num_examples=len(self[split]))
info_to_dump.download_checksums = None
info_to_dump.download_size = total_uploaded_size
info_to_dump.dataset_size = total_dataset_nbytes
info_to_dump.size_in_bytes = total_uploaded_size + total_dataset_nbytes
# Check if the repo already has a README.md and/or a dataset_infos.json to update them with the new split info (size and pattern)
# and delete old split shards (if they exist)
repo_with_dataset_card, repo_with_dataset_infos = False, False
repo_splits = [] # use a list to keep the order of the splits
deletions = []
repo_files_to_add = [addition.path_in_repo for addition in additions]
for repo_file in api.list_repo_tree(
repo_id=repo_id, revision=revision, repo_type="dataset", token=token, recursive=True
):
if not isinstance(repo_file, RepoFile):
continue
if repo_file.rfilename == config.REPOCARD_FILENAME:
repo_with_dataset_card = True
elif repo_file.rfilename == config.DATASETDICT_INFOS_FILENAME:
repo_with_dataset_infos = True
elif (
repo_file.rfilename.startswith(tuple(f"{data_dir}/{split}-" for split in self.keys()))
and repo_file.rfilename not in repo_files_to_add
):
deletions.append(CommitOperationDelete(path_in_repo=repo_file.rfilename))
elif fnmatch.fnmatch(
repo_file.rfilename, PUSH_TO_HUB_WITHOUT_METADATA_CONFIGS_SPLIT_PATTERN_SHARDED.replace("{split}", "*")
):
repo_split = string_to_dict(
repo_file.rfilename,
glob_pattern_to_regex(PUSH_TO_HUB_WITHOUT_METADATA_CONFIGS_SPLIT_PATTERN_SHARDED),
)["split"]
if repo_split not in repo_splits:
repo_splits.append(repo_split)
# get the info from the README to update them
if repo_with_dataset_card:
dataset_card_path = api.hf_hub_download(
repo_id, config.REPOCARD_FILENAME, repo_type="dataset", revision=revision
)
dataset_card = DatasetCard.load(Path(dataset_card_path))
dataset_card_data = dataset_card.data
metadata_configs = MetadataConfigs.from_dataset_card_data(dataset_card_data)
# get the deprecated dataset_infos.json to update them
elif repo_with_dataset_infos:
dataset_card = None
dataset_card_data = DatasetCardData()
metadata_configs = MetadataConfigs()
else:
dataset_card = None
dataset_card_data = DatasetCardData()
metadata_configs = MetadataConfigs()
# create the metadata configs if it was uploaded with push_to_hub before metadata configs existed
if not metadata_configs and repo_splits:
default_metadata_configs_to_dump = {
"data_files": [{"split": split, "path": f"data/{split}-*"} for split in repo_splits]
}
MetadataConfigs({"default": default_metadata_configs_to_dump}).to_dataset_card_data(dataset_card_data)
metadata_config_to_dump = {
"data_files": [{"split": split, "path": f"{data_dir}/{split}-*"} for split in self.keys()],
}
if set_default and config_name != "default":
if metadata_configs:
default_config_name = metadata_configs.get_default_config_name()
if default_config_name == "default":
raise ValueError(
"There exists a configuration named 'default'. To set a different configuration as default, "
"rename the 'default' one first."
)
else:
_ = metadata_configs[default_config_name].pop("default")
metadata_config_to_dump["default"] = True
# push to the deprecated dataset_infos.json
if repo_with_dataset_infos:
dataset_infos_path = api.hf_hub_download(
repo_id, config.DATASETDICT_INFOS_FILENAME, repo_type="dataset", revision=revision
)
with open(dataset_infos_path, encoding="utf-8") as f:
dataset_infos: dict = json.load(f)
dataset_infos[config_name] = asdict(info_to_dump)
buffer = BytesIO()
buffer.write(json.dumps(dataset_infos, indent=4).encode("utf-8"))
additions.append(
CommitOperationAdd(path_in_repo=config.DATASETDICT_INFOS_FILENAME, path_or_fileobj=buffer)
)
# push to README
DatasetInfosDict({config_name: info_to_dump}).to_dataset_card_data(dataset_card_data)
MetadataConfigs({config_name: metadata_config_to_dump}).to_dataset_card_data(dataset_card_data)
dataset_card = DatasetCard(f"---\n{dataset_card_data}\n---\n") if dataset_card is None else dataset_card
additions.append(
CommitOperationAdd(path_in_repo=config.REPOCARD_FILENAME, path_or_fileobj=str(dataset_card).encode())
)
commit_message = commit_message if commit_message is not None else "Upload dataset"
if len(additions) <= config.UPLOADS_MAX_NUMBER_PER_COMMIT:
commit_info = api.create_commit(
repo_id,
operations=additions + deletions,
commit_message=commit_message,
commit_description=commit_description,
token=token,
repo_type="dataset",
revision=revision,
create_pr=create_pr,
)
else:
logger.info(
f"Number of files to upload is larger than {config.UPLOADS_MAX_NUMBER_PER_COMMIT}. Splitting the push into multiple commits."
)
num_commits = math.ceil(len(additions) / config.UPLOADS_MAX_NUMBER_PER_COMMIT)
for i in range(0, num_commits):
operations = additions[
i * config.UPLOADS_MAX_NUMBER_PER_COMMIT : (i + 1) * config.UPLOADS_MAX_NUMBER_PER_COMMIT
] + (deletions if i == 0 else [])
commit_info = api.create_commit(
repo_id,
operations=operations,
commit_message=commit_message + f" (part {i:05d}-of-{num_commits:05d})",
commit_description=commit_description,
token=token,
repo_type="dataset",
revision=revision,
create_pr=create_pr,
)
logger.info(
f"Commit #{i + 1} completed"
+ (f" (still {num_commits - i - 1} to go)" if num_commits - i - 1 else "")
+ "."
)
return commit_info
class IterableDatasetDict(dict):
def __repr__(self):
repr = "\n".join([f"{k}: {v}" for k, v in self.items()])
repr = re.sub(r"^", " " * 4, repr, 0, re.M)
return f"IterableDatasetDict({{\n{repr}\n}})"
def with_format(
self,
type: Optional[str] = None,
) -> "IterableDatasetDict":
"""
Return a dataset with the specified format.
Args:
type (`str`, *optional*):
The output type to select from `[None, 'numpy', 'torch', 'tensorflow', 'jax', 'arrow', 'pandas', 'polars']`.
`None` means it returns python objects (default).
Example:
```py
>>> from datasets import load_dataset
>>> from transformers import AutoTokenizer
>>> ds = load_dataset("rotten_tomatoes", split="validation", streaming=True)
>>> tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
>>> ds = ds.map(lambda x: tokenizer(x['text'], truncation=True, padding=True), batched=True)
>>> ds = ds.with_format("torch")
>>> next(iter(ds))
{'text': 'compassionately explores the seemingly irreconcilable situation between conservative christian parents and their estranged gay and lesbian children .',
'label': tensor(1),
'input_ids': tensor([ 101, 18027, 16310, 16001, 1103, 9321, 178, 11604, 7235, 6617,
1742, 2165, 2820, 1206, 6588, 22572, 12937, 1811, 2153, 1105,
1147, 12890, 19587, 6463, 1105, 15026, 1482, 119, 102, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0]),
'token_type_ids': tensor([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]),
'attention_mask': tensor([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])}
```
"""
return IterableDatasetDict({k: dataset.with_format(type=type) for k, dataset in self.items()})
def map(
self,
function: Optional[Callable] = None,
with_indices: bool = False,
input_columns: Optional[Union[str, List[str]]] = None,
batched: bool = False,
batch_size: int = 1000,
drop_last_batch: bool = False,
remove_columns: Optional[Union[str, List[str]]] = None,
fn_kwargs: Optional[dict] = None,
) -> "IterableDatasetDict":
"""
Apply a function to all the examples in the iterable dataset (individually or in batches) and update them.
If your function returns a column that already exists, then it overwrites it.
The function is applied on-the-fly on the examples when iterating over the dataset.
The transformation is applied to all the datasets of the dataset dictionary.
You can specify whether the function should be batched or not with the `batched` parameter:
- If batched is `False`, then the function takes 1 example in and should return 1 example.
An example is a dictionary, e.g. `{"text": "Hello there !"}`.
- If batched is `True` and `batch_size` is 1, then the function takes a batch of 1 example as input and can return a batch with 1 or more examples.
A batch is a dictionary, e.g. a batch of 1 example is `{"text": ["Hello there !"]}`.
- If batched is `True` and `batch_size` is `n` > 1, then the function takes a batch of `n` examples as input and can return a batch with `n` examples, or with an arbitrary number of examples.
Note that the last batch may have less than `n` examples.
A batch is a dictionary, e.g. a batch of `n` examples is `{"text": ["Hello there !"] * n}`.
Args:
function (`Callable`, *optional*, defaults to `None`):
Function applied on-the-fly on the examples when you iterate on the dataset.
It must have one of the following signatures:
- `function(example: Dict[str, Any]) -> Dict[str, Any]` if `batched=False` and `with_indices=False`
- `function(example: Dict[str, Any], idx: int) -> Dict[str, Any]` if `batched=False` and `with_indices=True`
- `function(batch: Dict[str, List]) -> Dict[str, List]` if `batched=True` and `with_indices=False`
- `function(batch: Dict[str, List], indices: List[int]) -> Dict[str, List]` if `batched=True` and `with_indices=True`
For advanced usage, the function can also return a `pyarrow.Table`.
Moreover if your function returns nothing (`None`), then `map` will run your function and return the dataset unchanged.
If no function is provided, it defaults to the identity function: `lambda x: x`.
with_indices (`bool`, defaults to `False`):
Provide example indices to `function`. Note that in this case the signature of `function` should be `def function(example, idx[, rank]): ...`.
input_columns (`[Union[str, List[str]]]`, *optional*, defaults to `None`):
The columns to be passed into `function`
as positional arguments. If `None`, a dict mapping to all formatted columns is passed as one argument.
batched (`bool`, defaults to `False`):
Provide batch of examples to `function`.
batch_size (`int`, *optional*, defaults to `1000`):
Number of examples per batch provided to `function` if `batched=True`.
drop_last_batch (`bool`, defaults to `False`):
Whether a last batch smaller than the `batch_size` should be
dropped instead of being processed by the function.
remove_columns (`[List[str]]`, *optional*, defaults to `None`):
Remove a selection of columns while doing the mapping.
Columns will be removed before updating the examples with the output of `function`, i.e. if `function` is adding
columns with names in `remove_columns`, these columns will be kept.
fn_kwargs (`Dict`, *optional*, defaults to `None`):
Keyword arguments to be passed to `function`
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", streaming=True)
>>> def add_prefix(example):
... example["text"] = "Review: " + example["text"]
... return example
>>> ds = ds.map(add_prefix)
>>> next(iter(ds["train"]))
{'label': 1,
'text': 'Review: the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'}
```
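A batched sketch (illustrative only; assumes a `transformers` tokenizer is available, as in the other tokenizer examples in this file):
```py
>>> from transformers import AutoTokenizer
>>> tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
>>> ds = load_dataset("rotten_tomatoes", streaming=True)
>>> ds = ds.map(lambda batch: tokenizer(batch["text"], truncation=True), batched=True, batch_size=32)
>>> # each example now also carries 'input_ids', 'token_type_ids' and 'attention_mask'
```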
"""
return IterableDatasetDict(
{
k: dataset.map(
function=function,
with_indices=with_indices,
input_columns=input_columns,
batched=batched,
batch_size=batch_size,
drop_last_batch=drop_last_batch,
remove_columns=remove_columns,
fn_kwargs=fn_kwargs,
)
for k, dataset in self.items()
}
)
def filter(
self,
function: Optional[Callable] = None,
with_indices=False,
input_columns: Optional[Union[str, List[str]]] = None,
batched: bool = False,
batch_size: Optional[int] = 1000,
fn_kwargs: Optional[dict] = None,
) -> "IterableDatasetDict":
"""Apply a filter function to all the elements so that the dataset only includes examples according to the filter function.
The filtering is done on-the-fly when iterating over the dataset.
The filtering is applied to all the datasets of the dataset dictionary.
Args:
function (`Callable`):
Callable with one of the following signatures:
- `function(example: Dict[str, Any]) -> bool` if `with_indices=False, batched=False`
- `function(example: Dict[str, Any], indices: int) -> bool` if `with_indices=True, batched=False`
- `function(batch: Dict[str, List]) -> List[bool]` if `with_indices=False, batched=True`
- `function(batch: Dict[str, List], indices: List[int]) -> List[bool]` if `with_indices=True, batched=True`
If no function is provided, defaults to an always True function: `lambda x: True`.
with_indices (`bool`, defaults to `False`):
Provide example indices to `function`. Note that in this case the signature of `function` should be `def function(example, idx): ...`.
input_columns (`str` or `List[str]`, *optional*):
The columns to be passed into `function` as
positional arguments. If `None`, a dict mapping to all formatted columns is passed as one argument.
batched (`bool`, defaults to `False`):
Provide batch of examples to `function`
batch_size (`int`, *optional*, defaults to `1000`):
Number of examples per batch provided to `function` if `batched=True`.
fn_kwargs (`Dict`, *optional*, defaults to `None`):
Keyword arguments to be passed to `function`
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", streaming=True)
>>> ds = ds.filter(lambda x: x["label"] == 0)
>>> list(ds["train"].take(3))
[{'label': 0, 'text': 'Review: simplistic , silly and tedious .'},
{'label': 0,
'text': "Review: it's so laddish and juvenile , only teenage boys could possibly find it funny ."},
{'label': 0,
'text': 'Review: exploitative and largely devoid of the depth or sophistication that would make watching such a graphic treatment of the crimes bearable .'}]
```
"""
return IterableDatasetDict(
{
k: dataset.filter(
function=function,
with_indices=with_indices,
input_columns=input_columns,
batched=batched,
batch_size=batch_size,
fn_kwargs=fn_kwargs,
)
for k, dataset in self.items()
}
)
def shuffle(
self, seed=None, generator: Optional[np.random.Generator] = None, buffer_size: int = 1000
) -> "IterableDatasetDict":
"""
Randomly shuffles the elements of this dataset.
The shuffling is applied to all the datasets of the dataset dictionary.
This dataset fills a buffer with buffer_size elements, then randomly samples elements from this buffer,
replacing the selected elements with new elements. For perfect shuffling, a buffer size greater than or
equal to the full size of the dataset is required.
For instance, if your dataset contains 10,000 elements but `buffer_size` is set to 1000, then `shuffle` will
initially select a random element from only the first 1000 elements in the buffer. Once an element is
selected, its space in the buffer is replaced by the next (i.e. 1,001-st) element,
maintaining the 1000 element buffer.
If the dataset is made of several shards, it also shuffles the order of the shards.
However, if the order has been fixed by using [`~datasets.IterableDataset.skip`] or [`~datasets.IterableDataset.take`],
then the order of the shards is kept unchanged.
Args:
seed (`int`, *optional*, defaults to `None`):
Random seed that will be used to shuffle the dataset.
It is used to sample from the shuffle buffer and also to shuffle the data shards.
generator (`numpy.random.Generator`, *optional*):
Numpy random Generator to use to compute the permutation of the dataset rows.
If `generator=None` (default), uses `np.random.default_rng` (the default BitGenerator (PCG64) of NumPy).
buffer_size (`int`, defaults to `1000`):
Size of the buffer.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", streaming=True)
>>> list(ds["train"].take(3))
[{'label': 1,
'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'},
{'label': 1,
'text': 'the gorgeously elaborate continuation of " the lord of the rings " trilogy is so huge that a column of words cannot adequately describe co-writer/director peter jackson\'s expanded vision of j . r . r . tolkien\'s middle-earth .'},
{'label': 1, 'text': 'effective but too-tepid biopic'}]
>>> ds = ds.shuffle(seed=42)
>>> list(ds["train"].take(3))
[{'label': 1,
'text': "a sports movie with action that's exciting on the field and a story you care about off it ."},
{'label': 1,
'text': 'at its best , the good girl is a refreshingly adult take on adultery . . .'},
{'label': 1,
'text': "sam jones became a very lucky filmmaker the day wilco got dropped from their record label , proving that one man's ruin may be another's fortune ."}]
```
"""
return IterableDatasetDict(
{
k: dataset.shuffle(seed=seed, generator=generator, buffer_size=buffer_size)
for k, dataset in self.items()
}
)
def rename_column(self, original_column_name: str, new_column_name: str) -> "IterableDatasetDict":
"""
Rename a column in the dataset, and move the features associated to the original column under the new column
name.
The renaming is applied to all the datasets of the dataset dictionary.
Args:
original_column_name (`str`):
Name of the column to rename.
new_column_name (`str`):
New name for the column.
Returns:
[`IterableDatasetDict`]: A copy of the dataset with a renamed column.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", streaming=True)
>>> ds = ds.rename_column("text", "movie_review")
>>> next(iter(ds["train"]))
{'label': 1,
'movie_review': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'}
```
"""
return IterableDatasetDict(
{
k: dataset.rename_column(original_column_name=original_column_name, new_column_name=new_column_name)
for k, dataset in self.items()
}
)
def rename_columns(self, column_mapping: Dict[str, str]) -> "IterableDatasetDict":
"""
Rename several columns in the dataset, and move the features associated to the original columns under
the new column names.
The renaming is applied to all the datasets of the dataset dictionary.
Args:
column_mapping (`Dict[str, str]`):
A mapping of columns to rename to their new names.
Returns:
[`IterableDatasetDict`]: A copy of the dataset with renamed columns
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", streaming=True)
>>> ds = ds.rename_columns({"text": "movie_review", "label": "rating"})
>>> next(iter(ds["train"]))
{'movie_review': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .',
'rating': 1}
```
"""
return IterableDatasetDict(
{k: dataset.rename_columns(column_mapping=column_mapping) for k, dataset in self.items()}
)
def remove_columns(self, column_names: Union[str, List[str]]) -> "IterableDatasetDict":
"""
Remove one or several column(s) in the dataset and the features associated to them.
The removal is done on-the-fly on the examples when iterating over the dataset.
The removal is applied to all the datasets of the dataset dictionary.
Args:
column_names (`Union[str, List[str]]`):
Name of the column(s) to remove.
Returns:
[`IterableDatasetDict`]: A copy of the dataset object without the columns to remove.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", streaming=True)
>>> ds = ds.remove_columns("label")
>>> next(iter(ds["train"]))
{'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'}
```
"""
return IterableDatasetDict({k: dataset.remove_columns(column_names) for k, dataset in self.items()})
def select_columns(self, column_names: Union[str, List[str]]) -> "IterableDatasetDict":
"""Select one or several column(s) in the dataset and the features
associated to them. The selection is done on-the-fly on the examples
when iterating over the dataset. The selection is applied to all the
datasets of the dataset dictionary.
Args:
column_names (`Union[str, List[str]]`):
Name of the column(s) to keep.
Returns:
[`IterableDatasetDict`]: A copy of the dataset object with only selected columns.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", streaming=True)
>>> ds = ds.select("text")
>>> next(iter(ds["train"]))
{'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'}
```
"""
return IterableDatasetDict({k: dataset.select_columns(column_names) for k, dataset in self.items()})
def cast_column(self, column: str, feature: FeatureType) -> "IterableDatasetDict":
"""Cast column to feature for decoding.
The type casting is applied to all the datasets of the dataset dictionary.
Args:
column (`str`):
Column name.
feature ([`Feature`]):
Target feature.
Returns:
[`IterableDatasetDict`]
Example:
```py
>>> from datasets import load_dataset, ClassLabel
>>> ds = load_dataset("rotten_tomatoes", streaming=True)
>>> ds["train"].features
{'label': ClassLabel(names=['neg', 'pos'], id=None),
'text': Value(dtype='string', id=None)}
>>> ds = ds.cast_column('label', ClassLabel(names=['bad', 'good']))
>>> ds["train"].features
{'label': ClassLabel(names=['bad', 'good'], id=None),
'text': Value(dtype='string', id=None)}
```
"""
return IterableDatasetDict(
{k: dataset.cast_column(column=column, feature=feature) for k, dataset in self.items()}
)
def cast(
self,
features: Features,
) -> "IterableDatasetDict":
"""
Cast the dataset to a new set of features.
The type casting is applied to all the datasets of the dataset dictionary.
Args:
features (`Features`):
New features to cast the dataset to.
The name of the fields in the features must match the current column names.
The type of the data must also be convertible from one type to the other.
For non-trivial conversion, e.g. `string` <-> `ClassLabel` you should use [`map`] to update the Dataset.
Returns:
[`IterableDatasetDict`]: A copy of the dataset with casted features.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", streaming=True)
>>> ds["train"].features
{'label': ClassLabel(names=['neg', 'pos'], id=None),
'text': Value(dtype='string', id=None)}
>>> new_features = ds["train"].features.copy()
>>> new_features['label'] = ClassLabel(names=['bad', 'good'])
>>> new_features['text'] = Value('large_string')
>>> ds = ds.cast(new_features)
>>> ds["train"].features
{'label': ClassLabel(names=['bad', 'good'], id=None),
'text': Value(dtype='large_string', id=None)}
```
"""
return IterableDatasetDict({k: dataset.cast(features=features) for k, dataset in self.items()})
| datasets/src/datasets/dataset_dict.py/0 | {
"file_path": "datasets/src/datasets/dataset_dict.py",
"repo_id": "datasets",
"token_count": 47816
} |
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TableFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
logger = logging.get_logger(__name__)
_FORMAT_TYPES: Dict[Optional[str], Type[Formatter]] = {}
_FORMAT_TYPES_ALIASES: Dict[Optional[str], str] = {}
_FORMAT_TYPES_ALIASES_UNAVAILABLE: Dict[Optional[str], Exception] = {}
def _register_formatter(
formatter_cls: type,
format_type: Optional[str],
aliases: Optional[List[str]] = None,
):
"""
Register a Formatter object using a name and optional aliases.
This function must be used on a Formatter class.
"""
aliases = aliases if aliases is not None else []
if format_type in _FORMAT_TYPES:
logger.warning(
f"Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})"
)
_FORMAT_TYPES[format_type] = formatter_cls
for alias in set(aliases + [format_type]):
if alias in _FORMAT_TYPES_ALIASES:
logger.warning(
f"Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})"
)
_FORMAT_TYPES_ALIASES[alias] = format_type
def _register_unavailable_formatter(
unavailable_error: Exception, format_type: Optional[str], aliases: Optional[List[str]] = None
):
"""
Register an unavailable Formatter object using a name and optional aliases.
This function must be used on an Exception object that is raised when trying to get the unavailable formatter.
"""
aliases = aliases if aliases is not None else []
for alias in set(aliases + [format_type]):
_FORMAT_TYPES_ALIASES_UNAVAILABLE[alias] = unavailable_error
# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=["python"])
_register_formatter(ArrowFormatter, "arrow", aliases=["pa", "pyarrow"])
_register_formatter(NumpyFormatter, "numpy", aliases=["np"])
_register_formatter(PandasFormatter, "pandas", aliases=["pd"])
_register_formatter(CustomFormatter, "custom")
if config.POLARS_AVAILABLE:
from .polars_formatter import PolarsFormatter
_register_formatter(PolarsFormatter, "polars", aliases=["pl"])
else:
_polars_error = ValueError("Polars needs to be installed to be able to return Polars dataframes.")
_register_unavailable_formatter(_polars_error, "polars", aliases=["pl"])
if config.TORCH_AVAILABLE:
from .torch_formatter import TorchFormatter
_register_formatter(TorchFormatter, "torch", aliases=["pt", "pytorch"])
else:
_torch_error = ValueError("PyTorch needs to be installed to be able to return PyTorch tensors.")
_register_unavailable_formatter(_torch_error, "torch", aliases=["pt", "pytorch"])
if config.TF_AVAILABLE:
from .tf_formatter import TFFormatter
_register_formatter(TFFormatter, "tensorflow", aliases=["tf"])
else:
_tf_error = ValueError("Tensorflow needs to be installed to be able to return Tensorflow tensors.")
_register_unavailable_formatter(_tf_error, "tensorflow", aliases=["tf"])
if config.JAX_AVAILABLE:
from .jax_formatter import JaxFormatter
_register_formatter(JaxFormatter, "jax", aliases=[])
else:
_jax_error = ValueError("JAX needs to be installed to be able to return JAX arrays.")
_register_unavailable_formatter(_jax_error, "jax", aliases=[])
def get_format_type_from_alias(format_type: Optional[str]) -> Optional[str]:
"""If the given format type is a known alias, then return its main type name. Otherwise return the type with no change."""
if format_type in _FORMAT_TYPES_ALIASES:
return _FORMAT_TYPES_ALIASES[format_type]
else:
return format_type
def get_formatter(format_type: Optional[str], **format_kwargs) -> Formatter:
"""
Factory function to get a Formatter given its type name and keyword arguments.
    A formatter is an object that extracts and formats data from a pyarrow Table.
    It defines the formatting for rows, columns and batches.
If the formatter for a given type name doesn't exist or is not available, an error is raised.
"""
format_type = get_format_type_from_alias(format_type)
if format_type in _FORMAT_TYPES:
return _FORMAT_TYPES[format_type](**format_kwargs)
if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
else:
raise ValueError(f"Format type should be one of {list(_FORMAT_TYPES.keys())}, but got '{format_type}'")
| datasets/src/datasets/formatting/__init__.py/0 | {
"file_path": "datasets/src/datasets/formatting/__init__.py",
"repo_id": "datasets",
"token_count": 1900
} |
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class SparkDatasetReader(AbstractDatasetReader):
"""A dataset reader that reads from a Spark DataFrame.
When caching, cache materialization is parallelized over Spark; an NFS that is accessible to the driver must be
provided. Streaming is not currently supported.
"""
def __init__(
self,
df: pyspark.sql.DataFrame,
split: Optional[NamedSplit] = None,
features: Optional[Features] = None,
streaming: bool = True,
cache_dir: str = None,
keep_in_memory: bool = False,
working_dir: str = None,
load_from_cache_file: bool = True,
file_format: str = "arrow",
**kwargs,
):
super().__init__(
split=split,
features=features,
cache_dir=cache_dir,
keep_in_memory=keep_in_memory,
streaming=streaming,
**kwargs,
)
self._load_from_cache_file = load_from_cache_file
self._file_format = file_format
self.builder = Spark(
df=df,
features=features,
cache_dir=cache_dir,
working_dir=working_dir,
**kwargs,
)
def read(self):
if self.streaming:
return self.builder.as_streaming_dataset(split=self.split)
download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
self.builder.download_and_prepare(
download_mode=download_mode,
file_format=self._file_format,
)
return self.builder.as_dataset(split=self.split)
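# Hedged usage sketch (illustrative, not part of the module): how this reader is typically
# driven. In practice the public entry point is `Dataset.from_spark`, which delegates to
# SparkDatasetReader; the local SparkSession and column names below are assumptions made
# purely for the example.
if __name__ == "__main__":
    from pyspark.sql import SparkSession

    spark = SparkSession.builder.master("local[1]").appName("reader-example").getOrCreate()
    df = spark.createDataFrame([("hello", 0), ("world", 1)], schema=["text", "label"])
    # streaming=False materializes the DataFrame as Arrow files under cache_dir
    ds = SparkDatasetReader(df, streaming=False, cache_dir="./spark_cache").read()
    print(ds)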
| datasets/src/datasets/io/spark.py/0 | {
"file_path": "datasets/src/datasets/io/spark.py",
"repo_id": "datasets",
"token_count": 787
} |
from typing import Any, Dict, List, Optional, Union
from huggingface_hub.utils import get_session
from .. import config
from ..exceptions import DatasetsError
from .file_utils import (
get_authentication_headers_for_url,
)
from .logging import get_logger
logger = get_logger(__name__)
class DatasetViewerError(DatasetsError):
"""Dataset viewer error.
    Raised when the dataset viewer HTTP API is used to access:
    - a missing dataset,
    - a private/gated dataset for which the user is not authenticated, or
    - an unavailable /parquet or /info response.
"""
def get_exported_parquet_files(
dataset: str, commit_hash: str, token: Optional[Union[str, bool]]
) -> List[Dict[str, Any]]:
"""
Get the dataset exported parquet files
Docs: https://huggingface.co/docs/datasets-server/parquet
"""
dataset_viewer_parquet_url = config.HF_ENDPOINT.replace("://", "://datasets-server.") + "/parquet?dataset="
try:
parquet_data_files_response = get_session().get(
url=dataset_viewer_parquet_url + dataset,
headers=get_authentication_headers_for_url(config.HF_ENDPOINT + f"datasets/{dataset}", token=token),
timeout=100.0,
)
parquet_data_files_response.raise_for_status()
if "X-Revision" in parquet_data_files_response.headers:
if parquet_data_files_response.headers["X-Revision"] == commit_hash or commit_hash is None:
parquet_data_files_response_json = parquet_data_files_response.json()
if (
parquet_data_files_response_json.get("partial") is False
and not parquet_data_files_response_json.get("pending", True)
and not parquet_data_files_response_json.get("failed", True)
and "parquet_files" in parquet_data_files_response_json
):
return parquet_data_files_response_json["parquet_files"]
else:
logger.debug(f"Parquet export for {dataset} is not completely ready yet.")
else:
logger.debug(
f"Parquet export for {dataset} is available but outdated (commit_hash='{parquet_data_files_response.headers['X-Revision']}')"
)
    except Exception as e:  # noqa: catch any exception from the dataset viewer API and assume the parquet export doesn't exist
logger.debug(f"No parquet export for {dataset} available ({type(e).__name__}: {e})")
raise DatasetViewerError("No exported Parquet files available.")
def get_exported_dataset_infos(
dataset: str, commit_hash: str, token: Optional[Union[str, bool]]
) -> Dict[str, Dict[str, Any]]:
"""
Get the dataset information, can be useful to get e.g. the dataset features.
Docs: https://huggingface.co/docs/datasets-server/info
"""
dataset_viewer_info_url = config.HF_ENDPOINT.replace("://", "://datasets-server.") + "/info?dataset="
try:
info_response = get_session().get(
url=dataset_viewer_info_url + dataset,
headers=get_authentication_headers_for_url(config.HF_ENDPOINT + f"datasets/{dataset}", token=token),
timeout=100.0,
)
info_response.raise_for_status()
if "X-Revision" in info_response.headers:
if info_response.headers["X-Revision"] == commit_hash or commit_hash is None:
info_response = info_response.json()
if (
info_response.get("partial") is False
and not info_response.get("pending", True)
and not info_response.get("failed", True)
and "dataset_info" in info_response
):
return info_response["dataset_info"]
else:
logger.debug(f"Dataset info for {dataset} is not completely ready yet.")
else:
logger.debug(
f"Dataset info for {dataset} is available but outdated (commit_hash='{info_response.headers['X-Revision']}')"
)
    except Exception as e:  # noqa: catch any exception from the dataset viewer API and assume the dataset info doesn't exist
logger.debug(f"No dataset info for {dataset} available ({type(e).__name__}: {e})")
raise DatasetViewerError("No exported dataset infos available.")
| datasets/src/datasets/utils/_dataset_viewer.py/0 | {
"file_path": "datasets/src/datasets/utils/_dataset_viewer.py",
"repo_id": "datasets",
"token_count": 1901
} |
{
"language": [
"found",
"crowdsourced",
"expert-generated",
"machine-generated",
"other"
],
"annotations": [
"found",
"crowdsourced",
"expert-generated",
"machine-generated",
"no-annotation",
"other"
]
}
| datasets/src/datasets/utils/resources/creators.json/0 | {
"file_path": "datasets/src/datasets/utils/resources/creators.json",
"repo_id": "datasets",
"token_count": 119
} |
import os
import random
import tempfile
import unittest
import numpy as np
import pandas as pd
import pyarrow as pa
import pytest
from absl.testing import parameterized
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features import Array2D, Array3D, Array4D, Array5D, Value
from datasets.features.features import Array3DExtensionType, PandasArrayExtensionDtype, _ArrayXD
from datasets.formatting.formatting import NumpyArrowExtractor, SimpleArrowExtractor
SHAPE_TEST_1 = (30, 487)
SHAPE_TEST_2 = (36, 1024)
SHAPE_TEST_3 = (None, 100)
SPEED_TEST_SHAPE = (100, 100)
SPEED_TEST_N_EXAMPLES = 100
DEFAULT_FEATURES = datasets.Features(
{
"text": Array2D(SHAPE_TEST_1, dtype="float32"),
"image": Array2D(SHAPE_TEST_2, dtype="float32"),
"dynamic": Array2D(SHAPE_TEST_3, dtype="float32"),
}
)
def generate_examples(features: dict, num_examples=100, seq_shapes=None):
dummy_data = []
seq_shapes = seq_shapes or {}
for i in range(num_examples):
example = {}
for col_id, (k, v) in enumerate(features.items()):
if isinstance(v, _ArrayXD):
if k == "dynamic":
first_dim = random.randint(1, 3)
data = np.random.rand(first_dim, *v.shape[1:]).astype(v.dtype)
else:
data = np.random.rand(*v.shape).astype(v.dtype)
elif isinstance(v, datasets.Value):
data = "foo"
elif isinstance(v, datasets.Sequence):
while isinstance(v, datasets.Sequence):
v = v.feature
shape = seq_shapes[k]
data = np.random.rand(*shape).astype(v.dtype)
example[k] = data
dummy_data.append((i, example))
return dummy_data
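# Hedged note (illustrative, not used by the tests): for DEFAULT_FEATURES above, each generated
# example is a dict such as
#   {"text": np.ndarray of shape (30, 487), "image": np.ndarray of shape (36, 1024),
#    "dynamic": np.ndarray of shape (k, 100) with k drawn from random.randint(1, 3)}
# paired with its integer key, so generate_examples(DEFAULT_FEATURES, num_examples=2)
# returns [(0, example_0), (1, example_1)].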
class ExtensionTypeCompatibilityTest(unittest.TestCase):
def test_array2d_nonspecific_shape(self):
with tempfile.TemporaryDirectory() as tmp_dir:
my_features = DEFAULT_FEATURES.copy()
with ArrowWriter(features=my_features, path=os.path.join(tmp_dir, "beta.arrow")) as writer:
for key, record in generate_examples(
features=my_features,
num_examples=1,
):
example = my_features.encode_example(record)
writer.write(example)
num_examples, num_bytes = writer.finalize()
dataset = datasets.Dataset.from_file(os.path.join(tmp_dir, "beta.arrow"))
dataset.set_format("numpy")
row = dataset[0]
first_shape = row["image"].shape
second_shape = row["text"].shape
self.assertTrue(first_shape is not None and second_shape is not None, "need atleast 2 different shapes")
self.assertEqual(len(first_shape), len(second_shape), "both shapes are supposed to be equal length")
self.assertNotEqual(first_shape, second_shape, "shapes must not be the same")
del dataset
def test_multiple_extensions_same_row(self):
with tempfile.TemporaryDirectory() as tmp_dir:
my_features = DEFAULT_FEATURES.copy()
with ArrowWriter(features=my_features, path=os.path.join(tmp_dir, "beta.arrow")) as writer:
for key, record in generate_examples(features=my_features, num_examples=1):
example = my_features.encode_example(record)
writer.write(example)
num_examples, num_bytes = writer.finalize()
dataset = datasets.Dataset.from_file(os.path.join(tmp_dir, "beta.arrow"))
dataset.set_format("numpy")
row = dataset[0]
first_len = len(row["image"].shape)
second_len = len(row["text"].shape)
third_len = len(row["dynamic"].shape)
self.assertEqual(first_len, 2, "use a sequence type if dim is < 2")
self.assertEqual(second_len, 2, "use a sequence type if dim is < 2")
self.assertEqual(third_len, 2, "use a sequence type if dim is < 2")
del dataset
def test_compatability_with_string_values(self):
with tempfile.TemporaryDirectory() as tmp_dir:
my_features = DEFAULT_FEATURES.copy()
my_features["image_id"] = datasets.Value("string")
with ArrowWriter(features=my_features, path=os.path.join(tmp_dir, "beta.arrow")) as writer:
for key, record in generate_examples(features=my_features, num_examples=1):
example = my_features.encode_example(record)
writer.write(example)
num_examples, num_bytes = writer.finalize()
dataset = datasets.Dataset.from_file(os.path.join(tmp_dir, "beta.arrow"))
self.assertIsInstance(dataset[0]["image_id"], str, "image id must be of type string")
del dataset
def test_extension_indexing(self):
with tempfile.TemporaryDirectory() as tmp_dir:
my_features = DEFAULT_FEATURES.copy()
my_features["explicit_ext"] = Array2D((3, 3), dtype="float32")
with ArrowWriter(features=my_features, path=os.path.join(tmp_dir, "beta.arrow")) as writer:
for key, record in generate_examples(features=my_features, num_examples=1):
example = my_features.encode_example(record)
writer.write(example)
num_examples, num_bytes = writer.finalize()
dataset = datasets.Dataset.from_file(os.path.join(tmp_dir, "beta.arrow"))
dataset.set_format("numpy")
data = dataset[0]["explicit_ext"]
self.assertIsInstance(data, np.ndarray, "indexed extension must return numpy.ndarray")
del dataset
def get_array_feature_types():
shape_1 = [3] * 5
shape_2 = [3, 4, 5, 6, 7]
return [
{
"testcase_name": f"{d}d",
"array_feature": array_feature,
"shape_1": tuple(shape_1[:d]),
"shape_2": tuple(shape_2[:d]),
}
for d, array_feature in zip(range(2, 6), [Array2D, Array3D, Array4D, Array5D])
]
@parameterized.named_parameters(get_array_feature_types())
class ArrayXDTest(unittest.TestCase):
def get_features(self, array_feature, shape_1, shape_2):
return datasets.Features(
{
"image": array_feature(shape_1, dtype="float32"),
"source": Value("string"),
"matrix": array_feature(shape_2, dtype="float32"),
}
)
def get_dict_example_0(self, shape_1, shape_2):
return {
"image": np.random.rand(*shape_1).astype("float32"),
"source": "foo",
"matrix": np.random.rand(*shape_2).astype("float32"),
}
def get_dict_example_1(self, shape_1, shape_2):
return {
"image": np.random.rand(*shape_1).astype("float32"),
"matrix": np.random.rand(*shape_2).astype("float32"),
"source": "bar",
}
def get_dict_examples(self, shape_1, shape_2):
return {
"image": np.random.rand(2, *shape_1).astype("float32").tolist(),
"source": ["foo", "bar"],
"matrix": np.random.rand(2, *shape_2).astype("float32").tolist(),
}
def _check_getitem_output_type(self, dataset, shape_1, shape_2, first_matrix):
matrix_column = dataset["matrix"]
self.assertIsInstance(matrix_column, list)
self.assertIsInstance(matrix_column[0], list)
self.assertIsInstance(matrix_column[0][0], list)
self.assertTupleEqual(np.array(matrix_column).shape, (2, *shape_2))
matrix_field_of_first_example = dataset[0]["matrix"]
self.assertIsInstance(matrix_field_of_first_example, list)
self.assertEqual(np.array(matrix_field_of_first_example).shape, shape_2)
np.testing.assert_array_equal(np.array(matrix_field_of_first_example), np.array(first_matrix))
matrix_field_of_first_two_examples = dataset[:2]["matrix"]
self.assertIsInstance(matrix_field_of_first_two_examples, list)
self.assertIsInstance(matrix_field_of_first_two_examples[0], list)
self.assertIsInstance(matrix_field_of_first_two_examples[0][0], list)
self.assertTupleEqual(np.array(matrix_field_of_first_two_examples).shape, (2, *shape_2))
with dataset.formatted_as("numpy"):
self.assertTupleEqual(dataset["matrix"].shape, (2, *shape_2))
self.assertEqual(dataset[0]["matrix"].shape, shape_2)
self.assertTupleEqual(dataset[:2]["matrix"].shape, (2, *shape_2))
with dataset.formatted_as("pandas"):
self.assertIsInstance(dataset["matrix"], pd.Series)
self.assertIsInstance(dataset[0]["matrix"], pd.Series)
self.assertIsInstance(dataset[:2]["matrix"], pd.Series)
self.assertTupleEqual(dataset["matrix"].to_numpy().shape, (2, *shape_2))
self.assertTupleEqual(dataset[0]["matrix"].to_numpy().shape, (1, *shape_2))
self.assertTupleEqual(dataset[:2]["matrix"].to_numpy().shape, (2, *shape_2))
def test_write(self, array_feature, shape_1, shape_2):
with tempfile.TemporaryDirectory() as tmp_dir:
my_features = self.get_features(array_feature, shape_1, shape_2)
my_examples = [
(0, self.get_dict_example_0(shape_1, shape_2)),
(1, self.get_dict_example_1(shape_1, shape_2)),
]
with ArrowWriter(features=my_features, path=os.path.join(tmp_dir, "beta.arrow")) as writer:
for key, record in my_examples:
example = my_features.encode_example(record)
writer.write(example)
num_examples, num_bytes = writer.finalize()
dataset = datasets.Dataset.from_file(os.path.join(tmp_dir, "beta.arrow"))
self._check_getitem_output_type(dataset, shape_1, shape_2, my_examples[0][1]["matrix"])
del dataset
def test_write_batch(self, array_feature, shape_1, shape_2):
with tempfile.TemporaryDirectory() as tmp_dir:
my_features = self.get_features(array_feature, shape_1, shape_2)
dict_examples = self.get_dict_examples(shape_1, shape_2)
dict_examples = my_features.encode_batch(dict_examples)
with ArrowWriter(features=my_features, path=os.path.join(tmp_dir, "beta.arrow")) as writer:
writer.write_batch(dict_examples)
num_examples, num_bytes = writer.finalize()
dataset = datasets.Dataset.from_file(os.path.join(tmp_dir, "beta.arrow"))
self._check_getitem_output_type(dataset, shape_1, shape_2, dict_examples["matrix"][0])
del dataset
def test_from_dict(self, array_feature, shape_1, shape_2):
dict_examples = self.get_dict_examples(shape_1, shape_2)
dataset = datasets.Dataset.from_dict(
dict_examples, features=self.get_features(array_feature, shape_1, shape_2)
)
self._check_getitem_output_type(dataset, shape_1, shape_2, dict_examples["matrix"][0])
del dataset
class ArrayXDDynamicTest(unittest.TestCase):
def get_one_col_dataset(self, first_dim_list, fixed_shape):
features = datasets.Features({"image": Array3D(shape=(None, *fixed_shape), dtype="float32")})
dict_values = {"image": [np.random.rand(fdim, *fixed_shape).astype("float32") for fdim in first_dim_list]}
dataset = datasets.Dataset.from_dict(dict_values, features=features)
return dataset
def get_two_col_datasset(self, first_dim_list, fixed_shape):
features = datasets.Features(
{"image": Array3D(shape=(None, *fixed_shape), dtype="float32"), "text": Value("string")}
)
dict_values = {
"image": [np.random.rand(fdim, *fixed_shape).astype("float32") for fdim in first_dim_list],
"text": ["text" for _ in first_dim_list],
}
dataset = datasets.Dataset.from_dict(dict_values, features=features)
return dataset
def test_to_pylist(self):
fixed_shape = (2, 2)
first_dim_list = [1, 3, 10]
dataset = self.get_one_col_dataset(first_dim_list, fixed_shape)
arr_xd = SimpleArrowExtractor().extract_column(dataset._data)
self.assertIsInstance(arr_xd.type, Array3DExtensionType)
pylist = arr_xd.to_pylist()
for first_dim, single_arr in zip(first_dim_list, pylist):
self.assertIsInstance(single_arr, list)
self.assertTupleEqual(np.array(single_arr).shape, (first_dim, *fixed_shape))
def test_to_numpy(self):
fixed_shape = (2, 2)
# ragged
first_dim_list = [1, 3, 10]
dataset = self.get_one_col_dataset(first_dim_list, fixed_shape)
arr_xd = SimpleArrowExtractor().extract_column(dataset._data)
self.assertIsInstance(arr_xd.type, Array3DExtensionType)
        # replace with arr_xd = arr_xd.combine_chunks() once PyArrow 12.0.0 is the minimum required version
arr_xd = arr_xd.type.wrap_array(pa.concat_arrays([chunk.storage for chunk in arr_xd.chunks]))
numpy_arr = arr_xd.to_numpy()
self.assertIsInstance(numpy_arr, np.ndarray)
self.assertEqual(numpy_arr.dtype, object)
for first_dim, single_arr in zip(first_dim_list, numpy_arr):
self.assertIsInstance(single_arr, np.ndarray)
self.assertTupleEqual(single_arr.shape, (first_dim, *fixed_shape))
# non-ragged
first_dim_list = [4, 4, 4]
dataset = self.get_one_col_dataset(first_dim_list, fixed_shape)
arr_xd = SimpleArrowExtractor().extract_column(dataset._data)
self.assertIsInstance(arr_xd.type, Array3DExtensionType)
        # replace with arr_xd = arr_xd.combine_chunks() once PyArrow 12.0.0 is the minimum required version
arr_xd = arr_xd.type.wrap_array(pa.concat_arrays([chunk.storage for chunk in arr_xd.chunks]))
numpy_arr = arr_xd.to_numpy()
self.assertIsInstance(numpy_arr, np.ndarray)
self.assertNotEqual(numpy_arr.dtype, object)
for first_dim, single_arr in zip(first_dim_list, numpy_arr):
self.assertIsInstance(single_arr, np.ndarray)
self.assertTupleEqual(single_arr.shape, (first_dim, *fixed_shape))
def test_iter_dataset(self):
fixed_shape = (2, 2)
first_dim_list = [1, 3, 10]
dataset = self.get_one_col_dataset(first_dim_list, fixed_shape)
for first_dim, ds_row in zip(first_dim_list, dataset):
single_arr = ds_row["image"]
self.assertIsInstance(single_arr, list)
self.assertTupleEqual(np.array(single_arr).shape, (first_dim, *fixed_shape))
def test_to_pandas(self):
fixed_shape = (2, 2)
# ragged
first_dim_list = [1, 3, 10]
dataset = self.get_one_col_dataset(first_dim_list, fixed_shape)
df = dataset.to_pandas()
self.assertEqual(type(df.image.dtype), PandasArrayExtensionDtype)
numpy_arr = df.image.to_numpy()
self.assertIsInstance(numpy_arr, np.ndarray)
self.assertEqual(numpy_arr.dtype, object)
for first_dim, single_arr in zip(first_dim_list, numpy_arr):
self.assertIsInstance(single_arr, np.ndarray)
self.assertTupleEqual(single_arr.shape, (first_dim, *fixed_shape))
# non-ragged
first_dim_list = [4, 4, 4]
dataset = self.get_one_col_dataset(first_dim_list, fixed_shape)
df = dataset.to_pandas()
self.assertEqual(type(df.image.dtype), PandasArrayExtensionDtype)
numpy_arr = df.image.to_numpy()
self.assertIsInstance(numpy_arr, np.ndarray)
self.assertNotEqual(numpy_arr.dtype, object)
for first_dim, single_arr in zip(first_dim_list, numpy_arr):
self.assertIsInstance(single_arr, np.ndarray)
self.assertTupleEqual(single_arr.shape, (first_dim, *fixed_shape))
def test_map_dataset(self):
fixed_shape = (2, 2)
first_dim_list = [1, 3, 10]
dataset = self.get_one_col_dataset(first_dim_list, fixed_shape)
dataset = dataset.map(lambda a: {"image": np.concatenate([a] * 2)}, input_columns="image")
        # also check that the map function above doubled the first dimension
for first_dim, ds_row in zip(first_dim_list, dataset):
single_arr = ds_row["image"]
self.assertIsInstance(single_arr, list)
self.assertTupleEqual(np.array(single_arr).shape, (first_dim * 2, *fixed_shape))
@pytest.mark.parametrize("dtype, dummy_value", [("int32", 1), ("bool", True), ("float64", 1)])
def test_table_to_pandas(dtype, dummy_value):
features = datasets.Features({"foo": datasets.Array2D(dtype=dtype, shape=(2, 2))})
dataset = datasets.Dataset.from_dict({"foo": [[[dummy_value] * 2] * 2]}, features=features)
df = dataset._data.to_pandas()
assert isinstance(df.foo.dtype, PandasArrayExtensionDtype)
arr = df.foo.to_numpy()
np.testing.assert_equal(arr, np.array([[[dummy_value] * 2] * 2], dtype=np.dtype(dtype)))
@pytest.mark.parametrize("dtype, dummy_value", [("int32", 1), ("bool", True), ("float64", 1)])
def test_array_xd_numpy_arrow_extractor(dtype, dummy_value):
features = datasets.Features({"foo": datasets.Array2D(dtype=dtype, shape=(2, 2))})
dataset = datasets.Dataset.from_dict({"foo": [[[dummy_value] * 2] * 2]}, features=features)
arr = NumpyArrowExtractor().extract_column(dataset._data)
assert isinstance(arr, np.ndarray)
np.testing.assert_equal(arr, np.array([[[dummy_value] * 2] * 2], dtype=np.dtype(dtype)))
def test_array_xd_with_none():
# Fixed shape
features = datasets.Features({"foo": datasets.Array2D(dtype="int32", shape=(2, 2))})
dummy_array = np.array([[1, 2], [3, 4]], dtype="int32")
dataset = datasets.Dataset.from_dict({"foo": [dummy_array, None, dummy_array, None]}, features=features)
arr = NumpyArrowExtractor().extract_column(dataset._data)
assert isinstance(arr, np.ndarray) and arr.dtype == np.float64 and arr.shape == (4, 2, 2)
assert np.allclose(arr[0], dummy_array) and np.allclose(arr[2], dummy_array)
assert np.all(np.isnan(arr[1])) and np.all(np.isnan(arr[3])) # broadcasted np.nan - use np.all
# Dynamic shape
features = datasets.Features({"foo": datasets.Array2D(dtype="int32", shape=(None, 2))})
dummy_array = np.array([[1, 2], [3, 4]], dtype="int32")
dataset = datasets.Dataset.from_dict({"foo": [dummy_array, None, dummy_array, None]}, features=features)
arr = NumpyArrowExtractor().extract_column(dataset._data)
assert isinstance(arr, np.ndarray) and arr.dtype == object and arr.shape == (4,)
np.testing.assert_equal(arr[0], dummy_array)
np.testing.assert_equal(arr[2], dummy_array)
assert np.isnan(arr[1]) and np.isnan(arr[3]) # a single np.nan value - np.all not needed
@pytest.mark.parametrize("seq_type", ["no_sequence", "sequence", "sequence_of_sequence"])
@pytest.mark.parametrize(
"dtype",
[
"bool",
"int8",
"int16",
"int32",
"int64",
"uint8",
"uint16",
"uint32",
"uint64",
"float16",
"float32",
"float64",
],
)
@pytest.mark.parametrize("shape, feature_class", [((2, 3), datasets.Array2D), ((2, 3, 4), datasets.Array3D)])
def test_array_xd_with_np(seq_type, dtype, shape, feature_class):
feature = feature_class(dtype=dtype, shape=shape)
data = np.zeros(shape, dtype=dtype)
expected = data.tolist()
if seq_type == "sequence":
feature = datasets.Sequence(feature)
data = [data]
expected = [expected]
elif seq_type == "sequence_of_sequence":
feature = datasets.Sequence(datasets.Sequence(feature))
data = [[data]]
expected = [[expected]]
ds = datasets.Dataset.from_dict({"col": [data]}, features=datasets.Features({"col": feature}))
assert ds[0]["col"] == expected
@pytest.mark.parametrize("with_none", [False, True])
def test_dataset_map(with_none):
ds = datasets.Dataset.from_dict({"path": ["path1", "path2"]})
def process_data(batch):
batch = {
"image": [
np.array(
[
[[1, 2, 3], [4, 5, 6], [7, 8, 9]],
[[10, 20, 30], [40, 50, 60], [70, 80, 90]],
[[100, 200, 300], [400, 500, 600], [700, 800, 900]],
]
)
for _ in batch["path"]
]
}
if with_none:
batch["image"][0] = None
return batch
features = datasets.Features({"image": Array3D(dtype="int32", shape=(3, 3, 3))})
processed_ds = ds.map(process_data, batched=True, remove_columns=ds.column_names, features=features)
assert processed_ds.shape == (2, 1)
with processed_ds.with_format("numpy") as pds:
for i, example in enumerate(pds):
assert "image" in example
assert isinstance(example["image"], np.ndarray)
assert example["image"].shape == (3, 3, 3)
if with_none and i == 0:
assert np.all(np.isnan(example["image"]))
| datasets/tests/features/test_array_xd.py/0 | {
"file_path": "datasets/tests/features/test_array_xd.py",
"repo_id": "datasets",
"token_count": 9827
} |
import contextlib
import os
import sqlite3
import pytest
from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy
def _check_sql_dataset(dataset, expected_features):
assert isinstance(dataset, Dataset)
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@require_sqlalchemy
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_sql_keep_in_memory(keep_in_memory, sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
cache_dir = tmp_path / "cache"
expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
dataset = SqlDatasetReader(
"dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory
).read()
_check_sql_dataset(dataset, expected_features)
@require_sqlalchemy
@pytest.mark.parametrize(
"features",
[
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
],
)
def test_dataset_from_sql_features(features, sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
cache_dir = tmp_path / "cache"
default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
expected_features = features.copy() if features else default_expected_features
features = (
Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
)
dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, features=features, cache_dir=cache_dir).read()
_check_sql_dataset(dataset, expected_features)
def iter_sql_file(sqlite_path):
with contextlib.closing(sqlite3.connect(sqlite_path)) as con:
cur = con.cursor()
cur.execute("SELECT * FROM dataset")
for row in cur:
yield row
@require_sqlalchemy
def test_dataset_to_sql(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
cache_dir = tmp_path / "cache"
output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=1).write()
original_sql = iter_sql_file(sqlite_path)
expected_sql = iter_sql_file(output_sqlite_path)
for row1, row2 in zip(original_sql, expected_sql):
assert row1 == row2
@require_sqlalchemy
def test_dataset_to_sql_multiproc(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
cache_dir = tmp_path / "cache"
output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=2).write()
original_sql = iter_sql_file(sqlite_path)
expected_sql = iter_sql_file(output_sqlite_path)
for row1, row2 in zip(original_sql, expected_sql):
assert row1 == row2
@require_sqlalchemy
def test_dataset_to_sql_invalidproc(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
cache_dir = tmp_path / "cache"
output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
with pytest.raises(ValueError):
SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=0).write()
| datasets/tests/io/test_sql.py/0 | {
"file_path": "datasets/tests/io/test_sql.py",
"repo_id": "datasets",
"token_count": 1628
} |
import contextlib
import copy
import itertools
import json
import os
import pickle
import re
import sys
import tempfile
from functools import partial
from pathlib import Path
from unittest import TestCase
from unittest.mock import MagicMock, patch
import numpy as np
import numpy.testing as npt
import pandas as pd
import pyarrow as pa
import pytest
from absl.testing import parameterized
from fsspec.core import strip_protocol
from packaging import version
import datasets.arrow_dataset
from datasets import concatenate_datasets, interleave_datasets, load_from_disk
from datasets.arrow_dataset import Dataset, transmit_format, update_metadata_with_features
from datasets.dataset_dict import DatasetDict
from datasets.features import (
Array2D,
Array3D,
ClassLabel,
Features,
Image,
LargeList,
Sequence,
Translation,
TranslationVariableLanguages,
Value,
)
from datasets.info import DatasetInfo
from datasets.iterable_dataset import IterableDataset
from datasets.splits import NamedSplit
from datasets.table import ConcatenationTable, InMemoryTable, MemoryMappedTable
from datasets.utils.logging import INFO, get_logger
from datasets.utils.py_utils import temp_seed
from .utils import (
assert_arrow_memory_doesnt_increase,
assert_arrow_memory_increases,
require_dill_gt_0_3_2,
require_jax,
require_not_windows,
require_numpy1_on_windows,
require_pil,
require_polars,
require_pyspark,
require_sqlalchemy,
require_tf,
require_torch,
require_transformers,
set_current_working_directory_to_temp_dir,
)
class PickableMagicMock(MagicMock):
def __reduce__(self):
return MagicMock, ()
class Unpicklable:
def __init__(self, **kwargs):
for key, value in kwargs.items():
setattr(self, key, value)
def __getstate__(self):
raise pickle.PicklingError()
def picklable_map_function(x):
return {"id": int(x["filename"].split("_")[-1])}
def picklable_map_function_with_indices(x, i):
return {"id": i}
def picklable_map_function_with_rank(x, r):
return {"rank": r}
def picklable_map_function_with_indices_and_rank(x, i, r):
return {"id": i, "rank": r}
def picklable_filter_function(x):
return int(x["filename"].split("_")[-1]) < 10
def picklable_filter_function_with_rank(x, r):
return r == 0
def assert_arrow_metadata_are_synced_with_dataset_features(dataset: Dataset):
assert dataset.data.schema.metadata is not None
assert b"huggingface" in dataset.data.schema.metadata
metadata = json.loads(dataset.data.schema.metadata[b"huggingface"].decode())
assert "info" in metadata
features = DatasetInfo.from_dict(metadata["info"]).features
assert features is not None
assert features == dataset.features
assert features == Features.from_arrow_schema(dataset.data.schema)
assert list(features) == dataset.data.column_names
assert list(features) == list(dataset.features)
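# Hedged illustration (not a test): the helper above relies on `datasets` storing its feature
# definitions as JSON under the b"huggingface" key of the Arrow schema metadata. For a dataset
# built with Dataset.from_dict({"filename": ["a"]}), dataset.data.schema.metadata roughly contains
#   {b"huggingface": b'{"info": {"features": {"filename": {"dtype": "string", "_type": "Value"}}}}'}
# which is what allows Features to be reconstructed from a bare .arrow file.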
IN_MEMORY_PARAMETERS = [
{"testcase_name": name, "in_memory": im} for im, name in [(True, "in_memory"), (False, "on_disk")]
]
@parameterized.named_parameters(IN_MEMORY_PARAMETERS)
class BaseDatasetTest(TestCase):
@pytest.fixture(autouse=True)
def inject_fixtures(self, caplog, set_sqlalchemy_silence_uber_warning):
self._caplog = caplog
def _create_dummy_dataset(
self, in_memory: bool, tmp_dir: str, multiple_columns=False, array_features=False, nested_features=False
) -> Dataset:
assert int(multiple_columns) + int(array_features) + int(nested_features) < 2
if multiple_columns:
data = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"], "col_3": [False, True, False, True]}
dset = Dataset.from_dict(data)
elif array_features:
data = {
"col_1": [[[True, False], [False, True]]] * 4, # 2D
"col_2": [[[["a", "b"], ["c", "d"]], [["e", "f"], ["g", "h"]]]] * 4, # 3D array
"col_3": [[3, 2, 1, 0]] * 4, # Sequence
}
features = Features(
{
"col_1": Array2D(shape=(2, 2), dtype="bool"),
"col_2": Array3D(shape=(2, 2, 2), dtype="string"),
"col_3": Sequence(feature=Value("int64")),
}
)
dset = Dataset.from_dict(data, features=features)
elif nested_features:
data = {"nested": [{"a": i, "x": i * 10, "c": i * 100} for i in range(1, 11)]}
features = Features({"nested": {"a": Value("int64"), "x": Value("int64"), "c": Value("int64")}})
dset = Dataset.from_dict(data, features=features)
else:
dset = Dataset.from_dict({"filename": ["my_name-train" + "_" + str(x) for x in np.arange(30).tolist()]})
if not in_memory:
dset = self._to(in_memory, tmp_dir, dset)
return dset
def _to(self, in_memory, tmp_dir, *datasets):
if in_memory:
datasets = [dataset.map(keep_in_memory=True) for dataset in datasets]
else:
start = 0
while os.path.isfile(os.path.join(tmp_dir, f"dataset{start}.arrow")):
start += 1
datasets = [
dataset.map(cache_file_name=os.path.join(tmp_dir, f"dataset{start + i}.arrow"))
for i, dataset in enumerate(datasets)
]
return datasets if len(datasets) > 1 else datasets[0]
def test_dummy_dataset(self, in_memory):
with tempfile.TemporaryDirectory() as tmp_dir:
with self._create_dummy_dataset(in_memory, tmp_dir) as dset:
self.assertDictEqual(dset.features, Features({"filename": Value("string")}))
self.assertEqual(dset[0]["filename"], "my_name-train_0")
self.assertEqual(dset["filename"][0], "my_name-train_0")
with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset:
self.assertDictEqual(
dset.features,
Features({"col_1": Value("int64"), "col_2": Value("string"), "col_3": Value("bool")}),
)
self.assertEqual(dset[0]["col_1"], 3)
self.assertEqual(dset["col_1"][0], 3)
with tempfile.TemporaryDirectory() as tmp_dir:
with self._create_dummy_dataset(in_memory, tmp_dir, array_features=True) as dset:
self.assertDictEqual(
dset.features,
Features(
{
"col_1": Array2D(shape=(2, 2), dtype="bool"),
"col_2": Array3D(shape=(2, 2, 2), dtype="string"),
"col_3": Sequence(feature=Value("int64")),
}
),
)
self.assertEqual(dset[0]["col_2"], [[["a", "b"], ["c", "d"]], [["e", "f"], ["g", "h"]]])
self.assertEqual(dset["col_2"][0], [[["a", "b"], ["c", "d"]], [["e", "f"], ["g", "h"]]])
def test_dataset_getitem(self, in_memory):
with tempfile.TemporaryDirectory() as tmp_dir:
with self._create_dummy_dataset(in_memory, tmp_dir) as dset:
self.assertEqual(dset[0]["filename"], "my_name-train_0")
self.assertEqual(dset["filename"][0], "my_name-train_0")
self.assertEqual(dset[-1]["filename"], "my_name-train_29")
self.assertEqual(dset["filename"][-1], "my_name-train_29")
self.assertListEqual(dset[:2]["filename"], ["my_name-train_0", "my_name-train_1"])
self.assertListEqual(dset["filename"][:2], ["my_name-train_0", "my_name-train_1"])
self.assertEqual(dset[:-1]["filename"][-1], "my_name-train_28")
self.assertEqual(dset["filename"][:-1][-1], "my_name-train_28")
self.assertListEqual(dset[[0, -1]]["filename"], ["my_name-train_0", "my_name-train_29"])
self.assertListEqual(dset[range(0, -2, -1)]["filename"], ["my_name-train_0", "my_name-train_29"])
self.assertListEqual(dset[np.array([0, -1])]["filename"], ["my_name-train_0", "my_name-train_29"])
self.assertListEqual(dset[pd.Series([0, -1])]["filename"], ["my_name-train_0", "my_name-train_29"])
with dset.select(range(2)) as dset_subset:
self.assertListEqual(dset_subset[-1:]["filename"], ["my_name-train_1"])
self.assertListEqual(dset_subset["filename"][-1:], ["my_name-train_1"])
def test_dummy_dataset_deepcopy(self, in_memory):
with tempfile.TemporaryDirectory() as tmp_dir:
with self._create_dummy_dataset(in_memory, tmp_dir).select(range(10)) as dset:
with assert_arrow_memory_doesnt_increase():
dset2 = copy.deepcopy(dset)
                # deepcopying must not copy the underlying arrow data into memory
self.assertEqual(len(dset2), 10)
self.assertDictEqual(dset2.features, Features({"filename": Value("string")}))
self.assertEqual(dset2[0]["filename"], "my_name-train_0")
self.assertEqual(dset2["filename"][0], "my_name-train_0")
del dset2
def test_dummy_dataset_pickle(self, in_memory):
with tempfile.TemporaryDirectory() as tmp_dir:
tmp_file = os.path.join(tmp_dir, "dset.pt")
with self._create_dummy_dataset(in_memory, tmp_dir).select(range(0, 10, 2)) as dset:
with open(tmp_file, "wb") as f:
pickle.dump(dset, f)
with open(tmp_file, "rb") as f:
with pickle.load(f) as dset:
self.assertEqual(len(dset), 5)
self.assertDictEqual(dset.features, Features({"filename": Value("string")}))
self.assertEqual(dset[0]["filename"], "my_name-train_0")
self.assertEqual(dset["filename"][0], "my_name-train_0")
with self._create_dummy_dataset(in_memory, tmp_dir).select(
range(0, 10, 2), indices_cache_file_name=os.path.join(tmp_dir, "ind.arrow")
) as dset:
if not in_memory:
dset._data.table = Unpicklable()
dset._indices.table = Unpicklable()
with open(tmp_file, "wb") as f:
pickle.dump(dset, f)
with open(tmp_file, "rb") as f:
with pickle.load(f) as dset:
self.assertEqual(len(dset), 5)
self.assertDictEqual(dset.features, Features({"filename": Value("string")}))
self.assertEqual(dset[0]["filename"], "my_name-train_0")
self.assertEqual(dset["filename"][0], "my_name-train_0")
def test_dummy_dataset_serialize(self, in_memory):
with tempfile.TemporaryDirectory() as tmp_dir:
with set_current_working_directory_to_temp_dir():
with self._create_dummy_dataset(in_memory, tmp_dir).select(range(10)) as dset:
dataset_path = "my_dataset" # rel path
dset.save_to_disk(dataset_path)
with Dataset.load_from_disk(dataset_path) as dset:
self.assertEqual(len(dset), 10)
self.assertDictEqual(dset.features, Features({"filename": Value("string")}))
self.assertEqual(dset[0]["filename"], "my_name-train_0")
self.assertEqual(dset["filename"][0], "my_name-train_0")
expected = dset.to_dict()
with self._create_dummy_dataset(in_memory, tmp_dir).select(range(10)) as dset:
dataset_path = os.path.join(tmp_dir, "my_dataset") # abs path
dset.save_to_disk(dataset_path)
with Dataset.load_from_disk(dataset_path) as dset:
self.assertEqual(len(dset), 10)
self.assertDictEqual(dset.features, Features({"filename": Value("string")}))
self.assertEqual(dset[0]["filename"], "my_name-train_0")
self.assertEqual(dset["filename"][0], "my_name-train_0")
with self._create_dummy_dataset(in_memory, tmp_dir).select(
range(10), indices_cache_file_name=os.path.join(tmp_dir, "ind.arrow")
) as dset:
with assert_arrow_memory_doesnt_increase():
dset.save_to_disk(dataset_path)
with Dataset.load_from_disk(dataset_path) as dset:
self.assertEqual(len(dset), 10)
self.assertDictEqual(dset.features, Features({"filename": Value("string")}))
self.assertEqual(dset[0]["filename"], "my_name-train_0")
self.assertEqual(dset["filename"][0], "my_name-train_0")
with self._create_dummy_dataset(in_memory, tmp_dir, nested_features=True) as dset:
with assert_arrow_memory_doesnt_increase():
dset.save_to_disk(dataset_path)
with Dataset.load_from_disk(dataset_path) as dset:
self.assertEqual(len(dset), 10)
self.assertDictEqual(
dset.features,
Features({"nested": {"a": Value("int64"), "x": Value("int64"), "c": Value("int64")}}),
)
self.assertDictEqual(dset[0]["nested"], {"a": 1, "c": 100, "x": 10})
self.assertDictEqual(dset["nested"][0], {"a": 1, "c": 100, "x": 10})
with self._create_dummy_dataset(in_memory, tmp_dir).select(range(10)) as dset:
with assert_arrow_memory_doesnt_increase():
dset.save_to_disk(dataset_path, num_shards=4)
with Dataset.load_from_disk(dataset_path) as dset:
self.assertEqual(len(dset), 10)
self.assertDictEqual(dset.features, Features({"filename": Value("string")}))
self.assertDictEqual(dset.to_dict(), expected)
self.assertEqual(len(dset.cache_files), 4)
with self._create_dummy_dataset(in_memory, tmp_dir).select(range(10)) as dset:
with assert_arrow_memory_doesnt_increase():
dset.save_to_disk(dataset_path, num_proc=2)
with Dataset.load_from_disk(dataset_path) as dset:
self.assertEqual(len(dset), 10)
self.assertDictEqual(dset.features, Features({"filename": Value("string")}))
self.assertDictEqual(dset.to_dict(), expected)
self.assertEqual(len(dset.cache_files), 2)
with self._create_dummy_dataset(in_memory, tmp_dir).select(range(10)) as dset:
with assert_arrow_memory_doesnt_increase():
dset.save_to_disk(dataset_path, num_shards=7, num_proc=2)
with Dataset.load_from_disk(dataset_path) as dset:
self.assertEqual(len(dset), 10)
self.assertDictEqual(dset.features, Features({"filename": Value("string")}))
self.assertDictEqual(dset.to_dict(), expected)
self.assertEqual(len(dset.cache_files), 7)
with self._create_dummy_dataset(in_memory, tmp_dir).select(range(10)) as dset:
with assert_arrow_memory_doesnt_increase():
max_shard_size = dset._estimate_nbytes() // 2 + 1
dset.save_to_disk(dataset_path, max_shard_size=max_shard_size)
with Dataset.load_from_disk(dataset_path) as dset:
self.assertEqual(len(dset), 10)
self.assertDictEqual(dset.features, Features({"filename": Value("string")}))
self.assertDictEqual(dset.to_dict(), expected)
self.assertEqual(len(dset.cache_files), 2)
def test_dummy_dataset_load_from_disk(self, in_memory):
with tempfile.TemporaryDirectory() as tmp_dir:
with self._create_dummy_dataset(in_memory, tmp_dir).select(range(10)) as dset:
dataset_path = os.path.join(tmp_dir, "my_dataset")
dset.save_to_disk(dataset_path)
with load_from_disk(dataset_path) as dset:
self.assertEqual(len(dset), 10)
self.assertDictEqual(dset.features, Features({"filename": Value("string")}))
self.assertEqual(dset[0]["filename"], "my_name-train_0")
self.assertEqual(dset["filename"][0], "my_name-train_0")
def test_restore_saved_format(self, in_memory):
with tempfile.TemporaryDirectory() as tmp_dir:
with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset:
dset.set_format(type="numpy", columns=["col_1"], output_all_columns=True)
dataset_path = os.path.join(tmp_dir, "my_dataset")
dset.save_to_disk(dataset_path)
with load_from_disk(dataset_path) as loaded_dset:
self.assertEqual(dset.format, loaded_dset.format)
def test_set_format_numpy_multiple_columns(self, in_memory):
with tempfile.TemporaryDirectory() as tmp_dir:
with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset:
fingerprint = dset._fingerprint
dset.set_format(type="numpy", columns=["col_1"])
self.assertEqual(len(dset[0]), 1)
self.assertIsInstance(dset[0]["col_1"], np.int64)
self.assertEqual(dset[0]["col_1"].item(), 3)
self.assertIsInstance(dset["col_1"], np.ndarray)
self.assertListEqual(list(dset["col_1"].shape), [4])
np.testing.assert_array_equal(dset["col_1"], np.array([3, 2, 1, 0]))
self.assertNotEqual(dset._fingerprint, fingerprint)
dset.reset_format()
with dset.formatted_as(type="numpy", columns=["col_1"]):
self.assertEqual(len(dset[0]), 1)
self.assertIsInstance(dset[0]["col_1"], np.int64)
self.assertEqual(dset[0]["col_1"].item(), 3)
self.assertIsInstance(dset["col_1"], np.ndarray)
self.assertListEqual(list(dset["col_1"].shape), [4])
np.testing.assert_array_equal(dset["col_1"], np.array([3, 2, 1, 0]))
self.assertEqual(dset.format["type"], None)
self.assertEqual(dset.format["format_kwargs"], {})
self.assertEqual(dset.format["columns"], dset.column_names)
self.assertEqual(dset.format["output_all_columns"], False)
dset.set_format(type="numpy", columns=["col_1"], output_all_columns=True)
self.assertEqual(len(dset[0]), 3)
self.assertIsInstance(dset[0]["col_2"], str)
self.assertEqual(dset[0]["col_2"], "a")
dset.set_format(type="numpy", columns=["col_1", "col_2"])
self.assertEqual(len(dset[0]), 2)
self.assertIsInstance(dset[0]["col_2"], np.str_)
self.assertEqual(dset[0]["col_2"].item(), "a")
@require_numpy1_on_windows
@require_torch
def test_set_format_torch(self, in_memory):
import torch
with tempfile.TemporaryDirectory() as tmp_dir:
with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset:
dset.set_format(type="torch", columns=["col_1"])
self.assertEqual(len(dset[0]), 1)
self.assertIsInstance(dset[0]["col_1"], torch.Tensor)
self.assertIsInstance(dset["col_1"], torch.Tensor)
self.assertListEqual(list(dset[0]["col_1"].shape), [])
self.assertEqual(dset[0]["col_1"].item(), 3)
dset.set_format(type="torch", columns=["col_1"], output_all_columns=True)
self.assertEqual(len(dset[0]), 3)
self.assertIsInstance(dset[0]["col_2"], str)
self.assertEqual(dset[0]["col_2"], "a")
dset.set_format(type="torch")
self.assertEqual(len(dset[0]), 3)
self.assertIsInstance(dset[0]["col_1"], torch.Tensor)
self.assertIsInstance(dset["col_1"], torch.Tensor)
self.assertListEqual(list(dset[0]["col_1"].shape), [])
self.assertEqual(dset[0]["col_1"].item(), 3)
self.assertIsInstance(dset[0]["col_2"], str)
self.assertEqual(dset[0]["col_2"], "a")
self.assertIsInstance(dset[0]["col_3"], torch.Tensor)
self.assertIsInstance(dset["col_3"], torch.Tensor)
self.assertListEqual(list(dset[0]["col_3"].shape), [])
@require_tf
def test_set_format_tf(self, in_memory):
import tensorflow as tf
with tempfile.TemporaryDirectory() as tmp_dir:
with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset:
dset.set_format(type="tensorflow", columns=["col_1"])
self.assertEqual(len(dset[0]), 1)
self.assertIsInstance(dset[0]["col_1"], tf.Tensor)
self.assertListEqual(list(dset[0]["col_1"].shape), [])
self.assertEqual(dset[0]["col_1"].numpy().item(), 3)
dset.set_format(type="tensorflow", columns=["col_1"], output_all_columns=True)
self.assertEqual(len(dset[0]), 3)
self.assertIsInstance(dset[0]["col_2"], str)
self.assertEqual(dset[0]["col_2"], "a")
dset.set_format(type="tensorflow", columns=["col_1", "col_2"])
self.assertEqual(len(dset[0]), 2)
self.assertEqual(dset[0]["col_2"].numpy().decode("utf-8"), "a")
def test_set_format_pandas(self, in_memory):
with tempfile.TemporaryDirectory() as tmp_dir:
with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset:
dset.set_format(type="pandas", columns=["col_1"])
self.assertEqual(len(dset[0].columns), 1)
self.assertIsInstance(dset[0], pd.DataFrame)
self.assertListEqual(list(dset[0].shape), [1, 1])
self.assertEqual(dset[0]["col_1"].item(), 3)
dset.set_format(type="pandas", columns=["col_1", "col_2"])
self.assertEqual(len(dset[0].columns), 2)
self.assertEqual(dset[0]["col_2"].item(), "a")
@require_polars
def test_set_format_polars(self, in_memory):
import polars as pl
with tempfile.TemporaryDirectory() as tmp_dir:
with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset:
dset.set_format(type="polars", columns=["col_1"])
self.assertEqual(len(dset[0].columns), 1)
self.assertIsInstance(dset[0], pl.DataFrame)
self.assertListEqual(list(dset[0].shape), [1, 1])
self.assertEqual(dset[0]["col_1"].item(), 3)
dset.set_format(type="polars", columns=["col_1", "col_2"])
self.assertEqual(len(dset[0].columns), 2)
self.assertEqual(dset[0]["col_2"].item(), "a")
def test_set_transform(self, in_memory):
def transform(batch):
return {k: [str(i).upper() for i in v] for k, v in batch.items()}
with tempfile.TemporaryDirectory() as tmp_dir:
with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset:
dset.set_transform(transform=transform, columns=["col_1"])
self.assertEqual(dset.format["type"], "custom")
self.assertEqual(len(dset[0].keys()), 1)
self.assertEqual(dset[0]["col_1"], "3")
self.assertEqual(dset[:2]["col_1"], ["3", "2"])
self.assertEqual(dset["col_1"][:2], ["3", "2"])
prev_format = dset.format
dset.set_format(**dset.format)
self.assertEqual(prev_format, dset.format)
dset.set_transform(transform=transform, columns=["col_1", "col_2"])
self.assertEqual(len(dset[0].keys()), 2)
self.assertEqual(dset[0]["col_2"], "A")
def test_transmit_format(self, in_memory):
with tempfile.TemporaryDirectory() as tmp_dir:
with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset:
transform = datasets.arrow_dataset.transmit_format(lambda x: x)
# make sure identity transform doesn't apply unnecessary format
self.assertEqual(dset._fingerprint, transform(dset)._fingerprint)
dset.set_format(**dset.format)
self.assertEqual(dset._fingerprint, transform(dset)._fingerprint)
# check lists comparisons
dset.set_format(columns=["col_1"])
self.assertEqual(dset._fingerprint, transform(dset)._fingerprint)
dset.set_format(columns=["col_1", "col_2"])
self.assertEqual(dset._fingerprint, transform(dset)._fingerprint)
dset.set_format("numpy", columns=["col_1", "col_2"])
self.assertEqual(dset._fingerprint, transform(dset)._fingerprint)
def test_cast(self, in_memory):
with tempfile.TemporaryDirectory() as tmp_dir:
with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset:
features = dset.features
features["col_1"] = Value("float64")
features = Features({k: features[k] for k in list(features)[::-1]})
fingerprint = dset._fingerprint
# TODO: with assert_arrow_memory_increases() if in_memory else assert_arrow_memory_doesnt_increase():
with dset.cast(features) as casted_dset:
self.assertEqual(casted_dset.num_columns, 3)
self.assertEqual(casted_dset.features["col_1"], Value("float64"))
self.assertIsInstance(casted_dset[0]["col_1"], float)
self.assertNotEqual(casted_dset._fingerprint, fingerprint)
self.assertNotEqual(casted_dset, dset)
assert_arrow_metadata_are_synced_with_dataset_features(casted_dset)
def test_class_encode_column(self, in_memory):
with tempfile.TemporaryDirectory() as tmp_dir:
with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset:
with self.assertRaises(ValueError):
dset.class_encode_column(column="does not exist")
with dset.class_encode_column("col_1") as casted_dset:
self.assertIsInstance(casted_dset.features["col_1"], ClassLabel)
self.assertListEqual(casted_dset.features["col_1"].names, ["0", "1", "2", "3"])
self.assertListEqual(casted_dset["col_1"], [3, 2, 1, 0])
self.assertNotEqual(casted_dset._fingerprint, dset._fingerprint)
self.assertNotEqual(casted_dset, dset)
assert_arrow_metadata_are_synced_with_dataset_features(casted_dset)
with dset.class_encode_column("col_2") as casted_dset:
self.assertIsInstance(casted_dset.features["col_2"], ClassLabel)
self.assertListEqual(casted_dset.features["col_2"].names, ["a", "b", "c", "d"])
self.assertListEqual(casted_dset["col_2"], [0, 1, 2, 3])
self.assertNotEqual(casted_dset._fingerprint, dset._fingerprint)
self.assertNotEqual(casted_dset, dset)
assert_arrow_metadata_are_synced_with_dataset_features(casted_dset)
with dset.class_encode_column("col_3") as casted_dset:
self.assertIsInstance(casted_dset.features["col_3"], ClassLabel)
self.assertListEqual(casted_dset.features["col_3"].names, ["False", "True"])
self.assertListEqual(casted_dset["col_3"], [0, 1, 0, 1])
self.assertNotEqual(casted_dset._fingerprint, dset._fingerprint)
self.assertNotEqual(casted_dset, dset)
assert_arrow_metadata_are_synced_with_dataset_features(casted_dset)
# Test raises if feature is an array / sequence
with self._create_dummy_dataset(in_memory, tmp_dir, array_features=True) as dset:
for column in dset.column_names:
with self.assertRaises(ValueError):
dset.class_encode_column(column)
def test_remove_columns(self, in_memory):
with tempfile.TemporaryDirectory() as tmp_dir:
with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset:
fingerprint = dset._fingerprint
with dset.remove_columns(column_names="col_1") as new_dset:
self.assertEqual(new_dset.num_columns, 2)
self.assertListEqual(list(new_dset.column_names), ["col_2", "col_3"])
self.assertNotEqual(new_dset._fingerprint, fingerprint)
assert_arrow_metadata_are_synced_with_dataset_features(new_dset)
with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset:
with dset.remove_columns(column_names=["col_1", "col_2", "col_3"]) as new_dset:
self.assertEqual(new_dset.num_columns, 0)
self.assertNotEqual(new_dset._fingerprint, fingerprint)
assert_arrow_metadata_are_synced_with_dataset_features(new_dset)
with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset:
dset._format_columns = ["col_1", "col_2", "col_3"]
with dset.remove_columns(column_names=["col_1"]) as new_dset:
self.assertListEqual(new_dset._format_columns, ["col_2", "col_3"])
self.assertEqual(new_dset.num_columns, 2)
self.assertListEqual(list(new_dset.column_names), ["col_2", "col_3"])
self.assertNotEqual(new_dset._fingerprint, fingerprint)
assert_arrow_metadata_are_synced_with_dataset_features(new_dset)
def test_rename_column(self, in_memory):
with tempfile.TemporaryDirectory() as tmp_dir:
with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset:
fingerprint = dset._fingerprint
with dset.rename_column(original_column_name="col_1", new_column_name="new_name") as new_dset:
self.assertEqual(new_dset.num_columns, 3)
self.assertListEqual(list(new_dset.column_names), ["new_name", "col_2", "col_3"])
self.assertListEqual(list(dset.column_names), ["col_1", "col_2", "col_3"])
self.assertNotEqual(new_dset._fingerprint, fingerprint)
assert_arrow_metadata_are_synced_with_dataset_features(new_dset)
def test_rename_columns(self, in_memory):
with tempfile.TemporaryDirectory() as tmp_dir:
with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset:
fingerprint = dset._fingerprint
with dset.rename_columns({"col_1": "new_name"}) as new_dset:
self.assertEqual(new_dset.num_columns, 3)
self.assertListEqual(list(new_dset.column_names), ["new_name", "col_2", "col_3"])
self.assertListEqual(list(dset.column_names), ["col_1", "col_2", "col_3"])
self.assertNotEqual(new_dset._fingerprint, fingerprint)
with dset.rename_columns({"col_1": "new_name", "col_2": "new_name2"}) as new_dset:
self.assertEqual(new_dset.num_columns, 3)
self.assertListEqual(list(new_dset.column_names), ["new_name", "new_name2", "col_3"])
self.assertListEqual(list(dset.column_names), ["col_1", "col_2", "col_3"])
self.assertNotEqual(new_dset._fingerprint, fingerprint)
# Original column not in dataset
with self.assertRaises(ValueError):
dset.rename_columns({"not_there": "new_name"})
# Empty new name
with self.assertRaises(ValueError):
dset.rename_columns({"col_1": ""})
# Duplicates
with self.assertRaises(ValueError):
dset.rename_columns({"col_1": "new_name", "col_2": "new_name"})
def test_select_columns(self, in_memory):
with tempfile.TemporaryDirectory() as tmp_dir:
with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset:
fingerprint = dset._fingerprint
with dset.select_columns(column_names=[]) as new_dset:
self.assertEqual(new_dset.num_columns, 0)
self.assertListEqual(list(new_dset.column_names), [])
self.assertNotEqual(new_dset._fingerprint, fingerprint)
assert_arrow_metadata_are_synced_with_dataset_features(new_dset)
with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset:
fingerprint = dset._fingerprint
with dset.select_columns(column_names="col_1") as new_dset:
self.assertEqual(new_dset.num_columns, 1)
self.assertListEqual(list(new_dset.column_names), ["col_1"])
self.assertNotEqual(new_dset._fingerprint, fingerprint)
assert_arrow_metadata_are_synced_with_dataset_features(new_dset)
with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset:
with dset.select_columns(column_names=["col_1", "col_2", "col_3"]) as new_dset:
self.assertEqual(new_dset.num_columns, 3)
self.assertListEqual(list(new_dset.column_names), ["col_1", "col_2", "col_3"])
self.assertNotEqual(new_dset._fingerprint, fingerprint)
assert_arrow_metadata_are_synced_with_dataset_features(new_dset)
with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset:
with dset.select_columns(column_names=["col_3", "col_2", "col_1"]) as new_dset:
self.assertEqual(new_dset.num_columns, 3)
self.assertListEqual(list(new_dset.column_names), ["col_3", "col_2", "col_1"])
self.assertNotEqual(new_dset._fingerprint, fingerprint)
assert_arrow_metadata_are_synced_with_dataset_features(new_dset)
with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset:
dset._format_columns = ["col_1", "col_2", "col_3"]
with dset.select_columns(column_names=["col_1"]) as new_dset:
self.assertListEqual(new_dset._format_columns, ["col_1"])
self.assertEqual(new_dset.num_columns, 1)
self.assertListEqual(list(new_dset.column_names), ["col_1"])
self.assertNotEqual(new_dset._fingerprint, fingerprint)
assert_arrow_metadata_are_synced_with_dataset_features(new_dset)
def test_concatenate(self, in_memory):
data1, data2, data3 = {"id": [0, 1, 2]}, {"id": [3, 4, 5]}, {"id": [6, 7]}
info1 = DatasetInfo(description="Dataset1")
info2 = DatasetInfo(description="Dataset2")
with tempfile.TemporaryDirectory() as tmp_dir:
dset1, dset2, dset3 = (
Dataset.from_dict(data1, info=info1),
Dataset.from_dict(data2, info=info2),
Dataset.from_dict(data3),
)
dset1, dset2, dset3 = self._to(in_memory, tmp_dir, dset1, dset2, dset3)
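# the DatasetInfo descriptions of the inputs are merged, separated by a blank line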
with concatenate_datasets([dset1, dset2, dset3]) as dset_concat:
self.assertTupleEqual((len(dset1), len(dset2), len(dset3)), (3, 3, 2))
self.assertEqual(len(dset_concat), len(dset1) + len(dset2) + len(dset3))
self.assertListEqual(dset_concat["id"], [0, 1, 2, 3, 4, 5, 6, 7])
self.assertEqual(len(dset_concat.cache_files), 0 if in_memory else 3)
self.assertEqual(dset_concat.info.description, "Dataset1\n\nDataset2")
del dset1, dset2, dset3
def test_concatenate_formatted(self, in_memory):
data1, data2, data3 = {"id": [0, 1, 2]}, {"id": [3, 4, 5]}, {"id": [6, 7]}
info1 = DatasetInfo(description="Dataset1")
info2 = DatasetInfo(description="Dataset2")
with tempfile.TemporaryDirectory() as tmp_dir:
dset1, dset2, dset3 = (
Dataset.from_dict(data1, info=info1),
Dataset.from_dict(data2, info=info2),
Dataset.from_dict(data3),
)
dset1, dset2, dset3 = self._to(in_memory, tmp_dir, dset1, dset2, dset3)
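# the output format is only set when all the input datasets share the same format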
dset1.set_format("numpy")
with concatenate_datasets([dset1, dset2, dset3]) as dset_concat:
self.assertEqual(dset_concat.format["type"], None)
dset2.set_format("numpy")
dset3.set_format("numpy")
with concatenate_datasets([dset1, dset2, dset3]) as dset_concat:
self.assertEqual(dset_concat.format["type"], "numpy")
del dset1, dset2, dset3
def test_concatenate_with_indices(self, in_memory):
data1, data2, data3 = {"id": [0, 1, 2] * 2}, {"id": [3, 4, 5] * 2}, {"id": [6, 7, 8]}
info1 = DatasetInfo(description="Dataset1")
info2 = DatasetInfo(description="Dataset2")
with tempfile.TemporaryDirectory() as tmp_dir:
dset1, dset2, dset3 = (
Dataset.from_dict(data1, info=info1),
Dataset.from_dict(data2, info=info2),
Dataset.from_dict(data3),
)
dset1, dset2, dset3 = self._to(in_memory, tmp_dir, dset1, dset2, dset3)
dset1, dset2, dset3 = dset1.select([2, 1, 0]), dset2.select([2, 1, 0]), dset3
with concatenate_datasets([dset3, dset2, dset1]) as dset_concat:
self.assertTupleEqual((len(dset1), len(dset2), len(dset3)), (3, 3, 3))
self.assertEqual(len(dset_concat), len(dset1) + len(dset2) + len(dset3))
self.assertListEqual(dset_concat["id"], [6, 7, 8, 5, 4, 3, 2, 1, 0])
# in_memory = False:
# 3 cache files for the dset_concat._data table
# no cache file for the indices because it's in memory
# in_memory = True:
# no cache files since both dset_concat._data and dset_concat._indices are in memory
self.assertEqual(len(dset_concat.cache_files), 0 if in_memory else 3)
self.assertEqual(dset_concat.info.description, "Dataset2\n\nDataset1")
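# axis=1 concatenates the datasets horizontally; rename the id columns first so the names do not clash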
dset1 = dset1.rename_columns({"id": "id1"})
dset2 = dset2.rename_columns({"id": "id2"})
dset3 = dset3.rename_columns({"id": "id3"})
with concatenate_datasets([dset1, dset2, dset3], axis=1) as dset_concat:
self.assertTupleEqual((len(dset1), len(dset2), len(dset3)), (3, 3, 3))
self.assertEqual(len(dset_concat), len(dset1))
self.assertListEqual(dset_concat["id1"], [2, 1, 0])
self.assertListEqual(dset_concat["id2"], [5, 4, 3])
self.assertListEqual(dset_concat["id3"], [6, 7, 8])
# in_memory = False:
# 3 cache files for the dset_concat._data table
# no cache file for the indices because it's None
# in_memory = True:
# no cache files since dset_concat._data is in memory and dset_concat._indices is None
self.assertEqual(len(dset_concat.cache_files), 0 if in_memory else 3)
self.assertIsNone(dset_concat._indices)
self.assertEqual(dset_concat.info.description, "Dataset1\n\nDataset2")
with concatenate_datasets([dset1], axis=1) as dset_concat:
self.assertEqual(len(dset_concat), len(dset1))
self.assertListEqual(dset_concat["id1"], [2, 1, 0])
# in_memory = False:
# 1 cache file for the dset_concat._data table
# no cache file for the indices because it's in memory
# in_memory = True:
# no cache files since both dset_concat._data and dset_concat._indices are in memory
self.assertEqual(len(dset_concat.cache_files), 0 if in_memory else 1)
self.assertEqual(dset_concat._indices, dset1._indices)
self.assertEqual(dset_concat.info.description, "Dataset1")
del dset1, dset2, dset3
def test_concatenate_with_indices_from_disk(self, in_memory):
data1, data2, data3 = {"id": [0, 1, 2] * 2}, {"id": [3, 4, 5] * 2}, {"id": [6, 7]}
info1 = DatasetInfo(description="Dataset1")
info2 = DatasetInfo(description="Dataset2")
with tempfile.TemporaryDirectory() as tmp_dir:
dset1, dset2, dset3 = (
Dataset.from_dict(data1, info=info1),
Dataset.from_dict(data2, info=info2),
Dataset.from_dict(data3),
)
dset1, dset2, dset3 = self._to(in_memory, tmp_dir, dset1, dset2, dset3)
dset1, dset2, dset3 = (
dset1.select([2, 1, 0], indices_cache_file_name=os.path.join(tmp_dir, "i1.arrow")),
dset2.select([2, 1, 0], indices_cache_file_name=os.path.join(tmp_dir, "i2.arrow")),
dset3.select([1, 0], indices_cache_file_name=os.path.join(tmp_dir, "i3.arrow")),
)
with concatenate_datasets([dset3, dset2, dset1]) as dset_concat:
self.assertTupleEqual((len(dset1), len(dset2), len(dset3)), (3, 3, 2))
self.assertEqual(len(dset_concat), len(dset1) + len(dset2) + len(dset3))
self.assertListEqual(dset_concat["id"], [7, 6, 5, 4, 3, 2, 1, 0])
# in_memory = False:
# 3 cache files for the dset_concat._data table, and 1 for the dset_concat._indices_table
# Only 1 cache file is kept for the indices tables (i1.arrow):
# the other indices tables are brought into memory because an offset is applied to them.
# in_memory = True:
# 1 cache file for i1.arrow since both dset_concat._data and dset_concat._indices are in memory
self.assertEqual(len(dset_concat.cache_files), 1 if in_memory else 3 + 1)
self.assertEqual(dset_concat.info.description, "Dataset2\n\nDataset1")
del dset1, dset2, dset3
def test_concatenate_pickle(self, in_memory):
data1, data2, data3 = {"id": [0, 1, 2] * 2}, {"id": [3, 4, 5] * 2}, {"id": [6, 7], "foo": ["bar", "bar"]}
info1 = DatasetInfo(description="Dataset1")
info2 = DatasetInfo(description="Dataset2")
with tempfile.TemporaryDirectory() as tmp_dir:
dset1, dset2, dset3 = (
Dataset.from_dict(data1, info=info1),
Dataset.from_dict(data2, info=info2),
Dataset.from_dict(data3),
)
schema = dset1.data.schema
# mix from in-memory and on-disk datasets
dset1, dset2 = self._to(in_memory, tmp_dir, dset1, dset2)
dset3 = self._to(not in_memory, tmp_dir, dset3)
dset1, dset2, dset3 = (
dset1.select(
[2, 1, 0],
keep_in_memory=in_memory,
indices_cache_file_name=os.path.join(tmp_dir, "i1.arrow") if not in_memory else None,
),
dset2.select(
[2, 1, 0],
keep_in_memory=in_memory,
indices_cache_file_name=os.path.join(tmp_dir, "i2.arrow") if not in_memory else None,
),
dset3.select(
[1, 0],
keep_in_memory=in_memory,
indices_cache_file_name=os.path.join(tmp_dir, "i3.arrow") if not in_memory else None,
),
)
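# align dset3's columns with dset1/dset2: rename "foo" and then drop it so only "id" remains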
dset3 = dset3.rename_column("foo", "new_foo")
dset3 = dset3.remove_columns("new_foo")
if in_memory:
dset3._data.table = Unpicklable(schema=schema)
else:
dset1._data.table, dset2._data.table = Unpicklable(schema=schema), Unpicklable(schema=schema)
dset1, dset2, dset3 = (pickle.loads(pickle.dumps(d)) for d in (dset1, dset2, dset3))
with concatenate_datasets([dset3, dset2, dset1]) as dset_concat:
if not in_memory:
dset_concat._data.table = Unpicklable(schema=schema)
with pickle.loads(pickle.dumps(dset_concat)) as dset_concat:
self.assertTupleEqual((len(dset1), len(dset2), len(dset3)), (3, 3, 2))
self.assertEqual(len(dset_concat), len(dset1) + len(dset2) + len(dset3))
self.assertListEqual(dset_concat["id"], [7, 6, 5, 4, 3, 2, 1, 0])
# in_memory = True: 1 cache file for dset3
# in_memory = False: 2 cache files for dset1 and dset2, and 1 cache file for i1.arrow
self.assertEqual(len(dset_concat.cache_files), 1 if in_memory else 2 + 1)
self.assertEqual(dset_concat.info.description, "Dataset2\n\nDataset1")
del dset1, dset2, dset3
def test_repeat(self, in_memory):
with tempfile.TemporaryDirectory() as tmp_dir:
with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset:
repeated_dset = dset.repeat(3)
column_values_dict = {col: dset[col] for col in dset.column_names}
for col, single_values in column_values_dict.items():
self.assertListEqual(repeated_dset[col], single_values * 3)
del repeated_dset
with tempfile.TemporaryDirectory() as tmp_dir:
with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset:
with pytest.raises(ValueError):
dset.repeat(None)
with tempfile.TemporaryDirectory() as tmp_dir:
with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset:
repeated_dset = dset.repeat(0)
self.assertEqual(len(repeated_dset), 0)
del repeated_dset
with tempfile.TemporaryDirectory() as tmp_dir:
with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset:
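# a negative repeat count behaves like repeat(0) and yields an empty dataset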
repeated_dset = dset.repeat(-1)
self.assertEqual(len(repeated_dset), 0)
del repeated_dset
def test_flatten(self, in_memory):
with tempfile.TemporaryDirectory() as tmp_dir:
with Dataset.from_dict(
{"a": [{"b": {"c": ["text"]}}] * 10, "foo": [1] * 10},
features=Features({"a": {"b": Sequence({"c": Value("string")})}, "foo": Value("int64")}),
) as dset:
with self._to(in_memory, tmp_dir, dset) as dset:
fingerprint = dset._fingerprint
with dset.flatten() as dset:
self.assertListEqual(sorted(dset.column_names), ["a.b.c", "foo"])
self.assertListEqual(sorted(dset.features.keys()), ["a.b.c", "foo"])
self.assertDictEqual(
dset.features, Features({"a.b.c": Sequence(Value("string")), "foo": Value("int64")})
)
self.assertNotEqual(dset._fingerprint, fingerprint)
assert_arrow_metadata_are_synced_with_dataset_features(dset)
with tempfile.TemporaryDirectory() as tmp_dir:
with Dataset.from_dict(
{"a": [{"en": "Thank you", "fr": "Merci"}] * 10, "foo": [1] * 10},
features=Features({"a": Translation(languages=["en", "fr"]), "foo": Value("int64")}),
) as dset:
with self._to(in_memory, tmp_dir, dset) as dset:
fingerprint = dset._fingerprint
with dset.flatten() as dset:
self.assertListEqual(sorted(dset.column_names), ["a.en", "a.fr", "foo"])
self.assertListEqual(sorted(dset.features.keys()), ["a.en", "a.fr", "foo"])
self.assertDictEqual(
dset.features,
Features({"a.en": Value("string"), "a.fr": Value("string"), "foo": Value("int64")}),
)
self.assertNotEqual(dset._fingerprint, fingerprint)
assert_arrow_metadata_are_synced_with_dataset_features(dset)
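# TranslationVariableLanguages flattens into parallel "language" and "translation" sequence columns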
with tempfile.TemporaryDirectory() as tmp_dir:
with Dataset.from_dict(
{"a": [{"en": "the cat", "fr": ["le chat", "la chatte"], "de": "die katze"}] * 10, "foo": [1] * 10},
features=Features(
{
"a": TranslationVariableLanguages(languages=["en", "fr", "de"]),
"foo": Value("int64"),
}
),
) as dset:
with self._to(in_memory, tmp_dir, dset) as dset:
fingerprint = dset._fingerprint
with dset.flatten() as dset:
self.assertListEqual(sorted(dset.column_names), ["a.language", "a.translation", "foo"])
self.assertListEqual(sorted(dset.features.keys()), ["a.language", "a.translation", "foo"])
self.assertDictEqual(
dset.features,
Features(
{
"a.language": Sequence(Value("string")),
"a.translation": Sequence(Value("string")),
"foo": Value("int64"),
}
),
)
self.assertNotEqual(dset._fingerprint, fingerprint)
assert_arrow_metadata_are_synced_with_dataset_features(dset)
@require_pil
def test_flatten_complex_image(self, in_memory):
# decoding turned on
with tempfile.TemporaryDirectory() as tmp_dir:
with Dataset.from_dict(
{"a": [np.arange(4 * 4 * 3, dtype=np.uint8).reshape(4, 4, 3)] * 10, "foo": [1] * 10},
features=Features({"a": Image(), "foo": Value("int64")}),
) as dset:
with self._to(in_memory, tmp_dir, dset) as dset:
fingerprint = dset._fingerprint
with dset.flatten() as dset:
self.assertListEqual(sorted(dset.column_names), ["a", "foo"])
self.assertListEqual(sorted(dset.features.keys()), ["a", "foo"])
self.assertDictEqual(dset.features, Features({"a": Image(), "foo": Value("int64")}))
self.assertNotEqual(dset._fingerprint, fingerprint)
assert_arrow_metadata_are_synced_with_dataset_features(dset)
# decoding turned on + nesting
with tempfile.TemporaryDirectory() as tmp_dir:
with Dataset.from_dict(
{"a": [{"b": np.arange(4 * 4 * 3, dtype=np.uint8).reshape(4, 4, 3)}] * 10, "foo": [1] * 10},
features=Features({"a": {"b": Image()}, "foo": Value("int64")}),
) as dset:
with self._to(in_memory, tmp_dir, dset) as dset:
fingerprint = dset._fingerprint
with dset.flatten() as dset:
self.assertListEqual(sorted(dset.column_names), ["a.b", "foo"])
self.assertListEqual(sorted(dset.features.keys()), ["a.b", "foo"])
self.assertDictEqual(dset.features, Features({"a.b": Image(), "foo": Value("int64")}))
self.assertNotEqual(dset._fingerprint, fingerprint)
assert_arrow_metadata_are_synced_with_dataset_features(dset)
# decoding turned off
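# with decode=False the Image feature is stored as a plain struct and flattens into separate bytes/path columns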
with tempfile.TemporaryDirectory() as tmp_dir:
with Dataset.from_dict(
{"a": [np.arange(4 * 4 * 3, dtype=np.uint8).reshape(4, 4, 3)] * 10, "foo": [1] * 10},
features=Features({"a": Image(decode=False), "foo": Value("int64")}),
) as dset:
with self._to(in_memory, tmp_dir, dset) as dset:
fingerprint = dset._fingerprint
with dset.flatten() as dset:
self.assertListEqual(sorted(dset.column_names), ["a.bytes", "a.path", "foo"])
self.assertListEqual(sorted(dset.features.keys()), ["a.bytes", "a.path", "foo"])
self.assertDictEqual(
dset.features,
Features({"a.bytes": Value("binary"), "a.path": Value("string"), "foo": Value("int64")}),
)
self.assertNotEqual(dset._fingerprint, fingerprint)
assert_arrow_metadata_are_synced_with_dataset_features(dset)
# decoding turned off + nesting
with tempfile.TemporaryDirectory() as tmp_dir:
with Dataset.from_dict(
{"a": [{"b": np.arange(4 * 4 * 3, dtype=np.uint8).reshape(4, 4, 3)}] * 10, "foo": [1] * 10},
features=Features({"a": {"b": Image(decode=False)}, "foo": Value("int64")}),
) as dset:
with self._to(in_memory, tmp_dir, dset) as dset:
fingerprint = dset._fingerprint
with dset.flatten() as dset:
self.assertListEqual(sorted(dset.column_names), ["a.b.bytes", "a.b.path", "foo"])
self.assertListEqual(sorted(dset.features.keys()), ["a.b.bytes", "a.b.path", "foo"])
self.assertDictEqual(
dset.features,
Features(
{
"a.b.bytes": Value("binary"),
"a.b.path": Value("string"),
"foo": Value("int64"),
}
),
)
self.assertNotEqual(dset._fingerprint, fingerprint)
assert_arrow_metadata_are_synced_with_dataset_features(dset)
def test_map(self, in_memory):
# standard
with tempfile.TemporaryDirectory() as tmp_dir:
with self._create_dummy_dataset(in_memory, tmp_dir) as dset:
self.assertDictEqual(dset.features, Features({"filename": Value("string")}))
fingerprint = dset._fingerprint
with dset.map(
lambda x: {"name": x["filename"][:-2], "id": int(x["filename"].split("_")[-1])}
) as dset_test:
self.assertEqual(len(dset_test), 30)
self.assertDictEqual(dset.features, Features({"filename": Value("string")}))
self.assertDictEqual(
dset_test.features,
Features({"filename": Value("string"), "name": Value("string"), "id": Value("int64")}),
)
self.assertListEqual(dset_test["id"], list(range(30)))
self.assertNotEqual(dset_test._fingerprint, fingerprint)
assert_arrow_metadata_are_synced_with_dataset_features(dset_test)
# no transform
with tempfile.TemporaryDirectory() as tmp_dir:
with self._create_dummy_dataset(in_memory, tmp_dir) as dset:
fingerprint = dset._fingerprint
with dset.map(lambda x: None) as dset_test:
self.assertEqual(len(dset_test), 30)
self.assertEqual(dset_test._fingerprint, fingerprint)
assert_arrow_metadata_are_synced_with_dataset_features(dset_test)
# with indices
with tempfile.TemporaryDirectory() as tmp_dir:
with self._create_dummy_dataset(in_memory, tmp_dir) as dset:
with dset.map(
lambda x, i: {"name": x["filename"][:-2], "id": i}, with_indices=True
) as dset_test_with_indices:
self.assertEqual(len(dset_test_with_indices), 30)
self.assertDictEqual(dset.features, Features({"filename": Value("string")}))
self.assertDictEqual(
dset_test_with_indices.features,
Features({"filename": Value("string"), "name": Value("string"), "id": Value("int64")}),
)
self.assertListEqual(dset_test_with_indices["id"], list(range(30)))
assert_arrow_metadata_are_synced_with_dataset_features(dset_test_with_indices)
# interrupted
with tempfile.TemporaryDirectory() as tmp_dir:
with self._create_dummy_dataset(in_memory, tmp_dir) as dset:
def func(x, i):
if i == 4:
raise KeyboardInterrupt()
return {"name": x["filename"][:-2], "id": i}
tmp_file = os.path.join(tmp_dir, "test.arrow")
self.assertRaises(
KeyboardInterrupt,
dset.map,
function=func,
with_indices=True,
cache_file_name=tmp_file,
writer_batch_size=2,
)
self.assertFalse(os.path.exists(tmp_file))
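# the interrupted map must not leave a partial cache file behind; rerunning the same map then succeeds and writes it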
with dset.map(
lambda x, i: {"name": x["filename"][:-2], "id": i},
with_indices=True,
cache_file_name=tmp_file,
writer_batch_size=2,
) as dset_test_with_indices:
self.assertTrue(os.path.exists(tmp_file))
self.assertEqual(len(dset_test_with_indices), 30)
self.assertDictEqual(dset.features, Features({"filename": Value("string")}))
self.assertDictEqual(
dset_test_with_indices.features,
Features({"filename": Value("string"), "name": Value("string"), "id": Value("int64")}),
)
self.assertListEqual(dset_test_with_indices["id"], list(range(30)))
assert_arrow_metadata_are_synced_with_dataset_features(dset_test_with_indices)
# formatted
with tempfile.TemporaryDirectory() as tmp_dir:
with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset:
dset.set_format("numpy", columns=["col_1"])
with dset.map(lambda x: {"col_1_plus_one": x["col_1"] + 1}) as dset_test:
self.assertEqual(len(dset_test), 4)
self.assertEqual(dset_test.format["type"], "numpy")
self.assertIsInstance(dset_test["col_1"], np.ndarray)
self.assertIsInstance(dset_test["col_1_plus_one"], np.ndarray)
self.assertListEqual(sorted(dset_test[0].keys()), ["col_1", "col_1_plus_one"])
self.assertListEqual(sorted(dset_test.column_names), ["col_1", "col_1_plus_one", "col_2", "col_3"])
assert_arrow_metadata_are_synced_with_dataset_features(dset_test)
def test_map_multiprocessing(self, in_memory):
with tempfile.TemporaryDirectory() as tmp_dir: # standard
with self._create_dummy_dataset(in_memory, tmp_dir) as dset:
self.assertDictEqual(dset.features, Features({"filename": Value("string")}))
fingerprint = dset._fingerprint
with dset.map(picklable_map_function, num_proc=2) as dset_test:
self.assertEqual(len(dset_test), 30)
self.assertDictEqual(dset.features, Features({"filename": Value("string")}))
self.assertDictEqual(
dset_test.features,
Features({"filename": Value("string"), "id": Value("int64")}),
)
self.assertEqual(len(dset_test.cache_files), 0 if in_memory else 2)
if not in_memory:
self.assertIn("_of_00002.arrow", dset_test.cache_files[0]["filename"])
self.assertListEqual(dset_test["id"], list(range(30)))
self.assertNotEqual(dset_test._fingerprint, fingerprint)
assert_arrow_metadata_are_synced_with_dataset_features(dset_test)
with tempfile.TemporaryDirectory() as tmp_dir: # num_proc > num rows
with self._create_dummy_dataset(in_memory, tmp_dir) as dset:
self.assertDictEqual(dset.features, Features({"filename": Value("string")}))
fingerprint = dset._fingerprint
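# with more processes than rows, num_proc is expected to be capped at the number of rows (2 here)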
with dset.select([0, 1], keep_in_memory=True).map(picklable_map_function, num_proc=10) as dset_test:
self.assertEqual(len(dset_test), 2)
self.assertDictEqual(dset.features, Features({"filename": Value("string")}))
self.assertDictEqual(
dset_test.features,
Features({"filename": Value("string"), "id": Value("int64")}),
)
self.assertEqual(len(dset_test.cache_files), 0 if in_memory else 2)
self.assertListEqual(dset_test["id"], list(range(2)))
self.assertNotEqual(dset_test._fingerprint, fingerprint)
assert_arrow_metadata_are_synced_with_dataset_features(dset_test)
with tempfile.TemporaryDirectory() as tmp_dir: # with_indices
with self._create_dummy_dataset(in_memory, tmp_dir) as dset:
fingerprint = dset._fingerprint
with dset.map(picklable_map_function_with_indices, num_proc=3, with_indices=True) as dset_test:
self.assertEqual(len(dset_test), 30)
self.assertDictEqual(dset.features, Features({"filename": Value("string")}))
self.assertDictEqual(
dset_test.features,
Features({"filename": Value("string"), "id": Value("int64")}),
)
self.assertEqual(len(dset_test.cache_files), 0 if in_memory else 3)
self.assertListEqual(dset_test["id"], list(range(30)))
self.assertNotEqual(dset_test._fingerprint, fingerprint)
assert_arrow_metadata_are_synced_with_dataset_features(dset_test)
with tempfile.TemporaryDirectory() as tmp_dir: # with_rank
with self._create_dummy_dataset(in_memory, tmp_dir) as dset:
fingerprint = dset._fingerprint
with dset.map(picklable_map_function_with_rank, num_proc=3, with_rank=True) as dset_test:
self.assertEqual(len(dset_test), 30)
self.assertDictEqual(dset.features, Features({"filename": Value("string")}))
self.assertDictEqual(
dset_test.features,
Features({"filename": Value("string"), "rank": Value("int64")}),
)
self.assertEqual(len(dset_test.cache_files), 0 if in_memory else 3)
self.assertListEqual(dset_test["rank"], [0] * 10 + [1] * 10 + [2] * 10)
self.assertNotEqual(dset_test._fingerprint, fingerprint)
assert_arrow_metadata_are_synced_with_dataset_features(dset_test)
with tempfile.TemporaryDirectory() as tmp_dir: # with_indices AND with_rank
with self._create_dummy_dataset(in_memory, tmp_dir) as dset:
fingerprint = dset._fingerprint
with dset.map(
picklable_map_function_with_indices_and_rank, num_proc=3, with_indices=True, with_rank=True
) as dset_test:
self.assertEqual(len(dset_test), 30)
self.assertDictEqual(dset.features, Features({"filename": Value("string")}))
self.assertDictEqual(
dset_test.features,
Features({"filename": Value("string"), "id": Value("int64"), "rank": Value("int64")}),
)
self.assertEqual(len(dset_test.cache_files), 0 if in_memory else 3)
self.assertListEqual(dset_test["id"], list(range(30)))
self.assertListEqual(dset_test["rank"], [0] * 10 + [1] * 10 + [2] * 10)
self.assertNotEqual(dset_test._fingerprint, fingerprint)
assert_arrow_metadata_are_synced_with_dataset_features(dset_test)
with tempfile.TemporaryDirectory() as tmp_dir: # new_fingerprint
new_fingerprint = "foobar"
invalid_new_fingerprint = "foobar/hey"
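# the fingerprint is used to build the cache file names, so a value containing a path separator is rejected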
with self._create_dummy_dataset(in_memory, tmp_dir) as dset:
fingerprint = dset._fingerprint
self.assertRaises(
ValueError, dset.map, picklable_map_function, num_proc=2, new_fingerprint=invalid_new_fingerprint
)
with dset.map(picklable_map_function, num_proc=2, new_fingerprint=new_fingerprint) as dset_test:
self.assertEqual(len(dset_test), 30)
self.assertDictEqual(dset.features, Features({"filename": Value("string")}))
self.assertDictEqual(
dset_test.features,
Features({"filename": Value("string"), "id": Value("int64")}),
)
self.assertEqual(len(dset_test.cache_files), 0 if in_memory else 2)
self.assertListEqual(dset_test["id"], list(range(30)))
self.assertNotEqual(dset_test._fingerprint, fingerprint)
self.assertEqual(dset_test._fingerprint, new_fingerprint)
assert_arrow_metadata_are_synced_with_dataset_features(dset_test)
file_names = sorted(Path(cache_file["filename"]).name for cache_file in dset_test.cache_files)
for i, file_name in enumerate(file_names):
self.assertIn(new_fingerprint + f"_{i:05d}", file_name)
with tempfile.TemporaryDirectory() as tmp_dir: # lambda (requires multiprocess from pathos)
with self._create_dummy_dataset(in_memory, tmp_dir) as dset:
fingerprint = dset._fingerprint
with dset.map(lambda x: {"id": int(x["filename"].split("_")[-1])}, num_proc=2) as dset_test:
self.assertEqual(len(dset_test), 30)
self.assertDictEqual(dset.features, Features({"filename": Value("string")}))
self.assertDictEqual(
dset_test.features,
Features({"filename": Value("string"), "id": Value("int64")}),
)
self.assertEqual(len(dset_test.cache_files), 0 if in_memory else 2)
self.assertListEqual(dset_test["id"], list(range(30)))
self.assertNotEqual(dset_test._fingerprint, fingerprint)
assert_arrow_metadata_are_synced_with_dataset_features(dset_test)
def test_map_new_features(self, in_memory):
with tempfile.TemporaryDirectory() as tmp_dir:
with self._create_dummy_dataset(in_memory, tmp_dir) as dset:
features = Features({"filename": Value("string"), "label": ClassLabel(names=["positive", "negative"])})
with dset.map(
lambda x, i: {"label": i % 2}, with_indices=True, features=features
) as dset_test_with_indices:
self.assertEqual(len(dset_test_with_indices), 30)
self.assertDictEqual(
dset_test_with_indices.features,
features,
)
assert_arrow_metadata_are_synced_with_dataset_features(dset_test_with_indices)
def test_map_batched(self, in_memory):
def map_batched(example):
return {"filename_new": [x + "_extension" for x in example["filename"]]}
with tempfile.TemporaryDirectory() as tmp_dir:
with self._create_dummy_dataset(in_memory, tmp_dir) as dset:
with dset.map(map_batched, batched=True) as dset_test_batched:
self.assertEqual(len(dset_test_batched), 30)
self.assertDictEqual(dset.features, Features({"filename": Value("string")}))
self.assertDictEqual(
dset_test_batched.features,
Features({"filename": Value("string"), "filename_new": Value("string")}),
)
assert_arrow_metadata_are_synced_with_dataset_features(dset_test_batched)
# change batch size and drop the last batch
with tempfile.TemporaryDirectory() as tmp_dir:
with self._create_dummy_dataset(in_memory, tmp_dir) as dset:
batch_size = 4
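# drop_last_batch=True drops the trailing incomplete batch: 30 examples with batch_size=4 keep only 28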
with dset.map(
map_batched, batched=True, batch_size=batch_size, drop_last_batch=True
) as dset_test_batched:
self.assertEqual(len(dset_test_batched), 30 // batch_size * batch_size)
self.assertDictEqual(dset.features, Features({"filename": Value("string")}))
self.assertDictEqual(
dset_test_batched.features,
Features({"filename": Value("string"), "filename_new": Value("string")}),
)
assert_arrow_metadata_are_synced_with_dataset_features(dset_test_batched)
with tempfile.TemporaryDirectory() as tmp_dir:
with self._create_dummy_dataset(in_memory, tmp_dir) as dset:
with dset.formatted_as("numpy", columns=["filename"]):
with dset.map(map_batched, batched=True) as dset_test_batched:
self.assertEqual(len(dset_test_batched), 30)
self.assertDictEqual(dset.features, Features({"filename": Value("string")}))
self.assertDictEqual(
dset_test_batched.features,
Features({"filename": Value("string"), "filename_new": Value("string")}),
)
assert_arrow_metadata_are_synced_with_dataset_features(dset_test_batched)
def map_batched_with_indices(example, idx):
return {"filename_new": [x + "_extension_" + str(idx) for x in example["filename"]]}
with tempfile.TemporaryDirectory() as tmp_dir:
with self._create_dummy_dataset(in_memory, tmp_dir) as dset:
with dset.map(
map_batched_with_indices, batched=True, with_indices=True
) as dset_test_with_indices_batched:
self.assertEqual(len(dset_test_with_indices_batched), 30)
self.assertDictEqual(dset.features, Features({"filename": Value("string")}))
self.assertDictEqual(
dset_test_with_indices_batched.features,
Features({"filename": Value("string"), "filename_new": Value("string")}),
)
assert_arrow_metadata_are_synced_with_dataset_features(dset_test_with_indices_batched)
# check that remove_columns works even if the function modifies its input in-place
def map_batched_modifying_inputs_inplace(example):
result = {"filename_new": [x + "_extension" for x in example["filename"]]}
del example["filename"]
return result
with tempfile.TemporaryDirectory() as tmp_dir:
with self._create_dummy_dataset(in_memory, tmp_dir) as dset:
with dset.map(
map_batched_modifying_inputs_inplace, batched=True, remove_columns="filename"
) as dset_test_modifying_inputs_inplace:
self.assertEqual(len(dset_test_modifying_inputs_inplace), 30)
self.assertDictEqual(dset.features, Features({"filename": Value("string")}))
self.assertDictEqual(
dset_test_modifying_inputs_inplace.features,
Features({"filename_new": Value("string")}),
)
assert_arrow_metadata_are_synced_with_dataset_features(dset_test_modifying_inputs_inplace)
def test_map_nested(self, in_memory):
with tempfile.TemporaryDirectory() as tmp_dir:
with Dataset.from_dict({"field": ["a", "b"]}) as dset:
with self._to(in_memory, tmp_dir, dset) as dset:
with dset.map(lambda example: {"otherfield": {"capital": example["field"].capitalize()}}) as dset:
with dset.map(lambda example: {"otherfield": {"append_x": example["field"] + "x"}}) as dset:
self.assertEqual(dset[0], {"field": "a", "otherfield": {"append_x": "ax"}})
def test_map_return_example_as_dict_value(self, in_memory):
with tempfile.TemporaryDirectory() as tmp_dir:
with Dataset.from_dict({"en": ["aa", "bb"], "fr": ["cc", "dd"]}) as dset:
with self._to(in_memory, tmp_dir, dset) as dset:
with dset.map(lambda example: {"translation": example}) as dset:
self.assertEqual(dset[0], {"en": "aa", "fr": "cc", "translation": {"en": "aa", "fr": "cc"}})
def test_map_fn_kwargs(self, in_memory):
with tempfile.TemporaryDirectory() as tmp_dir:
with Dataset.from_dict({"id": range(10)}) as dset:
with self._to(in_memory, tmp_dir, dset) as dset:
fn_kwargs = {"offset": 3}
with dset.map(
lambda example, offset: {"id+offset": example["id"] + offset}, fn_kwargs=fn_kwargs
) as mapped_dset:
assert mapped_dset["id+offset"] == list(range(3, 13))
with dset.map(
lambda id, offset: {"id+offset": id + offset}, fn_kwargs=fn_kwargs, input_columns="id"
) as mapped_dset:
assert mapped_dset["id+offset"] == list(range(3, 13))
with dset.map(
lambda id, i, offset: {"id+offset": i + offset},
fn_kwargs=fn_kwargs,
input_columns="id",
with_indices=True,
) as mapped_dset:
assert mapped_dset["id+offset"] == list(range(3, 13))
def test_map_caching(self, in_memory):
with tempfile.TemporaryDirectory() as tmp_dir:
self._caplog.clear()
with self._caplog.at_level(INFO, logger=get_logger().name):
with self._create_dummy_dataset(in_memory, tmp_dir) as dset:
with patch(
"datasets.arrow_dataset.Dataset._map_single",
autospec=Dataset._map_single,
side_effect=Dataset._map_single,
) as mock_map_single:
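# the second identical map should hit the cache when the dataset is on disk, so _map_single runs only once in that case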
with dset.map(lambda x: {"foo": "bar"}) as dset_test1:
dset_test1_data_files = list(dset_test1.cache_files)
self.assertEqual(mock_map_single.call_count, 1)
with dset.map(lambda x: {"foo": "bar"}) as dset_test2:
self.assertEqual(dset_test1_data_files, dset_test2.cache_files)
self.assertEqual(len(dset_test2.cache_files), 1 - int(in_memory))
self.assertTrue(("Loading cached processed dataset" in self._caplog.text) ^ in_memory)
self.assertEqual(mock_map_single.call_count, 2 if in_memory else 1)
with tempfile.TemporaryDirectory() as tmp_dir:
self._caplog.clear()
with self._caplog.at_level(INFO, logger=get_logger().name):
with self._create_dummy_dataset(in_memory, tmp_dir) as dset:
with dset.map(lambda x: {"foo": "bar"}) as dset_test1:
dset_test1_data_files = list(dset_test1.cache_files)
with dset.map(lambda x: {"foo": "bar"}, load_from_cache_file=False) as dset_test2:
self.assertEqual(dset_test1_data_files, dset_test2.cache_files)
self.assertEqual(len(dset_test2.cache_files), 1 - int(in_memory))
self.assertNotIn("Loading cached processed dataset", self._caplog.text)
with tempfile.TemporaryDirectory() as tmp_dir:
self._caplog.clear()
with self._caplog.at_level(INFO, logger=get_logger().name):
with self._create_dummy_dataset(in_memory, tmp_dir) as dset:
with patch(
"datasets.arrow_dataset.Pool",
new_callable=PickableMagicMock,
side_effect=datasets.arrow_dataset.Pool,
) as mock_pool:
with dset.map(lambda x: {"foo": "bar"}, num_proc=2) as dset_test1:
dset_test1_data_files = list(dset_test1.cache_files)
self.assertEqual(mock_pool.call_count, 1)
with dset.map(lambda x: {"foo": "bar"}, num_proc=2) as dset_test2:
self.assertEqual(dset_test1_data_files, dset_test2.cache_files)
self.assertTrue(
(len(re.findall("Loading cached processed dataset", self._caplog.text)) == 1)
^ in_memory
)
self.assertEqual(mock_pool.call_count, 2 if in_memory else 1)
with tempfile.TemporaryDirectory() as tmp_dir:
self._caplog.clear()
with self._caplog.at_level(INFO, logger=get_logger().name):
with self._create_dummy_dataset(in_memory, tmp_dir) as dset:
with dset.map(lambda x: {"foo": "bar"}, num_proc=2) as dset_test1:
dset_test1_data_files = list(dset_test1.cache_files)
with dset.map(lambda x: {"foo": "bar"}, num_proc=2, load_from_cache_file=False) as dset_test2:
self.assertEqual(dset_test1_data_files, dset_test2.cache_files)
self.assertEqual(len(dset_test2.cache_files), (1 - int(in_memory)) * 2)
self.assertNotIn("Loading cached processed dataset", self._caplog.text)
if not in_memory:
try:
self._caplog.clear()
with tempfile.TemporaryDirectory() as tmp_dir:
with self._caplog.at_level(INFO, logger=get_logger().name):
with self._create_dummy_dataset(in_memory, tmp_dir) as dset:
datasets.disable_caching()
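# with caching disabled, each map call writes to a new temporary arrow file instead of reusing a cached one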
with dset.map(lambda x: {"foo": "bar"}) as dset_test1:
with dset.map(lambda x: {"foo": "bar"}) as dset_test2:
self.assertNotEqual(dset_test1.cache_files, dset_test2.cache_files)
self.assertEqual(len(dset_test1.cache_files), 1)
self.assertEqual(len(dset_test2.cache_files), 1)
self.assertNotIn("Loading cached processed dataset", self._caplog.text)
# make sure the arrow files are going to be removed
self.assertIn(
Path(tempfile.gettempdir()),
Path(dset_test1.cache_files[0]["filename"]).parents,
)
self.assertIn(
Path(tempfile.gettempdir()),
Path(dset_test2.cache_files[0]["filename"]).parents,
)
finally:
datasets.enable_caching()
def test_map_return_pa_table(self, in_memory):
def func_return_single_row_pa_table(x):
return pa.table({"id": [0], "text": ["a"]})
with tempfile.TemporaryDirectory() as tmp_dir:
with self._create_dummy_dataset(in_memory, tmp_dir) as dset:
with dset.map(func_return_single_row_pa_table) as dset_test:
self.assertEqual(len(dset_test), 30)
self.assertDictEqual(
dset_test.features,
Features({"id": Value("int64"), "text": Value("string")}),
)
self.assertEqual(dset_test[0]["id"], 0)
self.assertEqual(dset_test[0]["text"], "a")
# Batched
def func_return_single_row_pa_table_batched(x):
batch_size = len(x[next(iter(x))])
return pa.table({"id": [0] * batch_size, "text": ["a"] * batch_size})
with tempfile.TemporaryDirectory() as tmp_dir:
with self._create_dummy_dataset(in_memory, tmp_dir) as dset:
with dset.map(func_return_single_row_pa_table_batched, batched=True) as dset_test:
self.assertEqual(len(dset_test), 30)
self.assertDictEqual(
dset_test.features,
Features({"id": Value("int64"), "text": Value("string")}),
)
self.assertEqual(dset_test[0]["id"], 0)
self.assertEqual(dset_test[0]["text"], "a")
# Error when returning a table with more than one row in the non-batched mode
def func_return_multi_row_pa_table(x):
return pa.table({"id": [0, 1], "text": ["a", "b"]})
with tempfile.TemporaryDirectory() as tmp_dir:
with self._create_dummy_dataset(in_memory, tmp_dir) as dset:
self.assertRaises(ValueError, dset.map, func_return_multi_row_pa_table)
# arrow formatted dataset
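# with the "arrow" output format the mapped function receives a pyarrow Table and can return one built from a dataset expression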
def func_return_table_from_expression(t):
import pyarrow.dataset as pds
return pds.dataset(t).to_table(
columns={"new_column": pds.field("")._call("ascii_capitalize", [pds.field("filename")])}
)
with tempfile.TemporaryDirectory() as tmp_dir:
with self._create_dummy_dataset(in_memory, tmp_dir) as dset:
with dset.with_format("arrow").map(func_return_table_from_expression, batched=True) as dset_test:
self.assertEqual(len(dset_test), 30)
self.assertDictEqual(
dset_test.features,
Features({"new_column": Value("string")}),
)
self.assertEqual(dset_test.with_format(None)[0]["new_column"], dset[0]["filename"].capitalize())
def test_map_return_pd_dataframe(self, in_memory):
def func_return_single_row_pd_dataframe(x):
return pd.DataFrame({"id": [0], "text": ["a"]})
with tempfile.TemporaryDirectory() as tmp_dir:
with self._create_dummy_dataset(in_memory, tmp_dir) as dset:
with dset.map(func_return_single_row_pd_dataframe) as dset_test:
self.assertEqual(len(dset_test), 30)
self.assertDictEqual(
dset_test.features,
Features({"id": Value("int64"), "text": Value("string")}),
)
self.assertEqual(dset_test[0]["id"], 0)
self.assertEqual(dset_test[0]["text"], "a")
# Batched
def func_return_single_row_pd_dataframe_batched(x):
batch_size = len(x[next(iter(x))])
return pd.DataFrame({"id": [0] * batch_size, "text": ["a"] * batch_size})
with tempfile.TemporaryDirectory() as tmp_dir:
with self._create_dummy_dataset(in_memory, tmp_dir) as dset:
with dset.map(func_return_single_row_pd_dataframe_batched, batched=True) as dset_test:
self.assertEqual(len(dset_test), 30)
self.assertDictEqual(
dset_test.features,
Features({"id": Value("int64"), "text": Value("string")}),
)
self.assertEqual(dset_test[0]["id"], 0)
self.assertEqual(dset_test[0]["text"], "a")
# Error when returning a table with more than one row in the non-batched mode
def func_return_multi_row_pd_dataframe(x):
return pd.DataFrame({"id": [0, 1], "text": ["a", "b"]})
with tempfile.TemporaryDirectory() as tmp_dir:
with self._create_dummy_dataset(in_memory, tmp_dir) as dset:
self.assertRaises(ValueError, dset.map, func_return_multi_row_pd_dataframe)
@require_polars
def test_map_return_pl_dataframe(self, in_memory):
import polars as pl
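# polars uses Arrow large_string for text columns, which is reflected in the resulting features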
def func_return_single_row_pl_dataframe(x):
return pl.DataFrame({"id": [0], "text": ["a"]})
with tempfile.TemporaryDirectory() as tmp_dir:
with self._create_dummy_dataset(in_memory, tmp_dir) as dset:
with dset.map(func_return_single_row_pl_dataframe) as dset_test:
self.assertEqual(len(dset_test), 30)
self.assertDictEqual(
dset_test.features,
Features({"id": Value("int64"), "text": Value("large_string")}),
)
self.assertEqual(dset_test[0]["id"], 0)
self.assertEqual(dset_test[0]["text"], "a")
# Batched
def func_return_single_row_pl_dataframe_batched(x):
batch_size = len(x[next(iter(x))])
return pl.DataFrame({"id": [0] * batch_size, "text": ["a"] * batch_size})
with tempfile.TemporaryDirectory() as tmp_dir:
with self._create_dummy_dataset(in_memory, tmp_dir) as dset:
with dset.map(func_return_single_row_pl_dataframe_batched, batched=True) as dset_test:
self.assertEqual(len(dset_test), 30)
self.assertDictEqual(
dset_test.features,
Features({"id": Value("int64"), "text": Value("large_string")}),
)
self.assertEqual(dset_test[0]["id"], 0)
self.assertEqual(dset_test[0]["text"], "a")
# Error when returning a table with more than one row in the non-batched mode
def func_return_multi_row_pl_dataframe(x):
return pl.DataFrame({"id": [0, 1], "text": ["a", "b"]})
with tempfile.TemporaryDirectory() as tmp_dir:
with self._create_dummy_dataset(in_memory, tmp_dir) as dset:
self.assertRaises(ValueError, dset.map, func_return_multi_row_pl_dataframe)
@require_numpy1_on_windows
@require_torch
def test_map_torch(self, in_memory):
import torch
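# returned torch tensors are stored as float32 sequences and read back as plain Python lists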
def func(example):
return {"tensor": torch.tensor([1.0, 2, 3])}
with tempfile.TemporaryDirectory() as tmp_dir:
with self._create_dummy_dataset(in_memory, tmp_dir) as dset:
with dset.map(func) as dset_test:
self.assertEqual(len(dset_test), 30)
self.assertDictEqual(
dset_test.features,
Features({"filename": Value("string"), "tensor": Sequence(Value("float32"))}),
)
self.assertListEqual(dset_test[0]["tensor"], [1, 2, 3])
@require_tf
def test_map_tf(self, in_memory):
import tensorflow as tf
def func(example):
return {"tensor": tf.constant([1.0, 2, 3])}
with tempfile.TemporaryDirectory() as tmp_dir:
with self._create_dummy_dataset(in_memory, tmp_dir) as dset:
with dset.map(func) as dset_test:
self.assertEqual(len(dset_test), 30)
self.assertDictEqual(
dset_test.features,
Features({"filename": Value("string"), "tensor": Sequence(Value("float32"))}),
)
self.assertListEqual(dset_test[0]["tensor"], [1, 2, 3])
@require_jax
def test_map_jax(self, in_memory):
import jax.numpy as jnp
def func(example):
return {"tensor": jnp.asarray([1.0, 2, 3])}
with tempfile.TemporaryDirectory() as tmp_dir:
with self._create_dummy_dataset(in_memory, tmp_dir) as dset:
with dset.map(func) as dset_test:
self.assertEqual(len(dset_test), 30)
self.assertDictEqual(
dset_test.features,
Features({"filename": Value("string"), "tensor": Sequence(Value("float32"))}),
)
self.assertListEqual(dset_test[0]["tensor"], [1, 2, 3])
def test_map_numpy(self, in_memory):
def func(example):
return {"tensor": np.array([1.0, 2, 3])}
with tempfile.TemporaryDirectory() as tmp_dir:
with self._create_dummy_dataset(in_memory, tmp_dir) as dset:
with dset.map(func) as dset_test:
self.assertEqual(len(dset_test), 30)
self.assertDictEqual(
dset_test.features,
Features({"filename": Value("string"), "tensor": Sequence(Value("float64"))}),
)
self.assertListEqual(dset_test[0]["tensor"], [1, 2, 3])
@require_numpy1_on_windows
@require_torch
def test_map_tensor_batched(self, in_memory):
import torch
def func(batch):
return {"tensor": torch.tensor([[1.0, 2, 3]] * len(batch["filename"]))}
with tempfile.TemporaryDirectory() as tmp_dir:
with self._create_dummy_dataset(in_memory, tmp_dir) as dset:
with dset.map(func, batched=True) as dset_test:
self.assertEqual(len(dset_test), 30)
self.assertDictEqual(
dset_test.features,
Features({"filename": Value("string"), "tensor": Sequence(Value("float32"))}),
)
self.assertListEqual(dset_test[0]["tensor"], [1, 2, 3])
def test_map_input_columns(self, in_memory):
with tempfile.TemporaryDirectory() as tmp_dir:
with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset:
with dset.map(lambda col_1: {"label": col_1 % 2}, input_columns="col_1") as mapped_dset:
self.assertEqual(mapped_dset[0].keys(), {"col_1", "col_2", "col_3", "label"})
self.assertEqual(
mapped_dset.features,
Features(
{
"col_1": Value("int64"),
"col_2": Value("string"),
"col_3": Value("bool"),
"label": Value("int64"),
}
),
)
def test_map_remove_columns(self, in_memory):
with tempfile.TemporaryDirectory() as tmp_dir:
with self._create_dummy_dataset(in_memory, tmp_dir) as dset:
with dset.map(lambda x, i: {"name": x["filename"][:-2], "id": i}, with_indices=True) as dset:
self.assertTrue("id" in dset[0])
self.assertDictEqual(
dset.features,
Features({"filename": Value("string"), "name": Value("string"), "id": Value("int64")}),
)
assert_arrow_metadata_are_synced_with_dataset_features(dset)
with dset.map(lambda x: x, remove_columns=["id"]) as mapped_dset:
self.assertTrue("id" not in mapped_dset[0])
self.assertDictEqual(
mapped_dset.features, Features({"filename": Value("string"), "name": Value("string")})
)
assert_arrow_metadata_are_synced_with_dataset_features(mapped_dset)
with mapped_dset.with_format("numpy", columns=mapped_dset.column_names) as mapped_dset:
with mapped_dset.map(
lambda x: {"name": 1}, remove_columns=mapped_dset.column_names
) as mapped_dset:
self.assertTrue("filename" not in mapped_dset[0])
self.assertTrue("name" in mapped_dset[0])
self.assertDictEqual(mapped_dset.features, Features({"name": Value(dtype="int64")}))
assert_arrow_metadata_are_synced_with_dataset_features(mapped_dset)
# empty dataset
columns_names = dset.column_names
with dset.select([]) as empty_dset:
self.assertEqual(len(empty_dset), 0)
with empty_dset.map(lambda x: {}, remove_columns=columns_names[0]) as mapped_dset:
self.assertListEqual(columns_names[1:], mapped_dset.column_names)
assert_arrow_metadata_are_synced_with_dataset_features(mapped_dset)
def test_map_stateful_callable(self, in_memory):
# be sure that the state of the map callable is unaffected
# before processing the dataset examples
class ExampleCounter:
def __init__(self, batched=False):
self.batched = batched
# state
self.cnt = 0
def __call__(self, example):
if self.batched:
self.cnt += len(example)
else:
self.cnt += 1
with tempfile.TemporaryDirectory() as tmp_dir:
with self._create_dummy_dataset(in_memory, tmp_dir) as dset:
ex_cnt = ExampleCounter()
dset.map(ex_cnt)
self.assertEqual(ex_cnt.cnt, len(dset))
ex_cnt = ExampleCounter(batched=True)
dset.map(ex_cnt)
self.assertEqual(ex_cnt.cnt, len(dset))
@require_not_windows
def test_map_crash_subprocess(self, in_memory):
# be sure that a crash in one of the subprocesses will not
# hang the dataset.map() call forever
def do_crash(row):
import os
os.kill(os.getpid(), 9)
return row
with tempfile.TemporaryDirectory() as tmp_dir:
with self._create_dummy_dataset(in_memory, tmp_dir) as dset:
with pytest.raises(RuntimeError) as excinfo:
dset.map(do_crash, num_proc=2)
assert str(excinfo.value) == (
"One of the subprocesses has abruptly died during map operation."
"To debug the error, disable multiprocessing."
)
def test_filter(self, in_memory):
# keep only first five examples
with tempfile.TemporaryDirectory() as tmp_dir:
with self._create_dummy_dataset(in_memory, tmp_dir) as dset:
fingerprint = dset._fingerprint
with dset.filter(lambda x, i: i < 5, with_indices=True) as dset_filter_first_five:
self.assertEqual(len(dset_filter_first_five), 5)
self.assertDictEqual(dset.features, Features({"filename": Value("string")}))
self.assertDictEqual(dset_filter_first_five.features, Features({"filename": Value("string")}))
self.assertNotEqual(dset_filter_first_five._fingerprint, fingerprint)
# filter filenames with even id at the end + formatted
with tempfile.TemporaryDirectory() as tmp_dir:
with self._create_dummy_dataset(in_memory, tmp_dir) as dset:
dset.set_format("numpy")
fingerprint = dset._fingerprint
with dset.filter(lambda x: (int(x["filename"][-1]) % 2 == 0)) as dset_filter_even_num:
self.assertEqual(len(dset_filter_even_num), 15)
self.assertDictEqual(dset.features, Features({"filename": Value("string")}))
self.assertDictEqual(dset_filter_even_num.features, Features({"filename": Value("string")}))
self.assertNotEqual(dset_filter_even_num._fingerprint, fingerprint)
self.assertEqual(dset_filter_even_num.format["type"], "numpy")
def test_filter_with_indices_mapping(self, in_memory):
with tempfile.TemporaryDirectory() as tmp_dir:
dset = Dataset.from_dict({"col": [0, 1, 2]})
with self._to(in_memory, tmp_dir, dset) as dset:
with dset.filter(lambda x: x["col"] > 0) as dset:
self.assertListEqual(dset["col"], [1, 2])
with dset.filter(lambda x: x["col"] < 2) as dset:
self.assertListEqual(dset["col"], [1])
def test_filter_empty(self, in_memory):
with tempfile.TemporaryDirectory() as tmp_dir:
with self._create_dummy_dataset(in_memory, tmp_dir) as dset:
self.assertIsNone(dset._indices)
tmp_file = os.path.join(tmp_dir, "test.arrow")
with dset.filter(lambda _: False, cache_file_name=tmp_file) as dset:
self.assertEqual(len(dset), 0)
self.assertIsNotNone(dset._indices)
tmp_file_2 = os.path.join(tmp_dir, "test_2.arrow")
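# filtering the already-empty dataset again keeps an equal (empty) indices mapping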
with dset.filter(lambda _: False, cache_file_name=tmp_file_2) as dset2:
self.assertEqual(len(dset2), 0)
self.assertEqual(dset._indices, dset2._indices)
def test_filter_batched(self, in_memory):
with tempfile.TemporaryDirectory() as tmp_dir:
dset = Dataset.from_dict({"col": [0, 1, 2]})
with self._to(in_memory, tmp_dir, dset) as dset:
with dset.filter(lambda x: [i > 0 for i in x["col"]], batched=True) as dset:
self.assertListEqual(dset["col"], [1, 2])
with dset.filter(lambda x: [i < 2 for i in x["col"]], batched=True) as dset:
self.assertListEqual(dset["col"], [1])
def test_filter_input_columns(self, in_memory):
with tempfile.TemporaryDirectory() as tmp_dir:
dset = Dataset.from_dict({"col_1": [0, 1, 2], "col_2": ["a", "b", "c"]})
with self._to(in_memory, tmp_dir, dset) as dset:
with dset.filter(lambda x: x > 0, input_columns=["col_1"]) as filtered_dset:
self.assertListEqual(filtered_dset.column_names, dset.column_names)
self.assertListEqual(filtered_dset["col_1"], [1, 2])
self.assertListEqual(filtered_dset["col_2"], ["b", "c"])
def test_filter_fn_kwargs(self, in_memory):
with tempfile.TemporaryDirectory() as tmp_dir:
with Dataset.from_dict({"id": range(10)}) as dset:
with self._to(in_memory, tmp_dir, dset) as dset:
fn_kwargs = {"max_offset": 3}
with dset.filter(
lambda example, max_offset: example["id"] < max_offset, fn_kwargs=fn_kwargs
) as filtered_dset:
assert len(filtered_dset) == 3
with dset.filter(
lambda id, max_offset: id < max_offset, fn_kwargs=fn_kwargs, input_columns="id"
) as filtered_dset:
assert len(filtered_dset) == 3
with dset.filter(
lambda id, i, max_offset: i < max_offset,
fn_kwargs=fn_kwargs,
input_columns="id",
with_indices=True,
) as filtered_dset:
assert len(filtered_dset) == 3
def test_filter_multiprocessing(self, in_memory):
with tempfile.TemporaryDirectory() as tmp_dir:
with self._create_dummy_dataset(in_memory, tmp_dir) as dset:
fingerprint = dset._fingerprint
with dset.filter(picklable_filter_function, num_proc=2) as dset_filter_first_ten:
self.assertEqual(len(dset_filter_first_ten), 10)
self.assertDictEqual(dset.features, Features({"filename": Value("string")}))
self.assertDictEqual(dset_filter_first_ten.features, Features({"filename": Value("string")}))
self.assertEqual(len(dset_filter_first_ten.cache_files), 0 if in_memory else 2)
self.assertNotEqual(dset_filter_first_ten._fingerprint, fingerprint)
with tempfile.TemporaryDirectory() as tmp_dir: # with_rank
with self._create_dummy_dataset(in_memory, tmp_dir) as dset:
fingerprint = dset._fingerprint
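# with num_proc=2 the rank-aware predicate is expected to keep a single worker's shard, i.e. half of the examples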
with dset.filter(
picklable_filter_function_with_rank, num_proc=2, with_rank=True
) as dset_filter_first_rank:
self.assertEqual(len(dset_filter_first_rank), min(len(dset) // 2, len(dset)))
self.assertDictEqual(dset.features, Features({"filename": Value("string")}))
self.assertDictEqual(dset_filter_first_rank.features, Features({"filename": Value("string")}))
self.assertEqual(len(dset_filter_first_rank.cache_files), 0 if in_memory else 2)
self.assertNotEqual(dset_filter_first_rank._fingerprint, fingerprint)
def test_filter_caching(self, in_memory):
with tempfile.TemporaryDirectory() as tmp_dir:
self._caplog.clear()
with self._caplog.at_level(INFO, logger=get_logger().name):
with self._create_dummy_dataset(in_memory, tmp_dir) as dset:
with dset.filter(lambda x, i: i < 5, with_indices=True) as dset_filter_first_five1:
dset_test1_data_files = list(dset_filter_first_five1.cache_files)
with dset.filter(lambda x, i: i < 5, with_indices=True) as dset_filter_first_five2:
self.assertEqual(dset_test1_data_files, dset_filter_first_five2.cache_files)
self.assertEqual(len(dset_filter_first_five2.cache_files), 0 if in_memory else 2)
self.assertTrue(("Loading cached processed dataset" in self._caplog.text) ^ in_memory)
def test_keep_features_after_transform_specified(self, in_memory):
features = Features(
{
"tokens": Sequence(Value("string")),
"labels": Sequence(ClassLabel(names=["negative", "positive"])),
}
)
def invert_labels(x):
return {"labels": [(1 - label) for label in x["labels"]]}
with tempfile.TemporaryDirectory() as tmp_dir:
with Dataset.from_dict(
{"tokens": [["foo"] * 5] * 10, "labels": [[1] * 5] * 10}, features=features
) as dset:
with self._to(in_memory, tmp_dir, dset) as dset:
with dset.map(invert_labels, features=features) as inverted_dset:
self.assertEqual(inverted_dset.features.type, features.type)
self.assertDictEqual(inverted_dset.features, features)
assert_arrow_metadata_are_synced_with_dataset_features(inverted_dset)
def test_keep_features_after_transform_unspecified(self, in_memory):
features = Features(
{
"tokens": Sequence(Value("string")),
"labels": Sequence(ClassLabel(names=["negative", "positive"])),
}
)
def invert_labels(x):
return {"labels": [(1 - label) for label in x["labels"]]}
with tempfile.TemporaryDirectory() as tmp_dir:
with Dataset.from_dict(
{"tokens": [["foo"] * 5] * 10, "labels": [[1] * 5] * 10}, features=features
) as dset:
with self._to(in_memory, tmp_dir, dset) as dset:
with dset.map(invert_labels) as inverted_dset:
self.assertEqual(inverted_dset.features.type, features.type)
self.assertDictEqual(inverted_dset.features, features)
assert_arrow_metadata_are_synced_with_dataset_features(inverted_dset)
def test_keep_features_after_transform_to_file(self, in_memory):
features = Features(
{
"tokens": Sequence(Value("string")),
"labels": Sequence(ClassLabel(names=["negative", "positive"])),
}
)
def invert_labels(x):
return {"labels": [(1 - label) for label in x["labels"]]}
with tempfile.TemporaryDirectory() as tmp_dir:
with Dataset.from_dict(
{"tokens": [["foo"] * 5] * 10, "labels": [[1] * 5] * 10}, features=features
) as dset:
with self._to(in_memory, tmp_dir, dset) as dset:
tmp_file = os.path.join(tmp_dir, "test.arrow")
dset.map(invert_labels, cache_file_name=tmp_file)
with Dataset.from_file(tmp_file) as inverted_dset:
self.assertEqual(inverted_dset.features.type, features.type)
self.assertDictEqual(inverted_dset.features, features)
def test_keep_features_after_transform_to_memory(self, in_memory):
features = Features(
{
"tokens": Sequence(Value("string")),
"labels": Sequence(ClassLabel(names=["negative", "positive"])),
}
)
def invert_labels(x):
return {"labels": [(1 - label) for label in x["labels"]]}
with tempfile.TemporaryDirectory() as tmp_dir:
with Dataset.from_dict(
{"tokens": [["foo"] * 5] * 10, "labels": [[1] * 5] * 10}, features=features
) as dset:
with self._to(in_memory, tmp_dir, dset) as dset:
with dset.map(invert_labels, keep_in_memory=True) as inverted_dset:
self.assertEqual(inverted_dset.features.type, features.type)
self.assertDictEqual(inverted_dset.features, features)
def test_keep_features_after_loading_from_cache(self, in_memory):
features = Features(
{
"tokens": Sequence(Value("string")),
"labels": Sequence(ClassLabel(names=["negative", "positive"])),
}
)
def invert_labels(x):
return {"labels": [(1 - label) for label in x["labels"]]}
with tempfile.TemporaryDirectory() as tmp_dir:
with Dataset.from_dict(
{"tokens": [["foo"] * 5] * 10, "labels": [[1] * 5] * 10}, features=features
) as dset:
with self._to(in_memory, tmp_dir, dset) as dset:
tmp_file1 = os.path.join(tmp_dir, "test1.arrow")
tmp_file2 = os.path.join(tmp_dir, "test2.arrow")
# TODO: Why mapped twice?
inverted_dset = dset.map(invert_labels, cache_file_name=tmp_file1)
inverted_dset = dset.map(invert_labels, cache_file_name=tmp_file2)
self.assertGreater(len(inverted_dset.cache_files), 0)
self.assertEqual(inverted_dset.features.type, features.type)
self.assertDictEqual(inverted_dset.features, features)
del inverted_dset
def test_keep_features_with_new_features(self, in_memory):
features = Features(
{
"tokens": Sequence(Value("string")),
"labels": Sequence(ClassLabel(names=["negative", "positive"])),
}
)
def invert_labels(x):
return {"labels": [(1 - label) for label in x["labels"]], "labels2": x["labels"]}
expected_features = Features(
{
"tokens": Sequence(Value("string")),
"labels": Sequence(ClassLabel(names=["negative", "positive"])),
"labels2": Sequence(Value("int64")),
}
)
with tempfile.TemporaryDirectory() as tmp_dir:
with Dataset.from_dict(
{"tokens": [["foo"] * 5] * 10, "labels": [[1] * 5] * 10}, features=features
) as dset:
with self._to(in_memory, tmp_dir, dset) as dset:
with dset.map(invert_labels) as inverted_dset:
self.assertEqual(inverted_dset.features.type, expected_features.type)
self.assertDictEqual(inverted_dset.features, expected_features)
assert_arrow_metadata_are_synced_with_dataset_features(inverted_dset)
def test_select(self, in_memory):
with tempfile.TemporaryDirectory() as tmp_dir:
with self._create_dummy_dataset(in_memory, tmp_dir) as dset:
# select every other example
indices = list(range(0, len(dset), 2))
tmp_file = os.path.join(tmp_dir, "test.arrow")
fingerprint = dset._fingerprint
with dset.select(indices, indices_cache_file_name=tmp_file) as dset_select_even:
self.assertIsNotNone(dset_select_even._indices) # an indices mapping is created
self.assertTrue(os.path.exists(tmp_file))
self.assertEqual(len(dset_select_even), 15)
for row in dset_select_even:
self.assertEqual(int(row["filename"][-1]) % 2, 0)
self.assertDictEqual(dset.features, Features({"filename": Value("string")}))
self.assertDictEqual(dset_select_even.features, Features({"filename": Value("string")}))
self.assertNotEqual(dset_select_even._fingerprint, fingerprint)
with tempfile.TemporaryDirectory() as tmp_dir:
with self._create_dummy_dataset(in_memory, tmp_dir) as dset:
indices = list(range(0, len(dset)))
with dset.select(indices) as dset_select_all:
# no indices mapping, since the indices are contiguous
# (in this case the arrow table is simply sliced, which is more efficient)
self.assertIsNone(dset_select_all._indices)
self.assertEqual(len(dset_select_all), len(dset))
self.assertListEqual(list(dset_select_all), list(dset))
self.assertDictEqual(dset.features, Features({"filename": Value("string")}))
self.assertDictEqual(dset_select_all.features, Features({"filename": Value("string")}))
self.assertNotEqual(dset_select_all._fingerprint, fingerprint)
indices = range(0, len(dset))
with dset.select(indices) as dset_select_all:
# same but with range
self.assertIsNone(dset_select_all._indices)
self.assertEqual(len(dset_select_all), len(dset))
self.assertListEqual(list(dset_select_all), list(dset))
self.assertDictEqual(dset.features, Features({"filename": Value("string")}))
self.assertDictEqual(dset_select_all.features, Features({"filename": Value("string")}))
self.assertNotEqual(dset_select_all._fingerprint, fingerprint)
with tempfile.TemporaryDirectory() as tmp_dir:
with self._create_dummy_dataset(in_memory, tmp_dir) as dset:
bad_indices = list(range(5))
bad_indices[-1] = len(dset) + 10 # out of bounds
tmp_file = os.path.join(tmp_dir, "test.arrow")
self.assertRaises(
Exception,
dset.select,
indices=bad_indices,
indices_cache_file_name=tmp_file,
writer_batch_size=2,
)
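                # the indices cache file should not have been written, since select() raised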
self.assertFalse(os.path.exists(tmp_file))
with tempfile.TemporaryDirectory() as tmp_dir:
with self._create_dummy_dataset(in_memory, tmp_dir) as dset:
indices = iter(range(len(dset))) # iterator of contiguous indices
with dset.select(indices) as dset_select_all:
# no indices mapping, since the indices are contiguous
self.assertIsNone(dset_select_all._indices)
self.assertEqual(len(dset_select_all), len(dset))
                indices = reversed(range(len(dset)))  # iterator of non-contiguous indices
tmp_file = os.path.join(tmp_dir, "test.arrow")
with dset.select(indices, indices_cache_file_name=tmp_file) as dset_select_all:
# new indices mapping, since the indices are not contiguous
self.assertIsNotNone(dset_select_all._indices)
self.assertEqual(len(dset_select_all), len(dset))
with tempfile.TemporaryDirectory() as tmp_dir:
with self._create_dummy_dataset(in_memory, tmp_dir) as dset:
bad_indices = list(range(5))
bad_indices[3] = "foo" # wrong type
tmp_file = os.path.join(tmp_dir, "test.arrow")
self.assertRaises(
Exception,
dset.select,
indices=bad_indices,
indices_cache_file_name=tmp_file,
writer_batch_size=2,
)
self.assertFalse(os.path.exists(tmp_file))
dset.set_format("numpy")
with dset.select(
range(5),
indices_cache_file_name=tmp_file,
writer_batch_size=2,
) as dset_select_five:
self.assertIsNone(dset_select_five._indices)
self.assertEqual(len(dset_select_five), 5)
self.assertEqual(dset_select_five.format["type"], "numpy")
for i, row in enumerate(dset_select_five):
self.assertEqual(int(row["filename"][-1]), i)
self.assertDictEqual(dset.features, Features({"filename": Value("string")}))
self.assertDictEqual(dset_select_five.features, Features({"filename": Value("string")}))
def test_select_then_map(self, in_memory):
with tempfile.TemporaryDirectory() as tmp_dir:
with self._create_dummy_dataset(in_memory, tmp_dir) as dset:
with dset.select([0]) as d1:
with d1.map(lambda x: {"id": int(x["filename"].split("_")[-1])}) as d1:
self.assertEqual(d1[0]["id"], 0)
with dset.select([1]) as d2:
with d2.map(lambda x: {"id": int(x["filename"].split("_")[-1])}) as d2:
self.assertEqual(d2[0]["id"], 1)
with tempfile.TemporaryDirectory() as tmp_dir:
with self._create_dummy_dataset(in_memory, tmp_dir) as dset:
with dset.select([0], indices_cache_file_name=os.path.join(tmp_dir, "i1.arrow")) as d1:
with d1.map(lambda x: {"id": int(x["filename"].split("_")[-1])}) as d1:
self.assertEqual(d1[0]["id"], 0)
with dset.select([1], indices_cache_file_name=os.path.join(tmp_dir, "i2.arrow")) as d2:
with d2.map(lambda x: {"id": int(x["filename"].split("_")[-1])}) as d2:
self.assertEqual(d2[0]["id"], 1)
def test_pickle_after_many_transforms_on_disk(self, in_memory):
with tempfile.TemporaryDirectory() as tmp_dir:
with self._create_dummy_dataset(in_memory, tmp_dir) as dset:
self.assertEqual(len(dset.cache_files), 0 if in_memory else 1)
with dset.rename_column("filename", "file") as dset:
self.assertListEqual(dset.column_names, ["file"])
with dset.select(range(5)) as dset:
self.assertEqual(len(dset), 5)
with dset.map(lambda x: {"id": int(x["file"][-1])}) as dset:
self.assertListEqual(sorted(dset.column_names), ["file", "id"])
with dset.rename_column("id", "number") as dset:
self.assertListEqual(sorted(dset.column_names), ["file", "number"])
with dset.select([1, 0]) as dset:
self.assertEqual(dset[0]["file"], "my_name-train_1")
self.assertEqual(dset[0]["number"], 1)
self.assertEqual(dset._indices["indices"].to_pylist(), [1, 0])
if not in_memory:
self.assertIn(
("rename_columns", (["file", "number"],), {}),
dset._data.replays,
)
if not in_memory:
dset._data.table = Unpicklable() # check that we don't pickle the entire table
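                                    # pickling should still succeed: only the reference to the on-disk table
                                    # and its transform history need to be serialized, not the table data itself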
pickled = pickle.dumps(dset)
with pickle.loads(pickled) as loaded:
self.assertEqual(loaded[0]["file"], "my_name-train_1")
self.assertEqual(loaded[0]["number"], 1)
def test_shuffle(self, in_memory):
with tempfile.TemporaryDirectory() as tmp_dir:
with self._create_dummy_dataset(in_memory, tmp_dir) as dset:
tmp_file = os.path.join(tmp_dir, "test.arrow")
fingerprint = dset._fingerprint
with dset.shuffle(seed=1234, keep_in_memory=True) as dset_shuffled:
self.assertEqual(len(dset_shuffled), 30)
self.assertEqual(dset_shuffled[0]["filename"], "my_name-train_28")
self.assertEqual(dset_shuffled[2]["filename"], "my_name-train_10")
self.assertDictEqual(dset.features, Features({"filename": Value("string")}))
self.assertDictEqual(dset_shuffled.features, Features({"filename": Value("string")}))
self.assertNotEqual(dset_shuffled._fingerprint, fingerprint)
with dset.shuffle(seed=1234, indices_cache_file_name=tmp_file) as dset_shuffled:
self.assertEqual(len(dset_shuffled), 30)
self.assertEqual(dset_shuffled[0]["filename"], "my_name-train_28")
self.assertEqual(dset_shuffled[2]["filename"], "my_name-train_10")
self.assertDictEqual(dset.features, Features({"filename": Value("string")}))
self.assertDictEqual(dset_shuffled.features, Features({"filename": Value("string")}))
self.assertNotEqual(dset_shuffled._fingerprint, fingerprint)
# Reproducibility
tmp_file = os.path.join(tmp_dir, "test_2.arrow")
with dset.shuffle(seed=1234, indices_cache_file_name=tmp_file) as dset_shuffled_2:
self.assertListEqual(dset_shuffled["filename"], dset_shuffled_2["filename"])
# Compatible with temp_seed
with temp_seed(42), dset.shuffle() as d1:
with temp_seed(42), dset.shuffle() as d2, dset.shuffle() as d3:
self.assertListEqual(d1["filename"], d2["filename"])
self.assertEqual(d1._fingerprint, d2._fingerprint)
self.assertNotEqual(d3["filename"], d2["filename"])
self.assertNotEqual(d3._fingerprint, d2._fingerprint)
def test_sort(self, in_memory):
with tempfile.TemporaryDirectory() as tmp_dir:
# Sort on a single key
with self._create_dummy_dataset(in_memory=in_memory, tmp_dir=tmp_dir) as dset:
# Keep only 10 examples
tmp_file = os.path.join(tmp_dir, "test.arrow")
with dset.select(range(10), indices_cache_file_name=tmp_file) as dset:
tmp_file = os.path.join(tmp_dir, "test_2.arrow")
with dset.shuffle(seed=1234, indices_cache_file_name=tmp_file) as dset:
self.assertEqual(len(dset), 10)
self.assertEqual(dset[0]["filename"], "my_name-train_8")
self.assertEqual(dset[1]["filename"], "my_name-train_9")
# Sort
tmp_file = os.path.join(tmp_dir, "test_3.arrow")
fingerprint = dset._fingerprint
with dset.sort("filename", indices_cache_file_name=tmp_file) as dset_sorted:
for i, row in enumerate(dset_sorted):
self.assertEqual(int(row["filename"][-1]), i)
self.assertDictEqual(dset.features, Features({"filename": Value("string")}))
self.assertDictEqual(dset_sorted.features, Features({"filename": Value("string")}))
self.assertNotEqual(dset_sorted._fingerprint, fingerprint)
# Sort reversed
tmp_file = os.path.join(tmp_dir, "test_4.arrow")
fingerprint = dset._fingerprint
with dset.sort("filename", indices_cache_file_name=tmp_file, reverse=True) as dset_sorted:
for i, row in enumerate(dset_sorted):
self.assertEqual(int(row["filename"][-1]), len(dset_sorted) - 1 - i)
self.assertDictEqual(dset.features, Features({"filename": Value("string")}))
self.assertDictEqual(dset_sorted.features, Features({"filename": Value("string")}))
self.assertNotEqual(dset_sorted._fingerprint, fingerprint)
# formatted
dset.set_format("numpy")
with dset.sort("filename") as dset_sorted_formatted:
self.assertEqual(dset_sorted_formatted.format["type"], "numpy")
# Sort on multiple keys
with self._create_dummy_dataset(in_memory=in_memory, tmp_dir=tmp_dir, multiple_columns=True) as dset:
tmp_file = os.path.join(tmp_dir, "test_5.arrow")
fingerprint = dset._fingerprint
# Throw error when reverse is a list of bools that does not match the length of column_names
with pytest.raises(ValueError):
dset.sort(["col_1", "col_2", "col_3"], reverse=[False])
with dset.shuffle(seed=1234, indices_cache_file_name=tmp_file) as dset:
# Sort
with dset.sort(["col_1", "col_2", "col_3"], reverse=[False, True, False]) as dset_sorted:
for i, row in enumerate(dset_sorted):
self.assertEqual(row["col_1"], i)
self.assertDictEqual(
dset.features,
Features(
{
"col_1": Value("int64"),
"col_2": Value("string"),
"col_3": Value("bool"),
}
),
)
self.assertDictEqual(
dset_sorted.features,
Features(
{
"col_1": Value("int64"),
"col_2": Value("string"),
"col_3": Value("bool"),
}
),
)
self.assertNotEqual(dset_sorted._fingerprint, fingerprint)
# Sort reversed
with dset.sort(["col_1", "col_2", "col_3"], reverse=[True, False, True]) as dset_sorted:
for i, row in enumerate(dset_sorted):
self.assertEqual(row["col_1"], len(dset_sorted) - 1 - i)
self.assertDictEqual(
dset.features,
Features(
{
"col_1": Value("int64"),
"col_2": Value("string"),
"col_3": Value("bool"),
}
),
)
self.assertDictEqual(
dset_sorted.features,
Features(
{
"col_1": Value("int64"),
"col_2": Value("string"),
"col_3": Value("bool"),
}
),
)
self.assertNotEqual(dset_sorted._fingerprint, fingerprint)
# formatted
dset.set_format("numpy")
with dset.sort(
["col_1", "col_2", "col_3"], reverse=[False, True, False]
) as dset_sorted_formatted:
self.assertEqual(dset_sorted_formatted.format["type"], "numpy")
def test_to_csv(self, in_memory):
with tempfile.TemporaryDirectory() as tmp_dir:
# File path argument
with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset:
file_path = os.path.join(tmp_dir, "test_path.csv")
bytes_written = dset.to_csv(path_or_buf=file_path)
self.assertTrue(os.path.isfile(file_path))
self.assertEqual(bytes_written, os.path.getsize(file_path))
csv_dset = pd.read_csv(file_path)
self.assertEqual(csv_dset.shape, dset.shape)
self.assertListEqual(list(csv_dset.columns), list(dset.column_names))
# File buffer argument
with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset:
file_path = os.path.join(tmp_dir, "test_buffer.csv")
with open(file_path, "wb+") as buffer:
bytes_written = dset.to_csv(path_or_buf=buffer)
self.assertTrue(os.path.isfile(file_path))
self.assertEqual(bytes_written, os.path.getsize(file_path))
csv_dset = pd.read_csv(file_path)
self.assertEqual(csv_dset.shape, dset.shape)
self.assertListEqual(list(csv_dset.columns), list(dset.column_names))
# After a select/shuffle transform
with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset:
dset = dset.select(range(0, len(dset), 2)).shuffle()
file_path = os.path.join(tmp_dir, "test_path.csv")
bytes_written = dset.to_csv(path_or_buf=file_path)
self.assertTrue(os.path.isfile(file_path))
self.assertEqual(bytes_written, os.path.getsize(file_path))
csv_dset = pd.read_csv(file_path)
self.assertEqual(csv_dset.shape, dset.shape)
self.assertListEqual(list(csv_dset.columns), list(dset.column_names))
# With array features
with self._create_dummy_dataset(in_memory, tmp_dir, array_features=True) as dset:
file_path = os.path.join(tmp_dir, "test_path.csv")
bytes_written = dset.to_csv(path_or_buf=file_path)
self.assertTrue(os.path.isfile(file_path))
self.assertEqual(bytes_written, os.path.getsize(file_path))
csv_dset = pd.read_csv(file_path)
self.assertEqual(csv_dset.shape, dset.shape)
self.assertListEqual(list(csv_dset.columns), list(dset.column_names))
def test_to_dict(self, in_memory):
with tempfile.TemporaryDirectory() as tmp_dir:
with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset:
# Full
dset_to_dict = dset.to_dict()
self.assertIsInstance(dset_to_dict, dict)
self.assertListEqual(sorted(dset_to_dict.keys()), sorted(dset.column_names))
for col_name in dset.column_names:
self.assertLessEqual(len(dset_to_dict[col_name]), len(dset))
# With index mapping
with dset.select([1, 0, 3]) as dset:
dset_to_dict = dset.to_dict()
self.assertIsInstance(dset_to_dict, dict)
self.assertEqual(len(dset_to_dict), 3)
self.assertListEqual(sorted(dset_to_dict.keys()), sorted(dset.column_names))
for col_name in dset.column_names:
self.assertIsInstance(dset_to_dict[col_name], list)
self.assertEqual(len(dset_to_dict[col_name]), len(dset))
def test_to_list(self, in_memory):
with tempfile.TemporaryDirectory() as tmp_dir:
with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset:
dset_to_list = dset.to_list()
self.assertIsInstance(dset_to_list, list)
for row in dset_to_list:
self.assertIsInstance(row, dict)
self.assertListEqual(sorted(row.keys()), sorted(dset.column_names))
# With index mapping
with dset.select([1, 0, 3]) as dset:
dset_to_list = dset.to_list()
self.assertIsInstance(dset_to_list, list)
self.assertEqual(len(dset_to_list), 3)
for row in dset_to_list:
self.assertIsInstance(row, dict)
self.assertListEqual(sorted(row.keys()), sorted(dset.column_names))
def test_to_pandas(self, in_memory):
with tempfile.TemporaryDirectory() as tmp_dir:
# Batched
with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset:
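                # use a batch size one smaller than the number of rows so the last batch is partial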
batch_size = dset.num_rows - 1
to_pandas_generator = dset.to_pandas(batched=True, batch_size=batch_size)
for batch in to_pandas_generator:
self.assertIsInstance(batch, pd.DataFrame)
self.assertListEqual(sorted(batch.columns), sorted(dset.column_names))
for col_name in dset.column_names:
self.assertLessEqual(len(batch[col_name]), batch_size)
# Full
dset_to_pandas = dset.to_pandas()
self.assertIsInstance(dset_to_pandas, pd.DataFrame)
self.assertListEqual(sorted(dset_to_pandas.columns), sorted(dset.column_names))
for col_name in dset.column_names:
self.assertEqual(len(dset_to_pandas[col_name]), len(dset))
# With index mapping
with dset.select([1, 0, 3]) as dset:
dset_to_pandas = dset.to_pandas()
self.assertIsInstance(dset_to_pandas, pd.DataFrame)
self.assertEqual(len(dset_to_pandas), 3)
self.assertListEqual(sorted(dset_to_pandas.columns), sorted(dset.column_names))
for col_name in dset.column_names:
self.assertEqual(len(dset_to_pandas[col_name]), dset.num_rows)
@require_polars
def test_to_polars(self, in_memory):
with tempfile.TemporaryDirectory() as tmp_dir:
# Batched
with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset:
batch_size = dset.num_rows - 1
to_polars_generator = dset.to_polars(batched=True, batch_size=batch_size)
for batch in to_polars_generator:
self.assertIsInstance(batch, sys.modules["polars"].DataFrame)
self.assertListEqual(sorted(batch.columns), sorted(dset.column_names))
for col_name in dset.column_names:
self.assertLessEqual(len(batch[col_name]), batch_size)
del batch
# Full
dset_to_polars = dset.to_polars()
self.assertIsInstance(dset_to_polars, sys.modules["polars"].DataFrame)
self.assertListEqual(sorted(dset_to_polars.columns), sorted(dset.column_names))
for col_name in dset.column_names:
self.assertEqual(len(dset_to_polars[col_name]), len(dset))
# With index mapping
with dset.select([1, 0, 3]) as dset:
dset_to_polars = dset.to_polars()
self.assertIsInstance(dset_to_polars, sys.modules["polars"].DataFrame)
self.assertEqual(len(dset_to_polars), 3)
self.assertListEqual(sorted(dset_to_polars.columns), sorted(dset.column_names))
for col_name in dset.column_names:
self.assertEqual(len(dset_to_polars[col_name]), dset.num_rows)
def test_to_parquet(self, in_memory):
with tempfile.TemporaryDirectory() as tmp_dir:
# File path argument
with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset:
file_path = os.path.join(tmp_dir, "test_path.parquet")
dset.to_parquet(path_or_buf=file_path)
self.assertTrue(os.path.isfile(file_path))
# self.assertEqual(bytes_written, os.path.getsize(file_path)) # because of compression, the number of bytes doesn't match
parquet_dset = pd.read_parquet(file_path)
self.assertEqual(parquet_dset.shape, dset.shape)
self.assertListEqual(list(parquet_dset.columns), list(dset.column_names))
# File buffer argument
with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset:
file_path = os.path.join(tmp_dir, "test_buffer.parquet")
with open(file_path, "wb+") as buffer:
dset.to_parquet(path_or_buf=buffer)
self.assertTrue(os.path.isfile(file_path))
# self.assertEqual(bytes_written, os.path.getsize(file_path)) # because of compression, the number of bytes doesn't match
parquet_dset = pd.read_parquet(file_path)
self.assertEqual(parquet_dset.shape, dset.shape)
self.assertListEqual(list(parquet_dset.columns), list(dset.column_names))
# After a select/shuffle transform
with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset:
dset = dset.select(range(0, len(dset), 2)).shuffle()
file_path = os.path.join(tmp_dir, "test_path.parquet")
dset.to_parquet(path_or_buf=file_path)
self.assertTrue(os.path.isfile(file_path))
# self.assertEqual(bytes_written, os.path.getsize(file_path)) # because of compression, the number of bytes doesn't match
parquet_dset = pd.read_parquet(file_path)
self.assertEqual(parquet_dset.shape, dset.shape)
self.assertListEqual(list(parquet_dset.columns), list(dset.column_names))
# With array features
with self._create_dummy_dataset(in_memory, tmp_dir, array_features=True) as dset:
file_path = os.path.join(tmp_dir, "test_path.parquet")
dset.to_parquet(path_or_buf=file_path)
self.assertTrue(os.path.isfile(file_path))
# self.assertEqual(bytes_written, os.path.getsize(file_path)) # because of compression, the number of bytes doesn't match
parquet_dset = pd.read_parquet(file_path)
self.assertEqual(parquet_dset.shape, dset.shape)
self.assertListEqual(list(parquet_dset.columns), list(dset.column_names))
@require_sqlalchemy
def test_to_sql(self, in_memory):
with tempfile.TemporaryDirectory() as tmp_dir:
            # Destination specified as database URI string
with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset:
file_path = os.path.join(tmp_dir, "test_path.sqlite")
_ = dset.to_sql("data", "sqlite:///" + file_path)
self.assertTrue(os.path.isfile(file_path))
sql_dset = pd.read_sql("data", "sqlite:///" + file_path)
self.assertEqual(sql_dset.shape, dset.shape)
self.assertListEqual(list(sql_dset.columns), list(dset.column_names))
            # Destination specified as sqlite3 connection
with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset:
import sqlite3
file_path = os.path.join(tmp_dir, "test_path.sqlite")
with contextlib.closing(sqlite3.connect(file_path)) as con:
_ = dset.to_sql("data", con, if_exists="replace")
self.assertTrue(os.path.isfile(file_path))
sql_dset = pd.read_sql("data", "sqlite:///" + file_path)
self.assertEqual(sql_dset.shape, dset.shape)
self.assertListEqual(list(sql_dset.columns), list(dset.column_names))
# Test writing to a database in chunks
with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset:
file_path = os.path.join(tmp_dir, "test_path.sqlite")
_ = dset.to_sql("data", "sqlite:///" + file_path, batch_size=1, if_exists="replace")
self.assertTrue(os.path.isfile(file_path))
sql_dset = pd.read_sql("data", "sqlite:///" + file_path)
self.assertEqual(sql_dset.shape, dset.shape)
self.assertListEqual(list(sql_dset.columns), list(dset.column_names))
# After a select/shuffle transform
with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset:
dset = dset.select(range(0, len(dset), 2)).shuffle()
file_path = os.path.join(tmp_dir, "test_path.sqlite")
_ = dset.to_sql("data", "sqlite:///" + file_path, if_exists="replace")
self.assertTrue(os.path.isfile(file_path))
sql_dset = pd.read_sql("data", "sqlite:///" + file_path)
self.assertEqual(sql_dset.shape, dset.shape)
self.assertListEqual(list(sql_dset.columns), list(dset.column_names))
# With array features
with self._create_dummy_dataset(in_memory, tmp_dir, array_features=True) as dset:
file_path = os.path.join(tmp_dir, "test_path.sqlite")
_ = dset.to_sql("data", "sqlite:///" + file_path, if_exists="replace")
self.assertTrue(os.path.isfile(file_path))
sql_dset = pd.read_sql("data", "sqlite:///" + file_path)
self.assertEqual(sql_dset.shape, dset.shape)
self.assertListEqual(list(sql_dset.columns), list(dset.column_names))
def test_train_test_split(self, in_memory):
with tempfile.TemporaryDirectory() as tmp_dir:
with self._create_dummy_dataset(in_memory, tmp_dir) as dset:
fingerprint = dset._fingerprint
dset_dict = dset.train_test_split(test_size=10, shuffle=False)
self.assertListEqual(list(dset_dict.keys()), ["train", "test"])
dset_train = dset_dict["train"]
dset_test = dset_dict["test"]
self.assertEqual(len(dset_train), 20)
self.assertEqual(len(dset_test), 10)
self.assertEqual(dset_train[0]["filename"], "my_name-train_0")
self.assertEqual(dset_train[-1]["filename"], "my_name-train_19")
self.assertEqual(dset_test[0]["filename"], "my_name-train_20")
self.assertEqual(dset_test[-1]["filename"], "my_name-train_29")
self.assertDictEqual(dset.features, Features({"filename": Value("string")}))
self.assertDictEqual(dset_train.features, Features({"filename": Value("string")}))
self.assertDictEqual(dset_test.features, Features({"filename": Value("string")}))
self.assertNotEqual(dset_train._fingerprint, fingerprint)
self.assertNotEqual(dset_test._fingerprint, fingerprint)
self.assertNotEqual(dset_train._fingerprint, dset_test._fingerprint)
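                # a float test_size is interpreted as a fraction of the dataset: 0.5 of 30 rows gives a 15/15 split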
dset_dict = dset.train_test_split(test_size=0.5, shuffle=False)
self.assertListEqual(list(dset_dict.keys()), ["train", "test"])
dset_train = dset_dict["train"]
dset_test = dset_dict["test"]
self.assertEqual(len(dset_train), 15)
self.assertEqual(len(dset_test), 15)
self.assertEqual(dset_train[0]["filename"], "my_name-train_0")
self.assertEqual(dset_train[-1]["filename"], "my_name-train_14")
self.assertEqual(dset_test[0]["filename"], "my_name-train_15")
self.assertEqual(dset_test[-1]["filename"], "my_name-train_29")
self.assertDictEqual(dset.features, Features({"filename": Value("string")}))
self.assertDictEqual(dset_train.features, Features({"filename": Value("string")}))
self.assertDictEqual(dset_test.features, Features({"filename": Value("string")}))
dset_dict = dset.train_test_split(train_size=10, shuffle=False)
self.assertListEqual(list(dset_dict.keys()), ["train", "test"])
dset_train = dset_dict["train"]
dset_test = dset_dict["test"]
self.assertEqual(len(dset_train), 10)
self.assertEqual(len(dset_test), 20)
self.assertEqual(dset_train[0]["filename"], "my_name-train_0")
self.assertEqual(dset_train[-1]["filename"], "my_name-train_9")
self.assertEqual(dset_test[0]["filename"], "my_name-train_10")
self.assertEqual(dset_test[-1]["filename"], "my_name-train_29")
self.assertDictEqual(dset.features, Features({"filename": Value("string")}))
self.assertDictEqual(dset_train.features, Features({"filename": Value("string")}))
self.assertDictEqual(dset_test.features, Features({"filename": Value("string")}))
dset.set_format("numpy")
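                # with a seed, shuffling is enabled (the default), so rows no longer appear in their original order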
dset_dict = dset.train_test_split(train_size=10, seed=42)
self.assertListEqual(list(dset_dict.keys()), ["train", "test"])
dset_train = dset_dict["train"]
dset_test = dset_dict["test"]
self.assertEqual(len(dset_train), 10)
self.assertEqual(len(dset_test), 20)
self.assertEqual(dset_train.format["type"], "numpy")
self.assertEqual(dset_test.format["type"], "numpy")
self.assertNotEqual(dset_train[0]["filename"].item(), "my_name-train_0")
self.assertNotEqual(dset_train[-1]["filename"].item(), "my_name-train_9")
self.assertNotEqual(dset_test[0]["filename"].item(), "my_name-train_10")
self.assertNotEqual(dset_test[-1]["filename"].item(), "my_name-train_29")
self.assertDictEqual(dset.features, Features({"filename": Value("string")}))
self.assertDictEqual(dset_train.features, Features({"filename": Value("string")}))
self.assertDictEqual(dset_test.features, Features({"filename": Value("string")}))
del dset_test, dset_train, dset_dict # DatasetDict
def test_shard(self, in_memory):
with tempfile.TemporaryDirectory() as tmp_dir, self._create_dummy_dataset(in_memory, tmp_dir) as dset:
tmp_file = os.path.join(tmp_dir, "test.arrow")
with dset.select(range(10), indices_cache_file_name=tmp_file) as dset:
self.assertEqual(len(dset), 10)
# Shard non-contiguous
tmp_file_1 = os.path.join(tmp_dir, "test_1.arrow")
fingerprint = dset._fingerprint
with dset.shard(
num_shards=8, index=1, contiguous=False, indices_cache_file_name=tmp_file_1
) as dset_sharded:
self.assertEqual(2, len(dset_sharded))
self.assertEqual(["my_name-train_1", "my_name-train_9"], dset_sharded["filename"])
self.assertDictEqual(dset.features, Features({"filename": Value("string")}))
self.assertDictEqual(dset_sharded.features, Features({"filename": Value("string")}))
self.assertNotEqual(dset_sharded._fingerprint, fingerprint)
# Shard contiguous
tmp_file_2 = os.path.join(tmp_dir, "test_2.arrow")
with dset.shard(
num_shards=3, index=0, contiguous=True, indices_cache_file_name=tmp_file_2
) as dset_sharded_contiguous:
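                    # with 10 rows and 3 shards, contiguous sharding yields shards of sizes 4, 3 and 3,
                    # so shard 0 contains rows 0-3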
self.assertEqual([f"my_name-train_{i}" for i in (0, 1, 2, 3)], dset_sharded_contiguous["filename"])
self.assertDictEqual(dset.features, Features({"filename": Value("string")}))
self.assertDictEqual(dset_sharded_contiguous.features, Features({"filename": Value("string")}))
                # Test the lengths of the contiguous shards
self.assertEqual(
[4, 3, 3],
[
len(dset.shard(3, index=i, contiguous=True, indices_cache_file_name=tmp_file_2 + str(i)))
for i in range(3)
],
)
# formatted
dset.set_format("numpy")
with dset.shard(num_shards=3, index=0) as dset_sharded_formatted:
self.assertEqual(dset_sharded_formatted.format["type"], "numpy")
def test_flatten_indices(self, in_memory):
with tempfile.TemporaryDirectory() as tmp_dir:
with self._create_dummy_dataset(in_memory, tmp_dir) as dset:
self.assertIsNone(dset._indices)
tmp_file = os.path.join(tmp_dir, "test.arrow")
with dset.select(range(0, 10, 2), indices_cache_file_name=tmp_file) as dset:
self.assertEqual(len(dset), 5)
self.assertIsNotNone(dset._indices)
tmp_file_2 = os.path.join(tmp_dir, "test_2.arrow")
fingerprint = dset._fingerprint
dset.set_format("numpy")
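                    # flatten_indices() materializes the indices mapping into a new arrow table and drops the mapping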
with dset.flatten_indices(cache_file_name=tmp_file_2) as dset:
self.assertEqual(len(dset), 5)
self.assertEqual(len(dset.data), len(dset))
self.assertIsNone(dset._indices)
self.assertNotEqual(dset._fingerprint, fingerprint)
self.assertEqual(dset.format["type"], "numpy")
# Test unique works
dset.unique(dset.column_names[0])
assert_arrow_metadata_are_synced_with_dataset_features(dset)
# Empty indices mapping
with tempfile.TemporaryDirectory() as tmp_dir:
with self._create_dummy_dataset(in_memory, tmp_dir) as dset:
                self.assertIsNone(dset._indices)
tmp_file = os.path.join(tmp_dir, "test.arrow")
with dset.filter(lambda _: False, cache_file_name=tmp_file) as dset:
self.assertEqual(len(dset), 0)
                    self.assertIsNotNone(dset._indices)
tmp_file_2 = os.path.join(tmp_dir, "test_2.arrow")
fingerprint = dset._fingerprint
dset.set_format("numpy")
with dset.flatten_indices(cache_file_name=tmp_file_2) as dset:
self.assertEqual(len(dset), 0)
self.assertEqual(len(dset.data), len(dset))
                        self.assertIsNone(dset._indices)
self.assertNotEqual(dset._fingerprint, fingerprint)
self.assertEqual(dset.format["type"], "numpy")
# Test unique works
dset.unique(dset.column_names[0])
assert_arrow_metadata_are_synced_with_dataset_features(dset)
@require_tf
@require_torch
def test_format_vectors(self, in_memory):
import numpy as np
import tensorflow as tf
import torch
with (
tempfile.TemporaryDirectory() as tmp_dir,
self._create_dummy_dataset(in_memory, tmp_dir) as dset,
dset.map(lambda ex, i: {"vec": np.ones(3) * i}, with_indices=True) as dset,
):
columns = dset.column_names
self.assertIsNotNone(dset[0])
self.assertIsNotNone(dset[:2])
for col in columns:
self.assertIsInstance(dset[0][col], (str, list))
self.assertIsInstance(dset[:2][col], list)
self.assertDictEqual(
dset.features, Features({"filename": Value("string"), "vec": Sequence(Value("float64"))})
)
dset.set_format("tensorflow")
self.assertIsNotNone(dset[0])
self.assertIsNotNone(dset[:2])
for col in columns:
self.assertIsInstance(dset[0][col], (tf.Tensor, tf.RaggedTensor))
self.assertIsInstance(dset[:2][col], (tf.Tensor, tf.RaggedTensor))
self.assertIsInstance(dset[col], (tf.Tensor, tf.RaggedTensor))
self.assertTupleEqual(tuple(dset[:2]["vec"].shape), (2, 3))
self.assertTupleEqual(tuple(dset["vec"][:2].shape), (2, 3))
dset.set_format("numpy")
self.assertIsNotNone(dset[0])
self.assertIsNotNone(dset[:2])
self.assertIsInstance(dset[0]["filename"], np.str_)
self.assertIsInstance(dset[:2]["filename"], np.ndarray)
self.assertIsInstance(dset["filename"], np.ndarray)
self.assertIsInstance(dset[0]["vec"], np.ndarray)
self.assertIsInstance(dset[:2]["vec"], np.ndarray)
self.assertIsInstance(dset["vec"], np.ndarray)
self.assertTupleEqual(dset[:2]["vec"].shape, (2, 3))
self.assertTupleEqual(dset["vec"][:2].shape, (2, 3))
dset.set_format("torch", columns=["vec"])
self.assertIsNotNone(dset[0])
self.assertIsNotNone(dset[:2])
# torch.Tensor is only for numerical columns
self.assertIsInstance(dset[0]["vec"], torch.Tensor)
self.assertIsInstance(dset[:2]["vec"], torch.Tensor)
self.assertIsInstance(dset["vec"][:2], torch.Tensor)
self.assertTupleEqual(dset[:2]["vec"].shape, (2, 3))
self.assertTupleEqual(dset["vec"][:2].shape, (2, 3))
@require_tf
@require_torch
def test_format_ragged_vectors(self, in_memory):
import numpy as np
import tensorflow as tf
import torch
with (
tempfile.TemporaryDirectory() as tmp_dir,
self._create_dummy_dataset(in_memory, tmp_dir) as dset,
dset.map(lambda ex, i: {"vec": np.ones(3 + i) * i}, with_indices=True) as dset,
):
columns = dset.column_names
self.assertIsNotNone(dset[0])
self.assertIsNotNone(dset[:2])
for col in columns:
self.assertIsInstance(dset[0][col], (str, list))
self.assertIsInstance(dset[:2][col], list)
self.assertDictEqual(
dset.features, Features({"filename": Value("string"), "vec": Sequence(Value("float64"))})
)
dset.set_format("tensorflow")
self.assertIsNotNone(dset[0])
self.assertIsNotNone(dset[:2])
for col in columns:
self.assertIsInstance(dset[0][col], tf.Tensor)
self.assertIsInstance(dset[:2][col], tf.RaggedTensor if col == "vec" else tf.Tensor)
self.assertIsInstance(dset[col], tf.RaggedTensor if col == "vec" else tf.Tensor)
# dim is None for ragged vectors in tensorflow
self.assertListEqual(dset[:2]["vec"].shape.as_list(), [2, None])
self.assertListEqual(dset["vec"][:2].shape.as_list(), [2, None])
dset.set_format("numpy")
self.assertIsNotNone(dset[0])
self.assertIsNotNone(dset[:2])
self.assertIsInstance(dset[0]["filename"], np.str_)
self.assertIsInstance(dset[:2]["filename"], np.ndarray)
self.assertIsInstance(dset["filename"], np.ndarray)
self.assertIsInstance(dset[0]["vec"], np.ndarray)
self.assertIsInstance(dset[:2]["vec"], np.ndarray)
self.assertIsInstance(dset["vec"], np.ndarray)
            # for ragged vectors, numpy returns a 1-d object array of per-row arrays
self.assertTupleEqual(dset[:2]["vec"].shape, (2,))
self.assertTupleEqual(dset["vec"][:2].shape, (2,))
dset.set_format("torch")
self.assertIsNotNone(dset[0])
self.assertIsNotNone(dset[:2])
self.assertIsInstance(dset[0]["filename"], str)
self.assertIsInstance(dset[:2]["filename"], list)
self.assertIsInstance(dset["filename"], list)
self.assertIsInstance(dset[0]["vec"], torch.Tensor)
self.assertIsInstance(dset[:2]["vec"][0], torch.Tensor)
self.assertIsInstance(dset["vec"][0], torch.Tensor)
# pytorch doesn't support ragged tensors, so we should have lists
self.assertIsInstance(dset[:2]["vec"], list)
self.assertIsInstance(dset[:2]["vec"][0], torch.Tensor)
self.assertIsInstance(dset["vec"][:2], list)
self.assertIsInstance(dset["vec"][0], torch.Tensor)
@require_tf
@require_torch
def test_format_nested(self, in_memory):
import numpy as np
import tensorflow as tf
import torch
with (
tempfile.TemporaryDirectory() as tmp_dir,
self._create_dummy_dataset(in_memory, tmp_dir) as dset,
dset.map(lambda ex: {"nested": [{"foo": np.ones(3)}] * len(ex["filename"])}, batched=True) as dset,
):
self.assertDictEqual(
dset.features, Features({"filename": Value("string"), "nested": {"foo": Sequence(Value("float64"))}})
)
dset.set_format("tensorflow")
self.assertIsNotNone(dset[0])
self.assertIsInstance(dset[0]["nested"]["foo"], (tf.Tensor, tf.RaggedTensor))
self.assertIsNotNone(dset[:2])
self.assertIsInstance(dset[:2]["nested"][0]["foo"], (tf.Tensor, tf.RaggedTensor))
self.assertIsInstance(dset["nested"][0]["foo"], (tf.Tensor, tf.RaggedTensor))
dset.set_format("numpy")
self.assertIsNotNone(dset[0])
self.assertIsInstance(dset[0]["nested"]["foo"], np.ndarray)
self.assertIsNotNone(dset[:2])
self.assertIsInstance(dset[:2]["nested"][0]["foo"], np.ndarray)
self.assertIsInstance(dset["nested"][0]["foo"], np.ndarray)
dset.set_format("torch", columns="nested")
self.assertIsNotNone(dset[0])
self.assertIsInstance(dset[0]["nested"]["foo"], torch.Tensor)
self.assertIsNotNone(dset[:2])
self.assertIsInstance(dset[:2]["nested"][0]["foo"], torch.Tensor)
self.assertIsInstance(dset["nested"][0]["foo"], torch.Tensor)
def test_format_pandas(self, in_memory):
import pandas as pd
with tempfile.TemporaryDirectory() as tmp_dir:
with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset:
dset.set_format("pandas")
self.assertIsInstance(dset[0], pd.DataFrame)
self.assertIsInstance(dset[:2], pd.DataFrame)
self.assertIsInstance(dset["col_1"], pd.Series)
@require_polars
def test_format_polars(self, in_memory):
import polars as pl
with tempfile.TemporaryDirectory() as tmp_dir:
with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset:
dset.set_format("polars")
self.assertIsInstance(dset[0], pl.DataFrame)
self.assertIsInstance(dset[:2], pl.DataFrame)
self.assertIsInstance(dset["col_1"], pl.Series)
def test_transmit_format_single(self, in_memory):
@transmit_format
def my_single_transform(self, return_factory, *args, **kwargs):
return return_factory()
with tempfile.TemporaryDirectory() as tmp_dir:
return_factory = partial(
self._create_dummy_dataset, in_memory=in_memory, tmp_dir=tmp_dir, multiple_columns=True
)
with return_factory() as dset:
dset.set_format("numpy", columns=["col_1"])
prev_format = dset.format
with my_single_transform(dset, return_factory) as transformed_dset:
self.assertDictEqual(transformed_dset.format, prev_format)
def test_transmit_format_dict(self, in_memory):
@transmit_format
def my_split_transform(self, return_factory, *args, **kwargs):
return DatasetDict({"train": return_factory()})
with tempfile.TemporaryDirectory() as tmp_dir:
return_factory = partial(
self._create_dummy_dataset, in_memory=in_memory, tmp_dir=tmp_dir, multiple_columns=True
)
with return_factory() as dset:
dset.set_format("numpy", columns=["col_1"])
prev_format = dset.format
transformed_dset = my_split_transform(dset, return_factory)["train"]
self.assertDictEqual(transformed_dset.format, prev_format)
del transformed_dset # DatasetDict
def test_with_format(self, in_memory):
with tempfile.TemporaryDirectory() as tmp_dir:
with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset:
with dset.with_format("numpy", columns=["col_1"]) as dset2:
dset.set_format("numpy", columns=["col_1"])
self.assertDictEqual(dset.format, dset2.format)
self.assertEqual(dset._fingerprint, dset2._fingerprint)
# dset.reset_format()
# self.assertNotEqual(dset.format, dset2.format)
# self.assertNotEqual(dset._fingerprint, dset2._fingerprint)
def test_with_transform(self, in_memory):
with tempfile.TemporaryDirectory() as tmp_dir:
with self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True) as dset:
transform = lambda x: {"foo": x["col_1"]} # noqa: E731
with dset.with_transform(transform, columns=["col_1"]) as dset2:
dset.set_transform(transform, columns=["col_1"])
self.assertDictEqual(dset.format, dset2.format)
self.assertEqual(dset._fingerprint, dset2._fingerprint)
dset.reset_format()
self.assertNotEqual(dset.format, dset2.format)
self.assertNotEqual(dset._fingerprint, dset2._fingerprint)
@require_tf
def test_tf_dataset_conversion(self, in_memory):
tmp_dir = tempfile.TemporaryDirectory()
for num_workers in [0, 1, 2]:
if num_workers > 0 and sys.platform == "win32" and not in_memory:
continue # This test hangs on the Py3.10 test worker, but it runs fine locally on my Windows machine
with self._create_dummy_dataset(in_memory, tmp_dir.name, array_features=True) as dset:
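                # with array_features, "col_3" holds fixed-length vectors of 4 integers,
                # so each batch should have shape (batch_size, 4)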
tf_dataset = dset.to_tf_dataset(columns="col_3", batch_size=2, num_workers=num_workers)
batch = next(iter(tf_dataset))
self.assertEqual(batch.shape.as_list(), [2, 4])
self.assertEqual(batch.dtype.name, "int64")
with self._create_dummy_dataset(in_memory, tmp_dir.name, multiple_columns=True) as dset:
tf_dataset = dset.to_tf_dataset(columns="col_1", batch_size=2, num_workers=num_workers)
batch = next(iter(tf_dataset))
self.assertEqual(batch.shape.as_list(), [2])
self.assertEqual(batch.dtype.name, "int64")
with self._create_dummy_dataset(in_memory, tmp_dir.name, multiple_columns=True) as dset:
                # Check that it works with all default options (except batch_size, because the dummy dataset only has 4 rows)
tf_dataset = dset.to_tf_dataset(batch_size=2, num_workers=num_workers)
batch = next(iter(tf_dataset))
self.assertEqual(batch["col_1"].shape.as_list(), [2])
self.assertEqual(batch["col_2"].shape.as_list(), [2])
self.assertEqual(batch["col_1"].dtype.name, "int64")
self.assertEqual(batch["col_2"].dtype.name, "string") # Assert that we're converting strings properly
with self._create_dummy_dataset(in_memory, tmp_dir.name, multiple_columns=True) as dset:
                # Check that when a transform creates a new column from existing column values,
                # and the columns it depends on aren't loaded in the final dataset,
                # those columns are still kept around long enough to be used by the transform
transform_dset = dset.with_transform(
lambda x: {"new_col": [val * 2 for val in x["col_1"]], "col_1": x["col_1"]}
)
tf_dataset = transform_dset.to_tf_dataset(columns="new_col", batch_size=2, num_workers=num_workers)
batch = next(iter(tf_dataset))
self.assertEqual(batch.shape.as_list(), [2])
self.assertEqual(batch.dtype.name, "int64")
del transform_dset
del tf_dataset # For correct cleanup
@require_tf
def test_tf_index_reshuffling(self, in_memory):
        # This test checks that two epochs over a tf.data.Dataset created with to_tf_dataset
        # yield a different shuffle order each time.
        # It also checks that when we aren't shuffling, the dataset order is fully preserved,
        # even when loading is split across multiple workers
data = {"col_1": list(range(20))}
for num_workers in [0, 1, 2, 3]:
with Dataset.from_dict(data) as dset:
tf_dataset = dset.to_tf_dataset(batch_size=10, shuffle=True, num_workers=num_workers)
indices = []
for batch in tf_dataset:
indices.append(batch["col_1"])
indices = np.concatenate([arr.numpy() for arr in indices])
second_indices = []
for batch in tf_dataset:
second_indices.append(batch["col_1"])
second_indices = np.concatenate([arr.numpy() for arr in second_indices])
self.assertFalse(np.array_equal(indices, second_indices))
self.assertEqual(len(indices), len(np.unique(indices)))
self.assertEqual(len(second_indices), len(np.unique(second_indices)))
tf_dataset = dset.to_tf_dataset(batch_size=1, shuffle=False, num_workers=num_workers)
for i, batch in enumerate(tf_dataset):
# Assert that the unshuffled order is fully preserved even when multiprocessing
self.assertEqual(i, batch["col_1"].numpy())
@require_tf
def test_tf_label_renaming(self, in_memory):
# Protect TF-specific imports in here
import tensorflow as tf
from datasets.utils.tf_utils import minimal_tf_collate_fn_with_renaming
tmp_dir = tempfile.TemporaryDirectory()
with self._create_dummy_dataset(in_memory, tmp_dir.name, multiple_columns=True) as dset:
with dset.rename_columns({"col_1": "features", "col_2": "label"}) as new_dset:
tf_dataset = new_dset.to_tf_dataset(collate_fn=minimal_tf_collate_fn_with_renaming, batch_size=4)
batch = next(iter(tf_dataset))
self.assertTrue("labels" in batch and "features" in batch)
tf_dataset = new_dset.to_tf_dataset(
columns=["features", "labels"], collate_fn=minimal_tf_collate_fn_with_renaming, batch_size=4
)
batch = next(iter(tf_dataset))
self.assertTrue("labels" in batch and "features" in batch)
tf_dataset = new_dset.to_tf_dataset(
columns=["features", "label"], collate_fn=minimal_tf_collate_fn_with_renaming, batch_size=4
)
batch = next(iter(tf_dataset))
self.assertTrue("labels" in batch and "features" in batch) # Assert renaming was handled correctly
tf_dataset = new_dset.to_tf_dataset(
columns=["features"],
label_cols=["labels"],
collate_fn=minimal_tf_collate_fn_with_renaming,
batch_size=4,
)
batch = next(iter(tf_dataset))
self.assertEqual(len(batch), 2)
# Assert that we don't have any empty entries here
self.assertTrue(isinstance(batch[0], tf.Tensor) and isinstance(batch[1], tf.Tensor))
tf_dataset = new_dset.to_tf_dataset(
columns=["features"],
label_cols=["label"],
collate_fn=minimal_tf_collate_fn_with_renaming,
batch_size=4,
)
batch = next(iter(tf_dataset))
self.assertEqual(len(batch), 2)
# Assert that we don't have any empty entries here
self.assertTrue(isinstance(batch[0], tf.Tensor) and isinstance(batch[1], tf.Tensor))
tf_dataset = new_dset.to_tf_dataset(
columns=["features"],
collate_fn=minimal_tf_collate_fn_with_renaming,
batch_size=4,
)
batch = next(iter(tf_dataset))
# Assert that labels didn't creep in when we don't ask for them
# just because the collate_fn added them
self.assertTrue(isinstance(batch, tf.Tensor))
del tf_dataset # For correct cleanup
@require_tf
def test_tf_dataset_options(self, in_memory):
tmp_dir = tempfile.TemporaryDirectory()
# Test that batch_size option works as expected
with self._create_dummy_dataset(in_memory, tmp_dir.name, array_features=True) as dset:
tf_dataset = dset.to_tf_dataset(columns="col_3", batch_size=2)
batch = next(iter(tf_dataset))
self.assertEqual(batch.shape.as_list(), [2, 4])
self.assertEqual(batch.dtype.name, "int64")
# Test that batch_size=None (optional) works as expected
with self._create_dummy_dataset(in_memory, tmp_dir.name, multiple_columns=True) as dset:
tf_dataset = dset.to_tf_dataset(columns="col_3", batch_size=None)
single_example = next(iter(tf_dataset))
self.assertEqual(single_example.shape.as_list(), [])
self.assertEqual(single_example.dtype.name, "int64")
            # Assert that we can batch it with the `tf.data.Dataset.batch` method
batched_dataset = tf_dataset.batch(batch_size=2)
batch = next(iter(batched_dataset))
self.assertEqual(batch.shape.as_list(), [2])
self.assertEqual(batch.dtype.name, "int64")
# Test that batching a batch_size=None dataset produces the same results as using batch_size arg
with self._create_dummy_dataset(in_memory, tmp_dir.name, multiple_columns=True) as dset:
batch_size = 2
tf_dataset_no_batch = dset.to_tf_dataset(columns="col_3")
tf_dataset_batch = dset.to_tf_dataset(columns="col_3", batch_size=batch_size)
self.assertEqual(tf_dataset_no_batch.element_spec, tf_dataset_batch.unbatch().element_spec)
self.assertEqual(tf_dataset_no_batch.cardinality(), tf_dataset_batch.cardinality() * batch_size)
for batch_1, batch_2 in zip(tf_dataset_no_batch.batch(batch_size=batch_size), tf_dataset_batch):
self.assertEqual(batch_1.shape, batch_2.shape)
self.assertEqual(batch_1.dtype, batch_2.dtype)
self.assertListEqual(batch_1.numpy().tolist(), batch_2.numpy().tolist())
# Test that requesting label_cols works as expected
with self._create_dummy_dataset(in_memory, tmp_dir.name, multiple_columns=True) as dset:
tf_dataset = dset.to_tf_dataset(columns="col_1", label_cols=["col_2", "col_3"], batch_size=4)
batch = next(iter(tf_dataset))
self.assertEqual(len(batch), 2)
self.assertEqual(set(batch[1].keys()), {"col_2", "col_3"})
self.assertEqual(batch[0].dtype.name, "int64")
# Assert data comes out as expected and isn't shuffled
self.assertEqual(batch[0].numpy().tolist(), [3, 2, 1, 0])
self.assertEqual(batch[1]["col_2"].numpy().tolist(), [b"a", b"b", b"c", b"d"])
self.assertEqual(batch[1]["col_3"].numpy().tolist(), [0, 1, 0, 1])
# Check that incomplete batches are dropped if requested
with self._create_dummy_dataset(in_memory, tmp_dir.name, multiple_columns=True) as dset:
tf_dataset = dset.to_tf_dataset(columns="col_1", batch_size=3)
tf_dataset_with_drop = dset.to_tf_dataset(columns="col_1", batch_size=3, drop_remainder=True)
self.assertEqual(len(tf_dataset), 2) # One batch of 3 and one batch of 1
self.assertEqual(len(tf_dataset_with_drop), 1) # Incomplete batch of 1 is dropped
        # Test that `NotImplementedError` is raised when `batch_size` is None and `num_workers` > 0
if sys.version_info >= (3, 8):
with self._create_dummy_dataset(in_memory, tmp_dir.name, multiple_columns=True) as dset:
with self.assertRaisesRegex(
NotImplementedError, "`batch_size` must be specified when using multiple workers"
):
dset.to_tf_dataset(columns="col_1", batch_size=None, num_workers=2)
del tf_dataset # For correct cleanup
del tf_dataset_with_drop
class MiscellaneousDatasetTest(TestCase):
def test_from_pandas(self):
data = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"]}
df = pd.DataFrame.from_dict(data)
with Dataset.from_pandas(df) as dset:
self.assertListEqual(dset["col_1"], data["col_1"])
self.assertListEqual(dset["col_2"], data["col_2"])
self.assertListEqual(list(dset.features.keys()), ["col_1", "col_2"])
self.assertDictEqual(dset.features, Features({"col_1": Value("int64"), "col_2": Value("string")}))
features = Features({"col_1": Value("int64"), "col_2": Value("string")})
with Dataset.from_pandas(df, features=features) as dset:
self.assertListEqual(dset["col_1"], data["col_1"])
self.assertListEqual(dset["col_2"], data["col_2"])
self.assertListEqual(list(dset.features.keys()), ["col_1", "col_2"])
self.assertDictEqual(dset.features, Features({"col_1": Value("int64"), "col_2": Value("string")}))
features = Features({"col_1": Value("int64"), "col_2": Value("string")})
with Dataset.from_pandas(df, features=features, info=DatasetInfo(features=features)) as dset:
self.assertListEqual(dset["col_1"], data["col_1"])
self.assertListEqual(dset["col_2"], data["col_2"])
self.assertListEqual(list(dset.features.keys()), ["col_1", "col_2"])
self.assertDictEqual(dset.features, Features({"col_1": Value("int64"), "col_2": Value("string")}))
features = Features({"col_1": Sequence(Value("string")), "col_2": Value("string")})
self.assertRaises(TypeError, Dataset.from_pandas, df, features=features)
@require_polars
def test_from_polars(self):
import polars as pl
data = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"]}
df = pl.from_dict(data)
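        # Polars stores utf8 columns as Arrow large_string, hence Value("large_string") in the expected features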
with Dataset.from_polars(df) as dset:
self.assertListEqual(dset["col_1"], data["col_1"])
self.assertListEqual(dset["col_2"], data["col_2"])
self.assertListEqual(list(dset.features.keys()), ["col_1", "col_2"])
self.assertDictEqual(dset.features, Features({"col_1": Value("int64"), "col_2": Value("large_string")}))
features = Features({"col_1": Value("int64"), "col_2": Value("large_string")})
with Dataset.from_polars(df, features=features) as dset:
self.assertListEqual(dset["col_1"], data["col_1"])
self.assertListEqual(dset["col_2"], data["col_2"])
self.assertListEqual(list(dset.features.keys()), ["col_1", "col_2"])
self.assertDictEqual(dset.features, Features({"col_1": Value("int64"), "col_2": Value("large_string")}))
features = Features({"col_1": Value("int64"), "col_2": Value("large_string")})
with Dataset.from_polars(df, features=features, info=DatasetInfo(features=features)) as dset:
self.assertListEqual(dset["col_1"], data["col_1"])
self.assertListEqual(dset["col_2"], data["col_2"])
self.assertListEqual(list(dset.features.keys()), ["col_1", "col_2"])
self.assertDictEqual(dset.features, Features({"col_1": Value("int64"), "col_2": Value("large_string")}))
features = Features({"col_1": Sequence(Value("string")), "col_2": Value("large_string")})
self.assertRaises(TypeError, Dataset.from_polars, df, features=features)
def test_from_dict(self):
data = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"], "col_3": pa.array([True, False, True, False])}
with Dataset.from_dict(data) as dset:
self.assertListEqual(dset["col_1"], data["col_1"])
self.assertListEqual(dset["col_2"], data["col_2"])
self.assertListEqual(dset["col_3"], data["col_3"].to_pylist())
self.assertListEqual(list(dset.features.keys()), ["col_1", "col_2", "col_3"])
self.assertDictEqual(
dset.features, Features({"col_1": Value("int64"), "col_2": Value("string"), "col_3": Value("bool")})
)
features = Features({"col_1": Value("int64"), "col_2": Value("string"), "col_3": Value("bool")})
with Dataset.from_dict(data, features=features) as dset:
self.assertListEqual(dset["col_1"], data["col_1"])
self.assertListEqual(dset["col_2"], data["col_2"])
self.assertListEqual(dset["col_3"], data["col_3"].to_pylist())
self.assertListEqual(list(dset.features.keys()), ["col_1", "col_2", "col_3"])
self.assertDictEqual(
dset.features, Features({"col_1": Value("int64"), "col_2": Value("string"), "col_3": Value("bool")})
)
features = Features({"col_1": Value("int64"), "col_2": Value("string"), "col_3": Value("bool")})
with Dataset.from_dict(data, features=features, info=DatasetInfo(features=features)) as dset:
self.assertListEqual(dset["col_1"], data["col_1"])
self.assertListEqual(dset["col_2"], data["col_2"])
self.assertListEqual(dset["col_3"], data["col_3"].to_pylist())
self.assertListEqual(list(dset.features.keys()), ["col_1", "col_2", "col_3"])
self.assertDictEqual(
dset.features, Features({"col_1": Value("int64"), "col_2": Value("string"), "col_3": Value("bool")})
)
features = Features({"col_1": Value("string"), "col_2": Value("string"), "col_3": Value("int32")})
with Dataset.from_dict(data, features=features) as dset:
# the integers are converted to strings
self.assertListEqual(dset["col_1"], [str(x) for x in data["col_1"]])
self.assertListEqual(dset["col_2"], data["col_2"])
self.assertListEqual(dset["col_3"], [int(x) for x in data["col_3"].to_pylist()])
self.assertListEqual(list(dset.features.keys()), ["col_1", "col_2", "col_3"])
self.assertDictEqual(
dset.features, Features({"col_1": Value("string"), "col_2": Value("string"), "col_3": Value("int32")})
)
features = Features({"col_1": Value("int64"), "col_2": Value("int64"), "col_3": Value("bool")})
self.assertRaises(ValueError, Dataset.from_dict, data, features=features)
def test_concatenate_mixed_memory_and_disk(self):
data1, data2, data3 = {"id": [0, 1, 2]}, {"id": [3, 4, 5]}, {"id": [6, 7]}
info1 = DatasetInfo(description="Dataset1")
info2 = DatasetInfo(description="Dataset2")
with tempfile.TemporaryDirectory() as tmp_dir:
with (
Dataset.from_dict(data1, info=info1).map(cache_file_name=os.path.join(tmp_dir, "d1.arrow")) as dset1,
Dataset.from_dict(data2, info=info2).map(cache_file_name=os.path.join(tmp_dir, "d2.arrow")) as dset2,
Dataset.from_dict(data3) as dset3,
):
with concatenate_datasets([dset1, dset2, dset3]) as concatenated_dset:
self.assertEqual(len(concatenated_dset), len(dset1) + len(dset2) + len(dset3))
self.assertListEqual(concatenated_dset["id"], dset1["id"] + dset2["id"] + dset3["id"])
@require_transformers
@pytest.mark.integration
def test_set_format_encode(self):
from transformers import BertTokenizer
tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
def encode(batch):
return tokenizer(batch["text"], padding="longest", return_tensors="np")
with Dataset.from_dict({"text": ["hello there", "foo"]}) as dset:
dset.set_transform(transform=encode)
self.assertEqual(str(dset[:2]), str(encode({"text": ["hello there", "foo"]})))
@require_tf
def test_tf_string_encoding(self):
data = {"col_1": ["á", "é", "í", "ó", "ú"], "col_2": ["à", "è", "ì", "ò", "ù"]}
with Dataset.from_dict(data) as dset:
tf_dset_wo_batch = dset.to_tf_dataset(columns=["col_1", "col_2"])
for tf_row, row in zip(tf_dset_wo_batch, dset):
self.assertEqual(tf_row["col_1"].numpy().decode("utf-8"), row["col_1"])
self.assertEqual(tf_row["col_2"].numpy().decode("utf-8"), row["col_2"])
tf_dset_w_batch = dset.to_tf_dataset(columns=["col_1", "col_2"], batch_size=2)
for tf_row, row in zip(tf_dset_w_batch.unbatch(), dset):
self.assertEqual(tf_row["col_1"].numpy().decode("utf-8"), row["col_1"])
self.assertEqual(tf_row["col_2"].numpy().decode("utf-8"), row["col_2"])
self.assertEqual(tf_dset_w_batch.unbatch().element_spec, tf_dset_wo_batch.element_spec)
self.assertEqual(tf_dset_w_batch.element_spec, tf_dset_wo_batch.batch(2).element_spec)
def test_cast_with_sliced_list():
old_features = Features({"foo": Sequence(Value("int64"))})
new_features = Features({"foo": Sequence(Value("int32"))})
dataset = Dataset.from_dict({"foo": [[i] * (i % 3) for i in range(20)]}, features=old_features)
casted_dataset = dataset.cast(new_features, batch_size=2) # small batch size to slice the ListArray
assert dataset["foo"] == casted_dataset["foo"]
assert casted_dataset.features == new_features
@pytest.mark.parametrize("include_nulls", [False, True])
def test_class_encode_column_with_none(include_nulls):
dataset = Dataset.from_dict({"col_1": ["a", "b", "c", None, "d", None]})
dataset = dataset.class_encode_column("col_1", include_nulls=include_nulls)
class_names = ["a", "b", "c", "d"]
if include_nulls:
class_names += ["None"]
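    # nulls are encoded as an extra "None" class only when include_nulls=True; otherwise they stay as None values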
assert isinstance(dataset.features["col_1"], ClassLabel)
assert set(dataset.features["col_1"].names) == set(class_names)
assert (None in dataset.unique("col_1")) == (not include_nulls)
@pytest.mark.parametrize("null_placement", ["first", "last"])
def test_sort_with_none(null_placement):
dataset = Dataset.from_dict({"col_1": ["item_2", "item_3", "item_1", None, "item_4", None]})
dataset = dataset.sort("col_1", null_placement=null_placement)
if null_placement == "first":
assert dataset["col_1"] == [None, None, "item_1", "item_2", "item_3", "item_4"]
else:
assert dataset["col_1"] == ["item_1", "item_2", "item_3", "item_4", None, None]
def test_update_metadata_with_features(dataset_dict):
table1 = pa.Table.from_pydict(dataset_dict)
features1 = Features.from_arrow_schema(table1.schema)
features2 = features1.copy()
features2["col_2"] = ClassLabel(num_classes=len(table1))
assert features1 != features2
table2 = update_metadata_with_features(table1, features2)
metadata = json.loads(table2.schema.metadata[b"huggingface"].decode())
assert features2 == Features.from_dict(metadata["info"]["features"])
with Dataset(table1) as dset1, Dataset(table2) as dset2:
assert dset1.features == features1
assert dset2.features == features2
@pytest.mark.parametrize("dataset_type", ["in_memory", "memory_mapped", "mixed"])
@pytest.mark.parametrize("axis, expected_shape", [(0, (4, 3)), (1, (2, 6))])
def test_concatenate_datasets(dataset_type, axis, expected_shape, dataset_dict, arrow_path):
table = {
"in_memory": InMemoryTable.from_pydict(dataset_dict),
"memory_mapped": MemoryMappedTable.from_file(arrow_path),
}
tables = [
table[dataset_type if dataset_type != "mixed" else "memory_mapped"].slice(0, 2), # shape = (2, 3)
table[dataset_type if dataset_type != "mixed" else "in_memory"].slice(2, 4), # shape = (2, 3)
]
if axis == 1: # don't duplicate columns
tables[1] = tables[1].rename_columns([col + "_bis" for col in tables[1].column_names])
datasets = [Dataset(table) for table in tables]
dataset = concatenate_datasets(datasets, axis=axis)
assert dataset.shape == expected_shape
assert_arrow_metadata_are_synced_with_dataset_features(dataset)
def test_concatenate_datasets_new_columns():
dataset1 = Dataset.from_dict({"col_1": ["a", "b", "c"]})
dataset2 = Dataset.from_dict({"col_1": ["d", "e", "f"], "col_2": [True, False, True]})
dataset = concatenate_datasets([dataset1, dataset2])
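    # columns missing from one of the datasets are filled with None in the concatenated result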
assert dataset.data.shape == (6, 2)
assert dataset.features == Features({"col_1": Value("string"), "col_2": Value("bool")})
assert dataset[:] == {"col_1": ["a", "b", "c", "d", "e", "f"], "col_2": [None, None, None, True, False, True]}
dataset3 = Dataset.from_dict({"col_3": ["a_1"]})
dataset = concatenate_datasets([dataset, dataset3])
assert dataset.data.shape == (7, 3)
assert dataset.features == Features({"col_1": Value("string"), "col_2": Value("bool"), "col_3": Value("string")})
assert dataset[:] == {
"col_1": ["a", "b", "c", "d", "e", "f", None],
"col_2": [None, None, None, True, False, True, None],
"col_3": [None, None, None, None, None, None, "a_1"],
}
@pytest.mark.parametrize("axis", [0, 1])
def test_concatenate_datasets_complex_features(axis):
n = 5
dataset1 = Dataset.from_dict(
{"col_1": [0] * n, "col_2": list(range(n))},
features=Features({"col_1": Value("int32"), "col_2": ClassLabel(num_classes=n)}),
)
if axis == 1:
dataset2 = dataset1.rename_columns({col: col + "_" for col in dataset1.column_names})
expected_features = Features({**dataset1.features, **dataset2.features})
else:
dataset2 = dataset1
expected_features = dataset1.features
assert concatenate_datasets([dataset1, dataset2], axis=axis).features == expected_features
@pytest.mark.parametrize("other_dataset_type", ["in_memory", "memory_mapped", "concatenation"])
@pytest.mark.parametrize("axis, expected_shape", [(0, (8, 3)), (1, (4, 6))])
def test_concatenate_datasets_with_concatenation_tables(
axis, expected_shape, other_dataset_type, dataset_dict, arrow_path
):
def _create_concatenation_table(axis):
if axis == 0: # shape: (4, 3) = (4, 1) + (4, 2)
concatenation_table = ConcatenationTable.from_blocks(
[
[
InMemoryTable.from_pydict({"col_1": dataset_dict["col_1"]}),
MemoryMappedTable.from_file(arrow_path).remove_column(0),
]
]
)
elif axis == 1: # shape: (4, 3) = (1, 3) + (3, 3)
concatenation_table = ConcatenationTable.from_blocks(
[
[InMemoryTable.from_pydict(dataset_dict).slice(0, 1)],
[MemoryMappedTable.from_file(arrow_path).slice(1, 4)],
]
)
return concatenation_table
concatenation_table = _create_concatenation_table(axis)
assert concatenation_table.shape == (4, 3)
if other_dataset_type == "in_memory":
other_table = InMemoryTable.from_pydict(dataset_dict)
elif other_dataset_type == "memory_mapped":
other_table = MemoryMappedTable.from_file(arrow_path)
elif other_dataset_type == "concatenation":
other_table = _create_concatenation_table(axis)
assert other_table.shape == (4, 3)
tables = [concatenation_table, other_table]
if axis == 1: # don't duplicate columns
tables[1] = tables[1].rename_columns([col + "_bis" for col in tables[1].column_names])
for tables in [tables, reversed(tables)]:
datasets = [Dataset(table) for table in tables]
dataset = concatenate_datasets(datasets, axis=axis)
assert dataset.shape == expected_shape
def test_concatenate_datasets_duplicate_columns(dataset):
with pytest.raises(ValueError) as excinfo:
concatenate_datasets([dataset, dataset], axis=1)
assert "duplicated" in str(excinfo.value)
def test_interleave_datasets():
d1 = Dataset.from_dict({"a": [0, 1, 2]})
d2 = Dataset.from_dict({"a": [10, 11, 12, 13]})
d3 = Dataset.from_dict({"a": [22, 21, 20]}).select([2, 1, 0])
dataset = interleave_datasets([d1, d2, d3])
expected_length = 3 * min(len(d1), len(d2), len(d3))
expected_values = [x["a"] for x in itertools.chain(*zip(d1, d2, d3))]
assert isinstance(dataset, Dataset)
assert len(dataset) == expected_length
assert dataset["a"] == expected_values
assert dataset._fingerprint == interleave_datasets([d1, d2, d3])._fingerprint
def test_interleave_datasets_probabilities():
seed = 42
probabilities = [0.3, 0.5, 0.2]
d1 = Dataset.from_dict({"a": [0, 1, 2]})
d2 = Dataset.from_dict({"a": [10, 11, 12, 13]})
d3 = Dataset.from_dict({"a": [22, 21, 20]}).select([2, 1, 0])
dataset = interleave_datasets([d1, d2, d3], probabilities=probabilities, seed=seed)
expected_length = 7 # hardcoded
expected_values = [10, 11, 20, 12, 0, 21, 13] # hardcoded
assert isinstance(dataset, Dataset)
assert len(dataset) == expected_length
assert dataset["a"] == expected_values
assert (
dataset._fingerprint == interleave_datasets([d1, d2, d3], probabilities=probabilities, seed=seed)._fingerprint
)
def test_interleave_datasets_oversampling_strategy():
d1 = Dataset.from_dict({"a": [0, 1, 2]})
d2 = Dataset.from_dict({"a": [10, 11, 12, 13]})
d3 = Dataset.from_dict({"a": [22, 21, 20]}).select([2, 1, 0])
dataset = interleave_datasets([d1, d2, d3], stopping_strategy="all_exhausted")
expected_length = 3 * max(len(d1), len(d2), len(d3))
expected_values = [0, 10, 20, 1, 11, 21, 2, 12, 22, 0, 13, 20] # hardcoded
assert isinstance(dataset, Dataset)
assert len(dataset) == expected_length
assert dataset["a"] == expected_values
assert dataset._fingerprint == interleave_datasets([d1, d2, d3], stopping_strategy="all_exhausted")._fingerprint
def test_interleave_datasets_probabilities_oversampling_strategy():
seed = 42
probabilities = [0.3, 0.5, 0.2]
d1 = Dataset.from_dict({"a": [0, 1, 2]})
d2 = Dataset.from_dict({"a": [10, 11, 12, 13]})
d3 = Dataset.from_dict({"a": [22, 21, 20]}).select([2, 1, 0])
dataset = interleave_datasets(
[d1, d2, d3], stopping_strategy="all_exhausted", probabilities=probabilities, seed=seed
)
expected_length = 16 # hardcoded
expected_values = [10, 11, 20, 12, 0, 21, 13, 10, 1, 11, 12, 22, 13, 20, 10, 2] # hardcoded
assert isinstance(dataset, Dataset)
assert len(dataset) == expected_length
assert dataset["a"] == expected_values
assert (
dataset._fingerprint
== interleave_datasets(
[d1, d2, d3], stopping_strategy="all_exhausted", probabilities=probabilities, seed=seed
)._fingerprint
)
@pytest.mark.parametrize("batch_size", [4, 5])
@pytest.mark.parametrize("drop_last_batch", [False, True])
def test_dataset_iter_batch(batch_size, drop_last_batch):
n = 25
dset = Dataset.from_dict({"i": list(range(n))})
all_col_values = list(range(n))
batches = []
for i, batch in enumerate(dset.iter(batch_size, drop_last_batch=drop_last_batch)):
assert batch == {"i": all_col_values[i * batch_size : (i + 1) * batch_size]}
batches.append(batch)
if drop_last_batch:
assert all(len(batch["i"]) == batch_size for batch in batches)
else:
assert all(len(batch["i"]) == batch_size for batch in batches[:-1])
assert len(batches[-1]["i"]) <= batch_size
@pytest.mark.parametrize(
"column, expected_dtype",
[(["a", "b", "c", "d"], "string"), ([1, 2, 3, 4], "int64"), ([1.0, 2.0, 3.0, 4.0], "float64")],
)
@pytest.mark.parametrize("in_memory", [False, True])
@pytest.mark.parametrize(
"transform",
[
None,
("shuffle", (42,), {}),
("with_format", ("pandas",), {}),
("class_encode_column", ("col_2",), {}),
("select", (range(3),), {}),
],
)
def test_dataset_add_column(column, expected_dtype, in_memory, transform, dataset_dict, arrow_path):
column_name = "col_4"
original_dataset = (
Dataset(InMemoryTable.from_pydict(dataset_dict))
if in_memory
else Dataset(MemoryMappedTable.from_file(arrow_path))
)
if transform is not None:
transform_name, args, kwargs = transform
original_dataset: Dataset = getattr(original_dataset, transform_name)(*args, **kwargs)
column = column[:3] if transform is not None and transform_name == "select" else column
dataset = original_dataset.add_column(column_name, column)
    assert dataset.data.shape == ((3, 4) if transform is not None and transform_name == "select" else (4, 4))
expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
# Sort expected features as in the original dataset
expected_features = {feature: expected_features[feature] for feature in original_dataset.features}
# Add new column feature
expected_features[column_name] = expected_dtype
assert dataset.data.column_names == list(expected_features.keys())
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
    assert len(dataset.data.blocks) == (1 if in_memory else 2)  # multiple InMemoryTables are consolidated as one
assert dataset.format["type"] == original_dataset.format["type"]
assert dataset._fingerprint != original_dataset._fingerprint
dataset.reset_format()
original_dataset.reset_format()
assert all(dataset[col] == original_dataset[col] for col in original_dataset.column_names)
assert set(dataset["col_4"]) == set(column)
if dataset._indices is not None:
dataset_indices = dataset._indices["indices"].to_pylist()
expected_dataset_indices = original_dataset._indices["indices"].to_pylist()
assert dataset_indices == expected_dataset_indices
assert_arrow_metadata_are_synced_with_dataset_features(dataset)
@pytest.mark.parametrize(
"transform",
[None, ("shuffle", (42,), {}), ("with_format", ("pandas",), {}), ("class_encode_column", ("col_2",), {})],
)
@pytest.mark.parametrize("in_memory", [False, True])
@pytest.mark.parametrize(
"item",
[
{"col_1": "2", "col_2": 2, "col_3": 2.0},
{"col_1": "2", "col_2": "2", "col_3": "2"},
{"col_1": 2, "col_2": 2, "col_3": 2},
{"col_1": 2.0, "col_2": 2.0, "col_3": 2.0},
],
)
def test_dataset_add_item(item, in_memory, dataset_dict, arrow_path, transform):
dataset_to_test = (
Dataset(InMemoryTable.from_pydict(dataset_dict))
if in_memory
else Dataset(MemoryMappedTable.from_file(arrow_path))
)
if transform is not None:
transform_name, args, kwargs = transform
dataset_to_test: Dataset = getattr(dataset_to_test, transform_name)(*args, **kwargs)
dataset = dataset_to_test.add_item(item)
assert dataset.data.shape == (5, 3)
expected_features = dataset_to_test.features
assert sorted(dataset.data.column_names) == sorted(expected_features.keys())
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature] == expected_dtype
    assert len(dataset.data.blocks) == (1 if in_memory else 2)  # multiple InMemoryTables are consolidated as one
assert dataset.format["type"] == dataset_to_test.format["type"]
assert dataset._fingerprint != dataset_to_test._fingerprint
dataset.reset_format()
dataset_to_test.reset_format()
assert dataset[:-1] == dataset_to_test[:]
assert {k: int(v) for k, v in dataset[-1].items()} == {k: int(v) for k, v in item.items()}
if dataset._indices is not None:
dataset_indices = dataset._indices["indices"].to_pylist()
dataset_to_test_indices = dataset_to_test._indices["indices"].to_pylist()
assert dataset_indices == dataset_to_test_indices + [len(dataset_to_test._data)]
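# Adding an item that introduces new columns should backfill those columns with None
# for the pre-existing rows, as checked below.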
def test_dataset_add_item_new_columns():
dataset = Dataset.from_dict({"col_1": [0, 1, 2]}, features=Features({"col_1": Value("uint8")}))
dataset = dataset.add_item({"col_1": 3, "col_2": "a"})
assert dataset.data.shape == (4, 2)
assert dataset.features == Features({"col_1": Value("uint8"), "col_2": Value("string")})
assert dataset[:] == {"col_1": [0, 1, 2, 3], "col_2": [None, None, None, "a"]}
dataset = dataset.add_item({"col_3": True})
assert dataset.data.shape == (5, 3)
assert dataset.features == Features({"col_1": Value("uint8"), "col_2": Value("string"), "col_3": Value("bool")})
assert dataset[:] == {
"col_1": [0, 1, 2, 3, None],
"col_2": [None, None, None, "a", None],
"col_3": [None, None, None, None, True],
}
def test_dataset_add_item_introduce_feature_type():
dataset = Dataset.from_dict({"col_1": [None, None, None]})
dataset = dataset.add_item({"col_1": "a"})
assert dataset.data.shape == (4, 1)
assert dataset.features == Features({"col_1": Value("string")})
assert dataset[:] == {"col_1": [None, None, None, "a"]}
def test_dataset_filter_batched_indices():
ds = Dataset.from_dict({"num": [0, 1, 2, 3]})
ds = ds.filter(lambda num: num % 2 == 0, input_columns="num", batch_size=2)
assert all(item["num"] % 2 == 0 for item in ds)
@pytest.mark.parametrize("in_memory", [False, True])
def test_dataset_from_file(in_memory, dataset, arrow_file):
filename = arrow_file
with assert_arrow_memory_increases() if in_memory else assert_arrow_memory_doesnt_increase():
dataset_from_file = Dataset.from_file(filename, in_memory=in_memory)
assert dataset_from_file.features.type == dataset.features.type
assert dataset_from_file.features == dataset.features
assert dataset_from_file.cache_files == ([{"filename": filename}] if not in_memory else [])
def _check_csv_dataset(dataset, expected_features):
assert isinstance(dataset, Dataset)
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_csv_keep_in_memory(keep_in_memory, csv_path, tmp_path):
cache_dir = tmp_path / "cache"
expected_features = {"col_1": "int64", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
dataset = Dataset.from_csv(csv_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory)
_check_csv_dataset(dataset, expected_features)
@pytest.mark.parametrize(
"features",
[
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
],
)
def test_dataset_from_csv_features(features, csv_path, tmp_path):
cache_dir = tmp_path / "cache"
    # The CSV file loses the col_1 string dtype information: the default inferred dtype is "int64" instead of "string"
default_expected_features = {"col_1": "int64", "col_2": "int64", "col_3": "float64"}
expected_features = features.copy() if features else default_expected_features
features = (
Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
)
dataset = Dataset.from_csv(csv_path, features=features, cache_dir=cache_dir)
_check_csv_dataset(dataset, expected_features)
@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_csv_split(split, csv_path, tmp_path):
cache_dir = tmp_path / "cache"
expected_features = {"col_1": "int64", "col_2": "int64", "col_3": "float64"}
dataset = Dataset.from_csv(csv_path, cache_dir=cache_dir, split=split)
_check_csv_dataset(dataset, expected_features)
    assert dataset.split == (split if split else "train")
@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_csv_path_type(path_type, csv_path, tmp_path):
if issubclass(path_type, str):
path = csv_path
elif issubclass(path_type, list):
path = [csv_path]
cache_dir = tmp_path / "cache"
expected_features = {"col_1": "int64", "col_2": "int64", "col_3": "float64"}
dataset = Dataset.from_csv(path, cache_dir=cache_dir)
_check_csv_dataset(dataset, expected_features)
def _check_json_dataset(dataset, expected_features):
assert isinstance(dataset, Dataset)
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
cache_dir = tmp_path / "cache"
expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
dataset = Dataset.from_json(jsonl_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory)
_check_json_dataset(dataset, expected_features)
@pytest.mark.parametrize(
"features",
[
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
],
)
def test_dataset_from_json_features(features, jsonl_path, tmp_path):
cache_dir = tmp_path / "cache"
default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
expected_features = features.copy() if features else default_expected_features
features = (
Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
)
dataset = Dataset.from_json(jsonl_path, features=features, cache_dir=cache_dir)
_check_json_dataset(dataset, expected_features)
def test_dataset_from_json_with_class_label_feature(jsonl_str_path, tmp_path):
features = Features(
{
"col_1": ClassLabel(names=["s0", "s1", "s2", "s3"]),
"col_2": Value("int64"),
"col_3": Value("float64"),
}
)
cache_dir = tmp_path / "cache"
dataset = Dataset.from_json(jsonl_str_path, features=features, cache_dir=cache_dir)
assert dataset.features["col_1"].dtype == "int64"
@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_json_split(split, jsonl_path, tmp_path):
cache_dir = tmp_path / "cache"
expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
dataset = Dataset.from_json(jsonl_path, cache_dir=cache_dir, split=split)
_check_json_dataset(dataset, expected_features)
    assert dataset.split == (split if split else "train")
@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_json_path_type(path_type, jsonl_path, tmp_path):
if issubclass(path_type, str):
path = jsonl_path
elif issubclass(path_type, list):
path = [jsonl_path]
cache_dir = tmp_path / "cache"
expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
dataset = Dataset.from_json(path, cache_dir=cache_dir)
_check_json_dataset(dataset, expected_features)
def _check_parquet_dataset(dataset, expected_features):
assert isinstance(dataset, Dataset)
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_parquet_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
cache_dir = tmp_path / "cache"
expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
dataset = Dataset.from_parquet(parquet_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory)
_check_parquet_dataset(dataset, expected_features)
@pytest.mark.parametrize(
"features",
[
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
],
)
def test_dataset_from_parquet_features(features, parquet_path, tmp_path):
cache_dir = tmp_path / "cache"
default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
expected_features = features.copy() if features else default_expected_features
features = (
Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
)
dataset = Dataset.from_parquet(parquet_path, features=features, cache_dir=cache_dir)
_check_parquet_dataset(dataset, expected_features)
@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_parquet_split(split, parquet_path, tmp_path):
cache_dir = tmp_path / "cache"
expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
dataset = Dataset.from_parquet(parquet_path, cache_dir=cache_dir, split=split)
_check_parquet_dataset(dataset, expected_features)
    assert dataset.split == (split if split else "train")
@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_parquet_path_type(path_type, parquet_path, tmp_path):
if issubclass(path_type, str):
path = parquet_path
elif issubclass(path_type, list):
path = [parquet_path]
cache_dir = tmp_path / "cache"
expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
dataset = Dataset.from_parquet(path, cache_dir=cache_dir)
_check_parquet_dataset(dataset, expected_features)
def _check_text_dataset(dataset, expected_features):
assert isinstance(dataset, Dataset)
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
cache_dir = tmp_path / "cache"
expected_features = {"text": "string"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
dataset = Dataset.from_text(text_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory)
_check_text_dataset(dataset, expected_features)
@pytest.mark.parametrize(
"features",
[
None,
{"text": "string"},
{"text": "int32"},
{"text": "float32"},
],
)
def test_dataset_from_text_features(features, text_path, tmp_path):
cache_dir = tmp_path / "cache"
default_expected_features = {"text": "string"}
expected_features = features.copy() if features else default_expected_features
features = (
Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
)
dataset = Dataset.from_text(text_path, features=features, cache_dir=cache_dir)
_check_text_dataset(dataset, expected_features)
@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_text_split(split, text_path, tmp_path):
cache_dir = tmp_path / "cache"
expected_features = {"text": "string"}
dataset = Dataset.from_text(text_path, cache_dir=cache_dir, split=split)
_check_text_dataset(dataset, expected_features)
    assert dataset.split == (split if split else "train")
@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_text_path_type(path_type, text_path, tmp_path):
if issubclass(path_type, str):
path = text_path
elif issubclass(path_type, list):
path = [text_path]
cache_dir = tmp_path / "cache"
expected_features = {"text": "string"}
dataset = Dataset.from_text(path, cache_dir=cache_dir)
_check_text_dataset(dataset, expected_features)
@pytest.fixture
def data_generator():
def _gen():
data = [
{"col_1": "0", "col_2": 0, "col_3": 0.0},
{"col_1": "1", "col_2": 1, "col_3": 1.0},
{"col_1": "2", "col_2": 2, "col_3": 2.0},
{"col_1": "3", "col_2": 3, "col_3": 3.0},
]
for item in data:
yield item
return _gen
def _check_generator_dataset(dataset, expected_features, split):
assert isinstance(dataset, Dataset)
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.split == split
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_generator_keep_in_memory(keep_in_memory, data_generator, tmp_path):
cache_dir = tmp_path / "cache"
expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
dataset = Dataset.from_generator(data_generator, cache_dir=cache_dir, keep_in_memory=keep_in_memory)
_check_generator_dataset(dataset, expected_features, NamedSplit("train"))
@pytest.mark.parametrize(
"features",
[
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
],
)
def test_dataset_from_generator_features(features, data_generator, tmp_path):
cache_dir = tmp_path / "cache"
default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
expected_features = features.copy() if features else default_expected_features
features = (
Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
)
dataset = Dataset.from_generator(data_generator, features=features, cache_dir=cache_dir)
_check_generator_dataset(dataset, expected_features, NamedSplit("train"))
@pytest.mark.parametrize(
"split",
[None, NamedSplit("train"), "train", NamedSplit("foo"), "foo"],
)
def test_dataset_from_generator_split(split, data_generator, tmp_path):
cache_dir = tmp_path / "cache"
default_expected_split = "train"
expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
expected_split = split if split else default_expected_split
if split:
dataset = Dataset.from_generator(data_generator, cache_dir=cache_dir, split=split)
else:
dataset = Dataset.from_generator(data_generator, cache_dir=cache_dir)
_check_generator_dataset(dataset, expected_features, expected_split)
@require_not_windows
@require_dill_gt_0_3_2
@require_pyspark
def test_from_spark():
import pyspark
spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
data = [
("0", 0, 0.0),
("1", 1, 1.0),
("2", 2, 2.0),
("3", 3, 3.0),
]
df = spark.createDataFrame(data, "col_1: string, col_2: int, col_3: float")
dataset = Dataset.from_spark(df)
assert isinstance(dataset, Dataset)
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
@require_not_windows
@require_dill_gt_0_3_2
@require_pyspark
def test_from_spark_features():
import PIL.Image
import pyspark
spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
data = [(0, np.arange(4 * 4 * 3).reshape(4, 4, 3).tolist())]
df = spark.createDataFrame(data, "idx: int, image: array<array<array<int>>>")
features = Features({"idx": Value("int64"), "image": Image()})
dataset = Dataset.from_spark(
df,
features=features,
)
assert isinstance(dataset, Dataset)
assert dataset.num_rows == 1
assert dataset.num_columns == 2
assert dataset.column_names == ["idx", "image"]
assert isinstance(dataset[0]["image"], PIL.Image.Image)
assert dataset.features == features
assert_arrow_metadata_are_synced_with_dataset_features(dataset)
@require_not_windows
@require_dill_gt_0_3_2
@require_pyspark
def test_from_spark_different_cache():
import pyspark
spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
df = spark.createDataFrame([("0", 0)], "col_1: string, col_2: int")
dataset = Dataset.from_spark(df)
assert isinstance(dataset, Dataset)
different_df = spark.createDataFrame([("1", 1)], "col_1: string, col_2: int")
different_dataset = Dataset.from_spark(different_df)
assert isinstance(different_dataset, Dataset)
assert dataset[0]["col_1"] == "0"
# Check to make sure that the second dataset wasn't read from the cache.
assert different_dataset[0]["col_1"] == "1"
def _check_sql_dataset(dataset, expected_features):
assert isinstance(dataset, Dataset)
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
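# Note: a connection string can be hashed for the cache fingerprint, while an
# sqlalchemy engine object cannot, which is why the "couldn't be hashed properly"
# warning is only expected in the engine case below.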
@require_sqlalchemy
@pytest.mark.parametrize("con_type", ["string", "engine"])
def test_dataset_from_sql_con_type(con_type, sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning, caplog):
cache_dir = tmp_path / "cache"
expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
if con_type == "string":
con = "sqlite:///" + sqlite_path
elif con_type == "engine":
import sqlalchemy
con = sqlalchemy.create_engine("sqlite:///" + sqlite_path)
with caplog.at_level(INFO, logger=get_logger().name):
dataset = Dataset.from_sql(
"dataset",
con,
cache_dir=cache_dir,
)
if con_type == "string":
assert "couldn't be hashed properly" not in caplog.text
elif con_type == "engine":
assert "couldn't be hashed properly" in caplog.text
dataset = Dataset.from_sql(
"dataset",
con,
cache_dir=cache_dir,
)
_check_sql_dataset(dataset, expected_features)
@require_sqlalchemy
@pytest.mark.parametrize(
"features",
[
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
],
)
def test_dataset_from_sql_features(features, sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
cache_dir = tmp_path / "cache"
default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
expected_features = features.copy() if features else default_expected_features
features = (
Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
)
dataset = Dataset.from_sql("dataset", "sqlite:///" + sqlite_path, features=features, cache_dir=cache_dir)
_check_sql_dataset(dataset, expected_features)
@require_sqlalchemy
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_sql_keep_in_memory(keep_in_memory, sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
cache_dir = tmp_path / "cache"
expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
dataset = Dataset.from_sql(
"dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory
)
_check_sql_dataset(dataset, expected_features)
def test_dataset_to_json(dataset, tmp_path):
file_path = tmp_path / "test_path.jsonl"
bytes_written = dataset.to_json(path_or_buf=file_path)
assert file_path.is_file()
assert bytes_written == file_path.stat().st_size
df = pd.read_json(file_path, orient="records", lines=True)
assert df.shape == dataset.shape
assert list(df.columns) == list(dataset.column_names)
@pytest.mark.parametrize("in_memory", [False, True])
@pytest.mark.parametrize(
"method_and_params",
[
("rename_column", (), {"original_column_name": "labels", "new_column_name": "label"}),
("remove_columns", (), {"column_names": "labels"}),
(
"cast",
(),
{
"features": Features(
{
"tokens": Sequence(Value("string")),
"labels": Sequence(Value("int16")),
"answers": Sequence(
{
"text": Value("string"),
"answer_start": Value("int32"),
}
),
"id": Value("int32"),
}
)
},
),
("flatten", (), {}),
],
)
def test_pickle_dataset_after_transforming_the_table(in_memory, method_and_params, arrow_file):
method, args, kwargs = method_and_params
with (
Dataset.from_file(arrow_file, in_memory=in_memory) as dataset,
Dataset.from_file(arrow_file, in_memory=in_memory) as reference_dataset,
):
out = getattr(dataset, method)(*args, **kwargs)
dataset = out if out is not None else dataset
pickled_dataset = pickle.dumps(dataset)
reloaded_dataset = pickle.loads(pickled_dataset)
assert dataset._data != reference_dataset._data
assert dataset._data.table == reloaded_dataset._data.table
def test_dummy_dataset_serialize_fs(dataset, mockfs):
dataset_path = "mock://my_dataset"
dataset.save_to_disk(dataset_path, storage_options=mockfs.storage_options)
assert mockfs.isdir(dataset_path)
assert mockfs.glob(dataset_path + "/*")
reloaded = Dataset.load_from_disk(dataset_path, storage_options=mockfs.storage_options)
assert len(reloaded) == len(dataset)
assert reloaded.features == dataset.features
assert reloaded.to_dict() == dataset.to_dict()
@pytest.mark.parametrize(
"uri_or_path",
[
"relative/path",
"/absolute/path",
"s3://bucket/relative/path",
"hdfs://relative/path",
"hdfs:///absolute/path",
],
)
def test_build_local_temp_path(uri_or_path):
extracted_path = strip_protocol(uri_or_path)
local_temp_path = Dataset._build_local_temp_path(extracted_path).as_posix()
extracted_path_without_anchor = Path(extracted_path).relative_to(Path(extracted_path).anchor).as_posix()
# Check that the local temp path is relative to the system temp dir
path_relative_to_tmp_dir = Path(local_temp_path).relative_to(Path(tempfile.gettempdir())).as_posix()
assert (
"hdfs://" not in path_relative_to_tmp_dir
and "s3://" not in path_relative_to_tmp_dir
and not local_temp_path.startswith(extracted_path_without_anchor)
and local_temp_path.endswith(extracted_path_without_anchor)
), f"Local temp path: {local_temp_path}"
class StratifiedTest(TestCase):
def test_errors_train_test_split_stratify(self):
ys = [
np.array([0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 2, 2]),
np.array([0, 1, 1, 1, 2, 2, 2, 3, 3, 3]),
np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2] * 2),
np.array([0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5]),
np.array([0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5]),
]
for i in range(len(ys)):
features = Features({"text": Value("int64"), "label": ClassLabel(len(np.unique(ys[i])))})
data = {"text": np.ones(len(ys[i])), "label": ys[i]}
d1 = Dataset.from_dict(data, features=features)
            # For checking that stratify_by_column exists as a key in self.features.keys()
if i == 0:
self.assertRaises(ValueError, d1.train_test_split, 0.33, stratify_by_column="labl")
# For checking minimum class count error
elif i == 1:
self.assertRaises(ValueError, d1.train_test_split, 0.33, stratify_by_column="label")
            # For checking that the label column is of type ClassLabel
elif i == 2:
d1 = Dataset.from_dict(data)
self.assertRaises(ValueError, d1.train_test_split, 0.33, stratify_by_column="label")
            # For checking that test_size is greater than or equal to the number of classes
elif i == 3:
self.assertRaises(ValueError, d1.train_test_split, 0.30, stratify_by_column="label")
            # For checking that train_size is greater than or equal to the number of classes
elif i == 4:
self.assertRaises(ValueError, d1.train_test_split, 0.60, stratify_by_column="label")
    def test_train_test_split_stratify(self):
ys = [
np.array([0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 2, 2]),
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]),
np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2] * 2),
np.array([0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3]),
np.array([0] * 800 + [1] * 50),
]
for y in ys:
features = Features({"text": Value("int64"), "label": ClassLabel(len(np.unique(y)))})
data = {"text": np.ones(len(y)), "label": y}
d1 = Dataset.from_dict(data, features=features)
d1 = d1.train_test_split(test_size=0.33, stratify_by_column="label")
y = np.asanyarray(y) # To make it indexable for y[train]
test_size = np.ceil(0.33 * len(y))
train_size = len(y) - test_size
npt.assert_array_equal(np.unique(d1["train"]["label"]), np.unique(d1["test"]["label"]))
# checking classes proportion
p_train = np.bincount(np.unique(d1["train"]["label"], return_inverse=True)[1]) / float(
len(d1["train"]["label"])
)
p_test = np.bincount(np.unique(d1["test"]["label"], return_inverse=True)[1]) / float(
len(d1["test"]["label"])
)
npt.assert_array_almost_equal(p_train, p_test, 1)
assert len(d1["train"]["text"]) + len(d1["test"]["text"]) == y.size
assert len(d1["train"]["text"]) == train_size
assert len(d1["test"]["text"]) == test_size
def test_dataset_estimate_nbytes():
ds = Dataset.from_dict({"a": ["0" * 100] * 100})
assert 0.9 * ds._estimate_nbytes() < 100 * 100, "must be smaller than full dataset size"
ds = Dataset.from_dict({"a": ["0" * 100] * 100}).select([0])
assert 0.9 * ds._estimate_nbytes() < 100 * 100, "must be smaller than one chunk"
ds = Dataset.from_dict({"a": ["0" * 100] * 100})
ds = concatenate_datasets([ds] * 100)
assert 0.9 * ds._estimate_nbytes() < 100 * 100 * 100, "must be smaller than full dataset size"
assert 1.1 * ds._estimate_nbytes() > 100 * 100 * 100, "must be bigger than full dataset size"
ds = Dataset.from_dict({"a": ["0" * 100] * 100})
ds = concatenate_datasets([ds] * 100).select([0])
assert 0.9 * ds._estimate_nbytes() < 100 * 100, "must be smaller than one chunk"
def test_dataset_to_iterable_dataset(dataset: Dataset):
iterable_dataset = dataset.to_iterable_dataset()
assert isinstance(iterable_dataset, IterableDataset)
assert list(iterable_dataset) == list(dataset)
assert iterable_dataset.features == dataset.features
iterable_dataset = dataset.to_iterable_dataset(num_shards=3)
assert isinstance(iterable_dataset, IterableDataset)
assert list(iterable_dataset) == list(dataset)
assert iterable_dataset.features == dataset.features
assert iterable_dataset.num_shards == 3
with pytest.raises(ValueError):
dataset.to_iterable_dataset(num_shards=len(dataset) + 1)
assert dataset.with_format("torch").to_iterable_dataset()._formatting.format_type == "torch"
with pytest.raises(NotImplementedError):
dataset.with_format("torch", columns=[dataset.column_names[0]]).to_iterable_dataset()
@require_pil
def test_dataset_format_with_unformatted_image():
import PIL
ds = Dataset.from_dict(
{"a": [np.arange(4 * 4 * 3).reshape(4, 4, 3)] * 10, "b": [[0, 1]] * 10},
Features({"a": Image(), "b": Sequence(Value("int64"))}),
)
ds.set_format("np", columns=["b"], output_all_columns=True)
assert isinstance(ds[0]["a"], PIL.Image.Image)
assert isinstance(ds[0]["b"], np.ndarray)
@pytest.mark.parametrize("batch_size", [1, 4])
@require_torch
def test_dataset_with_torch_dataloader(dataset, batch_size):
from torch.utils.data import DataLoader
from datasets import config
dataloader = DataLoader(dataset, batch_size=batch_size)
with patch.object(dataset, "_getitem", wraps=dataset._getitem) as mock_getitem:
out = list(dataloader)
getitem_call_count = mock_getitem.call_count
assert len(out) == len(dataset) // batch_size + int(len(dataset) % batch_size > 0)
    # calling dataset[list_of_indices] is much more efficient than [dataset[idx] for idx in list_of_indices]
if config.TORCH_VERSION >= version.parse("1.13.0"):
assert getitem_call_count == len(dataset) // batch_size + int(len(dataset) % batch_size > 0)
@pytest.mark.parametrize("return_lazy_dict", [True, False, "mix"])
def test_map_cases(return_lazy_dict):
def f(x):
"""May return a mix of LazyDict and regular Dict"""
if x["a"] < 2:
x["a"] = -1
return dict(x) if return_lazy_dict is False else x
else:
return x if return_lazy_dict is True else {}
ds = Dataset.from_dict({"a": [0, 1, 2, 3]})
ds = ds.map(f)
outputs = ds[:]
assert outputs == {"a": [-1, -1, 2, 3]}
def f(x):
"""May return a mix of LazyDict and regular Dict, but sometimes with None values"""
if x["a"] < 2:
x["a"] = None
return dict(x) if return_lazy_dict is False else x
else:
return x if return_lazy_dict is True else {}
ds = Dataset.from_dict({"a": [0, 1, 2, 3]})
ds = ds.map(f)
outputs = ds[:]
assert outputs == {"a": [None, None, 2, 3]}
def f(x):
"""Return a LazyDict, but we remove a lazy column and add a new one"""
if x["a"] < 2:
x["b"] = -1
return x
else:
x["b"] = x["a"]
return x
ds = Dataset.from_dict({"a": [0, 1, 2, 3]})
ds = ds.map(f, remove_columns=["a"])
outputs = ds[:]
assert outputs == {"b": [-1, -1, 2, 3]}
# The formatted dataset version removes the lazy column from a different dictionary, hence it should be preserved in the output
ds = Dataset.from_dict({"a": [0, 1, 2, 3]})
ds = ds.with_format("numpy")
ds = ds.map(f, remove_columns=["a"])
ds = ds.with_format(None)
outputs = ds[:]
assert outputs == {"a": [0, 1, 2, 3], "b": [-1, -1, 2, 3]}
def f(x):
"""May return a mix of LazyDict and regular Dict, but we replace a lazy column"""
if x["a"] < 2:
x["a"] = -1
return dict(x) if return_lazy_dict is False else x
else:
x["a"] = x["a"]
return x if return_lazy_dict is True else {"a": x["a"]}
ds = Dataset.from_dict({"a": [0, 1, 2, 3]})
ds = ds.map(f, remove_columns=["a"])
outputs = ds[:]
assert outputs == ({"a": [-1, -1, 2, 3]} if return_lazy_dict is False else {})
def f(x):
"""May return a mix of LazyDict and regular Dict, but we modify a nested lazy column in-place"""
if x["a"]["b"] < 2:
x["a"]["c"] = -1
return dict(x) if return_lazy_dict is False else x
else:
x["a"]["c"] = x["a"]["b"]
return x if return_lazy_dict is True else {}
ds = Dataset.from_dict({"a": [{"b": 0}, {"b": 1}, {"b": 2}, {"b": 3}]})
ds = ds.map(f)
outputs = ds[:]
assert outputs == {"a": [{"b": 0, "c": -1}, {"b": 1, "c": -1}, {"b": 2, "c": 2}, {"b": 3, "c": 3}]}
def f(x):
"""May return a mix of LazyDict and regular Dict, but using an extension type"""
if x["a"][0][0] < 2:
x["a"] = [[-1]]
return dict(x) if return_lazy_dict is False else x
else:
return x if return_lazy_dict is True else {}
features = Features({"a": Array2D(shape=(1, 1), dtype="int32")})
ds = Dataset.from_dict({"a": [[[i]] for i in [0, 1, 2, 3]]}, features=features)
ds = ds.map(f)
outputs = ds[:]
assert outputs == {"a": [[[i]] for i in [-1, -1, 2, 3]]}
def f(x):
"""May return a mix of LazyDict and regular Dict, but using a nested extension type"""
if x["a"]["nested"][0][0] < 2:
x["a"] = {"nested": [[-1]]}
return dict(x) if return_lazy_dict is False else x
else:
return x if return_lazy_dict is True else {}
features = Features({"a": {"nested": Array2D(shape=(1, 1), dtype="int64")}})
ds = Dataset.from_dict({"a": [{"nested": [[i]]} for i in [0, 1, 2, 3]]}, features=features)
ds = ds.map(f)
outputs = ds[:]
assert outputs == {"a": [{"nested": [[i]]} for i in [-1, -1, 2, 3]]}
def test_dataset_getitem_raises():
ds = Dataset.from_dict({"a": [0, 1, 2, 3]})
with pytest.raises(TypeError):
ds[False]
with pytest.raises(TypeError):
ds._getitem(True)
def test_categorical_dataset(tmpdir):
n_legs = pa.array([2, 4, 5, 100])
animals = pa.array(["Flamingo", "Horse", "Brittle stars", "Centipede"]).cast(
pa.dictionary(pa.int32(), pa.string())
)
names = ["n_legs", "animals"]
table = pa.Table.from_arrays([n_legs, animals], names=names)
table_path = str(tmpdir / "data.parquet")
pa.parquet.write_table(table, table_path)
dataset = Dataset.from_parquet(table_path)
entry = dataset[0]
# Categorical types get transparently converted to string
assert entry["animals"] == "Flamingo"
def test_dataset_batch():
# Create a simple Dataset
data = {"id": list(range(10)), "text": [f"Text {i}" for i in range(10)]}
ds = Dataset.from_dict(data)
# Test with batch_size=3, drop_last_batch=False
batched_ds = ds.batch(batch_size=3, drop_last_batch=False)
batches = list(batched_ds)
assert len(batches) == 4 # 3 full batches and 1 partial batch
for i, batch in enumerate(batches[:3]): # Check full batches
assert len(batch["id"]) == 3
assert len(batch["text"]) == 3
assert batch["id"] == [3 * i, 3 * i + 1, 3 * i + 2]
assert batch["text"] == [f"Text {3 * i}", f"Text {3 * i + 1}", f"Text {3 * i + 2}"]
# Check last partial batch
assert len(batches[3]["id"]) == 1
assert len(batches[3]["text"]) == 1
assert batches[3]["id"] == [9]
assert batches[3]["text"] == ["Text 9"]
# Test with batch_size=3, drop_last_batch=True
batched_ds = ds.batch(batch_size=3, drop_last_batch=True)
batches = list(batched_ds)
assert len(batches) == 3 # Only full batches
for i, batch in enumerate(batches):
assert len(batch["id"]) == 3
assert len(batch["text"]) == 3
assert batch["id"] == [3 * i, 3 * i + 1, 3 * i + 2]
assert batch["text"] == [f"Text {3 * i}", f"Text {3 * i + 1}", f"Text {3 * i + 2}"]
# Test with batch_size=4 (doesn't evenly divide dataset size)
batched_ds = ds.batch(batch_size=4, drop_last_batch=False)
batches = list(batched_ds)
assert len(batches) == 3 # 2 full batches and 1 partial batch
for i, batch in enumerate(batches[:2]): # Check full batches
assert len(batch["id"]) == 4
assert len(batch["text"]) == 4
assert batch["id"] == [4 * i, 4 * i + 1, 4 * i + 2, 4 * i + 3]
assert batch["text"] == [f"Text {4 * i}", f"Text {4 * i + 1}", f"Text {4 * i + 2}", f"Text {4 * i + 3}"]
# Check last partial batch
assert len(batches[2]["id"]) == 2
assert len(batches[2]["text"]) == 2
assert batches[2]["id"] == [8, 9]
assert batches[2]["text"] == ["Text 8", "Text 9"]
def test_dataset_from_dict_with_large_list():
data = {"col_1": [[1, 2], [3, 4]]}
features = Features({"col_1": LargeList(Value("int64"))})
ds = Dataset.from_dict(data, features=features)
assert isinstance(ds, Dataset)
assert pa.types.is_large_list(ds.data.schema.field("col_1").type)
def test_dataset_save_to_disk_with_large_list(tmp_path):
data = {"col_1": [[1, 2], [3, 4]]}
features = Features({"col_1": LargeList(Value("int64"))})
ds = Dataset.from_dict(data, features=features)
dataset_path = tmp_path / "dataset_dir"
ds.save_to_disk(dataset_path)
assert (dataset_path / "data-00000-of-00001.arrow").exists()
def test_dataset_save_to_disk_and_load_from_disk_round_trip_with_large_list(tmp_path):
data = {"col_1": [[1, 2], [3, 4]]}
features = Features({"col_1": LargeList(Value("int64"))})
ds = Dataset.from_dict(data, features=features)
dataset_path = tmp_path / "dataset_dir"
ds.save_to_disk(dataset_path)
assert (dataset_path / "data-00000-of-00001.arrow").exists()
loaded_ds = load_from_disk(dataset_path)
assert len(loaded_ds) == len(ds)
assert loaded_ds.features == ds.features
assert loaded_ds.to_dict() == ds.to_dict()
@require_polars
def test_from_polars_with_large_list():
import polars as pl
df = pl.from_dict({"col_1": [[1, 2], [3, 4]]})
ds = Dataset.from_polars(df)
assert isinstance(ds, Dataset)
@require_polars
def test_from_polars_save_to_disk_with_large_list(tmp_path):
import polars as pl
df = pl.from_dict({"col_1": [[1, 2], [3, 4]]})
ds = Dataset.from_polars(df)
dataset_path = tmp_path / "dataset_dir"
ds.save_to_disk(dataset_path)
assert (dataset_path / "data-00000-of-00001.arrow").exists()
@require_polars
def test_from_polars_save_to_disk_and_load_from_disk_round_trip_with_large_list(tmp_path):
import polars as pl
df = pl.from_dict({"col_1": [[1, 2], [3, 4]]})
ds = Dataset.from_polars(df)
dataset_path = tmp_path / "dataset_dir"
ds.save_to_disk(dataset_path)
assert (dataset_path / "data-00000-of-00001.arrow").exists()
loaded_ds = load_from_disk(dataset_path)
assert len(loaded_ds) == len(ds)
assert loaded_ds.features == ds.features
assert loaded_ds.to_dict() == ds.to_dict()
@require_polars
def test_polars_round_trip():
ds = Dataset.from_dict({"x": [[1, 2], [3, 4, 5]], "y": ["a", "b"]})
assert isinstance(Dataset.from_polars(ds.to_polars()), Dataset)
| datasets/tests/test_arrow_dataset.py/0 | {
"file_path": "datasets/tests/test_arrow_dataset.py",
"repo_id": "datasets",
"token_count": 119070
} |
import datetime
from pathlib import Path
from unittest import TestCase
import numpy as np
import pandas as pd
import pyarrow as pa
import pytest
from datasets import Audio, Features, Image, IterableDataset
from datasets.formatting import NumpyFormatter, PandasFormatter, PythonFormatter, query_table
from datasets.formatting.formatting import (
LazyBatch,
LazyRow,
NumpyArrowExtractor,
PandasArrowExtractor,
PythonArrowExtractor,
)
from datasets.table import InMemoryTable
from .utils import (
require_jax,
require_librosa,
require_numpy1_on_windows,
require_pil,
require_polars,
require_sndfile,
require_tf,
require_torch,
)
class AnyArray:
def __init__(self, data) -> None:
self.data = data
def __array__(self) -> np.ndarray:
return np.asarray(self.data)
def _gen_any_arrays():
for _ in range(10):
yield {"array": AnyArray(list(range(10)))}
@pytest.fixture
def any_arrays_dataset():
return IterableDataset.from_generator(_gen_any_arrays)
_COL_A = [0, 1, 2]
_COL_B = ["foo", "bar", "foobar"]
_COL_C = [[[1.0, 0.0, 0.0]] * 2, [[0.0, 1.0, 0.0]] * 2, [[0.0, 0.0, 1.0]] * 2]
_COL_D = [datetime.datetime(2023, 1, 1, 0, 0, tzinfo=datetime.timezone.utc)] * 3
_INDICES = [1, 0]
IMAGE_PATH_1 = Path(__file__).parent / "features" / "data" / "test_image_rgb.jpg"
IMAGE_PATH_2 = Path(__file__).parent / "features" / "data" / "test_image_rgba.png"
AUDIO_PATH_1 = Path(__file__).parent / "features" / "data" / "test_audio_44100.wav"
class ArrowExtractorTest(TestCase):
def _create_dummy_table(self):
return pa.Table.from_pydict({"a": _COL_A, "b": _COL_B, "c": _COL_C, "d": _COL_D})
def test_python_extractor(self):
pa_table = self._create_dummy_table()
extractor = PythonArrowExtractor()
row = extractor.extract_row(pa_table)
self.assertEqual(row, {"a": _COL_A[0], "b": _COL_B[0], "c": _COL_C[0], "d": _COL_D[0]})
col = extractor.extract_column(pa_table)
self.assertEqual(col, _COL_A)
batch = extractor.extract_batch(pa_table)
self.assertEqual(batch, {"a": _COL_A, "b": _COL_B, "c": _COL_C, "d": _COL_D})
def test_numpy_extractor(self):
pa_table = self._create_dummy_table().drop(["c", "d"])
extractor = NumpyArrowExtractor()
row = extractor.extract_row(pa_table)
np.testing.assert_equal(row, {"a": _COL_A[0], "b": _COL_B[0]})
col = extractor.extract_column(pa_table)
np.testing.assert_equal(col, np.array(_COL_A))
batch = extractor.extract_batch(pa_table)
np.testing.assert_equal(batch, {"a": np.array(_COL_A), "b": np.array(_COL_B)})
def test_numpy_extractor_nested(self):
pa_table = self._create_dummy_table().drop(["a", "b", "d"])
extractor = NumpyArrowExtractor()
row = extractor.extract_row(pa_table)
self.assertEqual(row["c"][0].dtype, np.float64)
self.assertEqual(row["c"].dtype, object)
col = extractor.extract_column(pa_table)
self.assertEqual(col[0][0].dtype, np.float64)
self.assertEqual(col[0].dtype, object)
self.assertEqual(col.dtype, object)
batch = extractor.extract_batch(pa_table)
self.assertEqual(batch["c"][0][0].dtype, np.float64)
self.assertEqual(batch["c"][0].dtype, object)
self.assertEqual(batch["c"].dtype, object)
def test_numpy_extractor_temporal(self):
pa_table = self._create_dummy_table().drop(["a", "b", "c"])
extractor = NumpyArrowExtractor()
row = extractor.extract_row(pa_table)
self.assertTrue(np.issubdtype(row["d"].dtype, np.datetime64))
col = extractor.extract_column(pa_table)
self.assertTrue(np.issubdtype(col[0].dtype, np.datetime64))
self.assertTrue(np.issubdtype(col.dtype, np.datetime64))
batch = extractor.extract_batch(pa_table)
self.assertTrue(np.issubdtype(batch["d"][0].dtype, np.datetime64))
self.assertTrue(np.issubdtype(batch["d"].dtype, np.datetime64))
def test_pandas_extractor(self):
pa_table = self._create_dummy_table()
extractor = PandasArrowExtractor()
row = extractor.extract_row(pa_table)
self.assertIsInstance(row, pd.DataFrame)
pd.testing.assert_series_equal(row["a"], pd.Series(_COL_A, name="a")[:1])
pd.testing.assert_series_equal(row["b"], pd.Series(_COL_B, name="b")[:1])
col = extractor.extract_column(pa_table)
pd.testing.assert_series_equal(col, pd.Series(_COL_A, name="a"))
batch = extractor.extract_batch(pa_table)
self.assertIsInstance(batch, pd.DataFrame)
pd.testing.assert_series_equal(batch["a"], pd.Series(_COL_A, name="a"))
pd.testing.assert_series_equal(batch["b"], pd.Series(_COL_B, name="b"))
def test_pandas_extractor_nested(self):
pa_table = self._create_dummy_table().drop(["a", "b", "d"])
extractor = PandasArrowExtractor()
row = extractor.extract_row(pa_table)
self.assertEqual(row["c"][0][0].dtype, np.float64)
self.assertEqual(row["c"].dtype, object)
col = extractor.extract_column(pa_table)
self.assertEqual(col[0][0].dtype, np.float64)
self.assertEqual(col[0].dtype, object)
self.assertEqual(col.dtype, object)
batch = extractor.extract_batch(pa_table)
self.assertEqual(batch["c"][0][0].dtype, np.float64)
self.assertEqual(batch["c"][0].dtype, object)
self.assertEqual(batch["c"].dtype, object)
def test_pandas_extractor_temporal(self):
pa_table = self._create_dummy_table().drop(["a", "b", "c"])
extractor = PandasArrowExtractor()
row = extractor.extract_row(pa_table)
self.assertTrue(pd.api.types.is_datetime64_any_dtype(row["d"].dtype))
col = extractor.extract_column(pa_table)
self.assertTrue(isinstance(col[0], datetime.datetime))
self.assertTrue(pd.api.types.is_datetime64_any_dtype(col.dtype))
batch = extractor.extract_batch(pa_table)
self.assertTrue(isinstance(batch["d"][0], datetime.datetime))
self.assertTrue(pd.api.types.is_datetime64_any_dtype(batch["d"].dtype))
@require_polars
def test_polars_extractor(self):
import polars as pl
from datasets.formatting.polars_formatter import PolarsArrowExtractor
pa_table = self._create_dummy_table()
extractor = PolarsArrowExtractor()
row = extractor.extract_row(pa_table)
self.assertIsInstance(row, pl.DataFrame)
assert pl.Series.eq(row["a"], pl.Series("a", _COL_A)[:1]).all()
assert pl.Series.eq(row["b"], pl.Series("b", _COL_B)[:1]).all()
col = extractor.extract_column(pa_table)
assert pl.Series.eq(col, pl.Series("a", _COL_A)).all()
batch = extractor.extract_batch(pa_table)
self.assertIsInstance(batch, pl.DataFrame)
assert pl.Series.eq(batch["a"], pl.Series("a", _COL_A)).all()
assert pl.Series.eq(batch["b"], pl.Series("b", _COL_B)).all()
@require_polars
def test_polars_nested(self):
import polars as pl
from datasets.formatting.polars_formatter import PolarsArrowExtractor
pa_table = self._create_dummy_table().drop(["a", "b", "d"])
extractor = PolarsArrowExtractor()
row = extractor.extract_row(pa_table)
self.assertEqual(row["c"][0][0].dtype, pl.Float64)
self.assertEqual(row["c"].dtype, pl.List(pl.List(pl.Float64)))
col = extractor.extract_column(pa_table)
self.assertEqual(col[0][0].dtype, pl.Float64)
self.assertEqual(col[0].dtype, pl.List(pl.Float64))
self.assertEqual(col.dtype, pl.List(pl.List(pl.Float64)))
batch = extractor.extract_batch(pa_table)
self.assertEqual(batch["c"][0][0].dtype, pl.Float64)
self.assertEqual(batch["c"][0].dtype, pl.List(pl.Float64))
self.assertEqual(batch["c"].dtype, pl.List(pl.List(pl.Float64)))
@require_polars
def test_polars_temporal(self):
from datasets.formatting.polars_formatter import PolarsArrowExtractor
pa_table = self._create_dummy_table().drop(["a", "b", "c"])
extractor = PolarsArrowExtractor()
row = extractor.extract_row(pa_table)
self.assertTrue(row["d"].dtype.is_temporal())
col = extractor.extract_column(pa_table)
self.assertTrue(isinstance(col[0], datetime.datetime))
self.assertTrue(col.dtype.is_temporal())
batch = extractor.extract_batch(pa_table)
self.assertTrue(isinstance(batch["d"][0], datetime.datetime))
self.assertTrue(batch["d"].dtype.is_temporal())
class LazyDictTest(TestCase):
def _create_dummy_table(self):
return pa.Table.from_pydict({"a": _COL_A, "b": _COL_B, "c": _COL_C})
def _create_dummy_formatter(self):
return PythonFormatter(lazy=True)
def test_lazy_dict_copy(self):
pa_table = self._create_dummy_table()
formatter = self._create_dummy_formatter()
lazy_batch = formatter.format_batch(pa_table)
lazy_batch_copy = lazy_batch.copy()
self.assertEqual(type(lazy_batch), type(lazy_batch_copy))
self.assertEqual(lazy_batch.items(), lazy_batch_copy.items())
lazy_batch["d"] = [1, 2, 3]
self.assertNotEqual(lazy_batch.items(), lazy_batch_copy.items())
class FormatterTest(TestCase):
def _create_dummy_table(self):
return pa.Table.from_pydict({"a": _COL_A, "b": _COL_B, "c": _COL_C})
def test_python_formatter(self):
pa_table = self._create_dummy_table()
formatter = PythonFormatter()
row = formatter.format_row(pa_table)
self.assertEqual(row, {"a": _COL_A[0], "b": _COL_B[0], "c": _COL_C[0]})
col = formatter.format_column(pa_table)
self.assertEqual(col, _COL_A)
batch = formatter.format_batch(pa_table)
self.assertEqual(batch, {"a": _COL_A, "b": _COL_B, "c": _COL_C})
def test_python_formatter_lazy(self):
pa_table = self._create_dummy_table()
formatter = PythonFormatter(lazy=True)
row = formatter.format_row(pa_table)
self.assertIsInstance(row, LazyRow)
self.assertEqual(row["a"], _COL_A[0])
self.assertEqual(row["b"], _COL_B[0])
self.assertEqual(row["c"], _COL_C[0])
batch = formatter.format_batch(pa_table)
self.assertIsInstance(batch, LazyBatch)
self.assertEqual(batch["a"], _COL_A)
self.assertEqual(batch["b"], _COL_B)
self.assertEqual(batch["c"], _COL_C)
def test_numpy_formatter(self):
pa_table = self._create_dummy_table()
formatter = NumpyFormatter()
row = formatter.format_row(pa_table)
np.testing.assert_equal(row, {"a": _COL_A[0], "b": _COL_B[0], "c": np.array(_COL_C[0])})
col = formatter.format_column(pa_table)
np.testing.assert_equal(col, np.array(_COL_A))
batch = formatter.format_batch(pa_table)
np.testing.assert_equal(batch, {"a": np.array(_COL_A), "b": np.array(_COL_B), "c": np.array(_COL_C)})
assert batch["c"].shape == np.array(_COL_C).shape
def test_numpy_formatter_np_array_kwargs(self):
pa_table = self._create_dummy_table().drop(["b"])
formatter = NumpyFormatter(dtype=np.float16)
row = formatter.format_row(pa_table)
self.assertEqual(row["c"].dtype, np.dtype(np.float16))
col = formatter.format_column(pa_table)
self.assertEqual(col.dtype, np.float16)
batch = formatter.format_batch(pa_table)
self.assertEqual(batch["a"].dtype, np.dtype(np.float16))
self.assertEqual(batch["c"].dtype, np.dtype(np.float16))
@require_pil
def test_numpy_formatter_image(self):
# same dimensions
pa_table = pa.table({"image": [{"bytes": None, "path": str(IMAGE_PATH_1)}] * 2})
formatter = NumpyFormatter(features=Features({"image": Image()}))
row = formatter.format_row(pa_table)
self.assertEqual(row["image"].dtype, np.uint8)
self.assertEqual(row["image"].shape, (480, 640, 3))
col = formatter.format_column(pa_table)
self.assertEqual(col.dtype, np.uint8)
self.assertEqual(col.shape, (2, 480, 640, 3))
batch = formatter.format_batch(pa_table)
self.assertEqual(batch["image"].dtype, np.uint8)
self.assertEqual(batch["image"].shape, (2, 480, 640, 3))
# different dimensions
pa_table = pa.table(
{"image": [{"bytes": None, "path": str(IMAGE_PATH_1)}, {"bytes": None, "path": str(IMAGE_PATH_2)}]}
)
formatter = NumpyFormatter(features=Features({"image": Image()}))
row = formatter.format_row(pa_table)
self.assertEqual(row["image"].dtype, np.uint8)
self.assertEqual(row["image"].shape, (480, 640, 3))
col = formatter.format_column(pa_table)
self.assertIsInstance(col, np.ndarray)
self.assertEqual(col.dtype, object)
self.assertEqual(col[0].dtype, np.uint8)
self.assertEqual(col[0].shape, (480, 640, 3))
batch = formatter.format_batch(pa_table)
self.assertIsInstance(batch["image"], np.ndarray)
self.assertEqual(batch["image"].dtype, object)
self.assertEqual(batch["image"][0].dtype, np.uint8)
self.assertEqual(batch["image"][0].shape, (480, 640, 3))
@require_librosa
@require_sndfile
def test_numpy_formatter_audio(self):
pa_table = pa.table({"audio": [{"bytes": None, "path": str(AUDIO_PATH_1)}]})
formatter = NumpyFormatter(features=Features({"audio": Audio()}))
row = formatter.format_row(pa_table)
self.assertEqual(row["audio"]["array"].dtype, np.dtype(np.float32))
col = formatter.format_column(pa_table)
self.assertEqual(col[0]["array"].dtype, np.float32)
batch = formatter.format_batch(pa_table)
self.assertEqual(batch["audio"][0]["array"].dtype, np.dtype(np.float32))
def test_pandas_formatter(self):
pa_table = self._create_dummy_table()
formatter = PandasFormatter()
row = formatter.format_row(pa_table)
self.assertIsInstance(row, pd.DataFrame)
pd.testing.assert_series_equal(row["a"], pd.Series(_COL_A, name="a")[:1])
pd.testing.assert_series_equal(row["b"], pd.Series(_COL_B, name="b")[:1])
col = formatter.format_column(pa_table)
pd.testing.assert_series_equal(col, pd.Series(_COL_A, name="a"))
batch = formatter.format_batch(pa_table)
self.assertIsInstance(batch, pd.DataFrame)
pd.testing.assert_series_equal(batch["a"], pd.Series(_COL_A, name="a"))
pd.testing.assert_series_equal(batch["b"], pd.Series(_COL_B, name="b"))
@require_polars
def test_polars_formatter(self):
import polars as pl
from datasets.formatting import PolarsFormatter
pa_table = self._create_dummy_table()
formatter = PolarsFormatter()
row = formatter.format_row(pa_table)
self.assertIsInstance(row, pl.DataFrame)
assert pl.Series.eq(row["a"], pl.Series("a", _COL_A)[:1]).all()
assert pl.Series.eq(row["b"], pl.Series("b", _COL_B)[:1]).all()
col = formatter.format_column(pa_table)
assert pl.Series.eq(col, pl.Series("a", _COL_A)).all()
batch = formatter.format_batch(pa_table)
self.assertIsInstance(batch, pl.DataFrame)
assert pl.Series.eq(batch["a"], pl.Series("a", _COL_A)).all()
assert pl.Series.eq(batch["b"], pl.Series("b", _COL_B)).all()
@require_numpy1_on_windows
@require_torch
def test_torch_formatter(self):
import torch
from datasets.formatting import TorchFormatter
pa_table = self._create_dummy_table()
formatter = TorchFormatter()
row = formatter.format_row(pa_table)
torch.testing.assert_close(row["a"], torch.tensor(_COL_A, dtype=torch.int64)[0])
assert row["b"] == _COL_B[0]
torch.testing.assert_close(row["c"], torch.tensor(_COL_C, dtype=torch.float32)[0])
col = formatter.format_column(pa_table)
torch.testing.assert_close(col, torch.tensor(_COL_A, dtype=torch.int64))
batch = formatter.format_batch(pa_table)
torch.testing.assert_close(batch["a"], torch.tensor(_COL_A, dtype=torch.int64))
assert batch["b"] == _COL_B
torch.testing.assert_close(batch["c"], torch.tensor(_COL_C, dtype=torch.float32))
assert batch["c"].shape == np.array(_COL_C).shape
@require_numpy1_on_windows
@require_torch
def test_torch_formatter_torch_tensor_kwargs(self):
import torch
from datasets.formatting import TorchFormatter
pa_table = self._create_dummy_table().drop(["b"])
formatter = TorchFormatter(dtype=torch.float16)
row = formatter.format_row(pa_table)
self.assertEqual(row["c"].dtype, torch.float16)
col = formatter.format_column(pa_table)
self.assertEqual(col.dtype, torch.float16)
batch = formatter.format_batch(pa_table)
self.assertEqual(batch["a"].dtype, torch.float16)
self.assertEqual(batch["c"].dtype, torch.float16)
@require_numpy1_on_windows
@require_torch
@require_pil
def test_torch_formatter_image(self):
import torch
from datasets.formatting import TorchFormatter
# same dimensions
pa_table = pa.table({"image": [{"bytes": None, "path": str(IMAGE_PATH_1)}] * 2})
formatter = TorchFormatter(features=Features({"image": Image()}))
row = formatter.format_row(pa_table)
self.assertEqual(row["image"].dtype, torch.uint8)
# torch uses CHW format contrary to numpy which uses HWC
self.assertEqual(row["image"].shape, (3, 480, 640))
col = formatter.format_column(pa_table)
self.assertEqual(col.dtype, torch.uint8)
self.assertEqual(col.shape, (2, 3, 480, 640))
batch = formatter.format_batch(pa_table)
self.assertEqual(batch["image"].dtype, torch.uint8)
self.assertEqual(batch["image"].shape, (2, 3, 480, 640))
# different dimensions
pa_table = pa.table(
{"image": [{"bytes": None, "path": str(IMAGE_PATH_1)}, {"bytes": None, "path": str(IMAGE_PATH_2)}]}
)
formatter = TorchFormatter(features=Features({"image": Image()}))
row = formatter.format_row(pa_table)
self.assertEqual(row["image"].dtype, torch.uint8)
self.assertEqual(row["image"].shape, (3, 480, 640))
col = formatter.format_column(pa_table)
self.assertIsInstance(col, list)
self.assertEqual(col[0].dtype, torch.uint8)
self.assertEqual(col[0].shape, (3, 480, 640))
batch = formatter.format_batch(pa_table)
self.assertIsInstance(batch["image"], list)
self.assertEqual(batch["image"][0].dtype, torch.uint8)
self.assertEqual(batch["image"][0].shape, (3, 480, 640))
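# Illustrative aside (not part of the test): PIL/NumPy decode images as height x width x channels
# (HWC), while the torch formatter returns channels-first (CHW) tensors. The equivalent manual
# conversion would be roughly:
#   torch.from_numpy(np_img).permute(2, 0, 1)  # np_img assumed to have shape (H, W, 3)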
@require_torch
@require_librosa
@require_sndfile
def test_torch_formatter_audio(self):
import torch
from datasets.formatting import TorchFormatter
pa_table = pa.table({"audio": [{"bytes": None, "path": str(AUDIO_PATH_1)}]})
formatter = TorchFormatter(features=Features({"audio": Audio()}))
row = formatter.format_row(pa_table)
self.assertEqual(row["audio"]["array"].dtype, torch.float32)
col = formatter.format_column(pa_table)
self.assertEqual(col[0]["array"].dtype, torch.float32)
batch = formatter.format_batch(pa_table)
self.assertEqual(batch["audio"][0]["array"].dtype, torch.float32)
@require_tf
def test_tf_formatter(self):
import tensorflow as tf
from datasets.formatting import TFFormatter
pa_table = self._create_dummy_table()
formatter = TFFormatter()
row = formatter.format_row(pa_table)
tf.debugging.assert_equal(row["a"], tf.convert_to_tensor(_COL_A, dtype=tf.int64)[0])
tf.debugging.assert_equal(row["b"], tf.convert_to_tensor(_COL_B, dtype=tf.string)[0])
tf.debugging.assert_equal(row["c"], tf.convert_to_tensor(_COL_C, dtype=tf.float32)[0])
col = formatter.format_column(pa_table)
tf.debugging.assert_equal(col, tf.ragged.constant(_COL_A, dtype=tf.int64))
batch = formatter.format_batch(pa_table)
tf.debugging.assert_equal(batch["a"], tf.convert_to_tensor(_COL_A, dtype=tf.int64))
tf.debugging.assert_equal(batch["b"], tf.convert_to_tensor(_COL_B, dtype=tf.string))
self.assertIsInstance(batch["c"], tf.Tensor)
self.assertEqual(batch["c"].dtype, tf.float32)
tf.debugging.assert_equal(
batch["c"].shape.as_list(), tf.convert_to_tensor(_COL_C, dtype=tf.float32).shape.as_list()
)
tf.debugging.assert_equal(tf.convert_to_tensor(batch["c"]), tf.convert_to_tensor(_COL_C, dtype=tf.float32))
@require_tf
def test_tf_formatter_tf_tensor_kwargs(self):
import tensorflow as tf
from datasets.formatting import TFFormatter
pa_table = self._create_dummy_table().drop(["b"])
formatter = TFFormatter(dtype=tf.float16)
row = formatter.format_row(pa_table)
self.assertEqual(row["c"].dtype, tf.float16)
col = formatter.format_column(pa_table)
self.assertEqual(col.dtype, tf.float16)
batch = formatter.format_batch(pa_table)
self.assertEqual(batch["a"].dtype, tf.float16)
self.assertEqual(batch["c"].dtype, tf.float16)
@require_tf
@require_pil
def test_tf_formatter_image(self):
import tensorflow as tf
from datasets.formatting import TFFormatter
# same dimensions
pa_table = pa.table({"image": [{"bytes": None, "path": str(IMAGE_PATH_1)}] * 2})
formatter = TFFormatter(features=Features({"image": Image()}))
row = formatter.format_row(pa_table)
self.assertEqual(row["image"].dtype, tf.uint8)
self.assertEqual(row["image"].shape, (480, 640, 3))
col = formatter.format_column(pa_table)
self.assertEqual(col.dtype, tf.uint8)
self.assertEqual(col.shape, (2, 480, 640, 3))
batch = formatter.format_batch(pa_table)
self.assertEqual(batch["image"][0].dtype, tf.uint8)
self.assertEqual(batch["image"].shape, (2, 480, 640, 3))
# different dimensions
pa_table = pa.table(
{"image": [{"bytes": None, "path": str(IMAGE_PATH_1)}, {"bytes": None, "path": str(IMAGE_PATH_2)}]}
)
formatter = TFFormatter(features=Features({"image": Image()}))
row = formatter.format_row(pa_table)
self.assertEqual(row["image"].dtype, tf.uint8)
self.assertEqual(row["image"].shape, (480, 640, 3))
col = formatter.format_column(pa_table)
self.assertIsInstance(col, list)
self.assertEqual(col[0].dtype, tf.uint8)
self.assertEqual(col[0].shape, (480, 640, 3))
batch = formatter.format_batch(pa_table)
self.assertIsInstance(batch["image"], list)
self.assertEqual(batch["image"][0].dtype, tf.uint8)
self.assertEqual(batch["image"][0].shape, (480, 640, 3))
@require_tf
@require_sndfile
def test_tf_formatter_audio(self):
import tensorflow as tf
from datasets.formatting import TFFormatter
pa_table = pa.table({"audio": [{"bytes": None, "path": str(AUDIO_PATH_1)}]})
formatter = TFFormatter(features=Features({"audio": Audio()}))
row = formatter.format_row(pa_table)
self.assertEqual(row["audio"]["array"].dtype, tf.float32)
col = formatter.format_column(pa_table)
self.assertEqual(col[0]["array"].dtype, tf.float32)
batch = formatter.format_batch(pa_table)
self.assertEqual(batch["audio"][0]["array"].dtype, tf.float32)
@require_jax
def test_jax_formatter(self):
import jax
import jax.numpy as jnp
from datasets.formatting import JaxFormatter
pa_table = self._create_dummy_table()
formatter = JaxFormatter()
row = formatter.format_row(pa_table)
assert jnp.allclose(row["a"], jnp.array(_COL_A, dtype=jnp.int64 if jax.config.jax_enable_x64 else jnp.int32)[0])
assert row["b"] == _COL_B[0]
assert jnp.allclose(row["c"], jnp.array(_COL_C, dtype=jnp.float32)[0])
col = formatter.format_column(pa_table)
assert jnp.allclose(col, jnp.array(_COL_A, dtype=jnp.int64 if jax.config.jax_enable_x64 else jnp.int32))
batch = formatter.format_batch(pa_table)
assert jnp.allclose(batch["a"], jnp.array(_COL_A, dtype=jnp.int64 if jax.config.jax_enable_x64 else jnp.int32))
assert batch["b"] == _COL_B
assert jnp.allclose(batch["c"], jnp.array(_COL_C, dtype=jnp.float32))
assert batch["c"].shape == np.array(_COL_C).shape
@require_jax
def test_jax_formatter_jnp_array_kwargs(self):
import jax.numpy as jnp
from datasets.formatting import JaxFormatter
pa_table = self._create_dummy_table().drop(["b"])
formatter = JaxFormatter(dtype=jnp.float16)
row = formatter.format_row(pa_table)
self.assertEqual(row["c"].dtype, jnp.float16)
col = formatter.format_column(pa_table)
self.assertEqual(col.dtype, jnp.float16)
batch = formatter.format_batch(pa_table)
self.assertEqual(batch["a"].dtype, jnp.float16)
self.assertEqual(batch["c"].dtype, jnp.float16)
@require_jax
@require_pil
def test_jax_formatter_image(self):
import jax.numpy as jnp
from datasets.formatting import JaxFormatter
# same dimensions
pa_table = pa.table({"image": [{"bytes": None, "path": str(IMAGE_PATH_1)}] * 2})
formatter = JaxFormatter(features=Features({"image": Image()}))
row = formatter.format_row(pa_table)
self.assertEqual(row["image"].dtype, jnp.uint8)
self.assertEqual(row["image"].shape, (480, 640, 3))
col = formatter.format_column(pa_table)
self.assertEqual(col.dtype, jnp.uint8)
self.assertEqual(col.shape, (2, 480, 640, 3))
batch = formatter.format_batch(pa_table)
self.assertEqual(batch["image"].dtype, jnp.uint8)
self.assertEqual(batch["image"].shape, (2, 480, 640, 3))
# different dimensions
pa_table = pa.table(
{"image": [{"bytes": None, "path": str(IMAGE_PATH_1)}, {"bytes": None, "path": str(IMAGE_PATH_2)}]}
)
formatter = JaxFormatter(features=Features({"image": Image()}))
row = formatter.format_row(pa_table)
self.assertEqual(row["image"].dtype, jnp.uint8)
self.assertEqual(row["image"].shape, (480, 640, 3))
col = formatter.format_column(pa_table)
self.assertIsInstance(col, list)
self.assertEqual(col[0].dtype, jnp.uint8)
self.assertEqual(col[0].shape, (480, 640, 3))
batch = formatter.format_batch(pa_table)
self.assertIsInstance(batch["image"], list)
self.assertEqual(batch["image"][0].dtype, jnp.uint8)
self.assertEqual(batch["image"][0].shape, (480, 640, 3))
@require_jax
@require_librosa
@require_sndfile
def test_jax_formatter_audio(self):
import jax.numpy as jnp
from datasets.formatting import JaxFormatter
pa_table = pa.table({"audio": [{"bytes": None, "path": str(AUDIO_PATH_1)}]})
formatter = JaxFormatter(features=Features({"audio": Audio()}))
row = formatter.format_row(pa_table)
self.assertEqual(row["audio"]["array"].dtype, jnp.float32)
col = formatter.format_column(pa_table)
self.assertEqual(col[0]["array"].dtype, jnp.float32)
batch = formatter.format_batch(pa_table)
self.assertEqual(batch["audio"][0]["array"].dtype, jnp.float32)
@require_jax
def test_jax_formatter_device(self):
import jax
from datasets.formatting import JaxFormatter
pa_table = self._create_dummy_table()
device = jax.devices()[0]
formatter = JaxFormatter(device=str(device))
row = formatter.format_row(pa_table)
assert row["a"].devices().pop() == device
assert row["c"].devices().pop() == device
col = formatter.format_column(pa_table)
assert col.devices().pop() == device
batch = formatter.format_batch(pa_table)
assert batch["a"].devices().pop() == device
assert batch["c"].devices().pop() == device
class QueryTest(TestCase):
def _create_dummy_table(self):
return pa.Table.from_pydict({"a": _COL_A, "b": _COL_B, "c": _COL_C})
def _create_dummy_arrow_indices(self):
return pa.Table.from_arrays([pa.array(_INDICES, type=pa.uint64())], names=["indices"])
def assertTableEqual(self, first: pa.Table, second: pa.Table):
self.assertEqual(first.schema, second.schema)
for first_array, second_array in zip(first, second):
self.assertEqual(first_array, second_array)
self.assertEqual(first, second)
def test_query_table_int(self):
pa_table = self._create_dummy_table()
table = InMemoryTable(pa_table)
n = pa_table.num_rows
# classical usage
subtable = query_table(table, 0)
self.assertTableEqual(subtable, pa.Table.from_pydict({"a": _COL_A[:1], "b": _COL_B[:1], "c": _COL_C[:1]}))
subtable = query_table(table, 1)
self.assertTableEqual(subtable, pa.Table.from_pydict({"a": _COL_A[1:2], "b": _COL_B[1:2], "c": _COL_C[1:2]}))
subtable = query_table(table, -1)
self.assertTableEqual(subtable, pa.Table.from_pydict({"a": _COL_A[-1:], "b": _COL_B[-1:], "c": _COL_C[-1:]}))
# raise an IndexError
with self.assertRaises(IndexError):
query_table(table, n)
with self.assertRaises(IndexError):
query_table(table, -(n + 1))
# with indices
indices = InMemoryTable(self._create_dummy_arrow_indices())
subtable = query_table(table, 0, indices=indices)
self.assertTableEqual(
subtable,
pa.Table.from_pydict({"a": [_COL_A[_INDICES[0]]], "b": [_COL_B[_INDICES[0]]], "c": [_COL_C[_INDICES[0]]]}),
)
with self.assertRaises(IndexError):
assert len(indices) < n
query_table(table, len(indices), indices=indices)
def test_query_table_slice(self):
pa_table = self._create_dummy_table()
table = InMemoryTable(pa_table)
n = pa_table.num_rows
# classical usage
subtable = query_table(table, slice(0, 1))
self.assertTableEqual(subtable, pa.Table.from_pydict({"a": _COL_A[:1], "b": _COL_B[:1], "c": _COL_C[:1]}))
subtable = query_table(table, slice(1, 2))
self.assertTableEqual(subtable, pa.Table.from_pydict({"a": _COL_A[1:2], "b": _COL_B[1:2], "c": _COL_C[1:2]}))
subtable = query_table(table, slice(-2, -1))
self.assertTableEqual(
subtable, pa.Table.from_pydict({"a": _COL_A[-2:-1], "b": _COL_B[-2:-1], "c": _COL_C[-2:-1]})
)
# usage with None
subtable = query_table(table, slice(-1, None))
self.assertTableEqual(subtable, pa.Table.from_pydict({"a": _COL_A[-1:], "b": _COL_B[-1:], "c": _COL_C[-1:]}))
subtable = query_table(table, slice(None, n + 1))
self.assertTableEqual(
subtable, pa.Table.from_pydict({"a": _COL_A[: n + 1], "b": _COL_B[: n + 1], "c": _COL_C[: n + 1]})
)
self.assertTableEqual(subtable, pa.Table.from_pydict({"a": _COL_A, "b": _COL_B, "c": _COL_C}))
subtable = query_table(table, slice(-(n + 1), None))
self.assertTableEqual(
subtable, pa.Table.from_pydict({"a": _COL_A[-(n + 1) :], "b": _COL_B[-(n + 1) :], "c": _COL_C[-(n + 1) :]})
)
self.assertTableEqual(subtable, pa.Table.from_pydict({"a": _COL_A, "b": _COL_B, "c": _COL_C}))
# usage with step
subtable = query_table(table, slice(None, None, 2))
self.assertTableEqual(subtable, pa.Table.from_pydict({"a": _COL_A[::2], "b": _COL_B[::2], "c": _COL_C[::2]}))
# empty output but no errors
subtable = query_table(table, slice(-1, 0)) # usage with both negative and positive idx
assert len(_COL_A[-1:0]) == 0
self.assertTableEqual(subtable, pa_table.slice(0, 0))
subtable = query_table(table, slice(2, 1))
assert len(_COL_A[2:1]) == 0
self.assertTableEqual(subtable, pa_table.slice(0, 0))
subtable = query_table(table, slice(n, n))
assert len(_COL_A[n:n]) == 0
self.assertTableEqual(subtable, pa_table.slice(0, 0))
subtable = query_table(table, slice(n, n + 1))
assert len(_COL_A[n : n + 1]) == 0
self.assertTableEqual(subtable, pa_table.slice(0, 0))
# it's not possible to get an error with a slice
# with indices
indices = InMemoryTable(self._create_dummy_arrow_indices())
subtable = query_table(table, slice(0, 1), indices=indices)
self.assertTableEqual(
subtable,
pa.Table.from_pydict({"a": [_COL_A[_INDICES[0]]], "b": [_COL_B[_INDICES[0]]], "c": [_COL_C[_INDICES[0]]]}),
)
subtable = query_table(table, slice(n - 1, n), indices=indices)
assert len(indices.column(0).to_pylist()[n - 1 : n]) == 0
self.assertTableEqual(subtable, pa_table.slice(0, 0))
def test_query_table_range(self):
pa_table = self._create_dummy_table()
table = InMemoryTable(pa_table)
n = pa_table.num_rows
np_A, np_B, np_C = np.array(_COL_A, dtype=np.int64), np.array(_COL_B), np.array(_COL_C)
# classical usage
subtable = query_table(table, range(0, 1))
self.assertTableEqual(
subtable,
pa.Table.from_pydict({"a": np_A[range(0, 1)], "b": np_B[range(0, 1)], "c": np_C[range(0, 1)].tolist()}),
)
subtable = query_table(table, range(1, 2))
self.assertTableEqual(
subtable,
pa.Table.from_pydict({"a": np_A[range(1, 2)], "b": np_B[range(1, 2)], "c": np_C[range(1, 2)].tolist()}),
)
subtable = query_table(table, range(-2, -1))
self.assertTableEqual(
subtable,
pa.Table.from_pydict(
{"a": np_A[range(-2, -1)], "b": np_B[range(-2, -1)], "c": np_C[range(-2, -1)].tolist()}
),
)
# usage with both negative and positive idx
subtable = query_table(table, range(-1, 0))
self.assertTableEqual(
subtable,
pa.Table.from_pydict({"a": np_A[range(-1, 0)], "b": np_B[range(-1, 0)], "c": np_C[range(-1, 0)].tolist()}),
)
subtable = query_table(table, range(-1, n))
self.assertTableEqual(
subtable,
pa.Table.from_pydict({"a": np_A[range(-1, n)], "b": np_B[range(-1, n)], "c": np_C[range(-1, n)].tolist()}),
)
# usage with step
subtable = query_table(table, range(0, n, 2))
self.assertTableEqual(
subtable,
pa.Table.from_pydict(
{"a": np_A[range(0, n, 2)], "b": np_B[range(0, n, 2)], "c": np_C[range(0, n, 2)].tolist()}
),
)
subtable = query_table(table, range(0, n + 1, 2 * n))
self.assertTableEqual(
subtable,
pa.Table.from_pydict(
{
"a": np_A[range(0, n + 1, 2 * n)],
"b": np_B[range(0, n + 1, 2 * n)],
"c": np_C[range(0, n + 1, 2 * n)].tolist(),
}
),
)
# empty output but no errors
subtable = query_table(table, range(2, 1))
assert len(np_A[range(2, 1)]) == 0
self.assertTableEqual(subtable, pa.Table.from_batches([], schema=pa_table.schema))
subtable = query_table(table, range(n, n))
assert len(np_A[range(n, n)]) == 0
self.assertTableEqual(subtable, pa.Table.from_batches([], schema=pa_table.schema))
# raise an IndexError
with self.assertRaises(IndexError):
with self.assertRaises(IndexError):
np_A[range(0, n + 1)]
query_table(table, range(0, n + 1))
with self.assertRaises(IndexError):
with self.assertRaises(IndexError):
np_A[range(-(n + 1), -1)]
query_table(table, range(-(n + 1), -1))
with self.assertRaises(IndexError):
with self.assertRaises(IndexError):
np_A[range(n, n + 1)]
query_table(table, range(n, n + 1))
# with indices
indices = InMemoryTable(self._create_dummy_arrow_indices())
subtable = query_table(table, range(0, 1), indices=indices)
self.assertTableEqual(
subtable,
pa.Table.from_pydict({"a": [_COL_A[_INDICES[0]]], "b": [_COL_B[_INDICES[0]]], "c": [_COL_C[_INDICES[0]]]}),
)
with self.assertRaises(IndexError):
assert len(indices) < n
query_table(table, range(len(indices), len(indices) + 1), indices=indices)
def test_query_table_str(self):
pa_table = self._create_dummy_table()
table = InMemoryTable(pa_table)
subtable = query_table(table, "a")
self.assertTableEqual(subtable, pa.Table.from_pydict({"a": _COL_A}))
with self.assertRaises(KeyError):
query_table(table, "z")
indices = InMemoryTable(self._create_dummy_arrow_indices())
subtable = query_table(table, "a", indices=indices)
self.assertTableEqual(subtable, pa.Table.from_pydict({"a": [_COL_A[i] for i in _INDICES]}))
def test_query_table_iterable(self):
pa_table = self._create_dummy_table()
table = InMemoryTable(pa_table)
n = pa_table.num_rows
np_A, np_B, np_C = np.array(_COL_A, dtype=np.int64), np.array(_COL_B), np.array(_COL_C)
# classical usage
subtable = query_table(table, [0])
self.assertTableEqual(
subtable, pa.Table.from_pydict({"a": np_A[[0]], "b": np_B[[0]], "c": np_C[[0]].tolist()})
)
subtable = query_table(table, [1])
self.assertTableEqual(
subtable, pa.Table.from_pydict({"a": np_A[[1]], "b": np_B[[1]], "c": np_C[[1]].tolist()})
)
subtable = query_table(table, [-1])
self.assertTableEqual(
subtable, pa.Table.from_pydict({"a": np_A[[-1]], "b": np_B[[-1]], "c": np_C[[-1]].tolist()})
)
subtable = query_table(table, [0, -1, 1])
self.assertTableEqual(
subtable,
pa.Table.from_pydict({"a": np_A[[0, -1, 1]], "b": np_B[[0, -1, 1]], "c": np_C[[0, -1, 1]].tolist()}),
)
# numpy iterable
subtable = query_table(table, np.array([0, -1, 1]))
self.assertTableEqual(
subtable,
pa.Table.from_pydict({"a": np_A[[0, -1, 1]], "b": np_B[[0, -1, 1]], "c": np_C[[0, -1, 1]].tolist()}),
)
# empty output but no errors
subtable = query_table(table, [])
assert len(np_A[[]]) == 0
self.assertTableEqual(subtable, pa.Table.from_batches([], schema=pa_table.schema))
# raise an IndexError
with self.assertRaises(IndexError):
with self.assertRaises(IndexError):
np_A[[n]]
query_table(table, [n])
with self.assertRaises(IndexError):
with self.assertRaises(IndexError):
np_A[[-(n + 1)]]
query_table(table, [-(n + 1)])
# with indices
indices = InMemoryTable(self._create_dummy_arrow_indices())
subtable = query_table(table, [0], indices=indices)
self.assertTableEqual(
subtable,
pa.Table.from_pydict({"a": [_COL_A[_INDICES[0]]], "b": [_COL_B[_INDICES[0]]], "c": [_COL_C[_INDICES[0]]]}),
)
with self.assertRaises(IndexError):
assert len(indices) < n
query_table(table, [len(indices)], indices=indices)
def test_query_table_indexable_type(self):
pa_table = self._create_dummy_table()
table = InMemoryTable(pa_table)
n = pa_table.num_rows
# classical usage
subtable = query_table(table, np.int64(0))
self.assertTableEqual(subtable, pa.Table.from_pydict({"a": _COL_A[:1], "b": _COL_B[:1], "c": _COL_C[:1]}))
subtable = query_table(table, np.int64(1))
self.assertTableEqual(subtable, pa.Table.from_pydict({"a": _COL_A[1:2], "b": _COL_B[1:2], "c": _COL_C[1:2]}))
subtable = query_table(table, np.int64(-1))
self.assertTableEqual(subtable, pa.Table.from_pydict({"a": _COL_A[-1:], "b": _COL_B[-1:], "c": _COL_C[-1:]}))
# raise an IndexError
with self.assertRaises(IndexError):
query_table(table, np.int64(n))
with self.assertRaises(IndexError):
query_table(table, np.int64(-(n + 1)))
# with indices
indices = InMemoryTable(self._create_dummy_arrow_indices())
subtable = query_table(table, np.int64(0), indices=indices)
self.assertTableEqual(
subtable,
pa.Table.from_pydict({"a": [_COL_A[_INDICES[0]]], "b": [_COL_B[_INDICES[0]]], "c": [_COL_C[_INDICES[0]]]}),
)
with self.assertRaises(IndexError):
assert len(indices) < n
query_table(table, np.int64(len(indices)), indices=indices)
def test_query_table_invalid_key_type(self):
pa_table = self._create_dummy_table()
table = InMemoryTable(pa_table)
with self.assertRaises(TypeError):
query_table(table, 0.0)
with self.assertRaises(TypeError):
query_table(table, [0, "a"])
with self.assertRaises(TypeError):
query_table(table, int)
with self.assertRaises(TypeError):
def iter_to_inf(start=0):
while True:
yield start
start += 1
query_table(table, iter_to_inf())
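# Informal summary (added for readability, not part of the test suite): the key types exercised by
# QueryTest mirror the usual datasets indexing behaviour, roughly:
#   table/dataset[0]        -> a single row            (int key)
#   table/dataset[0:2]      -> a slice of rows         (slice key)
#   table/dataset["a"]      -> the column named "a"    (str key)
#   table/dataset[[0, -1]]  -> the selected rows       (iterable key)
# Anything else (floats, mixed-type lists, arbitrary objects) raises a TypeError.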
@pytest.fixture(scope="session")
def arrow_table():
return pa.Table.from_pydict({"col_int": [0, 1, 2], "col_float": [0.0, 1.0, 2.0]})
@require_tf
@pytest.mark.parametrize(
"cast_schema",
[
None,
[("col_int", pa.int64()), ("col_float", pa.float64())],
[("col_int", pa.int32()), ("col_float", pa.float64())],
[("col_int", pa.int64()), ("col_float", pa.float32())],
],
)
def test_tf_formatter_sets_default_dtypes(cast_schema, arrow_table):
import tensorflow as tf
from datasets.formatting import TFFormatter
if cast_schema:
arrow_table = arrow_table.cast(pa.schema(cast_schema))
arrow_table_dict = arrow_table.to_pydict()
list_int = arrow_table_dict["col_int"]
list_float = arrow_table_dict["col_float"]
formatter = TFFormatter()
row = formatter.format_row(arrow_table)
tf.debugging.assert_equal(row["col_int"], tf.ragged.constant(list_int, dtype=tf.int64)[0])
tf.debugging.assert_equal(row["col_float"], tf.ragged.constant(list_float, dtype=tf.float32)[0])
col = formatter.format_column(arrow_table)
tf.debugging.assert_equal(col, tf.ragged.constant(list_int, dtype=tf.int64))
batch = formatter.format_batch(arrow_table)
tf.debugging.assert_equal(batch["col_int"], tf.ragged.constant(list_int, dtype=tf.int64))
tf.debugging.assert_equal(batch["col_float"], tf.ragged.constant(list_float, dtype=tf.float32))
@require_numpy1_on_windows
@require_torch
@pytest.mark.parametrize(
"cast_schema",
[
None,
[("col_int", pa.int64()), ("col_float", pa.float64())],
[("col_int", pa.int32()), ("col_float", pa.float64())],
[("col_int", pa.int64()), ("col_float", pa.float32())],
],
)
def test_torch_formatter_sets_default_dtypes(cast_schema, arrow_table):
import torch
from datasets.formatting import TorchFormatter
if cast_schema:
arrow_table = arrow_table.cast(pa.schema(cast_schema))
arrow_table_dict = arrow_table.to_pydict()
list_int = arrow_table_dict["col_int"]
list_float = arrow_table_dict["col_float"]
formatter = TorchFormatter()
row = formatter.format_row(arrow_table)
torch.testing.assert_close(row["col_int"], torch.tensor(list_int, dtype=torch.int64)[0])
torch.testing.assert_close(row["col_float"], torch.tensor(list_float, dtype=torch.float32)[0])
col = formatter.format_column(arrow_table)
torch.testing.assert_close(col, torch.tensor(list_int, dtype=torch.int64))
batch = formatter.format_batch(arrow_table)
torch.testing.assert_close(batch["col_int"], torch.tensor(list_int, dtype=torch.int64))
torch.testing.assert_close(batch["col_float"], torch.tensor(list_float, dtype=torch.float32))
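# Readable summary of the two parametrized tests above (added note, grounded in the assertions):
# whether the Arrow columns arrive as int32/int64 or float32/float64, both the TF and torch
# formatters normalize the defaults to 64-bit integers and 32-bit floats.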
def test_iterable_dataset_of_arrays_format_to_arrow(any_arrays_dataset: IterableDataset):
formatted = any_arrays_dataset.with_format("arrow")
assert all(isinstance(example, pa.Table) for example in formatted)
def test_iterable_dataset_of_arrays_format_to_numpy(any_arrays_dataset: IterableDataset):
formatted = any_arrays_dataset.with_format("np")
assert all(isinstance(example["array"], np.ndarray) for example in formatted)
@require_torch
def test_iterable_dataset_of_arrays_format_to_torch(any_arrays_dataset: IterableDataset):
import torch
formatted = any_arrays_dataset.with_format("torch")
assert all(isinstance(example["array"], torch.Tensor) for example in formatted)
@require_tf
def test_iterable_dataset_of_arrays_format_to_tf(any_arrays_dataset: IterableDataset):
import tensorflow as tf
formatted = any_arrays_dataset.with_format("tf")
assert all(isinstance(example["array"], tf.Tensor) for example in formatted)
@require_jax
def test_iterable_dataset_of_arrays_format_to_jax(any_arrays_dataset: IterableDataset):
import jax.numpy as jnp
formatted = any_arrays_dataset.with_format("jax")
assert all(isinstance(example["array"], jnp.ndarray) for example in formatted)
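# Hedged usage sketch (not part of the tests): the with_format calls above correspond to what users
# do with a streaming dataset, e.g.
#   ds = load_dataset("user/dataset", streaming=True, split="train")  # hypothetical repo id
#   for example in ds.with_format("np"):
#       ...  # array-typed values arrive as numpy arrays instead of Python lists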
| datasets/tests/test_formatting.py/0 | {
"file_path": "datasets/tests/test_formatting.py",
"repo_id": "datasets",
"token_count": 21450
} |
import copy
import pickle
from decimal import Decimal
from functools import partial
from typing import List, Union
from unittest.mock import MagicMock
import numpy as np
import pyarrow as pa
import pytest
from datasets.features import Array2D, ClassLabel, Features, Image, LargeList, Sequence, Value
from datasets.features.features import Array2DExtensionType, get_nested_type
from datasets.table import (
ConcatenationTable,
InMemoryTable,
MemoryMappedTable,
Table,
TableBlock,
_in_memory_arrow_table_from_buffer,
_in_memory_arrow_table_from_file,
_interpolation_search,
_memory_mapped_arrow_table_from_file,
array_cast,
cast_array_to_feature,
cast_table_to_schema,
concat_tables,
embed_array_storage,
embed_table_storage,
inject_arrow_table_documentation,
table_cast,
table_iter,
)
from .utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, slow
@pytest.fixture(scope="session")
def in_memory_pa_table(arrow_file) -> pa.Table:
return pa.ipc.open_stream(arrow_file).read_all()
def _to_testing_blocks(table: TableBlock) -> List[List[TableBlock]]:
assert len(table) > 2
blocks = [
[table.slice(0, 2)],
[table.slice(2).drop([c for c in table.column_names if c != "tokens"]), table.slice(2).drop(["tokens"])],
]
return blocks
@pytest.fixture(scope="session")
def in_memory_blocks(in_memory_pa_table):
table = InMemoryTable(in_memory_pa_table)
return _to_testing_blocks(table)
@pytest.fixture(scope="session")
def memory_mapped_blocks(arrow_file):
table = MemoryMappedTable.from_file(arrow_file)
return _to_testing_blocks(table)
@pytest.fixture(scope="session")
def mixed_in_memory_and_memory_mapped_blocks(in_memory_blocks, memory_mapped_blocks):
return in_memory_blocks[:1] + memory_mapped_blocks[1:]
def assert_deepcopy_without_bringing_data_in_memory(table: MemoryMappedTable):
with assert_arrow_memory_doesnt_increase():
copied_table = copy.deepcopy(table)
assert isinstance(copied_table, MemoryMappedTable)
assert copied_table.table == table.table
def assert_deepcopy_does_bring_data_in_memory(table: MemoryMappedTable):
with assert_arrow_memory_increases():
copied_table = copy.deepcopy(table)
assert isinstance(copied_table, MemoryMappedTable)
assert copied_table.table == table.table
def assert_pickle_without_bringing_data_in_memory(table: MemoryMappedTable):
with assert_arrow_memory_doesnt_increase():
pickled_table = pickle.dumps(table)
unpickled_table = pickle.loads(pickled_table)
assert isinstance(unpickled_table, MemoryMappedTable)
assert unpickled_table.table == table.table
def assert_pickle_does_bring_data_in_memory(table: MemoryMappedTable):
with assert_arrow_memory_increases():
pickled_table = pickle.dumps(table)
unpickled_table = pickle.loads(pickled_table)
assert isinstance(unpickled_table, MemoryMappedTable)
assert unpickled_table.table == table.table
def assert_index_attributes_equal(table: Table, other: Table):
assert table._batches == other._batches
np.testing.assert_array_equal(table._offsets, other._offsets)
assert table._schema == other._schema
def add_suffix_to_column_names(table, suffix):
return table.rename_columns([f"{name}{suffix}" for name in table.column_names])
def test_inject_arrow_table_documentation(in_memory_pa_table):
method = pa.Table.slice
def function_to_wrap(*args):
return method(*args)
args = (0, 1)
wrapped_method = inject_arrow_table_documentation(method)(function_to_wrap)
assert method(in_memory_pa_table, *args) == wrapped_method(in_memory_pa_table, *args)
assert "pyarrow.Table" not in wrapped_method.__doc__
assert "Table" in wrapped_method.__doc__
def test_in_memory_arrow_table_from_file(arrow_file, in_memory_pa_table):
with assert_arrow_memory_increases():
pa_table = _in_memory_arrow_table_from_file(arrow_file)
assert in_memory_pa_table == pa_table
def test_in_memory_arrow_table_from_buffer(in_memory_pa_table):
with assert_arrow_memory_increases():
buf_writer = pa.BufferOutputStream()
writer = pa.RecordBatchStreamWriter(buf_writer, schema=in_memory_pa_table.schema)
writer.write_table(in_memory_pa_table)
writer.close()
buf_writer.close()
pa_table = _in_memory_arrow_table_from_buffer(buf_writer.getvalue())
assert in_memory_pa_table == pa_table
def test_memory_mapped_arrow_table_from_file(arrow_file, in_memory_pa_table):
with assert_arrow_memory_doesnt_increase():
pa_table = _memory_mapped_arrow_table_from_file(arrow_file)
assert in_memory_pa_table == pa_table
def test_table_init(in_memory_pa_table):
table = Table(in_memory_pa_table)
assert table.table == in_memory_pa_table
def test_table_validate(in_memory_pa_table):
table = Table(in_memory_pa_table)
assert table.validate() == in_memory_pa_table.validate()
def test_table_equals(in_memory_pa_table):
table = Table(in_memory_pa_table)
assert table.equals(in_memory_pa_table)
def test_table_to_batches(in_memory_pa_table):
table = Table(in_memory_pa_table)
assert table.to_batches() == in_memory_pa_table.to_batches()
def test_table_to_pydict(in_memory_pa_table):
table = Table(in_memory_pa_table)
assert table.to_pydict() == in_memory_pa_table.to_pydict()
def test_table_to_string(in_memory_pa_table):
table = Table(in_memory_pa_table)
assert table.to_string() == in_memory_pa_table.to_string()
def test_table_field(in_memory_pa_table):
assert "tokens" in in_memory_pa_table.column_names
table = Table(in_memory_pa_table)
assert table.field("tokens") == in_memory_pa_table.field("tokens")
def test_table_column(in_memory_pa_table):
assert "tokens" in in_memory_pa_table.column_names
table = Table(in_memory_pa_table)
assert table.column("tokens") == in_memory_pa_table.column("tokens")
def test_table_itercolumns(in_memory_pa_table):
table = Table(in_memory_pa_table)
assert isinstance(table.itercolumns(), type(in_memory_pa_table.itercolumns()))
assert list(table.itercolumns()) == list(in_memory_pa_table.itercolumns())
def test_table_getitem(in_memory_pa_table):
table = Table(in_memory_pa_table)
assert table[0] == in_memory_pa_table[0]
def test_table_len(in_memory_pa_table):
table = Table(in_memory_pa_table)
assert len(table) == len(in_memory_pa_table)
def test_table_str(in_memory_pa_table):
table = Table(in_memory_pa_table)
assert str(table) == str(in_memory_pa_table).replace("pyarrow.Table", "Table")
assert repr(table) == repr(in_memory_pa_table).replace("pyarrow.Table", "Table")
@pytest.mark.parametrize(
"attribute", ["schema", "columns", "num_columns", "num_rows", "shape", "nbytes", "column_names"]
)
def test_table_attributes(in_memory_pa_table, attribute):
table = Table(in_memory_pa_table)
assert getattr(table, attribute) == getattr(in_memory_pa_table, attribute)
def test_in_memory_table_from_file(arrow_file, in_memory_pa_table):
with assert_arrow_memory_increases():
table = InMemoryTable.from_file(arrow_file)
assert table.table == in_memory_pa_table
assert isinstance(table, InMemoryTable)
def test_in_memory_table_from_buffer(in_memory_pa_table):
with assert_arrow_memory_increases():
buf_writer = pa.BufferOutputStream()
writer = pa.RecordBatchStreamWriter(buf_writer, schema=in_memory_pa_table.schema)
writer.write_table(in_memory_pa_table)
writer.close()
buf_writer.close()
table = InMemoryTable.from_buffer(buf_writer.getvalue())
assert table.table == in_memory_pa_table
assert isinstance(table, InMemoryTable)
def test_in_memory_table_from_pandas(in_memory_pa_table):
df = in_memory_pa_table.to_pandas()
with assert_arrow_memory_increases():
# with no schema it might infer another order of the fields in the schema
table = InMemoryTable.from_pandas(df)
assert isinstance(table, InMemoryTable)
# by specifying schema we get the same order of features, and so the exact same table
table = InMemoryTable.from_pandas(df, schema=in_memory_pa_table.schema)
assert table.table == in_memory_pa_table
assert isinstance(table, InMemoryTable)
def test_in_memory_table_from_arrays(in_memory_pa_table):
arrays = list(in_memory_pa_table.columns)
names = list(in_memory_pa_table.column_names)
table = InMemoryTable.from_arrays(arrays, names=names)
assert table.table == in_memory_pa_table
assert isinstance(table, InMemoryTable)
def test_in_memory_table_from_pydict(in_memory_pa_table):
pydict = in_memory_pa_table.to_pydict()
with assert_arrow_memory_increases():
table = InMemoryTable.from_pydict(pydict)
assert isinstance(table, InMemoryTable)
assert table.table == pa.Table.from_pydict(pydict)
def test_in_memory_table_from_pylist(in_memory_pa_table):
pylist = InMemoryTable(in_memory_pa_table).to_pylist()
table = InMemoryTable.from_pylist(pylist)
assert isinstance(table, InMemoryTable)
assert pylist == table.to_pylist()
def test_in_memory_table_from_batches(in_memory_pa_table):
batches = list(in_memory_pa_table.to_batches())
table = InMemoryTable.from_batches(batches)
assert table.table == in_memory_pa_table
assert isinstance(table, InMemoryTable)
def test_in_memory_table_deepcopy(in_memory_pa_table):
table = InMemoryTable(in_memory_pa_table)
copied_table = copy.deepcopy(table)
assert table.table == copied_table.table
assert_index_attributes_equal(table, copied_table)
# deepcopy must return the exact same arrow objects since they are immutable
assert table.table is copied_table.table
assert all(batch1 is batch2 for batch1, batch2 in zip(table._batches, copied_table._batches))
def test_in_memory_table_pickle(in_memory_pa_table):
table = InMemoryTable(in_memory_pa_table)
pickled_table = pickle.dumps(table)
unpickled_table = pickle.loads(pickled_table)
assert unpickled_table.table == table.table
assert_index_attributes_equal(table, unpickled_table)
@slow
def test_in_memory_table_pickle_big_table():
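# ((4 * 8 << 30) // 64) int64 values at 8 bytes each is roughly 4 GiB of data, hence the name and the @slow marker.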
big_table_4GB = InMemoryTable.from_pydict({"col": [0] * ((4 * 8 << 30) // 64)})
length = len(big_table_4GB)
big_table_4GB = pickle.dumps(big_table_4GB)
big_table_4GB = pickle.loads(big_table_4GB)
assert len(big_table_4GB) == length
def test_in_memory_table_slice(in_memory_pa_table):
table = InMemoryTable(in_memory_pa_table).slice(1, 2)
assert table.table == in_memory_pa_table.slice(1, 2)
assert isinstance(table, InMemoryTable)
def test_in_memory_table_filter(in_memory_pa_table):
mask = pa.array([i % 2 == 0 for i in range(len(in_memory_pa_table))])
table = InMemoryTable(in_memory_pa_table).filter(mask)
assert table.table == in_memory_pa_table.filter(mask)
assert isinstance(table, InMemoryTable)
def test_in_memory_table_flatten(in_memory_pa_table):
table = InMemoryTable(in_memory_pa_table).flatten()
assert table.table == in_memory_pa_table.flatten()
assert isinstance(table, InMemoryTable)
def test_in_memory_table_combine_chunks(in_memory_pa_table):
table = InMemoryTable(in_memory_pa_table).combine_chunks()
assert table.table == in_memory_pa_table.combine_chunks()
assert isinstance(table, InMemoryTable)
def test_in_memory_table_cast(in_memory_pa_table):
assert pa.list_(pa.int64()) in in_memory_pa_table.schema.types
schema = pa.schema(
{
k: v if v != pa.list_(pa.int64()) else pa.list_(pa.int32())
for k, v in zip(in_memory_pa_table.schema.names, in_memory_pa_table.schema.types)
}
)
table = InMemoryTable(in_memory_pa_table).cast(schema)
assert table.table == in_memory_pa_table.cast(schema)
assert isinstance(table, InMemoryTable)
def test_in_memory_table_cast_reorder_struct():
table = InMemoryTable(
pa.Table.from_pydict(
{
"top": [
{
"foo": "a",
"bar": "b",
}
]
}
)
)
schema = pa.schema({"top": pa.struct({"bar": pa.string(), "foo": pa.string()})})
assert table.cast(schema).schema == schema
def test_in_memory_table_cast_with_hf_features():
table = InMemoryTable(pa.Table.from_pydict({"labels": [0, 1]}))
features = Features({"labels": ClassLabel(names=["neg", "pos"])})
schema = features.arrow_schema
assert table.cast(schema).schema == schema
assert Features.from_arrow_schema(table.cast(schema).schema) == features
def test_in_memory_table_replace_schema_metadata(in_memory_pa_table):
metadata = {"huggingface": "{}"}
table = InMemoryTable(in_memory_pa_table).replace_schema_metadata(metadata)
assert table.table.schema.metadata == in_memory_pa_table.replace_schema_metadata(metadata).schema.metadata
assert isinstance(table, InMemoryTable)
def test_in_memory_table_add_column(in_memory_pa_table):
i = len(in_memory_pa_table.column_names)
field_ = "new_field"
column = pa.array(list(range(len(in_memory_pa_table))))
table = InMemoryTable(in_memory_pa_table).add_column(i, field_, column)
assert table.table == in_memory_pa_table.add_column(i, field_, column)
assert isinstance(table, InMemoryTable)
def test_in_memory_table_append_column(in_memory_pa_table):
field_ = "new_field"
column = pa.array(list(range(len(in_memory_pa_table))))
table = InMemoryTable(in_memory_pa_table).append_column(field_, column)
assert table.table == in_memory_pa_table.append_column(field_, column)
assert isinstance(table, InMemoryTable)
def test_in_memory_table_remove_column(in_memory_pa_table):
table = InMemoryTable(in_memory_pa_table).remove_column(0)
assert table.table == in_memory_pa_table.remove_column(0)
assert isinstance(table, InMemoryTable)
def test_in_memory_table_set_column(in_memory_pa_table):
i = len(in_memory_pa_table.column_names)
field_ = "new_field"
column = pa.array(list(range(len(in_memory_pa_table))))
table = InMemoryTable(in_memory_pa_table).set_column(i, field_, column)
assert table.table == in_memory_pa_table.set_column(i, field_, column)
assert isinstance(table, InMemoryTable)
def test_in_memory_table_rename_columns(in_memory_pa_table):
assert "tokens" in in_memory_pa_table.column_names
names = [name if name != "tokens" else "new_tokens" for name in in_memory_pa_table.column_names]
table = InMemoryTable(in_memory_pa_table).rename_columns(names)
assert table.table == in_memory_pa_table.rename_columns(names)
assert isinstance(table, InMemoryTable)
def test_in_memory_table_drop(in_memory_pa_table):
names = [in_memory_pa_table.column_names[0]]
table = InMemoryTable(in_memory_pa_table).drop(names)
assert table.table == in_memory_pa_table.drop(names)
assert isinstance(table, InMemoryTable)
def test_memory_mapped_table_init(arrow_file, in_memory_pa_table):
table = MemoryMappedTable(_memory_mapped_arrow_table_from_file(arrow_file), arrow_file)
assert table.table == in_memory_pa_table
assert isinstance(table, MemoryMappedTable)
assert_deepcopy_without_bringing_data_in_memory(table)
assert_pickle_without_bringing_data_in_memory(table)
def test_memory_mapped_table_from_file(arrow_file, in_memory_pa_table):
with assert_arrow_memory_doesnt_increase():
table = MemoryMappedTable.from_file(arrow_file)
assert table.table == in_memory_pa_table
assert isinstance(table, MemoryMappedTable)
assert_deepcopy_without_bringing_data_in_memory(table)
assert_pickle_without_bringing_data_in_memory(table)
def test_memory_mapped_table_from_file_with_replay(arrow_file, in_memory_pa_table):
replays = [("slice", (0, 1), {}), ("flatten", (), {})]
with assert_arrow_memory_doesnt_increase():
table = MemoryMappedTable.from_file(arrow_file, replays=replays)
assert len(table) == 1
for method, args, kwargs in replays:
in_memory_pa_table = getattr(in_memory_pa_table, method)(*args, **kwargs)
assert table.table == in_memory_pa_table
assert_deepcopy_without_bringing_data_in_memory(table)
assert_pickle_without_bringing_data_in_memory(table)
def test_memory_mapped_table_deepcopy(arrow_file):
table = MemoryMappedTable.from_file(arrow_file)
copied_table = copy.deepcopy(table)
assert table.table == copied_table.table
assert table.path == copied_table.path
assert_index_attributes_equal(table, copied_table)
# deepcopy must return the exact same arrow objects since they are immutable
assert table.table is copied_table.table
assert all(batch1 is batch2 for batch1, batch2 in zip(table._batches, copied_table._batches))
def test_memory_mapped_table_pickle(arrow_file):
table = MemoryMappedTable.from_file(arrow_file)
pickled_table = pickle.dumps(table)
unpickled_table = pickle.loads(pickled_table)
assert unpickled_table.table == table.table
assert unpickled_table.path == table.path
assert_index_attributes_equal(table, unpickled_table)
def test_memory_mapped_table_pickle_doesnt_fill_memory(arrow_file):
with assert_arrow_memory_doesnt_increase():
table = MemoryMappedTable.from_file(arrow_file)
assert_deepcopy_without_bringing_data_in_memory(table)
assert_pickle_without_bringing_data_in_memory(table)
def test_memory_mapped_table_pickle_applies_replay(arrow_file):
replays = [("slice", (0, 1), {}), ("flatten", (), {})]
with assert_arrow_memory_doesnt_increase():
table = MemoryMappedTable.from_file(arrow_file, replays=replays)
assert isinstance(table, MemoryMappedTable)
assert table.replays == replays
assert_deepcopy_without_bringing_data_in_memory(table)
assert_pickle_without_bringing_data_in_memory(table)
def test_memory_mapped_table_slice(arrow_file, in_memory_pa_table):
table = MemoryMappedTable.from_file(arrow_file).slice(1, 2)
assert table.table == in_memory_pa_table.slice(1, 2)
assert isinstance(table, MemoryMappedTable)
assert table.replays == [("slice", (1, 2), {})]
assert_deepcopy_without_bringing_data_in_memory(table)
assert_pickle_without_bringing_data_in_memory(table)
def test_memory_mapped_table_filter(arrow_file, in_memory_pa_table):
mask = pa.array([i % 2 == 0 for i in range(len(in_memory_pa_table))])
table = MemoryMappedTable.from_file(arrow_file).filter(mask)
assert table.table == in_memory_pa_table.filter(mask)
assert isinstance(table, MemoryMappedTable)
assert table.replays == [("filter", (mask,), {})]
assert_deepcopy_without_bringing_data_in_memory(table)
# filter DOES increase memory
# assert_pickle_without_bringing_data_in_memory(table)
assert_pickle_does_bring_data_in_memory(table)
def test_memory_mapped_table_flatten(arrow_file, in_memory_pa_table):
table = MemoryMappedTable.from_file(arrow_file).flatten()
assert table.table == in_memory_pa_table.flatten()
assert isinstance(table, MemoryMappedTable)
assert table.replays == [("flatten", (), {})]
assert_deepcopy_without_bringing_data_in_memory(table)
assert_pickle_without_bringing_data_in_memory(table)
def test_memory_mapped_table_combine_chunks(arrow_file, in_memory_pa_table):
table = MemoryMappedTable.from_file(arrow_file).combine_chunks()
assert table.table == in_memory_pa_table.combine_chunks()
assert isinstance(table, MemoryMappedTable)
assert table.replays == [("combine_chunks", (), {})]
assert_deepcopy_without_bringing_data_in_memory(table)
assert_pickle_without_bringing_data_in_memory(table)
def test_memory_mapped_table_cast(arrow_file, in_memory_pa_table):
assert pa.list_(pa.int64()) in in_memory_pa_table.schema.types
schema = pa.schema(
{
k: v if v != pa.list_(pa.int64()) else pa.list_(pa.int32())
for k, v in zip(in_memory_pa_table.schema.names, in_memory_pa_table.schema.types)
}
)
table = MemoryMappedTable.from_file(arrow_file).cast(schema)
assert table.table == in_memory_pa_table.cast(schema)
assert isinstance(table, MemoryMappedTable)
assert table.replays == [("cast", (schema,), {})]
assert_deepcopy_without_bringing_data_in_memory(table)
# cast DOES increase memory when converting integers precision for example
# assert_pickle_without_bringing_data_in_memory(table)
assert_pickle_does_bring_data_in_memory(table)
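# Added note (an assumption about the mechanism, not asserted here): unlike zero-copy replays such as
# slice or flatten, a cast that changes integer precision has to allocate new Arrow buffers, so
# replaying it during pickling/unpickling brings data into memory.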
def test_memory_mapped_table_replace_schema_metadata(arrow_file, in_memory_pa_table):
metadata = {"huggingface": "{}"}
table = MemoryMappedTable.from_file(arrow_file).replace_schema_metadata(metadata)
assert table.table.schema.metadata == in_memory_pa_table.replace_schema_metadata(metadata).schema.metadata
assert isinstance(table, MemoryMappedTable)
assert table.replays == [("replace_schema_metadata", (metadata,), {})]
assert_deepcopy_without_bringing_data_in_memory(table)
assert_pickle_without_bringing_data_in_memory(table)
def test_memory_mapped_table_add_column(arrow_file, in_memory_pa_table):
i = len(in_memory_pa_table.column_names)
field_ = "new_field"
column = pa.array(list(range(len(in_memory_pa_table))))
table = MemoryMappedTable.from_file(arrow_file).add_column(i, field_, column)
assert table.table == in_memory_pa_table.add_column(i, field_, column)
assert isinstance(table, MemoryMappedTable)
assert table.replays == [("add_column", (i, field_, column), {})]
assert_deepcopy_without_bringing_data_in_memory(table)
assert_pickle_without_bringing_data_in_memory(table)
def test_memory_mapped_table_append_column(arrow_file, in_memory_pa_table):
field_ = "new_field"
column = pa.array(list(range(len(in_memory_pa_table))))
table = MemoryMappedTable.from_file(arrow_file).append_column(field_, column)
assert table.table == in_memory_pa_table.append_column(field_, column)
assert isinstance(table, MemoryMappedTable)
assert table.replays == [("append_column", (field_, column), {})]
assert_deepcopy_without_bringing_data_in_memory(table)
assert_pickle_without_bringing_data_in_memory(table)
def test_memory_mapped_table_remove_column(arrow_file, in_memory_pa_table):
table = MemoryMappedTable.from_file(arrow_file).remove_column(0)
assert table.table == in_memory_pa_table.remove_column(0)
assert isinstance(table, MemoryMappedTable)
assert table.replays == [("remove_column", (0,), {})]
assert_deepcopy_without_bringing_data_in_memory(table)
assert_pickle_without_bringing_data_in_memory(table)
def test_memory_mapped_table_set_column(arrow_file, in_memory_pa_table):
i = len(in_memory_pa_table.column_names)
field_ = "new_field"
column = pa.array(list(range(len(in_memory_pa_table))))
table = MemoryMappedTable.from_file(arrow_file).set_column(i, field_, column)
assert table.table == in_memory_pa_table.set_column(i, field_, column)
assert isinstance(table, MemoryMappedTable)
assert table.replays == [("set_column", (i, field_, column), {})]
assert_deepcopy_without_bringing_data_in_memory(table)
assert_pickle_without_bringing_data_in_memory(table)
def test_memory_mapped_table_rename_columns(arrow_file, in_memory_pa_table):
assert "tokens" in in_memory_pa_table.column_names
names = [name if name != "tokens" else "new_tokens" for name in in_memory_pa_table.column_names]
table = MemoryMappedTable.from_file(arrow_file).rename_columns(names)
assert table.table == in_memory_pa_table.rename_columns(names)
assert isinstance(table, MemoryMappedTable)
assert table.replays == [("rename_columns", (names,), {})]
assert_deepcopy_without_bringing_data_in_memory(table)
assert_pickle_without_bringing_data_in_memory(table)
def test_memory_mapped_table_drop(arrow_file, in_memory_pa_table):
names = [in_memory_pa_table.column_names[0]]
table = MemoryMappedTable.from_file(arrow_file).drop(names)
assert table.table == in_memory_pa_table.drop(names)
assert isinstance(table, MemoryMappedTable)
assert table.replays == [("drop", (names,), {})]
assert_deepcopy_without_bringing_data_in_memory(table)
assert_pickle_without_bringing_data_in_memory(table)
@pytest.mark.parametrize("blocks_type", ["in_memory", "memory_mapped", "mixed"])
def test_concatenation_table_init(
blocks_type, in_memory_pa_table, in_memory_blocks, memory_mapped_blocks, mixed_in_memory_and_memory_mapped_blocks
):
blocks = (
in_memory_blocks
if blocks_type == "in_memory"
else memory_mapped_blocks
if blocks_type == "memory_mapped"
else mixed_in_memory_and_memory_mapped_blocks
)
table = ConcatenationTable(in_memory_pa_table, blocks)
assert table.table == in_memory_pa_table
assert table.blocks == blocks
def test_concatenation_table_from_blocks(in_memory_pa_table, in_memory_blocks):
assert len(in_memory_pa_table) > 2
in_memory_table = InMemoryTable(in_memory_pa_table)
t1, t2 = in_memory_table.slice(0, 2), in_memory_table.slice(2)
table = ConcatenationTable.from_blocks(in_memory_table)
assert isinstance(table, ConcatenationTable)
assert table.table == in_memory_pa_table
assert table.blocks == [[in_memory_table]]
table = ConcatenationTable.from_blocks([t1, t2])
assert isinstance(table, ConcatenationTable)
assert table.table == in_memory_pa_table
assert table.blocks == [[in_memory_table]]
table = ConcatenationTable.from_blocks([[t1], [t2]])
assert isinstance(table, ConcatenationTable)
assert table.table == in_memory_pa_table
assert table.blocks == [[in_memory_table]]
table = ConcatenationTable.from_blocks(in_memory_blocks)
assert isinstance(table, ConcatenationTable)
assert table.table == in_memory_pa_table
assert table.blocks == [[in_memory_table]]
@pytest.mark.parametrize("blocks_type", ["in_memory", "memory_mapped", "mixed"])
def test_concatenation_table_from_blocks_doesnt_increase_memory(
blocks_type, in_memory_pa_table, in_memory_blocks, memory_mapped_blocks, mixed_in_memory_and_memory_mapped_blocks
):
blocks = {
"in_memory": in_memory_blocks,
"memory_mapped": memory_mapped_blocks,
"mixed": mixed_in_memory_and_memory_mapped_blocks,
}[blocks_type]
with assert_arrow_memory_doesnt_increase():
table = ConcatenationTable.from_blocks(blocks)
assert isinstance(table, ConcatenationTable)
assert table.table == in_memory_pa_table
if blocks_type == "in_memory":
assert table.blocks == [[InMemoryTable(in_memory_pa_table)]]
else:
assert table.blocks == blocks
@pytest.mark.parametrize("axis", [0, 1])
def test_concatenation_table_from_tables(axis, in_memory_pa_table, arrow_file):
in_memory_table = InMemoryTable(in_memory_pa_table)
concatenation_table = ConcatenationTable.from_blocks(in_memory_table)
memory_mapped_table = MemoryMappedTable.from_file(arrow_file)
tables = [in_memory_pa_table, in_memory_table, concatenation_table, memory_mapped_table]
if axis == 0:
expected_table = pa.concat_tables([in_memory_pa_table] * len(tables))
else:
# avoids error due to duplicate column names
tables[1:] = [add_suffix_to_column_names(table, i) for i, table in enumerate(tables[1:], 1)]
expected_table = in_memory_pa_table
for table in tables[1:]:
for name, col in zip(table.column_names, table.columns):
expected_table = expected_table.append_column(name, col)
with assert_arrow_memory_doesnt_increase():
table = ConcatenationTable.from_tables(tables, axis=axis)
assert isinstance(table, ConcatenationTable)
assert table.table == expected_table
# because of consolidation, we end up with 1 InMemoryTable and 1 MemoryMappedTable
assert len(table.blocks) == (1 if axis == 1 else 2)
assert len(table.blocks[0]) == (1 if axis == 0 else 2)
assert axis == 1 or len(table.blocks[1]) == 1
assert isinstance(table.blocks[0][0], InMemoryTable)
assert isinstance(table.blocks[1][0] if axis == 0 else table.blocks[0][1], MemoryMappedTable)
def test_concatenation_table_from_tables_axis1_misaligned_blocks(arrow_file):
table = MemoryMappedTable.from_file(arrow_file)
t1 = table.slice(0, 2)
t2 = table.slice(0, 3).rename_columns([col + "_1" for col in table.column_names])
concatenated = ConcatenationTable.from_tables(
[
ConcatenationTable.from_blocks([[t1], [t1], [t1]]),
ConcatenationTable.from_blocks([[t2], [t2]]),
],
axis=1,
)
assert len(concatenated) == 6
assert [len(row_blocks[0]) for row_blocks in concatenated.blocks] == [2, 1, 1, 2]
concatenated = ConcatenationTable.from_tables(
[
ConcatenationTable.from_blocks([[t2], [t2]]),
ConcatenationTable.from_blocks([[t1], [t1], [t1]]),
],
axis=1,
)
assert len(concatenated) == 6
assert [len(row_blocks[0]) for row_blocks in concatenated.blocks] == [2, 1, 1, 2]
@pytest.mark.parametrize("blocks_type", ["in_memory", "memory_mapped", "mixed"])
def test_concatenation_table_deepcopy(
blocks_type, in_memory_blocks, memory_mapped_blocks, mixed_in_memory_and_memory_mapped_blocks
):
blocks = {
"in_memory": in_memory_blocks,
"memory_mapped": memory_mapped_blocks,
"mixed": mixed_in_memory_and_memory_mapped_blocks,
}[blocks_type]
table = ConcatenationTable.from_blocks(blocks)
copied_table = copy.deepcopy(table)
assert table.table == copied_table.table
assert table.blocks == copied_table.blocks
assert_index_attributes_equal(table, copied_table)
# deepcopy must return the exact same arrow objects since they are immutable
assert table.table is copied_table.table
assert all(batch1 is batch2 for batch1, batch2 in zip(table._batches, copied_table._batches))
@pytest.mark.parametrize("blocks_type", ["in_memory", "memory_mapped", "mixed"])
def test_concatenation_table_pickle(
blocks_type, in_memory_blocks, memory_mapped_blocks, mixed_in_memory_and_memory_mapped_blocks
):
blocks = {
"in_memory": in_memory_blocks,
"memory_mapped": memory_mapped_blocks,
"mixed": mixed_in_memory_and_memory_mapped_blocks,
}[blocks_type]
table = ConcatenationTable.from_blocks(blocks)
pickled_table = pickle.dumps(table)
unpickled_table = pickle.loads(pickled_table)
assert unpickled_table.table == table.table
assert unpickled_table.blocks == table.blocks
assert_index_attributes_equal(table, unpickled_table)
def test_concat_tables_with_features_metadata(arrow_file, in_memory_pa_table):
input_features = Features.from_arrow_schema(in_memory_pa_table.schema)
input_features["id"] = Value("int64", id="my_id")
input_schema = input_features.arrow_schema
t0 = in_memory_pa_table.replace_schema_metadata(input_schema.metadata)
t1 = MemoryMappedTable.from_file(arrow_file)
tables = [t0, t1]
concatenated_table = concat_tables(tables, axis=0)
output_schema = concatenated_table.schema
output_features = Features.from_arrow_schema(output_schema)
assert output_schema == input_schema
assert output_schema.metadata == input_schema.metadata
assert output_features == input_features
assert output_features["id"].id == "my_id"
@pytest.mark.parametrize("blocks_type", ["in_memory", "memory_mapped", "mixed"])
def test_concatenation_table_slice(
blocks_type, in_memory_pa_table, in_memory_blocks, memory_mapped_blocks, mixed_in_memory_and_memory_mapped_blocks
):
blocks = {
"in_memory": in_memory_blocks,
"memory_mapped": memory_mapped_blocks,
"mixed": mixed_in_memory_and_memory_mapped_blocks,
}[blocks_type]
table = ConcatenationTable.from_blocks(blocks).slice(1, 2)
assert table.table == in_memory_pa_table.slice(1, 2)
assert isinstance(table, ConcatenationTable)
def test_concatenation_table_slice_mixed_schemas_vertically(arrow_file):
t1 = MemoryMappedTable.from_file(arrow_file)
t2 = InMemoryTable.from_pydict({"additional_column": ["foo"]})
expected = pa.table(
{
**{column: values + [None] for column, values in t1.to_pydict().items()},
"additional_column": [None] * len(t1) + ["foo"],
}
)
blocks = [[t1], [t2]]
table = ConcatenationTable.from_blocks(blocks)
assert table.to_pydict() == expected.to_pydict()
assert isinstance(table, ConcatenationTable)
reloaded = pickle.loads(pickle.dumps(table))
assert reloaded.to_pydict() == expected.to_pydict()
assert isinstance(reloaded, ConcatenationTable)
reloaded = pickle.loads(pickle.dumps(table.slice(1, 2)))
assert reloaded.to_pydict() == expected.slice(1, 2).to_pydict()
assert isinstance(reloaded, ConcatenationTable)
@pytest.mark.parametrize("blocks_type", ["in_memory", "memory_mapped", "mixed"])
def test_concatenation_table_filter(
blocks_type, in_memory_pa_table, in_memory_blocks, memory_mapped_blocks, mixed_in_memory_and_memory_mapped_blocks
):
blocks = {
"in_memory": in_memory_blocks,
"memory_mapped": memory_mapped_blocks,
"mixed": mixed_in_memory_and_memory_mapped_blocks,
}[blocks_type]
mask = pa.array([i % 2 == 0 for i in range(len(in_memory_pa_table))])
table = ConcatenationTable.from_blocks(blocks).filter(mask)
assert table.table == in_memory_pa_table.filter(mask)
assert isinstance(table, ConcatenationTable)
@pytest.mark.parametrize("blocks_type", ["in_memory", "memory_mapped", "mixed"])
def test_concatenation_table_flatten(
blocks_type, in_memory_pa_table, in_memory_blocks, memory_mapped_blocks, mixed_in_memory_and_memory_mapped_blocks
):
blocks = {
"in_memory": in_memory_blocks,
"memory_mapped": memory_mapped_blocks,
"mixed": mixed_in_memory_and_memory_mapped_blocks,
}[blocks_type]
table = ConcatenationTable.from_blocks(blocks).flatten()
assert table.table == in_memory_pa_table.flatten()
assert isinstance(table, ConcatenationTable)
@pytest.mark.parametrize("blocks_type", ["in_memory", "memory_mapped", "mixed"])
def test_concatenation_table_combine_chunks(
blocks_type, in_memory_pa_table, in_memory_blocks, memory_mapped_blocks, mixed_in_memory_and_memory_mapped_blocks
):
blocks = {
"in_memory": in_memory_blocks,
"memory_mapped": memory_mapped_blocks,
"mixed": mixed_in_memory_and_memory_mapped_blocks,
}[blocks_type]
table = ConcatenationTable.from_blocks(blocks).combine_chunks()
assert table.table == in_memory_pa_table.combine_chunks()
assert isinstance(table, ConcatenationTable)
@pytest.mark.parametrize("blocks_type", ["in_memory", "memory_mapped", "mixed"])
def test_concatenation_table_cast(
blocks_type, in_memory_pa_table, in_memory_blocks, memory_mapped_blocks, mixed_in_memory_and_memory_mapped_blocks
):
blocks = {
"in_memory": in_memory_blocks,
"memory_mapped": memory_mapped_blocks,
"mixed": mixed_in_memory_and_memory_mapped_blocks,
}[blocks_type]
assert pa.list_(pa.int64()) in in_memory_pa_table.schema.types
assert pa.int64() in in_memory_pa_table.schema.types
schema = pa.schema(
{
k: v if v != pa.list_(pa.int64()) else pa.list_(pa.int32())
for k, v in zip(in_memory_pa_table.schema.names, in_memory_pa_table.schema.types)
}
)
table = ConcatenationTable.from_blocks(blocks).cast(schema)
assert table.table == in_memory_pa_table.cast(schema)
assert isinstance(table, ConcatenationTable)
schema = pa.schema(
{
k: v if v != pa.int64() else pa.int32()
for k, v in zip(in_memory_pa_table.schema.names, in_memory_pa_table.schema.types)
}
)
table = ConcatenationTable.from_blocks(blocks).cast(schema)
assert table.table == in_memory_pa_table.cast(schema)
assert isinstance(table, ConcatenationTable)
@pytest.mark.parametrize("blocks_type", ["in_memory", "memory_mapped", "mixed"])
def test_concat_tables_cast_with_features_metadata(
blocks_type, in_memory_pa_table, in_memory_blocks, memory_mapped_blocks, mixed_in_memory_and_memory_mapped_blocks
):
blocks = {
"in_memory": in_memory_blocks,
"memory_mapped": memory_mapped_blocks,
"mixed": mixed_in_memory_and_memory_mapped_blocks,
}[blocks_type]
input_features = Features.from_arrow_schema(in_memory_pa_table.schema)
input_features["id"] = Value("int64", id="my_id")
    input_schema = input_features.arrow_schema
    concatenated_table = ConcatenationTable.from_blocks(blocks).cast(input_schema)
    output_schema = concatenated_table.schema
    output_features = Features.from_arrow_schema(output_schema)
    assert output_schema == input_schema
    assert output_schema.metadata == input_schema.metadata
assert output_features == input_features
assert output_features["id"].id == "my_id"
@pytest.mark.parametrize("blocks_type", ["in_memory", "memory_mapped", "mixed"])
def test_concatenation_table_replace_schema_metadata(
blocks_type, in_memory_pa_table, in_memory_blocks, memory_mapped_blocks, mixed_in_memory_and_memory_mapped_blocks
):
blocks = {
"in_memory": in_memory_blocks,
"memory_mapped": memory_mapped_blocks,
"mixed": mixed_in_memory_and_memory_mapped_blocks,
}[blocks_type]
metadata = {"huggingface": "{}"}
table = ConcatenationTable.from_blocks(blocks).replace_schema_metadata(metadata)
assert table.table.schema.metadata == in_memory_pa_table.replace_schema_metadata(metadata).schema.metadata
assert isinstance(table, ConcatenationTable)
@pytest.mark.parametrize("blocks_type", ["in_memory", "memory_mapped", "mixed"])
def test_concatenation_table_add_column(
blocks_type, in_memory_pa_table, in_memory_blocks, memory_mapped_blocks, mixed_in_memory_and_memory_mapped_blocks
):
blocks = {
"in_memory": in_memory_blocks,
"memory_mapped": memory_mapped_blocks,
"mixed": mixed_in_memory_and_memory_mapped_blocks,
}[blocks_type]
i = len(in_memory_pa_table.column_names)
field_ = "new_field"
column = pa.array(list(range(len(in_memory_pa_table))))
with pytest.raises(NotImplementedError):
ConcatenationTable.from_blocks(blocks).add_column(i, field_, column)
# assert table.table == in_memory_pa_table.add_column(i, field_, column)
# unpickled_table = pickle.loads(pickle.dumps(table))
# assert unpickled_table.table == in_memory_pa_table.add_column(i, field_, column)
@pytest.mark.parametrize("blocks_type", ["in_memory", "memory_mapped", "mixed"])
def test_concatenation_table_append_column(
blocks_type, in_memory_pa_table, in_memory_blocks, memory_mapped_blocks, mixed_in_memory_and_memory_mapped_blocks
):
blocks = {
"in_memory": in_memory_blocks,
"memory_mapped": memory_mapped_blocks,
"mixed": mixed_in_memory_and_memory_mapped_blocks,
}[blocks_type]
field_ = "new_field"
column = pa.array(list(range(len(in_memory_pa_table))))
with pytest.raises(NotImplementedError):
ConcatenationTable.from_blocks(blocks).append_column(field_, column)
# assert table.table == in_memory_pa_table.append_column(field_, column)
# unpickled_table = pickle.loads(pickle.dumps(table))
# assert unpickled_table.table == in_memory_pa_table.append_column(field_, column)
@pytest.mark.parametrize("blocks_type", ["in_memory", "memory_mapped", "mixed"])
def test_concatenation_table_remove_column(
blocks_type, in_memory_pa_table, in_memory_blocks, memory_mapped_blocks, mixed_in_memory_and_memory_mapped_blocks
):
blocks = {
"in_memory": in_memory_blocks,
"memory_mapped": memory_mapped_blocks,
"mixed": mixed_in_memory_and_memory_mapped_blocks,
}[blocks_type]
table = ConcatenationTable.from_blocks(blocks).remove_column(0)
assert table.table == in_memory_pa_table.remove_column(0)
assert isinstance(table, ConcatenationTable)
@pytest.mark.parametrize("blocks_type", ["in_memory", "memory_mapped", "mixed"])
def test_concatenation_table_set_column(
blocks_type, in_memory_pa_table, in_memory_blocks, memory_mapped_blocks, mixed_in_memory_and_memory_mapped_blocks
):
blocks = {
"in_memory": in_memory_blocks,
"memory_mapped": memory_mapped_blocks,
"mixed": mixed_in_memory_and_memory_mapped_blocks,
}[blocks_type]
i = len(in_memory_pa_table.column_names)
field_ = "new_field"
column = pa.array(list(range(len(in_memory_pa_table))))
with pytest.raises(NotImplementedError):
ConcatenationTable.from_blocks(blocks).set_column(i, field_, column)
# assert table.table == in_memory_pa_table.set_column(i, field_, column)
# unpickled_table = pickle.loads(pickle.dumps(table))
# assert unpickled_table.table == in_memory_pa_table.set_column(i, field_, column)
@pytest.mark.parametrize("blocks_type", ["in_memory", "memory_mapped", "mixed"])
def test_concatenation_table_rename_columns(
blocks_type, in_memory_pa_table, in_memory_blocks, memory_mapped_blocks, mixed_in_memory_and_memory_mapped_blocks
):
blocks = {
"in_memory": in_memory_blocks,
"memory_mapped": memory_mapped_blocks,
"mixed": mixed_in_memory_and_memory_mapped_blocks,
}[blocks_type]
assert "tokens" in in_memory_pa_table.column_names
names = [name if name != "tokens" else "new_tokens" for name in in_memory_pa_table.column_names]
table = ConcatenationTable.from_blocks(blocks).rename_columns(names)
assert isinstance(table, ConcatenationTable)
assert table.table == in_memory_pa_table.rename_columns(names)
@pytest.mark.parametrize("blocks_type", ["in_memory", "memory_mapped", "mixed"])
def test_concatenation_table_drop(
blocks_type, in_memory_pa_table, in_memory_blocks, memory_mapped_blocks, mixed_in_memory_and_memory_mapped_blocks
):
blocks = {
"in_memory": in_memory_blocks,
"memory_mapped": memory_mapped_blocks,
"mixed": mixed_in_memory_and_memory_mapped_blocks,
}[blocks_type]
names = [in_memory_pa_table.column_names[0]]
table = ConcatenationTable.from_blocks(blocks).drop(names)
assert table.table == in_memory_pa_table.drop(names)
assert isinstance(table, ConcatenationTable)
def test_concat_tables(arrow_file, in_memory_pa_table):
t0 = in_memory_pa_table
t1 = InMemoryTable(t0)
t2 = MemoryMappedTable.from_file(arrow_file)
t3 = ConcatenationTable.from_blocks(t1)
tables = [t0, t1, t2, t3]
concatenated_table = concat_tables(tables, axis=0)
assert concatenated_table.table == pa.concat_tables([t0] * 4)
assert concatenated_table.table.shape == (40, 4)
assert isinstance(concatenated_table, ConcatenationTable)
assert len(concatenated_table.blocks) == 3 # t0 and t1 are consolidated as a single InMemoryTable
assert isinstance(concatenated_table.blocks[0][0], InMemoryTable)
assert isinstance(concatenated_table.blocks[1][0], MemoryMappedTable)
assert isinstance(concatenated_table.blocks[2][0], InMemoryTable)
# add suffix to avoid error due to duplicate column names
concatenated_table = concat_tables(
[add_suffix_to_column_names(table, i) for i, table in enumerate(tables)], axis=1
)
assert concatenated_table.table.shape == (10, 16)
assert len(concatenated_table.blocks[0]) == 3 # t0 and t1 are consolidated as a single InMemoryTable
assert isinstance(concatenated_table.blocks[0][0], InMemoryTable)
assert isinstance(concatenated_table.blocks[0][1], MemoryMappedTable)
assert isinstance(concatenated_table.blocks[0][2], InMemoryTable)
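# Brute-force reference for _interpolation_search: returns the index i such that
# arr[i] <= x < arr[i + 1], or the IndexError class when x falls outside the array.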
def _interpolation_search_ground_truth(arr: List[int], x: int) -> Union[int, IndexError]:
for i in range(len(arr) - 1):
if arr[i] <= x < arr[i + 1]:
return i
return IndexError
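# List subclass that records the unique indices accessed through __getitem__, used below to
# check that _interpolation_search only touches a handful of elements.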
class _ListWithGetitemCounter(list):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.unique_getitem_calls = set()
def __getitem__(self, i):
out = super().__getitem__(i)
self.unique_getitem_calls.add(i)
return out
@property
def getitem_unique_count(self):
return len(self.unique_getitem_calls)
@pytest.mark.parametrize(
"arr, x",
[(np.arange(0, 14, 3), x) for x in range(-1, 22)]
+ [(list(np.arange(-5, 5)), x) for x in range(-6, 6)]
+ [([0, 1_000, 1_001, 1_003], x) for x in [-1, 0, 2, 100, 999, 1_000, 1_001, 1_002, 1_003, 1_004]]
+ [(list(range(1_000)), x) for x in [-1, 0, 1, 10, 666, 999, 1_000, 1_0001]],
)
def test_interpolation_search(arr, x):
ground_truth = _interpolation_search_ground_truth(arr, x)
if isinstance(ground_truth, int):
arr = _ListWithGetitemCounter(arr)
output = _interpolation_search(arr, x)
assert ground_truth == output
# 4 maximum unique getitem calls is expected for the cases of this test
# but it can be bigger for large and messy arrays.
assert arr.getitem_unique_count <= 4
else:
with pytest.raises(ground_truth):
_interpolation_search(arr, x)
def test_indexed_table_mixin():
n_rows_per_chunk = 10
n_chunks = 4
pa_table = pa.Table.from_pydict({"col": [0] * n_rows_per_chunk})
pa_table = pa.concat_tables([pa_table] * n_chunks)
table = Table(pa_table)
assert all(table._offsets.tolist() == np.cumsum([0] + [n_rows_per_chunk] * n_chunks))
assert table.fast_slice(5) == pa_table.slice(5)
assert table.fast_slice(2, 13) == pa_table.slice(2, 13)
def test_cast_integer_array_to_features():
arr = pa.array([[0, 1]])
assert cast_array_to_feature(arr, Sequence(Value("string"))).type == pa.list_(pa.string())
assert cast_array_to_feature(arr, Sequence(Value("string")), allow_decimal_to_str=False).type == pa.list_(
pa.string()
)
with pytest.raises(TypeError):
cast_array_to_feature(arr, Sequence(Value("string")), allow_primitive_to_str=False)
def test_cast_float_array_to_features():
arr = pa.array([[0.0, 1.0]])
assert cast_array_to_feature(arr, Sequence(Value("string"))).type == pa.list_(pa.string())
assert cast_array_to_feature(arr, Sequence(Value("string")), allow_decimal_to_str=False).type == pa.list_(
pa.string()
)
with pytest.raises(TypeError):
cast_array_to_feature(arr, Sequence(Value("string")), allow_primitive_to_str=False)
def test_cast_boolean_array_to_features():
arr = pa.array([[False, True]])
assert cast_array_to_feature(arr, Sequence(Value("string"))).type == pa.list_(pa.string())
assert cast_array_to_feature(arr, Sequence(Value("string")), allow_decimal_to_str=False).type == pa.list_(
pa.string()
)
with pytest.raises(TypeError):
cast_array_to_feature(arr, Sequence(Value("string")), allow_primitive_to_str=False)
def test_cast_decimal_array_to_features():
arr = pa.array([[Decimal(0), Decimal(1)]])
assert cast_array_to_feature(arr, Sequence(Value("string"))).type == pa.list_(pa.string())
assert cast_array_to_feature(arr, Sequence(Value("string")), allow_primitive_to_str=False).type == pa.list_(
pa.string()
)
with pytest.raises(TypeError):
cast_array_to_feature(arr, Sequence(Value("string")), allow_decimal_to_str=False)
@pytest.mark.parametrize(
"array_list, expected_list",
[
([{"age": 25}, {"age": 63}], [{"age": 25, "name": None}, {"age": 63, "name": None}]),
([{}, {}], [{"age": None, "name": None}, {"age": None, "name": None}]), # completely empty struct
],
)
def test_cast_array_to_feature_with_struct_with_missing_fields(array_list, expected_list):
arr = pa.array(array_list)
feature = {"age": Value("int32"), "name": Value("string")}
cast_array = cast_array_to_feature(arr, feature)
assert cast_array.type == pa.struct({"age": pa.int32(), "name": pa.string()})
assert cast_array.to_pylist() == expected_list
def test_cast_array_to_features_nested():
arr = pa.array([[{"foo": [0]}]])
assert cast_array_to_feature(arr, [{"foo": Sequence(Value("string"))}]).type == pa.list_(
pa.struct({"foo": pa.list_(pa.string())})
)
def test_cast_array_to_features_to_nested_with_no_fields():
arr = pa.array([{}])
assert cast_array_to_feature(arr, {}).type == pa.struct({})
assert cast_array_to_feature(arr, {}).to_pylist() == arr.to_pylist()
def test_cast_array_to_features_nested_with_nulls():
# same type
arr = pa.array([{"foo": [None, [0]]}], pa.struct({"foo": pa.list_(pa.list_(pa.int64()))}))
casted_array = cast_array_to_feature(arr, {"foo": [[Value("int64")]]})
assert casted_array.type == pa.struct({"foo": pa.list_(pa.list_(pa.int64()))})
assert casted_array.to_pylist() == arr.to_pylist()
# different type
arr = pa.array([{"foo": [None, [0]]}], pa.struct({"foo": pa.list_(pa.list_(pa.int64()))}))
casted_array = cast_array_to_feature(arr, {"foo": [[Value("int32")]]})
assert casted_array.type == pa.struct({"foo": pa.list_(pa.list_(pa.int32()))})
assert casted_array.to_pylist() == [{"foo": [None, [0]]}]
def test_cast_array_to_features_to_null_type():
# same type
arr = pa.array([[None, None]])
assert cast_array_to_feature(arr, Sequence(Value("null"))).type == pa.list_(pa.null())
# different type
arr = pa.array([[None, 1]])
with pytest.raises(TypeError):
cast_array_to_feature(arr, Sequence(Value("null")))
def test_cast_array_to_features_array_xd():
# same storage type
arr = pa.array([[[0, 1], [2, 3]], [[4, 5], [6, 7]]], pa.list_(pa.list_(pa.int32(), 2), 2))
casted_array = cast_array_to_feature(arr, Array2D(shape=(2, 2), dtype="int32"))
assert casted_array.type == Array2DExtensionType(shape=(2, 2), dtype="int32")
# different storage type
casted_array = cast_array_to_feature(arr, Array2D(shape=(2, 2), dtype="float32"))
assert casted_array.type == Array2DExtensionType(shape=(2, 2), dtype="float32")
def test_cast_array_to_features_sequence_classlabel():
arr = pa.array([[], [1], [0, 1]], pa.list_(pa.int64()))
assert cast_array_to_feature(arr, Sequence(ClassLabel(names=["foo", "bar"]))).type == pa.list_(pa.int64())
arr = pa.array([[], ["bar"], ["foo", "bar"]], pa.list_(pa.string()))
assert cast_array_to_feature(arr, Sequence(ClassLabel(names=["foo", "bar"]))).type == pa.list_(pa.int64())
# Test empty arrays
arr = pa.array([[], []], pa.list_(pa.int64()))
assert cast_array_to_feature(arr, Sequence(ClassLabel(names=["foo", "bar"]))).type == pa.list_(pa.int64())
arr = pa.array([[], []], pa.list_(pa.string()))
assert cast_array_to_feature(arr, Sequence(ClassLabel(names=["foo", "bar"]))).type == pa.list_(pa.int64())
# Test invalid class labels
arr = pa.array([[2]], pa.list_(pa.int64()))
with pytest.raises(ValueError):
assert cast_array_to_feature(arr, Sequence(ClassLabel(names=["foo", "bar"])))
arr = pa.array([["baz"]], pa.list_(pa.string()))
with pytest.raises(ValueError):
assert cast_array_to_feature(arr, Sequence(ClassLabel(names=["foo", "bar"])))
@pytest.mark.parametrize(
"arr",
[
pa.array([[0, 1, 2], [3, None, 5], None, [6, 7, 8], None], pa.list_(pa.int32(), 3)),
],
)
@pytest.mark.parametrize("slice", [None, slice(1, None), slice(-1), slice(1, 3), slice(2, 3), slice(1, 1)])
@pytest.mark.parametrize("target_value_feature", [Value("int64")])
def test_cast_fixed_size_list_array_to_features_sequence(arr, slice, target_value_feature):
arr = arr if slice is None else arr[slice]
# Fixed size list
casted_array = cast_array_to_feature(arr, Sequence(target_value_feature, length=arr.type.list_size))
assert casted_array.type == get_nested_type(Sequence(target_value_feature, length=arr.type.list_size))
assert casted_array.to_pylist() == arr.to_pylist()
with pytest.raises(TypeError):
cast_array_to_feature(arr, Sequence(target_value_feature, length=arr.type.list_size + 1))
# Variable size list
casted_array = cast_array_to_feature(arr, Sequence(target_value_feature))
assert casted_array.type == get_nested_type(Sequence(target_value_feature))
assert casted_array.to_pylist() == arr.to_pylist()
casted_array = cast_array_to_feature(arr, [target_value_feature])
assert casted_array.type == get_nested_type([target_value_feature])
assert casted_array.to_pylist() == arr.to_pylist()
@pytest.mark.parametrize(
"arr",
[
pa.array([[0, 1, 2], [3, None, 5], None, [6, 7, 8], None], pa.list_(pa.int32())),
],
)
@pytest.mark.parametrize("slice", [None, slice(1, None), slice(-1), slice(1, 3), slice(2, 3), slice(1, 1)])
@pytest.mark.parametrize("target_value_feature", [Value("int64")])
def test_cast_list_array_to_features_sequence(arr, slice, target_value_feature):
arr = arr if slice is None else arr[slice]
# Variable size list
casted_array = cast_array_to_feature(arr, Sequence(target_value_feature))
assert casted_array.type == get_nested_type(Sequence(target_value_feature))
assert casted_array.to_pylist() == arr.to_pylist()
casted_array = cast_array_to_feature(arr, [target_value_feature])
assert casted_array.type == get_nested_type([target_value_feature])
assert casted_array.to_pylist() == arr.to_pylist()
# Fixed size list
list_size = arr.value_lengths().drop_null()[0].as_py() if arr.value_lengths().drop_null() else 2
casted_array = cast_array_to_feature(arr, Sequence(target_value_feature, length=list_size))
assert casted_array.type == get_nested_type(Sequence(target_value_feature, length=list_size))
assert casted_array.to_pylist() == arr.to_pylist()
@pytest.mark.parametrize("sequence_feature_dtype", ["string", "int64"])
@pytest.mark.parametrize("from_list_type", ["list", "fixed_size_list", "large_list"])
@pytest.mark.parametrize("list_within_struct", [False, True])
def test_cast_array_to_feature_with_list_array_and_sequence_feature(
list_within_struct, from_list_type, sequence_feature_dtype
):
list_type = {
"list": pa.list_,
"fixed_size_list": partial(pa.list_, list_size=2),
"large_list": pa.large_list,
}
primitive_type = {
"string": pa.string(),
"int64": pa.int64(),
}
to_type = "list"
array_data = [0, 1]
array_type = list_type[from_list_type](pa.int64())
sequence_feature = Value(sequence_feature_dtype)
expected_array_type = list_type[to_type](primitive_type[sequence_feature_dtype])
if list_within_struct:
array_data = {"col_1": array_data}
array_type = pa.struct({"col_1": array_type})
sequence_feature = {"col_1": sequence_feature}
expected_array_type = pa.struct({"col_1": expected_array_type})
feature = Sequence(sequence_feature)
array = pa.array([array_data], type=array_type)
cast_array = cast_array_to_feature(array, feature)
assert cast_array.type == expected_array_type
@pytest.mark.parametrize("large_list_feature_value_type", ["string", "int64"])
@pytest.mark.parametrize("from_list_type", ["list", "fixed_size_list", "large_list"])
def test_cast_array_to_feature_with_list_array_and_large_list_feature(from_list_type, large_list_feature_value_type):
list_type = {
"list": pa.list_,
"fixed_size_list": partial(pa.list_, list_size=2),
"large_list": pa.large_list,
}
primitive_type = {
"string": pa.string(),
"int64": pa.int64(),
}
to_type = "large_list"
array_data = [0, 1]
array_type = list_type[from_list_type](pa.int64())
large_list_feature_value = Value(large_list_feature_value_type)
expected_array_type = list_type[to_type](primitive_type[large_list_feature_value_type])
feature = LargeList(large_list_feature_value)
array = pa.array([array_data], type=array_type)
cast_array = cast_array_to_feature(array, feature)
assert cast_array.type == expected_array_type
def test_cast_array_xd_to_features_sequence():
arr = np.random.randint(0, 10, size=(8, 2, 3)).tolist()
arr = Array2DExtensionType(shape=(2, 3), dtype="int64").wrap_array(pa.array(arr, pa.list_(pa.list_(pa.int64()))))
arr = pa.ListArray.from_arrays([0, None, 4, 8], arr)
# Variable size list
casted_array = cast_array_to_feature(arr, Sequence(Array2D(shape=(2, 3), dtype="int32")))
assert casted_array.type == get_nested_type(Sequence(Array2D(shape=(2, 3), dtype="int32")))
assert casted_array.to_pylist() == arr.to_pylist()
# Fixed size list
casted_array = cast_array_to_feature(arr, Sequence(Array2D(shape=(2, 3), dtype="int32"), length=4))
assert casted_array.type == get_nested_type(Sequence(Array2D(shape=(2, 3), dtype="int32"), length=4))
assert casted_array.to_pylist() == arr.to_pylist()
def test_embed_array_storage(image_file):
array = pa.array([{"bytes": None, "path": image_file}], type=Image.pa_type)
embedded_images_array = embed_array_storage(array, Image())
assert isinstance(embedded_images_array.to_pylist()[0]["path"], str)
assert embedded_images_array.to_pylist()[0]["path"] == "test_image_rgb.jpg"
assert isinstance(embedded_images_array.to_pylist()[0]["bytes"], bytes)
def test_embed_array_storage_nested(image_file):
array = pa.array([[{"bytes": None, "path": image_file}]], type=pa.list_(Image.pa_type))
embedded_images_array = embed_array_storage(array, [Image()])
assert isinstance(embedded_images_array.to_pylist()[0][0]["path"], str)
assert isinstance(embedded_images_array.to_pylist()[0][0]["bytes"], bytes)
array = pa.array([{"foo": {"bytes": None, "path": image_file}}], type=pa.struct({"foo": Image.pa_type}))
embedded_images_array = embed_array_storage(array, {"foo": Image()})
assert isinstance(embedded_images_array.to_pylist()[0]["foo"]["path"], str)
assert isinstance(embedded_images_array.to_pylist()[0]["foo"]["bytes"], bytes)
@pytest.mark.parametrize(
"array, feature, expected_embedded_array_type",
[
(
pa.array([[{"path": "image_path"}]], type=pa.list_(Image.pa_type)),
[Image()],
pa.types.is_list,
),
(
pa.array([[{"path": "image_path"}]], type=pa.large_list(Image.pa_type)),
LargeList(Image()),
pa.types.is_large_list,
),
(
pa.array([[{"path": "image_path"}]], type=pa.list_(Image.pa_type)),
Sequence(Image()),
pa.types.is_list,
),
],
)
def test_embed_array_storage_with_list_types(array, feature, expected_embedded_array_type, monkeypatch):
mock_embed_storage = MagicMock(
return_value=pa.StructArray.from_arrays(
[pa.array([b"image_bytes"], type=pa.binary()), pa.array(["image_path"], type=pa.string())],
["bytes", "path"],
)
)
monkeypatch.setattr(Image, "embed_storage", mock_embed_storage)
embedded_images_array = embed_array_storage(array, feature)
assert expected_embedded_array_type(embedded_images_array.type)
assert embedded_images_array.to_pylist() == [[{"bytes": b"image_bytes", "path": "image_path"}]]
def test_embed_table_storage(image_file):
features = Features({"image": Image()})
table = table_cast(pa.table({"image": [image_file]}), features.arrow_schema)
embedded_images_table = embed_table_storage(table)
assert isinstance(embedded_images_table.to_pydict()["image"][0]["path"], str)
assert isinstance(embedded_images_table.to_pydict()["image"][0]["bytes"], bytes)
@pytest.mark.parametrize(
"table",
[
InMemoryTable(pa.table({"foo": range(10)})),
InMemoryTable(pa.concat_tables([pa.table({"foo": range(0, 5)}), pa.table({"foo": range(5, 10)})])),
InMemoryTable(pa.concat_tables([pa.table({"foo": [i]}) for i in range(10)])),
],
)
@pytest.mark.parametrize("batch_size", [1, 2, 3, 9, 10, 11, 20])
@pytest.mark.parametrize("drop_last_batch", [False, True])
def test_table_iter(table, batch_size, drop_last_batch):
num_rows = len(table) if not drop_last_batch else len(table) // batch_size * batch_size
num_batches = (num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size
subtables = list(table_iter(table, batch_size=batch_size, drop_last_batch=drop_last_batch))
assert len(subtables) == num_batches
if drop_last_batch:
assert all(len(subtable) == batch_size for subtable in subtables)
else:
assert all(len(subtable) == batch_size for subtable in subtables[:-1])
assert len(subtables[-1]) <= batch_size
if num_rows > 0:
reloaded = pa.concat_tables(subtables)
assert table.slice(0, num_rows).to_pydict() == reloaded.to_pydict()
@pytest.mark.parametrize("to_type", ["list", "fixed_size_list", "large_list"])
@pytest.mark.parametrize("from_type", ["list", "fixed_size_list", "large_list"])
def test_array_cast(from_type, to_type):
array_type = {
"list": pa.list_(pa.int64()),
"fixed_size_list": pa.list_(pa.int64(), 2),
"large_list": pa.large_list(pa.int64()),
}
arr = pa.array([[0, 1]], type=array_type[from_type])
cast_arr = array_cast(arr, array_type[to_type])
assert cast_arr.type == array_type[to_type]
assert cast_arr.values == arr.values
def test_cast_table_to_schema_with_missing_fields():
table = pa.table({"age": [25, 63]})
schema = pa.schema({"age": pa.int32(), "name": pa.string()})
cast_table = cast_table_to_schema(table, schema)
assert cast_table.schema == pa.schema({"age": pa.int32(), "name": pa.string()})
assert cast_table.to_pydict() == {"age": [25, 63], "name": [None, None]}
| datasets/tests/test_table.py/0 | {
"file_path": "datasets/tests/test_table.py",
"repo_id": "datasets",
"token_count": 24661
} |
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->
# Textual Inversion
Textual Inversion is a training method for personalizing models by learning new text embeddings from a few example images. The file produced from training is extremely small (a few KBs) and the new embeddings can be loaded into the text encoder.
[`TextualInversionLoaderMixin`] provides a function for loading Textual Inversion embeddings from Diffusers and Automatic1111 into the text encoder and loading a special token to activate the embeddings.
<Tip>
To learn more about how to load Textual Inversion embeddings, see the [Textual Inversion](../../using-diffusers/loading_adapters#textual-inversion) loading guide.
</Tip>
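As a quick, hedged illustration (not part of the original guide), the sketch below shows how a community embedding might be loaded and activated; the `sd-concepts-library/cat-toy` checkpoint and its `<cat-toy>` trigger token are assumptions used only for this example.
```python
import torch
from diffusers import StableDiffusionPipeline

# Assumed base model and Textual Inversion concept, for illustration only
pipe = StableDiffusionPipeline.from_pretrained(
    "stable-diffusion-v1-5/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")

# load_textual_inversion is provided by TextualInversionLoaderMixin
pipe.load_textual_inversion("sd-concepts-library/cat-toy")

# The learned token activates the new embedding in the prompt
image = pipe("A <cat-toy> sitting on a wooden shelf", num_inference_steps=30).images[0]
image.save("cat_toy.png")
```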
## TextualInversionLoaderMixin
[[autodoc]] loaders.textual_inversion.TextualInversionLoaderMixin | diffusers/docs/source/en/api/loaders/textual_inversion.md/0 | {
"file_path": "diffusers/docs/source/en/api/loaders/textual_inversion.md",
"repo_id": "diffusers",
"token_count": 340
} |
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License. -->
# CogVideoXTransformer3DModel
A Diffusion Transformer model for 3D data from [CogVideoX](https://github.com/THUDM/CogVideo) was introduced in [CogVideoX: Text-to-Video Diffusion Models with An Expert Transformer](https://github.com/THUDM/CogVideo/blob/main/resources/CogVideoX.pdf) by Tsinghua University & ZhipuAI.
The model can be loaded with the following code snippet.
```python
import torch
from diffusers import CogVideoXTransformer3DModel

transformer = CogVideoXTransformer3DModel.from_pretrained("THUDM/CogVideoX-2b", subfolder="transformer", torch_dtype=torch.float16).to("cuda")
```
## CogVideoXTransformer3DModel
[[autodoc]] CogVideoXTransformer3DModel
## Transformer2DModelOutput
[[autodoc]] models.modeling_outputs.Transformer2DModelOutput
| diffusers/docs/source/en/api/models/cogvideox_transformer3d.md/0 | {
"file_path": "diffusers/docs/source/en/api/models/cogvideox_transformer3d.md",
"repo_id": "diffusers",
"token_count": 394
} |
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->
# Flux
Flux is a series of text-to-image generation models based on diffusion transformers. To know more about Flux, check out the original [blog post](https://blackforestlabs.ai/announcing-black-forest-labs/) by the creators of Flux, Black Forest Labs.
Original model checkpoints for Flux can be found [here](https://huggingface.co/black-forest-labs). Original inference code can be found [here](https://github.com/black-forest-labs/flux).
<Tip>
Flux can be quite expensive to run on consumer hardware devices. However, you can perform a suite of optimizations to run it faster and in a more memory-friendly manner. Check out [this section](https://huggingface.co/blog/sd3#memory-optimizations-for-sd3) for more details. Additionally, Flux can benefit from quantization for memory efficiency with a trade-off in inference latency. Refer to [this blog post](https://huggingface.co/blog/quanto-diffusers) to learn more. For an exhaustive list of resources, check out [this gist](https://gist.github.com/sayakpaul/b664605caf0aa3bf8585ab109dd5ac9c).
</Tip>
Flux comes in the following variants:
| model type | model id |
|:----------:|:--------:|
| Timestep-distilled | [`black-forest-labs/FLUX.1-schnell`](https://huggingface.co/black-forest-labs/FLUX.1-schnell) |
| Guidance-distilled | [`black-forest-labs/FLUX.1-dev`](https://huggingface.co/black-forest-labs/FLUX.1-dev) |
| Fill Inpainting/Outpainting (Guidance-distilled) | [`black-forest-labs/FLUX.1-Fill-dev`](https://huggingface.co/black-forest-labs/FLUX.1-Fill-dev) |
| Canny Control (Guidance-distilled) | [`black-forest-labs/FLUX.1-Canny-dev`](https://huggingface.co/black-forest-labs/FLUX.1-Canny-dev) |
| Depth Control (Guidance-distilled) | [`black-forest-labs/FLUX.1-Depth-dev`](https://huggingface.co/black-forest-labs/FLUX.1-Depth-dev) |
| Canny Control (LoRA) | [`black-forest-labs/FLUX.1-Canny-dev-lora`](https://huggingface.co/black-forest-labs/FLUX.1-Canny-dev-lora) |
| Depth Control (LoRA) | [`black-forest-labs/FLUX.1-Depth-dev-lora`](https://huggingface.co/black-forest-labs/FLUX.1-Depth-dev-lora) |
| Redux (Adapter) | [`black-forest-labs/FLUX.1-Redux-dev`](https://huggingface.co/black-forest-labs/FLUX.1-Redux-dev) |
All checkpoints have different usage, which we detail below.
### Timestep-distilled
* `max_sequence_length` cannot be more than 256.
* `guidance_scale` needs to be 0.
* As this is a timestep-distilled model, it benefits from fewer sampling steps.
```python
import torch
from diffusers import FluxPipeline
pipe = FluxPipeline.from_pretrained("black-forest-labs/FLUX.1-schnell", torch_dtype=torch.bfloat16)
pipe.enable_model_cpu_offload()
prompt = "A cat holding a sign that says hello world"
out = pipe(
prompt=prompt,
guidance_scale=0.,
height=768,
width=1360,
num_inference_steps=4,
max_sequence_length=256,
).images[0]
out.save("image.png")
```
### Guidance-distilled
* The guidance-distilled variant takes about 50 sampling steps for good-quality generation.
* It doesn't have any limitations around the `max_sequence_length`.
```python
import torch
from diffusers import FluxPipeline
pipe = FluxPipeline.from_pretrained("black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16)
pipe.enable_model_cpu_offload()
prompt = "a tiny astronaut hatching from an egg on the moon"
out = pipe(
prompt=prompt,
guidance_scale=3.5,
height=768,
width=1360,
num_inference_steps=50,
).images[0]
out.save("image.png")
```
### Fill Inpainting/Outpainting
* Flux Fill pipeline does not require `strength` as an input like regular inpainting pipelines.
* It supports both inpainting and outpainting.
```python
import torch
from diffusers import FluxFillPipeline
from diffusers.utils import load_image
image = load_image("https://huggingface.co/datasets/YiYiXu/testing-images/resolve/main/cup.png")
mask = load_image("https://huggingface.co/datasets/YiYiXu/testing-images/resolve/main/cup_mask.png")
repo_id = "black-forest-labs/FLUX.1-Fill-dev"
pipe = FluxFillPipeline.from_pretrained(repo_id, torch_dtype=torch.bfloat16).to("cuda")
image = pipe(
prompt="a white paper cup",
image=image,
mask_image=mask,
height=1632,
width=1232,
max_sequence_length=512,
generator=torch.Generator("cpu").manual_seed(0)
).images[0]
image.save("output.png")
```
### Canny Control
**Note:** `black-forest-labs/Flux.1-Canny-dev` is _not_ a [`ControlNetModel`] model. ControlNet models are a separate component from the UNet/Transformer whose residuals are added to the actual underlying model. Canny Control is an alternate architecture that achieves effectively the same results as a ControlNet model would, by using channel-wise concatenation with input control condition and ensuring the transformer learns structure control by following the condition as closely as possible.
```python
# !pip install -U controlnet-aux
import torch
from controlnet_aux import CannyDetector
from diffusers import FluxControlPipeline
from diffusers.utils import load_image
pipe = FluxControlPipeline.from_pretrained("black-forest-labs/FLUX.1-Canny-dev", torch_dtype=torch.bfloat16).to("cuda")
prompt = "A robot made of exotic candies and chocolates of different kinds. The background is filled with confetti and celebratory gifts."
control_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/robot.png")
processor = CannyDetector()
control_image = processor(control_image, low_threshold=50, high_threshold=200, detect_resolution=1024, image_resolution=1024)
image = pipe(
prompt=prompt,
control_image=control_image,
height=1024,
width=1024,
num_inference_steps=50,
guidance_scale=30.0,
).images[0]
image.save("output.png")
```
Canny Control is also possible with a LoRA variant of this condition. The usage is as follows:
```python
# !pip install -U controlnet-aux
import torch
from controlnet_aux import CannyDetector
from diffusers import FluxControlPipeline
from diffusers.utils import load_image
pipe = FluxControlPipeline.from_pretrained("black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16).to("cuda")
pipe.load_lora_weights("black-forest-labs/FLUX.1-Canny-dev-lora")
prompt = "A robot made of exotic candies and chocolates of different kinds. The background is filled with confetti and celebratory gifts."
control_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/robot.png")
processor = CannyDetector()
control_image = processor(control_image, low_threshold=50, high_threshold=200, detect_resolution=1024, image_resolution=1024)
image = pipe(
prompt=prompt,
control_image=control_image,
height=1024,
width=1024,
num_inference_steps=50,
guidance_scale=30.0,
).images[0]
image.save("output.png")
```
### Depth Control
**Note:** `black-forest-labs/Flux.1-Depth-dev` is _not_ a ControlNet model. [`ControlNetModel`] models are a separate component from the UNet/Transformer whose residuals are added to the actual underlying model. Depth Control is an alternate architecture that achieves effectively the same results as a ControlNet model would, by using channel-wise concatenation with input control condition and ensuring the transformer learns structure control by following the condition as closely as possible.
```python
# !pip install git+https://github.com/huggingface/image_gen_aux
import torch
from diffusers import FluxControlPipeline, FluxTransformer2DModel
from diffusers.utils import load_image
from image_gen_aux import DepthPreprocessor
pipe = FluxControlPipeline.from_pretrained("black-forest-labs/FLUX.1-Depth-dev", torch_dtype=torch.bfloat16).to("cuda")
prompt = "A robot made of exotic candies and chocolates of different kinds. The background is filled with confetti and celebratory gifts."
control_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/robot.png")
processor = DepthPreprocessor.from_pretrained("LiheYoung/depth-anything-large-hf")
control_image = processor(control_image)[0].convert("RGB")
image = pipe(
prompt=prompt,
control_image=control_image,
height=1024,
width=1024,
num_inference_steps=30,
guidance_scale=10.0,
generator=torch.Generator().manual_seed(42),
).images[0]
image.save("output.png")
```
Depth Control is also possible with a LoRA variant of this condition. The usage is as follows:
```python
# !pip install git+https://github.com/huggingface/image_gen_aux
import torch
from diffusers import FluxControlPipeline, FluxTransformer2DModel
from diffusers.utils import load_image
from image_gen_aux import DepthPreprocessor
pipe = FluxControlPipeline.from_pretrained("black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16).to("cuda")
pipe.load_lora_weights("black-forest-labs/FLUX.1-Depth-dev-lora")
prompt = "A robot made of exotic candies and chocolates of different kinds. The background is filled with confetti and celebratory gifts."
control_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/robot.png")
processor = DepthPreprocessor.from_pretrained("LiheYoung/depth-anything-large-hf")
control_image = processor(control_image)[0].convert("RGB")
image = pipe(
prompt=prompt,
control_image=control_image,
height=1024,
width=1024,
num_inference_steps=30,
guidance_scale=10.0,
generator=torch.Generator().manual_seed(42),
).images[0]
image.save("output.png")
```
### Redux
* Flux Redux pipeline is an adapter for FLUX.1 base models. It can be used with both flux-dev and flux-schnell, for image-to-image generation.
* You can first use the `FluxPriorReduxPipeline` to get the `prompt_embeds` and `pooled_prompt_embeds`, and then feed them into the `FluxPipeline` for image-to-image generation.
* When using `FluxPriorReduxPipeline` with a base pipeline, you can set `text_encoder=None` and `text_encoder_2=None` in the base pipeline to save VRAM.
```python
import torch
from diffusers import FluxPriorReduxPipeline, FluxPipeline
from diffusers.utils import load_image
device = "cuda"
dtype = torch.bfloat16
repo_redux = "black-forest-labs/FLUX.1-Redux-dev"
repo_base = "black-forest-labs/FLUX.1-dev"
pipe_prior_redux = FluxPriorReduxPipeline.from_pretrained(repo_redux, torch_dtype=dtype).to(device)
pipe = FluxPipeline.from_pretrained(
repo_base,
text_encoder=None,
text_encoder_2=None,
torch_dtype=torch.bfloat16
).to(device)
image = load_image("https://huggingface.co/datasets/YiYiXu/testing-images/resolve/main/style_ziggy/img5.png")
pipe_prior_output = pipe_prior_redux(image)
images = pipe(
guidance_scale=2.5,
num_inference_steps=50,
generator=torch.Generator("cpu").manual_seed(0),
**pipe_prior_output,
).images
images[0].save("flux-redux.png")
```
## Combining Flux Turbo LoRAs with Flux Control, Fill, and Redux
We can combine Flux Turbo LoRAs with Flux Control and other pipelines like Fill and Redux to enable few-step inference. The example below shows how to do this with the Flux Control LoRA for depth and a turbo LoRA from [`ByteDance/Hyper-SD`](https://hf.co/ByteDance/Hyper-SD).
```py
from diffusers import FluxControlPipeline
from image_gen_aux import DepthPreprocessor
from diffusers.utils import load_image
from huggingface_hub import hf_hub_download
import torch
control_pipe = FluxControlPipeline.from_pretrained("black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16)
control_pipe.load_lora_weights("black-forest-labs/FLUX.1-Depth-dev-lora", adapter_name="depth")
control_pipe.load_lora_weights(
hf_hub_download("ByteDance/Hyper-SD", "Hyper-FLUX.1-dev-8steps-lora.safetensors"), adapter_name="hyper-sd"
)
control_pipe.set_adapters(["depth", "hyper-sd"], adapter_weights=[0.85, 0.125])
control_pipe.enable_model_cpu_offload()
prompt = "A robot made of exotic candies and chocolates of different kinds. The background is filled with confetti and celebratory gifts."
control_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/robot.png")
processor = DepthPreprocessor.from_pretrained("LiheYoung/depth-anything-large-hf")
control_image = processor(control_image)[0].convert("RGB")
image = control_pipe(
prompt=prompt,
control_image=control_image,
height=1024,
width=1024,
num_inference_steps=8,
guidance_scale=10.0,
generator=torch.Generator().manual_seed(42),
).images[0]
image.save("output.png")
```
## Note about `unload_lora_weights()` when using Flux LoRAs
When unloading the Control LoRA weights, call `pipe.unload_lora_weights(reset_to_overwritten_params=True)` to reset the `pipe.transformer` completely back to its original form. The resultant pipeline can then be used with methods like [`DiffusionPipeline.from_pipe`]. More details about this argument are available in [this PR](https://github.com/huggingface/diffusers/pull/10397).
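A minimal sketch (assuming the Depth Control LoRA setup shown earlier) of the load/unload cycle:
```python
import torch
from diffusers import FluxControlPipeline

pipe = FluxControlPipeline.from_pretrained("black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16)
pipe.load_lora_weights("black-forest-labs/FLUX.1-Depth-dev-lora")
# ... run inference with the Control LoRA ...

# Restore pipe.transformer to its original, pre-LoRA configuration
pipe.unload_lora_weights(reset_to_overwritten_params=True)
```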
## IP-Adapter
<Tip>
Check out [IP-Adapter](../../../using-diffusers/ip_adapter) to learn more about how IP-Adapters work.
</Tip>
An IP-Adapter lets you prompt Flux with images, in addition to the text prompt. This is especially useful when describing complex concepts that are difficult to articulate through text alone and you have reference images.
```python
import torch
from diffusers import FluxPipeline
from diffusers.utils import load_image
pipe = FluxPipeline.from_pretrained(
"black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16
).to("cuda")
image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/flux_ip_adapter_input.jpg").resize((1024, 1024))
pipe.load_ip_adapter(
"XLabs-AI/flux-ip-adapter",
weight_name="ip_adapter.safetensors",
image_encoder_pretrained_model_name_or_path="openai/clip-vit-large-patch14"
)
pipe.set_ip_adapter_scale(1.0)
image = pipe(
width=1024,
height=1024,
prompt="wearing sunglasses",
negative_prompt="",
true_cfg=4.0,
generator=torch.Generator().manual_seed(4444),
ip_adapter_image=image,
).images[0]
image.save('flux_ip_adapter_output.jpg')
```
<div class="justify-center">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/flux_ip_adapter_output.jpg"/>
<figcaption class="mt-2 text-sm text-center text-gray-500">IP-Adapter examples with prompt "wearing sunglasses"</figcaption>
</div>
## Running FP16 inference
Flux can generate high-quality images with FP16 (for example, to accelerate inference on Turing/Volta GPUs), but it produces different outputs compared to FP32/BF16. The issue is that some activations in the text encoders have to be clipped when running in FP16, which affects the overall image. Forcing the text encoders to run in FP32 removes this output difference. See [here](https://github.com/huggingface/diffusers/pull/9097#issuecomment-2272292516) for details.
FP16 inference code:
```python
import torch
from diffusers import FluxPipeline
pipe = FluxPipeline.from_pretrained("black-forest-labs/FLUX.1-schnell", torch_dtype=torch.bfloat16) # can replace schnell with dev
# to run on low vram GPUs (i.e. between 4 and 32 GB VRAM)
pipe.enable_sequential_cpu_offload()
pipe.vae.enable_slicing()
pipe.vae.enable_tiling()
pipe.to(torch.float16) # casting here instead of in the pipeline constructor because doing so in the constructor loads all models into CPU memory at once
prompt = "A cat holding a sign that says hello world"
out = pipe(
prompt=prompt,
guidance_scale=0.,
height=768,
width=1360,
num_inference_steps=4,
max_sequence_length=256,
).images[0]
out.save("image.png")
```
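One possible way to apply the FP32 text-encoder workaround described above (a sketch that assumes `pipe` is the `FluxPipeline` from the previous snippet, not an official recipe):
```python
# Keep the rest of the pipeline in FP16 but run both text encoders in FP32
pipe.to(torch.float16)
pipe.text_encoder.to(torch.float32)    # CLIP text encoder
pipe.text_encoder_2.to(torch.float32)  # T5 text encoder
```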
## Quantization
Quantization helps reduce the memory requirements of very large models by storing model weights in a lower precision data type. However, quantization may have a varying impact on image quality depending on the model.
Refer to the [Quantization](../../quantization/overview) overview to learn more about supported quantization backends and selecting a quantization backend that supports your use case. The example below demonstrates how to load a quantized [`FluxPipeline`] for inference with bitsandbytes.
```py
import torch
from diffusers import BitsAndBytesConfig as DiffusersBitsAndBytesConfig, FluxTransformer2DModel, FluxPipeline
from transformers import BitsAndBytesConfig as BitsAndBytesConfig, T5EncoderModel
quant_config = BitsAndBytesConfig(load_in_8bit=True)
text_encoder_8bit = T5EncoderModel.from_pretrained(
"black-forest-labs/FLUX.1-dev",
subfolder="text_encoder_2",
quantization_config=quant_config,
torch_dtype=torch.float16,
)
quant_config = DiffusersBitsAndBytesConfig(load_in_8bit=True)
transformer_8bit = FluxTransformer2DModel.from_pretrained(
"black-forest-labs/FLUX.1-dev",
subfolder="transformer",
quantization_config=quant_config,
torch_dtype=torch.float16,
)
pipeline = FluxPipeline.from_pretrained(
"black-forest-labs/FLUX.1-dev",
text_encoder_2=text_encoder_8bit,
transformer=transformer_8bit,
torch_dtype=torch.float16,
device_map="balanced",
)
prompt = "a tiny astronaut hatching from an egg on the moon"
image = pipeline(prompt, guidance_scale=3.5, height=768, width=1360, num_inference_steps=50).images[0]
image.save("flux.png")
```
## Single File Loading for the `FluxTransformer2DModel`
The `FluxTransformer2DModel` supports loading checkpoints in the original format shipped by Black Forest Labs. This is also useful when trying to load finetunes or quantized versions of the models that have been published by the community.
<Tip>
`FP8` inference can be brittle depending on the GPU type, CUDA version, and `torch` version that you are using. It is recommended that you use the `optimum-quanto` library in order to run FP8 inference on your machine.
</Tip>
The following example demonstrates how to run Flux with less than 16GB of VRAM.
First install `optimum-quanto`
```shell
pip install optimum-quanto
```
Then run the following example
```python
import torch
from diffusers import FluxTransformer2DModel, FluxPipeline
from transformers import T5EncoderModel, CLIPTextModel
from optimum.quanto import freeze, qfloat8, quantize
bfl_repo = "black-forest-labs/FLUX.1-dev"
dtype = torch.bfloat16
transformer = FluxTransformer2DModel.from_single_file("https://huggingface.co/Kijai/flux-fp8/blob/main/flux1-dev-fp8.safetensors", torch_dtype=dtype)
quantize(transformer, weights=qfloat8)
freeze(transformer)
text_encoder_2 = T5EncoderModel.from_pretrained(bfl_repo, subfolder="text_encoder_2", torch_dtype=dtype)
quantize(text_encoder_2, weights=qfloat8)
freeze(text_encoder_2)
pipe = FluxPipeline.from_pretrained(bfl_repo, transformer=None, text_encoder_2=None, torch_dtype=dtype)
pipe.transformer = transformer
pipe.text_encoder_2 = text_encoder_2
pipe.enable_model_cpu_offload()
prompt = "A cat holding a sign that says hello world"
image = pipe(
prompt,
guidance_scale=3.5,
output_type="pil",
num_inference_steps=20,
generator=torch.Generator("cpu").manual_seed(0)
).images[0]
image.save("flux-fp8-dev.png")
```
## FluxPipeline
[[autodoc]] FluxPipeline
- all
- __call__
## FluxImg2ImgPipeline
[[autodoc]] FluxImg2ImgPipeline
- all
- __call__
## FluxInpaintPipeline
[[autodoc]] FluxInpaintPipeline
- all
- __call__
## FluxControlNetInpaintPipeline
[[autodoc]] FluxControlNetInpaintPipeline
- all
- __call__
## FluxControlNetImg2ImgPipeline
[[autodoc]] FluxControlNetImg2ImgPipeline
- all
- __call__
## FluxControlPipeline
[[autodoc]] FluxControlPipeline
- all
- __call__
## FluxControlImg2ImgPipeline
[[autodoc]] FluxControlImg2ImgPipeline
- all
- __call__
## FluxPriorReduxPipeline
[[autodoc]] FluxPriorReduxPipeline
- all
- __call__
## FluxFillPipeline
[[autodoc]] FluxFillPipeline
- all
- __call__
| diffusers/docs/source/en/api/pipelines/flux.md/0 | {
"file_path": "diffusers/docs/source/en/api/pipelines/flux.md",
"repo_id": "diffusers",
"token_count": 6995
} |