whisper-large-v2-W4A16-G128
by RedHatAI
Audio Model, 1 language
License: apache-2.0
arXiv: 2212.04356
New, early-stage, 1 download
Edge AI: Mobile, Laptop, Server
Quick Summary
Model Overview
- Model Architecture: whisper-large-v2
  - Input: Audio-Text
  - Output: Text
- Model Optimizations:
  - Weight quantization: INT4
  - Activation quantization: FP16
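
Deployment (Python, vLLM)
A compressed-tensors checkpoint like this is typically served with vLLM. The sketch below is a minimal, unverified example: it assumes the model is published under the repository id RedHatAI/whisper-large-v2-W4A16-G128 (inferred from this card's title) and that your vLLM build includes Whisper encoder-decoder and audio-input support. Adjust the repo id and limits to your setup.

# Minimal sketch: serving this W4A16 checkpoint with vLLM.
# Assumes a vLLM version with Whisper (encoder/decoder) audio support;
# the repository id below is an assumption based on this card's title.
from vllm import LLM, SamplingParams
from vllm.assets.audio import AudioAsset

llm = LLM(
    model="RedHatAI/whisper-large-v2-W4A16-G128",  # assumed repo id
    max_model_len=448,  # Whisper's decoder context length
    limit_mm_per_prompt={"audio": 1},
)

# Whisper is an encoder-decoder model: the audio feeds the encoder,
# the transcription prefix feeds the decoder.
inputs = {
    "encoder_prompt": {
        "prompt": "",
        "multi_modal_data": {
            "audio": AudioAsset("winning_call").audio_and_sample_rate,
        },
    },
    "decoder_prompt": "<|startoftranscript|>",
}

outputs = llm.generate(inputs, SamplingParams(temperature=0.0, max_tokens=64))
print(outputs[0].outputs[0].text)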
Device Compatibility
- Mobile: 4-6GB RAM
- Laptop: 16GB RAM
- Server: GPU
- Minimum recommended: ~2GB RAM (see the estimate below)
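
These RAM figures follow from the weight footprint: whisper-large-v2 has roughly 1.55B parameters, so INT4 weights take about 0.8GB versus roughly 3.1GB at FP16, before quantization scales, activations, and runtime overhead. A back-of-envelope check (the 1.55B figure is the commonly cited parameter count, not taken from this card):

# Rough weight-memory estimate for whisper-large-v2 (approx. 1.55B params).
params = 1.55e9
print(f"FP16 weights: {params * 2 / 1e9:.1f} GB")    # ~3.1 GB
print(f"INT4 weights: {params * 0.5 / 1e9:.1f} GB")  # ~0.8 GB (plus scales/zeros)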
Code Examples
Creation (Python, transformers)
import torch
from datasets import load_dataset
from transformers import WhisperProcessor

from llmcompressor.modifiers.quantization import GPTQModifier
from llmcompressor.transformers import oneshot
from llmcompressor.transformers.tracing import TraceableWhisperForConditionalGeneration

# Select model and load it.
model_id = "openai/whisper-large-v2"
model = TraceableWhisperForConditionalGeneration.from_pretrained(
    model_id,
    device_map="auto",
    torch_dtype="auto",
)
processor = WhisperProcessor.from_pretrained(model_id)

# Configure the processor for the dataset task.
processor.tokenizer.set_prefix_tokens(language="en", task="transcribe")

# Select calibration dataset.
DATASET_ID = "MLCommons/peoples_speech"
DATASET_SUBSET = "test"
DATASET_SPLIT = "test"

# Select number of samples. 512 samples is a good place to start.
# Increasing the number of samples can improve accuracy.
NUM_CALIBRATION_SAMPLES = 512
MAX_SEQUENCE_LENGTH = 2048

# Load dataset and preprocess.
ds = load_dataset(
    DATASET_ID,
    DATASET_SUBSET,
    split=f"{DATASET_SPLIT}[:{NUM_CALIBRATION_SAMPLES}]",
    trust_remote_code=True,
)

# Preprocess and tokenize inputs.
def preprocess_and_tokenize(example):
    audio = example["audio"]["array"]
    sampling_rate = example["audio"]["sampling_rate"]
    text = " " + example["text"].capitalize()

    audio_inputs = processor(
        audio=audio,
        sampling_rate=sampling_rate,
        return_tensors="pt",
    )
    text_inputs = processor(
        text=text,
        add_special_tokens=True,
        return_tensors="pt",
    )
    # The tokenized transcript drives the decoder during calibration.
    text_inputs["decoder_input_ids"] = text_inputs["input_ids"]
    del text_inputs["input_ids"]

    return dict(**audio_inputs, **text_inputs)

ds = ds.map(preprocess_and_tokenize, remove_columns=ds.column_names)

# Define a oneshot data collator for multimodal inputs.
def data_collator(batch):
    assert len(batch) == 1
    return {key: torch.tensor(value) for key, value in batch[0].items()}

# Recipe: GPTQ-quantize every Linear layer's weights to INT4 (group size 128,
# hence the G128 in the model name), leaving lm_head in higher precision.
recipe = GPTQModifier(targets="Linear", scheme="W4A16", ignore=["lm_head"])

# Apply algorithms and write the compressed model to disk.
SAVE_DIR = model_id.split("/")[1] + "-W4A16-G128"
oneshot(
    model=model,
    dataset=ds,
    recipe=recipe,
    max_seq_length=MAX_SEQUENCE_LENGTH,
    num_calibration_samples=NUM_CALIBRATION_SAMPLES,
    data_collator=data_collator,
    output_dir=SAVE_DIR,
)
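
After oneshot finishes, it is worth confirming that the quantized model still transcribes sensibly and saving the processor next to the weights so the checkpoint is self-contained. The check below is a sketch in the style of llm-compressor's audio examples; treat the exact generate kwargs as assumptions for your version.

# Sanity-check sketch: run one calibration sample through the quantized
# model and decode the transcription. Kwargs are assumptions, adapted
# from typical llm-compressor audio examples.
print("========== SAMPLE GENERATION ==============")
sample = next(iter(ds))
sample_input = {
    "input_features": torch.tensor(sample["input_features"]).to(model.device),
    "decoder_input_ids": torch.tensor([processor.tokenizer.prefix_tokens]).to(model.device),
}
output = model.generate(**sample_input, max_new_tokens=64)
print(processor.batch_decode(output, skip_special_tokens=True))

# Save the processor alongside the quantized weights.
processor.save_pretrained(SAVE_DIR)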