whisper-large-v3-turbo-FP8-dynamic

292
5
1 language
license:apache-2.0
by
RedHatAI
Audio Model
OTHER
New
292 downloads
Early-stage
Edge AI:
Mobile
Laptop
Server
Unknown
Mobile
Laptop
Server
Quick Summary

Model Overview - Model Architecture: whisper-large-v3-turbo - Input: Audio-Text - Output: Text - Model Optimizations: - Weight quantization: FP8 - Activation quantization: FP8 (dynamic)

Code Examples

Creation (bash)
# Quantize openai/whisper-large-v3-turbo to FP8-dynamic; output is written to output_dir/.
python quantize.py \
    --model_path openai/whisper-large-v3-turbo \
    --quant_path output_dir/whisper-large-v3-turbo-FP8-Dynamic
Creation (bash)
# Quantize openai/whisper-large-v3-turbo to FP8-dynamic; output is written to output_dir/.
python quantize.py \
    --model_path openai/whisper-large-v3-turbo \
    --quant_path output_dir/whisper-large-v3-turbo-FP8-Dynamic
Creation (bash)
# Quantize openai/whisper-large-v3-turbo to FP8-dynamic; output is written to output_dir/.
python quantize.py \
    --model_path openai/whisper-large-v3-turbo \
    --quant_path output_dir/whisper-large-v3-turbo-FP8-Dynamic
Creation (bash)
# Quantize openai/whisper-large-v3-turbo to FP8-dynamic; output is written to output_dir/.
python quantize.py \
    --model_path openai/whisper-large-v3-turbo \
    --quant_path output_dir/whisper-large-v3-turbo-FP8-Dynamic
Creation (bash)
# Quantize openai/whisper-large-v3-turbo to FP8-dynamic; output is written to output_dir/.
python quantize.py \
    --model_path openai/whisper-large-v3-turbo \
    --quant_path output_dir/whisper-large-v3-turbo-FP8-Dynamic
Creation (bash)
# Quantize openai/whisper-large-v3-turbo to FP8-dynamic; output is written to output_dir/.
python quantize.py \
    --model_path openai/whisper-large-v3-turbo \
    --quant_path output_dir/whisper-large-v3-turbo-FP8-Dynamic
Creation (bash)
# Quantize openai/whisper-large-v3-turbo to FP8-dynamic; output is written to output_dir/.
python quantize.py \
    --model_path openai/whisper-large-v3-turbo \
    --quant_path output_dir/whisper-large-v3-turbo-FP8-Dynamic
Creation (bash)
# Quantize openai/whisper-large-v3-turbo to FP8-dynamic; output is written to output_dir/.
python quantize.py \
    --model_path openai/whisper-large-v3-turbo \
    --quant_path output_dir/whisper-large-v3-turbo-FP8-Dynamic
Creation (bash)
# Quantize openai/whisper-large-v3-turbo to FP8-dynamic; output is written to output_dir/.
python quantize.py \
    --model_path openai/whisper-large-v3-turbo \
    --quant_path output_dir/whisper-large-v3-turbo-FP8-Dynamic
Creation (bash)
# Quantize openai/whisper-large-v3-turbo to FP8-dynamic; output is written to output_dir/.
python quantize.py \
    --model_path openai/whisper-large-v3-turbo \
    --quant_path output_dir/whisper-large-v3-turbo-FP8-Dynamic
Creation (bash)
# Quantize openai/whisper-large-v3-turbo to FP8-dynamic; output is written to output_dir/.
python quantize.py \
    --model_path openai/whisper-large-v3-turbo \
    --quant_path output_dir/whisper-large-v3-turbo-FP8-Dynamic
Creation (bash)
# Quantize openai/whisper-large-v3-turbo to FP8-dynamic; output is written to output_dir/.
python quantize.py \
    --model_path openai/whisper-large-v3-turbo \
    --quant_path output_dir/whisper-large-v3-turbo-FP8-Dynamic
Creation (bash)
# Quantize openai/whisper-large-v3-turbo to FP8-dynamic; output is written to output_dir/.
python quantize.py \
    --model_path openai/whisper-large-v3-turbo \
    --quant_path output_dir/whisper-large-v3-turbo-FP8-Dynamic
Creation script (Python, transformers)
"""Quantize a Whisper checkpoint to FP8 with dynamic activation quantization.

Applies llm-compressor's FP8_DYNAMIC scheme to every Linear layer of a
traceable WhisperForConditionalGeneration (lm_head excluded) and saves the
compressed model together with its processor.

Usage:
    python quantize.py \\
        --model_path openai/whisper-large-v3-turbo \\
        --quant_path output_dir/whisper-large-v3-turbo-FP8-Dynamic
"""
import argparse
import os

from transformers import WhisperProcessor

from llmcompressor import oneshot
from llmcompressor.modifiers.quantization import QuantizationModifier
from llmcompressor.transformers.tracing import TraceableWhisperForConditionalGeneration

# --- Args ---
parser = argparse.ArgumentParser(description="FP8-dynamic quantization for Whisper models")
parser.add_argument('--model_path', type=str, required=True,
                    help="HF hub id or local path of the source model")
parser.add_argument('--quant_path', type=str, required=True,
                    help="output directory for the quantized model")
# Accepted for CLI backward compatibility; the dynamic scheme below derives
# activation scales at runtime, so this observer value is never consumed.
parser.add_argument('--observer', type=str, default="minmax")
args = parser.parse_args()

# --- Load Model ---
model = TraceableWhisperForConditionalGeneration.from_pretrained(
    args.model_path,
    device_map="auto",   # place weights on available devices automatically
    torch_dtype="auto",  # keep the checkpoint's native dtype
)
# Clear forced decoder ids so the exported config does not pin language/task tokens.
model.config.forced_decoder_ids = None
processor = WhisperProcessor.from_pretrained(args.model_path)

# --- Recipe (FP8 Dynamic) ---
# FP8 weights with dynamic activation scales on all Linear modules; the
# lm_head projection is excluded from quantization.
recipe = [
    QuantizationModifier(
        targets="Linear",
        scheme="FP8_DYNAMIC",
        sequential_targets=["WhisperEncoderLayer", "WhisperDecoderLayer"],
        ignore=["re:.*lm_head"],
    )
]

# --- Run oneshot ---
# No calibration dataset is passed: FP8_DYNAMIC needs none.
oneshot(
    model=model,
    recipe=recipe,
    trust_remote_code_model=True,
)

# --- Save ---
os.makedirs(args.quant_path, exist_ok=True)
model.save_pretrained(args.quant_path, save_compressed=True)  # compressed-tensors format
processor.save_pretrained(args.quant_path)
Creation script (Python, transformers)
"""Quantize a Whisper checkpoint to FP8 with dynamic activation quantization.

Applies llm-compressor's FP8_DYNAMIC scheme to every Linear layer of a
traceable WhisperForConditionalGeneration (lm_head excluded) and saves the
compressed model together with its processor.

Usage:
    python quantize.py \\
        --model_path openai/whisper-large-v3-turbo \\
        --quant_path output_dir/whisper-large-v3-turbo-FP8-Dynamic
"""
import argparse
import os

from transformers import WhisperProcessor

from llmcompressor import oneshot
from llmcompressor.modifiers.quantization import QuantizationModifier
from llmcompressor.transformers.tracing import TraceableWhisperForConditionalGeneration

# --- Args ---
parser = argparse.ArgumentParser(description="FP8-dynamic quantization for Whisper models")
parser.add_argument('--model_path', type=str, required=True,
                    help="HF hub id or local path of the source model")
parser.add_argument('--quant_path', type=str, required=True,
                    help="output directory for the quantized model")
# Accepted for CLI backward compatibility; the dynamic scheme below derives
# activation scales at runtime, so this observer value is never consumed.
parser.add_argument('--observer', type=str, default="minmax")
args = parser.parse_args()

# --- Load Model ---
model = TraceableWhisperForConditionalGeneration.from_pretrained(
    args.model_path,
    device_map="auto",   # place weights on available devices automatically
    torch_dtype="auto",  # keep the checkpoint's native dtype
)
# Clear forced decoder ids so the exported config does not pin language/task tokens.
model.config.forced_decoder_ids = None
processor = WhisperProcessor.from_pretrained(args.model_path)

# --- Recipe (FP8 Dynamic) ---
# FP8 weights with dynamic activation scales on all Linear modules; the
# lm_head projection is excluded from quantization.
recipe = [
    QuantizationModifier(
        targets="Linear",
        scheme="FP8_DYNAMIC",
        sequential_targets=["WhisperEncoderLayer", "WhisperDecoderLayer"],
        ignore=["re:.*lm_head"],
    )
]

# --- Run oneshot ---
# No calibration dataset is passed: FP8_DYNAMIC needs none.
oneshot(
    model=model,
    recipe=recipe,
    trust_remote_code_model=True,
)

# --- Save ---
os.makedirs(args.quant_path, exist_ok=True)
model.save_pretrained(args.quant_path, save_compressed=True)  # compressed-tensors format
processor.save_pretrained(args.quant_path)
Creation script (Python, transformers)
"""Quantize a Whisper checkpoint to FP8 with dynamic activation quantization.

Applies llm-compressor's FP8_DYNAMIC scheme to every Linear layer of a
traceable WhisperForConditionalGeneration (lm_head excluded) and saves the
compressed model together with its processor.

Usage:
    python quantize.py \\
        --model_path openai/whisper-large-v3-turbo \\
        --quant_path output_dir/whisper-large-v3-turbo-FP8-Dynamic
"""
import argparse
import os

from transformers import WhisperProcessor

from llmcompressor import oneshot
from llmcompressor.modifiers.quantization import QuantizationModifier
from llmcompressor.transformers.tracing import TraceableWhisperForConditionalGeneration

# --- Args ---
parser = argparse.ArgumentParser(description="FP8-dynamic quantization for Whisper models")
parser.add_argument('--model_path', type=str, required=True,
                    help="HF hub id or local path of the source model")
parser.add_argument('--quant_path', type=str, required=True,
                    help="output directory for the quantized model")
# Accepted for CLI backward compatibility; the dynamic scheme below derives
# activation scales at runtime, so this observer value is never consumed.
parser.add_argument('--observer', type=str, default="minmax")
args = parser.parse_args()

# --- Load Model ---
model = TraceableWhisperForConditionalGeneration.from_pretrained(
    args.model_path,
    device_map="auto",   # place weights on available devices automatically
    torch_dtype="auto",  # keep the checkpoint's native dtype
)
# Clear forced decoder ids so the exported config does not pin language/task tokens.
model.config.forced_decoder_ids = None
processor = WhisperProcessor.from_pretrained(args.model_path)

# --- Recipe (FP8 Dynamic) ---
# FP8 weights with dynamic activation scales on all Linear modules; the
# lm_head projection is excluded from quantization.
recipe = [
    QuantizationModifier(
        targets="Linear",
        scheme="FP8_DYNAMIC",
        sequential_targets=["WhisperEncoderLayer", "WhisperDecoderLayer"],
        ignore=["re:.*lm_head"],
    )
]

# --- Run oneshot ---
# No calibration dataset is passed: FP8_DYNAMIC needs none.
oneshot(
    model=model,
    recipe=recipe,
    trust_remote_code_model=True,
)

# --- Save ---
os.makedirs(args.quant_path, exist_ok=True)
model.save_pretrained(args.quant_path, save_compressed=True)  # compressed-tensors format
processor.save_pretrained(args.quant_path)
Creation script (Python, transformers)
"""Quantize a Whisper checkpoint to FP8 with dynamic activation quantization.

Applies llm-compressor's FP8_DYNAMIC scheme to every Linear layer of a
traceable WhisperForConditionalGeneration (lm_head excluded) and saves the
compressed model together with its processor.

Usage:
    python quantize.py \\
        --model_path openai/whisper-large-v3-turbo \\
        --quant_path output_dir/whisper-large-v3-turbo-FP8-Dynamic
"""
import argparse
import os

from transformers import WhisperProcessor

from llmcompressor import oneshot
from llmcompressor.modifiers.quantization import QuantizationModifier
from llmcompressor.transformers.tracing import TraceableWhisperForConditionalGeneration

# --- Args ---
parser = argparse.ArgumentParser(description="FP8-dynamic quantization for Whisper models")
parser.add_argument('--model_path', type=str, required=True,
                    help="HF hub id or local path of the source model")
parser.add_argument('--quant_path', type=str, required=True,
                    help="output directory for the quantized model")
# Accepted for CLI backward compatibility; the dynamic scheme below derives
# activation scales at runtime, so this observer value is never consumed.
parser.add_argument('--observer', type=str, default="minmax")
args = parser.parse_args()

# --- Load Model ---
model = TraceableWhisperForConditionalGeneration.from_pretrained(
    args.model_path,
    device_map="auto",   # place weights on available devices automatically
    torch_dtype="auto",  # keep the checkpoint's native dtype
)
# Clear forced decoder ids so the exported config does not pin language/task tokens.
model.config.forced_decoder_ids = None
processor = WhisperProcessor.from_pretrained(args.model_path)

# --- Recipe (FP8 Dynamic) ---
# FP8 weights with dynamic activation scales on all Linear modules; the
# lm_head projection is excluded from quantization.
recipe = [
    QuantizationModifier(
        targets="Linear",
        scheme="FP8_DYNAMIC",
        sequential_targets=["WhisperEncoderLayer", "WhisperDecoderLayer"],
        ignore=["re:.*lm_head"],
    )
]

# --- Run oneshot ---
# No calibration dataset is passed: FP8_DYNAMIC needs none.
oneshot(
    model=model,
    recipe=recipe,
    trust_remote_code_model=True,
)

# --- Save ---
os.makedirs(args.quant_path, exist_ok=True)
model.save_pretrained(args.quant_path, save_compressed=True)  # compressed-tensors format
processor.save_pretrained(args.quant_path)
Creation script (Python, transformers)
"""Quantize a Whisper checkpoint to FP8 with dynamic activation quantization.

Applies llm-compressor's FP8_DYNAMIC scheme to every Linear layer of a
traceable WhisperForConditionalGeneration (lm_head excluded) and saves the
compressed model together with its processor.

Usage:
    python quantize.py \\
        --model_path openai/whisper-large-v3-turbo \\
        --quant_path output_dir/whisper-large-v3-turbo-FP8-Dynamic
"""
import argparse
import os

from transformers import WhisperProcessor

from llmcompressor import oneshot
from llmcompressor.modifiers.quantization import QuantizationModifier
from llmcompressor.transformers.tracing import TraceableWhisperForConditionalGeneration

# --- Args ---
parser = argparse.ArgumentParser(description="FP8-dynamic quantization for Whisper models")
parser.add_argument('--model_path', type=str, required=True,
                    help="HF hub id or local path of the source model")
parser.add_argument('--quant_path', type=str, required=True,
                    help="output directory for the quantized model")
# Accepted for CLI backward compatibility; the dynamic scheme below derives
# activation scales at runtime, so this observer value is never consumed.
parser.add_argument('--observer', type=str, default="minmax")
args = parser.parse_args()

# --- Load Model ---
model = TraceableWhisperForConditionalGeneration.from_pretrained(
    args.model_path,
    device_map="auto",   # place weights on available devices automatically
    torch_dtype="auto",  # keep the checkpoint's native dtype
)
# Clear forced decoder ids so the exported config does not pin language/task tokens.
model.config.forced_decoder_ids = None
processor = WhisperProcessor.from_pretrained(args.model_path)

# --- Recipe (FP8 Dynamic) ---
# FP8 weights with dynamic activation scales on all Linear modules; the
# lm_head projection is excluded from quantization.
recipe = [
    QuantizationModifier(
        targets="Linear",
        scheme="FP8_DYNAMIC",
        sequential_targets=["WhisperEncoderLayer", "WhisperDecoderLayer"],
        ignore=["re:.*lm_head"],
    )
]

# --- Run oneshot ---
# No calibration dataset is passed: FP8_DYNAMIC needs none.
oneshot(
    model=model,
    recipe=recipe,
    trust_remote_code_model=True,
)

# --- Save ---
os.makedirs(args.quant_path, exist_ok=True)
model.save_pretrained(args.quant_path, save_compressed=True)  # compressed-tensors format
processor.save_pretrained(args.quant_path)
Creation script (Python, transformers)
"""Quantize a Whisper checkpoint to FP8 with dynamic activation quantization.

Applies llm-compressor's FP8_DYNAMIC scheme to every Linear layer of a
traceable WhisperForConditionalGeneration (lm_head excluded) and saves the
compressed model together with its processor.

Usage:
    python quantize.py \\
        --model_path openai/whisper-large-v3-turbo \\
        --quant_path output_dir/whisper-large-v3-turbo-FP8-Dynamic
"""
import argparse
import os

from transformers import WhisperProcessor

from llmcompressor import oneshot
from llmcompressor.modifiers.quantization import QuantizationModifier
from llmcompressor.transformers.tracing import TraceableWhisperForConditionalGeneration

# --- Args ---
parser = argparse.ArgumentParser(description="FP8-dynamic quantization for Whisper models")
parser.add_argument('--model_path', type=str, required=True,
                    help="HF hub id or local path of the source model")
parser.add_argument('--quant_path', type=str, required=True,
                    help="output directory for the quantized model")
# Accepted for CLI backward compatibility; the dynamic scheme below derives
# activation scales at runtime, so this observer value is never consumed.
parser.add_argument('--observer', type=str, default="minmax")
args = parser.parse_args()

# --- Load Model ---
model = TraceableWhisperForConditionalGeneration.from_pretrained(
    args.model_path,
    device_map="auto",   # place weights on available devices automatically
    torch_dtype="auto",  # keep the checkpoint's native dtype
)
# Clear forced decoder ids so the exported config does not pin language/task tokens.
model.config.forced_decoder_ids = None
processor = WhisperProcessor.from_pretrained(args.model_path)

# --- Recipe (FP8 Dynamic) ---
# FP8 weights with dynamic activation scales on all Linear modules; the
# lm_head projection is excluded from quantization.
recipe = [
    QuantizationModifier(
        targets="Linear",
        scheme="FP8_DYNAMIC",
        sequential_targets=["WhisperEncoderLayer", "WhisperDecoderLayer"],
        ignore=["re:.*lm_head"],
    )
]

# --- Run oneshot ---
# No calibration dataset is passed: FP8_DYNAMIC needs none.
oneshot(
    model=model,
    recipe=recipe,
    trust_remote_code_model=True,
)

# --- Save ---
os.makedirs(args.quant_path, exist_ok=True)
model.save_pretrained(args.quant_path, save_compressed=True)  # compressed-tensors format
processor.save_pretrained(args.quant_path)
Creation script (Python, transformers)
"""Quantize a Whisper checkpoint to FP8 with dynamic activation quantization.

Applies llm-compressor's FP8_DYNAMIC scheme to every Linear layer of a
traceable WhisperForConditionalGeneration (lm_head excluded) and saves the
compressed model together with its processor.

Usage:
    python quantize.py \\
        --model_path openai/whisper-large-v3-turbo \\
        --quant_path output_dir/whisper-large-v3-turbo-FP8-Dynamic
"""
import argparse
import os

from transformers import WhisperProcessor

from llmcompressor import oneshot
from llmcompressor.modifiers.quantization import QuantizationModifier
from llmcompressor.transformers.tracing import TraceableWhisperForConditionalGeneration

# --- Args ---
parser = argparse.ArgumentParser(description="FP8-dynamic quantization for Whisper models")
parser.add_argument('--model_path', type=str, required=True,
                    help="HF hub id or local path of the source model")
parser.add_argument('--quant_path', type=str, required=True,
                    help="output directory for the quantized model")
# Accepted for CLI backward compatibility; the dynamic scheme below derives
# activation scales at runtime, so this observer value is never consumed.
parser.add_argument('--observer', type=str, default="minmax")
args = parser.parse_args()

# --- Load Model ---
model = TraceableWhisperForConditionalGeneration.from_pretrained(
    args.model_path,
    device_map="auto",   # place weights on available devices automatically
    torch_dtype="auto",  # keep the checkpoint's native dtype
)
# Clear forced decoder ids so the exported config does not pin language/task tokens.
model.config.forced_decoder_ids = None
processor = WhisperProcessor.from_pretrained(args.model_path)

# --- Recipe (FP8 Dynamic) ---
# FP8 weights with dynamic activation scales on all Linear modules; the
# lm_head projection is excluded from quantization.
recipe = [
    QuantizationModifier(
        targets="Linear",
        scheme="FP8_DYNAMIC",
        sequential_targets=["WhisperEncoderLayer", "WhisperDecoderLayer"],
        ignore=["re:.*lm_head"],
    )
]

# --- Run oneshot ---
# No calibration dataset is passed: FP8_DYNAMIC needs none.
oneshot(
    model=model,
    recipe=recipe,
    trust_remote_code_model=True,
)

# --- Save ---
os.makedirs(args.quant_path, exist_ok=True)
model.save_pretrained(args.quant_path, save_compressed=True)  # compressed-tensors format
processor.save_pretrained(args.quant_path)
Creation script (Python, transformers)
"""Quantize a Whisper checkpoint to FP8 with dynamic activation quantization.

Applies llm-compressor's FP8_DYNAMIC scheme to every Linear layer of a
traceable WhisperForConditionalGeneration (lm_head excluded) and saves the
compressed model together with its processor.

Usage:
    python quantize.py \\
        --model_path openai/whisper-large-v3-turbo \\
        --quant_path output_dir/whisper-large-v3-turbo-FP8-Dynamic
"""
import argparse
import os

from transformers import WhisperProcessor

from llmcompressor import oneshot
from llmcompressor.modifiers.quantization import QuantizationModifier
from llmcompressor.transformers.tracing import TraceableWhisperForConditionalGeneration

# --- Args ---
parser = argparse.ArgumentParser(description="FP8-dynamic quantization for Whisper models")
parser.add_argument('--model_path', type=str, required=True,
                    help="HF hub id or local path of the source model")
parser.add_argument('--quant_path', type=str, required=True,
                    help="output directory for the quantized model")
# Accepted for CLI backward compatibility; the dynamic scheme below derives
# activation scales at runtime, so this observer value is never consumed.
parser.add_argument('--observer', type=str, default="minmax")
args = parser.parse_args()

# --- Load Model ---
model = TraceableWhisperForConditionalGeneration.from_pretrained(
    args.model_path,
    device_map="auto",   # place weights on available devices automatically
    torch_dtype="auto",  # keep the checkpoint's native dtype
)
# Clear forced decoder ids so the exported config does not pin language/task tokens.
model.config.forced_decoder_ids = None
processor = WhisperProcessor.from_pretrained(args.model_path)

# --- Recipe (FP8 Dynamic) ---
# FP8 weights with dynamic activation scales on all Linear modules; the
# lm_head projection is excluded from quantization.
recipe = [
    QuantizationModifier(
        targets="Linear",
        scheme="FP8_DYNAMIC",
        sequential_targets=["WhisperEncoderLayer", "WhisperDecoderLayer"],
        ignore=["re:.*lm_head"],
    )
]

# --- Run oneshot ---
# No calibration dataset is passed: FP8_DYNAMIC needs none.
oneshot(
    model=model,
    recipe=recipe,
    trust_remote_code_model=True,
)

# --- Save ---
os.makedirs(args.quant_path, exist_ok=True)
model.save_pretrained(args.quant_path, save_compressed=True)  # compressed-tensors format
processor.save_pretrained(args.quant_path)
Creation script (Python, transformers)
"""Quantize a Whisper checkpoint to FP8 with dynamic activation quantization.

Applies llm-compressor's FP8_DYNAMIC scheme to every Linear layer of a
traceable WhisperForConditionalGeneration (lm_head excluded) and saves the
compressed model together with its processor.

Usage:
    python quantize.py \\
        --model_path openai/whisper-large-v3-turbo \\
        --quant_path output_dir/whisper-large-v3-turbo-FP8-Dynamic
"""
import argparse
import os

from transformers import WhisperProcessor

from llmcompressor import oneshot
from llmcompressor.modifiers.quantization import QuantizationModifier
from llmcompressor.transformers.tracing import TraceableWhisperForConditionalGeneration

# --- Args ---
parser = argparse.ArgumentParser(description="FP8-dynamic quantization for Whisper models")
parser.add_argument('--model_path', type=str, required=True,
                    help="HF hub id or local path of the source model")
parser.add_argument('--quant_path', type=str, required=True,
                    help="output directory for the quantized model")
# Accepted for CLI backward compatibility; the dynamic scheme below derives
# activation scales at runtime, so this observer value is never consumed.
parser.add_argument('--observer', type=str, default="minmax")
args = parser.parse_args()

# --- Load Model ---
model = TraceableWhisperForConditionalGeneration.from_pretrained(
    args.model_path,
    device_map="auto",   # place weights on available devices automatically
    torch_dtype="auto",  # keep the checkpoint's native dtype
)
# Clear forced decoder ids so the exported config does not pin language/task tokens.
model.config.forced_decoder_ids = None
processor = WhisperProcessor.from_pretrained(args.model_path)

# --- Recipe (FP8 Dynamic) ---
# FP8 weights with dynamic activation scales on all Linear modules; the
# lm_head projection is excluded from quantization.
recipe = [
    QuantizationModifier(
        targets="Linear",
        scheme="FP8_DYNAMIC",
        sequential_targets=["WhisperEncoderLayer", "WhisperDecoderLayer"],
        ignore=["re:.*lm_head"],
    )
]

# --- Run oneshot ---
# No calibration dataset is passed: FP8_DYNAMIC needs none.
oneshot(
    model=model,
    recipe=recipe,
    trust_remote_code_model=True,
)

# --- Save ---
os.makedirs(args.quant_path, exist_ok=True)
model.save_pretrained(args.quant_path, save_compressed=True)  # compressed-tensors format
processor.save_pretrained(args.quant_path)
Creation script (Python, transformers)
"""Quantize a Whisper checkpoint to FP8 with dynamic activation quantization.

Applies llm-compressor's FP8_DYNAMIC scheme to every Linear layer of a
traceable WhisperForConditionalGeneration (lm_head excluded) and saves the
compressed model together with its processor.

Usage:
    python quantize.py \\
        --model_path openai/whisper-large-v3-turbo \\
        --quant_path output_dir/whisper-large-v3-turbo-FP8-Dynamic
"""
import argparse
import os

from transformers import WhisperProcessor

from llmcompressor import oneshot
from llmcompressor.modifiers.quantization import QuantizationModifier
from llmcompressor.transformers.tracing import TraceableWhisperForConditionalGeneration

# --- Args ---
parser = argparse.ArgumentParser(description="FP8-dynamic quantization for Whisper models")
parser.add_argument('--model_path', type=str, required=True,
                    help="HF hub id or local path of the source model")
parser.add_argument('--quant_path', type=str, required=True,
                    help="output directory for the quantized model")
# Accepted for CLI backward compatibility; the dynamic scheme below derives
# activation scales at runtime, so this observer value is never consumed.
parser.add_argument('--observer', type=str, default="minmax")
args = parser.parse_args()

# --- Load Model ---
model = TraceableWhisperForConditionalGeneration.from_pretrained(
    args.model_path,
    device_map="auto",   # place weights on available devices automatically
    torch_dtype="auto",  # keep the checkpoint's native dtype
)
# Clear forced decoder ids so the exported config does not pin language/task tokens.
model.config.forced_decoder_ids = None
processor = WhisperProcessor.from_pretrained(args.model_path)

# --- Recipe (FP8 Dynamic) ---
# FP8 weights with dynamic activation scales on all Linear modules; the
# lm_head projection is excluded from quantization.
recipe = [
    QuantizationModifier(
        targets="Linear",
        scheme="FP8_DYNAMIC",
        sequential_targets=["WhisperEncoderLayer", "WhisperDecoderLayer"],
        ignore=["re:.*lm_head"],
    )
]

# --- Run oneshot ---
# No calibration dataset is passed: FP8_DYNAMIC needs none.
oneshot(
    model=model,
    recipe=recipe,
    trust_remote_code_model=True,
)

# --- Save ---
os.makedirs(args.quant_path, exist_ok=True)
model.save_pretrained(args.quant_path, save_compressed=True)  # compressed-tensors format
processor.save_pretrained(args.quant_path)
Creation script (Python, transformers)
"""Quantize a Whisper checkpoint to FP8 with dynamic activation quantization.

Applies llm-compressor's FP8_DYNAMIC scheme to every Linear layer of a
traceable WhisperForConditionalGeneration (lm_head excluded) and saves the
compressed model together with its processor.

Usage:
    python quantize.py \\
        --model_path openai/whisper-large-v3-turbo \\
        --quant_path output_dir/whisper-large-v3-turbo-FP8-Dynamic
"""
import argparse
import os

from transformers import WhisperProcessor

from llmcompressor import oneshot
from llmcompressor.modifiers.quantization import QuantizationModifier
from llmcompressor.transformers.tracing import TraceableWhisperForConditionalGeneration

# --- Args ---
parser = argparse.ArgumentParser(description="FP8-dynamic quantization for Whisper models")
parser.add_argument('--model_path', type=str, required=True,
                    help="HF hub id or local path of the source model")
parser.add_argument('--quant_path', type=str, required=True,
                    help="output directory for the quantized model")
# Accepted for CLI backward compatibility; the dynamic scheme below derives
# activation scales at runtime, so this observer value is never consumed.
parser.add_argument('--observer', type=str, default="minmax")
args = parser.parse_args()

# --- Load Model ---
model = TraceableWhisperForConditionalGeneration.from_pretrained(
    args.model_path,
    device_map="auto",   # place weights on available devices automatically
    torch_dtype="auto",  # keep the checkpoint's native dtype
)
# Clear forced decoder ids so the exported config does not pin language/task tokens.
model.config.forced_decoder_ids = None
processor = WhisperProcessor.from_pretrained(args.model_path)

# --- Recipe (FP8 Dynamic) ---
# FP8 weights with dynamic activation scales on all Linear modules; the
# lm_head projection is excluded from quantization.
recipe = [
    QuantizationModifier(
        targets="Linear",
        scheme="FP8_DYNAMIC",
        sequential_targets=["WhisperEncoderLayer", "WhisperDecoderLayer"],
        ignore=["re:.*lm_head"],
    )
]

# --- Run oneshot ---
# No calibration dataset is passed: FP8_DYNAMIC needs none.
oneshot(
    model=model,
    recipe=recipe,
    trust_remote_code_model=True,
)

# --- Save ---
os.makedirs(args.quant_path, exist_ok=True)
model.save_pretrained(args.quant_path, save_compressed=True)  # compressed-tensors format
processor.save_pretrained(args.quant_path)
Creation script (Python, transformers)
"""Quantize a Whisper checkpoint to FP8 with dynamic activation quantization.

Applies llm-compressor's FP8_DYNAMIC scheme to every Linear layer of a
traceable WhisperForConditionalGeneration (lm_head excluded) and saves the
compressed model together with its processor.

Usage:
    python quantize.py \\
        --model_path openai/whisper-large-v3-turbo \\
        --quant_path output_dir/whisper-large-v3-turbo-FP8-Dynamic
"""
import argparse
import os

from transformers import WhisperProcessor

from llmcompressor import oneshot
from llmcompressor.modifiers.quantization import QuantizationModifier
from llmcompressor.transformers.tracing import TraceableWhisperForConditionalGeneration

# --- Args ---
parser = argparse.ArgumentParser(description="FP8-dynamic quantization for Whisper models")
parser.add_argument('--model_path', type=str, required=True,
                    help="HF hub id or local path of the source model")
parser.add_argument('--quant_path', type=str, required=True,
                    help="output directory for the quantized model")
# Accepted for CLI backward compatibility; the dynamic scheme below derives
# activation scales at runtime, so this observer value is never consumed.
parser.add_argument('--observer', type=str, default="minmax")
args = parser.parse_args()

# --- Load Model ---
model = TraceableWhisperForConditionalGeneration.from_pretrained(
    args.model_path,
    device_map="auto",   # place weights on available devices automatically
    torch_dtype="auto",  # keep the checkpoint's native dtype
)
# Clear forced decoder ids so the exported config does not pin language/task tokens.
model.config.forced_decoder_ids = None
processor = WhisperProcessor.from_pretrained(args.model_path)

# --- Recipe (FP8 Dynamic) ---
# FP8 weights with dynamic activation scales on all Linear modules; the
# lm_head projection is excluded from quantization.
recipe = [
    QuantizationModifier(
        targets="Linear",
        scheme="FP8_DYNAMIC",
        sequential_targets=["WhisperEncoderLayer", "WhisperDecoderLayer"],
        ignore=["re:.*lm_head"],
    )
]

# --- Run oneshot ---
# No calibration dataset is passed: FP8_DYNAMIC needs none.
oneshot(
    model=model,
    recipe=recipe,
    trust_remote_code_model=True,
)

# --- Save ---
os.makedirs(args.quant_path, exist_ok=True)
model.save_pretrained(args.quant_path, save_compressed=True)  # compressed-tensors format
processor.save_pretrained(args.quant_path)
Creation script (Python, transformers)
"""Quantize a Whisper checkpoint to FP8 with dynamic activation quantization.

Applies llm-compressor's FP8_DYNAMIC scheme to every Linear layer of a
traceable WhisperForConditionalGeneration (lm_head excluded) and saves the
compressed model together with its processor.

Usage:
    python quantize.py \\
        --model_path openai/whisper-large-v3-turbo \\
        --quant_path output_dir/whisper-large-v3-turbo-FP8-Dynamic
"""
import argparse
import os

from transformers import WhisperProcessor

from llmcompressor import oneshot
from llmcompressor.modifiers.quantization import QuantizationModifier
from llmcompressor.transformers.tracing import TraceableWhisperForConditionalGeneration

# --- Args ---
parser = argparse.ArgumentParser(description="FP8-dynamic quantization for Whisper models")
parser.add_argument('--model_path', type=str, required=True,
                    help="HF hub id or local path of the source model")
parser.add_argument('--quant_path', type=str, required=True,
                    help="output directory for the quantized model")
# Accepted for CLI backward compatibility; the dynamic scheme below derives
# activation scales at runtime, so this observer value is never consumed.
parser.add_argument('--observer', type=str, default="minmax")
args = parser.parse_args()

# --- Load Model ---
model = TraceableWhisperForConditionalGeneration.from_pretrained(
    args.model_path,
    device_map="auto",   # place weights on available devices automatically
    torch_dtype="auto",  # keep the checkpoint's native dtype
)
# Clear forced decoder ids so the exported config does not pin language/task tokens.
model.config.forced_decoder_ids = None
processor = WhisperProcessor.from_pretrained(args.model_path)

# --- Recipe (FP8 Dynamic) ---
# FP8 weights with dynamic activation scales on all Linear modules; the
# lm_head projection is excluded from quantization.
recipe = [
    QuantizationModifier(
        targets="Linear",
        scheme="FP8_DYNAMIC",
        sequential_targets=["WhisperEncoderLayer", "WhisperDecoderLayer"],
        ignore=["re:.*lm_head"],
    )
]

# --- Run oneshot ---
# No calibration dataset is passed: FP8_DYNAMIC needs none.
oneshot(
    model=model,
    recipe=recipe,
    trust_remote_code_model=True,
)

# --- Save ---
os.makedirs(args.quant_path, exist_ok=True)
model.save_pretrained(args.quant_path, save_compressed=True)  # compressed-tensors format
processor.save_pretrained(args.quant_path)

Deploy This Model

Production-ready deployment in minutes

Together.ai

Instant API access to this model

Fastest API

Production-ready inference API. Start free, scale to millions.

Try Free API

Replicate

One-click model deployment

Easiest Setup

Run models in the cloud with simple API. No DevOps required.

Deploy Now

Disclosure: We may earn a commission from these partners. This helps keep LLMYourWay free.