distil-large-v3
1.3M
348
2 languages
FP32
license:mit
by
distil-whisper
Audio Model
OTHER
High
1.3M downloads
Battle-tested
Edge AI:
Mobile
Laptop
Server
Unknown
Mobile
Laptop
Server
Quick Summary
Model card metadata — language: en; license: MIT; library: transformers; tags: audio, automatic-speech-recognition, transformers.
Code Examples
bash
pip install --upgrade pip
pip install --upgrade transformers accelerate datasets[audio]
bash
pip install --upgrade pip
pip install --upgrade transformers accelerate datasets[audio]
text
pip install flash-attn --no-build-isolation
text
pip install flash-attn --no-build-isolation
text
git clone https://github.com/ggerganov/whisper.cpp.git
cd whisper.cpp
text
git clone https://github.com/ggerganov/whisper.cpp.git
cd whisper.cpp
bash
pip install --upgrade huggingface_hub
bash
pip install --upgrade huggingface_hub
python
from huggingface_hub import hf_hub_download
hf_hub_download(repo_id='distil-whisper/distil-large-v3-ggml', filename='ggml-distil-large-v3.bin', local_dir='./models')
python
from huggingface_hub import hf_hub_download
hf_hub_download(repo_id='distil-whisper/distil-large-v3-ggml', filename='ggml-distil-large-v3.bin', local_dir='./models')
bash
pip install --upgrade pip
pip install --upgrade git+https://github.com/SYSTRAN/faster-whisper datasets[audio]
bash
pip install --upgrade pip
pip install --upgrade git+https://github.com/SYSTRAN/faster-whisper datasets[audio]
bash
pip install --upgrade pip
pip install --upgrade openai-whisper datasets[audio]
bash
pip install --upgrade pip
pip install --upgrade openai-whisper datasets[audio]
python
from huggingface_hub import hf_hub_download
from datasets import load_dataset
from whisper import load_model, transcribe
model_path = hf_hub_download(repo_id="distil-whisper/distil-large-v3-openai", filename="model.bin")
model = load_model(model_path)
dataset = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
sample = dataset[0]["audio"]["path"]
pred_out = transcribe(model, audio=sample, language="en")
print(pred_out["text"])python
from huggingface_hub import hf_hub_download
from datasets import load_dataset
from whisper import load_model, transcribe
model_path = hf_hub_download(repo_id="distil-whisper/distil-large-v3-openai", filename="model.bin")
model = load_model(model_path)
dataset = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
sample = dataset[0]["audio"]["path"]
pred_out = transcribe(model, audio=sample, language="en")
print(pred_out["text"])bash
npm i @xenova/transformersbash
npm i @xenova/transformersjavascript
import { pipeline } from '@xenova/transformers';
const transcriber = await pipeline('automatic-speech-recognition', 'distil-whisper/distil-large-v3');
const url = 'https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/jfk.wav';
const output = await transcriber(url);
// { text: " And so, my fellow Americans, ask not what your country can do for you. Ask what you can do for your country." }
javascript
import { pipeline } from '@xenova/transformers';
const transcriber = await pipeline('automatic-speech-recognition', 'distil-whisper/distil-large-v3');
const url = 'https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/jfk.wav';
const output = await transcriber(url);
// { text: " And so, my fellow Americans, ask not what your country can do for you. Ask what you can do for your country." }
text
git clone https://github.com/huggingface/candle.git
text
git clone https://github.com/huggingface/candle.git
text
cargo clean
text
cargo clean
text
cargo run --example whisper --release --features symphonia -- --model distil-large-v3
text
cargo run --example whisper --release --features symphonia -- --model distil-large-v3
bash
pip install --upgrade pip
pip install --upgrade transformers datasets[audio] evaluate jiwer
bash
pip install --upgrade pip
pip install --upgrade transformers datasets[audio] evaluate jiwer
Deploy This Model
Production-ready deployment in minutes
Together.ai
Instant API access to this model
Production-ready inference API. Start free, scale to millions.
Try Free API
Replicate
One-click model deployment
Run models in the cloud with simple API. No DevOps required.
Deploy Now
Disclosure: We may earn a commission from these partners. This helps keep LLMYourWay free.