Malaysian-TTS-4B-v0.1
105
—
by
mesolitica
Language Model
OTHER
4B params
New
105 downloads
Early-stage
Edge AI:
Mobile
Laptop
Server
9GB+ RAM
Mobile
Laptop
Server
Quick Summary
Continue pretraining of Qwen/Qwen3-4B-Base on the mesolitica/Malaysian-TTS-v2 dataset.
Device Compatibility
Mobile
4-6GB RAM
Laptop
16GB RAM
Server
GPU
Minimum Recommended
4GB+ RAM
Code Examples
How to use (Python, transformers)

# wget https://huggingface.co/IDEA-Emdoor/DistilCodec-v1.0/resolve/main/model_config.json
# wget https://huggingface.co/IDEA-Emdoor/DistilCodec-v1.0/resolve/main/g_00204000
from distilcodec import DistilCodec, demo_for_generate_audio_codes
from transformers import AutoTokenizer, AutoModelForCausalLM

codec_model_config_path = 'model_config.json'
codec_ckpt_path = 'g_00204000'

codec = DistilCodec.from_pretrained(
    config_path=codec_model_config_path,
    model_path=codec_ckpt_path,
    use_generator=True,
    is_debug=False).eval()

tokenizer = AutoTokenizer.from_pretrained('mesolitica/Malaysian-TTS-4B-v0.1')
model = AutoModelForCausalLM.from_pretrained('mesolitica/Malaysian-TTS-4B-v0.1', torch_dtype='auto').cuda()

Deploy This Model
Production-ready deployment in minutes
Together.ai
Instant API access to this model
Production-ready inference API. Start free, scale to millions.
Try Free API

Replicate
One-click model deployment
Run models in the cloud with simple API. No DevOps required.
Deploy Now

Disclosure: We may earn a commission from these partners. This helps keep LLMYourWay free.