Nitro-T-1.2B
8
6
1.2B
1 language
license:apache-2.0
by
amd
Image Model
OTHER
1.2B params
New
8 downloads
Early-stage
Edge AI:
Mobile
Laptop
Server
3GB+ RAM
Mobile
Laptop
Server
Quick Summary
Text-to-image diffusion model (Nitro-T-1.2B by AMD, ~1.2B parameters) that pairs a diffusion pipeline with a Llama-3.2-1B text encoder to generate 1024x1024 images from text prompts.
Device Compatibility
Mobile
4-6GB RAM
Laptop
16GB RAM
Server
GPU
Minimum Recommended
2GB+ RAM
Code Examples
Quickstart (Python — diffusers / transformers)
# Quickstart: generate a 1024x1024 image with AMD's Nitro-T-1.2B diffusion
# pipeline, using Llama-3.2-1B as the external text encoder.
import torch
from diffusers import DiffusionPipeline
from transformers import AutoModelForCausalLM

# Inference only — gradients are never needed, and disabling them saves memory.
torch.set_grad_enabled(False)

# Fall back to CPU when no CUDA device is present, instead of crashing on
# machines without an NVIDIA GPU (the card advertises laptop/CPU usage).
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
# bfloat16 halves weight memory relative to float32.
dtype = torch.bfloat16
resolution = 1024  # output is resolution x resolution pixels

MODEL_NAME = "amd/Nitro-T-1.2B"

# The pipeline takes a causal-LM text encoder supplied by the caller;
# NOTE(review): the Llama-3.2-1B repo is gated — requires an accepted
# license / HF auth token to download.
text_encoder = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-3.2-1B", torch_dtype=dtype)

# trust_remote_code=True is required because the pipeline class ships inside
# the model repository rather than in the diffusers package itself.
pipe = DiffusionPipeline.from_pretrained(
    MODEL_NAME,
    text_encoder=text_encoder,
    torch_dtype=dtype,
    trust_remote_code=True,
)
pipe.to(device)

image = pipe(
    prompt="The image is a close-up portrait of a scientist in a modern laboratory. He has short, neatly styled black hair and wears thin, stylish eyeglasses. The lighting is soft and warm, highlighting his facial features against a backdrop of lab equipment and glowing screens.",
    height=resolution, width=resolution,
    num_inference_steps=20,   # denoising steps: more = slower, higher quality
    guidance_scale=4.0,       # classifier-free guidance strength
).images[0]
image.save("output.png")
image.save("output.png")

Quickstart (Python — diffusers / transformers)
import torch
from diffusers import DiffusionPipeline
from transformers import AutoModelForCausalLM
torch.set_grad_enabled(False)
device = torch.device('cuda:0')
dtype = torch.bfloat16
resolution = 1024
MODEL_NAME = "amd/Nitro-T-1.2B"
text_encoder = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-3.2-1B", torch_dtype=dtype)
pipe = DiffusionPipeline.from_pretrained(
MODEL_NAME,
text_encoder=text_encoder,
torch_dtype=dtype,
trust_remote_code=True,
)
pipe.to(device)
image = pipe(
prompt="The image is a close-up portrait of a scientist in a modern laboratory. He has short, neatly styled black hair and wears thin, stylish eyeglasses. The lighting is soft and warm, highlighting his facial features against a backdrop of lab equipment and glowing screens.",
height=resolution, width=resolution,
num_inference_steps=20,
guidance_scale=4.0,
).images[0]
image.save("output.png")Quickstartpythontransformers
import torch
from diffusers import DiffusionPipeline
from transformers import AutoModelForCausalLM
torch.set_grad_enabled(False)
device = torch.device('cuda:0')
dtype = torch.bfloat16
resolution = 1024
MODEL_NAME = "amd/Nitro-T-1.2B"
text_encoder = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-3.2-1B", torch_dtype=dtype)
pipe = DiffusionPipeline.from_pretrained(
MODEL_NAME,
text_encoder=text_encoder,
torch_dtype=dtype,
trust_remote_code=True,
)
pipe.to(device)
image = pipe(
prompt="The image is a close-up portrait of a scientist in a modern laboratory. He has short, neatly styled black hair and wears thin, stylish eyeglasses. The lighting is soft and warm, highlighting his facial features against a backdrop of lab equipment and glowing screens.",
height=resolution, width=resolution,
num_inference_steps=20,
guidance_scale=4.0,
).images[0]
image.save("output.png")Quickstartpythontransformers
import torch
from diffusers import DiffusionPipeline
from transformers import AutoModelForCausalLM
torch.set_grad_enabled(False)
device = torch.device('cuda:0')
dtype = torch.bfloat16
resolution = 1024
MODEL_NAME = "amd/Nitro-T-1.2B"
text_encoder = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-3.2-1B", torch_dtype=dtype)
pipe = DiffusionPipeline.from_pretrained(
MODEL_NAME,
text_encoder=text_encoder,
torch_dtype=dtype,
trust_remote_code=True,
)
pipe.to(device)
image = pipe(
prompt="The image is a close-up portrait of a scientist in a modern laboratory. He has short, neatly styled black hair and wears thin, stylish eyeglasses. The lighting is soft and warm, highlighting his facial features against a backdrop of lab equipment and glowing screens.",
height=resolution, width=resolution,
num_inference_steps=20,
guidance_scale=4.0,
).images[0]
image.save("output.png")Quickstartpythontransformers
import torch
from diffusers import DiffusionPipeline
from transformers import AutoModelForCausalLM
torch.set_grad_enabled(False)
device = torch.device('cuda:0')
dtype = torch.bfloat16
resolution = 1024
MODEL_NAME = "amd/Nitro-T-1.2B"
text_encoder = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-3.2-1B", torch_dtype=dtype)
pipe = DiffusionPipeline.from_pretrained(
MODEL_NAME,
text_encoder=text_encoder,
torch_dtype=dtype,
trust_remote_code=True,
)
pipe.to(device)
image = pipe(
prompt="The image is a close-up portrait of a scientist in a modern laboratory. He has short, neatly styled black hair and wears thin, stylish eyeglasses. The lighting is soft and warm, highlighting his facial features against a backdrop of lab equipment and glowing screens.",
height=resolution, width=resolution,
num_inference_steps=20,
guidance_scale=4.0,
).images[0]
image.save("output.png")Quickstartpythontransformers
import torch
from diffusers import DiffusionPipeline
from transformers import AutoModelForCausalLM
torch.set_grad_enabled(False)
device = torch.device('cuda:0')
dtype = torch.bfloat16
resolution = 1024
MODEL_NAME = "amd/Nitro-T-1.2B"
text_encoder = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-3.2-1B", torch_dtype=dtype)
pipe = DiffusionPipeline.from_pretrained(
MODEL_NAME,
text_encoder=text_encoder,
torch_dtype=dtype,
trust_remote_code=True,
)
pipe.to(device)
image = pipe(
prompt="The image is a close-up portrait of a scientist in a modern laboratory. He has short, neatly styled black hair and wears thin, stylish eyeglasses. The lighting is soft and warm, highlighting his facial features against a backdrop of lab equipment and glowing screens.",
height=resolution, width=resolution,
num_inference_steps=20,
guidance_scale=4.0,
).images[0]
image.save("output.png")Quickstartpythontransformers
import torch
from diffusers import DiffusionPipeline
from transformers import AutoModelForCausalLM
torch.set_grad_enabled(False)
device = torch.device('cuda:0')
dtype = torch.bfloat16
resolution = 1024
MODEL_NAME = "amd/Nitro-T-1.2B"
text_encoder = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-3.2-1B", torch_dtype=dtype)
pipe = DiffusionPipeline.from_pretrained(
MODEL_NAME,
text_encoder=text_encoder,
torch_dtype=dtype,
trust_remote_code=True,
)
pipe.to(device)
image = pipe(
prompt="The image is a close-up portrait of a scientist in a modern laboratory. He has short, neatly styled black hair and wears thin, stylish eyeglasses. The lighting is soft and warm, highlighting his facial features against a backdrop of lab equipment and glowing screens.",
height=resolution, width=resolution,
num_inference_steps=20,
guidance_scale=4.0,
).images[0]
image.save("output.png")Quickstartpythontransformers
import torch
from diffusers import DiffusionPipeline
from transformers import AutoModelForCausalLM
torch.set_grad_enabled(False)
device = torch.device('cuda:0')
dtype = torch.bfloat16
resolution = 1024
MODEL_NAME = "amd/Nitro-T-1.2B"
text_encoder = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-3.2-1B", torch_dtype=dtype)
pipe = DiffusionPipeline.from_pretrained(
MODEL_NAME,
text_encoder=text_encoder,
torch_dtype=dtype,
trust_remote_code=True,
)
pipe.to(device)
image = pipe(
prompt="The image is a close-up portrait of a scientist in a modern laboratory. He has short, neatly styled black hair and wears thin, stylish eyeglasses. The lighting is soft and warm, highlighting his facial features against a backdrop of lab equipment and glowing screens.",
height=resolution, width=resolution,
num_inference_steps=20,
guidance_scale=4.0,
).images[0]
image.save("output.png")Quickstartpythontransformers
import torch
from diffusers import DiffusionPipeline
from transformers import AutoModelForCausalLM
torch.set_grad_enabled(False)
device = torch.device('cuda:0')
dtype = torch.bfloat16
resolution = 1024
MODEL_NAME = "amd/Nitro-T-1.2B"
text_encoder = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-3.2-1B", torch_dtype=dtype)
pipe = DiffusionPipeline.from_pretrained(
MODEL_NAME,
text_encoder=text_encoder,
torch_dtype=dtype,
trust_remote_code=True,
)
pipe.to(device)
image = pipe(
prompt="The image is a close-up portrait of a scientist in a modern laboratory. He has short, neatly styled black hair and wears thin, stylish eyeglasses. The lighting is soft and warm, highlighting his facial features against a backdrop of lab equipment and glowing screens.",
height=resolution, width=resolution,
num_inference_steps=20,
guidance_scale=4.0,
).images[0]
image.save("output.png")Quickstartpythontransformers
import torch
from diffusers import DiffusionPipeline
from transformers import AutoModelForCausalLM
torch.set_grad_enabled(False)
device = torch.device('cuda:0')
dtype = torch.bfloat16
resolution = 1024
MODEL_NAME = "amd/Nitro-T-1.2B"
text_encoder = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-3.2-1B", torch_dtype=dtype)
pipe = DiffusionPipeline.from_pretrained(
MODEL_NAME,
text_encoder=text_encoder,
torch_dtype=dtype,
trust_remote_code=True,
)
pipe.to(device)
image = pipe(
prompt="The image is a close-up portrait of a scientist in a modern laboratory. He has short, neatly styled black hair and wears thin, stylish eyeglasses. The lighting is soft and warm, highlighting his facial features against a backdrop of lab equipment and glowing screens.",
height=resolution, width=resolution,
num_inference_steps=20,
guidance_scale=4.0,
).images[0]
image.save("output.png")Quickstartpythontransformers
import torch
from diffusers import DiffusionPipeline
from transformers import AutoModelForCausalLM
torch.set_grad_enabled(False)
device = torch.device('cuda:0')
dtype = torch.bfloat16
resolution = 1024
MODEL_NAME = "amd/Nitro-T-1.2B"
text_encoder = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-3.2-1B", torch_dtype=dtype)
pipe = DiffusionPipeline.from_pretrained(
MODEL_NAME,
text_encoder=text_encoder,
torch_dtype=dtype,
trust_remote_code=True,
)
pipe.to(device)
image = pipe(
prompt="The image is a close-up portrait of a scientist in a modern laboratory. He has short, neatly styled black hair and wears thin, stylish eyeglasses. The lighting is soft and warm, highlighting his facial features against a backdrop of lab equipment and glowing screens.",
height=resolution, width=resolution,
num_inference_steps=20,
guidance_scale=4.0,
).images[0]
image.save("output.png")Quickstartpythontransformers
import torch
from diffusers import DiffusionPipeline
from transformers import AutoModelForCausalLM
torch.set_grad_enabled(False)
device = torch.device('cuda:0')
dtype = torch.bfloat16
resolution = 1024
MODEL_NAME = "amd/Nitro-T-1.2B"
text_encoder = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-3.2-1B", torch_dtype=dtype)
pipe = DiffusionPipeline.from_pretrained(
MODEL_NAME,
text_encoder=text_encoder,
torch_dtype=dtype,
trust_remote_code=True,
)
pipe.to(device)
image = pipe(
prompt="The image is a close-up portrait of a scientist in a modern laboratory. He has short, neatly styled black hair and wears thin, stylish eyeglasses. The lighting is soft and warm, highlighting his facial features against a backdrop of lab equipment and glowing screens.",
height=resolution, width=resolution,
num_inference_steps=20,
guidance_scale=4.0,
).images[0]
image.save("output.png")Quickstartpythontransformers
import torch
from diffusers import DiffusionPipeline
from transformers import AutoModelForCausalLM
torch.set_grad_enabled(False)
device = torch.device('cuda:0')
dtype = torch.bfloat16
resolution = 1024
MODEL_NAME = "amd/Nitro-T-1.2B"
text_encoder = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-3.2-1B", torch_dtype=dtype)
pipe = DiffusionPipeline.from_pretrained(
MODEL_NAME,
text_encoder=text_encoder,
torch_dtype=dtype,
trust_remote_code=True,
)
pipe.to(device)
image = pipe(
prompt="The image is a close-up portrait of a scientist in a modern laboratory. He has short, neatly styled black hair and wears thin, stylish eyeglasses. The lighting is soft and warm, highlighting his facial features against a backdrop of lab equipment and glowing screens.",
height=resolution, width=resolution,
num_inference_steps=20,
guidance_scale=4.0,
).images[0]
image.save("output.png")Quickstartpythontransformers
import torch
from diffusers import DiffusionPipeline
from transformers import AutoModelForCausalLM
torch.set_grad_enabled(False)
device = torch.device('cuda:0')
dtype = torch.bfloat16
resolution = 1024
MODEL_NAME = "amd/Nitro-T-1.2B"
text_encoder = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-3.2-1B", torch_dtype=dtype)
pipe = DiffusionPipeline.from_pretrained(
MODEL_NAME,
text_encoder=text_encoder,
torch_dtype=dtype,
trust_remote_code=True,
)
pipe.to(device)
image = pipe(
prompt="The image is a close-up portrait of a scientist in a modern laboratory. He has short, neatly styled black hair and wears thin, stylish eyeglasses. The lighting is soft and warm, highlighting his facial features against a backdrop of lab equipment and glowing screens.",
height=resolution, width=resolution,
num_inference_steps=20,
guidance_scale=4.0,
).images[0]
image.save("output.png")Quickstartpythontransformers
import torch
from diffusers import DiffusionPipeline
from transformers import AutoModelForCausalLM
torch.set_grad_enabled(False)
device = torch.device('cuda:0')
dtype = torch.bfloat16
resolution = 1024
MODEL_NAME = "amd/Nitro-T-1.2B"
text_encoder = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-3.2-1B", torch_dtype=dtype)
pipe = DiffusionPipeline.from_pretrained(
MODEL_NAME,
text_encoder=text_encoder,
torch_dtype=dtype,
trust_remote_code=True,
)
pipe.to(device)
image = pipe(
prompt="The image is a close-up portrait of a scientist in a modern laboratory. He has short, neatly styled black hair and wears thin, stylish eyeglasses. The lighting is soft and warm, highlighting his facial features against a backdrop of lab equipment and glowing screens.",
height=resolution, width=resolution,
num_inference_steps=20,
guidance_scale=4.0,
).images[0]
image.save("output.png")Quickstartpythontransformers
import torch
from diffusers import DiffusionPipeline
from transformers import AutoModelForCausalLM
torch.set_grad_enabled(False)
device = torch.device('cuda:0')
dtype = torch.bfloat16
resolution = 1024
MODEL_NAME = "amd/Nitro-T-1.2B"
text_encoder = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-3.2-1B", torch_dtype=dtype)
pipe = DiffusionPipeline.from_pretrained(
MODEL_NAME,
text_encoder=text_encoder,
torch_dtype=dtype,
trust_remote_code=True,
)
pipe.to(device)
image = pipe(
prompt="The image is a close-up portrait of a scientist in a modern laboratory. He has short, neatly styled black hair and wears thin, stylish eyeglasses. The lighting is soft and warm, highlighting his facial features against a backdrop of lab equipment and glowing screens.",
height=resolution, width=resolution,
num_inference_steps=20,
guidance_scale=4.0,
).images[0]
image.save("output.png")Quickstartpythontransformers
import torch
from diffusers import DiffusionPipeline
from transformers import AutoModelForCausalLM
torch.set_grad_enabled(False)
device = torch.device('cuda:0')
dtype = torch.bfloat16
resolution = 1024
MODEL_NAME = "amd/Nitro-T-1.2B"
text_encoder = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-3.2-1B", torch_dtype=dtype)
pipe = DiffusionPipeline.from_pretrained(
MODEL_NAME,
text_encoder=text_encoder,
torch_dtype=dtype,
trust_remote_code=True,
)
pipe.to(device)
image = pipe(
prompt="The image is a close-up portrait of a scientist in a modern laboratory. He has short, neatly styled black hair and wears thin, stylish eyeglasses. The lighting is soft and warm, highlighting his facial features against a backdrop of lab equipment and glowing screens.",
height=resolution, width=resolution,
num_inference_steps=20,
guidance_scale=4.0,
).images[0]
image.save("output.png")Quickstartpythontransformers
import torch
from diffusers import DiffusionPipeline
from transformers import AutoModelForCausalLM
torch.set_grad_enabled(False)
device = torch.device('cuda:0')
dtype = torch.bfloat16
resolution = 1024
MODEL_NAME = "amd/Nitro-T-1.2B"
text_encoder = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-3.2-1B", torch_dtype=dtype)
pipe = DiffusionPipeline.from_pretrained(
MODEL_NAME,
text_encoder=text_encoder,
torch_dtype=dtype,
trust_remote_code=True,
)
pipe.to(device)
image = pipe(
prompt="The image is a close-up portrait of a scientist in a modern laboratory. He has short, neatly styled black hair and wears thin, stylish eyeglasses. The lighting is soft and warm, highlighting his facial features against a backdrop of lab equipment and glowing screens.",
height=resolution, width=resolution,
num_inference_steps=20,
guidance_scale=4.0,
).images[0]
image.save("output.png")Quickstartpythontransformers
import torch
from diffusers import DiffusionPipeline
from transformers import AutoModelForCausalLM
torch.set_grad_enabled(False)
device = torch.device('cuda:0')
dtype = torch.bfloat16
resolution = 1024
MODEL_NAME = "amd/Nitro-T-1.2B"
text_encoder = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-3.2-1B", torch_dtype=dtype)
pipe = DiffusionPipeline.from_pretrained(
MODEL_NAME,
text_encoder=text_encoder,
torch_dtype=dtype,
trust_remote_code=True,
)
pipe.to(device)
image = pipe(
prompt="The image is a close-up portrait of a scientist in a modern laboratory. He has short, neatly styled black hair and wears thin, stylish eyeglasses. The lighting is soft and warm, highlighting his facial features against a backdrop of lab equipment and glowing screens.",
height=resolution, width=resolution,
num_inference_steps=20,
guidance_scale=4.0,
).images[0]
image.save("output.png")Quickstartpythontransformers
import torch
from diffusers import DiffusionPipeline
from transformers import AutoModelForCausalLM
torch.set_grad_enabled(False)
device = torch.device('cuda:0')
dtype = torch.bfloat16
resolution = 1024
MODEL_NAME = "amd/Nitro-T-1.2B"
text_encoder = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-3.2-1B", torch_dtype=dtype)
pipe = DiffusionPipeline.from_pretrained(
MODEL_NAME,
text_encoder=text_encoder,
torch_dtype=dtype,
trust_remote_code=True,
)
pipe.to(device)
image = pipe(
prompt="The image is a close-up portrait of a scientist in a modern laboratory. He has short, neatly styled black hair and wears thin, stylish eyeglasses. The lighting is soft and warm, highlighting his facial features against a backdrop of lab equipment and glowing screens.",
height=resolution, width=resolution,
num_inference_steps=20,
guidance_scale=4.0,
).images[0]
image.save("output.png")Quickstartpythontransformers
import torch
from diffusers import DiffusionPipeline
from transformers import AutoModelForCausalLM
torch.set_grad_enabled(False)
device = torch.device('cuda:0')
dtype = torch.bfloat16
resolution = 1024
MODEL_NAME = "amd/Nitro-T-1.2B"
text_encoder = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-3.2-1B", torch_dtype=dtype)
pipe = DiffusionPipeline.from_pretrained(
MODEL_NAME,
text_encoder=text_encoder,
torch_dtype=dtype,
trust_remote_code=True,
)
pipe.to(device)
image = pipe(
prompt="The image is a close-up portrait of a scientist in a modern laboratory. He has short, neatly styled black hair and wears thin, stylish eyeglasses. The lighting is soft and warm, highlighting his facial features against a backdrop of lab equipment and glowing screens.",
height=resolution, width=resolution,
num_inference_steps=20,
guidance_scale=4.0,
).images[0]
image.save("output.png")Quickstartpythontransformers
import torch
from diffusers import DiffusionPipeline
from transformers import AutoModelForCausalLM
torch.set_grad_enabled(False)
device = torch.device('cuda:0')
dtype = torch.bfloat16
resolution = 1024
MODEL_NAME = "amd/Nitro-T-1.2B"
text_encoder = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-3.2-1B", torch_dtype=dtype)
pipe = DiffusionPipeline.from_pretrained(
MODEL_NAME,
text_encoder=text_encoder,
torch_dtype=dtype,
trust_remote_code=True,
)
pipe.to(device)
image = pipe(
prompt="The image is a close-up portrait of a scientist in a modern laboratory. He has short, neatly styled black hair and wears thin, stylish eyeglasses. The lighting is soft and warm, highlighting his facial features against a backdrop of lab equipment and glowing screens.",
height=resolution, width=resolution,
num_inference_steps=20,
guidance_scale=4.0,
).images[0]
image.save("output.png")Quickstartpythontransformers
import torch
from diffusers import DiffusionPipeline
from transformers import AutoModelForCausalLM
torch.set_grad_enabled(False)
device = torch.device('cuda:0')
dtype = torch.bfloat16
resolution = 1024
MODEL_NAME = "amd/Nitro-T-1.2B"
text_encoder = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-3.2-1B", torch_dtype=dtype)
pipe = DiffusionPipeline.from_pretrained(
MODEL_NAME,
text_encoder=text_encoder,
torch_dtype=dtype,
trust_remote_code=True,
)
pipe.to(device)
image = pipe(
prompt="The image is a close-up portrait of a scientist in a modern laboratory. He has short, neatly styled black hair and wears thin, stylish eyeglasses. The lighting is soft and warm, highlighting his facial features against a backdrop of lab equipment and glowing screens.",
height=resolution, width=resolution,
num_inference_steps=20,
guidance_scale=4.0,
).images[0]
image.save("output.png")Quickstartpythontransformers
import torch
from diffusers import DiffusionPipeline
from transformers import AutoModelForCausalLM
torch.set_grad_enabled(False)
device = torch.device('cuda:0')
dtype = torch.bfloat16
resolution = 1024
MODEL_NAME = "amd/Nitro-T-1.2B"
text_encoder = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-3.2-1B", torch_dtype=dtype)
pipe = DiffusionPipeline.from_pretrained(
MODEL_NAME,
text_encoder=text_encoder,
torch_dtype=dtype,
trust_remote_code=True,
)
pipe.to(device)
image = pipe(
prompt="The image is a close-up portrait of a scientist in a modern laboratory. He has short, neatly styled black hair and wears thin, stylish eyeglasses. The lighting is soft and warm, highlighting his facial features against a backdrop of lab equipment and glowing screens.",
height=resolution, width=resolution,
num_inference_steps=20,
guidance_scale=4.0,
).images[0]
image.save("output.png")Quickstartpythontransformers
import torch
from diffusers import DiffusionPipeline
from transformers import AutoModelForCausalLM
torch.set_grad_enabled(False)
device = torch.device('cuda:0')
dtype = torch.bfloat16
resolution = 1024
MODEL_NAME = "amd/Nitro-T-1.2B"
text_encoder = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-3.2-1B", torch_dtype=dtype)
pipe = DiffusionPipeline.from_pretrained(
MODEL_NAME,
text_encoder=text_encoder,
torch_dtype=dtype,
trust_remote_code=True,
)
pipe.to(device)
image = pipe(
prompt="The image is a close-up portrait of a scientist in a modern laboratory. He has short, neatly styled black hair and wears thin, stylish eyeglasses. The lighting is soft and warm, highlighting his facial features against a backdrop of lab equipment and glowing screens.",
height=resolution, width=resolution,
num_inference_steps=20,
guidance_scale=4.0,
).images[0]
image.save("output.png")Quickstartpythontransformers
import torch
from diffusers import DiffusionPipeline
from transformers import AutoModelForCausalLM
torch.set_grad_enabled(False)
device = torch.device('cuda:0')
dtype = torch.bfloat16
resolution = 1024
MODEL_NAME = "amd/Nitro-T-1.2B"
text_encoder = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-3.2-1B", torch_dtype=dtype)
pipe = DiffusionPipeline.from_pretrained(
MODEL_NAME,
text_encoder=text_encoder,
torch_dtype=dtype,
trust_remote_code=True,
)
pipe.to(device)
image = pipe(
prompt="The image is a close-up portrait of a scientist in a modern laboratory. He has short, neatly styled black hair and wears thin, stylish eyeglasses. The lighting is soft and warm, highlighting his facial features against a backdrop of lab equipment and glowing screens.",
height=resolution, width=resolution,
num_inference_steps=20,
guidance_scale=4.0,
).images[0]
image.save("output.png")Quickstartpythontransformers
import torch
from diffusers import DiffusionPipeline
from transformers import AutoModelForCausalLM
torch.set_grad_enabled(False)
device = torch.device('cuda:0')
dtype = torch.bfloat16
resolution = 1024
MODEL_NAME = "amd/Nitro-T-1.2B"
text_encoder = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-3.2-1B", torch_dtype=dtype)
pipe = DiffusionPipeline.from_pretrained(
MODEL_NAME,
text_encoder=text_encoder,
torch_dtype=dtype,
trust_remote_code=True,
)
pipe.to(device)
image = pipe(
prompt="The image is a close-up portrait of a scientist in a modern laboratory. He has short, neatly styled black hair and wears thin, stylish eyeglasses. The lighting is soft and warm, highlighting his facial features against a backdrop of lab equipment and glowing screens.",
height=resolution, width=resolution,
num_inference_steps=20,
guidance_scale=4.0,
).images[0]
image.save("output.png")Quickstartpythontransformers
import torch
from diffusers import DiffusionPipeline
from transformers import AutoModelForCausalLM
torch.set_grad_enabled(False)
device = torch.device('cuda:0')
dtype = torch.bfloat16
resolution = 1024
MODEL_NAME = "amd/Nitro-T-1.2B"
text_encoder = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-3.2-1B", torch_dtype=dtype)
pipe = DiffusionPipeline.from_pretrained(
MODEL_NAME,
text_encoder=text_encoder,
torch_dtype=dtype,
trust_remote_code=True,
)
pipe.to(device)
image = pipe(
prompt="The image is a close-up portrait of a scientist in a modern laboratory. He has short, neatly styled black hair and wears thin, stylish eyeglasses. The lighting is soft and warm, highlighting his facial features against a backdrop of lab equipment and glowing screens.",
height=resolution, width=resolution,
num_inference_steps=20,
guidance_scale=4.0,
).images[0]
image.save("output.png")Quickstartpythontransformers
import torch
from diffusers import DiffusionPipeline
from transformers import AutoModelForCausalLM
torch.set_grad_enabled(False)
device = torch.device('cuda:0')
dtype = torch.bfloat16
resolution = 1024
MODEL_NAME = "amd/Nitro-T-1.2B"
text_encoder = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-3.2-1B", torch_dtype=dtype)
pipe = DiffusionPipeline.from_pretrained(
MODEL_NAME,
text_encoder=text_encoder,
torch_dtype=dtype,
trust_remote_code=True,
)
pipe.to(device)
image = pipe(
prompt="The image is a close-up portrait of a scientist in a modern laboratory. He has short, neatly styled black hair and wears thin, stylish eyeglasses. The lighting is soft and warm, highlighting his facial features against a backdrop of lab equipment and glowing screens.",
height=resolution, width=resolution,
num_inference_steps=20,
guidance_scale=4.0,
).images[0]
image.save("output.png")Quickstartpythontransformers
import torch
from diffusers import DiffusionPipeline
from transformers import AutoModelForCausalLM
torch.set_grad_enabled(False)
device = torch.device('cuda:0')
dtype = torch.bfloat16
resolution = 1024
MODEL_NAME = "amd/Nitro-T-1.2B"
text_encoder = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-3.2-1B", torch_dtype=dtype)
pipe = DiffusionPipeline.from_pretrained(
MODEL_NAME,
text_encoder=text_encoder,
torch_dtype=dtype,
trust_remote_code=True,
)
pipe.to(device)
image = pipe(
prompt="The image is a close-up portrait of a scientist in a modern laboratory. He has short, neatly styled black hair and wears thin, stylish eyeglasses. The lighting is soft and warm, highlighting his facial features against a backdrop of lab equipment and glowing screens.",
height=resolution, width=resolution,
num_inference_steps=20,
guidance_scale=4.0,
).images[0]
image.save("output.png")Quickstartpythontransformers
import torch
from diffusers import DiffusionPipeline
from transformers import AutoModelForCausalLM
torch.set_grad_enabled(False)
device = torch.device('cuda:0')
dtype = torch.bfloat16
resolution = 1024
MODEL_NAME = "amd/Nitro-T-1.2B"
text_encoder = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-3.2-1B", torch_dtype=dtype)
pipe = DiffusionPipeline.from_pretrained(
MODEL_NAME,
text_encoder=text_encoder,
torch_dtype=dtype,
trust_remote_code=True,
)
pipe.to(device)
image = pipe(
prompt="The image is a close-up portrait of a scientist in a modern laboratory. He has short, neatly styled black hair and wears thin, stylish eyeglasses. The lighting is soft and warm, highlighting his facial features against a backdrop of lab equipment and glowing screens.",
height=resolution, width=resolution,
num_inference_steps=20,
guidance_scale=4.0,
).images[0]
image.save("output.png")Quickstartpythontransformers
import torch
from diffusers import DiffusionPipeline
from transformers import AutoModelForCausalLM
torch.set_grad_enabled(False)
device = torch.device('cuda:0')
dtype = torch.bfloat16
resolution = 1024
MODEL_NAME = "amd/Nitro-T-1.2B"
text_encoder = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-3.2-1B", torch_dtype=dtype)
pipe = DiffusionPipeline.from_pretrained(
MODEL_NAME,
text_encoder=text_encoder,
torch_dtype=dtype,
trust_remote_code=True,
)
pipe.to(device)
image = pipe(
prompt="The image is a close-up portrait of a scientist in a modern laboratory. He has short, neatly styled black hair and wears thin, stylish eyeglasses. The lighting is soft and warm, highlighting his facial features against a backdrop of lab equipment and glowing screens.",
height=resolution, width=resolution,
num_inference_steps=20,
guidance_scale=4.0,
).images[0]
image.save("output.png")Quickstartpythontransformers
import torch
from diffusers import DiffusionPipeline
from transformers import AutoModelForCausalLM
torch.set_grad_enabled(False)
device = torch.device('cuda:0')
dtype = torch.bfloat16
resolution = 1024
MODEL_NAME = "amd/Nitro-T-1.2B"
text_encoder = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-3.2-1B", torch_dtype=dtype)
pipe = DiffusionPipeline.from_pretrained(
MODEL_NAME,
text_encoder=text_encoder,
torch_dtype=dtype,
trust_remote_code=True,
)
pipe.to(device)
image = pipe(
prompt="The image is a close-up portrait of a scientist in a modern laboratory. He has short, neatly styled black hair and wears thin, stylish eyeglasses. The lighting is soft and warm, highlighting his facial features against a backdrop of lab equipment and glowing screens.",
height=resolution, width=resolution,
num_inference_steps=20,
guidance_scale=4.0,
).images[0]
image.save("output.png")Quickstartpythontransformers
import torch
from diffusers import DiffusionPipeline
from transformers import AutoModelForCausalLM
torch.set_grad_enabled(False)
device = torch.device('cuda:0')
dtype = torch.bfloat16
resolution = 1024
MODEL_NAME = "amd/Nitro-T-1.2B"
text_encoder = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-3.2-1B", torch_dtype=dtype)
pipe = DiffusionPipeline.from_pretrained(
MODEL_NAME,
text_encoder=text_encoder,
torch_dtype=dtype,
trust_remote_code=True,
)
pipe.to(device)
image = pipe(
prompt="The image is a close-up portrait of a scientist in a modern laboratory. He has short, neatly styled black hair and wears thin, stylish eyeglasses. The lighting is soft and warm, highlighting his facial features against a backdrop of lab equipment and glowing screens.",
height=resolution, width=resolution,
num_inference_steps=20,
guidance_scale=4.0,
).images[0]
image.save("output.png")Quickstartpythontransformers
import torch
from diffusers import DiffusionPipeline
from transformers import AutoModelForCausalLM
torch.set_grad_enabled(False)
device = torch.device('cuda:0')
dtype = torch.bfloat16
resolution = 1024
MODEL_NAME = "amd/Nitro-T-1.2B"
text_encoder = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-3.2-1B", torch_dtype=dtype)
pipe = DiffusionPipeline.from_pretrained(
MODEL_NAME,
text_encoder=text_encoder,
torch_dtype=dtype,
trust_remote_code=True,
)
pipe.to(device)
image = pipe(
prompt="The image is a close-up portrait of a scientist in a modern laboratory. He has short, neatly styled black hair and wears thin, stylish eyeglasses. The lighting is soft and warm, highlighting his facial features against a backdrop of lab equipment and glowing screens.",
height=resolution, width=resolution,
num_inference_steps=20,
guidance_scale=4.0,
).images[0]
image.save("output.png")Quickstartpythontransformers
# Quickstart: generate one image with AMD Nitro-T-1.2B via diffusers.
import torch
from diffusers import DiffusionPipeline
from transformers import AutoModelForCausalLM

# Inference only -- disable autograd globally.
torch.set_grad_enabled(False)

device = torch.device('cuda:0')  # NOTE(review): assumes a CUDA device is present
dtype = torch.bfloat16
resolution = 1024
MODEL_NAME = "amd/Nitro-T-1.2B"

# Nitro-T pairs the diffusion pipeline with an external Llama-3.2-1B text encoder.
text_encoder = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-3.2-1B", torch_dtype=dtype)

pipe = DiffusionPipeline.from_pretrained(
    MODEL_NAME,
    text_encoder=text_encoder,
    torch_dtype=dtype,
    trust_remote_code=True,  # custom pipeline code is loaded from the model repo
)
pipe.to(device)

image = pipe(
    prompt="The image is a close-up portrait of a scientist in a modern laboratory. He has short, neatly styled black hair and wears thin, stylish eyeglasses. The lighting is soft and warm, highlighting his facial features against a backdrop of lab equipment and glowing screens.",
    height=resolution, width=resolution,
    num_inference_steps=20,
    guidance_scale=4.0,
).images[0]
image.save("output.png")
# Quickstart: generate one image with AMD Nitro-T-1.2B via diffusers.
import torch
from diffusers import DiffusionPipeline
from transformers import AutoModelForCausalLM

# Inference only -- disable autograd globally.
torch.set_grad_enabled(False)

device = torch.device('cuda:0')  # NOTE(review): assumes a CUDA device is present
dtype = torch.bfloat16
resolution = 1024
MODEL_NAME = "amd/Nitro-T-1.2B"

# Nitro-T pairs the diffusion pipeline with an external Llama-3.2-1B text encoder.
text_encoder = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-3.2-1B", torch_dtype=dtype)

pipe = DiffusionPipeline.from_pretrained(
    MODEL_NAME,
    text_encoder=text_encoder,
    torch_dtype=dtype,
    trust_remote_code=True,  # custom pipeline code is loaded from the model repo
)
pipe.to(device)

image = pipe(
    prompt="The image is a close-up portrait of a scientist in a modern laboratory. He has short, neatly styled black hair and wears thin, stylish eyeglasses. The lighting is soft and warm, highlighting his facial features against a backdrop of lab equipment and glowing screens.",
    height=resolution, width=resolution,
    num_inference_steps=20,
    guidance_scale=4.0,
).images[0]
image.save("output.png")
# Quickstart: generate one image with AMD Nitro-T-1.2B via diffusers.
import torch
from diffusers import DiffusionPipeline
from transformers import AutoModelForCausalLM

# Inference only -- disable autograd globally.
torch.set_grad_enabled(False)

device = torch.device('cuda:0')  # NOTE(review): assumes a CUDA device is present
dtype = torch.bfloat16
resolution = 1024
MODEL_NAME = "amd/Nitro-T-1.2B"

# Nitro-T pairs the diffusion pipeline with an external Llama-3.2-1B text encoder.
text_encoder = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-3.2-1B", torch_dtype=dtype)

pipe = DiffusionPipeline.from_pretrained(
    MODEL_NAME,
    text_encoder=text_encoder,
    torch_dtype=dtype,
    trust_remote_code=True,  # custom pipeline code is loaded from the model repo
)
pipe.to(device)

image = pipe(
    prompt="The image is a close-up portrait of a scientist in a modern laboratory. He has short, neatly styled black hair and wears thin, stylish eyeglasses. The lighting is soft and warm, highlighting his facial features against a backdrop of lab equipment and glowing screens.",
    height=resolution, width=resolution,
    num_inference_steps=20,
    guidance_scale=4.0,
).images[0]
image.save("output.png")
# Quickstart: generate one image with AMD Nitro-T-1.2B via diffusers.
import torch
from diffusers import DiffusionPipeline
from transformers import AutoModelForCausalLM

# Inference only -- disable autograd globally.
torch.set_grad_enabled(False)

device = torch.device('cuda:0')  # NOTE(review): assumes a CUDA device is present
dtype = torch.bfloat16
resolution = 1024
MODEL_NAME = "amd/Nitro-T-1.2B"

# Nitro-T pairs the diffusion pipeline with an external Llama-3.2-1B text encoder.
text_encoder = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-3.2-1B", torch_dtype=dtype)

pipe = DiffusionPipeline.from_pretrained(
    MODEL_NAME,
    text_encoder=text_encoder,
    torch_dtype=dtype,
    trust_remote_code=True,  # custom pipeline code is loaded from the model repo
)
pipe.to(device)

image = pipe(
    prompt="The image is a close-up portrait of a scientist in a modern laboratory. He has short, neatly styled black hair and wears thin, stylish eyeglasses. The lighting is soft and warm, highlighting his facial features against a backdrop of lab equipment and glowing screens.",
    height=resolution, width=resolution,
    num_inference_steps=20,
    guidance_scale=4.0,
).images[0]
image.save("output.png")
# Quickstart: generate one image with AMD Nitro-T-1.2B via diffusers.
import torch
from diffusers import DiffusionPipeline
from transformers import AutoModelForCausalLM

# Inference only -- disable autograd globally.
torch.set_grad_enabled(False)

device = torch.device('cuda:0')  # NOTE(review): assumes a CUDA device is present
dtype = torch.bfloat16
resolution = 1024
MODEL_NAME = "amd/Nitro-T-1.2B"

# Nitro-T pairs the diffusion pipeline with an external Llama-3.2-1B text encoder.
text_encoder = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-3.2-1B", torch_dtype=dtype)

pipe = DiffusionPipeline.from_pretrained(
    MODEL_NAME,
    text_encoder=text_encoder,
    torch_dtype=dtype,
    trust_remote_code=True,  # custom pipeline code is loaded from the model repo
)
pipe.to(device)

image = pipe(
    prompt="The image is a close-up portrait of a scientist in a modern laboratory. He has short, neatly styled black hair and wears thin, stylish eyeglasses. The lighting is soft and warm, highlighting his facial features against a backdrop of lab equipment and glowing screens.",
    height=resolution, width=resolution,
    num_inference_steps=20,
    guidance_scale=4.0,
).images[0]
image.save("output.png")
# Quickstart: generate one image with AMD Nitro-T-1.2B via diffusers.
import torch
from diffusers import DiffusionPipeline
from transformers import AutoModelForCausalLM

# Inference only -- disable autograd globally.
torch.set_grad_enabled(False)

device = torch.device('cuda:0')  # NOTE(review): assumes a CUDA device is present
dtype = torch.bfloat16
resolution = 1024
MODEL_NAME = "amd/Nitro-T-1.2B"

# Nitro-T pairs the diffusion pipeline with an external Llama-3.2-1B text encoder.
text_encoder = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-3.2-1B", torch_dtype=dtype)

pipe = DiffusionPipeline.from_pretrained(
    MODEL_NAME,
    text_encoder=text_encoder,
    torch_dtype=dtype,
    trust_remote_code=True,  # custom pipeline code is loaded from the model repo
)
pipe.to(device)

image = pipe(
    prompt="The image is a close-up portrait of a scientist in a modern laboratory. He has short, neatly styled black hair and wears thin, stylish eyeglasses. The lighting is soft and warm, highlighting his facial features against a backdrop of lab equipment and glowing screens.",
    height=resolution, width=resolution,
    num_inference_steps=20,
    guidance_scale=4.0,
).images[0]
image.save("output.png")
# Quickstart: generate one image with AMD Nitro-T-1.2B via diffusers.
import torch
from diffusers import DiffusionPipeline
from transformers import AutoModelForCausalLM

# Inference only -- disable autograd globally.
torch.set_grad_enabled(False)

device = torch.device('cuda:0')  # NOTE(review): assumes a CUDA device is present
dtype = torch.bfloat16
resolution = 1024
MODEL_NAME = "amd/Nitro-T-1.2B"

# Nitro-T pairs the diffusion pipeline with an external Llama-3.2-1B text encoder.
text_encoder = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-3.2-1B", torch_dtype=dtype)

pipe = DiffusionPipeline.from_pretrained(
    MODEL_NAME,
    text_encoder=text_encoder,
    torch_dtype=dtype,
    trust_remote_code=True,  # custom pipeline code is loaded from the model repo
)
pipe.to(device)

image = pipe(
    prompt="The image is a close-up portrait of a scientist in a modern laboratory. He has short, neatly styled black hair and wears thin, stylish eyeglasses. The lighting is soft and warm, highlighting his facial features against a backdrop of lab equipment and glowing screens.",
    height=resolution, width=resolution,
    num_inference_steps=20,
    guidance_scale=4.0,
).images[0]
image.save("output.png")
# Quickstart: run the AMD Nitro-T-1.2B diffusion pipeline and produce one image.
import torch
from diffusers import DiffusionPipeline
from transformers import AutoModelForCausalLM

# No training here -- turn autograd off for the whole process.
torch.set_grad_enabled(False)

device = torch.device('cuda:0')  # NOTE(review): assumes a CUDA device is present
dtype = torch.bfloat16
resolution = 1024
MODEL_NAME = "amd/Nitro-T-1.2B"

# The pipeline's text encoder is supplied separately: a Llama-3.2-1B causal LM.
text_encoder = AutoModelForCausalLM.from_pretrained(
    "meta-llama/Llama-3.2-1B",
    torch_dtype=dtype,
)

pipe = DiffusionPipeline.from_pretrained(
    MODEL_NAME,
    text_encoder=text_encoder,
    torch_dtype=dtype,
    trust_remote_code=True,  # pipeline implementation ships with the model repo
)
pipe.to(device)

result = pipe(
    prompt="The image is a close-up portrait of a scientist in a modern laboratory. He has short, neatly styled black hair and wears thin, stylish eyeglasses. The lighting is soft and warm, highlighting his facial features against a backdrop of lab equipment and glowing screens.",
    height=resolution, width=resolution,
    num_inference_steps=20,
    guidance_scale=4.0,
)
image = result.images[0]
image.save("output.png")

Deploy This Model
Production-ready deployment in minutes
Together.ai
Instant API access to this model
Production-ready inference API. Start free, scale to millions.
Try Free API

Replicate
One-click model deployment
Run models in the cloud with simple API. No DevOps required.
Deploy Now

Disclosure: We may earn a commission from these partners. This helps keep LLMYourWay free.