face-parsing
by jonathandinu

Image segmentation model · base model: nvidia/mit-b5 · 1 language · license: other
670.4K downloads · 199 likes
Edge AI targets: Mobile · Laptop · Server
Quick Summary

Semantic segmentation (face parsing) model fine-tuned from nvidia/mit-b5, served through the transformers library and also usable in the browser via transformers.js. Model card metadata:

language: en
library_name: transformers
tags: vision, image-segmentation, nvidia/mit-b5, transformers.js
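
For a quick start, the checkpoint can also be driven through the high-level image-segmentation pipeline in transformers (the same task name the browser examples below use). A minimal sketch; the exact keys in each result dict are an assumption about the pipeline output:

from transformers import pipeline

# one-liner usage; downloads the checkpoint on first run
segmenter = pipeline("image-segmentation", model="jonathandinu/face-parsing")
results = segmenter("https://images.unsplash.com/photo-1539571696357-5a69c17a67c6")

for r in results:
    # each entry is expected to carry a label name and a PIL mask
    print(r["label"], r["mask"].size)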

Code Examples

Python (transformers)

import torch
from torch import nn
from transformers import SegformerImageProcessor, SegformerForSemanticSegmentation

from PIL import Image
import matplotlib.pyplot as plt
import requests

# convenience expression for automatically determining device
device = (
    "cuda"
    # Device for NVIDIA or AMD GPUs
    if torch.cuda.is_available()
    else "mps"
    # Device for Apple Silicon (Metal Performance Shaders)
    if torch.backends.mps.is_available()
    else "cpu"
)

# load models
image_processor = SegformerImageProcessor.from_pretrained("jonathandinu/face-parsing")
model = SegformerForSemanticSegmentation.from_pretrained("jonathandinu/face-parsing")
model.to(device)

# expects a PIL.Image or torch.Tensor
url = "https://images.unsplash.com/photo-1539571696357-5a69c17a67c6"
image = Image.open(requests.get(url, stream=True).raw)

# run inference on image
inputs = image_processor(images=image, return_tensors="pt").to(device)
outputs = model(**inputs)
logits = outputs.logits  # shape (batch_size, num_labels, ~height/4, ~width/4)

# resize output to match input image dimensions
upsampled_logits = nn.functional.interpolate(logits,
                size=image.size[::-1], # H x W
                mode='bilinear',
                align_corners=False)

# get label masks
labels = upsampled_logits.argmax(dim=1)[0]

# move to CPU to visualize in matplotlib
labels_viz = labels.cpu().numpy()
plt.imshow(labels_viz)
plt.show()
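
To work with a single facial region instead of the full label map, the predicted class indices can be mapped back to names through the model config. A short sketch continuing from the variables above (model, labels); the "hair" label name is an assumption about this checkpoint's id2label mapping:

# list which classes actually appear in this image
id2label = {int(k): v for k, v in model.config.id2label.items()}
present = sorted({id2label.get(int(i), str(i)) for i in labels.unique().tolist()})
print("labels found:", present)

# binary mask for one region of interest (label name assumed to exist)
label2id = {v: k for k, v in id2label.items()}
if "hair" in label2id:
    hair_mask = (labels == label2id["hair"]).cpu().numpy()
    plt.imshow(hair_mask, cmap="gray")
    plt.show()
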
JavaScript (transformers.js)

import {
  pipeline,
  env,
} from "https://cdn.jsdelivr.net/npm/@xenova/[email protected]";

// important to prevent errors since the model files are likely remote on HF hub
env.allowLocalModels = false;

// instantiate image segmentation pipeline with pretrained face parsing model
const model = await pipeline("image-segmentation", "jonathandinu/face-parsing");

// image to run inference on (same sample image as the Python example above)
const url = "https://images.unsplash.com/photo-1539571696357-5a69c17a67c6";

// async inference since it could take a few seconds
const output = await model(url);

// each label is a separate mask object
// [
//   { score: null, label: 'background', mask: transformers.js RawImage { ... }}
//   { score: null, label: 'hair', mask: transformers.js RawImage { ... }}
//    ...
// ]
for (const m of output) {
  console.log(`Found ${m.label}`);
  m.mask.save(`${m.label}.png`);
}
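
The JavaScript loop above writes one PNG per label; the same idea in Python, continuing from the labels tensor and model of the first example (file names are illustrative):

import numpy as np

# save a black-and-white mask image per class present in the prediction
id2label = {int(k): v for k, v in model.config.id2label.items()}
labels_np = labels.cpu().numpy()
for idx in np.unique(labels_np):
    mask = (labels_np == idx).astype(np.uint8) * 255
    Image.fromarray(mask, mode="L").save(f"{id2label.get(int(idx), str(idx))}.png")
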
p5.js

// ...

// asynchronously load transformers.js and instantiate model
async function preload() {
  // load transformers.js library with a dynamic import
  const { pipeline, env } = await import(
    "https://cdn.jsdelivr.net/npm/@xenova/[email protected]"
  );

  // important to prevent errors since the model files are remote on HF hub
  env.allowLocalModels = false;

  // instantiate image segmentation pipeline with pretrained face parsing model
  model = await pipeline("image-segmentation", "jonathandinu/face-parsing");

  print("face-parsing model loaded");
}

// ...
