WorldPM-72B

78
80
72.0B
1 language
license:apache-2.0
by
Qwen
Language Model
OTHER
72B params
New
78 downloads
Early-stage
Edge AI:
Mobile
Laptop
Server
161GB+ RAM
Mobile
Laptop
Server
Quick Summary

WorldPM-72B is a 72B-parameter preference (reward) model from Qwen: given a chat conversation, it outputs a scalar score indicating how strongly the model prefers the assistant's response.

Device Compatibility

Mobile
4-6GB RAM
Laptop
16GB RAM
Server
GPU
Minimum Recommended
68GB+ RAM

Code Examples

💻 Usage Example with Hugging Face Transformers (Python)
from transformers import AutoModel, AutoTokenizer, AutoConfig

import torch

model_name = 'Qwen/WorldPM-72B'
# trust_remote_code is required: the repository ships custom model code.
# NOTE(review): this executes code fetched from the Hub — pin a revision in production.
tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
config = AutoConfig.from_pretrained(model_name, trust_remote_code=True)
model = AutoModel.from_pretrained(
    model_name,
    config=config,
    device_map="auto",  # shard the 72B weights across available devices
    trust_remote_code=True,
).eval()  # inference only


def get_score(model, tokenizer, conversation):
    """Return the scalar preference score the model assigns to a conversation.

    Args:
        model: the WorldPM reward model loaded above.
        tokenizer: the matching tokenizer (must provide a chat template).
        conversation: list of ``{'role': ..., 'content': ...}`` chat messages.

    Returns:
        float: higher means the model prefers the assistant response more.
    """
    con_str = tokenizer.apply_chat_template(
        conversation, tokenize=False, add_generation_prompt=False
    )
    # The chat template already inserted special tokens; don't add them twice.
    input_ids = tokenizer.encode(con_str, return_tensors="pt", add_special_tokens=False)
    # no_grad: scoring is pure inference — without it, the forward pass builds
    # an autograd graph, wasting significant memory on a 72B model.
    with torch.no_grad():
        outputs = model(input_ids=input_ids.to(model.device))
    # The model's first output is the scalar reward; move to CPU to extract it.
    return outputs[0].cpu().item()


# Two candidate answers to the same question: a structured, helpful reply and
# a low-effort one. The reward model should score the first markedly higher.
messages = [
    [
        {'role': 'system', 'content': ""},
        {'role': 'user', 'content': "Tips for better sleep?"},
        {'role': 'assistant', 'content': "Three key tips for better sleep:\n1. Set a consistent bedtime and wake-up schedule\n2. Avoid screens 1 hour before bed\n3. Keep your bedroom cool and dark\n\nStart with these habits and you should notice improvement within a week."}
    ],
    [
        {'role': 'system', 'content': ""},
        {'role': 'user', 'content': "Tips for better sleep?"},
        {'role': 'assistant', 'content': "just try sleeping earlier or take some pills if u cant sleep lol. maybe watch tv until u get tired idk"}
    ]
]

score = [get_score(model, tokenizer=tokenizer, conversation=message) for message in messages]

print(score)  # [0.6298332214355469, -1.6425328254699707] for WorldPM-72B
💻 Usage Example with Hugging Face Transformers (Python)
from transformers import AutoModel, AutoTokenizer, AutoConfig

import torch

model_name = 'Qwen/WorldPM-72B'
# trust_remote_code is required: the repository ships custom model code.
# NOTE(review): this executes code fetched from the Hub — pin a revision in production.
tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
config = AutoConfig.from_pretrained(model_name, trust_remote_code=True)
model = AutoModel.from_pretrained(
    model_name,
    config=config,
    device_map="auto",  # shard the 72B weights across available devices
    trust_remote_code=True,
).eval()  # inference only


def get_score(model, tokenizer, conversation):
    """Return the scalar preference score the model assigns to a conversation.

    Args:
        model: the WorldPM reward model loaded above.
        tokenizer: the matching tokenizer (must provide a chat template).
        conversation: list of ``{'role': ..., 'content': ...}`` chat messages.

    Returns:
        float: higher means the model prefers the assistant response more.
    """
    con_str = tokenizer.apply_chat_template(
        conversation, tokenize=False, add_generation_prompt=False
    )
    # The chat template already inserted special tokens; don't add them twice.
    input_ids = tokenizer.encode(con_str, return_tensors="pt", add_special_tokens=False)
    # no_grad: scoring is pure inference — without it, the forward pass builds
    # an autograd graph, wasting significant memory on a 72B model.
    with torch.no_grad():
        outputs = model(input_ids=input_ids.to(model.device))
    # The model's first output is the scalar reward; move to CPU to extract it.
    return outputs[0].cpu().item()


# Two candidate answers to the same question: a structured, helpful reply and
# a low-effort one. The reward model should score the first markedly higher.
messages = [
    [
        {'role': 'system', 'content': ""},
        {'role': 'user', 'content': "Tips for better sleep?"},
        {'role': 'assistant', 'content': "Three key tips for better sleep:\n1. Set a consistent bedtime and wake-up schedule\n2. Avoid screens 1 hour before bed\n3. Keep your bedroom cool and dark\n\nStart with these habits and you should notice improvement within a week."}
    ],
    [
        {'role': 'system', 'content': ""},
        {'role': 'user', 'content': "Tips for better sleep?"},
        {'role': 'assistant', 'content': "just try sleeping earlier or take some pills if u cant sleep lol. maybe watch tv until u get tired idk"}
    ]
]

score = [get_score(model, tokenizer=tokenizer, conversation=message) for message in messages]

print(score)  # [0.6298332214355469, -1.6425328254699707] for WorldPM-72B
💻 Usage Example with Hugging Face Transformers (Python)
from transformers import AutoModel, AutoTokenizer, AutoConfig

import torch

model_name = 'Qwen/WorldPM-72B'
# trust_remote_code is required: the repository ships custom model code.
# NOTE(review): this executes code fetched from the Hub — pin a revision in production.
tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
config = AutoConfig.from_pretrained(model_name, trust_remote_code=True)
model = AutoModel.from_pretrained(
    model_name,
    config=config,
    device_map="auto",  # shard the 72B weights across available devices
    trust_remote_code=True,
).eval()  # inference only


def get_score(model, tokenizer, conversation):
    """Return the scalar preference score the model assigns to a conversation.

    Args:
        model: the WorldPM reward model loaded above.
        tokenizer: the matching tokenizer (must provide a chat template).
        conversation: list of ``{'role': ..., 'content': ...}`` chat messages.

    Returns:
        float: higher means the model prefers the assistant response more.
    """
    con_str = tokenizer.apply_chat_template(
        conversation, tokenize=False, add_generation_prompt=False
    )
    # The chat template already inserted special tokens; don't add them twice.
    input_ids = tokenizer.encode(con_str, return_tensors="pt", add_special_tokens=False)
    # no_grad: scoring is pure inference — without it, the forward pass builds
    # an autograd graph, wasting significant memory on a 72B model.
    with torch.no_grad():
        outputs = model(input_ids=input_ids.to(model.device))
    # The model's first output is the scalar reward; move to CPU to extract it.
    return outputs[0].cpu().item()


# Two candidate answers to the same question: a structured, helpful reply and
# a low-effort one. The reward model should score the first markedly higher.
messages = [
    [
        {'role': 'system', 'content': ""},
        {'role': 'user', 'content': "Tips for better sleep?"},
        {'role': 'assistant', 'content': "Three key tips for better sleep:\n1. Set a consistent bedtime and wake-up schedule\n2. Avoid screens 1 hour before bed\n3. Keep your bedroom cool and dark\n\nStart with these habits and you should notice improvement within a week."}
    ],
    [
        {'role': 'system', 'content': ""},
        {'role': 'user', 'content': "Tips for better sleep?"},
        {'role': 'assistant', 'content': "just try sleeping earlier or take some pills if u cant sleep lol. maybe watch tv until u get tired idk"}
    ]
]

score = [get_score(model, tokenizer=tokenizer, conversation=message) for message in messages]

print(score)  # [0.6298332214355469, -1.6425328254699707] for WorldPM-72B
💻 Usage Example with Hugging Face Transformers (Python)
from transformers import AutoModel, AutoTokenizer, AutoConfig

import torch

model_name = 'Qwen/WorldPM-72B'
# trust_remote_code is required: the repository ships custom model code.
# NOTE(review): this executes code fetched from the Hub — pin a revision in production.
tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
config = AutoConfig.from_pretrained(model_name, trust_remote_code=True)
model = AutoModel.from_pretrained(
    model_name,
    config=config,
    device_map="auto",  # shard the 72B weights across available devices
    trust_remote_code=True,
).eval()  # inference only


def get_score(model, tokenizer, conversation):
    """Return the scalar preference score the model assigns to a conversation.

    Args:
        model: the WorldPM reward model loaded above.
        tokenizer: the matching tokenizer (must provide a chat template).
        conversation: list of ``{'role': ..., 'content': ...}`` chat messages.

    Returns:
        float: higher means the model prefers the assistant response more.
    """
    con_str = tokenizer.apply_chat_template(
        conversation, tokenize=False, add_generation_prompt=False
    )
    # The chat template already inserted special tokens; don't add them twice.
    input_ids = tokenizer.encode(con_str, return_tensors="pt", add_special_tokens=False)
    # no_grad: scoring is pure inference — without it, the forward pass builds
    # an autograd graph, wasting significant memory on a 72B model.
    with torch.no_grad():
        outputs = model(input_ids=input_ids.to(model.device))
    # The model's first output is the scalar reward; move to CPU to extract it.
    return outputs[0].cpu().item()


# Two candidate answers to the same question: a structured, helpful reply and
# a low-effort one. The reward model should score the first markedly higher.
messages = [
    [
        {'role': 'system', 'content': ""},
        {'role': 'user', 'content': "Tips for better sleep?"},
        {'role': 'assistant', 'content': "Three key tips for better sleep:\n1. Set a consistent bedtime and wake-up schedule\n2. Avoid screens 1 hour before bed\n3. Keep your bedroom cool and dark\n\nStart with these habits and you should notice improvement within a week."}
    ],
    [
        {'role': 'system', 'content': ""},
        {'role': 'user', 'content': "Tips for better sleep?"},
        {'role': 'assistant', 'content': "just try sleeping earlier or take some pills if u cant sleep lol. maybe watch tv until u get tired idk"}
    ]
]

score = [get_score(model, tokenizer=tokenizer, conversation=message) for message in messages]

print(score)  # [0.6298332214355469, -1.6425328254699707] for WorldPM-72B
💻 Usage Example with Hugging Face Transformers (Python)
from transformers import AutoModel, AutoTokenizer, AutoConfig

import torch

model_name = 'Qwen/WorldPM-72B'
# trust_remote_code is required: the repository ships custom model code.
# NOTE(review): this executes code fetched from the Hub — pin a revision in production.
tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
config = AutoConfig.from_pretrained(model_name, trust_remote_code=True)
model = AutoModel.from_pretrained(
    model_name,
    config=config,
    device_map="auto",  # shard the 72B weights across available devices
    trust_remote_code=True,
).eval()  # inference only


def get_score(model, tokenizer, conversation):
    """Return the scalar preference score the model assigns to a conversation.

    Args:
        model: the WorldPM reward model loaded above.
        tokenizer: the matching tokenizer (must provide a chat template).
        conversation: list of ``{'role': ..., 'content': ...}`` chat messages.

    Returns:
        float: higher means the model prefers the assistant response more.
    """
    con_str = tokenizer.apply_chat_template(
        conversation, tokenize=False, add_generation_prompt=False
    )
    # The chat template already inserted special tokens; don't add them twice.
    input_ids = tokenizer.encode(con_str, return_tensors="pt", add_special_tokens=False)
    # no_grad: scoring is pure inference — without it, the forward pass builds
    # an autograd graph, wasting significant memory on a 72B model.
    with torch.no_grad():
        outputs = model(input_ids=input_ids.to(model.device))
    # The model's first output is the scalar reward; move to CPU to extract it.
    return outputs[0].cpu().item()


# Two candidate answers to the same question: a structured, helpful reply and
# a low-effort one. The reward model should score the first markedly higher.
messages = [
    [
        {'role': 'system', 'content': ""},
        {'role': 'user', 'content': "Tips for better sleep?"},
        {'role': 'assistant', 'content': "Three key tips for better sleep:\n1. Set a consistent bedtime and wake-up schedule\n2. Avoid screens 1 hour before bed\n3. Keep your bedroom cool and dark\n\nStart with these habits and you should notice improvement within a week."}
    ],
    [
        {'role': 'system', 'content': ""},
        {'role': 'user', 'content': "Tips for better sleep?"},
        {'role': 'assistant', 'content': "just try sleeping earlier or take some pills if u cant sleep lol. maybe watch tv until u get tired idk"}
    ]
]

score = [get_score(model, tokenizer=tokenizer, conversation=message) for message in messages]

print(score)  # [0.6298332214355469, -1.6425328254699707] for WorldPM-72B

Deploy This Model

Production-ready deployment in minutes

Together.ai

Instant API access to this model

Fastest API

Production-ready inference API. Start free, scale to millions.

Try Free API

Replicate

One-click model deployment

Easiest Setup

Run models in the cloud with simple API. No DevOps required.

Deploy Now

Disclosure: We may earn a commission from these partners. This helps keep LLMYourWay free.