GLM-Z1-Rumination-32B-0414-GGUF

91
32.0B
2 languages
BF16
license:mit
by
Mungert
Language Model
OTHER
32B params
New
91 downloads
Early-stage
Edge AI:
Mobile
Laptop
Server
72GB+ RAM
Mobile
Laptop
Server
Quick Summary

GLM-Z1-Rumination-32B-0414 is a 32B-parameter GLM-4 "rumination" (deep-reasoning, tool-using) language model, distributed here in GGUF quantized formats.

Device Compatibility

Mobile
4-6GB RAM
Laptop
16GB RAM
Server
GPU
Minimum Recommended
30GB+ RAM

Code Examples

Python (transformers) example:
from transformers import AutoModelForCausalLM, AutoTokenizer
import re
import json

# Hugging Face repo id of the GLM-4 "rumination" (deep-reasoning) variant.
MODEL_PATH = "THUDM/GLM-4-Z1-Rumination-32B-0414"

tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH)
# device_map="auto" lets accelerate shard the 32B model across available devices.
model = AutoModelForCausalLM.from_pretrained(MODEL_PATH, device_map="auto")

# Conversation history; mutated in place by pipeline() and read by get_assistant().
messages = [{"role": "user", "content": "Let a, b be positive real numbers such that ab = a + b + 3. Determine the range of possible values for a + b."}]

# Sampling settings shared by every generation turn.
generate_kwargs = {
    "temperature": 0.95,
    "top_p": 0.7,
    "do_sample": True,
    "max_new_tokens": 16384
}

def get_assistant():
    """Run one generation turn over the current `messages` history.

    Applies the chat template with a generation prompt, samples with the
    shared `generate_kwargs`, and returns only the newly generated text
    (prompt tokens stripped, special tokens removed, whitespace trimmed).
    """
    model_inputs = tokenizer.apply_chat_template(
        messages,
        return_tensors="pt",
        add_generation_prompt=True,
        return_dict=True,
    ).to(model.device)
    prompt_length = model_inputs["input_ids"].shape[1]
    generated = model.generate(input_ids=model_inputs["input_ids"], **generate_kwargs)
    # Slice off the prompt so only the assistant's new tokens are decoded.
    new_tokens = generated[0][prompt_length:]
    return tokenizer.decode(new_tokens, skip_special_tokens=True).strip()

def get_observation(function_name, args):
    """Return a mock observation string for a model-issued tool call.

    Args:
        function_name: tool name — one of "search", "click", "open".
        args: tool arguments (unused by these mock implementations).

    Returns:
        The mock tool output as a string.

    Raises:
        ValueError: if `function_name` is not a supported tool name.
    """
    if function_name == "search":
        mock_search_res = [
            {"title": "t1", "url": "url1", "snippet": "snippet_content_1"},
            {"title": "t2", "url": "url2", "snippet": "snippet_content_2"}
        ]
        # Bug fix: the original wrapped the f-string in a one-element list,
        # so str.join received lists and raised TypeError. Join the
        # formatted strings directly.
        return "\n\n".join(
            f"【{i}†{res['title']}†{res['url']}\n{res['snippet']}】"
            for i, res in enumerate(mock_search_res)
        )
    if function_name == "click":
        return "main content"
    if function_name == "open":
        return "main_content"
    raise ValueError("unsupported function name!")
        
def get_func_name_args(llm_text):
    """Extract the tool name and arguments from a model response.

    Drops everything up to and including the closing </think> tag, then
    parses the remainder as a JSON function-call object of the form
    {"name": ..., "arguments": ...}.
    """
    payload = re.sub(r'.*?</think>', '', llm_text, flags=re.DOTALL)
    call = json.loads(payload)
    return call['name'], call['arguments']

def pipeline():
    """Drive the multi-turn tool-use loop until the model calls `finish`.

    Alternates assistant turns with mock tool observations for at most
    `max_turns` turns, appending each message to the shared `messages`
    history. Returns the final answer text, or None if the model never
    issued the finish call within the turn limit.
    """
    end_str = '{"name": "finish", "arguments": {}}'
    response = get_assistant()
    messages.append({"role": "assistant", "content": response})
    turns = 1
    max_turns = 35
    while turns < max_turns and not response.endswith(end_str):
        action, params = get_func_name_args(response)
        messages.append({"role": "observation", "content": get_observation(action, params)})
        response = get_assistant()
        messages.append({"role": "assistant", "content": response})
        turns += 1

    # One more generation turn produces the final answer after `finish`.
    return get_assistant() if response.endswith(end_str) else None

pipeline()
pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import re
import json

MODEL_PATH = "THUDM/GLM-4-Z1-Rumination-32B-0414"

tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH)
model = AutoModelForCausalLM.from_pretrained(MODEL_PATH, device_map="auto")

messages = [{"role": "user", "content": "Let a, b be positive real numbers such that ab = a + b + 3. Determine the range of possible values for a + b."}]

generate_kwargs = {
    "temperature": 0.95,
    "top_p": 0.7,
    "do_sample": True,
    "max_new_tokens": 16384
}

def get_assistant():
    inputs = tokenizer.apply_chat_template(
        messages,
        return_tensors="pt",
        add_generation_prompt=True,
        return_dict=True,
    ).to(model.device)
    out = model.generate(input_ids=inputs["input_ids"], **generate_kwargs)
    return tokenizer.decode(out[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True).strip()

def get_observation(function_name, args):
    """Return a mock observation string for a model-issued tool call.

    Args:
        function_name: tool name — one of "search", "click", "open".
        args: tool arguments (unused by these mock implementations).

    Raises:
        ValueError: if `function_name` is not a supported tool name.
    """
    if function_name == "search":
        mock_search_res = [
            {"title": "t1", "url": "url1", "snippet": "snippet_content_1"},
            {"title": "t2", "url": "url2", "snippet": "snippet_content_2"}
        ]
        # Bug fix: the original joined one-element *lists*, which makes
        # str.join raise TypeError; join the formatted strings directly.
        return "\n\n".join(
            f"【{i}†{res['title']}†{res['url']}\n{res['snippet']}】"
            for i, res in enumerate(mock_search_res)
        )
    if function_name == "click":
        return "main content"
    if function_name == "open":
        return "main_content"
    raise ValueError("unsupported function name!")
        
def get_func_name_args(llm_text):
    function_call = re.sub(r'.*?</think>', '', llm_text, flags=re.DOTALL)
    function_call = json.loads(function_call)
    action = function_call['name']
    params = function_call['arguments']
    return action, params

def pipeline():
    end_str = "{\"name\": \"finish\", \"arguments\": {}}"
    response = get_assistant()
    messages.append({"role": "assistant", "content": response})
    max_turns, turns = 35, 1
    while not response.endswith(end_str) and turns < max_turns:
        action, params = get_func_name_args(response)
        observation = get_observation(action, params)
        messages.append({"role": "observation", "content": observation})
        response = get_assistant()
        messages.append({"role": "assistant", "content": response})
        turns += 1
        
    if response.endswith(end_str):
        final_answer = get_assistant()
    else:
        final_answer = None
    return final_answer

pipeline()
pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import re
import json

MODEL_PATH = "THUDM/GLM-4-Z1-Rumination-32B-0414"

tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH)
model = AutoModelForCausalLM.from_pretrained(MODEL_PATH, device_map="auto")

messages = [{"role": "user", "content": "Let a, b be positive real numbers such that ab = a + b + 3. Determine the range of possible values for a + b."}]

generate_kwargs = {
    "temperature": 0.95,
    "top_p": 0.7,
    "do_sample": True,
    "max_new_tokens": 16384
}

def get_assistant():
    inputs = tokenizer.apply_chat_template(
        messages,
        return_tensors="pt",
        add_generation_prompt=True,
        return_dict=True,
    ).to(model.device)
    out = model.generate(input_ids=inputs["input_ids"], **generate_kwargs)
    return tokenizer.decode(out[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True).strip()

def get_observation(function_name, args):
    """Return a mock observation string for a model-issued tool call.

    Args:
        function_name: tool name — one of "search", "click", "open".
        args: tool arguments (unused by these mock implementations).

    Raises:
        ValueError: if `function_name` is not a supported tool name.
    """
    if function_name == "search":
        mock_search_res = [
            {"title": "t1", "url": "url1", "snippet": "snippet_content_1"},
            {"title": "t2", "url": "url2", "snippet": "snippet_content_2"}
        ]
        # Bug fix: the original joined one-element *lists*, which makes
        # str.join raise TypeError; join the formatted strings directly.
        return "\n\n".join(
            f"【{i}†{res['title']}†{res['url']}\n{res['snippet']}】"
            for i, res in enumerate(mock_search_res)
        )
    if function_name == "click":
        return "main content"
    if function_name == "open":
        return "main_content"
    raise ValueError("unsupported function name!")
        
def get_func_name_args(llm_text):
    function_call = re.sub(r'.*?</think>', '', llm_text, flags=re.DOTALL)
    function_call = json.loads(function_call)
    action = function_call['name']
    params = function_call['arguments']
    return action, params

def pipeline():
    end_str = "{\"name\": \"finish\", \"arguments\": {}}"
    response = get_assistant()
    messages.append({"role": "assistant", "content": response})
    max_turns, turns = 35, 1
    while not response.endswith(end_str) and turns < max_turns:
        action, params = get_func_name_args(response)
        observation = get_observation(action, params)
        messages.append({"role": "observation", "content": observation})
        response = get_assistant()
        messages.append({"role": "assistant", "content": response})
        turns += 1
        
    if response.endswith(end_str):
        final_answer = get_assistant()
    else:
        final_answer = None
    return final_answer

pipeline()
pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import re
import json

MODEL_PATH = "THUDM/GLM-4-Z1-Rumination-32B-0414"

tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH)
model = AutoModelForCausalLM.from_pretrained(MODEL_PATH, device_map="auto")

messages = [{"role": "user", "content": "Let a, b be positive real numbers such that ab = a + b + 3. Determine the range of possible values for a + b."}]

generate_kwargs = {
    "temperature": 0.95,
    "top_p": 0.7,
    "do_sample": True,
    "max_new_tokens": 16384
}

def get_assistant():
    inputs = tokenizer.apply_chat_template(
        messages,
        return_tensors="pt",
        add_generation_prompt=True,
        return_dict=True,
    ).to(model.device)
    out = model.generate(input_ids=inputs["input_ids"], **generate_kwargs)
    return tokenizer.decode(out[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True).strip()

def get_observation(function_name, args):
    """Return a mock observation string for a model-issued tool call.

    Args:
        function_name: tool name — one of "search", "click", "open".
        args: tool arguments (unused by these mock implementations).

    Raises:
        ValueError: if `function_name` is not a supported tool name.
    """
    if function_name == "search":
        mock_search_res = [
            {"title": "t1", "url": "url1", "snippet": "snippet_content_1"},
            {"title": "t2", "url": "url2", "snippet": "snippet_content_2"}
        ]
        # Bug fix: the original joined one-element *lists*, which makes
        # str.join raise TypeError; join the formatted strings directly.
        return "\n\n".join(
            f"【{i}†{res['title']}†{res['url']}\n{res['snippet']}】"
            for i, res in enumerate(mock_search_res)
        )
    if function_name == "click":
        return "main content"
    if function_name == "open":
        return "main_content"
    raise ValueError("unsupported function name!")
        
def get_func_name_args(llm_text):
    function_call = re.sub(r'.*?</think>', '', llm_text, flags=re.DOTALL)
    function_call = json.loads(function_call)
    action = function_call['name']
    params = function_call['arguments']
    return action, params

def pipeline():
    end_str = "{\"name\": \"finish\", \"arguments\": {}}"
    response = get_assistant()
    messages.append({"role": "assistant", "content": response})
    max_turns, turns = 35, 1
    while not response.endswith(end_str) and turns < max_turns:
        action, params = get_func_name_args(response)
        observation = get_observation(action, params)
        messages.append({"role": "observation", "content": observation})
        response = get_assistant()
        messages.append({"role": "assistant", "content": response})
        turns += 1
        
    if response.endswith(end_str):
        final_answer = get_assistant()
    else:
        final_answer = None
    return final_answer

pipeline()
pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import re
import json

MODEL_PATH = "THUDM/GLM-4-Z1-Rumination-32B-0414"

tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH)
model = AutoModelForCausalLM.from_pretrained(MODEL_PATH, device_map="auto")

messages = [{"role": "user", "content": "Let a, b be positive real numbers such that ab = a + b + 3. Determine the range of possible values for a + b."}]

generate_kwargs = {
    "temperature": 0.95,
    "top_p": 0.7,
    "do_sample": True,
    "max_new_tokens": 16384
}

def get_assistant():
    inputs = tokenizer.apply_chat_template(
        messages,
        return_tensors="pt",
        add_generation_prompt=True,
        return_dict=True,
    ).to(model.device)
    out = model.generate(input_ids=inputs["input_ids"], **generate_kwargs)
    return tokenizer.decode(out[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True).strip()

def get_observation(function_name, args):
    """Return a mock observation string for a model-issued tool call.

    Args:
        function_name: tool name — one of "search", "click", "open".
        args: tool arguments (unused by these mock implementations).

    Raises:
        ValueError: if `function_name` is not a supported tool name.
    """
    if function_name == "search":
        mock_search_res = [
            {"title": "t1", "url": "url1", "snippet": "snippet_content_1"},
            {"title": "t2", "url": "url2", "snippet": "snippet_content_2"}
        ]
        # Bug fix: the original joined one-element *lists*, which makes
        # str.join raise TypeError; join the formatted strings directly.
        return "\n\n".join(
            f"【{i}†{res['title']}†{res['url']}\n{res['snippet']}】"
            for i, res in enumerate(mock_search_res)
        )
    if function_name == "click":
        return "main content"
    if function_name == "open":
        return "main_content"
    raise ValueError("unsupported function name!")
        
def get_func_name_args(llm_text):
    function_call = re.sub(r'.*?</think>', '', llm_text, flags=re.DOTALL)
    function_call = json.loads(function_call)
    action = function_call['name']
    params = function_call['arguments']
    return action, params

def pipeline():
    end_str = "{\"name\": \"finish\", \"arguments\": {}}"
    response = get_assistant()
    messages.append({"role": "assistant", "content": response})
    max_turns, turns = 35, 1
    while not response.endswith(end_str) and turns < max_turns:
        action, params = get_func_name_args(response)
        observation = get_observation(action, params)
        messages.append({"role": "observation", "content": observation})
        response = get_assistant()
        messages.append({"role": "assistant", "content": response})
        turns += 1
        
    if response.endswith(end_str):
        final_answer = get_assistant()
    else:
        final_answer = None
    return final_answer

pipeline()
pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import re
import json

MODEL_PATH = "THUDM/GLM-4-Z1-Rumination-32B-0414"

tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH)
model = AutoModelForCausalLM.from_pretrained(MODEL_PATH, device_map="auto")

messages = [{"role": "user", "content": "Let a, b be positive real numbers such that ab = a + b + 3. Determine the range of possible values for a + b."}]

generate_kwargs = {
    "temperature": 0.95,
    "top_p": 0.7,
    "do_sample": True,
    "max_new_tokens": 16384
}

def get_assistant():
    inputs = tokenizer.apply_chat_template(
        messages,
        return_tensors="pt",
        add_generation_prompt=True,
        return_dict=True,
    ).to(model.device)
    out = model.generate(input_ids=inputs["input_ids"], **generate_kwargs)
    return tokenizer.decode(out[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True).strip()

def get_observation(function_name, args):
    """Return a mock observation string for a model-issued tool call.

    Args:
        function_name: tool name — one of "search", "click", "open".
        args: tool arguments (unused by these mock implementations).

    Raises:
        ValueError: if `function_name` is not a supported tool name.
    """
    if function_name == "search":
        mock_search_res = [
            {"title": "t1", "url": "url1", "snippet": "snippet_content_1"},
            {"title": "t2", "url": "url2", "snippet": "snippet_content_2"}
        ]
        # Bug fix: the original joined one-element *lists*, which makes
        # str.join raise TypeError; join the formatted strings directly.
        return "\n\n".join(
            f"【{i}†{res['title']}†{res['url']}\n{res['snippet']}】"
            for i, res in enumerate(mock_search_res)
        )
    if function_name == "click":
        return "main content"
    if function_name == "open":
        return "main_content"
    raise ValueError("unsupported function name!")
        
def get_func_name_args(llm_text):
    function_call = re.sub(r'.*?</think>', '', llm_text, flags=re.DOTALL)
    function_call = json.loads(function_call)
    action = function_call['name']
    params = function_call['arguments']
    return action, params

def pipeline():
    end_str = "{\"name\": \"finish\", \"arguments\": {}}"
    response = get_assistant()
    messages.append({"role": "assistant", "content": response})
    max_turns, turns = 35, 1
    while not response.endswith(end_str) and turns < max_turns:
        action, params = get_func_name_args(response)
        observation = get_observation(action, params)
        messages.append({"role": "observation", "content": observation})
        response = get_assistant()
        messages.append({"role": "assistant", "content": response})
        turns += 1
        
    if response.endswith(end_str):
        final_answer = get_assistant()
    else:
        final_answer = None
    return final_answer

pipeline()
pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import re
import json

MODEL_PATH = "THUDM/GLM-4-Z1-Rumination-32B-0414"

tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH)
model = AutoModelForCausalLM.from_pretrained(MODEL_PATH, device_map="auto")

messages = [{"role": "user", "content": "Let a, b be positive real numbers such that ab = a + b + 3. Determine the range of possible values for a + b."}]

generate_kwargs = {
    "temperature": 0.95,
    "top_p": 0.7,
    "do_sample": True,
    "max_new_tokens": 16384
}

def get_assistant():
    inputs = tokenizer.apply_chat_template(
        messages,
        return_tensors="pt",
        add_generation_prompt=True,
        return_dict=True,
    ).to(model.device)
    out = model.generate(input_ids=inputs["input_ids"], **generate_kwargs)
    return tokenizer.decode(out[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True).strip()

def get_observation(function_name, args):
    """Return a mock observation string for a model-issued tool call.

    Args:
        function_name: tool name — one of "search", "click", "open".
        args: tool arguments (unused by these mock implementations).

    Raises:
        ValueError: if `function_name` is not a supported tool name.
    """
    if function_name == "search":
        mock_search_res = [
            {"title": "t1", "url": "url1", "snippet": "snippet_content_1"},
            {"title": "t2", "url": "url2", "snippet": "snippet_content_2"}
        ]
        # Bug fix: the original joined one-element *lists*, which makes
        # str.join raise TypeError; join the formatted strings directly.
        return "\n\n".join(
            f"【{i}†{res['title']}†{res['url']}\n{res['snippet']}】"
            for i, res in enumerate(mock_search_res)
        )
    if function_name == "click":
        return "main content"
    if function_name == "open":
        return "main_content"
    raise ValueError("unsupported function name!")
        
def get_func_name_args(llm_text):
    function_call = re.sub(r'.*?</think>', '', llm_text, flags=re.DOTALL)
    function_call = json.loads(function_call)
    action = function_call['name']
    params = function_call['arguments']
    return action, params

def pipeline():
    end_str = "{\"name\": \"finish\", \"arguments\": {}}"
    response = get_assistant()
    messages.append({"role": "assistant", "content": response})
    max_turns, turns = 35, 1
    while not response.endswith(end_str) and turns < max_turns:
        action, params = get_func_name_args(response)
        observation = get_observation(action, params)
        messages.append({"role": "observation", "content": observation})
        response = get_assistant()
        messages.append({"role": "assistant", "content": response})
        turns += 1
        
    if response.endswith(end_str):
        final_answer = get_assistant()
    else:
        final_answer = None
    return final_answer

pipeline()
pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import re
import json

MODEL_PATH = "THUDM/GLM-4-Z1-Rumination-32B-0414"

tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH)
model = AutoModelForCausalLM.from_pretrained(MODEL_PATH, device_map="auto")

messages = [{"role": "user", "content": "Let a, b be positive real numbers such that ab = a + b + 3. Determine the range of possible values for a + b."}]

generate_kwargs = {
    "temperature": 0.95,
    "top_p": 0.7,
    "do_sample": True,
    "max_new_tokens": 16384
}

def get_assistant():
    inputs = tokenizer.apply_chat_template(
        messages,
        return_tensors="pt",
        add_generation_prompt=True,
        return_dict=True,
    ).to(model.device)
    out = model.generate(input_ids=inputs["input_ids"], **generate_kwargs)
    return tokenizer.decode(out[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True).strip()

def get_observation(function_name, args):
    """Return a mock observation string for a model-issued tool call.

    Args:
        function_name: tool name — one of "search", "click", "open".
        args: tool arguments (unused by these mock implementations).

    Raises:
        ValueError: if `function_name` is not a supported tool name.
    """
    if function_name == "search":
        mock_search_res = [
            {"title": "t1", "url": "url1", "snippet": "snippet_content_1"},
            {"title": "t2", "url": "url2", "snippet": "snippet_content_2"}
        ]
        # Bug fix: the original joined one-element *lists*, which makes
        # str.join raise TypeError; join the formatted strings directly.
        return "\n\n".join(
            f"【{i}†{res['title']}†{res['url']}\n{res['snippet']}】"
            for i, res in enumerate(mock_search_res)
        )
    if function_name == "click":
        return "main content"
    if function_name == "open":
        return "main_content"
    raise ValueError("unsupported function name!")
        
def get_func_name_args(llm_text):
    function_call = re.sub(r'.*?</think>', '', llm_text, flags=re.DOTALL)
    function_call = json.loads(function_call)
    action = function_call['name']
    params = function_call['arguments']
    return action, params

def pipeline():
    end_str = "{\"name\": \"finish\", \"arguments\": {}}"
    response = get_assistant()
    messages.append({"role": "assistant", "content": response})
    max_turns, turns = 35, 1
    while not response.endswith(end_str) and turns < max_turns:
        action, params = get_func_name_args(response)
        observation = get_observation(action, params)
        messages.append({"role": "observation", "content": observation})
        response = get_assistant()
        messages.append({"role": "assistant", "content": response})
        turns += 1
        
    if response.endswith(end_str):
        final_answer = get_assistant()
    else:
        final_answer = None
    return final_answer

pipeline()
pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import re
import json

MODEL_PATH = "THUDM/GLM-4-Z1-Rumination-32B-0414"

tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH)
model = AutoModelForCausalLM.from_pretrained(MODEL_PATH, device_map="auto")

messages = [{"role": "user", "content": "Let a, b be positive real numbers such that ab = a + b + 3. Determine the range of possible values for a + b."}]

generate_kwargs = {
    "temperature": 0.95,
    "top_p": 0.7,
    "do_sample": True,
    "max_new_tokens": 16384
}

def get_assistant():
    inputs = tokenizer.apply_chat_template(
        messages,
        return_tensors="pt",
        add_generation_prompt=True,
        return_dict=True,
    ).to(model.device)
    out = model.generate(input_ids=inputs["input_ids"], **generate_kwargs)
    return tokenizer.decode(out[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True).strip()

def get_observation(function_name, args):
    """Return a mock observation string for a model-issued tool call.

    Args:
        function_name: tool name — one of "search", "click", "open".
        args: tool arguments (unused by these mock implementations).

    Raises:
        ValueError: if `function_name` is not a supported tool name.
    """
    if function_name == "search":
        mock_search_res = [
            {"title": "t1", "url": "url1", "snippet": "snippet_content_1"},
            {"title": "t2", "url": "url2", "snippet": "snippet_content_2"}
        ]
        # Bug fix: the original joined one-element *lists*, which makes
        # str.join raise TypeError; join the formatted strings directly.
        return "\n\n".join(
            f"【{i}†{res['title']}†{res['url']}\n{res['snippet']}】"
            for i, res in enumerate(mock_search_res)
        )
    if function_name == "click":
        return "main content"
    if function_name == "open":
        return "main_content"
    raise ValueError("unsupported function name!")
        
def get_func_name_args(llm_text):
    function_call = re.sub(r'.*?</think>', '', llm_text, flags=re.DOTALL)
    function_call = json.loads(function_call)
    action = function_call['name']
    params = function_call['arguments']
    return action, params

def pipeline():
    end_str = "{\"name\": \"finish\", \"arguments\": {}}"
    response = get_assistant()
    messages.append({"role": "assistant", "content": response})
    max_turns, turns = 35, 1
    while not response.endswith(end_str) and turns < max_turns:
        action, params = get_func_name_args(response)
        observation = get_observation(action, params)
        messages.append({"role": "observation", "content": observation})
        response = get_assistant()
        messages.append({"role": "assistant", "content": response})
        turns += 1
        
    if response.endswith(end_str):
        final_answer = get_assistant()
    else:
        final_answer = None
    return final_answer

pipeline()
pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import re
import json

MODEL_PATH = "THUDM/GLM-4-Z1-Rumination-32B-0414"

tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH)
model = AutoModelForCausalLM.from_pretrained(MODEL_PATH, device_map="auto")

messages = [{"role": "user", "content": "Let a, b be positive real numbers such that ab = a + b + 3. Determine the range of possible values for a + b."}]

generate_kwargs = {
    "temperature": 0.95,
    "top_p": 0.7,
    "do_sample": True,
    "max_new_tokens": 16384
}

def get_assistant():
    inputs = tokenizer.apply_chat_template(
        messages,
        return_tensors="pt",
        add_generation_prompt=True,
        return_dict=True,
    ).to(model.device)
    out = model.generate(input_ids=inputs["input_ids"], **generate_kwargs)
    return tokenizer.decode(out[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True).strip()

def get_observation(function_name, args):
    """Return a mock observation string for a model-issued tool call.

    Args:
        function_name: tool name — one of "search", "click", "open".
        args: tool arguments (unused by these mock implementations).

    Raises:
        ValueError: if `function_name` is not a supported tool name.
    """
    if function_name == "search":
        mock_search_res = [
            {"title": "t1", "url": "url1", "snippet": "snippet_content_1"},
            {"title": "t2", "url": "url2", "snippet": "snippet_content_2"}
        ]
        # Bug fix: the original joined one-element *lists*, which makes
        # str.join raise TypeError; join the formatted strings directly.
        return "\n\n".join(
            f"【{i}†{res['title']}†{res['url']}\n{res['snippet']}】"
            for i, res in enumerate(mock_search_res)
        )
    if function_name == "click":
        return "main content"
    if function_name == "open":
        return "main_content"
    raise ValueError("unsupported function name!")
        
def get_func_name_args(llm_text):
    function_call = re.sub(r'.*?</think>', '', llm_text, flags=re.DOTALL)
    function_call = json.loads(function_call)
    action = function_call['name']
    params = function_call['arguments']
    return action, params

def pipeline():
    end_str = "{\"name\": \"finish\", \"arguments\": {}}"
    response = get_assistant()
    messages.append({"role": "assistant", "content": response})
    max_turns, turns = 35, 1
    while not response.endswith(end_str) and turns < max_turns:
        action, params = get_func_name_args(response)
        observation = get_observation(action, params)
        messages.append({"role": "observation", "content": observation})
        response = get_assistant()
        messages.append({"role": "assistant", "content": response})
        turns += 1
        
    if response.endswith(end_str):
        final_answer = get_assistant()
    else:
        final_answer = None
    return final_answer

pipeline()
pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import re
import json

MODEL_PATH = "THUDM/GLM-4-Z1-Rumination-32B-0414"

tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH)
model = AutoModelForCausalLM.from_pretrained(MODEL_PATH, device_map="auto")

messages = [{"role": "user", "content": "Let a, b be positive real numbers such that ab = a + b + 3. Determine the range of possible values for a + b."}]

generate_kwargs = {
    "temperature": 0.95,
    "top_p": 0.7,
    "do_sample": True,
    "max_new_tokens": 16384
}

def get_assistant():
    inputs = tokenizer.apply_chat_template(
        messages,
        return_tensors="pt",
        add_generation_prompt=True,
        return_dict=True,
    ).to(model.device)
    out = model.generate(input_ids=inputs["input_ids"], **generate_kwargs)
    return tokenizer.decode(out[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True).strip()

def get_observation(function_name, args):
    """Return a mock observation string for a model-issued tool call.

    Args:
        function_name: tool name — one of "search", "click", "open".
        args: tool arguments (unused by these mock implementations).

    Raises:
        ValueError: if `function_name` is not a supported tool name.
    """
    if function_name == "search":
        mock_search_res = [
            {"title": "t1", "url": "url1", "snippet": "snippet_content_1"},
            {"title": "t2", "url": "url2", "snippet": "snippet_content_2"}
        ]
        # Bug fix: the original joined one-element *lists*, which makes
        # str.join raise TypeError; join the formatted strings directly.
        return "\n\n".join(
            f"【{i}†{res['title']}†{res['url']}\n{res['snippet']}】"
            for i, res in enumerate(mock_search_res)
        )
    if function_name == "click":
        return "main content"
    if function_name == "open":
        return "main_content"
    raise ValueError("unsupported function name!")
        
def get_func_name_args(llm_text):
    function_call = re.sub(r'.*?</think>', '', llm_text, flags=re.DOTALL)
    function_call = json.loads(function_call)
    action = function_call['name']
    params = function_call['arguments']
    return action, params

def pipeline():
    end_str = "{\"name\": \"finish\", \"arguments\": {}}"
    response = get_assistant()
    messages.append({"role": "assistant", "content": response})
    max_turns, turns = 35, 1
    while not response.endswith(end_str) and turns < max_turns:
        action, params = get_func_name_args(response)
        observation = get_observation(action, params)
        messages.append({"role": "observation", "content": observation})
        response = get_assistant()
        messages.append({"role": "assistant", "content": response})
        turns += 1
        
    if response.endswith(end_str):
        final_answer = get_assistant()
    else:
        final_answer = None
    return final_answer

pipeline()
pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import re
import json

MODEL_PATH = "THUDM/GLM-4-Z1-Rumination-32B-0414"

tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH)
model = AutoModelForCausalLM.from_pretrained(MODEL_PATH, device_map="auto")

messages = [{"role": "user", "content": "Let a, b be positive real numbers such that ab = a + b + 3. Determine the range of possible values for a + b."}]

generate_kwargs = {
    "temperature": 0.95,
    "top_p": 0.7,
    "do_sample": True,
    "max_new_tokens": 16384
}

def get_assistant():
    inputs = tokenizer.apply_chat_template(
        messages,
        return_tensors="pt",
        add_generation_prompt=True,
        return_dict=True,
    ).to(model.device)
    out = model.generate(input_ids=inputs["input_ids"], **generate_kwargs)
    return tokenizer.decode(out[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True).strip()

def get_observation(function_name, args):
    """Return a mock observation string for a model-issued tool call.

    Args:
        function_name: tool name — one of "search", "click", "open".
        args: tool arguments (unused by these mock implementations).

    Raises:
        ValueError: if `function_name` is not a supported tool name.
    """
    if function_name == "search":
        mock_search_res = [
            {"title": "t1", "url": "url1", "snippet": "snippet_content_1"},
            {"title": "t2", "url": "url2", "snippet": "snippet_content_2"}
        ]
        # Bug fix: the original joined one-element *lists*, which makes
        # str.join raise TypeError; join the formatted strings directly.
        return "\n\n".join(
            f"【{i}†{res['title']}†{res['url']}\n{res['snippet']}】"
            for i, res in enumerate(mock_search_res)
        )
    if function_name == "click":
        return "main content"
    if function_name == "open":
        return "main_content"
    raise ValueError("unsupported function name!")
        
def get_func_name_args(llm_text):
    function_call = re.sub(r'.*?</think>', '', llm_text, flags=re.DOTALL)
    function_call = json.loads(function_call)
    action = function_call['name']
    params = function_call['arguments']
    return action, params

def pipeline():
    end_str = "{\"name\": \"finish\", \"arguments\": {}}"
    response = get_assistant()
    messages.append({"role": "assistant", "content": response})
    max_turns, turns = 35, 1
    while not response.endswith(end_str) and turns < max_turns:
        action, params = get_func_name_args(response)
        observation = get_observation(action, params)
        messages.append({"role": "observation", "content": observation})
        response = get_assistant()
        messages.append({"role": "assistant", "content": response})
        turns += 1
        
    if response.endswith(end_str):
        final_answer = get_assistant()
    else:
        final_answer = None
    return final_answer

pipeline()
pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import re
import json

MODEL_PATH = "THUDM/GLM-4-Z1-Rumination-32B-0414"

tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH)
model = AutoModelForCausalLM.from_pretrained(MODEL_PATH, device_map="auto")

# Shared conversation history; the pipeline appends assistant/observation turns.
messages = [{"role": "user", "content": "Let a, b be positive real numbers such that ab = a + b + 3. Determine the range of possible values for a + b."}]

generate_kwargs = {
    "temperature": 0.95,
    "top_p": 0.7,
    "do_sample": True,
    "max_new_tokens": 16384
}

def get_assistant():
    """Generate and decode the model's next turn from `messages`."""
    inputs = tokenizer.apply_chat_template(
        messages,
        return_tensors="pt",
        add_generation_prompt=True,
        return_dict=True,
    ).to(model.device)
    out = model.generate(input_ids=inputs["input_ids"], **generate_kwargs)
    # Decode only the tokens generated after the prompt.
    return tokenizer.decode(out[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True).strip()

def get_observation(function_name, args):
    """Return a mocked tool observation for the agent loop.

    Raises:
        ValueError: If `function_name` is not "search", "click", or "open".
    """
    content = None
    if function_name == "search":
        mock_search_res = [
            {"title": "t1", "url": "url1", "snippet": "snippet_content_1"},
            {"title": "t2", "url": "url2", "snippet": "snippet_content_2"}
        ]
        # FIX: join the formatted strings directly — the original wrapped the
        # f-string in [...], so str.join received lists and raised TypeError.
        content = "\n\n".join(
            f"【{i}†{res['title']}†{res['url']}\n{res['snippet']}】"
            for i, res in enumerate(mock_search_res)
        )
    elif function_name == "click":
        content = "main content"
    elif function_name == "open":
        content = "main_content"
    else:
        # FIX: corrected misspelled error message ("unspport").
        raise ValueError("unsupported function name!")
    return content

def get_func_name_args(llm_text):
    """Parse the JSON tool call that follows the </think> reasoning block."""
    function_call = re.sub(r'.*?</think>', '', llm_text, flags=re.DOTALL)
    function_call = json.loads(function_call)
    return function_call['name'], function_call['arguments']

def pipeline():
    """Alternate model turns and tool observations until a finish call."""
    end_str = "{\"name\": \"finish\", \"arguments\": {}}"
    response = get_assistant()
    messages.append({"role": "assistant", "content": response})
    max_turns, turns = 35, 1
    while not response.endswith(end_str) and turns < max_turns:
        action, params = get_func_name_args(response)
        observation = get_observation(action, params)
        messages.append({"role": "observation", "content": observation})
        response = get_assistant()
        messages.append({"role": "assistant", "content": response})
        turns += 1
    # After the finish call, one more generation turn yields the final answer.
    if response.endswith(end_str):
        final_answer = get_assistant()
    else:
        final_answer = None
    return final_answer

pipeline()
pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import re
import json

MODEL_PATH = "THUDM/GLM-4-Z1-Rumination-32B-0414"

tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH)
model = AutoModelForCausalLM.from_pretrained(MODEL_PATH, device_map="auto")

messages = [{"role": "user", "content": "Let a, b be positive real numbers such that ab = a + b + 3. Determine the range of possible values for a + b."}]

generate_kwargs = {
    "temperature": 0.95,
    "top_p": 0.7,
    "do_sample": True,
    "max_new_tokens": 16384
}

def get_assistant():
    inputs = tokenizer.apply_chat_template(
        messages,
        return_tensors="pt",
        add_generation_prompt=True,
        return_dict=True,
    ).to(model.device)
    out = model.generate(input_ids=inputs["input_ids"], **generate_kwargs)
    return tokenizer.decode(out[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True).strip()

def get_observation(function_name, args):
    """Return a mocked tool observation; raises ValueError for unknown tools."""
    content = None
    if function_name == "search":
        mock_search_res = [
            {"title": "t1", "url": "url1", "snippet": "snippet_content_1"},
            {"title": "t2", "url": "url2", "snippet": "snippet_content_2"}
        ]
        # FIX: join strings, not single-element lists (original raised TypeError).
        content = "\n\n".join(
            f"【{i}†{res['title']}†{res['url']}\n{res['snippet']}】"
            for i, res in enumerate(mock_search_res)
        )
    elif function_name == "click":
        content = "main content"
    elif function_name == "open":
        content = "main_content"
    else:
        # FIX: corrected misspelled error message ("unspport").
        raise ValueError("unsupported function name!")
    return content
        
def get_func_name_args(llm_text):
    function_call = re.sub(r'.*?</think>', '', llm_text, flags=re.DOTALL)
    function_call = json.loads(function_call)
    action = function_call['name']
    params = function_call['arguments']
    return action, params

def pipeline():
    end_str = "{\"name\": \"finish\", \"arguments\": {}}"
    response = get_assistant()
    messages.append({"role": "assistant", "content": response})
    max_turns, turns = 35, 1
    while not response.endswith(end_str) and turns < max_turns:
        action, params = get_func_name_args(response)
        observation = get_observation(action, params)
        messages.append({"role": "observation", "content": observation})
        response = get_assistant()
        messages.append({"role": "assistant", "content": response})
        turns += 1
        
    if response.endswith(end_str):
        final_answer = get_assistant()
    else:
        final_answer = None
    return final_answer

pipeline()
pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import re
import json

MODEL_PATH = "THUDM/GLM-4-Z1-Rumination-32B-0414"

tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH)
model = AutoModelForCausalLM.from_pretrained(MODEL_PATH, device_map="auto")

messages = [{"role": "user", "content": "Let a, b be positive real numbers such that ab = a + b + 3. Determine the range of possible values for a + b."}]

generate_kwargs = {
    "temperature": 0.95,
    "top_p": 0.7,
    "do_sample": True,
    "max_new_tokens": 16384
}

def get_assistant():
    inputs = tokenizer.apply_chat_template(
        messages,
        return_tensors="pt",
        add_generation_prompt=True,
        return_dict=True,
    ).to(model.device)
    out = model.generate(input_ids=inputs["input_ids"], **generate_kwargs)
    return tokenizer.decode(out[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True).strip()

def get_observation(function_name, args):
    """Return a mocked tool observation; raises ValueError for unknown tools."""
    content = None
    if function_name == "search":
        mock_search_res = [
            {"title": "t1", "url": "url1", "snippet": "snippet_content_1"},
            {"title": "t2", "url": "url2", "snippet": "snippet_content_2"}
        ]
        # FIX: join strings, not single-element lists (original raised TypeError).
        content = "\n\n".join(
            f"【{i}†{res['title']}†{res['url']}\n{res['snippet']}】"
            for i, res in enumerate(mock_search_res)
        )
    elif function_name == "click":
        content = "main content"
    elif function_name == "open":
        content = "main_content"
    else:
        # FIX: corrected misspelled error message ("unspport").
        raise ValueError("unsupported function name!")
    return content
        
def get_func_name_args(llm_text):
    function_call = re.sub(r'.*?</think>', '', llm_text, flags=re.DOTALL)
    function_call = json.loads(function_call)
    action = function_call['name']
    params = function_call['arguments']
    return action, params

def pipeline():
    end_str = "{\"name\": \"finish\", \"arguments\": {}}"
    response = get_assistant()
    messages.append({"role": "assistant", "content": response})
    max_turns, turns = 35, 1
    while not response.endswith(end_str) and turns < max_turns:
        action, params = get_func_name_args(response)
        observation = get_observation(action, params)
        messages.append({"role": "observation", "content": observation})
        response = get_assistant()
        messages.append({"role": "assistant", "content": response})
        turns += 1
        
    if response.endswith(end_str):
        final_answer = get_assistant()
    else:
        final_answer = None
    return final_answer

pipeline()
pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import re
import json

MODEL_PATH = "THUDM/GLM-4-Z1-Rumination-32B-0414"

tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH)
model = AutoModelForCausalLM.from_pretrained(MODEL_PATH, device_map="auto")

messages = [{"role": "user", "content": "Let a, b be positive real numbers such that ab = a + b + 3. Determine the range of possible values for a + b."}]

generate_kwargs = {
    "temperature": 0.95,
    "top_p": 0.7,
    "do_sample": True,
    "max_new_tokens": 16384
}

def get_assistant():
    inputs = tokenizer.apply_chat_template(
        messages,
        return_tensors="pt",
        add_generation_prompt=True,
        return_dict=True,
    ).to(model.device)
    out = model.generate(input_ids=inputs["input_ids"], **generate_kwargs)
    return tokenizer.decode(out[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True).strip()

def get_observation(function_name, args):
    """Return a mocked tool observation; raises ValueError for unknown tools."""
    content = None
    if function_name == "search":
        mock_search_res = [
            {"title": "t1", "url": "url1", "snippet": "snippet_content_1"},
            {"title": "t2", "url": "url2", "snippet": "snippet_content_2"}
        ]
        # FIX: join strings, not single-element lists (original raised TypeError).
        content = "\n\n".join(
            f"【{i}†{res['title']}†{res['url']}\n{res['snippet']}】"
            for i, res in enumerate(mock_search_res)
        )
    elif function_name == "click":
        content = "main content"
    elif function_name == "open":
        content = "main_content"
    else:
        # FIX: corrected misspelled error message ("unspport").
        raise ValueError("unsupported function name!")
    return content
        
def get_func_name_args(llm_text):
    function_call = re.sub(r'.*?</think>', '', llm_text, flags=re.DOTALL)
    function_call = json.loads(function_call)
    action = function_call['name']
    params = function_call['arguments']
    return action, params

def pipeline():
    end_str = "{\"name\": \"finish\", \"arguments\": {}}"
    response = get_assistant()
    messages.append({"role": "assistant", "content": response})
    max_turns, turns = 35, 1
    while not response.endswith(end_str) and turns < max_turns:
        action, params = get_func_name_args(response)
        observation = get_observation(action, params)
        messages.append({"role": "observation", "content": observation})
        response = get_assistant()
        messages.append({"role": "assistant", "content": response})
        turns += 1
        
    if response.endswith(end_str):
        final_answer = get_assistant()
    else:
        final_answer = None
    return final_answer

pipeline()
pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import re
import json

MODEL_PATH = "THUDM/GLM-4-Z1-Rumination-32B-0414"

tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH)
model = AutoModelForCausalLM.from_pretrained(MODEL_PATH, device_map="auto")

messages = [{"role": "user", "content": "Let a, b be positive real numbers such that ab = a + b + 3. Determine the range of possible values for a + b."}]

generate_kwargs = {
    "temperature": 0.95,
    "top_p": 0.7,
    "do_sample": True,
    "max_new_tokens": 16384
}

def get_assistant():
    inputs = tokenizer.apply_chat_template(
        messages,
        return_tensors="pt",
        add_generation_prompt=True,
        return_dict=True,
    ).to(model.device)
    out = model.generate(input_ids=inputs["input_ids"], **generate_kwargs)
    return tokenizer.decode(out[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True).strip()

def get_observation(function_name, args):
    """Return a mocked tool observation; raises ValueError for unknown tools."""
    content = None
    if function_name == "search":
        mock_search_res = [
            {"title": "t1", "url": "url1", "snippet": "snippet_content_1"},
            {"title": "t2", "url": "url2", "snippet": "snippet_content_2"}
        ]
        # FIX: join strings, not single-element lists (original raised TypeError).
        content = "\n\n".join(
            f"【{i}†{res['title']}†{res['url']}\n{res['snippet']}】"
            for i, res in enumerate(mock_search_res)
        )
    elif function_name == "click":
        content = "main content"
    elif function_name == "open":
        content = "main_content"
    else:
        # FIX: corrected misspelled error message ("unspport").
        raise ValueError("unsupported function name!")
    return content
        
def get_func_name_args(llm_text):
    function_call = re.sub(r'.*?</think>', '', llm_text, flags=re.DOTALL)
    function_call = json.loads(function_call)
    action = function_call['name']
    params = function_call['arguments']
    return action, params

def pipeline():
    end_str = "{\"name\": \"finish\", \"arguments\": {}}"
    response = get_assistant()
    messages.append({"role": "assistant", "content": response})
    max_turns, turns = 35, 1
    while not response.endswith(end_str) and turns < max_turns:
        action, params = get_func_name_args(response)
        observation = get_observation(action, params)
        messages.append({"role": "observation", "content": observation})
        response = get_assistant()
        messages.append({"role": "assistant", "content": response})
        turns += 1
        
    if response.endswith(end_str):
        final_answer = get_assistant()
    else:
        final_answer = None
    return final_answer

pipeline()
pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import re
import json

MODEL_PATH = "THUDM/GLM-4-Z1-Rumination-32B-0414"

tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH)
model = AutoModelForCausalLM.from_pretrained(MODEL_PATH, device_map="auto")

messages = [{"role": "user", "content": "Let a, b be positive real numbers such that ab = a + b + 3. Determine the range of possible values for a + b."}]

generate_kwargs = {
    "temperature": 0.95,
    "top_p": 0.7,
    "do_sample": True,
    "max_new_tokens": 16384
}

def get_assistant():
    inputs = tokenizer.apply_chat_template(
        messages,
        return_tensors="pt",
        add_generation_prompt=True,
        return_dict=True,
    ).to(model.device)
    out = model.generate(input_ids=inputs["input_ids"], **generate_kwargs)
    return tokenizer.decode(out[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True).strip()

def get_observation(function_name, args):
    """Return a mocked tool observation; raises ValueError for unknown tools."""
    content = None
    if function_name == "search":
        mock_search_res = [
            {"title": "t1", "url": "url1", "snippet": "snippet_content_1"},
            {"title": "t2", "url": "url2", "snippet": "snippet_content_2"}
        ]
        # FIX: join strings, not single-element lists (original raised TypeError).
        content = "\n\n".join(
            f"【{i}†{res['title']}†{res['url']}\n{res['snippet']}】"
            for i, res in enumerate(mock_search_res)
        )
    elif function_name == "click":
        content = "main content"
    elif function_name == "open":
        content = "main_content"
    else:
        # FIX: corrected misspelled error message ("unspport").
        raise ValueError("unsupported function name!")
    return content
        
def get_func_name_args(llm_text):
    function_call = re.sub(r'.*?</think>', '', llm_text, flags=re.DOTALL)
    function_call = json.loads(function_call)
    action = function_call['name']
    params = function_call['arguments']
    return action, params

def pipeline():
    end_str = "{\"name\": \"finish\", \"arguments\": {}}"
    response = get_assistant()
    messages.append({"role": "assistant", "content": response})
    max_turns, turns = 35, 1
    while not response.endswith(end_str) and turns < max_turns:
        action, params = get_func_name_args(response)
        observation = get_observation(action, params)
        messages.append({"role": "observation", "content": observation})
        response = get_assistant()
        messages.append({"role": "assistant", "content": response})
        turns += 1
        
    if response.endswith(end_str):
        final_answer = get_assistant()
    else:
        final_answer = None
    return final_answer

pipeline()
pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import re
import json

MODEL_PATH = "THUDM/GLM-4-Z1-Rumination-32B-0414"

tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH)
model = AutoModelForCausalLM.from_pretrained(MODEL_PATH, device_map="auto")

messages = [{"role": "user", "content": "Let a, b be positive real numbers such that ab = a + b + 3. Determine the range of possible values for a + b."}]

generate_kwargs = {
    "temperature": 0.95,
    "top_p": 0.7,
    "do_sample": True,
    "max_new_tokens": 16384
}

def get_assistant():
    inputs = tokenizer.apply_chat_template(
        messages,
        return_tensors="pt",
        add_generation_prompt=True,
        return_dict=True,
    ).to(model.device)
    out = model.generate(input_ids=inputs["input_ids"], **generate_kwargs)
    return tokenizer.decode(out[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True).strip()

def get_observation(function_name, args):
    """Return a mocked tool observation; raises ValueError for unknown tools."""
    content = None
    if function_name == "search":
        mock_search_res = [
            {"title": "t1", "url": "url1", "snippet": "snippet_content_1"},
            {"title": "t2", "url": "url2", "snippet": "snippet_content_2"}
        ]
        # FIX: join strings, not single-element lists (original raised TypeError).
        content = "\n\n".join(
            f"【{i}†{res['title']}†{res['url']}\n{res['snippet']}】"
            for i, res in enumerate(mock_search_res)
        )
    elif function_name == "click":
        content = "main content"
    elif function_name == "open":
        content = "main_content"
    else:
        # FIX: corrected misspelled error message ("unspport").
        raise ValueError("unsupported function name!")
    return content
        
def get_func_name_args(llm_text):
    function_call = re.sub(r'.*?</think>', '', llm_text, flags=re.DOTALL)
    function_call = json.loads(function_call)
    action = function_call['name']
    params = function_call['arguments']
    return action, params

def pipeline():
    end_str = "{\"name\": \"finish\", \"arguments\": {}}"
    response = get_assistant()
    messages.append({"role": "assistant", "content": response})
    max_turns, turns = 35, 1
    while not response.endswith(end_str) and turns < max_turns:
        action, params = get_func_name_args(response)
        observation = get_observation(action, params)
        messages.append({"role": "observation", "content": observation})
        response = get_assistant()
        messages.append({"role": "assistant", "content": response})
        turns += 1
        
    if response.endswith(end_str):
        final_answer = get_assistant()
    else:
        final_answer = None
    return final_answer

pipeline()
pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import re
import json

MODEL_PATH = "THUDM/GLM-4-Z1-Rumination-32B-0414"

tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH)
model = AutoModelForCausalLM.from_pretrained(MODEL_PATH, device_map="auto")

messages = [{"role": "user", "content": "Let a, b be positive real numbers such that ab = a + b + 3. Determine the range of possible values for a + b."}]

generate_kwargs = {
    "temperature": 0.95,
    "top_p": 0.7,
    "do_sample": True,
    "max_new_tokens": 16384
}

def get_assistant():
    inputs = tokenizer.apply_chat_template(
        messages,
        return_tensors="pt",
        add_generation_prompt=True,
        return_dict=True,
    ).to(model.device)
    out = model.generate(input_ids=inputs["input_ids"], **generate_kwargs)
    return tokenizer.decode(out[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True).strip()

def get_observation(function_name, args):
    """Return a mocked tool observation; raises ValueError for unknown tools."""
    content = None
    if function_name == "search":
        mock_search_res = [
            {"title": "t1", "url": "url1", "snippet": "snippet_content_1"},
            {"title": "t2", "url": "url2", "snippet": "snippet_content_2"}
        ]
        # FIX: join strings, not single-element lists (original raised TypeError).
        content = "\n\n".join(
            f"【{i}†{res['title']}†{res['url']}\n{res['snippet']}】"
            for i, res in enumerate(mock_search_res)
        )
    elif function_name == "click":
        content = "main content"
    elif function_name == "open":
        content = "main_content"
    else:
        # FIX: corrected misspelled error message ("unspport").
        raise ValueError("unsupported function name!")
    return content
        
def get_func_name_args(llm_text):
    function_call = re.sub(r'.*?</think>', '', llm_text, flags=re.DOTALL)
    function_call = json.loads(function_call)
    action = function_call['name']
    params = function_call['arguments']
    return action, params

def pipeline():
    end_str = "{\"name\": \"finish\", \"arguments\": {}}"
    response = get_assistant()
    messages.append({"role": "assistant", "content": response})
    max_turns, turns = 35, 1
    while not response.endswith(end_str) and turns < max_turns:
        action, params = get_func_name_args(response)
        observation = get_observation(action, params)
        messages.append({"role": "observation", "content": observation})
        response = get_assistant()
        messages.append({"role": "assistant", "content": response})
        turns += 1
        
    if response.endswith(end_str):
        final_answer = get_assistant()
    else:
        final_answer = None
    return final_answer

pipeline()
pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import re
import json

MODEL_PATH = "THUDM/GLM-4-Z1-Rumination-32B-0414"

tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH)
model = AutoModelForCausalLM.from_pretrained(MODEL_PATH, device_map="auto")

messages = [{"role": "user", "content": "Let a, b be positive real numbers such that ab = a + b + 3. Determine the range of possible values for a + b."}]

generate_kwargs = {
    "temperature": 0.95,
    "top_p": 0.7,
    "do_sample": True,
    "max_new_tokens": 16384
}

def get_assistant():
    inputs = tokenizer.apply_chat_template(
        messages,
        return_tensors="pt",
        add_generation_prompt=True,
        return_dict=True,
    ).to(model.device)
    out = model.generate(input_ids=inputs["input_ids"], **generate_kwargs)
    return tokenizer.decode(out[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True).strip()

def get_observation(function_name, args):
    """Return a mocked tool observation; raises ValueError for unknown tools."""
    content = None
    if function_name == "search":
        mock_search_res = [
            {"title": "t1", "url": "url1", "snippet": "snippet_content_1"},
            {"title": "t2", "url": "url2", "snippet": "snippet_content_2"}
        ]
        # FIX: join strings, not single-element lists (original raised TypeError).
        content = "\n\n".join(
            f"【{i}†{res['title']}†{res['url']}\n{res['snippet']}】"
            for i, res in enumerate(mock_search_res)
        )
    elif function_name == "click":
        content = "main content"
    elif function_name == "open":
        content = "main_content"
    else:
        # FIX: corrected misspelled error message ("unspport").
        raise ValueError("unsupported function name!")
    return content
        
def get_func_name_args(llm_text):
    function_call = re.sub(r'.*?</think>', '', llm_text, flags=re.DOTALL)
    function_call = json.loads(function_call)
    action = function_call['name']
    params = function_call['arguments']
    return action, params

def pipeline():
    end_str = "{\"name\": \"finish\", \"arguments\": {}}"
    response = get_assistant()
    messages.append({"role": "assistant", "content": response})
    max_turns, turns = 35, 1
    while not response.endswith(end_str) and turns < max_turns:
        action, params = get_func_name_args(response)
        observation = get_observation(action, params)
        messages.append({"role": "observation", "content": observation})
        response = get_assistant()
        messages.append({"role": "assistant", "content": response})
        turns += 1
        
    if response.endswith(end_str):
        final_answer = get_assistant()
    else:
        final_answer = None
    return final_answer

pipeline()
pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import re
import json

MODEL_PATH = "THUDM/GLM-4-Z1-Rumination-32B-0414"

tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH)
model = AutoModelForCausalLM.from_pretrained(MODEL_PATH, device_map="auto")

messages = [{"role": "user", "content": "Let a, b be positive real numbers such that ab = a + b + 3. Determine the range of possible values for a + b."}]

generate_kwargs = {
    "temperature": 0.95,
    "top_p": 0.7,
    "do_sample": True,
    "max_new_tokens": 16384
}

def get_assistant():
    inputs = tokenizer.apply_chat_template(
        messages,
        return_tensors="pt",
        add_generation_prompt=True,
        return_dict=True,
    ).to(model.device)
    out = model.generate(input_ids=inputs["input_ids"], **generate_kwargs)
    return tokenizer.decode(out[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True).strip()

def get_observation(function_name, args):
    """Return a mocked tool observation; raises ValueError for unknown tools."""
    content = None
    if function_name == "search":
        mock_search_res = [
            {"title": "t1", "url": "url1", "snippet": "snippet_content_1"},
            {"title": "t2", "url": "url2", "snippet": "snippet_content_2"}
        ]
        # FIX: join strings, not single-element lists (original raised TypeError).
        content = "\n\n".join(
            f"【{i}†{res['title']}†{res['url']}\n{res['snippet']}】"
            for i, res in enumerate(mock_search_res)
        )
    elif function_name == "click":
        content = "main content"
    elif function_name == "open":
        content = "main_content"
    else:
        # FIX: corrected misspelled error message ("unspport").
        raise ValueError("unsupported function name!")
    return content
        
def get_func_name_args(llm_text):
    function_call = re.sub(r'.*?</think>', '', llm_text, flags=re.DOTALL)
    function_call = json.loads(function_call)
    action = function_call['name']
    params = function_call['arguments']
    return action, params

def pipeline():
    end_str = "{\"name\": \"finish\", \"arguments\": {}}"
    response = get_assistant()
    messages.append({"role": "assistant", "content": response})
    max_turns, turns = 35, 1
    while not response.endswith(end_str) and turns < max_turns:
        action, params = get_func_name_args(response)
        observation = get_observation(action, params)
        messages.append({"role": "observation", "content": observation})
        response = get_assistant()
        messages.append({"role": "assistant", "content": response})
        turns += 1
        
    if response.endswith(end_str):
        final_answer = get_assistant()
    else:
        final_answer = None
    return final_answer

pipeline()
pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import re
import json

MODEL_PATH = "THUDM/GLM-4-Z1-Rumination-32B-0414"

tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH)
model = AutoModelForCausalLM.from_pretrained(MODEL_PATH, device_map="auto")

messages = [{"role": "user", "content": "Let a, b be positive real numbers such that ab = a + b + 3. Determine the range of possible values for a + b."}]

generate_kwargs = {
    "temperature": 0.95,
    "top_p": 0.7,
    "do_sample": True,
    "max_new_tokens": 16384
}

def get_assistant():
    inputs = tokenizer.apply_chat_template(
        messages,
        return_tensors="pt",
        add_generation_prompt=True,
        return_dict=True,
    ).to(model.device)
    out = model.generate(input_ids=inputs["input_ids"], **generate_kwargs)
    return tokenizer.decode(out[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True).strip()

def get_observation(function_name, args):
    """Return a mocked tool observation; raises ValueError for unknown tools."""
    content = None
    if function_name == "search":
        mock_search_res = [
            {"title": "t1", "url": "url1", "snippet": "snippet_content_1"},
            {"title": "t2", "url": "url2", "snippet": "snippet_content_2"}
        ]
        # FIX: join strings, not single-element lists (original raised TypeError).
        content = "\n\n".join(
            f"【{i}†{res['title']}†{res['url']}\n{res['snippet']}】"
            for i, res in enumerate(mock_search_res)
        )
    elif function_name == "click":
        content = "main content"
    elif function_name == "open":
        content = "main_content"
    else:
        # FIX: corrected misspelled error message ("unspport").
        raise ValueError("unsupported function name!")
    return content
        
def get_func_name_args(llm_text):
    function_call = re.sub(r'.*?</think>', '', llm_text, flags=re.DOTALL)
    function_call = json.loads(function_call)
    action = function_call['name']
    params = function_call['arguments']
    return action, params

def pipeline():
    end_str = "{\"name\": \"finish\", \"arguments\": {}}"
    response = get_assistant()
    messages.append({"role": "assistant", "content": response})
    max_turns, turns = 35, 1
    while not response.endswith(end_str) and turns < max_turns:
        action, params = get_func_name_args(response)
        observation = get_observation(action, params)
        messages.append({"role": "observation", "content": observation})
        response = get_assistant()
        messages.append({"role": "assistant", "content": response})
        turns += 1
        
    if response.endswith(end_str):
        final_answer = get_assistant()
    else:
        final_answer = None
    return final_answer

pipeline()
pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import re
import json

MODEL_PATH = "THUDM/GLM-4-Z1-Rumination-32B-0414"

tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH)
model = AutoModelForCausalLM.from_pretrained(MODEL_PATH, device_map="auto")

messages = [{"role": "user", "content": "Let a, b be positive real numbers such that ab = a + b + 3. Determine the range of possible values for a + b."}]

generate_kwargs = {
    "temperature": 0.95,
    "top_p": 0.7,
    "do_sample": True,
    "max_new_tokens": 16384
}

def get_assistant():
    inputs = tokenizer.apply_chat_template(
        messages,
        return_tensors="pt",
        add_generation_prompt=True,
        return_dict=True,
    ).to(model.device)
    out = model.generate(input_ids=inputs["input_ids"], **generate_kwargs)
    return tokenizer.decode(out[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True).strip()

def get_observation(function_name, args):
    """Return a mocked tool observation; raises ValueError for unknown tools."""
    content = None
    if function_name == "search":
        mock_search_res = [
            {"title": "t1", "url": "url1", "snippet": "snippet_content_1"},
            {"title": "t2", "url": "url2", "snippet": "snippet_content_2"}
        ]
        # FIX: join strings, not single-element lists (original raised TypeError).
        content = "\n\n".join(
            f"【{i}†{res['title']}†{res['url']}\n{res['snippet']}】"
            for i, res in enumerate(mock_search_res)
        )
    elif function_name == "click":
        content = "main content"
    elif function_name == "open":
        content = "main_content"
    else:
        # FIX: corrected misspelled error message ("unspport").
        raise ValueError("unsupported function name!")
    return content
        
def get_func_name_args(llm_text):
    function_call = re.sub(r'.*?</think>', '', llm_text, flags=re.DOTALL)
    function_call = json.loads(function_call)
    action = function_call['name']
    params = function_call['arguments']
    return action, params

def pipeline():
    end_str = "{\"name\": \"finish\", \"arguments\": {}}"
    response = get_assistant()
    messages.append({"role": "assistant", "content": response})
    max_turns, turns = 35, 1
    while not response.endswith(end_str) and turns < max_turns:
        action, params = get_func_name_args(response)
        observation = get_observation(action, params)
        messages.append({"role": "observation", "content": observation})
        response = get_assistant()
        messages.append({"role": "assistant", "content": response})
        turns += 1
        
    if response.endswith(end_str):
        final_answer = get_assistant()
    else:
        final_answer = None
    return final_answer

pipeline()
pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import re
import json

MODEL_PATH = "THUDM/GLM-4-Z1-Rumination-32B-0414"

tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH)
model = AutoModelForCausalLM.from_pretrained(MODEL_PATH, device_map="auto")

# Shared conversation history; every helper below appends to it in place.
messages = [{"role": "user", "content": "Let a, b be positive real numbers such that ab = a + b + 3. Determine the range of possible values for a + b."}]

generate_kwargs = {
    "temperature": 0.95,
    "top_p": 0.7,
    "do_sample": True,
    "max_new_tokens": 16384
}

def get_assistant():
    """Generate the next assistant turn from the current `messages` history."""
    inputs = tokenizer.apply_chat_template(
        messages,
        return_tensors="pt",
        add_generation_prompt=True,
        return_dict=True,
    ).to(model.device)
    out = model.generate(input_ids=inputs["input_ids"], **generate_kwargs)
    # Decode only the newly generated tokens, dropping the prompt prefix.
    return tokenizer.decode(out[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True).strip()

def get_observation(function_name, args):
    """Return a mock tool observation string for `function_name`.

    Raises:
        ValueError: if `function_name` is not one of search/click/open.
    """
    if function_name == "search":
        mock_search_res = [
            {"title": "t1", "url": "url1", "snippet": "snippet_content_1"},
            {"title": "t2", "url": "url2", "snippet": "snippet_content_2"}
        ]
        # BUG FIX: the original wrapped the f-string in a one-element list,
        # handing str.join an iterable of *lists* and raising
        # "TypeError: sequence item 0: expected str instance, list found".
        # Join the formatted strings directly.
        return "\n\n".join(
            f"【{i}†{res['title']}†{res['url']}\n{res['snippet']}】"
            for i, res in enumerate(mock_search_res)
        )
    if function_name == "click":
        return "main content"
    if function_name == "open":
        return "main_content"
    # Fixed typo in the original message ("unspport").
    raise ValueError(f"unsupported function name: {function_name!r}")

def get_func_name_args(llm_text):
    """Parse the tool-call JSON that follows the model's </think> block.

    Returns:
        (action, params) from the {"name": ..., "arguments": ...} payload.
    """
    function_call = re.sub(r'.*?</think>', '', llm_text, flags=re.DOTALL)
    function_call = json.loads(function_call)
    return function_call['name'], function_call['arguments']

def pipeline():
    """Run the rumination loop: generate, execute the requested tool call,
    feed the observation back, and repeat until the model emits the `finish`
    call or the turn budget is exhausted.

    Returns:
        The final answer text, or None if `max_turns` was reached first.
    """
    end_str = '{"name": "finish", "arguments": {}}'
    response = get_assistant()
    messages.append({"role": "assistant", "content": response})
    max_turns, turns = 35, 1
    while not response.endswith(end_str) and turns < max_turns:
        action, params = get_func_name_args(response)
        observation = get_observation(action, params)
        messages.append({"role": "observation", "content": observation})
        response = get_assistant()
        messages.append({"role": "assistant", "content": response})
        turns += 1

    # One more generation after `finish` produces the user-facing answer.
    return get_assistant() if response.endswith(end_str) else None

pipeline()
pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import re
import json

MODEL_PATH = "THUDM/GLM-4-Z1-Rumination-32B-0414"

tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH)
model = AutoModelForCausalLM.from_pretrained(MODEL_PATH, device_map="auto")

# Shared conversation history; every helper below appends to it in place.
messages = [{"role": "user", "content": "Let a, b be positive real numbers such that ab = a + b + 3. Determine the range of possible values for a + b."}]

generate_kwargs = {
    "temperature": 0.95,
    "top_p": 0.7,
    "do_sample": True,
    "max_new_tokens": 16384
}

def get_assistant():
    """Generate the next assistant turn from the current `messages` history."""
    inputs = tokenizer.apply_chat_template(
        messages,
        return_tensors="pt",
        add_generation_prompt=True,
        return_dict=True,
    ).to(model.device)
    out = model.generate(input_ids=inputs["input_ids"], **generate_kwargs)
    # Decode only the newly generated tokens, dropping the prompt prefix.
    return tokenizer.decode(out[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True).strip()

def get_observation(function_name, args):
    """Return a mock tool observation string for `function_name`.

    Raises:
        ValueError: if `function_name` is not one of search/click/open.
    """
    if function_name == "search":
        mock_search_res = [
            {"title": "t1", "url": "url1", "snippet": "snippet_content_1"},
            {"title": "t2", "url": "url2", "snippet": "snippet_content_2"}
        ]
        # BUG FIX: the original wrapped the f-string in a one-element list,
        # handing str.join an iterable of *lists* and raising
        # "TypeError: sequence item 0: expected str instance, list found".
        # Join the formatted strings directly.
        return "\n\n".join(
            f"【{i}†{res['title']}†{res['url']}\n{res['snippet']}】"
            for i, res in enumerate(mock_search_res)
        )
    if function_name == "click":
        return "main content"
    if function_name == "open":
        return "main_content"
    # Fixed typo in the original message ("unspport").
    raise ValueError(f"unsupported function name: {function_name!r}")

def get_func_name_args(llm_text):
    """Parse the tool-call JSON that follows the model's </think> block.

    Returns:
        (action, params) from the {"name": ..., "arguments": ...} payload.
    """
    function_call = re.sub(r'.*?</think>', '', llm_text, flags=re.DOTALL)
    function_call = json.loads(function_call)
    return function_call['name'], function_call['arguments']

def pipeline():
    """Run the rumination loop: generate, execute the requested tool call,
    feed the observation back, and repeat until the model emits the `finish`
    call or the turn budget is exhausted.

    Returns:
        The final answer text, or None if `max_turns` was reached first.
    """
    end_str = '{"name": "finish", "arguments": {}}'
    response = get_assistant()
    messages.append({"role": "assistant", "content": response})
    max_turns, turns = 35, 1
    while not response.endswith(end_str) and turns < max_turns:
        action, params = get_func_name_args(response)
        observation = get_observation(action, params)
        messages.append({"role": "observation", "content": observation})
        response = get_assistant()
        messages.append({"role": "assistant", "content": response})
        turns += 1

    # One more generation after `finish` produces the user-facing answer.
    return get_assistant() if response.endswith(end_str) else None

pipeline()
pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import re
import json

MODEL_PATH = "THUDM/GLM-4-Z1-Rumination-32B-0414"

tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH)
model = AutoModelForCausalLM.from_pretrained(MODEL_PATH, device_map="auto")

# Shared conversation history; every helper below appends to it in place.
messages = [{"role": "user", "content": "Let a, b be positive real numbers such that ab = a + b + 3. Determine the range of possible values for a + b."}]

generate_kwargs = {
    "temperature": 0.95,
    "top_p": 0.7,
    "do_sample": True,
    "max_new_tokens": 16384
}

def get_assistant():
    """Generate the next assistant turn from the current `messages` history."""
    inputs = tokenizer.apply_chat_template(
        messages,
        return_tensors="pt",
        add_generation_prompt=True,
        return_dict=True,
    ).to(model.device)
    out = model.generate(input_ids=inputs["input_ids"], **generate_kwargs)
    # Decode only the newly generated tokens, dropping the prompt prefix.
    return tokenizer.decode(out[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True).strip()

def get_observation(function_name, args):
    """Return a mock tool observation string for `function_name`.

    Raises:
        ValueError: if `function_name` is not one of search/click/open.
    """
    if function_name == "search":
        mock_search_res = [
            {"title": "t1", "url": "url1", "snippet": "snippet_content_1"},
            {"title": "t2", "url": "url2", "snippet": "snippet_content_2"}
        ]
        # BUG FIX: the original wrapped the f-string in a one-element list,
        # handing str.join an iterable of *lists* and raising
        # "TypeError: sequence item 0: expected str instance, list found".
        # Join the formatted strings directly.
        return "\n\n".join(
            f"【{i}†{res['title']}†{res['url']}\n{res['snippet']}】"
            for i, res in enumerate(mock_search_res)
        )
    if function_name == "click":
        return "main content"
    if function_name == "open":
        return "main_content"
    # Fixed typo in the original message ("unspport").
    raise ValueError(f"unsupported function name: {function_name!r}")

def get_func_name_args(llm_text):
    """Parse the tool-call JSON that follows the model's </think> block.

    Returns:
        (action, params) from the {"name": ..., "arguments": ...} payload.
    """
    function_call = re.sub(r'.*?</think>', '', llm_text, flags=re.DOTALL)
    function_call = json.loads(function_call)
    return function_call['name'], function_call['arguments']

def pipeline():
    """Run the rumination loop: generate, execute the requested tool call,
    feed the observation back, and repeat until the model emits the `finish`
    call or the turn budget is exhausted.

    Returns:
        The final answer text, or None if `max_turns` was reached first.
    """
    end_str = '{"name": "finish", "arguments": {}}'
    response = get_assistant()
    messages.append({"role": "assistant", "content": response})
    max_turns, turns = 35, 1
    while not response.endswith(end_str) and turns < max_turns:
        action, params = get_func_name_args(response)
        observation = get_observation(action, params)
        messages.append({"role": "observation", "content": observation})
        response = get_assistant()
        messages.append({"role": "assistant", "content": response})
        turns += 1

    # One more generation after `finish` produces the user-facing answer.
    return get_assistant() if response.endswith(end_str) else None

pipeline()
pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import re
import json

MODEL_PATH = "THUDM/GLM-4-Z1-Rumination-32B-0414"

tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH)
model = AutoModelForCausalLM.from_pretrained(MODEL_PATH, device_map="auto")

# Shared conversation history; every helper below appends to it in place.
messages = [{"role": "user", "content": "Let a, b be positive real numbers such that ab = a + b + 3. Determine the range of possible values for a + b."}]

generate_kwargs = {
    "temperature": 0.95,
    "top_p": 0.7,
    "do_sample": True,
    "max_new_tokens": 16384
}

def get_assistant():
    """Generate the next assistant turn from the current `messages` history."""
    inputs = tokenizer.apply_chat_template(
        messages,
        return_tensors="pt",
        add_generation_prompt=True,
        return_dict=True,
    ).to(model.device)
    out = model.generate(input_ids=inputs["input_ids"], **generate_kwargs)
    # Decode only the newly generated tokens, dropping the prompt prefix.
    return tokenizer.decode(out[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True).strip()

def get_observation(function_name, args):
    """Return a mock tool observation string for `function_name`.

    Raises:
        ValueError: if `function_name` is not one of search/click/open.
    """
    if function_name == "search":
        mock_search_res = [
            {"title": "t1", "url": "url1", "snippet": "snippet_content_1"},
            {"title": "t2", "url": "url2", "snippet": "snippet_content_2"}
        ]
        # BUG FIX: the original wrapped the f-string in a one-element list,
        # handing str.join an iterable of *lists* and raising
        # "TypeError: sequence item 0: expected str instance, list found".
        # Join the formatted strings directly.
        return "\n\n".join(
            f"【{i}†{res['title']}†{res['url']}\n{res['snippet']}】"
            for i, res in enumerate(mock_search_res)
        )
    if function_name == "click":
        return "main content"
    if function_name == "open":
        return "main_content"
    # Fixed typo in the original message ("unspport").
    raise ValueError(f"unsupported function name: {function_name!r}")

def get_func_name_args(llm_text):
    """Parse the tool-call JSON that follows the model's </think> block.

    Returns:
        (action, params) from the {"name": ..., "arguments": ...} payload.
    """
    function_call = re.sub(r'.*?</think>', '', llm_text, flags=re.DOTALL)
    function_call = json.loads(function_call)
    return function_call['name'], function_call['arguments']

def pipeline():
    """Run the rumination loop: generate, execute the requested tool call,
    feed the observation back, and repeat until the model emits the `finish`
    call or the turn budget is exhausted.

    Returns:
        The final answer text, or None if `max_turns` was reached first.
    """
    end_str = '{"name": "finish", "arguments": {}}'
    response = get_assistant()
    messages.append({"role": "assistant", "content": response})
    max_turns, turns = 35, 1
    while not response.endswith(end_str) and turns < max_turns:
        action, params = get_func_name_args(response)
        observation = get_observation(action, params)
        messages.append({"role": "observation", "content": observation})
        response = get_assistant()
        messages.append({"role": "assistant", "content": response})
        turns += 1

    # One more generation after `finish` produces the user-facing answer.
    return get_assistant() if response.endswith(end_str) else None

pipeline()
pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import re
import json

MODEL_PATH = "THUDM/GLM-4-Z1-Rumination-32B-0414"

tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH)
model = AutoModelForCausalLM.from_pretrained(MODEL_PATH, device_map="auto")

# Shared conversation history; every helper below appends to it in place.
messages = [{"role": "user", "content": "Let a, b be positive real numbers such that ab = a + b + 3. Determine the range of possible values for a + b."}]

generate_kwargs = {
    "temperature": 0.95,
    "top_p": 0.7,
    "do_sample": True,
    "max_new_tokens": 16384
}

def get_assistant():
    """Generate the next assistant turn from the current `messages` history."""
    inputs = tokenizer.apply_chat_template(
        messages,
        return_tensors="pt",
        add_generation_prompt=True,
        return_dict=True,
    ).to(model.device)
    out = model.generate(input_ids=inputs["input_ids"], **generate_kwargs)
    # Decode only the newly generated tokens, dropping the prompt prefix.
    return tokenizer.decode(out[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True).strip()

def get_observation(function_name, args):
    """Return a mock tool observation string for `function_name`.

    Raises:
        ValueError: if `function_name` is not one of search/click/open.
    """
    if function_name == "search":
        mock_search_res = [
            {"title": "t1", "url": "url1", "snippet": "snippet_content_1"},
            {"title": "t2", "url": "url2", "snippet": "snippet_content_2"}
        ]
        # BUG FIX: the original wrapped the f-string in a one-element list,
        # handing str.join an iterable of *lists* and raising
        # "TypeError: sequence item 0: expected str instance, list found".
        # Join the formatted strings directly.
        return "\n\n".join(
            f"【{i}†{res['title']}†{res['url']}\n{res['snippet']}】"
            for i, res in enumerate(mock_search_res)
        )
    if function_name == "click":
        return "main content"
    if function_name == "open":
        return "main_content"
    # Fixed typo in the original message ("unspport").
    raise ValueError(f"unsupported function name: {function_name!r}")

def get_func_name_args(llm_text):
    """Parse the tool-call JSON that follows the model's </think> block.

    Returns:
        (action, params) from the {"name": ..., "arguments": ...} payload.
    """
    function_call = re.sub(r'.*?</think>', '', llm_text, flags=re.DOTALL)
    function_call = json.loads(function_call)
    return function_call['name'], function_call['arguments']

def pipeline():
    """Run the rumination loop: generate, execute the requested tool call,
    feed the observation back, and repeat until the model emits the `finish`
    call or the turn budget is exhausted.

    Returns:
        The final answer text, or None if `max_turns` was reached first.
    """
    end_str = '{"name": "finish", "arguments": {}}'
    response = get_assistant()
    messages.append({"role": "assistant", "content": response})
    max_turns, turns = 35, 1
    while not response.endswith(end_str) and turns < max_turns:
        action, params = get_func_name_args(response)
        observation = get_observation(action, params)
        messages.append({"role": "observation", "content": observation})
        response = get_assistant()
        messages.append({"role": "assistant", "content": response})
        turns += 1

    # One more generation after `finish` produces the user-facing answer.
    return get_assistant() if response.endswith(end_str) else None

pipeline()
pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import re
import json

MODEL_PATH = "THUDM/GLM-4-Z1-Rumination-32B-0414"

tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH)
model = AutoModelForCausalLM.from_pretrained(MODEL_PATH, device_map="auto")

# Shared conversation history; every helper below appends to it in place.
messages = [{"role": "user", "content": "Let a, b be positive real numbers such that ab = a + b + 3. Determine the range of possible values for a + b."}]

generate_kwargs = {
    "temperature": 0.95,
    "top_p": 0.7,
    "do_sample": True,
    "max_new_tokens": 16384
}

def get_assistant():
    """Generate the next assistant turn from the current `messages` history."""
    inputs = tokenizer.apply_chat_template(
        messages,
        return_tensors="pt",
        add_generation_prompt=True,
        return_dict=True,
    ).to(model.device)
    out = model.generate(input_ids=inputs["input_ids"], **generate_kwargs)
    # Decode only the newly generated tokens, dropping the prompt prefix.
    return tokenizer.decode(out[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True).strip()

def get_observation(function_name, args):
    """Return a mock tool observation string for `function_name`.

    Raises:
        ValueError: if `function_name` is not one of search/click/open.
    """
    if function_name == "search":
        mock_search_res = [
            {"title": "t1", "url": "url1", "snippet": "snippet_content_1"},
            {"title": "t2", "url": "url2", "snippet": "snippet_content_2"}
        ]
        # BUG FIX: the original wrapped the f-string in a one-element list,
        # handing str.join an iterable of *lists* and raising
        # "TypeError: sequence item 0: expected str instance, list found".
        # Join the formatted strings directly.
        return "\n\n".join(
            f"【{i}†{res['title']}†{res['url']}\n{res['snippet']}】"
            for i, res in enumerate(mock_search_res)
        )
    if function_name == "click":
        return "main content"
    if function_name == "open":
        return "main_content"
    # Fixed typo in the original message ("unspport").
    raise ValueError(f"unsupported function name: {function_name!r}")

def get_func_name_args(llm_text):
    """Parse the tool-call JSON that follows the model's </think> block.

    Returns:
        (action, params) from the {"name": ..., "arguments": ...} payload.
    """
    function_call = re.sub(r'.*?</think>', '', llm_text, flags=re.DOTALL)
    function_call = json.loads(function_call)
    return function_call['name'], function_call['arguments']

def pipeline():
    """Run the rumination loop: generate, execute the requested tool call,
    feed the observation back, and repeat until the model emits the `finish`
    call or the turn budget is exhausted.

    Returns:
        The final answer text, or None if `max_turns` was reached first.
    """
    end_str = '{"name": "finish", "arguments": {}}'
    response = get_assistant()
    messages.append({"role": "assistant", "content": response})
    max_turns, turns = 35, 1
    while not response.endswith(end_str) and turns < max_turns:
        action, params = get_func_name_args(response)
        observation = get_observation(action, params)
        messages.append({"role": "observation", "content": observation})
        response = get_assistant()
        messages.append({"role": "assistant", "content": response})
        turns += 1

    # One more generation after `finish` produces the user-facing answer.
    return get_assistant() if response.endswith(end_str) else None

pipeline()
pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import re
import json

MODEL_PATH = "THUDM/GLM-4-Z1-Rumination-32B-0414"

tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH)
model = AutoModelForCausalLM.from_pretrained(MODEL_PATH, device_map="auto")

# Shared conversation history; every helper below appends to it in place.
messages = [{"role": "user", "content": "Let a, b be positive real numbers such that ab = a + b + 3. Determine the range of possible values for a + b."}]

generate_kwargs = {
    "temperature": 0.95,
    "top_p": 0.7,
    "do_sample": True,
    "max_new_tokens": 16384
}

def get_assistant():
    """Generate the next assistant turn from the current `messages` history."""
    inputs = tokenizer.apply_chat_template(
        messages,
        return_tensors="pt",
        add_generation_prompt=True,
        return_dict=True,
    ).to(model.device)
    out = model.generate(input_ids=inputs["input_ids"], **generate_kwargs)
    # Decode only the newly generated tokens, dropping the prompt prefix.
    return tokenizer.decode(out[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True).strip()

def get_observation(function_name, args):
    """Return a mock tool observation string for `function_name`.

    Raises:
        ValueError: if `function_name` is not one of search/click/open.
    """
    if function_name == "search":
        mock_search_res = [
            {"title": "t1", "url": "url1", "snippet": "snippet_content_1"},
            {"title": "t2", "url": "url2", "snippet": "snippet_content_2"}
        ]
        # BUG FIX: the original wrapped the f-string in a one-element list,
        # handing str.join an iterable of *lists* and raising
        # "TypeError: sequence item 0: expected str instance, list found".
        # Join the formatted strings directly.
        return "\n\n".join(
            f"【{i}†{res['title']}†{res['url']}\n{res['snippet']}】"
            for i, res in enumerate(mock_search_res)
        )
    if function_name == "click":
        return "main content"
    if function_name == "open":
        return "main_content"
    # Fixed typo in the original message ("unspport").
    raise ValueError(f"unsupported function name: {function_name!r}")

def get_func_name_args(llm_text):
    """Parse the tool-call JSON that follows the model's </think> block.

    Returns:
        (action, params) from the {"name": ..., "arguments": ...} payload.
    """
    function_call = re.sub(r'.*?</think>', '', llm_text, flags=re.DOTALL)
    function_call = json.loads(function_call)
    return function_call['name'], function_call['arguments']

def pipeline():
    """Run the rumination loop: generate, execute the requested tool call,
    feed the observation back, and repeat until the model emits the `finish`
    call or the turn budget is exhausted.

    Returns:
        The final answer text, or None if `max_turns` was reached first.
    """
    end_str = '{"name": "finish", "arguments": {}}'
    response = get_assistant()
    messages.append({"role": "assistant", "content": response})
    max_turns, turns = 35, 1
    while not response.endswith(end_str) and turns < max_turns:
        action, params = get_func_name_args(response)
        observation = get_observation(action, params)
        messages.append({"role": "observation", "content": observation})
        response = get_assistant()
        messages.append({"role": "assistant", "content": response})
        turns += 1

    # One more generation after `finish` produces the user-facing answer.
    return get_assistant() if response.endswith(end_str) else None

pipeline()
pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import re
import json

MODEL_PATH = "THUDM/GLM-4-Z1-Rumination-32B-0414"

tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH)
model = AutoModelForCausalLM.from_pretrained(MODEL_PATH, device_map="auto")

# Shared conversation history; every helper below appends to it in place.
messages = [{"role": "user", "content": "Let a, b be positive real numbers such that ab = a + b + 3. Determine the range of possible values for a + b."}]

generate_kwargs = {
    "temperature": 0.95,
    "top_p": 0.7,
    "do_sample": True,
    "max_new_tokens": 16384
}

def get_assistant():
    """Generate the next assistant turn from the current `messages` history."""
    inputs = tokenizer.apply_chat_template(
        messages,
        return_tensors="pt",
        add_generation_prompt=True,
        return_dict=True,
    ).to(model.device)
    out = model.generate(input_ids=inputs["input_ids"], **generate_kwargs)
    # Decode only the newly generated tokens, dropping the prompt prefix.
    return tokenizer.decode(out[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True).strip()

def get_observation(function_name, args):
    """Return a mock tool observation string for `function_name`.

    Raises:
        ValueError: if `function_name` is not one of search/click/open.
    """
    if function_name == "search":
        mock_search_res = [
            {"title": "t1", "url": "url1", "snippet": "snippet_content_1"},
            {"title": "t2", "url": "url2", "snippet": "snippet_content_2"}
        ]
        # BUG FIX: the original wrapped the f-string in a one-element list,
        # handing str.join an iterable of *lists* and raising
        # "TypeError: sequence item 0: expected str instance, list found".
        # Join the formatted strings directly.
        return "\n\n".join(
            f"【{i}†{res['title']}†{res['url']}\n{res['snippet']}】"
            for i, res in enumerate(mock_search_res)
        )
    if function_name == "click":
        return "main content"
    if function_name == "open":
        return "main_content"
    # Fixed typo in the original message ("unspport").
    raise ValueError(f"unsupported function name: {function_name!r}")

def get_func_name_args(llm_text):
    """Parse the tool-call JSON that follows the model's </think> block.

    Returns:
        (action, params) from the {"name": ..., "arguments": ...} payload.
    """
    function_call = re.sub(r'.*?</think>', '', llm_text, flags=re.DOTALL)
    function_call = json.loads(function_call)
    return function_call['name'], function_call['arguments']

def pipeline():
    """Run the rumination loop: generate, execute the requested tool call,
    feed the observation back, and repeat until the model emits the `finish`
    call or the turn budget is exhausted.

    Returns:
        The final answer text, or None if `max_turns` was reached first.
    """
    end_str = '{"name": "finish", "arguments": {}}'
    response = get_assistant()
    messages.append({"role": "assistant", "content": response})
    max_turns, turns = 35, 1
    while not response.endswith(end_str) and turns < max_turns:
        action, params = get_func_name_args(response)
        observation = get_observation(action, params)
        messages.append({"role": "observation", "content": observation})
        response = get_assistant()
        messages.append({"role": "assistant", "content": response})
        turns += 1

    # One more generation after `finish` produces the user-facing answer.
    return get_assistant() if response.endswith(end_str) else None

pipeline()
pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import re
import json

MODEL_PATH = "THUDM/GLM-4-Z1-Rumination-32B-0414"

tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH)
model = AutoModelForCausalLM.from_pretrained(MODEL_PATH, device_map="auto")

# Shared conversation history; every helper below appends to it in place.
messages = [{"role": "user", "content": "Let a, b be positive real numbers such that ab = a + b + 3. Determine the range of possible values for a + b."}]

generate_kwargs = {
    "temperature": 0.95,
    "top_p": 0.7,
    "do_sample": True,
    "max_new_tokens": 16384
}

def get_assistant():
    """Generate the next assistant turn from the current `messages` history."""
    inputs = tokenizer.apply_chat_template(
        messages,
        return_tensors="pt",
        add_generation_prompt=True,
        return_dict=True,
    ).to(model.device)
    out = model.generate(input_ids=inputs["input_ids"], **generate_kwargs)
    # Decode only the newly generated tokens, dropping the prompt prefix.
    return tokenizer.decode(out[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True).strip()

def get_observation(function_name, args):
    """Return a mock tool observation string for `function_name`.

    Raises:
        ValueError: if `function_name` is not one of search/click/open.
    """
    if function_name == "search":
        mock_search_res = [
            {"title": "t1", "url": "url1", "snippet": "snippet_content_1"},
            {"title": "t2", "url": "url2", "snippet": "snippet_content_2"}
        ]
        # BUG FIX: the original wrapped the f-string in a one-element list,
        # handing str.join an iterable of *lists* and raising
        # "TypeError: sequence item 0: expected str instance, list found".
        # Join the formatted strings directly.
        return "\n\n".join(
            f"【{i}†{res['title']}†{res['url']}\n{res['snippet']}】"
            for i, res in enumerate(mock_search_res)
        )
    if function_name == "click":
        return "main content"
    if function_name == "open":
        return "main_content"
    # Fixed typo in the original message ("unspport").
    raise ValueError(f"unsupported function name: {function_name!r}")

def get_func_name_args(llm_text):
    """Parse the tool-call JSON that follows the model's </think> block.

    Returns:
        (action, params) from the {"name": ..., "arguments": ...} payload.
    """
    function_call = re.sub(r'.*?</think>', '', llm_text, flags=re.DOTALL)
    function_call = json.loads(function_call)
    return function_call['name'], function_call['arguments']

def pipeline():
    """Run the rumination loop: generate, execute the requested tool call,
    feed the observation back, and repeat until the model emits the `finish`
    call or the turn budget is exhausted.

    Returns:
        The final answer text, or None if `max_turns` was reached first.
    """
    end_str = '{"name": "finish", "arguments": {}}'
    response = get_assistant()
    messages.append({"role": "assistant", "content": response})
    max_turns, turns = 35, 1
    while not response.endswith(end_str) and turns < max_turns:
        action, params = get_func_name_args(response)
        observation = get_observation(action, params)
        messages.append({"role": "observation", "content": observation})
        response = get_assistant()
        messages.append({"role": "assistant", "content": response})
        turns += 1

    # One more generation after `finish` produces the user-facing answer.
    return get_assistant() if response.endswith(end_str) else None

pipeline()
pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import re
import json

MODEL_PATH = "THUDM/GLM-4-Z1-Rumination-32B-0414"

tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH)
model = AutoModelForCausalLM.from_pretrained(MODEL_PATH, device_map="auto")

# Shared conversation history; every helper below appends to it in place.
messages = [{"role": "user", "content": "Let a, b be positive real numbers such that ab = a + b + 3. Determine the range of possible values for a + b."}]

generate_kwargs = {
    "temperature": 0.95,
    "top_p": 0.7,
    "do_sample": True,
    "max_new_tokens": 16384
}

def get_assistant():
    """Generate the next assistant turn from the current `messages` history."""
    inputs = tokenizer.apply_chat_template(
        messages,
        return_tensors="pt",
        add_generation_prompt=True,
        return_dict=True,
    ).to(model.device)
    out = model.generate(input_ids=inputs["input_ids"], **generate_kwargs)
    # Decode only the newly generated tokens, dropping the prompt prefix.
    return tokenizer.decode(out[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True).strip()

def get_observation(function_name, args):
    """Return a mock tool observation string for `function_name`.

    Raises:
        ValueError: if `function_name` is not one of search/click/open.
    """
    if function_name == "search":
        mock_search_res = [
            {"title": "t1", "url": "url1", "snippet": "snippet_content_1"},
            {"title": "t2", "url": "url2", "snippet": "snippet_content_2"}
        ]
        # BUG FIX: the original wrapped the f-string in a one-element list,
        # handing str.join an iterable of *lists* and raising
        # "TypeError: sequence item 0: expected str instance, list found".
        # Join the formatted strings directly.
        return "\n\n".join(
            f"【{i}†{res['title']}†{res['url']}\n{res['snippet']}】"
            for i, res in enumerate(mock_search_res)
        )
    if function_name == "click":
        return "main content"
    if function_name == "open":
        return "main_content"
    # Fixed typo in the original message ("unspport").
    raise ValueError(f"unsupported function name: {function_name!r}")

def get_func_name_args(llm_text):
    """Parse the tool-call JSON that follows the model's </think> block.

    Returns:
        (action, params) from the {"name": ..., "arguments": ...} payload.
    """
    function_call = re.sub(r'.*?</think>', '', llm_text, flags=re.DOTALL)
    function_call = json.loads(function_call)
    return function_call['name'], function_call['arguments']

def pipeline():
    """Run the rumination loop: generate, execute the requested tool call,
    feed the observation back, and repeat until the model emits the `finish`
    call or the turn budget is exhausted.

    Returns:
        The final answer text, or None if `max_turns` was reached first.
    """
    end_str = '{"name": "finish", "arguments": {}}'
    response = get_assistant()
    messages.append({"role": "assistant", "content": response})
    max_turns, turns = 35, 1
    while not response.endswith(end_str) and turns < max_turns:
        action, params = get_func_name_args(response)
        observation = get_observation(action, params)
        messages.append({"role": "observation", "content": observation})
        response = get_assistant()
        messages.append({"role": "assistant", "content": response})
        turns += 1

    # One more generation after `finish` produces the user-facing answer.
    return get_assistant() if response.endswith(end_str) else None

pipeline()
pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import re
import json

MODEL_PATH = "THUDM/GLM-4-Z1-Rumination-32B-0414"

tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH)
model = AutoModelForCausalLM.from_pretrained(MODEL_PATH, device_map="auto")

# Shared conversation history; every helper below appends to it in place.
messages = [{"role": "user", "content": "Let a, b be positive real numbers such that ab = a + b + 3. Determine the range of possible values for a + b."}]

generate_kwargs = {
    "temperature": 0.95,
    "top_p": 0.7,
    "do_sample": True,
    "max_new_tokens": 16384
}

def get_assistant():
    """Generate the next assistant turn from the current `messages` history."""
    inputs = tokenizer.apply_chat_template(
        messages,
        return_tensors="pt",
        add_generation_prompt=True,
        return_dict=True,
    ).to(model.device)
    out = model.generate(input_ids=inputs["input_ids"], **generate_kwargs)
    # Decode only the newly generated tokens, dropping the prompt prefix.
    return tokenizer.decode(out[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True).strip()

def get_observation(function_name, args):
    """Return a mock tool observation string for `function_name`.

    Raises:
        ValueError: if `function_name` is not one of search/click/open.
    """
    if function_name == "search":
        mock_search_res = [
            {"title": "t1", "url": "url1", "snippet": "snippet_content_1"},
            {"title": "t2", "url": "url2", "snippet": "snippet_content_2"}
        ]
        # BUG FIX: the original wrapped the f-string in a one-element list,
        # handing str.join an iterable of *lists* and raising
        # "TypeError: sequence item 0: expected str instance, list found".
        # Join the formatted strings directly.
        return "\n\n".join(
            f"【{i}†{res['title']}†{res['url']}\n{res['snippet']}】"
            for i, res in enumerate(mock_search_res)
        )
    if function_name == "click":
        return "main content"
    if function_name == "open":
        return "main_content"
    # Fixed typo in the original message ("unspport").
    raise ValueError(f"unsupported function name: {function_name!r}")

def get_func_name_args(llm_text):
    """Parse the tool-call JSON that follows the model's </think> block.

    Returns:
        (action, params) from the {"name": ..., "arguments": ...} payload.
    """
    function_call = re.sub(r'.*?</think>', '', llm_text, flags=re.DOTALL)
    function_call = json.loads(function_call)
    return function_call['name'], function_call['arguments']

def pipeline():
    """Run the rumination loop: generate, execute the requested tool call,
    feed the observation back, and repeat until the model emits the `finish`
    call or the turn budget is exhausted.

    Returns:
        The final answer text, or None if `max_turns` was reached first.
    """
    end_str = '{"name": "finish", "arguments": {}}'
    response = get_assistant()
    messages.append({"role": "assistant", "content": response})
    max_turns, turns = 35, 1
    while not response.endswith(end_str) and turns < max_turns:
        action, params = get_func_name_args(response)
        observation = get_observation(action, params)
        messages.append({"role": "observation", "content": observation})
        response = get_assistant()
        messages.append({"role": "assistant", "content": response})
        turns += 1

    # One more generation after `finish` produces the user-facing answer.
    return get_assistant() if response.endswith(end_str) else None

pipeline()
pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import re
import json

MODEL_PATH = "THUDM/GLM-4-Z1-Rumination-32B-0414"

tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH)
model = AutoModelForCausalLM.from_pretrained(MODEL_PATH, device_map="auto")

# Shared conversation history; every helper below appends to it in place.
messages = [{"role": "user", "content": "Let a, b be positive real numbers such that ab = a + b + 3. Determine the range of possible values for a + b."}]

generate_kwargs = {
    "temperature": 0.95,
    "top_p": 0.7,
    "do_sample": True,
    "max_new_tokens": 16384
}

def get_assistant():
    """Generate the next assistant turn from the current `messages` history."""
    inputs = tokenizer.apply_chat_template(
        messages,
        return_tensors="pt",
        add_generation_prompt=True,
        return_dict=True,
    ).to(model.device)
    out = model.generate(input_ids=inputs["input_ids"], **generate_kwargs)
    # Decode only the newly generated tokens, dropping the prompt prefix.
    return tokenizer.decode(out[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True).strip()

def get_observation(function_name, args):
    """Return a mock tool observation string for `function_name`.

    Raises:
        ValueError: if `function_name` is not one of search/click/open.
    """
    if function_name == "search":
        mock_search_res = [
            {"title": "t1", "url": "url1", "snippet": "snippet_content_1"},
            {"title": "t2", "url": "url2", "snippet": "snippet_content_2"}
        ]
        # BUG FIX: the original wrapped the f-string in a one-element list,
        # handing str.join an iterable of *lists* and raising
        # "TypeError: sequence item 0: expected str instance, list found".
        # Join the formatted strings directly.
        return "\n\n".join(
            f"【{i}†{res['title']}†{res['url']}\n{res['snippet']}】"
            for i, res in enumerate(mock_search_res)
        )
    if function_name == "click":
        return "main content"
    if function_name == "open":
        return "main_content"
    # Fixed typo in the original message ("unspport").
    raise ValueError(f"unsupported function name: {function_name!r}")

def get_func_name_args(llm_text):
    """Parse the tool-call JSON that follows the model's </think> block.

    Returns:
        (action, params) from the {"name": ..., "arguments": ...} payload.
    """
    function_call = re.sub(r'.*?</think>', '', llm_text, flags=re.DOTALL)
    function_call = json.loads(function_call)
    return function_call['name'], function_call['arguments']

def pipeline():
    """Run the rumination loop: generate, execute the requested tool call,
    feed the observation back, and repeat until the model emits the `finish`
    call or the turn budget is exhausted.

    Returns:
        The final answer text, or None if `max_turns` was reached first.
    """
    end_str = '{"name": "finish", "arguments": {}}'
    response = get_assistant()
    messages.append({"role": "assistant", "content": response})
    max_turns, turns = 35, 1
    while not response.endswith(end_str) and turns < max_turns:
        action, params = get_func_name_args(response)
        observation = get_observation(action, params)
        messages.append({"role": "observation", "content": observation})
        response = get_assistant()
        messages.append({"role": "assistant", "content": response})
        turns += 1

    # One more generation after `finish` produces the user-facing answer.
    return get_assistant() if response.endswith(end_str) else None

pipeline()
pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import re
import json

MODEL_PATH = "THUDM/GLM-4-Z1-Rumination-32B-0414"

tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH)
model = AutoModelForCausalLM.from_pretrained(MODEL_PATH, device_map="auto")

messages = [{"role": "user", "content": "Let a, b be positive real numbers such that ab = a + b + 3. Determine the range of possible values for a + b."}]

generate_kwargs = {
    "temperature": 0.95,
    "top_p": 0.7,
    "do_sample": True,
    "max_new_tokens": 16384
}

def get_assistant():
    inputs = tokenizer.apply_chat_template(
        messages,
        return_tensors="pt",
        add_generation_prompt=True,
        return_dict=True,
    ).to(model.device)
    out = model.generate(input_ids=inputs["input_ids"], **generate_kwargs)
    return tokenizer.decode(out[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True).strip()

def get_observation(function_name, args):
    """Return a mocked tool result for the given function call.

    Supported tools are "search", "click", and "open". `args` is accepted
    for interface parity with the model's tool-call payload but is unused
    by these mocks.

    Returns:
        The observation text to feed back to the model.

    Raises:
        ValueError: if `function_name` is not a supported tool.
    """
    content = None
    if function_name == "search":
        mock_search_res = [
            {"title": "t1", "url": "url1", "snippet": "snippet_content_1"},
            {"title": "t2", "url": "url2", "snippet": "snippet_content_2"}
        ]
        # Bug fix: the original passed a generator of one-element *lists* to
        # str.join, which raises TypeError; join must receive strings.
        content = "\n\n".join(
            f"【{i}†{res['title']}†{res['url']}\n{res['snippet']}】"
            for i, res in enumerate(mock_search_res)
        )
    elif function_name == "click":
        mock_click_res = "main content"
        content = mock_click_res
    elif function_name == "open":
        mock_open_res = "main_content"
        content = mock_open_res
    else:
        # Typo fixed: "unspport" -> "unsupported".
        raise ValueError("unsupported function name!")
    return content
        
def get_func_name_args(llm_text):
    """Extract the tool name and arguments from a model response.

    The response may contain a reasoning section terminated by
    ``</think>``; only the JSON tool-call payload after that tag is
    parsed (the whole text is parsed if the tag is absent).

    Returns:
        A ``(action, params)`` tuple.
    """
    # Everything after the first </think>; falls back to the full text.
    payload = llm_text.split('</think>', 1)[-1]
    parsed = json.loads(payload)
    return parsed['name'], parsed['arguments']

def pipeline():
    """Run the generate→tool→observe loop until the model calls `finish`.

    Mutates the module-level `messages` history in place. Returns the
    final answer text, or None if the turn budget was exhausted first.
    """
    # The model signals completion by emitting exactly this tool call.
    end_str = "{\"name\": \"finish\", \"arguments\": {}}"
    response = get_assistant()
    messages.append({"role": "assistant", "content": response})
    max_turns, turns = 35, 1
    while not response.endswith(end_str) and turns < max_turns:
        # Execute the requested tool and feed the result back as an observation.
        action, params = get_func_name_args(response)
        observation = get_observation(action, params)
        messages.append({"role": "observation", "content": observation})
        response = get_assistant()
        messages.append({"role": "assistant", "content": response})
        turns += 1
        
    if response.endswith(end_str):
        # One final generation produces the answer after the finish call.
        final_answer = get_assistant()
    else:
        final_answer = None
    return final_answer

# Run the rumination loop end-to-end (side effect: mutates `messages`).
pipeline()
python (transformers)
from transformers import AutoModelForCausalLM, AutoTokenizer
import re
import json

# Hugging Face model id for the GLM-4 Z1 Rumination 32B checkpoint.
MODEL_PATH = "THUDM/GLM-4-Z1-Rumination-32B-0414"

tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH)
# device_map="auto" lets accelerate shard the 32B weights across available devices.
model = AutoModelForCausalLM.from_pretrained(MODEL_PATH, device_map="auto")

# Conversation history; appended to in place by pipeline() between generations.
messages = [{"role": "user", "content": "Let a, b be positive real numbers such that ab = a + b + 3. Determine the range of possible values for a + b."}]

# Sampling configuration forwarded to model.generate on every turn.
generate_kwargs = {
    "temperature": 0.95,
    "top_p": 0.7,
    "do_sample": True,
    "max_new_tokens": 16384
}

def get_assistant():
    """Generate the next assistant turn from the shared `messages` list.

    Returns the decoded continuation only (prompt tokens stripped),
    with special tokens removed and surrounding whitespace trimmed.
    """
    inputs = tokenizer.apply_chat_template(
        messages,
        return_tensors="pt",
        add_generation_prompt=True,
        return_dict=True,
    ).to(model.device)
    out = model.generate(input_ids=inputs["input_ids"], **generate_kwargs)
    # Slice off the prompt so only newly generated tokens are decoded.
    return tokenizer.decode(out[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True).strip()

def get_observation(function_name, args):
    """Return a mocked tool result for the given function call.

    Supported tools are "search", "click", and "open". `args` is accepted
    for interface parity with the model's tool-call payload but is unused
    by these mocks.

    Returns:
        The observation text to feed back to the model.

    Raises:
        ValueError: if `function_name` is not a supported tool.
    """
    content = None
    if function_name == "search":
        mock_search_res = [
            {"title": "t1", "url": "url1", "snippet": "snippet_content_1"},
            {"title": "t2", "url": "url2", "snippet": "snippet_content_2"}
        ]
        # Bug fix: the original passed a generator of one-element *lists* to
        # str.join, which raises TypeError; join must receive strings.
        content = "\n\n".join(
            f"【{i}†{res['title']}†{res['url']}\n{res['snippet']}】"
            for i, res in enumerate(mock_search_res)
        )
    elif function_name == "click":
        mock_click_res = "main content"
        content = mock_click_res
    elif function_name == "open":
        mock_open_res = "main_content"
        content = mock_open_res
    else:
        # Typo fixed: "unspport" -> "unsupported".
        raise ValueError("unsupported function name!")
    return content
        
def get_func_name_args(llm_text):
    """Parse (name, arguments) from the JSON tool call in `llm_text`.

    Any reasoning text up to and including the first </think> tag is
    discarded before JSON parsing.
    """
    function_call = re.sub(r'.*?</think>', '', llm_text, flags=re.DOTALL)
    function_call = json.loads(function_call)
    action = function_call['name']
    params = function_call['arguments']
    return action, params

def pipeline():
    """Run the generate→tool→observe loop until the model calls `finish`.

    Mutates the module-level `messages` history in place. Returns the
    final answer text, or None if the turn budget was exhausted first.
    """
    # The model signals completion by emitting exactly this tool call.
    end_str = "{\"name\": \"finish\", \"arguments\": {}}"
    response = get_assistant()
    messages.append({"role": "assistant", "content": response})
    max_turns, turns = 35, 1
    while not response.endswith(end_str) and turns < max_turns:
        # Execute the requested tool and feed the result back as an observation.
        action, params = get_func_name_args(response)
        observation = get_observation(action, params)
        messages.append({"role": "observation", "content": observation})
        response = get_assistant()
        messages.append({"role": "assistant", "content": response})
        turns += 1
        
    if response.endswith(end_str):
        # One final generation produces the answer after the finish call.
        final_answer = get_assistant()
    else:
        final_answer = None
    return final_answer

# Run the rumination loop end-to-end (side effect: mutates `messages`).
pipeline()

Deploy This Model

Production-ready deployment in minutes

Together.ai

Instant API access to this model

Fastest API

Production-ready inference API. Start free, scale to millions.

Try Free API

Replicate

One-click model deployment

Easiest Setup

Run models in the cloud with simple API. No DevOps required.

Deploy Now

Disclosure: We may earn a commission from these partners. This helps keep LLMYourWay free.