Goedel-Formalizer-V2-32B

233
7
32.0B
license:apache-2.0
by
Goedel-LM
Other
OTHER
32B params
New
233 downloads
Early-stage
Edge AI:
Mobile
Laptop
Server
72GB+ RAM
Mobile
Laptop
Server
Quick Summary

A 32-billion-parameter model from Goedel-LM that autoformalizes natural-language mathematical problem statements into Lean 4 theorem statements.

Device Compatibility

Mobile
4-6GB RAM
Laptop
16GB RAM
Server
GPU
Minimum Recommended
30GB+ RAM

Code Examples

Example: autoformalizing a statement with the `transformers` library (Python)
"""Autoformalize a natural-language math statement into Lean 4 with
Goedel-Formalizer-V2-32B via Hugging Face `transformers`."""

from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
import time

# Fix the RNG seed so the sampled generation is reproducible.
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
# bfloat16 halves memory vs fp32; device_map="auto" shards the 32B model
# across whatever accelerators are available.
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    device_map="auto",
    torch_dtype=torch.bfloat16,
    trust_remote_code=True,
)

problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."

# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)

chat = [
    {"role": "user", "content": user_prompt_content},
]

# Apply the model's chat template, tokenize, and move inputs to the model's device.
inputs = tokenizer.apply_chat_template(
    chat, tokenize=True, add_generation_prompt=True, return_tensors="pt"
).to(model.device)

# Wall-clock start of generation (presumably reported further down — the
# elapsed-time print is not visible in this excerpt).
start = time.time()
# Large max_new_tokens budget: the model is prompted to "think" before
# emitting the Lean statement, so outputs can be long.
outputs = model.generate(
    inputs,
    max_new_tokens=16384,
    temperature=0.9,
    do_sample=True,
    top_k=20,
    top_p=0.95,
)

# Decoded text includes the prompt and any special tokens, since the full
# `outputs` sequence is decoded without slicing off the input ids.
model_output_text = tokenizer.batch_decode(outputs)[0]
def extract_code(text_input):
    """Extract the last Lean 4 code block from the model's output.

    Args:
        text_input: Full decoded model output, possibly containing one or
            more fenced ```lean ... ``` blocks.

    Returns:
        The contents of the last fenced Lean code block, stripped of
        surrounding whitespace, or None when no block is found.
    """
    # NOTE(review): the original regex literal was truncated in this copy of
    # the file; reconstructed from the docstring ("last Lean 4 code block")
    # using the standard fenced-code-block pattern — confirm against the
    # upstream model card.
    try:
        matches = re.findall(r"```(?:lean4?)?\n(.*?)```", text_input, re.DOTALL)
        if matches:
            return matches[-1].strip()
        return None
    except Exception:
        # Best-effort extraction: never let a parsing failure crash the script.
        return None
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



# Wrap the prompt as a single-turn chat conversation.
chat = [{"role": "user", "content": user_prompt_content}]

# Render through the model's chat template, tokenize, and move the tensors
# onto the model's device.
inputs = tokenizer.apply_chat_template(
    chat,
    tokenize=True,
    add_generation_prompt=True,
    return_tensors="pt",
).to(model.device)

import time

start = time.time()
# Sampled decoding with the settings recommended for this checkpoint.
outputs = model.generate(
    inputs,
    max_new_tokens=16384,
    temperature=0.9,
    do_sample=True,
    top_k=20,
    top_p=0.95,
)

# Decode the first (and only) sequence in the batch back to text.
model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
# Demo: autoformalize a natural-language math statement into Lean 4 with
# Goedel-Formalizer-V2-32B, then extract the generated Lean code block.
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
import time

# Fixed seed so sampling (do_sample=True below) is reproducible.
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
# bfloat16 + device_map="auto" shards the 32B model across available devices.
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    device_map="auto",
    torch_dtype=torch.bfloat16,
    trust_remote_code=True,
)

problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."

# Construct the prompt for the model.
# BUG FIX: the original concatenated the statement and the final instruction
# with no separator ("...two cubes.Think before..."); a newline is added so
# the instruction is not fused onto the end of the statement.
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}\n"
    f"Think before you provide the lean statement."
)

chat = [
    {"role": "user", "content": user_prompt_content},
]

# Tokenize with the model's chat template and move tensors to the model device.
inputs = tokenizer.apply_chat_template(
    chat, tokenize=True, add_generation_prompt=True, return_tensors="pt"
).to(model.device)

start = time.time()  # wall-clock start of generation (duration never printed here)
outputs = model.generate(
    inputs,
    max_new_tokens=16384,
    temperature=0.9,
    do_sample=True,
    top_k=20,
    top_p=0.95,
)

model_output_text = tokenizer.batch_decode(outputs)[0]


def extract_code(text_input):
    """Return the contents of the last fenced Lean code block in *text_input*.

    Returns None when no fenced block is present.
    NOTE(review): the original regex was truncated in the scraped source; this
    pattern (```lean / ```lean4 / bare ``` fences, DOTALL) is a reconstruction
    — confirm against the model's actual output format.
    """
    try:
        matches = re.findall(r"```(?:lean4|lean)?\s*\n(.*?)\n```", text_input, re.DOTALL)
        return matches[-1].strip() if matches else None
    except TypeError:
        # Non-string input: treat as "no code found" rather than crashing.
        return None
Construct the prompt for the model (code example — Python / transformers)
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output.

    Returns the code inside the last ```lean / ```lean4 (or bare ```) fenced
    block, stripped of surrounding whitespace, or None when no fenced block
    is found.
    NOTE(review): the pattern was truncated in the scraped source; this regex
    is a reconstruction — confirm against the model's actual output format.
    """
    try:
        matches = re.findall(r"```(?:lean4|lean)?\s*\n(.*?)\n```", text_input, re.DOTALL)
        return matches[-1].strip() if matches else None
    except TypeError:
        # Non-string input: treat as "no code found" rather than crashing.
        return None
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
# Demo script: autoformalize a natural-language math statement into Lean 4
# with the Goedel-Formalizer-V2-32B model.
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
import time  # moved up from mid-script: imports belong at the top

# Fix the RNG seed so sampled generations are reproducible run-to-run.
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    device_map="auto",           # shard across available GPUs automatically
    torch_dtype=torch.bfloat16,  # half-precision to fit the 32B weights
    trust_remote_code=True,
)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model.
# BUG FIX: the original concatenated the statement and the final instruction
# with no separator, producing "...two cubes.Think before you provide...".
# A newline after the statement keeps the instruction distinct.
# (f-prefixes dropped from pieces that contain no placeholders.)
user_prompt_content = (
    "Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    "The natural language statement is: \n"
    f"{informal_statement_content}\n"
    "Think before you provide the lean statement."
)


chat = [
    {"role": "user", "content": user_prompt_content},
]

# Render the chat through the model's template and move tensors to its device.
inputs = tokenizer.apply_chat_template(
    chat, tokenize=True, add_generation_prompt=True, return_tensors="pt"
).to(model.device)

start = time.time()
outputs = model.generate(
    inputs,
    max_new_tokens=16384,
    temperature=0.9,
    do_sample=True,
    top_k=20,
    top_p=0.95,
)
# BUG FIX: `start` was captured but never used (dead code) — report the timing.
print(f"Generation took {time.time() - start:.1f}s")

# Note: batch_decode returns prompt + completion; downstream extraction
# pulls out the last Lean code block from this text.
model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
"""Autoformalize a natural-language math statement into Lean 4 with
Goedel-Formalizer-V2-32B, then extract the generated Lean code block."""

import re
import time

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Fixed seed so the sampled generation below is reproducible.
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    device_map="auto",
    torch_dtype=torch.bfloat16,
    trust_remote_code=True,
)

problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."

# Construct the prompt for the model.
# FIX: a newline is inserted after the statement -- the original concatenation
# ran "...two cubes." directly into "Think before you provide ...".
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}\n"
    f"Think before you provide the lean statement."
)

chat = [
    {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(
    chat,
    tokenize=True,
    add_generation_prompt=True,
    return_tensors="pt",
).to(model.device)

start = time.time()
outputs = model.generate(
    inputs,
    max_new_tokens=16384,
    do_sample=True,
    temperature=0.9,
    top_k=20,
    top_p=0.95,
)
# The original captured `start` but never used it; report the elapsed time.
elapsed = time.time() - start
print(f"generation took {elapsed:.1f}s")

model_output_text = tokenizer.batch_decode(outputs)[0]


def extract_code(text_input):
    """Return the contents of the last ```lean4 fenced code block in *text_input*.

    Returns None when no fenced block is present or extraction fails.

    NOTE(review): the regex body was lost to page-extraction garbling in the
    original (cut at ``re.findall(r'``); the ```lean4 fence pattern below is a
    reconstruction from the docstring -- confirm the model actually emits
    ```lean4 fences before relying on it.
    """
    try:
        matches = re.findall(r"```lean4\s*\n(.*?)```", text_input, re.DOTALL)
        if matches:
            return matches[-1].strip()
        return None
    except Exception:
        # Best-effort extraction: any failure is reported as "no code found".
        return None
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Code example (Python, transformers) — construct the prompt for the model:
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
# Fix the RNG seed so that sampled generations are reproducible across runs.
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"

# Load the tokenizer and the weights. bfloat16 plus device_map="auto" lets
# Accelerate shard the 32B-parameter model across whatever devices are visible.
tokenizer = AutoTokenizer.from_pretrained(model_id)
_load_kwargs = dict(
    device_map="auto",
    torch_dtype=torch.bfloat16,
    trust_remote_code=True,
)
model = AutoModelForCausalLM.from_pretrained(model_id, **_load_kwargs)


# Theorem name the model should use, and the statement to formalize.
problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the user prompt for the formalizer model.
# Fix: the original fused the statement and the final instruction with no
# separator ("...two cubes.Think before..."); a newline now separates them.
# f-prefixes are kept only on the pieces that actually interpolate.
user_prompt_content = (
    "Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    "The natural language statement is: \n"
    f"{informal_statement_content}\n"
    "Think before you provide the lean statement."
)



# Single-turn chat: the whole task is carried in one user message.
chat = [
    {"role": "user", "content": user_prompt_content},
]

# Apply the model's chat template, tokenize, and move the ids to the model device.
inputs = tokenizer.apply_chat_template(
    chat, tokenize=True, add_generation_prompt=True, return_tensors="pt"
).to(model.device)

import time

start = time.time()
# Sampling configuration used by this example (temperature / top-k / top-p).
outputs = model.generate(
    inputs,
    max_new_tokens=16384,
    temperature=0.9,
    do_sample=True,
    top_k=20,
    top_p=0.95,
)
# Fix: `start` was captured but never used — report the elapsed wall time.
print(f"Generation took {time.time() - start:.1f}s")

# Decode the full sequence (prompt + completion). Downstream extraction takes
# the LAST code block, so the echoed prompt text is harmless.
model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import re
torch.manual_seed(30)

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model
user_prompt_content = (
    f"Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    f"The natural language statement is: \n"
    f"{informal_statement_content}"
    f"Think before you provide the lean statement."
)



chat = [
  {"role": "user", "content": user_prompt_content},
]

inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

import time
start = time.time()
outputs = model.generate(inputs, max_new_tokens=16384, temperature = 0.9, do_sample = True, top_k=20, top_p=0.95)


model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the modeltexttransformers
# Usage example: autoformalize a natural-language problem statement into a
# Lean 4 theorem statement with Goedel-Formalizer-V2-32B.
import re
import time

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

torch.manual_seed(30)  # fixed seed so sampled generations are reproducible

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True
)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model.
# NOTE: the "\n" after the informal statement keeps the final instruction from
# being fused onto the end of the statement text ("...two cubes.Think before..."),
# which is what the original snippet produced.
user_prompt_content = (
    "Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    "The natural language statement is: \n"
    f"{informal_statement_content}\n"
    "Think before you provide the lean statement."
)

chat = [
    {"role": "user", "content": user_prompt_content},
]

# Tokenize the chat prompt with the model's chat template and move the
# resulting tensor onto the model's device.
inputs = tokenizer.apply_chat_template(
    chat, tokenize=True, add_generation_prompt=True, return_tensors="pt"
).to(model.device)

start = time.time()
outputs = model.generate(
    inputs,
    max_new_tokens=16384,
    temperature=0.9,
    do_sample=True,
    top_k=20,
    top_p=0.95,
)
# Complete the timing the original left dangling (`start` was never read).
print(f"Generation took {time.time() - start:.1f}s")

model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the model (Python / transformers):
# Usage example: autoformalize a natural-language problem statement into a
# Lean 4 theorem statement with Goedel-Formalizer-V2-32B.
import re
import time

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

torch.manual_seed(30)  # fixed seed so sampled generations are reproducible

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True
)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model.
# NOTE: the "\n" after the informal statement keeps the final instruction from
# being fused onto the end of the statement text ("...two cubes.Think before..."),
# which is what the original snippet produced.
user_prompt_content = (
    "Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    "The natural language statement is: \n"
    f"{informal_statement_content}\n"
    "Think before you provide the lean statement."
)

chat = [
    {"role": "user", "content": user_prompt_content},
]

# Tokenize the chat prompt with the model's chat template and move the
# resulting tensor onto the model's device.
inputs = tokenizer.apply_chat_template(
    chat, tokenize=True, add_generation_prompt=True, return_tensors="pt"
).to(model.device)

start = time.time()
outputs = model.generate(
    inputs,
    max_new_tokens=16384,
    temperature=0.9,
    do_sample=True,
    top_k=20,
    top_p=0.95,
)
# Complete the timing the original left dangling (`start` was never read).
print(f"Generation took {time.time() - start:.1f}s")

model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the model (Python / transformers):
# Usage example: autoformalize a natural-language problem statement into a
# Lean 4 theorem statement with Goedel-Formalizer-V2-32B.
import re
import time

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

torch.manual_seed(30)  # fixed seed so sampled generations are reproducible

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True
)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model.
# NOTE: the "\n" after the informal statement keeps the final instruction from
# being fused onto the end of the statement text ("...two cubes.Think before..."),
# which is what the original snippet produced.
user_prompt_content = (
    "Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    "The natural language statement is: \n"
    f"{informal_statement_content}\n"
    "Think before you provide the lean statement."
)

chat = [
    {"role": "user", "content": user_prompt_content},
]

# Tokenize the chat prompt with the model's chat template and move the
# resulting tensor onto the model's device.
inputs = tokenizer.apply_chat_template(
    chat, tokenize=True, add_generation_prompt=True, return_tensors="pt"
).to(model.device)

start = time.time()
outputs = model.generate(
    inputs,
    max_new_tokens=16384,
    temperature=0.9,
    do_sample=True,
    top_k=20,
    top_p=0.95,
)
# Complete the timing the original left dangling (`start` was never read).
print(f"Generation took {time.time() - start:.1f}s")

model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the model (Python / transformers):
# Usage example: autoformalize a natural-language problem statement into a
# Lean 4 theorem statement with Goedel-Formalizer-V2-32B.
import re
import time

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

torch.manual_seed(30)  # fixed seed so sampled generations are reproducible

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True
)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model.
# NOTE: the "\n" after the informal statement keeps the final instruction from
# being fused onto the end of the statement text ("...two cubes.Think before..."),
# which is what the original snippet produced.
user_prompt_content = (
    "Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    "The natural language statement is: \n"
    f"{informal_statement_content}\n"
    "Think before you provide the lean statement."
)

chat = [
    {"role": "user", "content": user_prompt_content},
]

# Tokenize the chat prompt with the model's chat template and move the
# resulting tensor onto the model's device.
inputs = tokenizer.apply_chat_template(
    chat, tokenize=True, add_generation_prompt=True, return_tensors="pt"
).to(model.device)

start = time.time()
outputs = model.generate(
    inputs,
    max_new_tokens=16384,
    temperature=0.9,
    do_sample=True,
    top_k=20,
    top_p=0.95,
)
# Complete the timing the original left dangling (`start` was never read).
print(f"Generation took {time.time() - start:.1f}s")

model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'
Construct the prompt for the model (Python / transformers):
# Usage example: autoformalize a natural-language problem statement into a
# Lean 4 theorem statement with Goedel-Formalizer-V2-32B.
import re
import time

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

torch.manual_seed(30)  # fixed seed so sampled generations are reproducible

model_id = "Goedel-LM/Goedel-Formalizer-V2-32B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True
)


problem_name = "test_problem"
informal_statement_content = "Prove that 3 cannot be written as the sum of two cubes."


# Construct the prompt for the model.
# NOTE: the "\n" after the informal statement keeps the final instruction from
# being fused onto the end of the statement text ("...two cubes.Think before..."),
# which is what the original snippet produced.
user_prompt_content = (
    "Please autoformalize the following natural language problem statement in Lean 4. "
    f"Use the following theorem name: {problem_name}\n"
    "The natural language statement is: \n"
    f"{informal_statement_content}\n"
    "Think before you provide the lean statement."
)

chat = [
    {"role": "user", "content": user_prompt_content},
]

# Tokenize the chat prompt with the model's chat template and move the
# resulting tensor onto the model's device.
inputs = tokenizer.apply_chat_template(
    chat, tokenize=True, add_generation_prompt=True, return_tensors="pt"
).to(model.device)

start = time.time()
outputs = model.generate(
    inputs,
    max_new_tokens=16384,
    temperature=0.9,
    do_sample=True,
    top_k=20,
    top_p=0.95,
)
# Complete the timing the original left dangling (`start` was never read).
print(f"Generation took {time.time() - start:.1f}s")

model_output_text = tokenizer.batch_decode(outputs)[0]

def extract_code(text_input):
    """Extracts the last Lean 4 code block from the model's output."""
    try:
        matches = re.findall(r'

Deploy This Model

Production-ready deployment in minutes

Together.ai

Instant API access to this model

Fastest API

Production-ready inference API. Start free, scale to millions.

Try Free API

Replicate

One-click model deployment

Easiest Setup

Run models in the cloud with simple API. No DevOps required.

Deploy Now

Disclosure: We may earn a commission from these partners. This helps keep LLMYourWay free.