DigitalForensicsText2SQLite
310
llama
by
pawlaszc
Language Model
OTHER
3B params
New
310 downloads
Early-stage
Edge AI:
Mobile
Laptop
Server
7GB+ RAM
Mobile
Laptop
Server
Quick Summary
A 3B-parameter Llama-3.2-based model fine-tuned to generate SQLite queries for digital-forensics databases from natural-language requests.
Device Compatibility
Mobile
4-6GB RAM
Laptop
16GB RAM
Server
GPU
Minimum Recommended
3GB+ RAM
Code Examples
How to Use (Python, transformers)
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch

# Load the fine-tuned Llama-3.2-3B text-to-SQLite model and its tokenizer.
model_name = "pawlaszc/ForensicSQL-Llama-3.2-3B"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype=torch.bfloat16,
    device_map="auto",
)
model.eval()

# Example forensic schema: an iMessage-style chat database.
schema = """
CREATE TABLE message (
ROWID INTEGER PRIMARY KEY,
text TEXT,
handle_id INTEGER,
date INTEGER,
is_from_me INTEGER,
cache_has_attachments INTEGER
);
CREATE TABLE handle (
ROWID INTEGER PRIMARY KEY,
id TEXT,
service TEXT
);
"""

user_request = "Find all messages received in the last 7 days that contain attachments"

# Note: do NOT use apply_chat_template — use plain-text prompt
prompt = f"""Generate a valid SQLite query for this forensic database request.
Database Schema:
{schema}
Request: {user_request}
SQLite Query:
"""

# Tokenize, move tensors to the model's device, and remember the prompt
# length so only newly generated tokens are decoded afterwards.
encoded = tokenizer(prompt, return_tensors="pt", truncation=True, max_length=2048)
encoded = {name: tensor.to(model.device) for name, tensor in encoded.items()}
prompt_len = encoded["input_ids"].shape[1]

with torch.no_grad():
    generated = model.generate(
        **encoded,
        max_new_tokens=300,
        do_sample=False,  # greedy decoding — do not change
    )

# Decode only the continuation (the SQL), skipping the echoed prompt.
sql = tokenizer.decode(generated[0][prompt_len:], skip_special_tokens=True)
print(sql.strip())

Python Helper Class (Python, transformers)
class ForensicSQLGenerator:
    """Generate SQLite queries for forensic databases from natural-language requests.

    Wraps the ForensicSQL-Llama-3.2-3B model. Heavyweight dependencies
    (transformers, torch) are imported lazily inside methods so that merely
    importing this module stays cheap.
    """

    def __init__(self, model_name="pawlaszc/ForensicSQL-Llama-3.2-3B"):
        """Download/load the model and tokenizer onto the best available device."""
        from transformers import AutoModelForCausalLM, AutoTokenizer
        import torch

        self.tokenizer = AutoTokenizer.from_pretrained(model_name)
        self.model = AutoModelForCausalLM.from_pretrained(
            model_name,
            torch_dtype=torch.bfloat16,
            device_map="auto",
        )
        self.model.eval()

    def generate_sql(self, schema: str, request: str) -> str:
        """Return a single semicolon-terminated SQLite statement for *request*.

        Args:
            schema: CREATE TABLE statements describing the target database.
            request: Natural-language description of the desired query.
        """
        # BUG FIX: `torch` was previously imported only inside __init__, so
        # `torch.no_grad()` below raised NameError whenever this method ran.
        # Import it locally here as well, matching the lazy-import style.
        import torch

        prompt = (
            "Generate a valid SQLite query for this forensic database request.\n\n"
            f"Database Schema:\n{schema}\n\n"
            f"Request: {request}\n\n"
            "SQLite Query:\n"
        )
        inputs = self.tokenizer(
            prompt, return_tensors="pt", truncation=True, max_length=4096
        )
        inputs = {k: v.to(self.model.device) for k, v in inputs.items()}
        input_length = inputs["input_ids"].shape[1]
        with torch.no_grad():
            outputs = self.model.generate(
                **inputs, max_new_tokens=300, do_sample=False  # greedy decoding
            )
        # Decode only the tokens generated after the prompt.
        sql = self.tokenizer.decode(
            outputs[0][input_length:], skip_special_tokens=True
        )
        # Return first statement only, normalized to end with a semicolon.
        return sql.strip().split("\n")[0].strip().rstrip(";") + ";"
# Usage
# NOTE: `schema` is the CREATE TABLE text defined in the example above;
# constructing the generator downloads/loads the model weights.
generator = ForensicSQLGenerator()
sql = generator.generate_sql(schema, "Find all unread messages from the last 24 hours")
print(sql)

Citation (BibTeX)
@article{pawlaszczyk2026forsqlitelm,
author = {Dirk Pawlaszczyk},
title = {AI-Based Automated SQL Query Generation for SQLite Databases
in Mobile Forensics},
journal = {Forensic Science International: Digital Investigation},
year = {2026},
note = {FSIDI-D-26-00029}
}

Deploy This Model
Production-ready deployment in minutes
Together.ai
Instant API access to this model
Production-ready inference API. Start free, scale to millions.
Try Free API

Replicate
One-click model deployment
Run models in the cloud with simple API. No DevOps required.
Deploy Now

Disclosure: We may earn a commission from these partners. This helps keep LLMYourWay free.