JoyAI-LLM-Flash-AWQ-4bit

18
by
cyankiwi
Language Model
OTHER
4B params
New
18 downloads
Early-stage
Edge AI:
Mobile
Laptop
Server
9GB+ RAM
Mobile
Laptop
Server
Quick Summary

JoyAI-LLM-Flash is a 4B-parameter language model quantized to 4-bit with AWQ for memory-efficient deployment on mobile, laptop, and server hardware.

Device Compatibility

Mobile
4-6GB RAM
Laptop
16GB RAM
Server
GPU
Minimum Recommended
4GB+ RAM

Code Examples

Chat Completion (Python, OpenAI client)
from openai import OpenAI

# Client for a self-hosted, OpenAI-compatible inference server.
# Replace IP:PORT with the address where the model is served; the
# api_key is a placeholder — the local endpoint does not authenticate.
client = OpenAI(base_url="http://IP:PORT/v1", api_key="EMPTY")


def simple_chat(client: OpenAI):
    """Send one hard-coded user question to the server and print the reply.

    The model name is taken from the first entry the server reports under
    /v1/models, so the example works regardless of what is deployed.
    """
    user_turn = {
        "role": "user",
        "content": [
            {
                "type": "text",
                "text": "which one is bigger, 9.11 or 9.9? think carefully.",
            }
        ],
    }
    served_model = client.models.list().data[0].id
    completion = client.chat.completions.create(
        model=served_model,
        messages=[user_turn],
        stream=False,
        max_tokens=4096,
    )
    print(f"response: {completion.choices[0].message.content}")


if __name__ == "__main__":
    # Run the chat demo only when executed directly as a script.
    simple_chat(client)
Tool Call Completion (Python, OpenAI client)
import json

from openai import OpenAI

# Client for a self-hosted, OpenAI-compatible inference server.
# Replace IP:PORT with the serving address; "EMPTY" is a placeholder
# because the local endpoint does not check API keys.
client = OpenAI(base_url="http://IP:PORT/v1", api_key="EMPTY")


def my_calculator(expression: str) -> str:
    """Evaluate a basic arithmetic expression and return the result as a string.

    The original implementation called ``eval`` on *expression*, which comes
    straight from the model's tool-call arguments and would execute arbitrary
    Python. This version parses the expression with ``ast`` and evaluates only
    a whitelist of numeric operations.

    Supported: int/float literals, ``+ - * / // % **`` and unary ``+``/``-``,
    with parentheses. Anything else raises ``ValueError``.

    Args:
        expression: Arithmetic expression, e.g. ``"6+1"``.

    Returns:
        ``str`` of the computed value (matches ``str(eval(expression))`` for
        supported inputs).

    Raises:
        ValueError: If the expression contains anything outside the whitelist.
        SyntaxError: If the expression is not parseable Python.
    """
    import ast
    import operator

    binary_ops = {
        ast.Add: operator.add,
        ast.Sub: operator.sub,
        ast.Mult: operator.mul,
        ast.Div: operator.truediv,
        ast.FloorDiv: operator.floordiv,
        ast.Mod: operator.mod,
        ast.Pow: operator.pow,
    }
    unary_ops = {ast.UAdd: operator.pos, ast.USub: operator.neg}

    def _eval(node: ast.AST):
        # Recursively evaluate only the whitelisted arithmetic node types.
        if isinstance(node, ast.Constant) and isinstance(node.value, (int, float)):
            return node.value
        if isinstance(node, ast.BinOp) and type(node.op) in binary_ops:
            return binary_ops[type(node.op)](_eval(node.left), _eval(node.right))
        if isinstance(node, ast.UnaryOp) and type(node.op) in unary_ops:
            return unary_ops[type(node.op)](_eval(node.operand))
        raise ValueError(f"unsupported expression: {expression!r}")

    return str(_eval(ast.parse(expression, mode="eval").body))


def rewrite(expression: str) -> str:
    """Echo tool: return *expression* unchanged, coerced to ``str``.

    Placeholder implementation for the "rewrite" tool advertised to the
    model; no actual rewriting is performed.
    """
    rewritten = str(expression)
    return rewritten


def simple_tool_call(client: OpenAI):
    """Demonstrate a full OpenAI-style tool-call round trip.

    Sends a question with two tools advertised, executes every tool call the
    model returns, feeds one "tool" message back per call, and prints the
    model's final answer.

    Fixes over the original example:
      * ``tool_calls`` may be ``None`` when the model answers directly; that
        case is now handled instead of raising ``TypeError``.
      * The original only executed ``my_calculator`` calls, so a ``rewrite``
        call produced no result and ``zip(tool_calls, results)`` silently
        dropped tool responses — the follow-up request then fails because
        every tool_call id must receive a matching "tool" message. Every
        call now gets a response.
      * The advertised ``rewrite`` schema uses a ``"text"`` parameter while
        the local function takes ``expression``; the argument is now mapped
        explicitly.
    """
    messages = [
        {
            "role": "user",
            "content": [
                {
                    "type": "text",
                    "text": "use my functions to compute the results for the equations: 6+1",
                },
            ],
        },
    ]
    tools = [
        {
            "type": "function",
            "function": {
                "name": "my_calculator",
                "description": "A calculator that can evaluate a mathematical equation and compute its results.",
                "parameters": {
                    "type": "object",
                    "properties": {
                        "expression": {
                            "type": "string",
                            "description": "The mathematical expression to evaluate.",
                        },
                    },
                    "required": ["expression"],
                },
            },
        },
        {
            "type": "function",
            "function": {
                "name": "rewrite",
                "description": "Rewrite a given text for improved clarity",
                "parameters": {
                    "type": "object",
                    "properties": {
                        "text": {
                            "type": "string",
                            "description": "The input text to rewrite",
                        }
                    },
                },
            },
        },
    ]
    model_name = client.models.list().data[0].id
    response = client.chat.completions.create(
        model=model_name,
        messages=messages,
        temperature=1.0,
        max_tokens=1024,
        tools=tools,
        tool_choice="auto",
    )
    tool_calls = response.choices[0].message.tool_calls

    # With tool_choice="auto" the model may answer without calling any tool.
    if not tool_calls:
        print(response.choices[0].message.content)
        return

    messages.append({"role": "assistant", "tool_calls": tool_calls})
    for tool_call in tool_calls:
        function_name = tool_call.function.name
        arguments = json.loads(tool_call.function.arguments)
        if function_name == "my_calculator":
            result = my_calculator(**arguments)
        elif function_name == "rewrite":
            # Schema parameter is "text" but rewrite() takes "expression".
            result = rewrite(arguments.get("text", ""))
        else:
            result = f"unknown tool: {function_name}"
        # One "tool" message per tool_call id, or the follow-up request
        # is rejected by the server.
        messages.append(
            {
                "role": "tool",
                "tool_call_id": tool_call.id,
                "name": function_name,
                "content": result,
            }
        )
    response = client.chat.completions.create(
        model=model_name,
        messages=messages,
        temperature=1.0,
        max_tokens=1024,
    )
    print(response.choices[0].message.content)


if __name__ == "__main__":
    # Run the tool-call demo only when executed directly as a script.
    simple_tool_call(client)

Deploy This Model

Production-ready deployment in minutes

Together.ai

Instant API access to this model

Fastest API

Production-ready inference API. Start free, scale to millions.

Try Free API

Replicate

One-click model deployment

Easiest Setup

Run models in the cloud with simple API. No DevOps required.

Deploy Now

Disclosure: We may earn a commission from these partners. This helps keep LLMYourWay free.