Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
7 changes: 6 additions & 1 deletion pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,12 @@ version = "0.0.9"
description = "Create programs that think, using LLMs."
readme = "README.md"
requires-python = ">=3.10"
dependencies = ["pydantic>=2.9.2", "jinja2>=3.1.2", "httpx>=0.27.2"]
dependencies = [
"pydantic>=2.9.2",
"jinja2>=3.1.2",
"httpx>=0.27.2",
"litellm>=1.75.7",
]
authors = [{ name = "Senko Rasic", email = "senko@senko.net" }]
license = { text = "MIT" }
homepage = "https://github.com/senko/think"
Expand Down
3 changes: 3 additions & 0 deletions tests/conftest.py
Original file line number Diff line number Diff line change
Expand Up @@ -23,6 +23,9 @@ def model_urls(vision: bool = False) -> list[str]:
retval.append(f"ollama:///{getenv('OLLAMA_MODEL')}")
if getenv("AWS_SECRET_ACCESS_KEY"):
retval.append("bedrock:///amazon.nova-lite-v1:0?region=us-east-1")
# LiteLLM can use any of the above API keys, but we'll test it specifically with OpenAI
if getenv("OPENAI_API_KEY"):
retval.append("litellm:///gpt-4o-mini")
if retval == []:
raise RuntimeError("No LLM API keys found in environment")
return retval
Expand Down
175 changes: 175 additions & 0 deletions tests/llm/test_litellm_adapter.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,175 @@
import pytest

from tests.llm.test_chat import (
BASIC_CHAT,
IMAGE_CHAT,
SIMPLE_TOOL_CHAT,
DOCUMENT_CHAT,
PDF_URI,
)
from think.llm.chat import Chat
from think.llm.litellm import LiteLLMAdapter

# LiteLLM uses OpenAI-compatible format, so we reuse the same expected messages
# Expected OpenAI-format dump of BASIC_CHAT: plain-string contents, with the
# system message kept inline and the assistant message carrying an explicit
# "tool_calls": None.
BASIC_LITELLM_MESSAGES = [
    {"role": "system", "content": "You're a friendly assistant"},
    {"role": "user", "content": "Say Hi."},
    {"role": "assistant", "content": "Hi!", "tool_calls": None},
]

# Expected dump of SIMPLE_TOOL_CHAT: an assistant message whose content is
# None but which carries an OpenAI "function"-style tool call, followed by the
# tool result message keyed by the matching tool_call_id, and the final
# assistant answer.
SIMPLE_TOOL_LITELLM_MESSAGES = [
    {"role": "system", "content": "You're a friendly assistant"},
    {
        "role": "user",
        "content": "Ask the user to give you a math question, then solve it yourself.",
    },
    {
        "role": "assistant",
        "content": None,
        "tool_calls": [
            {
                "id": "call_boPTOC8z660AYdRFr9oogH4O",
                "type": "function",
                "function": {
                    "name": "ask_user",
                    "arguments": '{"question": "Please give me a math question to solve?"}',
                },
            }
        ],
    },
    {
        "role": "tool",
        "tool_call_id": "call_boPTOC8z660AYdRFr9oogH4O",
        "content": "1 + 1",
    },
    {
        "role": "assistant",
        "content": "The solution to the math question \\(1 + 1\\) is \\(2\\).",
        "tool_calls": None,
    },
]

# Expected dump of IMAGE_CHAT: multipart user content with a text part and an
# "image_url" part holding the image as a base64 data: URI.
IMAGE_LITELLM_MESSAGES = [
    {
        "role": "user",
        "content": [
            {"type": "text", "text": "Describe the image in detail"},
            {
                "type": "image_url",
                "image_url": {
                    "url": "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABAQAAAAA3bvkkAAAACklEQVR4AWNgAAAAAgABc3UBGAAAAABJRU5ErkJggg=="
                },
            },
        ],
    },
    {
        "role": "assistant",
        "content": "The image appears to be a simple black silhouette of a cat.",
        "tool_calls": None,
    },
]

# Expected dump of DOCUMENT_CHAT: multipart user content with a text part and
# an "input_file" part; file_data is the raw base64 payload of PDF_URI (the
# "data:...;base64," prefix is stripped by the split).
DOCUMENT_LITELLM_MESSAGES = [
    {
        "role": "user",
        "content": [
            {"type": "text", "text": "Describe the document in detail"},
            {
                "type": "input_file",
                "file_name": "document.pdf",
                "file_data": PDF_URI.split(",", 1)[1],
            },
        ],
    },
    {
        "role": "assistant",
        "content": "The document is one page long and contains text HELLO WORLD.",
        "tool_calls": None,
    },
]


@pytest.mark.parametrize(
    "chat,expected",
    [
        (BASIC_CHAT, BASIC_LITELLM_MESSAGES),
        (SIMPLE_TOOL_CHAT, SIMPLE_TOOL_LITELLM_MESSAGES),
        (IMAGE_CHAT, IMAGE_LITELLM_MESSAGES),
        (DOCUMENT_CHAT, DOCUMENT_LITELLM_MESSAGES),
    ],
)
def test_adapter(chat, expected):
    """Round-trip a chat through LiteLLMAdapter and check the OpenAI wire format.

    Dumping must produce exactly ``expected`` with an empty system prompt
    (the system message stays inline in the message list), and loading the
    dumped messages back must reconstruct the original chat.
    """
    adapter = LiteLLMAdapter()
    original = Chat.load(chat)

    # Serialize to the OpenAI-compatible message list that LiteLLM consumes.
    system_prompt, dumped = adapter.dump_chat(original)
    assert system_prompt == ""
    assert dumped == expected

    # Deserialize and verify nothing was lost in translation.
    restored = adapter.load_chat(dumped)
    assert restored.messages == original.messages


def test_tool_spec():
    """LiteLLMAdapter must emit an OpenAI-style function spec for a tool.

    The spec wraps the tool's name, description, and parameter schema in the
    ``{"type": "function", "function": {...}}`` envelope.
    """
    from think.llm.tool import ToolDefinition

    def test_function(param: str) -> str:
        """A test tool for testing.

        :param param: A test parameter
        :return: A test result
        """
        return f"test: {param}"

    tool = ToolDefinition(test_function, name="test_tool")
    spec = LiteLLMAdapter().get_tool_spec(tool)

    # Description and schema are taken verbatim from the ToolDefinition.
    assert spec == {
        "type": "function",
        "function": {
            "name": "test_tool",
            "description": tool.description,
            "parameters": tool.schema,
        },
    }


def test_error_handling():
    """LiteLLMAdapter must reject document inputs it cannot forward.

    Two failure modes are checked: a plain HTTP(S) document URL (URLs are not
    supported at all) and a ``data:`` URI with an unsupported document MIME
    type. Both must raise ValueError from ``dump_message``.
    """
    from think.llm.chat import ContentPart, ContentType, Message, Role

    adapter = LiteLLMAdapter()

    # (document value, expected ValueError message pattern)
    failure_cases = [
        ("https://example.com/doc.pdf", "does not support document URLs"),
        ("data:application/msword;base64,dGVzdA==", "Unsupported document MIME type"),
    ]

    for document, error_match in failure_cases:
        bad_message = Message(
            role=Role.user,
            content=[
                ContentPart(
                    type=ContentType.document,
                    document=document,
                )
            ],
        )
        with pytest.raises(ValueError, match=error_match):
            adapter.dump_message(bad_message)
6 changes: 5 additions & 1 deletion think/llm/base.py
Original file line number Diff line number Diff line change
Expand Up @@ -127,7 +127,7 @@ class LLM(ABC):
>>> client(...)
"""

PROVIDERS = ["anthropic", "google", "groq", "ollama", "openai"]
PROVIDERS = ["anthropic", "google", "groq", "litellm", "ollama", "openai"]

provider: str
adapter_class: type[BaseAdapter]
Expand Down Expand Up @@ -203,6 +203,10 @@ def for_provider(cls, provider: str) -> type["LLM"]:
from .bedrock import BedrockClient

return BedrockClient
elif provider == "litellm":
from .litellm import LiteLLMClient

return LiteLLMClient
else:
raise ValueError(f"Unknown provider: {provider}")

Expand Down
Loading
Loading