Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
11 changes: 7 additions & 4 deletions examples/integrations/openwebui/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,9 @@ Open WebUI Pipe Function examples for Context Compiler.
Tested target: Open WebUI `v0.8.12` (latest at time of testing).
Runtime-validated on stock Docker Open WebUI with a real backend model provider.

Compatibility note: Open WebUI `0.9.x` changed `Users.get_user_by_id` to async.
These examples support both sync (`0.8.x`) and async (`0.9.x`) user lookup.

## Files

- `open_webui_pipe.py`: basic integration, no preprocessor layer (recommended/default).
Expand All @@ -15,7 +18,7 @@ Runtime-validated on stock Docker Open WebUI with a real backend model provider.
The minimal pipe path below is the easiest first-run flow and was runtime-validated in Docker via API flow with a real backend model.

1. Import `open_webui_pipe.py` (recommended/default) as a Function by URL.
2. Open WebUI installs `context-compiler>=0.6.7` from the function frontmatter requirements.
2. Open WebUI installs `context-compiler>=0.6.10` from the function frontmatter requirements.
3. Enable the function.
4. Set `BASE_MODEL_ID` to a valid Open WebUI model id (required).
5. Select the pipe model in chat.
Expand All @@ -24,7 +27,7 @@ Open WebUI is host-provided runtime infrastructure and must already be installed
Open WebUI also needs at least one real backend model/provider configured (for example Ollama or OpenAI) so `BASE_MODEL_ID` resolves to an actual model.
Note: The `PROVIDER` environment contract used in LiteLLM examples/demos does not apply to Open WebUI. Open WebUI manages providers via its own connection settings and model IDs.

Checkpoint continuation in these examples requires `context-compiler>=0.6.7`.
Checkpoint continuation in these examples requires `context-compiler>=0.6.10`.

If using `open_webui_pipe_with_preprocessor.py`:
- Install preprocessor support in the Open WebUI environment:
Expand All @@ -49,8 +52,8 @@ If frontmatter dependency installs are disabled, offline, or unavailable:
1. Open a shell in the Open WebUI container:
- `docker exec -it <openwebui-container> sh`
2. Install the package manually:
- Minimal pipe: `pip install "context-compiler>=0.6.7"`
- Preprocessor pipe: `pip install "context-compiler[experimental]>=0.6.7"`
- Minimal pipe: `pip install "context-compiler>=0.6.10"`
- Preprocessor pipe: `pip install "context-compiler[experimental]>=0.6.10"`
3. Import and enable the function in Open WebUI, then configure valves.

### Finding valid model ids
Expand Down
9 changes: 7 additions & 2 deletions examples/integrations/openwebui/open_webui_pipe.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,8 +3,8 @@
author: rlippmann
author_url: https://github.com/rlippmann/context-compiler
funding_url: https://github.com/rlippmann/context-compiler
version: 0.3
requirements: context-compiler>=0.6.7
version: 0.4
requirements: context-compiler>=0.6.10

Minimal Open WebUI Pipe integration for Context Compiler.

Expand All @@ -17,6 +17,7 @@
- No persistence, no multi-worker coordination, no external storage.
"""

import inspect
import logging
from typing import Any

Expand Down Expand Up @@ -213,6 +214,8 @@ async def _forward_passthrough(
payload = {**body}
payload["model"] = self.valves.BASE_MODEL_ID
user = Users.get_user_by_id(user_payload["id"])
if inspect.isawaitable(user):
user = await user
try:
response = await generate_chat_completion(request, payload, user)
except Exception as exc:
Expand Down Expand Up @@ -252,6 +255,8 @@ async def _forward_update(
)

user = Users.get_user_by_id(user_payload["id"])
if inspect.isawaitable(user):
user = await user
try:
response = await generate_chat_completion(request, payload, user)
except Exception as exc:
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -3,8 +3,8 @@
author: rlippmann
author_url: https://github.com/rlippmann/context-compiler
funding_url: https://github.com/rlippmann/context-compiler
version: 0.3
requirements: context-compiler[experimental]>=0.6.7
version: 0.4
requirements: context-compiler[experimental]>=0.6.10

Open WebUI integration with Context Compiler preprocessor.

Expand All @@ -17,6 +17,7 @@
Core decision handling remains the same as the base integration.
"""

import inspect
import logging
from importlib.resources import as_file, files
from importlib.resources.abc import Traversable
Expand Down Expand Up @@ -266,6 +267,8 @@ async def _validate_configured_model_ids(
# If model discovery fails, preserve runtime behavior and rely on call-path
# normalization below.
user = Users.get_user_by_id(user_payload["id"])
if inspect.isawaitable(user):
user = await user
try:
models = await get_all_models(request, user=user)
except Exception:
Expand Down Expand Up @@ -316,6 +319,8 @@ async def _llm_fallback_precompile(
],
}
user = Users.get_user_by_id(user_payload["id"])
if inspect.isawaitable(user):
user = await user
try:
response = await generate_chat_completion(request, payload, user)
except Exception as exc:
Expand Down Expand Up @@ -376,6 +381,8 @@ async def _forward_passthrough(
payload = {**body}
payload["model"] = base_model_id
user = Users.get_user_by_id(user_payload["id"])
if inspect.isawaitable(user):
user = await user
try:
response = await generate_chat_completion(request, payload, user)
except Exception as exc:
Expand Down Expand Up @@ -412,6 +419,8 @@ async def _forward_update(
)

user = Users.get_user_by_id(user_payload["id"])
if inspect.isawaitable(user):
user = await user
try:
response = await generate_chat_completion(request, payload, user)
except Exception as exc:
Expand Down
2 changes: 1 addition & 1 deletion pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@ build-backend = "hatchling.build"

[project]
name = "context-compiler"
version = "0.6.9"
version = "0.6.10"
description = "Deterministic conversational state engine for LLM applications."
readme = "README.md"
requires-python = ">=3.11"
Expand Down
21 changes: 21 additions & 0 deletions tests/test_openwebui_pipe.py
Original file line number Diff line number Diff line change
Expand Up @@ -228,3 +228,24 @@ async def _chat_completion(
assert result == (
"Context Compiler pipe misconfigured: BASE_MODEL_ID was not found in Open WebUI models."
)


def test_pipe_supports_async_user_lookup(monkeypatch) -> None:
    """The pipe awaits ``Users.get_user_by_id`` when it returns a coroutine.

    Open WebUI 0.9.x made the user lookup async; the pipe must handle that
    shape as well as the older synchronous one.
    """
    module = _load_module_with_openwebui_stubs("owui_pipe_async_user_lookup", monkeypatch)

    pipe = module.Pipe()
    pipe.valves.BASE_MODEL_ID = "base-model"

    # Stand-in for the 0.9.x coroutine-returning lookup.
    async def _async_lookup(user_id: object) -> dict[str, object]:
        return {"id": user_id}

    monkeypatch.setattr(module.Users, "get_user_by_id", _async_lookup)

    request_body = {"model": "pipe-model", "messages": [{"role": "user", "content": "hello"}]}
    result = asyncio.run(
        pipe.pipe(request_body, __user__={"id": "u1"}, __request__=object())
    )

    # A dict response means the forward path completed without raising on the
    # awaitable user object.
    assert isinstance(result, dict)
21 changes: 21 additions & 0 deletions tests/test_openwebui_preprocessor_pipe.py
Original file line number Diff line number Diff line change
Expand Up @@ -115,6 +115,27 @@ def test_preprocessor_model_can_be_overridden(monkeypatch) -> None:
assert pipe._resolve_preprocessor_model_id("base-model") == "prep-model"


def test_preprocessor_pipe_supports_async_user_lookup(monkeypatch) -> None:
    """Model-id validation tolerates an async ``Users.get_user_by_id``.

    Mirrors the base-pipe async-lookup test for the preprocessor variant:
    ``_validate_configured_model_ids`` must await the coroutine rather than
    pass it to the model-discovery call.
    """
    module = _load_module_with_openwebui_stubs("owui_preproc_async_user_lookup", monkeypatch)

    pipe = module.Pipe()

    # Stand-in for the 0.9.x coroutine-returning lookup.
    async def _async_lookup(user_id: object) -> dict[str, object]:
        return {"id": user_id}

    monkeypatch.setattr(module.Users, "get_user_by_id", _async_lookup)

    validation_error = asyncio.run(
        pipe._validate_configured_model_ids(
            request=object(),
            user_payload={"id": "u1"},
            base_model_id="base-model",
            preprocessor_model_id="prep-model",
        )
    )

    # ``None`` means both configured model ids validated cleanly.
    assert validation_error is None


def test_invalid_preprocessor_model_is_normalized(monkeypatch) -> None:
module = _load_module_with_openwebui_stubs("owui_preproc_invalid", monkeypatch)
pipe = module.Pipe()
Expand Down
2 changes: 1 addition & 1 deletion uv.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

Loading