From cbcc56a39a486c8158d61ef656f0040e13df0634 Mon Sep 17 00:00:00 2001 From: "huhaoyu.hahahu" Date: Wed, 25 Mar 2026 11:03:47 +0800 Subject: [PATCH 1/5] Stabilize package for pre-release validation --- .github/workflows/python-package.yml | 9 ++-- build.sh | 11 ++-- dify_client/_clientx.py | 4 +- dify_client/errors.py | 43 ++++++++++++--- dify_client/models/__init__.py | 2 +- dify_client/models/base.py | 6 ++- dify_client/models/chat.py | 4 +- dify_client/models/completion.py | 4 +- dify_client/models/stream.py | 6 +-- dify_client/models/workflow.py | 18 +++---- setup.py | 12 ++--- tests/test_client.py | 81 ++++++++++++++++++++++++++++ tests/test_errors.py | 59 ++++++++++++++++++++ tests/test_import_and_models.py | 45 ++++++++++++++++ 14 files changed, 263 insertions(+), 41 deletions(-) create mode 100644 tests/test_client.py create mode 100644 tests/test_errors.py create mode 100644 tests/test_import_and_models.py diff --git a/.github/workflows/python-package.yml b/.github/workflows/python-package.yml index 73784a4..e2aa190 100644 --- a/.github/workflows/python-package.yml +++ b/.github/workflows/python-package.yml @@ -16,7 +16,7 @@ jobs: strategy: fail-fast: false matrix: - python-version: ["3.7", "3.8", "3.9", "3.10", "3.11"] + python-version: ["3.8", "3.9", "3.10", "3.11", "3.12"] steps: - uses: actions/checkout@v4 @@ -27,8 +27,7 @@ jobs: - name: Install dependencies run: | python -m pip install --upgrade pip - python -m pip install flake8 pytest - if [ -f requirements.txt ]; then pip install -r requirements.txt; fi + python -m pip install -e . flake8 pytest build twine - name: Lint with flake8 run: | # stop the build if there are Python syntax errors or undefined names @@ -38,3 +37,7 @@ jobs: - name: Test with pytest run: | pytest + - name: Build and verify package + run: | + python -m build --no-isolation + python -m twine check --strict dist/* diff --git a/build.sh b/build.sh index ca1a762..9945b0a 100755 --- a/build.sh +++ b/build.sh @@ -1,9 +1,10 @@ -#!/bin/bash +#!/usr/bin/env bash -set -e +set -euo pipefail rm -rf build dist *.egg-info -pip install setuptools wheel twine -python setup.py sdist bdist_wheel -twine upload dist/* \ No newline at end of file +python -m pip install --upgrade build twine +python -m build --no-isolation +python -m twine check --strict dist/* +python -m twine upload dist/* diff --git a/dify_client/_clientx.py b/dify_client/_clientx.py index bf0ed7a..5ee0a5d 100644 --- a/dify_client/_clientx.py +++ b/dify_client/_clientx.py @@ -342,7 +342,7 @@ def _stop_stream(self, endpoint: str, req: models.StopRequest, **kwargs) -> mode return models.StopResponse(**response.json()) def _prepare_url(self, endpoint: str, **kwargs) -> str: - return self.api_base + endpoint.format(**kwargs) + return f"{self.api_base.rstrip('/')}/{endpoint.format(**kwargs).lstrip('/')}" def _prepare_auth_headers(self, headers: Dict[str, str]): if "authorization" not in (key.lower() for key in headers.keys()): @@ -644,7 +644,7 @@ async def _astop_stream(self, endpoint: str, req: models.StopRequest, **kwargs) return models.StopResponse(**response.json()) def _prepare_url(self, endpoint: str, **kwargs) -> str: - return self.api_base + endpoint.format(**kwargs) + return f"{self.api_base.rstrip('/')}/{endpoint.format(**kwargs).lstrip('/')}" def _prepare_auth_headers(self, headers: Dict[str, str]): if "authorization" not in (key.lower() for key in headers.keys()): diff --git a/dify_client/errors.py b/dify_client/errors.py index 8d268fd..b849ba7 100644 --- a/dify_client/errors.py +++ 
b/dify_client/errors.py @@ -1,5 +1,5 @@ from http import HTTPStatus -from typing import Union +from typing import Any, Dict, Union import httpx import httpx_sse @@ -109,18 +109,49 @@ class DifyS3FileTooLarge(DifyAPIError): } +def _build_error_response(response: httpx.Response) -> models.ErrorResponse: + fallback_message = response.text or response.reason_phrase or "Request failed" + try: + payload = response.json() + except ValueError: + payload = {} + + if not isinstance(payload, dict): + payload = {} + if "status" not in payload: + payload["status"] = response.status_code + if not payload.get("code"): + payload["code"] = "" + if not payload.get("message"): + payload["message"] = fallback_message + return models.ErrorResponse(**payload) + + +def _build_error_stream_response(response: httpx_sse.ServerSentEvent) -> models.ErrorStreamResponse: + try: + payload: Dict[str, Any] = response.json() + except ValueError: + payload = {} + + if not isinstance(payload, dict): + payload = {} + payload.setdefault("event", response.event or models.StreamEvent.ERROR.value) + if not payload.get("message"): + payload["message"] = response.data or "" + if not payload.get("code"): + payload["code"] = "" + return models.ErrorStreamResponse(**payload) + + def raise_for_status(response: Union[httpx.Response, httpx_sse.ServerSentEvent]): if isinstance(response, httpx.Response): if response.is_success: return - json = response.json() - if "status" not in json: - json["status"] = response.status_code - details = models.ErrorResponse(**json) + details = _build_error_response(response) elif isinstance(response, httpx_sse.ServerSentEvent): if response.event != models.StreamEvent.ERROR.value: return - details = models.ErrorStreamResponse(**response.json()) + details = _build_error_stream_response(response) else: raise ValueError(f"Invalid dify response type: {type(response)}") diff --git a/dify_client/models/__init__.py b/dify_client/models/__init__.py index 9a9aa5f..69778c1 100644 --- a/dify_client/models/__init__.py +++ b/dify_client/models/__init__.py @@ -4,4 +4,4 @@ from .file import * from .workflow import * from .stream import * -from .base import StopRequest, StopResponse +from .base import * diff --git a/dify_client/models/base.py b/dify_client/models/base.py index ef7741a..1005f6d 100644 --- a/dify_client/models/base.py +++ b/dify_client/models/base.py @@ -5,14 +5,16 @@ from http import HTTPStatus from typing import Optional, List -from pydantic import BaseModel, ConfigDict +from pydantic import BaseModel, ConfigDict, Field class Mode(StrEnum): CHAT = "chat" COMPLETION = "completion" + ADVANCED_CHAT = "advanced-chat" ADAVANCED_CHAT = "advanced-chat" + class ResponseMode(StrEnum): STREAMING = 'streaming' BLOCKING = 'blocking' @@ -76,7 +78,7 @@ class RetrieverResource(BaseModel): class Metadata(BaseModel): usage: Usage - retriever_resources: List[RetrieverResource] = [] + retriever_resources: List[RetrieverResource] = Field(default_factory=list) class StopRequest(BaseModel): diff --git a/dify_client/models/chat.py b/dify_client/models/chat.py index 1dff07a..51618de 100644 --- a/dify_client/models/chat.py +++ b/dify_client/models/chat.py @@ -12,7 +12,7 @@ class ChatRequest(BaseModel): response_mode: ResponseMode user: str conversation_id: Optional[str] = "" - files: List[File] = [] + files: List[File] = Field(default_factory=list) auto_generate_name: bool = True @@ -26,4 +26,4 @@ class ChatSuggestRequest(BaseModel): class ChatSuggestResponse(BaseModel): result: str - data: List[str] = [] + data: List[str] = 
Field(default_factory=list) diff --git a/dify_client/models/completion.py b/dify_client/models/completion.py index e4b1832..942287e 100644 --- a/dify_client/models/completion.py +++ b/dify_client/models/completion.py @@ -1,6 +1,6 @@ from typing import Optional, List -from pydantic import BaseModel +from pydantic import BaseModel, Field from dify_client.models.base import CompletionInputs, ResponseMode, File, Metadata, Mode @@ -10,7 +10,7 @@ class CompletionRequest(BaseModel): response_mode: ResponseMode user: str conversation_id: Optional[str] = "" - files: List[File] = [] + files: List[File] = Field(default_factory=list) class CompletionResponse(BaseModel): diff --git a/dify_client/models/stream.py b/dify_client/models/stream.py index da0594d..b863586 100644 --- a/dify_client/models/stream.py +++ b/dify_client/models/stream.py @@ -4,7 +4,7 @@ from strenum import StrEnum from typing import Union, Optional, List -from pydantic import BaseModel, ConfigDict, field_validator +from pydantic import BaseModel, ConfigDict, Field, field_validator from dify_client import utils from dify_client.models.base import Metadata, ErrorResponse @@ -42,7 +42,7 @@ def new(cls, event: Union["StreamEvent", str]) -> "StreamEvent": class StreamResponse(BaseModel): model_config = ConfigDict(extra='allow') - event: StreamEvent | str + event: Union[StreamEvent, str] task_id: Optional[str] = "" @field_validator("event", mode="before") @@ -89,7 +89,7 @@ class AgentThoughtStreamResponse(StreamResponse): observation: str tool: str tool_input: str - message_files: List[str] = [] + message_files: List[str] = Field(default_factory=list) created_at: int # unix timestamp seconds diff --git a/dify_client/models/workflow.py b/dify_client/models/workflow.py index 7fad18f..66c1148 100644 --- a/dify_client/models/workflow.py +++ b/dify_client/models/workflow.py @@ -4,7 +4,7 @@ from strenum import StrEnum from typing import Dict, List, Optional -from pydantic import BaseModel +from pydantic import BaseModel, Field from dify_client.models.base import ResponseMode, File @@ -25,7 +25,7 @@ class ExecutionMetadata(BaseModel): class WorkflowStartedData(BaseModel): id: str # workflow run id workflow_id: str # workflow id - sequence_number: int | None = None + sequence_number: Optional[int] = None inputs: Optional[dict] = None created_at: int # unix timestamp seconds @@ -39,7 +39,7 @@ class NodeStartedData(BaseModel): predecessor_node_id: Optional[str] = None inputs: Optional[dict] = None created_at: int - extras: dict = {} + extras: dict = Field(default_factory=dict) class NodeFinishedData(BaseModel): @@ -51,14 +51,14 @@ class NodeFinishedData(BaseModel): predecessor_node_id: Optional[str] = None inputs: Optional[dict] = None process_data: Optional[dict] = None - outputs: Optional[dict] = {} + outputs: Optional[dict] = Field(default_factory=dict) status: WorkflowStatus error: Optional[str] = None elapsed_time: Optional[float] # seconds execution_metadata: Optional[ExecutionMetadata] = None created_at: int finished_at: int - files: List = [] + files: List = Field(default_factory=list) class WorkflowFinishedData(BaseModel): @@ -73,16 +73,16 @@ class WorkflowFinishedData(BaseModel): total_steps: Optional[int] = 0 created_at: int finished_at: int - created_by: dict = {} - files: List = [] + created_by: dict = Field(default_factory=dict) + files: List = Field(default_factory=list) class WorkflowsRunRequest(BaseModel): - inputs: Dict = {} + inputs: Dict = Field(default_factory=dict) response_mode: ResponseMode user: str conversation_id: 
Optional[str] = "" - files: List[File] = [] + files: List[File] = Field(default_factory=list) class WorkflowsRunResponse(BaseModel): diff --git a/setup.py b/setup.py index cd75a4e..4a05371 100644 --- a/setup.py +++ b/setup.py @@ -1,4 +1,4 @@ -from setuptools import setup +from setuptools import find_packages, setup with open("README.md", "r", encoding="utf-8") as fh: long_description = fh.read() @@ -12,20 +12,20 @@ long_description=long_description, long_description_content_type="text/markdown", url="https://github.com/haoyuhu/dify-client-python", - license='MIT', - packages=['dify_client'], + license="MIT", + packages=find_packages(exclude=("tests", "tests.*")), classifiers=[ "Programming Language :: Python :: 3", "License :: OSI Approved :: MIT License", "Operating System :: OS Independent", ], - python_requires=">=3.7", + python_requires=">=3.8", install_requires=[ "httpx", "httpx-sse", - "pydantic", + "pydantic>=2,<3", "StrEnum", ], - keywords='dify nlp ai language-processing', + keywords="dify nlp ai language-processing", include_package_data=True, ) diff --git a/tests/test_client.py b/tests/test_client.py new file mode 100644 index 0000000..814abc3 --- /dev/null +++ b/tests/test_client.py @@ -0,0 +1,81 @@ +import httpx +from httpx_sse import ServerSentEvent + +from dify_client._clientx import AsyncClient, Client + + +def _ok_response(method: str, url: str) -> httpx.Response: + request = httpx.Request(method, url) + return httpx.Response(status_code=200, json={"ok": True}, request=request) + + +def test_prepare_url_normalizes_slashes(): + client = Client(api_key="token", api_base="https://api.example.com/v1/") + assert client._prepare_url("/chat-messages") == "https://api.example.com/v1/chat-messages" + + async_client = AsyncClient(api_key="token", api_base="https://api.example.com/v1") + assert async_client._prepare_url("chat-messages") == "https://api.example.com/v1/chat-messages" + + +def test_prepare_auth_headers_keeps_existing_authorization(): + headers = {"authorization": "Bearer custom-token"} + Client(api_key="token")._prepare_auth_headers(headers) + assert headers["authorization"] == "Bearer custom-token" + assert "Authorization" not in headers + + +def test_request_injects_bearer_token(monkeypatch): + captured = {} + + def fake_request(method, endpoint, **kwargs): + captured["method"] = method + captured["endpoint"] = endpoint + captured["headers"] = kwargs["headers"] + return _ok_response(str(method), endpoint) + + from dify_client import _clientx + + monkeypatch.setattr(_clientx._httpx_client, "request", fake_request) + client = Client(api_key="token", api_base="https://api.example.com/v1") + client.request(client._prepare_url("/chat-messages"), "GET") + + assert captured["method"] == "GET" + assert captured["endpoint"] == "https://api.example.com/v1/chat-messages" + assert captured["headers"]["Authorization"] == "Bearer token" + + +def test_request_stream_ignores_ping_events(monkeypatch): + class FakeEventSource: + def __init__(self, response, events): + self.response = response + self._events = events + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + return False + + def iter_sse(self): + yield from self._events + + response = httpx.Response( + status_code=200, + headers={"content-type": "text/event-stream"}, + request=httpx.Request("POST", "https://api.example.com/v1/chat-messages"), + ) + events = [ + ServerSentEvent(event="ping", data=""), + ServerSentEvent(event="message", data='{"event":"message","answer":"ok"}'), + ] + + 
def fake_connect_sse(*args, **kwargs): + return FakeEventSource(response=response, events=events) + + from dify_client import _clientx + + monkeypatch.setattr(_clientx, "connect_sse", fake_connect_sse) + client = Client(api_key="token", api_base="https://api.example.com/v1") + chunks = list(client.request_stream(client._prepare_url("/chat-messages"), "POST", json={"a": 1})) + assert len(chunks) == 1 + assert chunks[0].event == "message" diff --git a/tests/test_errors.py b/tests/test_errors.py new file mode 100644 index 0000000..85a5088 --- /dev/null +++ b/tests/test_errors.py @@ -0,0 +1,59 @@ +import httpx +from httpx_sse import ServerSentEvent + +from dify_client import errors + + +def test_raise_for_status_maps_known_error_code(): + response = httpx.Response( + status_code=400, + json={ + "status": 400, + "code": "invalid_param", + "message": "bad request", + }, + request=httpx.Request("POST", "https://api.example.com/v1/chat-messages"), + ) + + try: + errors.raise_for_status(response) + except errors.DifyInvalidParam as exc: + assert exc.status == 400 + assert exc.code == "invalid_param" + assert "bad request" in exc.message + else: + raise AssertionError("Expected DifyInvalidParam") + + +def test_raise_for_status_uses_fallback_for_non_json_response(): + response = httpx.Response( + status_code=502, + text="Bad gateway", + request=httpx.Request("GET", "https://api.example.com/v1/workflows/run"), + ) + + try: + errors.raise_for_status(response) + except errors.DifyAPIError as exc: + assert exc.status == 502 + assert exc.code == "" + assert "Bad gateway" in exc.message + else: + raise AssertionError("Expected DifyAPIError") + + +def test_raise_for_status_handles_non_json_stream_error(): + sse = ServerSentEvent(event="error", data="stream failed") + try: + errors.raise_for_status(sse) + except errors.DifyInternalServerError as exc: + assert exc.status == 500 + assert exc.code == "" + assert "stream failed" in exc.message + else: + raise AssertionError("Expected DifyInternalServerError") + + +def test_raise_for_status_ignores_non_error_stream_event(): + sse = ServerSentEvent(event="message", data="ok") + errors.raise_for_status(sse) diff --git a/tests/test_import_and_models.py b/tests/test_import_and_models.py new file mode 100644 index 0000000..24b5147 --- /dev/null +++ b/tests/test_import_and_models.py @@ -0,0 +1,45 @@ +from dify_client import AsyncClient, Client, models + + +def test_imports_and_type_annotations_work_on_python39_plus(): + client = Client(api_key="token") + async_client = AsyncClient(api_key="token") + assert client.api_base == "https://api.dify.ai/v1" + assert async_client.api_base == "https://api.dify.ai/v1" + assert models.Mode.ADVANCED_CHAT.value == "advanced-chat" + assert models.Mode.ADAVANCED_CHAT == models.Mode.ADVANCED_CHAT + + +def test_mutable_defaults_are_not_shared_between_instances(): + chat_req_a = models.ChatRequest( + query="hello", + response_mode=models.ResponseMode.BLOCKING, + user="u1", + ) + chat_req_b = models.ChatRequest( + query="world", + response_mode=models.ResponseMode.BLOCKING, + user="u2", + ) + + chat_req_a.files.append( + models.File( + type=models.FileType.IMAGE, + transfer_method=models.TransferMethod.REMOTE_URL, + url="https://example.com/a.png", + ) + ) + + assert chat_req_b.files == [] + + workflow_req_a = models.WorkflowsRunRequest( + response_mode=models.ResponseMode.BLOCKING, + user="u1", + ) + workflow_req_b = models.WorkflowsRunRequest( + response_mode=models.ResponseMode.BLOCKING, + user="u2", + ) + + 
workflow_req_a.inputs["city"] = "beijing" + assert workflow_req_b.inputs == {} From 12c7f8088e5552f2230b50eb7618e70da3fac519 Mon Sep 17 00:00:00 2001 From: "huhaoyu.hahahu" Date: Wed, 25 Mar 2026 11:47:02 +0800 Subject: [PATCH 2/5] Align runtime models with latest Dify API and fix CI build deps --- .github/workflows/python-package.yml | 4 +- build.sh | 2 +- dify_client/_clientx.py | 70 ++++++++++++++++---- dify_client/models/__init__.py | 1 + dify_client/models/audio.py | 19 ++++++ dify_client/models/base.py | 9 ++- dify_client/models/completion.py | 13 ++-- dify_client/models/feedback.py | 1 + dify_client/models/file.py | 11 +++- dify_client/models/stream.py | 98 +++++++++++++++++++++++++--- dify_client/models/workflow.py | 49 +++++++++----- tests/test_client.py | 53 +++++++++++++++ tests/test_import_and_models.py | 30 ++++++++- tests/test_stream.py | 47 +++++++++++++ 14 files changed, 358 insertions(+), 49 deletions(-) create mode 100644 dify_client/models/audio.py create mode 100644 tests/test_stream.py diff --git a/.github/workflows/python-package.yml b/.github/workflows/python-package.yml index e2aa190..c7b8b9d 100644 --- a/.github/workflows/python-package.yml +++ b/.github/workflows/python-package.yml @@ -21,13 +21,13 @@ jobs: steps: - uses: actions/checkout@v4 - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v3 + uses: actions/setup-python@v5 with: python-version: ${{ matrix.python-version }} - name: Install dependencies run: | python -m pip install --upgrade pip - python -m pip install -e . flake8 pytest build twine + python -m pip install -e . flake8 pytest build twine setuptools wheel - name: Lint with flake8 run: | # stop the build if there are Python syntax errors or undefined names diff --git a/build.sh b/build.sh index 9945b0a..1477dee 100755 --- a/build.sh +++ b/build.sh @@ -4,7 +4,7 @@ set -euo pipefail rm -rf build dist *.egg-info -python -m pip install --upgrade build twine +python -m pip install --upgrade setuptools wheel build twine python -m build --no-isolation python -m twine check --strict dist/* python -m twine upload dist/* diff --git a/dify_client/_clientx.py b/dify_client/_clientx.py index 5ee0a5d..1ca6db7 100644 --- a/dify_client/_clientx.py +++ b/dify_client/_clientx.py @@ -40,12 +40,20 @@ class HTTPMethod(StrEnum): ENDPOINT_STOP_CHAT_MESSAGES = "/chat-messages/{task_id}/stop" # workflow ENDPOINT_RUN_WORKFLOWS = "/workflows/run" -ENDPOINT_STOP_WORKFLOWS = "/workflows/{task_id}/stop" +ENDPOINT_STOP_WORKFLOWS = "/workflows/tasks/{task_id}/stop" # audio <-> text ENDPOINT_TEXT_TO_AUDIO = "/text-to-audio" ENDPOINT_AUDIO_TO_TEXT = "/audio-to-text" +def _completion_request_payload(req: models.CompletionRequest) -> Dict[str, Any]: + # Keep compatibility with historical `conversation_id` usage while matching + # current runtime payload shape. 
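+    # Illustrative sketch (invented values): CompletionRequest(query="hi", user="u1",
+    # response_mode=ResponseMode.BLOCKING) produces a JSON body like
+    # {"inputs": {}, "query": "hi", "response_mode": "blocking", "user": "u1",
+    # "files": []}; any lingering legacy conversation_id key is dropped below.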
+ payload = req.model_dump(exclude_none=True) + payload.pop("conversation_id", None) + return payload + + class Client(BaseModel): api_key: str api_base: Optional[str] = "https://api.dify.ai/v1" @@ -191,12 +199,32 @@ def upload_files(self, file: types.FileTypes, req: models.UploadFileRequest, response = self.request( self._prepare_url(ENDPOINT_FILES_UPLOAD), HTTPMethod.POST, - data=req.model_dump(), + data=req.model_dump(exclude_none=True), files=[("file", file)], **kwargs, ) return models.UploadFileResponse(**response.json()) + def audio_to_text(self, file: types.FileTypes, req: models.AudioToTextRequest, + **kwargs) -> models.AudioToTextResponse: + response = self.request( + self._prepare_url(ENDPOINT_AUDIO_TO_TEXT), + HTTPMethod.POST, + data=req.model_dump(exclude_none=True), + files=[("file", file)], + **kwargs, + ) + return models.AudioToTextResponse(**response.json()) + + def text_to_audio(self, req: models.TextToAudioRequest, **kwargs) -> bytes: + response = self.request( + self._prepare_url(ENDPOINT_TEXT_TO_AUDIO), + HTTPMethod.POST, + json=req.model_dump(exclude_none=True), + **kwargs, + ) + return response.content + def completion_messages(self, req: models.CompletionRequest, **kwargs) \ -> Union[models.CompletionResponse, Iterator[models.CompletionStreamResponse]]: """ @@ -217,7 +245,7 @@ def _completion_messages(self, req: models.CompletionRequest, **kwargs) -> model response = self.request( self._prepare_url(ENDPOINT_COMPLETION_MESSAGES), HTTPMethod.POST, - json=req.model_dump(), + json=_completion_request_payload(req), **kwargs, ) return models.CompletionResponse(**response.json()) @@ -227,7 +255,7 @@ def _completion_messages_stream(self, req: models.CompletionRequest, **kwargs) \ event_source = self.request_stream( self._prepare_url(ENDPOINT_COMPLETION_MESSAGES), HTTPMethod.POST, - json=req.model_dump(), + json=_completion_request_payload(req), **kwargs, ) for sse in event_source: @@ -307,7 +335,7 @@ def _run_workflows(self, req: models.WorkflowsRunRequest, **kwargs) -> models.Wo response = self.request( self._prepare_url(ENDPOINT_RUN_WORKFLOWS), HTTPMethod.POST, - json=req.model_dump(), + json=req.model_dump(exclude_none=True), **kwargs, ) return models.WorkflowsRunResponse(**response.json()) @@ -317,7 +345,7 @@ def _run_workflows_stream(self, req: models.WorkflowsRunRequest, **kwargs) \ event_source = self.request_stream( self._prepare_url(ENDPOINT_RUN_WORKFLOWS), HTTPMethod.POST, - json=req.model_dump(), + json=req.model_dump(exclude_none=True), **kwargs, ) for sse in event_source: @@ -497,12 +525,32 @@ async def aupload_files(self, file: types.FileTypes, req: models.UploadFileReque response = await self.arequest( self._prepare_url(ENDPOINT_FILES_UPLOAD), HTTPMethod.POST, - data=req.model_dump(), + data=req.model_dump(exclude_none=True), files=[("file", file)], **kwargs, ) return models.UploadFileResponse(**response.json()) + async def aaudio_to_text(self, file: types.FileTypes, req: models.AudioToTextRequest, **kwargs) \ + -> models.AudioToTextResponse: + response = await self.arequest( + self._prepare_url(ENDPOINT_AUDIO_TO_TEXT), + HTTPMethod.POST, + data=req.model_dump(exclude_none=True), + files=[("file", file)], + **kwargs, + ) + return models.AudioToTextResponse(**response.json()) + + async def atext_to_audio(self, req: models.TextToAudioRequest, **kwargs) -> bytes: + response = await self.arequest( + self._prepare_url(ENDPOINT_TEXT_TO_AUDIO), + HTTPMethod.POST, + json=req.model_dump(exclude_none=True), + **kwargs, + ) + return response.content + async def 
acompletion_messages(self, req: models.CompletionRequest, **kwargs) \ -> Union[models.CompletionResponse, AsyncIterator[models.CompletionStreamResponse]]: """ @@ -523,7 +571,7 @@ async def _acompletion_messages(self, req: models.CompletionRequest, **kwargs) - response = await self.arequest( self._prepare_url(ENDPOINT_COMPLETION_MESSAGES), HTTPMethod.POST, - json=req.model_dump(), + json=_completion_request_payload(req), **kwargs, ) return models.CompletionResponse(**response.json()) @@ -533,7 +581,7 @@ async def _acompletion_messages_stream(self, req: models.CompletionRequest, **kw async for sse in self.arequest_stream( self._prepare_url(ENDPOINT_COMPLETION_MESSAGES), HTTPMethod.POST, - json=req.model_dump(), + json=_completion_request_payload(req), **kwargs): yield models.build_completion_stream_response(sse.json()) @@ -611,7 +659,7 @@ async def _arun_workflows(self, req: models.WorkflowsRunRequest, **kwargs) -> mo response = await self.arequest( self._prepare_url(ENDPOINT_RUN_WORKFLOWS), HTTPMethod.POST, - json=req.model_dump(), + json=req.model_dump(exclude_none=True), **kwargs, ) return models.WorkflowsRunResponse(**response.json()) @@ -621,7 +669,7 @@ async def _arun_workflows_stream(self, req: models.WorkflowsRunRequest, **kwargs async for sse in self.arequest_stream( self._prepare_url(ENDPOINT_RUN_WORKFLOWS), HTTPMethod.POST, - json=req.model_dump(), + json=req.model_dump(exclude_none=True), **kwargs): yield models.build_workflows_stream_response(sse.json()) diff --git a/dify_client/models/__init__.py b/dify_client/models/__init__.py index 69778c1..a9a4c09 100644 --- a/dify_client/models/__init__.py +++ b/dify_client/models/__init__.py @@ -2,6 +2,7 @@ from .completion import * from .feedback import * from .file import * +from .audio import * from .workflow import * from .stream import * from .base import * diff --git a/dify_client/models/audio.py b/dify_client/models/audio.py new file mode 100644 index 0000000..eb85845 --- /dev/null +++ b/dify_client/models/audio.py @@ -0,0 +1,19 @@ +from typing import Optional + +from pydantic import BaseModel + + +class AudioToTextRequest(BaseModel): + user: Optional[str] = None + + +class AudioToTextResponse(BaseModel): + text: str + + +class TextToAudioRequest(BaseModel): + message_id: Optional[str] = None + text: Optional[str] = None + user: Optional[str] = None + voice: Optional[str] = None + streaming: Optional[bool] = None diff --git a/dify_client/models/base.py b/dify_client/models/base.py index 1005f6d..ec4ece8 100644 --- a/dify_client/models/base.py +++ b/dify_client/models/base.py @@ -22,6 +22,10 @@ class ResponseMode(StrEnum): class FileType(StrEnum): IMAGE = "image" + DOCUMENT = "document" + AUDIO = "audio" + VIDEO = "video" + CUSTOM = "custom" class TransferMethod(StrEnum): @@ -35,8 +39,9 @@ class TransferMethod(StrEnum): # The text generation application requires at least one key/value pair to be inputted. class CompletionInputs(BaseModel): model_config = ConfigDict(extra='allow') - # Required The input text, the content to be processed. - query: str + # Legacy field. In newer applications, query should be passed in the + # input mapping as a regular variable. 
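+    # Hypothetical example: a newer app sends the text via the request-level
+    # inputs mapping, e.g. inputs={"query": "...", "lang": "en"}, and leaves
+    # this legacy field empty.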
+ query: Optional[str] = "" class File(BaseModel): diff --git a/dify_client/models/completion.py b/dify_client/models/completion.py index 942287e..39e5c3d 100644 --- a/dify_client/models/completion.py +++ b/dify_client/models/completion.py @@ -1,21 +1,24 @@ -from typing import Optional, List +from typing import Any, Dict, List, Optional from pydantic import BaseModel, Field -from dify_client.models.base import CompletionInputs, ResponseMode, File, Metadata, Mode +from dify_client.models.base import File, Metadata, Mode, ResponseMode class CompletionRequest(BaseModel): - inputs: CompletionInputs + inputs: Dict[str, Any] = Field(default_factory=dict) + # Legacy field. Prefer passing query in `inputs`. + query: Optional[str] = "" response_mode: ResponseMode user: str - conversation_id: Optional[str] = "" files: List[File] = Field(default_factory=list) class CompletionResponse(BaseModel): + event: Optional[str] = None + task_id: Optional[str] = None + id: Optional[str] = None message_id: str - conversation_id: Optional[str] = "" mode: Mode answer: str metadata: Metadata diff --git a/dify_client/models/feedback.py b/dify_client/models/feedback.py index aae0242..911ebff 100644 --- a/dify_client/models/feedback.py +++ b/dify_client/models/feedback.py @@ -15,6 +15,7 @@ class Rating(StrEnum): class FeedbackRequest(BaseModel): rating: Optional[Rating] = None user: str + content: Optional[str] = None class FeedbackResponse(BaseModel): diff --git a/dify_client/models/file.py b/dify_client/models/file.py index 9788f7a..978cec3 100644 --- a/dify_client/models/file.py +++ b/dify_client/models/file.py @@ -1,8 +1,10 @@ +from typing import Optional + from pydantic import BaseModel class UploadFileRequest(BaseModel): - user: str + user: Optional[str] = None class UploadFileResponse(BaseModel): @@ -13,3 +15,10 @@ class UploadFileResponse(BaseModel): mime_type: str created_by: str # created by user created_at: int # unix timestamp seconds + preview_url: Optional[str] = None + source_url: Optional[str] = None + original_url: Optional[str] = None + user_id: Optional[str] = None + tenant_id: Optional[str] = None + conversation_id: Optional[str] = None + file_key: Optional[str] = None diff --git a/dify_client/models/stream.py b/dify_client/models/stream.py index b863586..a20792d 100644 --- a/dify_client/models/stream.py +++ b/dify_client/models/stream.py @@ -2,7 +2,7 @@ from enum import StrEnum except ImportError: from strenum import StrEnum -from typing import Union, Optional, List +from typing import Any, Dict, List, Optional, Union from pydantic import BaseModel, ConfigDict, Field, field_validator @@ -18,18 +18,32 @@ class StreamEvent(StrEnum): AGENT_MESSAGE = "agent_message" AGENT_THOUGHT = "agent_thought" MESSAGE_FILE = "message_file" # need to show file + TTS_MESSAGE = "tts_message" + TTS_MESSAGE_END = "tts_message_end" WORKFLOW_STARTED = "workflow_started" NODE_STARTED = "node_started" NODE_FINISHED = "node_finished" + NODE_RETRY = "node_retry" + ITERATION_STARTED = "iteration_started" + ITERATION_NEXT = "iteration_next" + ITERATION_COMPLETED = "iteration_completed" + LOOP_STARTED = "loop_started" + LOOP_NEXT = "loop_next" + LOOP_COMPLETED = "loop_completed" + TEXT_CHUNK = "text_chunk" + TEXT_REPLACE = "text_replace" WORKFLOW_FINISHED = "workflow_finished" + WORKFLOW_PAUSED = "workflow_paused" + HUMAN_INPUT_REQUIRED = "human_input_required" + HUMAN_INPUT_FORM_FILLED = "human_input_form_filled" + HUMAN_INPUT_FORM_TIMEOUT = "human_input_form_timeout" MESSAGE_END = "message_end" MESSAGE_REPLACE = 
"message_replace" ERROR = "error" PING = "ping" - TTS_MESSAGE_END = "tts_message_end" + # Legacy events from old chatflow runtime. PARALLEL_BRANCH_STARTED = "parallel_branch_started" PARALLEL_BRANCH_FINISHED = "parallel_branch_finished" - NODE_RETRY = "node_retry" AGENT_LOG = "agent_log" @classmethod @@ -95,31 +109,55 @@ class AgentThoughtStreamResponse(StreamResponse): class MessageFileStreamResponse(StreamResponse): id: str # file id - conversation_id: str + conversation_id: Optional[str] = "" type: str # only image belongs_to: str # assistant url: str +class TtsMessageStreamResponse(StreamResponse): + audio: str + created_at: Optional[int] = None + + +class TextStreamData(BaseModel): + text: str + from_variable_selector: Optional[List[str]] = None + + +class TextChunkStreamResponse(StreamResponse): + data: TextStreamData + workflow_run_id: Optional[str] = "" + + +class TextReplaceStreamResponse(StreamResponse): + data: TextStreamData + workflow_run_id: Optional[str] = "" + + class WorkflowsStreamResponse(StreamResponse): - workflow_run_id: str + workflow_run_id: Optional[str] = "" data: Optional[Union[ WorkflowStartedData, WorkflowFinishedData, NodeStartedData, - NodeFinishedData] + NodeFinishedData, + Dict[str, Any]] ] = None class ChatWorkflowsStreamResponse(WorkflowsStreamResponse): - message_id: str - conversation_id: str - created_at: int + message_id: Optional[str] = "" + conversation_id: Optional[str] = "" + created_at: Optional[int] = None _COMPLETION_EVENT_TO_STREAM_RESP_MAPPING = { StreamEvent.PING: PingResponse, StreamEvent.MESSAGE: MessageStreamResponse, + StreamEvent.MESSAGE_FILE: MessageFileStreamResponse, + StreamEvent.TTS_MESSAGE: TtsMessageStreamResponse, + StreamEvent.TTS_MESSAGE_END: TtsMessageStreamResponse, StreamEvent.MESSAGE_END: MessageEndStreamResponse, StreamEvent.MESSAGE_REPLACE: MessageReplaceStreamResponse, } @@ -127,6 +165,8 @@ class ChatWorkflowsStreamResponse(WorkflowsStreamResponse): CompletionStreamResponse = Union[ PingResponse, MessageStreamResponse, + MessageFileStreamResponse, + TtsMessageStreamResponse, MessageEndStreamResponse, MessageReplaceStreamResponse, ] @@ -141,6 +181,8 @@ def build_completion_stream_response(data: dict) -> CompletionStreamResponse: StreamEvent.PING: PingResponse, # chat StreamEvent.MESSAGE: MessageStreamResponse, + StreamEvent.TTS_MESSAGE: TtsMessageStreamResponse, + StreamEvent.TTS_MESSAGE_END: TtsMessageStreamResponse, StreamEvent.MESSAGE_END: MessageEndStreamResponse, StreamEvent.MESSAGE_REPLACE: MessageReplaceStreamResponse, StreamEvent.MESSAGE_FILE: MessageFileStreamResponse, @@ -151,17 +193,36 @@ def build_completion_stream_response(data: dict) -> CompletionStreamResponse: StreamEvent.WORKFLOW_STARTED: WorkflowsStreamResponse, StreamEvent.NODE_STARTED: WorkflowsStreamResponse, StreamEvent.NODE_FINISHED: WorkflowsStreamResponse, + StreamEvent.NODE_RETRY: WorkflowsStreamResponse, + StreamEvent.ITERATION_STARTED: WorkflowsStreamResponse, + StreamEvent.ITERATION_NEXT: WorkflowsStreamResponse, + StreamEvent.ITERATION_COMPLETED: WorkflowsStreamResponse, + StreamEvent.LOOP_STARTED: WorkflowsStreamResponse, + StreamEvent.LOOP_NEXT: WorkflowsStreamResponse, + StreamEvent.LOOP_COMPLETED: WorkflowsStreamResponse, + StreamEvent.TEXT_CHUNK: TextChunkStreamResponse, + StreamEvent.TEXT_REPLACE: TextReplaceStreamResponse, StreamEvent.WORKFLOW_FINISHED: WorkflowsStreamResponse, + StreamEvent.WORKFLOW_PAUSED: WorkflowsStreamResponse, + StreamEvent.HUMAN_INPUT_REQUIRED: WorkflowsStreamResponse, + StreamEvent.HUMAN_INPUT_FORM_FILLED: 
WorkflowsStreamResponse, + StreamEvent.HUMAN_INPUT_FORM_TIMEOUT: WorkflowsStreamResponse, + StreamEvent.PARALLEL_BRANCH_STARTED: WorkflowsStreamResponse, + StreamEvent.PARALLEL_BRANCH_FINISHED: WorkflowsStreamResponse, + StreamEvent.AGENT_LOG: WorkflowsStreamResponse, } ChatStreamResponse = Union[ PingResponse, MessageStreamResponse, + TtsMessageStreamResponse, MessageEndStreamResponse, MessageReplaceStreamResponse, MessageFileStreamResponse, AgentMessageStreamResponse, AgentThoughtStreamResponse, + TextChunkStreamResponse, + TextReplaceStreamResponse, WorkflowsStreamResponse, ] @@ -177,11 +238,30 @@ def build_chat_stream_response(data: dict) -> ChatStreamResponse: StreamEvent.WORKFLOW_STARTED: WorkflowsStreamResponse, StreamEvent.NODE_STARTED: WorkflowsStreamResponse, StreamEvent.NODE_FINISHED: WorkflowsStreamResponse, + StreamEvent.NODE_RETRY: WorkflowsStreamResponse, + StreamEvent.ITERATION_STARTED: WorkflowsStreamResponse, + StreamEvent.ITERATION_NEXT: WorkflowsStreamResponse, + StreamEvent.ITERATION_COMPLETED: WorkflowsStreamResponse, + StreamEvent.LOOP_STARTED: WorkflowsStreamResponse, + StreamEvent.LOOP_NEXT: WorkflowsStreamResponse, + StreamEvent.LOOP_COMPLETED: WorkflowsStreamResponse, + StreamEvent.TEXT_CHUNK: TextChunkStreamResponse, + StreamEvent.TEXT_REPLACE: TextReplaceStreamResponse, StreamEvent.WORKFLOW_FINISHED: WorkflowsStreamResponse, + StreamEvent.WORKFLOW_PAUSED: WorkflowsStreamResponse, + StreamEvent.TTS_MESSAGE: TtsMessageStreamResponse, + StreamEvent.TTS_MESSAGE_END: TtsMessageStreamResponse, + StreamEvent.HUMAN_INPUT_REQUIRED: WorkflowsStreamResponse, + StreamEvent.HUMAN_INPUT_FORM_FILLED: WorkflowsStreamResponse, + StreamEvent.HUMAN_INPUT_FORM_TIMEOUT: WorkflowsStreamResponse, + StreamEvent.AGENT_LOG: WorkflowsStreamResponse, } WorkflowsRunStreamResponse = Union[ PingResponse, + TtsMessageStreamResponse, + TextChunkStreamResponse, + TextReplaceStreamResponse, WorkflowsStreamResponse, ] diff --git a/dify_client/models/workflow.py b/dify_client/models/workflow.py index 66c1148..b84c0d1 100644 --- a/dify_client/models/workflow.py +++ b/dify_client/models/workflow.py @@ -2,7 +2,7 @@ from enum import StrEnum except ImportError: from strenum import StrEnum -from typing import Dict, List, Optional +from typing import Any, Dict, List, Optional from pydantic import BaseModel, Field @@ -14,11 +14,14 @@ class WorkflowStatus(StrEnum): SUCCEEDED = "succeeded" FAILED = "failed" STOPPED = "stopped" + PARTIAL_SUCCEEDED = "partial-succeeded" + PAUSED = "paused" + EXCEPTION = "exception" class ExecutionMetadata(BaseModel): total_tokens: Optional[int] = None - total_price: Optional[str] = None + total_price: Optional[float] = None currency: Optional[str] = None @@ -26,8 +29,9 @@ class WorkflowStartedData(BaseModel): id: str # workflow run id workflow_id: str # workflow id sequence_number: Optional[int] = None - inputs: Optional[dict] = None + inputs: Optional[Dict[str, Any]] = None created_at: int # unix timestamp seconds + reason: Optional[str] = None class NodeStartedData(BaseModel): @@ -37,9 +41,12 @@ class NodeStartedData(BaseModel): title: str index: int predecessor_node_id: Optional[str] = None - inputs: Optional[dict] = None + inputs: Optional[Dict[str, Any]] = None + inputs_truncated: Optional[bool] = None created_at: int - extras: dict = Field(default_factory=dict) + extras: Dict[str, Any] = Field(default_factory=dict) + iteration_id: Optional[str] = None + loop_id: Optional[str] = None class NodeFinishedData(BaseModel): @@ -49,43 +56,51 @@ class 
NodeFinishedData(BaseModel): title: str index: int predecessor_node_id: Optional[str] = None - inputs: Optional[dict] = None - process_data: Optional[dict] = None - outputs: Optional[dict] = Field(default_factory=dict) + inputs: Optional[Dict[str, Any]] = None + inputs_truncated: Optional[bool] = None + process_data: Optional[Dict[str, Any]] = None + process_data_truncated: Optional[bool] = None + outputs: Optional[Dict[str, Any]] = Field(default_factory=dict) + outputs_truncated: Optional[bool] = None status: WorkflowStatus error: Optional[str] = None elapsed_time: Optional[float] # seconds execution_metadata: Optional[ExecutionMetadata] = None created_at: int finished_at: int - files: List = Field(default_factory=list) + files: List[Dict[str, Any]] = Field(default_factory=list) + iteration_id: Optional[str] = None + loop_id: Optional[str] = None + retry_index: Optional[int] = None class WorkflowFinishedData(BaseModel): id: str # workflow run id workflow_id: str # workflow id - sequence_number: int + sequence_number: Optional[int] = None status: WorkflowStatus - outputs: Optional[dict] = None + outputs: Optional[Dict[str, Any]] = None error: Optional[str] = None elapsed_time: Optional[float] = None total_tokens: Optional[int] = None total_steps: Optional[int] = 0 created_at: int - finished_at: int - created_by: dict = Field(default_factory=dict) - files: List = Field(default_factory=list) + finished_at: Optional[int] = None + created_by: Dict[str, Any] = Field(default_factory=dict) + exceptions_count: Optional[int] = None + files: List[Dict[str, Any]] = Field(default_factory=list) class WorkflowsRunRequest(BaseModel): - inputs: Dict = Field(default_factory=dict) + inputs: Dict[str, Any] = Field(default_factory=dict) response_mode: ResponseMode user: str - conversation_id: Optional[str] = "" files: List[File] = Field(default_factory=list) class WorkflowsRunResponse(BaseModel): - log_id: str + workflow_run_id: Optional[str] = None + # Backward compatibility with older API responses. 
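+    # Both identifiers are optional, so either response shape validates, e.g.
+    # {"workflow_run_id": "run-1", "task_id": "t-1", "data": {...}} or a legacy
+    # {"log_id": "log-1", "task_id": "t-1", "data": {...}} (illustrative values).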
+ log_id: Optional[str] = None task_id: str data: WorkflowFinishedData diff --git a/tests/test_client.py b/tests/test_client.py index 814abc3..28dca5b 100644 --- a/tests/test_client.py +++ b/tests/test_client.py @@ -1,6 +1,7 @@ import httpx from httpx_sse import ServerSentEvent +from dify_client import models from dify_client._clientx import AsyncClient, Client @@ -79,3 +80,55 @@ def fake_connect_sse(*args, **kwargs): chunks = list(client.request_stream(client._prepare_url("/chat-messages"), "POST", json={"a": 1})) assert len(chunks) == 1 assert chunks[0].event == "message" + + +def test_stop_workflows_uses_latest_tasks_endpoint(monkeypatch): + captured = {} + + def fake_request(method, endpoint, **kwargs): + captured["endpoint"] = endpoint + return httpx.Response( + status_code=200, + json={"result": "success"}, + request=httpx.Request(str(method), endpoint), + ) + + from dify_client import _clientx + + monkeypatch.setattr(_clientx._httpx_client, "request", fake_request) + client = Client(api_key="token", api_base="https://api.example.com/v1") + response = client.stop_workflows("task-1", models.StopRequest(user="u1")) + assert captured["endpoint"] == "https://api.example.com/v1/workflows/tasks/task-1/stop" + assert response.result == "success" + + +def test_audio_to_text_and_text_to_audio(monkeypatch): + calls = [] + + def fake_request(method, endpoint, **kwargs): + calls.append((str(method), endpoint, kwargs)) + if endpoint.endswith("/audio-to-text"): + return httpx.Response( + status_code=200, + json={"text": "hello"}, + request=httpx.Request(str(method), endpoint), + ) + return httpx.Response( + status_code=200, + content=b"mp3-bytes", + request=httpx.Request(str(method), endpoint), + headers={"content-type": "audio/mpeg"}, + ) + + from dify_client import _clientx + + monkeypatch.setattr(_clientx._httpx_client, "request", fake_request) + client = Client(api_key="token", api_base="https://api.example.com/v1") + + audio_to_text = client.audio_to_text(("a.wav", b"abc", "audio/wav"), models.AudioToTextRequest(user="u1")) + assert audio_to_text.text == "hello" + + audio = client.text_to_audio(models.TextToAudioRequest(text="hello", user="u1")) + assert audio == b"mp3-bytes" + assert calls[0][1].endswith("/audio-to-text") + assert calls[1][1].endswith("/text-to-audio") diff --git a/tests/test_import_and_models.py b/tests/test_import_and_models.py index 24b5147..499c790 100644 --- a/tests/test_import_and_models.py +++ b/tests/test_import_and_models.py @@ -1,13 +1,41 @@ from dify_client import AsyncClient, Client, models -def test_imports_and_type_annotations_work_on_python39_plus(): +def test_imports_and_type_annotations_work_on_python38_plus(): client = Client(api_key="token") async_client = AsyncClient(api_key="token") assert client.api_base == "https://api.dify.ai/v1" assert async_client.api_base == "https://api.dify.ai/v1" assert models.Mode.ADVANCED_CHAT.value == "advanced-chat" assert models.Mode.ADAVANCED_CHAT == models.Mode.ADVANCED_CHAT + assert models.FileType.DOCUMENT.value == "document" + assert models.FileType.AUDIO.value == "audio" + assert models.FileType.VIDEO.value == "video" + assert models.FileType.CUSTOM.value == "custom" + + +def test_completion_request_supports_inputs_and_legacy_query(): + req = models.CompletionRequest( + response_mode=models.ResponseMode.BLOCKING, + user="u1", + ) + assert req.inputs == {} + assert req.query == "" + + +def test_workflows_run_response_accepts_workflow_run_id(): + res = models.WorkflowsRunResponse( + task_id="task-1", + 
workflow_run_id="run-1", + data=models.WorkflowFinishedData( + id="run-1", + workflow_id="wf-1", + status=models.WorkflowStatus.SUCCEEDED, + created_at=1, + finished_at=2, + ), + ) + assert res.workflow_run_id == "run-1" def test_mutable_defaults_are_not_shared_between_instances(): diff --git a/tests/test_stream.py b/tests/test_stream.py new file mode 100644 index 0000000..f50ac5d --- /dev/null +++ b/tests/test_stream.py @@ -0,0 +1,47 @@ +from dify_client import models + + +def test_completion_stream_supports_tts_message(): + item = models.build_completion_stream_response( + { + "event": "tts_message", + "task_id": "task-1", + "message_id": "msg-1", + "audio": "BASE64_AUDIO", + } + ) + assert isinstance(item, models.TtsMessageStreamResponse) + assert item.audio == "BASE64_AUDIO" + + +def test_chat_stream_supports_text_chunk_events(): + item = models.build_chat_stream_response( + { + "event": "text_chunk", + "task_id": "task-1", + "workflow_run_id": "run-1", + "data": { + "text": "partial answer", + "from_variable_selector": ["body", "text"], + }, + } + ) + assert isinstance(item, models.TextChunkStreamResponse) + assert item.data.text == "partial answer" + assert item.data.from_variable_selector == ["body", "text"] + + +def test_workflow_stream_supports_workflow_paused(): + item = models.build_workflows_stream_response( + { + "event": "workflow_paused", + "task_id": "task-1", + "workflow_run_id": "run-1", + "data": { + "paused_nodes": ["node-1"], + "status": "paused", + }, + } + ) + assert isinstance(item, models.WorkflowsStreamResponse) + assert item.event == models.StreamEvent.WORKFLOW_PAUSED From a54c08b2fbbf7629d8cd2ab87f95521ad8d1737f Mon Sep 17 00:00:00 2001 From: "huhaoyu.hahahu" Date: Wed, 25 Mar 2026 12:08:25 +0800 Subject: [PATCH 3/5] Finalize docs and version 1.0.3 with compatibility checks --- CHANGELOG.md | 34 ++++++ README.md | 179 +++++++++++++------------------ RELEASE.md | 38 +++++++ dify_client/_clientx.py | 23 +++- dify_client/models/completion.py | 2 + setup.py | 2 +- tests/test_client.py | 77 +++++++++++++ 7 files changed, 244 insertions(+), 111 deletions(-) create mode 100644 CHANGELOG.md create mode 100644 RELEASE.md diff --git a/CHANGELOG.md b/CHANGELOG.md new file mode 100644 index 0000000..c3241b6 --- /dev/null +++ b/CHANGELOG.md @@ -0,0 +1,34 @@ +# Changelog + +## 1.0.3 - 2026-03-25 + +### Added + +- Added audio models and client methods: + - `audio_to_text` / `aaudio_to_text` + - `text_to_audio` / `atext_to_audio` +- Added stream event support for newer Dify runtime events: + - `tts_message`, `workflow_paused`, `iteration_*`, `loop_*` + - `text_chunk`, `text_replace` + - `human_input_required`, `human_input_form_filled`, `human_input_form_timeout` +- Added compatibility fallback for workflow stop endpoint: + - tries `/workflows/tasks/{task_id}/stop` + - falls back to `/workflows/{task_id}/stop` on not found +- Added tests for stream parsing, audio APIs, and endpoint compatibility fallback. + +### Changed + +- Updated package metadata: + - `python_requires` is `>=3.8` + - `pydantic>=2,<3` +- Updated file models to include new file types: + - `image`, `document`, `audio`, `video`, `custom` +- Updated response/request models to align with latest runtime schema: + - completion, workflow, and file upload models now include newer fields. +- Updated CI and build scripts to ensure `setuptools` and `wheel` are installed for `python -m build --no-isolation`. + +### Fixed + +- Fixed Python 3.8/3.12 CI build failures caused by missing build backend dependencies. 
+- Fixed packaging scope to include subpackages. +- Hardened error parsing for non-JSON HTTP/SSE error bodies. diff --git a/README.md b/README.md index 2ff1abc..9dfb10b 100644 --- a/README.md +++ b/README.md @@ -1,155 +1,124 @@ # dify-client-python -Welcome to the `dify-client-python` repository! This Python package provides a convenient and powerful interface to -interact with the Dify API, enabling developers to integrate a wide range of features into their applications with ease. +`dify-client-python` is a typed Python SDK for Dify Runtime APIs, covering chat, completion, workflow, file upload, feedback, and audio conversion endpoints. -## Main Features +## Requirements -* **Synchronous and Asynchronous Support**: The client offers both synchronous and asynchronous methods, allowing for - flexible integration into various Python codebases and frameworks. -* **Stream and Non-stream Support**: Seamlessly work with both streaming and non-streaming endpoints of the Dify API for - real-time and batch processing use cases. -* **Comprehensive Endpoint Coverage**: Support completion, chat, workflows, feedback, file uploads, etc., the client - covers all available Dify API endpoints. +- Python `>=3.8` +- Dify Runtime API (cloud or self-hosted) compatible with `/v1` endpoints ## Installation -Before using the `dify-client-python` client, you'll need to install it. You can easily install it using `pip`: - ```bash pip install dify-client-python ``` -## Quick Start +## What Is Supported + +- Sync and async clients: `Client`, `AsyncClient` +- Blocking and streaming response modes +- Chat and completion message APIs +- Workflow run/stream/stop APIs +- File upload APIs +- Message feedback and suggestion APIs +- Audio APIs: + - `audio-to-text` + - `text-to-audio` +- Updated stream event support for newer workflow/chatflow runtimes: + - `workflow_paused`, `iteration_*`, `loop_*`, `text_chunk`, `text_replace` + - `human_input_*`, `node_retry`, `agent_log`, `tts_message` -Here's a quick example of how you can use the Dify Client to send a chat message. +## Quick Start (Sync) ```python import uuid from dify_client import Client, models -# Initialize the client with your API key client = Client( api_key="your-api-key", - api_base="http://localhost/v1", + api_base="https://api.dify.ai/v1", ) user = str(uuid.uuid4()) -# Create a blocking chat request -blocking_chat_req = models.ChatRequest( - query="Hi, dify-client-python!", - inputs={"city": "Beijing"}, +req = models.ChatRequest( + query="Hello from dify-client-python", + inputs={}, user=user, response_mode=models.ResponseMode.BLOCKING, ) -# Send the chat message -chat_response = client.chat_messages(blocking_chat_req, timeout=60.) 
-print(chat_response) +res = client.chat_messages(req, timeout=60.0) +print(res.answer) +``` + +### Streaming Chat -# Create a streaming chat request -streaming_chat_req = models.ChatRequest( - query="Hi, dify-client-python!", - inputs={"city": "Beijing"}, +```python +stream_req = models.ChatRequest( + query="Stream this answer", + inputs={}, user=user, response_mode=models.ResponseMode.STREAMING, ) -# Send the chat message -for chunk in client.chat_messages(streaming_chat_req, timeout=60.): - print(chunk) +for event in client.chat_messages(stream_req, timeout=60.0): + print(event.event, getattr(event, "answer", None)) ``` -For asynchronous operations, use the `AsyncClient` in a similar fashion: +### Audio APIs ```python -import asyncio -import uuid - -from dify_client import AsyncClient, models - -# Initialize the async client with your API key -async_client = AsyncClient( - api_key="your-api-key", - api_base="http://localhost/v1", +audio_text = client.audio_to_text( + ("sample.wav", open("sample.wav", "rb"), "audio/wav"), + models.AudioToTextRequest(user=user), ) +print(audio_text.text) - -# Define an asynchronous function to send a blocking chat message with BLOCKING ResponseMode -async def send_chat_message(): - user = str(uuid.uuid4()) - # Create a blocking chat request - blocking_chat_req = models.ChatRequest( - query="Hi, dify-client-python!", - inputs={"city": "Beijing"}, - user=user, - response_mode=models.ResponseMode.BLOCKING, - ) - chat_response = await async_client.achat_messages(blocking_chat_req, timeout=60.) - print(chat_response) - - -# Define an asynchronous function to send a chat message with STREAMING ResponseMode -async def send_chat_message_stream(): - user = str(uuid.uuid4()) - # Create a blocking chat request - streaming_chat_req = models.ChatRequest( - query="Hi, dify-client-python!", - inputs={"city": "Beijing"}, - user=user, - response_mode=models.ResponseMode.STREAMING, - ) - async for chunk in await async_client.achat_messages(streaming_chat_req, timeout=60.): - print(chunk) - - -# Run the asynchronous function -asyncio.gather(send_chat_message(), send_chat_message_stream()) +audio_bytes = client.text_to_audio( + models.TextToAudioRequest(text="Hello world", user=user) +) +with open("speech.mp3", "wb") as f: + f.write(audio_bytes) ``` -## Documentation - -For detailed information on all the functionalities and how to use each endpoint, please refer to the official Dify API -documentation. This will provide you with comprehensive guidance on request and response structures, error handling, and -other important details. +## Quick Start (Async) -## Contributing - -Contributions are welcome! If you would like to contribute to the `dify-client-python`, please feel free to make a pull -request or open an issue to discuss potential changes. - -## License +```python +import asyncio +from dify_client import AsyncClient, models -This project is licensed under the MIT License - see the LICENSE file for details. 
+async_client = AsyncClient(api_key="your-api-key", api_base="https://api.dify.ai/v1") -```text -MIT License +async def main(): + req = models.ChatRequest( + query="hello", + inputs={}, + user="user-1", + response_mode=models.ResponseMode.STREAMING, + ) + async for chunk in await async_client.achat_messages(req, timeout=60.0): + print(chunk.event) -Copyright (c) 2024 haoyuhu +asyncio.run(main()) +``` -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: +## Security Notes -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. +- Do not hardcode production API keys in source code. +- Prefer environment variables or secret managers for `api_key`. +- The SDK injects `Authorization: Bearer ...` headers, but does not log keys by default. +- If you add your own logging middleware around requests, redact `Authorization` headers. -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. +## Development +```bash +python -m pip install -e . pytest pytest-cov flake8 build twine setuptools wheel +python -m flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics +python -m pytest -q --cov=dify_client --cov-report=term-missing +python -m build --no-isolation +python -m twine check --strict dist/* ``` -## Support - -If you encounter any issues or have questions regarding the usage of this client, please reach out to the Dify Client -support team. +## Release -Happy coding! 🚀 \ No newline at end of file +Use [RELEASE.md](./RELEASE.md) for the release checklist and commands. diff --git a/RELEASE.md b/RELEASE.md new file mode 100644 index 0000000..d5c2259 --- /dev/null +++ b/RELEASE.md @@ -0,0 +1,38 @@ +# Release Checklist + +## 1. Update Version + +- Update `version` in `setup.py`. +- Keep `CHANGELOG.md` and `README.md` aligned with the release. + +## 2. Validate Locally + +```bash +python -m pip install -e . pytest pytest-cov flake8 build twine setuptools wheel +python -m flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics +python -m pytest -q --cov=dify_client --cov-report=term-missing +python -m build --no-isolation +python -m twine check --strict dist/* +``` + +## 3. Push And Verify CI + +- Push branch and wait for matrix checks (3.8/3.9/3.10/3.11/3.12) to pass. +- Merge PR only after all checks are green. + +## 4. Publish + +```bash +python -m twine upload dist/* +``` + +## 5. Post-release + +- Tag release in GitHub (for example `v1.0.3`). 
+- Verify install in a clean environment: + +```bash +python -m venv /tmp/dify-verify +/tmp/dify-verify/bin/pip install dify-client-python==1.0.3 +/tmp/dify-verify/bin/python -c "import dify_client; print('ok')" +``` diff --git a/dify_client/_clientx.py b/dify_client/_clientx.py index 1ca6db7..a5533a4 100644 --- a/dify_client/_clientx.py +++ b/dify_client/_clientx.py @@ -41,16 +41,17 @@ class HTTPMethod(StrEnum): # workflow ENDPOINT_RUN_WORKFLOWS = "/workflows/run" ENDPOINT_STOP_WORKFLOWS = "/workflows/tasks/{task_id}/stop" +ENDPOINT_STOP_WORKFLOWS_LEGACY = "/workflows/{task_id}/stop" # audio <-> text ENDPOINT_TEXT_TO_AUDIO = "/text-to-audio" ENDPOINT_AUDIO_TO_TEXT = "/audio-to-text" def _completion_request_payload(req: models.CompletionRequest) -> Dict[str, Any]: - # Keep compatibility with historical `conversation_id` usage while matching - # current runtime payload shape. payload = req.model_dump(exclude_none=True) - payload.pop("conversation_id", None) + conversation_id = payload.get("conversation_id") + if conversation_id in ("", None): + payload.pop("conversation_id", None) return payload @@ -358,7 +359,13 @@ def stop_workflows(self, task_id: str, req: models.StopRequest, **kwargs) -> mod Returns: A `StopResponse` object indicating the success of the operation. """ - return self._stop_stream(self._prepare_url(ENDPOINT_STOP_WORKFLOWS, task_id=task_id), req, **kwargs) + endpoint = self._prepare_url(ENDPOINT_STOP_WORKFLOWS, task_id=task_id) + try: + return self._stop_stream(endpoint, req, **kwargs) + except errors.DifyResourceNotFound: + # Backward compatibility for older Dify runtime route. + legacy_endpoint = self._prepare_url(ENDPOINT_STOP_WORKFLOWS_LEGACY, task_id=task_id) + return self._stop_stream(legacy_endpoint, req, **kwargs) def _stop_stream(self, endpoint: str, req: models.StopRequest, **kwargs) -> models.StopResponse: response = self.request( @@ -680,7 +687,13 @@ async def astop_workflows(self, task_id: str, req: models.StopRequest, **kwargs) Returns: A `StopResponse` object indicating the success of the operation. """ - return await self._astop_stream(self._prepare_url(ENDPOINT_STOP_WORKFLOWS, task_id=task_id), req, **kwargs) + endpoint = self._prepare_url(ENDPOINT_STOP_WORKFLOWS, task_id=task_id) + try: + return await self._astop_stream(endpoint, req, **kwargs) + except errors.DifyResourceNotFound: + # Backward compatibility for older Dify runtime route. + legacy_endpoint = self._prepare_url(ENDPOINT_STOP_WORKFLOWS_LEGACY, task_id=task_id) + return await self._astop_stream(legacy_endpoint, req, **kwargs) async def _astop_stream(self, endpoint: str, req: models.StopRequest, **kwargs) -> models.StopResponse: response = await self.arequest( diff --git a/dify_client/models/completion.py b/dify_client/models/completion.py index 39e5c3d..2292082 100644 --- a/dify_client/models/completion.py +++ b/dify_client/models/completion.py @@ -9,6 +9,8 @@ class CompletionRequest(BaseModel): inputs: Dict[str, Any] = Field(default_factory=dict) # Legacy field. Prefer passing query in `inputs`. query: Optional[str] = "" + # Legacy field kept for compatibility with older Dify completion apps. 
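+    # An empty or missing value is stripped from the outgoing payload by
+    # _completion_request_payload, so new callers can simply omit this field.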
+ conversation_id: Optional[str] = "" response_mode: ResponseMode user: str files: List[File] = Field(default_factory=list) diff --git a/setup.py b/setup.py index 4a05371..2fe1d3f 100644 --- a/setup.py +++ b/setup.py @@ -5,7 +5,7 @@ setup( name="dify-client-python", - version="1.0.2", + version="1.0.3", author="haoyuhu", author_email="im@huhaoyu.com", description="A package for interacting with the Dify Service-API", diff --git a/tests/test_client.py b/tests/test_client.py index 28dca5b..a46c7eb 100644 --- a/tests/test_client.py +++ b/tests/test_client.py @@ -102,6 +102,35 @@ def fake_request(method, endpoint, **kwargs): assert response.result == "success" +def test_stop_workflows_falls_back_to_legacy_endpoint(monkeypatch): + calls = [] + + def fake_request(method, endpoint, **kwargs): + calls.append(endpoint) + if endpoint.endswith("/workflows/tasks/task-1/stop"): + return httpx.Response( + status_code=404, + json={"status": 404, "code": "not_found", "message": "not found"}, + request=httpx.Request(str(method), endpoint), + ) + return httpx.Response( + status_code=200, + json={"result": "success"}, + request=httpx.Request(str(method), endpoint), + ) + + from dify_client import _clientx + + monkeypatch.setattr(_clientx._httpx_client, "request", fake_request) + client = Client(api_key="token", api_base="https://api.example.com/v1") + response = client.stop_workflows("task-1", models.StopRequest(user="u1")) + assert calls == [ + "https://api.example.com/v1/workflows/tasks/task-1/stop", + "https://api.example.com/v1/workflows/task-1/stop", + ] + assert response.result == "success" + + def test_audio_to_text_and_text_to_audio(monkeypatch): calls = [] @@ -132,3 +161,51 @@ def fake_request(method, endpoint, **kwargs): assert audio == b"mp3-bytes" assert calls[0][1].endswith("/audio-to-text") assert calls[1][1].endswith("/text-to-audio") + + +def test_completion_messages_payload_keeps_non_empty_legacy_conversation_id(monkeypatch): + payloads = [] + + def fake_request(method, endpoint, **kwargs): + payloads.append(kwargs.get("json")) + return httpx.Response( + status_code=200, + json={ + "message_id": "m1", + "mode": "completion", + "answer": "ok", + "metadata": { + "usage": { + "prompt_tokens": 1, + "completion_tokens": 1, + "total_tokens": 2, + "prompt_unit_price": "0", + "prompt_price_unit": "0", + "prompt_price": "0", + "completion_unit_price": "0", + "completion_price_unit": "0", + "completion_price": "0", + "total_price": "0", + "currency": "USD", + "latency": 0.1, + }, + "retriever_resources": [], + }, + "created_at": 1, + }, + request=httpx.Request(str(method), endpoint), + ) + + from dify_client import _clientx + + monkeypatch.setattr(_clientx._httpx_client, "request", fake_request) + client = Client(api_key="token", api_base="https://api.example.com/v1") + req = models.CompletionRequest( + inputs={"query": "hi"}, + query="hi", + response_mode=models.ResponseMode.BLOCKING, + user="u1", + conversation_id="conv-1", + ) + client.completion_messages(req) + assert payloads[0]["conversation_id"] == "conv-1" From de6ee52be494ac14691d3fc8446f35a9200b0d9f Mon Sep 17 00:00:00 2001 From: "huhaoyu.hahahu" Date: Wed, 25 Mar 2026 13:31:41 +0800 Subject: [PATCH 4/5] Add wrapper and async coverage tests for release validation --- tests/test_async_client.py | 334 +++++++++++++++++++++++++++++++++++++ tests/test_utils.py | 19 +++ tests/test_wrappers.py | 214 ++++++++++++++++++++++++ 3 files changed, 567 insertions(+) create mode 100644 tests/test_async_client.py create mode 100644 tests/test_utils.py 
create mode 100644 tests/test_wrappers.py diff --git a/tests/test_async_client.py b/tests/test_async_client.py new file mode 100644 index 0000000..f1a8fd7 --- /dev/null +++ b/tests/test_async_client.py @@ -0,0 +1,334 @@ +import httpx +import pytest +from httpx_sse import ServerSentEvent + +from dify_client import errors, models +from dify_client._clientx import AsyncClient + + +def _usage_payload(): + return { + "prompt_tokens": 1, + "completion_tokens": 1, + "total_tokens": 2, + "prompt_unit_price": "0", + "prompt_price_unit": "0", + "prompt_price": "0", + "completion_unit_price": "0", + "completion_price_unit": "0", + "completion_price": "0", + "total_price": "0", + "currency": "USD", + "latency": 0.1, + } + + +def _completion_payload(mode: str): + return { + "event": "message", + "task_id": "task-1", + "id": "evt-1", + "message_id": "msg-1", + "conversation_id": "conv-1", + "mode": mode, + "answer": "ok", + "metadata": {"usage": _usage_payload(), "retriever_resources": []}, + "created_at": 1, + } + + +def _workflow_payload(): + return { + "task_id": "task-1", + "workflow_run_id": "run-1", + "data": { + "id": "run-1", + "workflow_id": "wf-1", + "status": "succeeded", + "outputs": {}, + "created_at": 1, + "finished_at": 2, + }, + } + + +@pytest.mark.anyio +async def test_arequest_injects_authorization(monkeypatch): + captured = {} + + async def fake_request(method, endpoint, **kwargs): + captured["endpoint"] = endpoint + captured["headers"] = kwargs["headers"] + return httpx.Response( + status_code=200, + json={"ok": True}, + request=httpx.Request(str(method), endpoint), + ) + + from dify_client import _clientx + + monkeypatch.setattr(_clientx._async_httpx_client, "request", fake_request) + client = AsyncClient(api_key="token", api_base="https://api.example.com/v1") + await client.arequest(client._prepare_url("/chat-messages"), "GET") + assert captured["endpoint"] == "https://api.example.com/v1/chat-messages" + assert captured["headers"]["Authorization"] == "Bearer token" + + +@pytest.mark.anyio +async def test_arequest_stream_filters_ping(monkeypatch): + class FakeAsyncEventSource: + def __init__(self, response, events): + self.response = response + self._events = events + + async def __aenter__(self): + return self + + async def __aexit__(self, exc_type, exc_val, exc_tb): + return False + + async def aiter_sse(self): + for event in self._events: + yield event + + response = httpx.Response( + status_code=200, + headers={"content-type": "text/event-stream"}, + request=httpx.Request("POST", "https://api.example.com/v1/chat-messages"), + ) + events = [ + ServerSentEvent(event="ping", data=""), + ServerSentEvent(event="message", data='{"event":"message","answer":"ok"}'), + ] + + def fake_aconnect_sse(*args, **kwargs): + return FakeAsyncEventSource(response=response, events=events) + + from dify_client import _clientx + + monkeypatch.setattr(_clientx, "aconnect_sse", fake_aconnect_sse) + client = AsyncClient(api_key="token", api_base="https://api.example.com/v1") + seen = [] + async for event in client.arequest_stream(client._prepare_url("/chat-messages"), "POST", json={"a": 1}): + seen.append(event) + assert len(seen) == 1 + assert seen[0].event == "message" + + +@pytest.mark.anyio +async def test_arequest_stream_non_sse_raises_api_error(monkeypatch): + class FakeAsyncEventSource: + def __init__(self, response): + self.response = response + + async def __aenter__(self): + return self + + async def __aexit__(self, exc_type, exc_val, exc_tb): + return False + + async def aiter_sse(self): + if 
False: + yield None + + response = httpx.Response( + status_code=400, + headers={"content-type": "application/json"}, + json={"status": 400, "code": "invalid_param", "message": "bad request"}, + request=httpx.Request("POST", "https://api.example.com/v1/chat-messages"), + ) + + def fake_aconnect_sse(*args, **kwargs): + return FakeAsyncEventSource(response=response) + + from dify_client import _clientx + + monkeypatch.setattr(_clientx, "aconnect_sse", fake_aconnect_sse) + client = AsyncClient(api_key="token", api_base="https://api.example.com/v1") + + with pytest.raises(errors.DifyInvalidParam): + async for _ in client.arequest_stream(client._prepare_url("/chat-messages"), "POST", json={"a": 1}): + pass + + +@pytest.mark.anyio +async def test_async_wrapper_methods(monkeypatch): + async def fake_arequest(self, endpoint, method, **kwargs): + request = httpx.Request(str(method), endpoint) + if endpoint.endswith("/messages/msg-1/feedbacks"): + return httpx.Response(status_code=200, json={"result": "success"}, request=request) + if endpoint.endswith("/messages/msg-1/suggested"): + return httpx.Response( + status_code=200, + json={"result": "success", "data": ["q1"]}, + request=request, + ) + if endpoint.endswith("/files/upload"): + return httpx.Response( + status_code=200, + json={ + "id": "f1", + "name": "a.txt", + "size": 1, + "extension": ".txt", + "mime_type": "text/plain", + "created_by": "u1", + "created_at": 1, + }, + request=request, + ) + if endpoint.endswith("/audio-to-text"): + return httpx.Response(status_code=200, json={"text": "hello"}, request=request) + if endpoint.endswith("/text-to-audio"): + return httpx.Response( + status_code=200, + content=b"mp3-bytes", + headers={"content-type": "audio/mpeg"}, + request=request, + ) + if endpoint.endswith("/completion-messages"): + return httpx.Response(status_code=200, json=_completion_payload("completion"), request=request) + if endpoint.endswith("/completion-messages/task-1/stop"): + return httpx.Response(status_code=200, json={"result": "success"}, request=request) + if endpoint.endswith("/chat-messages"): + return httpx.Response(status_code=200, json=_completion_payload("chat"), request=request) + if endpoint.endswith("/chat-messages/task-1/stop"): + return httpx.Response(status_code=200, json={"result": "success"}, request=request) + if endpoint.endswith("/workflows/run"): + return httpx.Response(status_code=200, json=_workflow_payload(), request=request) + if endpoint.endswith("/workflows/tasks/task-1/stop"): + return httpx.Response(status_code=200, json={"result": "success"}, request=request) + raise AssertionError(endpoint) + + async def fake_arequest_stream(self, endpoint, method, **kwargs): + if endpoint.endswith("/completion-messages"): + yield ServerSentEvent( + event="message", + data='{"event":"message","message_id":"m1","answer":"ok","created_at":1}', + ) + return + if endpoint.endswith("/chat-messages"): + yield ServerSentEvent( + event="message", + data='{"event":"message","message_id":"m1","conversation_id":"c1","answer":"ok","created_at":1}', + ) + return + if endpoint.endswith("/workflows/run"): + yield ServerSentEvent( + event="workflow_finished", + data='{"event":"workflow_finished","workflow_run_id":"r1","data":{"id":"r1","workflow_id":"wf1","status":"succeeded","created_at":1,"finished_at":2}}', + ) + return + raise AssertionError(endpoint) + + monkeypatch.setattr(AsyncClient, "arequest", fake_arequest) + monkeypatch.setattr(AsyncClient, "arequest_stream", fake_arequest_stream) + + client = 
AsyncClient(api_key="token", api_base="https://api.example.com/v1") + + assert (await client.afeedback_messages("msg-1", models.FeedbackRequest(user="u1"))).result == "success" + assert (await client.asuggest_messages("msg-1", models.ChatSuggestRequest(user="u1"))).data == ["q1"] + assert (await client.aupload_files(("a.txt", b"x", "text/plain"), models.UploadFileRequest(user="u1"))).id == "f1" + assert (await client.aaudio_to_text(("a.wav", b"x", "audio/wav"), models.AudioToTextRequest(user="u1"))).text == "hello" + assert await client.atext_to_audio(models.TextToAudioRequest(text="hello", user="u1")) == b"mp3-bytes" + + completion_blocking = models.CompletionRequest( + inputs={"query": "hi"}, + response_mode=models.ResponseMode.BLOCKING, + user="u1", + ) + assert (await client.acompletion_messages(completion_blocking)).mode == models.Mode.COMPLETION + assert (await client.astop_completion_messages("task-1", models.StopRequest(user="u1"))).result == "success" + + completion_stream = models.CompletionRequest( + inputs={"query": "hi"}, + response_mode=models.ResponseMode.STREAMING, + user="u1", + ) + c_stream = await client.acompletion_messages(completion_stream) + c_items = [item async for item in c_stream] + assert c_items[0].event == models.StreamEvent.MESSAGE + + chat_blocking = models.ChatRequest( + query="hi", + inputs={}, + response_mode=models.ResponseMode.BLOCKING, + user="u1", + ) + assert (await client.achat_messages(chat_blocking)).mode == models.Mode.CHAT + assert (await client.astop_chat_messages("task-1", models.StopRequest(user="u1"))).result == "success" + + chat_stream = models.ChatRequest( + query="hi", + inputs={}, + response_mode=models.ResponseMode.STREAMING, + user="u1", + ) + ch_stream = await client.achat_messages(chat_stream) + ch_items = [item async for item in ch_stream] + assert ch_items[0].event == models.StreamEvent.MESSAGE + + workflow_blocking = models.WorkflowsRunRequest( + inputs={"a": 1}, + response_mode=models.ResponseMode.BLOCKING, + user="u1", + ) + assert (await client.arun_workflows(workflow_blocking)).workflow_run_id == "run-1" + assert (await client.astop_workflows("task-1", models.StopRequest(user="u1"))).result == "success" + + workflow_stream = models.WorkflowsRunRequest( + inputs={"a": 1}, + response_mode=models.ResponseMode.STREAMING, + user="u1", + ) + wf_stream = await client.arun_workflows(workflow_stream) + wf_items = [item async for item in wf_stream] + assert wf_items[0].event == models.StreamEvent.WORKFLOW_FINISHED + + +@pytest.mark.anyio +async def test_async_stop_workflows_fallback_and_invalid_mode(monkeypatch): + calls = [] + + async def fake_astop_stream(self, endpoint, req, **kwargs): + calls.append(endpoint) + if endpoint.endswith("/workflows/tasks/task-1/stop"): + raise errors.DifyResourceNotFound(404, "not_found", "not found") + return models.StopResponse(result="success") + + monkeypatch.setattr(AsyncClient, "_astop_stream", fake_astop_stream) + client = AsyncClient(api_key="token", api_base="https://api.example.com/v1") + stop = await client.astop_workflows("task-1", models.StopRequest(user="u1")) + assert stop.result == "success" + assert calls == [ + "https://api.example.com/v1/workflows/tasks/task-1/stop", + "https://api.example.com/v1/workflows/task-1/stop", + ] + + completion_req = models.CompletionRequest( + inputs={"query": "hi"}, + response_mode=models.ResponseMode.BLOCKING, + user="u1", + ) + completion_req.response_mode = "invalid" + with pytest.raises(ValueError): + await client.acompletion_messages(completion_req) 
+ + chat_req = models.ChatRequest( + query="hi", + inputs={}, + response_mode=models.ResponseMode.BLOCKING, + user="u1", + ) + chat_req.response_mode = "invalid" + with pytest.raises(ValueError): + await client.achat_messages(chat_req) + + workflow_req = models.WorkflowsRunRequest( + inputs={"a": 1}, + response_mode=models.ResponseMode.BLOCKING, + user="u1", + ) + workflow_req.response_mode = "invalid" + with pytest.raises(ValueError): + await client.arun_workflows(workflow_req) diff --git a/tests/test_utils.py b/tests/test_utils.py new file mode 100644 index 0000000..25b16d7 --- /dev/null +++ b/tests/test_utils.py @@ -0,0 +1,19 @@ +import pytest + +from dify_client import models, utils + + +def test_str_to_enum_returns_member(): + result = utils.str_to_enum(models.StreamEvent, "message") + assert result == models.StreamEvent.MESSAGE + + +def test_str_to_enum_returns_default_when_ignored(): + sentinel = object() + result = utils.str_to_enum(models.StreamEvent, "not-exist", ignore_not_found=True, enum_default=sentinel) + assert result is sentinel + + +def test_str_to_enum_raises_when_not_found(): + with pytest.raises(ValueError): + utils.str_to_enum(models.StreamEvent, "not-exist") diff --git a/tests/test_wrappers.py b/tests/test_wrappers.py new file mode 100644 index 0000000..e80f4a0 --- /dev/null +++ b/tests/test_wrappers.py @@ -0,0 +1,214 @@ +import httpx +import pytest +from httpx_sse import ServerSentEvent + +from dify_client import models +from dify_client._clientx import Client + + +def _usage_payload(): + return { + "prompt_tokens": 1, + "completion_tokens": 1, + "total_tokens": 2, + "prompt_unit_price": "0", + "prompt_price_unit": "0", + "prompt_price": "0", + "completion_unit_price": "0", + "completion_price_unit": "0", + "completion_price": "0", + "total_price": "0", + "currency": "USD", + "latency": 0.1, + } + + +def _completion_payload(mode: str): + return { + "event": "message", + "task_id": "task-1", + "id": "evt-1", + "message_id": "msg-1", + "conversation_id": "conv-1", + "mode": mode, + "answer": "ok", + "metadata": {"usage": _usage_payload(), "retriever_resources": []}, + "created_at": 1, + } + + +def _workflow_payload(): + return { + "task_id": "task-1", + "workflow_run_id": "run-1", + "data": { + "id": "run-1", + "workflow_id": "wf-1", + "status": "succeeded", + "outputs": {}, + "created_at": 1, + "finished_at": 2, + }, + } + + +def _response(method: str, endpoint: str, payload): + return httpx.Response( + status_code=200, + json=payload, + request=httpx.Request(method, endpoint), + ) + + +def test_sync_wrapper_methods(monkeypatch): + calls = [] + + def fake_request(self, endpoint, method, **kwargs): + calls.append((endpoint, str(method))) + if endpoint.endswith("/messages/msg-1/feedbacks"): + return _response(str(method), endpoint, {"result": "success"}) + if endpoint.endswith("/messages/msg-1/suggested"): + return _response(str(method), endpoint, {"result": "success", "data": ["q1"]}) + if endpoint.endswith("/files/upload"): + return _response( + str(method), + endpoint, + { + "id": "f1", + "name": "a.txt", + "size": 1, + "extension": ".txt", + "mime_type": "text/plain", + "created_by": "u1", + "created_at": 1, + }, + ) + if endpoint.endswith("/completion-messages"): + return _response(str(method), endpoint, _completion_payload("completion")) + if endpoint.endswith("/completion-messages/task-1/stop"): + return _response(str(method), endpoint, {"result": "success"}) + if endpoint.endswith("/chat-messages"): + return _response(str(method), endpoint, 
_completion_payload("chat")) + if endpoint.endswith("/chat-messages/task-1/stop"): + return _response(str(method), endpoint, {"result": "success"}) + if endpoint.endswith("/workflows/run"): + return _response(str(method), endpoint, _workflow_payload()) + if endpoint.endswith("/workflows/tasks/task-1/stop"): + return _response(str(method), endpoint, {"result": "success"}) + raise AssertionError(endpoint) + + monkeypatch.setattr(Client, "request", fake_request) + client = Client(api_key="token", api_base="https://api.example.com/v1") + + assert client.feedback_messages("msg-1", models.FeedbackRequest(user="u1")).result == "success" + assert client.suggest_messages("msg-1", models.ChatSuggestRequest(user="u1")).data == ["q1"] + assert client.upload_files(("a.txt", b"x", "text/plain"), models.UploadFileRequest(user="u1")).id == "f1" + + completion_req = models.CompletionRequest( + inputs={"query": "hi"}, + response_mode=models.ResponseMode.BLOCKING, + user="u1", + ) + assert client.completion_messages(completion_req).mode == models.Mode.COMPLETION + assert client.stop_completion_messages("task-1", models.StopRequest(user="u1")).result == "success" + + chat_req = models.ChatRequest( + query="hi", + inputs={}, + response_mode=models.ResponseMode.BLOCKING, + user="u1", + ) + assert client.chat_messages(chat_req).mode == models.Mode.CHAT + assert client.stop_chat_messages("task-1", models.StopRequest(user="u1")).result == "success" + + workflow_req = models.WorkflowsRunRequest( + inputs={"a": 1}, + response_mode=models.ResponseMode.BLOCKING, + user="u1", + ) + assert client.run_workflows(workflow_req).workflow_run_id == "run-1" + assert client.stop_workflows("task-1", models.StopRequest(user="u1")).result == "success" + assert len(calls) == 9 + + +def test_sync_stream_wrapper_methods(monkeypatch): + def fake_request_stream(self, endpoint, method, **kwargs): + if endpoint.endswith("/completion-messages"): + yield ServerSentEvent( + event="message", + data='{"event":"message","message_id":"m1","answer":"ok","created_at":1}', + ) + return + if endpoint.endswith("/chat-messages"): + yield ServerSentEvent( + event="message", + data='{"event":"message","message_id":"m1","conversation_id":"c1","answer":"ok","created_at":1}', + ) + return + if endpoint.endswith("/workflows/run"): + yield ServerSentEvent( + event="workflow_finished", + data='{"event":"workflow_finished","workflow_run_id":"r1","data":{"id":"r1","workflow_id":"wf1","status":"succeeded","created_at":1,"finished_at":2}}', + ) + return + raise AssertionError(endpoint) + + monkeypatch.setattr(Client, "request_stream", fake_request_stream) + client = Client(api_key="token", api_base="https://api.example.com/v1") + + completion_req = models.CompletionRequest( + inputs={"query": "hi"}, + response_mode=models.ResponseMode.STREAMING, + user="u1", + ) + completion_items = list(client.completion_messages(completion_req)) + assert completion_items[0].event == models.StreamEvent.MESSAGE + + chat_req = models.ChatRequest( + query="hi", + inputs={}, + response_mode=models.ResponseMode.STREAMING, + user="u1", + ) + chat_items = list(client.chat_messages(chat_req)) + assert chat_items[0].event == models.StreamEvent.MESSAGE + + workflow_req = models.WorkflowsRunRequest( + inputs={"a": 1}, + response_mode=models.ResponseMode.STREAMING, + user="u1", + ) + workflow_items = list(client.run_workflows(workflow_req)) + assert workflow_items[0].event == models.StreamEvent.WORKFLOW_FINISHED + + +def test_sync_invalid_response_mode_raises_value_error(): + client = 
Client(api_key="token")
+
+    completion_req = models.CompletionRequest(
+        inputs={"query": "hi"},
+        response_mode=models.ResponseMode.BLOCKING,
+        user="u1",
+    )
+    completion_req.response_mode = "invalid"
+    with pytest.raises(ValueError):
+        client.completion_messages(completion_req)
+
+    chat_req = models.ChatRequest(
+        query="hi",
+        inputs={},
+        response_mode=models.ResponseMode.BLOCKING,
+        user="u1",
+    )
+    chat_req.response_mode = "invalid"
+    with pytest.raises(ValueError):
+        client.chat_messages(chat_req)
+
+    workflow_req = models.WorkflowsRunRequest(
+        inputs={"a": 1},
+        response_mode=models.ResponseMode.BLOCKING,
+        user="u1",
+    )
+    workflow_req.response_mode = "invalid"
+    with pytest.raises(ValueError):
+        client.run_workflows(workflow_req)

From 4bb606106aac3784d623aed49540863602e23234 Mon Sep 17 00:00:00 2001
From: "huhaoyu.hahahu"
Date: Wed, 25 Mar 2026 13:35:26 +0800
Subject: [PATCH 5/5] Fix CI: force anyio async tests to use asyncio backend

---
 tests/conftest.py | 16 ++++++++++++++++
 1 file changed, 16 insertions(+)
 create mode 100644 tests/conftest.py

diff --git a/tests/conftest.py b/tests/conftest.py
new file mode 100644
index 0000000..e59faae
--- /dev/null
+++ b/tests/conftest.py
@@ -0,0 +1,16 @@
+import pytest
+
+
+@pytest.fixture
+def anyio_backend():
+    """Run async tests on asyncio only to avoid optional trio dependency in CI."""
+    return "asyncio"
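+
+# Illustrative note (an example of the pattern used by this suite): an async
+# test opts in to this backend by requesting the anyio marker, e.g.
+#
+#     @pytest.mark.anyio
+#     async def test_example():
+#         assert True
+#
+# and, with the fixture above, runs on asyncio without needing trio installed.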