From 8ccb258353d089dd1bdb06c4c66855ca7fb6bf07 Mon Sep 17 00:00:00 2001 From: Taitranz Date: Sat, 11 Apr 2026 09:15:32 +1000 Subject: [PATCH] feat(ai): add Azure AI Foundry provider --- packages/ai/azure-foundry/LICENSE | 21 + packages/ai/azure-foundry/README.md | 5 + packages/ai/azure-foundry/docgen.json | 34 + packages/ai/azure-foundry/package.json | 63 + .../azure-foundry/src/AzureFoundryClient.ts | 287 ++++ .../azure-foundry/src/AzureFoundryConfig.ts | 56 + .../src/AzureFoundryLanguageModel.ts | 1437 +++++++++++++++++ packages/ai/azure-foundry/src/index.ts | 14 + .../azure-foundry/src/internal/utilities.ts | 44 + packages/ai/azure-foundry/tsconfig.build.json | 17 + packages/ai/azure-foundry/tsconfig.json | 8 + packages/ai/azure-foundry/tsconfig.src.json | 17 + packages/ai/azure-foundry/tsconfig.test.json | 14 + packages/ai/azure-foundry/vitest.config.ts | 6 + pnpm-lock.yaml | 22 + tsconfig.base.json | 1 + 16 files changed, 2046 insertions(+) create mode 100644 packages/ai/azure-foundry/LICENSE create mode 100644 packages/ai/azure-foundry/README.md create mode 100644 packages/ai/azure-foundry/docgen.json create mode 100644 packages/ai/azure-foundry/package.json create mode 100644 packages/ai/azure-foundry/src/AzureFoundryClient.ts create mode 100644 packages/ai/azure-foundry/src/AzureFoundryConfig.ts create mode 100644 packages/ai/azure-foundry/src/AzureFoundryLanguageModel.ts create mode 100644 packages/ai/azure-foundry/src/index.ts create mode 100644 packages/ai/azure-foundry/src/internal/utilities.ts create mode 100644 packages/ai/azure-foundry/tsconfig.build.json create mode 100644 packages/ai/azure-foundry/tsconfig.json create mode 100644 packages/ai/azure-foundry/tsconfig.src.json create mode 100644 packages/ai/azure-foundry/tsconfig.test.json create mode 100644 packages/ai/azure-foundry/vitest.config.ts diff --git a/packages/ai/azure-foundry/LICENSE b/packages/ai/azure-foundry/LICENSE new file mode 100644 index 00000000000..be1f5c14c7b --- /dev/null +++ 
b/packages/ai/azure-foundry/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2023 Effectful Technologies Inc + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/packages/ai/azure-foundry/README.md b/packages/ai/azure-foundry/README.md new file mode 100644 index 00000000000..3dc94bb62e3 --- /dev/null +++ b/packages/ai/azure-foundry/README.md @@ -0,0 +1,5 @@ +# `@effect/ai-azure-foundry` + +## Documentation + +- **API Reference**: [View the full documentation](https://effect-ts.github.io/effect/docs/ai/azure-foundry). 
diff --git a/packages/ai/azure-foundry/docgen.json b/packages/ai/azure-foundry/docgen.json new file mode 100644 index 00000000000..04147fb653a --- /dev/null +++ b/packages/ai/azure-foundry/docgen.json @@ -0,0 +1,34 @@ +{ + "$schema": "../../../node_modules/@effect/docgen/schema.json", + "exclude": [ + "src/internal/**/*.ts" + ], + "srcLink": "https://github.com/Effect-TS/effect/tree/main/packages/ai/azure-foundry/src/", + "examplesCompilerOptions": { + "noEmit": true, + "strict": true, + "skipLibCheck": true, + "moduleResolution": "Bundler", + "module": "ES2022", + "target": "ES2022", + "lib": [ + "ES2022", + "DOM", + "DOM.Iterable" + ], + "paths": { + "effect": ["../../../../effect/src/index.js"], + "effect/*": ["../../../../effect/src/*.js"], + "@effect/experimental": ["../../../../experimental/src/index.js"], + "@effect/experimental/*": ["../../../../experimental/src/*.js"], + "@effect/platform": ["../../../../platform/src/index.js"], + "@effect/platform/*": ["../../../../platform/src/*.js"], + "@effect/ai": ["../../../ai/src/index.js"], + "@effect/ai/*": ["../../../ai/src/*.js"], + "@effect/ai-openai": ["../../../openai/src/index.js"], + "@effect/ai-openai/*": ["../../../openai/src/*.js"], + "@effect/ai-azure-foundry": ["../../../azure-foundry/src/index.js"], + "@effect/ai-azure-foundry/*": ["../../../azure-foundry/src/*.js"] + } + } +} diff --git a/packages/ai/azure-foundry/package.json b/packages/ai/azure-foundry/package.json new file mode 100644 index 00000000000..094fdb39149 --- /dev/null +++ b/packages/ai/azure-foundry/package.json @@ -0,0 +1,63 @@ +{ + "name": "@effect/ai-azure-foundry", + "type": "module", + "version": "0.1.0", + "license": "MIT", + "description": "Effect modules for working with Azure AI Foundry APIs", + "homepage": "https://effect.website", + "repository": { + "type": "git", + "url": "https://github.com/Effect-TS/effect.git", + "directory": "packages/ai/azure-foundry" + }, + "bugs": { + "url": 
"https://github.com/Effect-TS/effect/issues" + }, + "tags": [ + "typescript", + "algebraic-data-types", + "functional-programming" + ], + "keywords": [ + "typescript", + "algebraic-data-types", + "functional-programming" + ], + "publishConfig": { + "access": "public", + "provenance": true, + "directory": "dist", + "linkDirectory": false + }, + "exports": { + "./package.json": "./package.json", + ".": "./src/index.ts", + "./*": "./src/*.ts", + "./internal/*": null + }, + "scripts": { + "codegen": "build-utils prepare-v3", + "build": "pnpm build-esm && pnpm build-annotate && pnpm build-cjs && build-utils pack-v3", + "build-esm": "tsc -b tsconfig.build.json", + "build-cjs": "babel build/esm --plugins @babel/transform-export-namespace-from --plugins @babel/transform-modules-commonjs --out-dir build/cjs --source-maps", + "build-annotate": "babel build/esm --plugins annotate-pure-calls --out-dir build/esm --source-maps", + "check": "tsc -b tsconfig.json", + "test": "vitest", + "coverage": "vitest --coverage" + }, + "peerDependencies": { + "@effect/ai": "workspace:^", + "@effect/ai-openai": "workspace:^", + "@effect/experimental": "workspace:^", + "@effect/platform": "workspace:^", + "effect": "workspace:^" + }, + "devDependencies": { + "@effect/ai": "workspace:^", + "@effect/ai-openai": "workspace:^", + "@effect/experimental": "workspace:^", + "@effect/platform": "workspace:^", + "@effect/platform-node": "workspace:^", + "effect": "workspace:^" + } +} diff --git a/packages/ai/azure-foundry/src/AzureFoundryClient.ts b/packages/ai/azure-foundry/src/AzureFoundryClient.ts new file mode 100644 index 00000000000..b195c755962 --- /dev/null +++ b/packages/ai/azure-foundry/src/AzureFoundryClient.ts @@ -0,0 +1,287 @@ +/** + * @since 1.0.0 + */ +import * as Generated from "@effect/ai-openai/Generated" +import type { ResponseStreamEvent } from "@effect/ai-openai/OpenAiClient" +import { ResponseStreamEvent as ResponseStreamEventSchema } from "@effect/ai-openai/OpenAiClient" +import * 
as AiError from "@effect/ai/AiError" +import * as Sse from "@effect/experimental/Sse" +import * as Headers from "@effect/platform/Headers" +import * as HttpBody from "@effect/platform/HttpBody" +import * as HttpClient from "@effect/platform/HttpClient" +import * as HttpClientRequest from "@effect/platform/HttpClientRequest" +import * as Arr from "effect/Array" +import * as Config from "effect/Config" +import type { ConfigError } from "effect/ConfigError" +import * as Context from "effect/Context" +import * as Effect from "effect/Effect" +import { identity } from "effect/Function" +import * as Layer from "effect/Layer" +import * as Redacted from "effect/Redacted" +import * as Schema from "effect/Schema" +import type * as Scope from "effect/Scope" +import * as Stream from "effect/Stream" +import { AzureFoundryConfig } from "./AzureFoundryConfig.js" + +/** + * @since 1.0.0 + * @category Context + */ +export class AzureFoundryClient extends Context.Tag( + "@effect/ai-azure-foundry/AzureFoundryClient" +)() {} + +/** + * @since 1.0.0 + * @category Models + */ +export interface Service { + readonly client: Generated.Client + + readonly streamRequest: ( + request: HttpClientRequest.HttpClientRequest, + schema: Schema.Schema + ) => Stream.Stream + + readonly createResponse: ( + options: typeof Generated.CreateResponse.Encoded + ) => Effect.Effect + + readonly createResponseStream: ( + options: Omit + ) => Stream.Stream + + readonly createEmbedding: ( + options: typeof Generated.CreateEmbeddingRequest.Encoded + ) => Effect.Effect +} + +/** + * @since 1.0.0 + * @category Constructors + */ +export const make = (options: { + /** + * The base URL of the Azure AI Foundry resource. + * + * Example: `"https://myresource.openai.azure.com"` or + * `"https://myresource.services.ai.azure.com"` + */ + readonly apiUrl: string + /** + * An API key for authenticating with Azure AI Foundry. + * + * Sent as the `api-key` header (Azure's documented REST approach). 
+ * + * Mutually exclusive with `tokenProvider`. + */ + readonly apiKey?: Redacted.Redacted | undefined + /** + * An Entra ID (Azure AD) token provider for authenticating with Azure AI + * Foundry. + * + * Called per-request to support token refresh. The token is sent as + * `Authorization: Bearer `. + * + * Mutually exclusive with `apiKey`. + * + * **Important:** The caller is responsible for acquiring tokens with the + * correct scope: + * - v1 API: `https://ai.azure.com/.default` + * - Classic API: `https://cognitiveservices.azure.com/.default` + */ + readonly tokenProvider?: Effect.Effect | undefined + /** + * API version for classic dated-version API fallback. + * + * When set, appended as `?api-version=` query param to all requests. + * + * Example: `"2024-10-21"` (stable) or `"2025-04-01-preview"` + */ + readonly apiVersion?: string | undefined + /** + * A method which can be used to transform the underlying `HttpClient` which + * will be used to communicate with the Azure AI Foundry API. + */ + readonly transformClient?: ((client: HttpClient.HttpClient) => HttpClient.HttpClient) | undefined +}): Effect.Effect => + Effect.gen(function*() { + const apiKeyHeader = "api-key" + + yield* Effect.locallyScopedWith(Headers.currentRedactedNames, Arr.append(apiKeyHeader)) + + let httpClient = (yield* HttpClient.HttpClient).pipe( + HttpClient.mapRequest((request) => + request.pipe( + HttpClientRequest.prependUrl(options.apiUrl + "/openai/v1"), + options.apiKey + ? HttpClientRequest.setHeader(apiKeyHeader, Redacted.value(options.apiKey)) + : identity, + options.apiVersion + ? HttpClientRequest.setUrlParam("api-version", options.apiVersion) + : identity, + HttpClientRequest.acceptJson + ) + ) + ) + + // Entra ID token provider — called per-request to support token refresh. + // Token acquisition failures become defects (the token provider should + // handle retries and caching internally). 
+ if (options.tokenProvider) { + const tokenProvider = options.tokenProvider + httpClient = HttpClient.mapRequestEffect(httpClient, (request) => + tokenProvider.pipe( + Effect.map((token) => HttpClientRequest.bearerToken(request, token)), + Effect.orDie + )) + } + + httpClient = options.transformClient ? options.transformClient(httpClient) : httpClient + + const httpClientOk = HttpClient.filterStatusOk(httpClient) + + const client = Generated.make(httpClient, { + transformClient: (client) => + AzureFoundryConfig.getOrUndefined.pipe( + Effect.map((config) => config?.transformClient ? config.transformClient(client) : client) + ) + }) + + const streamRequest = ( + request: HttpClientRequest.HttpClientRequest, + schema: Schema.Schema + ): Stream.Stream => { + const decodeEvent = Schema.decode(Schema.parseJson(schema)) + return httpClientOk.execute(request).pipe( + Effect.map((r) => r.stream), + Stream.unwrapScoped, + Stream.decodeText(), + Stream.pipeThroughChannel(Sse.makeChannel()), + Stream.mapEffect((event) => decodeEvent(event.data)), + Stream.catchTags({ + RequestError: (error) => + AiError.HttpRequestError.fromRequestError({ + module: "AzureFoundryClient", + method: "streamRequest", + error + }), + ResponseError: (error) => + AiError.HttpResponseError.fromResponseError({ + module: "AzureFoundryClient", + method: "streamRequest", + error + }), + ParseError: (error) => + AiError.MalformedOutput.fromParseError({ + module: "AzureFoundryClient", + method: "streamRequest", + error + }) + }) + ) + } + + const createResponse = ( + options: typeof Generated.CreateResponse.Encoded + ): Effect.Effect => + client.createResponse(options).pipe( + Effect.catchTags({ + RequestError: (error) => + AiError.HttpRequestError.fromRequestError({ + module: "AzureFoundryClient", + method: "createResponse", + error + }), + ResponseError: (error) => + AiError.HttpResponseError.fromResponseError({ + module: "AzureFoundryClient", + method: "createResponse", + error + }), + ParseError: 
(error) => + AiError.MalformedOutput.fromParseError({ + module: "AzureFoundryClient", + method: "createResponse", + error + }) + }) + ) + + const createResponseStream = ( + options: Omit + ): Stream.Stream => { + const request = HttpClientRequest.post("/responses", { + body: HttpBody.unsafeJson({ ...options, stream: true }) + }) + return streamRequest(request, ResponseStreamEventSchema).pipe( + Stream.takeUntil((event) => event.type === "response.completed" || event.type === "response.incomplete") + ) + } + + const createEmbedding = ( + options: typeof Generated.CreateEmbeddingRequest.Encoded + ): Effect.Effect => + client.createEmbedding(options).pipe( + Effect.catchTags({ + RequestError: (error) => + AiError.HttpRequestError.fromRequestError({ + module: "AzureFoundryClient", + method: "createEmbedding", + error + }), + ResponseError: (error) => + AiError.HttpResponseError.fromResponseError({ + module: "AzureFoundryClient", + method: "createEmbedding", + error + }), + ParseError: (error) => + AiError.MalformedOutput.fromParseError({ + module: "AzureFoundryClient", + method: "createEmbedding", + error + }) + }) + ) + + return AzureFoundryClient.of({ + client, + streamRequest, + createResponse, + createResponseStream, + createEmbedding + }) + }) + +/** + * @since 1.0.0 + * @category Layers + */ +export const layer = (options: { + readonly apiUrl: string + readonly apiKey?: Redacted.Redacted | undefined + readonly tokenProvider?: Effect.Effect | undefined + readonly apiVersion?: string | undefined + readonly transformClient?: (client: HttpClient.HttpClient) => HttpClient.HttpClient +}): Layer.Layer => Layer.scoped(AzureFoundryClient, make(options)) + +/** + * @since 1.0.0 + * @category Layers + */ +export const layerConfig = ( + options: { + readonly apiUrl: Config.Config + readonly apiKey?: Config.Config | undefined + readonly tokenProvider?: Effect.Effect | undefined + readonly apiVersion?: Config.Config | undefined + readonly transformClient?: (client: 
HttpClient.HttpClient) => HttpClient.HttpClient + } +): Layer.Layer => { + const { tokenProvider, transformClient, ...configs } = options + return Config.all(configs).pipe( + Effect.flatMap((configs) => make({ ...configs, transformClient, tokenProvider })), + Layer.scoped(AzureFoundryClient) + ) +} diff --git a/packages/ai/azure-foundry/src/AzureFoundryConfig.ts b/packages/ai/azure-foundry/src/AzureFoundryConfig.ts new file mode 100644 index 00000000000..6436fcbe4c0 --- /dev/null +++ b/packages/ai/azure-foundry/src/AzureFoundryConfig.ts @@ -0,0 +1,56 @@ +/** + * @since 1.0.0 + */ +import type { HttpClient } from "@effect/platform/HttpClient" +import * as Context from "effect/Context" +import * as Effect from "effect/Effect" +import { dual } from "effect/Function" + +/** + * @since 1.0.0 + * @category Context + */ +export class AzureFoundryConfig extends Context.Tag("@effect/ai-azure-foundry/AzureFoundryConfig")< + AzureFoundryConfig, + AzureFoundryConfig.Service +>() { + /** + * @since 1.0.0 + */ + static readonly getOrUndefined: Effect.Effect = Effect.map( + Effect.context(), + (context) => context.unsafeMap.get(AzureFoundryConfig.key) + ) +} + +/** + * @since 1.0.0 + */ +export declare namespace AzureFoundryConfig { + /** + * @since 1.0.0 + * @category Models + */ + export interface Service { + readonly transformClient?: (client: HttpClient) => HttpClient + } +} + +/** + * @since 1.0.0 + * @category Configuration + */ +export const withClientTransform: { + (transform: (client: HttpClient) => HttpClient): (self: Effect.Effect) => Effect.Effect + (self: Effect.Effect, transform: (client: HttpClient) => HttpClient): Effect.Effect +} = dual< + (transform: (client: HttpClient) => HttpClient) => (self: Effect.Effect) => Effect.Effect, + (self: Effect.Effect, transform: (client: HttpClient) => HttpClient) => Effect.Effect +>( + 2, + (self, transformClient) => + Effect.flatMap( + AzureFoundryConfig.getOrUndefined, + (config) => Effect.provideService(self, 
AzureFoundryConfig, { ...config, transformClient }) + ) +) diff --git a/packages/ai/azure-foundry/src/AzureFoundryLanguageModel.ts b/packages/ai/azure-foundry/src/AzureFoundryLanguageModel.ts new file mode 100644 index 00000000000..c49554c3ddc --- /dev/null +++ b/packages/ai/azure-foundry/src/AzureFoundryLanguageModel.ts @@ -0,0 +1,1437 @@ +/** + * @since 1.0.0 + */ +import type * as Generated from "@effect/ai-openai/Generated" +import type { ResponseStreamEvent } from "@effect/ai-openai/OpenAiClient" +import * as AiError from "@effect/ai/AiError" +import * as IdGenerator from "@effect/ai/IdGenerator" +import * as LanguageModel from "@effect/ai/LanguageModel" +import * as AiModel from "@effect/ai/Model" +import type * as Prompt from "@effect/ai/Prompt" +import type * as Response from "@effect/ai/Response" +import * as Telemetry from "@effect/ai/Telemetry" +import * as Tool from "@effect/ai/Tool" +import * as Context from "effect/Context" +import * as DateTime from "effect/DateTime" +import * as Effect from "effect/Effect" +import * as Encoding from "effect/Encoding" +import { dual } from "effect/Function" +import * as Layer from "effect/Layer" +import * as Predicate from "effect/Predicate" +import * as Stream from "effect/Stream" +import type { Span } from "effect/Tracer" +import type { DeepMutable, Mutable, Simplify } from "effect/Types" +import { AzureFoundryClient } from "./AzureFoundryClient.js" +import * as InternalUtilities from "./internal/utilities.js" + +// ============================================================================= +// Configuration +// ============================================================================= + +/** + * @since 1.0.0 + * @category Context + */ +export class Config extends Context.Tag("@effect/ai-azure-foundry/AzureFoundryLanguageModel/Config")< + Config, + Config.Service +>() { + /** + * @since 1.0.0 + */ + static readonly getOrUndefined: Effect.Effect = Effect.map( + Effect.context(), + (context) => 
context.unsafeMap.get(Config.key) + ) +} + +/** + * @since 1.0.0 + */ +export declare namespace Config { + /** + * @since 1.0.0 + * @category Models + */ + export interface Service extends + Simplify< + Partial< + Omit< + typeof Generated.CreateResponse.Encoded, + "input" | "tools" | "tool_choice" | "stream" | "text" + > + > + > + { + /** + * File ID prefixes used to identify file IDs in Responses API. + * When undefined, all file data is treated as base64 content. + * + * Examples: + * - Azure OpenAI: `['assistant-']` for IDs like `'assistant-abc123'` + */ + readonly fileIdPrefixes?: ReadonlyArray + /** + * Configuration options for a text response from the model. + */ + readonly text?: { + /** + * Constrains the verbosity of the model's response. Lower values will + * result in more concise responses, while higher values will result in + * more verbose responses. + * + * Defaults to `"medium"`. + */ + readonly verbosity?: "low" | "medium" | "high" + } + /** + * Controls whether system messages use the `"system"` or `"developer"` + * role. + * + * Azure deployment names are arbitrary strings and cannot be + * auto-detected like OpenAI model names. Set to `"developer"` when + * deploying reasoning models (e.g. o1, o3). + * + * Defaults to `"system"`. + */ + readonly systemMessageMode?: "system" | "developer" + } +} + +// ============================================================================= +// Azure Foundry Provider Options / Metadata +// ============================================================================= + +declare module "@effect/ai/Prompt" { + export interface FilePartOptions extends ProviderOptions { + readonly openai?: { + /** + * The detail level of the image to be sent to the model. One of `high`, `low`, or `auto`. Defaults to `auto`. 
+ */ + readonly imageDetail?: typeof Generated.ImageDetail.Encoded | undefined + } | undefined + } + + export interface ReasoningPartOptions extends ProviderOptions { + readonly openai?: { + /** + * The ID of the item to reference. + */ + readonly itemId?: string | undefined + /** + * The encrypted content of the reasoning item - populated when a response + * is generated with `reasoning.encrypted_content` in the `include` + * parameter. + */ + readonly encryptedContent?: string | undefined + } | undefined + } + + export interface ToolCallPartOptions extends ProviderOptions { + readonly openai?: { + /** + * The ID of the item to reference. + */ + readonly itemId?: string | undefined + } | undefined + } + + export interface TextPartOptions extends ProviderOptions { + readonly openai?: { + /** + * The ID of the item to reference. + */ + readonly itemId?: string | undefined + } | undefined + } +} + +declare module "@effect/ai/Response" { + export interface TextPartMetadata extends ProviderMetadata { + readonly openai?: { + readonly itemId?: string | undefined + /** + * If the model emits a refusal content part, the refusal explanation + * from the model will be contained in the metadata of an empty text + * part. + */ + readonly refusal?: string | undefined + } | undefined + readonly azureFoundry?: { + /** + * Azure content filter results for the output content. 
+ */ + readonly contentFilterResults?: ContentFilterResults | undefined + } | undefined + } + + export interface TextStartPartMetadata extends ProviderMetadata { + readonly openai?: { + readonly itemId?: string | undefined + } | undefined + } + + export interface ReasoningPartMetadata extends ProviderMetadata { + readonly openai?: { + readonly itemId?: string | undefined + readonly encryptedContent?: string | undefined + } | undefined + } + + export interface ReasoningStartPartMetadata extends ProviderMetadata { + readonly openai?: { + readonly itemId?: string | undefined + readonly encryptedContent?: string | undefined + } | undefined + } + + export interface ReasoningDeltaPartMetadata extends ProviderMetadata { + readonly openai?: { + readonly itemId?: string | undefined + } | undefined + } + + export interface ReasoningEndPartMetadata extends ProviderMetadata { + readonly openai?: { + readonly itemId?: string | undefined + readonly encryptedContent?: string | undefined + } | undefined + } + + export interface ToolCallPartMetadata extends ProviderMetadata { + readonly openai?: { + readonly itemId?: string | undefined + } | undefined + } + + export interface DocumentSourcePartMetadata extends ProviderMetadata { + readonly openai?: { + readonly type: "file_citation" + /** + * The index of the file in the list of files. + */ + readonly index: number + } | undefined + } + + export interface UrlSourcePartMetadata extends ProviderMetadata { + readonly openai?: { + readonly type: "url_citation" + /** + * The index of the first character of the URL citation in the message. + */ + readonly startIndex: number + /** + * The index of the last character of the URL citation in the message. 
+ */ + readonly endIndex: number + } | undefined + } + + export interface FinishPartMetadata extends ProviderMetadata { + readonly openai?: { + readonly serviceTier?: "default" | "auto" | "flex" | "scale" | "priority" | undefined + } | undefined + readonly azureFoundry?: { + /** + * Azure prompt-level content filter results. + */ + readonly promptFilterResults?: ReadonlyArray | undefined + } | undefined + } +} + +/** + * @since 1.0.0 + */ +export declare namespace ProviderMetadata { + /** + * @since 1.0.0 + * @category Provider Metadata + */ + export interface Service { + "source": {} | {} + } +} + +// ============================================================================= +// Azure Content Filter Types +// ============================================================================= + +/** + * Severity result from Azure content filtering. + * + * @since 1.0.0 + * @category Models + */ +export interface ContentFilterSeverityResult { + readonly filtered: boolean + readonly severity: "safe" | "low" | "medium" | "high" +} + +/** + * Detection result from Azure content filtering (for categories like + * jailbreak that use detected/filtered rather than severity levels). + * + * @since 1.0.0 + * @category Models + */ +export interface ContentFilterDetectionResult { + readonly filtered: boolean + readonly detected: boolean +} + +/** + * Azure content filter results for a response or prompt. 
+ * + * @since 1.0.0 + * @category Models + */ +export interface ContentFilterResults { + readonly sexual?: ContentFilterSeverityResult | undefined + readonly violence?: ContentFilterSeverityResult | undefined + readonly hate?: ContentFilterSeverityResult | undefined + readonly self_harm?: ContentFilterSeverityResult | undefined + readonly profanity?: ContentFilterSeverityResult | undefined + readonly jailbreak?: ContentFilterDetectionResult | undefined + readonly custom_blocklists?: { + readonly filtered: boolean + readonly details?: + | ReadonlyArray<{ + readonly id: string + readonly filtered: boolean + }> + | undefined + } | undefined +} + +/** + * Azure prompt-level content filter result. + * + * @since 1.0.0 + * @category Models + */ +export interface PromptFilterResult { + readonly prompt_index: number + readonly content_filter_results: ContentFilterResults +} + +// ============================================================================= +// Azure Foundry Language Model +// ============================================================================= + +/** + * @since 1.0.0 + * @category Ai Models + */ +export const model = ( + model: string, + config?: Omit +): AiModel.Model<"azure-foundry", LanguageModel.LanguageModel, AzureFoundryClient> => + AiModel.make("azure-foundry", layer({ model, config })) + +/** + * @since 1.0.0 + * @category Constructors + */ +export const make = Effect.fnUntraced(function*(options: { + readonly model: string + readonly config?: Omit +}) { + const client = yield* AzureFoundryClient + + const makeRequest: (providerOptions: LanguageModel.ProviderOptions) => Effect.Effect< + typeof Generated.CreateResponse.Encoded, + AiError.AiError + > = Effect.fnUntraced( + function*(providerOptions) { + const context = yield* Effect.context() + const config = { model: options.model, ...options.config, ...context.unsafeMap.get(Config.key) } + const messages = yield* prepareMessages(providerOptions, config) + const { toolChoice, tools } = 
yield* prepareTools(providerOptions) + const include = prepareInclude(providerOptions, config) + const responseFormat = prepareResponseFormat(providerOptions) + const verbosity = config.text?.verbosity + const request: typeof Generated.CreateResponse.Encoded = { + ...config, + input: messages, + include, + text: { format: responseFormat, verbosity }, + tools, + tool_choice: toolChoice + } + return request + } + ) + + return yield* LanguageModel.make({ + generateText: Effect.fnUntraced( + function*(options) { + const request = yield* makeRequest(options) + annotateRequest(options.span, request) + const rawResponse = yield* client.createResponse(request) + annotateResponse(options.span, rawResponse) + return yield* makeResponse(rawResponse, options) + } + ), + streamText: Effect.fnUntraced( + function*(options) { + const request = yield* makeRequest(options) + annotateRequest(options.span, request) + return client.createResponseStream(request) + }, + (effect, options) => + effect.pipe( + Effect.flatMap((stream) => makeStreamResponse(stream, options)), + Stream.unwrap, + Stream.map((response) => { + annotateStreamResponse(options.span, response) + return response + }) + ) + ) + }) +}) + +/** + * @since 1.0.0 + * @category Layers + */ +export const layer = (options: { + readonly model: string + readonly config?: Omit +}): Layer.Layer => + Layer.effect(LanguageModel.LanguageModel, make({ model: options.model, config: options.config })) + +/** + * @since 1.0.0 + * @category Configuration + */ +export const withConfigOverride: { + (overrides: Config.Service): (self: Effect.Effect) => Effect.Effect + (self: Effect.Effect, overrides: Config.Service): Effect.Effect +} = dual< + (overrides: Config.Service) => (self: Effect.Effect) => Effect.Effect, + (self: Effect.Effect, overrides: Config.Service) => Effect.Effect +>(2, (self, overrides) => + Effect.flatMap( + Config.getOrUndefined, + (config) => Effect.provideService(self, Config, { ...config, ...overrides }) + )) + +// 
// =============================================================================
// Prompt Conversion
// =============================================================================

/**
 * Resolves the role used to encode system prompts.
 *
 * Newer OpenAI reasoning models expect the `"developer"` role instead of
 * `"system"`; the mode is read from provider config and defaults to `"system"`.
 */
const getSystemMessageMode = (config: Config.Service): "system" | "developer" => config.systemMessageMode ?? "system"

/**
 * Converts an `@effect/ai` prompt into the Responses API `input` item format.
 *
 * - System messages are re-tagged using `getSystemMessageMode`.
 * - User file parts support images and PDFs (by file id, URL, or inline
 *   base64 data URL); any other media type fails with `AiError.MalformedInput`.
 * - Assistant reasoning parts sharing an OpenAI `itemId` are merged into a
 *   single `reasoning` item so their summaries are replayed together.
 * - Tool results are emitted as `function_call_output` items.
 *
 * NOTE(review): the generic arguments on `ReadonlyArray`/`Array`/`Record`/
 * `Mutable` below appear to have been lost when this text was extracted —
 * confirm against the original source file.
 */
const prepareMessages: (
  options: LanguageModel.ProviderOptions,
  config: Config.Service
) => Effect.Effect<
  ReadonlyArray,
  AiError.AiError
> = Effect.fnUntraced(function*(options, config) {
  const messages: Array = []

  for (const message of options.prompt.content) {
    switch (message.role) {
      case "system": {
        messages.push({
          role: getSystemMessageMode(config),
          content: message.content
        })
        break
      }

      case "user": {
        const content: Array = []

        // Indexed loop so `index` can be used to synthesize PDF file names
        for (let index = 0; index < message.content.length; index++) {
          const part = message.content[index]

          switch (part.type) {
            case "text": {
              content.push({ type: "input_text", text: part.text })
              break
            }

            case "file": {
              if (part.mediaType.startsWith("image/")) {
                const detail = getImageDetail(part)
                // "image/*" is a wildcard; pick a concrete type for the data URL
                const mediaType = part.mediaType === "image/*" ? "image/jpeg" : part.mediaType

                // NOTE(review): a string payload that is NOT a recognized file
                // id falls through all three branches and is silently dropped —
                // confirm whether raw base64 strings should be supported here.
                if (typeof part.data === "string" && isFileId(part.data, config)) {
                  content.push({ type: "input_image", file_id: part.data, detail })
                }

                if (part.data instanceof URL) {
                  content.push({ type: "input_image", image_url: part.data.toString(), detail })
                }

                if (part.data instanceof Uint8Array) {
                  // Inline raw bytes as a data URL
                  const base64 = Encoding.encodeBase64(part.data)
                  const imageUrl = `data:${mediaType};base64,${base64}`
                  content.push({ type: "input_image", image_url: imageUrl, detail })
                }
              } else if (part.mediaType === "application/pdf") {
                // Same three-way dispatch as images: file id, URL, or inline bytes
                if (typeof part.data === "string" && isFileId(part.data, config)) {
                  content.push({ type: "input_file", file_id: part.data })
                }

                if (part.data instanceof URL) {
                  content.push({ type: "input_file", file_url: part.data.toString() })
                }

                if (part.data instanceof Uint8Array) {
                  const base64 = Encoding.encodeBase64(part.data)
                  // Synthesize a stable name when the prompt did not provide one
                  const fileName = part.fileName ?? `part-${index}.pdf`
                  const fileData = `data:application/pdf;base64,${base64}`
                  content.push({ type: "input_file", filename: fileName, file_data: fileData })
                }
              } else {
                // Anything that is neither an image nor a PDF is unsupported
                return yield* new AiError.MalformedInput({
                  module: "AzureFoundryLanguageModel",
                  method: "prepareMessages",
                  description: `Detected unsupported media type for file: '${part.mediaType}'`
                })
              }
            }
          }
        }

        messages.push({ role: "user", content })

        break
      }

      case "assistant": {
        // Accumulates reasoning items by OpenAI item id so that multiple
        // reasoning parts referring to the same item are merged
        const reasoningMessages: Record> = {}

        for (const part of message.content) {
          switch (part.type) {
            case "text": {
              messages.push({
                role: "assistant",
                content: [{ type: "output_text", text: part.text }],
                id: getItemId(part)
              })
              break
            }

            case "reasoning": {
              // NOTE: shadows the outer `options` parameter — this is the
              // per-part provider options bag, not the function argument
              const options = part.options.openai

              // Reasoning parts without an OpenAI item id cannot be replayed
              if (Predicate.isNotUndefined(options?.itemId)) {
                const reasoningMessage = reasoningMessages[options.itemId]
                const summaryParts: Mutable = []

                if (part.text.length > 0) {
                  summaryParts.push({ type: "summary_text", text: part.text })
                }

                if (Predicate.isUndefined(reasoningMessage)) {
                  // First part for this item: create and emit the reasoning item
                  reasoningMessages[options.itemId] = {
                    id: options.itemId,
                    type: "reasoning",
                    summary: summaryParts,
                    encrypted_content: options.encryptedContent
                  }
                  messages.push(reasoningMessages[options.itemId])
                } else {
                  // Subsequent parts: append summaries to the already-emitted
                  // item (it is pushed by reference above)
                  for (const summaryPart of summaryParts) {
                    reasoningMessage.summary.push(summaryPart)
                  }
                }
              }

              break
            }

            case "tool-call": {
              // Provider-executed calls (hosted tools) are not replayed as
              // client function calls
              if (!part.providerExecuted) {
                messages.push({
                  id: getItemId(part),
                  type: "function_call",
                  call_id: part.id,
                  name: part.name,
                  arguments: JSON.stringify(part.params)
                })
              }

              break
            }
          }
        }

        break
      }

      case "tool": {
        // Each tool result becomes a function_call_output correlated by call id
        for (const part of message.content) {
          messages.push({
            type: "function_call_output",
            call_id: part.id,
            output: JSON.stringify(part.result)
          })
        }

        break
      }
    }
  }

  return messages
})
// =============================================================================
// Response Conversion
// =============================================================================

/**
 * Converts a fully-materialized Responses API response into `@effect/ai`
 * response parts: metadata first, then one or more parts per output item,
 * and a terminal `finish` part carrying usage.
 *
 * `hasToolCalls` is only set for client-executed `function_call` items (not
 * hosted/provider-executed tools) and feeds finish-reason resolution.
 *
 * NOTE(review): the generic argument on `Array` in the signature appears to
 * have been lost in extraction — confirm against the original source.
 */
const makeResponse: (
  response: Generated.Response,
  options: LanguageModel.ProviderOptions
) => Effect.Effect<
  Array,
  AiError.AiError,
  IdGenerator.IdGenerator
> = Effect.fnUntraced(
  function*(response, options) {
    const idGenerator = yield* IdGenerator.IdGenerator

    // A user-registered web search tool (if any) determines the name under
    // which web_search_call items are reported back
    const webSearchTool = options.tools.find((tool) =>
      Tool.isProviderDefined(tool) &&
      (tool.id === "openai.web_search" ||
        tool.id === "openai.web_search_preview")
    ) as Tool.AnyProviderDefined | undefined

    let hasToolCalls = false
    const parts: Array = []

    // `created_at` is epoch seconds; convert to an ISO timestamp
    const createdAt = new Date(response.created_at * 1000)
    parts.push({
      type: "response-metadata",
      id: response.id,
      modelId: response.model,
      timestamp: DateTime.formatIso(DateTime.unsafeFromDate(createdAt))
    })

    for (const part of response.output) {
      switch (part.type) {
        case "message": {
          for (const contentPart of part.content) {
            switch (contentPart.type) {
              case "output_text": {
                parts.push({
                  type: "text",
                  text: contentPart.text,
                  metadata: { openai: { itemId: part.id } }
                })

                // Citations attached to the text become `source` parts
                for (const annotation of contentPart.annotations) {
                  if (annotation.type === "file_citation") {
                    const metadata = {
                      type: annotation.type,
                      index: annotation.index
                    }

                    parts.push({
                      type: "source",
                      sourceType: "document",
                      id: yield* idGenerator.generateId(),
                      mediaType: "text/plain",
                      title: annotation.filename ?? "Untitled Document",
                      metadata: { openai: metadata }
                    })
                  }

                  if (annotation.type === "url_citation") {
                    const metadata = {
                      type: annotation.type,
                      startIndex: annotation.start_index,
                      endIndex: annotation.end_index
                    }

                    parts.push({
                      type: "source",
                      sourceType: "url",
                      id: yield* idGenerator.generateId(),
                      url: annotation.url,
                      title: annotation.title,
                      metadata: { openai: metadata }
                    })
                  }
                }

                break
              }
              case "refusal": {
                // Refusals surface as an empty text part with the refusal
                // message preserved in provider metadata
                parts.push({
                  type: "text",
                  text: "",
                  metadata: { openai: { refusal: contentPart.refusal } }
                })

                break
              }
            }
          }

          break
        }

        case "function_call": {
          hasToolCalls = true

          const toolName = part.name
          const toolParams = part.arguments

          // Secure parse guards against prototype pollution in model output
          const params = yield* Effect.try({
            try: () => Tool.unsafeSecureJsonParse(toolParams),
            catch: (cause) =>
              new AiError.MalformedOutput({
                module: "AzureFoundryLanguageModel",
                method: "makeResponse",
                description: "Failed to securely parse tool call parameters " +
                  `for tool '${toolName}':\nParameters: ${toolParams}`,
                cause
              })
          })

          parts.push({
            type: "tool-call",
            id: part.call_id,
            name: toolName,
            params,
            metadata: { openai: { itemId: part.id } }
          })

          break
        }

        case "code_interpreter_call": {
          // Hosted tool: emit a synthetic call/result pair
          parts.push({
            type: "tool-call",
            id: part.id,
            name: "OpenAiCodeInterpreter",
            params: { code: part.code, container_id: part.container_id },
            providerName: "code_interpreter",
            providerExecuted: true
          })

          // NOTE(review): the streaming path wraps this result as
          // `{ outputs: ... }` while here the raw `part.outputs` is used —
          // confirm which shape consumers expect.
          parts.push({
            type: "tool-result",
            id: part.id,
            name: "OpenAiCodeInterpreter",
            isFailure: false,
            result: part.outputs,
            providerName: "code_interpreter",
            providerExecuted: true
          })

          break
        }

        case "file_search_call": {
          parts.push({
            type: "tool-call",
            id: part.id,
            name: "OpenAiFileSearch",
            params: {},
            providerName: "file_search",
            providerExecuted: true
          })

          parts.push({
            type: "tool-result",
            id: part.id,
            name: "OpenAiFileSearch",
            isFailure: false,
            result: {
              status: part.status,
              queries: part.queries,
              // `results` is only present when the include option requests it
              ...(part.results && { results: part.results })
            },
            providerName: "file_search",
            providerExecuted: true
          })

          break
        }

        case "web_search_call": {
          parts.push({
            type: "tool-call",
            id: part.id,
            name: webSearchTool?.name ?? "OpenAiWebSearch",
            params: { action: part.action },
            providerName: webSearchTool?.providerName ?? "web_search",
            providerExecuted: true
          })

          parts.push({
            type: "tool-result",
            id: part.id,
            name: webSearchTool?.name ?? "OpenAiWebSearch",
            isFailure: false,
            result: { status: part.status },
            providerName: webSearchTool?.providerName ?? "web_search",
            providerExecuted: true
          })

          break
        }

        case "reasoning": {
          // If there are no summary parts, we have to add an empty one to
          // propagate the part identifier
          if (part.summary.length === 0) {
            parts.push({
              type: "reasoning",
              text: "",
              metadata: { openai: { itemId: part.id } }
            })
          } else {
            for (const summary of part.summary) {
              const metadata = {
                itemId: part.id,
                encryptedContent: part.encrypted_content ?? undefined
              }
              parts.push({
                type: "reasoning",
                text: summary.text,
                metadata: { openai: metadata }
              })
            }
          }

          break
        }
      }
    }

    const finishReason = InternalUtilities.resolveFinishReason(
      response.incomplete_details?.reason,
      hasToolCalls
    )

    const metadata = {
      serviceTier: response.service_tier
    }

    parts.push({
      type: "finish",
      reason: finishReason,
      usage: {
        inputTokens: response.usage?.input_tokens,
        outputTokens: response.usage?.output_tokens,
        totalTokens: (response.usage?.input_tokens ?? 0) + (response.usage?.output_tokens ?? 0),
        reasoningTokens: response.usage?.output_tokens_details?.reasoning_tokens,
        cachedInputTokens: response.usage?.input_tokens_details?.cached_tokens
      },
      metadata: { openai: metadata }
    })

    return parts
  }
)
DateTime.formatIso(DateTime.unsafeFromDate(createdAt)) + }) + break + } + + case "error": { + parts.push({ type: "error", error: event }) + break + } + + case "response.completed": + case "response.incomplete": + case "response.failed": { + parts.push({ + type: "finish", + reason: InternalUtilities.resolveFinishReason( + event.response.incomplete_details?.reason, + hasToolCalls + ), + usage: { + inputTokens: event.response.usage?.input_tokens, + outputTokens: event.response.usage?.output_tokens, + totalTokens: (event.response.usage?.input_tokens ?? 0) + (event.response.usage?.output_tokens ?? 0), + reasoningTokens: event.response.usage?.output_tokens_details?.reasoning_tokens, + cachedInputTokens: event.response.usage?.input_tokens_details?.cached_tokens + }, + metadata: { openai: { serviceTier: event.response.service_tier } } + }) + break + } + + case "response.output_item.added": { + switch (event.item.type) { + case "computer_call": { + // TODO: support computer use + break + } + + case "file_search_call": { + activeToolCalls[event.output_index] = { + id: event.item.id, + name: "OpenAiFileSearch" + } + parts.push({ + type: "tool-params-start", + id: event.item.id, + name: "OpenAiFileSearch", + providerName: "file_search", + providerExecuted: true + }) + break + } + + case "function_call": { + activeToolCalls[event.output_index] = { + id: event.item.call_id, + name: event.item.name + } + parts.push({ + type: "tool-params-start", + id: event.item.call_id, + name: event.item.name + }) + break + } + + case "message": { + parts.push({ + type: "text-start", + id: event.item.id, + metadata: { openai: { itemId: event.item.id } } + }) + break + } + + case "reasoning": { + activeReasoning[event.item.id] = { + summaryParts: [0], + encryptedContent: event.item.encrypted_content + } + parts.push({ + type: "reasoning-start", + id: `${event.item.id}:0`, + metadata: { + openai: { + itemId: event.item.id, + encryptedContent: event.item.encrypted_content + } + } + }) + break + } 
+ + case "web_search_call": { + activeToolCalls[event.output_index] = { + id: event.item.id, + name: webSearchTool?.name ?? "OpenAiWebSearch" + } + parts.push({ + type: "tool-params-start", + id: event.item.id, + name: webSearchTool?.name ?? "OpenAiWebSearch", + providerName: webSearchTool?.providerName ?? "web_search", + providerExecuted: true + }) + break + } + } + + break + } + + case "response.output_item.done": { + switch (event.item.type) { + case "code_interpreter_call": { + parts.push({ + type: "tool-call", + id: event.item.id, + name: "OpenAiCodeInterpreter", + params: { code: event.item.code, container_id: event.item.container_id }, + providerName: "code_interpreter", + providerExecuted: true + }) + parts.push({ + type: "tool-result", + id: event.item.id, + name: "OpenAiCodeInterpreter", + isFailure: false, + result: { outputs: event.item.outputs }, + providerName: "code_interpreter", + providerExecuted: true + }) + break + } + + case "computer_call": { + // TODO: support computer use + break + } + + case "file_search_call": { + delete activeToolCalls[event.output_index] + parts.push({ + type: "tool-params-end", + id: event.item.id + }) + parts.push({ + type: "tool-call", + id: event.item.id, + name: "OpenAiFileSearch", + params: {}, + providerName: "file_search", + providerExecuted: true + }) + parts.push({ + type: "tool-result", + id: event.item.id, + name: "OpenAiFileSearch", + isFailure: false, + result: { + status: event.item.status, + queries: event.item.queries, + ...(event.item.results && { results: event.item.results }) + }, + providerName: "file_search", + providerExecuted: true + }) + break + } + + case "function_call": { + hasToolCalls = true + + const toolName = event.item.name + const toolParams = event.item.arguments + + const params = yield* Effect.try({ + try: () => Tool.unsafeSecureJsonParse(toolParams), + catch: (cause) => + new AiError.MalformedOutput({ + module: "AzureFoundryLanguageModel", + method: "makeStreamResponse", + 
description: "Failed to securely parse tool call parameters " + + `for tool '${toolName}':\nParameters: ${toolParams}`, + cause + }) + }) + + parts.push({ + type: "tool-params-end", + id: event.item.call_id + }) + + parts.push({ + type: "tool-call", + id: event.item.call_id, + name: toolName, + params, + metadata: { openai: { itemId: event.item.id } } + }) + + delete activeToolCalls[event.output_index] + + break + } + + case "message": { + parts.push({ + type: "text-end", + id: event.item.id + }) + break + } + + case "reasoning": { + const reasoningPart = activeReasoning[event.item.id] + for (const summaryIndex of reasoningPart.summaryParts) { + parts.push({ + type: "reasoning-end", + id: `${event.item.id}:${summaryIndex}`, + metadata: { + openai: { + itemId: event.item.id, + encryptedContent: event.item.encrypted_content + } + } + }) + } + delete activeReasoning[event.item.id] + break + } + + case "web_search_call": { + delete activeToolCalls[event.output_index] + parts.push({ + type: "tool-params-end", + id: event.item.id + }) + parts.push({ + type: "tool-call", + id: event.item.id, + name: "OpenAiWebSearch", + params: { action: event.item.action }, + providerName: "web_search", + providerExecuted: true + }) + parts.push({ + type: "tool-result", + id: event.item.id, + name: "OpenAiWebSearch", + isFailure: false, + result: { status: event.item.status }, + providerName: "web_search", + providerExecuted: true + }) + break + } + } + + break + } + + case "response.output_text.delta": { + parts.push({ + type: "text-delta", + id: event.item_id, + delta: event.delta + }) + break + } + + case "response.output_text.annotation.added": { + if (event.annotation.type === "file_citation") { + parts.push({ + type: "source", + sourceType: "document", + id: yield* idGenerator.generateId(), + mediaType: "text/plain", + title: event.annotation.filename ?? "Untitled Document", + fileName: event.annotation.filename ?? 
event.annotation.file_id + }) + } + if (event.annotation.type === "url_citation") { + parts.push({ + type: "source", + sourceType: "url", + id: yield* idGenerator.generateId(), + url: event.annotation.url, + title: event.annotation.title + }) + } + break + } + + case "response.function_call_arguments.delta": { + const toolCallPart = activeToolCalls[event.output_index] + if (Predicate.isNotUndefined(toolCallPart)) { + parts.push({ + type: "tool-params-delta", + id: toolCallPart.id, + delta: event.delta + }) + } + break + } + + case "response.reasoning_summary_part.added": { + // The first reasoning start is pushed in the `response.output_item.added` block + if (event.summary_index > 0) { + const reasoningPart = activeReasoning[event.item_id] + if (Predicate.isNotUndefined(reasoningPart)) { + reasoningPart.summaryParts.push(event.summary_index) + } + parts.push({ + type: "reasoning-start", + id: `${event.item_id}:${event.summary_index}`, + metadata: { + openai: { + itemId: event.item_id, + encryptedContent: reasoningPart?.encryptedContent + } + } + }) + } + break + } + + case "response.reasoning_summary_text.delta": { + parts.push({ + type: "reasoning-delta", + id: `${event.item_id}:${event.summary_index}`, + delta: event.delta, + metadata: { openai: { itemId: event.item_id } } + }) + break + } + } + + return parts + })), + Stream.flattenIterables + ) + } +) + +// ============================================================================= +// Telemetry +// ============================================================================= + +const annotateRequest = ( + span: Span, + request: typeof Generated.CreateResponse.Encoded +): void => { + Telemetry.addGenAIAnnotations(span, { + system: "az.ai.openai", + operation: { name: "chat" }, + request: { + model: request.model, + temperature: request.temperature, + topP: request.top_p, + maxTokens: request.max_output_tokens + } + }) +} + +const annotateResponse = (span: Span, response: Generated.Response): void => { + 
const finishReason = response.incomplete_details?.reason + Telemetry.addGenAIAnnotations(span, { + response: { + id: response.id, + model: response.model, + finishReasons: Predicate.isNotUndefined(finishReason) ? [finishReason] : undefined + }, + usage: { + inputTokens: response.usage?.input_tokens, + outputTokens: response.usage?.output_tokens + } + }) +} + +const annotateStreamResponse = (span: Span, part: Response.StreamPartEncoded) => { + if (part.type === "response-metadata") { + Telemetry.addGenAIAnnotations(span, { + response: { + id: part.id, + model: part.modelId + } + }) + } + if (part.type === "finish") { + Telemetry.addGenAIAnnotations(span, { + response: { + finishReasons: [part.reason] + }, + usage: { + inputTokens: part.usage.inputTokens, + outputTokens: part.usage.outputTokens + } + }) + } +} + +// ============================================================================= +// Tool Calling +// ============================================================================= + +type OpenAiToolChoice = typeof Generated.CreateResponse.fields.tool_choice.from.Encoded + +const prepareTools: (options: LanguageModel.ProviderOptions) => Effect.Effect<{ + readonly tools: ReadonlyArray | undefined + readonly toolChoice: OpenAiToolChoice | undefined +}, AiError.AiError> = Effect.fnUntraced(function*(options) { + // Return immediately if no tools are in the toolkit + if (options.tools.length === 0) { + return { tools: undefined, toolChoice: undefined } + } + + const tools: Array = [] + let toolChoice: OpenAiToolChoice | undefined = undefined + + // Filter the incoming tools down to the set of allowed tools as indicated by + // the tool choice + let allowedTools = options.tools + if (typeof options.toolChoice === "object" && "oneOf" in options.toolChoice) { + const allowedToolNames = new Set(options.toolChoice.oneOf) + allowedTools = options.tools.filter((tool) => allowedToolNames.has(tool.name)) + toolChoice = options.toolChoice.mode === "required" ? 
// =============================================================================
// Tool Calling
// =============================================================================

type OpenAiToolChoice = typeof Generated.CreateResponse.fields.tool_choice.from.Encoded

/**
 * Converts the toolkit and tool-choice options into the Responses API
 * `tools` / `tool_choice` request fields.
 *
 * - User-defined tools become strict `function` tools with JSON schemas.
 * - Provider-defined tools are mapped to their hosted-tool wire type;
 *   unknown provider tool ids fail with `AiError.MalformedInput`.
 * - A `oneOf` tool choice is enforced client-side by filtering the tool
 *   list; only `"required"`/`"auto"` is communicated to the API.
 *
 * NOTE(review): the generic arguments on `ReadonlyArray`/`Array` appear to
 * have been lost in extraction — confirm against the original source.
 */
const prepareTools: (options: LanguageModel.ProviderOptions) => Effect.Effect<{
  readonly tools: ReadonlyArray | undefined
  readonly toolChoice: OpenAiToolChoice | undefined
}, AiError.AiError> = Effect.fnUntraced(function*(options) {
  // Return immediately if no tools are in the toolkit
  if (options.tools.length === 0) {
    return { tools: undefined, toolChoice: undefined }
  }

  const tools: Array = []
  let toolChoice: OpenAiToolChoice | undefined = undefined

  // Filter the incoming tools down to the set of allowed tools as indicated by
  // the tool choice
  let allowedTools = options.tools
  if (typeof options.toolChoice === "object" && "oneOf" in options.toolChoice) {
    const allowedToolNames = new Set(options.toolChoice.oneOf)
    allowedTools = options.tools.filter((tool) => allowedToolNames.has(tool.name))
    toolChoice = options.toolChoice.mode === "required" ? "required" : "auto"
  }

  // Convert the tools in the toolkit to the provider-defined format
  for (const tool of allowedTools) {
    if (Tool.isUserDefined(tool)) {
      tools.push({
        type: "function",
        name: tool.name,
        description: Tool.getDescription(tool as any),
        parameters: Tool.getJsonSchema(tool as any) as any,
        strict: true
      })
    }

    if (Tool.isProviderDefined(tool)) {
      switch (tool.id) {
        case "openai.code_interpreter": {
          tools.push({
            ...tool.args,
            type: "code_interpreter"
          })
          break
        }
        case "openai.file_search": {
          tools.push({
            ...tool.args,
            type: "file_search"
          })
          break
        }
        case "openai.web_search": {
          tools.push({
            ...tool.args,
            type: "web_search"
          })
          break
        }
        case "openai.web_search_preview": {
          tools.push({
            ...tool.args,
            type: "web_search_preview"
          })
          break
        }
        default: {
          return yield* new AiError.MalformedInput({
            module: "AzureFoundryLanguageModel",
            method: "prepareTools",
            description: `Received request to call unknown provider-defined tool '${tool.name}'`
          })
        }
      }
    }
  }

  if (options.toolChoice === "auto" || options.toolChoice === "none" || options.toolChoice === "required") {
    toolChoice = options.toolChoice
  }

  // NOTE(review): `getProviderDefinedToolName` is keyed by wire-level tool
  // types ("web_search", ...), so a choice naming a toolkit tool like
  // "OpenAiWebSearch" would fall into the `function` branch — confirm the
  // intended lookup direction for named tool choices.
  if (typeof options.toolChoice === "object" && "tool" in options.toolChoice) {
    toolChoice = Predicate.isUndefined(InternalUtilities.getProviderDefinedToolName(options.toolChoice.tool))
      ? { type: "function", name: options.toolChoice.tool }
      : { type: options.toolChoice.tool }
  }

  return { tools, toolChoice }
})

// =============================================================================
// Utilities
// =============================================================================

/**
 * Returns `true` when `data` matches one of the configured file-id prefixes
 * (e.g. OpenAI's `file-`); always `false` when no prefixes are configured.
 */
const isFileId = (data: string, config: Config.Service): boolean =>
  Predicate.isNotUndefined(config.fileIdPrefixes) && config.fileIdPrefixes.some((prefix) => data.startsWith(prefix))

/** Extracts the OpenAI item id a prompt part was originally produced under. */
const getItemId = (
  part:
    | Prompt.TextPart
    | Prompt.ToolCallPart
): string | undefined => part.options.openai?.itemId

/** Image detail requested for an image part, defaulting to `"auto"`. */
const getImageDetail = (part: Prompt.FilePart): typeof Generated.ImageDetail.Encoded =>
  part.options.openai?.imageDetail ?? "auto"

/**
 * Computes the `include` request field: starts from the configured includes
 * and adds the entries required by the hosted tools present in the toolkit
 * (code interpreter outputs, web search action sources).
 */
const prepareInclude = (
  options: LanguageModel.ProviderOptions,
  config: Config.Service
): ReadonlyArray => {
  // Set-based to de-duplicate config-provided and tool-derived entries
  const include: Set = new Set(config.include ?? [])

  const codeInterpreterTool = options.tools.find((tool) =>
    Tool.isProviderDefined(tool) &&
    tool.id === "openai.code_interpreter"
  ) as Tool.AnyProviderDefined | undefined

  if (Predicate.isNotUndefined(codeInterpreterTool)) {
    include.add("code_interpreter_call.outputs")
  }

  const webSearchTool = options.tools.find((tool) =>
    Tool.isProviderDefined(tool) &&
    (tool.id === "openai.web_search" ||
      tool.id === "openai.web_search_preview")
  ) as Tool.AnyProviderDefined | undefined

  if (Predicate.isNotUndefined(webSearchTool)) {
    include.add("web_search_call.action.sources")
  }

  return Array.from(include)
}

/**
 * Maps the requested response format to the Responses API text configuration:
 * a strict JSON schema for structured output, plain text otherwise.
 */
const prepareResponseFormat = (
  options: LanguageModel.ProviderOptions
): typeof Generated.TextResponseFormatConfiguration.Encoded => {
  if (options.responseFormat.type === "json") {
    const name = options.responseFormat.objectName
    const schema = options.responseFormat.schema
    return {
      type: "json_schema",
      name,
      description: Tool.getDescriptionFromSchemaAst(schema.ast) ?? "Response with a JSON object",
      schema: Tool.getJsonSchemaFromSchemaAst(schema.ast) as any,
      strict: true
    }
  }
  return { type: "text" }
}
/**
 * @internal
 */
import type * as Response from "@effect/ai/Response"
import * as Predicate from "effect/Predicate"

/** @internal */
export const ProviderOptionsKey = "@effect/ai-azure-foundry/AzureFoundryLanguageModel/ProviderOptions"

/** @internal */
export const ProviderMetadataKey = "@effect/ai-azure-foundry/AzureFoundryLanguageModel/ProviderMetadata"

// Maps raw OpenAI incomplete/finish reasons onto @effect/ai finish reasons.
// NOTE(review): the generic arguments on `Record` and `Map` below appear to
// have been lost in extraction — confirm against the original source.
const finishReasonMap: Record = {
  content_filter: "content-filter",
  function_call: "tool-calls",
  length: "length",
  stop: "stop",
  tool_calls: "tool-calls"
}

// Wire-level hosted tool types -> toolkit tool names
const providerToolNamesMap: Map = new Map([
  ["code_interpreter", "OpenAiCodeInterpreter"],
  ["file_search", "OpenAiFileSearch"],
  ["web_search", "OpenAiWebSearch"],
  ["web_search_preview", "OpenAiWebSearchPreview"]
])

/** @internal */
export const getProviderDefinedToolName = (name: string): string | undefined => providerToolNamesMap.get(name)

/**
 * Resolves a provider finish reason to the `@effect/ai` equivalent.
 *
 * A missing reason means the response completed normally: `"tool-calls"` when
 * tool calls were observed, `"stop"` otherwise. An unmapped reason degrades to
 * `"tool-calls"`/`"unknown"` rather than failing.
 *
 * @internal
 */
export const resolveFinishReason = (
  finishReason: string | undefined,
  hasToolCalls: boolean
): Response.FinishReason => {
  if (Predicate.isNullable(finishReason)) {
    return hasToolCalls ? "tool-calls" : "stop"
  }
  const reason = finishReasonMap[finishReason]
  if (Predicate.isNullable(reason)) {
    return hasToolCalls ? "tool-calls" : "unknown"
  }
  return reason
}
".tsbuildinfo/src.tsbuildinfo", + "rootDir": "src", + "outDir": "build/src", + "exactOptionalPropertyTypes": false + } +} diff --git a/packages/ai/azure-foundry/tsconfig.test.json b/packages/ai/azure-foundry/tsconfig.test.json new file mode 100644 index 00000000000..95f3ae8f876 --- /dev/null +++ b/packages/ai/azure-foundry/tsconfig.test.json @@ -0,0 +1,14 @@ +{ + "extends": "../../../tsconfig.base.json", + "include": ["test"], + "references": [ + { "path": "tsconfig.src.json" }, + { "path": "../../vitest/tsconfig.src.json" } + ], + "compilerOptions": { + "tsBuildInfoFile": ".tsbuildinfo/test.tsbuildinfo", + "rootDir": "test", + "noEmit": true, + "exactOptionalPropertyTypes": false + } +} diff --git a/packages/ai/azure-foundry/vitest.config.ts b/packages/ai/azure-foundry/vitest.config.ts new file mode 100644 index 00000000000..bf29895f858 --- /dev/null +++ b/packages/ai/azure-foundry/vitest.config.ts @@ -0,0 +1,6 @@ +import { mergeConfig, type ViteUserConfig } from "vitest/config" +import shared from "../../../vitest.shared.js" + +const config: ViteUserConfig = {} + +export default mergeConfig(shared, config) diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index c5cb9346f9f..225701f5475 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -239,6 +239,28 @@ importers: version: link:../../effect publishDirectory: dist + packages/ai/azure-foundry: + devDependencies: + '@effect/ai': + specifier: workspace:^ + version: link:../ai + '@effect/ai-openai': + specifier: workspace:^ + version: link:../openai + '@effect/experimental': + specifier: workspace:^ + version: link:../../experimental + '@effect/platform': + specifier: workspace:^ + version: link:../../platform + '@effect/platform-node': + specifier: workspace:^ + version: link:../../platform-node + effect: + specifier: workspace:^ + version: link:../../effect + publishDirectory: dist + packages/ai/google: devDependencies: '@effect/ai': diff --git a/tsconfig.base.json b/tsconfig.base.json index 7cc6f2eb2da..3c5c6b4e307 
100644 --- a/tsconfig.base.json +++ b/tsconfig.base.json @@ -41,6 +41,7 @@ "effect", "@effect/ai", "@effect/ai-anthropic", + "@effect/ai-azure-foundry", "@effect/ai-openai", "@effect/cli", "@effect/cluster",