Merged
@@ -1,6 +1,7 @@
package com.datadog.appsec.php.docker

import com.datadog.appsec.php.mock_agent.MockDatadogAgent
import com.datadog.appsec.php.mock_openai.MockOpenAIServer
import com.datadog.appsec.php.mock_agent.rem_cfg.RemoteConfigRequest
import com.datadog.appsec.php.mock_agent.rem_cfg.RemoteConfigResponse
import com.datadog.appsec.php.mock_agent.rem_cfg.Target
@@ -55,7 +56,7 @@ class AppSecContainer<SELF extends AppSecContainer<SELF>> extends GenericContainer
.connectTimeout(Duration.ofSeconds(5))
.build()

private MockDatadogAgent mockDatadogAgent = new MockDatadogAgent()

AppSecContainer(Map options) {
super(imageNameFuture(options))
@@ -0,0 +1,153 @@
package com.datadog.appsec.php.mock_openai

import groovy.json.JsonSlurper
import groovy.transform.CompileStatic
import groovy.util.logging.Slf4j
import io.javalin.Javalin
import io.javalin.http.Context
import org.testcontainers.lifecycle.Startable

@Slf4j
@CompileStatic
class MockOpenAIServer implements Startable {
Javalin httpServer

@Override
void start() {
this.httpServer = Javalin.create(config -> {
config.showJavalinBanner = false
})

// Support both /path and /v1/path for OpenAI client compatibility
this.httpServer.post('/chat/completions', this.&handleChatCompletions)
this.httpServer.post('/v1/chat/completions', this.&handleChatCompletions)
this.httpServer.post('/completions', this.&handleCompletions)
this.httpServer.post('/v1/completions', this.&handleCompletions)
this.httpServer.post('/responses', this.&handleResponses)
this.httpServer.post('/v1/responses', this.&handleResponses)

this.httpServer.error(404, ctx -> {
log.info("Unmatched OpenAI mock request: ${ctx.method()} ${ctx.path()}")
ctx.status(404).json(['error': 'Not Found'])
})
this.httpServer.error(405, ctx -> {
ctx.status(405).json(['error': 'Method Not Allowed'])
})

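// Port 0 binds to an ephemeral port chosen by the OS; getPort() reports the actual value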
this.httpServer.start(0)
}

int getPort() {
this.httpServer.port()
}
Comment on lines +40 to +42
Contributor
The same effect could be achieved in Groovy by not making PORT private: with no access modifiers, Groovy generates getters/setters.
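A minimal sketch of the Groovy property behavior the reviewer is describing; the Endpoint class and its fields are hypothetical, for illustration only:

class Endpoint {                      // hypothetical example class
    int port                          // no modifier: Groovy generates getPort()/setPort(int)
    private String host = 'localhost' // explicit modifier: no accessors are generated
}

def e = new Endpoint(port: 8080)      // the map constructor goes through the generated setter
assert e.getPort() == 8080            // generated getter
e.port = 9090                         // property syntax calls the generated setter
assert e.port == 9090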


@Override
void stop() {
if (httpServer != null) {
this.httpServer.stop()
this.httpServer = null
}
}

private static Map<String, ?> parseBody(Context ctx) {
String raw = ctx.body()
if (raw == null || raw.isEmpty()) {
return [:]
}
try {
def decoded = new JsonSlurper().parseText(raw)
return decoded instanceof Map ? (Map<String, ?>) decoded : [:]
} catch (Exception ignored) {
// Malformed JSON is treated the same as an empty body
return [:]
}
}

private static Map<String, ?> fakeUsage() {
[
'prompt_tokens' : 1,
'completion_tokens': 2,
'total_tokens' : 3,
]
}

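// The handlers below return minimal, statically shaped payloads that mirror the corresponding OpenAI REST responses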
private void handleChatCompletions(Context ctx) {
Map<String, ?> body = parseBody(ctx)
String model = (body['model'] as String) ?: 'gpt-4.1'
ctx.json([
'id' : 'chatcmpl-fake-internal',
'object' : 'chat.completion',
'created': (long)(System.currentTimeMillis() / 1000),
'model' : model,
'choices': [
[
'index' : 0,
'message' : [
'role' : 'assistant',
'content': 'Fake response from internal_server mock.',
],
'finish_reason': 'stop',
],
],
'usage' : fakeUsage(),
])
}

private void handleCompletions(Context ctx) {
Map<String, ?> body = parseBody(ctx)
String model = (body['model'] as String) ?: 'text-davinci-003'
ctx.json([
'id' : 'cmpl-fake-internal',
'object' : 'text_completion',
'created': (long)(System.currentTimeMillis() / 1000),
'model' : model,
'choices': [
[
'text' : 'Fake completion from internal_server mock.',
'index' : 0,
'finish_reason': 'stop',
'logprobs' : null,
],
],
'usage' : fakeUsage(),
])
}

private void handleResponses(Context ctx) {
Map<String, ?> body = parseBody(ctx)
String model = (body['model'] as String) ?: 'gpt-4.1'
ctx.json([
'id' : 'resp-fake-internal',
'object' : 'response',
'created_at' : (long)(System.currentTimeMillis() / 1000),
'status' : 'completed',
'model' : model,
'output' : [
[
'type' : 'message',
'id' : 'msg-fake-internal',
'role' : 'assistant',
'status' : 'completed',
'content': [
[
'type' : 'output_text',
'text' : 'Fake response from internal_server mock.',
'annotations': [],
],
],
],
],
'output_text' : 'Fake response from internal_server mock.',
'parallel_tool_calls' : false,
'tool_choice' : 'none',
'tools' : [],
'store' : true,
'usage' : [
'input_tokens' : 1,
'input_tokens_details' : ['cached_tokens': 0],
'output_tokens' : 2,
'output_tokens_details' : ['reasoning_tokens': 0],
'total_tokens' : 3,
],
])
}
}
@@ -0,0 +1,107 @@
package com.datadog.appsec.php.integration

import com.datadog.appsec.php.docker.AppSecContainer
import com.datadog.appsec.php.docker.FailOnUnmatchedTraces
import com.datadog.appsec.php.docker.InspectContainerHelper
import com.datadog.appsec.php.mock_openai.MockOpenAIServer
import com.datadog.appsec.php.model.Span
import com.datadog.appsec.php.model.Trace
import org.junit.jupiter.api.Test
import org.junit.jupiter.api.condition.EnabledIf
import org.testcontainers.junit.jupiter.Container
import org.testcontainers.junit.jupiter.Testcontainers

import java.net.http.HttpResponse

import static com.datadog.appsec.php.integration.TestParams.getPhpVersion
import static com.datadog.appsec.php.integration.TestParams.getVariant
import static com.datadog.appsec.php.integration.TestParams.phpVersionAtLeast

@Testcontainers
@EnabledIf('isExpectedVersion')
class LlmEventsTests {
static final String MODEL = 'gpt-4.1'
static boolean expectedVersion = phpVersionAtLeast('8.2') && !variant.contains('zts')

AppSecContainer getContainer() {
getClass().CONTAINER
}

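// Implements Startable, so the dependsOn declaration in the container below has Testcontainers start it first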
public static final MockOpenAIServer mockOpenAIServer = new MockOpenAIServer()

@Container
@FailOnUnmatchedTraces
public static final AppSecContainer CONTAINER =
new AppSecContainer(
workVolume: this.name,
baseTag: 'apache2-mod-php',
phpVersion: phpVersion,
phpVariant: variant,
www: 'llm',
) {
{
dependsOn mockOpenAIServer
}

@Override
void configure() {
super.configure()
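// The mock OpenAI server runs on the host JVM; expose its port so the PHP container can reach it at host.testcontainers.internal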
org.testcontainers.Testcontainers.exposeHostPorts(mockOpenAIServer.port)
withEnv('OPENAI_BASE_URL', "http://host.testcontainers.internal:${mockOpenAIServer.port}/v1")
}
}

static void main(String[] args) {
InspectContainerHelper.run(CONTAINER)
}

/** Common assertions for LLM endpoint spans. */
static void assertLlmSpan(Trace trace, String model) {
Span span = trace.first()
assert span.meta.'appsec.events.llm.call.provider' == 'openai'
assert span.meta.'appsec.events.llm.call.model' == model
assert span.metrics._sampling_priority_v1 == 2.0d
}

@Test
void 'OpenAI latest responses create'() {
def trace = container.traceFromRequest("/llm.php?model=${MODEL}&operation=openai-latest-responses.create") { HttpResponse<InputStream> resp ->
assert resp.statusCode() == 200
}
assertLlmSpan(trace, MODEL)
}

@Test
void 'OpenAI latest chat completions create'() {
def trace = container.traceFromRequest("/llm.php?model=${MODEL}&operation=openai-latest-chat.completions.create") { HttpResponse<InputStream> resp ->
assert resp.statusCode() == 200
}
assertLlmSpan(trace, MODEL)
}

@Test
void 'OpenAI latest completions create'() {
def trace = container.traceFromRequest("/llm.php?model=${MODEL}&operation=openai-latest-completions.create") { HttpResponse<InputStream> resp ->
assert resp.statusCode() == 200
}
assertLlmSpan(trace, MODEL)
}

@Test
void 'Root has no LLM tags'() {
def trace = container.traceFromRequest('/hello.php') { HttpResponse<InputStream> resp ->
assert resp.statusCode() == 200
}
Span span = trace.first()
assert !span.meta.containsKey('appsec.events.llm.call.provider')
assert !span.meta.containsKey('appsec.events.llm.call.model')
}
}
44 changes: 44 additions & 0 deletions appsec/tests/integration/src/test/waf/recommended.json
@@ -6933,6 +6933,50 @@
"on_match": [
"stack_trace"
]
},
{
"id": "llm-001-000",
"name": "LLM call",
"tags": {
"type": "llm.event",
"category": "business_logic",
"module": "business_logic"
},
"min_version": "1.25.0",
"conditions": [
{
"parameters": {
"inputs": [
{
"address": "server.business_logic.llm.event",
"key_path": [
"provider"
]
}
]
},
"operator": "exists"
}
],
"transformers": [],
"output": {
"event": false,
"keep": true,
"attributes": {
"appsec.events.llm.call.provider": {
"address": "server.business_logic.llm.event",
"key_path": [
"provider"
]
},
"appsec.events.llm.call.model": {
"address": "server.business_logic.llm.event",
"key_path": [
"model"
]
}
}
}
}
],
"rules_compat": [
8 changes: 8 additions & 0 deletions appsec/tests/integration/src/test/www/llm/composer.json
@@ -0,0 +1,8 @@
{
"name": "datadog/appsec-integration-tests",
"type": "project",
"require": {
"openai-php/client": "*",
"guzzlehttp/guzzle": "*"
}
}
6 changes: 6 additions & 0 deletions appsec/tests/integration/src/test/www/llm/initialize.sh
@@ -0,0 +1,6 @@
#!/bin/bash -e

cd /var/www

composer install --no-dev
chown -R www-data:www-data vendor
8 changes: 8 additions & 0 deletions appsec/tests/integration/src/test/www/llm/public/hello.php
@@ -0,0 +1,8 @@
<?php
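// Presumably a deliberately bogus encoding, to exercise handling of unrecognized Content-Encoding values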
header('Content-Encoding: foobar');
header('Content-Language: en');

$content = "Hello world!";

echo $content;
