-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathcompose.yaml
More file actions
347 lines (331 loc) · 12.8 KB
/
compose.yaml
File metadata and controls
347 lines (331 loc) · 12.8 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
# Local development databases for sqlgo. Bring up with:
# podman compose up -d
# or a single service:
# podman compose up -d mssql
#
# Credentials are dev-only. Do not reuse in production.
#
# Port allocations use 1x prefixes so they don't collide with any
# real DB servers a developer might be running locally:
# MSSQL -> 11433
# Postgres -> 15432
# MySQL -> 13306
# Oracle -> 11521
# Firebird -> 13050
# libSQL -> 18080 (hrana HTTP)
# Sybase -> 15000
# ClickHouse -> 19000 (native TCP), 18123 (HTTP)
# Trino -> 18081 (HTTP coordinator)
# Vertica -> 15433 (auth profile -- opentext/vertica-ce requires `podman login docker.io`)
# HANA -> 13901 (heavy profile -- ~20GB image, 8GB+ RAM)
# Spanner -> 19010 (emulator gRPC), 19020 (emulator REST)
# BigQuery -> 19050 (emulator REST), 19060 (emulator gRPC)
# Flight SQL -> 19070 (gRPC)
# sshd -> 12222
#
# The integration test suite in internal/db/<driver>/ reads these
# ports from env vars (SQLGO_IT_*_*); see each *_integration_test.go.
# D1 has no self-hostable image; the D1 integration test stands up
# a fake Cloudflare REST server in-process (see d1_integration_test.go).
services:
  # SQL Server 2022 (Developer edition). No healthcheck is defined here
  # on purpose: the mssql-init one-shot container below does its own
  # readiness polling before seeding.
  mssql:
    image: mcr.microsoft.com/mssql/server:2022-latest
    container_name: sqlgo-mssql
    environment:
      # Quoted "Y": a bare Y is a boolean under YAML 1.1 parsers.
      ACCEPT_EULA: "Y"
      # Dev-only SA password (see file header); must satisfy the
      # server's complexity rules or the container exits at boot.
      MSSQL_SA_PASSWORD: "SqlGo_dev_Pass1!"
      MSSQL_PID: "Developer"
    ports:
      - "11433:1433"  # host 11433 -> container 1433; quoted so YAML never retypes it
# One-shot init. Waits for the server, then creates two user DBs
# (SqlgoA, SqlgoB) so the per-tab activeCatalog flow has something
# to switch between. Idempotent -- re-run is a no-op.
# Reuses the mssql server image because it already ships
# /opt/mssql-tools18/bin/sqlcmd. The standalone
# mcr.microsoft.com/mssql-tools18 image has no :latest tag -- only
# versioned pins -- so reusing the server image keeps this compose
# file free of a version that drifts.
mssql-init:
image: mcr.microsoft.com/mssql/server:2022-latest
container_name: sqlgo-mssql-init
depends_on:
- mssql
entrypoint: /bin/bash
command:
- -c
- |
for i in $$(seq 1 30); do
/opt/mssql-tools18/bin/sqlcmd -S mssql -U sa -P 'SqlGo_dev_Pass1!' -C -Q "SELECT 1" && break
echo "waiting for mssql ($$i)..."; sleep 2
done
/opt/mssql-tools18/bin/sqlcmd -S mssql -U sa -P 'SqlGo_dev_Pass1!' -C -Q "
IF DB_ID('SqlgoA') IS NULL CREATE DATABASE SqlgoA;
IF DB_ID('SqlgoB') IS NULL CREATE DATABASE SqlgoB;
"
/opt/mssql-tools18/bin/sqlcmd -S mssql -d SqlgoA -U sa -P 'SqlGo_dev_Pass1!' -C -Q "
IF OBJECT_ID('dbo.widgets','U') IS NULL
CREATE TABLE dbo.widgets (id INT PRIMARY KEY, name NVARCHAR(50));
IF NOT EXISTS (SELECT 1 FROM dbo.widgets)
INSERT dbo.widgets VALUES (1,'alpha-A'),(2,'beta-A');
"
/opt/mssql-tools18/bin/sqlcmd -S mssql -d SqlgoB -U sa -P 'SqlGo_dev_Pass1!' -C -Q "
IF OBJECT_ID('dbo.gadgets','U') IS NULL
CREATE TABLE dbo.gadgets (id INT PRIMARY KEY, label NVARCHAR(50));
IF NOT EXISTS (SELECT 1 FROM dbo.gadgets)
INSERT dbo.gadgets VALUES (1,'gizmo-B'),(2,'widget-B');
"
  # Postgres 16 (Alpine). The POSTGRES_* env vars create the role and
  # database on first boot; the healthcheck reports readiness via
  # pg_isready.
  postgres:
    image: postgres:16-alpine
    container_name: sqlgo-postgres
    environment:
      POSTGRES_USER: sqlgo
      POSTGRES_PASSWORD: sqlgo_dev
      POSTGRES_DB: sqlgo_test
    ports:
      - "15432:5432"  # host 15432 -> standard Postgres port 5432
    healthcheck:
      # pg_isready exits 0 once the server accepts connections.
      test: ["CMD", "pg_isready", "-U", "sqlgo", "-d", "sqlgo_test"]
      interval: 5s
      timeout: 3s
      retries: 10
  # MySQL 8.4. The MYSQL_* env vars create the database and a non-root
  # user on first boot.
  mysql:
    image: mysql:8.4
    container_name: sqlgo-mysql
    environment:
      MYSQL_ROOT_PASSWORD: sqlgo_dev
      MYSQL_DATABASE: sqlgo_test
      MYSQL_USER: sqlgo
      MYSQL_PASSWORD: sqlgo_dev
    ports:
      - "13306:3306"  # host 13306 -> standard MySQL port 3306
    healthcheck:
      # Pinging as the app user (not root) confirms auth works, not
      # just that the port is open.
      test: ["CMD", "mysqladmin", "ping", "-h", "localhost", "-usqlgo", "-psqlgo_dev"]
      interval: 5s
      timeout: 3s
      retries: 10
  # Oracle Free edition. ~1.5GB image; first boot takes 60-120s while
  # the DB initializes. The "-faststart" flavor skips the full
  # db-recreate and is the right choice for dev/test containers.
  #
  # ORACLE_PASSWORD sets the SYS/SYSTEM password. APP_USER creates a
  # non-privileged schema used by the integration tests.
  oracle:
    image: gvenzl/oracle-free:23-slim-faststart
    container_name: sqlgo-oracle
    environment:
      ORACLE_PASSWORD: sqlgo_dev
      APP_USER: sqlgo
      APP_USER_PASSWORD: sqlgo_dev
    ports:
      - "11521:1521"  # host 11521 -> standard Oracle listener port 1521
    healthcheck:
      # healthcheck.sh is bundled with the gvenzl image (on PATH).
      test: ["CMD", "healthcheck.sh"]
      interval: 10s
      timeout: 5s
      retries: 20
      # Generous grace period to cover the slow first boot noted above.
      start_period: 60s
  # Firebird 5.x. FIREBIRD_DATABASE seeds an initial database owned by
  # the APP user; the integration test connects to it directly.
  firebird:
    image: firebirdsql/firebird:5
    container_name: sqlgo-firebird
    environment:
      # SYSDBA password; separate from the app user below.
      FIREBIRD_ROOT_PASSWORD: sqlgo_dev
      FIREBIRD_DATABASE: sqlgo_test.fdb
      FIREBIRD_USER: sqlgo
      FIREBIRD_PASSWORD: sqlgo_dev
    ports:
      - "13050:3050"  # host 13050 -> standard Firebird port 3050
# libSQL server (Turso's hrana-compatible daemon). Runs the embedded
# SQLite on disk; auth is disabled for local dev. The integration
# test hits http://localhost:18080 with an empty token.
libsql:
image: ghcr.io/tursodatabase/libsql-server:latest
container_name: sqlgo-libsql
environment:
SQLD_NODE: primary
SQLD_HTTP_LISTEN_ADDR: 0.0.0.0:8080
ports:
- "18080:8080"
  # Sybase ASE 16 dev edition. datagrip/sybase is the image JetBrains
  # uses for DataGrip's sybase connector tests; boots cleanly on
  # Podman/Linux hosts where the nguoianphu image segfaults. Ships a
  # preseeded `testdb` database owned by `tester`/`guest1234`. First
  # boot takes ~60s while dataserver initializes.
  # No env vars or healthcheck: the image is preconfigured and exposes
  # no documented readiness probe, so tests retry on connect instead.
  sybase:
    image: datagrip/sybase:16.0
    container_name: sqlgo-sybase
    ports:
      - "15000:5000"  # host 15000 -> ASE dataserver port 5000
  # ClickHouse server. Official image ships with a passwordless
  # `default` user bound to 127.0.0.1 only; CLICKHOUSE_DEFAULT_ACCESS_
  # MANAGEMENT=1 + listening on :: lets the host reach it on 19000
  # (native TCP, what clickhouse-go/v2 uses) and 18123 (HTTP, handy
  # for clickhouse-client curl checks). First boot ~10s.
  #
  # Native TLS is enabled on 9440 (host 19440) via the mounted
  # config.d/tls.xml + certs from ./compose/clickhouse/certs. The TLS
  # listener accepts plain-TLS clients that only verify the server and
  # mTLS clients that present a client cert signed by our dev CA --
  # verificationMode=relaxed enforces chain validity without requiring
  # a cert on every connection. Used by the ClickHouseTLS / MTLS
  # integration tests in internal/db/clickhouse/.
  clickhouse:
    image: clickhouse/clickhouse-server:24-alpine
    container_name: sqlgo-clickhouse
    environment:
      CLICKHOUSE_DEFAULT_ACCESS_MANAGEMENT: "1"
      CLICKHOUSE_SKIP_USER_SETUP: "1"
    ports:
      - "19000:9000"  # native TCP
      - "19440:9440"  # native TLS (see comment above)
      - "18123:8123"  # HTTP
    volumes:
      # :ro -- the server only reads these; keeps host certs immutable.
      - ./compose/clickhouse/config/tls.xml:/etc/clickhouse-server/config.d/tls.xml:ro
      - ./compose/clickhouse/certs:/etc/clickhouse-server/certs:ro
    ulimits:
      # Raise the open-file limit; ClickHouse holds many files/sockets
      # open and warns on low limits.
      nofile:
        soft: 262144
        hard: 262144
    healthcheck:
      # HTTP /ping returns 200 once the server is serving requests.
      test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://localhost:8123/ping"]
      interval: 5s
      timeout: 3s
      retries: 20
  # Trino coordinator (single-node). The official trinodb/trino image
  # ships with the "memory", "jmx", "tpch", and "tpcds" catalogs enabled
  # by default -- "memory" is writable with no persistence, which is all
  # the integration test needs. No auth on HTTP; basic auth requires
  # HTTPS per the driver, so we stick to plain HTTP for dev. First boot
  # takes ~15-30s while the coordinator finishes starting workers.
  trino:
    image: trinodb/trino:latest
    container_name: sqlgo-trino
    ports:
      - "18081:8080"  # host 18081 -> coordinator HTTP port 8080
    healthcheck:
      # health-check script ships inside the trino image.
      test: ["CMD", "/usr/lib/trino/bin/health-check"]
      interval: 10s
      timeout: 5s
      retries: 20
      # Grace period for the slow coordinator start noted above.
      start_period: 30s
  # Vertica Community Edition (single-node). The official opentext/vertica-ce
  # image boots a passwordless `dbadmin` superuser and a pre-created
  # VMart database. First boot takes ~60s while the bootstrap scripts
  # finish. No auth is enforced on localhost, so tests supply an empty
  # password by default. Image size is ~2GB -- pull once and cache.
  #
  # OpenText now requires an authenticated Docker Hub pull (anonymous
  # pulls get "denied"). Gate behind the `auth` profile so the default
  # `podman compose up -d` stays login-free. To opt in:
  #   podman login docker.io
  #   podman compose --profile auth up -d vertica
  vertica:
    image: opentext/vertica-ce:latest
    container_name: sqlgo-vertica
    # Excluded from a plain `up -d`; only starts with --profile auth.
    profiles: ["auth"]
    ports:
      - "15433:5433"  # host 15433 -> standard Vertica port 5433
    healthcheck:
      # vsql SELECT 1 as passwordless dbadmin; output discarded, only
      # the exit status matters.
      test: ["CMD-SHELL", "/opt/vertica/bin/vsql -U dbadmin -c 'SELECT 1' >/dev/null 2>&1"]
      interval: 10s
      timeout: 5s
      retries: 30
      start_period: 60s
  # SAP HANA Express (HXE). saplabs/hanaexpress is the official dev
  # image; ~20GB download and needs 8GB+ of RAM plus license accept,
  # so it lives under the `heavy` compose profile and is NOT pulled
  # by the default `podman compose up -d`. Opt in with:
  #   podman compose --profile heavy up -d hana
  # First boot takes several minutes while the tenant DB HXE comes
  # online. Container port 39017 is the HXE tenant SQL port (systemDB
  # lives on 39013). SYSTEM password is set via the master-password
  # file baked into /etc/entry-hdb-master-password.txt at start-up.
  hana:
    image: saplabs/hanaexpress:latest
    container_name: sqlgo-hana
    # Excluded from a plain `up -d`; only starts with --profile heavy.
    profiles: ["heavy"]
    # saplabs/hanaexpress wants the license flag and a master password
    # for the SYSTEM account; password rules require >=8 chars, one
    # upper, one lower, one digit. HXEHana1 satisfies that.
    command:
      - --agree-to-sap-license
      - --dont-check-version
      - --master-password
      - HXEHana1
    # Resource knobs for the in-container HANA processes; values are
    # quoted strings so YAML never re-types them.
    ulimits:
      nofile:
        soft: 1048576
        hard: 1048576
    sysctls:
      kernel.shmmax: "1073741824"
      net.ipv4.ip_local_port_range: "40000 60999"
    ports:
      - "13901:39017"  # tenant DB (HXE) SQL port
      - "13913:39013"  # systemDB SQL port
  # Cloud Spanner Emulator. gcr.io/cloud-spanner-emulator/emulator is
  # Google's official local emulator -- implements the Spanner API over
  # plain-text gRPC (no TLS, no auth). The integration test connects
  # with autoConfigEmulator=true, which tells go-sql-spanner to use
  # plain-text gRPC AND auto-create the target instance + database on
  # first Open, so there's no seed step for Spanner. Ports: 9010 gRPC
  # (what the driver uses), 9020 REST (handy for curl checks).
  # No env vars, command, or healthcheck needed: the emulator starts
  # serving immediately with defaults.
  spanner:
    image: gcr.io/cloud-spanner-emulator/emulator:latest
    container_name: sqlgo-spanner
    ports:
      - "19010:9010"  # gRPC
      - "19020:9020"  # REST
  # BigQuery Emulator. ghcr.io/goccy/bigquery-emulator is the de-facto
  # local emulator -- implements the BigQuery REST + gRPC surface with
  # a SQLite-backed engine. No official Google emulator exists. The
  # --project flag seeds a project so the driver's Open() + Ping()
  # dry-run succeed without any prior provisioning; --dataset seeds a
  # default dataset that the integration test uses for fixture tables.
  # Ports: 9050 REST (what the Go client hits via WithEndpoint), 9060
  # gRPC (unused by cloud.google.com/go/bigquery, published for tools).
  # Auth is disabled: the driver auto-implies WithoutAuthentication when
  # Host+Port resolve to an endpoint without explicit credentials.
  bigquery:
    image: ghcr.io/goccy/bigquery-emulator:latest
    container_name: sqlgo-bigquery
    command:
      - --project=sqlgo-emu
      - --dataset=sqlgo_test
      # Keep container logs quiet; the emulator is chatty at info level.
      - --log-level=warn
    ports:
      - "19050:9050"  # REST
      - "19060:9060"  # gRPC
  # Arrow Flight SQL server (voltrondata/sqlflite). Uses the SQLite
  # backend (DuckDB backend segfaults on DDL via DoPut). TLS is disabled
  # for local dev; basic auth uses the default username sqlflite_username
  # (setting SQLFLITE_USERNAME causes a segfault in current builds).
  # gRPC listens on 31337 in-container, mapped to host 19070. First boot ~5s.
  flightsql:
    image: voltrondata/sqlflite:latest
    container_name: sqlgo-flightsql
    environment:
      SQLFLITE_PASSWORD: "sqlgo_dev"
      # "0"/"sqlite"/":memory:" quoted -- the image reads env as text
      # and quoting stops YAML from re-typing the values.
      TLS_ENABLED: "0"
      DATABASE_BACKEND: "sqlite"
      DATABASE_FILENAME: ":memory:"
    ports:
      - "19070:31337"  # host 19070 -> in-container gRPC 31337
  # sshd is the bastion used by the sshtunnel integration test. It
  # forwards to the postgres service on the compose network so the
  # tunnel path can be exercised end-to-end. Authenticates with
  # password "sqlgo_dev" for user "sqlgo".
  sshd:
    image: linuxserver/openssh-server:latest
    container_name: sqlgo-sshd
    environment:
      # linuxserver convention: run as this uid/gid inside the container.
      PUID: "1000"
      PGID: "1000"
      # Strings, not booleans: the image reads env values as text.
      PASSWORD_ACCESS: "true"
      USER_NAME: "sqlgo"
      USER_PASSWORD: "sqlgo_dev"
      # linuxserver mod that (per its name) enables SSH tunneling on
      # this image -- NOTE(review): confirm it sets AllowTcpForwarding.
      DOCKER_MODS: "linuxserver/mods:openssh-server-ssh-tunnel"
    ports:
      - "12222:2222"  # linuxserver openssh images listen on 2222, not 22
    depends_on:
      # Start ordering only (no health condition): the tunnel target
      # should exist on the network before the bastion is used.
      - postgres