diff --git a/containers/ceph-dev/Dockerfile b/containers/ceph-dev/Dockerfile
new file mode 100644
index 0000000..ba82082
--- /dev/null
+++ b/containers/ceph-dev/Dockerfile
@@ -0,0 +1,46 @@
+
+# Adapted from https://github.com/ceph/go-ceph
+ARG CEPH_IMG=quay.io/ceph/ceph
+ARG CEPH_TAG=v19
+FROM ${CEPH_IMG}:${CEPH_TAG}
+
+SHELL ["/bin/bash", "-o", "pipefail", "-c"]
+
+# Provide a few quality-of-life tools plus dependencies that the micro-ceph
+# bootstrap script relies on (uuidgen, getent, jq, etc.).
+# Resolve the package manager once (microdnf on slim bases, dnf otherwise);
+# both accept the same flags, so the package list is not duplicated per branch.
+RUN set -euxo pipefail \
+    && pkg_mgr="dnf" \
+    && if command -v microdnf >/dev/null 2>&1; then pkg_mgr="microdnf"; fi \
+    && "${pkg_mgr}" install -y --setopt=install_weak_deps=0 --setopt=tsflags=nodocs \
+        bash-completion \
+        findutils \
+        hostname \
+        iproute \
+        jq \
+        less \
+        procps-ng \
+        python3.11 \
+        python3.11-devel \
+        python3.11-pip \
+        util-linux \
+        vim-minimal \
+    && "${pkg_mgr}" clean all
+
+# Pin the uv installer to UV_VERSION (versioned URL) instead of silently
+# fetching whatever is latest; previously the ARG was declared but unused.
+ARG UV_VERSION=v0.4.23
+RUN curl -LsSf "https://astral.sh/uv/${UV_VERSION#v}/install.sh" -o /tmp/uv-install.sh \
+    && chmod +x /tmp/uv-install.sh \
+    && UV_INSTALL_DIR=/usr/local/bin /tmp/uv-install.sh \
+    && rm -f /tmp/uv-install.sh
+
+COPY micro-osd.sh /usr/local/bin/micro-osd.sh
+COPY entrypoint.sh /usr/local/bin/ceph-dev-entrypoint.sh
+RUN chmod +x /usr/local/bin/micro-osd.sh /usr/local/bin/ceph-dev-entrypoint.sh
+
+ENV CEPH_DATA_DIR=/var/lib/ceph-dev \
+    CEPH_BOOTSTRAP=auto \
+    CEPH_STATUS_ON_START=true
+ENTRYPOINT ["/usr/local/bin/ceph-dev-entrypoint.sh"]
+CMD ["/bin/bash"]
diff --git a/containers/ceph-dev/entrypoint.sh b/containers/ceph-dev/entrypoint.sh
new file mode 100644
index 0000000..37ed499
--- /dev/null
+++ b/containers/ceph-dev/entrypoint.sh
@@ -0,0
+1,86 @@
+#!/usr/bin/env bash
+set -euo pipefail
+
+MICRO_OSD_BIN=${MICRO_OSD_BIN:-/usr/local/bin/micro-osd.sh}
+DATA_DIR=${CEPH_DATA_DIR:-/var/lib/ceph-dev}
+READY_FILE="${DATA_DIR}/.ready"
+DEFAULT_FEATURES="mon osd mgr mds rgw selftest"
+CEPH_FEATURESET="${CEPH_FEATURESET:-${DEFAULT_FEATURES}}"
+CEPH_RESET="${CEPH_RESET:-false}"
+CEPH_BOOTSTRAP="${CEPH_BOOTSTRAP:-auto}"
+CEPH_STATUS_ON_START="${CEPH_STATUS_ON_START:-true}"
+
+mkdir -p "${DATA_DIR}"
+export CEPH_CONF="${DATA_DIR}/ceph.conf"
+
+bootstrap_cluster() {
+  if [[ "${CEPH_RESET}" == "true" ]]; then
+    echo "[ceph-dev] Reset requested, wiping ${DATA_DIR}"
+    rm -rf "${DATA_DIR}"
+    mkdir -p "${DATA_DIR}"
+  elif [[ "${CEPH_BOOTSTRAP}" == "always" ]]; then
+    echo "[ceph-dev] CEPH_BOOTSTRAP=always, reinitializing ${DATA_DIR}"
+    rm -rf "${DATA_DIR}"
+    mkdir -p "${DATA_DIR}"
+  fi
+
+  if [[ "${CEPH_BOOTSTRAP}" == "never" ]] && [[ ! -f "${READY_FILE}" ]]; then
+    echo "[ceph-dev] CEPH_BOOTSTRAP=never but no existing cluster found" >&2
+    exit 1
+  fi
+
+  if [[ ! -x "${MICRO_OSD_BIN}" ]]; then
+    echo "[ceph-dev] micro-osd helper not found at ${MICRO_OSD_BIN}" >&2
+    exit 1
+  fi
+
+  # Bootstrap only when no ready marker is present.  CEPH_RESET=true and
+  # CEPH_BOOTSTRAP=always wiped the marker above; CEPH_BOOTSTRAP=auto with an
+  # intact marker (e.g. a persisted data-dir volume) reuses the cluster state.
+  if [[ ! -f "${READY_FILE}" ]]; then
+    rm -f "${READY_FILE}"
+    echo "[ceph-dev] Bootstrapping micro cluster (features: ${CEPH_FEATURESET})"
+    CEPH_FEATURESET="${CEPH_FEATURESET}" "${MICRO_OSD_BIN}" "${DATA_DIR}"
+  else
+    echo "[ceph-dev] Reusing existing cluster state at ${DATA_DIR}"
+  fi
+
+  if [[ ! -f "${READY_FILE}" ]]; then
+    echo "[ceph-dev] Cluster failed to report ready flag (${READY_FILE})" >&2
+    exit 1
+  fi
+}
+
+show_status() {
+  if [[ "${CEPH_STATUS_ON_START}" == "true" ]]; then
+    echo "[ceph-dev] ceph status"
+    ceph status || true
+  fi
+}
+
+bootstrap_cluster
+show_status
+
+declare -a CMD=("$@")
+if [[ ${#CMD[@]} -eq 0 ]]; then
+  CMD=("/bin/bash")
+fi
+
+if [[ "${CMD[0]}" == "ceph-dev" ]]; then
+  case "${CMD[1]:-}" in
+    shell)
+      exec "/bin/bash"
+      ;;
+    status)
+      ceph status
+      exit $?
+ ;; + *) + echo "Usage: ceph-dev [shell|status]" >&2 + exit 2 + ;; + esac +else + echo "[ceph-dev] Executing: ${CMD[*]}" + exec "${CMD[@]}" +fi diff --git a/containers/ceph-dev/micro-osd.sh b/containers/ceph-dev/micro-osd.sh new file mode 100755 index 0000000..ce1068f --- /dev/null +++ b/containers/ceph-dev/micro-osd.sh @@ -0,0 +1,228 @@ +#!/bin/bash + +# Adapted from https://github.com/ceph/go-ceph +# +# Copyright (C) 2013,2014 Loic Dachary +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Affero General Public License for more details. +# +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . +# +set -e +set -x +set -u + +DIR=${1} + +# reset +pkill ceph || true +rm -rf "${DIR:?}"/* +LOG_DIR="${DIR}/log" +MON_DATA="${DIR}/mon" +MDS_DATA="${DIR}/mds" +MOUNTPT="${MDS_DATA}/mnt" +OSD_DATA="${DIR}/osd" +RGW_DATA="${DIR}/radosgw" +mkdir "${LOG_DIR}" "${MON_DATA}" "${OSD_DATA}" "${MDS_DATA}" "${MOUNTPT}" "${RGW_DATA}" +MDS_NAME="Z" +FS_NAME="cephfs" +ALT_MDS_NAME="Y" +ALT_FS_NAME="altfs" +MON_NAME="a" +MGR_NAME="x" +MIRROR_ID="m" +RGW_ID="r" + +# Following are examples for S3 credentials taken from official AWS docs: +# https://docs.aws.amazon.com/IAM/latest/UserGuide/security-creds.html#access-keys-and-secret-access-keys +# These does not represent real/valid credentials for AWS in any form. +# They are exclusively used for testing S3 compatible API from Ceph RGW. 
+S3_ACCESS_KEY=AKIAIOSFODNN7EXAMPLE +S3_SECRET_KEY=wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY + +HOST_IP=$(getent ahostsv4 "${HOSTNAME}" | grep STREAM | head -n 1 | awk '{print $1}') +FSID="$(uuidgen)" +export CEPH_CONF=${DIR}/ceph.conf + +generate_ceph_conf() { + # cluster wide parameters + cat >> "${CEPH_CONF}" <> "${MDS_DATA}/keyring" + ceph osd pool create "${fs}_data" 8 + ceph osd pool create "${fs}_metadata" 8 + ceph fs new "${fs}" "${fs}_metadata" "${fs}_data" + ceph fs ls + ceph-mds -i "${mds}" + ceph status + while ! ceph mds stat | grep -q "up:active"; do sleep 1; done + +} + +launch_mds() { + launch_mds_server "${MDS_NAME}" "${FS_NAME}" +} + +launch_mds2() { + launch_mds_server "${ALT_MDS_NAME}" "${ALT_FS_NAME}" + echo "${ALT_FS_NAME}" > "${DIR}/altfs.txt" +} + +launch_mgr() { + ceph-mgr --id ${MGR_NAME} +} + +launch_rbd_mirror() { + ceph auth get-or-create client.rbd-mirror.${MIRROR_ID} mon 'profile rbd-mirror' osd 'profile rbd' + rbd-mirror --id ${MIRROR_ID} --log-file "${LOG_DIR}/rbd-mirror.log" +} + +launch_cephfs_mirror() { + ceph auth get-or-create "client.cephfs-mirror.${MIRROR_ID}" \ + mon 'profile cephfs-mirror' \ + mds 'allow r' \ + osd 'allow rw tag cephfs metadata=*, allow r tag cephfs data=*' \ + mgr 'allow r' + cephfs-mirror --id "cephfs-mirror.${MIRROR_ID}" \ + --log-file "${LOG_DIR}/cephfs-mirror.log" + ceph fs authorize cephfs client.cephfs-mirror-remote / rwps > "${DIR}/cephfs-mirror-remote.out" + # the .out file above is not used by the scripts but can be used for debugging +} + +launch_radosgw() { + ceph auth get-or-create client.rgw."${RGW_ID}" osd 'allow rwx' mon 'allow rw' -o "${RGW_DATA}/keyring" + radosgw -n client.rgw."${RGW_ID}" -k "${RGW_DATA}/keyring" + # not going to try to make shellcheck happy with this line at this time + # shellcheck disable=SC2016 + timeout 60 sh -c 'until [ $(ceph -s | grep -c "rgw:") -eq 1 ]; do echo "waiting for rgw to show up" && sleep 1; done' + radosgw-admin user create --uid admin --display-name 
"Admin User" --caps "buckets=*;users=*;usage=read;metadata=read" --access-key="$S3_ACCESS_KEY" --secret-key="$S3_SECRET_KEY" +} + +launch_radosgw2() { + radosgw-admin caps add --uid=admin --caps="info=read" +} + +selftest() { + ceph --version + ceph status + test_pool=$(uuidgen) + temp_file=$(mktemp) + ceph osd pool create "${test_pool}" 0 + rados --pool "${test_pool}" put group /etc/group + rados --pool "${test_pool}" get group "${temp_file}" + diff /etc/group "${temp_file}" + ceph osd pool delete "${test_pool}" "${test_pool}" --yes-i-really-really-mean-it + rm "${temp_file}" +} + +FEATURESET="${CEPH_FEATURESET-}" +if [ -z "$FEATURESET" ] ; then + case "${CEPH_VERSION-}" in + nautilus|octopus) + FEATURESET="mon osd mgr mds rbd-mirror rgw selftest" + ;; + pacific) + FEATURESET="mon osd mgr mds mds2 rbd-mirror cephfs-mirror rgw selftest" + ;; + *) + FEATURESET="mon osd mgr mds mds2 rbd-mirror cephfs-mirror rgw rgw2 selftest" + ;; + esac +fi + +generate_ceph_conf +for fname in ${FEATURESET} ; do + case "${fname}" in + mon) launch_mon ;; + osd) launch_osd ;; + mds) launch_mds ;; + mds2) launch_mds2 ;; + mgr) launch_mgr ;; + rbd-mirror) launch_rbd_mirror ;; + cephfs-mirror) launch_cephfs_mirror ;; + rgw|radosgw) launch_radosgw ;; + rgw2|radosgw2) launch_radosgw2 ;; + selftest) selftest ;; + *) + echo "Invalid feature: ${fname}" + exit 2 + ;; + esac +done + +touch "${DIR}/.ready" + +# vim: set ts=4 sw=4 sts=4 et: diff --git a/containers/scripts/ceph-dev-shell.sh b/containers/scripts/ceph-dev-shell.sh new file mode 100755 index 0000000..e031e98 --- /dev/null +++ b/containers/scripts/ceph-dev-shell.sh @@ -0,0 +1,13 @@ +#!/usr/bin/env bash +set -euo pipefail + +DOCKER_CMD=${DOCKER_CMD:-sudo docker} +IMAGE=${CEPH_DEV_IMAGE:-ceph-dev} +WORKSPACE=${CEPH_DEV_WORKSPACE:-$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." 
&& pwd)}
+
+# Any arguments are executed inside the container (after uv sync + venv
+# activation); with no arguments an interactive shell is opened instead.
+${DOCKER_CMD} run --rm -it \
+  -v "${WORKSPACE}:/workspace" \
+  -w /workspace \
+  -e CEPH_BOOTSTRAP=always \
+  -e CEPH_STATUS_ON_START=true \
+  "${IMAGE}" bash -lc 'set -e; uv sync --all-packages; source .venv/bin/activate; if [ "$#" -gt 0 ]; then exec "$@"; fi; exec "$SHELL"' ceph-dev-shell "$@"
diff --git a/docs/ceph-dev-container.md b/docs/ceph-dev-container.md
new file mode 100644
index 0000000..88edd2c
--- /dev/null
+++ b/docs/ceph-dev-container.md
@@ -0,0 +1,47 @@
+# Ceph Dev Container
+
+This repository now ships a ready-made container image that boots a tiny Ceph cluster (mon, osd, mgr, mds, rgw). It is intended for local CLI development: build the image once, run it whenever you need `ceph` / `rbd` / `cephfs` commands without touching a real cluster.
+
+## Build the image and run otto in a container
+
+```bash
+sudo docker build -t ceph-dev containers/ceph-dev
+./containers/scripts/ceph-dev-shell.sh
+```
+
+Other possible commands:
+```bash
+./containers/scripts/ceph-dev-shell.sh otto cluster checkup --ceph_report_json=tests/report.quincy.json
+```
+
+Build args:
+
+- `CEPH_IMG` / `CEPH_TAG` – override the upstream Ceph container source (defaults to `quay.io/ceph/ceph:v19`).
+
+## Run it
+
+Interactive shell with a fresh cluster:
+
+```bash
+sudo docker run --rm -it --name ceph-dev ceph-dev
+```
+
+For day-to-day development use `containers/scripts/ceph-dev-shell.sh`, which always cleans the cluster (sets `CEPH_BOOTSTRAP=always`), runs `uv sync`, and opens a shell with your repo mounted at `/workspace`; any arguments you pass are executed inside the container instead of opening a shell.
+
+The script accepts a few env vars:
+
+- `CEPH_DEV_IMAGE` – image name to run (default `ceph-dev`).
+- `CEPH_DEV_WORKSPACE` – host path to mount at `/workspace` (default repo root).
+- `DOCKER_CMD` – override the docker invocation (default `sudo docker`; set to plain `docker` or `podman` if your user can reach the daemon without root).
+
+Because the virtualenv lives in the mounted workspace (`/workspace/.venv`), `uv sync` only does real work the first time you launch a shell; subsequent runs reuse the same environment almost immediately.
By default `/var/lib/ceph-dev` lives inside the container (ephemeral); mount your own volume at that path if you want to reuse the same cluster state. + +## Useful environment switches + +| Variable | Values | Effect | +| --- | --- | --- | +| `CEPH_FEATURESET` | space-separated list of `mon osd mgr mds mds2 rbd-mirror cephfs-mirror rgw rgw2 selftest` | Select which micro-services the helper script bootstraps (default: `mon osd mgr mds rgw selftest`). | +| `CEPH_BOOTSTRAP` | `auto` (default), `always`, `never` | `auto` bootstraps only when no `.ready` marker exists under `/var/lib/ceph-dev`; `always` wipes the data dir each start; `never` requires you to persist `/var/lib/ceph-dev` (via a Docker volume) so the cluster can be reused. | +| `CEPH_RESET` | `true` / `false` | Forcefully delete the data dir before bringing the cluster up (same as `CEPH_BOOTSTRAP=always`, but explicit). | +| `CEPH_DATA_DIR` | path (default `/var/lib/ceph-dev`) | Where cluster state is stored. Mount this as a named volume to reuse OSDs between runs. | +