Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
60 changes: 60 additions & 0 deletions containers/ceph-dev/Dockerfile
Original file line number Diff line number Diff line change
@@ -0,0 +1,60 @@

# Adapted from https://github.com/ceph/go-ceph
ARG CEPH_IMG=quay.io/ceph/ceph
ARG CEPH_TAG=v19
FROM ${CEPH_IMG}:${CEPH_TAG}

SHELL ["/bin/bash", "-o", "pipefail", "-c"]

# Provide a few quality-of-life tools plus dependencies that the micro-ceph
# bootstrap script relies on (uuidgen, getent, jq, etc.).
# The package list is defined once and shared between the dnf and microdnf
# paths so the two branches cannot drift apart (the base image ships one or
# the other depending on the tag).
RUN set -euxo pipefail \
    && pkg_mgr="dnf" \
    && if command -v microdnf >/dev/null 2>&1; then pkg_mgr="microdnf"; fi \
    && "${pkg_mgr}" install -y --setopt=install_weak_deps=0 --setopt=tsflags=nodocs \
        bash-completion \
        findutils \
        hostname \
        iproute \
        jq \
        less \
        procps-ng \
        python3.11 \
        python3.11-devel \
        python3.11-pip \
        util-linux \
        vim-minimal \
    && "${pkg_mgr}" clean all

# Pin the uv release.  The unversioned install.sh always installs the latest
# uv, so the version must be embedded in the URL for the ARG to have any
# effect (previously UV_VERSION was declared but never used).  The leading
# "v" is stripped because astral.sh publishes versioned paths without it.
# NOTE(review): UV_INSTALL_DIR support in the standalone installer is
# version-dependent — confirm it is honored by this release.
ARG UV_VERSION=v0.4.23
RUN curl -LsSf "https://astral.sh/uv/${UV_VERSION#v}/install.sh" -o /tmp/uv-install.sh \
    && chmod +x /tmp/uv-install.sh \
    && UV_INSTALL_DIR=/usr/local/bin /tmp/uv-install.sh \
    && rm -f /tmp/uv-install.sh

COPY micro-osd.sh /usr/local/bin/micro-osd.sh
COPY entrypoint.sh /usr/local/bin/ceph-dev-entrypoint.sh
RUN chmod +x /usr/local/bin/micro-osd.sh /usr/local/bin/ceph-dev-entrypoint.sh

# Defaults consumed by ceph-dev-entrypoint.sh.
ENV CEPH_DATA_DIR=/var/lib/ceph-dev \
    CEPH_BOOTSTRAP=auto \
    CEPH_STATUS_ON_START=true
ENTRYPOINT ["/usr/local/bin/ceph-dev-entrypoint.sh"]
CMD ["/bin/bash"]
83 changes: 83 additions & 0 deletions containers/ceph-dev/entrypoint.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,83 @@
#!/usr/bin/env bash
set -euo pipefail

# Container entrypoint: bootstrap a single-node "micro" Ceph cluster via
# micro-osd.sh, optionally print its status, then hand off to the user's
# command.
#
# Environment knobs (all optional):
#   MICRO_OSD_BIN        - bootstrap helper path (default /usr/local/bin/micro-osd.sh)
#   CEPH_DATA_DIR        - cluster state directory (default /var/lib/ceph-dev)
#   CEPH_FEATURESET      - space-separated feature list forwarded to micro-osd.sh
#   CEPH_RESET           - "true" wipes the data dir before bootstrapping
#   CEPH_BOOTSTRAP       - auto | always | never
#   CEPH_STATUS_ON_START - "true" runs `ceph status` once the cluster is up
MICRO_OSD_BIN=${MICRO_OSD_BIN:-/usr/local/bin/micro-osd.sh}
DATA_DIR=${CEPH_DATA_DIR:-/var/lib/ceph-dev}
# Marker file written by micro-osd.sh when bootstrap completes successfully.
READY_FILE="${DATA_DIR}/.ready"
DEFAULT_FEATURES="mon osd mgr mds rgw selftest"
CEPH_FEATURESET="${CEPH_FEATURESET:-${DEFAULT_FEATURES}}"
CEPH_RESET="${CEPH_RESET:-false}"
CEPH_BOOTSTRAP="${CEPH_BOOTSTRAP:-auto}"
CEPH_STATUS_ON_START="${CEPH_STATUS_ON_START:-true}"

mkdir -p "${DATA_DIR}"
# Ceph CLI tools locate the cluster config through CEPH_CONF.
export CEPH_CONF="${DATA_DIR}/ceph.conf"

bootstrap_cluster() {
  # Prepare ${DATA_DIR} and, unless CEPH_BOOTSTRAP=never, run the micro-osd
  # helper to stand up a fresh single-node cluster.  In every mode the ready
  # marker (${READY_FILE}) must exist on return, or the entrypoint aborts.
  local wipe=""
  if [[ "${CEPH_RESET}" == "true" ]]; then
    echo "[ceph-dev] Reset requested, wiping ${DATA_DIR}"
    wipe=yes
  elif [[ "${CEPH_BOOTSTRAP}" == "always" ]]; then
    echo "[ceph-dev] CEPH_BOOTSTRAP=always, reinitializing ${DATA_DIR}"
    wipe=yes
  fi
  if [[ -n "${wipe}" ]]; then
    rm -rf "${DATA_DIR}"
    mkdir -p "${DATA_DIR}"
  fi

  # "never" is only usable when a previous run left its state behind.
  if [[ "${CEPH_BOOTSTRAP}" == "never" && ! -f "${READY_FILE}" ]]; then
    echo "[ceph-dev] CEPH_BOOTSTRAP=never but no existing cluster found" >&2
    exit 1
  fi

  if [[ ! -x "${MICRO_OSD_BIN}" ]]; then
    echo "[ceph-dev] micro-osd helper not found at ${MICRO_OSD_BIN}" >&2
    exit 1
  fi

  if [[ "${CEPH_BOOTSTRAP}" == "never" ]]; then
    echo "[ceph-dev] Reusing existing cluster state at ${DATA_DIR}"
  else
    # Drop any stale marker so a failed bootstrap cannot look successful.
    rm -f "${READY_FILE}"
    echo "[ceph-dev] Bootstrapping micro cluster (features: ${CEPH_FEATURESET})"
    CEPH_FEATURESET="${CEPH_FEATURESET}" "${MICRO_OSD_BIN}" "${DATA_DIR}"
  fi

  if [[ ! -f "${READY_FILE}" ]]; then
    echo "[ceph-dev] Cluster failed to report ready flag (${READY_FILE})" >&2
    exit 1
  fi
}

show_status() {
  # Best-effort status dump after bootstrap, gated by CEPH_STATUS_ON_START
  # ("true" to enable).  Never fails the entrypoint.
  [[ "${CEPH_STATUS_ON_START}" == "true" ]] || return 0
  echo "[ceph-dev] ceph status"
  ceph status || true
}

# --- main ----------------------------------------------------------------
bootstrap_cluster
show_status

# Default to an interactive shell when no command was supplied.
declare -a CMD=("$@")
if [[ ${#CMD[@]} -eq 0 ]]; then
  CMD=("/bin/bash")
fi

# A leading "ceph-dev" word selects a tiny built-in subcommand dispatcher;
# any other command is exec'd verbatim as PID 1's replacement.
if [[ "${CMD[0]}" == "ceph-dev" ]]; then
  case "${CMD[1]:-}" in
    shell)
      exec "/bin/bash"
      ;;
    status)
      # NOTE(review): under `set -e` a failing `ceph status` terminates the
      # script before `exit $?` runs, but with the same exit code either way.
      ceph status
      exit $?
      ;;
    *)
      echo "Usage: ceph-dev [shell|status]" >&2
      exit 2
      ;;
  esac
else
  echo "[ceph-dev] Executing: ${CMD[*]}"
  exec "${CMD[@]}"
fi
228 changes: 228 additions & 0 deletions containers/ceph-dev/micro-osd.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,228 @@
#!/bin/bash

# Adapted from https://github.com/ceph/go-ceph
#
# Copyright (C) 2013,2014 Loic Dachary <loic@dachary.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
set -e
set -x
set -u

# Target directory for all cluster state; supplied by the caller (entrypoint).
DIR=${1}

# reset: kill any daemons from a previous run and clear out old state.
# ${DIR:?} aborts instead of expanding to "" (and globbing "/*") if DIR is
# somehow empty.
pkill ceph || true
rm -rf "${DIR:?}"/*
LOG_DIR="${DIR}/log"
MON_DATA="${DIR}/mon"
MDS_DATA="${DIR}/mds"
MOUNTPT="${MDS_DATA}/mnt"
OSD_DATA="${DIR}/osd"
RGW_DATA="${DIR}/radosgw"
mkdir "${LOG_DIR}" "${MON_DATA}" "${OSD_DATA}" "${MDS_DATA}" "${MOUNTPT}" "${RGW_DATA}"
# Single-letter daemon ids; these must match the sections written by
# generate_ceph_conf below.
MDS_NAME="Z"
FS_NAME="cephfs"
ALT_MDS_NAME="Y"
ALT_FS_NAME="altfs"
MON_NAME="a"
MGR_NAME="x"
MIRROR_ID="m"
RGW_ID="r"

# The following are example S3 credentials taken from the official AWS docs:
# https://docs.aws.amazon.com/IAM/latest/UserGuide/security-creds.html#access-keys-and-secret-access-keys
# They do not represent real/valid AWS credentials in any form and are used
# exclusively for testing the S3-compatible API of Ceph RGW.
S3_ACCESS_KEY=AKIAIOSFODNN7EXAMPLE
S3_SECRET_KEY=wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY

# First IPv4 address of this host; used as the mon address in ceph.conf.
HOST_IP=$(getent ahostsv4 "${HOSTNAME}" | grep STREAM | head -n 1 | awk '{print $1}')
FSID="$(uuidgen)"
export CEPH_CONF=${DIR}/ceph.conf

generate_ceph_conf() {
    # Write a minimal single-node config: auth disabled, pool size 1, a
    # memstore-backed OSD, and all log/data paths rooted under ${DIR}.
    # NOTE: appends to ${CEPH_CONF}; the top-level reset removed any old copy,
    # so in practice this starts from an empty file.
    # cluster wide parameters
    cat >> "${CEPH_CONF}" <<EOF
[global]
fsid = ${FSID}
osd crush chooseleaf type = 0
run dir = ${DIR}/run
auth cluster required = none
auth service required = none
auth client required = none
osd pool default size = 1
mon host = ${HOSTNAME}

[mds.${MDS_NAME}]
host = ${HOSTNAME}

[mon.${MON_NAME}]
log file = ${LOG_DIR}/mon.log
chdir = ""
mon cluster log file = ${LOG_DIR}/mon-cluster.log
mon data = ${MON_DATA}
mon data avail crit = 0
mon addr = ${HOST_IP}:6789
mon allow pool delete = true

[osd.0]
log file = ${LOG_DIR}/osd.log
chdir = ""
osd data = ${OSD_DATA}
osd journal = ${OSD_DATA}.journal
osd journal size = 100
osd objectstore = memstore
osd class load list = *
osd class default list = *

[mgr.${MGR_NAME}]
log_file = ${LOG_DIR}/mgr.log

[client.rgw.${RGW_ID}]
rgw dns name = ${HOSTNAME}
rgw enable usage log = true
rgw usage log tick interval = 1
rgw usage log flush threshold = 1
rgw usage max shards = 32
rgw usage max user shards = 1
log file = ${LOG_DIR}/client.rgw.${RGW_ID}.log
rgw frontends = beast port=80
ms mon client mode = crc
EOF
}

launch_mon() {
    # Initialize the monitor store with an empty keyring (auth is disabled in
    # the generated ceph.conf), then start the mon daemon.
    ceph-mon --id "${MON_NAME}" --mkfs --keyring /dev/null
    touch "${MON_DATA}/keyring"
    ceph-mon --id "${MON_NAME}"
}

launch_osd() {
OSD_ID=$(ceph osd create)
ceph osd crush add "osd.${OSD_ID}" 1 root=default
ceph-osd --id "${OSD_ID}" --mkjournal --mkfs
ceph-osd --id "${OSD_ID}" || ceph-osd --id "${OSD_ID}" || ceph-osd --id "${OSD_ID}"
}

launch_mds_server() {
    # Start an MDS daemon and create a filesystem backed by fresh pools.
    #   $1 - MDS daemon id
    #   $2 - filesystem name (also used as the pool-name prefix)
    local mds_id="$1"
    local fs_name="$2"

    ceph auth get-or-create "mds.${mds_id}" mon 'profile mds' mgr 'profile mds' mds 'allow *' osd 'allow *' >> "${MDS_DATA}/keyring"
    ceph osd pool create "${fs_name}_data" 8
    ceph osd pool create "${fs_name}_metadata" 8
    ceph fs new "${fs_name}" "${fs_name}_metadata" "${fs_name}_data"
    ceph fs ls
    ceph-mds -i "${mds_id}"
    ceph status
    # Block until the MDS reports itself active.
    until ceph mds stat | grep -q "up:active"; do sleep 1; done
}

# Primary filesystem.
launch_mds() {
    launch_mds_server "${MDS_NAME}" "${FS_NAME}"
}

# Alternate filesystem; its name is recorded for consumers that need it.
launch_mds2() {
    launch_mds_server "${ALT_MDS_NAME}" "${ALT_FS_NAME}"
    printf '%s\n' "${ALT_FS_NAME}" > "${DIR}/altfs.txt"
}

# Start the manager daemon.
launch_mgr() {
    ceph-mgr --id "${MGR_NAME}"
}

# Create credentials for, and start, an rbd-mirror daemon.
launch_rbd_mirror() {
    ceph auth get-or-create "client.rbd-mirror.${MIRROR_ID}" mon 'profile rbd-mirror' osd 'profile rbd'
    rbd-mirror --id "${MIRROR_ID}" --log-file "${LOG_DIR}/rbd-mirror.log"
}

launch_cephfs_mirror() {
    # Credentials for a cephfs-mirror daemon: read-only on mds/mgr, rw on
    # cephfs metadata, read-only on cephfs data.
    ceph auth get-or-create "client.cephfs-mirror.${MIRROR_ID}" \
        mon 'profile cephfs-mirror' \
        mds 'allow r' \
        osd 'allow rw tag cephfs metadata=*, allow r tag cephfs data=*' \
        mgr 'allow r'
    cephfs-mirror --id "cephfs-mirror.${MIRROR_ID}" \
        --log-file "${LOG_DIR}/cephfs-mirror.log"
    # Authorize a remote-peer user.  The .out file is not consumed by the
    # scripts; it is kept only as a debugging aid.
    ceph fs authorize cephfs client.cephfs-mirror-remote / rwps > "${DIR}/cephfs-mirror-remote.out"
}

launch_radosgw() {
    # Create RGW credentials, start the gateway, wait for it to register with
    # the cluster, then create the admin S3 user with the example keys.
    ceph auth get-or-create "client.rgw.${RGW_ID}" osd 'allow rwx' mon 'allow rw' -o "${RGW_DATA}/keyring"
    radosgw -n "client.rgw.${RGW_ID}" -k "${RGW_DATA}/keyring"
    # not going to try to make shellcheck happy with this line at this time
    # shellcheck disable=SC2016
    timeout 60 sh -c 'until [ $(ceph -s | grep -c "rgw:") -eq 1 ]; do echo "waiting for rgw to show up" && sleep 1; done'
    radosgw-admin user create --uid admin --display-name "Admin User" \
        --caps "buckets=*;users=*;usage=read;metadata=read" \
        --access-key="${S3_ACCESS_KEY}" --secret-key="${S3_SECRET_KEY}"
}

# Extend the admin user with the read-only "info" capability.
launch_radosgw2() {
    radosgw-admin caps add --uid=admin --caps="info=read"
}

selftest() {
    # Smoke-test the cluster: round-trip a small object through a throwaway
    # pool and verify the bytes survive unchanged.
    local pool tmpf
    ceph --version
    ceph status
    pool=$(uuidgen)
    tmpf=$(mktemp)
    ceph osd pool create "${pool}" 0
    rados --pool "${pool}" put group /etc/group
    rados --pool "${pool}" get group "${tmpf}"
    diff /etc/group "${tmpf}"
    ceph osd pool delete "${pool}" "${pool}" --yes-i-really-really-mean-it
    rm "${tmpf}"
}

# Choose which daemons/features to launch.  An explicit CEPH_FEATURESET wins;
# otherwise pick a default based on the Ceph release name in CEPH_VERSION.
FEATURESET="${CEPH_FEATURESET-}"
if [ -z "${FEATURESET}" ] ; then
    case "${CEPH_VERSION-}" in
        nautilus | octopus) FEATURESET="mon osd mgr mds rbd-mirror rgw selftest" ;;
        pacific)            FEATURESET="mon osd mgr mds mds2 rbd-mirror cephfs-mirror rgw selftest" ;;
        *)                  FEATURESET="mon osd mgr mds mds2 rbd-mirror cephfs-mirror rgw rgw2 selftest" ;;
    esac
fi

# Launch the requested features in list order (ordering matters: mon must
# come up before osd, and osd before mds/rgw).
generate_ceph_conf
# ${FEATURESET} is deliberately unquoted: it is a space-separated word list.
for fname in ${FEATURESET} ; do
    case "${fname}" in
        mon) launch_mon ;;
        osd) launch_osd ;;
        mds) launch_mds ;;
        mds2) launch_mds2 ;;
        mgr) launch_mgr ;;
        rbd-mirror) launch_rbd_mirror ;;
        cephfs-mirror) launch_cephfs_mirror ;;
        rgw|radosgw) launch_radosgw ;;
        rgw2|radosgw2) launch_radosgw2 ;;
        selftest) selftest ;;
        *)
            echo "Invalid feature: ${fname}"
            exit 2
            ;;
    esac
done

# Signal success to the entrypoint wrapper, which checks for this marker.
touch "${DIR}/.ready"

# vim: set ts=4 sw=4 sts=4 et:
13 changes: 13 additions & 0 deletions containers/scripts/ceph-dev-shell.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,13 @@
#!/usr/bin/env bash
set -euo pipefail

# Launch an interactive development shell inside the ceph-dev container with
# the repository mounted at /workspace and the Python env synced via uv.
#
#   DOCKER_CMD          - container runtime command (default: "sudo docker")
#   CEPH_DEV_IMAGE      - image to run (default: ceph-dev)
#   CEPH_DEV_WORKSPACE  - host path to mount (default: two dirs above this script)
DOCKER_CMD=${DOCKER_CMD:-sudo docker}
IMAGE=${CEPH_DEV_IMAGE:-ceph-dev}
WORKSPACE=${CEPH_DEV_WORKSPACE:-$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)}

# ${DOCKER_CMD} is intentionally unquoted so "sudo docker" splits into words.
# shellcheck disable=SC2086
${DOCKER_CMD} run --rm -it \
-v "${WORKSPACE}:/workspace" \
-w /workspace \
-e CEPH_BOOTSTRAP=always \
-e CEPH_STATUS_ON_START=true \
"${IMAGE}" bash -lc 'set -e; uv sync --all-packages;source .venv/bin/activate; exec "$SHELL"'
Loading