diff --git a/.editorconfig b/.editorconfig deleted file mode 100644 index 5f150f350..000000000 --- a/.editorconfig +++ /dev/null @@ -1,20 +0,0 @@ -# This file is globally distributed to all container image projects from -# https://github.com/linuxserver/docker-jenkins-builder/blob/master/.editorconfig - -# top-most EditorConfig file -root = true - -# Unix-style newlines with a newline ending every file -[*] -end_of_line = lf -insert_final_newline = true -# trim_trailing_whitespace may cause unintended issues and should not be globally set true -trim_trailing_whitespace = false - -[{Dockerfile*,**.yml}] -indent_style = space -indent_size = 2 - -[{**.sh,root/etc/s6-overlay/s6-rc.d/**,root/etc/cont-init.d/**,root/etc/services.d/**}] -indent_style = space -indent_size = 4 diff --git a/.github/CONTRIBUTING.md b/.github/CONTRIBUTING.md deleted file mode 100644 index 6f4a03bbf..000000000 --- a/.github/CONTRIBUTING.md +++ /dev/null @@ -1,123 +0,0 @@ -# Contributing to webtop - -## Gotchas - -* While contributing make sure to make all your changes before creating a Pull Request, as our pipeline builds each commit after the PR is open. -* Read, and fill the Pull Request template - * If this is a fix for a typo (in code, documentation, or the README) please file an issue and let us sort it out. We do not need a PR - * If the PR is addressing an existing issue include, closes #\, in the body of the PR commit message -* If you want to discuss changes, you can also bring it up in [#dev-talk](https://discordapp.com/channels/354974912613449730/757585807061155840) in our [Discord server](https://linuxserver.io/discord) - -## Common files - -| File | Use case | -| :----: | --- | -| `Dockerfile` | Dockerfile used to build amd64 images | -| `Dockerfile.aarch64` | Dockerfile used to build 64bit ARM architectures | -| `Dockerfile.armhf` | Dockerfile used to build 32bit ARM architectures | -| `Jenkinsfile` | This file is a product of our builder and should not be edited directly. 
This is used to build the image | -| `jenkins-vars.yml` | This file is used to generate the `Jenkinsfile` mentioned above, it only affects the build-process | -| `package_versions.txt` | This file is generated as a part of the build-process and should not be edited directly. It lists all the installed packages and their versions | -| `README.md` | This file is a product of our builder and should not be edited directly. This displays the readme for the repository and image registries | -| `readme-vars.yml` | This file is used to generate the `README.md` | - -## Readme - -If you would like to change our readme, please __**do not**__ directly edit the readme, as it is auto-generated on each commit. -Instead edit the [readme-vars.yml](https://github.com/linuxserver/docker-webtop/edit/master/readme-vars.yml). - -These variables are used in a template for our [Jenkins Builder](https://github.com/linuxserver/docker-jenkins-builder) as part of an ansible play. -Most of these variables are also carried over to [docs.linuxserver.io](https://docs.linuxserver.io/images/docker-webtop) - -### Fixing typos or clarify the text in the readme - -There are variables for multiple parts of the readme, the most common ones are: - -| Variable | Description | -| :----: | --- | -| `project_blurb` | This is the short excerpt shown above the project logo. | -| `app_setup_block` | This is the text that shows up under "Application Setup" if enabled | - -### Parameters - -The compose and run examples are also generated from these variables. - -We have a [reference file](https://github.com/linuxserver/docker-jenkins-builder/blob/master/vars/_container-vars-blank) in our Jenkins Builder. - -These are prefixed with `param_` for required parameters, or `opt_param` for optional parameters, except for `cap_add`. -Remember to enable param, if currently disabled. This differs between parameters, and can be seen in the reference file. 
- -Devices, environment variables, ports and volumes expects its variables in a certain way. - -### Devices - -```yml -param_devices: - - { device_path: "/dev/dri", device_host_path: "/dev/dri", desc: "For hardware transcoding" } -opt_param_devices: - - { device_path: "/dev/dri", device_host_path: "/dev/dri", desc: "For hardware transcoding" } -``` - -### Environment variables - -```yml -param_env_vars: - - { env_var: "TZ", env_value: "Europe/London", desc: "Specify a timezone to use EG Europe/London." } -opt_param_env_vars: - - { env_var: "VERSION", env_value: "latest", desc: "Supported values are LATEST, PLEXPASS or a specific version number." } -``` - -### Ports - -```yml -param_ports: - - { external_port: "80", internal_port: "80", port_desc: "Application WebUI" } -opt_param_ports: - - { external_port: "80", internal_port: "80", port_desc: "Application WebUI" } -``` - -### Volumes - -```yml -param_volumes: - - { vol_path: "/config", vol_host_path: "", desc: "Configuration files." } -opt_param_volumes: - - { vol_path: "/config", vol_host_path: "", desc: "Configuration files." } -``` - -### Testing template changes - -After you make any changes to the templates, you can use our [Jenkins Builder](https://github.com/linuxserver/docker-jenkins-builder) to have the files updated from the modified templates. Please use the command found under `Running Locally` [on this page](https://github.com/linuxserver/docker-jenkins-builder/blob/master/README.md) to generate them prior to submitting a PR. - -## Dockerfiles - -We use multiple Dockerfiles in our repos, this is because sometimes some CPU architectures needs different packages to work. -If you are proposing additional packages to be added, ensure that you added the packages to all the Dockerfiles in alphabetical order. - -### Testing your changes - -```bash -git clone https://github.com/linuxserver/docker-webtop.git -cd docker-webtop -docker build \ - --no-cache \ - --pull \ - -t linuxserver/webtop:latest . 
-``` - -The ARM variants can be built on x86_64 hardware and vice versa using `lscr.io/linuxserver/qemu-static` - -```bash -docker run --rm --privileged lscr.io/linuxserver/qemu-static --reset -``` - -Once registered you can define the dockerfile to use with `-f Dockerfile.aarch64`. - -## Update the changelog - -If you are modifying the Dockerfiles or any of the startup scripts in [root](https://github.com/linuxserver/docker-webtop/tree/master/root), add an entry to the changelog - -```yml -changelogs: - - { date: "DD.MM.YY:", desc: "Added some love to templates" } -``` diff --git a/.github/FUNDING.yml b/.github/FUNDING.yml deleted file mode 100644 index 7eaac7717..000000000 --- a/.github/FUNDING.yml +++ /dev/null @@ -1,2 +0,0 @@ -github: linuxserver -open_collective: linuxserver diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml deleted file mode 100644 index 16d365fb1..000000000 --- a/.github/ISSUE_TEMPLATE/config.yml +++ /dev/null @@ -1,13 +0,0 @@ -blank_issues_enabled: false -contact_links: - - name: Discord chat support - url: https://linuxserver.io/discord - about: Realtime support / chat with the community and the team. - - - name: Discourse discussion forum - url: https://discourse.linuxserver.io - about: Post on our community forum. - - - name: Documentation - url: https://docs.linuxserver.io/images/docker-webtop - about: Documentation - information about all of our containers. diff --git a/.github/ISSUE_TEMPLATE/issue.bug.yml b/.github/ISSUE_TEMPLATE/issue.bug.yml deleted file mode 100644 index fba39242a..000000000 --- a/.github/ISSUE_TEMPLATE/issue.bug.yml +++ /dev/null @@ -1,76 +0,0 @@ -# Based on the issue template -name: Bug report -description: Create a report to help us improve -title: "[BUG] " -labels: [Bug] -body: - - type: checkboxes - attributes: - label: Is there an existing issue for this? - description: Please search to see if an issue already exists for the bug you encountered. 
- options: - - label: I have searched the existing issues - required: true - - type: textarea - attributes: - label: Current Behavior - description: Tell us what happens instead of the expected behavior. - validations: - required: true - - type: textarea - attributes: - label: Expected Behavior - description: Tell us what should happen. - validations: - required: false - - type: textarea - attributes: - label: Steps To Reproduce - description: Steps to reproduce the behavior. - placeholder: | - 1. In this environment... - 2. With this config... - 3. Run '...' - 4. See error... - validations: - required: true - - type: textarea - attributes: - label: Environment - description: | - examples: - - **OS**: Ubuntu 20.04 - - **How docker service was installed**: distro's packagemanager - value: | - - OS: - - How docker service was installed: - render: markdown - validations: - required: false - - type: dropdown - attributes: - label: CPU architecture - options: - - x86-64 - - arm64 - validations: - required: true - - type: textarea - attributes: - label: Docker creation - description: | - Command used to create docker container - Provide your docker create/run command or compose yaml snippet, or a screenshot of settings if using a gui to create the container - render: bash - validations: - required: true - - type: textarea - attributes: - description: | - Provide a full docker log, output of "docker logs webtop" - label: Container logs - placeholder: | - Output of `docker logs webtop` - render: bash - validations: - required: true diff --git a/.github/ISSUE_TEMPLATE/issue.feature.yml b/.github/ISSUE_TEMPLATE/issue.feature.yml deleted file mode 100644 index 099dcdb5e..000000000 --- a/.github/ISSUE_TEMPLATE/issue.feature.yml +++ /dev/null @@ -1,31 +0,0 @@ -# Based on the issue template -name: Feature request -description: Suggest an idea for this project -title: "[FEAT] <title>" -labels: [enhancement] -body: - - type: checkboxes - attributes: - label: Is this a new feature 
request? - description: Please search to see if a feature request already exists. - options: - - label: I have searched the existing issues - required: true - - type: textarea - attributes: - label: Wanted change - description: Tell us what you want to happen. - validations: - required: true - - type: textarea - attributes: - label: Reason for change - description: Justify your request, why do you want it, what is the benefit. - validations: - required: true - - type: textarea - attributes: - label: Proposed code change - description: Do you have a potential code change in mind? - validations: - required: false diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md deleted file mode 100644 index b80c297e7..000000000 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ /dev/null @@ -1,43 +0,0 @@ -<!--- Provide a general summary of your changes in the Title above --> - -[linuxserverurl]: https://linuxserver.io -[![linuxserver.io](https://raw.githubusercontent.com/linuxserver/docker-templates/master/linuxserver.io/img/linuxserver_medium.png)][linuxserverurl] - - -<!--- Before submitting a pull request please check the following --> - -<!--- If this is a fix for a typo (in code, documentation, or the README) please file an issue and let us sort it out. 
We do not need a PR --> -<!--- Ask yourself if this modification is something the whole userbase will benefit from, if this is a specific change for corner case functionality or plugins please look at making a Docker Mod or local script https://blog.linuxserver.io/2019/09/14/customizing-our-containers/ --> -<!--- That if the PR is addressing an existing issue include, closes #<issue number> , in the body of the PR commit message --> -<!--- You have included links to any files / patches etc your PR may be using in the body of the PR commit message --> -<!--- We maintain a changelog of major revisions to the container at the end of readme-vars.yml in the root of this repository, please add your changes there if appropriate --> - - -<!--- Coding guidelines: --> -<!--- 1. Installed packages in the Dockerfiles should be in alphabetical order --> -<!--- 2. Changes to Dockerfile should be replicated in Dockerfile.armhf and Dockerfile.aarch64 if applicable --> -<!--- 3. Indentation style (tabs vs 4 spaces vs 1 space) should match the rest of the document --> -<!--- 4. Readme is auto generated from readme-vars.yml, make your changes there --> - ------------------------------- - - - [ ] I have read the [contributing](https://github.com/linuxserver/docker-webtop/blob/master/.github/CONTRIBUTING.md) guideline and understand that I have made the correct modifications - ------------------------------- - -<!--- We welcome all PR’s though this doesn’t guarantee it will be accepted. --> - -## Description: -<!--- Describe your changes in detail --> - -## Benefits of this PR and context: -<!--- Please explain why we should accept this PR. If this fixes an outstanding bug, please reference the issue # --> - -## How Has This Been Tested? -<!--- Please describe in detail how you tested your changes. --> -<!--- Include details of your testing environment, and the tests you ran to --> -<!--- see how your change affects other areas of the code, etc. 
--> - - -## Source / References: -<!--- Please include any forum posts/github links relevant to the PR --> diff --git a/.github/workflows/call_issue_pr_tracker.yml b/.github/workflows/call_issue_pr_tracker.yml deleted file mode 100644 index d07cf1212..000000000 --- a/.github/workflows/call_issue_pr_tracker.yml +++ /dev/null @@ -1,19 +0,0 @@ -name: Issue & PR Tracker - -on: - issues: - types: [opened,reopened,labeled,unlabeled,closed] - pull_request_target: - types: [opened,reopened,review_requested,review_request_removed,labeled,unlabeled,closed] - pull_request_review: - types: [submitted,edited,dismissed] - -permissions: - contents: read - -jobs: - manage-project: - permissions: - issues: write - uses: linuxserver/github-workflows/.github/workflows/issue-pr-tracker.yml@v1 - secrets: inherit diff --git a/.github/workflows/call_issues_cron.yml b/.github/workflows/call_issues_cron.yml deleted file mode 100644 index d79774167..000000000 --- a/.github/workflows/call_issues_cron.yml +++ /dev/null @@ -1,16 +0,0 @@ -name: Mark stale issues and pull requests -on: - schedule: - - cron: '36 11 * * *' - workflow_dispatch: - -permissions: - contents: read - -jobs: - stale: - permissions: - issues: write - pull-requests: write - uses: linuxserver/github-workflows/.github/workflows/issues-cron.yml@v1 - secrets: inherit diff --git a/.github/workflows/external_trigger.yml b/.github/workflows/external_trigger.yml deleted file mode 100644 index 5c9b10d60..000000000 --- a/.github/workflows/external_trigger.yml +++ /dev/null @@ -1,28 +0,0 @@ -name: External Trigger Main - -on: - workflow_dispatch: - -permissions: - contents: read - -jobs: - external-trigger-master: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4.1.1 - - - name: External Trigger - if: github.ref == 'refs/heads/master' - env: - SKIP_EXTERNAL_TRIGGER: ${{ vars.SKIP_EXTERNAL_TRIGGER }} - run: | - printf "# External trigger for docker-webtop\n\n" >> $GITHUB_STEP_SUMMARY - echo "Type is \`os\`" >> 
$GITHUB_STEP_SUMMARY - echo "No external release, exiting" >> $GITHUB_STEP_SUMMARY - exit 0 - if grep -q "^webtop_master_${EXT_RELEASE}" <<< "${SKIP_EXTERNAL_TRIGGER}"; then - echo "> [!WARNING]" >> $GITHUB_STEP_SUMMARY - echo "> Github organizational variable \`SKIP_EXTERNAL_TRIGGER\` matches current external release; skipping trigger." >> $GITHUB_STEP_SUMMARY - exit 0 - fi diff --git a/.github/workflows/external_trigger_scheduler.yml b/.github/workflows/external_trigger_scheduler.yml deleted file mode 100644 index 92e81f019..000000000 --- a/.github/workflows/external_trigger_scheduler.yml +++ /dev/null @@ -1,48 +0,0 @@ -name: External Trigger Scheduler - -on: - schedule: - - cron: '27 * * * *' - workflow_dispatch: - -permissions: - contents: read - -jobs: - external-trigger-scheduler: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4.1.1 - with: - fetch-depth: '0' - - - name: External Trigger Scheduler - run: | - printf "# External trigger scheduler for docker-webtop\n\n" >> $GITHUB_STEP_SUMMARY - printf "Found the branches:\n\n%s\n" "$(git for-each-ref --format='- %(refname:lstrip=3)' refs/remotes)" >> $GITHUB_STEP_SUMMARY - for br in $(git for-each-ref --format='%(refname:lstrip=3)' refs/remotes) - do - if [[ "${br}" == "HEAD" ]]; then - printf "\nSkipping %s.\n" ${br} >> $GITHUB_STEP_SUMMARY - continue - fi - printf "\n## Evaluating \`%s\`\n\n" ${br} >> $GITHUB_STEP_SUMMARY - ls_jenkins_vars=$(curl -sX GET https://raw.githubusercontent.com/linuxserver/docker-webtop/${br}/jenkins-vars.yml) - ls_branch=$(echo "${ls_jenkins_vars}" | yq -r '.ls_branch') - ls_trigger=$(echo "${ls_jenkins_vars}" | yq -r '.external_type') - if [[ "${br}" == "${ls_branch}" ]] && [[ "${ls_trigger}" != "os" ]]; then - echo "Branch appears to be live and trigger is not os; checking workflow." 
>> $GITHUB_STEP_SUMMARY - if curl -sfX GET https://raw.githubusercontent.com/linuxserver/docker-webtop/${br}/.github/workflows/external_trigger.yml > /dev/null 2>&1; then - echo "Triggering external trigger workflow for branch." >> $GITHUB_STEP_SUMMARY - curl -iX POST \ - -H "Authorization: token ${{ secrets.CR_PAT }}" \ - -H "Accept: application/vnd.github.v3+json" \ - -d "{\"ref\":\"refs/heads/${br}\"}" \ - https://api.github.com/repos/linuxserver/docker-webtop/actions/workflows/external_trigger.yml/dispatches - else - echo "Skipping branch due to no external trigger workflow present." >> $GITHUB_STEP_SUMMARY - fi - else - echo "Skipping branch due to being detected as dev branch or having no external version." >> $GITHUB_STEP_SUMMARY - fi - done diff --git a/.github/workflows/greetings.yml b/.github/workflows/greetings.yml deleted file mode 100644 index c99365854..000000000 --- a/.github/workflows/greetings.yml +++ /dev/null @@ -1,19 +0,0 @@ -name: Greetings - -on: [pull_request_target, issues] - -permissions: - contents: read - -jobs: - greeting: - permissions: - issues: write - pull-requests: write - runs-on: ubuntu-latest - steps: - - uses: actions/first-interaction@v1 - with: - issue-message: 'Thanks for opening your first issue here! Be sure to follow the relevant issue templates, or risk having this issue marked as invalid.' - pr-message: 'Thanks for opening this pull request! Be sure to follow the [pull request template](https://github.com/linuxserver/docker-webtop/blob/master/.github/PULL_REQUEST_TEMPLATE.md)!' 
- repo-token: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/package_trigger_scheduler.yml b/.github/workflows/package_trigger_scheduler.yml deleted file mode 100644 index 33b24b1de..000000000 --- a/.github/workflows/package_trigger_scheduler.yml +++ /dev/null @@ -1,103 +0,0 @@ -name: Package Trigger Scheduler - -on: - schedule: - - cron: '19 3 * * 2' - workflow_dispatch: - -permissions: - contents: read - -jobs: - package-trigger-scheduler: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4.1.1 - with: - fetch-depth: '0' - - - name: Package Trigger Scheduler - env: - SKIP_PACKAGE_TRIGGER: ${{ vars.SKIP_PACKAGE_TRIGGER }} - run: | - printf "# Package trigger scheduler for docker-webtop\n\n" >> $GITHUB_STEP_SUMMARY - printf "Found the branches:\n\n%s\n" "$(git for-each-ref --format='- %(refname:lstrip=3)' refs/remotes)" >> $GITHUB_STEP_SUMMARY - for br in $(git for-each-ref --format='%(refname:lstrip=3)' refs/remotes) - do - if [[ "${br}" == "HEAD" ]]; then - printf "\nSkipping %s.\n" ${br} >> $GITHUB_STEP_SUMMARY - continue - fi - printf "\n## Evaluating \`%s\`\n\n" ${br} >> $GITHUB_STEP_SUMMARY - JENKINS_VARS=$(curl -sX GET https://raw.githubusercontent.com/linuxserver/docker-webtop/${br}/jenkins-vars.yml) - if ! curl -sfX GET https://raw.githubusercontent.com/linuxserver/docker-webtop/${br}/Jenkinsfile >/dev/null 2>&1; then - echo "> [!WARNING]" >> $GITHUB_STEP_SUMMARY - echo "> No Jenkinsfile found. Branch is either deprecated or is an early dev branch." >> $GITHUB_STEP_SUMMARY - skipped_branches="${skipped_branches}${br} " - elif [[ "${br}" == $(yq -r '.ls_branch' <<< "${JENKINS_VARS}") ]]; then - echo "Branch appears to be live; checking workflow." 
>> $GITHUB_STEP_SUMMARY - README_VARS=$(curl -sX GET https://raw.githubusercontent.com/linuxserver/docker-webtop/${br}/readme-vars.yml) - if [[ $(yq -r '.project_deprecation_status' <<< "${README_VARS}") == "true" ]]; then - echo "> [!WARNING]" >> $GITHUB_STEP_SUMMARY - echo "> Branch appears to be deprecated; skipping trigger." >> $GITHUB_STEP_SUMMARY - skipped_branches="${skipped_branches}${br} " - elif [[ $(yq -r '.skip_package_check' <<< "${JENKINS_VARS}") == "true" ]]; then - echo "> [!WARNING]" >> $GITHUB_STEP_SUMMARY - echo "> Skipping branch ${br} due to \`skip_package_check\` being set in \`jenkins-vars.yml\`." >> $GITHUB_STEP_SUMMARY - skipped_branches="${skipped_branches}${br} " - elif grep -q "^webtop_${br}" <<< "${SKIP_PACKAGE_TRIGGER}"; then - echo "> [!WARNING]" >> $GITHUB_STEP_SUMMARY - echo "> Github organizational variable \`SKIP_PACKAGE_TRIGGER\` contains \`webtop_${br}\`; skipping trigger." >> $GITHUB_STEP_SUMMARY - skipped_branches="${skipped_branches}${br} " - elif [ $(curl -s https://ci.linuxserver.io/job/Docker-Pipeline-Builders/job/docker-webtop/job/${br}/lastBuild/api/json | jq -r '.building' 2>/dev/null) == "true" ]; then - echo "> [!WARNING]" >> $GITHUB_STEP_SUMMARY - echo "> There already seems to be an active build on Jenkins; skipping package trigger for ${br}" >> $GITHUB_STEP_SUMMARY - skipped_branches="${skipped_branches}${br} " - else - echo "> [!NOTE]" >> $GITHUB_STEP_SUMMARY - echo "> Triggering package trigger for branch ${br}" >> $GITHUB_STEP_SUMMARY - printf "> To disable, add \`webtop_%s\` into the Github organizational variable \`SKIP_PACKAGE_TRIGGER\`.\n\n" "${br}" >> $GITHUB_STEP_SUMMARY - triggered_branches="${triggered_branches}${br} " - response=$(curl -iX POST \ - https://ci.linuxserver.io/job/Docker-Pipeline-Builders/job/docker-webtop/job/${br}/buildWithParameters?PACKAGE_CHECK=true \ - --user ${{ secrets.JENKINS_USER }}:${{ secrets.JENKINS_TOKEN }} | grep -i location | sed "s|^[L|l]ocation: \(.*\)|\1|") - if [[ -z 
"${response}" ]]; then - echo "> [!WARNING]" >> $GITHUB_STEP_SUMMARY - echo "> Jenkins build could not be triggered. Skipping branch." - continue - fi - echo "Jenkins [job queue url](${response%$'\r'})" >> $GITHUB_STEP_SUMMARY - echo "Sleeping 10 seconds until job starts" >> $GITHUB_STEP_SUMMARY - sleep 10 - buildurl=$(curl -s "${response%$'\r'}api/json" | jq -r '.executable.url') - buildurl="${buildurl%$'\r'}" - echo "Jenkins job [build url](${buildurl})" >> $GITHUB_STEP_SUMMARY - echo "Attempting to change the Jenkins job description" >> $GITHUB_STEP_SUMMARY - if ! curl -ifX POST \ - "${buildurl}submitDescription" \ - --user ${{ secrets.JENKINS_USER }}:${{ secrets.JENKINS_TOKEN }} \ - --data-urlencode "description=GHA package trigger https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}" \ - --data-urlencode "Submit=Submit"; then - echo "> [!WARNING]" >> $GITHUB_STEP_SUMMARY - echo "> Unable to change the Jenkins job description." - fi - sleep 20 - fi - else - echo "Skipping branch ${br} due to being detected as dev branch." 
>> $GITHUB_STEP_SUMMARY - fi - done - if [[ -n "${triggered_branches}" ]] || [[ -n "${skipped_branches}" ]]; then - if [[ -n "${triggered_branches}" ]]; then - NOTIFY_BRANCHES="**Triggered:** ${triggered_branches} \n" - NOTIFY_BUILD_URL="**Build URL:** https://ci.linuxserver.io/blue/organizations/jenkins/Docker-Pipeline-Builders%2Fdocker-webtop/activity/ \n" - echo "**** Package check build(s) triggered for branch(es): ${triggered_branches} ****" - fi - if [[ -n "${skipped_branches}" ]]; then - NOTIFY_BRANCHES="${NOTIFY_BRANCHES}**Skipped:** ${skipped_branches} \n" - fi - echo "**** Notifying Discord ****" - curl -X POST -H "Content-Type: application/json" --data '{"avatar_url": "https://cdn.discordapp.com/avatars/354986384542662657/df91181b3f1cf0ef1592fbe18e0962d7.png","embeds": [{"color": 9802903, - "description": "**Package Check Build(s) for webtop** \n'"${NOTIFY_BRANCHES}"''"${NOTIFY_BUILD_URL}"'"}], - "username": "Github Actions"}' ${{ secrets.DISCORD_WEBHOOK }} - fi diff --git a/.github/workflows/permissions.yml b/.github/workflows/permissions.yml deleted file mode 100644 index 02e1bdb9a..000000000 --- a/.github/workflows/permissions.yml +++ /dev/null @@ -1,12 +0,0 @@ -name: Permission check -on: - pull_request_target: - paths: - - '**/run' - - '**/finish' - - '**/check' - - 'root/migrations/*' - -jobs: - permission_check: - uses: linuxserver/github-workflows/.github/workflows/init-svc-executable-permissions.yml@v1 diff --git a/.gitignore b/.gitignore index df432a482..700489667 100644 --- a/.gitignore +++ b/.gitignore @@ -1 +1,8 @@ -.jenkins-external +.DS_Store +*.log +ssl +*.pem +*.key +.vscode/ +.devcontainer/ +.env \ No newline at end of file diff --git a/Dockerfile b/Dockerfile deleted file mode 100644 index ad0195796..000000000 --- a/Dockerfile +++ /dev/null @@ -1,50 +0,0 @@ -# syntax=docker/dockerfile:1 - -FROM ghcr.io/linuxserver/baseimage-selkies:alpine322 - -# set version label -ARG BUILD_DATE -ARG VERSION -ARG XFCE_VERSION -LABEL 
build_version="Linuxserver.io version:- ${VERSION} Build-date:- ${BUILD_DATE}" -LABEL maintainer="thelamer" - -# title -ENV TITLE="Alpine XFCE" - -RUN \ - echo "**** add icon ****" && \ - curl -o \ - /usr/share/selkies/www/icon.png \ - https://raw.githubusercontent.com/linuxserver/docker-templates/master/linuxserver.io/img/webtop-logo.png && \ - echo "**** install packages ****" && \ - apk add --no-cache \ - adw-gtk3 \ - adwaita-xfce-icon-theme \ - chromium \ - mousepad \ - ristretto \ - thunar \ - util-linux-misc \ - xfce4 \ - xfce4-terminal && \ - echo "**** xfce-tweaks ****" && \ - mv \ - /usr/bin/thunar \ - /usr/bin/thunar-real && \ - echo "**** cleanup ****" && \ - rm -f \ - /etc/xdg/autostart/xfce4-power-manager.desktop \ - /etc/xdg/autostart/xscreensaver.desktop \ - /usr/share/xfce4/panel/plugins/power-manager-plugin.desktop && \ - rm -rf \ - /config/.cache \ - /tmp/* - -# add local files -COPY /root / - -# ports and volumes -EXPOSE 3001 - -VOLUME /config diff --git a/Dockerfile.aarch64 b/Dockerfile.aarch64 deleted file mode 100644 index f3a7a969d..000000000 --- a/Dockerfile.aarch64 +++ /dev/null @@ -1,50 +0,0 @@ -# syntax=docker/dockerfile:1 - -FROM ghcr.io/linuxserver/baseimage-selkies:arm64v8-alpine322 - -# set version label -ARG BUILD_DATE -ARG VERSION -ARG XFCE_VERSION -LABEL build_version="Linuxserver.io version:- ${VERSION} Build-date:- ${BUILD_DATE}" -LABEL maintainer="thelamer" - -# title -ENV TITLE="Alpine XFCE" - -RUN \ - echo "**** add icon ****" && \ - curl -o \ - /usr/share/selkies/www/icon.png \ - https://raw.githubusercontent.com/linuxserver/docker-templates/master/linuxserver.io/img/webtop-logo.png && \ - echo "**** install packages ****" && \ - apk add --no-cache \ - adw-gtk3 \ - adwaita-xfce-icon-theme \ - chromium \ - mousepad \ - ristretto \ - thunar \ - util-linux-misc \ - xfce4 \ - xfce4-terminal && \ - echo "**** xfce-tweaks ****" && \ - mv \ - /usr/bin/thunar \ - /usr/bin/thunar-real && \ - echo "**** cleanup ****" && \ - rm -f \ - 
/etc/xdg/autostart/xfce4-power-manager.desktop \ - /etc/xdg/autostart/xscreensaver.desktop \ - /usr/share/xfce4/panel/plugins/power-manager-plugin.desktop && \ - rm -rf \ - /config/.cache \ - /tmp/* - -# add local files -COPY /root / - -# ports and volumes -EXPOSE 3001 - -VOLUME /config diff --git a/Jenkinsfile b/Jenkinsfile deleted file mode 100644 index 06d7584d1..000000000 --- a/Jenkinsfile +++ /dev/null @@ -1,1250 +0,0 @@ -pipeline { - agent { - label 'X86-64-MULTI' - } - options { - buildDiscarder(logRotator(numToKeepStr: '10', daysToKeepStr: '60')) - parallelsAlwaysFailFast() - } - // Input to determine if this is a package check - parameters { - string(defaultValue: 'false', description: 'package check run', name: 'PACKAGE_CHECK') - } - // Configuration for the variables used for this specific repo - environment { - BUILDS_DISCORD=credentials('build_webhook_url') - GITHUB_TOKEN=credentials('498b4638-2d02-4ce5-832d-8a57d01d97ab') - GITLAB_TOKEN=credentials('b6f0f1dd-6952-4cf6-95d1-9c06380283f0') - GITLAB_NAMESPACE=credentials('gitlab-namespace-id') - DOCKERHUB_TOKEN=credentials('docker-hub-ci-pat') - QUAYIO_API_TOKEN=credentials('quayio-repo-api-token') - GIT_SIGNING_KEY=credentials('484fbca6-9a4f-455e-b9e3-97ac98785f5f') - BUILD_VERSION_ARG = 'XFCE_VERSION' - LS_USER = 'linuxserver' - LS_REPO = 'docker-webtop' - CONTAINER_NAME = 'webtop' - DOCKERHUB_IMAGE = 'linuxserver/webtop' - DEV_DOCKERHUB_IMAGE = 'lsiodev/webtop' - PR_DOCKERHUB_IMAGE = 'lspipepr/webtop' - DIST_IMAGE = 'alpine' - MULTIARCH = 'true' - CI = 'true' - CI_WEB = 'true' - CI_PORT = '3001' - CI_SSL = 'true' - CI_DELAY = '60' - CI_WEB_SCREENSHOT_DELAY = '10' - CI_DOCKERENV = 'TZ=US/Pacific' - CI_AUTH = 'user:password' - CI_WEBPATH = '' - } - stages { - stage("Set git config"){ - steps{ - sh '''#!/bin/bash - cat ${GIT_SIGNING_KEY} > /config/.ssh/id_sign - chmod 600 /config/.ssh/id_sign - ssh-keygen -y -f /config/.ssh/id_sign > /config/.ssh/id_sign.pub - echo "Using $(ssh-keygen -lf 
/config/.ssh/id_sign) to sign commits" - git config --global gpg.format ssh - git config --global user.signingkey /config/.ssh/id_sign - git config --global commit.gpgsign true - ''' - } - } - // Setup all the basic environment variables needed for the build - stage("Set ENV Variables base"){ - steps{ - echo "Running on node: ${NODE_NAME}" - sh '''#! /bin/bash - echo "Pruning builder" - docker builder prune -f --builder container || : - containers=$(docker ps -q) - if [[ -n "${containers}" ]]; then - BUILDX_CONTAINER_ID=$(docker ps -qf 'name=buildx_buildkit') - for container in ${containers}; do - if [[ "${container}" == "${BUILDX_CONTAINER_ID}" ]]; then - echo "skipping buildx container in docker stop" - else - echo "Stopping container ${container}" - docker stop ${container} - fi - done - fi - docker system prune -f --volumes || : - docker image prune -af || : - ''' - script{ - env.EXIT_STATUS = '' - env.LS_RELEASE = sh( - script: '''docker run --rm quay.io/skopeo/stable:v1 inspect docker://ghcr.io/${LS_USER}/${CONTAINER_NAME}:latest 2>/dev/null | jq -r '.Labels.build_version' | awk '{print $3}' | grep '\\-ls' || : ''', - returnStdout: true).trim() - env.LS_RELEASE_NOTES = sh( - script: '''cat readme-vars.yml | awk -F \\" '/date: "[0-9][0-9].[0-9][0-9].[0-9][0-9]:/ {print $4;exit;}' | sed -E ':a;N;$!ba;s/\\r{0,1}\\n/\\\\n/g' ''', - returnStdout: true).trim() - env.GITHUB_DATE = sh( - script: '''date '+%Y-%m-%dT%H:%M:%S%:z' ''', - returnStdout: true).trim() - env.COMMIT_SHA = sh( - script: '''git rev-parse HEAD''', - returnStdout: true).trim() - env.GH_DEFAULT_BRANCH = sh( - script: '''git remote show origin | grep "HEAD branch:" | sed 's|.*HEAD branch: ||' ''', - returnStdout: true).trim() - env.CODE_URL = 'https://github.com/' + env.LS_USER + '/' + env.LS_REPO + '/commit/' + env.GIT_COMMIT - env.DOCKERHUB_LINK = 'https://hub.docker.com/r/' + env.DOCKERHUB_IMAGE + '/tags/' - env.PULL_REQUEST = env.CHANGE_ID - env.TEMPLATED_FILES = 'Jenkinsfile README.md LICENSE 
.editorconfig ./.github/CONTRIBUTING.md ./.github/FUNDING.yml ./.github/ISSUE_TEMPLATE/config.yml ./.github/ISSUE_TEMPLATE/issue.bug.yml ./.github/ISSUE_TEMPLATE/issue.feature.yml ./.github/PULL_REQUEST_TEMPLATE.md ./.github/workflows/external_trigger_scheduler.yml ./.github/workflows/greetings.yml ./.github/workflows/package_trigger_scheduler.yml ./.github/workflows/call_issue_pr_tracker.yml ./.github/workflows/call_issues_cron.yml ./.github/workflows/permissions.yml ./.github/workflows/external_trigger.yml' - if ( env.SYFT_IMAGE_TAG == null ) { - env.SYFT_IMAGE_TAG = 'latest' - } - } - echo "Using syft image tag ${SYFT_IMAGE_TAG}" - sh '''#! /bin/bash - echo "The default github branch detected as ${GH_DEFAULT_BRANCH}" ''' - script{ - env.LS_RELEASE_NUMBER = sh( - script: '''echo ${LS_RELEASE} |sed 's/^.*-ls//g' ''', - returnStdout: true).trim() - } - script{ - env.LS_TAG_NUMBER = sh( - script: '''#! /bin/bash - tagsha=$(git rev-list -n 1 ${LS_RELEASE} 2>/dev/null) - if [ "${tagsha}" == "${COMMIT_SHA}" ]; then - echo ${LS_RELEASE_NUMBER} - elif [ -z "${GIT_COMMIT}" ]; then - echo ${LS_RELEASE_NUMBER} - else - echo $((${LS_RELEASE_NUMBER} + 1)) - fi''', - returnStdout: true).trim() - } - } - } - /* ####################### - Package Version Tagging - ####################### */ - // Grab the current package versions in Git to determine package tag - stage("Set Package tag"){ - steps{ - script{ - env.PACKAGE_TAG = sh( - script: '''#!/bin/bash - if [ -e package_versions.txt ] ; then - cat package_versions.txt | md5sum | cut -c1-8 - else - echo none - fi''', - returnStdout: true).trim() - } - } - } - /* ######################## - External Release Tagging - ######################## */ - // If this is an os release set release type to none to indicate no external release - stage("Set ENV os"){ - steps{ - script{ - env.EXT_RELEASE = env.PACKAGE_TAG - env.RELEASE_LINK = 'none' - } - } - } - // Sanitize the release tag and strip illegal docker or github characters - 
stage("Sanitize tag"){ - steps{ - script{ - env.EXT_RELEASE_CLEAN = sh( - script: '''echo ${EXT_RELEASE} | sed 's/[~,%@+;:/ ]//g' ''', - returnStdout: true).trim() - - def semver = env.EXT_RELEASE_CLEAN =~ /(\d+)\.(\d+)\.(\d+)/ - if (semver.find()) { - env.SEMVER = "${semver[0][1]}.${semver[0][2]}.${semver[0][3]}" - } else { - semver = env.EXT_RELEASE_CLEAN =~ /(\d+)\.(\d+)(?:\.(\d+))?(.*)/ - if (semver.find()) { - if (semver[0][3]) { - env.SEMVER = "${semver[0][1]}.${semver[0][2]}.${semver[0][3]}" - } else if (!semver[0][3] && !semver[0][4]) { - env.SEMVER = "${semver[0][1]}.${semver[0][2]}.${(new Date()).format('YYYYMMdd')}" - } - } - } - - if (env.SEMVER != null) { - if (BRANCH_NAME != "${env.GH_DEFAULT_BRANCH}") { - env.SEMVER = "${env.SEMVER}-${BRANCH_NAME}" - } - println("SEMVER: ${env.SEMVER}") - } else { - println("No SEMVER detected") - } - - } - } - } - // If this is a master build use live docker endpoints - stage("Set ENV live build"){ - when { - branch "master" - environment name: 'CHANGE_ID', value: '' - } - steps { - script{ - env.IMAGE = env.DOCKERHUB_IMAGE - env.GITHUBIMAGE = 'ghcr.io/' + env.LS_USER + '/' + env.CONTAINER_NAME - env.GITLABIMAGE = 'registry.gitlab.com/linuxserver.io/' + env.LS_REPO + '/' + env.CONTAINER_NAME - env.QUAYIMAGE = 'quay.io/linuxserver.io/' + env.CONTAINER_NAME - if (env.MULTIARCH == 'true') { - env.CI_TAGS = 'amd64-' + env.EXT_RELEASE_CLEAN + '-ls' + env.LS_TAG_NUMBER + '|arm64v8-' + env.EXT_RELEASE_CLEAN + '-ls' + env.LS_TAG_NUMBER - } else { - env.CI_TAGS = env.EXT_RELEASE_CLEAN + '-ls' + env.LS_TAG_NUMBER - } - env.VERSION_TAG = env.EXT_RELEASE_CLEAN + '-ls' + env.LS_TAG_NUMBER - env.META_TAG = env.EXT_RELEASE_CLEAN + '-ls' + env.LS_TAG_NUMBER - env.EXT_RELEASE_TAG = 'version-' + env.EXT_RELEASE_CLEAN - env.BUILDCACHE = 'docker.io/lsiodev/buildcache,registry.gitlab.com/linuxserver.io/docker-jenkins-builder/lsiodev-buildcache,ghcr.io/linuxserver/lsiodev-buildcache,quay.io/linuxserver.io/lsiodev-buildcache' - 
env.CITEST_IMAGETAG = 'latest' - } - } - } - // If this is a dev build use dev docker endpoints - stage("Set ENV dev build"){ - when { - not {branch "master"} - environment name: 'CHANGE_ID', value: '' - } - steps { - script{ - env.IMAGE = env.DEV_DOCKERHUB_IMAGE - env.GITHUBIMAGE = 'ghcr.io/' + env.LS_USER + '/lsiodev-' + env.CONTAINER_NAME - env.GITLABIMAGE = 'registry.gitlab.com/linuxserver.io/' + env.LS_REPO + '/lsiodev-' + env.CONTAINER_NAME - env.QUAYIMAGE = 'quay.io/linuxserver.io/lsiodev-' + env.CONTAINER_NAME - if (env.MULTIARCH == 'true') { - env.CI_TAGS = 'amd64-' + env.EXT_RELEASE_CLEAN + '-pkg-' + env.PACKAGE_TAG + '-dev-' + env.COMMIT_SHA + '|arm64v8-' + env.EXT_RELEASE_CLEAN + '-pkg-' + env.PACKAGE_TAG + '-dev-' + env.COMMIT_SHA - } else { - env.CI_TAGS = env.EXT_RELEASE_CLEAN + '-pkg-' + env.PACKAGE_TAG + '-dev-' + env.COMMIT_SHA - } - env.VERSION_TAG = env.EXT_RELEASE_CLEAN + '-pkg-' + env.PACKAGE_TAG + '-dev-' + env.COMMIT_SHA - env.META_TAG = env.EXT_RELEASE_CLEAN + '-pkg-' + env.PACKAGE_TAG + '-dev-' + env.COMMIT_SHA - env.EXT_RELEASE_TAG = 'version-' + env.EXT_RELEASE_CLEAN - env.DOCKERHUB_LINK = 'https://hub.docker.com/r/' + env.DEV_DOCKERHUB_IMAGE + '/tags/' - env.BUILDCACHE = 'docker.io/lsiodev/buildcache,registry.gitlab.com/linuxserver.io/docker-jenkins-builder/lsiodev-buildcache,ghcr.io/linuxserver/lsiodev-buildcache,quay.io/linuxserver.io/lsiodev-buildcache' - env.CITEST_IMAGETAG = 'develop' - } - } - } - // If this is a pull request build use dev docker endpoints - stage("Set ENV PR build"){ - when { - not {environment name: 'CHANGE_ID', value: ''} - } - steps { - script{ - env.IMAGE = env.PR_DOCKERHUB_IMAGE - env.GITHUBIMAGE = 'ghcr.io/' + env.LS_USER + '/lspipepr-' + env.CONTAINER_NAME - env.GITLABIMAGE = 'registry.gitlab.com/linuxserver.io/' + env.LS_REPO + '/lspipepr-' + env.CONTAINER_NAME - env.QUAYIMAGE = 'quay.io/linuxserver.io/lspipepr-' + env.CONTAINER_NAME - if (env.MULTIARCH == 'true') { - env.CI_TAGS = 'amd64-' + 
env.EXT_RELEASE_CLEAN + '-pkg-' + env.PACKAGE_TAG + '-dev-' + env.COMMIT_SHA + '-pr-' + env.PULL_REQUEST + '|arm64v8-' + env.EXT_RELEASE_CLEAN + '-pkg-' + env.PACKAGE_TAG + '-dev-' + env.COMMIT_SHA + '-pr-' + env.PULL_REQUEST - } else { - env.CI_TAGS = env.EXT_RELEASE_CLEAN + '-pkg-' + env.PACKAGE_TAG + '-dev-' + env.COMMIT_SHA + '-pr-' + env.PULL_REQUEST - } - env.VERSION_TAG = env.EXT_RELEASE_CLEAN + '-pkg-' + env.PACKAGE_TAG + '-dev-' + env.COMMIT_SHA + '-pr-' + env.PULL_REQUEST - env.META_TAG = env.EXT_RELEASE_CLEAN + '-pkg-' + env.PACKAGE_TAG + '-dev-' + env.COMMIT_SHA + '-pr-' + env.PULL_REQUEST - env.EXT_RELEASE_TAG = 'version-' + env.EXT_RELEASE_CLEAN - env.CODE_URL = 'https://github.com/' + env.LS_USER + '/' + env.LS_REPO + '/pull/' + env.PULL_REQUEST - env.DOCKERHUB_LINK = 'https://hub.docker.com/r/' + env.PR_DOCKERHUB_IMAGE + '/tags/' - env.BUILDCACHE = 'docker.io/lsiodev/buildcache,registry.gitlab.com/linuxserver.io/docker-jenkins-builder/lsiodev-buildcache,ghcr.io/linuxserver/lsiodev-buildcache,quay.io/linuxserver.io/lsiodev-buildcache' - env.CITEST_IMAGETAG = 'develop' - } - } - } - // Run ShellCheck - stage('ShellCheck') { - when { - environment name: 'CI', value: 'true' - } - steps { - withCredentials([ - string(credentialsId: 'ci-tests-s3-key-id', variable: 'S3_KEY'), - string(credentialsId: 'ci-tests-s3-secret-access-key', variable: 'S3_SECRET') - ]) { - script{ - env.SHELLCHECK_URL = 'https://ci-tests.linuxserver.io/' + env.IMAGE + '/' + env.META_TAG + '/shellcheck-result.xml' - } - sh '''curl -sL https://raw.githubusercontent.com/linuxserver/docker-jenkins-builder/master/checkrun.sh | /bin/bash''' - sh '''#! 
/bin/bash - docker run --rm \ - -v ${WORKSPACE}:/mnt \ - -e AWS_ACCESS_KEY_ID=\"${S3_KEY}\" \ - -e AWS_SECRET_ACCESS_KEY=\"${S3_SECRET}\" \ - ghcr.io/linuxserver/baseimage-alpine:3 s6-envdir -fn -- /var/run/s6/container_environment /bin/bash -c "\ - apk add --no-cache python3 && \ - python3 -m venv /lsiopy && \ - pip install --no-cache-dir -U pip && \ - pip install --no-cache-dir s3cmd && \ - s3cmd put --no-preserve --acl-public -m text/xml /mnt/shellcheck-result.xml s3://ci-tests.linuxserver.io/${IMAGE}/${META_TAG}/shellcheck-result.xml" || :''' - } - } - } - // Use helper containers to render templated files - stage('Update-Templates') { - when { - branch "master" - environment name: 'CHANGE_ID', value: '' - expression { - env.CONTAINER_NAME != null - } - } - steps { - sh '''#! /bin/bash - set -e - TEMPDIR=$(mktemp -d) - docker pull ghcr.io/linuxserver/jenkins-builder:latest - # Cloned repo paths for templating: - # ${TEMPDIR}/docker-${CONTAINER_NAME}: Cloned branch master of ${LS_USER}/${LS_REPO} for running the jenkins builder on - # ${TEMPDIR}/repo/${LS_REPO}: Cloned branch master of ${LS_USER}/${LS_REPO} for commiting various templated file changes and pushing back to Github - # ${TEMPDIR}/docs/docker-documentation: Cloned docs repo for pushing docs updates to Github - # ${TEMPDIR}/unraid/docker-templates: Cloned docker-templates repo to check for logos - # ${TEMPDIR}/unraid/templates: Cloned templates repo for commiting unraid template changes and pushing back to Github - git clone --branch master --depth 1 https://github.com/${LS_USER}/${LS_REPO}.git ${TEMPDIR}/docker-${CONTAINER_NAME} - docker run --rm -v ${TEMPDIR}/docker-${CONTAINER_NAME}:/tmp -e LOCAL=true -e PUID=$(id -u) -e PGID=$(id -g) ghcr.io/linuxserver/jenkins-builder:latest - echo "Starting Stage 1 - Jenkinsfile update" - if [[ "$(md5sum Jenkinsfile | awk '{ print $1 }')" != "$(md5sum ${TEMPDIR}/docker-${CONTAINER_NAME}/Jenkinsfile | awk '{ print $1 }')" ]]; then - mkdir -p ${TEMPDIR}/repo - git 
clone https://github.com/${LS_USER}/${LS_REPO}.git ${TEMPDIR}/repo/${LS_REPO} - cd ${TEMPDIR}/repo/${LS_REPO} - git checkout -f master - cp ${TEMPDIR}/docker-${CONTAINER_NAME}/Jenkinsfile ${TEMPDIR}/repo/${LS_REPO}/ - git add Jenkinsfile - git commit -m 'Bot Updating Templated Files' - git pull https://LinuxServer-CI:${GITHUB_TOKEN}@github.com/${LS_USER}/${LS_REPO}.git master - git push https://LinuxServer-CI:${GITHUB_TOKEN}@github.com/${LS_USER}/${LS_REPO}.git master - echo "true" > /tmp/${COMMIT_SHA}-${BUILD_NUMBER} - echo "Updating Jenkinsfile and exiting build, new one will trigger based on commit" - rm -Rf ${TEMPDIR} - exit 0 - else - echo "Jenkinsfile is up to date." - fi - echo "Starting Stage 2 - Delete old templates" - OLD_TEMPLATES=".github/ISSUE_TEMPLATE.md .github/ISSUE_TEMPLATE/issue.bug.md .github/ISSUE_TEMPLATE/issue.feature.md .github/workflows/call_invalid_helper.yml .github/workflows/stale.yml .github/workflows/package_trigger.yml" - for i in ${OLD_TEMPLATES}; do - if [[ -f "${i}" ]]; then - TEMPLATES_TO_DELETE="${i} ${TEMPLATES_TO_DELETE}" - fi - done - if [[ -n "${TEMPLATES_TO_DELETE}" ]]; then - mkdir -p ${TEMPDIR}/repo - git clone https://github.com/${LS_USER}/${LS_REPO}.git ${TEMPDIR}/repo/${LS_REPO} - cd ${TEMPDIR}/repo/${LS_REPO} - git checkout -f master - for i in ${TEMPLATES_TO_DELETE}; do - git rm "${i}" - done - git commit -m 'Bot Updating Templated Files' - git pull https://LinuxServer-CI:${GITHUB_TOKEN}@github.com/${LS_USER}/${LS_REPO}.git master - git push https://LinuxServer-CI:${GITHUB_TOKEN}@github.com/${LS_USER}/${LS_REPO}.git master - echo "true" > /tmp/${COMMIT_SHA}-${BUILD_NUMBER} - echo "Deleting old/deprecated templates and exiting build, new one will trigger based on commit" - rm -Rf ${TEMPDIR} - exit 0 - else - echo "No templates to delete" - fi - echo "Starting Stage 2.5 - Update init diagram" - if ! 
grep -q 'init_diagram:' readme-vars.yml; then - echo "Adding the key 'init_diagram' to readme-vars.yml" - sed -i '\\|^#.*changelog.*$|d' readme-vars.yml - sed -i 's|^changelogs:|# init diagram\\ninit_diagram:\\n\\n# changelog\\nchangelogs:|' readme-vars.yml - fi - mkdir -p ${TEMPDIR}/d2 - docker run --rm -v ${TEMPDIR}/d2:/output -e PUID=$(id -u) -e PGID=$(id -g) -e RAW="true" ghcr.io/linuxserver/d2-builder:latest ${CONTAINER_NAME}:latest - ls -al ${TEMPDIR}/d2 - yq -ei ".init_diagram |= load_str(\\"${TEMPDIR}/d2/${CONTAINER_NAME}-latest.d2\\")" readme-vars.yml - if [[ $(md5sum readme-vars.yml | cut -c1-8) != $(md5sum ${TEMPDIR}/docker-${CONTAINER_NAME}/readme-vars.yml | cut -c1-8) ]]; then - echo "'init_diagram' has been updated. Updating repo and exiting build, new one will trigger based on commit." - mkdir -p ${TEMPDIR}/repo - git clone https://github.com/${LS_USER}/${LS_REPO}.git ${TEMPDIR}/repo/${LS_REPO} - cd ${TEMPDIR}/repo/${LS_REPO} - git checkout -f master - cp ${WORKSPACE}/readme-vars.yml ${TEMPDIR}/repo/${LS_REPO}/readme-vars.yml - git add readme-vars.yml - git commit -m 'Bot Updating Templated Files' - git pull https://LinuxServer-CI:${GITHUB_TOKEN}@github.com/${LS_USER}/${LS_REPO}.git master - git push https://LinuxServer-CI:${GITHUB_TOKEN}@github.com/${LS_USER}/${LS_REPO}.git master - echo "true" > /tmp/${COMMIT_SHA}-${BUILD_NUMBER} - echo "Updating templates and exiting build, new one will trigger based on commit" - rm -Rf ${TEMPDIR} - exit 0 - else - echo "false" > /tmp/${COMMIT_SHA}-${BUILD_NUMBER} - echo "Init diagram is unchanged" - fi - echo "Starting Stage 3 - Update templates" - CURRENTHASH=$(grep -hs ^ ${TEMPLATED_FILES} | md5sum | cut -c1-8) - cd ${TEMPDIR}/docker-${CONTAINER_NAME} - NEWHASH=$(grep -hs ^ ${TEMPLATED_FILES} | md5sum | cut -c1-8) - if [[ "${CURRENTHASH}" != "${NEWHASH}" ]] || ! 
grep -q '.jenkins-external' "${WORKSPACE}/.gitignore" 2>/dev/null; then - mkdir -p ${TEMPDIR}/repo - git clone https://github.com/${LS_USER}/${LS_REPO}.git ${TEMPDIR}/repo/${LS_REPO} - cd ${TEMPDIR}/repo/${LS_REPO} - git checkout -f master - cd ${TEMPDIR}/docker-${CONTAINER_NAME} - mkdir -p ${TEMPDIR}/repo/${LS_REPO}/.github/workflows - mkdir -p ${TEMPDIR}/repo/${LS_REPO}/.github/ISSUE_TEMPLATE - cp --parents ${TEMPLATED_FILES} ${TEMPDIR}/repo/${LS_REPO}/ || : - cp --parents readme-vars.yml ${TEMPDIR}/repo/${LS_REPO}/ || : - cd ${TEMPDIR}/repo/${LS_REPO}/ - if ! grep -q '.jenkins-external' .gitignore 2>/dev/null; then - echo ".jenkins-external" >> .gitignore - git add .gitignore - fi - git add readme-vars.yml ${TEMPLATED_FILES} - git commit -m 'Bot Updating Templated Files' - git pull https://LinuxServer-CI:${GITHUB_TOKEN}@github.com/${LS_USER}/${LS_REPO}.git master - git push https://LinuxServer-CI:${GITHUB_TOKEN}@github.com/${LS_USER}/${LS_REPO}.git master - echo "true" > /tmp/${COMMIT_SHA}-${BUILD_NUMBER} - echo "Updating templates and exiting build, new one will trigger based on commit" - rm -Rf ${TEMPDIR} - exit 0 - else - echo "false" > /tmp/${COMMIT_SHA}-${BUILD_NUMBER} - echo "No templates to update" - fi - echo "Starting Stage 4 - External repo updates: Docs, Unraid Template and Readme Sync to Docker Hub" - mkdir -p ${TEMPDIR}/docs - git clone --depth=1 https://github.com/linuxserver/docker-documentation.git ${TEMPDIR}/docs/docker-documentation - if [[ "${BRANCH_NAME}" == "${GH_DEFAULT_BRANCH}" ]] && [[ (! 
-f ${TEMPDIR}/docs/docker-documentation/docs/images/docker-${CONTAINER_NAME}.md) || ("$(md5sum ${TEMPDIR}/docs/docker-documentation/docs/images/docker-${CONTAINER_NAME}.md | awk '{ print $1 }')" != "$(md5sum ${TEMPDIR}/docker-${CONTAINER_NAME}/.jenkins-external/docker-${CONTAINER_NAME}.md | awk '{ print $1 }')") ]]; then - cp ${TEMPDIR}/docker-${CONTAINER_NAME}/.jenkins-external/docker-${CONTAINER_NAME}.md ${TEMPDIR}/docs/docker-documentation/docs/images/ - cd ${TEMPDIR}/docs/docker-documentation - GH_DOCS_DEFAULT_BRANCH=$(git remote show origin | grep "HEAD branch:" | sed 's|.*HEAD branch: ||') - git add docs/images/docker-${CONTAINER_NAME}.md - echo "Updating docs repo" - git commit -m 'Bot Updating Documentation' - git pull https://LinuxServer-CI:${GITHUB_TOKEN}@github.com/linuxserver/docker-documentation.git ${GH_DOCS_DEFAULT_BRANCH} --rebase - git push https://LinuxServer-CI:${GITHUB_TOKEN}@github.com/linuxserver/docker-documentation.git ${GH_DOCS_DEFAULT_BRANCH} || \ - (MAXWAIT="10" && echo "Push to docs failed, trying again in ${MAXWAIT} seconds" && \ - sleep $((RANDOM % MAXWAIT)) && \ - git pull https://LinuxServer-CI:${GITHUB_TOKEN}@github.com/linuxserver/docker-documentation.git ${GH_DOCS_DEFAULT_BRANCH} --rebase && \ - git push https://LinuxServer-CI:${GITHUB_TOKEN}@github.com/linuxserver/docker-documentation.git ${GH_DOCS_DEFAULT_BRANCH}) - else - echo "Docs update not needed, skipping" - fi - mkdir -p ${TEMPDIR}/unraid - git clone --depth=1 https://github.com/linuxserver/docker-templates.git ${TEMPDIR}/unraid/docker-templates - git clone --depth=1 https://github.com/linuxserver/templates.git ${TEMPDIR}/unraid/templates - if [[ -f ${TEMPDIR}/unraid/docker-templates/linuxserver.io/img/${CONTAINER_NAME}-logo.png ]]; then - sed -i "s|master/linuxserver.io/img/linuxserver-ls-logo.png|master/linuxserver.io/img/${CONTAINER_NAME}-logo.png|" ${TEMPDIR}/docker-${CONTAINER_NAME}/.jenkins-external/${CONTAINER_NAME}.xml - elif [[ -f 
${TEMPDIR}/unraid/docker-templates/linuxserver.io/img/${CONTAINER_NAME}-icon.png ]]; then - sed -i "s|master/linuxserver.io/img/linuxserver-ls-logo.png|master/linuxserver.io/img/${CONTAINER_NAME}-icon.png|" ${TEMPDIR}/docker-${CONTAINER_NAME}/.jenkins-external/${CONTAINER_NAME}.xml - fi - if [[ "${BRANCH_NAME}" == "${GH_DEFAULT_BRANCH}" ]] && [[ (! -f ${TEMPDIR}/unraid/templates/unraid/${CONTAINER_NAME}.xml) || ("$(md5sum ${TEMPDIR}/unraid/templates/unraid/${CONTAINER_NAME}.xml | awk '{ print $1 }')" != "$(md5sum ${TEMPDIR}/docker-${CONTAINER_NAME}/.jenkins-external/${CONTAINER_NAME}.xml | awk '{ print $1 }')") ]]; then - echo "Updating Unraid template" - cd ${TEMPDIR}/unraid/templates/ - GH_TEMPLATES_DEFAULT_BRANCH=$(git remote show origin | grep "HEAD branch:" | sed 's|.*HEAD branch: ||') - if grep -wq "^${CONTAINER_NAME}$" ${TEMPDIR}/unraid/templates/unraid/ignore.list && [[ -f ${TEMPDIR}/unraid/templates/unraid/deprecated/${CONTAINER_NAME}.xml ]]; then - echo "Image is on the ignore list, and already in the deprecation folder." 
- elif grep -wq "^${CONTAINER_NAME}$" ${TEMPDIR}/unraid/templates/unraid/ignore.list; then - echo "Image is on the ignore list, marking Unraid template as deprecated" - cp ${TEMPDIR}/docker-${CONTAINER_NAME}/.jenkins-external/${CONTAINER_NAME}.xml ${TEMPDIR}/unraid/templates/unraid/ - git add -u unraid/${CONTAINER_NAME}.xml - git mv unraid/${CONTAINER_NAME}.xml unraid/deprecated/${CONTAINER_NAME}.xml || : - git commit -m 'Bot Moving Deprecated Unraid Template' || : - else - cp ${TEMPDIR}/docker-${CONTAINER_NAME}/.jenkins-external/${CONTAINER_NAME}.xml ${TEMPDIR}/unraid/templates/unraid/ - git add unraid/${CONTAINER_NAME}.xml - git commit -m 'Bot Updating Unraid Template' - fi - git pull https://LinuxServer-CI:${GITHUB_TOKEN}@github.com/linuxserver/templates.git ${GH_TEMPLATES_DEFAULT_BRANCH} --rebase - git push https://LinuxServer-CI:${GITHUB_TOKEN}@github.com/linuxserver/templates.git ${GH_TEMPLATES_DEFAULT_BRANCH} || \ - (MAXWAIT="10" && echo "Push to unraid templates failed, trying again in ${MAXWAIT} seconds" && \ - sleep $((RANDOM % MAXWAIT)) && \ - git pull https://LinuxServer-CI:${GITHUB_TOKEN}@github.com/linuxserver/templates.git ${GH_TEMPLATES_DEFAULT_BRANCH} --rebase && \ - git push https://LinuxServer-CI:${GITHUB_TOKEN}@github.com/linuxserver/templates.git ${GH_TEMPLATES_DEFAULT_BRANCH}) - else - echo "No updates to Unraid template needed, skipping" - fi - if [[ "${BRANCH_NAME}" == "${GH_DEFAULT_BRANCH}" ]]; then - if [[ $(cat ${TEMPDIR}/docker-${CONTAINER_NAME}/README.md | wc -m) -gt 25000 ]]; then - echo "Readme is longer than 25,000 characters. 
Syncing the lite version to Docker Hub" - DH_README_SYNC_PATH="${TEMPDIR}/docker-${CONTAINER_NAME}/.jenkins-external/README.lite" - else - echo "Syncing readme to Docker Hub" - DH_README_SYNC_PATH="${TEMPDIR}/docker-${CONTAINER_NAME}/README.md" - fi - if curl -s https://hub.docker.com/v2/namespaces/${DOCKERHUB_IMAGE%%/*}/repositories/${DOCKERHUB_IMAGE##*/}/tags | jq -r '.message' | grep -q 404; then - echo "Docker Hub endpoint doesn't exist. Creating endpoint first." - DH_TOKEN=$(curl -d '{"username":"linuxserverci", "password":"'${DOCKERHUB_TOKEN}'"}' -H "Content-Type: application/json" -X POST https://hub.docker.com/v2/users/login | jq -r '.token') - curl -s \ - -H "Authorization: JWT ${DH_TOKEN}" \ - -H "Content-Type: application/json" \ - -X POST \ - -d '{"name":"'${DOCKERHUB_IMAGE##*/}'", "namespace":"'${DOCKERHUB_IMAGE%%/*}'"}' \ - https://hub.docker.com/v2/repositories/ || : - fi - DH_TOKEN=$(curl -d '{"username":"linuxserverci", "password":"'${DOCKERHUB_TOKEN}'"}' -H "Content-Type: application/json" -X POST https://hub.docker.com/v2/users/login | jq -r '.token') - curl -s \ - -H "Authorization: JWT ${DH_TOKEN}" \ - -H "Content-Type: application/json" \ - -X PATCH \ - -d "{\\"full_description\\":$(jq -Rsa . ${DH_README_SYNC_PATH})}" \ - https://hub.docker.com/v2/repositories/${DOCKERHUB_IMAGE} || : - else - echo "Not the default Github branch. Skipping readme sync to Docker Hub." 
- fi - rm -Rf ${TEMPDIR}''' - script{ - env.FILES_UPDATED = sh( - script: '''cat /tmp/${COMMIT_SHA}-${BUILD_NUMBER}''', - returnStdout: true).trim() - } - } - } - // Exit the build if the Templated files were just updated - stage('Template-exit') { - when { - branch "master" - environment name: 'CHANGE_ID', value: '' - environment name: 'FILES_UPDATED', value: 'true' - expression { - env.CONTAINER_NAME != null - } - } - steps { - script{ - env.EXIT_STATUS = 'ABORTED' - } - } - } - // If this is a master build check the S6 service file perms - stage("Check S6 Service file Permissions"){ - when { - branch "master" - environment name: 'CHANGE_ID', value: '' - environment name: 'EXIT_STATUS', value: '' - } - steps { - script{ - sh '''#! /bin/bash - WRONG_PERM=$(find ./ -path "./.git" -prune -o \\( -name "run" -o -name "finish" -o -name "check" \\) -not -perm -u=x,g=x,o=x -print) - if [[ -n "${WRONG_PERM}" ]]; then - echo "The following S6 service files are missing the executable bit; canceling the faulty build: ${WRONG_PERM}" - exit 1 - else - echo "S6 service file perms look good." 
- fi ''' - } - } - } - /* ####################### - GitLab Mirroring and Quay.io Repo Visibility - ####################### */ - // Ping into Gitlab to mirror this repo and have a registry endpoint & mark this repo on Quay.io as public - stage("GitLab Mirror and Quay.io Visibility"){ - when { - environment name: 'EXIT_STATUS', value: '' - } - steps{ - sh '''curl -H "Content-Type: application/json" -H "Private-Token: ${GITLAB_TOKEN}" -X POST https://gitlab.com/api/v4/projects \ - -d '{"namespace_id":'${GITLAB_NAMESPACE}',\ - "name":"'${LS_REPO}'", - "mirror":true,\ - "import_url":"https://github.com/linuxserver/'${LS_REPO}'.git",\ - "issues_access_level":"disabled",\ - "merge_requests_access_level":"disabled",\ - "repository_access_level":"enabled",\ - "visibility":"public"}' ''' - sh '''curl -H "Private-Token: ${GITLAB_TOKEN}" -X PUT "https://gitlab.com/api/v4/projects/Linuxserver.io%2F${LS_REPO}" \ - -d "mirror=true&import_url=https://github.com/linuxserver/${LS_REPO}.git" ''' - sh '''curl -H "Content-Type: application/json" -H "Authorization: Bearer ${QUAYIO_API_TOKEN}" -X POST "https://quay.io/api/v1/repository${QUAYIMAGE/quay.io/}/changevisibility" \ - -d '{"visibility":"public"}' ||: ''' - } - } - /* ############### - Build Container - ############### */ - // Build Docker container for push to LS Repo - stage('Build-Single') { - when { - expression { - env.MULTIARCH == 'false' || params.PACKAGE_CHECK == 'true' - } - environment name: 'EXIT_STATUS', value: '' - } - steps { - echo "Running on node: ${NODE_NAME}" - sh "sed -r -i 's|(^FROM .*)|\\1\\n\\nENV LSIO_FIRST_PARTY=true|g' Dockerfile" - sh "docker buildx build \ - --label \"org.opencontainers.image.created=${GITHUB_DATE}\" \ - --label \"org.opencontainers.image.authors=linuxserver.io\" \ - --label \"org.opencontainers.image.url=https://github.com/linuxserver/docker-webtop/packages\" \ - --label \"org.opencontainers.image.documentation=https://docs.linuxserver.io/images/docker-webtop\" \ - --label 
\"org.opencontainers.image.source=https://github.com/linuxserver/docker-webtop\" \ - --label \"org.opencontainers.image.version=${EXT_RELEASE_CLEAN}-ls${LS_TAG_NUMBER}\" \ - --label \"org.opencontainers.image.revision=${COMMIT_SHA}\" \ - --label \"org.opencontainers.image.vendor=linuxserver.io\" \ - --label \"org.opencontainers.image.licenses=GPL-3.0-only\" \ - --label \"org.opencontainers.image.ref.name=${COMMIT_SHA}\" \ - --label \"org.opencontainers.image.title=Webtop\" \ - --label \"org.opencontainers.image.description=[Webtop](https://github.com/linuxserver/docker-webtop) - Alpine, Ubuntu, Fedora, and Arch based containers containing full desktop environments in officially supported flavors accessible via any modern web browser. \" \ - --no-cache --pull -t ${IMAGE}:${META_TAG} --platform=linux/amd64 \ - --provenance=true --sbom=true --builder=container --load \ - --build-arg ${BUILD_VERSION_ARG}=${EXT_RELEASE} --build-arg VERSION=\"${VERSION_TAG}\" --build-arg BUILD_DATE=${GITHUB_DATE} ." - sh '''#! /bin/bash - set -e - IFS=',' read -ra CACHE <<< "$BUILDCACHE" - for i in "${CACHE[@]}"; do - docker tag ${IMAGE}:${META_TAG} ${i}:amd64-${COMMIT_SHA}-${BUILD_NUMBER} - done - ''' - withCredentials([ - [ - $class: 'UsernamePasswordMultiBinding', - credentialsId: 'Quay.io-Robot', - usernameVariable: 'QUAYUSER', - passwordVariable: 'QUAYPASS' - ] - ]) { - retry_backoff(5,5) { - sh '''#! 
/bin/bash - set -e - echo $DOCKERHUB_TOKEN | docker login -u linuxserverci --password-stdin - echo $GITHUB_TOKEN | docker login ghcr.io -u LinuxServer-CI --password-stdin - echo $GITLAB_TOKEN | docker login registry.gitlab.com -u LinuxServer.io --password-stdin - echo $QUAYPASS | docker login quay.io -u $QUAYUSER --password-stdin - - if [[ "${PACKAGE_CHECK}" != "true" ]]; then - declare -A pids - IFS=',' read -ra CACHE <<< "$BUILDCACHE" - for i in "${CACHE[@]}"; do - docker push ${i}:amd64-${COMMIT_SHA}-${BUILD_NUMBER} & - pids[$!]="$i" - done - for p in "${!pids[@]}"; do - wait "$p" || { [[ "${pids[$p]}" != *"quay.io"* ]] && exit 1; } - done - fi - ''' - } - } - } - } - // Build MultiArch Docker containers for push to LS Repo - stage('Build-Multi') { - when { - allOf { - environment name: 'MULTIARCH', value: 'true' - expression { params.PACKAGE_CHECK == 'false' } - } - environment name: 'EXIT_STATUS', value: '' - } - parallel { - stage('Build X86') { - steps { - echo "Running on node: ${NODE_NAME}" - sh "sed -r -i 's|(^FROM .*)|\\1\\n\\nENV LSIO_FIRST_PARTY=true|g' Dockerfile" - sh "docker buildx build \ - --label \"org.opencontainers.image.created=${GITHUB_DATE}\" \ - --label \"org.opencontainers.image.authors=linuxserver.io\" \ - --label \"org.opencontainers.image.url=https://github.com/linuxserver/docker-webtop/packages\" \ - --label \"org.opencontainers.image.documentation=https://docs.linuxserver.io/images/docker-webtop\" \ - --label \"org.opencontainers.image.source=https://github.com/linuxserver/docker-webtop\" \ - --label \"org.opencontainers.image.version=${EXT_RELEASE_CLEAN}-ls${LS_TAG_NUMBER}\" \ - --label \"org.opencontainers.image.revision=${COMMIT_SHA}\" \ - --label \"org.opencontainers.image.vendor=linuxserver.io\" \ - --label \"org.opencontainers.image.licenses=GPL-3.0-only\" \ - --label \"org.opencontainers.image.ref.name=${COMMIT_SHA}\" \ - --label \"org.opencontainers.image.title=Webtop\" \ - --label 
\"org.opencontainers.image.description=[Webtop](https://github.com/linuxserver/docker-webtop) - Alpine, Ubuntu, Fedora, and Arch based containers containing full desktop environments in officially supported flavors accessible via any modern web browser. \" \ - --no-cache --pull -t ${IMAGE}:amd64-${META_TAG} --platform=linux/amd64 \ - --provenance=true --sbom=true --builder=container --load \ - --build-arg ${BUILD_VERSION_ARG}=${EXT_RELEASE} --build-arg VERSION=\"${VERSION_TAG}\" --build-arg BUILD_DATE=${GITHUB_DATE} ." - sh '''#! /bin/bash - set -e - IFS=',' read -ra CACHE <<< "$BUILDCACHE" - for i in "${CACHE[@]}"; do - docker tag ${IMAGE}:amd64-${META_TAG} ${i}:amd64-${COMMIT_SHA}-${BUILD_NUMBER} - done - ''' - withCredentials([ - [ - $class: 'UsernamePasswordMultiBinding', - credentialsId: 'Quay.io-Robot', - usernameVariable: 'QUAYUSER', - passwordVariable: 'QUAYPASS' - ] - ]) { - retry_backoff(5,5) { - sh '''#! /bin/bash - set -e - echo $DOCKERHUB_TOKEN | docker login -u linuxserverci --password-stdin - echo $GITHUB_TOKEN | docker login ghcr.io -u LinuxServer-CI --password-stdin - echo $GITLAB_TOKEN | docker login registry.gitlab.com -u LinuxServer.io --password-stdin - echo $QUAYPASS | docker login quay.io -u $QUAYUSER --password-stdin - - if [[ "${PACKAGE_CHECK}" != "true" ]]; then - declare -A pids - IFS=',' read -ra CACHE <<< "$BUILDCACHE" - for i in "${CACHE[@]}"; do - docker push ${i}:amd64-${COMMIT_SHA}-${BUILD_NUMBER} & - pids[$!]="$i" - done - for p in "${!pids[@]}"; do - wait "$p" || { [[ "${pids[$p]}" != *"quay.io"* ]] && exit 1; } - done - fi - ''' - } - } - } - } - stage('Build ARM64') { - agent { - label 'ARM64' - } - steps { - echo "Running on node: ${NODE_NAME}" - sh "sed -r -i 's|(^FROM .*)|\\1\\n\\nENV LSIO_FIRST_PARTY=true|g' Dockerfile.aarch64" - sh "docker buildx build \ - --label \"org.opencontainers.image.created=${GITHUB_DATE}\" \ - --label \"org.opencontainers.image.authors=linuxserver.io\" \ - --label 
\"org.opencontainers.image.url=https://github.com/linuxserver/docker-webtop/packages\" \ - --label \"org.opencontainers.image.documentation=https://docs.linuxserver.io/images/docker-webtop\" \ - --label \"org.opencontainers.image.source=https://github.com/linuxserver/docker-webtop\" \ - --label \"org.opencontainers.image.version=${EXT_RELEASE_CLEAN}-ls${LS_TAG_NUMBER}\" \ - --label \"org.opencontainers.image.revision=${COMMIT_SHA}\" \ - --label \"org.opencontainers.image.vendor=linuxserver.io\" \ - --label \"org.opencontainers.image.licenses=GPL-3.0-only\" \ - --label \"org.opencontainers.image.ref.name=${COMMIT_SHA}\" \ - --label \"org.opencontainers.image.title=Webtop\" \ - --label \"org.opencontainers.image.description=[Webtop](https://github.com/linuxserver/docker-webtop) - Alpine, Ubuntu, Fedora, and Arch based containers containing full desktop environments in officially supported flavors accessible via any modern web browser. \" \ - --no-cache --pull -f Dockerfile.aarch64 -t ${IMAGE}:arm64v8-${META_TAG} --platform=linux/arm64 \ - --provenance=true --sbom=true --builder=container --load \ - --build-arg ${BUILD_VERSION_ARG}=${EXT_RELEASE} --build-arg VERSION=\"${VERSION_TAG}\" --build-arg BUILD_DATE=${GITHUB_DATE} ." - sh '''#! /bin/bash - set -e - IFS=',' read -ra CACHE <<< "$BUILDCACHE" - for i in "${CACHE[@]}"; do - docker tag ${IMAGE}:arm64v8-${META_TAG} ${i}:arm64v8-${COMMIT_SHA}-${BUILD_NUMBER} - done - ''' - withCredentials([ - [ - $class: 'UsernamePasswordMultiBinding', - credentialsId: 'Quay.io-Robot', - usernameVariable: 'QUAYUSER', - passwordVariable: 'QUAYPASS' - ] - ]) { - retry_backoff(5,5) { - sh '''#! 
/bin/bash - set -e - echo $DOCKERHUB_TOKEN | docker login -u linuxserverci --password-stdin - echo $GITHUB_TOKEN | docker login ghcr.io -u LinuxServer-CI --password-stdin - echo $GITLAB_TOKEN | docker login registry.gitlab.com -u LinuxServer.io --password-stdin - echo $QUAYPASS | docker login quay.io -u $QUAYUSER --password-stdin - if [[ "${PACKAGE_CHECK}" != "true" ]]; then - declare -A pids - IFS=',' read -ra CACHE <<< "$BUILDCACHE" - for i in "${CACHE[@]}"; do - docker push ${i}:arm64v8-${COMMIT_SHA}-${BUILD_NUMBER} & - pids[$!]="$i" - done - for p in "${!pids[@]}"; do - wait "$p" || { [[ "${pids[$p]}" != *"quay.io"* ]] && exit 1; } - done - fi - ''' - } - } - sh '''#! /bin/bash - containers=$(docker ps -aq) - if [[ -n "${containers}" ]]; then - docker stop ${containers} - fi - docker system prune -f --volumes || : - docker image prune -af || : - ''' - } - } - } - } - // Take the image we just built and dump package versions for comparison - stage('Update-packages') { - when { - branch "master" - environment name: 'CHANGE_ID', value: '' - environment name: 'EXIT_STATUS', value: '' - } - steps { - sh '''#! 
/bin/bash - set -e - TEMPDIR=$(mktemp -d) - if [ "${MULTIARCH}" == "true" ] && [ "${PACKAGE_CHECK}" != "true" ]; then - LOCAL_CONTAINER=${IMAGE}:amd64-${META_TAG} - else - LOCAL_CONTAINER=${IMAGE}:${META_TAG} - fi - touch ${TEMPDIR}/package_versions.txt - docker run --rm \ - -v /var/run/docker.sock:/var/run/docker.sock:ro \ - -v ${TEMPDIR}:/tmp \ - ghcr.io/anchore/syft:${SYFT_IMAGE_TAG} \ - ${LOCAL_CONTAINER} -o table=/tmp/package_versions.txt - NEW_PACKAGE_TAG=$(md5sum ${TEMPDIR}/package_versions.txt | cut -c1-8 ) - echo "Package tag sha from current packages in buit container is ${NEW_PACKAGE_TAG} comparing to old ${PACKAGE_TAG} from github" - if [ "${NEW_PACKAGE_TAG}" != "${PACKAGE_TAG}" ]; then - git clone https://github.com/${LS_USER}/${LS_REPO}.git ${TEMPDIR}/${LS_REPO} - git --git-dir ${TEMPDIR}/${LS_REPO}/.git checkout -f master - cp ${TEMPDIR}/package_versions.txt ${TEMPDIR}/${LS_REPO}/ - cd ${TEMPDIR}/${LS_REPO}/ - wait - git add package_versions.txt - git commit -m 'Bot Updating Package Versions' - git pull https://LinuxServer-CI:${GITHUB_TOKEN}@github.com/${LS_USER}/${LS_REPO}.git master - git push https://LinuxServer-CI:${GITHUB_TOKEN}@github.com/${LS_USER}/${LS_REPO}.git master - echo "true" > /tmp/packages-${COMMIT_SHA}-${BUILD_NUMBER} - echo "Package tag updated, stopping build process" - else - echo "false" > /tmp/packages-${COMMIT_SHA}-${BUILD_NUMBER} - echo "Package tag is same as previous continue with build process" - fi - rm -Rf ${TEMPDIR}''' - script{ - env.PACKAGE_UPDATED = sh( - script: '''cat /tmp/packages-${COMMIT_SHA}-${BUILD_NUMBER}''', - returnStdout: true).trim() - } - } - } - // Exit the build if the package file was just updated - stage('PACKAGE-exit') { - when { - branch "master" - environment name: 'CHANGE_ID', value: '' - environment name: 'PACKAGE_UPDATED', value: 'true' - environment name: 'EXIT_STATUS', value: '' - } - steps { - script{ - env.EXIT_STATUS = 'ABORTED' - } - } - } - // Exit the build if this is just a package 
check and there are no changes to push - stage('PACKAGECHECK-exit') { - when { - branch "master" - environment name: 'CHANGE_ID', value: '' - environment name: 'PACKAGE_UPDATED', value: 'false' - environment name: 'EXIT_STATUS', value: '' - expression { - params.PACKAGE_CHECK == 'true' - } - } - steps { - script{ - env.EXIT_STATUS = 'ABORTED' - } - } - } - /* ####### - Testing - ####### */ - // Run Container tests - stage('Test') { - when { - environment name: 'CI', value: 'true' - environment name: 'EXIT_STATUS', value: '' - } - steps { - withCredentials([ - string(credentialsId: 'ci-tests-s3-key-id', variable: 'S3_KEY'), - string(credentialsId: 'ci-tests-s3-secret-access-key ', variable: 'S3_SECRET') - ]) { - script{ - env.CI_URL = 'https://ci-tests.linuxserver.io/' + env.IMAGE + '/' + env.META_TAG + '/index.html' - env.CI_JSON_URL = 'https://ci-tests.linuxserver.io/' + env.IMAGE + '/' + env.META_TAG + '/report.json' - } - sh '''#! /bin/bash - set -e - if grep -q 'docker-baseimage' <<< "${LS_REPO}"; then - echo "Detected baseimage, setting LSIO_FIRST_PARTY=true" - if [ -n "${CI_DOCKERENV}" ]; then - CI_DOCKERENV="LSIO_FIRST_PARTY=true|${CI_DOCKERENV}" - else - CI_DOCKERENV="LSIO_FIRST_PARTY=true" - fi - fi - docker pull ghcr.io/linuxserver/ci:${CITEST_IMAGETAG} - if [ "${MULTIARCH}" == "true" ]; then - docker pull ghcr.io/linuxserver/lsiodev-buildcache:arm64v8-${COMMIT_SHA}-${BUILD_NUMBER} --platform=arm64 - docker tag ghcr.io/linuxserver/lsiodev-buildcache:arm64v8-${COMMIT_SHA}-${BUILD_NUMBER} ${IMAGE}:arm64v8-${META_TAG} - fi - docker run --rm \ - --shm-size=1gb \ - -v /var/run/docker.sock:/var/run/docker.sock \ - -e IMAGE=\"${IMAGE}\" \ - -e WEB_SCREENSHOT_DELAY=\"${CI_WEB_SCREENSHOT_DELAY}\" \ - -e DOCKER_LOGS_TIMEOUT=\"${CI_DELAY}\" \ - -e TAGS=\"${CI_TAGS}\" \ - -e META_TAG=\"${META_TAG}\" \ - -e RELEASE_TAG=\"latest\" \ - -e PORT=\"${CI_PORT}\" \ - -e SSL=\"${CI_SSL}\" \ - -e BASE=\"${DIST_IMAGE}\" \ - -e SECRET_KEY=\"${S3_SECRET}\" \ - -e 
ACCESS_KEY=\"${S3_KEY}\" \ - -e DOCKER_ENV=\"${CI_DOCKERENV}\" \ - -e WEB_SCREENSHOT=\"${CI_WEB}\" \ - -e WEB_AUTH=\"${CI_AUTH}\" \ - -e WEB_PATH=\"${CI_WEBPATH}\" \ - -e NODE_NAME=\"${NODE_NAME}\" \ - -e SYFT_IMAGE_TAG=\"${CI_SYFT_IMAGE_TAG:-${SYFT_IMAGE_TAG}}\" \ - -e COMMIT_SHA=\"${COMMIT_SHA}\" \ - -e BUILD_NUMBER=\"${BUILD_NUMBER}\" \ - -t ghcr.io/linuxserver/ci:${CITEST_IMAGETAG} \ - python3 test_build.py''' - } - } - } - /* ################## - Release Logic - ################## */ - // If this is an amd64 only image only push a single image - stage('Docker-Push-Single') { - when { - environment name: 'MULTIARCH', value: 'false' - environment name: 'EXIT_STATUS', value: '' - } - steps { - retry_backoff(5,5) { - sh '''#! /bin/bash - set -e - for PUSHIMAGE in "${IMAGE}" "${GITLABIMAGE}" "${GITHUBIMAGE}" "${QUAYIMAGE}"; do - [[ ${PUSHIMAGE%%/*} =~ \\. ]] && PUSHIMAGEPLUS="${PUSHIMAGE}" || PUSHIMAGEPLUS="docker.io/${PUSHIMAGE}" - IFS=',' read -ra CACHE <<< "$BUILDCACHE" - for i in "${CACHE[@]}"; do - if [[ "${PUSHIMAGEPLUS}" == "$(cut -d "/" -f1 <<< ${i})"* ]]; then - CACHEIMAGE=${i} - fi - done - docker buildx imagetools create --prefer-index=false -t ${PUSHIMAGE}:${META_TAG} -t ${PUSHIMAGE}:latest -t ${PUSHIMAGE}:${EXT_RELEASE_TAG} ${CACHEIMAGE}:amd64-${COMMIT_SHA}-${BUILD_NUMBER} || \ - { if [[ "${PUSHIMAGE}" != "${QUAYIMAGE}" ]]; then exit 1; fi; } - if [ -n "${SEMVER}" ]; then - docker buildx imagetools create --prefer-index=false -t ${PUSHIMAGE}:${SEMVER} ${CACHEIMAGE}:amd64-${COMMIT_SHA}-${BUILD_NUMBER} || \ - { if [[ "${PUSHIMAGE}" != "${QUAYIMAGE}" ]]; then exit 1; fi; } - fi - done - ''' - } - } - } - // If this is a multi arch release push all images and define the manifest - stage('Docker-Push-Multi') { - when { - environment name: 'MULTIARCH', value: 'true' - environment name: 'EXIT_STATUS', value: '' - } - steps { - retry_backoff(5,5) { - sh '''#! 
/bin/bash - set -e - for MANIFESTIMAGE in "${IMAGE}" "${GITLABIMAGE}" "${GITHUBIMAGE}" "${QUAYIMAGE}"; do - [[ ${MANIFESTIMAGE%%/*} =~ \\. ]] && MANIFESTIMAGEPLUS="${MANIFESTIMAGE}" || MANIFESTIMAGEPLUS="docker.io/${MANIFESTIMAGE}" - IFS=',' read -ra CACHE <<< "$BUILDCACHE" - for i in "${CACHE[@]}"; do - if [[ "${MANIFESTIMAGEPLUS}" == "$(cut -d "/" -f1 <<< ${i})"* ]]; then - CACHEIMAGE=${i} - fi - done - docker buildx imagetools create --prefer-index=false -t ${MANIFESTIMAGE}:amd64-${META_TAG} -t ${MANIFESTIMAGE}:amd64-latest -t ${MANIFESTIMAGE}:amd64-${EXT_RELEASE_TAG} ${CACHEIMAGE}:amd64-${COMMIT_SHA}-${BUILD_NUMBER} || \ - { if [[ "${MANIFESTIMAGE}" != "${QUAYIMAGE}" ]]; then exit 1; fi; } - docker buildx imagetools create --prefer-index=false -t ${MANIFESTIMAGE}:arm64v8-${META_TAG} -t ${MANIFESTIMAGE}:arm64v8-latest -t ${MANIFESTIMAGE}:arm64v8-${EXT_RELEASE_TAG} ${CACHEIMAGE}:arm64v8-${COMMIT_SHA}-${BUILD_NUMBER} || \ - { if [[ "${MANIFESTIMAGE}" != "${QUAYIMAGE}" ]]; then exit 1; fi; } - if [ -n "${SEMVER}" ]; then - docker buildx imagetools create --prefer-index=false -t ${MANIFESTIMAGE}:amd64-${SEMVER} ${CACHEIMAGE}:amd64-${COMMIT_SHA}-${BUILD_NUMBER} || \ - { if [[ "${MANIFESTIMAGE}" != "${QUAYIMAGE}" ]]; then exit 1; fi; } - docker buildx imagetools create --prefer-index=false -t ${MANIFESTIMAGE}:arm64v8-${SEMVER} ${CACHEIMAGE}:arm64v8-${COMMIT_SHA}-${BUILD_NUMBER} || \ - { if [[ "${MANIFESTIMAGE}" != "${QUAYIMAGE}" ]]; then exit 1; fi; } - fi - done - for MANIFESTIMAGE in "${IMAGE}" "${GITLABIMAGE}" "${GITHUBIMAGE}" "${QUAYIMAGE}"; do - docker buildx imagetools create -t ${MANIFESTIMAGE}:latest ${MANIFESTIMAGE}:amd64-latest ${MANIFESTIMAGE}:arm64v8-latest || \ - { if [[ "${MANIFESTIMAGE}" != "${QUAYIMAGE}" ]]; then exit 1; fi; } - docker buildx imagetools create -t ${MANIFESTIMAGE}:${META_TAG} ${MANIFESTIMAGE}:amd64-${META_TAG} ${MANIFESTIMAGE}:arm64v8-${META_TAG} || \ - { if [[ "${MANIFESTIMAGE}" != "${QUAYIMAGE}" ]]; then exit 1; fi; } - docker buildx 
imagetools create -t ${MANIFESTIMAGE}:${EXT_RELEASE_TAG} ${MANIFESTIMAGE}:amd64-${EXT_RELEASE_TAG} ${MANIFESTIMAGE}:arm64v8-${EXT_RELEASE_TAG} || \ - { if [[ "${MANIFESTIMAGE}" != "${QUAYIMAGE}" ]]; then exit 1; fi; } - if [ -n "${SEMVER}" ]; then - docker buildx imagetools create -t ${MANIFESTIMAGE}:${SEMVER} ${MANIFESTIMAGE}:amd64-${SEMVER} ${MANIFESTIMAGE}:arm64v8-${SEMVER} || \ - { if [[ "${MANIFESTIMAGE}" != "${QUAYIMAGE}" ]]; then exit 1; fi; } - fi - done - ''' - } - } - } - // If this is a public release tag it in the LS Github - stage('Github-Tag-Push-Release') { - when { - branch "master" - expression { - env.LS_RELEASE != env.EXT_RELEASE_CLEAN + '-ls' + env.LS_TAG_NUMBER - } - environment name: 'CHANGE_ID', value: '' - environment name: 'EXIT_STATUS', value: '' - } - steps { - sh '''#! /bin/bash - echo "Auto-generating release notes" - if [ "$(git tag --points-at HEAD)" != "" ]; then - echo "Existing tag points to current commit, suggesting no new LS changes" - AUTO_RELEASE_NOTES="No changes" - else - AUTO_RELEASE_NOTES=$(curl -fsL -H "Authorization: token ${GITHUB_TOKEN}" -H "Accept: application/vnd.github+json" -X POST https://api.github.com/repos/${LS_USER}/${LS_REPO}/releases/generate-notes \ - -d '{"tag_name":"'${META_TAG}'",\ - "target_commitish": "master"}' \ - | jq -r '.body' | sed 's|## What.s Changed||') - fi - echo "Pushing New tag for current commit ${META_TAG}" - curl -H "Authorization: token ${GITHUB_TOKEN}" -X POST https://api.github.com/repos/${LS_USER}/${LS_REPO}/git/tags \ - -d '{"tag":"'${META_TAG}'",\ - "object": "'${COMMIT_SHA}'",\ - "message": "Tagging Release '${EXT_RELEASE_CLEAN}'-ls'${LS_TAG_NUMBER}' to master",\ - "type": "commit",\ - "tagger": {"name": "LinuxServer-CI","email": "ci@linuxserver.io","date": "'${GITHUB_DATE}'"}}' - echo "Pushing New release for Tag" - echo "Updating base packages to ${PACKAGE_TAG}" > releasebody.json - jq -n \ - --arg tag_name "$META_TAG" \ - --arg target_commitish "master" \ - --arg ci_url 
"${CI_URL:-N/A}" \ - --arg ls_notes "$AUTO_RELEASE_NOTES" \ - --arg remote_notes "$(cat releasebody.json)" \ - '{ - "tag_name": $tag_name, - "target_commitish": $target_commitish, - "name": $tag_name, - "body": ("**CI Report:**\\n\\n" + $ci_url + "\\n\\n**LinuxServer Changes:**\\n\\n" + $ls_notes + "\\n\\n**Remote Changes:**\\n\\n" + $remote_notes), - "draft": false, - "prerelease": false }' > releasebody.json.done - curl -H "Authorization: token ${GITHUB_TOKEN}" -X POST https://api.github.com/repos/${LS_USER}/${LS_REPO}/releases -d @releasebody.json.done - ''' - } - } - // Add protection to the release branch - stage('Github-Release-Branch-Protection') { - when { - branch "master" - environment name: 'CHANGE_ID', value: '' - environment name: 'EXIT_STATUS', value: '' - } - steps { - echo "Setting up protection for release branch master" - sh '''#! /bin/bash - curl -H "Authorization: token ${GITHUB_TOKEN}" -X PUT https://api.github.com/repos/${LS_USER}/${LS_REPO}/branches/master/protection \ - -d $(jq -c . << EOF - { - "required_status_checks": null, - "enforce_admins": false, - "required_pull_request_reviews": { - "dismiss_stale_reviews": false, - "require_code_owner_reviews": false, - "require_last_push_approval": false, - "required_approving_review_count": 1 - }, - "restrictions": null, - "required_linear_history": false, - "allow_force_pushes": false, - "allow_deletions": false, - "block_creations": false, - "required_conversation_resolution": true, - "lock_branch": false, - "allow_fork_syncing": false, - "required_signatures": false - } -EOF - ) ''' - } - } - // If this is a Pull request send the CI link as a comment on it - stage('Pull Request Comment') { - when { - not {environment name: 'CHANGE_ID', value: ''} - environment name: 'EXIT_STATUS', value: '' - } - steps { - sh '''#! /bin/bash - # Function to retrieve JSON data from URL - get_json() { - local url="$1" - local response=$(curl -s "$url") - if [ $? 
-ne 0 ]; then - echo "Failed to retrieve JSON data from $url" - return 1 - fi - local json=$(echo "$response" | jq .) - if [ $? -ne 0 ]; then - echo "Failed to parse JSON data from $url" - return 1 - fi - echo "$json" - } - - build_table() { - local data="$1" - - # Get the keys in the JSON data - local keys=$(echo "$data" | jq -r 'to_entries | map(.key) | .[]') - - # Check if keys are empty - if [ -z "$keys" ]; then - echo "JSON report data does not contain any keys or the report does not exist." - return 1 - fi - - # Build table header - local header="| Tag | Passed |\\n| --- | --- |\\n" - - # Loop through the JSON data to build the table rows - local rows="" - for build in $keys; do - local status=$(echo "$data" | jq -r ".[\\"$build\\"].test_success") - if [ "$status" = "true" ]; then - status="✅" - else - status="❌" - fi - local row="| "$build" | "$status" |\\n" - rows="${rows}${row}" - done - - local table="${header}${rows}" - local escaped_table=$(echo "$table" | sed 's/\"/\\\\"/g') - echo "$escaped_table" - } - - if [[ "${CI}" = "true" ]]; then - # Retrieve JSON data from URL - data=$(get_json "$CI_JSON_URL") - # Create table from JSON data - table=$(build_table "$data") - echo -e "$table" - - curl -X POST -H "Authorization: token $GITHUB_TOKEN" \ - -H "Accept: application/vnd.github.v3+json" \ - "https://api.github.com/repos/$LS_USER/$LS_REPO/issues/$PULL_REQUEST/comments" \ - -d "{\\"body\\": \\"I am a bot, here are the test results for this PR: \\n${CI_URL}\\n${SHELLCHECK_URL}\\n${table}\\"}" - else - curl -X POST -H "Authorization: token $GITHUB_TOKEN" \ - -H "Accept: application/vnd.github.v3+json" \ - "https://api.github.com/repos/$LS_USER/$LS_REPO/issues/$PULL_REQUEST/comments" \ - -d "{\\"body\\": \\"I am a bot, here is the pushed image/manifest for this PR: \\n\\n\\`${GITHUBIMAGE}:${META_TAG}\\`\\"}" - fi - ''' - - } - } - } - /* ###################### - Send status to Discord - ###################### */ - post { - always { - sh '''#!/bin/bash - rm 
-rf /config/.ssh/id_sign - rm -rf /config/.ssh/id_sign.pub - git config --global --unset gpg.format - git config --global --unset user.signingkey - git config --global --unset commit.gpgsign - ''' - script{ - env.JOB_DATE = sh( - script: '''date '+%Y-%m-%dT%H:%M:%S%:z' ''', - returnStdout: true).trim() - if (env.EXIT_STATUS == "ABORTED"){ - sh 'echo "build aborted"' - }else{ - if (currentBuild.currentResult == "SUCCESS"){ - if (env.GITHUBIMAGE =~ /lspipepr/){ - env.JOB_WEBHOOK_STATUS='Success' - env.JOB_WEBHOOK_COLOUR=3957028 - env.JOB_WEBHOOK_FOOTER='PR Build' - }else if (env.GITHUBIMAGE =~ /lsiodev/){ - env.JOB_WEBHOOK_STATUS='Success' - env.JOB_WEBHOOK_COLOUR=3957028 - env.JOB_WEBHOOK_FOOTER='Dev Build' - }else{ - env.JOB_WEBHOOK_STATUS='Success' - env.JOB_WEBHOOK_COLOUR=1681177 - env.JOB_WEBHOOK_FOOTER='Live Build' - } - }else{ - if (env.GITHUBIMAGE =~ /lspipepr/){ - env.JOB_WEBHOOK_STATUS='Failure' - env.JOB_WEBHOOK_COLOUR=12669523 - env.JOB_WEBHOOK_FOOTER='PR Build' - }else if (env.GITHUBIMAGE =~ /lsiodev/){ - env.JOB_WEBHOOK_STATUS='Failure' - env.JOB_WEBHOOK_COLOUR=12669523 - env.JOB_WEBHOOK_FOOTER='Dev Build' - }else{ - env.JOB_WEBHOOK_STATUS='Failure' - env.JOB_WEBHOOK_COLOUR=16711680 - env.JOB_WEBHOOK_FOOTER='Live Build' - } - } - sh ''' curl -X POST -H "Content-Type: application/json" --data '{"avatar_url": "https://raw.githubusercontent.com/linuxserver/docker-templates/master/linuxserver.io/img/jenkins-avatar.png","embeds": [{"'color'": '${JOB_WEBHOOK_COLOUR}',\ - "footer": {"text" : "'"${JOB_WEBHOOK_FOOTER}"'"},\ - "timestamp": "'${JOB_DATE}'",\ - "description": "**Build:** '${BUILD_NUMBER}'\\n**CI Results:** '${CI_URL}'\\n**ShellCheck Results:** '${SHELLCHECK_URL}'\\n**Status:** '${JOB_WEBHOOK_STATUS}'\\n**Job:** '${RUN_DISPLAY_URL}'\\n**Change:** '${CODE_URL}'\\n**External Release:**: '${RELEASE_LINK}'\\n**DockerHub:** '${DOCKERHUB_LINK}'\\n"}],\ - "username": "Jenkins"}' ${BUILDS_DISCORD} ''' - } - } - } - cleanup { - sh '''#! 
/bin/bash - echo "Pruning builder!!" - docker builder prune -f --builder container || : - containers=$(docker ps -q) - if [[ -n "${containers}" ]]; then - BUILDX_CONTAINER_ID=$(docker ps -qf 'name=buildx_buildkit') - for container in ${containers}; do - if [[ "${container}" == "${BUILDX_CONTAINER_ID}" ]]; then - echo "skipping buildx container in docker stop" - else - echo "Stopping container ${container}" - docker stop ${container} - fi - done - fi - docker system prune -f --volumes || : - docker image prune -af || : - ''' - cleanWs() - } - } -} - -def retry_backoff(int max_attempts, int power_base, Closure c) { - int n = 0 - while (n < max_attempts) { - try { - c() - return - } catch (err) { - if ((n + 1) >= max_attempts) { - throw err - } - sleep(power_base ** n) - n++ - } - } - return -} diff --git a/README.md b/README.md index 2af8a18a1..759482e63 100644 --- a/README.md +++ b/README.md @@ -1,620 +1,762 @@ -<!-- DO NOT EDIT THIS FILE MANUALLY --> -<!-- Please read https://github.com/linuxserver/docker-webtop/blob/master/.github/CONTRIBUTING.md --> -[![linuxserver.io](https://raw.githubusercontent.com/linuxserver/docker-templates/master/linuxserver.io/img/linuxserver_medium.png)](https://linuxserver.io) +# kde-selkies-webtop-devcontainer -[![Blog](https://img.shields.io/static/v1.svg?color=94398d&labelColor=555555&logoColor=ffffff&style=for-the-badge&label=linuxserver.io&message=Blog)](https://blog.linuxserver.io "all the things you can do with our containers including How-To guides, opinions and much more!") -[![Discord](https://img.shields.io/discord/354974912613449730.svg?color=94398d&labelColor=555555&logoColor=ffffff&style=for-the-badge&label=Discord&logo=discord)](https://linuxserver.io/discord "realtime support / chat with the community and the team.") 
-[![Discourse](https://img.shields.io/discourse/https/discourse.linuxserver.io/topics.svg?color=94398d&labelColor=555555&logoColor=ffffff&style=for-the-badge&logo=discourse)](https://discourse.linuxserver.io "post on our community forum.") -[![GitHub](https://img.shields.io/static/v1.svg?color=94398d&labelColor=555555&logoColor=ffffff&style=for-the-badge&label=linuxserver.io&message=GitHub&logo=github)](https://github.com/linuxserver "view the source for all of our repositories.") -[![Open Collective](https://img.shields.io/opencollective/all/linuxserver.svg?color=94398d&labelColor=555555&logoColor=ffffff&style=for-the-badge&label=Supporters&logo=open%20collective)](https://opencollective.com/linuxserver "please consider helping us by either donating or contributing to our budget") +**[English Version (README_en.md)](README_en.md)** -The [LinuxServer.io](https://linuxserver.io) team brings you another container release featuring: +ブラウザからアクセス可能なコンテナ化されたKubuntu (KDE Plasma) デスクトップ環境。Selkies WebRTCストリーミングを使用し、VNC/RDPなしでフル機能のLinuxデスクトップを提供します。VS Code Dev Containerにも対応。 -* regular and timely application updates -* easy user mappings (PGID, PUID) -* custom base image with s6 overlay -* weekly base OS updates with common layers across the entire LinuxServer.io ecosystem to minimise space usage, down time and bandwidth -* regular security updates +### 機能対応表(プラットフォーム) -Find us at: +| 環境 | GPUレンダリング | WebGL/Vulkan | ハードウェアエンコード | 備考 | +|------|----------------|--------------|----------------------|------| +| **Ubuntu + NVIDIA GPU** | ✅ 対応 | ✅ 対応 | ✅ NVENC | 高パフォーマンス | +| **Ubuntu + Intel GPU** | ✅ 対応 | ✅ 対応 | ✅ VA-API (QSV) | 統合GPU可 | +| **Ubuntu + AMD GPU** | ✅ 対応 | ✅ 対応 | ✅ VA-API | RDNA/GCN対応 | +| **WSL2 + NVIDIA GPU** | ❌ ソフトウェア | ❌ ソフトウェアのみ | ✅ NVENC | WSL2で動作確認済み | +| **macOS (Docker)** | ❌ 非対応 | ❌ ソフトウェアのみ | ❌ 非対応 | VM制限 | -* [Blog](https://blog.linuxserver.io) - all the things you can do with our containers including How-To guides, opinions and much more! 
-* [Discord](https://linuxserver.io/discord) - realtime support / chat with the community and the team. -* [Discourse](https://discourse.linuxserver.io) - post on our community forum. -* [GitHub](https://github.com/linuxserver) - view the source for all of our repositories. -* [Open Collective](https://opencollective.com/linuxserver) - please consider helping us by either donating or contributing to our budget +--- -# [linuxserver/webtop](https://github.com/linuxserver/docker-webtop) +## クイックスタート -[![Scarf.io pulls](https://scarf.sh/installs-badge/linuxserver-ci/linuxserver%2Fwebtop?color=94398d&label-color=555555&logo-color=ffffff&style=for-the-badge&package-type=docker)](https://scarf.sh) -[![GitHub Stars](https://img.shields.io/github/stars/linuxserver/docker-webtop.svg?color=94398d&labelColor=555555&logoColor=ffffff&style=for-the-badge&logo=github)](https://github.com/linuxserver/docker-webtop) -[![GitHub Release](https://img.shields.io/github/release/linuxserver/docker-webtop.svg?color=94398d&labelColor=555555&logoColor=ffffff&style=for-the-badge&logo=github)](https://github.com/linuxserver/docker-webtop/releases) -[![GitHub Package Repository](https://img.shields.io/static/v1.svg?color=94398d&labelColor=555555&logoColor=ffffff&style=for-the-badge&label=linuxserver.io&message=GitHub%20Package&logo=github)](https://github.com/linuxserver/docker-webtop/packages) -[![GitLab Container Registry](https://img.shields.io/static/v1.svg?color=94398d&labelColor=555555&logoColor=ffffff&style=for-the-badge&label=linuxserver.io&message=GitLab%20Registry&logo=gitlab)](https://gitlab.com/linuxserver.io/docker-webtop/container_registry) -[![Quay.io](https://img.shields.io/static/v1.svg?color=94398d&labelColor=555555&logoColor=ffffff&style=for-the-badge&label=linuxserver.io&message=Quay.io)](https://quay.io/repository/linuxserver.io/webtop) -[![Docker 
Pulls](https://img.shields.io/docker/pulls/linuxserver/webtop.svg?color=94398d&labelColor=555555&logoColor=ffffff&style=for-the-badge&label=pulls&logo=docker)](https://hub.docker.com/r/linuxserver/webtop) -[![Docker Stars](https://img.shields.io/docker/stars/linuxserver/webtop.svg?color=94398d&labelColor=555555&logoColor=ffffff&style=for-the-badge&label=stars&logo=docker)](https://hub.docker.com/r/linuxserver/webtop) -[![Jenkins Build](https://img.shields.io/jenkins/build?labelColor=555555&logoColor=ffffff&style=for-the-badge&jobUrl=https%3A%2F%2Fci.linuxserver.io%2Fjob%2FDocker-Pipeline-Builders%2Fjob%2Fdocker-webtop%2Fjob%2Fmaster%2F&logo=jenkins)](https://ci.linuxserver.io/job/Docker-Pipeline-Builders/job/docker-webtop/job/master/) -[![LSIO CI](https://img.shields.io/badge/dynamic/yaml?color=94398d&labelColor=555555&logoColor=ffffff&style=for-the-badge&label=CI&query=CI&url=https%3A%2F%2Fci-tests.linuxserver.io%2Flinuxserver%2Fwebtop%2Flatest%2Fci-status.yml)](https://ci-tests.linuxserver.io/linuxserver/webtop/latest/index.html) - -[Webtop](https://github.com/linuxserver/docker-webtop) - Alpine, Ubuntu, Fedora, and Arch based containers containing full desktop environments in officially supported flavors accessible via any modern web browser. - -[![webtop](https://raw.githubusercontent.com/linuxserver/docker-templates/master/linuxserver.io/img/webtop-logo.png)](https://github.com/linuxserver/docker-webtop) - -## Supported Architectures - -We utilise the docker manifest for multi-platform awareness. More information is available from docker [here](https://distribution.github.io/distribution/spec/manifest-v2-2/#manifest-list) and our announcement [here](https://blog.linuxserver.io/2019/02/21/the-lsio-pipeline-project/). - -Simply pulling `lscr.io/linuxserver/webtop:latest` should retrieve the correct image for your arch, but you can also pull specific arch images via tags. 
- -The architectures supported by this image are: - -| Architecture | Available | Tag | -| :----: | :----: | ---- | -| x86-64 | ✅ | amd64-\<version tag\> | -| arm64 | ✅ | arm64v8-\<version tag\> | - -## Version Tags - -This image provides various versions that are available via tags. Please read the descriptions carefully and exercise caution when using unstable or development tags. - -| Tag | Available | Description | -| :----: | :----: |--- | -| latest | ✅ | XFCE Alpine | -| alpine-i3 | ✅ | i3 Alpine | -| alpine-mate | ✅ | MATE Alpine | -| arch-i3 | ✅ | i3 Arch | -| arch-kde | ✅ | KDE Arch | -| arch-mate | ✅ | MATE Arch | -| arch-xfce | ✅ | XFCE Arch | -| debian-i3 | ✅ | i3 Debian | -| debian-kde | ✅ | KDE Debian | -| debian-mate | ✅ | MATE Debian | -| debian-xfce | ✅ | XFCE Debian | -| el-i3 | ✅ | i3 Enterprise Linux | -| el-mate | ✅ | MATE Enterprise Linux | -| el-xfce | ✅ | XFCE Enterprise Linux | -| fedora-i3 | ✅ | i3 Fedora | -| fedora-kde | ✅ | KDE Fedora | -| fedora-mate | ✅ | MATE Fedora | -| fedora-xfce | ✅ | XFCE Fedora | -| ubuntu-i3 | ✅ | i3 Ubuntu | -| ubuntu-kde | ✅ | KDE Ubuntu | -| ubuntu-mate | ✅ | MATE Ubuntu | -| ubuntu-xfce | ✅ | XFCE Ubuntu | - -## Application Setup - -The application can be accessed at: - -* https://yourhost:3001/ +```bash +# 1. ユーザーイメージをビルド(1-2分) +# ベースイメージはGHCRから自動取得されます +./build-user-image.sh # 英語環境 +./build-user-image.sh -l ja # 日本語環境 +./build-user-image.sh -u 22.04 # Ubuntu 22.04 + +# 2. コンテナを起動 +./start-container.sh --encoder software # ソフトウェアエンコード +./start-container.sh --encoder nvidia --gpu all # NVIDIA NVENC(全GPU) +./start-container.sh --encoder nvidia --num 0 # NVIDIA NVENC(GPU 0のみ) +./start-container.sh --encoder intel # Intel VA-API +./start-container.sh --encoder amd # AMD VA-API +./start-container.sh --encoder nvidia-wsl --gpu all # WSL2 + NVIDIA NVENC + +# 3. 
ブラウザでアクセス +# → https://localhost:<30000+UID> (例: UID=1000 → https://localhost:31000) +# → http://localhost:<40000+UID> (例: UID=1000 → http://localhost:41000) + +# 4. 変更を保存(重要!コンテナ削除前に必ず実行) +./commit-container.sh + +# 5. 停止 +./stop-container.sh # 停止(コンテナ保持、再起動可能) +./stop-container.sh --rm # 停止して削除(commitした後のみ推奨) +``` -### Strict reverse proxies +以上で完了です! 🎉 -This image uses a self-signed certificate by default. This naturally means the scheme is `https`. -If you are using a reverse proxy which validates certificates, you need to [disable this check for the container](https://docs.linuxserver.io/faq#strict-proxy). +### VS Code Dev Container を使用する場合 -**Modern GUI desktop apps may have compatibility issues with the latest Docker syscall restrictions. You can use Docker with the `--security-opt seccomp=unconfined` setting to allow these syscalls on hosts with older Kernels or libseccomp versions.** +```bash +# 1. Dev Container設定を生成 +./create-devcontainer-config.sh -### Security +# 2. VS Codeで開く +# VS Codeで「F1」→「Dev Containers: Reopen in Container」を選択 ->[!WARNING] ->This container provides privileged access to the host system. Do not expose it to the Internet unless you have secured it properly. +# 3. コンテナ内でワークスペースが自動的に開きます +# ブラウザから https://localhost:<表示されたポート> でデスクトップにアクセス +``` -**HTTPS is required for full functionality.** Modern browser features such as WebCodecs, used for video and audio, will not function over an insecure HTTP connection. +--- -By default, this container has no authentication. The optional `CUSTOM_USER` and `PASSWORD` environment variables enable basic HTTP auth, which is suitable only for securing the container on a trusted local network. For internet exposure, we strongly recommend placing the container behind a reverse proxy, such as [SWAG](https://github.com/linuxserver/docker-swag), with a robust authentication mechanism. 
+## 🚀 このプロジェクトの特徴 + +### アーキテクチャの改善 + +- **🏗️ 2段階ビルドシステム**: ベースイメージ(5-10 GB)とユーザーイメージ(~100 MB、1-2分でビルド)を分離 + - ベースイメージはシステムパッケージとデスクトップ環境を含む + - ユーザーイメージはあなたのUID/GIDに合わせたユーザーを追加 + - 毎回30-60分待つ必要なし! + +- **🔒 非rootコンテナ実行**: デフォルトでユーザー権限で実行 + - `fakeroot`ハックや権限エスカレーション回避策を削除 + - システムとユーザー操作の適切な権限分離 + - 必要時はsudoアクセス可能 + +- **📁 自動UID/GID一致**: ファイル権限がシームレスに動作 + - ユーザーイメージが自動的にホストのUID/GIDに一致 + - マウントしたホストディレクトリの所有権が正しく設定 + - 共有フォルダでの「permission denied」エラーなし + +### ユーザー体験の向上 + +- **🔐 セキュアパスワード管理**: 環境変数でパスワード入力 + - コマンドにパスワードを平文で表示しない + - イメージ内に安全に保存 + +- **💻 Ubuntu Desktop標準環境**: 完全な`.bashrc`設定 + - Git branch検出付きカラープロンプト + - ヒストリー最適化(重複無視、追記モード、タイムスタンプ) + - 便利なエイリアス(ll, la, grep色付けなど) + +- **🎮 柔軟なエンコーダー/GPU選択**: 明確なコマンド引数 + - `--encoder nvidia` - NVIDIA NVENC + - `--encoder intel` - Intel VA-API + - `--encoder amd` - AMD VA-API + - `--encoder software` - ソフトウェアエンコード + - `--gpu all` - Docker全GPU使用(NVIDIA) + - `--num 0,1` - 特定GPUデバイス指定 + +### 開発者体験 + +- **📦 バージョン固定**: 再現可能なビルドを保証 + - VirtualGL 3.1.4、Selkies 1.6.2 + - 「昨日は動いた」問題なし + +- **🛠️ 完全な管理スクリプト**: 全操作用シェルスクリプト + - `build-user-image.sh` - パスワード付きビルド + - `start-container.sh --encoder <type>` - エンコーダー選択で起動 + - `stop/shell-container.sh` - ライフサイクル管理 + - `commit-container.sh` - 変更を保存 + +- **🌐 多言語サポート**: 日本語環境対応 + - ビルド時に`-l ja`で日本語入力(Mozc) + - タイムゾーン(Asia/Tokyo)とロケール(ja_JP.UTF-8)自動設定 + - fcitx入力メソッドフレームワーク含む + - 英語がデフォルト + +### なぜこのフォーク? + +| 元プロジェクト | このフォーク | +|---------------|-------------| +| Pull可能イメージ | ローカルビルド(1-2分) | +| rootコンテナ | ユーザー権限コンテナ | +| 手動UID/GID設定 | 自動マッチング | +| コマンドにパスワード | 環境変数で安全に | +| 汎用bash | Ubuntu Desktop bash | +| GPU自動検出 | エンコーダー/GPU明示的選択 | +| バージョンドリフト | バージョン固定 | +| 英語のみ | 多言語(EN/JP) | -The web interface includes a terminal with passwordless `sudo` access. Any user with access to the GUI can gain root control within the container, install arbitrary software, and probe your local network. 
+--- -While not generally recommended, certain legacy environments specifically those with older hardware or outdated Linux distributions may require the deactivation of the standard seccomp profile to get containerized desktop software to run. This can be achieved by utilizing the `--security-opt seccomp=unconfined` parameter. It is critical to use this option only when absolutely necessary as it disables a key security layer of Docker, elevating the potential for container escape vulnerabilities. +## 目次 + +- [システム要件](#システム要件) +- [2段階ビルドシステム](#2段階ビルドシステム) +- [Intel/AMD GPUホストセットアップ](#intelamd-gpuホストセットアップ) +- [セットアップ(通常使用)](#セットアップ通常使用) +- [使い方](#使い方) +- [付録: ベースイメージのビルド](#付録-ベースイメージのビルド) +- [付録: スクリプトリファレンス](#付録-スクリプトリファレンス) +- [付録: 設定](#付録-設定) +- [付録: HTTPS/SSL](#付録-httpsssl) +- [トラブルシューティング](#トラブルシューティング) +- [既知の制限](#既知の制限) +- [付録: 高度なトピック](#付録-高度なトピック) -### Options in all Selkies-based GUI containers +--- -This container is based on [Docker Baseimage Selkies](https://github.com/linuxserver/docker-baseimage-selkies), which provides the following environment variables and run configurations to customize its functionality. +## システム要件 -#### Optional Environment Variables +### 必須 +- **Docker** 20.10以降(Docker Desktop 4.0+) +- **8GB以上のRAM**(16GB推奨) +- **20GB以上のディスク空き容量** -| Variable | Description | -| :----: | --- | -| CUSTOM_PORT | Internal port the container listens on for http if it needs to be swapped from the default `3000` | -| CUSTOM_HTTPS_PORT | Internal port the container listens on for https if it needs to be swapped from the default `3001` | -| CUSTOM_WS_PORT | Internal port the container listens on for websockets if it needs to be swapped from the default 8082 | -| CUSTOM_USER | HTTP Basic auth username, abc is default. 
| -| DRI_NODE | Enable VAAPI stream encoding and use the specified device IE `/dev/dri/renderD128` | -| DRINODE | Specify which GPU to use for DRI3 acceleration IE `/dev/dri/renderD129` | -| PASSWORD | HTTP Basic auth password, abc is default. If unset there will be no auth | -| SUBFOLDER | Subfolder for the application if running a subfolder reverse proxy, need both slashes IE `/subfolder/` | -| TITLE | The page title displayed on the web browser, default "Selkies" | -| DASHBOARD | Allows the user to set their dashboard. Options: `selkies-dashboard`, `selkies-dashboard-zinc`, `selkies-dashboard-wish` | -| FILE_MANAGER_PATH | Modifies the default upload/download file path, path must have proper permissions for abc user | -| START_DOCKER | If set to false a container with privilege will not automatically start the DinD Docker setup | -| DISABLE_IPV6 | If set to true or any value this will disable IPv6 | -| LC_ALL | Set the Language for the container to run as IE `fr_FR.UTF-8` `ar_AE.UTF-8` | -| NO_DECOR | If set the application will run without window borders for use as a PWA. (Decor can be enabled and disabled with Ctrl+Shift+d) | -| NO_FULL | Do not autmatically fullscreen applications when using openbox. | -| NO_GAMEPAD | Disable userspace gamepad interposer injection. 
| -| DISABLE_ZINK | Do not set the Zink environment variables if a video card is detected (userspace applications will use CPU rendering) | -| DISABLE_DRI3 | Do not use DRI3 acceleration if a video card is detected (userspace applications will use CPU rendering) | -| MAX_RES | Pass a larger maximum resolution for the container default is 16k `15360x8640` | -| WATERMARK_PNG | Full path inside the container to a watermark png IE `/usr/share/selkies/www/icon.png` | -| WATERMARK_LOCATION | Where to paint the image over the stream integer options below | +### GPU(オプション、ハードウェアアクセラレーション用) +- **NVIDIA GPU** ✅ テスト済み + - ドライバーバージョン 470以降 + - Maxwell世代以降 + - NVIDIA Container Toolkit インストール済み +- **Intel GPU** ✅ テスト済み + - Intel統合グラフィックス(HD Graphics, Iris, Arc) + - Quick Sync Videoサポート + - VA-APIドライバはコンテナに含む + - **ホストセットアップ必要**(下記参照) +- **AMD GPU** ⚠️ 部分的にテスト済み + - VCE/VCNエンコーダー搭載Radeonグラフィックス + - VA-APIドライバはコンテナに含む + - **ホストセットアップ必要**(下記参照) -**`WATERMARK_LOCATION` Options:** -- **1**: Top Left -- **2**: Top Right -- **3**: Bottom Left -- **4**: Bottom Right -- **5**: Centered -- **6**: Animated +## 2段階ビルドシステム -#### Optional Run Configurations +このプロジェクトは高速セットアップと適切なファイル権限のために2段階ビルドアプローチを使用: -| Argument | Description | -| :----: | --- | -| `--privileged` | Starts a Docker-in-Docker (DinD) environment. For better performance, mount the Docker data directory from the host, e.g., `-v /path/to/docker-data:/var/lib/docker`. | -| `-v /var/run/docker.sock:/var/run/docker.sock` | Mounts the host's Docker socket to manage host containers from within this container. | -| `--device /dev/dri:/dev/dri` | Mount a GPU into the container, this can be used in conjunction with the `DRINODE` environment variable to leverage a host video card for GPU accelerated applications. 
Only **Open Source** drivers are supported IE (Intel,AMDGPU,Radeon,ATI,Nouveau) | +``` +┌─────────────────────────┐ +│ ベースイメージ (5-10 GB) │ ← 初回のみビルド(30-60分) +│ • 全システムパッケージ │ +│ • デスクトップ環境 │ +│ • プリインストールアプリ │ +└────────────┬────────────┘ + │ + ↓ これを基にビルド +┌────────────┴────────────┐ +│ ユーザーイメージ (~100 MB) │ ← あなたがビルド(1-2分) +│ • あなたのユーザー名 │ +│ • あなたのUID/GID │ +│ • あなたのパスワード │ +└─────────────────────────┘ +``` -### Language Support - Internationalization +**メリット:** -To launch the desktop session in a different language, set the `LC_ALL` environment variable. For example: +- ✅ **高速セットアップ:** 30-60分のビルド待ち不要 +- ✅ **適切な権限:** ファイルがホストのUID/GIDに一致 +- ✅ **簡単な更新:** 新しいベースイメージをビルド、ユーザーイメージを再ビルド -* `-e LC_ALL=zh_CN.UTF-8` - Chinese -* `-e LC_ALL=ja_JP.UTF-8` - Japanese -* `-e LC_ALL=ko_KR.UTF-8` - Korean -* `-e LC_ALL=ar_AE.UTF-8` - Arabic -* `-e LC_ALL=ru_RU.UTF-8` - Russian -* `-e LC_ALL=es_MX.UTF-8` - Spanish (Latin America) -* `-e LC_ALL=de_DE.UTF-8` - German -* `-e LC_ALL=fr_FR.UTF-8` - French -* `-e LC_ALL=nl_NL.UTF-8` - Netherlands -* `-e LC_ALL=it_IT.UTF-8` - Italian +**なぜUID/GID一致が重要?** -### DRI3 GPU Acceleration +- ホストディレクトリ(`$HOME`など)をマウントする際、ファイルに一致する所有権が必要 +- UID/GID不一致だと権限エラーが発生 +- ユーザーイメージが自動的にホストの認証情報に一致 -For accelerated apps or games, render devices can be mounted into the container and leveraged by applications using: +--- -`--device /dev/dri:/dev/dri` +## Intel/AMD GPUホストセットアップ -This feature only supports **Open Source** GPU drivers: +Intel/AMD GPUでハードウェアエンコード(VA-API)を使用する場合、ホスト側のセットアップが必要: -| Driver | Description | -| :----: | --- | -| Intel | i965 and i915 drivers for Intel iGPU chipsets | -| AMD | AMDGPU, Radeon, and ATI drivers for AMD dedicated or APU chipsets | -| NVIDIA | nouveau2 drivers only, closed source NVIDIA drivers lack DRI3 support | +### 1. ユーザーをvideo/renderグループに追加 -The `DRINODE` environment variable can be used to point to a specific GPU. 
+コンテナがGPUデバイス(`/dev/dri/*`)にアクセスするには、ホストユーザーが`video`と`render`グループのメンバーである必要があります: -DRI3 will work on aarch64 given the correct drivers are installed inside the container for your chipset. +```bash +# video/renderグループに追加 +sudo usermod -aG video,render $USER -### Nvidia GPU Support +# ログアウト&再ログインまたは再起動してグループ変更を適用 +# 確認: +groups +# 出力に "video" と "render" が含まれていることを確認 +``` -**Note: Nvidia support is not available for Alpine-based images.** +### 2. VA-APIドライバーのインストール(Intel) -Nvidia GPU support is available by leveraging Zink for OpenGL. When a compatible Nvidia GPU is passed through, it will also be **automatically utilized for hardware-accelerated video stream encoding** (using the `x264enc` full-frame profile), significantly reducing CPU load. +IntelGPUハードウェアエンコード用: -Enable Nvidia support with the following runtime flags: +```bash +# VA-APIツールとIntelドライバーをインストール +sudo apt update +sudo apt install vainfo intel-media-va-driver-non-free -| Flag | Description | -| :----: | --- | -| `--gpus all` | Passes all available host GPUs to the container. This can be filtered to specific GPUs. | -| `--runtime nvidia` | Specifies the Nvidia runtime, which provides the necessary drivers and tools from the host. | +# インストール確認(H.264エンコードサポートを確認): +vainfo +# 出力に "VAProfileH264Main : VAEntrypointEncSlice" などが含まれていることを確認 +``` -For Docker Compose, you must first configure the Nvidia runtime as the default on the host: +### 3. 
VA-APIドライバーのインストール(AMD) -``` -sudo nvidia-ctk runtime configure --runtime=docker --set-as-default -sudo systemctl restart docker -``` +AMD GPUハードウェアエンコード用: -Then, assign the GPU to the service in your `compose.yaml`: +```bash +# VA-APIツールとAMDドライバーをインストール +sudo apt update +sudo apt install vainfo mesa-va-drivers -``` -services: - webtop: - image: lscr.io/linuxserver/webtop:latest - deploy: - resources: - reservations: - devices: - - driver: nvidia - count: 1 - capabilities: [compute,video,graphics,utility] +# インストール確認: +vainfo +# 出力に "VAProfileH264Main : VAEntrypointEncSlice" などが含まれていることを確認 ``` -### Application Management +**注意:** +- NVIDIA GPUはこのセットアップ不要 +- ホストでVA-APIが正しく動作すれば、コンテナでも自動的に動作 +- グループ変更後は必ずログアウト/再ログインまたは再起動 -There are two methods for installing applications inside the container: PRoot Apps (recommended for persistence) and Native Apps. +--- -#### PRoot Apps (Persistent) +## セットアップ(通常使用) -Natively installed packages (e.g., via `apt-get install`) will not persist if the container is recreated. To retain applications and their settings across container updates, we recommend using [proot-apps](https://github.com/linuxserver/proot-apps). These are portable applications installed to the user's persistent `$HOME` directory. +ベースイメージはGHCRから自動取得されるため、通常利用ではビルド不要です。 -To install an application, use the command line inside the container: +### ユーザーイメージのビルド -``` -proot-apps install filezilla +UID/GIDが一致するパーソナルイメージを作成(1-2分): + +```bash +# 英語(デフォルト) +./build-user-image.sh + +# 日本語 +./build-user-image.sh -l ja ``` -A list of supported applications is available [here](https://github.com/linuxserver/proot-apps?tab=readme-ov-file#supported-apps). 
+※ `USER_PASSWORD=...` を先に付けると対話プロンプトを省略できます。 -#### Native Apps (Non-Persistent) +**オプション: カスタマイズ** + +```bash +# Ubuntu 22.04を使用 +./build-user-image.sh -u 22.04 -You can install packages from the system's native repository using the [universal-package-install](https://github.com/linuxserver/docker-mods/tree/universal-package-install) mod. This method will increase the container's start time and is not persistent. Add the following to your `compose.yaml`: +# 別バージョン +./build-user-image.sh -v 2.0.0 -```yaml - environment: - - DOCKER_MODS=linuxserver/mods:universal-package-install - - INSTALL_PACKAGES=libfuse2|git|gdb +# 別のベースイメージを使用 +./build-user-image.sh -b my-custom-base:1.0.0 ``` -#### Hardening - -These variables can be used to lock down the desktop environment for single-application use cases or to restrict user capabilities. - -##### Meta Variables - -These variables act as presets, enabling multiple hardening options at once. Individual options can still be set to override the preset. - -| Variable | Description | -| :----: | --- | -| **`HARDEN_DESKTOP`** | Enables `DISABLE_OPEN_TOOLS`, `DISABLE_SUDO`, and `DISABLE_TERMINALS`. Also sets related Selkies UI settings (`SELKIES_FILE_TRANSFERS`, `SELKIES_COMMAND_ENABLED`, `SELKIES_UI_SIDEBAR_SHOW_FILES`, `SELKIES_UI_SIDEBAR_SHOW_APPS`) if they are not explicitly set by the user. | -| **`HARDEN_OPENBOX`** | Enables `DISABLE_CLOSE_BUTTON`, `DISABLE_MOUSE_BUTTONS`, and `HARDEN_KEYBINDS`. It also flags `RESTART_APP` if not set by the user, ensuring the primary application is automatically restarted if closed. | - -##### Individual Hardening Variables - -| Variable | Description | -| :--- | --- | -| **`DISABLE_OPEN_TOOLS`** | If true, disables `xdg-open` and `exo-open` binaries by removing their execute permissions. | -| **`DISABLE_SUDO`** | If true, disables the `sudo` command by removing its execute permissions and invalidating the passwordless sudo configuration. 
| -| **`DISABLE_TERMINALS`** | If true, disables common terminal emulators by removing their execute permissions and hiding them from the Openbox right-click menu. | -| **`DISABLE_CLOSE_BUTTON`** | If true, removes the close button from window title bars in the Openbox window manager. | -| **`DISABLE_MOUSE_BUTTONS`** | If true, disables the right-click and middle-click context menus and actions within the Openbox window manager. | -| **`HARDEN_KEYBINDS`** | If true, disables default Openbox keybinds that can bypass other hardening options (e.g., `Alt+F4` to close windows, `Alt+Escape` to show the root menu). | -| **`RESTART_APP`** | If true, enables a watchdog service that automatically restarts the main application if it is closed. The user's autostart script is made read-only and root owned to prevent tampering. | - -#### Selkies application settings - -Using environment variables every facet of the application can be configured. - -##### Booleans and Locking -Boolean settings accept `true` or `false`. You can also prevent the user from changing a boolean setting in the UI by appending `|locked`. The UI toggle for this setting will be hidden. - -* **Example**: To force CPU encoding on and prevent the user from disabling it: - ```bash - -e SELKIES_USE_CPU="true|locked" - ``` - -##### Enums and Lists -These settings accept a comma-separated list of values. Their behavior depends on the number of items provided: - -* **Multiple Values**: The first item in the list becomes the default selection, and all items in the list become the available options in the UI dropdown. -* **Single Value**: The provided value becomes the default, and the UI dropdown is hidden because the choice is locked. - -* **Example**: Force the encoder to be `jpeg` with no other options available to the user: - ```bash - -e SELKIES_ENCODER="jpeg" - ``` - -##### Ranges -Range settings define a minimum and maximum for a value (e.g., framerate). 
- -* **To set a range**: Use a hyphen-separated `min-max` format. The UI will show a slider. -* **To set a fixed value**: Provide a single number. This will lock the value and hide the UI slider. - -* **Example**: Lock the framerate to exactly 60 FPS. - ```bash - -e SELKIES_FRAMERATE="60" - ``` - -##### Manual Resolution Mode -The server can be forced to use a single, fixed resolution for all connecting clients. This mode is automatically activated if `SELKIES_MANUAL_WIDTH`, `SELKIES_MANUAL_HEIGHT`, or `SELKIES_IS_MANUAL_RESOLUTION_MODE` is set. - -* If `SELKIES_MANUAL_WIDTH` and/or `SELKIES_MANUAL_HEIGHT` are set, the resolution is locked to those values. -* If `SELKIES_IS_MANUAL_RESOLUTION_MODE` is set to `true` without specifying width or height, the resolution defaults to **1024x768**. -* When this mode is active, the client UI for changing resolution is disabled. - -| Environment Variable | Default Value | Description | -| --- | --- | --- | -| `SELKIES_UI_TITLE` | `'Selkies'` | Title in top left corner of sidebar. | -| `SELKIES_UI_SHOW_LOGO` | `True` | Show the Selkies logo in the sidebar. | -| `SELKIES_UI_SHOW_SIDEBAR` | `True` | Show the main sidebar UI. | -| `SELKIES_UI_SHOW_CORE_BUTTONS` | `True` | Show the core components buttons display, audio, microphone, and gamepad. | -| `SELKIES_UI_SIDEBAR_SHOW_VIDEO_SETTINGS` | `True` | Show the video settings section in the sidebar. | -| `SELKIES_UI_SIDEBAR_SHOW_SCREEN_SETTINGS` | `True` | Show the screen settings section in the sidebar. | -| `SELKIES_UI_SIDEBAR_SHOW_AUDIO_SETTINGS` | `True` | Show the audio settings section in the sidebar. | -| `SELKIES_UI_SIDEBAR_SHOW_STATS` | `True` | Show the stats section in the sidebar. | -| `SELKIES_UI_SIDEBAR_SHOW_CLIPBOARD` | `True` | Show the clipboard section in the sidebar. | -| `SELKIES_UI_SIDEBAR_SHOW_FILES` | `True` | Show the file transfer section in the sidebar. | -| `SELKIES_UI_SIDEBAR_SHOW_APPS` | `True` | Show the applications section in the sidebar. 
| -| `SELKIES_UI_SIDEBAR_SHOW_SHARING` | `True` | Show the sharing section in the sidebar. | -| `SELKIES_UI_SIDEBAR_SHOW_GAMEPADS` | `True` | Show the gamepads section in the sidebar. | -| `SELKIES_UI_SIDEBAR_SHOW_FULLSCREEN` | `True` | Show the fullscreen button in the sidebar. | -| `SELKIES_UI_SIDEBAR_SHOW_GAMING_MODE` | `True` | Show the gaming mode button in the sidebar. | -| `SELKIES_UI_SIDEBAR_SHOW_TRACKPAD` | `True` | Show the virtual trackpad button in the sidebar. | -| `SELKIES_UI_SIDEBAR_SHOW_KEYBOARD_BUTTON` | `True` | Show the on-screen keyboard button in the display area. | -| `SELKIES_UI_SIDEBAR_SHOW_SOFT_BUTTONS` | `True` | Show the soft buttons section in the sidebar. | -| `SELKIES_AUDIO_ENABLED` | `True` | Enable server-to-client audio streaming. | -| `SELKIES_MICROPHONE_ENABLED` | `True` | Enable client-to-server microphone forwarding. | -| `SELKIES_GAMEPAD_ENABLED` | `True` | Enable gamepad support. | -| `SELKIES_CLIPBOARD_ENABLED` | `True` | Enable clipboard synchronization. | -| `SELKIES_COMMAND_ENABLED` | `True` | Enable parsing of command websocket messages. | -| `SELKIES_FILE_TRANSFERS` | `'upload,download'` | Allowed file transfer directions (comma-separated: "upload,download"). Set to "" or "none" to disable. | -| `SELKIES_ENCODER` | `'x264enc,x264enc-striped,jpeg'` | The default video encoders. | -| `SELKIES_FRAMERATE` | `'8-120'` | Allowed framerate range or a fixed value. | -| `SELKIES_H264_CRF` | `'5-50'` | Allowed H.264 CRF range or a fixed value. | -| `SELKIES_JPEG_QUALITY` | `'1-100'` | Allowed JPEG quality range or a fixed value. | -| `SELKIES_H264_FULLCOLOR` | `False` | Enable H.264 full color range for pixelflux encoders. | -| `SELKIES_H264_STREAMING_MODE` | `False` | Enable H.264 streaming mode for pixelflux encoders. | -| `SELKIES_USE_CPU` | `False` | Force CPU-based encoding for pixelflux. | -| `SELKIES_USE_PAINT_OVER_QUALITY` | `True` | Enable high-quality paint-over for static scenes. 
| -| `SELKIES_PAINT_OVER_JPEG_QUALITY` | `'1-100'` | Allowed JPEG paint-over quality range or a fixed value. | -| `SELKIES_H264_PAINTOVER_CRF` | `'5-50'` | Allowed H.264 paint-over CRF range or a fixed value. | -| `SELKIES_H264_PAINTOVER_BURST_FRAMES` | `'1-30'` | Allowed H.264 paint-over burst frames range or a fixed value. | -| `SELKIES_SECOND_SCREEN` | `True` | Enable support for a second monitor/display. | -| `SELKIES_AUDIO_BITRATE` | `'320000'` | The default audio bitrate. | -| `SELKIES_IS_MANUAL_RESOLUTION_MODE` | `False` | Lock the resolution to the manual width/height values. | -| `SELKIES_MANUAL_WIDTH` | `0` | Lock width to a fixed value. Setting this forces manual resolution mode. | -| `SELKIES_MANUAL_HEIGHT` | `0` | Lock height to a fixed value. Setting this forces manual resolution mode. | -| `SELKIES_SCALING_DPI` | `'96'` | The default DPI for UI scaling. | -| `SELKIES_ENABLE_BINARY_CLIPBOARD` | `False` | Allow binary data on the clipboard. | -| `SELKIES_USE_BROWSER_CURSORS` | `False` | Use browser CSS cursors instead of rendering to canvas. | -| `SELKIES_USE_CSS_SCALING` | `False` | HiDPI when false, if true a lower resolution is sent from the client and the canvas is stretched. | -| `SELKIES_PORT` (or `CUSTOM_WS_PORT`) | `8082` | Port for the data websocket server. | -| `SELKIES_DRI_NODE` (or `DRI_NODE`) | `''` | Path to the DRI render node for VA-API. | -| `SELKIES_AUDIO_DEVICE_NAME` | `'output.monitor'` | Audio device name for pcmflux capture. | -| `SELKIES_WATERMARK_PATH` (or `WATERMARK_PNG`) | `''` | Absolute path to the watermark PNG file. | -| `SELKIES_WATERMARK_LOCATION` (or `WATERMARK_LOCATION`) | `-1` | Watermark location enum (0-6). | -| `SELKIES_DEBUG` | `False` | Enable debug logging. | -| `SELKIES_ENABLE_SHARING` | `True` | Master toggle for all sharing features. | -| `SELKIES_ENABLE_COLLAB` | `True` | Enable collaborative (read-write) sharing link. | -| `SELKIES_ENABLE_SHARED` | `True` | Enable view-only sharing links. 
| -| `SELKIES_ENABLE_PLAYER2` | `True` | Enable sharing link for gamepad player 2. | -| `SELKIES_ENABLE_PLAYER3` | `True` | Enable sharing link for gamepad player 3. | -| `SELKIES_ENABLE_PLAYER4` | `True` | Enable sharing link for gamepad player 4. | - -## Usage - -To help you get started creating a container from this image you can either use docker-compose or the docker cli. - ->[!NOTE] ->Unless a parameter is flaged as 'optional', it is *mandatory* and a value must be provided. - -### docker-compose (recommended, [click here for more info](https://docs.linuxserver.io/general/docker-compose)) - -```yaml --- -services: - webtop: - image: lscr.io/linuxserver/webtop:latest - container_name: webtop - environment: - - PUID=1000 - - PGID=1000 - - TZ=Etc/UTC - volumes: - - /path/to/data:/config - ports: - - 3000:3000 - - 3001:3001 - shm_size: "1gb" - restart: unless-stopped + +## 使い方 + +### コンテナの起動 + +`start-container.sh`スクリプトはGPUとオプションの引数を使用: + +```bash +# 構文: ./start-container.sh [--gpu <type>] [options] +# デフォルト: オプション未指定時はソフトウェアレンダリング + +# NVIDIA GPUオプション: +./start-container.sh --gpu nvidia --all # 全利用可能NVIDIA GPUを使用 +./start-container.sh --gpu nvidia --num 0 # NVIDIA GPU 0のみ使用 +./start-container.sh --gpu nvidia --num 0,1 # NVIDIA GPU 0と1を使用 + +# Intel/AMD GPUオプション: +./start-container.sh --gpu intel # Intel統合GPU使用(Quick Sync Video) +./start-container.sh --gpu amd # AMD GPU使用(VCE/VCN) + +# WSL2 NVIDIA: +./start-container.sh --gpu nvidia-wsl --all # WSL2でのNVIDIA GPU + +# ソフトウェアレンダリング: +./start-container.sh # GPUなし(デフォルト) +./start-container.sh --gpu none # GPUなしを明示的に指定 + +# 解像度とDPI: +./start-container.sh --gpu nvidia --all -r 3840x2160 -d 192 # 4K HiDPI +./start-container.sh -r 2560x1440 -d 144 # WQHD ``` -### docker cli ([click here for more info](https://docs.docker.com/engine/reference/commandline/cli/)) +**UIDベースのポート割り当て(マルチユーザー対応):** + +ポートは自動的にユーザーIDに基づいて割り当てられ、同一ホストで複数ユーザーが使用可能: + +- **HTTPSポート**: `30000 + UID`(例: UID 1000 → ポート 31000) +- **HTTPポート**: `40000 + 
UID`(例: UID 1000 → ポート 41000) + +アクセス: `https://localhost:${HTTPS_PORT}`(例: UID 1000で `https://localhost:31000`) + +**リモートアクセス(LAN/WAN):** + +WebRTCによるリモートアクセスが可能: + +- LAN IPアドレスを自動検出 +- リモートPCからアクセス: `https://<host-ip>:<https-port>` + +**コンテナの特徴:** + +- **コンテナ永続化:** 停止しても削除されない(再起動またはcommit可能) +- **ホスト名:** `Docker-$(hostname)`に設定 +- **ホストホームマウント:** `~/host_home`で利用可能 +- **コンテナ名:** `linuxserver-kde-{username}` + +### 変更の保存(重要!) + +ソフトウェアをインストールしたり設定を変更した場合: ```bash -docker run -d \ - --name=webtop \ - -e PUID=1000 \ - -e PGID=1000 \ - -e TZ=Etc/UTC \ - -p 3000:3000 \ - -p 3001:3001 \ - -v /path/to/data:/config \ - --shm-size="1gb" \ - --restart unless-stopped \ - lscr.io/linuxserver/webtop:latest +# コンテナ状態をイメージに保存 +./commit-container.sh ``` -## Parameters +**重要な注意:** + +- ⚠️ **`./stop-container.sh --rm`の前に必ずcommit** - commitしないと変更が失われます +- ✅ イメージ名形式は `webtop-kde-{username}-{arch}:{version}` +- ✅ commitしたイメージはコンテナ削除後も残る +- ✅ 次回起動時は自動的にcommitしたイメージを使用 -Containers are configured using parameters passed at runtime (such as those above). These parameters are separated by a colon and indicate `<external>:<internal>` respectively. For example, `-p 8080:80` would expose port `80` from inside the container to be accessible from the host's IP on port `8080` outside the container. +**ワークフロー例:** -| Parameter | Function | -| :----: | --- | -| `-p 3000:3000` | Web Desktop GUI HTTP, must be proxied | -| `-p 3001:3001` | Web Desktop GUI HTTPS | -| `-e PUID=1000` | for UserID - see below for explanation | -| `-e PGID=1000` | for GroupID - see below for explanation | -| `-e TZ=Etc/UTC` | specify a timezone to use, see this [list](https://en.wikipedia.org/wiki/List_of_tz_database_time_zones#List). | -| `-v /config` | abc users home directory | -| `--shm-size=` | Recommended for all desktop images. | +```bash +# 1. コンテナ内で作業、ソフトウェアインストール、設定変更 +./shell-container.sh +# ... パッケージインストール、環境設定 ... +exit + +# 2. 
変更をイメージに保存 +./commit-container.sh -## Environment variables from files (Docker secrets) +# 3. コンテナを安全に停止・削除(変更はイメージに保存済み) +./stop-container.sh --rm -You can set any environment variable from a file by using a special prepend `FILE__`. +# 4. 次回起動時、commitしたイメージで全変更が反映 +./start-container.sh --gpu intel +``` -As an example: +### コンテナの停止 ```bash --e FILE__MYVAR=/run/secrets/mysecretvariable +# 停止(再起動またはcommit用に保持) +./stop-container.sh + +# 停止して削除 +./stop-container.sh --rm +# または +./stop-container.sh -r ``` -Will set the environment variable `MYVAR` based on the contents of the `/run/secrets/mysecretvariable` file. +--- + +## 付録: ベースイメージのビルド + +ベースイメージは初回のみビルドが必要(30-60分): + +```bash +# デフォルトのリポジトリ: ghcr.io/tatsuyai713/webtop-kde +# ホストアーキテクチャに合わせて自動検出 +./files/build-base-image.sh # Ubuntu 24.04 (デフォルト) +./files/build-base-image.sh -u 22.04 # Ubuntu 22.04 + +# または明示的に指定 +./files/build-base-image.sh -a amd64 # Intel/AMD 64-bit +./files/build-base-image.sh -a arm64 # Apple Silicon / ARM +./files/build-base-image.sh -a amd64 -u 22.04 # AMD64 + Ubuntu 22.04 + +# キャッシュなしでビルド(問題がある場合) +./files/build-base-image.sh --no-cache + +# GHCRに保存する場合(デフォルトのリポジトリ名を使用) +./files/push-base-image.sh -## Umask for running applications +# リポジトリ名を変える場合 +IMAGE_NAME=ghcr.io/tatsuyai713/your-base ./files/build-base-image.sh +IMAGE_NAME=ghcr.io/tatsuyai713/your-base ./files/push-base-image.sh +``` + +--- + +## 付録: スクリプトリファレンス -For all of our images we provide the ability to override the default umask settings for services started within the containers using the optional `-e UMASK=022` setting. -Keep in mind umask is not chmod it subtracts from permissions based on it's value it does not add. Please read up [here](https://en.wikipedia.org/wiki/Umask) before asking for support. 
+### コアスクリプト -## User / Group Identifiers +| スクリプト | 説明 | 使い方 | +|--------|-------------|-------| +| `files/build-base-image.sh` | ベースイメージをビルド | `./files/build-base-image.sh [-a arch]` | +| `build-user-image.sh` | ユーザー固有イメージをビルド | `./build-user-image.sh [-l ja]` | +| `start-container.sh` | デスクトップコンテナを起動 | `./start-container.sh [--gpu <type>]` | +| `stop-container.sh` | コンテナを停止 | `./stop-container.sh [--rm]` | -When using volumes (`-v` flags), permissions issues can arise between the host OS and the container, we avoid this issue by allowing you to specify the user `PUID` and group `PGID`. +### 管理スクリプト -Ensure any volume directories on the host are owned by the same user you specify and any permissions issues will vanish like magic. +| スクリプト | 説明 | 使い方 | +|--------|-------------|-------| +| `shell-container.sh` | コンテナシェルにアクセス | `./shell-container.sh` | +| `commit-container.sh` | コンテナ変更をイメージに保存 | `./commit-container.sh` | +| `files/push-base-image.sh` | ベースイメージをGHCRへPush | `./files/push-base-image.sh` | -In this instance `PUID=1000` and `PGID=1000`, to find yours use `id your_user` as below: +### GPUオプション詳細 ```bash -id your_user +./start-container.sh [オプション] + +GPU選択: + -g, --gpu <vendor> GPUベンダー: none|nvidia|nvidia-wsl|intel|amd + --all 全GPU使用(nvidia/nvidia-wsl用) + --num <list> カンマ区切りGPUリスト(nvidia用、WSL非対応) + +GPU使用例: + --gpu nvidia --all # NVIDIA GPU - 全利用可能 + --gpu nvidia --num 0,1 # NVIDIA GPU - 特定GPU + --gpu nvidia-wsl --all # WSL2上のNVIDIA + --gpu intel # Intel統合/ディスクリートGPU(VA-API) + --gpu amd # AMD GPU(VA-API + 利用可能ならROCm) + --gpu none # ソフトウェアレンダリングのみ + +その他オプション: + -n <name> コンテナ名 + -r <WxH> 解像度(例: 1920x1080) + -d <dpi> DPI(例: 96, 144, 192) + -s <ssl_dir> SSL証明書ディレクトリ ``` -Example output: +--- + +## 付録: 設定 -```text -uid=1000(your_user) gid=1000(your_user) groups=1000(your_user) +### 表示設定 + +```bash +# 解像度とDPI +./start-container.sh -r 1920x1080 -d 96 # 標準 +./start-container.sh -r 2560x1440 -d 144 # WQHD HiDPI +./start-container.sh -r 3840x2160 -d 192 # 4K HiDPI 
``` -## Docker Mods +### ビデオエンコード -[![Docker Mods](https://img.shields.io/badge/dynamic/yaml?color=94398d&labelColor=555555&logoColor=ffffff&style=for-the-badge&label=webtop&query=%24.mods%5B%27webtop%27%5D.mod_count&url=https%3A%2F%2Fraw.githubusercontent.com%2Flinuxserver%2Fdocker-mods%2Fmaster%2Fmod-list.yml)](https://mods.linuxserver.io/?mod=webtop "view available mods for this container.") [![Docker Universal Mods](https://img.shields.io/badge/dynamic/yaml?color=94398d&labelColor=555555&logoColor=ffffff&style=for-the-badge&label=universal&query=%24.mods%5B%27universal%27%5D.mod_count&url=https%3A%2F%2Fraw.githubusercontent.com%2Flinuxserver%2Fdocker-mods%2Fmaster%2Fmod-list.yml)](https://mods.linuxserver.io/?mod=universal "view available universal mods.") +**ハードウェアエンコード (Pixelflux):** -We publish various [Docker Mods](https://github.com/linuxserver/docker-mods) to enable additional functionality within the containers. The list of Mods available for this image (if any) as well as universal mods that can be applied to any one of our images can be accessed via the dynamic badges above. 
+| GPU | エンコーダー | 品質 | CPU負荷 | +|-----|-------------|------|---------| +| NVIDIA | NVENC | 高 | 低 | +| Intel | VA-API (Quick Sync) | 高 | 低 | +| AMD | VA-API | 高 | 低 | +| なし | Software (libx264) | 中 | 高 | -## Support Info +エンコーダーは`--gpu`オプションに基づいてPixelfluxが自動選択します。 +ハードウェアエンコードはゼロコピーパイプラインにより低遅延を実現します。 -* Shell access whilst the container is running: +### オーディオ設定 - ```bash - docker exec -it webtop /bin/bash - ``` +**オーディオサポート:** -* To monitor the logs of the container in realtime: +| 機能 | サポート | 技術 | +|------|---------|------| +| スピーカー出力 | ✅ 内蔵 | WebRTC(ブラウザネイティブ) | +| マイク入力 | ✅ 内蔵 | WebRTC(ブラウザネイティブ) | - ```bash - docker logs -f webtop - ``` +Selkiesは双方向オーディオをブラウザにWebRTC経由でストリーミングします。 -* Container version number: +--- - ```bash - docker inspect -f '{{ index .Config.Labels "build_version" }}' webtop - ``` +## 付録: HTTPS/SSL -* Image version number: +### SSL証明書の設定 - ```bash - docker inspect -f '{{ index .Config.Labels "build_version" }}' lscr.io/linuxserver/webtop:latest - ``` +```bash +# 1. ssl/ディレクトリを作成 +mkdir -p ssl -## Updating Info +# 2. 証明書を配置 +cp /path/to/your/cert.pem ssl/ +cp /path/to/your/key.pem ssl/cert.key -Most of our images are static, versioned, and require an image update and container recreation to update the app inside. With some exceptions (noted in the relevant readme.md), we do not recommend or support updating apps inside the container. Please consult the [Application Setup](#application-setup) section above to see if it is recommended for the image. +# 3. コンテナ起動(ssl/フォルダを自動検出) +./start-container.sh --gpu nvidia --all +``` -Below are the instructions for updating containers: +### 自己署名証明書の生成 -### Via Docker Compose +```bash +openssl req -x509 -nodes -days 365 -newkey rsa:2048 \ + -keyout ssl/cert.key -out ssl/cert.pem \ + -subj "/C=JP/ST=Tokyo/L=Tokyo/O=Dev/CN=localhost" +``` -* Update images: - * All images: +### 証明書の優先順位 - ```bash - docker-compose pull - ``` +`start-container.sh`スクリプトは以下の順序で証明書を自動検出: - * Single image: +1. 
`ssl/cert.pem`と`ssl/cert.key` +2. 環境変数`SSL_DIR` +3. 証明書が見つからない場合はイメージのデフォルト証明書を使用 - ```bash - docker-compose pull webtop - ``` +--- -* Update containers: - * All containers: +## トラブルシューティング - ```bash - docker-compose up -d - ``` +### コンテナが起動しない - * Single container: +```bash +# ログを確認 +docker logs linuxserver-kde-$(whoami) - ```bash - docker-compose up -d webtop - ``` +# イメージが存在するか確認 +docker images | grep webtop-kde -* You can also remove the old dangling images: +# ユーザーイメージを再ビルド +./build-user-image.sh - ```bash - docker image prune - ``` +# ポートが使用中か確認 +sudo netstat -tulpn | grep -E "31000|41000" +``` -### Via Docker Run +### GPUが検出されない -* Update the image: +```bash +# NVIDIA +./shell-container.sh +nvidia-smi - ```bash - docker pull lscr.io/linuxserver/webtop:latest - ``` +# Intel/AMD +./shell-container.sh +ls -la /dev/dri/ +vainfo -* Stop the running container: +# Docker GPUアクセス確認 +docker run --rm --gpus all nvidia/cuda:11.0-base nvidia-smi +``` - ```bash - docker stop webtop - ``` +### 権限の問題 -* Delete the container: +```bash +# UID一致確認 +id # ホスト上 +./shell-container.sh +id # コンテナ内 - ```bash - docker rm webtop - ``` +# UID/GID不一致の場合、ユーザーイメージを再ビルド +./build-user-image.sh +``` -* Recreate a new container with the same docker run parameters as instructed above (if mapped correctly to a host folder, your `/config` folder and settings will be preserved) -* You can also remove the old dangling images: +### 黒画面 / デスクトップが表示されない - ```bash - docker image prune - ``` +```bash +# ログ確認 +docker logs linuxserver-kde-$(whoami) -### Image Update Notifications - Diun (Docker Image Update Notifier) +# plasmashellの状態確認 +docker exec linuxserver-kde-$(whoami) pgrep -af plasmashell ->[!TIP] ->We recommend [Diun](https://crazymax.dev/diun/) for update notifications. Other tools that automatically update containers unattended are not recommended or supported. 
+# ランタイムディレクトリ確認 +docker exec linuxserver-kde-$(whoami) ls -la /run/user/$(id -u) +``` -## Building locally +**原因と対処:** +- `/run/user/<uid>`が存在しない/権限が不正 → コンテナ再起動 +- plasmashellがクラッシュ → コンテナ再起動 -If you want to make local modifications to these images for development purposes or just to customize the logic: +### WebGL/Vulkanが動かない ```bash -git clone https://github.com/linuxserver/docker-webtop.git -cd docker-webtop -docker build \ - --no-cache \ - --pull \ - -t lscr.io/linuxserver/webtop:latest . +# OpenGL情報 +docker exec linuxserver-kde-$(whoami) glxinfo | head -30 + +# Vulkan情報 +docker exec linuxserver-kde-$(whoami) vulkaninfo | head -50 ``` -The ARM variants can be built on x86_64 hardware and vice versa using `lscr.io/linuxserver/qemu-static` +**macOSの場合:** Docker VMの制限により、GPUアクセラレーションは不可。ソフトウェアレンダリングで動作。 + +### 音声が出ない ```bash -docker run --rm --privileged lscr.io/linuxserver/qemu-static --reset +# PulseAudioサーバー確認 +docker exec linuxserver-kde-$(whoami) pactl info + +# シンク一覧 +docker exec linuxserver-kde-$(whoami) pactl list sinks short +``` + +**対処:** +- ブラウザのオーディオ権限を確認 +- HTTPS接続を使用(一部ブラウザはHTTPでオーディオをブロック) + +--- + +## 既知の制限 + +### Vulkanの制限 + +- XvfbはDRI3をサポートしていないため、Vulkanアプリケーションはフレームをプレゼントできず動作しません +- VirtualGLを使用したOpenGLアプリケーションは正常に動作します +- 環境によってはXvfb上でもvkcubeが動作しNVIDIA GPUを認識します(ただし表示/presentの挙動は構成依存です) + +### macOSの制限 + +- Docker Desktop for MacはLinux VM内でコンテナを実行するため、Apple GPU(Metal)へのアクセス不可 +- WebGL/Vulkanはソフトウェアレンダリング(llvmpipe)で動作 +- ハードウェアアクセラレーションが必要な場合はLinux実機またはWSL2を使用 + +### WSL2 GPUメモ + +- WSL2はNVIDIAのみ対応 +- WSL2ではレンダリングはソフトウェア(llvmpipe)になり、WebGL/Vulkanもソフトウェア動作 + +--- + +## 付録: 高度なトピック + +### 環境変数リファレンス + +<details> +<summary>クリックで環境変数一覧を展開</summary> + +#### コンテナ設定 + +| 変数 | 説明 | デフォルト | +|------|------|----------| +| `CONTAINER_NAME` | コンテナ名 | `linuxserver-kde-$(whoami)` | +| `IMAGE_BASE` | イメージベース名 | `webtop-kde` | +| `IMAGE_VERSION` | イメージバージョン | `1.0.0` | + +#### 表示 + +| 変数 | 説明 | デフォルト | +|------|------|----------| +| `RESOLUTION` | 解像度 | 
`1920x1080` | +| `DPI` | DPI設定 | `96` | + +#### GPU + +| 変数 | 説明 | デフォルト | +|------|------|----------| +| `GPU_VENDOR` | GPUベンダー | `none` | + +#### ネットワーク + +| 変数 | 説明 | デフォルト | +|------|------|----------| +| `PORT_SSL_OVERRIDE` | HTTPSポート上書き | `UID+30000` | +| `PORT_HTTP_OVERRIDE` | HTTPポート上書き | `UID+40000` | + +</details> + +### プロジェクト構造 + ``` +devcontainer-ubuntu-kde-selkies-for-mac/ +├── build-user-image.sh # ユーザーイメージビルド +├── start-container.sh # コンテナ起動 +├── stop-container.sh # コンテナ停止 +├── shell-container.sh # シェルアクセス +├── commit-container.sh # 変更保存 +├── ssl/ # SSL証明書(自動検出) +│ ├── cert.pem +│ └── cert.key +└── files/ # システムファイル + ├── build-base-image.sh # ベースイメージビルド + ├── push-base-image.sh # ベースイメージをPush + ├── linuxserver-kde.base.dockerfile # ベースイメージ定義 + ├── linuxserver-kde.user.dockerfile # ユーザーイメージ定義 + ├── alpine-root/ # s6-overlay設定 + ├── kde-root/ # KDE設定 + └── ubuntu-root/ # Ubuntu設定 +``` + +### バージョン固定 + +再現可能なビルドのため、外部依存関係は特定バージョンに固定: + +- **VirtualGL:** 3.1.4 +- **Selkies + Pixelflux:** Selkies WebRTCストリーミングとPixelfluxエンコーダー + +**ハードウェアエンコード:** +- **NVIDIA GPU:** Pixelflux経由でNVENC自動検出 +- **Intel GPU:** Pixelflux経由でVA-API (Quick Sync Video) +- **AMD GPU:** Pixelflux経由でVA-API + +これらは [files/linuxserver-kde.base.dockerfile](files/linuxserver-kde.base.dockerfile) でビルド引数として定義。 + +--- + +## ライセンス + +**メインプロジェクト:** + +このプロジェクトは複数のオープンソースプロジェクトを基にしています: +- [linuxserver/webtop](https://github.com/linuxserver/docker-webtop) - GPL-3.0 +- [selkies-project/selkies](https://github.com/selkies-project/selkies) - MPL-2.0 +- [VirtualGL](https://github.com/VirtualGL/virtualgl) - LGPL + +詳細は各プロジェクトのライセンスを参照してください。 + +--- + +## 関連プロジェクト + +- [tatsuyai713/devcontainer-egl-desktop](https://github.com/tatsuyai713/devcontainer-egl-desktop) - EGLベース版(3つの表示モード対応) +- [linuxserver/docker-webtop](https://github.com/linuxserver/docker-webtop) - 元プロジェクト +- [selkies-project/selkies](https://github.com/selkies-project/selkies) - WebRTCストリーミング + +--- + +## クレジット + +### 元プロジェクト + +- 
**Selkies Project:** [github.com/selkies-project](https://github.com/selkies-project) +- **LinuxServer.io:** [github.com/linuxserver](https://github.com/linuxserver) + +### このフォーク -Once registered you can define the dockerfile to use with `-f Dockerfile.aarch64`. - -## Versions - -* **17.11.25:** - Rebase Fedora images to 43. -* **24.07.25:** - Rebase Debian images to Trixie. -* **17.06.25:** - Rebase all images to Selkies, drop openbox and icewm, bump Alpine to 3.22, bump Fedora to 42. -* **10.01.25:** - Rebase Fedora to 41. -* **06.12.24:** - Rebase Alpine to 3.21. -* **26.09.24:** - Swap from firefox to chromium on Alpine images. -* **23.05.24:** - Rebase Alpine to 3.20, document Nvidia support. -* **22.04.24:** - Rebase Ubuntu to Noble. -* **16.04.24:** - Add docs on PRoot Apps. -* **14.04.24:** - Rebase Fedora to 40. -* **11.02.24:** - Add PWA icons and title variants properly. -* **06.02.24:** - Update Readme about native language support. -* **29.12.23:** - Rebase Alpine to 3.19 and swap back to Firefox. -* **07.11.23:** - Rebase Fedora to 39. -* **14.06.23:** - Rebase to Debian Bookworm. -* **13.05.23:** - Rebase to Alpine 3.18 and Fedora 38. -* **23.03.23:** - Rebase all Webtops to KasmVNC base image. -* **21.10.22:** - Rebase xfce to Alpine 3.16, migrate to s6v3. -* **12.03.22:** - Add documentation for mounting in a GPU. -* **05.02.22:** - Rebase KDE Ubuntu to Jammy, add new documentation for updated gclient, stop recommending priv mode. -* **21.09.21:** - Add Fedora and Arch images, show seccomp settings in readme. -* **26.09.21:** - Rebase to Alpine versions to 3.14. -* **20.04.21:** - Initial release. 
+- **強化:** 2段階ビルドシステム、非root実行、UID/GID一致、セキュアパスワード管理、管理スクリプト、バージョン固定、マルチGPU対応 +- **メンテナー:** [@tatsuyai713](https://github.com/tatsuyai713) diff --git a/README_en.md b/README_en.md new file mode 100644 index 000000000..556a0bf3c --- /dev/null +++ b/README_en.md @@ -0,0 +1,763 @@ +# kde-selkies-webtop-devcontainer + +**[日本語版 (README.md)](README.md)** + +A containerized Kubuntu (KDE Plasma) desktop environment accessible via browser. Uses Selkies WebRTC streaming to provide a fully functional Linux desktop without VNC/RDP. Supports VS Code Dev Containers. + +### Feature Support Matrix (Platforms) + +| Environment | GPU Rendering | WebGL/Vulkan | Hardware Encoding | Notes | +|-------------|---------------|--------------|-------------------|-------| +| **Ubuntu + NVIDIA GPU** | ✅ Supported | ✅ Supported | ✅ NVENC | Best performance | +| **Ubuntu + Intel GPU** | ✅ Supported | ✅ Supported | ✅ VA-API (QSV) | Integrated GPU OK | +| **Ubuntu + AMD GPU** | ✅ Supported | ✅ Supported | ✅ VA-API | RDNA/GCN supported | +| **WSL2 + NVIDIA GPU** | ❌ Software | ❌ Software only | ✅ NVENC | Tested on WSL2 | +| **macOS (Docker)** | ❌ Not supported | ❌ Software only | ❌ Not supported | VM limitation | + +--- + +## Quick Start + +```bash +# 1. Build user image (1-2 minutes) +# The base image is pulled automatically from GHCR +./build-user-image.sh # English environment +./build-user-image.sh -l ja # Japanese environment +./build-user-image.sh -u 22.04 # Ubuntu 22.04 + +# 2. Start container +./start-container.sh --encoder software # Software encoding +./start-container.sh --encoder nvidia --gpu all # NVIDIA NVENC (all GPUs) +./start-container.sh --encoder nvidia --num 0 # NVIDIA NVENC (GPU 0 only) +./start-container.sh --encoder intel # Intel VA-API +./start-container.sh --encoder amd # AMD VA-API +./start-container.sh --encoder nvidia-wsl --gpu all # WSL2 + NVIDIA NVENC + +# 3. 
Access via browser +# → https://localhost:<30000+UID> (e.g., UID=1000 → https://localhost:31000) +# → http://localhost:<40000+UID> (e.g., UID=1000 → http://localhost:41000) + +# 4. Save your changes (IMPORTANT! Always do this before removing container) +./commit-container.sh + +# 5. Stop +./stop-container.sh # Stop (container persists, can restart) +./stop-container.sh --rm # Stop and remove (only after commit!) +``` + +That's it! 🎉 + +### Using VS Code Dev Container + +```bash +# 1. Generate Dev Container configuration +./create-devcontainer-config.sh + +# 2. Open in VS Code +# In VS Code, press "F1" → select "Dev Containers: Reopen in Container" + +# 3. The workspace will automatically open inside the container +# Access the desktop via browser at https://localhost:<displayed-port> +``` + +--- + +## 🚀 Key Improvements in This Project + +### Architecture Improvements + +- **🏗️ Two-Stage Build System:** Split into base (5-10 GB) and user images (~100 MB, 1-2 min build) + - Base image contains all system packages and desktop environment + - User image adds your specific user with matching UID/GID + - No more 30-60 minute builds for every user! 
+ +- **🔒 Non-Root Container Execution:** Containers run with user privileges by default + - Removed all `fakeroot` hacks and privilege escalation workarounds + - Proper permission separation between system and user operations + - Sudo access available when needed for specific operations + +- **📁 Automatic UID/GID Matching:** File permissions work seamlessly + - User image matches your host UID/GID automatically + - Mounted host directories have correct ownership + - No more "permission denied" errors on shared folders + +### User Experience Enhancements + +- **🔐 Secure Password Management:** Environment variable for password input + - No plain text passwords in commands + - Passwords stored securely in the image + +- **💻 Ubuntu Desktop Standard Environment:** Full `.bashrc` configuration + - Colored prompt with Git branch detection + - History optimization (ignoredups, append mode, timestamps) + - Useful aliases (ll, la, grep colors, etc.) + +- **🎮 Flexible Encoder/GPU Selection:** Clear command arguments + - `--encoder nvidia` - NVIDIA NVENC + - `--encoder intel` - Intel VA-API + - `--encoder amd` - AMD VA-API + - `--encoder software` - Software encoding + - `--gpu all` - Use all Docker GPUs (NVIDIA) + - `--num 0,1` - Specific GPU devices + +### Developer Experience + +- **📦 Version Pinning:** Reproducible builds guaranteed + - VirtualGL 3.1.4, Selkies 1.6.2 + - No more "it worked yesterday" issues + +- **🛠️ Complete Management Scripts:** Shell scripts for all operations + - `build-user-image.sh` - Build with password + - `start-container.sh --encoder <type>` - Start with encoder selection + - `stop/shell-container.sh` - Lifecycle management + - `commit-container.sh` - Save your changes + +- **🌐 Multi-Language Support:** Japanese language environment available + - Pass `-l ja` argument during build for Japanese input (Mozc) + - Automatic timezone (Asia/Tokyo) and locale (ja_JP.UTF-8) configuration + - fcitx input method framework included + - English remains the 
default + +### Why This Project? + +| Original Projects | This Project | +|------------------|--------------| +| Pull-ready image | Local build (1-2 min) | +| Root container | User-privilege container | +| Manual UID/GID setup | Automatic matching | +| Password in command | Environment variable | +| Generic bash | Ubuntu Desktop bash | +| GPU auto-detected | Encoder/GPU explicitly selected | +| Version drift | Version pinned | +| English only | Multi-language (EN/JP) | + +--- + +## Table of Contents + +- [System Requirements](#system-requirements) +- [Two-Stage Build System](#two-stage-build-system) +- [Intel/AMD GPU Host Setup](#intelamd-gpu-host-setup) +- [Setup (Typical Use)](#setup-typical-use) +- [Usage](#usage) +- [Appendix: Build Base Image (For Developers)](#appendix-build-base-image-for-developers) +- [Appendix: Scripts Reference](#appendix-scripts-reference) +- [Appendix: Configuration](#appendix-configuration) +- [Appendix: HTTPS/SSL](#appendix-httpsssl) +- [Troubleshooting](#troubleshooting) +- [Known Limitations](#known-limitations) +- [Appendix: Advanced Topics](#appendix-advanced-topics) + +--- + +## System Requirements + +### Required +- **Docker** 20.10 or later (Docker Desktop 4.0+) +- **8GB+ RAM** (16GB recommended) +- **20GB+ free disk space** + +### GPU (Optional, for hardware acceleration) +- **NVIDIA GPU** ✅ Tested + - Driver version 470 or later + - Maxwell generation or newer + - NVIDIA Container Toolkit installed +- **Intel GPU** ✅ Tested + - Intel integrated graphics (HD Graphics, Iris, Arc) + - Quick Sync Video support + - VA-API drivers included in container + - **Host setup required** (see below) +- **AMD GPU** ⚠️ Partially Tested + - Radeon graphics with VCE/VCN encoder + - VA-API drivers included in container + - **Host setup required** (see below) + +## Two-Stage Build System + +This project uses a two-stage build approach for fast setup and proper file permissions: + +``` +┌─────────────────────────┐ +│ Base Image (5-10 GB) │ ← 
Build once (30-60 minutes) +│ • All system packages │ +│ • Desktop environment │ +│ • Pre-installed apps │ +└────────────┬────────────┘ + │ + ↓ builds from +┌────────────┴────────────┐ +│ User Image (~100 MB) │ ← You build this (1-2 minutes) +│ • Your username │ +│ • Your UID/GID │ +│ • Your password │ +└─────────────────────────┘ +``` + +**Benefits:** + +- ✅ **Fast Setup:** No 30-60 minute build wait +- ✅ **Proper Permissions:** Files match your host UID/GID +- ✅ **Easy Updates:** Build new base image, rebuild user image + +**Why UID/GID Matching Matters:** + +- When you mount host directories (like `$HOME`), files need matching ownership +- Without matching UID/GID, you get permission errors +- The user image automatically matches your host credentials + +--- + +## Intel/AMD GPU Host Setup + +If you plan to use hardware encoding (VA-API) with Intel or AMD GPUs, host-side setup is required: + +### 1. Add User to video/render Groups + +For the container to access GPU devices (`/dev/dri/*`), the host user must be a member of the `video` and `render` groups: + +```bash +# Add user to video/render groups +sudo usermod -aG video,render $USER + +# Logout and re-login or reboot to apply group changes +# Verify: +groups +# Confirm output includes "video" and "render" +``` + +### 2. Install VA-API Drivers (Intel) + +For Intel GPU hardware encoding: + +```bash +# Install VA-API tools and Intel driver +sudo apt update +sudo apt install vainfo intel-media-va-driver-non-free + +# Verify installation (check for H.264 encoding support): +vainfo +# Confirm output includes "VAProfileH264Main : VAEntrypointEncSlice" etc. +``` + +### 3. Install VA-API Drivers (AMD) + +For AMD GPU hardware encoding: + +```bash +# Install VA-API tools and AMD driver +sudo apt update +sudo apt install vainfo mesa-va-drivers + +# Verify installation: +vainfo +# Confirm output includes "VAProfileH264Main : VAEntrypointEncSlice" etc. 
+``` + +**Notes:** +- NVIDIA GPUs do not require this setup +- If VA-API works correctly on the host, it will automatically work in the container +- Always logout/re-login or reboot after group changes + +--- + +## Setup (Typical Use) + +The base image is pulled automatically from GHCR, so no build is required for normal use. + +### Build User Image + +Create your personal image with matching UID/GID (1-2 minutes): + +```bash +# English (default) +./build-user-image.sh + +# Japanese +./build-user-image.sh -l ja +``` + +Note: Prefix with `USER_PASSWORD=...` to skip the interactive prompt. + +**Optional: Customization** + +```bash +# Use Ubuntu 22.04 +./build-user-image.sh -u 22.04 + +# Different version +./build-user-image.sh -v 2.0.0 + +# Use a different base image +./build-user-image.sh -b my-custom-base:1.0.0 +``` + +--- + +## Usage + +### Starting the Container + +The `start-container.sh` script uses GPU and optional arguments: + +```bash +# Syntax: ./start-container.sh [--gpu <type>] [options] +# Default: Software rendering if no options specified + +# NVIDIA GPU options: +./start-container.sh --gpu nvidia --all # Use all available NVIDIA GPUs +./start-container.sh --gpu nvidia --num 0 # Use NVIDIA GPU 0 only +./start-container.sh --gpu nvidia --num 0,1 # Use NVIDIA GPU 0 and 1 + +# Intel/AMD GPU options: +./start-container.sh --gpu intel # Use Intel integrated GPU (Quick Sync Video) +./start-container.sh --gpu amd # Use AMD GPU (VCE/VCN) + +# WSL2 NVIDIA: +./start-container.sh --gpu nvidia-wsl --all # NVIDIA GPU on WSL2 + +# Software rendering: +./start-container.sh # No GPU (default) +./start-container.sh --gpu none # Explicitly specify no GPU + +# Resolution and DPI: +./start-container.sh --gpu nvidia --all -r 3840x2160 -d 192 # 4K HiDPI +./start-container.sh -r 2560x1440 -d 144 # WQHD +``` + +**UID-Based Port Assignment (Multi-User Support):** + +Ports are automatically assigned based on your user ID to enable multiple users on the same host: + +- **HTTPS 
Port**: `30000 + UID` (e.g., UID 1000 → port 31000) +- **HTTP Port**: `40000 + UID` (e.g., UID 1000 → port 41000) + +Access via: `https://localhost:${HTTPS_PORT}` (e.g., `https://localhost:31000` for UID 1000) + +**Remote Access (LAN/WAN):** + +WebRTC remote access is available: + +- Auto-detects LAN IP address +- Access from remote PC: `https://<host-ip>:<https-port>` + +**Container Features:** + +- **Container persistence:** Not removed when stopped (can restart or commit) +- **Hostname:** Set to `Docker-$(hostname)` +- **Host home mount:** Available at `~/host_home` +- **Container name:** `linuxserver-kde-{username}` + +### Saving Changes (Important!) + +If you've installed software or made changes: + +```bash +# Save container state to image +./commit-container.sh +``` + +**Important Notes:** + +- ⚠️ **Always commit before `./stop-container.sh --rm`** - Changes are lost if you remove without committing +- ✅ The image name format is `webtop-kde-{username}-{arch}:{version}` +- ✅ Committed images persist even after container deletion +- ✅ Next startup automatically uses the committed image + +**Workflow Example:** + +```bash +# 1. Work in container, install software, configure settings +./shell-container.sh +# ... install packages, configure environment ... +exit + +# 2. Save your changes to the image +./commit-container.sh + +# 3. Stop and remove container safely (changes are saved in image) +./stop-container.sh --rm + +# 4. 
Next startup uses the committed image with all your changes +./start-container.sh --gpu intel +``` + +### Stopping the Container + +```bash +# Stop (persists for restart or commit) +./stop-container.sh + +# Stop and remove +./stop-container.sh --rm +# or +./stop-container.sh -r +``` + +--- + +## Appendix: Build Base Image (For Developers) + +The base image only needs to be built once (30-60 minutes): + +```bash +# Default repository: ghcr.io/tatsuyai713/webtop-kde +# Auto-detect host architecture +./files/build-base-image.sh # Ubuntu 24.04 (default) +./files/build-base-image.sh -u 22.04 # Ubuntu 22.04 + +# Or specify explicitly +./files/build-base-image.sh -a amd64 # Intel/AMD 64-bit +./files/build-base-image.sh -a arm64 # Apple Silicon / ARM +./files/build-base-image.sh -a amd64 -u 22.04 # AMD64 + Ubuntu 22.04 + +# Build without cache (if having issues) +./files/build-base-image.sh --no-cache + +# Push to GHCR (uses the default repository) +./files/push-base-image.sh + +# Use a custom repository name +IMAGE_NAME=ghcr.io/tatsuyai713/your-base ./files/build-base-image.sh +IMAGE_NAME=ghcr.io/tatsuyai713/your-base ./files/push-base-image.sh +``` + +--- + +## Appendix: Scripts Reference + +### Core Scripts + +| Script | Description | Usage | +|--------|-------------|-------| +| `files/build-base-image.sh` | Build the base image | `./files/build-base-image.sh [-a arch]` | +| `build-user-image.sh` | Build user-specific image | `./build-user-image.sh [-l ja]` | +| `start-container.sh` | Start the desktop container | `./start-container.sh [--gpu <type>]` | +| `stop-container.sh` | Stop the container | `./stop-container.sh [--rm]` | + +### Management Scripts + +| Script | Description | Usage | +|--------|-------------|-------| +| `shell-container.sh` | Access container shell | `./shell-container.sh` | +| `commit-container.sh` | Save container changes to image | `./commit-container.sh` | +| `files/push-base-image.sh` | Push base image to GHCR | `./files/push-base-image.sh` | 
+ +### GPU Options Details + +```bash +./start-container.sh [options] + +GPU Selection: + -g, --gpu <vendor> GPU vendor: none|nvidia|nvidia-wsl|intel|amd + --all Use all GPUs (for nvidia/nvidia-wsl) + --num <list> Comma-separated GPU list (for nvidia, not supported on WSL) + +GPU Examples: + --gpu nvidia --all # NVIDIA GPU - all available + --gpu nvidia --num 0,1 # NVIDIA GPU - specific GPUs + --gpu nvidia-wsl --all # NVIDIA on WSL2 + --gpu intel # Intel integrated/discrete GPU (VA-API) + --gpu amd # AMD GPU (VA-API + ROCm if available) + --gpu none # Software rendering only + +Other Options: + -n <name> Container name + -r <WxH> Resolution (e.g., 1920x1080) + -d <dpi> DPI (e.g., 96, 144, 192) + -s <ssl_dir> SSL certificate directory +``` + +--- + +## Appendix: Configuration + +### Display Settings + +```bash +# Resolution and DPI +./start-container.sh -r 1920x1080 -d 96 # Standard +./start-container.sh -r 2560x1440 -d 144 # WQHD HiDPI +./start-container.sh -r 3840x2160 -d 192 # 4K HiDPI +``` + +### Video Encoding + +**Hardware Encoding (Pixelflux):** + +| GPU | Encoder | Quality | CPU Load | +|-----|---------|---------|----------| +| NVIDIA | NVENC | High | Low | +| Intel | VA-API (Quick Sync) | High | Low | +| AMD | VA-API | High | Low | +| None | Software (libx264) | Medium | High | + +Encoder is automatically selected by Pixelflux based on `--gpu` option. +Hardware encoding achieves low latency through zero-copy pipeline. + +### Audio Settings + +**Audio Support:** + +| Feature | Support | Technology | +|---------|---------|------------| +| Speaker output | ✅ Built-in | WebRTC (browser native) | +| Microphone input | ✅ Built-in | WebRTC (browser native) | + +Selkies streams bidirectional audio to the browser via WebRTC. + +--- + +## Appendix: HTTPS/SSL + +### SSL Certificate Setup + +```bash +# 1. Create ssl/ directory +mkdir -p ssl + +# 2. Place certificates +cp /path/to/your/cert.pem ssl/ +cp /path/to/your/key.pem ssl/cert.key + +# 3. 
Start container (auto-detects ssl/ folder) +./start-container.sh --gpu nvidia --all +``` + +### Self-Signed Certificate Generation + +```bash +openssl req -x509 -nodes -days 365 -newkey rsa:2048 \ + -keyout ssl/cert.key -out ssl/cert.pem \ + -subj "/C=US/ST=State/L=City/O=Dev/CN=localhost" +``` + +### Certificate Priority + +The `start-container.sh` script auto-detects certificates in this order: + +1. `ssl/cert.pem` and `ssl/cert.key` +2. Environment variable `SSL_DIR` +3. Uses image default certificate if none found + +--- + +## Troubleshooting + +### Container Won't Start + +```bash +# Check logs +docker logs linuxserver-kde-$(whoami) + +# Check if image exists +docker images | grep webtop-kde + +# Rebuild user image +./build-user-image.sh + +# Check if port is in use +sudo netstat -tulpn | grep -E "31000|41000" +``` + +### GPU Not Detected + +```bash +# NVIDIA +./shell-container.sh +nvidia-smi + +# Intel/AMD +./shell-container.sh +ls -la /dev/dri/ +vainfo + +# Check Docker GPU access +docker run --rm --gpus all nvidia/cuda:11.0-base nvidia-smi +``` + +### Permission Issues + +```bash +# Check UID match +id # on host +./shell-container.sh +id # inside container + +# If UID/GID mismatch, rebuild user image +./build-user-image.sh +``` + +### Black Screen / Desktop Not Showing + +```bash +# Check logs +docker logs linuxserver-kde-$(whoami) + +# Check plasmashell status +docker exec linuxserver-kde-$(whoami) pgrep -af plasmashell + +# Check runtime directory +docker exec linuxserver-kde-$(whoami) ls -la /run/user/$(id -u) +``` + +**Causes and Solutions:** +- `/run/user/<uid>` doesn't exist / wrong permissions → Restart container +- plasmashell crashed → Restart container + +### WebGL/Vulkan Not Working + +```bash +# OpenGL info +docker exec linuxserver-kde-$(whoami) glxinfo | head -30 + +# Vulkan info +docker exec linuxserver-kde-$(whoami) vulkaninfo | head -50 +``` + +**For macOS:** Due to Docker VM limitations, GPU acceleration is not available. 
Works with software rendering. + +### No Audio + +```bash +# Check PulseAudio server +docker exec linuxserver-kde-$(whoami) pactl info + +# List sinks +docker exec linuxserver-kde-$(whoami) pactl list sinks short +``` + +**Solutions:** +- Check browser audio permissions +- Use HTTPS connection (some browsers block audio over HTTP) + +--- + +## Known Limitations + +### Vulkan Limitation + +- Xvfb does not support DRI3, so Vulkan applications cannot present frames +- VirtualGL-based OpenGL applications work normally +- In some setups, vkcube runs under Xvfb and detects the NVIDIA GPU, but presentation behavior depends on the configuration + +### macOS Limitation + +- Docker Desktop for Mac runs containers inside a Linux VM, so Apple GPU (Metal) access is not possible +- WebGL/Vulkan runs via software rendering (llvmpipe) +- Use Linux native or WSL2 if hardware acceleration is needed + +### WSL2 GPU Notes + +- Only NVIDIA is supported on WSL2 +- Rendering is software (llvmpipe), so WebGL/Vulkan are software-only + +--- + +## Appendix: Advanced Topics + +### Environment Variables Reference + +<details> +<summary>Click to expand environment variables list</summary> + +#### Container Settings + +| Variable | Description | Default | +|----------|-------------|---------| +| `CONTAINER_NAME` | Container name | `linuxserver-kde-$(whoami)` | +| `IMAGE_BASE` | Image base name | `webtop-kde` | +| `IMAGE_VERSION` | Image version | `1.0.0` | + +#### Display + +| Variable | Description | Default | +|----------|-------------|---------| +| `RESOLUTION` | Resolution | `1920x1080` | +| `DPI` | DPI setting | `96` | + +#### GPU + +| Variable | Description | Default | +|----------|-------------|---------| +| `GPU_VENDOR` | GPU vendor | `none` | + +#### Network + +| Variable | Description | Default | +|----------|-------------|---------| +| `PORT_SSL_OVERRIDE` | HTTPS port override | `UID+30000` | +| `PORT_HTTP_OVERRIDE` | HTTP port override | `UID+40000` | + + +</details> + +### Project 
Structure + +``` +devcontainer-ubuntu-kde-selkies-for-mac/ +├── build-user-image.sh # Build user image +├── start-container.sh # Start container +├── stop-container.sh # Stop container +├── shell-container.sh # Shell access +├── commit-container.sh # Save changes +├── ssl/ # SSL certificates (auto-detected) +│ ├── cert.pem +│ └── cert.key +└── files/ # System files + ├── build-base-image.sh # Build base image + ├── push-base-image.sh # Push base image + ├── linuxserver-kde.base.dockerfile # Base image definition + ├── linuxserver-kde.user.dockerfile # User image definition + ├── alpine-root/ # s6-overlay configuration + ├── kde-root/ # KDE configuration + └── ubuntu-root/ # Ubuntu configuration +``` + +### Version Pinning + +External dependencies are pinned to specific versions for reproducible builds: + +- **VirtualGL:** 3.1.4 +- **Selkies + Pixelflux:** Selkies WebRTC streaming with Pixelflux encoder + +**Hardware Encoding:** +- **NVIDIA GPU:** NVENC auto-detection via Pixelflux +- **Intel GPU:** VA-API (Quick Sync Video) via Pixelflux +- **AMD GPU:** VA-API via Pixelflux + +These are defined in [files/linuxserver-kde.base.dockerfile](files/linuxserver-kde.base.dockerfile) as build arguments. + +--- + +## License + +**Main Project:** + +This project is based on multiple open source projects: +- [linuxserver/webtop](https://github.com/linuxserver/docker-webtop) - GPL-3.0 +- [selkies-project/selkies](https://github.com/selkies-project/selkies) - MPL-2.0 +- [VirtualGL](https://github.com/VirtualGL/virtualgl) - LGPL + +See each project's license for details. 
+ +--- + +## Related Projects + +- [tatsuyai713/devcontainer-egl-desktop](https://github.com/tatsuyai713/devcontainer-egl-desktop) - EGL-based version (3 display modes) +- [linuxserver/docker-webtop](https://github.com/linuxserver/docker-webtop) - Original project +- [selkies-project/selkies](https://github.com/selkies-project/selkies) - WebRTC streaming + +--- + +## Credits + +### Original Projects + +- **Selkies Project:** [github.com/selkies-project](https://github.com/selkies-project) +- **LinuxServer.io:** [github.com/linuxserver](https://github.com/linuxserver) + +### This Project + +- **Enhancements:** Two-stage build system, non-root execution, UID/GID matching, secure password management, management scripts, version pinning, multi-GPU support +- **Maintainer:** [@tatsuyai713](https://github.com/tatsuyai713) diff --git a/build-user-image.sh b/build-user-image.sh new file mode 100755 index 000000000..efb031dfc --- /dev/null +++ b/build-user-image.sh @@ -0,0 +1,188 @@ +#!/usr/bin/env bash +set -euo pipefail + +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" +FILES_DIR="${SCRIPT_DIR}/files" +DOCKERFILE_USER="${FILES_DIR}/linuxserver-kde.user.dockerfile" + +HOST_ARCH=$(uname -m) +VERSION=${VERSION:-1.0.0} +UBUNTU_VERSION=${UBUNTU_VERSION:-24.04} +USER_NAME=$(whoami) +USER_UID=$(id -u) +USER_GID=$(id -g) +VIDEO_GID=$(getent group video 2>/dev/null | cut -d: -f3 || true) +RENDER_GID=$(getent group render 2>/dev/null | cut -d: -f3 || true) +BASE_IMAGE=${BASE_IMAGE:-} +IMAGE_NAME_BASE=${IMAGE_NAME:-ghcr.io/tatsuyai713/webtop-kde} +OUTPUT_IMAGE_BASE="" +TARGET_ARCH=${ARCH_OVERRIDE:-} +PLATFORM_OVERRIDE=${PLATFORM_OVERRIDE:-} +USER_PASSWORD=${USER_PASSWORD:-} +USER_LANGUAGE=${USER_LANGUAGE:-en} +HOST_HOSTNAME_DEFAULT="Docker-$(hostname)" +PLATFORM_ARCH_HINT="" +LANG_ARG="en_US.UTF-8" +LANGUAGE_ARG="en_US:en" +NO_CACHE_FLAG="" +DOCKER_CMD=(docker) + +usage() { + cat <<EOF +Usage: $0 [-b base_image] [-i base_image_name] [-a arch] [-p platform] [-l language] [-v version] 
[-u ubuntu_version] + -b, --base Base image tag (required; expected: <name>-base-<arch>-u<ubuntu_ver>:<version>) + -i, --image Base image name (default: ${IMAGE_NAME_BASE}) + -u, --ubuntu Ubuntu version (22.04 or 24.04). Default: ${UBUNTU_VERSION} + -a, --arch Arch hint (amd64/arm64) to pick base tag + -p, --platform Platform override for buildx (e.g. linux/arm64) + -l, --language Language pack to install (en or ja). Default: ${USER_LANGUAGE} + -v, --version Version tag to use (default: ${VERSION}) + -n, --no-cache Build without cache (passes --no-cache to buildx) + (env) USER_PASSWORD Password to set for the user (will prompt if empty) +EOF +} + +while [[ $# -gt 0 ]]; do + case "$1" in + -b|--base) BASE_IMAGE=$2; shift 2 ;; + -i|--image) IMAGE_NAME_BASE=$2; shift 2 ;; + -u|--ubuntu) UBUNTU_VERSION=$2; shift 2 ;; + -a|--arch) TARGET_ARCH=$2; shift 2 ;; + -p|--platform) PLATFORM_OVERRIDE=$2; shift 2 ;; + -l|--language) USER_LANGUAGE=$2; shift 2 ;; + -v|--version) VERSION=$2; shift 2 ;; + -n|--no-cache) NO_CACHE_FLAG="--no-cache"; shift ;; + -h|--help) usage; exit 0 ;; + *) echo "Unknown option: $1" >&2; usage; exit 1 ;; + esac +done + +if command -v docker >/dev/null 2>&1; then + CURRENT_CONTEXT="$(docker context show 2>/dev/null || true)" + if [[ -n "${CURRENT_CONTEXT}" && "${CURRENT_CONTEXT}" != "default" ]]; then + if docker context inspect default >/dev/null 2>&1; then + DOCKER_CMD=(docker --context=default) + fi + fi +fi + +if [[ -n "${PLATFORM_OVERRIDE}" ]]; then + case "${PLATFORM_OVERRIDE}" in + linux/amd64) PLATFORM_ARCH_HINT=amd64 ;; + linux/arm64) PLATFORM_ARCH_HINT=arm64 ;; + *) PLATFORM_ARCH_HINT="" ;; + esac +fi + +if [[ -z "${OUTPUT_IMAGE_BASE}" ]]; then + OUTPUT_IMAGE_BASE="${IMAGE_NAME_BASE##*/}" +fi + +if [[ -z "${TARGET_ARCH}" ]]; then + if [[ -n "${PLATFORM_ARCH_HINT}" ]]; then + TARGET_ARCH="${PLATFORM_ARCH_HINT}" + else + case "${HOST_ARCH}" in + x86_64|amd64) TARGET_ARCH=amd64 ;; + aarch64|arm64) TARGET_ARCH=arm64 ;; + *) echo "Unsupported 
host arch: ${HOST_ARCH}. Please pass -a." >&2; exit 1 ;; + esac + fi +fi + +PLATFORM="linux/${TARGET_ARCH}" +if [[ -n "${PLATFORM_OVERRIDE}" ]]; then + PLATFORM="${PLATFORM_OVERRIDE}" +fi + +if [[ -z "${BASE_IMAGE}" ]]; then + # macOS bash (3.x) lacks mapfile; use a portable read loop + BASE_CANDIDATES=() + while IFS= read -r line; do + BASE_CANDIDATES+=("$line") + done < <("${DOCKER_CMD[@]}" images --format '{{.Repository}}:{{.Tag}}' | grep "^${IMAGE_NAME_BASE}-base-${TARGET_ARCH}-u${UBUNTU_VERSION}:" || true) + while IFS= read -r line; do + BASE_CANDIDATES+=("$line") + done < <("${DOCKER_CMD[@]}" images --format '{{.Repository}}:{{.Tag}}' | grep "/${OUTPUT_IMAGE_BASE}-base-${TARGET_ARCH}-u${UBUNTU_VERSION}:" || true) + + if [[ ${#BASE_CANDIDATES[@]} -eq 0 ]]; then + DEFAULT_BASE_IMAGE="${IMAGE_NAME_BASE}-base-${TARGET_ARCH}-u${UBUNTU_VERSION}:${VERSION}" + echo "No local base image found. Attempting to pull ${DEFAULT_BASE_IMAGE}..." >&2 + if "${DOCKER_CMD[@]}" pull --platform "${PLATFORM}" "${DEFAULT_BASE_IMAGE}"; then + BASE_IMAGE="${DEFAULT_BASE_IMAGE}" + else + echo "BASE_IMAGE not provided and no local base found matching ${IMAGE_NAME_BASE}-base-${TARGET_ARCH}-u${UBUNTU_VERSION}:<tag>. Pass -b/--base." >&2 + exit 1 + fi + else + for candidate in "${BASE_CANDIDATES[@]}"; do + if [[ "${candidate}" != *":latest" ]]; then + BASE_IMAGE="${candidate}" + break + fi + done + if [[ -z "${BASE_IMAGE}" ]]; then + BASE_IMAGE="${BASE_CANDIDATES[0]}" + fi + echo "Using detected base image: ${BASE_IMAGE}" + fi +fi + +if [[ "${BASE_IMAGE}" == *":latest" ]]; then + echo "Warning: BASE_IMAGE uses ':latest' (${BASE_IMAGE}); consider pinning a version." >&2 +fi + +if [[ -z "${USER_PASSWORD}" ]]; then + read -s -p "Enter password for user ${USER_NAME}: " USER_PASSWORD; echo + read -s -p "Confirm password: " USER_PASSWORD_CONFIRM; echo + if [[ "${USER_PASSWORD}" != "${USER_PASSWORD_CONFIRM}" ]]; then + echo "Password mismatch." 
>&2 + exit 1 + fi +fi + +echo "Building user image from ${BASE_IMAGE}" +echo "User: ${USER_NAME} (${USER_UID}:${USER_GID})" +if [[ -n "${VIDEO_GID}" ]] || [[ -n "${RENDER_GID}" ]]; then + echo "Video/render GIDs: ${VIDEO_GID:-N/A}/${RENDER_GID:-N/A}" +fi +echo "Target arch: ${TARGET_ARCH}, platform: ${PLATFORM}" +echo "Ubuntu version: ${UBUNTU_VERSION}" +echo "Language: ${USER_LANGUAGE}" +echo "Version tag: ${VERSION}" + +# Check if base image exists using docker images (more reliable than inspect) +if ! "${DOCKER_CMD[@]}" images --format '{{.Repository}}:{{.Tag}}' | grep -q "^${BASE_IMAGE}$"; then + echo "Base image ${BASE_IMAGE} not found locally. Build it first (e.g. ./files/build-base-image.sh -a ${TARGET_ARCH} --ubuntu ${UBUNTU_VERSION} -v ${VERSION})." >&2 + exit 1 +fi + +if [[ "${USER_LANGUAGE}" == "ja" ]]; then + LANG_ARG="ja_JP.UTF-8" + LANGUAGE_ARG="ja_JP:ja" +fi + +if [[ ! -f "${DOCKERFILE_USER}" ]]; then + echo "User Dockerfile not found: ${DOCKERFILE_USER}" >&2 + exit 1 +fi + +"${DOCKER_CMD[@]}" buildx build \ + --platform "${PLATFORM}" \ + ${NO_CACHE_FLAG} \ + -f "${DOCKERFILE_USER}" \ + --build-arg BASE_IMAGE="${BASE_IMAGE}" \ + --build-arg USER_NAME="${USER_NAME}" \ + --build-arg USER_UID="${USER_UID}" \ + --build-arg USER_GID="${USER_GID}" \ + --build-arg VIDEO_GID="${VIDEO_GID}" \ + --build-arg RENDER_GID="${RENDER_GID}" \ + --build-arg USER_PASSWORD="${USER_PASSWORD}" \ + --build-arg USER_LANGUAGE="${USER_LANGUAGE}" \ + --build-arg USER_LANG_ENV="${LANG_ARG}" \ + --build-arg USER_LANGUAGE_ENV="${LANGUAGE_ARG}" \ + --build-arg HOST_HOSTNAME="${HOST_HOSTNAME_DEFAULT}" \ + --progress=plain \ + --load \ + -t "${OUTPUT_IMAGE_BASE}-${USER_NAME}-${TARGET_ARCH}-u${UBUNTU_VERSION}:${VERSION}" \ + "${FILES_DIR}" diff --git a/commit-container.sh b/commit-container.sh new file mode 100755 index 000000000..6eb7433a9 --- /dev/null +++ b/commit-container.sh @@ -0,0 +1,120 @@ +#!/usr/bin/env bash +set -euo pipefail + +HOST_USER=${USER:-$(whoami)} 
+NAME=${CONTAINER_NAME:-linuxserver-kde-${HOST_USER}} +TARGET_IMAGE=${TARGET_IMAGE:-webtop-kde} +TARGET_ARCH=${TARGET_ARCH:-} +TARGET_VERSION=${TARGET_VERSION:-1.0.0} +UBUNTU_VERSION=${UBUNTU_VERSION:-} +RESTART=${RESTART:-false} + +usage() { + cat <<EOF +Usage: $0 [-n container_name] [-t target_image_base] [-v version] [-u ubuntu_version] [-r] + -n container name to commit (default: ${NAME}) + -t target image base (no arch/tag), e.g. webtop-kde (default: ${TARGET_IMAGE}) + -v version tag to use (default: ${TARGET_VERSION}) + -u, --ubuntu Ubuntu version (22.04 or 24.04). Auto-detected if not specified + -r restart container after commit + +Environment variables: + RESTART set to 'true' to restart container after commit + +Examples: + $0 # Commit container (auto-detect Ubuntu version) + $0 -r # Commit and restart container + $0 -v 2.0.0 # Commit with specific version tag + $0 -u 22.04 # Commit with specific Ubuntu version +EOF +} + +while getopts ":n:t:v:u:rh-:" opt; do + case "$opt" in + n) NAME=$OPTARG ;; + t) TARGET_IMAGE=$OPTARG ;; + v) TARGET_VERSION=$OPTARG ;; + u) UBUNTU_VERSION=$OPTARG ;; + r) RESTART=true ;; + h) usage; exit 0 ;; + -) + case "${OPTARG}" in + ubuntu) UBUNTU_VERSION="${!OPTIND}"; OPTIND=$((OPTIND + 1)) ;; + *) echo "Unknown option: --${OPTARG}" >&2; usage; exit 1 ;; + esac + ;; + *) usage; exit 1 ;; + esac +done + +if ! docker ps -a --format '{{.Names}}' | grep -qx "$NAME"; then + echo "Container ${NAME} not found." 
>&2 + exit 1 +fi + +ARCH_FROM_LABEL=$(docker inspect --format '{{ index .Config.Labels "org.opencontainers.image.architecture" }}' "$NAME" 2>/dev/null || true) +IMAGE_FROM_CONFIG=$(docker inspect --format '{{ .Config.Image }}' "$NAME" 2>/dev/null || true) +IMAGE_ID_FROM_CONTAINER=$(docker inspect --format '{{ .Image }}' "$NAME" 2>/dev/null || true) + +detect_arch_from_image_name() { + # Expect patterns like webtop-kde-<user>-amd64:1.0.0 or webtop-kde-<user>-arm64:tag + local img="$1" + local repo="${img%%:*}" + local suffix="${repo##*-}" + case "${suffix}" in + amd64|x86_64) echo "amd64" ;; + arm64|aarch64) echo "arm64" ;; + *) echo "" ;; + esac +} + +if [[ -z "${TARGET_ARCH}" ]]; then + if [[ -n "${IMAGE_ID_FROM_CONTAINER}" ]]; then + TARGET_ARCH=$(docker image inspect --format '{{ .Architecture }}' "${IMAGE_ID_FROM_CONTAINER}" 2>/dev/null || true) + fi + if [[ -z "${TARGET_ARCH}" && -n "${ARCH_FROM_LABEL}" ]]; then + TARGET_ARCH="${ARCH_FROM_LABEL}" + elif [[ -z "${TARGET_ARCH}" && -n "${IMAGE_FROM_CONFIG}" ]]; then + TARGET_ARCH="$(detect_arch_from_image_name "${IMAGE_FROM_CONFIG}")" + elif [[ -z "${TARGET_ARCH}" ]]; then + TARGET_ARCH=$(docker inspect "$NAME" 2>/dev/null \ + | python3 -c 'import sys,json; data=json.load(sys.stdin); print(data[0].get("Architecture",""))' || true) + fi +fi +if [[ -z "${TARGET_ARCH}" ]]; then + HOST_ARCH=$(uname -m) + case "${HOST_ARCH}" in + x86_64|amd64) TARGET_ARCH=amd64 ;; + aarch64|arm64) TARGET_ARCH=arm64 ;; + *) echo "Unable to detect container architecture; set TARGET_ARCH env." >&2; exit 1 ;; + esac +fi + +# Auto-detect Ubuntu version from image name if not specified +if [[ -z "${UBUNTU_VERSION}" && -n "${IMAGE_FROM_CONFIG}" ]]; then + # Try to extract Ubuntu version from image name pattern: ...-u22.04:... or ...-u24.04:... 
+ if [[ "${IMAGE_FROM_CONFIG}" =~ -u([0-9]+\.[0-9]+) ]]; then + UBUNTU_VERSION="${BASH_REMATCH[1]}" + echo "Auto-detected Ubuntu version: ${UBUNTU_VERSION}" + fi +fi + +# Default to 24.04 if still not set +if [[ -z "${UBUNTU_VERSION}" ]]; then + UBUNTU_VERSION="24.04" + echo "Ubuntu version not detected, defaulting to ${UBUNTU_VERSION}" +fi + +# Final naming: <base>-<user>-<arch>-u<ubuntu_ver>:<version> +FINAL_IMAGE="${TARGET_IMAGE}-${HOST_USER}-${TARGET_ARCH}-u${UBUNTU_VERSION}:${TARGET_VERSION}" + +echo "Committing container ${NAME} -> ${FINAL_IMAGE}" +docker commit "$NAME" "$FINAL_IMAGE" + +if [[ "${RESTART}" == "true" ]]; then + echo "Restarting container ${NAME}..." + docker restart "$NAME" >/dev/null + echo "Container ${NAME} restarted." +fi + +echo "Done." diff --git a/compose-env.sh b/compose-env.sh new file mode 100755 index 000000000..4c5abd4ad --- /dev/null +++ b/compose-env.sh @@ -0,0 +1,403 @@ +#!/bin/bash +# Generate environment variables for docker-compose (same settings as start-container.sh) +# Usage: source <(./compose-env.sh --encoder nvidia --gpu all) +# ./compose-env.sh --env-file .env --encoder intel + +set -e + +show_usage() { + cat <<'EOF' +Usage: compose-env.sh [options] + +Options (same as start-container.sh): + -e, --encoder <type> Encoder: software, nvidia, nvidia-wsl, intel, amd (required) + -g, --gpu <value> Docker --gpus value (optional): all or device=0,1 + --all Shortcut for --gpu all + --num <list> Shortcut for --gpu device=<list> + --dri-node <path> DRI render node for VA-API (e.g. 
/dev/dri/renderD129) + -u, --ubuntu <ver> Ubuntu version: 22.04 or 24.04 (default: 24.04) + -r, --resolution <res> Resolution in WIDTHxHEIGHT format (default: 1920x1080) + -d, --dpi <dpi> DPI setting (default: 96) + -t, --timezone <tz> Timezone (default: UTC, example: Asia/Tokyo) + -s, --ssl <dir> SSL directory path for HTTPS (optional) + -a, --arch <arch> Target architecture: amd64 or arm64 (default: host) + --env-file <path> Write KEY=VALUE pairs to the specified file instead of exports + -h, --help Show this help + +Environment overrides: + Resolution: RESOLUTION + DPI: DPI + Timezone: TIMEZONE + Ports: PORT_SSL_OVERRIDE, PORT_HTTP_OVERRIDE + SSL: SSL_DIR + Container: CONTAINER_NAME, CONTAINER_HOSTNAME + Image: IMAGE_BASE, IMAGE_TAG +EOF +} + +PROJECT_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" + +# Defaults (matching start-container.sh) +ENCODER="${ENCODER:-}" +GPU_VENDOR="${GPU_VENDOR:-}" +GPU_ALL="${GPU_ALL:-false}" +GPU_NUMS="${GPU_NUMS:-}" +DOCKER_GPUS="${DOCKER_GPUS:-}" +DRI_NODE="${DRI_NODE:-}" +UBUNTU_VERSION="${UBUNTU_VERSION:-24.04}" +RESOLUTION="${RESOLUTION:-1920x1080}" +DPI="${DPI:-96}" +TIMEZONE="${TIMEZONE:-UTC}" +SSL_DIR="${SSL_DIR:-}" +OUTPUT_MODE="export" +ENV_FILE="" +ARCH_OVERRIDE="" + +# Option parsing +while [[ $# -gt 0 ]]; do + case $1 in + -e|--encoder) + if [ -z "${2:-}" ]; then + echo "Error: --encoder requires an argument" >&2 + exit 1 + fi + ENCODER="${2}" + shift 2 + ;; + -g|--gpu) + if [ -z "${2:-}" ]; then + echo "Error: --gpu requires an argument" >&2 + exit 1 + fi + DOCKER_GPUS="${2}" + shift 2 + ;; + --all) + GPU_ALL="true" + shift + ;; + --num) + if [ -z "${2:-}" ]; then + echo "Error: --num requires a value (e.g. --num 0 or --num 0,1)" >&2 + exit 1 + fi + GPU_NUMS="${2}" + shift 2 + ;; + --dri-node) + if [ -z "${2:-}" ]; then + echo "Error: --dri-node requires a path (e.g. 
/dev/dri/renderD129)" >&2 + exit 1 + fi + DRI_NODE="${2}" + shift 2 + ;; + -u|--ubuntu) + if [ -z "${2:-}" ]; then + echo "Error: --ubuntu requires a version (22.04 or 24.04)" >&2 + exit 1 + fi + UBUNTU_VERSION="${2}" + shift 2 + ;; + -r|--resolution) + if [ -z "${2:-}" ]; then + echo "Error: --resolution requires a value (e.g. 1920x1080)" >&2 + exit 1 + fi + RESOLUTION="${2}" + shift 2 + ;; + -d|--dpi) + if [ -z "${2:-}" ]; then + echo "Error: --dpi requires a value" >&2 + exit 1 + fi + DPI="${2}" + shift 2 + ;; + -t|--timezone) + if [ -z "${2:-}" ]; then + echo "Error: --timezone requires a value (e.g. Asia/Tokyo)" >&2 + exit 1 + fi + TIMEZONE="${2}" + shift 2 + ;; + -s|--ssl) + if [ -z "${2:-}" ]; then + echo "Error: --ssl requires a directory path" >&2 + exit 1 + fi + SSL_DIR="${2}" + shift 2 + ;; + -a|--arch) + if [ -z "${2:-}" ]; then + echo "Error: --arch requires a value (amd64 or arm64)" >&2 + exit 1 + fi + ARCH_OVERRIDE="${2}" + shift 2 + ;; + --env-file) + if [ -z "${2:-}" ]; then + echo "Error: --env-file requires a path" >&2 + exit 1 + fi + ENV_FILE="${2}" + OUTPUT_MODE="envfile" + shift 2 + ;; + -h|--help) + show_usage + exit 0 + ;; + *) + echo "Error: Unknown option: $1" >&2 + show_usage + exit 1 + ;; + esac +done + +# Validation (match start-container.sh behavior) +if [[ ! $RESOLUTION =~ ^[0-9]+x[0-9]+$ ]]; then + echo "Error: Resolution must be WIDTHxHEIGHT (e.g. 
1920x1080)" >&2 + exit 1 +fi + +if [ -z "${ENCODER}" ]; then + echo "Error: --encoder is required" >&2 + exit 1 +fi + +ENCODER=$(echo "${ENCODER}" | tr '[:upper:]' '[:lower:]') +case "${ENCODER}" in + software|none|cpu) + ENCODER="software" + ;; + nvidia|nvidia-wsl|intel|amd) + ;; + *) + echo "Error: Unknown encoder: ${ENCODER}" >&2 + exit 1 + ;; +esac + +GPU_VENDOR="${ENCODER}" + +if [ -z "${DOCKER_GPUS}" ]; then + if [ "${GPU_ALL}" = "true" ]; then + DOCKER_GPUS="all" + elif [ -n "${GPU_NUMS}" ]; then + DOCKER_GPUS="device=${GPU_NUMS}" + fi +fi + +if [ -n "${DOCKER_GPUS}" ]; then + if [[ "${DOCKER_GPUS}" != "all" && ! "${DOCKER_GPUS}" =~ ^device=[0-9,]+$ ]]; then + echo "Error: --gpu value must be 'all' or 'device=0,1'." >&2 + exit 1 + fi +fi + +# Base configuration +HOST_USER=${USER:-$(whoami)} +HOST_UID=$(id -u "${HOST_USER}") +HOST_GID=$(id -g "${HOST_USER}") +CONTAINER_NAME="${CONTAINER_NAME:-linuxserver-kde-${HOST_USER}}" +IMAGE_BASE="${IMAGE_BASE:-webtop-kde}" +IMAGE_TAG="${IMAGE_TAG:-}" +IMAGE_VERSION="${IMAGE_VERSION:-1.0.0}" +SHM_SIZE="${SHM_SIZE:-4g}" + +# Determine architecture +HOST_ARCH_RAW=$(uname -m) +case "${HOST_ARCH_RAW}" in + x86_64|amd64) IMAGE_ARCH="amd64" ;; + aarch64|arm64) IMAGE_ARCH="arm64" ;; + *) IMAGE_ARCH="${HOST_ARCH_RAW}" ;; +esac +if [ -n "${ARCH_OVERRIDE}" ]; then + case "${ARCH_OVERRIDE}" in + amd64|x86_64) IMAGE_ARCH="amd64" ;; + arm64|aarch64) IMAGE_ARCH="arm64" ;; + *) + echo "Error: Unsupported arch override: ${ARCH_OVERRIDE}" >&2 + exit 1 + ;; + esac +fi + +if [ -z "${IMAGE_TAG}" ]; then + IMAGE_TAG="${IMAGE_VERSION}" +fi + +USER_IMAGE="${IMAGE_BASE}-${HOST_USER}-${IMAGE_ARCH}-u${UBUNTU_VERSION}:${IMAGE_TAG}" +HOSTNAME_RAW="$(hostname)" +if [[ "$(uname -s)" == "Darwin" ]]; then + HOSTNAME_RAW="$(scutil --get HostName 2>/dev/null || true)" + if [[ -z "${HOSTNAME_RAW}" ]]; then + HOSTNAME_RAW="$(scutil --get LocalHostName 2>/dev/null || true)" + fi + if [[ -z "${HOSTNAME_RAW}" ]]; then + HOSTNAME_RAW="$(scutil --get 
ComputerName 2>/dev/null || hostname)" + fi +fi +HOSTNAME_RAW="$(printf '%s' "${HOSTNAME_RAW}" | tr ' ' '-' | sed 's/[^A-Za-z0-9._-]/-/g; s/--*/-/g; s/^-//; s/-$//')" +HOSTNAME_RAW="${HOSTNAME_RAW:-Host}" +CONTAINER_HOSTNAME="${CONTAINER_HOSTNAME:-Docker-${HOSTNAME_RAW}}" + +# Extract width and height from resolution +WIDTH=${RESOLUTION%x*} +HEIGHT=${RESOLUTION#*x} +SCALE_FACTOR=$(awk "BEGIN { printf \"%.2f\", ${DPI} / 96 }") +FORCE_DEVICE_SCALE_FACTOR="${SCALE_FACTOR}" +ORIG_CHROMIUM_FLAGS="${CHROMIUM_FLAGS:-}" +if [ -n "${ORIG_CHROMIUM_FLAGS}" ]; then + CHROMIUM_FLAGS="--force-device-scale-factor=${SCALE_FACTOR} ${ORIG_CHROMIUM_FLAGS}" +else + CHROMIUM_FLAGS="--force-device-scale-factor=${SCALE_FACTOR}" +fi + +# Ports (UID-based, but allow overrides) +HOST_PORT_SSL="${PORT_SSL_OVERRIDE:-$((HOST_UID + 30000))}" +HOST_PORT_HTTP="${PORT_HTTP_OVERRIDE:-$((HOST_UID + 40000))}" + +# Get host IP +HOST_IP="${HOST_IP:-$(hostname -I 2>/dev/null | awk '{print $1}' || ip route get 1 2>/dev/null | awk '{print $7; exit}' || echo "127.0.0.1")}" +if [ -z "${HOST_IP}" ]; then + if [ "$(uname -s)" = "Darwin" ]; then + HOST_IP="$(ipconfig getifaddr en0 2>/dev/null || ipconfig getifaddr en1 2>/dev/null || echo "127.0.0.1")" + else + HOST_IP="127.0.0.1" + fi +fi + +# Home mount path +HOST_HOME_MOUNT="/home/${HOST_USER}/host_home" +HOST_MNT_MOUNT="/home/${HOST_USER}/host_mnt" + +# GPU configuration +# Note: pixelflux handles hardware encoding automatically based on GPU_VENDOR +ENABLE_NVIDIA="false" +LIBVA_DRIVER_NAME="" +NVIDIA_VISIBLE_DEVICES="" +GPU_DEVICES="" +WSL_ENVIRONMENT="false" +DISABLE_ZINK="false" +XDG_RUNTIME_DIR="" +LD_LIBRARY_PATH="" + +case "${GPU_VENDOR}" in + nvidia) + ENABLE_NVIDIA="true" + DISABLE_ZINK="true" + if [ "${DOCKER_GPUS}" = "all" ]; then + NVIDIA_VISIBLE_DEVICES="all" + elif [[ "${DOCKER_GPUS}" =~ ^device= ]]; then + NVIDIA_VISIBLE_DEVICES="${DOCKER_GPUS#device=}" + fi + if [ -d "/dev/dri" ]; then + GPU_DEVICES="/dev/dri:/dev/dri:rwm" + fi + ;; + 
nvidia-wsl) + ENABLE_NVIDIA="true" + WSL_ENVIRONMENT="true" + DISABLE_ZINK="true" + XDG_RUNTIME_DIR="/mnt/wslg/runtime-dir" + LD_LIBRARY_PATH="/usr/lib/wsl/lib" + if [ "${DOCKER_GPUS}" = "all" ]; then + NVIDIA_VISIBLE_DEVICES="all" + elif [[ "${DOCKER_GPUS}" =~ ^device= ]]; then + NVIDIA_VISIBLE_DEVICES="${DOCKER_GPUS#device=}" + fi + ;; + intel) + LIBVA_DRIVER_NAME="${LIBVA_DRIVER_NAME:-iHD}" + if [ -d "/dev/dri" ]; then + GPU_DEVICES="/dev/dri:/dev/dri:rwm" + else + echo "Warning: /dev/dri not found, Intel VA-API not available." >&2 + fi + # Pass DRI_NODE if specified + if [ -n "${DRI_NODE}" ]; then + echo "Using specified DRI node: ${DRI_NODE}" >&2 + fi + ;; + amd) + LIBVA_DRIVER_NAME="${LIBVA_DRIVER_NAME:-radeonsi}" + if [ -d "/dev/dri" ]; then + GPU_DEVICES="/dev/dri:/dev/dri:rwm" + else + echo "Warning: /dev/dri not found, AMD VA-API not available." >&2 + fi + if [ -e "/dev/kfd" ]; then + GPU_DEVICES="${GPU_DEVICES:+${GPU_DEVICES},}/dev/kfd:/dev/kfd:rwm" + fi + # Pass DRI_NODE if specified + if [ -n "${DRI_NODE}" ]; then + echo "Using specified DRI node: ${DRI_NODE}" >&2 + fi + ;; + software|"") + ENABLE_NVIDIA="false" + ;; +esac + +USER_UID="${HOST_UID}" +USER_GID="${HOST_GID}" +USER_NAME="${HOST_USER}" + +# SSL configuration +SSL_CERT_PATH="" +SSL_KEY_PATH="" +if [ -n "${SSL_DIR}" ] && [ -d "${SSL_DIR}" ]; then + if [ -f "${SSL_DIR}/cert.pem" ] && [ -f "${SSL_DIR}/cert.key" ]; then + SSL_CERT_PATH="${SSL_DIR}/cert.pem" + SSL_KEY_PATH="${SSL_DIR}/cert.key" + fi +fi + +# Environment variables for docker-compose +# Note: VIDEO_ENCODER, SELKIES_ENCODER, TURN-related variables removed (pixelflux handles encoding) +ENV_VARS=( + HOST_USER HOST_UID HOST_GID CONTAINER_NAME USER_IMAGE CONTAINER_HOSTNAME + IMAGE_BASE IMAGE_TAG IMAGE_VERSION IMAGE_ARCH UBUNTU_VERSION + HOST_PORT_SSL HOST_PORT_HTTP HOST_IP + WIDTH HEIGHT DPI SCALE_FACTOR FORCE_DEVICE_SCALE_FACTOR CHROMIUM_FLAGS SHM_SIZE RESOLUTION TIMEZONE + ENCODER GPU_VENDOR GPU_ALL GPU_NUMS DOCKER_GPUS DRI_NODE + 
ENABLE_NVIDIA LIBVA_DRIVER_NAME NVIDIA_VISIBLE_DEVICES GPU_DEVICES
+    WSL_ENVIRONMENT DISABLE_ZINK XDG_RUNTIME_DIR LD_LIBRARY_PATH
+    SSL_DIR SSL_CERT_PATH SSL_KEY_PATH
+    HOST_HOME_MOUNT HOST_MNT_MOUNT
+    USER_UID USER_GID USER_NAME
+)
+
+# NOTE: WSL_ENVIRONMENT and DISABLE_ZINK are already listed in ENV_VARS above.
+# They are initialised to "false" (a non-empty string), so the conditional
+# re-append that used to live here was always true and merely duplicated
+# their entries in the emitted env file / export list.
+
+
+
+emit_exports() {
+    for var in "${ENV_VARS[@]}"; do
+        printf 'export %s="%s"\n' "${var}" "${!var}"
+    done
+}
+
+emit_envfile() {
+    for var in "${ENV_VARS[@]}"; do
+        printf '%s=%s\n' "${var}" "${!var}"
+    done
+}
+
+if [ -n "${ENV_FILE}" ]; then
+    mkdir -p "$(dirname "${ENV_FILE}")"
+    emit_envfile > "${ENV_FILE}"
+else
+    emit_exports
+fi
diff --git a/create-devcontainer-config.sh b/create-devcontainer-config.sh
new file mode 100755
index 000000000..43c95b876
--- /dev/null
+++ b/create-devcontainer-config.sh
@@ -0,0 +1,621 @@
+#!/bin/bash
+# Create VS Code .devcontainer configuration
+# This script creates a devcontainer.json that works with the webtop KDE desktop container
+
+set -e
+
+echo "========================================"
+echo "VS Code Dev Container Configuration"
+echo "========================================"
+echo "This script will create a .devcontainer configuration"
+echo "for using this container with VS Code."
+echo ""
+
+# Check if .devcontainer already exists
+if [ -d ".devcontainer" ]; then
+    echo "⚠️  .devcontainer directory already exists."
+    read -p "Overwrite existing configuration? (y/N): " -n 1 -r
+    echo
+    if [[ ! $REPLY =~ ^[Yy]$ ]]; then
+        echo "Cancelled."
+ exit 0 + fi + rm -rf .devcontainer +fi + +# Default values +ENCODER="software" +GPU_VENDOR="" +GPU_ALL="false" +GPU_NUMS="" +DOCKER_GPUS="" +DRI_NODE="" +UBUNTU_VERSION="24.04" +RESOLUTION="1920x1080" +DPI="96" +SSL_DIR="" +HOST_ARCH_RAW=$(uname -m) +case "${HOST_ARCH_RAW}" in + x86_64|amd64) DETECTED_ARCH="amd64" ;; + aarch64|arm64) DETECTED_ARCH="arm64" ;; + *) DETECTED_ARCH="${HOST_ARCH_RAW}" ;; +esac +TARGET_ARCH="${DETECTED_ARCH}" + +# Interactive configuration +echo "========================================" +echo "Configuration Questions" +echo "========================================" +echo "" + +# Encoder configuration +echo "1. Encoder Configuration" +echo "------------------------" +echo "Select encoder type:" +echo " 1) Software (CPU)" +echo " 2) NVIDIA (NVENC)" +echo " 3) NVIDIA WSL2 (NVENC)" +echo " 4) Intel (VA-API)" +echo " 5) AMD (VA-API)" +read -p "Select [1-5] (default: 1): " encoder_choice + +case "${encoder_choice}" in + 2) + ENCODER="nvidia" + echo "NVIDIA encoder selected." + ;; + 3) + ENCODER="nvidia-wsl" + echo "NVIDIA WSL2 encoder selected." + ;; + 4) + ENCODER="intel" + echo "Intel encoder selected." + echo "" + echo "DRI Node Configuration (for multi-GPU systems)" + echo "----------------------------------------------" + echo "If you have multiple GPUs (e.g., NVIDIA + Intel), you may need to specify" + echo "which render node to use for VA-API encoding." + echo "Run 'ls -la /dev/dri/renderD*' to see available nodes." + read -p "Specify DRI node? (e.g., /dev/dri/renderD129, or press Enter to skip): " DRI_NODE_INPUT + DRI_NODE="${DRI_NODE_INPUT}" + ;; + 5) + ENCODER="amd" + echo "AMD encoder selected." + echo "" + echo "DRI Node Configuration (for multi-GPU systems)" + echo "----------------------------------------------" + echo "If you have multiple GPUs (e.g., NVIDIA + AMD), you may need to specify" + echo "which render node to use for VA-API encoding." + echo "Run 'ls -la /dev/dri/renderD*' to see available nodes." 
+ read -p "Specify DRI node? (e.g., /dev/dri/renderD129, or press Enter to skip): " DRI_NODE_INPUT + DRI_NODE="${DRI_NODE_INPUT}" + ;; + *) + ENCODER="none" + echo "Software encoder selected." + ;; +esac + +GPU_VENDOR="${ENCODER}" + +# Docker GPU selection (optional, mostly for NVIDIA) +if [ "${ENCODER}" = "nvidia" ] || [ "${ENCODER}" = "nvidia-wsl" ]; then + echo "" + echo "Docker GPU Selection (Optional)" + echo "-------------------------------" + read -p "Enable Docker --gpus? (Y/n): " -n 1 -r + echo + if [[ ! $REPLY =~ ^[Nn]$ ]]; then + if [ "${ENCODER}" = "nvidia-wsl" ]; then + GPU_ALL="true" + GPU_NUMS="" + echo "WSL2 uses all GPUs (gpus=all)." + else + read -p "Use all NVIDIA GPUs? (Y/n): " -n 1 -r + echo + if [[ $REPLY =~ ^[Nn]$ ]]; then + read -p "Enter GPU device numbers (comma-separated, e.g., 0,1): " GPU_NUMS + GPU_ALL="false" + else + GPU_ALL="true" + GPU_NUMS="" + fi + fi + fi +fi +echo "" + +# Ubuntu version +echo "2. Ubuntu Version" +echo "----------------" +read -p "Ubuntu version (22.04 or 24.04, default: 24.04): " UBUNTU_VERSION +UBUNTU_VERSION="${UBUNTU_VERSION:-24.04}" +echo "" + +# Architecture +echo "3. Architecture" +echo "---------------" +read -p "Target architecture (amd64 or arm64, default: ${DETECTED_ARCH}): " TARGET_ARCH_INPUT +TARGET_ARCH_INPUT="${TARGET_ARCH_INPUT:-${DETECTED_ARCH}}" +case "${TARGET_ARCH_INPUT}" in + amd64|x86_64) TARGET_ARCH="amd64" ;; + arm64|aarch64) TARGET_ARCH="arm64" ;; + *) + echo "Unsupported architecture: ${TARGET_ARCH_INPUT}" >&2 + exit 1 + ;; +esac +echo "" + +# Display settings +echo "4. Display Settings" +echo "-------------------" +read -p "Display resolution (default: 1920x1080): " RESOLUTION +RESOLUTION="${RESOLUTION:-1920x1080}" +read -p "DPI (default: 96): " DPI +DPI="${DPI:-96}" +echo "" + +# Language/Timezone settings +echo "5. 
Language/Timezone Settings" +echo "-----------------------------" +echo "Select language (affects timezone):" +echo " ja) Japanese (Asia/Tokyo)" +echo " en) English (UTC)" +read -p "Select language [ja/en] (default: en): " lang_choice +case "${lang_choice}" in + ja|JA|jp|JP) + TIMEZONE="Asia/Tokyo" + echo "Japanese selected. Timezone: Asia/Tokyo" + ;; + *) + TIMEZONE="UTC" + echo "English selected. Timezone: UTC" + ;; +esac +echo "" + +# SSL directory (optional) +echo "6. SSL Configuration (Optional)" +echo "-------------------------------" +read -p "SSL directory path (leave empty to skip): " SSL_DIR +echo "" + +# Default SSL dir fallback (same as start-container.sh) +if [ -z "${SSL_DIR}" ]; then + DEFAULT_SSL_DIR="$(pwd)/ssl" + if [ -d "${DEFAULT_SSL_DIR}" ]; then + SSL_DIR="${DEFAULT_SSL_DIR}" + echo "Using SSL dir: ${SSL_DIR}" + fi +fi + +CURRENT_USER=$(whoami) +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" +COMPOSE_ENV_SCRIPT="${SCRIPT_DIR}/compose-env.sh" + +if [ ! -x "${COMPOSE_ENV_SCRIPT}" ]; then + echo "Error: ${COMPOSE_ENV_SCRIPT} not found. Run this script from the repository root." 
>&2 + exit 1 +fi + +# Create .devcontainer directory +mkdir -p .devcontainer + +# Build compose-env arguments +COMPOSE_ARGS=(--encoder "${ENCODER}" --ubuntu "${UBUNTU_VERSION}" --resolution "${RESOLUTION}" --dpi "${DPI}" --arch "${TARGET_ARCH}" --timezone "${TIMEZONE}") +if [ "${GPU_ALL}" = "true" ]; then + COMPOSE_ARGS+=(--all) +elif [ -n "${GPU_NUMS}" ]; then + COMPOSE_ARGS+=(--num "${GPU_NUMS}") +fi +if [ -n "${DRI_NODE}" ]; then + COMPOSE_ARGS+=(--dri-node "${DRI_NODE}") +fi +if [ -n "${SSL_DIR}" ]; then + COMPOSE_ARGS+=(--ssl "${SSL_DIR}") +fi + +# Generate environment variables +ENV_FILE=".devcontainer/.env" +"${COMPOSE_ENV_SCRIPT}" "${COMPOSE_ARGS[@]}" --env-file "${ENV_FILE}" + +# Load generated environment values +set -a +# shellcheck disable=SC1090 +source "${ENV_FILE}" +set +a + +DEVCONTAINER_CONTAINER_NAME="${CONTAINER_NAME}" +{ + echo "" + echo "# Dev Container specific" + echo "DEVCONTAINER_CONTAINER_NAME=${DEVCONTAINER_CONTAINER_NAME}" +} >> "${ENV_FILE}" +export DEVCONTAINER_CONTAINER_NAME + +WORKSPACE_FOLDER="/home/${CURRENT_USER}/host_home" + +GPU_DEVICES="" +case "${GPU_VENDOR}" in + intel) + if [ -d "/dev/dri" ]; then + GPU_DEVICES="/dev/dri:/dev/dri:rwm" + fi + ;; + amd) + if [ -d "/dev/dri" ]; then + GPU_DEVICES="/dev/dri:/dev/dri:rwm" + fi + if [ -e "/dev/kfd" ]; then + GPU_DEVICES="${GPU_DEVICES:+${GPU_DEVICES},}/dev/kfd:/dev/kfd:rwm" + fi + ;; + nvidia) + if [ -d "/dev/dri" ]; then + GPU_DEVICES="/dev/dri:/dev/dri:rwm" + fi + ;; + nvidia-wsl) + if [ -e "/dev/dxg" ]; then + GPU_DEVICES="/dev/dxg:/dev/dxg:rwm" + fi + ;; +esac + +# Build forward port list +FORWARD_PORTS=("${HOST_PORT_SSL}" "${HOST_PORT_HTTP}") + +FORWARD_PORTS_JSON="" +for PORT in "${FORWARD_PORTS[@]}"; do + if [ -n "${FORWARD_PORTS_JSON}" ]; then + FORWARD_PORTS_JSON="${FORWARD_PORTS_JSON}, +" + fi + FORWARD_PORTS_JSON="${FORWARD_PORTS_JSON} ${PORT}" +done + +PORT_ATTRIBUTES_JSON=" \"${HOST_PORT_SSL}\": { + \"label\": \"HTTPS Web UI\", + \"onAutoForward\": \"notify\" + }, + 
\"${HOST_PORT_HTTP}\": { + \"label\": \"HTTP Web UI\", + \"onAutoForward\": \"silent\" + }" + +# devcontainer.json +cat > .devcontainer/devcontainer.json << EOF +{ + "name": "KDE Desktop (encoder: ${ENCODER})", + "dockerComposeFile": [ + "docker-compose.base.yml", + "docker-compose.override.yml" + ], + "service": "webtop", + "workspaceFolder": "${WORKSPACE_FOLDER}", + "runServices": ["webtop"], + "overrideCommand": false, + "shutdownAction": "none", + "initializeCommand": "cd \${localWorkspaceFolder:-${PWD}} && if [ -f .devcontainer/.env ]; then CN=\$(sed -n 's/^CONTAINER_NAME=//p' .devcontainer/.env | head -n1); fi; if [ -n \"\$CN\" ]; then docker rm -f \"\$CN\" >/dev/null 2>&1 || true; fi", + "forwardPorts": [ +${FORWARD_PORTS_JSON} + ], + "portsAttributes": { +${PORT_ATTRIBUTES_JSON} + }, + "customizations": { + "vscode": { + "extensions": [ + "ms-vscode-remote.remote-containers", + "ms-vscode.cpptools-extension-pack", + "ms-vscode.cmake-tools", + "ms-vscode.makefile-tools", + "redhat.vscode-yaml", + "redhat.vscode-xml", + "ms-vscode.hexeditor", + "ms-python.python", + "ms-python.vscode-pylance", + "vscode-icons-team.vscode-icons", + "donjayamanne.git-extension-pack" + ], + "settings": { + "terminal.integrated.defaultProfile.linux": "bash" + } + } + }, + "remoteUser": "${CURRENT_USER}", + "containerUser": "root", + "updateRemoteUserUID": false, + "remoteEnv": { + "USER": "${CURRENT_USER}", + "HOME": "/home/${CURRENT_USER}" + }, +EOF + # Add GPU hostRequirements if applicable + if [ "${ENCODER}" = "nvidia" ] || [ "${ENCODER}" = "nvidia-wsl" ]; then + cat >> .devcontainer/devcontainer.json << 'EOF' + "hostRequirements": { + "gpu": "optional" + }, +EOF + fi + + cat >> .devcontainer/devcontainer.json << EOF + "postCreateCommand": "echo '===== Dev Container Ready =====' && echo 'Desktop access' && echo ' HTTPS: https://localhost:${HOST_PORT_SSL}' && echo ' HTTP : http://localhost:${HOST_PORT_HTTP}' && echo 'If HTTPS fails, confirm your SSL certs or use HTTP.' 
&& echo '==============================='" +} +EOF + +# docker-compose base (match start-container.sh) +cat > .devcontainer/docker-compose.base.yml << EOF +services: + webtop: + image: \${USER_IMAGE} + container_name: \${CONTAINER_NAME} + hostname: \${CONTAINER_HOSTNAME} + shm_size: \${SHM_SIZE:-4g} + privileged: true + security_opt: + - seccomp:unconfined + environment: + - HOSTNAME=\${CONTAINER_HOSTNAME} + - HOST_HOSTNAME=\${CONTAINER_HOSTNAME} + - SHELL=/bin/bash + - DISPLAY=:1 + - DPI=\${DPI} + - SCALE_FACTOR=\${SCALE_FACTOR} + - FORCE_DEVICE_SCALE_FACTOR=\${FORCE_DEVICE_SCALE_FACTOR} + - CHROMIUM_FLAGS=\${CHROMIUM_FLAGS} + - DISPLAY_WIDTH=\${WIDTH} + - DISPLAY_HEIGHT=\${HEIGHT} + - CUSTOM_RESOLUTION=\${RESOLUTION} + - USER_UID=\${USER_UID} + - USER_GID=\${USER_GID} + - USER_NAME=\${USER_NAME} + - PUID=\${HOST_UID} + - PGID=\${HOST_GID} + - GPU_VENDOR=\${GPU_VENDOR} + - ENABLE_NVIDIA=\${ENABLE_NVIDIA} + - LIBVA_DRIVER_NAME=\${LIBVA_DRIVER_NAME} + - WSL_ENVIRONMENT=\${WSL_ENVIRONMENT} + - DISABLE_ZINK=\${DISABLE_ZINK} + - XDG_RUNTIME_DIR=\${XDG_RUNTIME_DIR} + - LD_LIBRARY_PATH=\${LD_LIBRARY_PATH} + volumes: + - \${HOME}:\${HOST_HOME_MOUNT}:rw + ports: + - \${HOST_PORT_HTTP}:3000 + - \${HOST_PORT_SSL}:3001 + restart: unless-stopped +EOF + +# docker-compose override for devcontainer +cat > .devcontainer/docker-compose.override.yml << EOF +services: + webtop: + network_mode: bridge +EOF + +DEVICE_ENTRIES=() +VOLUME_ENTRIES=() +GROUPS_TO_ADD=() + +# Add host group mappings (match start-container.sh) +VIDEO_GID=$(getent group video 2>/dev/null | cut -d: -f3 || true) +RENDER_GID=$(getent group render 2>/dev/null | cut -d: -f3 || true) +if [ -n "${VIDEO_GID}" ]; then + GROUPS_TO_ADD+=("${VIDEO_GID}") +fi +if [ -n "${RENDER_GID}" ]; then + GROUPS_TO_ADD+=("${RENDER_GID}") +fi + +if [ "${#GROUPS_TO_ADD[@]}" -gt 0 ]; then + { + echo " group_add:" + for GID in "${GROUPS_TO_ADD[@]}"; do + echo " - \"${GID}\"" + done + } >> .devcontainer/docker-compose.override.yml +fi + +if 
[ "${ENCODER}" = "nvidia" ] || [ "${ENCODER}" = "nvidia-wsl" ]; then + if [ "${GPU_ALL}" = "true" ]; then + echo " gpus: all" >> .devcontainer/docker-compose.override.yml + elif [ -n "${GPU_NUMS}" ]; then + echo " gpus: \"device=${GPU_NUMS}\"" >> .devcontainer/docker-compose.override.yml + fi +fi + +if [ "${ENCODER}" = "nvidia-wsl" ]; then + # Add WSL-specific devices if they exist + if [ -e "/dev/dxg" ]; then + DEVICE_ENTRIES+=("/dev/dxg:/dev/dxg:rwm") + fi + # Add WSL-specific volumes + if [ -d "/usr/lib/wsl/lib" ]; then + VOLUME_ENTRIES+=("/usr/lib/wsl/lib:/usr/lib/wsl/lib:ro") + fi + if [ -d "/mnt/wslg" ]; then + VOLUME_ENTRIES+=("/mnt/wslg:/mnt/wslg:rw") + VOLUME_ENTRIES+=("/mnt/wslg/.X11-unix:/tmp/.X11-unix:rw") + VOLUME_ENTRIES+=("/usr/lib/wsl/drivers:/usr/lib/wsl/drivers:ro") + fi +fi + +if [ -n "${GPU_DEVICES}" ]; then + IFS=',' read -r -a GPU_DEVICE_LIST <<< "${GPU_DEVICES}" + for DEVICE in "${GPU_DEVICE_LIST[@]}"; do + DEVICE_ENTRIES+=("${DEVICE}") + done +fi + +DEVICE_ENTRIES+=("/dev/bus/usb:/dev/bus/usb:rwm") + +# Add SSL mount when available (match start-container.sh) +if [ -n "${SSL_DIR}" ] && [ -f "${SSL_DIR}/cert.pem" ] && [ -f "${SSL_DIR}/cert.key" ]; then + VOLUME_ENTRIES+=("\${SSL_DIR}:/config/ssl:ro") +fi + +# Add /mnt mount on non-mac hosts (Docker Desktop for Mac does not share /mnt by default) +if [ "$(uname -s)" != "Darwin" ] && [ -d "/mnt" ]; then + VOLUME_ENTRIES+=("/mnt:\${HOST_MNT_MOUNT}:rw") +fi + +if [ "${#DEVICE_ENTRIES[@]}" -gt 0 ]; then + { + echo " devices:" + for DEVICE in "${DEVICE_ENTRIES[@]}"; do + echo " - ${DEVICE}" + done + } >> .devcontainer/docker-compose.override.yml +fi + +if [ "${#VOLUME_ENTRIES[@]}" -gt 0 ]; then + { + echo " volumes:" + for VOLUME in "${VOLUME_ENTRIES[@]}"; do + echo " - ${VOLUME}" + done + } >> .devcontainer/docker-compose.override.yml +fi + +# Copy .env to workspace root for docker-compose +cp "${ENV_FILE}" .env + +# README +cat > .devcontainer/README.md << EOF +# VS Code Dev Container 
Configuration
+
+The files in this directory are generated by \`./create-devcontainer-config.sh\`. It writes the same environment variables as \`start-container.sh\` into \`.devcontainer/.env\` and the repository root \`.env\`.
+
+## Generated settings
+
+- **Encoder**: ${ENCODER}
+EOF
+
+if [ "${ENCODER}" = "nvidia" ] || [ "${ENCODER}" = "nvidia-wsl" ]; then
+    if [ "${GPU_ALL}" = "true" ]; then
+        cat >> .devcontainer/README.md << 'EOF'
+- **Docker GPUs**: all
+EOF
+    elif [ -n "${GPU_NUMS}" ]; then
+        cat >> .devcontainer/README.md << EOF
+- **Docker GPUs**: device=${GPU_NUMS}
+EOF
+    fi
+fi
+
+cat >> .devcontainer/README.md << EOF
+- **Ubuntu Version**: ${UBUNTU_VERSION}
+- **Resolution**: ${RESOLUTION}
+- **DPI**: ${DPI}
+- **Timezone**: ${TIMEZONE}
+
+## Access URLs
+
+- **HTTPS**: https://localhost:${HOST_PORT_SSL}
+- **HTTP**: http://localhost:${HOST_PORT_HTTP}
+
+## How to use in VS Code
+1. Install the Dev Containers extension
+2. Open the workspace and run \`F1\` → \`Dev Containers: Reopen in Container\`
+3. VS Code reads \`.env\` and starts \`docker compose\`
+
+## Regenerating this configuration
+1. Re-run \`./create-devcontainer-config.sh\` from the repository root
+2. Existing files are overwritten after a confirmation prompt
+3. Afterwards run \`F1\` → \`Dev Containers: Rebuild Container\`
+EOF
+
+# NOTE: ${ENV_FILE} was already copied to the workspace root .env for
+# docker-compose earlier in this script; the duplicate copy was removed.
+
+echo ""
+echo "========================================"
+echo "Configuration Complete!"
+echo "========================================" +echo "" +echo "Created files:" +echo " - .devcontainer/devcontainer.json" +echo " - .devcontainer/docker-compose.base.yml" +echo " - .devcontainer/docker-compose.override.yml" +echo " - .devcontainer/.env" +echo " - .devcontainer/README.md" +echo " - .env (for docker-compose)" +echo "" +echo "Configuration summary:" +echo " - Encoder: ${ENCODER}" +if [ "${ENCODER}" = "nvidia" ] || [ "${ENCODER}" = "nvidia-wsl" ]; then + if [ "${GPU_ALL}" = "true" ]; then + echo " Docker GPUs: all" + elif [ -n "${GPU_NUMS}" ]; then + echo " Docker GPUs: device=${GPU_NUMS}" + fi +fi +echo " - Ubuntu: ${UBUNTU_VERSION}" +echo " - Resolution: ${RESOLUTION}" +echo " - DPI: ${DPI}" +echo " - Timezone: ${TIMEZONE}" +echo " - HTTPS Port: ${HOST_PORT_SSL}" +echo " - HTTP Port: ${HOST_PORT_HTTP}" +echo "" +echo "========================================" + +# Check if the user image exists +echo "Checking for user image: ${USER_IMAGE}..." +if ! docker images --format '{{.Repository}}:{{.Tag}}' | grep -q "^${USER_IMAGE}$"; then + echo "" + echo "⚠️ User image not found: ${USER_IMAGE}" + echo "Building user image automatically..." + echo "" + + # Prepare build-user-image.sh arguments + BUILD_ARGS=(--ubuntu "${UBUNTU_VERSION}" --arch "${TARGET_ARCH}") + + # Determine language argument + case "${TIMEZONE}" in + Asia/Tokyo) + BUILD_ARGS+=(--language "ja") + ;; + *) + BUILD_ARGS+=(--language "en") + ;; + esac + + # Execute build-user-image.sh + BUILD_SCRIPT="${SCRIPT_DIR}/build-user-image.sh" + if [ ! -x "${BUILD_SCRIPT}" ]; then + echo "Error: ${BUILD_SCRIPT} not found or not executable." >&2 + echo "Please run: ./build-user-image.sh ${BUILD_ARGS[*]}" >&2 + exit 1 + fi + + echo "Executing: ${BUILD_SCRIPT} ${BUILD_ARGS[*]}" + if "${BUILD_SCRIPT}" "${BUILD_ARGS[@]}"; then + echo "" + echo "✅ User image built successfully!" + echo "" + else + echo "" + echo "❌ Failed to build user image." 
>&2 + echo "Please manually run: ./build-user-image.sh ${BUILD_ARGS[*]}" >&2 + exit 1 + fi +else + echo "✅ User image found: ${USER_IMAGE}" +fi +echo "" +echo "========================================" +echo "Ready to use Dev Container!" +echo "========================================" +echo "" +echo "To start the devcontainer from VS Code:" +echo " 1) Open this workspace in VS Code." +echo " 2) Press F1 to open the Command Palette." +echo " 3) Type and run: Dev Containers: Reopen in Container" +echo " (or select 'Dev Containers: Reopen in Container')" +echo "" +echo "Tip: You can also click the green >< icon in the lower-left corner and choose 'Reopen in Container'." diff --git a/delete-image.sh b/delete-image.sh new file mode 100755 index 000000000..211b45e2d --- /dev/null +++ b/delete-image.sh @@ -0,0 +1,117 @@ +#!/usr/bin/env bash +set -euo pipefail + +HOST_USER=${USER:-$(whoami)} +IMAGE_BASE=${IMAGE_BASE:-webtop-kde} +IMAGE_VERSION=${IMAGE_VERSION:-1.0.0} +UBUNTU_VERSION=${UBUNTU_VERSION:-24.04} +IMAGE_NAME=${IMAGE_NAME:-} +FORCE=${FORCE:-false} +DELETE_BASE=${DELETE_BASE:-false} + +# Detect architecture +HOST_ARCH_RAW=$(uname -m) +case "${HOST_ARCH_RAW}" in + x86_64|amd64) DETECTED_ARCH=amd64 ;; + aarch64|arm64) DETECTED_ARCH=arm64 ;; + *) DETECTED_ARCH="${HOST_ARCH_RAW}" ;; +esac +TARGET_ARCH=${TARGET_ARCH:-${DETECTED_ARCH}} + +usage() { + cat <<EOF +Usage: $0 [-i image_name] [-u ubuntu_version] [-b] [-f] [-h] + -i full image name to delete (overrides auto-detection) + -u, --ubuntu Ubuntu version (22.04 or 24.04). 
Default: ${UBUNTU_VERSION} + -b also delete base image + -f force delete (remove dependent containers first) + -h show this help + +Default image: ${IMAGE_BASE}-${HOST_USER}-${TARGET_ARCH}-u${UBUNTU_VERSION}:${IMAGE_VERSION} + +Environment variables: + IMAGE_BASE image base name (default: webtop-kde) + IMAGE_VERSION image version (default: 1.0.0) + UBUNTU_VERSION Ubuntu version (default: 24.04) + IMAGE_NAME full image name (overrides auto-detection) + TARGET_ARCH architecture (default: auto-detect) + FORCE set to 'true' to force delete + DELETE_BASE set to 'true' to also delete base image + +Examples: + $0 # Delete user image + $0 -u 22.04 # Delete Ubuntu 22.04 user image + $0 -b # Delete user and base images + $0 -f # Force delete (remove containers first) + $0 -i myimage:1.0 # Delete specific image +EOF +} + +while getopts ":i:u:bfh-:" opt; do + case "$opt" in + i) IMAGE_NAME=$OPTARG ;; + u) UBUNTU_VERSION=$OPTARG ;; + b) DELETE_BASE=true ;; + f) FORCE=true ;; + h) usage; exit 0 ;; + -) + case "${OPTARG}" in + ubuntu) UBUNTU_VERSION="${!OPTIND}"; OPTIND=$((OPTIND + 1)) ;; + *) echo "Unknown option: --${OPTARG}" >&2; usage; exit 1 ;; + esac + ;; + *) usage; exit 1 ;; + esac +done + +# Determine image name +if [[ -z "${IMAGE_NAME}" ]]; then + IMAGE_NAME="${IMAGE_BASE}-${HOST_USER}-${TARGET_ARCH}-u${UBUNTU_VERSION}:${IMAGE_VERSION}" +fi + +delete_image() { + local img="$1" + local force_flag="$2" + + if ! docker image inspect "$img" >/dev/null 2>&1; then + echo "Image ${img} not found." + return 0 + fi + + # Check for dependent containers + local containers + containers=$(docker ps -a --filter "ancestor=${img}" --format '{{.Names}}' 2>/dev/null || true) + + if [[ -n "${containers}" ]]; then + echo "Found containers using ${img}:" + echo "${containers}" | sed 's/^/ - /' + + if [[ "${force_flag}" == "true" ]]; then + echo "Force mode: Removing dependent containers..." 
+ echo "${containers}" | xargs -r docker rm -f + else + echo "Error: Cannot delete image with dependent containers." + echo "Either remove containers first, or use -f to force delete." + return 1 + fi + fi + + echo "Deleting image ${img}..." + docker rmi "$img" + echo "Image ${img} deleted." +} + +# Delete user image +echo "=== Deleting User Image ===" +delete_image "${IMAGE_NAME}" "${FORCE}" + +# Delete base image if requested +if [[ "${DELETE_BASE}" == "true" ]]; then + BASE_IMAGE="${IMAGE_BASE}-base-${TARGET_ARCH}-u${UBUNTU_VERSION}:${IMAGE_VERSION}" + echo "" + echo "=== Deleting Base Image ===" + delete_image "${BASE_IMAGE}" "${FORCE}" +fi + +echo "" +echo "Done." diff --git a/files/alpine-root/etc/s6-overlay/s6-rc.d/ci-service-check/dependencies.d/legacy-services b/files/alpine-root/etc/s6-overlay/s6-rc.d/ci-service-check/dependencies.d/legacy-services new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/files/alpine-root/etc/s6-overlay/s6-rc.d/ci-service-check/dependencies.d/legacy-services @@ -0,0 +1 @@ + diff --git a/files/alpine-root/etc/s6-overlay/s6-rc.d/ci-service-check/type b/files/alpine-root/etc/s6-overlay/s6-rc.d/ci-service-check/type new file mode 100644 index 000000000..bdd22a185 --- /dev/null +++ b/files/alpine-root/etc/s6-overlay/s6-rc.d/ci-service-check/type @@ -0,0 +1 @@ +oneshot diff --git a/files/alpine-root/etc/s6-overlay/s6-rc.d/ci-service-check/up b/files/alpine-root/etc/s6-overlay/s6-rc.d/ci-service-check/up new file mode 100644 index 000000000..18de1bb49 --- /dev/null +++ b/files/alpine-root/etc/s6-overlay/s6-rc.d/ci-service-check/up @@ -0,0 +1 @@ +echo "[ls.io-init] done." 
diff --git a/files/alpine-root/etc/s6-overlay/s6-rc.d/init-adduser/branding b/files/alpine-root/etc/s6-overlay/s6-rc.d/init-adduser/branding new file mode 100644 index 000000000..99077a5ea --- /dev/null +++ b/files/alpine-root/etc/s6-overlay/s6-rc.d/init-adduser/branding @@ -0,0 +1,12 @@ +─────────────────────────────────────── + _____ __ __ _____ _____ _____ _____ + | | | | __|_ _| | | + | --| | |__ | | | | | | | | | + |_____|_____|_____| |_| |_____|_|_|_| + _____ __ __ _ __ ____ + | __ | | | | | | \ + | __ -| | | | |__| | | + |_____|_____|_|_____|____/ + + Based on images from linuxserver.io +─────────────────────────────────────── diff --git a/files/alpine-root/etc/s6-overlay/s6-rc.d/init-adduser/dependencies.d/init-migrations b/files/alpine-root/etc/s6-overlay/s6-rc.d/init-adduser/dependencies.d/init-migrations new file mode 100644 index 000000000..e69de29bb diff --git a/files/alpine-root/etc/s6-overlay/s6-rc.d/init-adduser/run b/files/alpine-root/etc/s6-overlay/s6-rc.d/init-adduser/run new file mode 100755 index 000000000..4582db9b1 --- /dev/null +++ b/files/alpine-root/etc/s6-overlay/s6-rc.d/init-adduser/run @@ -0,0 +1,50 @@ +#!/usr/bin/with-contenv bash +# shellcheck shell=bash + +TARGET_USER="${CUSTOM_USER:-${USER_NAME:-user}}" +PUID=${PUID:-${USER_UID:-911}} +PGID=${PGID:-${USER_GID:-911}} +USER_HOME="/home/${TARGET_USER}" + +if [[ -z ${LSIO_READ_ONLY_FS} ]] && [[ -z ${LSIO_NON_ROOT_USER} ]]; then + # ensure primary group + if getent group "${TARGET_USER}" >/dev/null; then + groupmod -o -g "${PGID}" "${TARGET_USER}" || true + else + groupadd -o -g "${PGID}" "${TARGET_USER}" || true + fi + + # ensure user + if getent passwd "${TARGET_USER}" >/dev/null; then + usermod -o -u "${PUID}" -g "${PGID}" -d "${USER_HOME}" "${TARGET_USER}" || true + else + useradd -M -d "${USER_HOME}" -u "${PUID}" -g "${PGID}" -s /bin/bash "${TARGET_USER}" || true + fi + + # basic home and ownership for shared dirs + install -d -m 755 "${USER_HOME}" + lsiown 
"${TARGET_USER}:${TARGET_USER}" /app /config /defaults /lsiopy "${USER_HOME}" +fi + +cat /run/branding 2>/dev/null || true +echo ' +─────────────────────────────────────── +GID/UID +───────────────────────────────────────' +if [[ -z ${LSIO_NON_ROOT_USER} ]]; then +echo " +User UID: $(id -u "${TARGET_USER}") +User GID: $(id -g "${TARGET_USER}") +───────────────────────────────────────" +else +echo " +User UID: $(stat /run -c %u) +User GID: $(stat /run -c %g) +───────────────────────────────────────" +fi +if [[ -f /build_version ]]; then + cat /build_version + echo ' +─────────────────────────────────────── + ' +fi diff --git a/files/alpine-root/etc/s6-overlay/s6-rc.d/init-adduser/type b/files/alpine-root/etc/s6-overlay/s6-rc.d/init-adduser/type new file mode 100644 index 000000000..bdd22a185 --- /dev/null +++ b/files/alpine-root/etc/s6-overlay/s6-rc.d/init-adduser/type @@ -0,0 +1 @@ +oneshot diff --git a/files/alpine-root/etc/s6-overlay/s6-rc.d/init-adduser/up b/files/alpine-root/etc/s6-overlay/s6-rc.d/init-adduser/up new file mode 100644 index 000000000..b8522da3e --- /dev/null +++ b/files/alpine-root/etc/s6-overlay/s6-rc.d/init-adduser/up @@ -0,0 +1 @@ +/etc/s6-overlay/s6-rc.d/init-adduser/run diff --git a/files/alpine-root/etc/s6-overlay/s6-rc.d/init-config-end/dependencies.d/init-config b/files/alpine-root/etc/s6-overlay/s6-rc.d/init-config-end/dependencies.d/init-config new file mode 100644 index 000000000..e69de29bb diff --git a/files/alpine-root/etc/s6-overlay/s6-rc.d/init-config-end/dependencies.d/init-crontab-config b/files/alpine-root/etc/s6-overlay/s6-rc.d/init-config-end/dependencies.d/init-crontab-config new file mode 100644 index 000000000..e69de29bb diff --git a/files/alpine-root/etc/s6-overlay/s6-rc.d/init-config-end/type b/files/alpine-root/etc/s6-overlay/s6-rc.d/init-config-end/type new file mode 100644 index 000000000..bdd22a185 --- /dev/null +++ b/files/alpine-root/etc/s6-overlay/s6-rc.d/init-config-end/type @@ -0,0 +1 @@ +oneshot diff --git 
a/files/alpine-root/etc/s6-overlay/s6-rc.d/init-config-end/up b/files/alpine-root/etc/s6-overlay/s6-rc.d/init-config-end/up new file mode 100644 index 000000000..c329423ed --- /dev/null +++ b/files/alpine-root/etc/s6-overlay/s6-rc.d/init-config-end/up @@ -0,0 +1 @@ +# This file doesn't do anything, it's just the end of the downstream image init process diff --git a/files/alpine-root/etc/s6-overlay/s6-rc.d/init-config/dependencies.d/init-os-end b/files/alpine-root/etc/s6-overlay/s6-rc.d/init-config/dependencies.d/init-os-end new file mode 100644 index 000000000..e69de29bb diff --git a/files/alpine-root/etc/s6-overlay/s6-rc.d/init-config/type b/files/alpine-root/etc/s6-overlay/s6-rc.d/init-config/type new file mode 100644 index 000000000..bdd22a185 --- /dev/null +++ b/files/alpine-root/etc/s6-overlay/s6-rc.d/init-config/type @@ -0,0 +1 @@ +oneshot diff --git a/files/alpine-root/etc/s6-overlay/s6-rc.d/init-config/up b/files/alpine-root/etc/s6-overlay/s6-rc.d/init-config/up new file mode 100644 index 000000000..e80acfe67 --- /dev/null +++ b/files/alpine-root/etc/s6-overlay/s6-rc.d/init-config/up @@ -0,0 +1 @@ +# This file doesn't do anything, it's just the start of the downstream image init process diff --git a/files/alpine-root/etc/s6-overlay/s6-rc.d/init-crontab-config/dependencies.d/init-config b/files/alpine-root/etc/s6-overlay/s6-rc.d/init-crontab-config/dependencies.d/init-config new file mode 100644 index 000000000..e69de29bb diff --git a/files/alpine-root/etc/s6-overlay/s6-rc.d/init-crontab-config/run b/files/alpine-root/etc/s6-overlay/s6-rc.d/init-crontab-config/run new file mode 100755 index 000000000..652460af9 --- /dev/null +++ b/files/alpine-root/etc/s6-overlay/s6-rc.d/init-crontab-config/run @@ -0,0 +1,34 @@ +#!/usr/bin/with-contenv bash +# shellcheck shell=bash + +TARGET_USER="${CUSTOM_USER:-${USER_NAME:-root}}" +for cron_user in "$TARGET_USER" root; do + if [[ -z ${LSIO_READ_ONLY_FS} ]] && [[ -z ${LSIO_NON_ROOT_USER} ]]; then + if [[ -f 
"/etc/crontabs/${cron_user}" ]]; then + lsiown "${cron_user}":"${cron_user}" "/etc/crontabs/${cron_user}" + crontab -u "${cron_user}" "/etc/crontabs/${cron_user}" + fi + fi + + if [[ -f "/defaults/crontabs/${cron_user}" ]]; then + # make folders + mkdir -p \ + /config/crontabs + + # if crontabs do not exist in config + if [[ ! -f "/config/crontabs/${cron_user}" ]]; then + # copy crontab from system + if crontab -l -u "${cron_user}" >/dev/null 2>&1; then + crontab -l -u "${cron_user}" >"/config/crontabs/${cron_user}" + fi + + # if crontabs still do not exist in config (were not copied from system) + # copy crontab from image defaults (using -n, do not overwrite an existing file) + cp -n "/defaults/crontabs/${cron_user}" /config/crontabs/ + fi + + # set permissions and import user crontabs + lsiown "${cron_user}":"${cron_user}" "/config/crontabs/${cron_user}" + crontab -u "${cron_user}" "/config/crontabs/${cron_user}" + fi +done diff --git a/files/alpine-root/etc/s6-overlay/s6-rc.d/init-crontab-config/type b/files/alpine-root/etc/s6-overlay/s6-rc.d/init-crontab-config/type new file mode 100644 index 000000000..bdd22a185 --- /dev/null +++ b/files/alpine-root/etc/s6-overlay/s6-rc.d/init-crontab-config/type @@ -0,0 +1 @@ +oneshot diff --git a/files/alpine-root/etc/s6-overlay/s6-rc.d/init-crontab-config/up b/files/alpine-root/etc/s6-overlay/s6-rc.d/init-crontab-config/up new file mode 100644 index 000000000..d35411185 --- /dev/null +++ b/files/alpine-root/etc/s6-overlay/s6-rc.d/init-crontab-config/up @@ -0,0 +1 @@ +/etc/s6-overlay/s6-rc.d/init-crontab-config/run diff --git a/files/alpine-root/etc/s6-overlay/s6-rc.d/init-custom-files/dependencies.d/init-mods-end b/files/alpine-root/etc/s6-overlay/s6-rc.d/init-custom-files/dependencies.d/init-mods-end new file mode 100644 index 000000000..e69de29bb diff --git a/files/alpine-root/etc/s6-overlay/s6-rc.d/init-custom-files/run b/files/alpine-root/etc/s6-overlay/s6-rc.d/init-custom-files/run new file mode 100755 index 
000000000..6b57858bb --- /dev/null +++ b/files/alpine-root/etc/s6-overlay/s6-rc.d/init-custom-files/run @@ -0,0 +1,22 @@ +#!/usr/bin/with-contenv bash +# shellcheck shell=bash + +# Directories +SCRIPTS_DIR="/custom-cont-init.d" + +# Make sure custom init directory exists and has files in it +if [[ -e "${SCRIPTS_DIR}" ]] && [[ -n "$(/bin/ls -A ${SCRIPTS_DIR} 2>/dev/null)" ]]; then + echo "[custom-init] Files found, executing" + for SCRIPT in "${SCRIPTS_DIR}"/*; do + NAME="$(basename "${SCRIPT}")" + if [[ -x "${SCRIPT}" ]]; then + echo "[custom-init] ${NAME}: executing..." + /bin/bash "${SCRIPT}" + echo "[custom-init] ${NAME}: exited $?" + elif [[ ! -x "${SCRIPT}" ]]; then + echo "[custom-init] ${NAME}: is not an executable file" + fi + done +else + echo "[custom-init] No custom files found, skipping..." +fi diff --git a/files/alpine-root/etc/s6-overlay/s6-rc.d/init-custom-files/type b/files/alpine-root/etc/s6-overlay/s6-rc.d/init-custom-files/type new file mode 100644 index 000000000..bdd22a185 --- /dev/null +++ b/files/alpine-root/etc/s6-overlay/s6-rc.d/init-custom-files/type @@ -0,0 +1 @@ +oneshot diff --git a/files/alpine-root/etc/s6-overlay/s6-rc.d/init-custom-files/up b/files/alpine-root/etc/s6-overlay/s6-rc.d/init-custom-files/up new file mode 100644 index 000000000..28bf31859 --- /dev/null +++ b/files/alpine-root/etc/s6-overlay/s6-rc.d/init-custom-files/up @@ -0,0 +1 @@ +/etc/s6-overlay/s6-rc.d/init-custom-files/run diff --git a/files/alpine-root/etc/s6-overlay/s6-rc.d/init-device-perms/dependencies.d/init-adduser b/files/alpine-root/etc/s6-overlay/s6-rc.d/init-device-perms/dependencies.d/init-adduser new file mode 100644 index 000000000..e69de29bb diff --git a/files/alpine-root/etc/s6-overlay/s6-rc.d/init-device-perms/run b/files/alpine-root/etc/s6-overlay/s6-rc.d/init-device-perms/run new file mode 100755 index 000000000..2f4a83444 --- /dev/null +++ b/files/alpine-root/etc/s6-overlay/s6-rc.d/init-device-perms/run @@ -0,0 +1,39 @@ +#!/usr/bin/with-contenv 
bash +# shellcheck shell=bash + +TARGET_USER="${CUSTOM_USER:-${USER_NAME:-root}}" + +if [[ -z ${LSIO_NON_ROOT_USER} ]] && [[ -n ${ATTACHED_DEVICES_PERMS} ]]; then + FILES=$(find ${ATTACHED_DEVICES_PERMS} -print 2>/dev/null) + + for i in ${FILES}; do + FILE_GID=$(stat -c '%g' "${i}") + FILE_UID=$(stat -c '%u' "${i}") + # check if user matches device + if id -u "$TARGET_USER" | grep -qw "${FILE_UID}"; then + echo "**** permissions for ${i} are good ****" + else + # check if group matches and that device has group rw + if id -G "$TARGET_USER" | grep -qw "${FILE_GID}" && [[ $(stat -c '%A' "${i}" | cut -b 5,6) == "rw" ]]; then + echo "**** permissions for ${i} are good ****" + # check if device needs to be added to group + elif ! id -G "$TARGET_USER" | grep -qw "${FILE_GID}"; then + # check if group needs to be created + GROUP_NAME=$(getent group "${FILE_GID}" | awk -F: '{print $1}') + if [[ -z "${GROUP_NAME}" ]]; then + GROUP_NAME="group$(head /dev/urandom | tr -dc 'a-z0-9' | head -c4)" + groupadd "${GROUP_NAME}" + groupmod -g "${FILE_GID}" "${GROUP_NAME}" + echo "**** creating group ${GROUP_NAME} with id ${FILE_GID} ****" + fi + echo "**** adding ${i} to group ${GROUP_NAME} with id ${FILE_GID} ****" + usermod -a -G "${GROUP_NAME}" "$TARGET_USER" + fi + # check if device has group rw + if [[ $(stat -c '%A' "${i}" | cut -b 5,6) != "rw" ]]; then + echo -e "**** The device ${i} does not have group read/write permissions, attempting to fix inside the container. 
****" + chmod g+rw "${i}" + fi + fi + done +fi diff --git a/files/alpine-root/etc/s6-overlay/s6-rc.d/init-device-perms/type b/files/alpine-root/etc/s6-overlay/s6-rc.d/init-device-perms/type new file mode 100644 index 000000000..3d92b15f2 --- /dev/null +++ b/files/alpine-root/etc/s6-overlay/s6-rc.d/init-device-perms/type @@ -0,0 +1 @@ +oneshot \ No newline at end of file diff --git a/files/alpine-root/etc/s6-overlay/s6-rc.d/init-device-perms/up b/files/alpine-root/etc/s6-overlay/s6-rc.d/init-device-perms/up new file mode 100644 index 000000000..050e0b296 --- /dev/null +++ b/files/alpine-root/etc/s6-overlay/s6-rc.d/init-device-perms/up @@ -0,0 +1 @@ +/etc/s6-overlay/s6-rc.d/init-device-perms/run \ No newline at end of file diff --git a/files/alpine-root/etc/s6-overlay/s6-rc.d/init-envfile/run b/files/alpine-root/etc/s6-overlay/s6-rc.d/init-envfile/run new file mode 100755 index 000000000..592df5270 --- /dev/null +++ b/files/alpine-root/etc/s6-overlay/s6-rc.d/init-envfile/run @@ -0,0 +1,19 @@ +#!/usr/bin/with-contenv bash +# shellcheck shell=bash + +if find /run/s6/container_environment/FILE__* -maxdepth 1 > /dev/null 2>&1; then + for FILENAME in /run/s6/container_environment/FILE__*; do + SECRETFILE=$(cat "${FILENAME}") + if [[ -f ${SECRETFILE} ]]; then + FILESTRIP=${FILENAME//FILE__/} + if [[ $(tail -n1 "${SECRETFILE}" | wc -l) != 0 ]]; then + echo "[env-init] Your secret: ${FILENAME##*/}" + echo " contains a trailing newline and may not work as expected" + fi + cat "${SECRETFILE}" >"${FILESTRIP}" + echo "[env-init] ${FILESTRIP##*/} set from ${FILENAME##*/}" + else + echo "[env-init] cannot find secret in ${FILENAME##*/}" + fi + done +fi diff --git a/files/alpine-root/etc/s6-overlay/s6-rc.d/init-envfile/type b/files/alpine-root/etc/s6-overlay/s6-rc.d/init-envfile/type new file mode 100644 index 000000000..bdd22a185 --- /dev/null +++ b/files/alpine-root/etc/s6-overlay/s6-rc.d/init-envfile/type @@ -0,0 +1 @@ +oneshot diff --git 
a/files/alpine-root/etc/s6-overlay/s6-rc.d/init-envfile/up b/files/alpine-root/etc/s6-overlay/s6-rc.d/init-envfile/up new file mode 100644 index 000000000..b2b4fb8c2 --- /dev/null +++ b/files/alpine-root/etc/s6-overlay/s6-rc.d/init-envfile/up @@ -0,0 +1 @@ +/etc/s6-overlay/s6-rc.d/init-envfile/run diff --git a/files/alpine-root/etc/s6-overlay/s6-rc.d/init-migrations/run b/files/alpine-root/etc/s6-overlay/s6-rc.d/init-migrations/run new file mode 100755 index 000000000..baf86a249 --- /dev/null +++ b/files/alpine-root/etc/s6-overlay/s6-rc.d/init-migrations/run @@ -0,0 +1,32 @@ +#!/usr/bin/with-contenv bash +# shellcheck shell=bash + +MIGRATIONS_DIR="/migrations" +MIGRATIONS_HISTORY="/config/.migrations" + +echo "[migrations] started" + +if [[ ! -d ${MIGRATIONS_DIR} ]]; then + echo "[migrations] no migrations found" + exit +fi + +for MIGRATION in $(find ${MIGRATIONS_DIR}/* | sort -n); do + NAME="$(basename "${MIGRATION}")" + if [[ -f ${MIGRATIONS_HISTORY} ]] && grep -Fxq "${NAME}" ${MIGRATIONS_HISTORY}; then + echo "[migrations] ${NAME}: skipped" + continue + fi + echo "[migrations] ${NAME}: executing..." + # Execute migration script in a subshell to prevent it from modifying the current environment + ("${MIGRATION}") + EXIT_CODE=$? 
+ if [[ ${EXIT_CODE} -ne 0 ]]; then + echo "[migrations] ${NAME}: failed with exit code ${EXIT_CODE}, contact support" + exit "${EXIT_CODE}" + fi + echo "${NAME}" >>${MIGRATIONS_HISTORY} + echo "[migrations] ${NAME}: succeeded" +done + +echo "[migrations] done" diff --git a/files/alpine-root/etc/s6-overlay/s6-rc.d/init-migrations/type b/files/alpine-root/etc/s6-overlay/s6-rc.d/init-migrations/type new file mode 100644 index 000000000..bdd22a185 --- /dev/null +++ b/files/alpine-root/etc/s6-overlay/s6-rc.d/init-migrations/type @@ -0,0 +1 @@ +oneshot diff --git a/files/alpine-root/etc/s6-overlay/s6-rc.d/init-migrations/up b/files/alpine-root/etc/s6-overlay/s6-rc.d/init-migrations/up new file mode 100644 index 000000000..7c4cbcf6f --- /dev/null +++ b/files/alpine-root/etc/s6-overlay/s6-rc.d/init-migrations/up @@ -0,0 +1 @@ +/etc/s6-overlay/s6-rc.d/init-migrations/run diff --git a/files/alpine-root/etc/s6-overlay/s6-rc.d/init-mods-end/dependencies.d/init-mods-package-install b/files/alpine-root/etc/s6-overlay/s6-rc.d/init-mods-end/dependencies.d/init-mods-package-install new file mode 100644 index 000000000..e69de29bb diff --git a/files/alpine-root/etc/s6-overlay/s6-rc.d/init-mods-end/type b/files/alpine-root/etc/s6-overlay/s6-rc.d/init-mods-end/type new file mode 100644 index 000000000..bdd22a185 --- /dev/null +++ b/files/alpine-root/etc/s6-overlay/s6-rc.d/init-mods-end/type @@ -0,0 +1 @@ +oneshot diff --git a/files/alpine-root/etc/s6-overlay/s6-rc.d/init-mods-end/up b/files/alpine-root/etc/s6-overlay/s6-rc.d/init-mods-end/up new file mode 100644 index 000000000..092149d53 --- /dev/null +++ b/files/alpine-root/etc/s6-overlay/s6-rc.d/init-mods-end/up @@ -0,0 +1 @@ +# This file doesn't do anything, it's just the end of the mod init process diff --git a/files/alpine-root/etc/s6-overlay/s6-rc.d/init-mods-package-install/dependencies.d/init-mods b/files/alpine-root/etc/s6-overlay/s6-rc.d/init-mods-package-install/dependencies.d/init-mods new file mode 100644 index 
000000000..e69de29bb diff --git a/files/alpine-root/etc/s6-overlay/s6-rc.d/init-mods-package-install/type b/files/alpine-root/etc/s6-overlay/s6-rc.d/init-mods-package-install/type new file mode 100644 index 000000000..bdd22a185 --- /dev/null +++ b/files/alpine-root/etc/s6-overlay/s6-rc.d/init-mods-package-install/type @@ -0,0 +1 @@ +oneshot diff --git a/files/alpine-root/etc/s6-overlay/s6-rc.d/init-mods-package-install/up b/files/alpine-root/etc/s6-overlay/s6-rc.d/init-mods-package-install/up new file mode 100644 index 000000000..fb633014c --- /dev/null +++ b/files/alpine-root/etc/s6-overlay/s6-rc.d/init-mods-package-install/up @@ -0,0 +1 @@ +/etc/s6-overlay/s6-rc.d/init-mods-package-install/run diff --git a/files/alpine-root/etc/s6-overlay/s6-rc.d/init-mods/dependencies.d/init-config-end b/files/alpine-root/etc/s6-overlay/s6-rc.d/init-mods/dependencies.d/init-config-end new file mode 100644 index 000000000..e69de29bb diff --git a/files/alpine-root/etc/s6-overlay/s6-rc.d/init-mods/type b/files/alpine-root/etc/s6-overlay/s6-rc.d/init-mods/type new file mode 100644 index 000000000..bdd22a185 --- /dev/null +++ b/files/alpine-root/etc/s6-overlay/s6-rc.d/init-mods/type @@ -0,0 +1 @@ +oneshot diff --git a/files/alpine-root/etc/s6-overlay/s6-rc.d/init-mods/up b/files/alpine-root/etc/s6-overlay/s6-rc.d/init-mods/up new file mode 100644 index 000000000..040d8013c --- /dev/null +++ b/files/alpine-root/etc/s6-overlay/s6-rc.d/init-mods/up @@ -0,0 +1 @@ +# This file doesn't do anything, it's just the start of the mod init process diff --git a/files/alpine-root/etc/s6-overlay/s6-rc.d/init-os-end/dependencies.d/init-adduser b/files/alpine-root/etc/s6-overlay/s6-rc.d/init-os-end/dependencies.d/init-adduser new file mode 100644 index 000000000..e69de29bb diff --git a/files/alpine-root/etc/s6-overlay/s6-rc.d/init-os-end/dependencies.d/init-device-perms b/files/alpine-root/etc/s6-overlay/s6-rc.d/init-os-end/dependencies.d/init-device-perms new file mode 100644 index 
000000000..e69de29bb diff --git a/files/alpine-root/etc/s6-overlay/s6-rc.d/init-os-end/dependencies.d/init-envfile b/files/alpine-root/etc/s6-overlay/s6-rc.d/init-os-end/dependencies.d/init-envfile new file mode 100644 index 000000000..e69de29bb diff --git a/files/alpine-root/etc/s6-overlay/s6-rc.d/init-os-end/type b/files/alpine-root/etc/s6-overlay/s6-rc.d/init-os-end/type new file mode 100644 index 000000000..bdd22a185 --- /dev/null +++ b/files/alpine-root/etc/s6-overlay/s6-rc.d/init-os-end/type @@ -0,0 +1 @@ +oneshot diff --git a/files/alpine-root/etc/s6-overlay/s6-rc.d/init-os-end/up b/files/alpine-root/etc/s6-overlay/s6-rc.d/init-os-end/up new file mode 100644 index 000000000..092149d53 --- /dev/null +++ b/files/alpine-root/etc/s6-overlay/s6-rc.d/init-os-end/up @@ -0,0 +1 @@ +# This file doesn't do anything, it's just the end of the mod init process diff --git a/files/alpine-root/etc/s6-overlay/s6-rc.d/init-services/dependencies.d/init-custom-files b/files/alpine-root/etc/s6-overlay/s6-rc.d/init-services/dependencies.d/init-custom-files new file mode 100644 index 000000000..e69de29bb diff --git a/files/alpine-root/etc/s6-overlay/s6-rc.d/init-services/type b/files/alpine-root/etc/s6-overlay/s6-rc.d/init-services/type new file mode 100644 index 000000000..bdd22a185 --- /dev/null +++ b/files/alpine-root/etc/s6-overlay/s6-rc.d/init-services/type @@ -0,0 +1 @@ +oneshot diff --git a/files/alpine-root/etc/s6-overlay/s6-rc.d/init-services/up b/files/alpine-root/etc/s6-overlay/s6-rc.d/init-services/up new file mode 100644 index 000000000..a7c3905b2 --- /dev/null +++ b/files/alpine-root/etc/s6-overlay/s6-rc.d/init-services/up @@ -0,0 +1 @@ +# This file doesn't do anything, it just signals that services can start diff --git a/files/alpine-root/etc/s6-overlay/s6-rc.d/svc-cron/dependencies.d/init-services b/files/alpine-root/etc/s6-overlay/s6-rc.d/svc-cron/dependencies.d/init-services new file mode 100644 index 000000000..e69de29bb diff --git 
a/files/alpine-root/etc/s6-overlay/s6-rc.d/svc-cron/run b/files/alpine-root/etc/s6-overlay/s6-rc.d/svc-cron/run new file mode 100755 index 000000000..f8ec9d211 --- /dev/null +++ b/files/alpine-root/etc/s6-overlay/s6-rc.d/svc-cron/run @@ -0,0 +1,16 @@ +#!/usr/bin/with-contenv bash +# shellcheck shell=bash + +TARGET_USER="${CUSTOM_USER:-${USER_NAME:-root}}" +if builtin command -v crontab >/dev/null 2>&1 && [[ -n "$(crontab -l -u "$TARGET_USER" 2>/dev/null || true)" || -n "$(crontab -l -u root 2>/dev/null || true)" ]]; then + if builtin command -v busybox >/dev/null 2>&1 && [[ $(busybox || true) =~ [[:space:]](crond)([,]|$) ]]; then + exec busybox crond -f -S -l 5 + elif [[ -f /usr/bin/apt ]] && [[ -f /usr/sbin/cron ]]; then + exec /usr/sbin/cron -f -L 5 + else + echo "**** cron not found ****" + sleep infinity + fi +else + sleep infinity +fi diff --git a/files/alpine-root/etc/s6-overlay/s6-rc.d/svc-cron/type b/files/alpine-root/etc/s6-overlay/s6-rc.d/svc-cron/type new file mode 100644 index 000000000..5883cff0c --- /dev/null +++ b/files/alpine-root/etc/s6-overlay/s6-rc.d/svc-cron/type @@ -0,0 +1 @@ +longrun diff --git a/files/alpine-root/etc/s6-overlay/s6-rc.d/user/contents.d/init-adduser b/files/alpine-root/etc/s6-overlay/s6-rc.d/user/contents.d/init-adduser new file mode 100644 index 000000000..e69de29bb diff --git a/files/alpine-root/etc/s6-overlay/s6-rc.d/user/contents.d/init-config b/files/alpine-root/etc/s6-overlay/s6-rc.d/user/contents.d/init-config new file mode 100644 index 000000000..e69de29bb diff --git a/files/alpine-root/etc/s6-overlay/s6-rc.d/user/contents.d/init-config-end b/files/alpine-root/etc/s6-overlay/s6-rc.d/user/contents.d/init-config-end new file mode 100644 index 000000000..e69de29bb diff --git a/files/alpine-root/etc/s6-overlay/s6-rc.d/user/contents.d/init-crontab-config b/files/alpine-root/etc/s6-overlay/s6-rc.d/user/contents.d/init-crontab-config new file mode 100644 index 000000000..e69de29bb diff --git 
a/files/alpine-root/etc/s6-overlay/s6-rc.d/user/contents.d/init-custom-files b/files/alpine-root/etc/s6-overlay/s6-rc.d/user/contents.d/init-custom-files new file mode 100644 index 000000000..e69de29bb diff --git a/files/alpine-root/etc/s6-overlay/s6-rc.d/user/contents.d/init-device-perms b/files/alpine-root/etc/s6-overlay/s6-rc.d/user/contents.d/init-device-perms new file mode 100644 index 000000000..e69de29bb diff --git a/files/alpine-root/etc/s6-overlay/s6-rc.d/user/contents.d/init-envfile b/files/alpine-root/etc/s6-overlay/s6-rc.d/user/contents.d/init-envfile new file mode 100644 index 000000000..e69de29bb diff --git a/files/alpine-root/etc/s6-overlay/s6-rc.d/user/contents.d/init-migrations b/files/alpine-root/etc/s6-overlay/s6-rc.d/user/contents.d/init-migrations new file mode 100644 index 000000000..e69de29bb diff --git a/files/alpine-root/etc/s6-overlay/s6-rc.d/user/contents.d/init-mods b/files/alpine-root/etc/s6-overlay/s6-rc.d/user/contents.d/init-mods new file mode 100644 index 000000000..e69de29bb diff --git a/files/alpine-root/etc/s6-overlay/s6-rc.d/user/contents.d/init-mods-end b/files/alpine-root/etc/s6-overlay/s6-rc.d/user/contents.d/init-mods-end new file mode 100644 index 000000000..e69de29bb diff --git a/files/alpine-root/etc/s6-overlay/s6-rc.d/user/contents.d/init-mods-package-install b/files/alpine-root/etc/s6-overlay/s6-rc.d/user/contents.d/init-mods-package-install new file mode 100644 index 000000000..e69de29bb diff --git a/files/alpine-root/etc/s6-overlay/s6-rc.d/user/contents.d/init-os-end b/files/alpine-root/etc/s6-overlay/s6-rc.d/user/contents.d/init-os-end new file mode 100644 index 000000000..e69de29bb diff --git a/files/alpine-root/etc/s6-overlay/s6-rc.d/user/contents.d/init-services b/files/alpine-root/etc/s6-overlay/s6-rc.d/user/contents.d/init-services new file mode 100644 index 000000000..e69de29bb diff --git a/files/alpine-root/etc/s6-overlay/s6-rc.d/user/contents.d/svc-cron 
b/files/alpine-root/etc/s6-overlay/s6-rc.d/user/contents.d/svc-cron new file mode 100644 index 000000000..e69de29bb diff --git a/files/alpine-root/etc/s6-overlay/s6-rc.d/user2/contents.d/ci-service-check b/files/alpine-root/etc/s6-overlay/s6-rc.d/user2/contents.d/ci-service-check new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/files/alpine-root/etc/s6-overlay/s6-rc.d/user2/contents.d/ci-service-check @@ -0,0 +1 @@ + diff --git a/files/build-base-image.sh b/files/build-base-image.sh new file mode 100755 index 000000000..bef1411d1 --- /dev/null +++ b/files/build-base-image.sh @@ -0,0 +1,165 @@ +#!/usr/bin/env bash +set -euo pipefail + +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" +if [[ -f "${SCRIPT_DIR}/linuxserver-kde.base.dockerfile" ]]; then + FILES_DIR="${SCRIPT_DIR}" +else + FILES_DIR="${SCRIPT_DIR}/files" +fi +DOCKERFILE_BASE="${FILES_DIR}/linuxserver-kde.base.dockerfile" + +IMAGE_NAME=${IMAGE_NAME:-ghcr.io/tatsuyai713/webtop-kde} +VERSION=${VERSION:-1.0.0} +UBUNTU_VERSION=${UBUNTU_VERSION:-24.04} +ARCH_OVERRIDE=${ARCH_OVERRIDE:-} +PLATFORM_OVERRIDE=${PLATFORM_OVERRIDE:-} +NO_CACHE_FLAG="" + +usage() { + cat <<EOF +Usage: $0 [-a arch] [-i image] [-v version] [-u ubuntu_version] [--no-cache] + -a, --arch Target arch (amd64 or arm64). Default: host arch + -i, --image Image name (default: ${IMAGE_NAME}) + -v, --version Version tag (default: ${VERSION}) + -u, --ubuntu Ubuntu version (22.04 or 24.04). Default: ${UBUNTU_VERSION} + -p, --platform Docker platform (e.g. linux/amd64 or linux/arm64). 
Default: derived from arch + -n, --no-cache Build without using cache (passes --no-cache to docker buildx) +EOF +} + +while [[ $# -gt 0 ]]; do + case "$1" in + -a|--arch) ARCH_OVERRIDE=$2; shift 2 ;; + -i|--image) IMAGE_NAME=$2; shift 2 ;; + -v|--version) VERSION=$2; shift 2 ;; + -u|--ubuntu) UBUNTU_VERSION=$2; shift 2 ;; + -p|--platform) PLATFORM_OVERRIDE=$2; shift 2 ;; + -n|--no-cache) NO_CACHE_FLAG="--no-cache"; shift ;; + -h|--help) usage; exit 0 ;; + *) echo "Unknown option: $1" >&2; usage; exit 1 ;; + esac +done + +HOST_ARCH=$(uname -m) +PLATFORM_ARCH_HINT="" +if [[ -n "${PLATFORM_OVERRIDE}" ]]; then + case "${PLATFORM_OVERRIDE}" in + linux/amd64) PLATFORM_ARCH_HINT=amd64 ;; + linux/arm64) PLATFORM_ARCH_HINT=arm64 ;; + *) PLATFORM_ARCH_HINT="" ;; + esac +fi + +if [[ -n "${ARCH_OVERRIDE}" ]]; then + ARCH_INPUT=${ARCH_OVERRIDE} +elif [[ -n "${PLATFORM_ARCH_HINT}" ]]; then + ARCH_INPUT=${PLATFORM_ARCH_HINT} +else + ARCH_INPUT=${HOST_ARCH} +fi + +# Validate and set Ubuntu version parameters +case "${UBUNTU_VERSION}" in + 22.04) + UBUNTU_REL=jammy + UBUNTU_TAG=oci-jammy-22.04 + ;; + 24.04) + UBUNTU_REL=noble + UBUNTU_TAG=oci-noble-24.04 + ;; + *) + echo "Unsupported Ubuntu version: ${UBUNTU_VERSION}. 
Use 22.04 or 24.04" >&2 + exit 1 + ;; +esac + +case "$ARCH_INPUT" in + x86_64|amd64) + TARGET_ARCH=amd64 + ALPINE_ARCH=x86_64 + UBUNTU_ARCH=amd64 + S6_OVERLAY_ARCH=x86_64 + LIBVA_DEB_URL="https://launchpad.net/ubuntu/+source/libva/2.22.0-3ubuntu2/+build/30591127/+files/libva2_2.22.0-3ubuntu2_amd64.deb" + LIBVA_LIBDIR="/usr/lib/x86_64-linux-gnu" + APT_EXTRA_PACKAGES="intel-media-va-driver xserver-xorg-video-intel" + PLATFORM="linux/amd64" + ;; + aarch64|arm64) + TARGET_ARCH=arm64 + ALPINE_ARCH=aarch64 + UBUNTU_ARCH=arm64 + S6_OVERLAY_ARCH=aarch64 + LIBVA_DEB_URL="https://launchpad.net/ubuntu/+source/libva/2.22.0-3ubuntu2/+build/30591128/+files/libva2_2.22.0-3ubuntu2_arm64.deb" + LIBVA_LIBDIR="/usr/lib/aarch64-linux-gnu" + APT_EXTRA_PACKAGES="" + PLATFORM="linux/arm64" + ;; + *) + echo "Unsupported arch: $ARCH_INPUT" >&2 + exit 1 + ;; +esac + +if [[ -n "${PLATFORM_OVERRIDE}" ]]; then + PLATFORM=${PLATFORM_OVERRIDE} + if [[ -n "${PLATFORM_ARCH_HINT}" && "${PLATFORM_ARCH_HINT}" != "${TARGET_ARCH}" ]]; then + echo "Warning: platform (${PLATFORM_OVERRIDE}) and arch (${TARGET_ARCH}) differ; proceeding with platform override." >&2 + fi +fi + +PROOT_ARCH=${PROOT_ARCH_OVERRIDE:-x86_64} + +LOG_FILE="${FILES_DIR}/build-${TARGET_ARCH}-${UBUNTU_VERSION}-${VERSION}.log" + +echo "==========================================" +echo "Building ${IMAGE_NAME}-base-${TARGET_ARCH}-u${UBUNTU_VERSION}:${VERSION}" +echo "Platform: ${PLATFORM}" +echo "Ubuntu Version: ${UBUNTU_VERSION} (${UBUNTU_REL})" +echo "Dockerfile: ${FILES_DIR}/linuxserver-kde.dockerfile" +echo "==========================================" + +REQUIRED_FILES=( + "${DOCKERFILE_BASE}" + "${FILES_DIR}/alpine-root" + "${FILES_DIR}/ubuntu-root" + "${FILES_DIR}/kde-root" + "${FILES_DIR}/patches/21-xvfb-dri3.patch" +) + +for path in "${REQUIRED_FILES[@]}"; do + if [[ ! 
-e "$path" ]]; then + echo "Missing required file or directory: $path" >&2 + exit 1 + fi +done + +set -o pipefail + +docker buildx build \ + --platform "${PLATFORM}" \ + ${NO_CACHE_FLAG} \ + -f "${DOCKERFILE_BASE}" \ + --build-arg VERSION="${VERSION}" \ + --build-arg ALPINE_ARCH="${ALPINE_ARCH}" \ + --build-arg UBUNTU_ARCH="${UBUNTU_ARCH}" \ + --build-arg UBUNTU_REL="${UBUNTU_REL}" \ + --build-arg UBUNTU_TAG="${UBUNTU_TAG}" \ + --build-arg S6_OVERLAY_ARCH="${S6_OVERLAY_ARCH}" \ + --build-arg APT_EXTRA_PACKAGES="${APT_EXTRA_PACKAGES}" \ + --build-arg LIBVA_DEB_URL="${LIBVA_DEB_URL}" \ + --build-arg LIBVA_LIBDIR="${LIBVA_LIBDIR}" \ + --build-arg PROOT_ARCH="${PROOT_ARCH}" \ + --progress=plain \ + --load \ + -t ${IMAGE_NAME}-base-${TARGET_ARCH}-u${UBUNTU_VERSION}:${VERSION} \ + "${FILES_DIR}" 2>&1 | tee "${LOG_FILE}" + +BUILD_STATUS=${PIPESTATUS[0]} +if [ $BUILD_STATUS -eq 0 ]; then + echo "Build successful." +else + echo "Build failed. See ${LOG_FILE}" + exit 1 +fi diff --git a/files/kde-root/defaults/autostart b/files/kde-root/defaults/autostart new file mode 100644 index 000000000..ca916d098 --- /dev/null +++ b/files/kde-root/defaults/autostart @@ -0,0 +1 @@ +exit 0 diff --git a/files/kde-root/defaults/startwm.sh b/files/kde-root/defaults/startwm.sh new file mode 100644 index 000000000..1c36b64ce --- /dev/null +++ b/files/kde-root/defaults/startwm.sh @@ -0,0 +1,116 @@ +#!/bin/bash + +# Set scaling for HiDPI displays +if [ -n "${DPI}" ] && [ "${DPI}" != "96" ]; then + # Calculate scale factor from DPI (96 DPI = 1.0 scale) + SCALE_FACTOR=$(echo "scale=2; ${DPI} / 96" | bc) + export QT_SCALE_FACTOR=${SCALE_FACTOR} + export GDK_SCALE=${SCALE_FACTOR%.*} # Integer part for GTK + export GDK_DPI_SCALE=$(echo "scale=2; 96 / ${DPI}" | bc) # Inverse for text + + # Set KDE scaling + kwriteconfig5 --file $HOME/.config/kcmfonts --group General --key forceFontDPI ${DPI} + kwriteconfig5 --file $HOME/.config/kdeglobals --group KScreen --key ScaleFactor ${SCALE_FACTOR} +fi + +# 
Disable compositing and screen lock +if [ ! -f $HOME/.config/kwinrc ]; then + kwriteconfig5 --file $HOME/.config/kwinrc --group Compositing --key Enabled false +fi +if [ ! -f $HOME/.config/kscreenlockerrc ]; then + kwriteconfig5 --file $HOME/.config/kscreenlockerrc --group Daemon --key Autolock false +fi + +# Power related +setterm blank 0 +setterm powerdown 0 + +# Directories / DBus noise control (run as session user; no sudo) +rm -f /usr/share/dbus-1/system-services/org.freedesktop.UDisks2.service \ + /usr/share/dbus-1/system-services/org.freedesktop.PackageKit.service \ + /etc/xdg/autostart/packagekitd.desktop +mkdir -p "${HOME}/.config/autostart" "${HOME}/.XDG" "${HOME}/.local/share/" +# Fix perms in case persisted home left root-owned +chown -R "$(id -u)":"$(id -g)" "${HOME}/.config" "${HOME}/.XDG" "${HOME}/.local" 2>/dev/null || true +chown "$(id -u)":"$(id -g)" "${HOME}/.xsettingsd" "${HOME}/.Xauthority" "${HOME}/.ICEauthority" 2>/dev/null || true +chmod 700 "${HOME}/.XDG" +touch "${HOME}/.local/share/user-places.xbel" + +# Background perm loop +if [ ! -d $HOME/.config/kde.org ]; then + ( + loop_end_time=$((SECONDS + 30)) + while [ $SECONDS -lt $loop_end_time ]; do + find "$HOME/.cache" "$HOME/.config" "$HOME/.local" -type f -perm 000 -exec chmod 644 {} + 2>/dev/null + sleep .1 + done + ) & +fi + +# Ensure XDG_RUNTIME_DIR exists (required for dbus/Qt) with correct perms +if [ -z "${XDG_RUNTIME_DIR:-}" ]; then + export XDG_RUNTIME_DIR="/run/user/$(id -u)" +fi +if ! mkdir -p "${XDG_RUNTIME_DIR}" 2>/dev/null; then + export XDG_RUNTIME_DIR="/tmp/runtime-$(id -u)" + mkdir -p "${XDG_RUNTIME_DIR}" +fi +chmod 700 "${XDG_RUNTIME_DIR}" + +# Create startup script if it does not exist (keep in sync with openbox) +STARTUP_FILE="${HOME}/.config/autostart/autostart.desktop" +if [ ! 
-f "${STARTUP_FILE}" ]; then + echo "[Desktop Entry]" > $STARTUP_FILE + echo "Exec=bash /config/.config/openbox/autostart" >> $STARTUP_FILE + echo "Icon=dialog-scripts" >> $STARTUP_FILE + echo "Name=autostart" >> $STARTUP_FILE + echo "Path=" >> $STARTUP_FILE + echo "Type=Application" >> $STARTUP_FILE + echo "X-KDE-AutostartScript=true" >> $STARTUP_FILE + chmod +x $STARTUP_FILE +fi + +# Enable Nvidia GPU support if detected +NVIDIA_PRESENT=false +if which nvidia-smi > /dev/null 2>&1 && nvidia-smi --query-gpu=uuid --format=csv,noheader 2>/dev/null | head -n1 | grep -q .; then + NVIDIA_PRESENT=true + echo "NVIDIA GPU detected" +fi + +if [ "${NVIDIA_PRESENT}" = "true" ] && [ "${DISABLE_ZINK}" == "false" ]; then + export LIBGL_KOPPER_DRI2=1 + export MESA_LOADER_DRIVER_OVERRIDE=zink + export GALLIUM_DRIVER=zink +fi + +# Configure GPU acceleration +# If USE_XORG=true, use native OpenGL (no VirtualGL needed) +# If USE_XORG=false (Xvfb), use VirtualGL for GPU acceleration +USE_VGL=false +if [ "${USE_XORG}" = "true" ]; then + # Xorg mode: direct GPU access, no VirtualGL needed + if [ "${NVIDIA_PRESENT}" = "true" ]; then + echo "Xorg mode with NVIDIA GPU - using native OpenGL" + export __GLX_VENDOR_LIBRARY_NAME=nvidia + export __NV_PRIME_RENDER_OFFLOAD=1 + fi +elif [ "${NVIDIA_PRESENT}" = "true" ] && which vglrun > /dev/null 2>&1; then + # Xvfb mode with NVIDIA: use VirtualGL + export VGL_DISPLAY="${VGL_DISPLAY:-egl}" + export __GLX_VENDOR_LIBRARY_NAME=nvidia + export __NV_PRIME_RENDER_OFFLOAD=1 + USE_VGL=true + echo "Xvfb mode with NVIDIA GPU - using VirtualGL" +fi + +# Start DE (without exec to allow dbus-launch to work properly) +# Export XDG_RUNTIME_DIR for the session +export XDG_RUNTIME_DIR +eval "$(dbus-launch --sh-syntax)" +if [ "${USE_VGL}" = "true" ]; then + echo "Starting KDE Plasma with VirtualGL (VGL_DISPLAY=${VGL_DISPLAY})" + vglrun -d "${VGL_DISPLAY}" /usr/bin/startplasma-x11 > /dev/null 2>&1 +else + echo "Starting KDE Plasma (native rendering)" + 
/usr/bin/startplasma-x11 > /dev/null 2>&1 +fi diff --git a/files/kde-root/usr/local/bin/wrapped-chromium b/files/kde-root/usr/local/bin/wrapped-chromium new file mode 100755 index 000000000..570a471b4 --- /dev/null +++ b/files/kde-root/usr/local/bin/wrapped-chromium @@ -0,0 +1,13 @@ +#! /bin/bash + +BIN=/usr/bin/chromium +DEFAULT_FLAGS="--password-store=basic --in-process-gpu" +EXTRA_FLAGS=(${CHROMIUM_FLAGS:-}) + +# Cleanup +if ! pgrep chromium > /dev/null;then + rm -f $HOME/.config/chromium/Singleton* +fi + +# Run with --no-sandbox (same as Chrome wrapper) +${BIN} ${DEFAULT_FLAGS} --no-sandbox "${EXTRA_FLAGS[@]}" "$@" diff --git a/files/linuxserver-kde.base.dockerfile b/files/linuxserver-kde.base.dockerfile new file mode 100644 index 000000000..ad3ee0797 --- /dev/null +++ b/files/linuxserver-kde.base.dockerfile @@ -0,0 +1,703 @@ +# syntax=docker/dockerfile:1 + +########################################### +# Stage 1: Alpine rootfs builder +########################################### +FROM alpine:3.21 AS alpine-rootfs-stage + +ARG S6_OVERLAY_VERSION="3.2.1.0" +ARG ROOTFS=/root-out +ARG REL=v3.21 +ARG ALPINE_ARCH=x86_64 +ARG S6_OVERLAY_ARCH=x86_64 +ARG MIRROR=http://dl-cdn.alpinelinux.org/alpine +ARG PACKAGES=alpine-baselayout,alpine-keys,apk-tools,busybox,libc-utils + +# install packages +RUN \ + apk add --no-cache bash xz + +# build rootfs +RUN \ + mkdir -p "${ROOTFS}/etc/apk" && \ + { \ + echo "${MIRROR}/${REL}/main"; \ + echo "${MIRROR}/${REL}/community"; \ + } > "${ROOTFS}/etc/apk/repositories" && \ + apk --root "${ROOTFS}" --no-cache --keys-dir /etc/apk/keys add --arch ${ALPINE_ARCH} --initdb ${PACKAGES//,/ } && \ + sed -i -e 's/^root::/root:!:/' /root-out/etc/shadow + +# add s6 overlay +ADD https://github.com/just-containers/s6-overlay/releases/download/v${S6_OVERLAY_VERSION}/s6-overlay-noarch.tar.xz /tmp +RUN tar -C /root-out -Jxpf /tmp/s6-overlay-noarch.tar.xz +ADD 
https://github.com/just-containers/s6-overlay/releases/download/v${S6_OVERLAY_VERSION}/s6-overlay-${S6_OVERLAY_ARCH}.tar.xz /tmp +RUN tar -C /root-out -Jxpf /tmp/s6-overlay-${S6_OVERLAY_ARCH}.tar.xz + +# add s6 optional symlinks +ADD https://github.com/just-containers/s6-overlay/releases/download/v${S6_OVERLAY_VERSION}/s6-overlay-symlinks-noarch.tar.xz /tmp +RUN tar -C /root-out -Jxpf /tmp/s6-overlay-symlinks-noarch.tar.xz && unlink /root-out/usr/bin/with-contenv +ADD https://github.com/just-containers/s6-overlay/releases/download/v${S6_OVERLAY_VERSION}/s6-overlay-symlinks-arch.tar.xz /tmp +RUN tar -C /root-out -Jxpf /tmp/s6-overlay-symlinks-arch.tar.xz + + +########################################### +# Stage 2: Ubuntu rootfs builder +########################################### +FROM alpine:3 AS ubuntu-rootfs-stage + +ARG UBUNTU_ARCH=amd64 +ARG UBUNTU_REL +ARG UBUNTU_TAG +ENV REL=${UBUNTU_REL} +ENV ARCH=${UBUNTU_ARCH} +ENV TAG=${UBUNTU_TAG} + +# install packages +RUN \ + apk add --no-cache bash curl git jq tzdata xz + +# grab base tarball +RUN \ + git clone --depth=1 https://git.launchpad.net/cloud-images/+oci/ubuntu-base -b ${TAG} /build && \ + cd /build/oci && \ + DIGEST=$(jq -r '.manifests[0].digest[7:]' < index.json) && \ + cd /build/oci/blobs/sha256 && \ + if jq -e '.layers // empty' < "${DIGEST}" >/dev/null 2>&1; then \ + TARBALL=$(jq -r '.layers[0].digest[7:]' < ${DIGEST}); \ + else \ + MULTIDIGEST=$(jq -r ".manifests[] | select(.platform.architecture == \"${ARCH}\") | .digest[7:]" < ${DIGEST}) && \ + TARBALL=$(jq -r '.layers[0].digest[7:]' < ${MULTIDIGEST}); \ + fi && \ + mkdir /root-out && \ + tar xf ${TARBALL} -C /root-out && \ + rm -rf \ + /root-out/var/log/* \ + /root-out/home/ubuntu \ + /root-out/root/{.ssh,.bashrc,.profile} \ + /build + +# set version for s6 overlay +ARG S6_OVERLAY_VERSION="3.2.1.0" +ARG S6_OVERLAY_ARCH="x86_64" + +# add s6 overlay +ADD 
https://github.com/just-containers/s6-overlay/releases/download/v${S6_OVERLAY_VERSION}/s6-overlay-noarch.tar.xz /tmp +RUN tar -C /root-out -Jxpf /tmp/s6-overlay-noarch.tar.xz +ADD https://github.com/just-containers/s6-overlay/releases/download/v${S6_OVERLAY_VERSION}/s6-overlay-${S6_OVERLAY_ARCH}.tar.xz /tmp +RUN tar -C /root-out -Jxpf /tmp/s6-overlay-${S6_OVERLAY_ARCH}.tar.xz + +# add s6 optional symlinks +ADD https://github.com/just-containers/s6-overlay/releases/download/v${S6_OVERLAY_VERSION}/s6-overlay-symlinks-noarch.tar.xz /tmp +RUN tar -C /root-out -Jxpf /tmp/s6-overlay-symlinks-noarch.tar.xz && unlink /root-out/usr/bin/with-contenv +ADD https://github.com/just-containers/s6-overlay/releases/download/v${S6_OVERLAY_VERSION}/s6-overlay-symlinks-arch.tar.xz /tmp +RUN tar -C /root-out -Jxpf /tmp/s6-overlay-symlinks-arch.tar.xz + + +########################################### +# Stage 3: Ubuntu base temp +########################################### +FROM scratch AS ubuntu-base-temp + +COPY --from=ubuntu-rootfs-stage /root-out/ / + +ARG VERSION +ARG MODS_VERSION="v3" +ARG PKG_INST_VERSION="v1" +ARG LSIOWN_VERSION="v1" +ARG WITHCONTENV_VERSION="v1" + +ADD --chmod=755 "https://raw.githubusercontent.com/linuxserver/docker-mods/mod-scripts/docker-mods.${MODS_VERSION}" "/docker-mods" +ADD --chmod=755 "https://raw.githubusercontent.com/linuxserver/docker-mods/mod-scripts/package-install.${PKG_INST_VERSION}" "/etc/s6-overlay/s6-rc.d/init-mods-package-install/run" +ADD --chmod=755 "https://raw.githubusercontent.com/linuxserver/docker-mods/mod-scripts/lsiown.${LSIOWN_VERSION}" "/usr/bin/lsiown" +ADD --chmod=755 "https://raw.githubusercontent.com/linuxserver/docker-mods/mod-scripts/with-contenv.${WITHCONTENV_VERSION}" "/usr/bin/with-contenv" + +ARG DEBIAN_FRONTEND="noninteractive" +ENV HOME="/root" \ + LANGUAGE="en_US.UTF-8" \ + LANG="en_US.UTF-8" \ + TERM="xterm" \ + S6_CMD_WAIT_FOR_SERVICES_MAXTIME="0" \ + S6_VERBOSITY=1 \ + S6_STAGE2_HOOK=/docker-mods \ + 
VIRTUAL_ENV=/lsiopy \ + PATH="/lsiopy/bin:$PATH" + +# Generate sources.list dynamically based on UBUNTU_REL and arch +ARG UBUNTU_REL +ARG UBUNTU_ARCH +RUN \ + echo "**** Generating sources.list for ${UBUNTU_REL} (${UBUNTU_ARCH}) ****" && \ + if [ "${UBUNTU_ARCH}" = "amd64" ]; then \ + MIRROR="http://archive.ubuntu.com/ubuntu"; \ + else \ + MIRROR="http://ports.ubuntu.com/ubuntu-ports"; \ + fi && \ + echo "deb ${MIRROR} ${UBUNTU_REL} main restricted" > /etc/apt/sources.list && \ + echo "deb-src ${MIRROR} ${UBUNTU_REL} main restricted" >> /etc/apt/sources.list && \ + echo "deb ${MIRROR} ${UBUNTU_REL}-updates main restricted" >> /etc/apt/sources.list && \ + echo "deb-src ${MIRROR} ${UBUNTU_REL}-updates main restricted" >> /etc/apt/sources.list && \ + echo "deb ${MIRROR} ${UBUNTU_REL} universe multiverse" >> /etc/apt/sources.list && \ + echo "deb-src ${MIRROR} ${UBUNTU_REL} universe multiverse" >> /etc/apt/sources.list && \ + echo "deb ${MIRROR} ${UBUNTU_REL}-updates universe multiverse" >> /etc/apt/sources.list && \ + echo "deb-src ${MIRROR} ${UBUNTU_REL}-updates universe multiverse" >> /etc/apt/sources.list && \ + echo "deb ${MIRROR} ${UBUNTU_REL}-security main restricted" >> /etc/apt/sources.list && \ + echo "deb-src ${MIRROR} ${UBUNTU_REL}-security main restricted" >> /etc/apt/sources.list && \ + echo "deb ${MIRROR} ${UBUNTU_REL}-security universe multiverse" >> /etc/apt/sources.list && \ + echo "deb-src ${MIRROR} ${UBUNTU_REL}-security universe multiverse" >> /etc/apt/sources.list + +RUN \ + echo "**** Ripped from Ubuntu Docker Logic ****" && \ + rm -f /etc/apt/sources.list.d/ubuntu.sources && \ + set -xe && \ + echo '#!/bin/sh' > /usr/sbin/policy-rc.d && \ + echo 'exit 101' >> /usr/sbin/policy-rc.d && \ + chmod +x /usr/sbin/policy-rc.d && \ + dpkg-divert --local --rename --add /sbin/initctl && \ + cp -a /usr/sbin/policy-rc.d /sbin/initctl && \ + sed -i 's/^exit.*/exit 0/' /sbin/initctl && \ + echo 'force-unsafe-io' > /etc/dpkg/dpkg.cfg.d/docker-apt-speedup && \ + 
echo 'DPkg::Post-Invoke { "rm -f /var/cache/apt/archives/*.deb /var/cache/apt/archives/partial/*.deb /var/cache/apt/*.bin || true"; };' > /etc/apt/apt.conf.d/docker-clean && \ + echo 'APT::Update::Post-Invoke { "rm -f /var/cache/apt/archives/*.deb /var/cache/apt/archives/partial/*.deb /var/cache/apt/*.bin || true"; };' >> /etc/apt/apt.conf.d/docker-clean && \ + echo 'Dir::Cache::pkgcache ""; Dir::Cache::srcpkgcache "";' >> /etc/apt/apt.conf.d/docker-clean && \ + echo 'Acquire::Languages "none";' > /etc/apt/apt.conf.d/docker-no-languages && \ + echo 'Acquire::GzipIndexes "true"; Acquire::CompressionTypes::Order:: "gz";' > /etc/apt/apt.conf.d/docker-gzip-indexes && \ + echo 'Apt::AutoRemove::SuggestsImportant "false";' > /etc/apt/apt.conf.d/docker-autoremove-suggests && \ + mkdir -p /run/systemd && \ + echo 'docker' > /run/systemd/container && \ + echo "**** install apt-utils and locales ****" && \ + apt-get update && \ + apt-get upgrade -y && \ + apt-get install -y apt-utils locales && \ + echo "**** install packages ****" && \ + apt-get install -y \ + bc catatonit cron curl gnupg jq netcat-openbsd systemd-standalone-sysusers tzdata && \ + echo "**** generate locale ****" && \ + locale-gen en_US.UTF-8 && \ + echo "**** prepare shared folders ****" && \ + mkdir -p /app /config /defaults /lsiopy && \ + echo "**** create video and render groups with standard GIDs ****" && \ + groupadd -g 44 video 2>/dev/null || groupmod -g 44 video 2>/dev/null || true && \ + groupadd -g 106 render 2>/dev/null || groupmod -g 106 render 2>/dev/null || true && \ + echo "**** cleanup ****" && \ + id ubuntu >/dev/null 2>&1 && userdel ubuntu || echo "ubuntu user does not exist, skipping" && \ + apt-get autoremove && \ + apt-get clean && \ + rm -rf /tmp/* /var/lib/apt/lists/* /var/tmp/* /var/log/* + +# add local files for ubuntu base +COPY ubuntu-root/ / + + +########################################### +# Stage 4: Xvfb builder +########################################### +FROM 
ubuntu-base-temp AS xvfb-builder + +COPY /patches /patches +ENV PATCH_VERSION=21 \ + HOME=/config + +RUN \ + echo "**** build deps ****" && \ + apt-get update && \ + apt-get install -y devscripts dpkg-dev && \ + apt-get build-dep -y xorg-server + +RUN \ + echo "**** get and build xvfb ****" && \ + apt-get source xorg-server && \ + cd xorg-server-* && \ + cp /patches/${PATCH_VERSION}-xvfb-dri3.patch patch.patch && \ + patch -p0 < patch.patch && \ + awk ' \ + { print } \ + /include \/usr\/share\/dpkg\/architecture.mk/ { \ + print ""; \ + print "GLAMOR_DEP_LIBS := $(shell pkg-config --libs gbm epoxy libdrm)"; \ + print "GLAMOR_DEP_CFLAGS := $(shell pkg-config --cflags gbm epoxy libdrm)"; \ + print "export DEB_LDFLAGS_PREPEND ?= $(GLAMOR_DEP_LIBS)"; \ + print "export DEB_CFLAGS_PREPEND ?= $(GLAMOR_DEP_CFLAGS)"; \ + } \ + ' debian/rules > debian/rules.tmp && mv debian/rules.tmp debian/rules && \ + debuild -us -uc -b && \ + mkdir -p /build-out/usr/bin && \ + mv debian/xvfb/usr/bin/Xvfb /build-out/usr/bin/ + + +########################################### +# Stage 5: Alpine base temp +########################################### +FROM alpine-rootfs-stage AS alpine-base-temp + +COPY --from=alpine-rootfs-stage /root-out/ / + +ARG BUILD_DATE +ARG VERSION +ARG MODS_VERSION="v3" +ARG PKG_INST_VERSION="v1" +ARG LSIOWN_VERSION="v1" +ARG WITHCONTENV_VERSION="v1" + +ADD --chmod=755 "https://raw.githubusercontent.com/linuxserver/docker-mods/mod-scripts/docker-mods.${MODS_VERSION}" "/docker-mods" +ADD --chmod=755 "https://raw.githubusercontent.com/linuxserver/docker-mods/mod-scripts/package-install.${PKG_INST_VERSION}" "/etc/s6-overlay/s6-rc.d/init-mods-package-install/run" +ADD --chmod=755 "https://raw.githubusercontent.com/linuxserver/docker-mods/mod-scripts/lsiown.${LSIOWN_VERSION}" "/usr/bin/lsiown" +ADD --chmod=755 "https://raw.githubusercontent.com/linuxserver/docker-mods/mod-scripts/with-contenv.${WITHCONTENV_VERSION}" "/usr/bin/with-contenv" + +ENV 
PS1="$(whoami)@$(hostname):$(pwd)\\$ " \ + HOME="/root" \ + TERM="xterm" \ + S6_CMD_WAIT_FOR_SERVICES_MAXTIME="0" \ + S6_VERBOSITY=1 \ + S6_STAGE2_HOOK=/docker-mods \ + VIRTUAL_ENV=/lsiopy \ + PATH="/lsiopy/bin:$PATH" + +RUN \ + echo "**** install runtime packages ****" && \ + apk add --no-cache \ + alpine-release bash ca-certificates catatonit coreutils curl findutils jq \ + netcat-openbsd procps-ng shadow tzdata && \ + echo "**** prepare shared folders ****" && \ + mkdir -p /app /config /defaults /lsiopy && \ + echo "**** cleanup ****" && \ + rm -rf /tmp/* + +# add local files for alpine base +COPY alpine-root/ / + + +########################################### +# Stage 6: Selkies frontend builder +########################################### +FROM alpine-base-temp AS frontend + +RUN \ + echo "**** install build packages ****" && \ + apk add cmake git nodejs npm + +RUN \ + echo "**** ingest code ****" && \ + git clone https://github.com/selkies-project/selkies.git /src && \ + cd /src && \ + git checkout -f f1ade4dd700bf0157bb78a8a58eab42fbb8f02ee + +RUN \ + echo "**** build shared core library ****" && \ + cd /src/addons/gst-web-core && \ + npm install && \ + npm run build && \ + echo "**** build multiple dashboards ****" && \ + DASHBOARDS="selkies-dashboard selkies-dashboard-zinc selkies-dashboard-wish" && \ + mkdir /buildout && \ + for DASH in $DASHBOARDS; do \ + cd /src/addons/$DASH && \ + cp ../gst-web-core/dist/selkies-core.js src/ && \ + npm install && \ + npm run build && \ + mkdir -p dist/src dist/nginx && \ + cp ../gst-web-core/dist/selkies-core.js dist/src/ && \ + cp ../universal-touch-gamepad/universalTouchGamepad.js dist/src/ && \ + cp ../gst-web-core/nginx/* dist/nginx/ && \ + cp -r ../gst-web-core/dist/jsdb dist/ && \ + mkdir -p /buildout/$DASH && \ + cp -ar dist/* /buildout/$DASH/; \ + done + + +########################################### +# Stage 7: Selkies base image +########################################### +FROM ubuntu-base-temp AS 
selkies-base + +# set version label +ARG VERSION +LABEL build_version="Linuxserver.io version:- ${VERSION}" +LABEL maintainer="thelamer" + +# env +ENV DISPLAY=:1 \ + PERL5LIB=/usr/local/bin \ + HOME=/config \ + START_DOCKER=true \ + PULSE_RUNTIME_PATH=/defaults \ + SELKIES_INTERPOSER=/usr/lib/selkies_joystick_interposer.so \ + NVIDIA_DRIVER_CAPABILITIES=all \ + DISABLE_ZINK=false \ + DISABLE_DRI3=false \ + DPI=96 \ + TITLE=Selkies + +ARG APT_EXTRA_PACKAGES="" +ARG LIBVA_DEB_URL="https://launchpad.net/ubuntu/+source/libva/2.22.0-3ubuntu2/+build/30591127/+files/libva2_2.22.0-3ubuntu2_amd64.deb" +ARG LIBVA_DEB_URL_ARM64="http://ports.ubuntu.com/ubuntu-ports/pool/main/libv/libva/libva2_2.22.0-3ubuntu2_arm64.deb" +# Optional: jammy-friendly libva deb (built against glibc 2.35). Leave empty to skip on 22.04. +ARG LIBVA_DEB_URL_JAMMY="http://launchpadlibrarian.net/587480468/libva2_2.14.0-1_amd64.deb" +ARG LIBVA_LIBDIR="/usr/lib/x86_64-linux-gnu" +ARG PROOT_ARCH="x86_64" +ARG PROOT_APPS_VERSION="0.3.1" +ARG VIRTUALGL_VERSION="3.1.4" + +# Step 1: Install base system packages and Docker +RUN \ + echo "**** dev deps ****" && \ + apt-get update && \ + DEBIAN_FRONTEND=noninteractive apt-get install --no-install-recommends -y python3-dev && \ + echo "**** enable locales ****" && \ + sed -i '/locale/d' /etc/dpkg/dpkg.cfg.d/excludes && \ + echo "**** install docker ****" && \ + unset VERSION && \ + curl https://get.docker.com | sh && \ + echo "**** install deps ****" && \ + curl -fsSL https://deb.nodesource.com/setup_22.x | bash - && \ + apt-get update && \ + echo "**** determine Ubuntu version-specific packages ****" && \ + UBUNTU_VERSION=$(lsb_release -rs 2>/dev/null || grep VERSION_ID /etc/os-release | cut -d= -f2 | tr -d '"') && \ + LABWC_PKG="" && \ + MESA_GALLIUM_PKG="" && \ + if [ "$(echo "${UBUNTU_VERSION}" | cut -d. 
-f1)" -ge 24 ]; then \ + LABWC_PKG="labwc" && \ + MESA_GALLIUM_PKG="mesa-libgallium"; \ + fi && \ + echo "**** Installing packages (Ubuntu ${UBUNTU_VERSION}) ****" && \ + DEBIAN_FRONTEND=noninteractive apt-get install --no-install-recommends -y \ + breeze-cursor-theme ca-certificates cmake console-data dbus-x11 \ + dunst file \ + fonts-noto-cjk fonts-noto-color-emoji fonts-noto-core foot fuse-overlayfs \ + g++ gcc git ${APT_EXTRA_PACKAGES} kbd ${LABWC_PKG} libatk1.0-0 libatk-bridge2.0-0 \ + libev4 libfontenc1 libfreetype6 libgbm1 libgcrypt20 libgirepository-1.0-1 \ + libgl1-mesa-dri libglu1-mesa libgnutls30 libgtk-3.0 libjpeg-turbo8 \ + libnginx-mod-http-fancyindex libnotify-bin libnss3 libnvidia-egl-wayland1 \ + libopus0 libp11-kit0 libpam0g libtasn1-6 libvulkan1 libwayland-client0 \ + libwayland-cursor0 libwayland-egl1 libwayland-server0 libx11-6 \ + libxau6 libxcb1 libxcb-icccm4 libxcb-image0 libxcb-keysyms1 libxcb-render-util0 \ + libxcursor1 libxdmcp6 libxext6 libxfconf-0-3 libxfixes3 libxfont2 libxinerama1 \ + libxkbcommon-dev libxkbcommon-x11-0 libxshmfence1 libxtst6 locales-all make \ + ${MESA_GALLIUM_PKG} mesa-va-drivers mesa-vulkan-drivers mesa-utils vainfo vdpauinfo \ + libvulkan-dev ocl-icd-libopencl1 clinfo libdrm2 libegl1 libgl1 libopengl0 libgles1 libgles2 \ + libglvnd0 libglx0 libglu1 libglvnd-dev \ + nginx openbox openssh-client \ + openssl pciutils procps psmisc pulseaudio pulseaudio-utils python3 python3-venv \ + bash-completion software-properties-common ssl-cert stterm sudo tar util-linux vulkan-tools \ + wl-clipboard wtype x11-apps x11-common x11-utils x11-xkb-utils x11-xserver-utils \ + xauth xclip xcvt xdg-utils xdotool xfconf xfonts-base xkb-data xsel \ + xserver-common xserver-xorg-core xserver-xorg-video-amdgpu xserver-xorg-video-ati \ + xserver-xorg-video-nouveau xserver-xorg-video-qxl xserver-xorg-video-dummy \ + xsettingsd xterm xutils xvfb zlib1g zstd && \ + echo "**** install Intel VA drivers (AMD64 only) ****" && \ + ARCH_CUR=$(dpkg 
--print-architecture) && \ + if [ "${ARCH_CUR}" = "amd64" ]; then \ + apt-get install -y --no-install-recommends i965-va-driver-shaders intel-media-va-driver-; \ + apt-get install -y --no-install-recommends intel-media-va-driver-non-free; \ + fi && \ + apt-get clean && \ + rm -rf /var/lib/apt/lists/* + +# Step 2: Install selkies and related components +RUN \ + echo "**** install selkies ****" && \ + SELKIES_RELEASE=$(curl -sX GET "https://api.github.com/repos/selkies-project/selkies/releases/latest" \ + | awk '/tag_name/{print $4;exit}' FS='[""]') && \ + curl -o /tmp/selkies.tar.gz -L \ + "https://github.com/selkies-project/selkies/archive/f1ade4dd700bf0157bb78a8a58eab42fbb8f02ee.tar.gz" && \ + cd /tmp && \ + tar xf selkies.tar.gz && \ + cd selkies-* && \ + sed -i '/cryptography/d' pyproject.toml && \ + UBUNTU_VERSION="$(. /etc/os-release && echo ${VERSION_ID})" && \ + if [ "$(echo "${UBUNTU_VERSION}" | cut -d. -f1)" -lt 24 ]; then \ + echo "**** Ubuntu ${UBUNTU_VERSION}: removing xkbcommon dependency (not compatible) ****" && \ + sed -i '/xkbcommon/d' pyproject.toml; \ + fi && \ + echo "**** install PyAV build tools (all versions) ****" && \ + apt-get update && \ + apt-get install -y --no-install-recommends \ + build-essential python3-dev pkg-config cython3 \ + ffmpeg \ + libavcodec-dev libavdevice-dev libavfilter-dev libavformat-dev \ + libavutil-dev libswresample-dev libswscale-dev && \ + if ! 
ffmpeg -version 2>/dev/null | head -n 1 | grep -q "ffmpeg version 7"; then \ + echo "**** build FFmpeg 7 for PyAV ****" && \ + apt-get update && \ + apt-get install -y --no-install-recommends \ + yasm nasm libssl-dev zlib1g-dev && \ + SELKIES_DIR="$(pwd)" && \ + cd /tmp && \ + curl -fsSL https://ffmpeg.org/releases/ffmpeg-7.0.2.tar.xz | tar -xJ && \ + cd ffmpeg-7.0.2 && \ + ./configure --prefix=/usr/local --enable-shared --disable-static --disable-debug --disable-doc && \ + make -j"$(nproc)" && \ + make install && \ + ldconfig && \ + cd "${SELKIES_DIR}" && \ + rm -rf /var/lib/apt/lists/*; \ + fi && \ + python3 -m venv --system-site-packages /lsiopy && \ + export PKG_CONFIG_PATH="/usr/local/lib/$(dpkg-architecture -qDEB_HOST_MULTIARCH)/pkgconfig:/usr/local/lib/pkgconfig:/usr/local/lib64/pkgconfig:/usr/lib/$(dpkg-architecture -qDEB_HOST_MULTIARCH)/pkgconfig:/usr/lib/pkgconfig" && \ + if [ "${UBUNTU_VERSION}" = "22.04" ] || [ "${UBUNTU_VERSION}" = "24.04" ]; then \ + echo "av==14.4.0" > /tmp/selkies-constraints.txt; \ + pip install -c /tmp/selkies-constraints.txt .; \ + else \ + pip install .; \ + fi && \ + pip install setuptools && \ + if [ "${UBUNTU_VERSION}" = "22.04" ]; then \ + echo "Installing pixelflux 1.4.7 for Ubuntu 22.04 (GLIBC 2.35)" && \ + pip install pixelflux==1.4.7; \ + elif [ "${UBUNTU_VERSION}" = "24.04" ]; then \ + echo "Installing pixelflux from selkies dependencies for Ubuntu 24.04" && \ + echo "pixelflux already installed"; \ + else \ + echo "Warning: Unknown Ubuntu version ${UBUNTU_VERSION}, using default pixelflux"; \ + fi && \ + echo "**** install selkies interposer ****" && \ + cd addons/js-interposer && \ + gcc -shared -fPIC -ldl -o selkies_joystick_interposer.so joystick_interposer.c && \ + mv selkies_joystick_interposer.so /usr/lib/selkies_joystick_interposer.so && \ + echo "**** install selkies fake udev ****" && \ + cd ../fake-udev && \ + make && \ + mkdir /opt/lib && \ + mv libudev.so.1.0.0-fake /opt/lib/ && \ + echo "**** add icon 
****" && \ + mkdir -p /usr/share/selkies/www && \ + curl -o /usr/share/selkies/www/icon.png \ + https://raw.githubusercontent.com/linuxserver/docker-templates/master/linuxserver.io/img/selkies-logo.png && \ + curl -o /usr/share/selkies/www/favicon.ico \ + https://raw.githubusercontent.com/linuxserver/docker-templates/refs/heads/master/linuxserver.io/img/selkies-icon.ico && \ + rm -rf /tmp/* + +# Step 3: System configuration and tools +RUN \ + echo "**** openbox tweaks ****" && \ + sed -i \ + -e 's/NLIMC/NLMC/g' \ + -e '/debian-menu/d' \ + -e 's|</applications>| <application class="*"><maximized>yes</maximized></application>\n</applications>|' \ + -e 's|</keyboard>| <keybind key="C-S-d"><action name="ToggleDecorations"/></keybind>\n</keyboard>|' \ + -e 's|<number>4</number>|<number>1</number>|' \ + /etc/xdg/openbox/rc.xml && \ + sed -i 's/--startup/--replace --startup/g' /usr/bin/openbox-session && \ + echo "**** user perms ****" && \ + sed -e 's/%sudo ALL=(ALL:ALL) ALL/%sudo ALL=(ALL:ALL) NOPASSWD: ALL/g' -i /etc/sudoers && \ + echo "**** proot-apps ****" && \ + mkdir /proot-apps/ && \ + curl -L https://github.com/linuxserver/proot-apps/releases/download/${PROOT_APPS_VERSION}/proot-apps-${PROOT_ARCH}.tar.gz \ + | tar -xzf - -C /proot-apps/ && \ + echo "${PROOT_APPS_VERSION}" > /proot-apps/pversion && \ + echo "**** dind support ****" && \ + useradd -U dockremap && \ + usermod -G dockremap dockremap && \ + echo 'dockremap:165536:65536' >> /etc/subuid && \ + echo 'dockremap:165536:65536' >> /etc/subgid && \ + curl -o /usr/local/bin/dind -L \ + https://raw.githubusercontent.com/moby/moby/master/hack/dind && \ + chmod +x /usr/local/bin/dind && \ + echo 'hosts: files dns' > /etc/nsswitch.conf && \ + groupadd -f docker && \ +echo "**** install VirtualGL ****" && \ + cd /tmp && VIRTUALGL_VERSION="$(echo ${VIRTUALGL_VERSION} | sed 's/[^0-9\.\-]*//g')" && \ + if [ "$(dpkg --print-architecture)" = "amd64" ]; then \ + dpkg --add-architecture i386 && \ + apt-get update && \ + 
curl -fsSL -O "https://github.com/VirtualGL/virtualgl/releases/download/${VIRTUALGL_VERSION}/virtualgl_${VIRTUALGL_VERSION}_amd64.deb" && \ + curl -fsSL -O "https://github.com/VirtualGL/virtualgl/releases/download/${VIRTUALGL_VERSION}/virtualgl32_${VIRTUALGL_VERSION}_amd64.deb" && \ + apt-get install -y --no-install-recommends "./virtualgl_${VIRTUALGL_VERSION}_amd64.deb" "./virtualgl32_${VIRTUALGL_VERSION}_amd64.deb" && \ + rm -f "virtualgl_${VIRTUALGL_VERSION}_amd64.deb" "virtualgl32_${VIRTUALGL_VERSION}_amd64.deb" && \ + chmod -f u+s /usr/lib/libvglfaker.so /usr/lib/libvglfaker-nodl.so /usr/lib/libvglfaker-opencl.so /usr/lib/libdlfaker.so /usr/lib/libgefaker.so 2>/dev/null || true && \ + chmod -f u+s /usr/lib32/libvglfaker.so /usr/lib32/libvglfaker-nodl.so /usr/lib32/libvglfaker-opencl.so /usr/lib32/libdlfaker.so /usr/lib32/libgefaker.so 2>/dev/null || true && \ + chmod -f u+s /usr/lib/i386-linux-gnu/libvglfaker.so /usr/lib/i386-linux-gnu/libvglfaker-nodl.so /usr/lib/i386-linux-gnu/libvglfaker-opencl.so /usr/lib/i386-linux-gnu/libdlfaker.so /usr/lib/i386-linux-gnu/libgefaker.so 2>/dev/null || true; \ + elif [ "$(dpkg --print-architecture)" = "arm64" ]; then \ + curl -fsSL -O "https://github.com/VirtualGL/virtualgl/releases/download/${VIRTUALGL_VERSION}/virtualgl_${VIRTUALGL_VERSION}_arm64.deb" && \ + apt-get update && apt-get install -y --no-install-recommends ./virtualgl_${VIRTUALGL_VERSION}_arm64.deb && \ + rm -f "virtualgl_${VIRTUALGL_VERSION}_arm64.deb" && \ + chmod -f u+s /usr/lib/libvglfaker.so /usr/lib/libvglfaker-nodl.so /usr/lib/libdlfaker.so /usr/lib/libgefaker.so 2>/dev/null || true; \ + fi && \ + echo "**** configure OpenCL and EGL for NVIDIA ****" && \ + mkdir -pm755 /etc/OpenCL/vendors && echo "libnvidia-opencl.so.1" > /etc/OpenCL/vendors/nvidia.icd && \ + mkdir -pm755 /usr/share/glvnd/egl_vendor.d/ && echo '{\n\ + "file_format_version" : "1.0.0",\n\ + "ICD": {\n\ + "library_path": "libEGL_nvidia.so.0"\n\ + }\n\ +}' > 
/usr/share/glvnd/egl_vendor.d/10_nvidia.json && \ + echo "/usr/local/nvidia/lib" >> /etc/ld.so.conf.d/nvidia.conf && \ + echo "/usr/local/nvidia/lib64" >> /etc/ld.so.conf.d/nvidia.conf && \ + apt-get clean && \ + rm -rf /var/lib/apt/lists/* /tmp/* + +# Step 5: Install libva, locales, and theme +ARG LIBVA_DEB_URL +ARG LIBVA_DEB_URL_ARM64 +ARG LIBVA_DEB_URL_JAMMY +ARG LIBVA_LIBDIR +RUN \ + UBUNTU_VERSION="$(. /etc/os-release && echo ${VERSION_ID})" && \ + UBUNTU_MAJOR="$(echo "${UBUNTU_VERSION}" | cut -d. -f1)" && \ + ARCH_CUR="$(dpkg --print-architecture)" && \ + LIBVA_TARGET_LIBDIR="${LIBVA_LIBDIR}" && \ + if [ "${ARCH_CUR}" = "arm64" ] && [ "${LIBVA_TARGET_LIBDIR}" = "/usr/lib/x86_64-linux-gnu" ]; then \ + LIBVA_TARGET_LIBDIR="/usr/lib/aarch64-linux-gnu"; \ + fi && \ + if [ "${UBUNTU_MAJOR}" -ge 24 ] && [ "${ARCH_CUR}" = "amd64" ] && [ -n "${LIBVA_DEB_URL}" ]; then \ + echo "**** libva hack (Ubuntu ${UBUNTU_VERSION}) ****" && \ + mkdir /tmp/libva && \ + curl -o /tmp/libva/libva.deb -L "${LIBVA_DEB_URL}" && \ + cd /tmp/libva && \ + ar x libva.deb && \ + tar xf data.tar.zst && \ + if ls "usr/lib/${LIBVA_TARGET_LIBDIR#/usr/lib/}/libva.so.2"* >/dev/null 2>&1; then \ + rm -f ${LIBVA_TARGET_LIBDIR}/libva.so.2* && \ + cp -a usr/lib/${LIBVA_TARGET_LIBDIR#/usr/lib/}/libva.so.2* ${LIBVA_TARGET_LIBDIR}/; \ + else \ + echo "**** libva hack skipped (libva.so.2 not found in ${LIBVA_TARGET_LIBDIR}) ****"; \ + fi; \ + elif [ "${UBUNTU_MAJOR}" -ge 24 ] && [ "${ARCH_CUR}" = "arm64" ] && [ -n "${LIBVA_DEB_URL_ARM64}" ]; then \ + echo "**** libva hack (Ubuntu ${UBUNTU_VERSION}, arm64) ****" && \ + mkdir /tmp/libva && \ + curl -o /tmp/libva/libva.deb -L "${LIBVA_DEB_URL_ARM64}" && \ + cd /tmp/libva && \ + ar x libva.deb && \ + tar xf data.tar.zst && \ + if ls "usr/lib/${LIBVA_TARGET_LIBDIR#/usr/lib/}/libva.so.2"* >/dev/null 2>&1; then \ + rm -f ${LIBVA_TARGET_LIBDIR}/libva.so.2* && \ + cp -a usr/lib/${LIBVA_TARGET_LIBDIR#/usr/lib/}/libva.so.2* ${LIBVA_TARGET_LIBDIR}/; \ + else \ + 
echo "**** libva hack skipped (libva.so.2 not found in ${LIBVA_TARGET_LIBDIR}) ****"; \ + fi; \ + elif [ "${ARCH_CUR}" = "amd64" ] && [ -n "${LIBVA_DEB_URL_JAMMY}" ]; then \ + echo "**** libva hack (Ubuntu ${UBUNTU_VERSION}, jammy-compatible override) ****" && \ + mkdir /tmp/libva && \ + curl -o /tmp/libva/libva.deb -L "${LIBVA_DEB_URL_JAMMY}" && \ + cd /tmp/libva && \ + ar x libva.deb && \ + tar xf data.tar.zst && \ + if ls "usr/lib/${LIBVA_TARGET_LIBDIR#/usr/lib/}/libva.so.2"* >/dev/null 2>&1; then \ + rm -f ${LIBVA_TARGET_LIBDIR}/libva.so.2* && \ + cp -a usr/lib/${LIBVA_TARGET_LIBDIR#/usr/lib/}/libva.so.2* ${LIBVA_TARGET_LIBDIR}/; \ + else \ + echo "**** libva hack skipped (libva.so.2 not found in ${LIBVA_TARGET_LIBDIR}) ****"; \ + fi; \ + else \ + echo "**** skip libva hack on Ubuntu ${UBUNTU_VERSION} (use distro libva) ****"; \ + fi && \ + echo "**** locales ****" && \ + for LOCALE in $(curl -sL https://raw.githubusercontent.com/thelamer/lang-stash/master/langs); do \ + localedef -i $LOCALE -f UTF-8 $LOCALE.UTF-8; \ + done && \ + echo "**** theme ****" && \ + curl -s https://raw.githubusercontent.com/thelamer/lang-stash/master/theme.tar.gz \ + | tar xzvf - -C /usr/share/themes/Clearlooks/openbox-3/ && \ + rm -rf /tmp/* + +# Step 6: Install CUDA NVRTC for WSL2 support (AMD64 only) +# Note: selkies-gstreamer has been removed. GPU encoding now uses pixelflux with VA-API/NVENC +RUN \ + echo "**** install CUDA NVRTC for WSL2 cudaconvert support (amd64 only) ****" && \ + ARCH_CUR=$(dpkg --print-architecture) && \ + if [ "${ARCH_CUR}" = "amd64" ]; then \ + UBUNTU_VERSION="$(. 
/etc/os-release && echo ${VERSION_ID})"; \ + UBUNTU_VERSION_NODOT=$(echo "${UBUNTU_VERSION}" | tr -d '.'); \ + CUDA_VERSION="12-6"; \ + CUDA_KEYRING_VERSION="1.1"; \ + curl -fsSL "https://developer.download.nvidia.com/compute/cuda/repos/ubuntu${UBUNTU_VERSION_NODOT}/x86_64/cuda-keyring_${CUDA_KEYRING_VERSION}-1_all.deb" -o /tmp/cuda-keyring.deb && \ + dpkg -i /tmp/cuda-keyring.deb && rm /tmp/cuda-keyring.deb && \ + apt-get update && \ + apt-get install -y --no-install-recommends cuda-nvrtc-${CUDA_VERSION} && \ + rm -rf /var/lib/apt/lists/*; \ + fi + +# Step 8: Final cleanup +RUN \ + echo "**** cleanup ****" && \ + apt-get purge -y --autoremove python3-dev && \ + apt-get autoclean && \ + rm -rf /config/.cache /config/.npm /var/lib/apt/lists/* /var/tmp/* /tmp/* + +# add local files - this will overwrite ubuntu-root files if conflicts exist +COPY ubuntu-root/ / + +# selkies-gstreamer is no longer used; websockets 15.x patch is not needed +COPY --from=frontend /buildout /usr/share/selkies +COPY --from=xvfb-builder /build-out/ / + +# Apply Safari keyboard input patch for Selkies web UIs +RUN if [ -f /usr/local/bin/patch-selkies-safari-keyboard.py ]; then \ + chmod +x /usr/local/bin/patch-selkies-safari-keyboard.py && \ + python3 /usr/local/bin/patch-selkies-safari-keyboard.py; \ + fi + +# ports and volumes +EXPOSE 3000 3001 +VOLUME /config +########################################### +# Stage 8: Final webtop image +########################################### +FROM selkies-base + +# set version label +ARG VERSION +LABEL build_version="Linuxserver.io version:- ${VERSION}" +LABEL maintainer="thelamer" +ARG DEBIAN_FRONTEND="noninteractive" + +# title +ENV TITLE="Ubuntu KDE" \ + NO_GAMEPAD=true + +RUN \ + echo "**** add icon ****" && \ + curl -o /usr/share/selkies/www/icon.png \ + https://raw.githubusercontent.com/linuxserver/docker-templates/master/linuxserver.io/img/webtop-logo.png && \ + echo "**** install packages ****" && \ + add-apt-repository ppa:xtradeb/apps && \ + 
apt-get update && \ + DEBIAN_FRONTEND=noninteractive apt-get install --no-install-recommends -y \ + bc chromium dolphin gwenview kde-config-gtk-style kdialog kfind khotkeys \ + kio-extras knewstuff-dialog konsole ksystemstats kubuntu-settings-desktop \ + kubuntu-wallpapers kubuntu-web-shortcuts kwin-addons kwin-x11 kwrite \ + plasma-desktop plasma-workspace qml-module-qt-labs-platform systemsettings kubuntu-desktop && \ + if [ "$(dpkg --print-architecture)" = "amd64" ]; then \ + echo "**** install latest google-chrome (amd64) ****" && \ + cd /tmp && \ + curl -fsSL -o google-chrome-stable.deb https://dl.google.com/linux/direct/google-chrome-stable_current_amd64.deb && \ + apt-get update && \ + DEBIAN_FRONTEND=noninteractive apt-get install --no-install-recommends -y ./google-chrome-stable.deb && \ + rm -f /tmp/google-chrome-stable.deb; \ + fi && \ + echo "**** application tweaks ****" && \ + sed -i 's#^Exec=.*#Exec=/usr/local/bin/wrapped-chromium#g' \ + /usr/share/applications/chromium.desktop && \ + echo "**** kde tweaks ****" && \ + sed -i \ + 's/applications:org.kde.discover.desktop,/applications:org.kde.konsole.desktop,/g' \ + /usr/share/plasma/plasmoids/org.kde.plasma.taskmanager/contents/config/main.xml && \ + echo "**** cleanup ****" && \ + apt-get autoclean && \ + rm -rf /config/.cache /config/.launchpadlib /var/lib/apt/lists/* /var/tmp/* /tmp/* + +# Initialize bash-completion and command-not-found databases +# This ensures apt tab completion works properly +RUN apt-get update && \ + apt-get install -y apt-file command-not-found && \ + apt-file update && \ + /usr/lib/cnf-update-db && \ + # Disable docker-clean that prevents apt cache completion + rm -f /etc/apt/apt.conf.d/docker-clean && \ + # Configure apt to keep cache files for completion + mkdir -p /etc/apt/apt.conf.d && \ + echo 'Dir::Cache::pkgcache "/var/cache/apt/pkgcache.bin";' > /etc/apt/apt.conf.d/00-apt-cache-completion && \ + echo 'Dir::Cache::srcpkgcache "/var/cache/apt/srcpkgcache.bin";' >> 
/etc/apt/apt.conf.d/00-apt-cache-completion && \ + # Generate apt cache for package name completion + apt-cache gencaches && \ + chmod 644 /var/cache/apt/*.bin && \ + # Verify cache files were created + ls -la /var/cache/apt/*.bin + +# add local files for KDE webtop +COPY kde-root/ / + +# ports and volumes +EXPOSE 3000 +VOLUME /config + +ENTRYPOINT ["/init"] diff --git a/files/linuxserver-kde.user.dockerfile b/files/linuxserver-kde.user.dockerfile new file mode 100644 index 000000000..b92f81ee2 --- /dev/null +++ b/files/linuxserver-kde.user.dockerfile @@ -0,0 +1,422 @@ +# Base image must be provided via --build-arg BASE_IMAGE=<image> +ARG BASE_IMAGE=scratch +FROM ${BASE_IMAGE} + +ARG USER_NAME +ARG USER_UID +ARG USER_GID +ARG VIDEO_GID="" +ARG RENDER_GID="" +# Note: USER_PASSWORD is used only during image build for initial setup. +# It is not stored in the image layers. Change password after first login. +ARG USER_PASSWORD="" +ARG HOST_HOSTNAME="Docker-Host" +ARG USER_LANGUAGE="en" +ARG USER_LANG_ENV="en_US.UTF-8" +ARG USER_LANGUAGE_ENV="en_US:en" + +ENV HOME="/home/${USER_NAME}" \ + USER_NAME="${USER_NAME}" \ + HOST_HOSTNAME="${HOST_HOSTNAME}" \ + SHELL="/bin/bash" \ + LANG="${USER_LANG_ENV}" \ + LANGUAGE="${USER_LANGUAGE_ENV}" \ + LC_ALL="${USER_LANG_ENV}" + +RUN set -eux; \ + TARGET_USER="${USER_NAME}"; \ + TARGET_UID="${USER_UID}"; \ + TARGET_GID="${USER_GID}"; \ + if [ -z "${TARGET_UID}" ] || [ -z "${TARGET_GID}" ]; then echo "USER_UID/USER_GID must be provided (host UID/GID)"; exit 1; fi; \ + if [ -z "${USER_PASSWORD}" ]; then echo "USER_PASSWORD must be provided"; exit 1; fi; \ + echo "Using user=${TARGET_USER} uid=${TARGET_UID} gid=${TARGET_GID}"; \ + # ensure primary group named ${TARGET_USER} exists with TARGET_GID (allow non-unique gid to satisfy chown <user>:<user>) \ + if getent group "${TARGET_USER}" >/dev/null; then \ + groupmod -g "${TARGET_GID}" "${TARGET_USER}" || true; \ + else \ + groupadd -o -g "${TARGET_GID}" "${TARGET_USER}" 2>/dev/null || 
true; \ + fi; \ + if [ -n "${VIDEO_GID}" ]; then \ + if getent group video >/dev/null; then \ + groupmod -o -g "${VIDEO_GID}" video || true; \ + else \ + groupadd -o -g "${VIDEO_GID}" video || true; \ + fi; \ + fi; \ + if [ -n "${RENDER_GID}" ]; then \ + if getent group render >/dev/null; then \ + groupmod -o -g "${RENDER_GID}" render || true; \ + else \ + groupadd -o -g "${RENDER_GID}" render || true; \ + fi; \ + fi; \ + # remove any user that already has the desired UID to avoid conflicts \ + if getent passwd "${TARGET_UID}" >/dev/null; then \ + OLD_USER=$(getent passwd "${TARGET_UID}" | cut -d: -f1); \ + if [ "${OLD_USER}" != "${TARGET_USER}" ]; then \ + echo "UID ${TARGET_UID} in use by ${OLD_USER}, removing it"; \ + userdel -r "${OLD_USER}" || true; \ + fi; \ + fi; \ + # ensure common supplemental groups exist (similar to Ubuntu adduser) plus docker/sudo \ + for g in adm cdrom dip plugdev lpadmin lxd sudo docker users audio video render; do \ + getent group "$g" >/dev/null || groupadd "$g"; \ + done; \ + # set hostname to host-derived value (baked into image) \ + echo "${HOST_HOSTNAME}" > /etc/hostname; \ + # create or update main user matching host uid/gid (home=/home/<user>) \ + if ! 
getent passwd "${TARGET_USER}" >/dev/null; then \ + useradd -m -d "/home/${TARGET_USER}" -u "${TARGET_UID}" -g "${TARGET_USER}" -s /bin/bash "${TARGET_USER}"; \ + else \ + usermod -u "${TARGET_UID}" -g "${TARGET_USER}" -d "/home/${TARGET_USER}" "${TARGET_USER}"; \ + install -d -m 755 "/home/${TARGET_USER}"; \ + fi; \ + usermod -aG adm,cdrom,dip,plugdev,lpadmin,lxd,sudo,docker,users,audio,video,render "${TARGET_USER}"; \ + echo "${TARGET_USER}:${USER_PASSWORD}" | chpasswd; \ + # store auth secret/hash for web login \ + SECRET_SALT=$(openssl rand -hex 16); \ + env TARGET_USER="${TARGET_USER}" TARGET_PW="${USER_PASSWORD}" SECRET_SALT="${SECRET_SALT}" \ + python3 -c "import json,hashlib,os;user=os.environ['TARGET_USER'];pw=os.environ['TARGET_PW'];salt=os.environ['SECRET_SALT'];pw_hash=hashlib.sha256((pw+salt).encode()).hexdigest();secret=hashlib.sha256((user+pw+salt).encode()).hexdigest();data={'user':user,'salt':salt,'pw_hash':pw_hash,'secret':secret};open('/etc/web-auth.json','w').write(json.dumps(data));os.chmod('/etc/web-auth.json',0o600)" ; \ + # ensure skeleton and Ubuntu-like bashrc for user (HOME=/home/<user>) and root \ + install -d -m 755 "/home/${TARGET_USER}"; \ + chown -R "${TARGET_UID}:${TARGET_GID}" "/home/${TARGET_USER}"; \ + # create common XDG-style folders in the user's home \ + for d in Desktop Documents Downloads Music Pictures Videos Templates Public; do \ + install -d -m 755 "/home/${TARGET_USER}/${d}"; \ + chown "${TARGET_UID}:${TARGET_GID}" "/home/${TARGET_USER}/${d}"; \ + done; \ + DEFAULT_BASHRC="/usr/local/share/default_bashrc"; \ + printf '%s\n' \ + "# DEFAULT_BASHRC" \ + "# ~/.bashrc: executed by bash(1) for non-login shells." 
\ + "# If not running interactively, don't do anything" \ + "case \$- in" \ + " *i*) ;;" \ + " *) return;;" \ + "esac" \ + "HISTCONTROL=ignoreboth" \ + "shopt -s histappend" \ + "HISTSIZE=1000" \ + "HISTFILESIZE=2000" \ + "shopt -s checkwinsize" \ + "[ -x /usr/bin/lesspipe ] && eval \"\$(SHELL=/bin/sh lesspipe)\"" \ + "if [ -z \"\${debian_chroot:-}\" ] && [ -r /etc/debian_chroot ]; then" \ + " debian_chroot=\$(cat /etc/debian_chroot)" \ + "fi" \ + "case \"\$TERM\" in" \ + " xterm-color|*-256color) color_prompt=yes;;" \ + "esac" \ + "if [ -n \"\$force_color_prompt\" ]; then" \ + " if [ -x /usr/bin/tput ] && tput setaf 1 >&/dev/null; then" \ + " color_prompt=yes" \ + " else" \ + " color_prompt=" \ + " fi" \ + "fi" \ + "if [ \"\$color_prompt\" = yes ]; then" \ + " PS1=\"\${debian_chroot:+(\$debian_chroot)}\\[\\033[01;32m\\]\\u@\\h\\[\\033[00m\\]:\\[\\033[01;34m\\]\\w\\[\\033[00m\\]\\$ \" " \ + "else" \ + " PS1=\"\${debian_chroot:+(\$debian_chroot)}\\u@\\h:\\w\\$ \" " \ + "fi" \ + "unset color_prompt force_color_prompt" \ + "case \"\$TERM\" in" \ + "xterm*|rxvt*)" \ + " PS1=\"\\[\\e]0;\${debian_chroot:+(\$debian_chroot)}\\u@\\h: \\w\\a\\]\$PS1\"" \ + " ;;" \ + "*)" \ + " ;;" \ + "esac" \ + "if [ -x /usr/bin/dircolors ]; then" \ + " test -r ~/.dircolors && eval \"\$(dircolors -b ~/.dircolors)\" || eval \"\$(dircolors -b)\"" \ + " alias ls='ls --color=auto'" \ + " alias grep='grep --color=auto'" \ + " alias fgrep='fgrep --color=auto'" \ + " alias egrep='egrep --color=auto'" \ + "fi" \ + "alias ll='ls -alF'" \ + "alias la='ls -A'" \ + "alias l='ls -CF'" \ + "if [ -f ~/.bash_aliases ]; then" \ + " . ~/.bash_aliases" \ + "fi" \ + "if ! shopt -oq posix; then" \ + " if [ -f /usr/share/bash-completion/bash_completion ]; then" \ + " . /usr/share/bash-completion/bash_completion" \ + " elif [ -f /etc/bash_completion ]; then" \ + " . 
/etc/bash_completion" \ + " fi" \ + "fi" \ + > "${DEFAULT_BASHRC}" \ + && cp "${DEFAULT_BASHRC}" "/home/${TARGET_USER}/.bashrc" \ + && cp "${DEFAULT_BASHRC}" /root/.bashrc \ + && chown "${TARGET_UID}:${TARGET_GID}" "/home/${TARGET_USER}/.bashrc" \ + && rm -f /etc/profile.d/00-ps1.sh /etc/profile.d/01-bashcomp.sh; \ + # reset sudoers to require password \ + sed -i 's/^%sudo\tALL=(ALL:ALL) NOPASSWD: ALL/%sudo\tALL=(ALL:ALL) ALL/' /etc/sudoers; \ + if ! grep -q "^%sudo\s\+ALL=(ALL:ALL)\s\+ALL" /etc/sudoers; then echo "%sudo ALL=(ALL:ALL) ALL" >> /etc/sudoers; fi; \ + # disable PackageKit/UDisks2 autostart and D-Bus activation (prevents permission-denied spam) \ + rm -f \ + /etc/xdg/autostart/packagekitd.desktop \ + /usr/share/dbus-1/system-services/org.freedesktop.PackageKit.service \ + /usr/share/dbus-1/system-services/org.freedesktop.UDisks2.service; \ + install -d -m 755 /etc/dbus-1/system.d; \ + printf '%s\n' \ + '<!DOCTYPE busconfig PUBLIC "-//freedesktop//DTD D-Bus Bus Configuration 1.0//EN"' \ + '"http://www.freedesktop.org/standards/dbus/1.0/busconfig.dtd">' \ + '<busconfig>' \ + ' <policy context="default">' \ + ' <deny send_destination="org.freedesktop.PackageKit"/>' \ + ' <deny send_destination="org.freedesktop.UDisks2"/>' \ + ' </policy>' \ + '</busconfig>' \ + > /etc/dbus-1/system.d/disable-packagekit.conf; \ + mkdir -p /defaults /app /lsiopy && \ + chown -R "${TARGET_UID}:${TARGET_GID}" /defaults /app /lsiopy + +# optional Japanese locale and input (toggle via USER_LANGUAGE=ja) +RUN set -eux; \ + LANG_SEL="$(echo "${USER_LANGUAGE}" | tr '[:upper:]' '[:lower:]')" ; \ + if [ "${LANG_SEL}" = "ja" ] || [ "${LANG_SEL}" = "ja_jp" ] || [ "${LANG_SEL}" = "ja-jp" ]; then \ + apt-get update && \ + DEBIAN_FRONTEND=noninteractive apt-get install --no-install-recommends -y \ + language-pack-ja-base language-pack-ja im-config \ + fonts-noto-cjk fonts-noto-color-emoji \ + fcitx fcitx-bin fcitx-data fcitx-table-all \ + fcitx-mozc fcitx-config-gtk \ + fcitx-frontend-gtk2 
fcitx-frontend-gtk3 fcitx-frontend-qt5 \ + fcitx-module-dbus fcitx-module-kimpanel fcitx-module-x11 fcitx-module-lua fcitx-ui-classic \ + kde-config-fcitx && \ + locale-gen ja_JP.UTF-8 && \ + update-locale LANG=ja_JP.UTF-8 LANGUAGE=ja_JP:ja LC_ALL=ja_JP.UTF-8 && \ + apt-get clean && rm -rf /var/lib/apt/lists/*; \ + echo "ja_JP.UTF-8 UTF-8" > /etc/locale.gen || true; \ + echo 'LANG=ja_JP.UTF-8' > /etc/default/locale; \ + echo 'LANGUAGE=ja_JP:ja' >> /etc/default/locale; \ + echo 'LC_ALL=ja_JP.UTF-8' >> /etc/default/locale; \ + rm -f /etc/localtime; \ + ln -snf /usr/share/zoneinfo/Asia/Tokyo /etc/localtime; \ + echo "Asia/Tokyo" > /etc/timezone; \ + printf '%s\n' \ + 'XKBMODEL="jp106"' \ + 'XKBLAYOUT="jp"' \ + 'XKBVARIANT=""' \ + 'XKBOPTIONS=""' \ + 'BACKSPACE="guess"' \ + > /etc/default/keyboard; \ + install -d -m 755 /etc/X11/xorg.conf.d; \ + printf '%s\n' \ + 'Section "InputClass"' \ + ' Identifier "system-keyboard"' \ + ' MatchIsKeyboard "on"' \ + ' Option "XkbLayout" "jp"' \ + ' Option "XkbModel" "jp106"' \ + ' Option "XkbVariant" ""' \ + ' Option "XkbOptions" ""' \ + 'EndSection' \ + > /etc/X11/xorg.conf.d/00-keyboard.conf; \ + im-config -n fcitx; \ + install -d -m 755 /etc/xdg/autostart "/home/${USER_NAME}/.config/autostart"; \ + printf '%s\n' \ + '[Desktop Entry]' \ + 'Type=Application' \ + 'Exec=fcitx -d' \ + 'Hidden=false' \ + 'X-GNOME-Autostart-enabled=true' \ + 'Name=fcitx' \ + 'Comment=Start Fcitx input method daemon' \ + > /etc/xdg/autostart/fcitx-autostart.desktop; \ + cp /etc/xdg/autostart/fcitx-autostart.desktop "/home/${USER_NAME}/.config/autostart/fcitx-autostart.desktop"; \ + chown "${USER_UID}:${USER_GID}" "/home/${USER_NAME}/.config/autostart/fcitx-autostart.desktop"; \ + printf '%s\n' \ + '[Layout]' \ + 'DisplayNames=' \ + 'LayoutList=jp' \ + 'Model=jp106' \ + 'Options=' \ + 'ResetOldOptions=true' \ + 'Use=true' \ + > "/home/${USER_NAME}/.config/kxkbrc"; \ + chown "${USER_UID}:${USER_GID}" "/home/${USER_NAME}/.config/kxkbrc"; \ + fi + +# Set 
fcitx environment variables globally when Japanese locale is selected +ARG USER_LANGUAGE +RUN LANG_SEL="$(echo "${USER_LANGUAGE}" | tr '[:upper:]' '[:lower:]')" ; \ + if [ "${LANG_SEL}" = "ja" ] || [ "${LANG_SEL}" = "ja_jp" ] || [ "${LANG_SEL}" = "ja-jp" ]; then \ + mkdir -p /etc/profile.d && \ + printf '%s\n' \ + 'export GTK_IM_MODULE=fcitx' \ + 'export QT_IM_MODULE=fcitx' \ + 'export XMODIFIERS="@im=fcitx"' \ + 'export INPUT_METHOD=fcitx' \ + 'export SDL_IM_MODULE=fcitx' \ + 'export GLFW_IM_MODULE=fcitx' \ + > /etc/profile.d/99-fcitx-env.sh && \ + chmod 644 /etc/profile.d/99-fcitx-env.sh; \ + fi + +# Apply fcitx ENV globally when USER_LANGUAGE is ja +ENV GTK_IM_MODULE=fcitx \ + QT_IM_MODULE=fcitx \ + XMODIFIERS="@im=fcitx" \ + INPUT_METHOD=fcitx \ + SDL_IM_MODULE=fcitx \ + GLFW_IM_MODULE=fcitx + +# create XDG user dirs and desktop shortcuts (Home/Trash) +RUN set -eux; \ + for d in Desktop Documents Downloads Music Pictures Videos Templates Public; do \ + install -d -m 755 "/home/${USER_NAME}/${d}"; \ + chown "${USER_UID}:${USER_GID}" "/home/${USER_NAME}/${d}"; \ + done; \ + install -d -m 755 "/home/${USER_NAME}/.config"; \ + printf '%s\n' \ + 'XDG_DESKTOP_DIR="$HOME/Desktop"' \ + 'XDG_DOWNLOAD_DIR="$HOME/Downloads"' \ + 'XDG_TEMPLATES_DIR="$HOME/Templates"' \ + 'XDG_PUBLICSHARE_DIR="$HOME/Public"' \ + 'XDG_DOCUMENTS_DIR="$HOME/Documents"' \ + 'XDG_MUSIC_DIR="$HOME/Music"' \ + 'XDG_PICTURES_DIR="$HOME/Pictures"' \ + 'XDG_VIDEOS_DIR="$HOME/Videos"' \ + > "/home/${USER_NAME}/.config/user-dirs.dirs"; \ + printf '%s\n' \ + '[Desktop Entry]' \ + 'Encoding=UTF-8' \ + 'Name=Home' \ + 'GenericName=Personal Files' \ + 'URL[$e]=$HOME' \ + 'Icon=user-home' \ + 'Type=Link' \ + > "/home/${USER_NAME}/Desktop/home.desktop"; \ + printf '%s\n' \ + '[Desktop Entry]' \ + 'Name=Trash' \ + 'Comment=Contains removed files' \ + 'Icon=user-trash-full' \ + 'EmptyIcon=user-trash' \ + 'URL=trash:/' \ + 'Type=Link' \ + > "/home/${USER_NAME}/Desktop/trash.desktop"; \ + chown 
"${USER_UID}:${USER_GID}" /home/${USER_NAME}/Desktop/home.desktop /home/${USER_NAME}/Desktop/trash.desktop + +# browser wrappers (Chromium on arm64, Chrome on amd64) to enforce flags even after package updates +RUN <<'EOF' +set -eux + +ARCH="$(dpkg --print-architecture)" +if [ -x /usr/lib/chromium/chromium ] || [ -x /usr/bin/chromium ] || [ -x /usr/bin/chromium.distrib ]; then + cat > /usr/local/bin/wrapped-chromium <<'EOF_WRAPPED_CHROMIUM' +#!/bin/bash + +if [ -x /usr/bin/chromium.distrib ]; then + BIN=/usr/bin/chromium.distrib +elif [ -x /usr/lib/chromium/chromium ]; then + BIN=/usr/lib/chromium/chromium +elif [ -x /usr/lib/chromium-browser/chromium-browser ]; then + BIN=/usr/lib/chromium-browser/chromium-browser +else + BIN=/usr/bin/chromium +fi +DEFAULT_FLAGS="--password-store=basic --in-process-gpu" +EXTRA_FLAGS=(${CHROMIUM_FLAGS:-}) + +# Cleanup +if ! pgrep chromium > /dev/null; then + rm -f $HOME/.config/chromium/Singleton* +fi + +# Run with --no-sandbox (same as Chrome wrapper) +${BIN} ${DEFAULT_FLAGS} --no-sandbox "${EXTRA_FLAGS[@]}" "$@" +EOF_WRAPPED_CHROMIUM + chmod 755 /usr/local/bin/wrapped-chromium +fi +if [ -x /usr/bin/chromium ]; then + cat > /usr/local/bin/ensure-chromium-wrap <<'EOF_CHROMIUM' +#!/bin/bash +set -e + +if [ -x /usr/bin/chromium ]; then + printf '%s\n' '#!/bin/bash' 'exec /usr/local/bin/wrapped-chromium "$@"' > /usr/bin/chromium + chmod 755 /usr/bin/chromium +fi + +if [ -f /usr/share/applications/chromium.desktop ]; then + sed -i -E 's#^Exec=.*#Exec=/usr/local/bin/wrapped-chromium#g' /usr/share/applications/chromium.desktop || true +fi + +if [ -n "${USER_NAME:-}" ]; then + USER_HOME="/home/${USER_NAME}" + mkdir -p "${USER_HOME}/.local/share/applications" + if [ -f /usr/share/applications/chromium.desktop ]; then + cp /usr/share/applications/chromium.desktop "${USER_HOME}/.local/share/applications/chromium.desktop" + sed -i -E 's#^Exec=.*#Exec=/usr/local/bin/wrapped-chromium#g' "${USER_HOME}/.local/share/applications/chromium.desktop" 
|| true + chown "${USER_UID:-0}:${USER_GID:-0}" "${USER_HOME}/.local/share/applications/chromium.desktop" || true + fi +fi +EOF_CHROMIUM + chmod 755 /usr/local/bin/ensure-chromium-wrap + /usr/local/bin/ensure-chromium-wrap + printf '%s\n' 'DPkg::Post-Invoke {"/usr/local/bin/ensure-chromium-wrap || true";};' \ + > /etc/apt/apt.conf.d/99-chromium-wrap +fi + +if [ "${ARCH}" != "arm64" ]; then + if [ -x /usr/bin/google-chrome-stable ]; then + printf '%s\n' \ + '#!/bin/bash' \ + 'CHROME_BIN="/usr/bin/google-chrome-stable"' \ + 'exec "${CHROME_BIN}" --password-store=basic --in-process-gpu --no-sandbox ${CHROME_EXTRA_FLAGS} "$@"' \ + > /usr/local/bin/google-chrome-wrapped + chmod 755 /usr/local/bin/google-chrome-wrapped + for chrome_bin in google-chrome-beta google-chrome-unstable; do + if [ -x "/usr/bin/${chrome_bin}" ]; then + printf '%s\n' '#!/bin/bash' 'exec /usr/local/bin/google-chrome-wrapped "$@"' \ + > "/usr/local/bin/${chrome_bin}-wrapped" + chmod 755 "/usr/local/bin/${chrome_bin}-wrapped" + fi + done + for desktop in /usr/share/applications/google-chrome*.desktop; do + [ -f "$desktop" ] || continue + sed -i -E 's#^Exec=/usr/bin/google-chrome-stable(.*)#Exec=/usr/local/bin/google-chrome-wrapped\1#g' "$desktop" + done + if [ -n "${USER_NAME:-}" ]; then + USER_HOME="/home/${USER_NAME}" + mkdir -p "${USER_HOME}/.local/share/applications" + for desktop in /usr/share/applications/google-chrome*.desktop; do + [ -f "$desktop" ] || continue + base=$(basename "$desktop") + cp "$desktop" "${USER_HOME}/.local/share/applications/$base" + sed -i -E 's#^Exec=/usr/bin/google-chrome-stable(.*)#Exec=/usr/local/bin/google-chrome-wrapped\1#g' \ + "${USER_HOME}/.local/share/applications/$base" + chown "${USER_UID:-0}:${USER_GID:-0}" "${USER_HOME}/.local/share/applications/$base" + done + fi + cat > /usr/local/bin/ensure-google-chrome-wrap <<'EOF_CHROME' +#!/bin/bash +set -e + +if [ -f /usr/share/applications/google-chrome.desktop ]; then + sed -i -E 
's#^Exec=/usr/bin/google-chrome-stable(.*)#Exec=/usr/local/bin/google-chrome-wrapped\1#g' /usr/share/applications/google-chrome.desktop || true +fi + +if [ -n "${USER_NAME:-}" ]; then + USER_HOME="/home/${USER_NAME}" + mkdir -p "${USER_HOME}/.local/share/applications" + if [ -f /usr/share/applications/google-chrome.desktop ]; then + cp /usr/share/applications/google-chrome.desktop "${USER_HOME}/.local/share/applications/google-chrome.desktop" + sed -i -E 's#^Exec=/usr/bin/google-chrome-stable(.*)#Exec=/usr/local/bin/google-chrome-wrapped\1#g' \ + "${USER_HOME}/.local/share/applications/google-chrome.desktop" || true + chown "${USER_UID:-0}:${USER_GID:-0}" "${USER_HOME}/.local/share/applications/google-chrome.desktop" || true + fi +fi +EOF_CHROME + chmod 755 /usr/local/bin/ensure-google-chrome-wrap + /usr/local/bin/ensure-google-chrome-wrap + printf '%s\n' 'DPkg::Post-Invoke {"/usr/local/bin/ensure-google-chrome-wrap || true";};' \ + > /etc/apt/apt.conf.d/99-google-chrome-wrap + fi +fi +EOF + +# Keep default USER=root so s6 init can modify system paths. 
diff --git a/files/patches/21-xvfb-dri3.patch b/files/patches/21-xvfb-dri3.patch new file mode 100644 index 000000000..3dde58b10 --- /dev/null +++ b/files/patches/21-xvfb-dri3.patch @@ -0,0 +1,1130 @@ +diff --git glamor/Makefile.am glamor/Makefile.am +index aaf0aab..a7354c9 100644 +--- glamor/Makefile.am ++++ glamor/Makefile.am +@@ -1,8 +1,13 @@ +-noinst_LTLIBRARIES = libglamor.la libglamor_egl_stubs.la ++noinst_LTLIBRARIES = libglamor.la libglamor_egl.la libglamor_egl_stubs.la + + libglamor_la_LIBADD = $(GLAMOR_LIBS) + +-AM_CFLAGS = $(CWARNFLAGS) $(DIX_CFLAGS) $(GLAMOR_CFLAGS) ++AM_CFLAGS = \ ++ -I$(top_srcdir)/dri3 \ ++ $(CWARNFLAGS) \ ++ $(DIX_CFLAGS) \ ++ $(GLAMOR_CFLAGS) \ ++ $(GBM_CFLAGS) $(DRM_CFLAGS) $(EPOXY_CFLAGS) + + libglamor_la_SOURCES = \ + glamor.c \ +@@ -54,6 +59,15 @@ libglamor_la_SOURCES += \ + glamor_xv.c + endif + ++libglamor_egl_la_SOURCES = \ ++ glamor_egl.c ++ ++libglamor_egl_la_LIBADD = \ ++ $(GLAMOR_EGL_LIBS) \ ++ $(GBM_LIBS) \ ++ $(DRM_LIBS) \ ++ $(EPOXY_LIBS) ++ + libglamor_egl_stubs_la_SOURCES = \ + glamor_egl_stubs.c \ + glamor_egl_ext.h \ +diff --git glamor/glamor_egl.c glamor/glamor_egl.c +index 6e0fc65..2c5c9c8 100644 +--- glamor/glamor_egl.c ++++ glamor/glamor_egl.c +@@ -29,50 +29,67 @@ + + #include "dix-config.h" + +-#define GLAMOR_FOR_XORG + #include <unistd.h> + #include <fcntl.h> + #include <sys/ioctl.h> + #include <errno.h> +-#include <xf86.h> +-#include <xf86Priv.h> +-#include <xf86drm.h> + #define EGL_DISPLAY_NO_X_MESA + ++#include <xf86drm.h> + #include <gbm.h> + #include <drm_fourcc.h> +- + #include "glamor_egl.h" +- + #include "glamor.h" + #include "glamor_priv.h" + #include "dri3.h" ++#ifdef GLAMOR_FOR_XORG ++#include <xf86.h> ++#include <xf86Priv.h> ++#endif ++ ++static Bool ++glamor_egl_create_textured_pixmap(PixmapPtr pixmap, int handle, int stride); ++ ++_X_EXPORT Bool ++glamor_egl_create_textured_pixmap_from_gbm_bo(PixmapPtr pixmap, ++ struct gbm_bo *bo, ++ Bool used_modifiers); + + struct glamor_egl_screen_private { 
+ EGLDisplay display; + EGLContext context; + char *device_path; + +- CreateScreenResourcesProcPtr CreateScreenResources; +- CloseScreenProcPtr CloseScreen; + int fd; + struct gbm_device *gbm; + int dmabuf_capable; + + CloseScreenProcPtr saved_close_screen; + DestroyPixmapProcPtr saved_destroy_pixmap; ++#ifdef GLAMOR_FOR_XORG + xf86FreeScreenProc *saved_free_screen; ++#endif + }; + ++#ifdef GLAMOR_FOR_XORG + int xf86GlamorEGLPrivateIndex = -1; + +- + static struct glamor_egl_screen_private * + glamor_egl_get_screen_private(ScrnInfoPtr scrn) + { + return (struct glamor_egl_screen_private *) + scrn->privates[xf86GlamorEGLPrivateIndex].ptr; + } ++#else ++static DevPrivateKeyRec glamor_egl_screen_private_key; ++ ++static struct glamor_egl_screen_private * ++glamor_egl_get_screen_private(ScreenPtr screen) ++{ ++ return dixLookupPrivate(&screen->devPrivates, ++ &glamor_egl_screen_private_key); ++} ++#endif + + static void + glamor_egl_make_current(struct glamor_context *glamor_ctx) +@@ -138,22 +155,33 @@ glamor_create_texture_from_image(ScreenPtr screen, + struct gbm_device * + glamor_egl_get_gbm_device(ScreenPtr screen) + { ++#ifdef GLAMOR_FOR_XORG + struct glamor_egl_screen_private *glamor_egl = + glamor_egl_get_screen_private(xf86ScreenToScrn(screen)); ++#else ++ struct glamor_egl_screen_private *glamor_egl = ++ glamor_egl_get_screen_private(screen); ++#endif + return glamor_egl->gbm; + } + + Bool + glamor_egl_create_textured_screen(ScreenPtr screen, int handle, int stride) + { ++#ifdef GLAMOR_FOR_XORG + ScrnInfoPtr scrn = xf86ScreenToScrn(screen); ++#endif + PixmapPtr screen_pixmap; + + screen_pixmap = screen->GetScreenPixmap(screen); + + if (!glamor_egl_create_textured_pixmap(screen_pixmap, handle, stride)) { ++#ifdef GLAMOR_FOR_XORG + xf86DrvMsg(scrn->scrnIndex, X_ERROR, + "Failed to create textured screen."); ++#else ++ LogMessage(X_ERROR, "Failed to create textured screen."); ++#endif + return FALSE; + } + return TRUE; +@@ -170,22 +198,30 @@ 
glamor_egl_set_pixmap_image(PixmapPtr pixmap, EGLImageKHR image, + old = pixmap_priv->image; + if (old) { + ScreenPtr screen = pixmap->drawable.pScreen; +- ScrnInfoPtr scrn = xf86ScreenToScrn(screen); +- struct glamor_egl_screen_private *glamor_egl = glamor_egl_get_screen_private(scrn); +- ++#ifdef GLAMOR_FOR_XORG ++ struct glamor_egl_screen_private *glamor_egl = ++ glamor_egl_get_screen_private(xf86ScreenToScrn(screen)); ++#else ++ struct glamor_egl_screen_private *glamor_egl = ++ glamor_egl_get_screen_private(screen); ++#endif + eglDestroyImageKHR(glamor_egl->display, old); + } + pixmap_priv->image = image; + pixmap_priv->used_modifiers = used_modifiers; + } + +-Bool ++static Bool + glamor_egl_create_textured_pixmap(PixmapPtr pixmap, int handle, int stride) + { + ScreenPtr screen = pixmap->drawable.pScreen; +- ScrnInfoPtr scrn = xf86ScreenToScrn(screen); ++#ifdef GLAMOR_FOR_XORG ++ struct glamor_egl_screen_private *glamor_egl = ++ glamor_egl_get_screen_private(xf86ScreenToScrn(screen)); ++#else + struct glamor_egl_screen_private *glamor_egl = +- glamor_egl_get_screen_private(scrn); ++ glamor_egl_get_screen_private(screen); ++#endif + int ret, fd; + + /* GBM doesn't have an import path from handles, so we make a +@@ -193,19 +229,29 @@ glamor_egl_create_textured_pixmap(PixmapPtr pixmap, int handle, int stride) + */ + ret = drmPrimeHandleToFD(glamor_egl->fd, handle, O_CLOEXEC, &fd); + if (ret) { ++#ifdef GLAMOR_FOR_XORG ++ ScrnInfoPtr scrn = xf86ScreenToScrn(screen); + xf86DrvMsg(scrn->scrnIndex, X_ERROR, + "Failed to make prime FD for handle: %d\n", errno); ++#else ++ LogMessage(X_ERROR, "Failed to make prime FD for handle: %d\n", errno); ++#endif + return FALSE; + } + + if (!glamor_back_pixmap_from_fd(pixmap, fd, + pixmap->drawable.width, + pixmap->drawable.height, +- stride, +- pixmap->drawable.depth, ++ stride, pixmap->drawable.depth, + pixmap->drawable.bitsPerPixel)) { ++#ifdef GLAMOR_FOR_XORG ++ ScrnInfoPtr scrn = xf86ScreenToScrn(screen); + 
xf86DrvMsg(scrn->scrnIndex, X_ERROR, + "Failed to make import prime FD as pixmap: %d\n", errno); ++#else ++ LogMessage(X_ERROR, ++ "Failed to make import prime FD as pixmap: %d\n", errno); ++#endif + close(fd); + return FALSE; + } +@@ -214,21 +260,24 @@ glamor_egl_create_textured_pixmap(PixmapPtr pixmap, int handle, int stride) + return TRUE; + } + +-Bool ++_X_EXPORT Bool + glamor_egl_create_textured_pixmap_from_gbm_bo(PixmapPtr pixmap, + struct gbm_bo *bo, + Bool used_modifiers) + { + ScreenPtr screen = pixmap->drawable.pScreen; +- ScrnInfoPtr scrn = xf86ScreenToScrn(screen); + struct glamor_screen_private *glamor_priv = + glamor_get_screen_private(screen); + struct glamor_egl_screen_private *glamor_egl; + EGLImageKHR image; + GLuint texture; + Bool ret = FALSE; +- +- glamor_egl = glamor_egl_get_screen_private(scrn); ++#ifdef GLAMOR_FOR_XORG ++ glamor_egl = ++ glamor_egl_get_screen_private(xf86ScreenToScrn(screen)); ++#else ++ glamor_egl = glamor_egl_get_screen_private(screen); ++#endif + + glamor_make_current(glamor_priv); + +@@ -263,9 +312,13 @@ static Bool + glamor_make_pixmap_exportable(PixmapPtr pixmap, Bool modifiers_ok) + { + ScreenPtr screen = pixmap->drawable.pScreen; +- ScrnInfoPtr scrn = xf86ScreenToScrn(screen); ++#ifdef GLAMOR_FOR_XORG ++ struct glamor_egl_screen_private *glamor_egl = ++ glamor_egl_get_screen_private(xf86ScreenToScrn(screen)); ++#else + struct glamor_egl_screen_private *glamor_egl = +- glamor_egl_get_screen_private(scrn); ++ glamor_egl_get_screen_private(screen); ++#endif + struct glamor_pixmap_private *pixmap_priv = + glamor_get_pixmap_private(pixmap); + unsigned width = pixmap->drawable.width; +@@ -298,9 +351,15 @@ glamor_make_pixmap_exportable(PixmapPtr pixmap, Bool modifiers_ok) + format = GBM_FORMAT_R8; + break; + default: ++#ifdef GLAMOR_FOR_XORG ++ ScrnInfoPtr scrn = xf86ScreenToScrn(screen); + xf86DrvMsg(scrn->scrnIndex, X_ERROR, + "Failed to make %d depth, %dbpp pixmap exportable\n", + pixmap->drawable.depth, 
pixmap->drawable.bitsPerPixel); ++#else ++ LogMessage(X_ERROR, "Failed to make %d depth, %dbpp pixmap exportable\n", ++ pixmap->drawable.depth, pixmap->drawable.bitsPerPixel); ++#endif + return FALSE; + } + +@@ -330,20 +389,32 @@ glamor_make_pixmap_exportable(PixmapPtr pixmap, Bool modifiers_ok) + } + + if (!bo) { ++#ifdef GLAMOR_FOR_XORG ++ ScrnInfoPtr scrn = xf86ScreenToScrn(screen); + xf86DrvMsg(scrn->scrnIndex, X_ERROR, + "Failed to make %dx%dx%dbpp GBM bo\n", + width, height, pixmap->drawable.bitsPerPixel); ++#else ++ LogMessage(X_ERROR, "Failed to make %dx%dx%dbpp GBM bo\n", ++ width, height, pixmap->drawable.bitsPerPixel); ++#endif + return FALSE; + } + + exported = screen->CreatePixmap(screen, 0, 0, pixmap->drawable.depth, 0); + screen->ModifyPixmapHeader(exported, width, height, 0, 0, + gbm_bo_get_stride(bo), NULL); +- if (!glamor_egl_create_textured_pixmap_from_gbm_bo(exported, bo, +- used_modifiers)) { ++ if (!glamor_egl_create_textured_pixmap_from_gbm_bo ++ (exported, bo, used_modifiers)) { ++#ifdef GLAMOR_FOR_XORG ++ ScrnInfoPtr scrn = xf86ScreenToScrn(screen); + xf86DrvMsg(scrn->scrnIndex, X_ERROR, + "Failed to make %dx%dx%dbpp pixmap from GBM bo\n", + width, height, pixmap->drawable.bitsPerPixel); ++#else ++ LogMessage(X_ERROR, "Failed to make %dx%dx%dbpp pixmap from GBM bo\n", ++ width, height, pixmap->drawable.bitsPerPixel); ++#endif + screen->DestroyPixmap(exported); + gbm_bo_destroy(bo); + return FALSE; +@@ -373,8 +444,13 @@ glamor_make_pixmap_exportable(PixmapPtr pixmap, Bool modifiers_ok) + static struct gbm_bo * + glamor_gbm_bo_from_pixmap_internal(ScreenPtr screen, PixmapPtr pixmap) + { ++#ifdef GLAMOR_FOR_XORG + struct glamor_egl_screen_private *glamor_egl = + glamor_egl_get_screen_private(xf86ScreenToScrn(screen)); ++#else ++ struct glamor_egl_screen_private *glamor_egl = ++ glamor_egl_get_screen_private(screen); ++#endif + struct glamor_pixmap_private *pixmap_priv = + glamor_get_pixmap_private(pixmap); + +@@ -471,7 +547,11 @@ 
glamor_egl_fd_name_from_pixmap(ScreenPtr screen, + struct gbm_bo *bo; + int fd = -1; + ++#ifdef GLAMOR_FOR_XORG + glamor_egl = glamor_egl_get_screen_private(xf86ScreenToScrn(screen)); ++#else ++ glamor_egl = glamor_egl_get_screen_private(screen); ++#endif + + if (!glamor_make_pixmap_exportable(pixmap, FALSE)) + goto failure; +@@ -499,14 +579,15 @@ glamor_back_pixmap_from_fd(PixmapPtr pixmap, + CARD16 stride, CARD8 depth, CARD8 bpp) + { + ScreenPtr screen = pixmap->drawable.pScreen; +- ScrnInfoPtr scrn = xf86ScreenToScrn(screen); +- struct glamor_egl_screen_private *glamor_egl; ++#ifdef GLAMOR_FOR_XORG ++ struct glamor_egl_screen_private *glamor_egl = glamor_egl_get_screen_private(xf86ScreenToScrn(screen)); ++#else ++ struct glamor_egl_screen_private *glamor_egl = glamor_egl_get_screen_private(screen); ++#endif + struct gbm_bo *bo; + struct gbm_import_fd_data import_data = { 0 }; + Bool ret; + +- glamor_egl = glamor_egl_get_screen_private(scrn); +- + if (bpp != 32 || !(depth == 24 || depth == 32 || depth == 30) || width == 0 || height == 0) + return FALSE; + +@@ -559,7 +640,11 @@ glamor_pixmap_from_fds(ScreenPtr screen, + Bool ret = FALSE; + int i; + ++#ifdef GLAMOR_FOR_XORG + glamor_egl = glamor_egl_get_screen_private(xf86ScreenToScrn(screen)); ++#else ++ glamor_egl = glamor_egl_get_screen_private(screen); ++#endif + + pixmap = screen->CreatePixmap(screen, 0, 0, depth, 0); + +@@ -633,7 +718,11 @@ glamor_get_formats(ScreenPtr screen, + /* Explicitly zero the count as the caller may ignore the return value */ + *num_formats = 0; + ++#ifdef GLAMOR_FOR_XORG + glamor_egl = glamor_egl_get_screen_private(xf86ScreenToScrn(screen)); ++#else ++ glamor_egl = glamor_egl_get_screen_private(screen); ++#endif + + if (!glamor_egl->dmabuf_capable) + return TRUE; +@@ -673,7 +762,11 @@ glamor_get_modifiers(ScreenPtr screen, uint32_t format, + /* Explicitly zero the count as the caller may ignore the return value */ + *num_modifiers = 0; + ++#ifdef GLAMOR_FOR_XORG + glamor_egl = 
glamor_egl_get_screen_private(xf86ScreenToScrn(screen)); ++#else ++ glamor_egl = glamor_egl_get_screen_private(screen); ++#endif + + if (!glamor_egl->dmabuf_capable) + return FALSE; +@@ -703,6 +796,7 @@ glamor_get_modifiers(ScreenPtr screen, uint32_t format, + #endif + } + ++#ifdef GLAMOR_FOR_XORG + _X_EXPORT const char * + glamor_egl_get_driver_name(ScreenPtr screen) + { +@@ -717,15 +811,19 @@ glamor_egl_get_driver_name(ScreenPtr screen) + + return NULL; + } +- ++#endif + + static Bool + glamor_egl_destroy_pixmap(PixmapPtr pixmap) + { + ScreenPtr screen = pixmap->drawable.pScreen; +- ScrnInfoPtr scrn = xf86ScreenToScrn(screen); ++#ifdef GLAMOR_FOR_XORG ++ struct glamor_egl_screen_private *glamor_egl = ++ glamor_egl_get_screen_private(xf86ScreenToScrn(screen)); ++#else + struct glamor_egl_screen_private *glamor_egl = +- glamor_egl_get_screen_private(scrn); ++ glamor_egl_get_screen_private(screen); ++#endif + Bool ret; + + if (pixmap->refcnt == 1) { +@@ -770,21 +868,35 @@ glamor_egl_exchange_buffers(PixmapPtr front, PixmapPtr back) + static Bool + glamor_egl_close_screen(ScreenPtr screen) + { +- ScrnInfoPtr scrn; + struct glamor_egl_screen_private *glamor_egl; + struct glamor_pixmap_private *pixmap_priv; + PixmapPtr screen_pixmap; + +- scrn = xf86ScreenToScrn(screen); +- glamor_egl = glamor_egl_get_screen_private(scrn); ++#ifdef GLAMOR_FOR_XORG ++ glamor_egl = glamor_egl_get_screen_private(xf86ScreenToScrn(screen)); ++#else ++ glamor_egl = glamor_egl_get_screen_private(screen); ++#endif + screen_pixmap = screen->GetScreenPixmap(screen); + pixmap_priv = glamor_get_pixmap_private(screen_pixmap); + +- eglDestroyImageKHR(glamor_egl->display, pixmap_priv->image); +- pixmap_priv->image = NULL; +- ++ if (pixmap_priv && pixmap_priv->image) { ++ eglDestroyImageKHR(glamor_egl->display, pixmap_priv->image); ++ pixmap_priv->image = NULL; ++ } + screen->CloseScreen = glamor_egl->saved_close_screen; + ++#ifndef GLAMOR_FOR_XORG ++ if (glamor_egl->display != EGL_NO_DISPLAY) { ++ 
eglMakeCurrent(glamor_egl->display, ++ EGL_NO_SURFACE, EGL_NO_SURFACE, EGL_NO_CONTEXT); ++ lastGLContext = NULL; ++ eglTerminate(glamor_egl->display); ++ } ++ if (glamor_egl->gbm) ++ gbm_device_destroy(glamor_egl->gbm); ++#endif ++ + return screen->CloseScreen(screen); + } + +@@ -795,9 +907,13 @@ glamor_dri3_open_client(ClientPtr client, + RRProviderPtr provider, + int *fdp) + { +- ScrnInfoPtr scrn = xf86ScreenToScrn(screen); ++#ifdef GLAMOR_FOR_XORG + struct glamor_egl_screen_private *glamor_egl = +- glamor_egl_get_screen_private(scrn); ++ glamor_egl_get_screen_private(xf86ScreenToScrn(screen)); ++#else ++ struct glamor_egl_screen_private *glamor_egl = ++ glamor_egl_get_screen_private(screen); ++#endif + int fd; + drm_magic_t magic; + +@@ -854,12 +970,14 @@ static const dri3_screen_info_rec glamor_dri3_info = { + void + glamor_egl_screen_init(ScreenPtr screen, struct glamor_context *glamor_ctx) + { +- ScrnInfoPtr scrn = xf86ScreenToScrn(screen); ++#ifdef GLAMOR_FOR_XORG + struct glamor_egl_screen_private *glamor_egl = +- glamor_egl_get_screen_private(scrn); +-#ifdef DRI3 +- glamor_screen_private *glamor_priv = glamor_get_screen_private(screen); ++ glamor_egl_get_screen_private(xf86ScreenToScrn(screen)); ++#else ++ struct glamor_egl_screen_private *glamor_egl = ++ glamor_egl_get_screen_private(screen); + #endif ++ glamor_screen_private *glamor_priv = glamor_get_screen_private(screen); + + glamor_egl->saved_close_screen = screen->CloseScreen; + screen->CloseScreen = glamor_egl_close_screen; +@@ -890,8 +1008,13 @@ glamor_egl_screen_init(ScreenPtr screen, struct glamor_context *glamor_ctx) + glamor_egl->device_path = drmGetDeviceNameFromFd2(glamor_egl->fd); + + if (!dri3_screen_init(screen, &glamor_dri3_info)) { ++#ifdef GLAMOR_FOR_XORG ++ ScrnInfoPtr scrn = xf86ScreenToScrn(screen); + xf86DrvMsg(scrn->scrnIndex, X_ERROR, + "Failed to initialize DRI3.\n"); ++#else ++ LogMessage(X_ERROR, "Failed to initialize DRI3.\n"); ++#endif + } + } + #endif +@@ -915,6 +1038,7 @@ 
static void glamor_egl_cleanup(struct glamor_egl_screen_private *glamor_egl) + free(glamor_egl); + } + ++#ifdef GLAMOR_FOR_XORG + static void + glamor_egl_free_screen(ScrnInfoPtr scrn) + { +@@ -927,38 +1051,64 @@ glamor_egl_free_screen(ScrnInfoPtr scrn) + scrn->FreeScreen(scrn); + } + } ++#endif + +-Bool +-glamor_egl_init(ScrnInfoPtr scrn, int fd) ++#ifdef GLAMOR_FOR_XORG ++Bool glamor_egl_init(ScrnInfoPtr scrn, int fd) ++#else ++_X_EXPORT Bool glamor_egl_init(ScreenPtr screen, int fd) ++#endif + { + struct glamor_egl_screen_private *glamor_egl; + const GLubyte *renderer; + EGLConfig egl_config; + int n; ++#ifdef GLAMOR_FOR_XORG ++ ScreenPtr screen = scrn->pScreen; ++#endif + + glamor_egl = calloc(sizeof(*glamor_egl), 1); + if (glamor_egl == NULL) + return FALSE; ++#ifdef GLAMOR_FOR_XORG + if (xf86GlamorEGLPrivateIndex == -1) + xf86GlamorEGLPrivateIndex = xf86AllocateScrnInfoPrivateIndex(); +- + scrn->privates[xf86GlamorEGLPrivateIndex].ptr = glamor_egl; ++#else ++ if (!dixRegisterPrivateKey(&glamor_egl_screen_private_key, PRIVATE_SCREEN, 0)) { ++ free(glamor_egl); ++ return FALSE; ++ } ++ dixSetPrivate(&screen->devPrivates, &glamor_egl_screen_private_key, glamor_egl); ++#endif ++ + glamor_egl->fd = fd; + glamor_egl->gbm = gbm_create_device(glamor_egl->fd); + if (glamor_egl->gbm == NULL) { +- ErrorF("couldn't get display device\n"); ++ ErrorF("couldn't create gbm device\n"); + goto error; + } + +- glamor_egl->display = glamor_egl_get_display(EGL_PLATFORM_GBM_MESA, +- glamor_egl->gbm); +- if (!glamor_egl->display) { +- xf86DrvMsg(scrn->scrnIndex, X_ERROR, "eglGetDisplay() failed\n"); ++ glamor_egl->display = eglGetPlatformDisplayEXT(EGL_PLATFORM_GBM_KHR, ++ glamor_egl->gbm, NULL); ++ ++ if (glamor_egl->display == EGL_NO_DISPLAY) { ++#ifdef GLAMOR_FOR_XORG ++ xf86DrvMsg(scrn->scrnIndex, X_ERROR, ++ "eglGetPlatformDisplayEXT() failed with error %d\n", eglGetError()); ++#else ++ LogMessage(X_ERROR, ++ "eglGetPlatformDisplayEXT() failed with error %d\n", eglGetError()); 
++#endif + goto error; + } + + if (!eglInitialize(glamor_egl->display, NULL, NULL)) { ++#ifdef GLAMOR_FOR_XORG + xf86DrvMsg(scrn->scrnIndex, X_ERROR, "eglInitialize() failed\n"); ++#else ++ LogMessage(X_ERROR, "eglInitialize() failed\n"); ++#endif + glamor_egl->display = EGL_NO_DISPLAY; + goto error; + } +@@ -978,6 +1128,7 @@ glamor_egl_init(ScrnInfoPtr scrn, int fd) + + GLAMOR_CHECK_EGL_EXTENSION(KHR_surfaceless_context); + ++ glamor_egl->context = EGL_NO_CONTEXT; + if (eglBindAPI(EGL_OPENGL_API)) { + static const EGLint config_attribs_core[] = { + EGL_CONTEXT_OPENGL_PROFILE_MASK_KHR, +@@ -996,25 +1147,10 @@ glamor_egl_init(ScrnInfoPtr scrn, int fd) + NULL, EGL_NO_CONTEXT, + config_attribs_core); + +- if (glamor_egl->context == EGL_NO_CONTEXT) ++ if (glamor_egl->context == EGL_NO_CONTEXT) { + glamor_egl->context = eglCreateContext(glamor_egl->display, + NULL, EGL_NO_CONTEXT, + config_attribs); +- } +- +- if (glamor_egl->context != EGL_NO_CONTEXT) { +- if (!eglMakeCurrent(glamor_egl->display, +- EGL_NO_SURFACE, EGL_NO_SURFACE, glamor_egl->context)) { +- xf86DrvMsg(scrn->scrnIndex, X_ERROR, +- "Failed to make GL context current\n"); +- goto error; +- } +- +- if (epoxy_gl_version() < 21) { +- xf86DrvMsg(scrn->scrnIndex, X_INFO, +- "glamor: Ignoring GL < 2.1, falling back to GLES.\n"); +- eglDestroyContext(glamor_egl->display, glamor_egl->context); +- glamor_egl->context = EGL_NO_CONTEXT; + } + } + +@@ -1024,48 +1160,75 @@ glamor_egl_init(ScrnInfoPtr scrn, int fd) + EGL_NONE + }; + if (!eglBindAPI(EGL_OPENGL_ES_API)) { ++#ifdef GLAMOR_FOR_XORG + xf86DrvMsg(scrn->scrnIndex, X_ERROR, + "glamor: Failed to bind either GL or GLES APIs.\n"); ++#else ++ LogMessage(X_ERROR, "glamor: Failed to bind either GL or GLES APIs.\n"); ++#endif + goto error; + } + + if (!eglChooseConfig(glamor_egl->display, NULL, &egl_config, 1, &n)) { ++#ifdef GLAMOR_FOR_XORG + xf86DrvMsg(scrn->scrnIndex, X_ERROR, + "glamor: No acceptable EGL configs found\n"); ++#else ++ LogMessage(X_ERROR, "glamor: 
No acceptable EGL configs found\n"); ++#endif + goto error; + } + + glamor_egl->context = eglCreateContext(glamor_egl->display, + egl_config, EGL_NO_CONTEXT, + config_attribs); ++ } + +- if (glamor_egl->context == EGL_NO_CONTEXT) { +- xf86DrvMsg(scrn->scrnIndex, X_ERROR, +- "glamor: Failed to create GL or GLES2 contexts\n"); +- goto error; +- } ++ if (glamor_egl->context == EGL_NO_CONTEXT) { ++#ifdef GLAMOR_FOR_XORG ++ xf86DrvMsg(scrn->scrnIndex, X_ERROR, ++ "glamor: Failed to create GL or GLES2 contexts\n"); ++#else ++ LogMessage(X_ERROR, "glamor: Failed to create GL or GLES2 contexts\n"); ++#endif ++ goto error; ++ } + +- if (!eglMakeCurrent(glamor_egl->display, +- EGL_NO_SURFACE, EGL_NO_SURFACE, glamor_egl->context)) { +- xf86DrvMsg(scrn->scrnIndex, X_ERROR, +- "Failed to make GLES2 context current\n"); +- goto error; +- } ++ if (!eglMakeCurrent(glamor_egl->display, ++ EGL_NO_SURFACE, EGL_NO_SURFACE, glamor_egl->context)) { ++#ifdef GLAMOR_FOR_XORG ++ xf86DrvMsg(scrn->scrnIndex, X_ERROR, ++ "Failed to make EGL context current\n"); ++#else ++ LogMessage(X_ERROR, "Failed to make EGL context current\n"); ++#endif ++ goto error; + } + + renderer = glGetString(GL_RENDERER); + if (!renderer) { ++#ifdef GLAMOR_FOR_XORG + xf86DrvMsg(scrn->scrnIndex, X_ERROR, + "glGetString() returned NULL, your GL is broken\n"); ++#else ++ LogMessage(X_ERROR, "glGetString() returned NULL, your GL is broken\n"); ++#endif + goto error; + } + if (strstr((const char *)renderer, "llvmpipe")) { +- if (scrn->confScreen->num_gpu_devices) ++#ifdef GLAMOR_FOR_XORG ++ if (scrn->confScreen->num_gpu_devices) { + xf86DrvMsg(scrn->scrnIndex, X_INFO, + "Allowing glamor on llvmpipe for PRIME\n"); +- else { ++ } else ++#endif ++ { ++#ifdef GLAMOR_FOR_XORG + xf86DrvMsg(scrn->scrnIndex, X_INFO, + "Refusing to try glamor on llvmpipe\n"); ++#else ++ LogMessage(X_INFO, "Refusing to try glamor on llvmpipe\n"); ++#endif + goto error; + } + } +@@ -1077,29 +1240,41 @@ glamor_egl_init(ScrnInfoPtr scrn, int fd) + 
lastGLContext = NULL; + + if (!epoxy_has_gl_extension("GL_OES_EGL_image")) { ++#ifdef GLAMOR_FOR_XORG + xf86DrvMsg(scrn->scrnIndex, X_ERROR, + "glamor acceleration requires GL_OES_EGL_image\n"); ++#else ++ LogMessage(X_ERROR, "glamor acceleration requires GL_OES_EGL_image\n"); ++#endif + goto error; + } + ++#ifdef GLAMOR_FOR_XORG + xf86DrvMsg(scrn->scrnIndex, X_INFO, "glamor X acceleration enabled on %s\n", + renderer); ++#endif + + #ifdef GBM_BO_WITH_MODIFIERS + if (epoxy_has_egl_extension(glamor_egl->display, + "EGL_EXT_image_dma_buf_import") && + epoxy_has_egl_extension(glamor_egl->display, + "EGL_EXT_image_dma_buf_import_modifiers")) { ++#ifdef GLAMOR_FOR_XORG + if (xf86Info.debug != NULL) +- glamor_egl->dmabuf_capable = !!strstr(xf86Info.debug, +- "dmabuf_capable"); ++ glamor_egl->dmabuf_capable = ++ !!strstr(xf86Info.debug, "dmabuf_capable"); + else + glamor_egl->dmabuf_capable = FALSE; ++#else ++ glamor_egl->dmabuf_capable = TRUE; ++#endif + } + #endif + ++#ifdef GLAMOR_FOR_XORG + glamor_egl->saved_free_screen = scrn->FreeScreen; + scrn->FreeScreen = glamor_egl_free_screen; ++#endif + return TRUE; + + error: +diff --git glamor/glamor_egl.h glamor/glamor_egl.h +index 8f6ed78..5edc016 100644 +--- glamor/glamor_egl.h ++++ glamor/glamor_egl.h +@@ -27,6 +27,12 @@ + #ifndef GLAMOR_EGL_H + #define GLAMOR_EGL_H + ++#include <scrnintstr.h> ++#include <pixmapstr.h> ++ ++struct gbm_bo; ++struct glamor_context; ++ + #define MESA_EGL_NO_X11_HEADERS + #define EGL_NO_X11 + #include <epoxy/gl.h> +@@ -74,4 +80,37 @@ glamor_egl_get_display(EGLint type, void *native) + return eglGetDisplay(native); + } + ++#ifdef GLAMOR_FOR_XORG ++ ++#define GLAMOR_EGL_MODULE_NAME "glamoregl" ++ ++extern _X_EXPORT Bool glamor_egl_init(ScrnInfoPtr scrn, int fd); ++ ++extern _X_EXPORT Bool glamor_egl_init_textured_pixmap(ScreenPtr screen); ++ ++extern _X_EXPORT Bool glamor_egl_create_textured_screen(ScreenPtr screen, ++ int handle, int stride); ++ ++extern _X_EXPORT Bool 
glamor_egl_create_textured_pixmap(PixmapPtr pixmap, ++ int handle, int stride); ++ ++extern _X_EXPORT const char *glamor_egl_get_driver_name(ScreenPtr screen); ++ ++#else ++/* ++ * These are the glamor EGL interfaces required for DIX-only servers ++ * like Xvfb that do not use the full Xorg DDX framework. ++ */ ++extern _X_EXPORT Bool glamor_egl_init(ScreenPtr screen, int fd); + #endif ++ ++/* This function is used by both Xorg DDX drivers and Xvfb. */ ++extern _X_EXPORT Bool ++glamor_egl_create_textured_pixmap_from_gbm_bo(PixmapPtr pixmap, ++ struct gbm_bo *bo, ++ Bool used_modifiers); ++ ++extern _X_EXPORT void glamor_egl_screen_init(ScreenPtr screen, ++ struct glamor_context *glamor_ctx); ++ ++#endif /* GLAMOR_EGL_H */ +diff --git glamor/meson.build glamor/meson.build +index 268af59..48d8d68 100644 +--- glamor/meson.build ++++ glamor/meson.build +@@ -40,13 +40,20 @@ endif + + epoxy_dep = dependency('epoxy') + ++if get_option('glamor') == 'true' ++ gbm_dep = dependency('gbm', version: '>= 17.1.0') ++ drm_dep = dependency('libdrm') ++endif ++ + glamor = static_library('glamor', + srcs_glamor, + include_directories: inc, + dependencies: [ + common_dep, + epoxy_dep, ++ gbm_dep, + ], ++ install: false, + ) + + glamor_egl_stubs = static_library('glamor_egl_stubs', +@@ -55,6 +62,18 @@ glamor_egl_stubs = static_library('glamor_egl_stubs', + dependencies: common_dep, + ) + ++glamor_egl = static_library('glamor_egl', ++ 'glamor_egl.c', ++ include_directories: inc, ++ dependencies: [ ++ common_dep, ++ epoxy_dep, ++ gbm_dep, ++ drm_dep, ++ ], ++ install: false, ++) ++ + if build_xorg + install_data('glamor.h', install_dir: xorgsdkdir) + endif +diff --git hw/vfb/InitOutput.c hw/vfb/InitOutput.c +index 48efb61..5475c28 100644 +--- hw/vfb/InitOutput.c ++++ hw/vfb/InitOutput.c +@@ -55,6 +55,7 @@ from The Open Group. 
+ #endif /* HAVE_MMAP */ + #include <sys/stat.h> + #include <errno.h> ++static char *vfbDeviceNode = NULL; + #ifndef WIN32 + #include <sys/param.h> + #endif +@@ -68,6 +69,15 @@ from The Open Group. + #include "glx_extinit.h" + #include "randrstr.h" + ++#ifdef GLAMOR_HAS_GBM ++#include <glamor.h> ++#include <glamor_egl.h> ++#include <gbm.h> ++#include <unistd.h> ++#include <fcntl.h> ++#include <errno.h> ++#endif ++ + #define VFB_DEFAULT_WIDTH 1280 + #define VFB_DEFAULT_HEIGHT 1024 + #define VFB_DEFAULT_DEPTH 24 +@@ -101,6 +111,12 @@ typedef struct { + #ifdef HAS_SHM + int shmid; + #endif ++#ifdef GLAMOR_HAS_GBM ++ int fd; ++ CreateScreenResourcesProcPtr createScreenResources; ++ struct gbm_device *gbm; ++ struct gbm_bo *front_bo; ++#endif + } vfbScreenInfo, *vfbScreenInfoPtr; + + static int vfbNumScreens; +@@ -255,6 +271,9 @@ ddxUseMsg(void) + #ifdef HAS_SHM + ErrorF("-shmem put framebuffers in shared memory\n"); + #endif ++#ifdef GLAMOR_HAS_GBM ++ ErrorF("-vfbdevice device-path use specified DRI render node for glamor/DRI3\n"); ++#endif + } + + int +@@ -375,6 +394,14 @@ ddxProcessArgument(int argc, char *argv[], int i) + } + #endif + ++#ifdef GLAMOR_HAS_GBM ++ if (strcmp(argv[i], "-vfbdevice") == 0) { ++ CHECK_FOR_REQUIRED_ARGUMENTS(1); ++ vfbDeviceNode = argv[++i]; ++ return 2; ++ } ++#endif ++ + return 0; + } + +@@ -727,9 +754,104 @@ vfbCloseScreen(ScreenPtr pScreen) + (*pScreen->DestroyPixmap) (pScreen->devPrivate); + pScreen->devPrivate = NULL; + ++#ifdef GLAMOR_HAS_GBM ++ if (pvfb->fd >= 0) { ++ if (pvfb->front_bo) { ++ gbm_bo_destroy(pvfb->front_bo); ++ pvfb->front_bo = NULL; ++ } ++ close(pvfb->fd); ++ pvfb->fd = -1; ++ } ++#endif ++ + return pScreen->CloseScreen(pScreen); + } + ++#ifdef GLAMOR_HAS_GBM ++static Bool ++vfbCreateScreenResources(ScreenPtr pScreen) ++{ ++ vfbScreenInfoPtr pvfb = &vfbScreens[pScreen->myNum]; ++ PixmapPtr pixmap; ++ Bool ret; ++ ++ pScreen->CreateScreenResources = pvfb->createScreenResources; ++ ret = 
pScreen->CreateScreenResources(pScreen); ++ pScreen->CreateScreenResources = vfbCreateScreenResources; ++ ++ if (!ret) ++ return FALSE; ++ ++ pixmap = pScreen->GetScreenPixmap(pScreen); ++ ++ /* We don't support modifiers for the screen pixmap */ ++ if (!glamor_egl_create_textured_pixmap_from_gbm_bo(pixmap, pvfb->front_bo, ++ FALSE)) { ++ LogMessage(X_ERROR, "glamor_egl_create_textured_pixmap_from_gbm_bo() failed\n"); ++ /* The screen is unusable, but there's no good way to unwind here */ ++ return FALSE; ++ } ++ ++ return TRUE; ++} ++ ++ ++static void ++vfbDRIInit(ScreenPtr pScreen) ++{ ++ vfbScreenInfoPtr pvfb = &vfbScreens[pScreen->myNum]; ++ const char *dri_node = vfbDeviceNode; ++ const char *error_msg = NULL; ++ ++ pvfb->fd = open(dri_node, O_RDWR | O_CLOEXEC); ++ if (pvfb->fd < 0) { ++ error_msg = "Failed to open DRI render node"; ++ goto fail; ++ } ++ ++ if (!glamor_egl_init(pScreen, pvfb->fd)) { ++ error_msg = "Failed to initialize glamor EGL"; ++ goto fail_fd; ++ } ++ ++ pvfb->gbm = glamor_egl_get_gbm_device(pScreen); ++ if (!pvfb->gbm) { ++ error_msg = "Failed to get gbm device"; ++ goto fail_fd; ++ } ++ ++ pvfb->front_bo = gbm_bo_create(pvfb->gbm, ++ pScreen->width, pScreen->height, ++ GBM_FORMAT_ARGB8888, ++ GBM_BO_USE_RENDERING); ++ if (!pvfb->front_bo) { ++ error_msg = "Failed to create front buffer"; ++ goto fail_fd; ++ } ++ ++ if (!glamor_init(pScreen, GLAMOR_USE_EGL_SCREEN)) { ++ error_msg = "Failed to initialize glamor"; ++ goto fail_bo; ++ } ++ ++ pvfb->createScreenResources = pScreen->CreateScreenResources; ++ pScreen->CreateScreenResources = vfbCreateScreenResources; ++ LogMessage(X_INFO, "glamor/DRI3 initialized for VFB screen %d on %s\n", pScreen->myNum, dri_node); ++ return; ++ ++ fail_bo: ++ gbm_bo_destroy(pvfb->front_bo); ++ pvfb->front_bo = NULL; ++ fail_fd: ++ close(pvfb->fd); ++ pvfb->fd = -1; ++ fail: ++ if (error_msg) ++ LogMessage(X_ERROR, "%s. 
Disabling GLAMOR/DRI3.\n", error_msg); ++} ++#endif ++ + static Bool + vfbRROutputValidateMode(ScreenPtr pScreen, + RROutputPtr output, +@@ -922,12 +1044,15 @@ vfbScreenInit(ScreenPtr pScreen, int argc, char **argv) + + ret = fbScreenInit(pScreen, pbits, pvfb->width, pvfb->height, + dpix, dpiy, pvfb->paddedWidth, pvfb->bitsPerPixel); +- if (ret && Render) +- fbPictureInit(pScreen, 0, 0); +- + if (!ret) + return FALSE; +- ++ if (Render) { ++ fbPictureInit(pScreen, 0, 0); ++#ifdef GLAMOR_HAS_GBM ++ if (vfbDeviceNode) ++ vfbDRIInit(pScreen); ++#endif ++ } + if (!vfbRandRInit(pScreen)) + return FALSE; + +diff --git hw/vfb/Makefile.am hw/vfb/Makefile.am +index a4b4526..725ffca 100644 +--- hw/vfb/Makefile.am ++++ hw/vfb/Makefile.am +@@ -3,7 +3,11 @@ SUBDIRS = man + bin_PROGRAMS = Xvfb + + AM_CFLAGS = -DHAVE_DIX_CONFIG_H \ +- $(XVFBMODULES_CFLAGS) \ ++ $(GBM_CFLAGS) \ ++ $(DRM_CFLAGS) \ ++ -I$(top_srcdir)/glamor \ ++ -DGLAMOR_HAS_GBM=1 \ ++ $(XVFBMODULES_CFLAGS) \ + $(DIX_CFLAGS) + + SRCS = InitInput.c \ +@@ -14,13 +18,17 @@ SRCS = InitInput.c \ + Xvfb_SOURCES = $(SRCS) + + XVFB_LIBS = \ +- @XVFB_LIBS@ \ ++ $(top_builddir)/glamor/libglamor.la \ ++ $(top_builddir)/glamor/libglamor_egl.la \ ++ @XVFB_LIBS@ \ + $(MAIN_LIB) \ + $(XSERVER_LIBS) \ + $(top_builddir)/Xi/libXistubs.la + +-Xvfb_LDADD = $(XVFB_LIBS) $(XVFB_SYS_LIBS) $(XSERVER_SYS_LIBS) +-Xvfb_DEPENDENCIES = $(XVFB_LIBS) ++Xvfb_LDADD = $(XVFB_LIBS) $(XVFB_SYS_LIBS) $(XSERVER_SYS_LIBS) $(GBM_LIBS) $(DRM_LIBS) $(EPOXY_LIBS) ++Xvfb_DEPENDENCIES = \ ++ $(top_builddir)/glamor/libglamor.la \ ++ $(top_builddir)/glamor/libglamor_egl.la + Xvfb_LDFLAGS = $(LD_EXPORT_SYMBOLS_FLAG) + + relink: +diff --git hw/vfb/meson.build hw/vfb/meson.build +index 7332866..de56e86 100644 +--- hw/vfb/meson.build ++++ hw/vfb/meson.build +@@ -5,11 +5,18 @@ srcs = [ + '../../mi/miinitext.h', + ] + ++glamor_deps = [] ++glamor_link = [] ++if get_option('glamor') == 'true' ++ glamor_deps += dependency('gbm') ++ glamor_link += [glamor, glamor_egl] 
++endif ++ + xvfb_server = executable( + 'Xvfb', + srcs, + include_directories: inc, +- dependencies: common_dep, ++ dependencies: [common_dep] + glamor_deps, + link_with: [ + libxserver_main, + libxserver_fb, +@@ -18,7 +25,7 @@ xvfb_server = executable( + libxserver_xi_stubs, + libxserver_glx, + libglxvnd, +- ], ++ ] + glamor_link, + install: true, + ) + diff --git a/files/push-base-image.sh b/files/push-base-image.sh new file mode 100755 index 000000000..abd96a66f --- /dev/null +++ b/files/push-base-image.sh @@ -0,0 +1,67 @@ +#!/usr/bin/env bash +set -euo pipefail + +IMAGE_NAME=${IMAGE_NAME:-ghcr.io/tatsuyai713/webtop-kde} +VERSION=${VERSION:-1.0.0} +UBUNTU_VERSION=${UBUNTU_VERSION:-24.04} +ARCH_OVERRIDE=${ARCH_OVERRIDE:-} +PLATFORM_OVERRIDE=${PLATFORM_OVERRIDE:-} + +usage() { + cat <<EOF +Usage: $0 [-a arch] [-i image] [-v version] [-u ubuntu_version] [-p platform] + -a, --arch Target arch (amd64 or arm64). Default: host arch + -i, --image Image name (default: ${IMAGE_NAME}) + -v, --version Version tag (default: ${VERSION}) + -u, --ubuntu Ubuntu version (22.04 or 24.04). Default: ${UBUNTU_VERSION} + -p, --platform Docker platform (e.g. linux/amd64 or linux/arm64). 
Default: derived from arch +EOF +} + +while [[ $# -gt 0 ]]; do + case "$1" in + -a|--arch) ARCH_OVERRIDE=$2; shift 2 ;; + -i|--image) IMAGE_NAME=$2; shift 2 ;; + -v|--version) VERSION=$2; shift 2 ;; + -u|--ubuntu) UBUNTU_VERSION=$2; shift 2 ;; + -p|--platform) PLATFORM_OVERRIDE=$2; shift 2 ;; + -h|--help) usage; exit 0 ;; + *) echo "Unknown option: $1" >&2; usage; exit 1 ;; + esac +done + +HOST_ARCH=$(uname -m) +PLATFORM_ARCH_HINT="" +if [[ -n "${PLATFORM_OVERRIDE}" ]]; then + case "${PLATFORM_OVERRIDE}" in + linux/amd64) PLATFORM_ARCH_HINT=amd64 ;; + linux/arm64) PLATFORM_ARCH_HINT=arm64 ;; + *) PLATFORM_ARCH_HINT="" ;; + esac +fi + +if [[ -n "${ARCH_OVERRIDE}" ]]; then + TARGET_ARCH=${ARCH_OVERRIDE} +elif [[ -n "${PLATFORM_ARCH_HINT}" ]]; then + TARGET_ARCH=${PLATFORM_ARCH_HINT} +else + TARGET_ARCH=${HOST_ARCH} +fi + +case "${TARGET_ARCH}" in + x86_64|amd64) TARGET_ARCH=amd64 ;; + aarch64|arm64) TARGET_ARCH=arm64 ;; + *) echo "Unsupported arch: ${TARGET_ARCH}. Use amd64 or arm64." >&2; exit 1 ;; +esac + +IMAGE_TAG="${IMAGE_NAME}-base-${TARGET_ARCH}-u${UBUNTU_VERSION}:${VERSION}" + +if ! docker images --format '{{.Repository}}:{{.Tag}}' | grep -q "^${IMAGE_TAG}$"; then + echo "Local image not found: ${IMAGE_TAG}" >&2 + echo "Build it first (e.g. ./files/build-base-image.sh -a ${TARGET_ARCH} --ubuntu ${UBUNTU_VERSION} -v ${VERSION})." >&2 + exit 1 +fi + +echo "Pushing ${IMAGE_TAG}" +echo "If you haven't logged in, run: docker login ghcr.io" +docker push "${IMAGE_TAG}" diff --git a/files/ubuntu-root/defaults/Xresources b/files/ubuntu-root/defaults/Xresources new file mode 100644 index 000000000..034fb8360 --- /dev/null +++ b/files/ubuntu-root/defaults/Xresources @@ -0,0 +1,16 @@ +! X resources for high DPI displays +! This file is loaded by the X session startup + +! DPI setting +Xft.dpi: 144 + +! Font rendering +Xft.antialias: 1 +Xft.hinting: 1 +Xft.hintstyle: hintfull +Xft.rgba: rgb +Xft.lcdfilter: lcddefault + +! 
Cursor size +Xcursor.size: 32 +Xcursor.theme: breeze_cursors diff --git a/files/ubuntu-root/defaults/autostart b/files/ubuntu-root/defaults/autostart new file mode 100644 index 000000000..25cee50f8 --- /dev/null +++ b/files/ubuntu-root/defaults/autostart @@ -0,0 +1 @@ +st diff --git a/files/ubuntu-root/defaults/autostart_wayland b/files/ubuntu-root/defaults/autostart_wayland new file mode 100644 index 000000000..e1aeb2352 --- /dev/null +++ b/files/ubuntu-root/defaults/autostart_wayland @@ -0,0 +1 @@ +foot diff --git a/files/ubuntu-root/defaults/default.conf b/files/ubuntu-root/defaults/default.conf new file mode 100644 index 000000000..d9986d290 --- /dev/null +++ b/files/ubuntu-root/defaults/default.conf @@ -0,0 +1,273 @@ +server { + listen 3000 default_server; + listen [::]:3000 default_server; + server_name _; + port_in_redirect off; + absolute_redirect off; + + location SUBFOLDER { + auth_request /auth/verify; + error_page 401 = @auth_login; + alias /usr/share/selkies/web/; + index index.html index.htm; + try_files $uri $uri/ /index.html; + } + location /devmode { + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection "upgrade"; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_http_version 1.1; + proxy_read_timeout 3600s; + proxy_send_timeout 3600s; + proxy_connect_timeout 3600s; + proxy_buffering off; + client_max_body_size 10M; + proxy_pass http://127.0.0.1:5173; + } + location SUBFOLDERwebsocket { + auth_request /auth/verify; + error_page 401 = @auth_login; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection "upgrade"; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_http_version 1.1; + proxy_read_timeout 3600s; + proxy_send_timeout 3600s; + 
proxy_connect_timeout 3600s; + proxy_buffering off; + client_max_body_size 10M; + proxy_pass http://127.0.0.1:CWS; + } + # WebSocket endpoint for selkies signalling + location /ws { + auth_request /auth/verify; + error_page 401 = @auth_login; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection "upgrade"; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_http_version 1.1; + proxy_read_timeout 3600s; + proxy_send_timeout 3600s; + proxy_connect_timeout 3600s; + proxy_buffering off; + client_max_body_size 10M; + proxy_pass http://127.0.0.1:8082; + } + # WebRTC signalling endpoint for selkies + location /webrtc/signalling/ { + auth_request /auth/verify; + error_page 401 = @auth_login; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection "upgrade"; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_http_version 1.1; + proxy_read_timeout 3600s; + proxy_send_timeout 3600s; + proxy_connect_timeout 3600s; + proxy_buffering off; + client_max_body_size 10M; + proxy_pass http://127.0.0.1:8082/ws; + } + # Health check endpoint for selkies + location /health { + auth_request /auth/verify; + error_page 401 = @auth_login; + proxy_http_version 1.1; + proxy_read_timeout 3600s; + proxy_send_timeout 3600s; + proxy_connect_timeout 3600s; + proxy_buffering off; + client_max_body_size 10M; + proxy_pass http://127.0.0.1:8082; + } + location SUBFOLDERfiles { + auth_request /auth/verify; + error_page 401 = @auth_login; + fancyindex on; + fancyindex_footer SUBFOLDERnginx/footer.html; + fancyindex_header SUBFOLDERnginx/header.html; + alias REPLACE_DOWNLOADS_PATH/; + if (-f $request_filename) { + add_header Content-Disposition "attachment"; + add_header X-Content-Type-Options 
"nosniff"; + } + } + error_page 500 502 503 504 /50x.html; + location = SUBFOLDER50x.html { + root /usr/share/selkies/web/; + } + location /auth/ { + auth_request off; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_pass http://127.0.0.1:6060; + } + location @auth_login { + return 302 /auth/login; + } + location ~* ^/auth/%2[fF].* { + return 302 /; + } + location = /auth/verify { + internal; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_set_header Cookie $http_cookie; + proxy_pass_request_body off; + proxy_set_header Content-Length ""; + proxy_set_header X-Original-URI $request_uri; + proxy_pass http://127.0.0.1:6060/auth/verify; + } +} + +server { + listen 3001 ssl; + listen [::]:3001 ssl; + server_name _; + port_in_redirect off; + absolute_redirect off; + ssl_certificate /config/ssl/cert.pem; + ssl_certificate_key /config/ssl/cert.key; + + location SUBFOLDER { + auth_request /auth/verify; + error_page 401 = @auth_login; + alias /usr/share/selkies/web/; + index index.html index.htm; + try_files $uri $uri/ /index.html; + } + location /devmode { + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection "upgrade"; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_http_version 1.1; + proxy_read_timeout 3600s; + proxy_send_timeout 3600s; + proxy_connect_timeout 3600s; + proxy_buffering off; + client_max_body_size 10M; + proxy_pass http://127.0.0.1:5173; + } + location SUBFOLDERwebsocket { + auth_request /auth/verify; + error_page 401 = @auth_login; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection "upgrade"; + 
proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_http_version 1.1; + proxy_read_timeout 3600s; + proxy_send_timeout 3600s; + proxy_connect_timeout 3600s; + proxy_buffering off; + client_max_body_size 10M; + proxy_pass http://127.0.0.1:CWS; + } + # WebSocket endpoint for selkies signalling + location /ws { + auth_request /auth/verify; + error_page 401 = @auth_login; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection "upgrade"; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_http_version 1.1; + proxy_read_timeout 3600s; + proxy_send_timeout 3600s; + proxy_connect_timeout 3600s; + proxy_buffering off; + client_max_body_size 10M; + proxy_pass http://127.0.0.1:8082; + } + # WebRTC signalling endpoint for selkies + location /webrtc/signalling/ { + auth_request /auth/verify; + error_page 401 = @auth_login; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection "upgrade"; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_http_version 1.1; + proxy_read_timeout 3600s; + proxy_send_timeout 3600s; + proxy_connect_timeout 3600s; + proxy_buffering off; + client_max_body_size 10M; + proxy_pass http://127.0.0.1:8082/ws; + } + # Health check endpoint for selkies + location /health { + auth_request /auth/verify; + error_page 401 = @auth_login; + proxy_http_version 1.1; + proxy_read_timeout 3600s; + proxy_send_timeout 3600s; + proxy_connect_timeout 3600s; + proxy_buffering off; + client_max_body_size 10M; + proxy_pass http://127.0.0.1:8082; + } + location SUBFOLDERfiles { + auth_request /auth/verify; + error_page 401 = 
@auth_login; + fancyindex on; + fancyindex_footer SUBFOLDERnginx/footer.html; + fancyindex_header SUBFOLDERnginx/header.html; + alias REPLACE_DOWNLOADS_PATH/; + if (-f $request_filename) { + add_header Content-Disposition "attachment"; + add_header X-Content-Type-Options "nosniff"; + } + } + error_page 500 502 503 504 /50x.html; + location = SUBFOLDER50x.html { + root /usr/share/selkies/web/; + } + location /auth/ { + auth_request off; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_pass http://127.0.0.1:6060; + } + location @auth_login { + return 302 /auth/login; + } + location ~* ^/auth/%2[fF].* { + return 302 /; + } + location = /auth/verify { + internal; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_pass http://127.0.0.1:6060/auth/verify; + } +} diff --git a/files/ubuntu-root/defaults/environment b/files/ubuntu-root/defaults/environment new file mode 100644 index 000000000..8fb0dd1d7 --- /dev/null +++ b/files/ubuntu-root/defaults/environment @@ -0,0 +1,17 @@ +# Global environment variables for all applications +# This file is sourced by the desktop session and all applications + +# DPI will be set dynamically by init-config +# QT_SCALE_FACTOR will be calculated based on DPI +# GDK_SCALE will be set based on DPI + +# Force Qt and GTK to respect environment DPI +QT_AUTO_SCREEN_SCALE_FACTOR=1 +QT_SCALE_FACTOR_ROUNDING_POLICY=PassThrough + +# Electron/VSCode settings for high DPI +ELECTRON_FORCE_IS_PACKAGED=0 +ELECTRON_OZONE_PLATFORM_HINT=auto + +# Java/SWT settings +SWT_GTK3=1 diff --git a/files/ubuntu-root/defaults/kdeglobals b/files/ubuntu-root/defaults/kdeglobals new file mode 100644 index 000000000..4fa970c0f --- /dev/null +++ b/files/ubuntu-root/defaults/kdeglobals @@ -0,0 
+1,11 @@ +[KScreen] +ScaleFactor=2 +ScreenScaleFactors=eDP-1=2; + +[General] +fixed=Ubuntu Mono,13,-1,5,50,0,0,0,0,0 +font=Ubuntu,12,-1,5,50,0,0,0,0,0 +menuFont=Ubuntu,12,-1,5,50,0,0,0,0,0 +smallestReadableFont=Ubuntu,11,-1,5,50,0,0,0,0,0 +taskbarFont=Ubuntu,12,-1,5,50,0,0,0,0,0 +toolBarFont=Ubuntu,11,-1,5,50,0,0,0,0,0 diff --git a/files/ubuntu-root/defaults/labwc.xml b/files/ubuntu-root/defaults/labwc.xml new file mode 100644 index 000000000..d67a369b1 --- /dev/null +++ b/files/ubuntu-root/defaults/labwc.xml @@ -0,0 +1,56 @@ +<labwc_config> + + <core> + <decoration>server</decoration> + <gap>0</gap> + </core> + + <theme> + <titlebar> + <layout>icon:iconify,max,close</layout> + <showTitle>yes</showTitle> + </titlebar> + <cornerRadius>8</cornerRadius> + </theme> + + <keyboard> + <default /> + <keybind key="A-F4"><action name="Close" /></keybind> + <keybind key="A-Escape"><action name="Close" /></keybind> + <keybind key="A-space"><action name="ShowMenu" menu="client-menu" /></keybind> + <keybind key="W-e"><action name="Execute" command="lab-sensible-terminal" /></keybind> + </keyboard> + + <mouse> + <default /> + <context name="Root"> + <mousebind button="Left" action="Press" /> + <mousebind button="Right" action="Press"><action name="ShowMenu" menu="root-menu" /></mousebind> + <mousebind button="Middle" action="Press"><action name="ShowMenu" menu="client-list-combined-menu" /></mousebind> + </context> + <context name="TitleBar"> + <mousebind button="Right" action="Click"><action name="ShowMenu" menu="client-menu" /></mousebind> + <mousebind button="Middle" action="Click"><action name="ToggleMaximize" direction="vertical" /></mousebind> + </context> + </mouse> + + <windowRules> + <windowRule identifier="*" serverDecoration="yes" /> + <windowRule identifier="*"><action name="Maximize" /></windowRule> + <windowRule identifier="brave" serverDecoration="no" /> + <windowRule identifier="chrome" serverDecoration="no" /> + <windowRule identifier="chromium-browser" 
serverDecoration="no" /> + <windowRule identifier="chromium" serverDecoration="no" /> + <windowRule identifier="firefox-esr" serverDecoration="no" /> + <windowRule identifier="firefox" serverDecoration="no" /> + <windowRule identifier="google-chrome" serverDecoration="no" /> + <windowRule identifier="librewolf" serverDecoration="no" /> + <windowRule identifier="mullvad-browser" serverDecoration="no" /> + <windowRule identifier="opera-bin" serverDecoration="no" /> + <windowRule identifier="opera" serverDecoration="no" /> + <windowRule identifier="vivaldi" serverDecoration="no" /> + <windowRule identifier="wrapped-chromium" serverDecoration="no" /> + <windowRule identifier="zen" serverDecoration="no" /> + </windowRules> + +</labwc_config> diff --git a/files/ubuntu-root/defaults/menu.xml b/files/ubuntu-root/defaults/menu.xml new file mode 100644 index 000000000..133307682 --- /dev/null +++ b/files/ubuntu-root/defaults/menu.xml @@ -0,0 +1,6 @@ +<?xml version="1.0" encoding="utf-8"?> +<openbox_menu xmlns="http://openbox.org/3.4/menu"> +<menu id="root-menu" label="MENU"> +<item label="xterm" icon="/usr/share/pixmaps/xterm-color_48x48.xpm"><action name="Execute"><command>/usr/bin/xterm</command></action></item> +</menu> +</openbox_menu> diff --git a/files/ubuntu-root/defaults/menu_wayland.xml b/files/ubuntu-root/defaults/menu_wayland.xml new file mode 100644 index 000000000..fcfbdbbda --- /dev/null +++ b/files/ubuntu-root/defaults/menu_wayland.xml @@ -0,0 +1,6 @@ +<?xml version="1.0" encoding="utf-8"?> +<openbox_menu xmlns="http://openbox.org/3.4/menu"> +<menu id="root-menu" label="MENU"> +<item label="foot" icon="/usr/share/pixmaps/xterm-color_48x48.xpm"><action name="Execute"><command>/usr/bin/foot</command></action></item> +</menu> +</openbox_menu> diff --git a/files/ubuntu-root/defaults/startwm.sh b/files/ubuntu-root/defaults/startwm.sh new file mode 100755 index 000000000..3f7f24c6b --- /dev/null +++ b/files/ubuntu-root/defaults/startwm.sh @@ -0,0 +1,171 @@ 
+#!/usr/bin/env bash + +# Setup XDG runtime directory for KDE/Plasma +export XDG_RUNTIME_DIR="${XDG_RUNTIME_DIR:-/tmp/runtime-$USER}" +mkdir -p "$XDG_RUNTIME_DIR" +chmod 700 "$XDG_RUNTIME_DIR" + +# Load Xresources with dynamic DPI if available +if [ -f /defaults/Xresources ]; then + # Update DPI in Xresources if DPI environment variable is set + if [ -n "${DPI}" ]; then + sed "s/Xft\.dpi:.*/Xft.dpi: ${DPI}/" /defaults/Xresources > /tmp/.Xresources + xrdb -merge /tmp/.Xresources + echo "Loaded Xresources with DPI: ${DPI}" + else + xrdb -merge /defaults/Xresources + echo "Loaded Xresources with default DPI settings" + fi +fi + +# GPU detection and configuration for WebGL/Vulkan/OpenGL support +NVIDIA_PRESENT=false +GPU_AVAILABLE=false + +# Check for NVIDIA GPU +if which nvidia-smi > /dev/null 2>&1 && nvidia-smi --query-gpu=uuid --format=csv,noheader 2>/dev/null | head -n1 | grep -q .; then + NVIDIA_PRESENT=true + GPU_AVAILABLE=true + echo "NVIDIA GPU detected" +fi + +# Check for other GPUs via /dev/dri +if ls -A /dev/dri 2>/dev/null | grep -q .; then + GPU_AVAILABLE=true + echo "GPU device detected at /dev/dri" +fi + +# Configure GPU acceleration based on detected hardware +if [ "${NVIDIA_PRESENT}" = "true" ]; then + if [ "${DISABLE_ZINK}" != "true" ]; then + # NVIDIA GPU with Zink (Mesa's Vulkan-based OpenGL implementation) + echo "Configuring NVIDIA GPU with Zink driver" + export LIBGL_KOPPER_DRI2=1 + export MESA_LOADER_DRIVER_OVERRIDE=zink + export GALLIUM_DRIVER=zink + else + # NVIDIA GPU with native OpenGL (EGL backend) + echo "Configuring NVIDIA GPU with native EGL/OpenGL" + fi + export VGL_DISPLAY="${VGL_DISPLAY:-egl}" + export __GLX_VENDOR_LIBRARY_NAME=nvidia + export __NV_PRIME_RENDER_OFFLOAD=1 + export __VK_LAYER_NV_optimus=NVIDIA_only +elif [ "${GPU_AVAILABLE}" = "true" ]; then + # Non-NVIDIA GPU (Intel/AMD) - use native drivers with Xvfb's DRI3 device + echo "Configuring GPU with native drivers (Intel/AMD)" + # For Intel/AMD GPUs, VirtualGL must use the 
Xvfb display (:1) not EGL + # Xvfb is running with -vfbdevice /dev/dri/renderD128 which provides DRI3/GLX support + export VGL_DISPLAY="${DISPLAY}" + # Enable DRI3 and hardware acceleration for Intel/AMD + export LIBGL_ALWAYS_SOFTWARE=0 + export MESA_GL_VERSION_OVERRIDE=4.5 + export MESA_GLSL_VERSION_OVERRIDE=450 + # Force DRI3 for better performance + if [ "${DISABLE_DRI3}" != "true" ]; then + export LIBGL_DRI3_ENABLE=1 + fi +fi + +# Set VirtualGL frame rate to match display refresh +if [ -n "${DISPLAY_REFRESH}" ]; then + export VGL_FPS="${DISPLAY_REFRESH}" +fi + +# DPI and scaling configuration for applications +# Qt/KDE applications scaling +export QT_AUTO_SCREEN_SCALE_FACTOR=1 +export QT_SCALE_FACTOR_ROUNDING_POLICY=PassThrough + +# Calculate scale factor from DPI (96 DPI = 1.0 scale) +DPI=${DPI:-96} +SCALE_FACTOR=$(echo "scale=2; ${DPI} / 96" | bc) +export QT_SCALE_FACTOR="${SCALE_FACTOR}" +export QT_FONT_DPI="${DPI}" +echo "Qt scaling: QT_SCALE_FACTOR=${SCALE_FACTOR}, QT_FONT_DPI=${DPI}" + +# GTK applications scaling (dynamic based on DPI) +# GTK works best with integer scale (2) for high DPI, then adjust with DPI_SCALE +if [ "${DPI}" -ge 120 ]; then + export GDK_SCALE=2 + # DPI_SCALE should compensate: for 128 DPI (1.33x), we want 2 * 0.667 = 1.33 + GDK_DPI_SCALE_VALUE=$(echo "scale=3; ${SCALE_FACTOR} / 2" | bc) + export GDK_DPI_SCALE="${GDK_DPI_SCALE_VALUE}" +else + export GDK_SCALE=1 + export GDK_DPI_SCALE=1 +fi +echo "GTK scaling: GDK_SCALE=${GDK_SCALE}, GDK_DPI_SCALE=${GDK_DPI_SCALE} (DPI=${DPI}, effective=${SCALE_FACTOR}x)" + +# Electron applications (VSCode, etc.) 
- force high DPI scaling +export ELECTRON_FORCE_IS_PACKAGED=0 +export ELECTRON_OZONE_PLATFORM_HINT=auto +# Force device scale factor for all Chromium/Electron apps +export FORCE_DEVICE_SCALE_FACTOR="${SCALE_FACTOR}" +echo "Electron/Chromium scaling: FORCE_DEVICE_SCALE_FACTOR=${SCALE_FACTOR}" + +# Java/Eclipse applications scaling +export SWT_GTK3=1 +# Convert DPI to percentage: 96=100%, 128=133%, 192=200% +SWT_AUTO_SCALE=$((${DPI} * 100 / 96)) +export SWT_AUTOSCALE="${SWT_AUTO_SCALE}" +# Java 9+ UI scaling +export GDK_SCALE=2 +export GDK_DPI_SCALE=$(echo "scale=3; ${SCALE_FACTOR} / 2" | bc) +# Set _JAVA_OPTIONS for all Java applications +export _JAVA_OPTIONS="-Dsun.java2d.uiScale=${SCALE_FACTOR} -Dswt.autoScale=${SWT_AUTO_SCALE} -Dswt.dpi.awareness=1 ${_JAVA_OPTIONS:-}" +echo "Java/Eclipse scaling: sun.java2d.uiScale=${SCALE_FACTOR}, swt.autoScale=${SWT_AUTO_SCALE}% (DPI=${DPI})" + +# Set additional session variables for KDE +export XDG_SESSION_ID="${DISPLAY#*:}" +export QT_LOGGING_RULES="${QT_LOGGING_RULES:-*.debug=false;qt.qpa.*=false}" + +# Start KDE Plasma desktop with appropriate GPU acceleration +if which startplasma-x11 > /dev/null 2>&1; then + echo "Starting KDE Plasma desktop" + # Use VirtualGL for all GPU types (NVIDIA, Intel, AMD) + # Only skip VirtualGL when no GPU is available (software rendering mode) + if [ "${GPU_AVAILABLE}" = "true" ] && which vglrun > /dev/null 2>&1; then + if [ "${NVIDIA_PRESENT}" = "true" ]; then + echo "Starting with NVIDIA GPU acceleration via VirtualGL (EGL backend)" + export VGL_FPS="${DISPLAY_REFRESH:-60}" + /usr/bin/vglrun -d "${VGL_DISPLAY:-egl}" +wm /usr/bin/dbus-launch --exit-with-session /usr/bin/startplasma-x11 > /tmp/startwm.log 2>&1 & + else + echo "Starting with GPU acceleration (Intel/AMD) via VirtualGL (Xvfb DRI3 backend)" + export VGL_FPS="${DISPLAY_REFRESH:-60}" + # For Intel/AMD, VGL_DISPLAY must point to Xvfb display running with -vfbdevice + /usr/bin/vglrun -d "${VGL_DISPLAY}" +wm /usr/bin/dbus-launch 
--exit-with-session /usr/bin/startplasma-x11 > /tmp/startwm.log 2>&1 & + fi + else + echo "Starting with software rendering (no GPU acceleration)" + /usr/bin/dbus-launch --exit-with-session /usr/bin/startplasma-x11 > /tmp/startwm.log 2>&1 & + fi + + # Start fcitx if installed + if which fcitx > /dev/null 2>&1; then + /usr/bin/fcitx & + fi + + # Keep the script running + echo "Session running. Desktop environment started in background." + wait + +elif which openbox-session > /dev/null 2>&1; then + echo "Starting Openbox desktop" + # Use VirtualGL for all GPU types, skip only for software rendering + if [ "${GPU_AVAILABLE}" = "true" ] && which vglrun > /dev/null 2>&1; then + if [ "${NVIDIA_PRESENT}" = "true" ]; then + echo "Starting with NVIDIA GPU acceleration via VirtualGL" + else + echo "Starting with GPU acceleration (Intel/AMD) via VirtualGL" + fi + export VGL_FPS="${DISPLAY_REFRESH:-60}" + exec vglrun -d "${VGL_DISPLAY:-egl}" +wm dbus-launch --exit-with-session /usr/bin/openbox-session + else + echo "Starting with software rendering" + exec dbus-launch --exit-with-session /usr/bin/openbox-session + fi +else + echo "ERROR: No desktop environment found" + exit 1 +fi diff --git a/files/ubuntu-root/defaults/startwm_wayland.sh b/files/ubuntu-root/defaults/startwm_wayland.sh new file mode 100755 index 000000000..fd69e288d --- /dev/null +++ b/files/ubuntu-root/defaults/startwm_wayland.sh @@ -0,0 +1,9 @@ +#!/usr/bin/env bash + +# Start DE +export XCURSOR_THEME=breeze +export XCURSOR_SIZE=24 +export XKB_DEFAULT_LAYOUT=us +export XKB_DEFAULT_RULES=evdev +export WAYLAND_DISPLAY=wayland-1 +labwc > /dev/null 2>&1 diff --git a/files/ubuntu-root/etc/s6-overlay/s6-rc.d/ci-service-check/dependencies.d/legacy-services b/files/ubuntu-root/etc/s6-overlay/s6-rc.d/ci-service-check/dependencies.d/legacy-services new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/files/ubuntu-root/etc/s6-overlay/s6-rc.d/ci-service-check/dependencies.d/legacy-services @@ -0,0 +1 @@ + 
diff --git a/files/ubuntu-root/etc/s6-overlay/s6-rc.d/ci-service-check/type b/files/ubuntu-root/etc/s6-overlay/s6-rc.d/ci-service-check/type new file mode 100644 index 000000000..bdd22a185 --- /dev/null +++ b/files/ubuntu-root/etc/s6-overlay/s6-rc.d/ci-service-check/type @@ -0,0 +1 @@ +oneshot diff --git a/files/ubuntu-root/etc/s6-overlay/s6-rc.d/ci-service-check/up b/files/ubuntu-root/etc/s6-overlay/s6-rc.d/ci-service-check/up new file mode 100644 index 000000000..18de1bb49 --- /dev/null +++ b/files/ubuntu-root/etc/s6-overlay/s6-rc.d/ci-service-check/up @@ -0,0 +1 @@ +echo "[ls.io-init] done." diff --git a/files/ubuntu-root/etc/s6-overlay/s6-rc.d/contents b/files/ubuntu-root/etc/s6-overlay/s6-rc.d/contents new file mode 100644 index 000000000..019b3695e --- /dev/null +++ b/files/ubuntu-root/etc/s6-overlay/s6-rc.d/contents @@ -0,0 +1,2 @@ +user +user2 diff --git a/files/ubuntu-root/etc/s6-overlay/s6-rc.d/init-adduser/branding b/files/ubuntu-root/etc/s6-overlay/s6-rc.d/init-adduser/branding new file mode 100644 index 000000000..99077a5ea --- /dev/null +++ b/files/ubuntu-root/etc/s6-overlay/s6-rc.d/init-adduser/branding @@ -0,0 +1,12 @@ +─────────────────────────────────────── + _____ __ __ _____ _____ _____ _____ + | | | | __|_ _| | | + | --| | |__ | | | | | | | | | + |_____|_____|_____| |_| |_____|_|_|_| + _____ __ __ _ __ ____ + | __ | | | | | | \ + | __ -| | | | |__| | | + |_____|_____|_|_____|____/ + + Based on images from linuxserver.io +─────────────────────────────────────── diff --git a/files/ubuntu-root/etc/s6-overlay/s6-rc.d/init-adduser/dependencies.d/init-migrations b/files/ubuntu-root/etc/s6-overlay/s6-rc.d/init-adduser/dependencies.d/init-migrations new file mode 100644 index 000000000..e69de29bb diff --git a/files/ubuntu-root/etc/s6-overlay/s6-rc.d/init-adduser/run b/files/ubuntu-root/etc/s6-overlay/s6-rc.d/init-adduser/run new file mode 100755 index 000000000..676827d95 --- /dev/null +++ b/files/ubuntu-root/etc/s6-overlay/s6-rc.d/init-adduser/run @@ 
-0,0 +1,50 @@ +#!/usr/bin/with-contenv bash +# shellcheck shell=bash + +TARGET_USER="${CUSTOM_USER:-${USER_NAME:-user}}" +PUID=${PUID:-${USER_UID:-911}} +PGID=${PGID:-${USER_GID:-911}} +USER_HOME="/home/${TARGET_USER}" + +if [[ -z ${LSIO_READ_ONLY_FS} ]] && [[ -z ${LSIO_NON_ROOT_USER} ]]; then + # ensure primary group + if getent group "${TARGET_USER}" >/dev/null; then + groupmod -o -g "${PGID}" "${TARGET_USER}" || true + else + groupadd -o -g "${PGID}" "${TARGET_USER}" || true + fi + + # ensure user + if getent passwd "${TARGET_USER}" >/dev/null; then + usermod -o -u "${PUID}" -g "${PGID}" -d "${USER_HOME}" "${TARGET_USER}" || true + else + useradd -M -d "${USER_HOME}" -u "${PUID}" -g "${PGID}" -s /bin/bash "${TARGET_USER}" || true + fi + + # basic home and ownership for shared dirs + install -d -m 755 "${USER_HOME}" + chown "${PUID}:${PGID}" "${USER_HOME}" +fi + +cat /run/branding 2>/dev/null || true +echo ' +─────────────────────────────────────── +GID/UID +───────────────────────────────────────' +if [[ -z ${LSIO_NON_ROOT_USER} ]]; then +echo " +User UID: $(id -u "${TARGET_USER}") +User GID: $(id -g "${TARGET_USER}") +───────────────────────────────────────" +else +echo " +User UID: $(stat /run -c %u) +User GID: $(stat /run -c %g) +───────────────────────────────────────" +fi +if [[ -f /build_version ]]; then + cat /build_version + echo ' +─────────────────────────────────────── + ' +fi diff --git a/files/ubuntu-root/etc/s6-overlay/s6-rc.d/init-adduser/type b/files/ubuntu-root/etc/s6-overlay/s6-rc.d/init-adduser/type new file mode 100644 index 000000000..bdd22a185 --- /dev/null +++ b/files/ubuntu-root/etc/s6-overlay/s6-rc.d/init-adduser/type @@ -0,0 +1 @@ +oneshot diff --git a/files/ubuntu-root/etc/s6-overlay/s6-rc.d/init-adduser/up b/files/ubuntu-root/etc/s6-overlay/s6-rc.d/init-adduser/up new file mode 100644 index 000000000..b8522da3e --- /dev/null +++ b/files/ubuntu-root/etc/s6-overlay/s6-rc.d/init-adduser/up @@ -0,0 +1 @@ 
+/etc/s6-overlay/s6-rc.d/init-adduser/run diff --git a/files/ubuntu-root/etc/s6-overlay/s6-rc.d/init-config-end/dependencies.d/init-config b/files/ubuntu-root/etc/s6-overlay/s6-rc.d/init-config-end/dependencies.d/init-config new file mode 100644 index 000000000..e69de29bb diff --git a/files/ubuntu-root/etc/s6-overlay/s6-rc.d/init-config-end/dependencies.d/init-crontab-config b/files/ubuntu-root/etc/s6-overlay/s6-rc.d/init-config-end/dependencies.d/init-crontab-config new file mode 100644 index 000000000..e69de29bb diff --git a/files/ubuntu-root/etc/s6-overlay/s6-rc.d/init-config-end/type b/files/ubuntu-root/etc/s6-overlay/s6-rc.d/init-config-end/type new file mode 100644 index 000000000..bdd22a185 --- /dev/null +++ b/files/ubuntu-root/etc/s6-overlay/s6-rc.d/init-config-end/type @@ -0,0 +1 @@ +oneshot diff --git a/files/ubuntu-root/etc/s6-overlay/s6-rc.d/init-config-end/up b/files/ubuntu-root/etc/s6-overlay/s6-rc.d/init-config-end/up new file mode 100644 index 000000000..c329423ed --- /dev/null +++ b/files/ubuntu-root/etc/s6-overlay/s6-rc.d/init-config-end/up @@ -0,0 +1 @@ +# This file doesn't do anything, it's just the end of the downstream image init process diff --git a/files/ubuntu-root/etc/s6-overlay/s6-rc.d/init-config/dependencies.d/init-selkies-end b/files/ubuntu-root/etc/s6-overlay/s6-rc.d/init-config/dependencies.d/init-selkies-end new file mode 100644 index 000000000..e69de29bb diff --git a/files/ubuntu-root/etc/s6-overlay/s6-rc.d/init-config/type b/files/ubuntu-root/etc/s6-overlay/s6-rc.d/init-config/type new file mode 100644 index 000000000..bdd22a185 --- /dev/null +++ b/files/ubuntu-root/etc/s6-overlay/s6-rc.d/init-config/type @@ -0,0 +1 @@ +oneshot diff --git a/files/ubuntu-root/etc/s6-overlay/s6-rc.d/init-config/up b/files/ubuntu-root/etc/s6-overlay/s6-rc.d/init-config/up new file mode 100644 index 000000000..836b5e7b2 --- /dev/null +++ b/files/ubuntu-root/etc/s6-overlay/s6-rc.d/init-config/up @@ -0,0 +1 @@ +/bin/true diff --git 
a/files/ubuntu-root/etc/s6-overlay/s6-rc.d/init-crontab-config/dependencies.d/init-config b/files/ubuntu-root/etc/s6-overlay/s6-rc.d/init-crontab-config/dependencies.d/init-config new file mode 100644 index 000000000..e69de29bb diff --git a/files/ubuntu-root/etc/s6-overlay/s6-rc.d/init-crontab-config/run b/files/ubuntu-root/etc/s6-overlay/s6-rc.d/init-crontab-config/run new file mode 100755 index 000000000..652460af9 --- /dev/null +++ b/files/ubuntu-root/etc/s6-overlay/s6-rc.d/init-crontab-config/run @@ -0,0 +1,34 @@ +#!/usr/bin/with-contenv bash +# shellcheck shell=bash + +TARGET_USER="${CUSTOM_USER:-${USER_NAME:-root}}" +for cron_user in "$TARGET_USER" root; do + if [[ -z ${LSIO_READ_ONLY_FS} ]] && [[ -z ${LSIO_NON_ROOT_USER} ]]; then + if [[ -f "/etc/crontabs/${cron_user}" ]]; then + lsiown "${cron_user}":"${cron_user}" "/etc/crontabs/${cron_user}" + crontab -u "${cron_user}" "/etc/crontabs/${cron_user}" + fi + fi + + if [[ -f "/defaults/crontabs/${cron_user}" ]]; then + # make folders + mkdir -p \ + /config/crontabs + + # if crontabs do not exist in config + if [[ ! 
-f "/config/crontabs/${cron_user}" ]]; then + # copy crontab from system + if crontab -l -u "${cron_user}" >/dev/null 2>&1; then + crontab -l -u "${cron_user}" >"/config/crontabs/${cron_user}" + fi + + # if crontabs still do not exist in config (were not copied from system) + # copy crontab from image defaults (using -n, do not overwrite an existing file) + cp -n "/defaults/crontabs/${cron_user}" /config/crontabs/ + fi + + # set permissions and import user crontabs + lsiown "${cron_user}":"${cron_user}" "/config/crontabs/${cron_user}" + crontab -u "${cron_user}" "/config/crontabs/${cron_user}" + fi +done diff --git a/files/ubuntu-root/etc/s6-overlay/s6-rc.d/init-crontab-config/type b/files/ubuntu-root/etc/s6-overlay/s6-rc.d/init-crontab-config/type new file mode 100644 index 000000000..bdd22a185 --- /dev/null +++ b/files/ubuntu-root/etc/s6-overlay/s6-rc.d/init-crontab-config/type @@ -0,0 +1 @@ +oneshot diff --git a/files/ubuntu-root/etc/s6-overlay/s6-rc.d/init-crontab-config/up b/files/ubuntu-root/etc/s6-overlay/s6-rc.d/init-crontab-config/up new file mode 100644 index 000000000..d35411185 --- /dev/null +++ b/files/ubuntu-root/etc/s6-overlay/s6-rc.d/init-crontab-config/up @@ -0,0 +1 @@ +/etc/s6-overlay/s6-rc.d/init-crontab-config/run diff --git a/files/ubuntu-root/etc/s6-overlay/s6-rc.d/init-custom-files/dependencies.d/init-mods-end b/files/ubuntu-root/etc/s6-overlay/s6-rc.d/init-custom-files/dependencies.d/init-mods-end new file mode 100644 index 000000000..e69de29bb diff --git a/files/ubuntu-root/etc/s6-overlay/s6-rc.d/init-custom-files/run b/files/ubuntu-root/etc/s6-overlay/s6-rc.d/init-custom-files/run new file mode 100755 index 000000000..6b57858bb --- /dev/null +++ b/files/ubuntu-root/etc/s6-overlay/s6-rc.d/init-custom-files/run @@ -0,0 +1,22 @@ +#!/usr/bin/with-contenv bash +# shellcheck shell=bash + +# Directories +SCRIPTS_DIR="/custom-cont-init.d" + +# Make sure custom init directory exists and has files in it +if [[ -e "${SCRIPTS_DIR}" ]] && [[ -n 
"$(/bin/ls -A ${SCRIPTS_DIR} 2>/dev/null)" ]]; then + echo "[custom-init] Files found, executing" + for SCRIPT in "${SCRIPTS_DIR}"/*; do + NAME="$(basename "${SCRIPT}")" + if [[ -x "${SCRIPT}" ]]; then + echo "[custom-init] ${NAME}: executing..." + /bin/bash "${SCRIPT}" + echo "[custom-init] ${NAME}: exited $?" + elif [[ ! -x "${SCRIPT}" ]]; then + echo "[custom-init] ${NAME}: is not an executable file" + fi + done +else + echo "[custom-init] No custom files found, skipping..." +fi diff --git a/files/ubuntu-root/etc/s6-overlay/s6-rc.d/init-custom-files/type b/files/ubuntu-root/etc/s6-overlay/s6-rc.d/init-custom-files/type new file mode 100644 index 000000000..bdd22a185 --- /dev/null +++ b/files/ubuntu-root/etc/s6-overlay/s6-rc.d/init-custom-files/type @@ -0,0 +1 @@ +oneshot diff --git a/files/ubuntu-root/etc/s6-overlay/s6-rc.d/init-custom-files/up b/files/ubuntu-root/etc/s6-overlay/s6-rc.d/init-custom-files/up new file mode 100644 index 000000000..28bf31859 --- /dev/null +++ b/files/ubuntu-root/etc/s6-overlay/s6-rc.d/init-custom-files/up @@ -0,0 +1 @@ +/etc/s6-overlay/s6-rc.d/init-custom-files/run diff --git a/files/ubuntu-root/etc/s6-overlay/s6-rc.d/init-device-perms/dependencies.d/init-adduser b/files/ubuntu-root/etc/s6-overlay/s6-rc.d/init-device-perms/dependencies.d/init-adduser new file mode 100644 index 000000000..e69de29bb diff --git a/files/ubuntu-root/etc/s6-overlay/s6-rc.d/init-device-perms/run b/files/ubuntu-root/etc/s6-overlay/s6-rc.d/init-device-perms/run new file mode 100755 index 000000000..2f4a83444 --- /dev/null +++ b/files/ubuntu-root/etc/s6-overlay/s6-rc.d/init-device-perms/run @@ -0,0 +1,39 @@ +#!/usr/bin/with-contenv bash +# shellcheck shell=bash + +TARGET_USER="${CUSTOM_USER:-${USER_NAME:-root}}" + +if [[ -z ${LSIO_NON_ROOT_USER} ]] && [[ -n ${ATTACHED_DEVICES_PERMS} ]]; then + FILES=$(find ${ATTACHED_DEVICES_PERMS} -print 2>/dev/null) + + for i in ${FILES}; do + FILE_GID=$(stat -c '%g' "${i}") + FILE_UID=$(stat -c '%u' "${i}") + # check if 
user matches device + if id -u "$TARGET_USER" | grep -qw "${FILE_UID}"; then + echo "**** permissions for ${i} are good ****" + else + # check if group matches and that device has group rw + if id -G "$TARGET_USER" | grep -qw "${FILE_GID}" && [[ $(stat -c '%A' "${i}" | cut -b 5,6) == "rw" ]]; then + echo "**** permissions for ${i} are good ****" + # check if device needs to be added to group + elif ! id -G "$TARGET_USER" | grep -qw "${FILE_GID}"; then + # check if group needs to be created + GROUP_NAME=$(getent group "${FILE_GID}" | awk -F: '{print $1}') + if [[ -z "${GROUP_NAME}" ]]; then + GROUP_NAME="group$(head /dev/urandom | tr -dc 'a-z0-9' | head -c4)" + groupadd "${GROUP_NAME}" + groupmod -g "${FILE_GID}" "${GROUP_NAME}" + echo "**** creating group ${GROUP_NAME} with id ${FILE_GID} ****" + fi + echo "**** adding ${i} to group ${GROUP_NAME} with id ${FILE_GID} ****" + usermod -a -G "${GROUP_NAME}" "$TARGET_USER" + fi + # check if device has group rw + if [[ $(stat -c '%A' "${i}" | cut -b 5,6) != "rw" ]]; then + echo -e "**** The device ${i} does not have group read/write permissions, attempting to fix inside the container. 
****" + chmod g+rw "${i}" + fi + fi + done +fi diff --git a/files/ubuntu-root/etc/s6-overlay/s6-rc.d/init-device-perms/type b/files/ubuntu-root/etc/s6-overlay/s6-rc.d/init-device-perms/type new file mode 100644 index 000000000..3d92b15f2 --- /dev/null +++ b/files/ubuntu-root/etc/s6-overlay/s6-rc.d/init-device-perms/type @@ -0,0 +1 @@ +oneshot \ No newline at end of file diff --git a/files/ubuntu-root/etc/s6-overlay/s6-rc.d/init-device-perms/up b/files/ubuntu-root/etc/s6-overlay/s6-rc.d/init-device-perms/up new file mode 100644 index 000000000..050e0b296 --- /dev/null +++ b/files/ubuntu-root/etc/s6-overlay/s6-rc.d/init-device-perms/up @@ -0,0 +1 @@ +/etc/s6-overlay/s6-rc.d/init-device-perms/run \ No newline at end of file diff --git a/files/ubuntu-root/etc/s6-overlay/s6-rc.d/init-envfile/run b/files/ubuntu-root/etc/s6-overlay/s6-rc.d/init-envfile/run new file mode 100755 index 000000000..592df5270 --- /dev/null +++ b/files/ubuntu-root/etc/s6-overlay/s6-rc.d/init-envfile/run @@ -0,0 +1,19 @@ +#!/usr/bin/with-contenv bash +# shellcheck shell=bash + +if find /run/s6/container_environment/FILE__* -maxdepth 1 > /dev/null 2>&1; then + for FILENAME in /run/s6/container_environment/FILE__*; do + SECRETFILE=$(cat "${FILENAME}") + if [[ -f ${SECRETFILE} ]]; then + FILESTRIP=${FILENAME//FILE__/} + if [[ $(tail -n1 "${SECRETFILE}" | wc -l) != 0 ]]; then + echo "[env-init] Your secret: ${FILENAME##*/}" + echo " contains a trailing newline and may not work as expected" + fi + cat "${SECRETFILE}" >"${FILESTRIP}" + echo "[env-init] ${FILESTRIP##*/} set from ${FILENAME##*/}" + else + echo "[env-init] cannot find secret in ${FILENAME##*/}" + fi + done +fi diff --git a/files/ubuntu-root/etc/s6-overlay/s6-rc.d/init-envfile/type b/files/ubuntu-root/etc/s6-overlay/s6-rc.d/init-envfile/type new file mode 100644 index 000000000..bdd22a185 --- /dev/null +++ b/files/ubuntu-root/etc/s6-overlay/s6-rc.d/init-envfile/type @@ -0,0 +1 @@ +oneshot diff --git 
a/files/ubuntu-root/etc/s6-overlay/s6-rc.d/init-envfile/up b/files/ubuntu-root/etc/s6-overlay/s6-rc.d/init-envfile/up new file mode 100644 index 000000000..b2b4fb8c2 --- /dev/null +++ b/files/ubuntu-root/etc/s6-overlay/s6-rc.d/init-envfile/up @@ -0,0 +1 @@ +/etc/s6-overlay/s6-rc.d/init-envfile/run diff --git a/files/ubuntu-root/etc/s6-overlay/s6-rc.d/init-migrations/run b/files/ubuntu-root/etc/s6-overlay/s6-rc.d/init-migrations/run new file mode 100755 index 000000000..baf86a249 --- /dev/null +++ b/files/ubuntu-root/etc/s6-overlay/s6-rc.d/init-migrations/run @@ -0,0 +1,32 @@ +#!/usr/bin/with-contenv bash +# shellcheck shell=bash + +MIGRATIONS_DIR="/migrations" +MIGRATIONS_HISTORY="/config/.migrations" + +echo "[migrations] started" + +if [[ ! -d ${MIGRATIONS_DIR} ]]; then + echo "[migrations] no migrations found" + exit +fi + +for MIGRATION in $(find ${MIGRATIONS_DIR}/* | sort -n); do + NAME="$(basename "${MIGRATION}")" + if [[ -f ${MIGRATIONS_HISTORY} ]] && grep -Fxq "${NAME}" ${MIGRATIONS_HISTORY}; then + echo "[migrations] ${NAME}: skipped" + continue + fi + echo "[migrations] ${NAME}: executing..." + # Execute migration script in a subshell to prevent it from modifying the current environment + ("${MIGRATION}") + EXIT_CODE=$? 
+ if [[ ${EXIT_CODE} -ne 0 ]]; then + echo "[migrations] ${NAME}: failed with exit code ${EXIT_CODE}, contact support" + exit "${EXIT_CODE}" + fi + echo "${NAME}" >>${MIGRATIONS_HISTORY} + echo "[migrations] ${NAME}: succeeded" +done + +echo "[migrations] done" diff --git a/files/ubuntu-root/etc/s6-overlay/s6-rc.d/init-migrations/type b/files/ubuntu-root/etc/s6-overlay/s6-rc.d/init-migrations/type new file mode 100644 index 000000000..bdd22a185 --- /dev/null +++ b/files/ubuntu-root/etc/s6-overlay/s6-rc.d/init-migrations/type @@ -0,0 +1 @@ +oneshot diff --git a/files/ubuntu-root/etc/s6-overlay/s6-rc.d/init-migrations/up b/files/ubuntu-root/etc/s6-overlay/s6-rc.d/init-migrations/up new file mode 100644 index 000000000..7c4cbcf6f --- /dev/null +++ b/files/ubuntu-root/etc/s6-overlay/s6-rc.d/init-migrations/up @@ -0,0 +1 @@ +/etc/s6-overlay/s6-rc.d/init-migrations/run diff --git a/files/ubuntu-root/etc/s6-overlay/s6-rc.d/init-mods-end/dependencies.d/init-mods-package-install b/files/ubuntu-root/etc/s6-overlay/s6-rc.d/init-mods-end/dependencies.d/init-mods-package-install new file mode 100644 index 000000000..e69de29bb diff --git a/files/ubuntu-root/etc/s6-overlay/s6-rc.d/init-mods-end/type b/files/ubuntu-root/etc/s6-overlay/s6-rc.d/init-mods-end/type new file mode 100644 index 000000000..bdd22a185 --- /dev/null +++ b/files/ubuntu-root/etc/s6-overlay/s6-rc.d/init-mods-end/type @@ -0,0 +1 @@ +oneshot diff --git a/files/ubuntu-root/etc/s6-overlay/s6-rc.d/init-mods-end/up b/files/ubuntu-root/etc/s6-overlay/s6-rc.d/init-mods-end/up new file mode 100644 index 000000000..092149d53 --- /dev/null +++ b/files/ubuntu-root/etc/s6-overlay/s6-rc.d/init-mods-end/up @@ -0,0 +1 @@ +# This file doesn't do anything, it's just the end of the mod init process diff --git a/files/ubuntu-root/etc/s6-overlay/s6-rc.d/init-mods-package-install/dependencies.d/init-mods b/files/ubuntu-root/etc/s6-overlay/s6-rc.d/init-mods-package-install/dependencies.d/init-mods new file mode 100644 index 
000000000..e69de29bb diff --git a/files/ubuntu-root/etc/s6-overlay/s6-rc.d/init-mods-package-install/type b/files/ubuntu-root/etc/s6-overlay/s6-rc.d/init-mods-package-install/type new file mode 100644 index 000000000..bdd22a185 --- /dev/null +++ b/files/ubuntu-root/etc/s6-overlay/s6-rc.d/init-mods-package-install/type @@ -0,0 +1 @@ +oneshot diff --git a/files/ubuntu-root/etc/s6-overlay/s6-rc.d/init-mods-package-install/up b/files/ubuntu-root/etc/s6-overlay/s6-rc.d/init-mods-package-install/up new file mode 100644 index 000000000..fb633014c --- /dev/null +++ b/files/ubuntu-root/etc/s6-overlay/s6-rc.d/init-mods-package-install/up @@ -0,0 +1 @@ +/etc/s6-overlay/s6-rc.d/init-mods-package-install/run diff --git a/files/ubuntu-root/etc/s6-overlay/s6-rc.d/init-mods/dependencies.d/init-config-end b/files/ubuntu-root/etc/s6-overlay/s6-rc.d/init-mods/dependencies.d/init-config-end new file mode 100644 index 000000000..e69de29bb diff --git a/files/ubuntu-root/etc/s6-overlay/s6-rc.d/init-mods/type b/files/ubuntu-root/etc/s6-overlay/s6-rc.d/init-mods/type new file mode 100644 index 000000000..bdd22a185 --- /dev/null +++ b/files/ubuntu-root/etc/s6-overlay/s6-rc.d/init-mods/type @@ -0,0 +1 @@ +oneshot diff --git a/files/ubuntu-root/etc/s6-overlay/s6-rc.d/init-mods/up b/files/ubuntu-root/etc/s6-overlay/s6-rc.d/init-mods/up new file mode 100644 index 000000000..040d8013c --- /dev/null +++ b/files/ubuntu-root/etc/s6-overlay/s6-rc.d/init-mods/up @@ -0,0 +1 @@ +# This file doesn't do anything, it's just the start of the mod init process diff --git a/files/ubuntu-root/etc/s6-overlay/s6-rc.d/init-nginx/dependencies.d/init-selkies b/files/ubuntu-root/etc/s6-overlay/s6-rc.d/init-nginx/dependencies.d/init-selkies new file mode 100644 index 000000000..e69de29bb diff --git a/files/ubuntu-root/etc/s6-overlay/s6-rc.d/init-nginx/run b/files/ubuntu-root/etc/s6-overlay/s6-rc.d/init-nginx/run new file mode 100755 index 000000000..5cf044c0b --- /dev/null +++ 
#!/usr/bin/with-contenv bash
# init-nginx (s6 oneshot): generates the nginx vhost from /defaults/default.conf,
# provisions a self-signed TLS certificate on first run (persisted in /config/ssl),
# and stages the selkies web dashboard assets under /usr/share/selkies/web.
# Companion s6 files: type=oneshot, up=/etc/s6-overlay/s6-rc.d/init-nginx/run.

# nginx Path
NGINX_CONFIG=/etc/nginx/sites-available/default

# user passed env vars (defaults applied here)
CPORT="${CUSTOM_PORT:-3000}"
CHPORT="${CUSTOM_HTTPS_PORT:-3001}"
CWS="${CUSTOM_WS_PORT:-8082}"
CUSER="${CUSTOM_USER:-${USER_NAME:-root}}"
SFOLDER="${SUBFOLDER:-/}"
FILE_MANAGER_PATH="${FILE_MANAGER_PATH:-$HOME}"
DASHBOARD="${DASHBOARD:-selkies-dashboard}"
SELKIES_FILE_TRANSFERS="${SELKIES_FILE_TRANSFERS:-upload,download}"
HARDEN_DESKTOP="${HARDEN_DESKTOP:-false}"

# create self signed cert (first run only; /config is a persisted volume)
if [ ! -f "/config/ssl/cert.pem" ]; then
    mkdir -p /config/ssl
    openssl req -new -x509 \
        -days 3650 -nodes \
        -out /config/ssl/cert.pem \
        -keyout /config/ssl/cert.key \
        -subj "/C=US/ST=CA/L=Carlsbad/O=Linuxserver.io/OU=LSIO Server/CN=*"
    chmod 600 /config/ssl/cert.key
    TARGET_USER="${CUSER}"
    chown -R "${TARGET_USER}:${TARGET_USER}" /config/ssl
fi

# modify nginx config: substitute port, websocket, subfolder and download-path tokens
cp /defaults/default.conf "${NGINX_CONFIG}"
sed -i "s/3000/$CPORT/g" "${NGINX_CONFIG}"
sed -i "s/3001/$CHPORT/g" "${NGINX_CONFIG}"
sed -i "s/CWS/$CWS/g" "${NGINX_CONFIG}"
sed -i "s|SUBFOLDER|$SFOLDER|g" "${NGINX_CONFIG}"
sed -i "s|REPLACE_DOWNLOADS_PATH|$FILE_MANAGER_PATH|g" "${NGINX_CONFIG}"
# quoted so a FILE_MANAGER_PATH containing spaces creates a single directory
s6-setuidgid "${CUSER}" mkdir -p "${FILE_MANAGER_PATH}"
# drop the file-download location block when downloads are disabled or desktop is hardened
if [[ $SELKIES_FILE_TRANSFERS != *"download"* ]] || [[ ${HARDEN_DESKTOP,,} == "true" ]]; then
    sed -i '/files {/,/^ }/d' "${NGINX_CONFIG}"
fi
if [ ! -z ${DISABLE_IPV6+x} ]; then
    sed -i '/listen \[::\]/d' "${NGINX_CONFIG}"
fi
if [ ! -z ${PASSWORD+x} ]; then
    # FIX: use a literal printf format and quote all expansions. The previous
    # form — printf "${CUSER}:$(openssl passwd -apr1 ${PASSWORD})\n" — passed
    # user-controlled data as the printf FORMAT string (a '%' in the user name
    # or hash would corrupt the output) and word-split an unquoted ${PASSWORD}
    # containing spaces before it ever reached openssl.
    printf '%s:%s\n' "${CUSER}" "$(openssl passwd -apr1 "${PASSWORD}")" > /etc/nginx/.htpasswd
    # uncomment the auth directives in the template
    sed -i 's/#//g' "${NGINX_CONFIG}"
fi
if [ ! -z ${DEV_MODE+x} ]; then
    sed -i \
        -e 's:location / {:location /null {:g' \
        -e 's:location /devmode:location /:g' \
        "${NGINX_CONFIG}"
fi

# set dashboard and icon
rm -Rf \
    /usr/share/selkies/web

# Always use dashboard (selkies with pixelflux)
# pixelflux handles hardware encoding internally via NVENC/VA-API
cp -a \
    /usr/share/selkies/$DASHBOARD \
    /usr/share/selkies/web

# Ensure Safari keyboard fix is applied after runtime copy
if [ -f /usr/local/bin/patch-selkies-safari-keyboard.py ]; then
    chmod +x /usr/local/bin/patch-selkies-safari-keyboard.py
    python3 /usr/local/bin/patch-selkies-safari-keyboard.py
fi

# footer.html only exists in some dashboards; best-effort on purpose
sed -i "s|REPLACE_DOWNLOADS_PATH|$FILE_MANAGER_PATH|g" /usr/share/selkies/web/nginx/footer.html 2>/dev/null || true
cp \
    /usr/share/selkies/www/icon.png \
    /usr/share/selkies/web/favicon.ico
cp \
    /usr/share/selkies/www/icon.png \
    /usr/share/selkies/web/icon.png
# manifest creation
# NOTE(review): a TITLE containing a double quote would produce invalid JSON —
# assumed TITLE is controlled by the image, confirm upstream.
echo "{
  \"name\": \"${TITLE}\",
  \"short_name\": \"${TITLE}\",
  \"manifest_version\": 2,
  \"version\": \"1.0.0\",
  \"display\": \"fullscreen\",
  \"background_color\": \"#000000\",
  \"theme_color\": \"#000000\",
  \"icons\": [
    {
      \"src\": \"icon.png\",
      \"type\": \"image/png\",
      \"sizes\": \"180x180\"
    }
  ],
  \"start_url\": \"/\"
}" > /usr/share/selkies/web/manifest.json
#!/usr/bin/with-contenv bash
# init-selkies-config (s6 oneshot): prepares the per-user desktop environment —
# copies default WM configs (labwc on wayland, openbox on X11), exports runtime
# env via /run/s6/container_environment, applies the HARDEN_* lockdowns, installs
# proot-apps, selects a DRI render node, and creates joystick device nodes.

# set paths for wayland or xorg
TARGET_USER="${CUSTOM_USER:-${USER_NAME:-root}}"

if [[ "${PIXELFLUX_WAYLAND}" == "true" ]]; then
    CONF_DIR="$HOME/.config/labwc"
    DEF_AUTOSTART="/defaults/autostart_wayland"
    DEF_MENU="/defaults/menu_wayland.xml"
    DEF_RC="/defaults/labwc.xml"
else
    CONF_DIR="$HOME/.config/openbox"
    DEF_AUTOSTART="/defaults/autostart"
    DEF_MENU="/defaults/menu.xml"
fi

# default file copies first run
mkdir -p "$HOME/.config"
chown "$TARGET_USER:$TARGET_USER" "$HOME/.config"
if [[ ! -f "$CONF_DIR/autostart" ]]; then
    mkdir -p "$CONF_DIR"
    if [[ -f "$DEF_AUTOSTART" ]]; then
        cp "$DEF_AUTOSTART" "$CONF_DIR/autostart"
        chown "$TARGET_USER:$TARGET_USER" "$CONF_DIR" "$CONF_DIR/autostart"
    fi
fi
if [[ ! -f "$CONF_DIR/menu.xml" ]]; then
    mkdir -p "$CONF_DIR" && \
        cp "$DEF_MENU" "$CONF_DIR/menu.xml"
    chown "$TARGET_USER:$TARGET_USER" "$CONF_DIR" "$CONF_DIR/menu.xml"
fi

# XDG Home
if [ ! -d "$HOME/.XDG" ]; then
    mkdir -p "$HOME/.XDG"
    chown "$TARGET_USER:$TARGET_USER" "$HOME/.XDG"
fi
printf "$HOME/.XDG" > /run/s6/container_environment/XDG_RUNTIME_DIR

# locale Support
if [ ! -z ${LC_ALL+x} ]; then
    printf "${LC_ALL%.UTF-8}" > /run/s6/container_environment/LANGUAGE
    printf "${LC_ALL}" > /run/s6/container_environment/LANG
fi

# hardening flags
if [[ ${HARDEN_DESKTOP,,} == "true" ]]; then
    export DISABLE_OPEN_TOOLS="true"
    export DISABLE_SUDO="true"
    export DISABLE_TERMINALS="true"
    # application hardening if unset (only applied when the user did not set them)
    if [ -z ${SELKIES_FILE_TRANSFERS+x} ]; then
        printf "" > /run/s6/container_environment/SELKIES_FILE_TRANSFERS
    fi
    if [ -z ${SELKIES_COMMAND_ENABLED+x} ]; then
        printf "false" > /run/s6/container_environment/SELKIES_COMMAND_ENABLED
    fi
    if [ -z ${SELKIES_UI_SIDEBAR_SHOW_FILES+x} ]; then
        printf "false" > /run/s6/container_environment/SELKIES_UI_SIDEBAR_SHOW_FILES
    fi
    if [ -z ${SELKIES_UI_SIDEBAR_SHOW_APPS+x} ]; then
        printf "false" > /run/s6/container_environment/SELKIES_UI_SIDEBAR_SHOW_APPS
    fi
fi
if [[ ${HARDEN_OPENBOX,,} == "true" ]]; then
    export DISABLE_CLOSE_BUTTON="true"
    export DISABLE_MOUSE_BUTTONS="true"
    export HARDEN_KEYBINDS="true"
    if [ -z ${RESTART_APP+x} ]; then
        export RESTART_APP=true
        printf "true" > /run/s6/container_environment/RESTART_APP
    fi
fi

# disable open tools
xdg_open_path=$(which xdg-open 2>/dev/null)
exo_open_path=$(which exo-open 2>/dev/null)
if [[ ${DISABLE_OPEN_TOOLS,,} == "true" ]]; then
    echo "[ls.io-init] Disabling xdg-open and exo-open"
    [ -n "$xdg_open_path" ] && chmod 0000 "$xdg_open_path"
    [ -n "$exo_open_path" ] && chmod 0000 "$exo_open_path"
else
    [ -n "$xdg_open_path" ] && chmod 755 "$xdg_open_path"
    [ -n "$exo_open_path" ] && chmod 755 "$exo_open_path"
fi

# disable sudo (both directions are idempotent so toggling the env var works
# across container restarts)
sudo_path=$(which sudo 2>/dev/null)
if [[ ${DISABLE_SUDO,,} == "true" ]]; then
    echo "[ls.io-init] Disabling sudo binary and corrupting sudoers config"
    [ -n "$sudo_path" ] && chmod 0000 "$sudo_path"
    sed -i "s/NOPASSWD/CORRUPT_FILE/g" /etc/sudoers
else
    [ -n "$sudo_path" ] && chmod 4755 "$sudo_path"
    sed -i "s/CORRUPT_FILE/NOPASSWD/g" /etc/sudoers
fi

# disable terminals and menu entries
USER_MENU_DIR="$CONF_DIR"
USER_MENU_XML="$USER_MENU_DIR/menu.xml"
USER_MENU_BAK="$USER_MENU_DIR/menu.xml.bak"
TERMINAL_NAMES=("xterm" "st" "stterm" "uxterm" "lxterminal" "gnome-terminal" "konsole" "xfce4-terminal" "terminator" "foot" "weston-terminal")
if [ -f "$USER_MENU_XML" ] && [ ! -f "$USER_MENU_BAK" ]; then
    echo "[ls.io-init] Creating initial backup of menu.xml"
    cp "$USER_MENU_XML" "$USER_MENU_BAK"
    chown "$TARGET_USER:$TARGET_USER" "$USER_MENU_BAK"
fi
if [[ ${DISABLE_TERMINALS,,} == "true" ]]; then
    echo "[ls.io-init] Disabling terminal binaries and removing from menu"
    # restore from backup first so repeated runs don't compound deletions
    [ -f "$USER_MENU_BAK" ] && cp "$USER_MENU_BAK" "$USER_MENU_XML"
    for term_name in "${TERMINAL_NAMES[@]}"; do
        term_path=$(which "$term_name" 2>/dev/null)
        if [ -n "$term_path" ]; then
            chmod 0000 "$term_path"
            # escape sed metacharacters in the path before using it as a pattern
            escaped_path=$(echo "$term_path" | sed 's/[&/\]/\\&/g')
            sed -i "/<command>${escaped_path}<\/command>/d" "$USER_MENU_XML"
        fi
    done
    chown "$TARGET_USER:$TARGET_USER" "$USER_MENU_XML"
else
    if [ -f "$USER_MENU_BAK" ]; then
        cp "$USER_MENU_BAK" "$USER_MENU_XML"
        chown "$TARGET_USER:$TARGET_USER" "$USER_MENU_XML"
    fi
    for term_name in "${TERMINAL_NAMES[@]}"; do
        term_path=$(which "$term_name" 2>/dev/null)
        if [ -n "$term_path" ] && [ ! -x "$term_path" ]; then
            chmod 755 "$term_path"
        fi
    done
fi

# lock down autostart file if auto restart is enabled
AUTOSTART_SCRIPT="$CONF_DIR/autostart"
if [ -f "$AUTOSTART_SCRIPT" ]; then
    if [[ ${RESTART_APP,,} == "true" ]]; then
        echo "[ls.io-init] RESTART_APP is set. Setting autostart owner to root and making read-only for user"
        chown root:"$TARGET_USER" "$AUTOSTART_SCRIPT"
        chmod 550 "$AUTOSTART_SCRIPT"
    else
        chown "$TARGET_USER:$TARGET_USER" "$AUTOSTART_SCRIPT"
        chmod 644 "$AUTOSTART_SCRIPT"
    fi
fi

# wm tweaks
if [[ "${PIXELFLUX_WAYLAND}" == "true" ]]; then
    # labwc tweaks
    USER_RC_XML="$CONF_DIR/rc.xml"
    if [[ -f "$DEF_RC" ]]; then
        echo "[ls.io-init] Generating labwc rc.xml from template"
        cp "$DEF_RC" "$USER_RC_XML"
        chown "$TARGET_USER:$TARGET_USER" "$USER_RC_XML"

        if [[ -n "${DISABLE_CLOSE_BUTTON}" ]]; then
            echo "[ls.io-init] Disabling close button"
            sed -i 's/close//' "$USER_RC_XML"
        fi
        if [[ ${DISABLE_MOUSE_BUTTONS,,} == "true" ]]; then
            echo "[ls.io-init] Disabling right and middle mouse clicks"
            sed -i -e '/<mousebind button="Right"/,/<\/mousebind>/d' \
                -e '/<mousebind button="Middle"/,/<\/mousebind>/d' "$USER_RC_XML"
        fi
        if [[ ! -z ${NO_DECOR+x} ]]; then
            echo "[ls.io-init] Removing window decorations"
            sed -i 's/serverDecoration="yes"/serverDecoration="no"/' "$USER_RC_XML"
        fi
        if [[ ! -z ${NO_FULL+x} ]]; then
            echo "[ls.io-init] Disabling auto-maximization"
            sed -i '/<windowRule identifier="\*"><action name="Maximize" \/><\/windowRule>/d' "$USER_RC_XML"
        fi
        if [[ ${HARDEN_KEYBINDS,,} == "true" ]]; then
            echo "[ls.io-init] Disabling dangerous keybinds"
            KEYS_TO_DISABLE=(
                "A-F4"
                "A-Escape"
                "A-space"
                "W-e"
            )
            for key in "${KEYS_TO_DISABLE[@]}"; do
                # comment out the whole <keybind> element for this key
                sed -i "/<keybind key=\"${key}\"/I,/<\/keybind>/{s/^/ <!-- /;s/$/ -->/}" "$USER_RC_XML"
            done
        fi

        # lock file if hardened
        if [[ ${DISABLE_MOUSE_BUTTONS,,} == "true" || ${HARDEN_KEYBINDS,,} == "true" ]]; then
            echo "[ls.io-init] Locking labwc rc.xml to prevent security overrides"
            chown root:"$TARGET_USER" "$USER_RC_XML"
            chmod 444 "$USER_RC_XML"
        fi
    fi
else
    # openbox tweaks (applied to the pristine system rc.xml backup every run)
    SYS_RC_XML="/etc/xdg/openbox/rc.xml"
    SYS_RC_BAK="/etc/xdg/openbox/rc.xml.bak"
    if [ ! -f "$SYS_RC_BAK" ]; then
        echo "[ls.io-init] Creating initial backup of system rc.xml"
        cp "$SYS_RC_XML" "$SYS_RC_BAK"
    fi
    cp "$SYS_RC_BAK" "$SYS_RC_XML"
    if [[ -n "${DISABLE_CLOSE_BUTTON}" ]]; then
        echo "[ls.io-init] Disabling close button"
        sed -i '/<titleLayout>/s/C//' "$SYS_RC_XML"
    fi
    if [[ ${DISABLE_MOUSE_BUTTONS,,} == "true" ]]; then
        echo "[ls.io-init] Disabling right and middle mouse clicks"
        sed -i -e '/<mousebind button="Right"/,/<\/mousebind>/d' \
            -e '/<mousebind button="Middle"/,/<\/mousebind>/d' "$SYS_RC_XML"
    fi
    if [[ ! -z ${NO_DECOR+x} ]]; then
        echo "[ls.io-init] Removing window decorations"
        sed -i 's/<application class="\*">/&<decor>no<\/decor>/' "$SYS_RC_XML"
    fi
    if [[ ! -z ${NO_FULL+x} ]]; then
        echo "[ls.io-init] Disabling maximization"
        sed -i '/<maximized>yes<\/maximized>/d' "$SYS_RC_XML"
    fi
    if [[ ${HARDEN_KEYBINDS,,} == "true" ]]; then
        echo "[ls.io-init] Disabling dangerous keybinds"
        KEYS_TO_DISABLE=(
            "A-F4"
            "A-Escape"
            "A-space"
            "W-e"
        )
        for key in "${KEYS_TO_DISABLE[@]}"; do
            sed -i "/<keybind key=\"${key}\"/,/<\/keybind>/{s/^/ <!-- /;s/$/ -->/}" "$SYS_RC_XML"
        done
    fi

    # disable user rc path if config is hardened
    USER_RC_XML="$CONF_DIR/rc.xml"
    if [[ ${DISABLE_MOUSE_BUTTONS,,} == "true" || ${HARDEN_KEYBINDS,,} == "true" ]]; then
        echo "[ls.io-init] Locking user rc.xml to prevent security overrides"
        mkdir -p "$(dirname $USER_RC_XML)"
        chown "$TARGET_USER:$TARGET_USER" "$(dirname $USER_RC_XML)"
        cp "$SYS_RC_XML" "$USER_RC_XML"
        chown root:"$TARGET_USER" "$USER_RC_XML"
        chmod 444 "$USER_RC_XML"
    else
        if [ -f "$USER_RC_XML" ] && [ "$(stat -c '%U' $USER_RC_XML)" == "root" ]; then
            echo "[ls.io-init] Hardening disabled, removing locked user rc.xml"
            rm -f "$USER_RC_XML"
        fi
    fi
fi

# add proot-apps (install on first run, refresh when /proot-apps/pversion changes)
proot_updated=false
if [ ! -f "$HOME/.local/bin/proot-apps" ]; then
    mkdir -p "$HOME/.local/bin/"
    cp /proot-apps/* "$HOME/.local/bin/"
    echo 'export PATH="$HOME/.local/bin:$PATH"' >> "$HOME/.bashrc"
    proot_updated=true
elif ! diff -q /proot-apps/pversion "$HOME/.local/bin/pversion" > /dev/null; then
    cp /proot-apps/* "$HOME/.local/bin/"
    proot_updated=true
fi
if [ "$proot_updated" = true ]; then
    chown -R "$TARGET_USER:$TARGET_USER" "$HOME/.local"
    [ -f "$HOME/.bashrc" ] && chown "$TARGET_USER:$TARGET_USER" "$HOME/.bashrc"
fi

# Enable gpu encode if device detected (only when exactly one render node exists
# and the user did not pin DRI_NODE themselves)
if ! which nvidia-smi && [ -e "/dev/dri/renderD128" ] && [ ! -e "/dev/dri/renderD129" ] && [ -z ${DRI_NODE+x} ]; then
    printf "/dev/dri/renderD128" > /run/s6/container_environment/DRI_NODE
fi
if [[ "${PIXELFLUX_WAYLAND}" == "true" ]] && [ -e "/dev/dri/renderD128" ] && [ ! -e "/dev/dri/renderD129" ] && [ -z ${DRI_NODE+x} ]; then
    printf "/dev/dri/renderD128" > /run/s6/container_environment/DRI_NODE
    if [ -z ${DRINODE+x} ]; then
        printf "/dev/dri/renderD128" > /run/s6/container_environment/DRINODE
    fi
fi

# js setup: create fake joystick nodes for the selkies interposer; if that is
# not possible (unprivileged container) disable all gamepad features instead
mkdir -pm1777 /dev/input
touch /tmp/selkies_js.log
chmod 777 /tmp/selkies*
if [[ -z ${NO_GAMEPAD+x} ]] && mknod /dev/input/js0 c 13 0; then
    printf "/usr/lib/selkies_joystick_interposer.so:/opt/lib/libudev.so.1.0.0-fake" > /run/s6/container_environment/LD_PRELOAD
    mknod /dev/input/js1 c 13 1
    mknod /dev/input/js2 c 13 2
    mknod /dev/input/js3 c 13 3
    mknod /dev/input/event1000 c 13 1064
    mknod /dev/input/event1001 c 13 1065
    mknod /dev/input/event1002 c 13 1066
    mknod /dev/input/event1003 c 13 1067
    chmod 777 /dev/input/js* /dev/input/event*
else
    printf "false" > /run/s6/container_environment/SELKIES_UI_SIDEBAR_SHOW_GAMEPADS
    printf "false" > /run/s6/container_environment/SELKIES_GAMEPAD_ENABLED
    printf "false" > /run/s6/container_environment/SELKIES_ENABLE_PLAYER2
    printf "false" > /run/s6/container_environment/SELKIES_ENABLE_PLAYER3
    # FIX: was a duplicated write to SELKIES_ENABLE_PLAYER3; four joystick
    # nodes (js0-js3) are created above, so the fourth player flag is the one
    # that was evidently intended here.
    printf "false" > /run/s6/container_environment/SELKIES_ENABLE_PLAYER4
fi
#!/usr/bin/with-contenv bash
# init-video (s6 oneshot): grants the desktop user access to video/DVB character
# devices (uid/gid/group-rw checks, creating a matching group when needed) and,
# when an NVIDIA GPU is present, fills in the OpenCL/Vulkan/EGL ICD files and
# GBM library linkage that nvidia-container-toolkit sometimes fails to place.

TARGET_USER="${CUSTOM_USER:-${USER_NAME:-root}}"
# character devices only; stderr silenced because /dev/dvb usually doesn't exist
FILES=$(find /dev/dri /dev/dvb -type c -print 2>/dev/null)

for i in $FILES
do
    VIDEO_GID=$(stat -c '%g' "${i}")
    VIDEO_UID=$(stat -c '%u' "${i}")
    # check if user matches device
    if id -u "$TARGET_USER" | grep -qw "${VIDEO_UID}"; then
        echo "**** permissions for ${i} are good ****"
    else
        # check if group matches and that device has group rw
        # (cut -b 5,6 extracts the group r/w bits from the symbolic mode string)
        if id -G "$TARGET_USER" | grep -qw "${VIDEO_GID}" && [ $(stat -c '%A' "${i}" | cut -b 5,6) = "rw" ]; then
            echo "**** permissions for ${i} are good ****"
        # check if device needs to be added to video group
        elif ! id -G "$TARGET_USER" | grep -qw "${VIDEO_GID}"; then
            # check if video group needs to be created
            VIDEO_NAME=$(getent group "${VIDEO_GID}" | awk -F: '{print $1}')
            if [ -z "${VIDEO_NAME}" ]; then
                # no group with this gid exists: make one with a random suffix,
                # then force its gid to match the device
                VIDEO_NAME="video$(head /dev/urandom | tr -dc 'a-z0-9' | head -c4)"
                groupadd "${VIDEO_NAME}"
                groupmod -g "${VIDEO_GID}" "${VIDEO_NAME}"
                echo "**** creating video group ${VIDEO_NAME} with id ${VIDEO_GID} ****"
            fi
            echo "**** adding ${i} to video group ${VIDEO_NAME} with id ${VIDEO_GID} ****"
            usermod -a -G "${VIDEO_NAME}" "$TARGET_USER"
        fi
        # check if device has group rw
        # NOTE(review): message below is missing a space after "container." —
        # left untouched here because it is a runtime string, not a comment
        if [ $(stat -c '%A' "${i}" | cut -b 5,6) != "rw" ]; then
            echo -e "**** The device ${i} does not have group read/write permissions, attempting to fix inside the container.If it doesn't work, you can run the following on your docker host: ****\nsudo chmod g+rw ${i}\n"
            chmod g+rw "${i}"
        fi
    fi
done

# pixelflux handles encoding internally based on GPU availability
# NVIDIA: auto-detects NVENC
# Intel/AMD: uses VA-API via vaapi_render_node_index
# No explicit encoder setting needed

# check if nvidia gpu is present (nvidia-smi on PATH and /dev/dri non-empty)
if which nvidia-smi > /dev/null 2>&1 && ls -A /dev/dri 2>/dev/null; then
    # nvidia-container-toolkit may not place files correctly, so we set them up here
    echo "**** NVIDIA GPU detected ****"
    OPENCL_ICDS=$(find /etc/OpenCL/vendors -name '*nvidia*.icd' 2>/dev/null)
    # if no opencl icd found
    if [ -z "${OPENCL_ICDS}" ]; then
        echo "**** Setting up OpenCL ICD for NVIDIA ****"
        mkdir -pm755 /etc/OpenCL/vendors/
        echo "libnvidia-opencl.so.1" > /etc/OpenCL/vendors/nvidia.icd
    fi
    # find vulkan icds
    ICDS=$(find /usr/share/vulkan/icd.d /etc/vulkan/icd.d -name '*nvidia*.json' 2>/dev/null)
    # if no icd found
    if [ -z "${ICDS}" ]; then
        echo "**** Setting up Vulkan ICD for NVIDIA ****"
        # get vulkan api version by resolving the libvulkan.so symlink target
        VULKAN_API_VERSION=$(ldconfig -p | grep "libvulkan.so" | awk '{print $NF}' | xargs readlink | grep -oE "[0-9]+\.[0-9]+\.[0-9]+")
        # Fallback if pipeline fails
        if [ -z "${VULKAN_API_VERSION}" ]; then
            # version 1.1 or greater allows vulkan-loader to load the driver's dynamic library
            VULKAN_API_VERSION="1.1.0"
        fi
        mkdir -pm755 /etc/vulkan/icd.d/
        cat > /etc/vulkan/icd.d/nvidia_icd.json << EOF
{
    "file_format_version" : "1.0.0",
    "ICD": {
        "library_path": "libGLX_nvidia.so.0",
        "api_version" : "${VULKAN_API_VERSION}"
    }
}
EOF
    fi
    # find glvnd egl_vendor files
    EGLS=$(find /usr/share/glvnd/egl_vendor.d /etc/glvnd/egl_vendor.d -name '*nvidia*.json' 2>/dev/null)
    # if no egl_vendor file found
    if [ -z "${EGLS}" ]; then
        echo "**** Setting up EGL vendor file for NVIDIA ****"
        mkdir -pm755 /etc/glvnd/egl_vendor.d/
        cat > /etc/glvnd/egl_vendor.d/10_nvidia.json << EOF
{
    "file_format_version" : "1.0.0",
    "ICD": {
        "library_path": "libEGL_nvidia.so.0"
    }
}
EOF
    fi
    # fix gbm library linkage: copy the driver's gbm backend next to libc's
    # library directory if the dynamic linker cannot already find it
    if ! ldconfig -p | grep -q "nvidia-drm_gbm.so"; then
        GBM_PATHS=(
            "/usr/lib/x86_64-linux-gnu/gbm/nvidia-drm_gbm.so"
            "/usr/lib/aarch64-linux-gnu/gbm/nvidia-drm_gbm.so"
            "/usr/lib64/gbm/nvidia-drm_gbm.so"
            "/usr/lib/gbm/nvidia-drm_gbm.so"
            "/usr/local/lib/gbm/nvidia-drm_gbm.so"
            "/usr/local/lib64/gbm/nvidia-drm_gbm.so"
        )
        GBM_SRC=""
        for p in "${GBM_PATHS[@]}"; do
            if [ -f "${p}" ]; then GBM_SRC="${p}"; break; fi
        done
        if [ -n "${GBM_SRC}" ]; then
            echo "**** Fixing GBM library linkage ****"
            # derive the primary library dir from where libc.so.6 lives
            LIB_PATH=$(ldconfig -p | grep "libc.so.6" | head -n1 | awk -F '=> ' '{print $2}' | xargs dirname)
            if [ -z "${LIB_PATH}" ]; then LIB_PATH="/usr/lib"; fi
            if [ -d "${LIB_PATH}/gbm" ]; then
                GBM_DEST="${LIB_PATH}/gbm"
            else
                GBM_DEST="${LIB_PATH}"
            fi
            cp "${GBM_SRC}" "${GBM_DEST}/"
            ldconfig
        fi
    fi
fi
@@ -0,0 +1,2 @@ +#!/usr/bin/with-contenv bash +exec /usr/bin/python3 /usr/local/bin/auth-server.py diff --git a/files/ubuntu-root/etc/s6-overlay/s6-rc.d/svc-auth/type b/files/ubuntu-root/etc/s6-overlay/s6-rc.d/svc-auth/type new file mode 100644 index 000000000..5883cff0c --- /dev/null +++ b/files/ubuntu-root/etc/s6-overlay/s6-rc.d/svc-auth/type @@ -0,0 +1 @@ +longrun diff --git a/files/ubuntu-root/etc/s6-overlay/s6-rc.d/svc-cron/dependencies.d/init-services b/files/ubuntu-root/etc/s6-overlay/s6-rc.d/svc-cron/dependencies.d/init-services new file mode 100644 index 000000000..e69de29bb diff --git a/files/ubuntu-root/etc/s6-overlay/s6-rc.d/svc-cron/run b/files/ubuntu-root/etc/s6-overlay/s6-rc.d/svc-cron/run new file mode 100755 index 000000000..f8ec9d211 --- /dev/null +++ b/files/ubuntu-root/etc/s6-overlay/s6-rc.d/svc-cron/run @@ -0,0 +1,16 @@ +#!/usr/bin/with-contenv bash +# shellcheck shell=bash + +TARGET_USER="${CUSTOM_USER:-${USER_NAME:-root}}" +if builtin command -v crontab >/dev/null 2>&1 && [[ -n "$(crontab -l -u "$TARGET_USER" 2>/dev/null || true)" || -n "$(crontab -l -u root 2>/dev/null || true)" ]]; then + if builtin command -v busybox >/dev/null 2>&1 && [[ $(busybox || true) =~ [[:space:]](crond)([,]|$) ]]; then + exec busybox crond -f -S -l 5 + elif [[ -f /usr/bin/apt ]] && [[ -f /usr/sbin/cron ]]; then + exec /usr/sbin/cron -f -L 5 + else + echo "**** cron not found ****" + sleep infinity + fi +else + sleep infinity +fi diff --git a/files/ubuntu-root/etc/s6-overlay/s6-rc.d/svc-cron/type b/files/ubuntu-root/etc/s6-overlay/s6-rc.d/svc-cron/type new file mode 100644 index 000000000..5883cff0c --- /dev/null +++ b/files/ubuntu-root/etc/s6-overlay/s6-rc.d/svc-cron/type @@ -0,0 +1 @@ +longrun diff --git a/files/ubuntu-root/etc/s6-overlay/s6-rc.d/svc-dbus/dependencies.d/init-services b/files/ubuntu-root/etc/s6-overlay/s6-rc.d/svc-dbus/dependencies.d/init-services new file mode 100644 index 000000000..e69de29bb diff --git 
a/files/ubuntu-root/etc/s6-overlay/s6-rc.d/svc-dbus/dependencies.d/svc-xorg b/files/ubuntu-root/etc/s6-overlay/s6-rc.d/svc-dbus/dependencies.d/svc-xorg new file mode 100644 index 000000000..e69de29bb diff --git a/files/ubuntu-root/etc/s6-overlay/s6-rc.d/svc-dbus/run b/files/ubuntu-root/etc/s6-overlay/s6-rc.d/svc-dbus/run new file mode 100755 index 000000000..42b0fd517 --- /dev/null +++ b/files/ubuntu-root/etc/s6-overlay/s6-rc.d/svc-dbus/run @@ -0,0 +1,15 @@ +#!/usr/bin/with-contenv bash + +TARGET_USER="${CUSTOM_USER:-${USER_NAME:-root}}" + +# Folder setup +mkdir -p /run/dbus +chown "$TARGET_USER:$TARGET_USER" /run/dbus +rm -f /run/dbus/pid + +# Run dbus +exec s6-setuidgid "$TARGET_USER" \ + dbus-daemon \ + --system \ + --nofork \ + --nosyslog diff --git a/files/ubuntu-root/etc/s6-overlay/s6-rc.d/svc-dbus/type b/files/ubuntu-root/etc/s6-overlay/s6-rc.d/svc-dbus/type new file mode 100644 index 000000000..5883cff0c --- /dev/null +++ b/files/ubuntu-root/etc/s6-overlay/s6-rc.d/svc-dbus/type @@ -0,0 +1 @@ +longrun diff --git a/files/ubuntu-root/etc/s6-overlay/s6-rc.d/svc-de/dependencies.d/init-services b/files/ubuntu-root/etc/s6-overlay/s6-rc.d/svc-de/dependencies.d/init-services new file mode 100644 index 000000000..e69de29bb diff --git a/files/ubuntu-root/etc/s6-overlay/s6-rc.d/svc-de/dependencies.d/svc-nginx b/files/ubuntu-root/etc/s6-overlay/s6-rc.d/svc-de/dependencies.d/svc-nginx new file mode 100644 index 000000000..e69de29bb diff --git a/files/ubuntu-root/etc/s6-overlay/s6-rc.d/svc-de/dependencies.d/svc-selkies b/files/ubuntu-root/etc/s6-overlay/s6-rc.d/svc-de/dependencies.d/svc-selkies new file mode 100644 index 000000000..e69de29bb diff --git a/files/ubuntu-root/etc/s6-overlay/s6-rc.d/svc-de/dependencies.d/svc-xorg b/files/ubuntu-root/etc/s6-overlay/s6-rc.d/svc-de/dependencies.d/svc-xorg new file mode 100644 index 000000000..e69de29bb diff --git a/files/ubuntu-root/etc/s6-overlay/s6-rc.d/svc-de/finish b/files/ubuntu-root/etc/s6-overlay/s6-rc.d/svc-de/finish new 
# ===== file: s6-rc.d/svc-de/finish =====
#!/usr/bin/with-contenv bash
# Teardown hook for the desktop-environment service: kills every descendant of
# the pid recorded in /de-pid, waits briefly for them to exit, then removes the
# pid file. Anything still alive afterwards is left to s6's own termination.

# exit early if no pid file
PID_FILE="/de-pid"
WAIT_SECONDS=5
if [ ! -r "$PID_FILE" ]; then
    exit 0
fi
PARENT_PID=$(cat "$PID_FILE")
if [ -z "$PARENT_PID" ]; then
    rm -f "$PID_FILE"
    exit 0
fi

# get all descendant pids of de (pstree prints pids as "(NNN)"; the parent
# itself is filtered out because s6 handles it)
PIDS_TO_KILL=$(pstree -p "$PARENT_PID" | grep -o '([0-9]\+)' | grep -o '[0-9]\+' | grep -v "^${PARENT_PID}$")

# kill all descendant pids
if [ -z "$PIDS_TO_KILL" ]; then
    echo "No desktop processes found to terminate."
else
    echo "$PIDS_TO_KILL" | xargs --no-run-if-empty kill -TERM -- 2>/dev/null
    echo "Waiting up to ${WAIT_SECONDS} seconds for desktop processes to terminate..."
    # poll 4x per second; kill -0 only probes whether the pid is still alive
    for ((i=0; i < WAIT_SECONDS * 4; i++)); do
        all_gone=1
        for pid in $PIDS_TO_KILL; do
            if kill -0 "$pid" 2>/dev/null; then
                all_gone=0
                break
            fi
        done
        if [ "$all_gone" -eq 1 ]; then
            echo "All desktop processes terminated cleanly."
            break
        fi
        sleep 0.25
    done
    if [ "$all_gone" -eq 0 ]; then
        echo "Timeout reached. Handing off to s6 for final termination."
    fi
fi

# clean up the PID file.
rm -f "$PID_FILE"

exit 0
# ===== file: s6-rc.d/svc-de/run (longrun) =====
#!/usr/bin/with-contenv bash
# Launches the desktop session (startwm.sh / startwm_wayland.sh) as the target
# user once the display server is up, records its pid in /de-pid for the finish
# script above, and blocks on it so s6 sees the session's lifetime.

TARGET_USER="${CUSTOM_USER:-${USER_NAME:-root}}"

# ensure runtime dir for DBus/Qt (fall back to /tmp when /run/user is read-only)
TARGET_UID=$(id -u "${TARGET_USER}" 2>/dev/null || true)
if [ -n "${TARGET_UID}" ]; then
    RUNTIME_DIR="/run/user/${TARGET_UID}"
    if ! mkdir -p "${RUNTIME_DIR}"; then
        RUNTIME_DIR="/tmp/runtime-${TARGET_UID}"
        mkdir -p "${RUNTIME_DIR}"
    fi
    chown "${TARGET_USER}:${TARGET_USER}" "${RUNTIME_DIR}" || true
    chmod 700 "${RUNTIME_DIR}" || true
fi

# wayland entrypoint
if [[ "${PIXELFLUX_WAYLAND}" == "true" ]]; then
    SOCKET_PATH="${XDG_RUNTIME_DIR}/${WAYLAND_DISPLAY:-wayland-1}"
    echo "[svc-de] Wayland mode: Waiting for socket at ${SOCKET_PATH}..."
    while [ ! -e "${SOCKET_PATH}" ]; do
        sleep 0.5
    done
    echo "[svc-de] ${SOCKET_PATH} found launching de"
    cd $HOME
    # `exec ... &` runs the session in a backgrounded subshell so its pid can
    # be captured for /de-pid while this script waits on it
    exec s6-setuidgid "${TARGET_USER}" \
        /bin/bash /defaults/startwm_wayland.sh &
    PID=$!
    echo "$PID" > /de-pid
    wait "$PID"
    exit 1
fi

# wait for X to be running (xset q succeeds once the server answers)
while true; do
    if xset q &>/dev/null; then
        break
    fi
    sleep .5
done

# set resolution before starting apps (0 means "unset" upstream, so it is
# normalized to the defaults here)
RESOLUTION_WIDTH=${SELKIES_MANUAL_WIDTH:-1024}
RESOLUTION_HEIGHT=${SELKIES_MANUAL_HEIGHT:-768}
if [ "$RESOLUTION_WIDTH" = "0" ]; then RESOLUTION_WIDTH=1024; fi
if [ "$RESOLUTION_HEIGHT" = "0" ]; then RESOLUTION_HEIGHT=768; fi
# build a modeline with cvt and register it with xrandr if not already present
MODELINE=$(s6-setuidgid "${TARGET_USER}" cvt "${RESOLUTION_WIDTH}" "${RESOLUTION_HEIGHT}" | grep "Modeline" | sed 's/^.*Modeline //')
MODELINE_ARGS=$(echo "$MODELINE" | tr -d '"')
MODELINE_NAME=$(echo "$MODELINE_ARGS" | awk '{print $1}')
if ! s6-setuidgid "${TARGET_USER}" xrandr | grep -q "$MODELINE_NAME"; then
    s6-setuidgid "${TARGET_USER}" xrandr --newmode $MODELINE_ARGS
    s6-setuidgid "${TARGET_USER}" xrandr --addmode screen "$MODELINE_NAME"
    s6-setuidgid "${TARGET_USER}" xrandr --output screen --mode "$MODELINE_NAME" --dpi 96
fi

# set xresources (create a default cursor theme if the user has none)
if [ -f "${HOME}/.Xresources" ]; then
    xrdb "${HOME}/.Xresources"
else
    echo "Xcursor.theme: breeze" > "${HOME}/.Xresources"
    xrdb "${HOME}/.Xresources"
fi
if [[ "${LANG:-}" == ja* ]]; then
    setxkbmap -layout jp -model jp106 -option ''
fi
chown "${TARGET_USER}:${TARGET_USER}" "${HOME}/.Xresources"
chmod 777 /tmp/selkies*

# Harden home ownership for persisted volumes (avoids root-owned files causing black screens)
chown -R "${TARGET_USER}:${TARGET_USER}" "${HOME}/.config" "${HOME}/.local" "${HOME}/.cache" 2>/dev/null || true
for f in "${HOME}/.xsettingsd" "${HOME}/.Xauthority" "${HOME}/.ICEauthority"; do
    [ -e "$f" ] && chown "${TARGET_USER}:${TARGET_USER}" "$f" 2>/dev/null || true
done
# Ensure UDisks2 activation is gone at runtime too
rm -f /usr/share/dbus-1/system-services/org.freedesktop.UDisks2.service

# run (same backgrounded-exec + pid-file pattern as the wayland branch)
cd $HOME
exec s6-setuidgid "${TARGET_USER}" \
    /bin/bash /defaults/startwm.sh &
PID=$!
echo "$PID" > /de-pid
wait "$PID"
# ===== file: s6-rc.d/svc-docker/run (longrun) =====
#!/usr/bin/with-contenv bash
# Starts docker-in-docker only when the container is privileged (presence of
# /dev/cpu_dma_latency is used as the privilege probe) and START_DOCKER=true;
# otherwise parks forever so s6 does not restart-loop.

# Make sure this is a priv container
if [ -e /dev/cpu_dma_latency ]; then
    if [ "${START_DOCKER}" == "true" ]; then
        mount -t tmpfs none /tmp
        exec /usr/local/bin/dockerd-entrypoint.sh -l error
    else
        sleep infinity
    fi
fi
# if anything goes wrong with Docker don't loop
sleep infinity
#!/usr/bin/with-contenv bash
# shellcheck shell=bash
# File: etc/s6-overlay/s6-rc.d/svc-nginx/run  (s6 type: longrun)
# Reaps any zombie nginx processes left by an unclean restart, then runs
# nginx in the foreground under s6 supervision.

if pgrep -f "[n]ginx:" >/dev/null; then
    echo "Zombie nginx processes detected, sending SIGTERM"
    # Fix: quote the pattern — unquoted, [n]ginx: is a shell bracket glob and
    # would be replaced by a matching filename if one existed in the cwd.
    pkill -ef "[n]ginx:"
    sleep 1
fi

if pgrep -f "[n]ginx:" >/dev/null; then
    echo "Zombie nginx processes still active, sending SIGKILL"
    pkill -9 -ef "[n]ginx:"
    sleep 1
fi

exec /usr/sbin/nginx -g 'daemon off;'

#!/usr/bin/with-contenv bash
# File: etc/s6-overlay/s6-rc.d/svc-pulseaudio/run  (s6 type: longrun)
# Runs PulseAudio for the desktop user; never idle-exits so the virtual
# sinks created by svc-selkies stay alive.
TARGET_USER="${CUSTOM_USER:-${USER_NAME:-root}}"

exec s6-setuidgid "$TARGET_USER" \
    /usr/bin/pulseaudio \
        --log-level=0 \
        --log-target=stderr \
        --exit-idle-time=-1 > /dev/null 2>&1
#!/usr/bin/with-contenv bash
# File: etc/s6-overlay/s6-rc.d/svc-selkies/run  (s6 type: longrun)
# Selects the pixelflux video encoder (NVENC / VA-API / software) based on
# the detected GPU, creates PulseAudio null sinks, optionally wires up dev
# mode, and finally launches the selkies streaming server.

TARGET_USER="${CUSTOM_USER:-${USER_NAME:-root}}"
PORT="${SELKIES_PORT:-8082}"

# ENCODER is passed from start-container.sh: software|nvidia|nvidia-wsl|intel|amd
ENCODER="${ENCODER:-}"
GPU_VENDOR="${ENCODER:-${GPU_VENDOR:-software}}"

# Check if running in WSL environment
WSL_ENVIRONMENT="${WSL_ENVIRONMENT:-false}"

# Export LIBVA_DRIVER_NAME for Intel/AMD VA-API (set by start-container.sh)
if [ -n "${LIBVA_DRIVER_NAME}" ]; then
    export LIBVA_DRIVER_NAME
    echo "[svc-selkies] Using VA-API driver: ${LIBVA_DRIVER_NAME}"
fi

# WSL2 specific setup for NVIDIA - put the WSL GPU libs first on the paths.
if [ "${WSL_ENVIRONMENT}" = "true" ]; then
    echo "[svc-selkies] Running in WSL2 environment"
    if [ -d "/usr/lib/wsl/lib" ]; then
        # Remove /usr/lib/wsl/lib if already present, then add to front
        LD_LIBRARY_PATH="${LD_LIBRARY_PATH//\/usr\/lib\/wsl\/lib:/}"
        LD_LIBRARY_PATH="${LD_LIBRARY_PATH//\/usr\/lib\/wsl\/lib/}"
        export LD_LIBRARY_PATH="/usr/lib/wsl/lib:${LD_LIBRARY_PATH}"
        export PATH="/usr/lib/wsl/lib:${PATH}"
        echo "[svc-selkies] Added WSL library path at front: /usr/lib/wsl/lib"
    fi
fi

# Locate nvidia-smi (WSL2 keeps it in /usr/lib/wsl/lib, off the default PATH).
find_nvidia_smi() {
    if command -v nvidia-smi &> /dev/null; then
        echo "nvidia-smi"
    elif [ -x "/usr/lib/wsl/lib/nvidia-smi" ]; then
        echo "/usr/lib/wsl/lib/nvidia-smi"
    else
        return 1
    fi
}

# True when an NVIDIA GPU responds (native Linux and WSL2).
nvidia_available() {
    local nvidia_smi_cmd
    nvidia_smi_cmd=$(find_nvidia_smi) || return 1
    "${nvidia_smi_cmd}" >/dev/null 2>&1
}

# Configure pixelflux hardware encoding based on GPU_VENDOR.
# pixelflux supports: NVENC (NVIDIA), VA-API (Intel/AMD).
# vaapi_render_node_index >= 0 enables VA-API on /dev/dri/renderD(128 + index);
# -1 means try NVENC if available, otherwise software.
if [ "${GPU_VENDOR}" = "nvidia" ] || [ "${GPU_VENDOR}" = "nvidia-wsl" ]; then
    export PIXELFLUX_VAAPI_RENDER_NODE_INDEX="-1"
    echo "[svc-selkies] GPU_VENDOR=${GPU_VENDOR}: pixelflux will use NVENC hardware encoding"

    if nvidia_available; then
        NVIDIA_SMI_CMD=$(find_nvidia_smi)
        echo "[svc-selkies] NVIDIA GPU detected via: ${NVIDIA_SMI_CMD}"
    fi
elif [ "${GPU_VENDOR}" = "intel" ] || [ "${GPU_VENDOR}" = "amd" ]; then
    # Intel/AMD: find the render node whose kernel driver matches the vendor.
    VAAPI_INDEX=""
    for renderD in /dev/dri/renderD*; do
        if [ -e "$renderD" ]; then
            RENDER_NUM=$(echo "$renderD" | grep -oP 'renderD\K[0-9]+')
            DRIVER_LINK="/sys/class/drm/renderD${RENDER_NUM}/device/driver"
            if [ -L "$DRIVER_LINK" ]; then
                DRIVER_NAME=$(basename "$(readlink -f "$DRIVER_LINK")")
                if [ "${GPU_VENDOR}" = "intel" ] && [ "$DRIVER_NAME" = "i915" ]; then
                    VAAPI_INDEX=$((RENDER_NUM - 128))
                    echo "[svc-selkies] Found Intel GPU at renderD${RENDER_NUM} (driver: ${DRIVER_NAME})"
                    break
                elif [ "${GPU_VENDOR}" = "amd" ] && [ "$DRIVER_NAME" = "amdgpu" ]; then
                    VAAPI_INDEX=$((RENDER_NUM - 128))
                    echo "[svc-selkies] Found AMD GPU at renderD${RENDER_NUM} (driver: ${DRIVER_NAME})"
                    break
                fi
            fi
        fi
    done

    if [ -n "${VAAPI_INDEX}" ]; then
        export PIXELFLUX_VAAPI_RENDER_NODE_INDEX="${VAAPI_INDEX}"
        echo "[svc-selkies] GPU_VENDOR=${GPU_VENDOR}: pixelflux will use VA-API hardware encoding (index ${VAAPI_INDEX})"
    elif [ -n "${DRI_NODE}" ]; then
        # Fallback to an explicitly provided DRI_NODE.
        RENDER_NUM=$(echo "${DRI_NODE}" | grep -oP 'renderD\K[0-9]+')
        if [ -n "${RENDER_NUM}" ]; then
            VAAPI_INDEX=$((RENDER_NUM - 128))
            export PIXELFLUX_VAAPI_RENDER_NODE_INDEX="${VAAPI_INDEX}"
            echo "[svc-selkies] GPU_VENDOR=${GPU_VENDOR}: pixelflux will use VA-API on ${DRI_NODE} (index ${VAAPI_INDEX})"
        else
            export PIXELFLUX_VAAPI_RENDER_NODE_INDEX="-1"
            echo "[svc-selkies] GPU_VENDOR=${GPU_VENDOR}: Could not determine render node, falling back to software encoding"
        fi
    else
        export PIXELFLUX_VAAPI_RENDER_NODE_INDEX="-1"
        echo "[svc-selkies] GPU_VENDOR=${GPU_VENDOR}: No matching render node found, falling back to software encoding"
    fi
else
    # No GPU or unknown: software encoding.
    export PIXELFLUX_VAAPI_RENDER_NODE_INDEX="-1"
    echo "[svc-selkies] GPU_VENDOR=${GPU_VENDOR}: pixelflux will use software encoding"
fi

# Always use selkies with pixelflux (WebSocket mode).
SELKIES_CMD="selkies"
CMD_OPTS=(--addr="localhost" --mode="websockets" --port="${PORT}")

# Choose the encoder flag to match the GPU vendor.
ENCODER_TYPE="x264enc" # Default to software encoder

if [ "${GPU_VENDOR}" = "nvidia" ] || [ "${GPU_VENDOR}" = "nvidia-wsl" ]; then
    ENCODER_TYPE="nvh264enc"
    CMD_OPTS+=(--encoder="${ENCODER_TYPE}")
    echo "[svc-selkies] Using NVIDIA NVENC encoder: ${ENCODER_TYPE}"
elif [ "${GPU_VENDOR}" = "intel" ] || [ "${GPU_VENDOR}" = "amd" ]; then
    ENCODER_TYPE="vah264enc"
    CMD_OPTS+=(--encoder="${ENCODER_TYPE}")

    if [ -n "${VAAPI_INDEX}" ]; then
        DRI_NODE_PATH="/dev/dri/renderD$((128 + VAAPI_INDEX))"
        if [ -e "${DRI_NODE_PATH}" ]; then
            CMD_OPTS+=(--dri-node="${DRI_NODE_PATH}")
            echo "[svc-selkies] Added --dri-node=${DRI_NODE_PATH} for VA-API encoding"
        fi
    fi
    echo "[svc-selkies] Using VA-API encoder: ${ENCODER_TYPE}"
else
    CMD_OPTS+=(--encoder="${ENCODER_TYPE}")
    echo "[svc-selkies] Using software encoder: ${ENCODER_TYPE}"
fi

# Enable H.264 streaming mode for lower latency (crucial for responsiveness!)
CMD_OPTS+=(--h264-streaming-mode=true)
echo "[svc-selkies] H.264 streaming mode enabled for low latency"

# Lower CRF = better quality at more bandwidth; 18 favours responsiveness.
SELKIES_H264_CRF="${SELKIES_H264_CRF:-18}"
CMD_OPTS+=(--h264-crf="${SELKIES_H264_CRF}")
echo "[svc-selkies] H.264 CRF set to: ${SELKIES_H264_CRF}"

echo "[svc-selkies] Using selkies with pixelflux (GPU_VENDOR=${GPU_VENDOR}, Encoder=${ENCODER_TYPE})"

# One-time sink setup: wait for PulseAudio, then create virtual sinks.
if [ ! -f '/dev/shm/audio.lock' ]; then
    # Fix: initialise READY so the check below never sees a stale value.
    READY=0
    for i in $(seq 1 30); do
        if s6-setuidgid "$TARGET_USER" with-contenv pactl info >/dev/null 2>&1; then
            READY=1
            break
        fi
        sleep 0.5
    done
    if [ "${READY:-0}" -eq 1 ]; then
        s6-setuidgid "$TARGET_USER" with-contenv pactl \
            load-module module-null-sink \
            sink_name="output" \
            sink_properties=device.description="output"
        s6-setuidgid "$TARGET_USER" with-contenv pactl \
            load-module module-null-sink \
            sink_name="input" \
            sink_properties=device.description="input"
        touch /dev/shm/audio.lock
    else
        echo "[svc-selkies] pulseaudio not ready; skipped null-sink setup (audio may be missing)." >&2
    fi
fi

# Setup dev mode if defined.
if [ -n "${DEV_MODE+x}" ]; then
    # Dev deps
    apt-get update
    apt-get install -y \
        nodejs
    npm install -g nodemon
    rm -Rf "$HOME/.npm"
    # Frontend setup
    if [[ "${DEV_MODE}" == "core" ]]; then
        # Core just runs from directory
        cd "$HOME/src/addons/gst-web-core"
        s6-setuidgid "$TARGET_USER" npm install
        s6-setuidgid "$TARGET_USER" npm run serve &
    else
        # Build core
        cd "$HOME/src/addons/gst-web-core"
        s6-setuidgid "$TARGET_USER" npm install
        s6-setuidgid "$TARGET_USER" npm run build
        s6-setuidgid "$TARGET_USER" cp dist/selkies-core.js "../${DEV_MODE}/src/"
        s6-setuidgid "$TARGET_USER" nodemon --watch selkies-core.js --exec "npm run build && cp dist/selkies-core.js ../${DEV_MODE}/src/" &
        # Copy touch gamepad
        s6-setuidgid "$TARGET_USER" cp ../universal-touch-gamepad/universalTouchGamepad.js "../${DEV_MODE}/src/"
        s6-setuidgid "$TARGET_USER" nodemon --watch ../universal-touch-gamepad/universalTouchGamepad.js --exec "cp ../universal-touch-gamepad/universalTouchGamepad.js ../${DEV_MODE}/src/" &
        # Copy themes
        s6-setuidgid "$TARGET_USER" cp -a nginx "../${DEV_MODE}/"
        # Run passed frontend
        cd "$HOME/src/addons/${DEV_MODE}"
        s6-setuidgid "$TARGET_USER" npm install
        s6-setuidgid "$TARGET_USER" npm run serve &
    fi
    # Run backend
    cd "$HOME/src/src"
    s6-setuidgid "$TARGET_USER" \
        nodemon -V --ext py --exec \
        "python3" -m selkies \
            --addr="localhost" \
            --mode="websockets" \
            --debug="true"
fi

# Start Selkies
echo "[svc-selkies] Starting ${SELKIES_CMD} on port ${PORT}"
exec s6-setuidgid "$TARGET_USER" "${SELKIES_CMD}" "${CMD_OPTS[@]}"

#!/usr/bin/with-contenv bash
# File: etc/s6-overlay/s6-rc.d/svc-watchdog/run  (s6 type: longrun)
# When RESTART_APP=true, keeps the openbox autostart application alive by
# monitoring its oldest matching PID and relaunching it on exit.

if [[ ${RESTART_APP,,} != "true" ]]; then
    exec sleep infinity
fi

TARGET_USER="${CUSTOM_USER:-${USER_NAME:-root}}"
AUTOSTART_CMD="sh $HOME/.config/openbox/autostart"

# Wait for the application to appear once before actively supervising it.
while true; do
    if pgrep -o -u "$TARGET_USER" -f "$AUTOSTART_CMD" > /dev/null; then
        echo "SVC Watchdog: Initial process detected. Starting active monitoring."
        break
    fi
    sleep 2
done

last_known_pid=""
while true; do
    current_pid=$(pgrep -o -u "$TARGET_USER" -f "$AUTOSTART_CMD")
    if [ -z "$current_pid" ]; then
        if [ -n "$last_known_pid" ]; then
            echo "SVC Watchdog: Application process (PID: $last_known_pid) has terminated. Restarting..."
        else
            echo "SVC Watchdog: Application not running. Attempting to start..."
        fi
        # AUTOSTART_CMD is intentionally unquoted: it must split into
        # "sh <path>" as two arguments.
        s6-setuidgid "$TARGET_USER" $AUTOSTART_CMD &
        last_known_pid=""
    elif [ "$current_pid" != "$last_known_pid" ]; then
        echo "SVC Watchdog: Application process found with PID: $current_pid. Monitoring..."
        last_known_pid="$current_pid"
    fi
    sleep 1
done

#!/usr/bin/with-contenv bash
# File: etc/s6-overlay/s6-rc.d/svc-xorg/run  (s6 type: longrun)
# Runs Xvfb with the extensions selkies needs; parks on Wayland sessions.

# Bail early on wayland — exec so nothing below can ever run.
if [[ "${PIXELFLUX_WAYLAND}" == "true" ]]; then
    exec sleep infinity
fi

# Cleanup stale lock from an unclean shutdown.
rm -f /tmp/.X1-lock

# Enable DRI3 support if a render node is present and no NVIDIA GPU is used.
VFBCOMMAND=""
if ! command -v nvidia-smi >/dev/null 2>&1 && [ -e "/dev/dri/renderD128" ]; then
    VFBCOMMAND="-vfbdevice /dev/dri/renderD128"
fi
if [ -n "${DRINODE+x}" ]; then
    VFBCOMMAND="-vfbdevice ${DRINODE}"
fi
# NOTE(review): this clears VFBCOMMAND unless DISABLE_DRI3 is explicitly
# "false", i.e. DRI3 is off by default — confirm that inversion is intended.
if [ "${DISABLE_DRI3}" != "false" ]; then
    VFBCOMMAND=""
fi

# Clamp virtual screen max size based on env.
DEFAULT_RES="7680x4320"
if [ -n "${MAX_RES+x}" ]; then
    DEFAULT_RES="${MAX_RES}"
fi
if [ -n "${SELKIES_MANUAL_HEIGHT}" ] || [ -n "${SELKIES_MANUAL_WIDTH}" ]; then
    T_WIDTH="${SELKIES_MANUAL_WIDTH:-1024}"
    T_HEIGHT="${SELKIES_MANUAL_HEIGHT:-768}"
    if [ "${T_WIDTH}" = "0" ]; then T_WIDTH="1024"; fi
    if [ "${T_HEIGHT}" = "0" ]; then T_HEIGHT="768"; fi
    DEFAULT_RES="${T_WIDTH}x${T_HEIGHT}"
fi

# DPI: 96 for standard displays; use 144 or 192 for HiDPI.
DPI_VALUE="${DPI:-96}"

# Run Xvfb server with required extensions. VFBCOMMAND is intentionally
# unquoted: it is empty or splits into "-vfbdevice <node>".
TARGET_USER="${CUSTOM_USER:-${USER_NAME:-root}}"
exec s6-setuidgid "$TARGET_USER" \
    /usr/bin/Xvfb \
        "${DISPLAY}" \
        -screen 0 "${DEFAULT_RES}x24" \
        -dpi "${DPI_VALUE}" \
        +extension "COMPOSITE" \
        +extension "DAMAGE" \
        +extension "GLX" \
        +extension "RANDR" \
        +extension "RENDER" \
        +extension "MIT-SHM" \
        +extension "XFIXES" \
        +extension "XTEST" \
        +iglx \
        +render \
        -nolisten "tcp" \
        -ac \
        -noreset \
        -shmem \
        ${VFBCOMMAND}
#!/usr/bin/with-contenv bash
# File: etc/s6-overlay/s6-rc.d/svc-xsettingsd/run  (s6 type: longrun)
# Runs xsettingsd for the desktop user, seeding a default DPI setting.

TARGET_USER="${CUSTOM_USER:-${USER_NAME:-root}}"

# XFCE ships its own xsettings daemon and Wayland sessions do not use one:
# park the service. Fix: exec the sleep so that, if it ever returns, the
# script cannot fall through and start a second xsettings daemon.
if command -v xfce4-session > /dev/null 2>&1 || [[ "${PIXELFLUX_WAYLAND}" == "true" ]]; then
    exec sleep infinity
fi

# Create default xsettings (Xft/DPI is in 1/1024ths: 98304 == 96 DPI).
if [ ! -f "${HOME}/.xsettingsd" ]; then
    echo "Xft/DPI 98304" > "${HOME}/.xsettingsd"
fi
chown "$TARGET_USER:$TARGET_USER" "${HOME}/.xsettingsd"

# run
exec s6-setuidgid "$TARGET_USER" \
    xsettingsd
#!/usr/bin/env python3
"""Tiny cookie-based login gateway for the remote desktop.

Serves a login form on 127.0.0.1:6060 and an /auth/verify endpoint that a
reverse proxy (nginx auth_request) can use to gate access. Credentials are
loaded once at startup from /etc/web-auth.json (keys: user, salt, pw_hash,
secret); sessions are HMAC-signed, expiring cookies.
"""
import base64
import hashlib
import hmac
import http.server
import json
import os
import secrets
import time
import urllib.parse

PORT = 6060
SESSION_TTL = 24 * 3600
COOKIE_NAME = "selkies_session"
AUTH_PATH = "/auth"


def load_auth():
    """Read credential material from /etc/web-auth.json (fails fast at startup)."""
    with open("/etc/web-auth.json", "r") as f:
        data = json.load(f)
    return {
        "user": data["user"],
        "salt": data["salt"],
        "pw_hash": data["pw_hash"],
        "secret": data["secret"],
    }


AUTH = load_auth()


def hash_pw(pw: str) -> str:
    """Salted SHA-256 hex digest matching the pw_hash stored in web-auth.json."""
    return hashlib.sha256((pw + AUTH["salt"]).encode()).hexdigest()


def sign_session(user: str) -> str:
    """Create a base64url token 'user:exp:nonce:sig' signed with the shared secret.

    NOTE(review): the payload is colon-delimited, so a username containing ':'
    would produce tokens verify_session() cannot parse — confirm usernames are
    restricted upstream.
    """
    exp = int(time.time()) + SESSION_TTL
    nonce = secrets.token_hex(8)
    payload = f"{user}:{exp}:{nonce}"
    sig = hmac.new(AUTH["secret"].encode(), payload.encode(), hashlib.sha256).hexdigest()
    token = f"{payload}:{sig}"
    return base64.urlsafe_b64encode(token.encode()).decode()


def verify_session(token: str) -> bool:
    """Return True iff the token is well-formed, unexpired, and correctly signed."""
    try:
        raw = base64.urlsafe_b64decode(token.encode()).decode()
        user, exp, nonce, sig = raw.split(":")
        if user != AUTH["user"]:
            return False
        if int(exp) < int(time.time()):
            return False
        expected = hmac.new(
            AUTH["secret"].encode(), f"{user}:{exp}:{nonce}".encode(), hashlib.sha256
        ).hexdigest()
        # Constant-time comparison to avoid leaking signature bytes.
        return hmac.compare_digest(sig, expected)
    except Exception:
        # Any malformed/undecodable token is simply invalid.
        return False


def render_login(message: str = "") -> bytes:
    """Render the login page, optionally with an error banner.

    NOTE(review): the original markup was garbled in the source under review;
    this form is reconstructed from the handler contract (POST to
    {AUTH_PATH}/login with 'username' and 'password' fields) — verify styling
    against the original page.
    """
    msg_html = f"<div class='msg error'>{message}</div>" if message else ""
    return f"""<!doctype html>
<html><head><meta charset="utf-8"><title>Login</title></head>
<body>
  <h1>Sign in</h1>
  <p>Authenticate to open the remote desktop.</p>
  {msg_html}
  <form method="post" action="{AUTH_PATH}/login">
    <input type="text" name="username" placeholder="Username" autofocus>
    <input type="password" name="password" placeholder="Password">
    <button type="submit">Login</button>
  </form>
</body>
</html>
""".encode()


class Handler(http.server.BaseHTTPRequestHandler):
    """Routes: GET  /auth/login, /auth/logout, /auth/verify (anything else → login page)
    POST /auth/login."""

    def do_GET(self):
        if self.path.startswith(f"{AUTH_PATH}/login"):
            self._send_response(200, render_login())
        elif self.path.startswith(f"{AUTH_PATH}/logout"):
            self._clear_cookie()
        elif self.path.startswith(f"{AUTH_PATH}/verify"):
            self._handle_verify()
        else:
            # Fallback: show login page instead of 404
            self._send_response(200, render_login())

    def do_POST(self):
        if self.path.startswith(f"{AUTH_PATH}/login"):
            self._handle_login()
        else:
            self.send_error(404)

    def _handle_verify(self):
        # 200 with a valid session cookie, 401 otherwise (for nginx auth_request).
        token = self._get_cookie()
        if token and verify_session(token):
            self.send_response(200)
            self.end_headers()
            return
        self.send_response(401)
        self.end_headers()

    def _handle_login(self):
        length = int(self.headers.get("Content-Length", "0"))
        body = self.rfile.read(length).decode()
        # Fix: URL-decode the form body. The previous split-on-&/= parsing left
        # percent-escapes and '+' in place, so any password containing special
        # characters (&, =, +, %, spaces) could never authenticate.
        fields = urllib.parse.parse_qs(body, keep_blank_values=True)
        username = fields.get("username", [""])[0]
        password = fields.get("password", [""])[0]
        # Fix: compare hashes in constant time instead of '=='.
        if username == AUTH["user"] and hmac.compare_digest(hash_pw(password), AUTH["pw_hash"]):
            token = sign_session(username)
            self.send_response(302)
            self.send_header("Set-Cookie", f"{COOKIE_NAME}={token}; HttpOnly; Path=/; SameSite=Lax")
            self.send_header("Location", "/")
            self.end_headers()
        else:
            self._send_response(401, render_login("Invalid credentials"))

    def _redirect(self, location: str):
        self.send_response(302)
        self.send_header("Location", location)
        self.end_headers()

    def _get_cookie(self):
        # Return the session cookie value, or "" if absent.
        cookie = self.headers.get("Cookie", "")
        for part in cookie.split(";"):
            if part.strip().startswith(f"{COOKIE_NAME}="):
                return part.strip().split("=", 1)[1]
        return ""

    def _clear_cookie(self):
        # Expire the cookie and bounce back to the login page.
        self.send_response(302)
        self.send_header(
            "Set-Cookie",
            f"{COOKIE_NAME}=deleted; Path=/; Expires=Thu, 01 Jan 1970 00:00:00 GMT",
        )
        self.send_header("Location", f"{AUTH_PATH}/login")
        self.end_headers()

    def _send_response(self, code: int, body: bytes):
        self.send_response(code)
        self.send_header("Content-Type", "text/html; charset=utf-8")
        self.send_header("Content-Length", str(len(body)))
        self.end_headers()
        self.wfile.write(body)

    def log_message(self, fmt, *args):
        # Silence per-request logging; this runs on every proxied request.
        return


if __name__ == "__main__":
    server = http.server.ThreadingHTTPServer(("127.0.0.1", PORT), Handler)
    server.serve_forever()
-s "$dir/ca/cert.pem" ]; then + # if we either have a CA private key or do *not* have a CA public key, then we should create/manage the CA + mkdir -p "$dir/ca" + _tls_ensure_private "$dir/ca/key.pem" + openssl req -new -key "$dir/ca/key.pem" \ + -out "$dir/ca/cert.pem" \ + -subj '/CN=docker:dind CA' -x509 -days "$certValidDays" + fi + + if [ -s "$dir/ca/key.pem" ]; then + # if we have a CA private key, we should create/manage a server key + mkdir -p "$dir/server" + _tls_ensure_private "$dir/server/key.pem" + openssl req -new -key "$dir/server/key.pem" \ + -out "$dir/server/csr.pem" \ + -subj '/CN=docker:dind server' + cat > "$dir/server/openssl.cnf" <<-EOF + [ x509_exts ] + subjectAltName = $(_tls_san) + EOF + openssl x509 -req \ + -in "$dir/server/csr.pem" \ + -CA "$dir/ca/cert.pem" \ + -CAkey "$dir/ca/key.pem" \ + -CAcreateserial \ + -out "$dir/server/cert.pem" \ + -days "$certValidDays" \ + -extfile "$dir/server/openssl.cnf" \ + -extensions x509_exts + cp "$dir/ca/cert.pem" "$dir/server/ca.pem" + openssl verify -CAfile "$dir/server/ca.pem" "$dir/server/cert.pem" + fi + + if [ -s "$dir/ca/key.pem" ]; then + # if we have a CA private key, we should create/manage a client key + mkdir -p "$dir/client" + _tls_ensure_private "$dir/client/key.pem" + chmod 0644 "$dir/client/key.pem" # openssl defaults to 0600 for the private key, but this one needs to be shared with arbitrary client contexts + openssl req -new \ + -key "$dir/client/key.pem" \ + -out "$dir/client/csr.pem" \ + -subj '/CN=docker:dind client' + cat > "$dir/client/openssl.cnf" <<-'EOF' + [ x509_exts ] + extendedKeyUsage = clientAuth + EOF + openssl x509 -req \ + -in "$dir/client/csr.pem" \ + -CA "$dir/ca/cert.pem" \ + -CAkey "$dir/ca/key.pem" \ + -CAcreateserial \ + -out "$dir/client/cert.pem" \ + -days "$certValidDays" \ + -extfile "$dir/client/openssl.cnf" \ + -extensions x509_exts + cp "$dir/ca/cert.pem" "$dir/client/ca.pem" + openssl verify -CAfile "$dir/client/ca.pem" "$dir/client/cert.pem" + fi +} + +# 
no arguments passed +# or first arg is `-f` or `--some-option` +if [ "$#" -eq 0 ] || [ "${1#-}" != "$1" ]; then + # set "dockerSocket" to the default "--host" *unix socket* value (for both standard or rootless) + uid="$(id -u)" + if [ "$uid" = '0' ]; then + dockerSocket='unix:///var/run/docker.sock' + else + # if we're not root, we must be trying to run rootless + : "${XDG_RUNTIME_DIR:=/run/user/$uid}" + dockerSocket="unix://$XDG_RUNTIME_DIR/docker.sock" + fi + case "${DOCKER_HOST:-}" in + unix://*) + dockerSocket="$DOCKER_HOST" + ;; + esac + + # add our default arguments + if [ -n "${DOCKER_TLS_CERTDIR:-}" ] \ + && _tls_generate_certs "$DOCKER_TLS_CERTDIR" \ + && [ -s "$DOCKER_TLS_CERTDIR/server/ca.pem" ] \ + && [ -s "$DOCKER_TLS_CERTDIR/server/cert.pem" ] \ + && [ -s "$DOCKER_TLS_CERTDIR/server/key.pem" ] \ + ; then + # generate certs and use TLS if requested/possible (default in 19.03+) + set -- dockerd \ + --host="$dockerSocket" \ + --host=tcp://0.0.0.0:2376 \ + --tlsverify \ + --tlscacert "$DOCKER_TLS_CERTDIR/server/ca.pem" \ + --tlscert "$DOCKER_TLS_CERTDIR/server/cert.pem" \ + --tlskey "$DOCKER_TLS_CERTDIR/server/key.pem" \ + "$@" + DOCKERD_ROOTLESS_ROOTLESSKIT_FLAGS="${DOCKERD_ROOTLESS_ROOTLESSKIT_FLAGS:-} -p 0.0.0.0:2376:2376/tcp" + else + # TLS disabled (-e DOCKER_TLS_CERTDIR='') or missing certs + set -- dockerd \ + --host="$dockerSocket" \ + "$@" + DOCKERD_ROOTLESS_ROOTLESSKIT_FLAGS="${DOCKERD_ROOTLESS_ROOTLESSKIT_FLAGS:-} -p 0.0.0.0:2375:2375/tcp" + fi +fi + +if [ "$1" = 'dockerd' ]; then + # explicitly remove Docker's default PID file to ensure that it can start properly if it was stopped uncleanly (and thus didn't clean up the PID file) + find /run /var/run -iname 'docker*.pid' -delete || : + + if dockerd --version | grep -qF ' 20.10.'; then + set -- docker-init -- "$@" + fi + + if ! 
iptables -nL > /dev/null 2>&1; then + # if iptables fails to run, chances are high the necessary kernel modules aren't loaded (perhaps the host is using nftables with the translating "iptables" wrappers, for example) + # https://github.com/docker-library/docker/issues/350 + # https://github.com/moby/moby/issues/26824 + modprobe ip_tables || : + fi + + uid="$(id -u)" + if [ "$uid" != '0' ]; then + # if we're not root, we must be trying to run rootless + if ! command -v rootlesskit > /dev/null; then + echo >&2 "error: attempting to run rootless dockerd but missing 'rootlesskit' (perhaps the 'docker:dind-rootless' image variant is intended?)" + exit 1 + fi + user="$(id -un 2>/dev/null || :)" + if ! grep -qE "^($uid${user:+|$user}):" /etc/subuid || ! grep -qE "^($uid${user:+|$user}):" /etc/subgid; then + echo >&2 "error: attempting to run rootless dockerd but missing necessary entries in /etc/subuid and/or /etc/subgid for $uid" + exit 1 + fi + : "${XDG_RUNTIME_DIR:=/run/user/$uid}" + export XDG_RUNTIME_DIR + if ! mkdir -p "$XDG_RUNTIME_DIR" || [ ! -w "$XDG_RUNTIME_DIR" ] || ! mkdir -p "$HOME/.local/share/docker" || [ ! -w "$HOME/.local/share/docker" ]; then + echo >&2 "error: attempting to run rootless dockerd but need writable HOME ($HOME) and XDG_RUNTIME_DIR ($XDG_RUNTIME_DIR) for user $uid" + exit 1 + fi + if [ -f /proc/sys/kernel/unprivileged_userns_clone ] && unprivClone="$(cat /proc/sys/kernel/unprivileged_userns_clone)" && [ "$unprivClone" != '1' ]; then + echo >&2 "error: attempting to run rootless dockerd but need 'kernel.unprivileged_userns_clone' (/proc/sys/kernel/unprivileged_userns_clone) set to 1" + exit 1 + fi + if [ -f /proc/sys/user/max_user_namespaces ] && maxUserns="$(cat /proc/sys/user/max_user_namespaces)" && [ "$maxUserns" = '0' ]; then + echo >&2 "error: attempting to run rootless dockerd but need 'user.max_user_namespaces' (/proc/sys/user/max_user_namespaces) set to a sufficiently large value" + exit 1 + fi + # TODO overlay support detection? 
+ exec rootlesskit \ + --net="${DOCKERD_ROOTLESS_ROOTLESSKIT_NET:-vpnkit}" \ + --mtu="${DOCKERD_ROOTLESS_ROOTLESSKIT_MTU:-1500}" \ + --disable-host-loopback \ + --port-driver=builtin \ + --copy-up=/etc \ + --copy-up=/run \ + ${DOCKERD_ROOTLESS_ROOTLESSKIT_FLAGS:-} \ + "$@" + elif [ -x '/usr/local/bin/dind' ]; then + # if we have the (mostly defunct now) Docker-in-Docker wrapper script, use it + set -- '/usr/local/bin/dind' "$@" + fi +else + # if it isn't `dockerd` we're trying to run, pass it through `docker-entrypoint.sh` so it gets `DOCKER_HOST` set appropriately too + set -- docker-entrypoint.sh "$@" +fi + +exec "$@" diff --git a/files/ubuntu-root/usr/local/bin/patch-selkies-safari-keyboard.py b/files/ubuntu-root/usr/local/bin/patch-selkies-safari-keyboard.py new file mode 100644 index 000000000..b117e8f93 --- /dev/null +++ b/files/ubuntu-root/usr/local/bin/patch-selkies-safari-keyboard.py @@ -0,0 +1,590 @@ +#!/usr/bin/env python3 +""" +Patch Selkies web UIs to ensure Safari captures keyboard input without IME. 
+""" + +from __future__ import annotations + +from pathlib import Path + + +MARKER = "selkies-safari-keyboard-fix" +SCRIPT = f""" + +""".strip() + +MARKER_V2 = "selkies-safari-keyboard-fix-v2" +SCRIPT_V2 = f""" + +""".strip() + +MARKER_V3 = "selkies-safari-keyboard-fix-v3" +SCRIPT_V3 = f""" + +""".strip() + +MARKER_V4 = "selkies-safari-clipboard-toolbar" +SCRIPT_V4 = f""" + +""".strip() + +JS_MARKER = f"// {MARKER} (selkies-core)" +JS_SNIPPET = f""" +{JS_MARKER} +(function () {{ + var ua = navigator.userAgent || ""; + var isSafari = /^((?!chrome|android).)*safari/i.test(ua); + if (!isSafari) return; + + function focusAssist() {{ + var kbd = document.getElementById("keyboard-input-assist"); + if (!kbd) return; + try {{ + kbd.focus({{ preventScroll: true }}); + }} catch (e) {{ + kbd.focus(); + }} + }} + + function bind() {{ + var overlay = document.getElementById("overlayInput"); + if (overlay) {{ + overlay.addEventListener("pointerdown", focusAssist, true); + overlay.addEventListener("mousedown", focusAssist, true); + overlay.addEventListener("touchstart", focusAssist, true); + }} + window.addEventListener("focus", focusAssist); + }} + + if (document.readyState === "loading") {{ + document.addEventListener("DOMContentLoaded", bind); + }} else {{ + bind(); + }} +}})(); +""".strip() + + +def patch_html(path: Path) -> bool: + try: + text = path.read_text(encoding="utf-8") + except FileNotFoundError: + return False + + scripts = [] + if MARKER not in text: + scripts.append(SCRIPT) + if MARKER_V2 not in text: + scripts.append(SCRIPT_V2) + if MARKER_V3 not in text: + scripts.append(SCRIPT_V3) + if MARKER_V4 not in text: + scripts.append(SCRIPT_V4) + if not scripts: + return False + + injection = "\n".join(scripts) + if "" in text: + updated = text.replace("", f"{injection}\n", 1) + elif "" in text: + updated = text.replace("", f"{injection}\n", 1) + else: + return False + + path.write_text(updated, encoding="utf-8") + return True + + +def patch_js(path: Path) -> bool: + 
try: + text = path.read_text(encoding="utf-8") + except FileNotFoundError: + return False + + if JS_MARKER in text: + return False + + updated = f"{text}\n{JS_SNIPPET}\n" + path.write_text(updated, encoding="utf-8") + return True + + +def main() -> int: + roots = [ + Path("/usr/share/selkies"), + Path("/usr/share/selkies/web"), + Path("/opt/gst-web"), + ] + targets: list[Path] = [] + for root in roots: + if not root.exists(): + continue + targets.extend(root.rglob("index.html")) + + patched = 0 + for path in targets: + if patch_html(path): + patched += 1 + print(f"Patched Safari keyboard fix into {path}") + + js_targets: list[Path] = [] + for root in roots: + if not root.exists(): + continue + js_targets.extend(root.rglob("selkies-core.js")) + + js_patched = 0 + for path in js_targets: + if patch_js(path): + js_patched += 1 + print(f"Patched Safari keyboard fix into {path}") + + if patched == 0 and js_patched == 0: + print("No Selkies HTML/JS files patched for Safari keyboard fix.") + return 0 + + +if __name__ == "__main__": + raise SystemExit(main()) diff --git a/generate-ssl-cert.sh b/generate-ssl-cert.sh new file mode 100755 index 000000000..972fcfe50 --- /dev/null +++ b/generate-ssl-cert.sh @@ -0,0 +1,181 @@ +#!/usr/bin/env bash +set -euo pipefail + +# Default values +SSL_DIR="${SSL_DIR:-$(pwd)/ssl}" +DAYS=${DAYS:-365} +CN=${CN:-localhost} +SUBJECT=${SUBJECT:-} +CREATE_CA=${CREATE_CA:-true} +FORCE=${FORCE:-false} + +usage() { + cat <&2; usage; exit 1 ;; + esac +done + +# Check if certificates already exist +if [[ -f "${SSL_DIR}/cert.pem" && "${FORCE}" != "true" ]]; then + echo "Certificate already exists at ${SSL_DIR}/cert.pem" + echo "Use -f to force overwrite." + exit 1 +fi + +# Create SSL directory +mkdir -p "${SSL_DIR}" + +# Build subject string +if [[ -z "${SUBJECT}" ]]; then + SUBJECT="/C=US/ST=State/L=City/O=Development/CN=${CN}" +fi + +echo "Generating SSL certificates..." 
+echo " Output: ${SSL_DIR}/" +echo " Common Name: ${CN}" +echo " Validity: ${DAYS} days" +echo "" + +if [[ "${CREATE_CA}" == "true" ]]; then + # Method 1: Create CA + signed certificate (recommended) + echo "Creating Certificate Authority..." + + # Generate CA private key + openssl genrsa -out "${SSL_DIR}/ca.key" 4096 2>/dev/null + + # Generate CA certificate + openssl req -new -x509 -days "${DAYS}" \ + -key "${SSL_DIR}/ca.key" \ + -out "${SSL_DIR}/ca.crt" \ + -subj "/C=US/ST=State/L=City/O=Development CA/CN=Local Development CA" \ + 2>/dev/null + + echo "Creating server certificate signed by CA..." + + # Generate server private key + openssl genrsa -out "${SSL_DIR}/cert.key" 2048 2>/dev/null + + # Generate certificate signing request + openssl req -new \ + -key "${SSL_DIR}/cert.key" \ + -out "${SSL_DIR}/cert.csr" \ + -subj "${SUBJECT}" \ + 2>/dev/null + + # Create extensions file for SAN (Subject Alternative Names) + cat > "${SSL_DIR}/cert.ext" </dev/null + + # Cleanup temporary files + rm -f "${SSL_DIR}/cert.csr" "${SSL_DIR}/cert.ext" "${SSL_DIR}/ca.srl" + + # Set permissions + chmod 600 "${SSL_DIR}/ca.key" "${SSL_DIR}/cert.key" + chmod 644 "${SSL_DIR}/ca.crt" "${SSL_DIR}/cert.pem" + + echo "" + echo "=== Certificate Authority Created ===" + echo " CA Certificate: ${SSL_DIR}/ca.crt" + echo " CA Private Key: ${SSL_DIR}/ca.key (keep secure!)" + echo "" + echo "=== Server Certificate Created ===" + echo " Certificate: ${SSL_DIR}/cert.pem" + echo " Private Key: ${SSL_DIR}/cert.key" + echo "" + echo "=== Next Steps ===" + echo "1. 
Trust the CA certificate on your system:" + echo "" + echo " macOS:" + echo " sudo security add-trusted-cert -d -r trustRoot -k /Library/Keychains/System.keychain ${SSL_DIR}/ca.crt" + echo "" + echo " Linux (Ubuntu/Debian):" + echo " sudo cp ${SSL_DIR}/ca.crt /usr/local/share/ca-certificates/local-dev-ca.crt" + echo " sudo update-ca-certificates" + echo "" + echo " Chrome/Chromium (manual):" + echo " Settings → Privacy and security → Security → Manage certificates" + echo " → Authorities → Import → Select ${SSL_DIR}/ca.crt" + echo "" + echo "2. Start the container (SSL auto-detected from ./ssl/):" + echo " ./start-container.sh --encoder nvidia --gpu all" + echo "" + +else + # Method 2: Simple self-signed certificate (no CA) + echo "Creating self-signed certificate (no CA)..." + + openssl req -x509 -nodes -days "${DAYS}" -newkey rsa:2048 \ + -keyout "${SSL_DIR}/cert.key" \ + -out "${SSL_DIR}/cert.pem" \ + -subj "${SUBJECT}" \ + -addext "subjectAltName=DNS:${CN},DNS:localhost,IP:127.0.0.1" \ + 2>/dev/null + + chmod 600 "${SSL_DIR}/cert.key" + chmod 644 "${SSL_DIR}/cert.pem" + + echo "" + echo "=== Self-Signed Certificate Created ===" + echo " Certificate: ${SSL_DIR}/cert.pem" + echo " Private Key: ${SSL_DIR}/cert.key" + echo "" + echo "Note: Browsers will show security warnings for self-signed certificates." + echo "Use without --no-ca to create a CA that can be trusted system-wide." + echo "" +fi + +echo "Done!" 
diff --git a/jenkins-vars.yml b/jenkins-vars.yml deleted file mode 100644 index 4cc7c5dde..000000000 --- a/jenkins-vars.yml +++ /dev/null @@ -1,28 +0,0 @@ ---- - -# jenkins variables -project_name: docker-webtop -external_type: os -release_type: stable -release_tag: latest -ls_branch: master -build_armhf: false -repo_vars: - - BUILD_VERSION_ARG = 'XFCE_VERSION' - - LS_USER = 'linuxserver' - - LS_REPO = 'docker-webtop' - - CONTAINER_NAME = 'webtop' - - DOCKERHUB_IMAGE = 'linuxserver/webtop' - - DEV_DOCKERHUB_IMAGE = 'lsiodev/webtop' - - PR_DOCKERHUB_IMAGE = 'lspipepr/webtop' - - DIST_IMAGE = 'alpine' - - MULTIARCH = 'true' - - CI = 'true' - - CI_WEB = 'true' - - CI_PORT = '3001' - - CI_SSL = 'true' - - CI_DELAY = '60' - - CI_WEB_SCREENSHOT_DELAY = '10' - - CI_DOCKERENV = 'TZ=US/Pacific' - - CI_AUTH = 'user:password' - - CI_WEBPATH = '' diff --git a/logs-container.sh b/logs-container.sh new file mode 100755 index 000000000..12fb4a149 --- /dev/null +++ b/logs-container.sh @@ -0,0 +1,52 @@ +#!/usr/bin/env bash +set -euo pipefail + +HOST_USER=${USER:-$(whoami)} +NAME=${CONTAINER_NAME:-linuxserver-kde-${HOST_USER}} +LINES=${LINES:-100} +FOLLOW=${FOLLOW:-false} + +usage() { + cat <&2 + exit 1 +fi + +DOCKER_ARGS=("--tail" "${LINES}") + +if [[ "${FOLLOW}" == "true" ]]; then + DOCKER_ARGS+=("-f") + echo "Following logs for ${NAME} (Ctrl+C to stop)..." +else + echo "Showing last ${LINES} lines of logs for ${NAME}..." 
+fi + +docker logs "${DOCKER_ARGS[@]}" "$NAME" diff --git a/package_versions.txt b/package_versions.txt deleted file mode 100755 index 14f30c2f1..000000000 --- a/package_versions.txt +++ /dev/null @@ -1,1116 +0,0 @@ -NAME VERSION TYPE -Simple Launcher 1.1.0.14 binary (+5 duplicates) -abseil-cpp-raw-logging-internal 20240722.1-r1 apk -abseil-cpp-strings 20240722.1-r1 apk -abseil-cpp-strings-internal 20240722.1-r1 apk -acl-libs 2.3.2-r1 apk -adw-gtk3 6.2-r0 apk -adwaita-fonts 48.2-r0 apk -adwaita-fonts-mono 48.2-r0 apk -adwaita-fonts-sans 48.2-r0 apk -adwaita-icon-theme 48.1-r0 apk -adwaita-xfce-icon-theme 0.0.4-r0 apk -agetty 2.41-r9 apk -aiohappyeyeballs 2.6.1 python -aiohttp 3.13.2 python -aioice 0.10.2 python -aiosignal 1.4.0 python -alpine-baselayout 3.7.0-r0 apk -alpine-baselayout-data 3.7.0-r0 apk -alpine-keys 2.5-r0 apk -alpine-release 3.22.2-r0 apk -alsa-lib 1.2.14-r0 apk -anstream 0.6.8 rust-crate (+1 duplicate) -anstyle 1.0.4 rust-crate (+1 duplicate) -anstyle-parse 0.2.3 rust-crate (+1 duplicate) -anstyle-query 1.0.2 rust-crate (+1 duplicate) -anyhow 1.0.79 rust-crate (+1 duplicate) -anyhow 1.0.98 rust-crate (+1 duplicate) -aom-libs 3.12.1-r0 apk -apk-tools 2.14.9-r3 apk -arg_enum_proc_macro 0.3.4 rust-crate (+1 duplicate) -arrayvec 0.7.4 rust-crate (+1 duplicate) -at-spi2-core 2.56.5-r0 apk -at-spi2-core-lang 2.56.5-r0 apk -at-spi2-core-libs 2.56.5-r0 apk -attrs 25.4.0 python -autocommand 2.2.2 python -av 14.4.0 python -av-metrics 0.9.1 rust-crate (+1 duplicate) -av1-grain 0.2.3 rust-crate (+1 duplicate) -avahi-libs 0.8-r21 apk -backports-tarfile 1.2.0 python -bash 5.2.37-r0 apk -bitflags 2.4.1 rust-crate (+1 duplicate) -bitstream-io 2.2.0 rust-crate (+1 duplicate) -bitstream-io 2.6.0 rust-crate (+1 duplicate) -bitvec 1.0.1 rust-crate (+1 duplicate) -bitvec_helpers 3.1.6 rust-crate (+1 duplicate) -blkid 2.41-r9 apk -breeze-cursors 6.3.6-r0 apk -brotli-libs 1.1.0-r2 apk -bubblewrap 0.11.0-r1 apk -busybox 1.37.0-r20 apk -busybox-binsh 1.37.0-r20 apk 
-c-ares 1.34.6-r0 apk -ca-certificates 20250911-r0 apk -ca-certificates-bundle 20250911-r0 apk -cairo 1.18.4-r0 apk -cairo-gobject 1.18.4-r0 apk -catatonit 0.2.1-r0 apk -cdparanoia-libs 10.2-r14 apk -cfdisk 2.41-r9 apk -cffi 2.0.0 python -cfg-if 1.0.0 rust-crate (+1 duplicate) -chromium 142.0.7444.59-r0 apk -chromium-lang 142.0.7444.59-r0 apk -cjson 1.7.19-r0 apk -clap 4.4.14 rust-crate (+1 duplicate) -clap_builder 4.4.14 rust-crate (+1 duplicate) -clap_complete 4.4.6 rust-crate (+1 duplicate) -clap_derive 4.4.7 rust-crate (+1 duplicate) -clap_lex 0.6.0 rust-crate (+1 duplicate) -cli UNKNOWN binary -cli-32 UNKNOWN binary -cli-64 UNKNOWN binary -cli-arm64 UNKNOWN binary -cloud.google.com/go/compute/metadata v0.6.0 go-module -cloud.google.com/go/logging v1.9.0 go-module -cloud.google.com/go/longrunning v0.5.5 go-module -cmake 3.31.7-r1 apk -code.cloudfoundry.org/clock v1.1.0 go-module -colorchoice 1.0.0 rust-crate (+1 duplicate) -console 0.15.8 rust-crate (+1 duplicate) -containerd 2.1.5-r1 apk -coreutils 9.7-r1 apk -coreutils-env 9.7-r1 apk -coreutils-fmt 9.7-r1 apk -coreutils-sha512sum 9.7-r1 apk -crc 3.3.0 rust-crate (+1 duplicate) -crc-catalog 2.4.0 rust-crate (+1 duplicate) -crc32c 1.1.2-r2 apk -crossbeam 0.8.4 rust-crate (+1 duplicate) -crossbeam-channel 0.5.14 rust-crate (+1 duplicate) -crossbeam-deque 0.8.5 rust-crate (+1 duplicate) -crossbeam-epoch 0.9.18 rust-crate (+1 duplicate) -crossbeam-queue 0.3.11 rust-crate (+1 duplicate) -crossbeam-utils 0.8.19 rust-crate (+1 duplicate) -cryptography 46.0.3 python -cups-libs 2.4.11-r0 apk -curl 8.14.1-r2 apk -dario.cat/mergo v1.0.1 go-module (+2 duplicates) -dbus 1.16.2-r1 apk -dbus-daemon-launch-helper 1.16.2-r1 apk -dbus-libs 1.16.2-r1 apk -dbus-x11 1.16.2-r1 apk -desktop-file-utils 0.28-r0 apk -directfb 1.7.7-r8 apk -dmesg 2.41-r9 apk -dnspython 2.8.0 python -docker 28.3.3-r4 apk -docker-cli 28.3.3-r4 apk -docker-cli-buildx 0.24.0-r4 apk -docker-cli-compose 2.36.2-r4 apk -docker-engine 28.3.3-r4 apk -dolby_vision 
3.3.1 rust-crate (+1 duplicate) -double-conversion 3.3.1-r0 apk -duktape-libs 2.7.0-r2 apk -dunst 1.12.2-r0 apk -dunstify 1.12.2-r0 apk -either 1.9.0 rust-crate (+1 duplicate) -enchant2-libs 2.8.10-r0 apk -encodings 1.0.7-r1 apk -errno 0.3.8 rust-crate (+1 duplicate) -eudev-libs 3.2.14-r5 apk -evdev 1.9.2 python -exo 4.20.0-r0 apk -exo-lang 4.20.0-r0 apk -exo-libs 4.20.0-r0 apk -faac 1.31.1-r0 apk -fcft 3.3.1-r0 apk -fdk-aac 2.0.2-r4 apk -fern 0.6.2 rust-crate (+1 duplicate) -ffmpeg-libavcodec 6.1.2-r2 apk -ffmpeg-libavfilter 6.1.2-r2 apk -ffmpeg-libavformat 6.1.2-r2 apk -ffmpeg-libavutil 6.1.2-r2 apk -ffmpeg-libpostproc 6.1.2-r2 apk -ffmpeg-libswresample 6.1.2-r2 apk -ffmpeg-libswscale 6.1.2-r2 apk -ffmpegthumbnailer 2.2.3-r0 apk -fftw-single-libs 3.3.10-r6 apk -file 5.46-r2 apk -findmnt 2.41-r9 apk -findutils 4.10.0-r0 apk -flite 2.2-r5 apk -flock 2.41-r9 apk -font-adobe-100dpi 1.0.4-r2 apk -font-adobe-75dpi 1.0.4-r2 apk -font-alias 1.0.5-r0 apk -font-cursor-misc 1.0.4-r1 apk -font-dejavu 2.37-r6 apk -font-liberation 2.1.5-r2 apk -font-misc-misc 1.1.3-r1 apk -font-noto 2025.05.01-r0 apk -font-noto-cjk 0_git20220127-r1 apk -font-noto-common 2025.05.01-r0 apk -font-noto-emoji 2.047-r0 apk -font-noto-math 2025.05.01-r0 apk -font-noto-symbols 2025.05.01-r0 apk -font-opensans 0_git20210927-r1 apk -fontconfig 2.15.0-r3 apk -foot 1.22.3-r0 apk -freetype 2.13.3-r0 apk -fribidi 1.0.16-r1 apk -frozenlist 1.8.0 python -fstrim 2.41-r9 apk -funty 2.0.0 rust-crate (+1 duplicate) -fuse-common 3.16.2-r1 apk -fuse-overlayfs 1.15-r0 apk -fuse3 3.16.2-r1 apk -fuse3-libs 3.16.2-r1 apk -garcon 4.20.0-r0 apk -garcon-lang 4.20.0-r0 apk -gcr 3.41.2-r1 apk -gcr-base 3.41.2-r1 apk -gcr-lang 3.41.2-r1 apk -gdbm 1.24-r0 apk -gdk-pixbuf 2.42.12-r1 apk -gdk-pixbuf-lang 2.42.12-r1 apk -getrandom 0.2.12 rust-crate (+1 duplicate) -giflib 5.2.2-r1 apk -git 2.49.1-r0 apk -git-init-template 2.49.1-r0 apk -github.com/AlecAivazis/survey/v2 v2.3.7 go-module 
-github.com/Azure/azure-sdk-for-go/sdk/azcore v1.16.0 go-module -github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 go-module -github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.5.0 go-module -github.com/DefangLabs/secret-detector v0.0.0-20250403165618-22662109213e go-module -github.com/Graylog2/go-gelf v0.0.0-20191017102106-1550ee647df0 go-module -github.com/Masterminds/semver/v3 v3.2.1 go-module (+1 duplicate) -github.com/Microsoft/hcsshim v0.13.0 go-module (+1 duplicate) -github.com/RackSec/srslog v0.0.0-20180709174129-a4725f04ec91 go-module -github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d go-module -github.com/agext/levenshtein v1.2.3 go-module (+1 duplicate) -github.com/anchore/go-struct-converter v0.0.0-20221118182256-c68fdcfa2092 go-module -github.com/apparentlymart/go-cidr v1.0.1 go-module -github.com/apparentlymart/go-textseg/v15 v15.0.0 go-module (+1 duplicate) -github.com/armon/circbuf v0.0.0-20190214190532-5111143e8da2 go-module -github.com/armon/go-metrics v0.4.1 go-module -github.com/aws/aws-sdk-go-v2 v1.30.3 go-module (+2 duplicates) -github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.3 go-module -github.com/aws/aws-sdk-go-v2/config v1.27.27 go-module (+2 duplicates) -github.com/aws/aws-sdk-go-v2/credentials v1.17.27 go-module (+2 duplicates) -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.11 go-module (+2 duplicates) -github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.15 go-module (+2 duplicates) -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.15 go-module (+2 duplicates) -github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0 go-module (+2 duplicates) -github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs v1.32.0 go-module -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.3 go-module (+2 duplicates) -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.17 go-module (+2 duplicates) -github.com/aws/aws-sdk-go-v2/service/sso v1.22.4 go-module (+2 duplicates) 
-github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.4 go-module (+2 duplicates) -github.com/aws/aws-sdk-go-v2/service/sts v1.30.3 go-module (+2 duplicates) -github.com/aws/smithy-go v1.20.3 go-module (+2 duplicates) -github.com/beorn7/perks v1.0.1 go-module (+2 duplicates) -github.com/bits-and-blooms/bitset v1.13.0 go-module -github.com/buger/goterm v1.0.4 go-module -github.com/cenkalti/backoff/v4 v4.3.0 go-module (+3 duplicates) -github.com/cespare/xxhash/v2 v2.3.0 go-module (+2 duplicates) -github.com/checkpoint-restore/checkpointctl v1.3.0 go-module -github.com/checkpoint-restore/go-criu/v6 v6.3.0 go-module -github.com/checkpoint-restore/go-criu/v7 v7.2.0 go-module -github.com/cilium/ebpf v0.16.0 go-module (+1 duplicate) -github.com/cilium/ebpf v0.17.3 go-module (+1 duplicate) -github.com/cloudflare/cfssl v1.6.4 go-module -github.com/compose-spec/compose-go/v2 v2.6.3 go-module -github.com/compose-spec/compose-go/v2 v2.6.4 go-module -github.com/container-storage-interface/spec v1.5.0 go-module -github.com/containerd/accelerated-container-image v1.3.0 go-module -github.com/containerd/btrfs/v2 v2.0.0 go-module -github.com/containerd/cgroups/v3 v3.0.5 go-module (+2 duplicates) -github.com/containerd/console v1.0.4 go-module (+3 duplicates) -github.com/containerd/console v1.0.5 go-module (+1 duplicate) -github.com/containerd/containerd/api v1.8.0 go-module -github.com/containerd/containerd/api v1.9.0 go-module (+3 duplicates) -github.com/containerd/containerd/v2 UNKNOWN go-module (+1 duplicate) -github.com/containerd/containerd/v2 v2.0.5 go-module -github.com/containerd/containerd/v2 v2.1.1 go-module -github.com/containerd/containerd/v2 v2.1.3 go-module -github.com/containerd/continuity v0.4.5 go-module (+4 duplicates) -github.com/containerd/errdefs v1.0.0 go-module (+4 duplicates) -github.com/containerd/errdefs/pkg v0.3.0 go-module (+4 duplicates) -github.com/containerd/fifo v1.1.0 go-module (+2 duplicates) -github.com/containerd/go-cni v1.1.12 go-module (+1 
duplicate) -github.com/containerd/go-runc v1.1.0 go-module (+2 duplicates) -github.com/containerd/imgcrypt/v2 v2.0.1 go-module -github.com/containerd/log v0.1.0 go-module (+5 duplicates) -github.com/containerd/nri v0.8.0 go-module -github.com/containerd/otelttrpc v0.1.0 go-module -github.com/containerd/platforms v1.0.0-rc.1 go-module (+3 duplicates) -github.com/containerd/plugin v1.0.0 go-module (+2 duplicates) -github.com/containerd/stargz-snapshotter/estargz v0.16.3 go-module -github.com/containerd/ttrpc v1.2.7 go-module (+4 duplicates) -github.com/containerd/typeurl/v2 v2.2.3 go-module (+4 duplicates) -github.com/containerd/zfs/v2 v2.0.0-rc.0 go-module -github.com/containernetworking/cni v1.3.0 go-module (+1 duplicate) -github.com/containernetworking/plugins v1.7.1 go-module (+1 duplicate) -github.com/containers/ocicrypt v1.2.1 go-module -github.com/coreos/go-systemd/v22 v22.5.0 go-module (+3 duplicates) -github.com/cyphar/filepath-securejoin v0.4.1 go-module -github.com/cyphar/filepath-securejoin v0.5.2 go-module -github.com/davecgh/go-spew v1.1.1 go-module (+3 duplicates) -github.com/deckarep/golang-set/v2 v2.3.0 go-module -github.com/dimchansky/utfbom v1.1.1 go-module -github.com/distribution/reference v0.6.0 go-module (+3 duplicates) -github.com/docker/buildx UNKNOWN go-module -github.com/docker/buildx v0.24.0 go-module -github.com/docker/cli v28.1.1+incompatible go-module (+1 duplicate) -github.com/docker/cli-docs-tool v0.9.0 go-module (+1 duplicate) -github.com/docker/cli/cmd/docker UNKNOWN go-module -github.com/docker/compose/v2 UNKNOWN go-module -github.com/docker/distribution v2.8.3+incompatible go-module (+1 duplicate) -github.com/docker/docker UNKNOWN go-module (+1 duplicate) -github.com/docker/docker v28.1.1+incompatible go-module (+1 duplicate) -github.com/docker/docker-credential-helpers v0.9.3 go-module (+1 duplicate) -github.com/docker/go v1.5.1-1.0.20160303222718-d30aec9fd63c go-module -github.com/docker/go-connections v0.5.0 go-module (+2 
duplicates) -github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c go-module (+1 duplicate) -github.com/docker/go-metrics v0.0.1 go-module (+2 duplicates) -github.com/docker/go-units v0.5.0 go-module (+5 duplicates) -github.com/docker/libtrust v0.0.0-20150526203908-9cbd2a1374f4 go-module -github.com/dustin/go-humanize v1.0.0 go-module -github.com/eiannone/keyboard v0.0.0-20220611211555-0d226195f203 go-module -github.com/emicklei/go-restful/v3 v3.11.0 go-module (+2 duplicates) -github.com/felixge/httpsnoop v1.0.4 go-module (+3 duplicates) -github.com/fernet/fernet-go v0.0.0-20211208181803-9f70042a33ee go-module -github.com/fluent/fluent-logger-golang v1.9.0 go-module -github.com/fsnotify/fsnotify v1.9.0 go-module (+1 duplicate) -github.com/fvbommel/sortorder v1.0.1 go-module -github.com/fvbommel/sortorder v1.1.0 go-module -github.com/fxamacker/cbor/v2 v2.7.0 go-module (+2 duplicates) -github.com/go-jose/go-jose/v4 v4.0.5 go-module -github.com/go-logr/logr v1.4.2 go-module (+3 duplicates) -github.com/go-logr/stdr v1.2.2 go-module (+3 duplicates) -github.com/go-openapi/jsonpointer v0.19.6 go-module -github.com/go-openapi/jsonpointer v0.21.0 go-module -github.com/go-openapi/jsonreference v0.20.2 go-module (+1 duplicate) -github.com/go-openapi/swag v0.22.4 go-module -github.com/go-openapi/swag v0.23.0 go-module -github.com/go-viper/mapstructure/v2 v2.0.0 go-module (+1 duplicate) -github.com/godbus/dbus/v5 v5.1.0 go-module (+3 duplicates) -github.com/gofrs/flock v0.12.1 go-module (+2 duplicates) -github.com/gogo/protobuf v1.3.2 go-module (+4 duplicates) -github.com/golang-jwt/jwt/v5 v5.2.2 go-module (+1 duplicate) -github.com/golang/gddo v0.0.0-20190904175337-72a348e765d2 go-module -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da go-module -github.com/golang/protobuf v1.5.4 go-module (+3 duplicates) -github.com/google/btree v1.1.2 go-module -github.com/google/certificate-transparency-go v1.1.4 go-module -github.com/google/gnostic-models v0.6.8 
go-module (+1 duplicate) -github.com/google/go-cmp v0.7.0 go-module (+4 duplicates) -github.com/google/gofuzz v1.2.0 go-module (+2 duplicates) -github.com/google/s2a-go v0.1.7 go-module -github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 go-module (+2 duplicates) -github.com/google/uuid v1.6.0 go-module (+3 duplicates) -github.com/googleapis/enterprise-certificate-proxy v0.3.2 go-module -github.com/googleapis/gax-go/v2 v2.12.0 go-module -github.com/gorilla/mux v1.8.1 go-module (+1 duplicate) -github.com/gorilla/websocket v1.5.0 go-module (+2 duplicates) -github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus v1.0.1 go-module -github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.1.0 go-module -github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 go-module -github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 go-module -github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.1 go-module (+2 duplicates) -github.com/hashicorp/errwrap v1.1.0 go-module (+2 duplicates) -github.com/hashicorp/go-cleanhttp v0.5.2 go-module (+1 duplicate) -github.com/hashicorp/go-cty-funcs v0.0.0-20250210171435-dda779884a9f go-module -github.com/hashicorp/go-immutable-radix v1.3.1 go-module -github.com/hashicorp/go-immutable-radix/v2 v2.1.0 go-module -github.com/hashicorp/go-memdb v1.3.2 go-module -github.com/hashicorp/go-msgpack v0.5.5 go-module -github.com/hashicorp/go-multierror v1.1.1 go-module (+2 duplicates) -github.com/hashicorp/go-sockaddr v1.0.2 go-module -github.com/hashicorp/go-version v1.7.0 go-module -github.com/hashicorp/golang-lru v0.5.4 go-module -github.com/hashicorp/golang-lru/v2 v2.0.7 go-module -github.com/hashicorp/hcl/v2 v2.23.0 go-module -github.com/hashicorp/memberlist v0.4.0 go-module -github.com/hashicorp/serf v0.8.5 go-module -github.com/imdario/mergo v0.3.16 go-module -github.com/in-toto/in-toto-golang v0.5.0 go-module (+1 duplicate) -github.com/in-toto/in-toto-golang v0.9.0 go-module -github.com/inhies/go-bytesize v0.0.0-20220417184213-4913239db9cf go-module 
-github.com/intel/goresctrl v0.8.0 go-module -github.com/ishidawataru/sctp v0.0.0-20230406120618-7ff4192f6ff2 go-module (+1 duplicate) -github.com/jmoiron/sqlx v1.3.3 go-module -github.com/jonboulle/clockwork v0.5.0 go-module -github.com/josharian/intern v1.0.0 go-module (+1 duplicate) -github.com/json-iterator/go v1.1.12 go-module (+2 duplicates) -github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 go-module -github.com/klauspost/compress v1.18.0 go-module (+3 duplicates) -github.com/mailru/easyjson v0.7.7 go-module (+1 duplicate) -github.com/mattn/go-colorable v0.1.13 go-module -github.com/mattn/go-isatty v0.0.20 go-module -github.com/mattn/go-runewidth v0.0.16 go-module (+1 duplicate) -github.com/mattn/go-shellwords v1.0.12 go-module (+1 duplicate) -github.com/mdlayher/socket v0.5.1 go-module (+1 duplicate) -github.com/mdlayher/vsock v1.2.1 go-module (+1 duplicate) -github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b go-module -github.com/miekg/dns v1.1.66 go-module -github.com/miekg/pkcs11 v1.1.1 go-module -github.com/mistifyio/go-zfs/v3 v3.0.1 go-module (+1 duplicate) -github.com/mitchellh/copystructure v1.2.0 go-module -github.com/mitchellh/go-wordwrap v0.0.0-20150314170334-ad45545899c7 go-module -github.com/mitchellh/hashstructure/v2 v2.0.2 go-module (+2 duplicates) -github.com/mitchellh/mapstructure v1.5.0 go-module -github.com/mitchellh/reflectwalk v1.0.2 go-module -github.com/moby/buildkit v0.22.0 go-module (+1 duplicate) -github.com/moby/buildkit v0.23.2 go-module -github.com/moby/docker-image-spec v1.3.1 go-module (+2 duplicates) -github.com/moby/go-archive v0.1.0 go-module (+2 duplicates) -github.com/moby/ipvs v1.1.0 go-module -github.com/moby/locker v1.0.1 go-module (+3 duplicates) -github.com/moby/patternmatcher v0.6.0 go-module (+2 duplicates) -github.com/moby/pubsub v1.0.0 go-module -github.com/moby/spdystream v0.5.0 go-module (+2 duplicates) -github.com/moby/swarmkit/v2 v2.0.0 go-module -github.com/moby/sys/atomicwriter 
v0.1.0 go-module (+2 duplicates) -github.com/moby/sys/capability v0.4.0 go-module (+1 duplicate) -github.com/moby/sys/mount v0.3.4 go-module -github.com/moby/sys/mountinfo v0.7.2 go-module (+5 duplicates) -github.com/moby/sys/reexec v0.1.0 go-module -github.com/moby/sys/sequential v0.6.0 go-module (+2 duplicates) -github.com/moby/sys/signal v0.7.1 go-module (+3 duplicates) -github.com/moby/sys/symlink v0.3.0 go-module (+2 duplicates) -github.com/moby/sys/user v0.3.0 go-module -github.com/moby/sys/user v0.4.0 go-module (+3 duplicates) -github.com/moby/sys/userns v0.1.0 go-module (+5 duplicates) -github.com/moby/term v0.5.2 go-module (+2 duplicates) -github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd go-module (+2 duplicates) -github.com/modern-go/reflect2 v1.0.2 go-module (+2 duplicates) -github.com/morikuni/aec v1.0.0 go-module (+2 duplicates) -github.com/mrunalp/fileutils v0.5.1 go-module -github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 go-module (+3 duplicates) -github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f go-module (+2 duplicates) -github.com/opencontainers/cgroups v0.0.3 go-module -github.com/opencontainers/cgroups v0.0.4 go-module -github.com/opencontainers/go-digest v1.0.0 go-module (+4 duplicates) -github.com/opencontainers/image-spec v1.1.1 go-module (+4 duplicates) -github.com/opencontainers/runc UNKNOWN go-module -github.com/opencontainers/runtime-spec v1.2.1 go-module (+3 duplicates) -github.com/opencontainers/runtime-tools v0.9.1-0.20221107090550-2e043c6bd626 go-module (+1 duplicate) -github.com/opencontainers/selinux v1.12.0 go-module (+1 duplicate) -github.com/opencontainers/selinux v1.13.1 go-module -github.com/package-url/packageurl-go v0.1.1 go-module -github.com/pelletier/go-toml v1.9.5 go-module (+2 duplicates) -github.com/pelletier/go-toml/v2 v2.2.4 go-module (+1 duplicate) -github.com/philhofer/fwd v1.1.2 go-module -github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c go-module 
-github.com/pkg/errors v0.9.1 go-module (+2 duplicates) -github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 go-module (+2 duplicates) -github.com/pmezard/go-difflib v1.0.0 go-module (+1 duplicate) -github.com/prometheus/client_golang v1.22.0 go-module (+2 duplicates) -github.com/prometheus/client_model v0.6.1 go-module (+2 duplicates) -github.com/prometheus/common v0.62.0 go-module (+2 duplicates) -github.com/prometheus/procfs v0.15.1 go-module (+2 duplicates) -github.com/rivo/uniseg v0.2.0 go-module (+1 duplicate) -github.com/rootless-containers/rootlesskit/v2 v2.3.4 go-module -github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 go-module -github.com/seccomp/libseccomp-golang v0.10.0 go-module -github.com/secure-systems-lab/go-securesystemslib v0.4.0 go-module (+1 duplicate) -github.com/secure-systems-lab/go-securesystemslib v0.6.0 go-module -github.com/serialx/hashring v0.0.0-20200727003509-22c0c7ab6b1b go-module (+1 duplicate) -github.com/shibumi/go-pathspec v1.3.0 go-module (+2 duplicates) -github.com/sirupsen/logrus v1.9.3 go-module (+6 duplicates) -github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 go-module -github.com/smallstep/pkcs7 v0.1.1 go-module -github.com/spdx/tools-golang v0.5.5 go-module -github.com/spf13/cobra v1.9.1 go-module (+2 duplicates) -github.com/spf13/pflag v1.0.6 go-module (+2 duplicates) -github.com/stefanberger/go-pkcs11uri v0.0.0-20230803200340-78284954bff6 go-module -github.com/stretchr/testify v1.10.0 go-module (+1 duplicate) -github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 go-module (+1 duplicate) -github.com/tchap/go-patricia/v2 v2.3.2 go-module -github.com/theupdateframework/notary v0.7.0 go-module -github.com/tilt-dev/fsnotify v1.4.8-0.20220602155310-fff9c274a375 go-module -github.com/tinylib/msgp v1.1.8 go-module -github.com/tonistiigi/dchapes-mode v0.0.0-20250318174251-73d941a28323 go-module (+2 duplicates) -github.com/tonistiigi/fsutil 
v0.0.0-20250417144416-3f76f8130144 go-module (+1 duplicate) -github.com/tonistiigi/fsutil v0.0.0-20250605211040-586307ad452f go-module -github.com/tonistiigi/go-actions-cache v0.0.0-20250626083717-378c5ed1ddd9 go-module -github.com/tonistiigi/go-archvariant v1.0.0 go-module -github.com/tonistiigi/go-csvvalue v0.0.0-20240710180619-ddb21b71c0b4 go-module (+1 duplicate) -github.com/tonistiigi/go-csvvalue v0.0.0-20240814133006-030d3b2625d0 go-module -github.com/tonistiigi/jaeger-ui-rest v0.0.0-20250408171107-3dd17559e117 go-module -github.com/tonistiigi/units v0.0.0-20180711220420-6950e57a87ea go-module (+2 duplicates) -github.com/tonistiigi/vt100 v0.0.0-20240514184818-90bafcd6abab go-module (+1 duplicate) -github.com/urfave/cli v1.22.16 go-module -github.com/urfave/cli/v2 v2.27.6 go-module -github.com/vbatts/tar-split v0.12.1 go-module -github.com/vishvananda/netlink v1.3.0 go-module -github.com/vishvananda/netlink v1.3.1 go-module -github.com/vishvananda/netlink v1.3.1-0.20250303224720-0e7078ed04c8 go-module -github.com/vishvananda/netns v0.0.4 go-module -github.com/vishvananda/netns v0.0.5 go-module (+1 duplicate) -github.com/weppos/publicsuffix-go v0.15.1-0.20210511084619-b1f36a2d6c0b go-module -github.com/x448/float16 v0.8.4 go-module (+2 duplicates) -github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb go-module (+1 duplicate) -github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 go-module (+1 duplicate) -github.com/xeipuuv/gojsonschema v1.2.0 go-module (+1 duplicate) -github.com/xhit/go-str2duration/v2 v2.1.0 go-module (+1 duplicate) -github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1 go-module -github.com/zclconf/go-cty v1.16.2 go-module (+1 duplicate) -github.com/zmap/zcrypto v0.0.0-20210511125630-18f1e0152cfc go-module -github.com/zmap/zlint/v3 v3.1.0 go-module -glib 2.84.4-r0 apk -glib-lang 2.84.4-r0 apk -glib-networking 2.80.1-r1 apk -glib-networking-lang 2.80.1-r1 apk -glslang-libs 1.4.309.0-r0 apk -gmp 6.3.0-r3 
apk -gnome-keyring 48.0-r0 apk -gnome-keyring-lang 48.0-r0 apk -gnome-keyring-pam 48.0-r0 apk -gnutls 3.8.8-r0 apk -go.etcd.io/bbolt v1.4.0 go-module (+1 duplicate) -go.etcd.io/etcd/client/pkg/v3 v3.5.16 go-module -go.etcd.io/etcd/pkg/v3 v3.5.16 go-module -go.etcd.io/etcd/raft/v3 v3.5.16 go-module -go.etcd.io/etcd/server/v3 v3.5.16 go-module -go.opencensus.io v0.24.0 go-module -go.opentelemetry.io/auto/sdk v1.1.0 go-module (+2 duplicates) -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.56.0 go-module -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.60.0 go-module (+2 duplicates) -go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.56.0 go-module (+1 duplicate) -go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.60.0 go-module -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0 go-module -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0 go-module (+2 duplicates) -go.opentelemetry.io/contrib/processors/baggagecopy v0.4.0 go-module -go.opentelemetry.io/otel v1.31.0 go-module -go.opentelemetry.io/otel v1.35.0 go-module (+2 duplicates) -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.31.0 go-module (+1 duplicate) -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.35.0 go-module -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.31.0 go-module (+1 duplicate) -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.35.0 go-module -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.31.0 go-module -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.35.0 go-module (+2 duplicates) -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.31.0 go-module -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.35.0 go-module (+2 duplicates) -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.31.0 go-module 
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.35.0 go-module (+2 duplicates) -go.opentelemetry.io/otel/metric v1.31.0 go-module -go.opentelemetry.io/otel/metric v1.35.0 go-module (+2 duplicates) -go.opentelemetry.io/otel/sdk v1.31.0 go-module -go.opentelemetry.io/otel/sdk v1.35.0 go-module (+2 duplicates) -go.opentelemetry.io/otel/sdk/metric v1.31.0 go-module -go.opentelemetry.io/otel/sdk/metric v1.35.0 go-module (+1 duplicate) -go.opentelemetry.io/otel/trace v1.31.0 go-module -go.opentelemetry.io/otel/trace v1.35.0 go-module (+2 duplicates) -go.opentelemetry.io/proto/otlp v1.3.1 go-module -go.opentelemetry.io/proto/otlp v1.5.0 go-module (+2 duplicates) -go.uber.org/atomic v1.9.0 go-module -go.uber.org/mock v0.5.2 go-module -go.uber.org/multierr v1.8.0 go-module -go.uber.org/zap v1.21.0 go-module -gobject-introspection 1.84.0-r0 apk -golang.org/x/crypto v0.36.0 go-module -golang.org/x/crypto v0.37.0 go-module (+2 duplicates) -golang.org/x/exp v0.0.0-20241108190413-2d47ceb2692f go-module (+1 duplicate) -golang.org/x/exp v0.0.0-20250408133849-7e4ce0ab07d0 go-module -golang.org/x/mod v0.24.0 go-module (+1 duplicate) -golang.org/x/net v0.35.0 go-module -golang.org/x/net v0.38.0 go-module (+1 duplicate) -golang.org/x/net v0.39.0 go-module (+3 duplicates) -golang.org/x/oauth2 v0.27.0 go-module -golang.org/x/oauth2 v0.29.0 go-module (+2 duplicates) -golang.org/x/sync v0.13.0 go-module -golang.org/x/sync v0.14.0 go-module (+3 duplicates) -golang.org/x/sys v0.30.0 go-module -golang.org/x/sys v0.32.0 go-module -golang.org/x/sys v0.33.0 go-module (+4 duplicates) -golang.org/x/term v0.30.0 go-module -golang.org/x/term v0.31.0 go-module (+1 duplicate) -golang.org/x/text v0.23.0 go-module -golang.org/x/text v0.24.0 go-module (+2 duplicates) -golang.org/x/time v0.11.0 go-module (+2 duplicates) -golang.org/x/time v0.7.0 go-module -google-crc32c 1.8.0 python -google.golang.org/api v0.160.0 go-module -google.golang.org/genproto 
v0.0.0-20240227224415-6ceb2ff114de go-module -google.golang.org/genproto/googleapis/api v0.0.0-20241021214115-324edc3d5d38 go-module -google.golang.org/genproto/googleapis/api v0.0.0-20250218202821-56aae31c358a go-module (+2 duplicates) -google.golang.org/genproto/googleapis/rpc v0.0.0-20241021214115-324edc3d5d38 go-module -google.golang.org/genproto/googleapis/rpc v0.0.0-20250218202821-56aae31c358a go-module (+3 duplicates) -google.golang.org/grpc v1.69.4 go-module -google.golang.org/grpc v1.72.1 go-module -google.golang.org/grpc v1.72.2 go-module (+2 duplicates) -google.golang.org/protobuf v1.35.2 go-module -google.golang.org/protobuf v1.36.5 go-module -google.golang.org/protobuf v1.36.6 go-module (+3 duplicates) -gopkg.in/evanphx/json-patch.v4 v4.12.0 go-module -gopkg.in/inf.v0 v0.9.1 go-module (+2 duplicates) -gopkg.in/ini.v1 v1.67.0 go-module -gopkg.in/yaml.v2 v2.4.0 go-module -gopkg.in/yaml.v3 v3.0.1 go-module (+3 duplicates) -gputil 1.4.0 python -graphene 1.10.8-r5 apk -graphite2 1.3.14-r6 apk -gsettings-desktop-schemas 48.0-r0 apk -gsettings-desktop-schemas-lang 48.0-r0 apk -gsm 1.0.22-r3 apk -gspell 1.14.0-r1 apk -gspell-lang 1.14.0-r1 apk -gst-plugins-bad 1.26.3-r0 apk -gst-plugins-bad-lang 1.26.3-r0 apk -gst-plugins-base 1.26.3-r0 apk -gst-plugins-base-lang 1.26.3-r0 apk -gstreamer 1.26.3-r0 apk -gstreamer-lang 1.26.3-r0 apk -gstreamer-ptp-helper 1.26.3-r0 apk -gtk+3.0 3.24.50-r0 apk -gtk+3.0-lang 3.24.50-r0 apk -gtk-layer-shell 0.9.2-r0 apk -gtk-update-icon-cache 3.24.50-r0 apk -gtksourceview 3.24.11-r3 apk -gtksourceview-lang 3.24.11-r3 apk -gui UNKNOWN binary -gui-32 UNKNOWN binary -gui-64 UNKNOWN binary -gui-arm64 UNKNOWN binary -harfbuzz 11.2.1-r0 apk -harfbuzz-icu 11.2.1-r0 apk -harfbuzz-subset 11.2.1-r0 apk -heck 0.4.1 rust-crate (+1 duplicate) -hexdump 2.41-r9 apk -hicolor-icon-theme 0.18-r0 apk -hidapi 0.14.0-r0 apk -hwdata-pci 0.395-r0 apk -hyphen 2.8.8-r3 apk -iceauth 1.0.10-r0 apk -icu-data-full 76.1-r1 apk -icu-libs 76.1-r1 apk -idna 3.11 
python -ifaddr 0.2.0 python -imath 3.1.12-r0 apk -imlib2 1.12.4-r0 apk -importlib-metadata 8.0.0 python -inflect 7.3.1 python -intel-gmmlib 22.7.3-r0 apk -intel-media-driver 25.2.6-r0 apk -interpolate_name 0.2.4 rust-crate (+1 duplicate) -iptables 1.8.11-r1 apk -iso-codes 4.17.0-r0 apk -iso-codes-lang 4.17.0-r0 apk -itertools 0.10.5 rust-crate (+1 duplicate) -itertools 0.12.0 rust-crate (+1 duplicate) -ivf 0.1.3 rust-crate (+1 duplicate) -jaraco-collections 5.1.0 python -jaraco-context 5.3.0 python -jaraco-functools 4.0.1 python -jaraco-text 3.12.1 python -jq 1.7.1 binary -jq 1.8.1-r0 apk -json-glib 1.10.8-r0 apk -json-glib-lang 1.10.8-r0 apk -k8s.io/api v0.31.2 go-module -k8s.io/api v0.32.3 go-module (+1 duplicate) -k8s.io/apimachinery v0.31.2 go-module -k8s.io/apimachinery v0.32.3 go-module (+1 duplicate) -k8s.io/apiserver v0.32.3 go-module -k8s.io/client-go v0.31.2 go-module -k8s.io/client-go v0.32.3 go-module (+1 duplicate) -k8s.io/cri-api v0.32.3 go-module -k8s.io/klog/v2 v2.130.1 go-module (+3 duplicates) -k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 go-module -k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f go-module -k8s.io/kubelet v0.32.3 go-module -k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 go-module -k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 go-module (+1 duplicate) -kbd 2.7.1-r1 apk -kbd-misc 2.7.1-r1 apk -lab 0.11.0 rust-crate (+1 duplicate) -labwc 0.8.4-r0 apk -labwc-lang 0.8.4-r0 apk -lame-libs 3.100-r5 apk -lang 0.1-r2 apk -lazy_static 1.4.0 rust-crate (+1 duplicate) -lcms2 2.16-r0 apk -libSvtAv1Enc 2.3.0-r0 apk -libapk2 2.14.9-r3 apk -libarchive 3.8.3-r0 apk -libass 0.17.3-r0 apk -libasyncns 0.8-r4 apk -libatk-1.0 2.56.5-r0 apk -libatk-bridge-2.0 2.56.5-r0 apk -libatomic 14.2.0-r6 apk -libattr 2.5.2-r2 apk -libavif 1.3.0-r0 apk -libblkid 2.41-r9 apk -libbluray 1.3.4-r1 apk -libbsd 0.12.2-r0 apk -libbz2 1.0.8-r6 apk -libc 0.2.155 rust-crate (+1 duplicate) -libc 0.2.172 rust-crate (+1 duplicate) -libcamera 0.5.1-r0 apk 
-libcamera-ipa 0.5.1-r0 apk -libcap-ng 0.8.5-r0 apk -libcap2 2.76-r0 apk -libcrypto3 3.5.4-r0 apk -libcurl 8.14.1-r2 apk -libdav1d 1.5.1-r0 apk -libdbusmenu-glib 16.04.0-r6 apk -libdbusmenu-gtk3 16.04.0-r6 apk -libdc1394 2.2.7-r0 apk -libde265 1.0.15-r1 apk -libdeflate 1.23-r0 apk -libdisplay-info 0.2.0-r0 apk -libdovi 3.3.1-r1 apk -libdrm 2.4.124-r0 apk -libeconf 0.6.3-r0 apk -libedit 20250104.3.1-r1 apk -libelf 0.193-r0 apk -libelogind 252.24-r1 apk -libepoxy 1.5.10-r1 apk -libev 4.33-r1 apk -libevdev 1.13.3-r0 apk -libexif 0.6.25-r0 apk -libexpat 2.7.3-r0 apk -libfdisk 2.41-r9 apk -libffi 3.4.8-r0 apk -libflac 1.4.3-r1 apk -libfontenc 1.1.8-r0 apk -libfreeaptx 0.2.2-r0 apk -libgcc 14.2.0-r6 apk -libgcrypt 1.10.3-r1 apk -libgepub 0.7.1-r1 apk -libgomp 14.2.0-r6 apk -libgpg-error 1.55-r0 apk -libgsf 1.14.53-r1 apk -libgsf-lang 1.14.53-r1 apk -libgtop 2.41.3-r1 apk -libgtop-lang 2.41.3-r1 apk -libgudev 238-r0 apk -libhwy 1.0.7-r1 apk -libice 1.1.2-r0 apk -libid3tag 0.16.3-r0 apk -libidn2 2.3.7-r0 apk -libinput-libs 1.28.1-r0 apk -libintl 0.24.1-r0 apk -libjpeg-turbo 3.1.0-r0 apk -libjxl 0.10.3-r2 apk -libldac 2.0.2.3-r1 apk -libltdl 2.5.4-r1 apk -libmagic 5.46-r2 apk -libmanette 0.2.12-r0 apk -libmd 1.1.0-r0 apk -libmnl 1.0.5-r2 apk -libmodplug 0.8.9.0-r3 apk -libmount 2.41-r9 apk -libncursesw 6.5_p20250503-r0 apk -libnftnl 1.2.9-r0 apk -libnice 0.1.22-r0 apk -libnotify 0.8.6-r0 apk -libogg 1.3.5-r5 apk -libopenmpt 0.7.15-r0 apk -libopenraw 0.3.7-r1 apk -libpanelw 6.5_p20250503-r0 apk -libpciaccess 0.18.1-r0 apk -libplacebo 6.338.2-r3 apk -libpng 1.6.53-r0 apk -libproc2 4.0.4-r3 apk -libproxy 0.5.10-r0 apk -libpsl 0.21.5-r3 apk -libpulse 17.0-r5 apk -libraw1394 2.1.2-r5 apk -librist 0.2.10-r1 apk -librsvg 2.60.0-r0 apk -librtmp 2.4_git20190330-r5 apk -libseat 0.9.1-r0 apk -libseccomp 2.6.0-r0 apk -libsecret 0.21.7-r0 apk -libsecret-lang 0.21.7-r0 apk -libsfdo 0.1.4-r0 apk -libsharpyuv 1.5.0-r0 apk -libsm 1.2.5-r0 apk -libsmartcols 2.41-r9 apk -libsndfile 1.2.2-r2 
apk -libsodium 1.0.20-r0 apk -libsoup3 3.6.5-r0 apk -libsoup3-lang 3.6.5-r0 apk -libsrt 1.5.3-r1 apk -libsrtp 2.6.0-r0 apk -libssh 0.11.2-r0 apk -libssl3 3.5.4-r0 apk -libstdc++ 14.2.0-r6 apk -libtasn1 4.20.0-r0 apk -libtheora 1.1.1-r18 apk -libtirpc-conf 1.3.5-r0 apk -libtirpc-nokrb 1.3.5-r0 apk -libunibreak 6.1-r0 apk -libunistring 1.3-r0 apk -libunwind 1.8.1-r0 apk -libusb 1.0.28-r0 apk -libuuid 2.41-r9 apk -libuv 1.51.0-r0 apk -libva 2.22.0-r1 apk -libva-intel-driver 2.4.1-r2 apk -libvdpau 1.5-r4 apk -libvorbis 1.3.7-r2 apk -libvpx 1.15.0-r0 apk -libwebp 1.5.0-r0 apk -libwebpdemux 1.5.0-r0 apk -libwebpmux 1.5.0-r0 apk -libwnck3 43.2-r0 apk -libwnck3-lang 43.2-r0 apk -libwoff2common 1.0.2-r4 apk -libwoff2enc 1.0.2-r4 apk -libx11 1.8.11-r0 apk -libxau 1.0.12-r0 apk -libxaw 1.0.16-r1 apk -libxcb 1.17.0-r0 apk -libxcomposite 0.4.6-r5 apk -libxcursor 1.2.3-r0 apk -libxcvt 0.1.3-r0 apk -libxdamage 1.1.6-r5 apk -libxdmcp 1.1.5-r1 apk -libxext 1.3.6-r2 apk -libxfce4panel 4.20.4-r0 apk -libxfce4ui 4.20.1-r0 apk -libxfce4ui-lang 4.20.1-r0 apk -libxfce4util 4.20.0-r0 apk -libxfce4util-lang 4.20.0-r0 apk -libxfce4windowing 4.20.3-r0 apk -libxfce4windowing-lang 4.20.3-r0 apk -libxfixes 6.0.1-r4 apk -libxfont2 2.0.7-r0 apk -libxft 2.3.8-r3 apk -libxi 1.8.2-r0 apk -libxinerama 1.1.5-r4 apk -libxkbcommon 1.8.1-r2 apk -libxkbcommon-x11 1.8.1-r2 apk -libxkbfile 1.1.3-r0 apk -libxklavier 5.4-r8 apk -libxml2 2.13.9-r0 apk -libxmu 1.2.1-r0 apk -libxpm 3.5.17-r0 apk -libxpresent 1.0.1-r3 apk -libxrandr 1.5.4-r1 apk -libxrender 0.9.12-r0 apk -libxres 1.2.2-r3 apk -libxscrnsaver 1.2.4-r3 apk -libxshmfence 1.3.3-r0 apk -libxslt 1.1.43-r3 apk -libxt 1.3.1-r0 apk -libxtables 1.8.11-r1 apk -libxtst 1.2.5-r0 apk -libxv 1.0.13-r0 apk -libxvmc 1.0.14-r1 apk -libxxf86vm 1.1.6-r0 apk -libyuv 0.0.1887.20251502-r1 apk -libzbar 0.23.93-r1 apk -libzmq 4.3.5-r2 apk -lilv-libs 0.24.26-r0 apk -linux-firmware-none 20250509-r0 apk -linux-pam 1.7.0-r4 apk -linux-raw-sys 0.4.12 rust-crate (+1 duplicate) 
-llvm20-libs 20.1.8-r0 apk -log 0.4.20 rust-crate (+1 duplicate) -logger 2.41-r9 apk -losetup 2.41-r9 apk -lsblk 2.41-r9 apk -lscpu 2.41-r9 apk -lz4-libs 1.10.0-r0 apk -maybe-rayon 0.1.1 rust-crate (+1 duplicate) -mbedtls 3.6.5-r0 apk -mcookie 2.41-r9 apk -memchr 2.7.1 rust-crate (+1 duplicate) -mesa 25.1.9-r0 apk -mesa-dri-gallium 25.1.9-r0 apk -mesa-egl 25.1.9-r0 apk -mesa-gbm 25.1.9-r0 apk -mesa-gl 25.1.9-r0 apk -mesa-gles 25.1.9-r0 apk -mesa-va-gallium 25.1.9-r0 apk -mesa-vulkan-ati 25.1.9-r0 apk -mesa-vulkan-intel 25.1.9-r0 apk -mesa-vulkan-swrast 25.1.9-r0 apk -minimal-lexical 0.2.1 rust-crate (+1 duplicate) -minizip 1.3.1-r0 apk -mkfontscale 1.2.3-r1 apk -more-itertools 10.3.0 python -mount 2.41-r9 apk -mousepad 0.6.3-r1 apk -mousepad-lang 0.6.3-r1 apk -mpdecimal 4.0.1-r0 apk -mpg123-libs 1.32.10-r0 apk -msgpack 1.1.2 python -mtdev 1.1.7-r0 apk -multidict 6.7.0 python -musl 1.2.5-r10 apk -musl-locales 0.1.0-r1 apk -musl-locales-lang 0.1.0-r1 apk -musl-utils 1.2.5-r10 apk -my-test-package 1.0 python -ncurses-terminfo 6.5_p20250503-r0 apk -ncurses-terminfo-base 6.5_p20250503-r0 apk -neon 0.33.0-r0 apk -netcat-openbsd 1.229.1-r0 apk -nettle 3.10.1-r0 apk -new_debug_unreachable 1.0.4 rust-crate (+1 duplicate) -nghttp2-libs 1.65.0-r0 apk -nginx 1.28.0-r3 apk -nginx-mod-http-fancyindex 1.28.0-r3 apk -nom 7.1.3 rust-crate (+1 duplicate) -noop_proc_macro 0.3.0 rust-crate (+1 duplicate) -nspr 4.36-r0 apk -nss 3.114-r0 apk -num-bigint 0.4.4 rust-crate (+1 duplicate) -num-derive 0.4.1 rust-crate (+1 duplicate) -num-integer 0.1.45 rust-crate (+1 duplicate) -num-rational 0.4.1 rust-crate (+1 duplicate) -num-traits 0.2.17 rust-crate (+1 duplicate) -numactl 2.0.18-r0 apk -once_cell 1.19.0 rust-crate (+1 duplicate) -onevpl-libs 2023.3.1-r2 apk -oniguruma 6.9.10-r0 apk -openal-soft-libs 1.24.2-r0 apk -openbox 3.6.1-r8 apk -openbox-lang 3.6.1-r8 apk -openbox-libs 3.6.1-r8 apk -openexr-libiex 3.3.2-r0 apk -openexr-libilmthread 3.3.2-r0 apk -openexr-libopenexr 3.3.2-r0 apk 
-openexr-libopenexrcore 3.3.2-r0 apk -openh264 2.6.0-r0 apk -openjpeg 2.5.3-r0 apk -openssh-client-common 10.0_p1-r10 apk -openssh-client-default 10.0_p1-r10 apk -openssh-keygen 10.0_p1-r10 apk -openssl 3.5.4-r0 apk -opus 1.5.2-r1 apk -orc 0.4.40-r1 apk -p11-kit 0.25.5-r2 apk -packaging 24.2 python -pango 1.56.3-r0 apk -partx 2.41-r9 apk -pasimple 0.0.3 python -paste 1.0.14 rust-crate (+1 duplicate) -pciutils 3.13.0-r1 apk -pciutils-libs 3.13.0-r1 apk -pcmflux 1.0.6 python -pcre2 10.46-r0 apk -pillow 12.0.0 python -pip 25.0.1 python -pipewire-libs 1.4.7-r0 apk -pixelflux 1.5.0 python -pixman 0.46.4-r0 apk -pkgconf 2.4.3-r0 apk -platformdirs 4.2.2 python -polkit 126-r0 apk -polkit-common 126-r0 apk -polkit-lang 126-r0 apk -polkit-noelogind-libs 126-r0 apk -poppler 25.04.0-r0 apk -poppler-glib 25.04.0-r0 apk -ppv-lite86 0.2.17 rust-crate (+1 duplicate) -proc-macro2 1.0.76 rust-crate (+1 duplicate) -procps-ng 4.0.4-r3 apk -procps-ng-lang 4.0.4-r3 apk -profiling 1.0.13 rust-crate (+1 duplicate) -profiling-procmacros 1.0.13 rust-crate (+1 duplicate) -prometheus-client 0.23.1 python -propcache 0.4.1 python -psutil 7.1.3 python -pulseaudio 17.0-r5 apk -pulseaudio-alsa 17.0-r5 apk -pulseaudio-lang 17.0-r5 apk -pulseaudio-utils 17.0-r5 apk -pulsectl 24.12.0 python -pyc 3.12.12-r0 apk -pycparser 2.23 python -pyee 13.0.0 python -pylibsrtp 1.0.0 python -pynput 1.8.1 python -pyopenssl 25.3.0 python -python-xlib 0.33 python -python3 3.12.12-r0 apk -python3-pyc 3.12.12-r0 apk -python3-pycache-pyc0 3.12.12-r0 apk -quote 1.0.35 rust-crate (+1 duplicate) -radium 0.7.0 rust-crate (+1 duplicate) -rand 0.8.5 rust-crate (+1 duplicate) -rand_chacha 0.3.1 rust-crate (+1 duplicate) -rand_core 0.6.4 rust-crate (+1 duplicate) -rav1e 0.7.1 rust-crate (+1 duplicate) -rav1e-libs 0.7.1-r2 apk -rayon 1.8.0 rust-crate (+1 duplicate) -rayon-core 1.12.0 rust-crate (+1 duplicate) -readline 8.2.13-r1 apk -resenje.org/singleflight v0.4.3 go-module -rhash-libs 1.4.5-r0 apk -ristretto 0.13.3-r0 apk 
-ristretto-lang 0.13.3-r0 apk -roc-toolkit-libs 0.4.0-r1 apk -runc 1.3.4-r0 apk -runuser 2.41-r9 apk -rustix 0.38.28 rust-crate (+1 duplicate) -sbc 2.0-r2 apk -scan_fmt 0.2.6 rust-crate (+1 duplicate) -scanelf 1.3.8-r1 apk -selkies 0.0.0 python -serd-libs 0.32.4-r0 apk -setarch 2.41-r9 apk -setpriv 2.41-r9 apk -setuptools 80.9.0 python -setxkbmap 1.3.4-r0 apk -sfdisk 2.41-r9 apk -shaderc 2024.4-r0 apk -shadow 4.17.3-r0 apk -shared-mime-info 2.4-r6 apk -shared-mime-info-lang 2.4-r6 apk -signal-hook 0.3.17 rust-crate (+1 duplicate) -signal-hook-registry 1.4.1 rust-crate (+1 duplicate) -sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd go-module -sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 go-module (+1 duplicate) -sigs.k8s.io/structured-merge-diff/v4 v4.4.1 go-module -sigs.k8s.io/structured-merge-diff/v4 v4.4.2 go-module (+1 duplicate) -sigs.k8s.io/yaml v1.4.0 go-module (+3 duplicates) -simd_helpers 0.1.0 rust-crate (+1 duplicate) -simdutf 7.2.1-r0 apk -six 1.17.0 python -skalibs-libs 2.14.4.0-r0 apk -sord-libs 0.16.18-r0 apk -soundtouch 2.3.3-r0 apk -soxr 0.1.3-r7 apk -spandsp 0.0.6-r5 apk -speexdsp 1.2.1-r2 apk -spirv-tools 1.4.313.0-r0 apk -sqlite-libs 3.49.2-r1 apk -sratom 0.6.18-r0 apk -ssl_client 1.37.0-r20 apk -st 0.9.2-r0 apk -startup-notification 0.12-r8 apk -stdlib go1.24.11 go-module (+7 duplicates) -sudo 1.9.17_p2-r0 apk -syn 2.0.48 rust-crate (+1 duplicate) -tags.cncf.io/container-device-interface v1.0.1 go-module (+2 duplicates) -tags.cncf.io/container-device-interface/specs-go v1.0.0 go-module (+1 duplicate) -tap 1.0.1 rust-crate (+1 duplicate) -tar 1.35-r3 apk -tdb-libs 1.4.12-r0 apk -terminal_size 0.3.0 rust-crate (+1 duplicate) -thiserror 1.0.56 rust-crate (+1 duplicate) -thiserror-impl 1.0.56 rust-crate (+1 duplicate) -thunar 4.20.3-r0 apk -thunar-lang 4.20.3-r0 apk -thunar-volman 4.20.0-r0 apk -thunar-volman-lang 4.20.0-r0 apk -tiff 4.7.1-r0 apk -tini-static 0.19.0-r3 apk -tinyvec 1.9.0 rust-crate (+1 duplicate) -tomli 2.0.1 python 
-tslib 1.23-r0 apk -tumbler 4.20.0-r0 apk -tumbler-lang 4.20.0-r0 apk -typeguard 4.3.0 python -typing-extensions 4.12.2 python -typing-extensions 4.15.0 python -tzdata 2025c-r0 apk -umount 2.41-r9 apk -unicode-ident 1.0.12 rust-crate (+1 duplicate) -unicode-width 0.1.11 rust-crate (+1 duplicate) -upower 1.90.9-r0 apk -upower-lang 1.90.9-r0 apk -utf8parse 0.2.1 rust-crate (+1 duplicate) -utf8proc 2.10.0-r0 apk -util-linux 2.41-r9 apk -util-linux-misc 2.41-r9 apk -util-macros 1.20.1-r0 apk -utmps-libs 0.1.3.1-r0 apk -uuidgen 2.41-r9 apk -v_frame 0.3.7 rust-crate (+1 duplicate) -vidstab 1.1.1-r0 apk -vo-aacenc 0.1.3-r3 apk -vo-amrwbenc 0.1.3-r3 apk -vte3 0.80.4-r0 apk -vte3-lang 0.80.4-r0 apk -vte3-profile 0.80.4-r0 apk -vulkan-loader 1.4.313.0-r0 apk -vulkan-tools 1.4.313.0-r0 apk -watchdog 6.0.0 python -wayland 1.23.1-r3 apk -wayland-libs-client 1.23.1-r3 apk -wayland-libs-cursor 1.23.1-r3 apk -wayland-libs-egl 1.23.1-r3 apk -wayland-libs-server 1.23.1-r3 apk -webkit2gtk-4.1 2.48.1-r1 apk -webkit2gtk-4.1-lang 2.48.1-r1 apk -webrtc-audio-processing-1 1.3-r1 apk -webrtc-audio-processing-2 2.1-r0 apk -websockets 15.0.1 python -wheel 0.45.1 python -wipefs 2.41-r9 apk -wl-clipboard 2.2.1-r0 apk -wlroots 0.18.2-r1 apk -wtype 0.4-r0 apk -wyz 0.5.1 rust-crate (+1 duplicate) -x264-libs 0.164.3108-r0 apk -x265-libs 3.6-r0 apk -xauth 1.1.3-r0 apk -xcb-util 0.4.1-r3 apk -xcb-util-renderutil 0.3.10-r0 apk -xcb-util-wm 0.4.2-r0 apk -xclip 0.13-r3 apk -xdg-dbus-proxy 0.1.6-r0 apk -xdg-utils 1.2.1-r1 apk -xdotool 3.20211022.1-r1 apk -xf86-video-amdgpu 23.0.0-r3 apk -xf86-video-ati 22.0.0-r3 apk -xf86-video-intel 2.99.917_git20221028-r7 apk -xf86-video-nouveau 1.0.18-r0 apk -xf86-video-qxl 0.1.6-r3 apk -xfce4 4.20-r0 apk -xfce4-appfinder 4.20.0-r0 apk -xfce4-appfinder-lang 4.20.0-r0 apk -xfce4-panel 4.20.4-r0 apk -xfce4-panel-lang 4.20.4-r0 apk -xfce4-power-manager 4.20.0-r1 apk -xfce4-power-manager-lang 4.20.0-r1 apk -xfce4-session 4.20.2-r0 apk -xfce4-session-lang 4.20.2-r0 apk 
-xfce4-settings 4.20.1-r0 apk -xfce4-settings-lang 4.20.1-r0 apk -xfce4-terminal 1.1.3-r0 apk -xfce4-terminal-lang 1.1.3-r0 apk -xfconf 4.20.0-r1 apk -xfconf-lang 4.20.0-r1 apk -xfdesktop 4.20.1-r0 apk -xfdesktop-lang 4.20.1-r0 apk -xfwm4 4.20.0-r1 apk -xfwm4-lang 4.20.0-r1 apk -xkbcommon 1.5.1 python -xkbcomp 1.5.0-r0 apk -xkeyboard-config 2.43-r0 apk -xkeyboard-config-lang 2.43-r0 apk -xorg-server 21.1.19-r0 apk -xorg-server-common 21.1.19-r0 apk -xprop 1.2.8-r0 apk -xrandr 1.5.2-r0 apk -xrdb 1.2.2-r0 apk -xsel 1.2.1-r0 apk -xset 1.2.5-r1 apk -xsettingsd 1.0.2-r2 apk -xterm 399-r0 apk -xvfb 21.1.19-r0 apk -xvidcore 1.3.7-r2 apk -xwayland 24.1.9-r0 apk -xz-libs 5.8.1-r0 apk -y4m 0.8.0 rust-crate (+1 duplicate) -yaml 0.2.5-r2 apk -yarl 1.22.0 python -zimg 3.0.5-r3 apk -zipp 3.19.2 python -zix-libs 0.6.2-r0 apk -zlib 1.3.1-r2 apk -zstd-libs 1.5.7-r0 apk diff --git a/readme-vars.yml b/readme-vars.yml deleted file mode 100644 index 89b285125..000000000 --- a/readme-vars.yml +++ /dev/null @@ -1,150 +0,0 @@ ---- - -# project information -project_name: webtop -project_url: "https://github.com/linuxserver/docker-webtop" -project_logo: "https://raw.githubusercontent.com/linuxserver/docker-templates/master/linuxserver.io/img/webtop-logo.png" -project_blurb: | - [{{ project_name|capitalize }}]({{ project_url }}) - Alpine, Ubuntu, Fedora, and Arch based containers containing full desktop environments in officially supported flavors accessible via any modern web browser. 
-project_lsio_github_repo_url: "https://github.com/linuxserver/docker-{{ project_name }}" -project_categories: "Remote Desktop" -# supported architectures -available_architectures: - - {arch: "{{ arch_x86_64 }}", tag: "amd64-latest"} - - {arch: "{{ arch_arm64 }}", tag: "arm64v8-latest"} -# development version -development_versions: true -development_versions_items: - - {tag: "latest", desc: "XFCE Alpine"} - - {tag: "alpine-i3", desc: "i3 Alpine"} - - {tag: "alpine-mate", desc: "MATE Alpine"} - - {tag: "arch-i3", desc: "i3 Arch"} - - {tag: "arch-kde", desc: "KDE Arch"} - - {tag: "arch-mate", desc: "MATE Arch"} - - {tag: "arch-xfce", desc: "XFCE Arch"} - - {tag: "debian-i3", desc: "i3 Debian"} - - {tag: "debian-kde", desc: "KDE Debian"} - - {tag: "debian-mate", desc: "MATE Debian"} - - {tag: "debian-xfce", desc: "XFCE Debian"} - - {tag: "el-i3", desc: "i3 Enterprise Linux"} - - {tag: "el-mate", desc: "MATE Enterprise Linux"} - - {tag: "el-xfce", desc: "XFCE Enterprise Linux"} - - {tag: "fedora-i3", desc: "i3 Fedora"} - - {tag: "fedora-kde", desc: "KDE Fedora"} - - {tag: "fedora-mate", desc: "MATE Fedora"} - - {tag: "fedora-xfce", desc: "XFCE Fedora"} - - {tag: "ubuntu-i3", desc: "i3 Ubuntu"} - - {tag: "ubuntu-kde", desc: "KDE Ubuntu"} - - {tag: "ubuntu-mate", desc: "MATE Ubuntu"} - - {tag: "ubuntu-xfce", desc: "XFCE Ubuntu"} -# container parameters -param_container_name: "{{ project_name }}" -param_usage_include_vols: true -param_volumes: - - {vol_path: "/config", vol_host_path: "/path/to/data", desc: "abc users home directory"} -param_usage_include_ports: true -param_ports: - - {external_port: "3000", internal_port: "3000", port_desc: "Web Desktop GUI HTTP, must be proxied"} - - {external_port: "3001", internal_port: "3001", port_desc: "Web Desktop GUI HTTPS"} -custom_params: - - {name: "shm-size", name_compose: "shm_size", value: "1gb", desc: "Recommended for all desktop images."} -# Selkies blurb settings -selkies_blurb: true -show_nvidia: true -# application 
setup block -app_setup_block_enabled: true -app_setup_block: | - The application can be accessed at: - - * https://yourhost:3001/ -# init diagram -init_diagram: | - "webtop:latest": { - docker-mods - base { - fix-attr +\nlegacy cont-init - } - docker-mods -> base - legacy-services - custom services - init-services -> legacy-services - init-services -> custom services - custom services -> legacy-services - legacy-services -> ci-service-check - init-migrations -> init-adduser - init-os-end -> init-config - init-selkies-end -> init-config - init-config -> init-config-end - init-crontab-config -> init-config-end - init-config -> init-crontab-config - init-mods-end -> init-custom-files - init-adduser -> init-device-perms - base -> init-envfile - base -> init-migrations - init-config-end -> init-mods - init-mods-package-install -> init-mods-end - init-mods -> init-mods-package-install - init-selkies -> init-nginx - init-adduser -> init-os-end - init-device-perms -> init-os-end - init-envfile -> init-os-end - init-os-end -> init-selkies - init-nginx -> init-selkies-config - init-video -> init-selkies-end - init-custom-files -> init-services - init-selkies-config -> init-video - init-services -> svc-cron - svc-cron -> legacy-services - init-services -> svc-de - svc-nginx -> svc-de - svc-xorg -> svc-de - svc-de -> legacy-services - init-services -> svc-docker - svc-docker -> legacy-services - init-services -> svc-nginx - svc-nginx -> legacy-services - init-services -> svc-pulseaudio - svc-pulseaudio -> legacy-services - init-services -> svc-selkies - svc-nginx -> svc-selkies - svc-pulseaudio -> svc-selkies - svc-xorg -> svc-selkies - svc-selkies -> legacy-services - init-services -> svc-watchdog - svc-watchdog -> legacy-services - init-services -> svc-xorg - svc-xorg -> legacy-services - init-services -> svc-xsettingsd - svc-nginx -> svc-xsettingsd - svc-xorg -> svc-xsettingsd - svc-xsettingsd -> legacy-services - } - Base Images: { - "baseimage-selkies:alpine322" <- 
"baseimage-alpine:3.22" - } - "webtop:latest" <- Base Images -# changelog -changelogs: - - {date: "17.11.25:", desc: "Rebase Fedora images to 43."} - - {date: "24.07.25:", desc: "Rebase Debian images to Trixie."} - - {date: "17.06.25:", desc: "Rebase all images to Selkies, drop openbox and icewm, bump Alpine to 3.22, bump Fedora to 42."} - - {date: "10.01.25:", desc: "Rebase Fedora to 41."} - - {date: "06.12.24:", desc: "Rebase Alpine to 3.21."} - - {date: "26.09.24:", desc: "Swap from firefox to chromium on Alpine images."} - - {date: "23.05.24:", desc: "Rebase Alpine to 3.20, document Nvidia support."} - - {date: "22.04.24:", desc: "Rebase Ubuntu to Noble."} - - {date: "16.04.24:", desc: "Add docs on PRoot Apps."} - - {date: "14.04.24:", desc: "Rebase Fedora to 40."} - - {date: "11.02.24:", desc: "Add PWA icons and title variants properly."} - - {date: "06.02.24:", desc: "Update Readme about native language support."} - - {date: "29.12.23:", desc: "Rebase Alpine to 3.19 and swap back to Firefox."} - - {date: "07.11.23:", desc: "Rebase Fedora to 39."} - - {date: "14.06.23:", desc: "Rebase to Debian Bookworm."} - - {date: "13.05.23:", desc: "Rebase to Alpine 3.18 and Fedora 38."} - - {date: "23.03.23:", desc: "Rebase all Webtops to KasmVNC base image."} - - {date: "21.10.22:", desc: "Rebase xfce to Alpine 3.16, migrate to s6v3."} - - {date: "12.03.22:", desc: "Add documentation for mounting in a GPU."} - - {date: "05.02.22:", desc: "Rebase KDE Ubuntu to Jammy, add new documentation for updated gclient, stop recommending priv mode."} - - {date: "21.09.21:", desc: "Add Fedora and Arch images, show seccomp settings in readme."} - - {date: "26.09.21:", desc: "Rebase to Alpine versions to 3.14."} - - {date: "20.04.21:", desc: "Initial release."} diff --git a/restart-container.sh b/restart-container.sh new file mode 100755 index 000000000..fbfff311a --- /dev/null +++ b/restart-container.sh @@ -0,0 +1,39 @@ +#!/usr/bin/env bash +set -euo pipefail + 
+HOST_USER=${USER:-$(whoami)} +NAME=${CONTAINER_NAME:-linuxserver-kde-${HOST_USER}} + +usage() { + cat <&2 + echo "Use start-container.sh to create a new container." >&2 + exit 1 +fi + +# Check if container is running +if docker ps --format '{{.Names}}' | grep -qx "$NAME"; then + echo "Restarting container ${NAME}..." + docker restart "$NAME" >/dev/null + echo "Container ${NAME} restarted." +else + echo "Container ${NAME} is stopped. Starting..." + docker start "$NAME" >/dev/null + echo "Container ${NAME} started." +fi diff --git a/root/defaults/startwm.sh b/root/defaults/startwm.sh deleted file mode 100755 index 2a42e0fd8..000000000 --- a/root/defaults/startwm.sh +++ /dev/null @@ -1,10 +0,0 @@ -#!/bin/bash - -# Default settings -if [ ! -d "${HOME}"/.config/xfce4/xfconf/xfce-perchannel-xml ]; then - mkdir -p "${HOME}"/.config/xfce4/xfconf/xfce-perchannel-xml - cp /defaults/xfce/* "${HOME}"/.config/xfce4/xfconf/xfce-perchannel-xml/ -fi - -# Start DE -exec dbus-launch --exit-with-session /usr/bin/xfce4-session > /dev/null 2>&1 diff --git a/root/defaults/xfce/xfce4-desktop.xml b/root/defaults/xfce/xfce4-desktop.xml deleted file mode 100644 index 1e4d18b6d..000000000 --- a/root/defaults/xfce/xfce4-desktop.xml +++ /dev/null @@ -1,23 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - diff --git a/root/defaults/xfce/xfce4-panel.xml b/root/defaults/xfce/xfce4-panel.xml deleted file mode 100644 index fbcf29179..000000000 --- a/root/defaults/xfce/xfce4-panel.xml +++ /dev/null @@ -1,53 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/root/defaults/xfce/xfwm4.xml b/root/defaults/xfce/xfwm4.xml deleted file mode 100644 index ab08aaa9c..000000000 --- a/root/defaults/xfce/xfwm4.xml +++ /dev/null @@ -1,91 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff 
#!/usr/bin/env bash
# shell-container.sh — open an interactive login shell inside the running
# per-user webtop container, preserving the host user's identity.
#
# Env overrides:
#   CONTAINER_NAME  container name (default: linuxserver-kde-<user>)
set -euo pipefail

HOST_USER=${USER:-$(whoami)}
NAME=${CONTAINER_NAME:-linuxserver-kde-${HOST_USER}}
WORKDIR="/home/${HOST_USER}"

usage() {
  echo "Usage: $0 [-n name]"
  echo "  -n  container name (default: ${NAME})"
}

while getopts ":n:h" opt; do
  case "$opt" in
    n) NAME=$OPTARG ;;
    h) usage; exit 0 ;;
    *) usage; exit 1 ;;
  esac
done

if ! docker ps --format '{{.Names}}' | grep -qx "$NAME"; then
  echo "Container ${NAME} is not running." >&2
  exit 1
fi

HOST_UID=$(id -u "${HOST_USER}")
HOST_GID=$(id -g "${HOST_USER}")

# Prefer the username so container supplemental groups (sudo, docker, etc.)
# are preserved; fall back to uid:gid when the user is absent in-container.
if docker exec "$NAME" id "${HOST_USER}" >/dev/null 2>&1; then
  EXEC_USER="${HOST_USER}"
else
  echo "User ${HOST_USER} not found in container. Falling back to uid:gid." >&2
  EXEC_USER="${HOST_UID}:${HOST_GID}"
fi

# Single exec path: identical env/workdir regardless of how the user resolved.
exec docker exec -it \
  -u "${EXEC_USER}" \
  -e USER="${HOST_USER}" \
  -e HOME="/home/${HOST_USER}" \
  -e TERM="${TERM:-xterm-256color}" \
  -w "${WORKDIR}" \
  "$NAME" bash -i -l
#!/usr/bin/env bash
# start-container.sh — create and start the per-user webtop container with
# resolution/DPI settings, optional GPU/VA-API encoding, SSL cert mount, and
# host home / /mnt bind mounts.
set -euo pipefail

HOST_USER=${USER:-$(whoami)}
HOST_UID=$(id -u "${HOST_USER}")
HOST_GID=$(id -g "${HOST_USER}")
NAME=${CONTAINER_NAME:-linuxserver-kde-${HOST_USER}}
IMAGE_BASE=${IMAGE_BASE:-webtop-kde}
IMAGE_TAG=${IMAGE_TAG:-}
IMAGE_OVERRIDE=${IMAGE_NAME:-}
UBUNTU_VERSION=${UBUNTU_VERSION:-24.04}
RESOLUTION=${RESOLUTION:-1920x1080}
DPI=${DPI:-96}
SHM_SIZE=${SHM_SIZE:-4g}
PLATFORM=${PLATFORM:-}
ARCH_OVERRIDE=${ARCH_OVERRIDE:-}
SSL_DIR=${SSL_DIR:-}
ENCODER=${ENCODER:-}
GPU_VENDOR=${GPU_VENDOR:-}   # deprecated (use ENCODER)
GPU_ALL=false
GPU_NUMS=""
DOCKER_GPUS=""
DRI_NODE=""
IMAGE_TAG_SET=false
IMAGE_VERSION_DEFAULT=${IMAGE_VERSION:-1.0.0}
HOST_ARCH_RAW=$(uname -m)
case "${HOST_ARCH_RAW}" in
  x86_64|amd64)  DETECTED_ARCH=amd64 ;;
  aarch64|arm64) DETECTED_ARCH=arm64 ;;
  *)             DETECTED_ARCH="${HOST_ARCH_RAW}" ;;
esac

usage() {
  cat <<EOF
Usage: $0 [options]
  -n               container name (default: ${NAME})
  -i               image base name (default: ${IMAGE_BASE})
  -t               image version tag (default: ${IMAGE_VERSION_DEFAULT})
  -u, --ubuntu     Ubuntu version (22.04 or 24.04). Default: ${UBUNTU_VERSION}
  -r               resolution (e.g. 1920x1080, default: ${RESOLUTION})
  -d               DPI (default: ${DPI})
  -p               platform for docker run (e.g. linux/arm64). Default: host
  -a, --arch       image arch for tag (amd64/arm64). Overrides auto-detect
  -s               host dir containing cert.pem and cert.key to mount at ssl
                   (recommended for WSS)
  -e, --encoder    Encoder: software|nvidia|nvidia-wsl|intel|amd (required)
  -g, --gpu        Docker --gpus value (optional): all or device=0,1
  --all            shortcut for --gpu all
  --num N[,M...]   shortcut for --gpu device=N[,M...]
  --dri-node PATH  DRI render node for VA-API (e.g. /dev/dri/renderD129)

Encoder Examples:
  --encoder software    # Software encoding
  --encoder intel       # Intel VA-API
  --encoder amd         # AMD VA-API
  --encoder nvidia      # NVIDIA NVENC
  --encoder nvidia-wsl  # NVIDIA NVENC on WSL2

Docker GPU Examples (optional):
  --gpu all             # Use all GPUs (NVIDIA)
  --gpu device=0,1      # Use GPU 0 and 1 (NVIDIA)
  --all                 # Same as --gpu all
  --num 0,1             # Same as --gpu device=0,1
EOF
}

while [[ $# -gt 0 ]]; do
  case "$1" in
    -n) NAME=$2; shift 2 ;;
    -i) IMAGE_BASE=$2; shift 2 ;;
    -t) IMAGE_TAG=$2; IMAGE_TAG_SET=true; shift 2 ;;
    -u|--ubuntu) UBUNTU_VERSION=$2; shift 2 ;;
    -r) RESOLUTION=$2; shift 2 ;;
    -d) DPI=$2; shift 2 ;;
    -p) PLATFORM=$2; shift 2 ;;
    -a|--arch) ARCH_OVERRIDE=$2; shift 2 ;;
    -s) SSL_DIR=$2; shift 2 ;;
    -e|--encoder) ENCODER=$2; shift 2 ;;
    -g|--gpu) DOCKER_GPUS=$2; shift 2 ;;
    --all) GPU_ALL=true; shift ;;
    --num) GPU_NUMS=$2; shift 2 ;;
    --dri-node) DRI_NODE=$2; shift 2 ;;
    -h|--help) usage; exit 0 ;;
    --) shift; break ;;
    -*) echo "Unknown option: $1" >&2; usage; exit 1 ;;
    *) break ;;
  esac
done

if [[ ! $RESOLUTION =~ ^[0-9]+x[0-9]+$ ]]; then
  echo "Resolution must be WIDTHxHEIGHT (e.g. 1920x1080)" >&2
  exit 1
fi

if [[ -z "${ENCODER}" ]]; then
  echo "Error: --encoder is required." >&2
  usage
  exit 1
fi

# Normalize encoder aliases; reject anything unknown.
ENCODER=$(echo "${ENCODER}" | tr '[:upper:]' '[:lower:]')
case "${ENCODER}" in
  software|none|cpu)
    ENCODER="software"
    ;;
  nvidia|nvidia-wsl|intel|amd)
    ;;
  *)
    echo "Unsupported encoder: ${ENCODER}" >&2
    usage
    exit 1
    ;;
esac

# GPU_VENDOR is kept only for backward compatibility with the container env.
GPU_VENDOR="${ENCODER}"

# --all / --num are shortcuts; an explicit --gpu wins.
if [[ -z "${DOCKER_GPUS}" ]]; then
  if [[ "${GPU_ALL}" = true ]]; then
    DOCKER_GPUS="all"
  elif [[ -n "${GPU_NUMS}" ]]; then
    DOCKER_GPUS="device=${GPU_NUMS}"
  fi
fi

if [[ -n "${DOCKER_GPUS}" ]]; then
  if [[ "${DOCKER_GPUS}" != "all" && ! "${DOCKER_GPUS}" =~ ^device=[0-9,]+$ ]]; then
    echo "Error: --gpu value must be 'all' or 'device=0,1'." >&2
    exit 1
  fi
fi

# Image architecture: explicit --platform beats --arch beats host auto-detect.
if [[ -n "${PLATFORM}" ]]; then
  PLATFORM_ARCH="${PLATFORM#*/}"
  case "${PLATFORM_ARCH}" in
    amd64|x86_64)  IMAGE_ARCH="amd64" ;;
    arm64|aarch64) IMAGE_ARCH="arm64" ;;
    *)             IMAGE_ARCH="${DETECTED_ARCH}" ;;
  esac
elif [[ -n "${ARCH_OVERRIDE}" ]]; then
  IMAGE_ARCH="${ARCH_OVERRIDE}"
else
  IMAGE_ARCH="${DETECTED_ARCH}"
fi

if [[ "${IMAGE_TAG_SET}" = false || -z "${IMAGE_TAG}" ]]; then
  IMAGE_TAG="${IMAGE_VERSION_DEFAULT}"
fi

# Derived display settings and per-user host ports (UID keeps them unique).
WIDTH=${RESOLUTION%x*}
HEIGHT=${RESOLUTION#*x}
SCALE_FACTOR=$(awk "BEGIN { printf \"%.2f\", ${DPI} / 96 }")
CHROMIUM_FLAGS_COMBINED="--force-device-scale-factor=${SCALE_FACTOR} ${CHROMIUM_FLAGS:-}"
HOST_PORT_SSL=${PORT_SSL_OVERRIDE:-$((HOST_UID + 30000))}
HOST_PORT_HTTP=${PORT_HTTP_OVERRIDE:-$((HOST_UID + 40000))}

# macOS: `hostname` may be unset/ugly; prefer scutil names, then sanitize.
HOSTNAME_RAW="$(hostname)"
if [[ "$(uname -s)" == "Darwin" ]]; then
  HOSTNAME_RAW="$(scutil --get HostName 2>/dev/null || true)"
  if [[ -z "${HOSTNAME_RAW}" ]]; then
    HOSTNAME_RAW="$(scutil --get LocalHostName 2>/dev/null || true)"
  fi
  if [[ -z "${HOSTNAME_RAW}" ]]; then
    HOSTNAME_RAW="$(scutil --get ComputerName 2>/dev/null || hostname)"
  fi
fi
HOSTNAME_RAW="$(printf '%s' "${HOSTNAME_RAW}" | tr ' ' '-' | sed 's/[^A-Za-z0-9._-]/-/g; s/--*/-/g; s/^-//; s/-$//')"
HOSTNAME_RAW="${HOSTNAME_RAW:-Host}"
HOSTNAME_VAL=${CONTAINER_HOSTNAME:-Docker-${HOSTNAME_RAW}}
echo "Using container hostname: ${HOSTNAME_VAL}"

HOST_HOME_MOUNT="/home/${HOST_USER}/host_home"
HOST_MNT_MOUNT="/home/${HOST_USER}/host_mnt"
MNT_FLAGS=()
if [[ "$(uname -s)" != "Darwin" ]]; then
  MNT_FLAGS=(-v "/mnt":"${HOST_MNT_MOUNT}":rw)
else
  echo "Info: Skipping /mnt mount on macOS (Docker Desktop file sharing restriction)." >&2
fi

if [[ -n "${IMAGE_OVERRIDE}" ]]; then
  IMAGE="${IMAGE_OVERRIDE}"
else
  IMAGE="${IMAGE_BASE}-${HOST_USER}-${IMAGE_ARCH}-u${UBUNTU_VERSION}:${IMAGE_TAG}"
fi
REPO_PREFIX="${IMAGE_BASE}-${HOST_USER}-${IMAGE_ARCH}-u${UBUNTU_VERSION}"

if docker ps -a --format '{{.Names}}' | grep -qx "$NAME"; then
  echo "Container ${NAME} already exists. Stop/remove it before starting a new one." >&2
  exit 1
fi

# If the exact tag is missing, fall back to any local tag of the same repo.
if ! docker image inspect "$IMAGE" >/dev/null 2>&1; then
  echo "Image ${IMAGE} not found. Searching for fallback tags under ${REPO_PREFIX}:*" >&2
  REPO_IMAGES=()
  while IFS= read -r line; do
    REPO_IMAGES+=("$line")
  done < <(docker images --format '{{.Repository}}:{{.Tag}}' | grep "^${REPO_PREFIX}:" || true)
  while IFS= read -r line; do
    REPO_IMAGES+=("$line")
  done < <(docker images --format '{{.Repository}}:{{.Tag}}' | grep "/${REPO_PREFIX}:" || true)
  if [[ ${#REPO_IMAGES[@]} -gt 0 ]]; then
    FALLBACK_IMAGE="${REPO_IMAGES[0]}"
    echo "Using fallback image: ${FALLBACK_IMAGE}" >&2
    IMAGE="${FALLBACK_IMAGE}"
  else
    echo "Image ${IMAGE} not found. Build user image first (e.g. ./build-user-image.sh)." >&2
    exit 1
  fi
fi

GPU_FLAGS=()
GPU_ENV_VARS=()

# Detect GPU vendor of a /dev/dri render node via its PCI vendor id.
# $1 = render node path. Prints: intel, amd, nvidia, or unknown.
detect_gpu_vendor() {
  local render_node="$1"
  local node_name
  node_name=$(basename "$render_node")
  local vendor_file="/sys/class/drm/${node_name}/device/vendor"

  if [[ -f "$vendor_file" ]]; then
    local vendor_id
    vendor_id=$(cat "$vendor_file" 2>/dev/null)
    case "$vendor_id" in
      0x8086) echo "intel" ;;
      0x10de) echo "nvidia" ;;
      0x1002) echo "amd" ;;
      *)      echo "unknown" ;;
    esac
  else
    echo "unknown"
  fi
}

# Print all render nodes belonging to $1 (intel|amd|nvidia), smallest first.
find_vendor_render_nodes() {
  local target_vendor="$1"
  local nodes=()
  local node vendor

  for node in /dev/dri/renderD*; do
    if [[ -e "$node" ]]; then
      vendor=$(detect_gpu_vendor "$node")
      if [[ "$vendor" == "$target_vendor" ]]; then
        nodes+=("$node")
      fi
    fi
  done

  # Sort by render node number (renderD128 < renderD129)
  printf '%s\n' ${nodes[@]+"${nodes[@]}"} | sort -t 'D' -k2 -n
}

# Print every render node with its detected vendor (debug aid).
list_detected_gpus() {
  local node vendor
  echo "Detected GPUs:"
  for node in /dev/dri/renderD*; do
    if [[ -e "$node" ]]; then
      vendor=$(detect_gpu_vendor "$node")
      echo "  $node: $vendor"
    fi
  done
}

# Configure VA-API encoding for Intel/AMD. Shared by both vendors (the
# original script duplicated this whole section per vendor).
# $1 = vendor key (intel|amd), $2 = display label, $3 = default libva driver.
# On failure, actually falls back: resets ENCODER/GPU_VENDOR to "software"
# so the container env matches the printed fallback message.
setup_vaapi() {
  local vendor="$1" label="$2" driver="$3"
  local node=""

  GPU_ENV_VARS+=(-e LIBVA_DRIVER_NAME="${LIBVA_DRIVER_NAME:-${driver}}")
  # --gpu may still pass NVIDIA GPUs for non-encoding purposes (CUDA, ML, etc.)
  if [[ -n "${DOCKER_GPUS}" ]]; then
    GPU_FLAGS+=(--gpus "${DOCKER_GPUS}")
    GPU_ENV_VARS+=(-e ENABLE_NVIDIA=true)
    echo "NVIDIA GPUs enabled (--gpus ${DOCKER_GPUS}) for non-encoding purposes"
  else
    GPU_ENV_VARS+=(-e ENABLE_NVIDIA=false)
  fi

  if [[ ! -d /dev/dri ]]; then
    echo "Warning: /dev/dri not found, ${label} VA-API not available." >&2
    echo "Falling back to software encoding..." >&2
    ENCODER="software"
    GPU_VENDOR="software"
    return 0
  fi

  # List detected GPUs for debugging
  list_detected_gpus

  if [[ -n "${DRI_NODE}" ]]; then
    # User specified a node
    node="${DRI_NODE}"
    echo "Using user-specified DRI node: ${node}"
  else
    # Auto-detect: use the first (smallest numbered) matching GPU
    node=$(find_vendor_render_nodes "${vendor}" | head -n1)
    if [[ -n "${node}" ]]; then
      echo "Auto-detected ${label} GPU: ${node}"
    else
      echo "Warning: No ${label} GPU found in /dev/dri." >&2
      echo "Available GPUs:" >&2
      list_detected_gpus >&2
      echo "Falling back to software encoding..." >&2
      ENCODER="software"
      GPU_VENDOR="software"
      return 0
    fi
  fi

  # Mount /dev/dri and tell the container which node to use.
  GPU_FLAGS+=(--device=/dev/dri:/dev/dri:rwm)
  GPU_ENV_VARS+=(-e DRI_NODE="${node}")

  # Optionally verify VA-API on host (may fail due to permissions).
  if command -v vainfo >/dev/null 2>&1; then
    if LIBVA_DRIVER_NAME="${driver}" vainfo --display drm --device "${node}" >/dev/null 2>&1; then
      echo "${label} VA-API verified on ${node}, using hardware acceleration"
    else
      echo "Note: VA-API verification failed on host (may work in container with proper permissions)" >&2
      echo "Using ${node} for ${label} VA-API encoding" >&2
    fi
  else
    echo "Note: vainfo not found on host, VA-API will be verified in container" >&2
    echo "Using ${node} for ${label} VA-API encoding" >&2
  fi
}

case "${GPU_VENDOR}" in
  software)
    # Support --gpu to still pass NVIDIA GPUs for other purposes (CUDA, ML, etc.)
    if [[ -n "${DOCKER_GPUS}" ]]; then
      GPU_FLAGS+=(--gpus "${DOCKER_GPUS}")
      GPU_ENV_VARS+=(-e ENABLE_NVIDIA=true)
      echo "NVIDIA GPUs enabled (--gpus ${DOCKER_GPUS}) even with software encoding"
    else
      GPU_ENV_VARS+=(-e ENABLE_NVIDIA=false)
    fi
    ;;
  intel)
    setup_vaapi intel Intel iHD
    ;;
  amd)
    setup_vaapi amd AMD radeonsi
    # ROCm compute node, independent of whether VA-API setup succeeded.
    if [[ -e /dev/kfd ]]; then
      GPU_FLAGS+=(--device=/dev/kfd:/dev/kfd:rwm)
    fi
    ;;
  nvidia)
    if [[ -n "${DOCKER_GPUS}" ]]; then
      GPU_FLAGS+=(--gpus "${DOCKER_GPUS}")
    else
      echo "Warning: --encoder nvidia selected but no --gpu value provided; NVENC may be unavailable." >&2
    fi
    if [[ -d /dev/dri ]]; then
      GPU_FLAGS+=(--device=/dev/dri:/dev/dri:rwm)
    fi
    GPU_ENV_VARS+=(-e ENABLE_NVIDIA=true -e DISABLE_ZINK=true)
    ;;
  nvidia-wsl)
    # WSL2 with NVIDIA GPU support; WSL2 only supports --gpus all.
    if [[ -n "${DOCKER_GPUS}" ]]; then
      GPU_FLAGS+=(--gpus "${DOCKER_GPUS}")
    else
      echo "Warning: --encoder nvidia-wsl selected but no --gpu value provided; NVENC may be unavailable." >&2
    fi
    # Mount WSL-specific devices and libraries.
    if [[ -e /dev/dxg ]]; then
      GPU_FLAGS+=(--device=/dev/dxg:/dev/dxg:rwm)
    else
      echo "Warning: /dev/dxg not found. Are you running on WSL2?" >&2
    fi
    if [[ -d /usr/lib/wsl/lib ]]; then
      GPU_FLAGS+=(-v /usr/lib/wsl/lib:/usr/lib/wsl/lib:ro)
    fi
    # WSLg support
    if [[ -d /mnt/wslg ]]; then
      GPU_FLAGS+=(-v /mnt/wslg:/mnt/wslg:ro)
    fi
    GPU_ENV_VARS+=(-e ENABLE_NVIDIA=true -e WSL_ENVIRONMENT=true -e DISABLE_ZINK=true)
    ;;
  *)
    echo "Unsupported GPU vendor: ${GPU_VENDOR}" >&2
    exit 1
    ;;
esac

echo "Starting: name=${NAME}, image=${IMAGE}, resolution=${RESOLUTION}, DPI=${DPI}, encoder=${ENCODER}, docker-gpus=${DOCKER_GPUS:-none}, host ports https=${HOST_PORT_SSL}->3001, http=${HOST_PORT_HTTP}->3000"
echo "Chromium scale: ${SCALE_FACTOR} (CHROMIUM_FLAGS=${CHROMIUM_FLAGS_COMBINED})"

# Add video and render groups for GPU access (use host GIDs).
GROUP_FLAGS=()
VIDEO_GID=$(getent group video 2>/dev/null | cut -d: -f3 || true)
RENDER_GID=$(getent group render 2>/dev/null | cut -d: -f3 || true)
if [[ -n "${VIDEO_GID}" ]]; then
  GROUP_FLAGS+=(--group-add="${VIDEO_GID}")
  echo "Adding video group (GID: ${VIDEO_GID})"
fi
if [[ -n "${RENDER_GID}" ]]; then
  GROUP_FLAGS+=(--group-add="${RENDER_GID}")
  echo "Adding render group (GID: ${RENDER_GID})"
fi

PLATFORM_FLAGS=()
if [[ -n "$PLATFORM" ]]; then
  PLATFORM_FLAGS=(--platform "$PLATFORM")
fi

SSL_FLAGS=()
# Default SSL dir fallback if not specified: prefer ./ssl next to repo.
if [[ -z "$SSL_DIR" ]]; then
  DEFAULT_SSL_DIR="$(pwd)/ssl"
  if [[ -d "$DEFAULT_SSL_DIR" ]]; then
    SSL_DIR="$DEFAULT_SSL_DIR"
    echo "Using SSL dir: $SSL_DIR"
  fi
fi

if [[ -n "$SSL_DIR" ]]; then
  if [[ -f "$SSL_DIR/cert.pem" && -f "$SSL_DIR/cert.key" ]]; then
    SSL_FLAGS=(-v "$SSL_DIR":/config/ssl:ro)
  else
    echo "Warning: SSL_DIR set but cert.pem or cert.key missing in $SSL_DIR. Skipping mount." >&2
  fi
else
  echo "Warning: No SSL dir mounted. Using image self-signed cert (CN=*), browsers may reject WSS." >&2
fi

docker run -d \
  ${PLATFORM_FLAGS[@]+"${PLATFORM_FLAGS[@]}"} \
  ${GPU_FLAGS[@]+"${GPU_FLAGS[@]}"} \
  ${GROUP_FLAGS[@]+"${GROUP_FLAGS[@]}"} \
  --name "$NAME" \
  --hostname "${HOSTNAME_VAL}" \
  -e HOSTNAME="${HOSTNAME_VAL}" \
  -e HOST_HOSTNAME="${HOSTNAME_VAL}" \
  -e SHELL=/bin/bash \
  -p "${HOST_PORT_HTTP}:3000" \
  -p "${HOST_PORT_SSL}:3001" \
  -e DISPLAY=:1 \
  -e DPI="$DPI" \
  -e SCALE_FACTOR="${SCALE_FACTOR}" \
  -e FORCE_DEVICE_SCALE_FACTOR="${SCALE_FACTOR}" \
  -e CHROMIUM_FLAGS="${CHROMIUM_FLAGS_COMBINED}" \
  -e DISPLAY_WIDTH="$WIDTH" \
  -e DISPLAY_HEIGHT="$HEIGHT" \
  -e CUSTOM_RESOLUTION="$RESOLUTION" \
  -e USER_UID="${HOST_UID}" \
  -e USER_GID="${HOST_GID}" \
  -e USER_NAME="${HOST_USER}" \
  -e PUID="${HOST_UID}" \
  -e PGID="${HOST_GID}" \
  -e ENCODER="${ENCODER}" \
  -e GPU_VENDOR="${GPU_VENDOR}" \
  --shm-size "${SHM_SIZE}" \
  --privileged \
  -v "${HOME}":"${HOST_HOME_MOUNT}":rw \
  ${MNT_FLAGS[@]+"${MNT_FLAGS[@]}"} \
  -v "${HOME}/.ssh":"/home/${HOST_USER}/.ssh":rw \
  ${GPU_ENV_VARS[@]+"${GPU_ENV_VARS[@]}"} \
  ${SSL_FLAGS[@]+"${SSL_FLAGS[@]}"} \
  "$IMAGE"
+docker stop "$NAME" >/dev/null +echo "Container ${NAME} stopped." + +if [[ $REMOVE -eq 1 ]]; then + echo "Removing container ${NAME}..." + docker rm "$NAME" >/dev/null + echo "Container ${NAME} removed." +fi