diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml
index a58f0212..00b0358f 100644
--- a/.github/workflows/lint.yml
+++ b/.github/workflows/lint.yml
@@ -20,7 +20,7 @@ jobs:
- name: golangci-lint
uses: golangci/golangci-lint-action@v7
with:
- version: v2.1
+ version: v2.11.2
args: --timeout=30m
cpd:
runs-on: ubuntu-latest
diff --git a/README.md b/README.md
index af8aeb6c..581d5a0e 100644
--- a/README.md
+++ b/README.md
@@ -5,7 +5,9 @@


+[](https://app.fossa.com/projects/git%2Bgithub.com%2Finterlink-hq%2FinterLink?ref=badge_shield&issueType=license)
[](https://goreportcard.com/report/github.com/interlink-hq/interlink)
+[](https://www.bestpractices.dev/projects/10839)
[](https://join.slack.com/t/intertwin/shared_invite/zt-2cs67h9wz-2DFQ6EiSQGS1vlbbbJHctA)
@@ -77,12 +79,14 @@ interLink supports a wide range of remote execution environments through its plu
## Use Cases
### In Scope
+
- **HPC Workloads**: AI training, ML inference, scientific simulations requiring specialized hardware
- **GPU-intensive Tasks**: Remote execution on powerful GPU resources for ML training, data analysis, rendering
- **Batch Processing**: On-demand container execution with specific compute requirements
- **Hybrid Cloud**: Workload distribution across multiple infrastructure providers
### Out of Scope
+
- **Long-running Services**: Persistent services with continuous availability requirements
- **Kubernetes Federation**: Multi-cluster resource management and federation
diff --git a/cmd/virtual-kubelet/main.go b/cmd/virtual-kubelet/main.go
index 875ff0f8..4d228420 100644
--- a/cmd/virtual-kubelet/main.go
+++ b/cmd/virtual-kubelet/main.go
@@ -50,6 +50,7 @@ import (
"net/http"
"os"
"path"
+ "path/filepath"
"strconv"
"strings"
"time"
@@ -373,7 +374,11 @@ func createHTTPTransport(ctx context.Context, interLinkConfig commonIL.Config, v
// setupKubernetesClient creates the Kubernetes client configuration
func setupKubernetesClient(ctx context.Context) (*rest.Config, *kubernetes.Clientset) {
var kubecfg *rest.Config
- kubecfgFile, err := os.ReadFile(os.Getenv("KUBECONFIG"))
+ kubeconfigPath := os.Getenv("KUBECONFIG")
+ if !filepath.IsAbs(kubeconfigPath) || strings.Contains(kubeconfigPath, "..") {
+ log.G(ctx).Fatal("Invalid KUBECONFIG path")
+ }
+ kubecfgFile, err := os.ReadFile(kubeconfigPath) // #nosec G703
if err != nil {
if os.Getenv("KUBECONFIG") != "" {
log.G(ctx).Debug(err)
diff --git a/docs/docusaurus.config.local.ts b/docs/docusaurus.config.local.ts
index 1d72fd90..90912ad9 100644
--- a/docs/docusaurus.config.local.ts
+++ b/docs/docusaurus.config.local.ts
@@ -80,7 +80,7 @@ const config: Config = {
announcementBar: {
id: 'support_us',
content:
- 'We are onboarding for our contribution to CNCF Sandbox! Please let us know for any broken or missing information as we move to the new home.',
+ 'We will be at KubeCon EU 2026! Come visit us at booth P-24AS Wednesday 10:00 - 13:30 to learn more about interLink!',
backgroundColor: '#fafbfc',
textColor: '#091E42',
isCloseable: false,
@@ -126,10 +126,10 @@ const config: Config = {
],
},
{
- title: 'Community',
+ title: 'Contact Us',
items: [
{
- label: 'interTwin project Slack',
+ label: 'Join us on Slack!',
href: 'https://join.slack.com/t/intertwin/shared_invite/zt-2cs67h9wz-2DFQ6EiSQGS1vlbbbJHctA',
}
],
diff --git a/docs/docusaurus.config.ts b/docs/docusaurus.config.ts
index d98780cd..a89453a6 100644
--- a/docs/docusaurus.config.ts
+++ b/docs/docusaurus.config.ts
@@ -80,7 +80,7 @@ const config: Config = {
announcementBar: {
id: 'support_us',
content:
- 'We are onboarding for our contribution to CNCF Sandbox! Please let us know for any broken or missing information as we move to the new home.',
+ 'We will be at KubeCon EU 2026! Come visit us at booth P-24AS Wednesday 10:00 - 13:30 to learn more about interLink!',
backgroundColor: '#fafbfc',
textColor: '#091E42',
isCloseable: false,
@@ -126,10 +126,10 @@ const config: Config = {
],
},
{
- title: 'Community',
+ title: 'Contact Us',
items: [
{
- label: 'interTwin project Slack',
+ label: 'Join us on Slack!',
href: 'https://join.slack.com/t/intertwin/shared_invite/zt-2cs67h9wz-2DFQ6EiSQGS1vlbbbJHctA',
}
],
diff --git a/docs/src/pages/index.tsx b/docs/src/pages/index.tsx
index 5e874cba..88e65392 100644
--- a/docs/src/pages/index.tsx
+++ b/docs/src/pages/index.tsx
@@ -59,14 +59,22 @@ function HomepageHeader() {
className={styles.badge}
onClick={() => window.open('https://github.com/interlink-hq/interLink', '_blank')}
/>
+
window.open('https://app.fossa.com/projects/git%2Bgithub.com%2Finterlink-hq%2FinterLink?ref=badge_shield&issueType=license', '_blank')}
+ />
window.open('https://goreportcard.com/report/github.com/interlink-hq/interlink', '_blank')}
/>
+
+

window.open('https://join.slack.com/t/intertwin/shared_invite/zt-2cs67h9wz-2DFQ6EiSQGS1vlbbbJHctA', '_blank')}
diff --git a/docs/versioned_docs/version-0.6.x/Developers.md b/docs/versioned_docs/version-0.6.x/Developers.md
new file mode 100644
index 00000000..38f3d8e1
--- /dev/null
+++ b/docs/versioned_docs/version-0.6.x/Developers.md
@@ -0,0 +1,276 @@
+---
+sidebar_position: 5
+---
+
+# Developers guide
+
+Here you can find how to test a virtual kubelet implementation against the main
+pod use cases we mean to support.
+
+## Requirements
+
+- [Docker engine](https://docs.docker.com/engine/install/)
+- [Dagger CLI v0.13.x](https://docs.dagger.io/install/)
+
+## What's in the Dagger module
+
+- E2e integration tests: a reproducible test environment (self-contained in
+ Dagger runtime). Run the very same tests executed by github actions to
+ validate any PR
+- A development setup tool: optionally you can use your k8s cluster of choice to
+ run and install different interlink components via this module.
+
+:warning: by default the docker plugin is the one tested and to be referred to
+for any change as first thing.
+
+## Usage
+
+The whole test suite is based on the application of k8s manifests inside a
+folder that must be passed at runtime. In `./ci/manifests` of this repo you can
+find the one executed by default by the github actions.
+
+That means you can test your code **before** any commit, discovering in advance
+if anything is breaking.
+
+### Run e2e tests
+
+The easiest way is to simply run `make test` from the root folder of interlink.
+But if you need to debug or understand further the test utility or a plugin, you
+should follow these instructions.
+
+#### Edit manifests with your images
+
+- `service-account.yaml` is the default set of permission needed by the
+ virtualkubelet. Do not touch unless you know what you are doing.
+- `virtual-kubelet-config.yaml` is the configuration mounted into the **virtual
+ kubelet** component to determine its behaviour.
+- `virtual-kubelet.yaml` is the one that you should touch if you are pointing to
+ different interlink endpoints or if you want to change the **virtual kubelet**
+ image to be tested.
+- `interlink-config.yaml` is the configuration mounted into the **interlink
+ API** component to determine its behaviour.
+- `interlink.yaml` is the one that you should touch if you are pointing to
+ different plugin endpoints or if you want to change the **interlink API**
+ image to be tested.
+- `plugin-config.yaml` is the configuration for the **interLink plugin**
+  component that you MUST START MANUALLY on your host.
+  - we do have a solution to make it start inside the Dagger environment, but it is not
+ documented yet.
+
+#### Start the local docker plugin service
+
+For a simple demonstration, you can use the plugin that we actually use in our
+GitHub Actions:
+
+```bash
+wget https://github.com/interlink-hq/interlink-docker-plugin/releases/download/0.0.24-no-gpu/docker-plugin_Linux_x86_64 -O docker-plugin \
+ && chmod +x docker-plugin \
+ && docker ps \
+ && export INTERLINKCONFIGPATH=$PWD/ci/manifests/plugin-config.yaml \
+ && ./docker-plugin
+```
+
+#### Run the tests
+
+Then, in another terminal session you are ready to execute the e2e tests with
+Dagger.
+
+First of all, in `ci/manifests/vktest_config.yaml` you will find the pytest
+configuration file. Please see the
+[test documentation](https://github.com/interlink-hq/vk-test-set/tree/main) for
+understanding how to tweak it.
+
+The following instructions are thought for building docker images of the
+virtual-kubelet and interlink api server components at runtime and published on
+`virtual-kubelet-ref` and `interlink-ref` repositories (in this example it will
+be dockerHUB repository of the dciangot user). It basically consists on a chain
+of Dagger tasks for building core images (`build-images`), creating the
+kubernetes environment configured with core components (`new-interlink`),
+installing the plugin of choice indicated in the `manifest` folder
+(`load-plugin`), and eventually the execution of the tests (`test`)
+
+To run the default tests you can move to `ci` folder and execute the Dagger
+pipeline with:
+
+```bash
+dagger call \
+ --name my-tests \
+ build-images \
+ new-interlink \
+ --plugin-endpoint tcp://localhost:4000 \
+ test stdout
+```
+
+:warning: by default the docker plugin is the one tested and to be referred to
+for any change as first thing.
+
+In case of success the output should print something like the following:
+
+```text
+cachedir: .pytest_cache
+rootdir: /opt/vk-test-set
+configfile: pyproject.toml
+collecting ... collected 12 items / 1 deselected / 11 selected
+
+vktestset/basic_test.py::test_namespace_exists[default] PASSED [ 9%]
+vktestset/basic_test.py::test_namespace_exists[kube-system] PASSED [ 18%]
+vktestset/basic_test.py::test_namespace_exists[interlink] PASSED [ 27%]
+vktestset/basic_test.py::test_node_exists[virtual-kubelet] PASSED [ 36%]
+vktestset/basic_test.py::test_manifest[virtual-kubelet-000-hello-world.yaml] PASSED [ 45%]
+vktestset/basic_test.py::test_manifest[virtual-kubelet-010-simple-python.yaml] PASSED [ 54%]
+vktestset/basic_test.py::test_manifest[virtual-kubelet-020-python-env.yaml] PASSED [ 63%]
+vktestset/basic_test.py::test_manifest[virtual-kubelet-030-simple-shared-volume.yaml] PASSED [ 72%]
+vktestset/basic_test.py::test_manifest[virtual-kubelet-040-config-volumes.yaml] PASSED [ 81%]
+vktestset/basic_test.py::test_manifest[virtual-kubelet-050-limits.yaml] PASSED [ 90%]
+vktestset/basic_test.py::test_manifest[virtual-kubelet-060-init-container.yaml] PASSED [100%]
+
+====================== 11 passed, 1 deselected in 41.71s =======================
+```
+
+#### Debug with interactive session
+
+In case something went wrong, you have the possibility to spawn a session inside
+the final step of the pipeline to debug things:
+
+```bash
+dagger call \
+ --name my-tests \
+ build-images \
+ new-interlink \
+ --plugin-endpoint tcp://localhost:4000 \
+ run terminal
+
+```
+
+with this command (after some minutes) then you should be able to access a bash
+session doing the following commands:
+
+```bash
+bash
+source .venv/bin/activate
+export KUBECONFIG=/.kube/config
+
+## check connectivity with k8s cluster
+kubectl get pod -A
+
+## re-run the tests
+pytest -vk 'not rclone'
+```
+
+#### Debug from kubectl on your host
+
+You can get the Kubernetes service running with:
+
+```bash
+dagger call \
+ --name my-tests \
+ build-images \
+ new-interlink \
+ --plugin-endpoint tcp://localhost:4000 \
+ kube up
+```
+
+and then from another session, you can get the kubeconfig with:
+
+```bash
+dagger call \
+ --name my-tests \
+ config export --path ./kubeconfig.yaml
+```
+
+### Deploy on existing K8s cluster
+
+TBD
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+### Develop Virtual Kubelet code
+
+:warning: Coming soon
+
+### Develop Interlink API code
+
+:warning: Coming soon
+
+### Develop your plugin
+
+:warning: Coming soon
+
+## SSL Certificate Management
+
+### CSR Integration for Virtual Kubelet
+
+As of this version, Virtual Kubelet now supports proper SSL certificate management using Kubernetes Certificate Signing Requests (CSRs) instead of self-signed certificates. This resolves compatibility issues with `kubectl logs` and other Kubernetes clients.
+
+#### Key Changes
+
+- **CSR-based certificates**: Virtual Kubelet now requests certificates from the Kubernetes cluster CA using the standard `kubernetes.io/kubelet-serving` signer
+- **Automatic fallback**: If CSR creation fails, the system falls back to self-signed certificates with a warning
+- **Improved compatibility**: No longer requires `--insecure-skip-tls-verify-backend` flag for `kubectl logs`
+
+#### Technical Details
+
+The implementation uses:
+- **Signer**: `kubernetes.io/kubelet-serving` (standard kubelet serving certificate signer)
+- **Certificate store**: `/tmp/certs` directory with `virtual-kubelet` prefix
+- **Subject**: `system:node:<node-name>`
+  with `system:nodes` organization
+- **IP SANs**: Node IP address for proper certificate validation
+
+#### Testing Certificate Integration
+
+To verify CSR-based certificate functionality:
+
+1. **Check CSR creation**:
+ ```bash
+ kubectl get csr
+ ```
+
+2. **Test kubectl logs without insecure flag**:
+ ```bash
+   kubectl logs <pod-name>
+ ```
+
+3. **Monitor Virtual Kubelet logs** for certificate retrieval messages:
+ ```bash
+   kubectl logs -n interlink virtual-kubelet-<pod-id>
+ ```
+
+#### ⚠️ IMPORTANT: CSR Manual Approval Required
+
+:exclamation: **CRITICAL**: CSRs (Certificate Signing Requests) must be manually approved by a cluster administrator, otherwise **log access will not work**. Without CSR approval, `kubectl logs` and other log-related operations will fail.
+
+**Required steps for enabling log functionality:**
+
+1. **Check for pending CSRs**:
+ ```bash
+ kubectl get csr
+ ```
+
+2. **Approve the CSR** (replace `csr-xxxxx` with the actual CSR name):
+ ```bash
+ kubectl certificate approve csr-xxxxx
+ ```
+
+3. **Verify logs are accessible**:
+ ```bash
+   kubectl logs <pod-name>
+ ```
+
+#### Troubleshooting
+
+- **CSR approval**: Ensure your cluster has automatic CSR approval configured or manually approve CSRs
+- **RBAC permissions**: Virtual Kubelet needs permissions to create CSRs in the `certificates.k8s.io` API group
+- **Fallback behavior**: Check logs for warnings about falling back to self-signed certificates
+
+For clusters without proper CSR support, the system maintains backward compatibility by automatically using self-signed certificates with appropriate warnings.
diff --git a/docs/versioned_docs/version-0.6.x/Limitations.md b/docs/versioned_docs/version-0.6.x/Limitations.md
new file mode 100644
index 00000000..8d6e15d3
--- /dev/null
+++ b/docs/versioned_docs/version-0.6.x/Limitations.md
@@ -0,0 +1,23 @@
+---
+sidebar_position: 6
+---
+
+# Current limitations
+
+It's not black magic, we have to pay something:
+
+- **Cluster wide shared FS**: there is no support for cluster-wide filesystem
+ mounting on the remote container. The only volumes supported are: `Secret`,
+ `ConfigMap`, `EmptyDir`
+- **InCluster pod-to-pod network**: we are in the middle of the beta period to
+ release this feature!
+
+:::note
+
+Reach out to us if you are willing to test the network implementation as beta
+users!
+
+:::
+
+That's all. If you find anything else, feel free to let us know by filing a GitHub
+issue.
diff --git a/docs/versioned_docs/version-0.6.x/arch.mdx b/docs/versioned_docs/version-0.6.x/arch.mdx
new file mode 100644
index 00000000..5ad980e7
--- /dev/null
+++ b/docs/versioned_docs/version-0.6.x/arch.mdx
@@ -0,0 +1,24 @@
+---
+sidebar_position: 2
+---
+import ThemedImage from '@theme/ThemedImage';
+import useBaseUrl from '@docusaurus/useBaseUrl';
+
+# Architecture
+
+InterLink aims to provide an abstraction for the execution of a Kubernetes pod on any remote resource capable of managing a Container execution lifecycle.
+
+The project consists of two main components:
+
+- __A Kubernetes Virtual Node:__ based on the [VirtualKubelet](https://virtual-kubelet.io/) technology. Translating request for a kubernetes pod execution into a remote call to the interLink API server.
+- __The interLink API server:__ a modular and pluggable REST server where you can create your own Container manager plugin (called sidecars), or use the existing ones: remote docker execution on a remote host, singularity Container on a remote SLURM batch system.
+
+The project got inspired by the [KNoC](https://github.com/CARV-ICS-FORTH/knoc) and [Liqo](https://github.com/liqotech/liqo/tree/master) projects, enhancing them with the implementation of a generic API layer between the virtual kubelet component and the provider logic for the container lifecycle management.
+
+
diff --git a/docs/versioned_docs/version-0.6.x/cookbook/1-edge.mdx b/docs/versioned_docs/version-0.6.x/cookbook/1-edge.mdx
new file mode 100644
index 00000000..9fac1a8b
--- /dev/null
+++ b/docs/versioned_docs/version-0.6.x/cookbook/1-edge.mdx
@@ -0,0 +1,406 @@
+---
+sidebar_position: 3
+---
+
+import Tabs from "@theme/Tabs";
+import TabItem from "@theme/TabItem";
+import ThemedImage from "@theme/ThemedImage";
+import useBaseUrl from "@docusaurus/useBaseUrl";
+
+# Edge node deployment
+
+Deploy interLink on an edge node, outside the local K8S cluster.
+
+
+
+## Install interLink
+
+### Deploy Remote components
+
+In general, starting from the deployment of the remote components is advised.
+Since the kubernetes virtual node won't reach the `Ready` status until all the
+stack is successfully deployed.
+
+#### Interlink API server
+
+**For this deployment mode the remote host has to allow the kubernetes cluster
+to connect to the Oauth2 proxy service port (30443 if you use the automatic
+script for installation)**
+
+:::note Authentication Options
+
+InterLink supports two authentication methods for secure communication:
+
+1. **OIDC Authentication (OAuth2 Proxy)** - Described in this guide
+2. **mTLS Authentication (Mutual TLS)** - See the
+ [mTLS Deployment Guide](../guides/mtls-deployment) for detailed instructions
+
+:::
+
+You first need to initialize an OIDC client with your Identity Provider (IdP).
+
+Since any OIDC provider working with
+[OAuth2 Proxy](https://oauth2-proxy.github.io/oauth2-proxy/) tool will do the
+work, we are going to put the configuration for a generic OIDC identity provider
+in this cookbook. Nevertheless you can find more detailed instructions on
+dedicated pages ready for
+[GitHub](../guides/deploy-interlink#create-an-oauth-github-app),
+[EGI checkin](../guides/04-oidc-IAM.md#egi-check-in),
+[INFN IAM](../guides/oidc-IAM#indigo-iam).
+
+Then download the
+[latest release](https://github.com/interlink-hq/interLink/releases) of the
+interLink installer:
+
+```bash
+mkdir -p $HOME/.interlink
+export VERSION=$(curl -s https://api.github.com/repos/interlink-hq/interlink/releases/latest | jq -r .name)
+wget -O $HOME/.interlink/interlink-installer https://github.com/interlink-hq/interLink/releases/download/$VERSION/interlink-installer_Linux_x86_64
+chmod +x $HOME/.interlink/interlink-installer
+```
+
+Create a template configuration with the init option:
+
+```bash
+mkdir -p $HOME/.interlink/logs
+mkdir -p $HOME/.interlink/bin
+mkdir -p $HOME/.interlink/config
+$HOME/.interlink/interlink-installer --init --config $HOME/.interlink/installer.yaml
+```
+
+The configuration file should be filled in as follows. This is the case where the
+`my-node` will contact an edge service that will be listening on `PUBLIC_IP` and
+`API_PORT` authenticating requests from an OIDC provider
+`https://my_oidc_idp.com`:
+
+```bash title="$HOME/.interlink/installer.yaml"
+interlink_ip: PUBLIC_IP
+interlink_port: API_PORT
+interlink_version: X.X.X
+kubelet_node_name: my-node
+kubernetes_namespace: interlink
+node_limits:
+ cpu: "1000"
+ # MEMORY in GB
+ memory: 25600
+ pods: "100"
+oauth:
+ provider: oidc
+ issuer: https://my_oidc_idp.com/
+ scopes:
+ - "openid"
+ - "email"
+ - "offline_access"
+ - "profile"
+ audience: interlink
+ grant_type: authorization_code
+ group_claim: groups
+ group: "my_vk_allowed_group"
+ token_url: "https://my_oidc_idp.com/token"
+ device_code_url: "https://my_oidc_idp/auth/device"
+ client_id: "oidc-client-xx"
+ client_secret: "xxxxxx"
+insecure_http: true
+```
+
+:::note
+
+Please fill interlink_version with the desired version.
+Alternatively, get the latest with:
+
+```bash
+curl -s https://api.github.com/repos/interlink-hq/interlink/releases/latest | jq -r .name
+```
+
+:::
+
+Now you are ready to start the OIDC authentication flow to generate all your
+manifests and configuration files for the interLink components. To do so, just
+execute the installer:
+
+```bash
+$HOME/.interlink/interlink-installer --config $HOME/.interlink/installer.yaml --output-dir $HOME/.interlink/manifests/
+```
+
+Install Oauth2-Proxy and interLink API server services and configurations with:
+
+```bash
+chmod +x $HOME/.interlink/manifests/interlink-remote.sh
+$HOME/.interlink/manifests/interlink-remote.sh install
+```
+
+Then start the services with:
+
+```bash
+$HOME/.interlink/manifests/interlink-remote.sh start
+```
+
+With the `stop` command you can stop the service. By default logs are stored in
+`~/.interlink/logs`, checkout there for any error before moving to the next
+step.
+
+:::note
+
+**N.B.** you can look the oauth2_proxy configuration parameters looking directly
+into the `interlink-remote.sh` script.
+
+:::
+
+:::warning
+
+**N.B.** logs (especially in verbose mode) can become pretty huge; consider
+implementing your favorite rotation routine for all the logs in
+`~/.interlink/logs/`.
+
+:::
+
+#### Plugin service
+
+Select here the featured plugin you want to try:
+
+
+
+ _Offload your pods to a remote machine with Docker engine available._
+
+ - Create a configuration file:
+
+ ```bash title="$HOME/.interlink/config/plugin-config.yaml"
+ ## Multi user host
+ Socket: "unix:///home/myusername/.interlink/.plugin.sock"
+ InterlinkPort: "0"
+ SidecarPort: "0"
+
+ CommandPrefix: ""
+ DataRootFolder: "/home/myusername/.interlink/jobs/"
+ BashPath: /bin/bash
+ VerboseLogging: false
+ ErrorsOnlyLogging: false
+ ```
+  - __N.B.__ Depending on whether your edge node is single user or not,
+  you should know by previous steps which section to uncomment here.
+ - More on configuration options at
+ [official repo](https://github.com/interlink-hq/interlink-docker-plugin/blob/main/README.md)
+
+ - Download the [latest release](https://github.com/interlink-hq/interlink-docker-plugin/releases)
+ binary in `$HOME/.interlink/bin/plugin` for either GPU host or CPU host (tags ending with `no-GPU`)
+ - Start the plugins passing the configuration that you have just created:
+
+ ```bash
+ export INTERLINKCONFIGPATH=$HOME/.interlink/config/plugin-config.yaml
+ $HOME/.interlink/bin/plugin &> $HOME/.interlink/logs/plugin.log &
+ echo $! > $HOME/.interlink/plugin.pid
+ ```
+
+ - Check the logs in `$HOME/.interlink/logs/plugin.log`.
+ - To kill and restart the process is enough:
+
+ ```bash
+ # kill
+ kill $(cat $HOME/.interlink/plugin.pid)
+
+ # restart
+ export INTERLINKCONFIGPATH=$HOME/.interlink/config/plugin-config.yaml
+ $HOME/.interlink/bin/plugin &> $HOME/.interlink/logs/plugin.log &
+ echo $! > $HOME/.interlink/plugin.pid
+  ```
+ Almost there! Now it's time to add this virtual node into the Kubernetes cluster!
+
+
+
+
+ _Offload your pods to an HPC SLURM based batch system._
+
+ - Please be sure that you have a shared filesystem area with the SLURM nodes available from the edge node. In this case our `DataRootFolder` is `$HOME/.interlink/jobs`
+ - Create a configuration file (__remember to substitute `/home/username/` with your actual home path__):
+
+ ```bash title="./interlink/manifests/plugin-config.yaml"
+ Socket: "unix:///home/myusername/.interlink/.plugin.sock"
+ InterlinkPort: "0"
+ SidecarPort: "0"
+
+ CommandPrefix: ""
+ DataRootFolder: "/home/myusername/.interlink/jobs/"
+ BashPath: /bin/bash
+ VerboseLogging: false
+ ErrorsOnlyLogging: false
+ SbatchPath: "/usr/bin/sbatch"
+ ScancelPath: "/usr/bin/scancel"
+ SqueuePath: "/usr/bin/squeue"
+ SingularityPrefix: ""
+ ```
+
+ - More on configuration options at
+ [official repo](https://github.com/interlink-hq/interlink-slurm-plugin/blob/main/README.md)
+
+ - Download the [latest release](https://github.com/interlink-hq/interlink-slurm-plugin/releases)
+ binary in `$HOME/.interlink/bin/plugin`
+
+ ```bash
+ export PLUGIN_VERSION=$(curl -s https://api.github.com/repos/interlink-hq/interlink-slurm-plugin/releases/latest | jq -r .name)
+ wget -O $HOME/.interlink/bin/plugin https://github.com/interlink-hq/interlink-slurm-plugin/releases/download/${PLUGIN_VERSION}/interlink-sidecar-slurm_Linux_x86_64
+ ```
+
+ - Start the plugins passing the configuration that you have just created:
+
+ ```bash
+ export SLURMCONFIGPATH=$HOME/.interlink/manifests/plugin-config.yaml
+ $HOME/.interlink/bin/plugin &> $HOME/.interlink/logs/plugin.log &
+ echo $! > $HOME/.interlink/plugin.pid
+ ```
+
+ - Check the logs in `$HOME/.interlink/logs/plugin.log`.
+ - To kill and restart the process is enough:
+
+ ```bash
+ # kill
+ kill $(cat $HOME/.interlink/plugin.pid)
+
+ # restart
+ export SLURMCONFIGPATH=$HOME/.interlink/manifests/plugin-config.yaml
+ $HOME/.interlink/bin/plugin &> $HOME/.interlink/logs/plugin.log &
+ echo $! > $HOME/.interlink/plugin.pid
+  ```
+ Almost there! Now it's time to add this virtual node into the Kubernetes cluster!
+
+
+
+
+ _Offload your pods to a remote Kubernetes cluster._
+
+ KUBERNETES PLUGIN IS COMING SOON! For test instructions contact us!
+
+
+
+
+
+:::tip Production Deployment
+
+For production deployments, you can manage all InterLink processes through
+`systemd`. See the [Systemd Deployment Guide](../guides/systemd-deployment) for
+comprehensive instructions.
+
+:::
+
+##### 3rd-party plugins
+
+There are more 3rd-party plugins developed that you can get inspired by or even
+use out of the box. You can find some ref in the
+[quick start section](../guides/deploy-interlink#attach-your-favorite-plugin-or-develop-one)
+
+#### Test interLink stack health
+
+interLink comes with a call that can be used to monitor the overall status of
+both interlink server and plugins, at once.
+
+```
+curl -v --unix-socket ${HOME}/.interlink/.interlink.sock http://unix/pinglink
+```
+
+This call will return the status of the system and its readiness to submit jobs.
+
+### Deploy Kubernetes components
+
+The deployment of the Kubernetes components are managed by the official
+[HELM chart](https://github.com/interlink-hq/interlink-helm-chart). Depending on
+the scenario you selected, there might be additional operations to be done.
+
+You can now install the helm chart with the preconfigured (by the installer
+script) helm values in `./interlink/manifests/values.yaml`
+
+```bash
+ export INTERLINK_CHART_VERSION="X.X.X"
+ helm upgrade --install \
+ --create-namespace \
+ -n interlink \
+ my-node \
+ oci://ghcr.io/interlink-hq/interlink-helm-chart/interlink \
+ --version $INTERLINK_CHART_VERSION \
+ --values ./.interlink/manifests/values.yaml
+```
+
+:::warning
+
+Remember to pick the
+[version of the chart](https://github.com/interlink-hq/interlink-helm-chart/blob/main/interlink/Chart.yaml#L18)
+and put it into the `INTERLINK_CHART_VERSION` env var above.
+
+:::
+
+Whenever you see the node ready, you are good to go!
+
+:::note
+
+You can find a demo pod to test your setup
+[here](../guides/develop-a-plugin#lets-test-is-out).
+
+:::
+
+To start debugging in case of problems we suggest starting from the pod
+containers logs!
+
+#### Verify the setup
+
+Test the complete setup:
+
+```bash
+# Check if node appears in Kubernetes
+kubectl get nodes
+
+# Deploy a test pod
+kubectl apply -f - <
+
+## Install interLink
+
+### Deploy Kubernetes components
+
+The deployment of the Kubernetes components are managed by the official
+[HELM chart](https://github.com/interlink-hq/interlink-helm-chart). Depending on
+the scenario you selected, there might be additional operations to be done.
+
+- Create an helm values file:
+
+```yaml title="values.yaml"
+nodeName: interlink-in-cluster
+
+interlink:
+ enabled: true
+ address: http://localhost
+ port: 3000
+ logging:
+ verboseLogging: true
+
+plugin:
+ enabled: true
+ image: "ghcr.io/interlink-hq/interlink-sidecar-slurm/interlink-sidecar-slurm:0.5.1"
+ address: "http://localhost"
+ port: 4000
+ privileged: true
+ extraVolumeMounts:
+ - name: plugin-data
+ mountPath: /slurm-data
+ envs:
+ - name: SLURMCONFIGPATH
+ value: "/etc/interlink/plugin.yaml"
+ - name: SHARED_FS
+      value: "true"
+ config: |
+ #Socket: "unix:///var/run/plugin.sock"
+ ImagePrefix: "docker://"
+ SidecarPort: 4000
+ VerboseLogging: true
+ ErrorsOnlyLogging: false
+ DataRootFolder: "/slurm-data/"
+ ExportPodData: true
+ SbatchPath: "/usr/bin/sbatch"
+ ScancelPath: "/usr/bin/scancel"
+ SqueuePath: "/usr/bin/squeue"
+ CommandPrefix: ""
+ SingularityPrefix: ""
+ Namespace: "vk"
+ Tsocks: false
+ TsocksPath: "$WORK/tsocks-1.8beta5+ds1/libtsocks.so"
+ TsocksLoginNode: "login01"
+ BashPath: /bin/bash
+
+virtualNode:
+ resources:
+ CPUs: 4
+ memGiB: 16
+ pods: 50
+
+extraVolumes:
+ - name: plugin-data
+ hostPath:
+ path: /tmp/test
+ type: DirectoryOrCreate
+```
+
+Eventually deploy the latest release of the official:
+
+```bash
+ export INTERLINK_CHART_VERSION="X.X.X"
+ helm upgrade --install \
+ --create-namespace \
+ -n interlink \
+ my-node \
+ oci://ghcr.io/interlink-hq/interlink-helm-chart/interlink \
+ --version $INTERLINK_CHART_VERSION \
+ --values values.yaml
+```
+
+:::warning
+
+Remember to pick the
+[version of the chart](https://github.com/interlink-hq/interlink-helm-chart/blob/main/interlink/Chart.yaml#L18)
+and put it into the `INTERLINK_CHART_VERSION` env var above.
+
+:::
+
+Whenever you see the node ready, you are good to go!
+
+:::note
+
+You can find a demo pod to test your setup
+[here](../guides/develop-a-plugin#lets-test-is-out).
+
+:::
+
+To start debugging in case of problems we suggest starting from the pod
+containers logs!
diff --git a/docs/versioned_docs/version-0.6.x/cookbook/3-tunneled.mdx b/docs/versioned_docs/version-0.6.x/cookbook/3-tunneled.mdx
new file mode 100644
index 00000000..a891069c
--- /dev/null
+++ b/docs/versioned_docs/version-0.6.x/cookbook/3-tunneled.mdx
@@ -0,0 +1,735 @@
+---
+sidebar_position: 3
+---
+
+import Tabs from "@theme/Tabs";
+import TabItem from "@theme/TabItem";
+import ThemedImage from "@theme/ThemedImage";
+import useBaseUrl from "@docusaurus/useBaseUrl";
+
+# Tunneled deployment
+
+Deploy interLink components in both systems, linked through a tunneled
+communication.
+
+
+
+## SSH Tunnel Setup for interLink
+
+This guide explains how to configure SSH tunneling between Virtual Kubelet and interLink API server using the built-in `ssh-tunnel` command. SSH tunneling enables secure communication in scenarios where direct network connectivity is not available or desired.
+
+### Overview
+
+The SSH tunnel functionality allows you to:
+
+- Connect Virtual Kubelet to a remote interLink API server through an SSH tunnel
+- Secure communication over untrusted networks
+- Bypass network restrictions and firewalls
+- Enable the **tunneled deployment pattern** where the API server runs locally and the plugin runs remotely
+
+### Architecture
+
+In a tunneled deployment:
+
+1. **Virtual Kubelet** runs in your local Kubernetes cluster
+2. **interLink API server** runs locally (same network as Virtual Kubelet)
+3. **SSH tunnel** forwards traffic from local Unix socket to remote TCP port
+4. **Plugin** runs on the remote compute resource (HPC cluster, cloud, etc.)
+
+```
+[Virtual Kubelet] -> [interLink API] -> [Unix Socket] -> [SSH Tunnel] -> [Remote Plugin]
+ (local) (local) (local) (ssh bridge) (remote)
+```
+
+### Prerequisites
+
+Before setting up SSH tunneling, ensure you have:
+
+1. **SSH access** to the remote system where the plugin runs
+2. **SSH key pair** for authentication
+3. **Network connectivity** from local system to remote SSH server
+4. **interLink binary** built with ssh-tunnel command (`make ssh-tunnel`)
+
+#### SSH Key Setup
+
+Generate an SSH key pair if you don't have one:
+
+```bash
+# Generate SSH key pair
+ssh-keygen -t rsa -b 4096 -f ~/.ssh/interlink_rsa
+
+# Copy public key to remote server
+ssh-copy-id -i ~/.ssh/interlink_rsa.pub user@remote-server
+
+# Test SSH connection
+ssh -i ~/.ssh/interlink_rsa user@remote-server
+```
+
+#### Optional: Host Key Verification
+
+For enhanced security, extract the remote server's host key:
+
+```bash
+# Extract host public key from remote server
+ssh-keyscan -t rsa remote-server > ~/.ssh/interlink_host_key
+
+# Or get it from known_hosts
+ssh-keygen -F remote-server -f ~/.ssh/known_hosts | grep -o 'ssh-rsa.*' > ~/.ssh/interlink_host_key
+```
+
+### Configuration
+
+#### Step 1: Configure interLink API Server
+
+Configure the interLink API server to listen on a Unix socket instead of a TCP port:
+
+```yaml title="InterLinkConfig.yaml"
+# Use Unix socket for local communication
+InterlinkAddress: "unix:///tmp/interlink.sock"
+InterlinkPort: "" # Not used for Unix sockets
+
+# Remote plugin configuration
+SidecarURL: "http://remote-plugin"
+SidecarPort: "4000"
+
+VerboseLogging: true
+ErrorsOnlyLogging: false
+DataRootFolder: "/tmp/interlink"
+```
+
+#### Step 2: Configure Virtual Kubelet
+
+Configure Virtual Kubelet to connect to the Unix socket:
+
+```yaml title="VirtualKubeletConfig.yaml"
+# Connect to Unix socket
+InterlinkURL: "unix:///tmp/interlink.sock"
+InterlinkPort: "" # Not used for Unix sockets
+
+VerboseLogging: true
+ErrorsOnlyLogging: false
+
+# Node configuration
+NodeName: "my-interlink-node"
+NodeLabels:
+ "interlink.cern.ch/provider": "remote-hpc"
+```
+
+#### Step 3: Start SSH Tunnel
+
+Use the built-in `ssh-tunnel` command to establish the tunnel:
+
+##### Basic Usage
+
+```bash
+# Start SSH tunnel
+./bin/ssh-tunnel \
+ -addr "remote-server:22" \
+ -user "username" \
+ -keyfile "~/.ssh/interlink_rsa" \
+ -lsock "/tmp/interlink.sock" \
+ -rport "4000"
+```
+
+##### With Host Key Verification
+
+```bash
+# Start SSH tunnel with host key verification
+./bin/ssh-tunnel \
+ -addr "remote-server:22" \
+ -user "username" \
+ -keyfile "~/.ssh/interlink_rsa" \
+ -lsock "/tmp/interlink.sock" \
+ -rport "4000" \
+ -hostkeyfile "~/.ssh/interlink_host_key"
+```
+
+##### Command Line Options
+
+| Option | Description | Required |
+|--------|-------------|----------|
+| `-addr` | SSH server address as `hostname:port` | Yes |
+| `-user` | Username for SSH authentication | Yes |
+| `-keyfile` | Path to private key file | Yes |
+| `-lsock` | Path to local Unix socket | Yes |
+| `-rport` | Remote port where plugin listens | Yes |
+| `-hostkeyfile` | Path to host public key for verification | No |
+
+### Complete Deployment Example
+
+#### Step 1: Prepare Remote Environment
+
+On the remote server, start your interLink plugin:
+
+```bash
+# Example: Start SLURM plugin on remote HPC system
+cd /path/to/plugin
+python3 slurm_plugin.py --port 4000
+```
+
+#### Step 2: Start Local Components
+
+Start components in this order:
+
+```bash
+# 1. Start SSH tunnel (runs in foreground)
+./bin/ssh-tunnel \
+ -addr "hpc-cluster.example.com:22" \
+ -user "hpc-user" \
+ -keyfile "~/.ssh/interlink_rsa" \
+ -lsock "/tmp/interlink.sock" \
+ -rport "4000" \
+ -hostkeyfile "~/.ssh/interlink_host_key"
+```
+
+In separate terminals:
+
+```bash
+# 2. Start interLink API server
+export INTERLINKCONFIGPATH=/path/to/InterLinkConfig.yaml
+./bin/interlink
+
+# 3. Start Virtual Kubelet
+export KUBECONFIG=~/.kube/config
+./bin/virtual-kubelet \
+ --provider interlink \
+ --nodename interlink-node \
+ --config /path/to/VirtualKubeletConfig.yaml
+```
+
+#### Step 3: Verify Connection
+
+Test the complete setup:
+
+```bash
+# Check if node appears in Kubernetes
+kubectl get nodes
+
+# Deploy a test pod
+kubectl apply -f - </dev/null || true
+ endscript
+}
+
+/var/log/interlink/*.log {
+ daily
+ rotate 30
+ compress
+ delaycompress
+ missingok
+ notifempty
+ postrotate
+ systemctl reload interlink-remote-plugin 2>/dev/null || true
+ endscript
+}
+```
+
+### Service Management Commands
+
+Enable and start all services in the correct order:
+
+```bash
+# Local services (where Virtual Kubelet runs)
+sudo systemctl daemon-reload
+sudo systemctl enable interlink-tunnel interlink-api interlink-virtual-kubelet
+
+# Start services in dependency order
+sudo systemctl start interlink-tunnel
+sudo systemctl start interlink-api
+sudo systemctl start interlink-virtual-kubelet
+
+# Remote services (on the plugin server)
+sudo systemctl daemon-reload
+sudo systemctl enable interlink-remote-plugin
+sudo systemctl start interlink-remote-plugin
+
+# Check service status
+sudo systemctl status interlink-tunnel
+sudo systemctl status interlink-api
+sudo systemctl status interlink-virtual-kubelet
+```
+
+### Service Operations
+
+Common systemd operations for managing tunneled interLink services:
+
+```bash
+# View service logs
+sudo journalctl -u interlink-tunnel -f
+sudo journalctl -u interlink-api -f
+sudo journalctl -u interlink-virtual-kubelet -f
+
+# Restart tunnel (will cascade to dependent services)
+sudo systemctl restart interlink-tunnel
+
+# Stop all local interLink services
+sudo systemctl stop interlink-virtual-kubelet interlink-api interlink-tunnel
+
+# Start all local interLink services
+sudo systemctl start interlink-tunnel interlink-api interlink-virtual-kubelet
+
+# Check service dependencies
+sudo systemctl list-dependencies interlink-virtual-kubelet
+```
+
+### Monitoring and Health Checks
+
+Create a comprehensive health check script for tunneled deployment:
+
+```bash title="/opt/interlink/bin/tunneled-health-check.sh"
+#!/bin/bash
+
+# Health check script for tunneled interLink deployment
+LOG_FILE="/opt/interlink/logs/health-check.log"
+SOCKET_PATH="/tmp/interlink.sock"
+REMOTE_HOST="remote-server"
+REMOTE_PORT="4000"
+
+echo "$(date): Starting tunneled deployment health check" >> "$LOG_FILE"
+
+# Check SSH tunnel connectivity
+if ! pgrep -f "ssh-tunnel" > /dev/null; then
+ echo "$(date): ERROR - SSH tunnel process not running" >> "$LOG_FILE"
+ exit 1
+fi
+
+# Check if Unix socket exists and is responding
+if [ -S "$SOCKET_PATH" ]; then
+ response=$(curl -s --unix-socket "$SOCKET_PATH" http://unix/pinglink 2>/dev/null)
+ if [ $? -eq 0 ]; then
+ echo "$(date): Local API health check passed - $response" >> "$LOG_FILE"
+ else
+ echo "$(date): ERROR - Local API not responding via socket" >> "$LOG_FILE"
+ exit 1
+ fi
+else
+ echo "$(date): ERROR - Unix socket not found at $SOCKET_PATH" >> "$LOG_FILE"
+ exit 1
+fi
+
+# Check Virtual Kubelet node status
+if kubectl get node interlink-node --no-headers 2>/dev/null | grep -q Ready; then
+ echo "$(date): Virtual Kubelet node is Ready" >> "$LOG_FILE"
+else
+ echo "$(date): WARNING - Virtual Kubelet node not Ready" >> "$LOG_FILE"
+fi
+
+# Test remote connectivity through tunnel
+if nc -z -w5 127.0.0.1 4000 2>/dev/null; then
+ echo "$(date): Remote plugin connectivity through tunnel - OK" >> "$LOG_FILE"
+else
+ echo "$(date): WARNING - Cannot reach remote plugin through tunnel" >> "$LOG_FILE"
+fi
+
+echo "$(date): Tunneled deployment health check completed" >> "$LOG_FILE"
+exit 0
+```
+
+```bash
+# Make executable
+sudo chmod +x /opt/interlink/bin/tunneled-health-check.sh
+sudo chown interlink:interlink /opt/interlink/bin/tunneled-health-check.sh
+```
+
+Create systemd timer for health checks:
+
+```ini title="/etc/systemd/system/interlink-tunneled-health-check.service"
+[Unit]
+Description=interLink Tunneled Health Check
+After=interlink-virtual-kubelet.service
+Requires=interlink-virtual-kubelet.service
+
+[Service]
+Type=oneshot
+User=interlink
+Group=interlink
+ExecStart=/opt/interlink/bin/tunneled-health-check.sh
+```
+
+```ini title="/etc/systemd/system/interlink-tunneled-health-check.timer"
+[Unit]
+Description=Run interLink Tunneled Health Check every 5 minutes
+Requires=interlink-tunneled-health-check.service
+
+[Timer]
+OnCalendar=*:0/5
+Persistent=true
+
+[Install]
+WantedBy=timers.target
+```
+
+Enable the health check timer:
+
+```bash
+sudo systemctl daemon-reload
+sudo systemctl enable interlink-tunneled-health-check.timer
+sudo systemctl start interlink-tunneled-health-check.timer
+```
+
+### Troubleshooting Tunneled Deployment
+
+#### SSH Tunnel Issues
+
+```bash
+# Check SSH tunnel process
+ps aux | grep ssh-tunnel
+
+# Test SSH connection manually
+sudo -u interlink ssh -i /opt/interlink/.ssh/id_rsa interlink@remote-server
+
+# Check SSH tunnel logs
+sudo journalctl -u interlink-tunnel --since "1 hour ago"
+
+# Test local socket
+echo "test" | nc -U /tmp/interlink.sock
+```
+
+#### Virtual Kubelet Issues
+
+```bash
+# Check Virtual Kubelet logs
+sudo journalctl -u interlink-virtual-kubelet -f
+
+# Verify kubeconfig access
+sudo -u interlink kubectl get nodes
+
+# Check node status
+kubectl describe node interlink-node
+```
+
+#### Remote Plugin Issues
+
+```bash
+# On remote server, check plugin status
+sudo systemctl status interlink-remote-plugin
+
+# Check if plugin port is listening
+netstat -tlnp | grep :4000
+
+# Test plugin connectivity from remote server
+curl -X GET http://localhost:4000/status
+```
+
+### Security Considerations for Tunneled Deployment
+
+#### SSH Security
+
+1. **Dedicated SSH keys**: Use separate keys for interLink tunneling
+2. **Key restrictions**: Add restrictions in `authorized_keys`:
+
+```bash
+# On remote server in ~/.ssh/authorized_keys
+command="/usr/bin/false",no-pty,no-X11-forwarding,no-agent-forwarding,permitopen="localhost:4000" ssh-rsa AAAAB3... interlink-tunnel-key
+```
+
+3. **SSH configuration**: Secure SSH server configuration:
+
+```bash title="/etc/ssh/sshd_config.d/interlink.conf"
+# Dedicated configuration for interLink tunnel user
+Match User interlink
+ AllowTcpForwarding yes
+ AllowStreamLocalForwarding yes
+ PermitTunnel no
+ X11Forwarding no
+ AllowAgentForwarding no
+ PermitTTY no
+ ForceCommand /bin/false
+```
+
+#### Network Security
+
+```bash
+# Firewall rules for local server
+sudo ufw allow in on lo
+sudo ufw allow out 22/tcp comment "SSH for tunnel"
+
+# Firewall rules for remote server
+sudo ufw allow from <local-server-ip> to any port 22 comment "SSH tunnel"
+sudo ufw allow 4000/tcp comment "Plugin API"
+```
+
+#### File Permissions
+
+```bash
+# Secure SSH directory
+sudo chmod 700 /opt/interlink/.ssh
+sudo chmod 600 /opt/interlink/.ssh/id_rsa
+sudo chmod 644 /opt/interlink/.ssh/id_rsa.pub /opt/interlink/.ssh/host_key
+
+# Secure configuration files
+sudo chmod 640 /opt/interlink/config/*
+sudo chown root:interlink /opt/interlink/config/*
+```
+
+This comprehensive tunneled deployment setup provides a robust, secure, and manageable solution for connecting Kubernetes clusters to remote compute resources through SSH tunneling.
+
+:::note
+
+For additional case studies and advanced configurations, reach out to the interLink community through the Slack channel.
+
+:::
diff --git a/docs/versioned_docs/version-0.6.x/cookbook/_category_.json b/docs/versioned_docs/version-0.6.x/cookbook/_category_.json
new file mode 100644
index 00000000..d8253b65
--- /dev/null
+++ b/docs/versioned_docs/version-0.6.x/cookbook/_category_.json
@@ -0,0 +1,8 @@
+{
+ "label": "Cookbook",
+ "position": 3,
+ "link": {
+ "type": "generated-index",
+ "description": "Practical recipes for different deployment scenarios."
+ }
+}
diff --git a/docs/versioned_docs/version-0.6.x/guides/01-deploy-interlink.mdx b/docs/versioned_docs/version-0.6.x/guides/01-deploy-interlink.mdx
new file mode 100644
index 00000000..7eb57de4
--- /dev/null
+++ b/docs/versioned_docs/version-0.6.x/guides/01-deploy-interlink.mdx
@@ -0,0 +1,84 @@
+---
+sidebar_position: 1
+---
+
+import ThemedImage from "@theme/ThemedImage";
+import useBaseUrl from "@docusaurus/useBaseUrl";
+
+# Deploy your plugin
+
+## Attach your favorite plugin or develop one! (remote host)
+
+[Next chapter](./develop-a-plugin) will show the basics for developing a new
+plugin following the interLink openAPI spec.
+
+Alternatively, you can start an already supported one.
+
+### Remote SLURM job submission
+
+If you manage a SLURM batch system and satisfy the requirements below, you
+can offload pods from a Kubernetes cluster to your batch system using the
+interLink SLURM plugin.
+
+- [github.com/interlink-hq/interlink-slurm-plugin](https://github.com/interlink-hq/interlink-slurm-plugin)
+
+#### Requirements
+
+- a SLURM CLI available on the remote host and configured to interact with the
+ computing cluster
+- a sharedFS with all the worker nodes
+ - an experimental feature is available for cases in which this is not possible
+
+#### Configuration
+
+Please refer to either the plugin repository or the
+[cookbook](../cookbook/1-edge.mdx) for more information.
+
+### Create UNICORE jobs to run on HPC centers
+
+[UNICORE](https://www.unicore.eu/) (Uniform Interface to Computing Resources)
+offers a ready-to-run system including client and server software. UNICORE makes
+distributed computing and data resources available in a seamless and secure way
+in intranets and the internet.
+
+- [UNICORE plugin](https://github.com/interlink-hq/interlink-unicore-plugin)
+
+#### Configuration
+
+Please refer to the plugin repository for more information.
+
+### Remote docker execution
+
+You get a VM from your cloud provider, possibly with some GPUs. You can attach
+it to your Kubernetes cluster using the interLink Docker plugin.
+
+- [Docker plugin repository](https://github.com/interlink-hq/interlink-docker-plugin)
+
+#### Configuration
+
+Please refer to either the plugin repository or the
+[cookbook](../cookbook/1-edge.mdx) for more information.
+
+### Submit pods to HTCondor or ARC batch systems
+
+- [HTCondor plugin repository](https://github.com/interlink-hq/interlink-htcondor-plugin)
+- [ARC plugin repository](https://github.com/interlink-hq/interlink-arc-plugin)
+
+### Remote Kubernetes Plugin
+
+InterLink plugin to extend the capabilities of existing Kubernetes clusters,
+enabling them to offload workloads to another remote cluster. The plugin
+supports the offloading of PODs that expose HTTP endpoints (i.e., HTTP
+Microservices).
+
+- [Interlink Kubernetes Plugin](https://baltig.infn.it/mgattari/interlink-kubernetes-plugin)
+
+#### Configuration
+
+Please refer to either the plugin repository or the
+[cookbook](../cookbook/1-edge.mdx) for more information.
+
+## Test your setup
+
+Please find a demo pod to test your setup
+[here](./develop-a-plugin#lets-test-is-out).
diff --git a/docs/versioned_docs/version-0.6.x/guides/02-develop-a-plugin.md b/docs/versioned_docs/version-0.6.x/guides/02-develop-a-plugin.md
new file mode 100644
index 00000000..fc75297f
--- /dev/null
+++ b/docs/versioned_docs/version-0.6.x/guides/02-develop-a-plugin.md
@@ -0,0 +1,754 @@
+---
+sidebar_position: 2
+---
+
+# Develop an interLink plugin
+
+Learn how to develop your interLink plugin to manage containers on your remote
+host.
+
+We are going to follow up
+[the setup of an interlink node](../cookbook/1-edge.mdx) with the last piece of
+the puzzle:
+
+- setup of a python SDK
+- demoing the fundamentals of developing a plugin that executes containers
+  locally through the host Docker daemon
+
+:::warning
+
+The Python SDK also produces an OpenAPI spec through FastAPI, so you can
+use any language you want as long as the API spec is satisfied.
+
+:::
+
+## Plugin API Specification
+
+Before diving into development, familiarize yourself with the complete plugin
+API specification. The OpenAPI specification defines all the endpoints,
+request/response schemas, and data types your plugin must implement:
+
+📋 **[Plugin OpenAPI Specification](./03-api-reference.mdx#plugin-api-spec)**
+
+This specification is the authoritative reference for:
+
+- Required HTTP endpoints (`/create`, `/delete`, `/status`, `/getLogs`)
+- Request and response data structures
+- Error handling and status codes
+- Authentication requirements
+
+Any plugin implementation in any programming language must comply with this API
+specification to work with interLink.
+
+## Setup the python SDK
+
+### Requirements
+
+- The tutorial is done on an Ubuntu VM, but there are no hard requirements
+  around that
+- Python>=3.10 and pip (`sudo apt install -y python3-pip`)
+- Any python IDE will work and it is strongly suggested to use one :)
+- A [docker engine running](https://docs.docker.com/engine/install/)
+
+### Install the SDK
+
+Look for the latest release on
+[the release page](https://github.com/interlink-hq/interLink/releases) and set
+the environment variable `VERSION` to it. Then you are ready to install the
+python SDK with:
+
+```bash
+#export VERSION=X.X.X
+#pip install "uvicorn[standard]" "git+https://github.com/interlink-hq/interlink-plugin-sdk@${VERSION}"
+
+# Or download the latest one with
+pip install "uvicorn[standard]" "git+https://github.com/interlink-hq/interlink-plugin-sdk"
+
+```
+
+## Understanding the Plugin Architecture
+
+InterLink plugins act as "sidecar" containers that handle the actual execution
+of workloads on remote resources. The plugin communicates with the interLink API
+server via REST endpoints and translates Kubernetes pod specifications into
+commands suitable for your target infrastructure.
+
+### Core Data Structures
+
+The plugin interface uses several key data structures defined in the interLink
+types:
+
+#### PodCreateRequests
+
+```json
+{
+ "pod": {...}, // Standard Kubernetes Pod spec
+ "configmaps": [...], // Associated ConfigMaps
+ "secrets": [...], // Associated Secrets
+ "projectedvolumesmaps": [...], // ServiceAccount projected volumes
+ "jobscriptURL": "" // Optional job script builder endpoint
+}
+```
+
+#### PodStatus
+
+```json
+{
+ "name": "pod-name",
+ "UID": "pod-uid",
+ "namespace": "default",
+ "JID": "remote-job-id",
+ "containers": [...], // Container status array
+ "initContainers": [...] // Init container status array
+}
+```
+
+#### CreateStruct
+
+```json
+{
+ "PodUID": "kubernetes-pod-uid",
+ "PodJID": "remote-system-job-id"
+}
+```
+
+## Plugin Interface Requirements
+
+Your plugin must implement the following REST API endpoints:
+
+### POST /create
+
+Creates one or more pods on the remote system.
+
+**Request Body**: `List[PodCreateRequests]` **Response**: `List[CreateStruct]`
+
+### POST /delete
+
+Deletes a pod from the remote system.
+
+**Request Body**: `PodStatus` **Response**: Success/error status
+
+### GET /status
+
+Retrieves the current status of one or more pods.
+
+**Query Parameters**: List of pod UIDs **Response**: `List[PodStatus]`
+
+### GET /getLogs
+
+Retrieves logs from a specific container.
+
+**Query Parameters**: Pod UID, container name, log options **Response**:
+Container logs (plain text)
+
+## Developing with the Python SDK
+
+### Basic Plugin Structure
+
+Here's a complete example of a Docker-based plugin using the interLink Python
+SDK:
+
+```python
+import interlink
+from fastapi.responses import PlainTextResponse
+from fastapi import FastAPI, HTTPException
+from typing import List
+import docker
+import re
+import os
+
+# Initialize Docker client
+docker_client = docker.DockerClient()
+app = FastAPI()
+
+class MyProvider(interlink.provider.Provider):
+ def __init__(self, docker):
+ super().__init__(docker)
+ self.container_pod_map = {}
+
+ # Recover already running containers
+ statuses = self.docker.api.containers(all=True)
+ for status in statuses:
+ name = status["Names"][0]
+ if len(name.split("-")) > 1:
+ uid = "-".join(name.split("-")[-5:])
+ self.container_pod_map.update({uid: [status["Id"]]})
+
+ def create(self, pod: interlink.Pod) -> None:
+ """Create a pod by running Docker containers"""
+ container = pod.pod.spec.containers[0]
+
+ # Handle volumes if present
+ if pod.pod.spec.volumes:
+ self.dump_volumes(pod.pod.spec.volumes, pod.container)
+
+ # Set up volume mounts
+ volumes = []
+ if container.volume_mounts:
+ for mount in container.volume_mounts:
+ if mount.sub_path:
+ volumes.append(
+ f"{pod.pod.metadata.namespace}-{mount.name}/{mount.sub_path}:{mount.mount_path}"
+ )
+ else:
+ volumes.append(
+ f"{pod.pod.metadata.namespace}-{mount.name}:{mount.mount_path}"
+ )
+
+ try:
+ # Prepare command and arguments
+ cmds = " ".join(container.command) if container.command else ""
+ args = " ".join(container.args) if container.args else ""
+
+ # Run the container
+ docker_container = self.docker.containers.run(
+ f"{container.image}",
+ f"{cmds} {args}".strip(),
+ name=f"{container.name}-{pod.pod.metadata.uid}",
+ detach=True,
+ volumes=volumes,
+ # Add additional Docker options as needed
+ # environment=container.env,
+ # ports=container.ports,
+ )
+
+ # Store container mapping
+ self.container_pod_map.update({
+ pod.pod.metadata.uid: [docker_container.id]
+ })
+
+ except Exception as ex:
+ raise HTTPException(status_code=500, detail=str(ex))
+
+ def delete(self, pod: interlink.PodRequest) -> None:
+ """Delete a pod by removing its containers"""
+ try:
+ container_id = self.container_pod_map[pod.metadata.uid][0]
+ container = self.docker.containers.get(container_id)
+ container.remove(force=True)
+ self.container_pod_map.pop(pod.metadata.uid)
+ except KeyError:
+ raise HTTPException(
+ status_code=404,
+ detail="No containers found for UUID"
+ )
+
+ def status(self, pod: interlink.PodRequest) -> interlink.PodStatus:
+ """Get the current status of a pod"""
+ try:
+ container_id = self.container_pod_map[pod.metadata.uid][0]
+ container = self.docker.containers.get(container_id)
+ status = container.status
+ except KeyError:
+ raise HTTPException(
+ status_code=404,
+ detail="No containers found for UUID"
+ )
+
+ # Map Docker status to Kubernetes container status
+ if status == "running":
+ statuses = self.docker.api.containers(
+ filters={"status": "running", "id": container.id}
+ )
+ started_at = statuses[0]["Created"]
+
+ return interlink.PodStatus(
+ name=pod.metadata.name,
+ UID=pod.metadata.uid,
+ namespace=pod.metadata.namespace,
+ containers=[
+ interlink.ContainerStatus(
+ name=pod.spec.containers[0].name,
+ state=interlink.ContainerStates(
+ running=interlink.StateRunning(started_at=started_at),
+ waiting=None,
+ terminated=None,
+ ),
+ )
+ ],
+ )
+ elif status == "exited":
+ # Extract exit code from status
+ statuses = self.docker.api.containers(
+ filters={"status": "exited", "id": container.id}
+ )
+ reason = statuses[0]["Status"]
+ pattern = re.compile(r"Exited \((.*?)\)")
+
+ exit_code = -1
+ for match in re.findall(pattern, reason):
+ exit_code = int(match)
+
+ return interlink.PodStatus(
+ name=pod.metadata.name,
+ UID=pod.metadata.uid,
+ namespace=pod.metadata.namespace,
+ containers=[
+ interlink.ContainerStatus(
+ name=pod.spec.containers[0].name,
+ state=interlink.ContainerStates(
+ running=None,
+ waiting=None,
+ terminated=interlink.StateTerminated(
+ reason=reason,
+ exitCode=exit_code
+ ),
+ ),
+ )
+ ],
+ )
+
+ # Default completed status
+ return interlink.PodStatus(
+ name=pod.metadata.name,
+ UID=pod.metadata.uid,
+ namespace=pod.metadata.namespace,
+ containers=[
+ interlink.ContainerStatus(
+ name=pod.spec.containers[0].name,
+ state=interlink.ContainerStates(
+ running=None,
+ waiting=None,
+ terminated=interlink.StateTerminated(
+ reason="Completed",
+ exitCode=0
+ ),
+ ),
+ )
+ ],
+ )
+
+ def Logs(self, req: interlink.LogRequest) -> bytes:
+ """Retrieve logs from a container"""
+ try:
+ container_id = self.container_pod_map[req.pod_uid][0]
+ container = self.docker.containers.get(container_id)
+ log = container.logs(
+ timestamps=req.Opts.Timestamps if hasattr(req.Opts, 'Timestamps') else False,
+ tail=req.Opts.Tail if hasattr(req.Opts, 'Tail') else 'all'
+ )
+ return log
+ except KeyError:
+ raise HTTPException(
+ status_code=404,
+ detail="No containers found for UUID"
+ )
+
+ def dump_volumes(self, pod_volumes: List, container_volumes: List) -> List[str]:
+ """Handle ConfigMaps, Secrets, and other volume types"""
+ data_list = []
+
+ for volume in container_volumes:
+ # Handle ConfigMaps
+ if volume.config_maps:
+ for config_map in volume.config_maps:
+ for pod_vol in pod_volumes:
+ if (pod_vol.volume_source.config_map and
+ pod_vol.name == config_map.metadata.name):
+
+ for filename, content in config_map.data.items():
+                        path = f"{config_map.metadata.namespace}-{config_map.metadata.name}/{filename}"
+ os.makedirs(os.path.dirname(path), exist_ok=True)
+
+ with open(path, "w") as f:
+ f.write(content)
+ data_list.append(path)
+
+ # Handle Secrets (base64 decode)
+ if volume.secrets:
+ for secret in volume.secrets:
+ # Similar logic for secrets
+ pass
+
+ # Handle EmptyDirs
+ if volume.empty_dirs:
+ # Create empty directories
+ pass
+
+ return data_list
+
+# Initialize provider
+provider = MyProvider(docker_client)
+
+# FastAPI endpoints
+@app.post("/create")
+async def create_pod(pods: List[interlink.Pod]) -> List[interlink.CreateStruct]:
+ return provider.create_pod(pods)
+
+@app.post("/delete")
+async def delete_pod(pod: interlink.PodRequest) -> str:
+ return provider.delete_pod(pod)
+
+@app.get("/status")
+async def status_pod(pods: List[interlink.PodRequest]) -> List[interlink.PodStatus]:
+ return provider.get_status(pods)
+
+@app.get("/getLogs", response_class=PlainTextResponse)
+async def get_logs(req: interlink.LogRequest) -> bytes:
+ return provider.get_logs(req)
+
+# Run the server
+if __name__ == "__main__":
+ import uvicorn
+ uvicorn.run(app, host="0.0.0.0", port=8000)
+```
+
+### Advanced Plugin Features
+
+#### Volume Handling
+
+The plugin can handle various Kubernetes volume types:
+
+```python
+def handle_persistent_volumes(self, pod_spec):
+ """Example of handling PersistentVolumeClaims"""
+ for volume in pod_spec.volumes:
+ if volume.persistent_volume_claim:
+ pvc_name = volume.persistent_volume_claim.claim_name
+ # Mount the PVC to your remote system
+ self.mount_pvc(pvc_name, volume.name)
+
+def handle_projected_volumes(self, projected_volumes):
+ """Handle ServiceAccount tokens and projected volumes"""
+ for pv_map in projected_volumes:
+ for filename, content in pv_map.data.items():
+ # Write ServiceAccount tokens, CA certificates, etc.
+ self.write_projected_file(filename, content)
+```
+
+#### Resource Management
+
+```python
+def apply_resource_limits(self, container_spec, docker_params):
+ """Apply CPU and memory limits to containers"""
+ if container_spec.resources:
+ if container_spec.resources.limits:
+ limits = container_spec.resources.limits
+ if 'cpu' in limits:
+ # Convert Kubernetes CPU units to Docker format
+ docker_params['cpu_period'] = 100000
+ docker_params['cpu_quota'] = int(float(limits['cpu']) * 100000)
+ if 'memory' in limits:
+ # Convert memory units (Ki, Mi, Gi)
+ docker_params['mem_limit'] = self.parse_memory(limits['memory'])
+```
+
+#### Environment Variables and Secrets
+
+```python
+def setup_environment(self, container_spec, secrets, config_maps):
+ """Set up environment variables from various sources"""
+ env_vars = {}
+
+ # Direct environment variables
+ for env in container_spec.env or []:
+ if env.value:
+ env_vars[env.name] = env.value
+ elif env.value_from:
+ # Handle valueFrom sources
+ if env.value_from.secret_key_ref:
+ secret_name = env.value_from.secret_key_ref.name
+ secret_key = env.value_from.secret_key_ref.key
+ env_vars[env.name] = self.get_secret_value(secrets, secret_name, secret_key)
+ elif env.value_from.config_map_key_ref:
+ cm_name = env.value_from.config_map_key_ref.name
+ cm_key = env.value_from.config_map_key_ref.key
+ env_vars[env.name] = self.get_configmap_value(config_maps, cm_name, cm_key)
+
+ return env_vars
+```
+
+## Testing Your Plugin
+
+### Local Testing
+
+Create a simple test script to verify your plugin endpoints:
+
+```python
+import requests
+import json
+
+# Test data
+test_pod = {
+ "pod": {
+ "metadata": {"name": "test-pod", "uid": "test-uid", "namespace": "default"},
+ "spec": {
+ "containers": [{
+ "name": "test-container",
+ "image": "nginx:latest",
+ "command": ["nginx"],
+ "args": ["-g", "daemon off;"]
+ }]
+ }
+ },
+ "configmaps": [],
+ "secrets": [],
+ "projectedvolumesmaps": []
+}
+
+# Test creation
+response = requests.post("http://localhost:8000/create", json=[test_pod])
+print(f"Create response: {response.json()}")
+
+# Test status
+response = requests.get("http://localhost:8000/status", params={"pod_uid": "test-uid"})
+print(f"Status response: {response.json()}")
+```
+
+### Integration Testing
+
+Use the interLink test suite to verify your plugin works with the full system:
+
+```bash
+# Build your plugin image
+docker build -t my-plugin:latest .
+
+# Update plugin configuration
+export PLUGIN_IMAGE=my-plugin:latest
+export PLUGIN_PORT=8000
+
+# Run integration tests
+make test
+```
+
+## Deployment and Configuration
+
+### Plugin Configuration
+
+Create a configuration file for your plugin:
+
+```yaml
+# plugin-config.yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: plugin-config
+data:
+ plugin.yaml: |
+ plugin:
+ endpoint: "http://plugin-service:8000"
+ authentication:
+ type: "bearer"
+ token: "your-auth-token"
+ timeout: 30s
+```
+
+### Kubernetes Deployment
+
+Deploy your plugin as a Kubernetes service:
+
+```yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: my-plugin
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: my-plugin
+ template:
+ metadata:
+ labels:
+ app: my-plugin
+ spec:
+ containers:
+ - name: plugin
+ image: my-plugin:latest
+ ports:
+ - containerPort: 8000
+ env:
+ - name: PLUGIN_CONFIG
+ value: "/etc/plugin/config.yaml"
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: plugin-service
+spec:
+ selector:
+ app: my-plugin
+ ports:
+ - port: 8000
+ targetPort: 8000
+```
+
+## Real-World Examples
+
+### SLURM Plugin
+
+For HPC workloads using SLURM:
+
+```python
+class SLURMProvider(interlink.provider.Provider):
+ def create(self, pod: interlink.Pod) -> None:
+ # Convert pod spec to SLURM job script
+ job_script = self.generate_slurm_script(pod)
+
+ # Submit to SLURM
+ result = subprocess.run(
+ ["sbatch", "--parsable"],
+ input=job_script,
+ capture_output=True,
+ text=True
+ )
+
+ job_id = result.stdout.strip()
+ self.job_pod_map[pod.pod.metadata.uid] = job_id
+
+ def generate_slurm_script(self, pod):
+ container = pod.pod.spec.containers[0]
+ return f"""#!/bin/bash
+#SBATCH --job-name={pod.pod.metadata.name}
+#SBATCH --output=job_%j.out
+#SBATCH --error=job_%j.err
+
+# Run container with Singularity/Apptainer
+singularity exec {container.image} {' '.join(container.command or [])}
+"""
+```
+
+### Cloud Provider Plugin
+
+For cloud platforms like AWS ECS or Google Cloud Run:
+
+```python
+class CloudProvider(interlink.provider.Provider):
+ def create(self, pod: interlink.Pod) -> None:
+ # Convert to cloud-native format
+ task_definition = self.pod_to_task_definition(pod)
+
+ # Submit to cloud provider
+ response = self.cloud_client.run_task(
+ taskDefinition=task_definition,
+ cluster=self.cluster_name
+ )
+
+ task_arn = response['tasks'][0]['taskArn']
+ self.task_pod_map[pod.pod.metadata.uid] = task_arn
+```
+
+### Kubernetes Plugin (Cross-Cluster)
+
+Based on the
+[interLink Kubernetes Plugin](https://github.com/interlink-hq/interlink-kubernetes-plugin):
+
+```python
+class KubernetesProvider(interlink.provider.Provider):
+ def __init__(self, remote_kubeconfig):
+ super().__init__()
+ self.k8s_client = kubernetes.client.ApiClient(
+ kubernetes.config.load_kube_config(remote_kubeconfig)
+ )
+ self.core_v1 = kubernetes.client.CoreV1Api(self.k8s_client)
+
+ def create(self, pod: interlink.Pod) -> None:
+ # Handle volume offloading
+ self.sync_volumes(pod)
+
+ # Handle microservice offloading with TCP tunnels
+ if self.has_exposed_ports(pod):
+ self.setup_tcp_tunnel(pod)
+
+ # Create pod on remote cluster
+ try:
+ response = self.core_v1.create_namespaced_pod(
+ namespace=pod.pod.metadata.namespace,
+ body=pod.pod
+ )
+ self.pod_map[pod.pod.metadata.uid] = response.metadata.name
+ except kubernetes.client.ApiException as e:
+ raise HTTPException(status_code=500, detail=str(e))
+
+ def sync_volumes(self, pod):
+ """Sync ConfigMaps, Secrets, and PVCs to remote cluster"""
+ for volume in pod.container:
+ if volume.config_maps:
+ for cm in volume.config_maps:
+ self.create_or_update_configmap(cm)
+ if volume.secrets:
+ for secret in volume.secrets:
+ self.create_or_update_secret(secret)
+```
+
+## Best Practices
+
+1. **Error Handling**: Always provide meaningful error messages and appropriate
+ HTTP status codes
+2. **Logging**: Implement comprehensive logging for debugging and monitoring
+3. **Resource Cleanup**: Ensure proper cleanup of resources when pods are
+ deleted
+4. **State Persistence**: Consider persisting plugin state to handle restarts
+5. **Security**: Implement proper authentication and authorization for your
+ plugin endpoints
+6. **Monitoring**: Add health checks and metrics endpoints for observability
+7. **Idempotency**: Make operations idempotent to handle retries gracefully
+8. **Resource Limits**: Always respect and enforce Kubernetes resource limits
+9. **Graceful Shutdown**: Handle SIGTERM signals for graceful container shutdown
+
+## Running Your Plugin
+
+### Development Mode
+
+```bash
+# Install dependencies
+pip install -r requirements.txt
+
+# Run with auto-reload
+uvicorn main:app --reload --host 0.0.0.0 --port 8000
+```
+
+### Production Mode
+
+```bash
+# Build container
+docker build -t my-plugin:v1.0.0 .
+
+# Run container
+docker run -d \
+ --name my-plugin \
+ -p 8000:8000 \
+ -v /var/run/docker.sock:/var/run/docker.sock \
+ my-plugin:v1.0.0
+```
+
+### Dockerfile Example
+
+```dockerfile
+FROM python:3.11-slim
+
+WORKDIR /app
+
+# Install system dependencies
+RUN apt-get update && apt-get install -y \
+ curl \
+ && rm -rf /var/lib/apt/lists/*
+
+# Install Python dependencies
+COPY requirements.txt .
+RUN pip install --no-cache-dir -r requirements.txt
+
+# Copy application
+COPY . .
+
+# Health check
+HEALTHCHECK --interval=30s --timeout=3s --start-period=5s --retries=3 \
+ CMD curl -f http://localhost:8000/health || exit 1
+
+# Run application
+CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "8000"]
+```
+
+## Next Steps
+
+- Explore the
+ [interLink Kubernetes Plugin](https://github.com/interlink-hq/interlink-kubernetes-plugin)
+ for a production example
+- Check out the
+ [Plugin SDK documentation](https://github.com/interlink-hq/interlink-plugin-sdk)
+ for API details
+- Review the [monitoring guide](./05-monitoring.md) to add observability to your
+ plugin
+- Study the [API reference](./03-api-reference.mdx) for detailed endpoint
+ specifications
+- Join the interLink community for support and contributions
diff --git a/docs/versioned_docs/version-0.6.x/guides/03-api-reference.mdx b/docs/versioned_docs/version-0.6.x/guides/03-api-reference.mdx
new file mode 100644
index 00000000..3ea6ee69
--- /dev/null
+++ b/docs/versioned_docs/version-0.6.x/guides/03-api-reference.mdx
@@ -0,0 +1,20 @@
+---
+sidebar_position: 3
+---
+
+# OpenAPI references
+
+## Plugin API spec
+
+The sidecar OpenAPI JSON spec can be found [here](/plugin-openapi).
+
+import ApiDocMdx from "@theme/ApiDocMdx";
+
+
+
+## interLink API spec
+
+The interLink OpenAPI JSON spec can be found
+[here](/interlink-openapi).
+
+
diff --git a/docs/versioned_docs/version-0.6.x/guides/04-oidc-IAM.md b/docs/versioned_docs/version-0.6.x/guides/04-oidc-IAM.md
new file mode 100644
index 00000000..0a31c03c
--- /dev/null
+++ b/docs/versioned_docs/version-0.6.x/guides/04-oidc-IAM.md
@@ -0,0 +1,168 @@
+---
+sidebar_position: 4
+---
+
+import ThemedImage from "@theme/ThemedImage";
+import useBaseUrl from "@docusaurus/useBaseUrl";
+
+# Configure OpenID connect identity providers
+
+We support any OpenID compliant identity provider and also GitHub authN
+workflow.
+
+## OAuth Audience Configuration
+
+When configuring OIDC providers, the `audience` field is required and specifies the intended audience for the JWT tokens. This field should match the audience configured in your OIDC provider and helps ensure tokens are intended for interLink. Different providers may use different audience values:
+
+- **EGI Check-in**: Uses `interlink` as the audience
+- **Indigo IAM**: Typically uses `users` or a custom client-specific audience
+- **Generic OIDC**: Contact your provider administrator for the correct audience value
+
+## GitHub authN
+
+### Requirements
+
+- **kubectl host**: a host with MiniKube installed and running
+- A GitHub account
+- **remote host**: A "remote" machine with a port that is reachable by the
+ MiniKube host
+
+### Create an OAuth GitHub app
+
+As a first step, you need to create a GitHub OAuth application to allow
+interLink to make authentication between your Kubernetes cluster and the remote
+endpoint.
+
+Head to [https://github.com/settings/apps](https://github.com/settings/apps) and
+click on `New GitHub App`. You should now be looking at a form like this:
+
+
+
+Provide a name for the OAuth2 application, e.g. `interlink-demo-test`, and you
+can skip the description, unless you want to provide one for future reference.
+For our purposes the Homepage reference is also not used, so feel free to put there
+`https://interlink-hq.github.io/interLink/`.
+
+Now enable refresh token and device flow authentication:
+
+
+
+Disable webhooks and save clicking on `Create GitHub App`
+
+
+
+You can click then on your application that should now appear at
+[https://github.com/settings/apps](https://github.com/settings/apps) and you
+need to save two strings: the `Client ID` and clicking on
+`Generate a new client secret` you should be able to note down the relative
+`Client Secret`.
+
+Now it's all set for the next steps. You should be able to set it for
+authenticating the virtual kubelet with the interLink remote components with the
+following piece of the installer configuration:
+
+```yaml
+oauth:
+ provider: github
+ issuer: https://github.com/oauth
+ grant_type: authorization_code
+ scopes:
+ - "read:user"
+ github_user: "GH USERNAME HERE"
+ token_url: "https://github.com/login/oauth/access_token"
+ device_code_url: "https://github.com/login/device/code"
+ client_id: "XXXXXXX"
+ client_secret: "XXXXXXXX"
+```
+
+## EGI Check-in
+
+If you have an account for [EGI check-in](https://aai.egi.eu), you should be
+able to set it for authenticating the virtual kubelet with the interLink remote
+components with the following piece of the installer configuration:
+
+```yaml
+oauth:
+ provider: oidc
+ issuer: https://aai.egi.eu/auth/realms/egi
+ scopes:
+ - "openid"
+ - "email"
+ - "offline_access"
+ - "profile"
+ audience: interlink
+ grant_type: authorization_code
+ group_claim: email
+ group: "YOUR EMAIL HERE"
+ token_url: "https://aai.egi.eu/auth/realms/egi/protocol/openid-connect/token"
+ device_code_url: "https://aai.egi.eu/auth/realms/egi/protocol/openid-connect/auth/device"
+ client_id: "oidc-agent"
+ client_secret: ""
+```
+
+:::danger
+
+Remember to put your email in the group field!
+
+:::
+
+## Indigo IAM
+
+If you have an account for [Indigo IAM](https://iam.cloud.infn.it/), you should
+be able to set it for authenticating the virtual kubelet with the interLink
+remote components. Follow those steps to setup a new client in the IAM portal
+and get the necessary information to fill the configuration. This guide is
+specific for the IAM portal
+[https://iam.cloud.infn.it/](https://iam.cloud.infn.it/) but it should be
+similar for other IAM portals that are OpenID compliant.
+
+1. Go to the [IAM portal](https://iam.cloud.infn.it/) and log in.
+2. After logging in, click on the `My clients` tab on the left side of the page
+ and then select `New client` as shown in the images below.
+
+
+3. Set a name you prefer for the client.
+4. Select the `Scopes` tab and add the following scopes: `openid`, `email`,
+   `offline_access`, `profile`, `wlcg`, `wlcg.groups`.
+5. Select the `Grant types` tab and add the following grant types:
+   `authorization_code`, `client_credentials`, `refresh_token`,
+   `urn:ietf:params:oauth:grant-type:device_code`.
+6. Save the client by pressing the `Save client` button.
+
+After creating the client, you will be able to see the new client in the
+`My clients` page as shown in the image below.
+
+
+
+You can click on the client to see the client details. You will find the
+`Client id` under the `Main` tab and the `Client secret` under the `Credentials`
+tab. Now, with those information, you can fill this piece of the installer
+configuration:
+
+```yaml
+oauth:
+ provider: oidc
+ issuer: "https://iam.cloud.infn.it/"
+ scopes:
+ - "openid"
+ - "email"
+ - "offline_access"
+ - "profile"
+ audience: users
+ grant_type: authorization_code
+ group_claim: email
+ group: "YOUR EMAIL HERE"
+ token_url: "https://iam.cloud.infn.it/token"
+ device_code_url: "https://iam.cloud.infn.it/devicecode"
+ client_id: "YOUR CLIENT ID HERE"
+ client_secret: "YOUR CLIENT SECRET HERE"
+```
diff --git a/docs/versioned_docs/version-0.6.x/guides/05-monitoring.md b/docs/versioned_docs/version-0.6.x/guides/05-monitoring.md
new file mode 100644
index 00000000..5a6a2bec
--- /dev/null
+++ b/docs/versioned_docs/version-0.6.x/guides/05-monitoring.md
@@ -0,0 +1,152 @@
+---
+sidebar_position: 5
+---
+
+# InterLink Monitoring System
+
+## Description
+
+To monitor the InterLink stack, you can use a Helm chart that deploys the
+entire stack and components needed for monitoring the Interlink components, in
+particular the Virtual Kubelet (VK) of the Interlink project. In the near
+future, the monitoring system will be extended to monitor other components of
+the Interlink project.
+
+## Monitoring System Components
+
+The monitoring system is composed of the following two components:
+
+1. [Grafana Tempo](https://grafana.com/docs/tempo/latest/)
+2. [Grafana](https://grafana.com)
+
+### Grafana Tempo
+
+Grafana Tempo (or simply **Tempo**) is an open-source distributed tracing
+backend developed by Grafana Labs designed to handle high-scale and high-volume
+distributed tracing data. The choice of Tempo was made because of its key
+features:
+
+- **Scalability**: Tempo is designed to scale horizontally and handle millions
+ of spans (a span is a single operation in a trace) per second. Moreover, it is
+ capable of storing traces without requiring a database, instead leveraging
+ object storage.
+- **Simplicity**: Tempo does not index traces because it relies on Grafana for
+ querying and visualizing traces. This approach reduces complexity and
+ operational overhead.
+- **Cost**: Tempo is cost-effective because it uses object storage for storing
+ traces, which is cheaper than traditional databases.
+- **Integration**: Tempo integrates with popular tracing protocols. One of them
+ is OpenTelemetry, which is used by the Virtual Kubelet (VK) of the Interlink
+ project.
+- **Querying**: Tempo provides a query language that allows users to filter and
+ aggregate traces. It also supports distributed sampling, which allows users to
+ sample traces across services. The powerful query language is a key feature
+ that enables users to extract insights from traces and it is one of the main
+ reasons why Tempo was chosen for the monitoring system.
+
+### Grafana
+
+Grafana is an open-source platform for monitoring and observability that allows
+users to query, visualize, alert on, and understand metrics no matter where they
+are stored. It is used to create, explore, and share dashboards with teams and
+stakeholders. Grafana supports a wide range of data sources, including Tempo.
+
+## VK tracing
+
+The Virtual Kubelet (VK) of the Interlink project is instrumented with
+**OpenTelemetry** to generate traces. OpenTelemetry is an open-source
+observability framework that provides APIs, libraries, agents, and
+instrumentation to collect telemetry data from applications and services. The
+traces generated by the VK are sent to Tempo, where they are stored and queried.
+Traces are generated by the VK when a request is made to the VK, and they
+contain information about the request, such as the details of the operation, the
+duration of the operation, and the services involved in the operation. A trace
+is a collection of spans, where each span represents a single operation in the
+trace. Spans are linked together to form a trace, which provides a complete view
+of the flow and performance of the operation. A span contains metadata, such as
+the name of the operation, the start and end time of the operation, and the
+service that generated the span.
+
+The flow is represented in the following image:
+
+
+
+In the actual setup, Tempo is deployed in the same Kubernetes cluster as the VK.
+Thanks to a Kubernetes service, the VK can send traces to Tempo. No TLS is used
+for the communication between the VK and Tempo, as the communication is internal
+to the cluster. In the near future, the communication between the VK and Tempo
+will be secured using TLS if Tempo is deployed outside the cluster and a proxy
+will be used to authenticate the VK with Tempo.
+
+The following table is a list of spans generated by the VK:
+
+| Span Name | Description | Attributes |
+| -------------- | -------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------- |
+| CreateHttpCall | Span that represents the HTTP call made by the VK to the Interlink API to create a pod. | pod.name, pod.namespace, start.timestamp, end.timestamp, duration, exitc.code |
+| DeleteHttpCall | Span that represents the HTTP call made by the VK to the Interlink API to delete a pod.            | pod.name, pod.namespace, start.timestamp, end.timestamp, duration, exitc.code |
+| StatusHttpCall | Span that represents the HTTP call made by the VK to the Interlink API to get the status of a pod.  | pod.name, pod.namespace, start.timestamp, end.timestamp, duration, exitc.code |
+| LogHttpCall    | Span that represents the HTTP call made by the VK to the Interlink API to get the logs of a pod.    | pod.name, pod.namespace, start.timestamp, end.timestamp, duration, exitc.code |
+| PingHttpCall   | Span that represents the HTTP call made by the VK to the Interlink API to check if the API is alive. | start.timestamp, end.timestamp, duration, exitc.code                          |
+| CreatePodVK    | Span that represents the call made by the VK to the Kubernetes API to create a pod.                 | pod.name, pod.namespace, start.timestamp, end.timestamp, duration             |
+| DeletePodVK    | Span that represents the call made by the VK to the Kubernetes API to delete a pod.                 | pod.name, pod.namespace, start.timestamp, end.timestamp, duration             |
+| UpdatePodVK    | Span that represents the call made by the VK to the Kubernetes API to update a pod.                 | pod.name, pod.namespace, start.timestamp, end.timestamp, duration             |
+| GetPodVK       | Span that represents the call made by the VK to the Kubernetes API to get a pod.                    | pod.name, pod.namespace, start.timestamp, end.timestamp, duration             |
+| GetPodStatusVK | Span that represents the call made by the VK to the Kubernetes API to get the status of a pod.      | pod.name, pod.namespace, start.timestamp, end.timestamp, duration             |
+| GetPodsVK      | Span that represents the call made by the VK to the Kubernetes API to get all pods.                 | start.timestamp, end.timestamp, duration                                      |
+
+## Installation
+
+:::warning
+
+To enable tracing for the Virtual Kubelet, you have to set the environment
+variable `ENABLE_TRACING` to 1.
+
+:::
+
+A requirement for the monitoring system is to have Helm installed in your
+Kubernetes cluster or in your local machine. If you don't have Helm installed,
+you can follow the instructions in the following
+[link](https://helm.sh/docs/intro/install/)
+
+This helm chart requires a NGINX Ingress Controller to be installed in the
+cluster in order to expose the services and access them from outside the
+cluster. In particular, the Grafana service is exposed using an Ingress
+resource.
+
+To install the monitoring system, follow the steps below:
+
+1. Clone the repository:
+
+```bash
+git clone https://github.com/interlink-hq/interlink-monitoring-stack
+```
+
+2. Change the values in the `values.yaml` file to match your environment.
+3. Create a namespace for the monitoring system using the following command:
+
+```bash
+kubectl create namespace interlink-mon
+```
+
+4. Install the monitoring system using the following command:
+
+```bash
+helm upgrade --install helm-vk-monitoring-release interlink-monitoring-stack/ -n interlink-mon --debug
+```
+
+5. Deploy (or re-deploy) your Virtual Kubelet stack setting the
+ `TELEMETRY_ENDPOINT` Environment Variable to your actual endpoint. If not
+ set, it defaults to `localhost:4317`. Remember to enable the monitoring by
+ also setting `ENABLE_TRACING` to 1. If you are using an external Tempo
+ instance which is not in the same cluster as the VK and mutual TLS is
+ enabled, you have to set the `TELEMETRY_CA_CRT_FILEPATH` that points to the
+ CA certificate file used by Tempo to sign the certificates, the
+ `TELEMETRY_CLIENT_KEY_FILEPATH` that points to the client key file used by
+ the VK to authenticate itself to Tempo and the
+ `TELEMETRY_CLIENT_CRT_FILEPATH` that points to the client certificate file
+ used by the VK to authenticate itself to Tempo. Finally, if the TLS
+ certificates on Tempo are signed by an unknown authority, you can set the
+ `TELEMETRY_INSECURE_SKIP_VERIFY` to true.
+
+6. Access Grafana dashboard through the domain you defined in the `values.yaml`
+ file with the credentials you defined in the `values.yaml` file.
+
+The following image shows the Grafana dashboard:
+
+
diff --git a/docs/versioned_docs/version-0.6.x/guides/06-enable-service-accounts.mdx b/docs/versioned_docs/version-0.6.x/guides/06-enable-service-accounts.mdx
new file mode 100644
index 00000000..7426052b
--- /dev/null
+++ b/docs/versioned_docs/version-0.6.x/guides/06-enable-service-accounts.mdx
@@ -0,0 +1,373 @@
+---
+sidebar_position: 6
+---
+import ThemedImage from '@theme/ThemedImage';
+import useBaseUrl from '@docusaurus/useBaseUrl';
+
+# Enable service accounts
+
+By default, InterLink does not translate Kubernetes Service Accounts from the Pod to the Plugin. That means workloads that need to interact with the Kubernetes API, like Argo, will not work.
+
+However, after following the deployment and configuration steps in this guide, InterLink will provide the means for the Plugin to do so. This has been tested with the InterLink Slurm Plugin and Argo.
+
+There are two parts on this guide: how to deploy means to access Kubernetes API service from external cluster network, and how to configure InterLink so that Plugin can access it.
+
+The prerequisite of this guide are:
+- provide an external way to access Kubernetes API service (out of scope of InterLink, but an example is written below)
+- provide certificates (that can be self-signed), and its CA root certificate (also out of scope of InterLink, but an example is written below)
+
+## Provide an external way to access Kubernetes API service
+
+By default, InterLink Plugin containers cannot access Kubernetes internal network. Thus they cannot access Kubernetes API service (`kubernetes.default.svc.cluster.local`). Kubernetes offers ways to access internal services (see https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types):
+- LoadBalancer
+- NodePort
+- ...
+
+Because this is highly dependent on the Kubernetes cluster infrastructure, this deployment is out of scope of InterLink's automatic deployment. However, below are examples of how and what to deploy, using an Ingress. This requires a bit of Ingress knowledge as a prerequisite.
+
+### Design of ingress use
+
+The Kubernetes cluster can already contain an Ingress resource to let external access to web services. However it is a best-practice to separate internal (meaning flow between Kubernetes and Plugin infrastructure, like Slurm machines) and external flows (meaning flow between Kubernetes and the world).
+
+This guide will thus deploy, as an example, another ingress separate from the already existing ingress, if it exists. This also works if there is no ingress yet.
+
+Here is a diagram (generated with https://asciiflow.com) with Interlink Slurm Plugin as example. This Kubernetes cluster is deployed next to a Slurm cluster, but it can also be deployed on cloud.
+
+Because Ingress can only serve services in the same namespace, and because Kubernetes API is in default namespace, different from the Ingress "ingress-nginx-internal", a reverse-proxy NGINX HTTPS is instantiated to make Kubernetes API available to Ingress.
+
+```
+ ┌───────────────────────────┐
+ │ │
+ │ Kubernetes cluster │
+ │┌────────┐ ┌───────────┐ │
+ ││K8S API ├───┤Nginx HTTPS│ │
+ │└────────┘ └─────┬─────┘ │
+ │ │ │ Slurm cluster
+ └─┬─────────────────┼───────┘ │
+ │Ingress │Ingress ──────────┴─────────
+ │TCP 80, 443 │TCP 6080, 60443 Slurm network
+ ┌─────┴────┐ ┌───┴──────┐ ──────────┬─────────
+ │ External │ │ Internal │ │
+ │ Firewall │ │ Firewall │ │
+ └─────┬────┘ └───┬──────┘ ┌────────┐ │
+ │ └──────────────┤ Router ├─────┘
+ xxxxxxxxx xxxx └────────┘
+ x xxx xxx
+ xx x
+ xx Internet x
+x xxxxx
+x x xx
+xxxxxxx xxx
+ xxxxxxxxx
+```
+
+### NGINX HTTPS
+
+This deploys a reverse-proxy.
+
+```
+cat < $HOME/.interlink/logs/interlink.log &
+echo $! > $HOME/.interlink/interlink.pid
+```
+
+## Kubernetes Cluster Setup
+
+### Create Kubernetes Secrets for Certificates
+
+Before deploying the Helm chart, create the necessary secrets:
+
+```bash
+# Create namespace
+kubectl create namespace interlink
+
+# Create secret with client certificates for Virtual Kubelet
+# Note: The secret name must match the pattern: <nodeName>-tls-certs
+# where <nodeName> corresponds to the nodeName value in your helm values
+kubectl create secret generic my-node-tls-certs \
+ --from-file=ca.crt=$HOME/.interlink/certs/ca.pem \
+ --from-file=tls.crt=$HOME/.interlink/certs/client-cert.pem \
+ --from-file=tls.key=$HOME/.interlink/certs/client-key.pem \
+ -n interlink
+```
+
+### Deploy with Helm Chart
+
+Create a custom values file for mTLS deployment:
+
+```bash
+# Create values file for mTLS
+cat > $HOME/.interlink/mtls-values.yaml << EOF
+nodeName: "my-node"
+
+virtualNode:
+ resources:
+ CPUs: 8
+ memGiB: 49
+ pods: 100
+ HTTPProxies:
+ HTTP: null
+ HTTPs: null
+ HTTP:
+ insecure: true
+ CACert: ""
+ kubeletHTTP:
+ insecure: true
+ # Tracing configuration
+ tracing:
+ enabled: false
+
+interlink:
+ enabled: false
+ address: https://172.16.213.51
+ port: 3000
+ tls:
+ enabled: true
+ certFile: "/etc/vk/certs/tls.crt"
+ keyFile: "/etc/vk/certs/tls.key"
+ caCertFile: "/etc/vk/certs/ca.crt"
+EOF
+
+# Deploy with mTLS configuration
+export INTERLINK_CHART_VERSION="X.X.X"
+helm upgrade --install \
+ --create-namespace \
+ -n interlink \
+ my-node \
+ oci://ghcr.io/interlink-hq/interlink-helm-chart/interlink \
+ --version $INTERLINK_CHART_VERSION \
+ --values $HOME/.interlink/mtls-values.yaml
+```
+
+:::warning
+
+Remember to pick the [version of the chart](https://github.com/interlink-hq/interlink-helm-chart/blob/main/interlink/Chart.yaml#L18) and put it into the `INTERLINK_CHART_VERSION` env var above.
+
+:::
+
+## Security Considerations
+
+### Certificate Management
+
+- Implement regular certificate rotation for production deployments
+- Store private keys securely with restricted file permissions (600)
+- Keep CA private key highly secure and consider using a proper PKI solution
+- Monitor certificate expiration dates
+
+### Network Security
+
+```bash
+# Example firewall configuration
+sudo ufw allow from <VIRTUAL_KUBELET_IP> to any port 3000 comment "InterLink mTLS API"
+sudo ufw deny 3000 comment "Block public access to InterLink API"
+```
+
+### Certificate Validation
+
+```bash
+# Verify certificate details and chain
+openssl x509 -in server-cert.pem -text -noout
+openssl verify -CAfile ca.pem server-cert.pem
+
+# Test mTLS connection
+openssl s_client -connect YOUR_EDGE_NODE_IP:3000 -CAfile ca.pem -cert client-cert.pem -key client-key.pem
+```
+
+## Troubleshooting
+
+### Common Issues
+
+1. **Certificate verification errors** - Check certificate chain and CA
+2. **Permission denied** - Verify file permissions and paths
+3. **Handshake failures** - Ensure client certificate is signed by the same CA
+
+### Debug Commands
+
+```bash
+# Check certificate chain
+openssl verify -CAfile ca.pem client-cert.pem
+
+# Test server connectivity
+curl -v --cacert ca.pem --cert client-cert.pem --key client-key.pem https://YOUR_EDGE_NODE_IP:3000/pinglink
+
+# Check InterLink logs for TLS errors
+tail -f $HOME/.interlink/logs/interlink.log | grep -i tls
+```
+
+### Log Messages to Monitor
+
+- "Loaded CA certificate for TLS client"
+- "mTLS enabled - requiring client certificates"
+- "Failed to create TLS HTTP client"
+- "certificate verification failed"
+
+## Testing Your Setup
+
+Test the mTLS connection to ensure everything is working correctly:
+
+```bash
+# Test the pinglink endpoint
+curl -v --cacert $HOME/.interlink/certs/ca.pem \
+ --cert $HOME/.interlink/certs/client-cert.pem \
+ --key $HOME/.interlink/certs/client-key.pem \
+ https://YOUR_EDGE_NODE_IP:3000/pinglink
+```
+
+If successful, you should receive a response indicating the InterLink service is running and accessible via mTLS.
+
+:::note
+
+You can find a demo pod to test your setup [here](../guides/develop-a-plugin#lets-test-it-out).
+
+:::
diff --git a/docs/versioned_docs/version-0.6.x/guides/08-systemd-deployment.mdx b/docs/versioned_docs/version-0.6.x/guides/08-systemd-deployment.mdx
new file mode 100644
index 00000000..0c82eda6
--- /dev/null
+++ b/docs/versioned_docs/version-0.6.x/guides/08-systemd-deployment.mdx
@@ -0,0 +1,456 @@
+---
+sidebar_position: 8
+---
+
+# Systemd Deployment Guide
+
+This guide explains how to deploy InterLink components using systemd services
+for production environments. Systemd provides automatic startup, restart on
+failure, proper logging, and robust service management.
+
+## Overview
+
+Using systemd to manage InterLink components offers several advantages:
+
+- **Automatic startup**: Services start automatically on boot
+- **Automatic restart**: Failed services are automatically restarted
+- **Centralized logging**: Logs are managed through journald
+- **Process supervision**: Systemd monitors service health
+- **Security isolation**: Services run with limited privileges
+- **Dependency management**: Services start in the correct order
+
+## Prerequisites
+
+- systemd-enabled Linux distribution (most modern distributions)
+- Root access to create system services
+- InterLink binaries and configuration files
+- Basic understanding of systemd service files
+
+## System Setup
+
+### Create System User
+
+First, create a dedicated system user for running InterLink services:
+
+```bash
+sudo useradd --system --create-home --home-dir /opt/interlink --shell /bin/bash interlink
+sudo mkdir -p /opt/interlink/{bin,config,logs}
+sudo chown -R interlink:interlink /opt/interlink
+```
+
+### Copy Binaries and Configuration
+
+Move your InterLink components to the system directories:
+
+```bash
+# Copy binaries
+sudo cp $HOME/.interlink/bin/* /opt/interlink/bin/
+sudo chmod +x /opt/interlink/bin/*
+
+# Copy configuration files
+sudo cp $HOME/.interlink/config/* /opt/interlink/config/
+sudo cp $HOME/.interlink/manifests/*.yaml /opt/interlink/config/
+
+# Set ownership
+sudo chown -R interlink:interlink /opt/interlink
+```
+
+## Service Configuration
+
+### OAuth2 Proxy Service
+
+Create the OAuth2 proxy systemd service:
+
+```ini title="/etc/systemd/system/interlink-oauth2-proxy.service"
+[Unit]
+Description=OAuth2 Proxy for InterLink
+After=network.target
+Wants=network.target
+
+[Service]
+Type=simple
+User=interlink
+Group=interlink
+WorkingDirectory=/opt/interlink
+Environment=OAUTH2_PROXY_CONFIG=/opt/interlink/config/oauth2-proxy.cfg
+ExecStart=/opt/interlink/bin/oauth2-proxy --config=/opt/interlink/config/oauth2-proxy.cfg
+ExecReload=/bin/kill -HUP $MAINPID
+Restart=always
+RestartSec=10
+StandardOutput=append:/opt/interlink/logs/oauth2-proxy.log
+StandardError=append:/opt/interlink/logs/oauth2-proxy.log
+
+# Security settings
+NoNewPrivileges=true
+ProtectSystem=strict
+ProtectHome=true
+ReadWritePaths=/opt/interlink/logs /tmp
+PrivateTmp=true
+
+[Install]
+WantedBy=multi-user.target
+```
+
+### InterLink API Server Service
+
+Create the InterLink API server systemd service:
+
+```ini title="/etc/systemd/system/interlink-api.service"
+[Unit]
+Description=InterLink API Server
+After=network.target interlink-oauth2-proxy.service
+Wants=network.target
+Requires=interlink-oauth2-proxy.service
+
+[Service]
+Type=simple
+User=interlink
+Group=interlink
+WorkingDirectory=/opt/interlink
+Environment=INTERLINKCONFIGPATH=/opt/interlink/config/InterLinkConfig.yaml
+ExecStart=/opt/interlink/bin/interlink
+ExecReload=/bin/kill -HUP $MAINPID
+Restart=always
+RestartSec=10
+StandardOutput=append:/opt/interlink/logs/interlink-api.log
+StandardError=append:/opt/interlink/logs/interlink-api.log
+
+# Security settings
+NoNewPrivileges=true
+ProtectSystem=strict
+ProtectHome=true
+ReadWritePaths=/opt/interlink/logs /opt/interlink/jobs /tmp
+PrivateTmp=true
+
+[Install]
+WantedBy=multi-user.target
+```
+
+### Plugin Service
+
+Create the plugin systemd service (example for Docker plugin):
+
+```ini title="/etc/systemd/system/interlink-docker-plugin.service"
+[Unit]
+Description=InterLink Docker Plugin
+After=network.target docker.service interlink-api.service
+Wants=network.target
+Requires=docker.service interlink-api.service
+
+[Service]
+Type=simple
+User=interlink
+Group=interlink
+WorkingDirectory=/opt/interlink
+Environment=INTERLINKCONFIGPATH=/opt/interlink/config/plugin-config.yaml
+ExecStart=/opt/interlink/bin/plugin
+ExecReload=/bin/kill -HUP $MAINPID
+Restart=always
+RestartSec=10
+StandardOutput=append:/opt/interlink/logs/plugin.log
+StandardError=append:/opt/interlink/logs/plugin.log
+
+# Security settings
+NoNewPrivileges=true
+ProtectSystem=strict
+ProtectHome=true
+ReadWritePaths=/opt/interlink/logs /opt/interlink/jobs /tmp /var/run/docker.sock
+PrivateTmp=true
+
+# Docker access
+SupplementaryGroups=docker
+
+[Install]
+WantedBy=multi-user.target
+```
+
+### SLURM Plugin Service
+
+For SLURM plugin environments:
+
+```ini title="/etc/systemd/system/interlink-slurm-plugin.service"
+[Unit]
+Description=InterLink SLURM Plugin
+After=network.target interlink-api.service
+Wants=network.target
+Requires=interlink-api.service
+
+[Service]
+Type=simple
+User=interlink
+Group=interlink
+WorkingDirectory=/opt/interlink
+Environment=SLURMCONFIGPATH=/opt/interlink/config/plugin-config.yaml
+ExecStart=/opt/interlink/bin/plugin
+ExecReload=/bin/kill -HUP $MAINPID
+Restart=always
+RestartSec=10
+StandardOutput=append:/opt/interlink/logs/plugin.log
+StandardError=append:/opt/interlink/logs/plugin.log
+
+# Security settings
+NoNewPrivileges=true
+ProtectSystem=strict
+ProtectHome=true
+ReadWritePaths=/opt/interlink/logs /opt/interlink/jobs /tmp
+PrivateTmp=true
+
+[Install]
+WantedBy=multi-user.target
+```
+
+## Log Management
+
+### Log Rotation Configuration
+
+Create log rotation configuration to prevent log files from growing too large:
+
+```bash title="/etc/logrotate.d/interlink"
+/opt/interlink/logs/*.log {
+    daily
+    rotate 30
+    compress
+    delaycompress
+    missingok
+    notifempty
+    # copytruncate is required here: the services log via systemd's
+    # StandardOutput=append:, and systemd keeps the file descriptor open,
+    # so a rename-based rotation would leave logs going to the old file.
+    copytruncate
+}
+```
+
+## Service Management
+
+### Enable and Start Services
+
+Enable services to start on boot and start them in the correct order:
+
+```bash
+# Enable services to start on boot
+sudo systemctl daemon-reload
+sudo systemctl enable interlink-oauth2-proxy interlink-api interlink-docker-plugin
+
+# Start services in order
+sudo systemctl start interlink-oauth2-proxy
+sudo systemctl start interlink-api
+sudo systemctl start interlink-docker-plugin
+
+# Check service status
+sudo systemctl status interlink-oauth2-proxy
+sudo systemctl status interlink-api
+sudo systemctl status interlink-docker-plugin
+```
+
+### Common Operations
+
+Common systemd operations for managing InterLink services:
+
+```bash
+# View service logs
+sudo journalctl -u interlink-api -f
+sudo journalctl -u interlink-oauth2-proxy -f
+sudo journalctl -u interlink-docker-plugin -f
+
+# Restart a service
+sudo systemctl restart interlink-api
+
+# Stop all InterLink services
+sudo systemctl stop interlink-docker-plugin interlink-api interlink-oauth2-proxy
+
+# Start all InterLink services
+sudo systemctl start interlink-oauth2-proxy interlink-api interlink-docker-plugin
+
+# View service configuration
+sudo systemctl cat interlink-api
+
+# Check service dependencies
+sudo systemctl list-dependencies interlink-api
+```
+
+## Monitoring and Health Checks
+
+### Create Health Check Script
+
+Create a simple health check script:
+
+```bash title="/opt/interlink/bin/health-check.sh"
+#!/bin/bash
+
+# Health check script for InterLink services
+SOCKET_PATH="/opt/interlink/.interlink.sock"
+LOG_FILE="/opt/interlink/logs/health-check.log"
+
+echo "$(date): Starting health check" >> "$LOG_FILE"
+
+# Check if socket exists and is responding
+if [ -S "$SOCKET_PATH" ]; then
+ response=$(curl -s --unix-socket "$SOCKET_PATH" http://unix/pinglink)
+ if [ $? -eq 0 ]; then
+ echo "$(date): Health check passed - $response" >> "$LOG_FILE"
+ exit 0
+ else
+ echo "$(date): Health check failed - no response from socket" >> "$LOG_FILE"
+ exit 1
+ fi
+else
+ echo "$(date): Health check failed - socket not found" >> "$LOG_FILE"
+ exit 1
+fi
+```
+
+```bash
+# Make executable
+sudo chmod +x /opt/interlink/bin/health-check.sh
+sudo chown interlink:interlink /opt/interlink/bin/health-check.sh
+```
+
+### Systemd Timer for Health Checks
+
+Add a systemd timer for regular health checks:
+
+```ini title="/etc/systemd/system/interlink-health-check.service"
+[Unit]
+Description=InterLink Health Check
+After=interlink-api.service
+Requires=interlink-api.service
+
+[Service]
+Type=oneshot
+User=interlink
+Group=interlink
+ExecStart=/opt/interlink/bin/health-check.sh
+```
+
+```ini title="/etc/systemd/system/interlink-health-check.timer"
+[Unit]
+Description=Run InterLink Health Check every 5 minutes
+Requires=interlink-health-check.service
+
+[Timer]
+OnCalendar=*:0/5
+Persistent=true
+
+[Install]
+WantedBy=timers.target
+```
+
+Enable the health check timer:
+
+```bash
+sudo systemctl daemon-reload
+sudo systemctl enable interlink-health-check.timer
+sudo systemctl start interlink-health-check.timer
+```
+
+## Troubleshooting
+
+### Common Issues
+
+```bash
+# Check service status
+sudo systemctl status interlink-api --no-pager -l
+
+# View recent logs
+sudo journalctl -u interlink-api --since "1 hour ago"
+
+# Check configuration syntax
+sudo systemd-analyze verify /etc/systemd/system/interlink-api.service
+
+# View service dependencies
+sudo systemctl list-dependencies interlink-api
+
+# Reset failed state
+sudo systemctl reset-failed interlink-api
+```
+
+### Service Debugging
+
+```bash
+# Run service manually for debugging
+sudo -u interlink /opt/interlink/bin/interlink
+
+# Check environment variables
+sudo systemctl show interlink-api --property=Environment
+
+# View service logs with priority
+sudo journalctl -u interlink-api -p err
+
+# Monitor service activity
+sudo systemctl status interlink-api --lines=50
+```
+
+## Security Considerations
+
+The systemd configuration includes several security features:
+
+1. **Dedicated user**: Services run as non-privileged `interlink` user
+2. **Filesystem protection**: `ProtectSystem` and `ProtectHome` limit filesystem
+ access
+3. **No new privileges**: `NoNewPrivileges` prevents privilege escalation
+4. **Private temp**: `PrivateTmp` provides isolated temporary directories
+5. **Minimal permissions**: `ReadWritePaths` restricts write access to necessary
+ directories
+
+### Additional Security
+
+For additional security, consider:
+
+```bash
+# Set up firewall rules
+sudo ufw allow 30443/tcp comment "OAuth2 Proxy"
+sudo ufw allow from YOUR_CLUSTER_IP to any port 3000 comment "InterLink API"
+
+# Secure configuration files
+sudo chmod 640 /opt/interlink/config/*
+sudo chown root:interlink /opt/interlink/config/*
+
+# Secure log directory
+sudo chmod 750 /opt/interlink/logs
+sudo chown interlink:interlink /opt/interlink/logs
+```
+
+## Advanced Configuration
+
+### Resource Limits
+
+Add resource limits to service files:
+
+```ini
+[Service]
+# Memory limits
+MemoryMax=2G
+MemoryHigh=1.5G
+
+# CPU limits
+CPUQuota=200%
+
+# Process limits
+LimitNOFILE=65536
+LimitNPROC=4096
+```
+
+### Custom Environment
+
+Set custom environment variables:
+
+```ini
+[Service]
+Environment=LOG_LEVEL=debug
+Environment=CUSTOM_CONFIG=/opt/interlink/custom.yaml
+EnvironmentFile=/opt/interlink/config/env.conf
+```
+
+### Notifications
+
+Configure systemd notifications:
+
+```ini
+[Service]
+Type=notify
+NotifyAccess=all
+WatchdogSec=30
+```
+
+This comprehensive systemd setup provides a robust, secure, and manageable
+solution for deploying InterLink components in production environments.
+
diff --git a/docs/versioned_docs/version-0.6.x/guides/09-pod-annotations.mdx b/docs/versioned_docs/version-0.6.x/guides/09-pod-annotations.mdx
new file mode 100644
index 00000000..74b82305
--- /dev/null
+++ b/docs/versioned_docs/version-0.6.x/guides/09-pod-annotations.mdx
@@ -0,0 +1,396 @@
+---
+title: "Pod Annotations Reference"
+description: "Complete reference guide for all available pod annotations in interLink"
+sidebar_position: 9
+---
+
+# Pod Annotations Reference
+
+This document provides a comprehensive reference for all pod annotations available in interLink. These annotations allow you to customize pod behavior, configure networking, and control various aspects of pod execution.
+
+## Overview
+
+InterLink supports several categories of annotations:
+
+- **[VPN & Networking](#vpn--networking)**: Configure pod IP allocation and VPN connectivity
+- **[Wstunnel Integration](#wstunnel-integration)**: Control websocket tunneling for exposed ports
+- **[Job Management](#job-management)**: Handle remote job execution and tracking
+- **[System Annotations](#system-annotations)**: Internal annotations used by interLink components
+
+---
+
+## VPN & Networking
+
+### `interlink.eu/pod-vpn`
+
+Enables VPN connectivity for the pod, allowing it to access the cluster's internal network.
+
+**Usage:**
+```yaml
+apiVersion: v1
+kind: Pod
+metadata:
+ name: my-pod
+ annotations:
+ interlink.eu/pod-vpn: "true"
+spec:
+ containers:
+ - name: app
+ image: nginx:latest
+```
+
+**Behavior:**
+- When present, the pod will be assigned an IP from the virtual node's CIDR range
+- Enables connectivity to Kubernetes services and other pods
+- Requires proper VPN configuration on the interLink deployment
+- Takes precedence over wstunnel networking features
+
+**Default:** Not set (VPN disabled)
+
+---
+
+### `interlink.eu/pod-ip`
+
+Specifies or retrieves the assigned IP address for the pod.
+
+**Usage:**
+```yaml
+apiVersion: v1
+kind: Pod
+metadata:
+ name: my-pod
+ annotations:
+ interlink.eu/pod-ip: "10.244.1.100"
+spec:
+ containers:
+ - name: app
+ image: nginx:latest
+```
+
+**Behavior:**
+- Can be manually set to request a specific IP (subject to availability)
+- Automatically populated by interLink when VPN is enabled
+- Used internally for IP allocation and tracking
+- IP must be within the configured CIDR range
+
+**Default:** Automatically assigned when VPN is enabled
+
+---
+
+## Wstunnel Integration
+
+### `interlink.virtual-kubelet.io/wstunnel-timeout`
+
+Configures the timeout for wstunnel infrastructure creation and IP assignment.
+
+**Usage:**
+```yaml
+apiVersion: v1
+kind: Pod
+metadata:
+ name: my-pod
+ annotations:
+ interlink.virtual-kubelet.io/wstunnel-timeout: "60s"
+spec:
+ containers:
+ - name: web-server
+ image: nginx:latest
+ ports:
+ - containerPort: 80
+ name: http
+```
+
+**Behavior:**
+- Only effective when wstunnel feature is enabled (`Network.EnableTunnel: true`)
+- Applies to pods with exposed ports and no VPN annotation
+- Controls how long to wait for the wstunnel pod to get an IP address
+- If timeout is exceeded, pod creation fails and resources are cleaned up
+
+**Valid Values:**
+- Duration strings: `30s`, `1m`, `2m30s`, `10m`
+- Must be parseable by Go's `time.ParseDuration()`
+
+**Default:** `30s`
+
+---
+
+### `interlink.eu/wstunnel-client-commands`
+
+Contains the complete wstunnel client command to connect to exposed ports from a remote location.
+
+**Usage:**
+```yaml
+apiVersion: v1
+kind: Pod
+metadata:
+ name: my-pod
+ annotations:
+ interlink.eu/wstunnel-client-commands: |
+ curl -L https://github.com/erebe/wstunnel/releases/latest/download/wstunnel-linux-x64 -o wstunnel && chmod +x wstunnel
+
+ ./wstunnel client --http-upgrade-path-prefix a1b2c3d4 -R tcp://[::]:8080:localhost:8080 -R tcp://[::]:9090:localhost:9090 ws://my-pod-wstunnel.example.com:80
+spec:
+ containers:
+ - name: web-server
+ image: nginx:latest
+ ports:
+ - containerPort: 8080
+ name: http
+ - containerPort: 9090
+ name: metrics
+```
+
+**Behavior:**
+- Automatically generated by interLink when wstunnel feature is enabled
+- Only added to pods with exposed ports and no VPN annotation
+- Contains the complete command with download instructions and client connection
+- Includes unique random password for secure tunnel authentication
+- Multiple ports are handled with multiple `-R` options in a single command
+- Uses the configured wildcard DNS or service name for ingress endpoint
+
+**Command Structure:**
+1. **Download:** Downloads the latest wstunnel binary for Linux x64
+2. **Connect:** Establishes websocket tunnel with:
+ - `--http-upgrade-path-prefix`: Unique password for authentication
+ - `-R tcp://[::]:PORT:localhost:PORT`: Port forwarding rules (one per exposed port)
+ - `ws://ENDPOINT:80`: Websocket endpoint (ingress URL)
+
+**Default:** Set automatically by interLink when wstunnel is enabled
+
+---
+
+### `interlink.eu/wstunnel-extra-ports`
+
+Specifies additional ports to forward through the wstunnel beyond those defined in the pod specification.
+
+**Usage:**
+```yaml
+apiVersion: v1
+kind: Pod
+metadata:
+ name: my-pod
+ annotations:
+ interlink.eu/wstunnel-extra-ports: "8080,9090:metrics:UDP,3000:api"
+spec:
+ containers:
+ - name: web-server
+ image: nginx:latest
+ ports:
+ - containerPort: 80
+ name: http
+```
+
+**Format:**
+- **Simple port**: `8080` - Forward port 8080 with TCP protocol
+- **Named port**: `9090:metrics` - Forward port 9090 with name "metrics" and TCP protocol
+- **Full specification**: `3000:api:UDP` - Forward port 3000 with name "api" and UDP protocol
+- **Multiple ports**: `8080,9090:metrics:UDP,3000:api` - Comma-separated list
+
+**Behavior:**
+- Only effective when wstunnel feature is enabled (`Network.EnableTunnel: true`)
+- Allows forwarding ports not explicitly defined in container specs
+- Useful for internal services, debug ports, or dynamic port allocation
+- Ports are added to the generated wstunnel client command
+- Invalid port specifications are silently ignored
+
+**Protocol Support:**
+- `TCP` (default if not specified)
+- `UDP`
+- Case-insensitive protocol names
+
+**Default:** Not set (no additional ports forwarded)
+
+---
+
+## Job Management
+
+### `JobID`
+
+Tracks the remote job identifier for pods executed through interLink.
+
+**Usage:**
+```yaml
+apiVersion: v1
+kind: Pod
+metadata:
+ name: my-pod
+ annotations:
+ JobID: "slurm-12345"
+spec:
+ containers:
+ - name: compute
+ image: ubuntu:latest
+ command: ["sleep", "3600"]
+```
+
+**Behavior:**
+- Automatically set by interLink when a pod is successfully submitted to a remote system
+- Used to correlate Kubernetes pods with remote jobs (e.g., SLURM, PBS, Docker)
+- Required for pod status tracking and lifecycle management
+- Used during cluster restart to reassign pods to the virtual kubelet
+
+**Default:** Set automatically by interLink
+
+---
+
+## System Annotations
+
+### `interlink.virtual-kubelet.io/ping-response`
+
+Stores the response from interLink API health checks.
+
+**Usage:**
+This annotation is set automatically by the virtual kubelet.
+
+**Behavior:**
+- Updated during periodic health checks to the interLink API
+- Contains the response body from successful ping operations
+- Used for monitoring and debugging connectivity issues
+- Visible in node annotations, not pod annotations
+
+**Default:** Set automatically by virtual kubelet
+
+---
+
+## Configuration Examples
+
+### Basic Pod with VPN
+
+```yaml
+apiVersion: v1
+kind: Pod
+metadata:
+ name: vpn-enabled-pod
+ annotations:
+ interlink.eu/pod-vpn: "true"
+spec:
+ containers:
+ - name: app
+ image: nginx:latest
+ ports:
+ - containerPort: 80
+ restartPolicy: Never
+```
+
+### Pod with Wstunnel (No VPN)
+
+```yaml
+apiVersion: v1
+kind: Pod
+metadata:
+ name: wstunnel-pod
+ annotations:
+ interlink.virtual-kubelet.io/wstunnel-timeout: "2m"
+spec:
+ containers:
+ - name: web-server
+ image: nginx:latest
+ ports:
+ - containerPort: 80
+ name: http
+ - containerPort: 443
+ name: https
+ restartPolicy: Never
+```
+
+### Pod with Custom IP
+
+```yaml
+apiVersion: v1
+kind: Pod
+metadata:
+ name: custom-ip-pod
+ annotations:
+ interlink.eu/pod-vpn: "true"
+ interlink.eu/pod-ip: "10.244.1.50"
+spec:
+ containers:
+ - name: app
+ image: alpine:latest
+ command: ["sleep", "3600"]
+ restartPolicy: Never
+```
+
+---
+
+## Best Practices
+
+### VPN Configuration
+
+- Use VPN annotations when pods need to access Kubernetes services
+- Ensure CIDR ranges are properly configured in the virtual kubelet config
+- Monitor IP allocation to avoid conflicts
+
+### Wstunnel Usage
+
+- Enable wstunnel only when needed for external port access
+- Use appropriate timeouts based on your infrastructure
+- Remember that wstunnel and VPN are mutually exclusive
+
+### Job Management
+
+- Never manually set `JobID` annotations
+- Use `JobID` for debugging and monitoring remote job execution
+- Check `JobID` presence to verify successful job submission
+
+### Networking Considerations
+
+- VPN takes precedence over wstunnel networking
+- Pods with exposed ports automatically trigger wstunnel creation (if enabled)
+- Use `interlink.eu/pod-vpn` for internal cluster communication
+- Use wstunnel for external port exposure without VPN overhead
+
+---
+
+## Troubleshooting
+
+### Common Issues
+
+1. **Pod IP not assigned**
+ - Check VPN configuration and CIDR settings
+ - Verify `interlink.eu/pod-vpn` annotation syntax
+ - Review virtual kubelet logs for IP allocation errors
+
+2. **Wstunnel timeout errors**
+ - Increase timeout with `interlink.virtual-kubelet.io/wstunnel-timeout`
+ - Check network connectivity and DNS resolution
+ - Verify wstunnel template and configuration
+
+3. **Job not tracked**
+ - Verify `JobID` annotation is set after pod creation
+ - Check interLink API connectivity
+ - Review plugin logs for job submission errors
+
+### Debug Commands
+
+```bash
+# Check pod annotations
+kubectl get pod -o yaml | grep -A 10 annotations
+
+# Check virtual kubelet node annotations
+kubectl get node -o yaml | grep -A 5 annotations
+
+# Check wstunnel resources (if applicable)
+kubectl get deployment,service,ingress -l interlink.virtual-kubelet.io/type=wstunnel
+```
+
+---
+
+## Version Compatibility
+
+| Annotation | Version | Notes |
+|------------|---------|-------|
+| `interlink.eu/pod-vpn` | v0.1.0+ | Core VPN functionality |
+| `interlink.eu/pod-ip` | v0.1.0+ | IP allocation and tracking |
+| `JobID` | v0.1.0+ | Remote job management |
+| `interlink.virtual-kubelet.io/wstunnel-timeout` | v0.6.0+ | Wstunnel integration |
+| `interlink.virtual-kubelet.io/ping-response` | v0.2.0+ | Health check responses |
+
+---
+
+## Related Documentation
+
+- [Deploy interLink](./01-deploy-interlink.mdx) - Basic deployment guide
+- [API Reference](./03-api-reference.mdx) - REST API documentation
+- [Monitoring](./05-monitoring.md) - Observability and monitoring setup
+- [mTLS Deployment](./07-mtls-deployment.mdx) - Secure communication setup
\ No newline at end of file
diff --git a/docs/versioned_docs/version-0.6.x/guides/10-wstunnel-configuration.mdx b/docs/versioned_docs/version-0.6.x/guides/10-wstunnel-configuration.mdx
new file mode 100644
index 00000000..f0fb55be
--- /dev/null
+++ b/docs/versioned_docs/version-0.6.x/guides/10-wstunnel-configuration.mdx
@@ -0,0 +1,768 @@
+---
+title: "Wstunnel Configuration"
+description: "Complete guide to configuring websocket tunneling for port exposure in interLink"
+sidebar_position: 10
+---
+
+# Wstunnel Configuration
+
+InterLink's wstunnel feature enables secure port exposure for pods with containers that have exposed ports. This feature automatically creates websocket tunnel infrastructure outside the virtual node, allowing external access to pod services without requiring VPN connectivity.
+
+## Overview
+
+The wstunnel integration provides:
+
+- **Automatic Infrastructure Creation**: Deployment, Service, and Ingress resources
+- **Secure Tunneling**: WebSocket-based tunneling with random password protection
+- **Port Forwarding**: Automatic forwarding of all exposed container ports
+- **External Access**: Ingress-based external access with customizable DNS
+- **Resource Management**: Automatic cleanup when pods are deleted
+
+:::info
+Wstunnel is disabled by default and must be explicitly enabled in the configuration.
+:::
+
+---
+
+## Configuration
+
+### Virtual Kubelet Configuration
+
+Add the wstunnel configuration to your Virtual Kubelet config file:
+
+```yaml
+# VirtualKubeletConfig.yaml
+InterlinkURL: "http://interlink-api:3000"
+InterlinkPort: "3000"
+VerboseLogging: true
+ErrorsOnlyLogging: false
+
+# Network configuration
+Network:
+ EnableTunnel: true # Enable wstunnel feature
+ WildcardDNS: "tunnel.example.com" # DNS domain for ingress
+ WstunnelTemplatePath: "/etc/templates/custom.yaml" # Optional: custom template path
+ WstunnelCommand: "custom-wstunnel-command" # Optional: custom wstunnel client command
+
+# Other configuration...
+Resources:
+ CPU: "10"
+ Memory: "20Gi"
+ Pods: "100"
+```
+
+### Configuration Options
+
+| Option | Type | Required | Default | Description |
+|--------|------|----------|---------|-------------|
+| `EnableTunnel` | `bool` | No | `false` | Enable/disable wstunnel feature |
+| `WildcardDNS` | `string` | Yes* | `""` | DNS domain for ingress hostnames (e.g., `*.tunnel.example.com` or `tunnel.example.com`) |
+| `WstunnelTemplatePath` | `string` | No | `""` | Path to custom wstunnel template |
+| `WstunnelCommand` | `string` | No | `DefaultWstunnelCommand` | Custom wstunnel client command template |
+
+*Required when `EnableTunnel` is `true`
+
+---
+
+## How It Works
+
+### Automatic Trigger
+
+Wstunnel infrastructure is automatically created when:
+1. `Network.EnableTunnel` is set to `true`
+2. Pod has containers with exposed ports
+3. Pod does NOT have `interlink.eu/pod-vpn` annotation
+
+### Resource Creation
+
+For each qualifying pod, interLink creates:
+
+1. **Deployment**: Runs wstunnel server with WireGuard
+2. **Service**: Exposes websocket and forwarded ports
+3. **Ingress**: Provides external access via DNS
+
+### Naming Convention
+
+Resources are named using the pattern: `{pod-name}-wstunnel`
+
+Example: Pod `my-web-app` → Resources `my-web-app-wstunnel`
+
+---
+
+## Template System
+
+### Default Template
+
+InterLink includes an embedded default template that creates:
+
+```yaml
+# Deployment with wstunnel server
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: {{.Name}}
+ namespace: {{.Namespace}}
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app.kubernetes.io/component: {{.Name}}
+ template:
+ metadata:
+ labels:
+ app.kubernetes.io/component: {{.Name}}
+ spec:
+ containers:
+ - name: wireguard
+ image: ghcr.io/dciangot/dciangot/wg:v0.2
+ command: ["bash", "-c"]
+ args:
+ - ./wstunnel server --log-lvl DEBUG --dns-resolver-prefer-ipv4 --restrict-http-upgrade-path-prefix {{.RandomPassword}} ws://0.0.0.0:8080
+ ports:
+ - containerPort: 8080
+ name: webhook
+ protocol: TCP
+ - containerPort: 51820
+ name: vpn
+ protocol: UDP
+ {{- range .ExposedPorts}}
+ - containerPort: {{.Port}}
+ name: {{.Name}}
+ protocol: {{.Protocol}}
+ {{- end}}
+ resources:
+ requests:
+ cpu: 100m
+ memory: 90Mi
+ nodeSelector:
+ kubernetes.io/os: linux
+
+---
+# Service for port exposure
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{.Name}}
+ namespace: {{.Namespace}}
+spec:
+ type: ClusterIP
+ selector:
+ app.kubernetes.io/component: {{.Name}}
+ ports:
+ - port: 8080
+ targetPort: 8080
+ name: ws
+ {{- range .ExposedPorts}}
+ - port: {{.Port}}
+ targetPort: {{.TargetPort}}
+ name: {{.Name}}
+ protocol: {{.Protocol}}
+ {{- end}}
+
+---
+# Ingress for external access
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+ name: {{.Name}}
+ namespace: {{.Namespace}}
+ annotations:
+ nginx.ingress.kubernetes.io/proxy-read-timeout: "3600"
+ nginx.ingress.kubernetes.io/proxy-send-timeout: "3600"
+ nginx.ingress.kubernetes.io/server-snippets: |
+ location / {
+ proxy_set_header Upgrade $http_upgrade;
+ proxy_set_header Connection "upgrade";
+ proxy_http_version 1.1;
+ proxy_set_header X-Forwarded-For $remote_addr;
+ proxy_set_header Host $host;
+ proxy_cache_bypass $http_upgrade;
+ }
+ kubernetes.io/ingress.class: "nginx"
+spec:
+ rules:
+ - host: ws-{{.Name}}.{{.WildcardDNS}}
+ http:
+ paths:
+ - path: /
+ pathType: Prefix
+ backend:
+ service:
+ name: {{.Name}}
+ port:
+ number: 8080
+```
+
+### Template Variables
+
+The template system provides these variables:
+
+| Variable | Type | Description | Example |
+|----------|------|-------------|---------|
+| `{{.Name}}` | `string` | Resource name | `my-web-app-wstunnel` |
+| `{{.Namespace}}` | `string` | Pod namespace | `default` |
+| `{{.RandomPassword}}` | `string` | Security password | `a1b2c3d4e5f6...` |
+| `{{.WildcardDNS}}` | `string` | DNS domain | `tunnel.example.com` |
+| `{{.ExposedPorts}}` | `[]PortMapping` | Port mappings | See below |
+
+### Port Mapping Structure
+
+```go
+type PortMapping struct {
+ Port int32 // Container port number
+ TargetPort int32 // Target port (usually same as Port)
+ Name string // Port name
+ Protocol string // Protocol (TCP/UDP)
+}
+```
+
+### Custom Templates
+
+You can provide custom templates by:
+
+1. **Creating a custom template file**:
+ ```yaml
+ # /etc/templates/custom-wstunnel.yaml
+ apiVersion: apps/v1
+ kind: Deployment
+ metadata:
+ name: {{.Name}}
+ namespace: {{.Namespace}}
+ spec:
+ # Your custom configuration...
+ ```
+
+2. **Configuring the template path**:
+ ```yaml
+ Network:
+ EnableTunnel: true
+ WildcardDNS: "tunnel.example.com"
+ WstunnelTemplatePath: "/etc/templates/custom-wstunnel.yaml"
+ ```
+
+3. **Mounting the template in the Virtual Kubelet deployment**:
+ ```yaml
+ apiVersion: apps/v1
+ kind: Deployment
+ metadata:
+ name: virtual-kubelet
+ spec:
+ template:
+ spec:
+ containers:
+ - name: virtual-kubelet
+ volumeMounts:
+ - name: wstunnel-template
+ mountPath: /etc/templates/custom-wstunnel.yaml
+ subPath: custom-wstunnel.yaml
+ volumes:
+ - name: wstunnel-template
+ configMap:
+ name: wstunnel-template
+ ```
+
+---
+
+## Pod Configuration
+
+### Basic Pod with Exposed Ports
+
+```yaml
+apiVersion: v1
+kind: Pod
+metadata:
+ name: web-server
+ namespace: default
+spec:
+ containers:
+ - name: nginx
+ image: nginx:latest
+ ports:
+ - containerPort: 80
+ name: http
+ protocol: TCP
+ - containerPort: 443
+ name: https
+ protocol: TCP
+ restartPolicy: Never
+```
+
+### Pod with Custom Timeout
+
+```yaml
+apiVersion: v1
+kind: Pod
+metadata:
+ name: web-server
+ namespace: default
+ annotations:
+ interlink.virtual-kubelet.io/wstunnel-timeout: "5m"
+spec:
+ containers:
+ - name: nginx
+ image: nginx:latest
+ ports:
+ - containerPort: 80
+ name: http
+ restartPolicy: Never
+```
+
+### Disabling Wstunnel for Specific Pods
+
+```yaml
+apiVersion: v1
+kind: Pod
+metadata:
+ name: web-server
+ namespace: default
+ annotations:
+ interlink.eu/pod-vpn: "true" # Use VPN instead of wstunnel
+spec:
+ containers:
+ - name: nginx
+ image: nginx:latest
+ ports:
+ - containerPort: 80
+ name: http
+ restartPolicy: Never
+```
+
+### Adding Extra Ports
+
+You can specify additional ports to forward beyond those defined in the pod specification using the `interlink.eu/wstunnel-extra-ports` annotation:
+
+```yaml
+apiVersion: v1
+kind: Pod
+metadata:
+ name: web-server
+ namespace: default
+ annotations:
+ interlink.eu/wstunnel-extra-ports: "8080,9090:metrics:UDP,3000:api"
+spec:
+ containers:
+ - name: nginx
+ image: nginx:latest
+ ports:
+ - containerPort: 80
+ name: http
+ restartPolicy: Never
+```
+
+**Extra Ports Format:**
+- **Simple port**: `8080` - Forward port 8080 with TCP protocol
+- **Named port**: `9090:metrics` - Forward port 9090 with name "metrics" and TCP protocol
+- **Full specification**: `3000:api:UDP` - Forward port 3000 with name "api" and UDP protocol
+- **Multiple ports**: `8080,9090:metrics:UDP,3000:api` - Comma-separated list
+
+This is useful for:
+- Internal services not exposed in container specs
+- Debug ports (e.g., pprof, metrics endpoints)
+- Dynamic port allocation scenarios
+- Services running on non-standard ports
+
+---
+
+## DNS Configuration
+
+### Wildcard DNS Setup
+
+Configure your DNS provider to point wildcard subdomains to your ingress controller. The WildcardDNS configuration accepts either format:
+
+**Option 1: Full wildcard format**
+```yaml
+Network:
+ WildcardDNS: "*.tunnel.example.com"
+```
+
+**Option 2: Domain only format**
+```yaml
+Network:
+ WildcardDNS: "tunnel.example.com"
+```
+
+Both formats work identically - InterLink will automatically handle the wildcard pattern internally.
+
+**DNS Provider Setup:**
+Configure your DNS provider to point wildcard subdomains to your ingress controller:
+
+```
+*.tunnel.example.com → your-ingress-controller-ip
+```
+
+**Examples for different DNS providers:**
+
+- **Cloudflare**: Create an `A` record with name `*.tunnel` in the `example.com` zone (resolves `*.tunnel.example.com`)
+- **AWS Route53**: Create an `A` record with name `*.tunnel.example.com`
+- **Google Cloud DNS**: Add a record set with DNS name `*.tunnel.example.com`
+
+### Generated Hostnames
+
+For each pod, the system generates hostnames using the pattern:
+```
+ws-{pod-name}-wstunnel.{WildcardDNS}
+```
+
+Examples:
+- Pod `web-server` → `ws-web-server-wstunnel.tunnel.example.com`
+- Pod `api-gateway` → `ws-api-gateway-wstunnel.tunnel.example.com`
+
+---
+
+## Client Connection Commands
+
+### Automatic Command Generation
+
+When wstunnel is enabled, interLink automatically generates a client command annotation for each pod with exposed ports. This annotation contains the complete command needed to connect to the pod's services from a remote location.
+
+**Annotation:** `interlink.eu/wstunnel-client-commands`
+
+### Example
+
+For a pod with exposed ports, the annotation will contain:
+
+```bash
+curl -L https://github.com/erebe/wstunnel/releases/latest/download/wstunnel-linux-x64 -o wstunnel && chmod +x wstunnel
+
+./wstunnel client --http-upgrade-path-prefix a1b2c3d4e5f6 -R tcp://[::]:8080:localhost:8080 -R tcp://[::]:9090:localhost:9090 ws://ws-my-pod-wstunnel.tunnel.example.com:80
+```
+
+### Command Structure
+
+1. **Download**: Downloads the latest wstunnel client binary
+2. **Connect**: Establishes the websocket tunnel with:
+ - `--http-upgrade-path-prefix`: Unique random password for authentication
+ - `-R tcp://[::]:PORT:localhost:PORT`: Port forwarding rules (one per exposed port)
+ - `ws://ENDPOINT:80`: Websocket endpoint using generated hostname
+
+### Retrieving the Command
+
+```bash
+# Get the complete client command for a pod
+kubectl get pod my-pod -o jsonpath='{.metadata.annotations.interlink\.eu/wstunnel-client-commands}'
+
+# Or view all annotations
+kubectl describe pod my-pod
+```
+
+### Multiple Ports
+
+When a pod exposes multiple ports, all ports are included in a single command with multiple `-R` options:
+
+```bash
+# Pod with ports 8080, 9090, and 3000
+./wstunnel client --http-upgrade-path-prefix randompassword \
+ -R tcp://[::]:8080:localhost:8080 \
+ -R tcp://[::]:9090:localhost:9090 \
+ -R tcp://[::]:3000:localhost:3000 \
+ ws://ws-my-pod-wstunnel.tunnel.example.com:80
+```
+
+### Custom Client Command
+
+You can customize the wstunnel client command that gets generated in the pod annotations by setting the `WstunnelCommand` configuration option.
+
+#### Default Command Template
+
+The default command template is:
+```bash
+curl -L https://github.com/erebe/wstunnel/releases/download/v10.4.4/wstunnel_10.4.4_linux_amd64.tar.gz -o wstunnel.tar.gz && tar -xzvf wstunnel.tar.gz && chmod +x wstunnel\n\n./wstunnel client --http-upgrade-path-prefix %s %s ws://%s:80
+```
+
+#### Custom Command Configuration
+
+```yaml
+# VirtualKubeletConfig.yaml
+Network:
+ EnableTunnel: true
+ WildcardDNS: "tunnel.example.com"
+ WstunnelCommand: "wget https://example.com/wstunnel -O wstunnel && chmod +x wstunnel && ./wstunnel client --http-upgrade-path-prefix %s %s ws://%s:80"
+```
+
+#### Command Template Parameters
+
+The command template must include three `%s` placeholders in this order:
+1. **Random Password**: Unique authentication token for the tunnel
+2. **Port Options**: Space-separated `-R` options for each exposed port
+3. **Ingress Endpoint**: The websocket endpoint hostname
+
+#### Example Custom Commands
+
+**Using different wstunnel versions:**
+```yaml
+WstunnelCommand: "curl -L https://github.com/erebe/wstunnel/releases/download/v10.1.0/wstunnel_10.1.0_linux_amd64.tar.gz -o wstunnel.tar.gz && tar -xzvf wstunnel.tar.gz && chmod +x wstunnel && ./wstunnel client --http-upgrade-path-prefix %s %s ws://%s:80"
+```
+
+**Using pre-installed wstunnel:**
+```yaml
+WstunnelCommand: "wstunnel client --http-upgrade-path-prefix %s %s ws://%s:80"
+```
+
+**Using different download methods:**
+```yaml
+WstunnelCommand: "wget https://github.com/erebe/wstunnel/releases/download/v10.4.4/wstunnel_10.4.4_linux_amd64.tar.gz && tar -xzvf wstunnel_10.4.4_linux_amd64.tar.gz && chmod +x wstunnel && ./wstunnel client --http-upgrade-path-prefix %s %s ws://%s:80"
+```
+
+**Using custom binary location:**
+```yaml
+WstunnelCommand: "curl -L https://github.com/erebe/wstunnel/releases/download/v10.4.4/wstunnel_10.4.4_linux_amd64.tar.gz -o wstunnel.tar.gz && tar -xzvf wstunnel.tar.gz && chmod +x wstunnel && /usr/local/bin/wstunnel client --http-upgrade-path-prefix %s %s ws://%s:80"
+```
+
+:::warning
+The custom command template must be properly formatted with exactly three `%s` placeholders. Incorrect formatting will result in malformed client commands in the pod annotations.
+:::
+
+---
+
+## Security Considerations
+
+### Authentication
+
+- Each wstunnel instance uses a unique random password
+- Passwords are generated using cryptographically secure random numbers
+- Access is restricted via the `restrict-http-upgrade-path-prefix` parameter
+
+### Network Security
+
+- WebSocket connections are established over HTTP/HTTPS
+- Consider using TLS termination at the ingress level
+- Implement network policies to restrict traffic if needed
+
+### Resource Security
+
+- Wstunnel resources are automatically cleaned up when pods are deleted
+- Resources are labeled for easy identification and management
+- Consider implementing RBAC policies for wstunnel resources
+
+---
+
+## Monitoring and Troubleshooting
+
+### Checking Wstunnel Status
+
+```bash
+# List all wstunnel resources
+kubectl get deployment,service,ingress -l interlink.virtual-kubelet.io/type=wstunnel
+
+# Check specific pod's wstunnel resources
+kubectl get deployment,service,ingress -l app.kubernetes.io/component=my-pod-wstunnel
+
+# Check wstunnel pod logs
+kubectl logs -l app.kubernetes.io/component=my-pod-wstunnel
+```
+
+### Common Issues
+
+1. **Pod IP not assigned**
+ ```bash
+ # Check pod status
+ kubectl get pod my-pod -o yaml | grep -A 10 status
+
+ # Check virtual kubelet logs
+ kubectl logs -l nodeName=virtual-kubelet
+ ```
+
+2. **Wstunnel timeout errors**
+ ```bash
+ # Increase timeout annotation
+ kubectl annotate pod my-pod interlink.virtual-kubelet.io/wstunnel-timeout=10m
+ ```
+
+3. **DNS resolution issues**
+ ```bash
+ # Test DNS resolution
+ nslookup ws-my-pod-wstunnel.tunnel.example.com
+
+ # Check ingress configuration
+ kubectl get ingress my-pod-wstunnel -o yaml
+ ```
+
+4. **Template errors**
+ ```bash
+ # Check virtual kubelet logs for template parsing errors
+ kubectl logs -l nodeName=virtual-kubelet | grep -i template
+ ```
+
+5. **Custom command issues**
+ ```bash
+ # Check if custom WstunnelCommand is properly formatted
+ kubectl get pod my-pod -o jsonpath='{.metadata.annotations.interlink\.eu/wstunnel-client-commands}'
+
+ # Verify the command has exactly three %s placeholders
+ # Check virtual kubelet logs for command formatting errors
+ kubectl logs -l nodeName=virtual-kubelet | grep -i wstunnel
+ ```
+
+### Debug Commands
+
+```bash
+# Get pod with full details
+kubectl get pod my-pod -o yaml
+
+# Check wstunnel deployment
+kubectl describe deployment my-pod-wstunnel
+
+# Test websocket connection
+curl -i -N -H "Connection: Upgrade" -H "Upgrade: websocket" \
+ -H "Sec-WebSocket-Key: test" -H "Sec-WebSocket-Version: 13" \
+ http://ws-my-pod-wstunnel.tunnel.example.com/your-random-password
+
+# Check service endpoints
+kubectl get endpoints my-pod-wstunnel
+```
+
+---
+
+## Best Practices
+
+### Configuration
+
+- Set appropriate `WildcardDNS` that you control
+- Use custom templates for specific requirements
+- Configure reasonable timeout values based on your infrastructure
+- When using custom `WstunnelCommand`, ensure it includes exactly three `%s` placeholders
+- Test custom commands thoroughly before deploying to production
+
+### Security
+
+- Regularly rotate ingress TLS certificates
+- Implement network policies to restrict access
+- Monitor wstunnel resource usage
+
+### Operations
+
+- Monitor DNS resolution and ingress health
+- Set up alerts for wstunnel pod failures
+- Regular cleanup of orphaned resources
+
+### Performance
+
+- Size wstunnel pods appropriately for your traffic
+- Use appropriate resource limits in custom templates
+- Consider using NodePort or LoadBalancer services for high traffic
+
+---
+
+## Migration Guide
+
+### From VPN to Wstunnel
+
+1. **Update Virtual Kubelet configuration**:
+ ```yaml
+ Network:
+ EnableTunnel: true
+ WildcardDNS: "tunnel.example.com"
+ ```
+
+2. **Remove VPN annotations from pods**:
+ ```bash
+ kubectl annotate pod my-pod interlink.eu/pod-vpn-
+ ```
+
+3. **Restart pods to trigger wstunnel creation**:
+ ```bash
+ kubectl delete pod my-pod
+ kubectl apply -f my-pod.yaml
+ ```
+
+### From Wstunnel to VPN
+
+1. **Add VPN annotation to pods**:
+ ```yaml
+ metadata:
+ annotations:
+ interlink.eu/pod-vpn: "true"
+ ```
+
+2. **Restart pods** to use VPN instead of wstunnel
+
+---
+
+## Advanced Configuration
+
+### Custom Ingress Controller
+
+```yaml
+# Custom template with different ingress controller
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+ name: {{.Name}}
+ namespace: {{.Namespace}}
+ annotations:
+ traefik.ingress.kubernetes.io/router.entrypoints: websecure
+ traefik.ingress.kubernetes.io/router.tls: "true"
+spec:
+ tls:
+ - hosts:
+ - ws-{{.Name}}.{{.WildcardDNS}}
+ secretName: {{.Name}}-tls
+ rules:
+ - host: ws-{{.Name}}.{{.WildcardDNS}}
+ http:
+ paths:
+ - path: /
+ pathType: Prefix
+ backend:
+ service:
+ name: {{.Name}}
+ port:
+ number: 8080
+```
+
+### Resource Limits
+
+```yaml
+# Custom template with resource limits
+spec:
+ template:
+ spec:
+ containers:
+ - name: wireguard
+ resources:
+ requests:
+ cpu: 100m
+ memory: 90Mi
+ limits:
+ cpu: 500m
+ memory: 256Mi
+```
+
+### Multiple Ingress Rules
+
+```yaml
+# Multiple ingress rules for different services
+spec:
+ rules:
+ - host: ws-{{.Name}}.{{.WildcardDNS}}
+ http:
+ paths:
+ - path: /
+ pathType: Prefix
+ backend:
+ service:
+ name: {{.Name}}
+ port:
+ number: 8080
+ {{- range .ExposedPorts}}
+ - host: {{.Name}}-{{.Port}}.{{.WildcardDNS}}
+ http:
+ paths:
+ - path: /
+ pathType: Prefix
+ backend:
+ service:
+ name: {{.Name}}
+ port:
+ number: {{.Port}}
+ {{- end}}
+```
+
+---
+
+## Related Documentation
+
+- [Pod Annotations Reference](./09-pod-annotations.mdx) - Pod annotation documentation
+- [Deploy interLink](./01-deploy-interlink.mdx) - Basic deployment guide
+- [API Reference](./03-api-reference.mdx) - REST API documentation
+- [mTLS Deployment](./07-mtls-deployment.mdx) - Secure communication setup
\ No newline at end of file
diff --git a/docs/versioned_docs/version-0.6.x/guides/11-interlink-configuration.mdx b/docs/versioned_docs/version-0.6.x/guides/11-interlink-configuration.mdx
new file mode 100644
index 00000000..b6349aac
--- /dev/null
+++ b/docs/versioned_docs/version-0.6.x/guides/11-interlink-configuration.mdx
@@ -0,0 +1,310 @@
+---
+sidebar_position: 11
+---
+
+# InterLink Configuration Reference
+
+This guide provides a comprehensive reference for configuring the interLink API
+server using the `InterLinkConfig.yaml` file and environment variables.
+
+## Configuration Overview
+
+The interLink configuration is defined in `pkg/interlink/config.go` and consists
+of several main sections:
+
+- **Core Configuration**: Basic interLink and plugin connection settings
+- **TLS Configuration**: Security settings for encrypted communication
+- **Job Script Configuration**: Settings for container runtime and job execution
+- **Logging Configuration**: Control over log output levels
+- **Telemetry Configuration**: OpenTelemetry tracing setup
+
+## Configuration File Structure
+
+The main configuration file is typically located at
+`/etc/interlink/InterLinkConfig.yaml` and follows this structure:
+
+```yaml
+# Core InterLink Settings
+InterlinkAddress: "http://0.0.0.0" # IP address for interLink API server
+InterlinkPort: "3000" # Port for interLink API server
+
+SidecarURL: "http://localhost" # URL of the InterLink plugin
+SidecarPort: "4000" # Port of the InterLink plugin
+
+# Data and Storage
+DataRootFolder: "/tmp/interlink" # Root directory for temporary data
+
+# Logging Configuration
+VerboseLogging: false # Enable debug level logging
+ErrorsOnlyLogging: false # Only log errors (overrides verbose)
+
+# TLS Configuration (optional)
+TLS:
+ Enabled: true # Enable TLS/mTLS
+ CertFile: "/path/to/cert.pem" # Server certificate file
+ KeyFile: "/path/to/key.pem" # Server private key file
+ CACertFile: "/path/to/ca.pem" # CA certificate for client verification
+
+# Job Script Build Configuration (optional)
+JobScriptBuildConfig:
+ singularity_hub:
+ server: "https://singularityhub.org"
+ master_token: "your-token"
+ cache_validity_seconds: 3600
+
+ apptainer_options:
+ executable: "apptainer"
+ fakeroot: true
+ containall: true
+ fuse_mode: "overlayfs"
+ no_init: false
+ no_home: false
+ no_privs: false
+ nvidia_support: true
+ cleanenv: false
+ unsquash: false
+
+ volumes_options:
+ scratch_area: "/tmp/interlink-scratch"
+ apptainer_cachedir: "/tmp/apptainer-cache"
+ image_dir: "/tmp/interlink-images"
+ additional_directories_in_path: []
+ fuse_sleep_seconds: 5
+
+# Job Script Template (optional)
+JobScriptTemplate: |
+ #!/bin/bash
+ # Custom job script template
+```
+
+## Configuration Reference
+
+### Core Configuration
+
+| Field | Type | Default | Description |
+| ------------------ | ------ | -------------------- | ------------------------------------------------------- |
+| `InterlinkAddress` | string | `"http://0.0.0.0"` | IP address for the interLink API server to bind to |
+| `InterlinkPort` | string | `"3000"` | Port for the interLink API server |
+| `SidecarURL` | string | `"http://localhost"` | Base URL of the InterLink plugin |
+| `SidecarPort` | string | `"4000"` | Port of the InterLink plugin |
+| `DataRootFolder` | string | `"/tmp/interlink"` | Root directory for storing temporary data and job files |
+
+### Logging Configuration
+
+| Field | Type | Default | Description |
+| ------------------- | ---- | ------- | ----------------------------------------------- |
+| `VerboseLogging` | bool | `false` | Enable debug level logging for detailed output |
+| `ErrorsOnlyLogging` | bool | `false` | Only log errors (takes precedence over verbose) |
+
+### TLS Configuration
+
+The `TLS` section enables secure communication between interLink components:
+
+| Field | Type | Required | Description |
+| ------------ | ------ | -------------- | ---------------------------------------------- |
+| `Enabled` | bool | No | Enable TLS/mTLS for secure communication |
+| `CertFile` | string | If TLS enabled | Path to the server certificate file |
+| `KeyFile` | string | If TLS enabled | Path to the server private key file |
+| `CACertFile` | string | For mTLS | Path to CA certificate for client verification |
+
+### Job Script Build Configuration
+
+The `JobScriptBuildConfig` section configures container runtime options and job
+script generation.
+
+:::info What is a Job?
+A job in this context refers to the execution scripts generated by InterLink
+plugins to run containers on different backend systems (like SLURM, HTCondor,
+or Docker). The JobScriptBuildConfig allows plugins to customize how these
+execution scripts are generated and what runtime options are used.
+
+For example:
+
+- **SLURM plugin**: Generates SLURM batch scripts with `sbatch` commands
+- **HTCondor plugin**: Creates HTCondor ClassAd files
+- **Docker plugin**: Builds `docker run` commands
+- **Apptainer/Singularity plugin**: Constructs `apptainer run` scripts with specific runtime flags
+:::
+
+This configuration is optional and only used by plugins that need to generate
+custom job execution scripts:
+
+#### Singularity Hub Configuration
+
+:::warning Plugin-Specific Configuration
+The Singularity Hub configuration is specific to Apptainer/Singularity-based plugins. This section may be removed
+in future versions as it's not used by all plugin types. Consider using plugin-specific configuration files instead.
+:::
+
+| Field | Type | Description |
+| ------------------------ | ------ | --------------------------------------------------------------- |
+| `server` | string | Singularity Hub server URL (e.g., `https://singularityhub.org`) |
+| `master_token` | string | Authentication token for accessing private repositories |
+| `cache_validity_seconds` | int | Duration in seconds to cache downloaded images |
+
+#### Apptainer Options
+
+| Field | Type | Default | Description |
+| ---------------- | ------ | ------------- | ----------------------------------------------- |
+| `executable` | string | `"apptainer"` | Path to Apptainer/Singularity executable |
+| `fakeroot` | bool | `false` | Enable fakeroot mode for unprivileged execution |
+| `containall` | bool | `false` | Contain all mount points |
+| `fuse_mode` | string | `""` | FUSE mount mode (e.g., "overlayfs") |
+| `no_init` | bool | `false` | Skip container initialization |
+| `no_home` | bool | `false` | Don't mount home directory |
+| `no_privs` | bool | `false` | Drop all privileges |
+| `nvidia_support` | bool | `false` | Enable NVIDIA GPU support |
+| `cleanenv` | bool | `false` | Clean environment variables |
+| `unsquash` | bool | `false` | Unsquash container image |
+
+#### Volume Options
+
+| Field | Type | Description |
+| -------------------------------- | -------- | -------------------------------------- |
+| `scratch_area` | string | Temporary scratch space directory |
+| `apptainer_cachedir` | string | Apptainer cache directory |
+| `image_dir` | string | Directory for storing container images |
+| `additional_directories_in_path` | []string | Additional directories to add to PATH |
+| `fuse_sleep_seconds` | int | Delay in seconds for FUSE operations |
+
+## Environment Variable Overrides
+
+The following environment variables can override configuration file values:
+
+| Environment Variable | Config Field | Description |
+| --------------------- | ------------------ | --------------------------------------------------------------------------- |
+| `INTERLINKCONFIGPATH` | - | Path to configuration file (default: `/etc/interlink/InterLinkConfig.yaml`) |
+| `INTERLINKURL` | `InterlinkAddress` | Override interLink server address |
+| `INTERLINKPORT` | `InterlinkPort` | Override interLink server port |
+| `SIDECARURL` | `SidecarURL` | Override InterLink plugin URL |
+| `SIDECARPORT` | `SidecarPort` | Override InterLink plugin port |
+
+## Telemetry Configuration
+
+OpenTelemetry tracing is configured through environment variables:
+
+| Environment Variable | Default | Description |
+| -------------------------------- | ------------------ | --------------------------------------------- |
+| `TELEMETRY_ENDPOINT` | `"localhost:4317"` | OTLP endpoint for trace export |
+| `TELEMETRY_UNIQUE_ID` | Generated UUID | Unique identifier for service instances |
+| `TELEMETRY_CA_CRT_FILEPATH` | - | CA certificate for mTLS to telemetry endpoint |
+| `TELEMETRY_CLIENT_CRT_FILEPATH` | - | Client certificate for mTLS |
+| `TELEMETRY_CLIENT_KEY_FILEPATH` | - | Client private key for mTLS |
+| `TELEMETRY_INSECURE_SKIP_VERIFY` | `"false"` | Skip TLS certificate verification |
+
+## Command Line Options
+
+The interLink binary supports these command-line flags:
+
+| Flag | Type | Default | Description |
+| ---------------------- | ------ | ------- | -------------------------- |
+| `-verbose` | bool | `false` | Enable debug level logging |
+| `-errorsonly` | bool | `false` | Only log errors |
+| `-interlinkconfigpath` | string | - | Path to configuration file |
+
+## Configuration Examples
+
+### Basic Configuration
+
+```yaml
+InterlinkAddress: "http://0.0.0.0"
+InterlinkPort: "3000"
+SidecarURL: "http://localhost"
+SidecarPort: "4000"
+DataRootFolder: "/tmp/interlink"
+VerboseLogging: false
+ErrorsOnlyLogging: false
+```
+
+### TLS-Enabled Configuration
+
+```yaml
+InterlinkAddress: "http://0.0.0.0"
+InterlinkPort: "3000"
+SidecarURL: "https://remote-sidecar.example.com"
+SidecarPort: "443"
+DataRootFolder: "/tmp/interlink"
+
+TLS:
+ Enabled: true
+ CertFile: "/etc/ssl/certs/interlink.crt"
+ KeyFile: "/etc/ssl/private/interlink.key"
+ CACertFile: "/etc/ssl/certs/ca.crt"
+```
+
+### Apptainer/Singularity Configuration
+
+```yaml
+InterlinkAddress: "http://0.0.0.0"
+InterlinkPort: "3000"
+SidecarURL: "http://localhost"
+SidecarPort: "4000"
+DataRootFolder: "/tmp/interlink"
+
+JobScriptBuildConfig:
+ apptainer_options:
+ executable: "/usr/bin/apptainer"
+ fakeroot: true
+ containall: true
+ nvidia_support: true
+ cleanenv: true
+
+ volumes_options:
+ scratch_area: "/scratch/interlink"
+ apptainer_cachedir: "/var/cache/apptainer"
+ image_dir: "/var/lib/interlink/images"
+ fuse_sleep_seconds: 10
+```
+
+## Configuration Loading Order
+
+The configuration is loaded in the following priority order:
+
+1. **Command-line flags** (highest priority)
+2. **Environment variables**
+3. **Configuration file** (lowest priority)
+
+## Validation and Troubleshooting
+
+### Common Configuration Issues
+
+1. **Port conflicts**: Ensure `InterlinkPort` and `SidecarPort` don't conflict
+ with other services
+2. **File permissions**: Verify that certificate files and data directories are
+ readable
+3. **Network connectivity**: Test connectivity between interLink and sidecar
+ components
+4. **TLS certificate validation**: Ensure certificate subject names match
+ hostnames
+
+### Configuration Validation
+
+The interLink binary validates configuration at startup and will log errors for:
+
+- Missing required certificate files when TLS is enabled
+- Invalid port numbers or addresses
+- Inaccessible data directories
+- Malformed YAML syntax
+
+### Debug Configuration
+
+Enable verbose logging to troubleshoot configuration issues:
+
+```bash
+# Using command-line flag
+./interlink -verbose
+
+# Using the configuration file setting
+VerboseLogging: true
+
+# Using environment variable
+export VERBOSE_LOGGING=true
+```
+
+## See Also
+
+- [Deploy InterLink Guide](./01-deploy-interlink.mdx)
+- [mTLS Deployment Guide](./07-mtls-deployment.mdx)
+- [Monitoring Guide](./05-monitoring.md)
+- [API Reference](./03-api-reference.mdx)
+
diff --git a/docs/versioned_docs/version-0.6.x/guides/12-kubelet-certificate-management.mdx b/docs/versioned_docs/version-0.6.x/guides/12-kubelet-certificate-management.mdx
new file mode 100644
index 00000000..34e1e683
--- /dev/null
+++ b/docs/versioned_docs/version-0.6.x/guides/12-kubelet-certificate-management.mdx
@@ -0,0 +1,635 @@
+---
+sidebar_position: 12
+---
+
+# Virtual Kubelet Certificate Management
+
+This guide explains how to manage TLS certificates for the Virtual Kubelet HTTPS server. The Virtual Kubelet provides three flexible certificate management options to accommodate different security requirements and operational workflows.
+
+## Overview
+
+The Virtual Kubelet exposes an HTTPS API that the Kubernetes control plane uses to interact with the virtual node. This API requires TLS certificates for secure communication. InterLink provides three certificate management modes with a clear priority order:
+
+1. **Manually Provided Certificates** (highest priority) - Use your own certificate files
+2. **Self-Signed Certificates** - Automatically generated certificates for testing/development
+3. **CSR-Based Certificates** (default) - Kubernetes CertificateSigningRequest workflow
+
+## Certificate Management Modes
+
+### Mode 1: Manually Provided Certificates (Recommended for Production)
+
+Use this mode when you want full control over certificate management, such as using certificates from your organization's PKI, cert-manager, or external certificate authorities.
+
+#### Configuration
+
+Add these fields to your Virtual Kubelet configuration:
+
+```yaml
+KubeletCertFile: /etc/kubernetes/pki/kubelet-server.crt
+KubeletKeyFile: /etc/kubernetes/pki/kubelet-server.key
+```
+
+#### Use Cases
+
+- Production environments with existing PKI infrastructure
+- Certificates managed by cert-manager or external tools
+- Compliance requirements for certificate authorities
+- Custom certificate rotation workflows
+
+#### Certificate Requirements
+
+The certificate must include:
+- **Common Name (CN)**: `system:node:<node-name>`
+- **Organization (O)**: `system:nodes`
+- **Subject Alternative Name (SAN)**: IP address of the virtual node
+- **Key Usage**: Digital Signature, Key Encipherment
+- **Extended Key Usage**: Server Authentication
+
+#### Example: Creating Certificates with Custom CA
+
+```bash
+# Generate private key
+openssl genrsa -out kubelet-server.key 2048
+
+# Create certificate signing request
+openssl req -new -key kubelet-server.key \
+ -out kubelet-server.csr \
+ -subj "/CN=system:node:my-vk-node/O=system:nodes" \
+ -addext "subjectAltName=IP:10.0.0.100"
+
+# Sign with your CA
+openssl x509 -req -in kubelet-server.csr \
+ -CA ca.crt -CAkey ca.key -CAcreateserial \
+ -out kubelet-server.crt -days 365 \
+ -extfile <(echo "subjectAltName=IP:10.0.0.100")
+```
+
+#### Example: Using Kubernetes Secrets
+
+Mount certificates from a Kubernetes secret:
+
+```yaml
+apiVersion: v1
+kind: Secret
+metadata:
+ name: vk-kubelet-certs
+ namespace: interlink
+type: kubernetes.io/tls
+data:
+  tls.crt: <base64-encoded certificate>
+  tls.key: <base64-encoded private key>
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: virtual-kubelet
+ namespace: interlink
+spec:
+ template:
+ spec:
+ containers:
+ - name: virtual-kubelet
+ volumeMounts:
+ - name: kubelet-certs
+ mountPath: /etc/vk/certs
+ readOnly: true
+ volumes:
+ - name: kubelet-certs
+ secret:
+ secretName: vk-kubelet-certs
+```
+
+Update your Virtual Kubelet configuration:
+
+```yaml
+KubeletCertFile: /etc/vk/certs/tls.crt
+KubeletKeyFile: /etc/vk/certs/tls.key
+```
+
+#### Hot Reload Support
+
+Certificates are loaded on each TLS handshake, enabling hot reload:
+- Update the certificate files on disk
+- No Virtual Kubelet restart required
+- New connections will use the updated certificate
+
+This is particularly useful with cert-manager or external rotation systems.
+
+---
+
+### Mode 2: Self-Signed Certificates
+
+Use this mode for development, testing, or environments without PKI infrastructure. The Virtual Kubelet automatically generates and manages self-signed certificates.
+
+#### Configuration
+
+```yaml
+DisableCSR: true
+```
+
+#### Behavior
+
+- Generates a new self-signed certificate on startup
+- Certificate valid for 1 year
+- Automatically regenerates when within 1 day of expiration
+- No CSR created in Kubernetes
+
+#### Use Cases
+
+- Development and testing environments
+- Quick prototypes and demos
+- Environments without CSR approval infrastructure
+- Isolated deployments not requiring CA trust
+
+#### Limitations
+
+- Certificates are not trusted by default
+- Not recommended for production
+- May trigger TLS verification warnings
+
+---
+
+### Mode 3: CSR-Based Certificates (Default)
+
+Use this mode for production environments with Kubernetes CSR workflows. This is the recommended approach for production when CSR auto-approval is configured.
+
+#### Configuration
+
+```yaml
+# Default behavior - no configuration needed
+# Or explicitly:
+DisableCSR: false
+```
+
+#### Improved CSR Behavior (New)
+
+Previous versions of InterLink used the standard Kubernetes certificate manager, which had a hardcoded 15-minute timeout. This caused repeated CSR creation when approval was delayed.
+
+**The new implementation eliminates this issue:**
+
+- **Creates ONE CSR** on startup
+- **Waits indefinitely** for approval (no 15-minute timeout)
+- **Polls every 10 seconds** to check if the CSR has been approved
+- **Only creates new CSRs** when the certificate reaches 80% of its lifetime
+- **Reuses existing certificates** from previous runs if still valid
+- **Handles denied CSRs** by creating a new one
+
+This eliminates unnecessary CSR accumulation in clusters without auto-approval.
+
+#### CSR Workflow
+
+1. Virtual Kubelet starts and cleans up any old pending CSRs
+2. Creates a new CSR with the signer `kubernetes.io/kubelet-serving`
+3. Waits patiently for the CSR to be approved (manual or automatic)
+4. Once approved, retrieves and uses the certificate
+5. Stores certificate in `/tmp/certs` for reuse on restart
+6. Creates a new CSR only when the certificate is near expiration (80% of lifetime)
+
+#### With Auto-Approval (Recommended)
+
+Enable automatic CSR approval for the kubelet-serving signer:
+
+```bash
+# Check if auto-approval is configured
+kubectl get clusterrolebinding system:certificates.k8s.io:certificatesigningrequests:kubelet-serving
+
+# If not present, you may need to configure a CSR approver controller
+# This depends on your Kubernetes distribution
+```
+
+Many Kubernetes distributions include auto-approval by default. Check your distribution's documentation.
+
+#### With Manual Approval
+
+If CSR auto-approval is not configured, you'll need to manually approve CSRs:
+
+```bash
+# List pending CSRs
+kubectl get csr
+
+# You should see a CSR like: vk-my-node-xxxxx (Pending)
+
+# Approve the CSR
+kubectl certificate approve vk-my-node-xxxxx
+
+# Verify it's approved
+kubectl get csr vk-my-node-xxxxx
+```
+
+The Virtual Kubelet will automatically detect the approval and begin using the certificate.
+
+#### With cert-manager as CSR Signer (Experimental)
+
+You can use cert-manager to automatically sign Kubernetes CSR objects. This combines the CSR workflow with cert-manager's powerful issuer ecosystem.
+
+**Prerequisites:**
+- cert-manager installed in your cluster
+- cert-manager experimental feature enabled
+
+**Step 1: Enable cert-manager CSR support**
+
+Enable the experimental feature in cert-manager:
+
+```yaml
+# In cert-manager controller deployment
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: cert-manager
+ namespace: cert-manager
+spec:
+ template:
+ spec:
+ containers:
+ - name: cert-manager
+ args:
+ - --feature-gates=ExperimentalCertificateSigningRequestControllers=true
+```
+
+**Step 2: Create a cert-manager Issuer or ClusterIssuer**
+
+```yaml
+apiVersion: cert-manager.io/v1
+kind: ClusterIssuer
+metadata:
+ name: kubelet-ca-issuer
+spec:
+ ca:
+ secretName: kubelet-ca-key-pair # Your CA certificate and key
+---
+# Or use other issuer types (ACME, Vault, etc.)
+apiVersion: cert-manager.io/v1
+kind: ClusterIssuer
+metadata:
+ name: vault-issuer
+spec:
+ vault:
+ server: https://vault.example.com
+ path: pki/sign/kubelet
+ auth:
+ kubernetes:
+ role: cert-manager
+ mountPath: /v1/auth/kubernetes
+ secretRef:
+ name: vault-token
+ key: token
+```
+
+**Step 3: Configure Virtual Kubelet to use cert-manager signer**
+
+```yaml
+# Virtual Kubelet configuration
+KubeletCSRSignerName: clusterissuers.cert-manager.io/kubelet-ca-issuer
+
+# Or for namespaced Issuer:
+# KubeletCSRSignerName: issuers.cert-manager.io/interlink.my-issuer
+```
+
+**Step 4: Set up RBAC for cert-manager**
+
+cert-manager needs permission to approve and sign CSRs:
+
+```yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: cert-manager-csr-approver
+rules:
+- apiGroups: ["certificates.k8s.io"]
+ resources: ["certificatesigningrequests"]
+ verbs: ["get", "list", "watch"]
+- apiGroups: ["certificates.k8s.io"]
+ resources: ["certificatesigningrequests/status"]
+ verbs: ["update"]
+- apiGroups: ["certificates.k8s.io"]
+ resources: ["signers"]
+ resourceNames: ["clusterissuers.cert-manager.io/*", "issuers.cert-manager.io/*"]
+ verbs: ["sign"]
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: cert-manager-csr-approver
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: cert-manager-csr-approver
+subjects:
+- kind: ServiceAccount
+ name: cert-manager
+ namespace: cert-manager
+```
+
+**How it works:**
+
+1. Virtual Kubelet creates a CSR with `signerName: clusterissuers.cert-manager.io/kubelet-ca-issuer`
+2. cert-manager detects the CSR (polls every 10 seconds)
+3. cert-manager validates the request matches its issuer
+4. cert-manager signs the CSR using the configured issuer (CA, Vault, ACME, etc.)
+5. Virtual Kubelet retrieves the signed certificate and uses it
+
+**Benefits:**
+- Automatic CSR approval and signing
+- Leverage cert-manager's issuer ecosystem
+- Centralized certificate management
+- Support for various backends (Vault, ACME, external CAs)
+
+**Important Notes:**
+- ⚠️ This is an **experimental feature** in cert-manager (as of 2024)
+- ⚠️ Manual approval is still required by default - cert-manager doesn't auto-approve
+- You may need additional automation to approve CSRs, or use a separate CSR approver
+- Behavior may change in future cert-manager releases
+
+For production use, consider using **Approach 2** (manual certificates with cert-manager Certificate CRD) which is more mature and stable.
+
+#### Certificate Storage and Reuse
+
+Certificates are stored in `/tmp/certs` with the prefix `virtual-kubelet-`. On restart:
+- The Virtual Kubelet checks for an existing valid certificate
+- If found and not expired, reuses it immediately
+- If expired or not found, creates a new CSR
+
+This prevents creating new CSRs on every restart.
+
+---
+
+## Certificate Priority Matrix
+
+| Configuration | Mode Used | CSR Created? | Certificate Source |
+|---------------|-----------|--------------|-------------------|
+| `KubeletCertFile` + `KubeletKeyFile` set | Manual | No | User-provided files |
+| `DisableCSR: true` | Self-Signed | No | Auto-generated |
+| Default / `DisableCSR: false` | CSR-Based | Yes | Kubernetes CSR approval |
+
+---
+
+## Common Scenarios
+
+### Scenario 1: Production with cert-manager
+
+Use cert-manager to issue and rotate certificates automatically:
+
+```yaml
+apiVersion: cert-manager.io/v1
+kind: Certificate
+metadata:
+ name: vk-kubelet-cert
+ namespace: interlink
+spec:
+ secretName: vk-kubelet-certs
+ duration: 2160h # 90 days
+ renewBefore: 360h # 15 days
+ subject:
+ organizations:
+ - system:nodes
+ commonName: system:node:my-vk-node
+ isCA: false
+ privateKey:
+ algorithm: RSA
+ size: 2048
+ usages:
+ - digital signature
+ - key encipherment
+ - server auth
+ ipAddresses:
+ - "10.0.0.100"
+ issuerRef:
+ name: ca-issuer
+ kind: Issuer
+```
+
+Virtual Kubelet configuration:
+
+```yaml
+KubeletCertFile: /etc/vk/certs/tls.crt
+KubeletKeyFile: /etc/vk/certs/tls.key
+```
+
+### Scenario 2: Development Environment
+
+Quick setup for testing:
+
+```yaml
+DisableCSR: true
+VerboseLogging: true
+```
+
+No additional certificate management needed.
+
+### Scenario 3: Production with CSR Auto-Approval
+
+Standard production setup with Kubernetes CSR workflow:
+
+```yaml
+# No special configuration needed - uses defaults
+# Ensure CSR auto-approval is configured in your cluster
+```
+
+The Virtual Kubelet will automatically create and manage certificates.
+
+### Scenario 4: Production with cert-manager CSR Signer (Experimental)
+
+Use cert-manager to sign CSRs automatically, combining CSR workflow with cert-manager's issuer ecosystem:
+
+```yaml
+# Virtual Kubelet configuration
+KubeletCSRSignerName: clusterissuers.cert-manager.io/vault-issuer
+```
+
+This approach:
+- Uses Kubernetes CSR workflow
+- Leverages cert-manager's powerful issuers (Vault, ACME, CA, etc.)
+- Automatic certificate signing (still requires CSR approval)
+- Centralized certificate management
+
+See the [cert-manager as CSR Signer](#with-cert-manager-as-csr-signer-experimental) section for detailed setup.
+
+### Scenario 5: Air-Gapped Environment with Manual Certificates
+
+For environments without internet access or external PKI:
+
+1. Generate certificates offline with your CA
+2. Mount them as secrets in the Virtual Kubelet pod
+3. Configure the paths:
+
+```yaml
+KubeletCertFile: /etc/vk/certs/tls.crt
+KubeletKeyFile: /etc/vk/certs/tls.key
+```
+
+---
+
+## Troubleshooting
+
+### CSR Not Being Approved
+
+**Symptoms:**
+- Virtual Kubelet logs show "no certificate available yet - CSR pending approval"
+- CSR remains in Pending state
+
+**Solutions:**
+
+1. Check if CSR exists:
+ ```bash
+ kubectl get csr | grep vk-
+ ```
+
+2. Manually approve:
+ ```bash
+   kubectl certificate approve <csr-name>
+ ```
+
+3. Check for auto-approval configuration:
+ ```bash
+ kubectl get clusterrolebinding | grep certificate
+ ```
+
+4. Or switch to manual certificates:
+ ```yaml
+ KubeletCertFile: /path/to/cert.crt
+ KubeletKeyFile: /path/to/key.key
+ ```
+
+### Certificate Load Failures
+
+**Symptoms:**
+- Error: "failed to load kubelet certificate"
+
+**Solutions:**
+
+1. Verify file paths are correct and accessible
+2. Check file permissions:
+ ```bash
+ ls -la /path/to/cert.crt
+ chmod 644 /path/to/cert.crt
+ chmod 600 /path/to/key.key
+ ```
+
+3. Verify certificate format (PEM):
+ ```bash
+ openssl x509 -in /path/to/cert.crt -text -noout
+ ```
+
+### Certificate Expiration
+
+**Symptoms:**
+- TLS handshake errors
+- Kubelet API becomes unreachable
+
+**Solutions:**
+
+For manual certificates:
+- Rotate certificates before expiration
+- The Virtual Kubelet will automatically reload them
+
+For CSR-based:
+- New CSR is automatically created at 80% of certificate lifetime
+- Approve the new CSR before the old certificate expires
+
+For self-signed:
+- Automatically regenerated at 1 day before expiration
+- No action required
+
+### CSR Accumulation (Legacy Issue - Fixed)
+
+**Symptoms:**
+- Many pending CSRs with the same node name
+
+**This issue is fixed** in the current version. The new implementation:
+- Creates only ONE CSR and waits indefinitely
+- Only creates new CSRs for certificate renewal
+- Cleans up old CSRs on startup
+
+If you still see accumulation, ensure you're using the latest version.
+
+---
+
+## Security Best Practices
+
+1. **Use Manual Certificates in Production**: Provides the most control and integrates with existing PKI workflows
+
+2. **Enable Certificate Rotation**: Ensure certificates are rotated regularly (e.g., every 90 days)
+
+3. **Protect Private Keys**:
+ - Use appropriate file permissions (600)
+ - Store in Kubernetes secrets with restricted RBAC
+ - Never commit private keys to version control
+
+4. **Monitor Certificate Expiration**: Set up alerts for certificates approaching expiration
+
+5. **Verify Certificate Trust Chain**: Ensure the Kubernetes API server trusts your certificate authority
+
+6. **Use Strong Key Sizes**: Minimum 2048-bit RSA or equivalent
+
+7. **Restrict Certificate Usage**: Use proper key usage and extended key usage extensions
+
+---
+
+## Migration Guide
+
+### Migrating from Old CSR Behavior
+
+If you previously experienced CSR accumulation due to the 15-minute timeout:
+
+1. **Clean up old CSRs**:
+ ```bash
+ kubectl delete csr $(kubectl get csr -o name | grep vk-)
+ ```
+
+2. **Update to latest version**: The new version automatically handles this
+
+3. **Restart Virtual Kubelet**: It will create a single CSR and wait for approval
+
+No configuration changes required - the improvement is automatic.
+
+### Migrating to Manual Certificates
+
+If you want to switch from CSR to manual certificate management:
+
+1. Generate or obtain certificates using your preferred method
+
+2. Create a Kubernetes secret:
+ ```bash
+ kubectl create secret tls vk-kubelet-certs \
+ --cert=kubelet-server.crt \
+ --key=kubelet-server.key \
+ -n interlink
+ ```
+
+3. Update Virtual Kubelet deployment to mount the secret
+
+4. Update configuration:
+ ```yaml
+ KubeletCertFile: /etc/vk/certs/tls.crt
+ KubeletKeyFile: /etc/vk/certs/tls.key
+ ```
+
+5. Restart Virtual Kubelet
+
+The CSR mode will be automatically disabled.
+
+---
+
+## Configuration Reference
+
+| Field | Type | Default | Description |
+|-------|------|---------|-------------|
+| `KubeletCertFile` | string | `""` | Path to kubelet server certificate file (PEM format) |
+| `KubeletKeyFile` | string | `""` | Path to kubelet server private key file (PEM format) |
+| `KubeletCSRSignerName` | string | `"kubernetes.io/kubelet-serving"` | Signer name for CSR-based certificates. Use cert-manager format: `clusterissuers.cert-manager.io/<issuer-name>` or `issuers.cert-manager.io/<namespace>.<issuer-name>` |
+| `DisableCSR` | bool | `false` | Disable CSR creation and use self-signed certificates |
+
+**Priority order:**
+1. When both `KubeletCertFile` and `KubeletKeyFile` are set, manual certificates are used (highest priority)
+2. When `DisableCSR: true`, self-signed certificates are used
+3. Otherwise, CSR-based certificates are used with the specified `KubeletCSRSignerName`
+
+---
+
+## Related Documentation
+
+- [mTLS Deployment Guide](./07-mtls-deployment.mdx) - For securing InterLink API communication
+- [InterLink Configuration](./11-interlink-configuration.mdx) - Complete configuration reference
+- [Kubernetes CSR Documentation](https://kubernetes.io/docs/reference/access-authn-authz/certificate-signing-requests/)
+- [cert-manager CSR Support](https://cert-manager.io/docs/usage/kube-csr/) - Using cert-manager as a CSR signer
+- [TLS Bootstrapping](https://kubernetes.io/docs/reference/access-authn-authz/kubelet-tls-bootstrapping/)
diff --git a/docs/versioned_docs/version-0.6.x/guides/13-mesh-network-configuration.mdx b/docs/versioned_docs/version-0.6.x/guides/13-mesh-network-configuration.mdx
new file mode 100644
index 00000000..61687579
--- /dev/null
+++ b/docs/versioned_docs/version-0.6.x/guides/13-mesh-network-configuration.mdx
@@ -0,0 +1,1088 @@
+# Virtual Kubelet Mesh Networking Documentation
+
+## Overview
+
+The mesh networking feature enables full network connectivity between Virtual Kubelet pods and the Kubernetes cluster using a combination of **WireGuard VPN** and **wstunnel** (WebSocket tunneling). This allows pods running on remote compute resources (e.g., HPC clusters via SLURM) to seamlessly communicate with services and pods in the main Kubernetes cluster.
+
+### High-Level Architecture Diagram
+
+
+
+```
+Network Traffic Flow Example:
+═════════════════════════════
+
+Pod on HPC wants to access service "mysql.default.svc.cluster.local:3306"
+
+1. Application makes request to mysql.default.svc.cluster.local:3306
+ └─▶ DNS resolution via 10.244.0.99
+ └─▶ Resolves to service IP (e.g., 10.105.123.45)
+
+2. Traffic is routed to WireGuard interface (matches 10.105.0.0/16)
+ └─▶ Packet: [Src: 10.7.0.2] [Dst: 10.105.123.45:3306]
+
+3. WireGuard encrypts and encapsulates packet
+ └─▶ Sends to peer 10.7.0.1 via endpoint 127.0.0.1:51821
+
+4. wstunnel client receives UDP packet on 127.0.0.1:51821
+ └─▶ Forwards to local WireGuard on 127.0.0.1:51820
+
+5. wstunnel encapsulates in WebSocket frame
+ └─▶ Sends over WSS connection to pod-ns.example.com:443
+
+6. Ingress controller receives WSS connection
+ └─▶ Routes to wstunnel server pod service
+
+7. wstunnel server receives WebSocket frame
+ └─▶ Extracts UDP packet
+ └─▶ Forwards to local WireGuard on 127.0.0.1:51820
+
+8. WireGuard server (10.7.0.1) decrypts packet
+ └─▶ Routes to destination: 10.105.123.45:3306
+
+9. Kubernetes service forwards to MySQL pod endpoint
+
+10. Return traffic follows reverse path
+```
+
+### Mesh Overlay Network Topology
+
+This diagram shows how the WireGuard overlay network (10.7.0.0/24) creates a virtual mesh connecting remote HPC pods to the Kubernetes cluster network:
+
+
+
+```
+PACKET FLOW EXAMPLE: HPC Pod → MySQL Service
+═════════════════════════════════════════════
+
+Step 1: DNS Resolution
+──────────────────────
+HPC Pod: "What is mysql.default.svc.cluster.local?"
+ │
+ └──▶ Query sent to 10.244.0.99 (kube-dns)
+ │
+ ├─▶ Routed via wg* interface (matches 10.244.0.0/16)
+ │
+ ├─▶ Encrypted by WireGuard client (10.7.0.2)
+ │
+ ├─▶ Sent via wstunnel → Ingress → wstunnel server
+ │
+ ├─▶ Decrypted by WireGuard server (10.7.0.1)
+ │
+ └─▶ Reaches kube-dns pod at 10.244.0.99
+ │
+ └─▶ Response: 10.105.123.45 (mysql service ClusterIP)
+
+
+Step 2: TCP Connection to Service
+──────────────────────────────────
+HPC Pod: TCP SYN to 10.105.123.45:3306
+ │
+ ├─▶ Packet: [Src: 10.7.0.2:random] [Dst: 10.105.123.45:3306]
+ │
+ ├─▶ Routing decision: matches 10.105.0.0/16 → via wg* interface
+ │
+ ├─▶ WireGuard client encrypts packet
+ │ │
+ │ └─▶ Encrypted packet: [Src: 10.7.0.2] [Dst: 10.7.0.1]
+ │
+ ├─▶ wstunnel client on HPC (127.0.0.1:51821)
+ │ │
+ │ └─▶ Forwards to WireGuard (127.0.0.1:51820)
+ │
+ ├─▶ Encapsulated in WebSocket frame
+ │ │
+ │ └─▶ WSS connection: HPC → pod-ns.example.com:443
+ │
+ ├─▶ Ingress controller routes to wstunnel server service
+ │
+ ├─▶ wstunnel server (in cluster) extracts WebSocket payload
+ │ │
+ │ └─▶ Forwards UDP to local WireGuard (127.0.0.1:51820)
+ │
+ ├─▶ WireGuard server (10.7.0.1) decrypts packet
+ │ │
+ │ └─▶ Original packet: [Src: 10.7.0.2:random] [Dst: 10.105.123.45:3306]
+ │
+ ├─▶ Kernel routing: 10.105.123.45 is a service IP
+ │ │
+ │ └─▶ kube-proxy/iptables/IPVS handles service load balancing
+ │
+ └─▶ Traffic reaches MySQL pod at 10.244.1.15:3306
+
+
+Step 3: Return Path
+───────────────────
+MySQL Pod: TCP SYN-ACK from 10.244.1.15:3306
+ │
+ ├─▶ Packet: [Src: 10.244.1.15:3306] [Dst: 10.7.0.2:random]
+ │
+ ├─▶ Routing: destination is in WireGuard network
+ │
+ ├─▶ WireGuard server encrypts and sends to peer 10.7.0.2
+ │
+ ├─▶ Reverse path through wstunnel
+ │
+ └─▶ Arrives at HPC pod: [Src: 10.105.123.45:3306] [Dst: 10.7.0.2:random]
+ │
+ └─▶ Application receives response
+
+KEY CHARACTERISTICS OF THE MESH OVERLAY
+════════════════════════════════════════
+
+1. Point-to-Point Tunnels
+ • Each HPC pod has a dedicated tunnel to the cluster
+ • Not a true "mesh" between HPC pods (they don't directly communicate)
+ • But appears as a "mesh" from cluster perspective
+
+2. Consistent Addressing
+ • Server side: Always 10.7.0.1/32
+ • Client side: Always 10.7.0.2/32
+ • Isolated per tunnel (no IP conflicts)
+
+3. Network Isolation
+ • Each pod runs in its own network namespace
+   • WireGuard interface unique per pod (wg<unique-suffix>)
+ • No cross-pod interference
+
+4. Transparent Cluster Access
+ • HPC pods use standard Kubernetes service DNS names
+ • No special configuration in application code
+ • Native service discovery works
+
+5. Scalability
+ • Independent tunnels scale linearly
+ • No coordination needed between HPC pods
+ • Server resources scale with pod count
+```
+
+## Architecture
+
+### Components
+
+1. **WireGuard VPN**: Provides encrypted peer-to-peer network tunnel
+2. **wstunnel**: WebSocket tunnel that encapsulates WireGuard traffic, allowing it to traverse firewalls and NAT
+3. **slirp4netns**: User-mode networking for unprivileged containers
+4. **Network Namespace Management**: Provides network isolation and routing
+
+### Network Flow
+
+```
+Remote Pod (Client) <-> WireGuard Client <-> wstunnel Client <-> wstunnel Server <-> WireGuard Server <-> K8s Cluster Network
+```
+
+#### Detailed Flow:
+1. Remote pod initiates connection
+2. Traffic is routed through WireGuard interface (`wg*`)
+3. WireGuard encrypts and encapsulates traffic
+4. wstunnel client forwards encrypted WireGuard packets via WebSocket to the ingress endpoint
+5. wstunnel server in the cluster receives WebSocket traffic
+6. WireGuard server decrypts and routes traffic to cluster services/pods
+7. Return traffic follows the reverse path
+
+## Configuration
+
+### Enabling Full Mesh Mode
+
+In your Virtual Kubelet configuration or Helm values:
+
+```yaml
+virtualNode:
+ network:
+ # Enable full mesh networking
+ fullMesh: true
+
+ # Kubernetes cluster network ranges
+ serviceCIDR: "10.105.0.0/16" # Service CIDR range
+ podCIDRCluster: "10.244.0.0/16" # Pod CIDR range
+
+ # DNS configuration
+ dnsService: "10.244.0.99" # IP of kube-dns service
+
+ # Optional: Custom binary URLs
+ wireguardGoURL: "https://github.com/interlink-hq/interlink-artifacts/raw/main/wireguard-go/v0.0.20201118/linux-amd64/wireguard-go"
+ wgToolURL: "https://github.com/interlink-hq/interlink-artifacts/raw/main/wgtools/v1.0.20210914/linux-amd64/wg"
+ wstunnelExecutableURL: "https://github.com/interlink-hq/interlink-artifacts/raw/main/wstunnel/v10.4.4/linux-amd64/wstunnel"
+ slirp4netnsURL: "https://github.com/interlink-hq/interlink-artifacts/raw/main/slirp4netns/v1.2.3/linux-amd64/slirp4netns"
+
+ # Unshare mode for network namespaces
+ unshareMode: "auto" # Options: "auto", "none", "user"
+
+ # Custom mesh script template path (optional)
+ meshScriptTemplatePath: "/path/to/custom/mesh.sh"
+```
+
+### Configuration Options
+
+#### Network CIDRs
+
+- **`serviceCIDR`**: CIDR range for Kubernetes services
+ - Default: `10.105.0.0/16`
+ - Used to route service traffic through the VPN
+
+- **`podCIDRCluster`**: CIDR range for Kubernetes pods
+ - Default: `10.244.0.0/16`
+ - Used to route inter-pod traffic through the VPN
+
+- **`dnsService`**: IP address of the cluster DNS service
+ - Default: `10.244.0.99`
+ - Typically the kube-dns or CoreDNS service IP
+
+#### Binary URLs
+
+Default URLs point to pre-built binaries in the interlink-artifacts repository. You can override these to use your own hosted binaries or different versions.
+
+#### Unshare Mode
+
+Controls how network namespaces are created:
+
+- **`auto`** (default): Automatically detects the best method
+- **`none`**: No namespace isolation (may be needed for certain HPC environments)
+- **`user`**: Uses user namespaces (requires kernel support)
+
+## How It Works
+
+### 1. WireGuard Key Generation
+
+When a pod is created, the system generates:
+- A WireGuard private/public key pair for the client (remote pod)
+- The server's public key is derived from its private key
+
+Keys are generated using X25519 curve cryptography:
+
+```go
+func generateWGKeypair() (string, string, error) {
+ privRaw := make([]byte, 32)
+ rand.Read(privRaw)
+
+ // Clamp private key per RFC 7748
+ privRaw[0] &= 248
+ privRaw[31] &= 127
+ privRaw[31] |= 64
+
+ pubRaw, _ := curve25519.X25519(privRaw, curve25519.Basepoint)
+ return base64Encode(privRaw), base64Encode(pubRaw), nil
+}
+```
+
+### 2. Pre-Exec Script Generation
+
+The system generates a bash script that is executed before the main pod application starts. This script:
+
+1. **Downloads necessary binaries**:
+ - `wstunnel` - WebSocket tunnel client
+ - `wireguard-go` - Userspace WireGuard implementation
+ - `wg` - WireGuard configuration tool
+ - `slirp4netns` - User-mode networking (if needed)
+
+2. **Sets up network namespace**:
+ - Creates isolated network environment
+ - Configures routing tables
+ - Sets up DNS resolution
+
+3. **Configures WireGuard interface**:
+   - Creates interface (named `wg<unique-suffix>`, e.g. `wg5f3b9c2d3a4e`)
+ - Applies configuration with keys and allowed IPs
+ - Sets MTU (default: 1280 bytes)
+
+4. **Establishes wstunnel connection**:
+ - Connects to ingress endpoint via WebSocket
+ - Forwards WireGuard traffic through the tunnel
+ - Uses password-based authentication
+
+5. **Configures routing**:
+ - Routes cluster service CIDR through VPN
+ - Routes cluster pod CIDR through VPN
+ - Sets DNS to cluster DNS service
+
+### 3. Annotations Added to Pod
+
+The system adds several annotations to the pod:
+
+```yaml
+annotations:
+  # Pre-execution script that sets up the mesh
+  slurm-job.vk.io/pre-exec: "<generated mesh setup script>"
+
+  # WireGuard client configuration snippet
+  interlink.eu/wireguard-client-snippet: |
+    [Interface]
+    Address = 10.7.0.2/32
+    PrivateKey = <client-private-key>
+    DNS = 1.1.1.1
+    MTU = 1280
+
+    [Peer]
+    PublicKey = <server-public-key>
+    AllowedIPs = 10.7.0.1/32, 10.0.0.0/8
+    Endpoint = 127.0.0.1:51821
+    PersistentKeepalive = 25
+```
+
+### 4. Server-Side Resources
+
+For each pod, the system creates (or can create) server-side resources in the cluster:
+
+- **Deployment**: Runs wstunnel server and WireGuard server containers
+- **ConfigMap**: Contains WireGuard server configuration
+- **Service**: Exposes wstunnel endpoint
+- **Ingress**: Provides external access via DNS (e.g., `podname-namespace.example.com`)
+
+## Network Address Allocation
+
+### IP Addressing Scheme
+
+- **WireGuard Overlay Network**: `10.7.0.0/24`
+ - Server (cluster side): `10.7.0.1/32`
+ - Client (remote pod): `10.7.0.2/32`
+
+### Allowed IPs Configuration
+
+**Client side** allows traffic to:
+- `10.7.0.1/32` - WireGuard server
+- `10.0.0.0/8` - General overlay range
+- `<serviceCIDR>` (e.g. `10.105.0.0/16`) - Kubernetes services
+- `<podCIDRCluster>` (e.g. `10.244.0.0/16`) - Kubernetes pods
+
+**Server side** allows traffic from:
+- `10.7.0.2/32` - WireGuard client
+
+## DNS Name Sanitization
+
+The system ensures all generated resource names comply with RFC 1123 DNS naming requirements:
+
+### Rules Applied:
+1. Convert to lowercase
+2. Replace invalid characters with hyphens
+3. Remove leading/trailing hyphens
+4. Collapse consecutive hyphens
+5. Truncate to 63 characters (max label length)
+6. Truncate full DNS names to 253 characters
+
+Example:
+```
+Input: "My_Pod.Name@123"
+Output: "my-pod-name-123"
+```
+
+## Template Customization
+
+### Mesh Script Template Structure
+
+The mesh script template is a Go template that generates a bash script. The default template is embedded in the Virtual Kubelet binary but can be overridden with a custom template.
+
+#### Default Template Location
+
+- **Embedded**: `templates/mesh.sh` (in the VK binary)
+- **Custom**: Specified via `meshScriptTemplatePath` configuration
+
+#### Template Loading Priority
+
+1. **Custom Template** (if `meshScriptTemplatePath` is set):
+ ```go
+ if p.config.Network.MeshScriptTemplatePath != "" {
+ content, err := os.ReadFile(p.config.Network.MeshScriptTemplatePath)
+ // Use custom template
+ }
+ ```
+
+2. **Embedded Template** (fallback):
+ ```go
+ tmplContent, err := meshScriptTemplate.ReadFile("templates/mesh.sh")
+ // Use embedded template
+ ```
+
+### Using Custom Mesh Script Template
+
+You can provide a custom template for the mesh setup script:
+
+```yaml
+virtualNode:
+ network:
+ meshScriptTemplatePath: "/etc/custom/mesh-template.sh"
+```
+
+The custom template file should be mounted into the Virtual Kubelet container:
+
+```yaml
+extraVolumes:
+ - name: mesh-template
+ configMap:
+ name: custom-mesh-template
+
+extraVolumeMounts:
+ - name: mesh-template
+ mountPath: /etc/custom
+ readOnly: true
+```
+
+### Template Variables
+
+The mesh script template receives the following data structure:
+
+```go
+type MeshScriptTemplateData struct {
+ WGInterfaceName string // WireGuard interface name (e.g., "wg5f3b9c2d3a4e")
+ WSTunnelExecutableURL string // URL to download wstunnel binary
+ WireguardGoURL string // URL to download wireguard-go binary
+ WgToolURL string // URL to download wg tool
+ Slirp4netnsURL string // URL to download slirp4netns
+ WGConfig string // Complete WireGuard configuration
+ DNSServiceIP string // Cluster DNS service IP (e.g., "10.244.0.99")
+ RandomPassword string // Authentication password for wstunnel
+ IngressEndpoint string // wstunnel server endpoint (e.g., "pod-ns.example.com")
+ WGMTU int // MTU for WireGuard interface (default: 1280)
+ PodCIDRCluster string // Cluster pod CIDR (e.g., "10.244.0.0/16")
+ ServiceCIDR string // Cluster service CIDR (e.g., "10.105.0.0/16")
+ UnshareMode string // Namespace creation mode ("auto", "none", "user")
+}
+```
+
+#### Template Variable Usage Examples
+
+```bash
+# Access variables in template using Go template syntax
+{{.WGInterfaceName}} # => "wg5f3b9c2d3a4e"
+{{.WSTunnelExecutableURL}} # => "https://github.com/.../wstunnel"
+{{.DNSServiceIP}} # => "10.244.0.99"
+{{.WGMTU}} # => 1280
+{{.IngressEndpoint}} # => "pod-namespace.example.com"
+```
+
+#### WireGuard Configuration Variable
+
+The `{{.WGConfig}}` variable contains a complete WireGuard configuration:
+
+```ini
+[Interface]
+PrivateKey = <client-private-key>
+
+[Peer]
+PublicKey = <server-public-key>
+AllowedIPs = 10.7.0.1/32,10.0.0.0/8,10.244.0.0/16,10.105.0.0/16
+Endpoint = 127.0.0.1:51821
+PersistentKeepalive = 25
+```
+
+### Example Default Custom Template
+
+Here's the default mesh script template used by Virtual Kubelet:
+
+```bash
+#!/bin/bash
+set -e
+set -m
+
+export PATH=$PATH:$PWD:/usr/sbin:/sbin
+
+# Prepare the temporary directory
+TMPDIR=${SLIRP_TMPDIR:-/tmp/.slirp.$RANDOM$RANDOM}
+mkdir -p $TMPDIR
+cd $TMPDIR
+
+# Set WireGuard interface name
+WG_IFACE="{{.WGInterfaceName}}"
+
+echo "=== Downloading binaries (outside namespace) ==="
+
+# Download wstunnel
+echo "Downloading wstunnel..."
+if ! curl -L -f -k {{.WSTunnelExecutableURL}} -o wstunnel; then
+ echo "ERROR: Failed to download wstunnel"
+ exit 1
+fi
+chmod +x wstunnel
+
+# Download wireguard-go
+echo "Downloading wireguard-go..."
+if ! curl -L -f -k {{.WireguardGoURL}} -o wireguard-go; then
+ echo "ERROR: Failed to download wireguard-go"
+ exit 1
+fi
+chmod +x wireguard-go
+
+# Download and build wg tool
+echo "Downloading wg tool..."
+if ! curl -L -f -k {{.WgToolURL}} -o wg; then
+ echo "ERROR: Failed to download wg tools"
+ exit 1
+fi
+chmod +x wg
+
+# Download slirp4netns
+echo "Downloading slirp4netns..."
+if ! curl -L -f -k {{.Slirp4netnsURL}} -o slirp4netns; then
+ echo "ERROR: Failed to download slirp4netns"
+ exit 1
+fi
+chmod +x slirp4netns
+
+# Check if iproute2 is available
+if ! command -v ip &> /dev/null; then
+ echo "ERROR: 'ip' command not found. Please install iproute2 package"
+ exit 1
+fi
+
+# Copy ip command to tmpdir for use in namespace
+IP_CMD=$(command -v ip)
+cp $IP_CMD $TMPDIR/ || echo "Warning: could not copy ip command"
+
+echo "=== All binaries downloaded successfully ==="
+
+# Create WireGuard config with dynamic interface name
+cat <<'EOFWG' > $WG_IFACE.conf
+{{.WGConfig}}
+EOFWG
+
+# Generate the execution script that will run inside the namespace
+cat <<'EOFSLIRP' > $TMPDIR/slirp.sh
+#!/bin/bash
+set -e
+
+# Ensure PATH includes tmpdir
+export PATH=$TMPDIR:$PATH:/usr/sbin:/sbin
+
+# Get WireGuard interface name from parent
+WG_IFACE="{{.WGInterfaceName}}"
+
+echo "=== Inside network namespace ==="
+echo "Using WireGuard interface: $WG_IFACE"
+
+export WG_SOCKET_DIR="$TMPDIR"
+
+# Override /etc/resolv.conf to avoid issues with read-only filesystems
+# Not all environments support this; ignore errors
+set -euo pipefail
+
+HOST_DNS=$(grep "^nameserver" /etc/resolv.conf | head -1 | awk '{print $2}')
+
+{
+ mkdir -p /tmp/etc-override
+ echo "search default.svc.cluster.local svc.cluster.local cluster.local" > /tmp/etc-override/resolv.conf
+ echo "nameserver $HOST_DNS" >> /tmp/etc-override/resolv.conf
+ echo "nameserver {{.DNSServiceIP}}" >> /tmp/etc-override/resolv.conf
+ echo "nameserver 1.1.1.1" >> /tmp/etc-override/resolv.conf
+ echo "nameserver 8.8.8.8" >> /tmp/etc-override/resolv.conf
+ mount --bind /tmp/etc-override/resolv.conf /etc/resolv.conf
+} || {
+ rc=$?
+ echo "ERROR: one of the commands failed (exit $rc)" >&2
+ exit $rc
+}
+
+# Make filesystem private to allow bind mounts
+mount --make-rprivate / 2>/dev/null || true
+
+# Create writable /var/run with wireguard subdirectory
+mkdir -p $TMPDIR/var-run/wireguard
+mount --bind $TMPDIR/var-run /var/run
+
+cat > $TMPDIR/resolv.conf <<EOF
+nameserver {{.DNSServiceIP}}
+EOF
+
+# NOTE: intermediate setup steps of the original script were lost to
+# markup stripping here (WireGuard/wstunnel startup inside the namespace).
+
+# Determine whether unprivileged user namespaces are permitted
+    if [ -f /proc/sys/kernel/unprivileged_userns_clone ]; then
+      USERNS_ALLOWED=$(cat /proc/sys/kernel/unprivileged_userns_clone 2>/dev/null || echo "1")
+    else
+      USERNS_ALLOWED="1" # Assume allowed if file doesn't exist
+    fi
+
+ if [ "$USERNS_ALLOWED" != "1" ]; then
+ echo "User namespaces are disabled on this system"
+ UNSHARE_FLAGS=""
+ else
+ # Check for newuidmap/newgidmap and subuid/subgid support
+ if command -v newuidmap &> /dev/null && command -v newgidmap &> /dev/null && [ -f /etc/subuid ] && [ -f /etc/subgid ]; then
+ SUBUID_START=$(grep "^$(id -un):" /etc/subuid 2>/dev/null | cut -d: -f2)
+ SUBUID_COUNT=$(grep "^$(id -un):" /etc/subuid 2>/dev/null | cut -d: -f3)
+
+ if [ -n "$SUBUID_START" ] && [ -n "$SUBUID_COUNT" ] && [ "$SUBUID_COUNT" -gt 0 ]; then
+ echo "Using user namespace with UID/GID mapping (subuid available)"
+ UNSHARE_FLAGS="--user --map-user=$(id -u) --map-group=$(id -g)"
+ else
+ echo "Using user namespace with root mapping (no subuid)"
+ UNSHARE_FLAGS="--user --map-root-user"
+ fi
+ else
+ echo "Using user namespace with root mapping (no newuidmap/newgidmap)"
+ UNSHARE_FLAGS="--user --map-root-user"
+ fi
+ fi
+ ;;
+esac
+
+echo "Unshare flags: $UNSHARE_FLAGS"
+
+# Execute the script within unshare
+unshare $UNSHARE_FLAGS --net --mount $TMPDIR/slirp.sh "$@" &
+sleep 0.1
+JOBPID=$!
+echo "$JOBPID" > /tmp/slirp_jobpid
+
+# Wait for the job pid to be established
+sleep 1
+
+# Create the tap0 device with slirp4netns
+echo "Starting slirp4netns..."
+./slirp4netns --api-socket /tmp/slirp4netns_$JOBPID.sock --configure --mtu=65520 --disable-host-loopback $JOBPID tap0 &
+SLIRPPID=$!
+
+# Wait a bit for slirp4netns to be ready
+sleep 5
+
+# Bring the main job to foreground and wait for completion
+echo "=== Bringing job to foreground ==="
+fg 1
+```
+
+### Template Best Practices
+
+1. **Error Handling**: Always use `set -e` to exit on errors
+2. **Logging**: Print informative messages for each step
+3. **Binary Validation**: Check download success of binaries
+4. **Connectivity Tests**: Verify WireGuard connection before continuing
+5. **Cleanup**: Handle cleanup in trap handlers if needed
+6. **Timeouts**: Add appropriate timeout values
+7. **Conditional Logic**: Use Go template conditionals for different modes
+
+### Heredoc Format
+
+The Virtual Kubelet wraps the generated script in a heredoc for transmission:
+
+```bash
+cat <<'EOFMESH' > $TMPDIR/mesh.sh
+<generated mesh setup script content>
+EOFMESH
+chmod +x $TMPDIR/mesh.sh
+$TMPDIR/mesh.sh
+```
+
+This heredoc is then:
+1. Extracted by the SLURM plugin
+2. Written to a separate `mesh.sh` file
+3. Executed before the main job script
+
+### Advanced Customization Examples
+
+#### Adding Custom DNS Configuration
+
+```bash
+# In your custom template
+{{if .DNSServiceIP}}
+echo "Configuring DNS..."
+echo "nameserver {{.DNSServiceIP}}" > /etc/resolv.conf
+echo "search default.svc.cluster.local svc.cluster.local cluster.local" >> /etc/resolv.conf
+{{end}}
+```
+
+#### Custom MTU Detection
+
+```bash
+# Auto-detect optimal MTU
+echo "Detecting optimal MTU..."
+BASE_MTU=$(ip route get {{.IngressEndpoint}} | grep -oP 'mtu \K[0-9]+' || echo 1500)
+WG_MTU=$((BASE_MTU - 80)) # Account for WireGuard overhead
+echo "Using MTU: $WG_MTU"
+ip link set {{.WGInterfaceName}} mtu $WG_MTU
+```
+
+#### Environment-Specific Binary Downloads
+
+```bash
+{{if eq .UnshareMode "none"}}
+# HPC environment - binaries might be pre-installed
+if [ -f "/opt/wireguard/wg" ]; then
+ echo "Using pre-installed WireGuard"
+ ln -s /opt/wireguard/wg ./wg
+else
+ wget -q {{.WgToolURL}} -O wg
+ chmod +x wg
+fi
+{{end}}
+```
+
+## Security Considerations
+
+### Encryption
+
+- All traffic is encrypted using WireGuard's ChaCha20-Poly1305 cipher
+- Keys are generated using secure random number generation
+- Private keys are never transmitted; only public keys are exchanged
+
+### Authentication
+
+- wstunnel uses password-based path prefix authentication
+- Each pod gets a unique random password
+- Prevents unauthorized access to the tunnel
+
+### Network Isolation
+
+- WireGuard operates in a separate network namespace
+- Only allowed IPs can traverse the VPN
+- Server-side firewall rules restrict WireGuard port access
+
+## Troubleshooting
+
+### Common Issues
+
+#### 1. Pod Cannot Reach Cluster Services
+
+**Symptoms**: Pod starts but cannot connect to Kubernetes services
+
+**Checks**:
+- Verify `serviceCIDR` matches your cluster configuration
+- Check if WireGuard interface is up: `ip addr show wg*`
+- Verify routing: `ip route show`
+- Test WireGuard peer connectivity: `ping 10.7.0.1`
+
+#### 2. WireGuard Connection Fails
+
+**Symptoms**: WireGuard interface doesn't come up
+
+**Checks**:
+- Ensure binaries are accessible from the configured URLs
+- Check if wstunnel server is reachable
+- Verify ingress endpoint DNS resolution
+- Review pre-exec script logs in job output
+
+#### 3. DNS Resolution Not Working
+
+**Symptoms**: Cannot resolve cluster service names
+
+**Checks**:
+- Verify `dnsService` IP is correct
+- Ensure DNS traffic is routed through VPN
+- Check `/etc/resolv.conf` in the pod
+- Test direct IP connectivity first
+
+#### 4. MTU Issues
+
+**Symptoms**: Large packets fail, small packets work
+
+**Solution**: Reduce MTU in configuration:
+```yaml
+virtualNode:
+ network:
+ wgMTU: 1280 # Try lower values like 1280, 1200, etc.
+```
+
+### Debug Mode
+
+Enable verbose logging:
+
+```yaml
+VerboseLogging: true
+ErrorsOnlyLogging: false
+```
+
+Check pod annotations for generated configuration:
+```bash
+kubectl get pod -o yaml | grep -A 50 annotations
+```
+
+## Performance Considerations
+
+### MTU Optimization
+
+- Default MTU: 1280 bytes
+- Lower MTU values increase overhead but improve compatibility
+- Higher MTU values improve throughput but may cause fragmentation
+
+### Keepalive Settings
+
+- Default persistent keepalive: 25 seconds
+- Keeps NAT mappings alive
+- Adjust based on your network environment
+
+### Resource Usage
+
+Typical resource consumption per pod:
+- CPU: ~100m (mostly during setup)
+- Memory: ~90Mi for wstunnel
+- Network: Minimal overhead (~5-10% for WireGuard encryption)
+
+## Integration with SLURM Plugin
+
+The mesh networking feature integrates with the SLURM plugin through a sophisticated script handling mechanism that optimizes the job submission process.
+
+### Virtual Kubelet Side
+
+When a pod is created with mesh networking enabled:
+
+1. **Mesh Script Generation** (`mesh.go`):
+ - Generates a complete bash script for setting up the mesh network
+ - Includes WireGuard configuration, binary downloads, and network setup
+ - Wraps the script in a heredoc format for transmission
+
+2. **Annotation Addition**:
+ - Adds `slurm-job.vk.io/pre-exec` annotation to the pod
+ - Contains the heredoc-wrapped mesh script
+ - Format: `cat <<'EOFMESH' > $TMPDIR/mesh.sh ... EOFMESH`
+
+3. **Pod Patching**:
+ - Patches the pod's annotations in the Kubernetes API
+ - Makes the mesh configuration available to the SLURM plugin
+
+### SLURM Plugin Side
+
+The SLURM plugin (`prepare.go`) processes the mesh script intelligently:
+
+#### 1. Script Reception (`Create.go`)
+```go
+// In SubmitHandler, pod data including annotations are received
+var data commonIL.RetrievedPodData
+json.Unmarshal(bodyBytes, &data)
+```
+
+#### 2. Heredoc Extraction (`prepare.go`, lines 1067-1100)
+
+The plugin performs smart heredoc handling:
+
+```go
+if preExecAnnotations, ok := metadata.Annotations["slurm-job.vk.io/pre-exec"]; ok {
+ // Check if pre-exec contains a heredoc that creates mesh.sh
+ if strings.Contains(preExecAnnotations, "cat <<'EOFMESH' > $TMPDIR/mesh.sh") {
+ // Extract the heredoc content
+ meshScript, err := extractHeredoc(preExecAnnotations, "EOFMESH")
+ if err == nil && meshScript != "" {
+ // Write mesh script to separate file
+ meshPath := filepath.Join(path, "mesh.sh")
+ os.WriteFile(meshPath, []byte(meshScript), 0755)
+
+ // Remove heredoc from pre-exec and add mesh.sh call
+ preExecWithoutHeredoc := removeHeredoc(preExecAnnotations, "EOFMESH")
+ prefix += "\n" + preExecWithoutHeredoc + "\n" + meshPath
+ }
+ }
+}
+```
+
+**Why This Approach?**
+- **File Size Optimization**: Avoids embedding large heredocs directly in the SLURM script
+- **Readability**: Keeps the SLURM script cleaner and more maintainable
+- **Execution Efficiency**: Allows the mesh script to be executed as a standalone file
+- **Debugging**: Makes it easier to inspect and debug the mesh script separately
+
+#### 3. SLURM Script Generation
+
+The final SLURM script structure:
+
+```bash
+#!/bin/bash
+#SBATCH --job-name=
+#SBATCH --output=/job.out
+#SBATCH --cpus-per-task=
+#SBATCH --mem=
+
+# Pre-exec section (mesh script call)
+/mesh.sh
+
+# Call main job script
+/job.sh
+```
+
+The `job.sh` contains:
+- Helper functions (waitFileExist, runInitCtn, runCtn, etc.)
+- Pod and container identification
+- Container runtime commands (Singularity/Enroot)
+- Probe scripts (if enabled)
+- Cleanup and exit handling
+
+### Script Execution Flow
+
+1. **SLURM Scheduler** allocates resources and starts the job
+2. **job.slurm** is executed by SLURM
+3. **Pre-exec** section runs:
+ - Executes `mesh.sh` to set up networking
+ - Downloads binaries (wstunnel, wireguard-go, wg, slirp4netns)
+ - Creates network namespaces
+ - Configures WireGuard interface
+ - Establishes wstunnel connection
+ - Sets up routing tables
+4. **job.sh** is executed after networking is ready:
+ - Runs init containers sequentially
+ - Starts regular containers in background
+ - Monitors container health (if probes enabled)
+ - Waits for all containers to complete
+ - Reports highest exit code
+
+### Error Handling
+
+The plugin includes robust error handling:
+
+- **Script Generation Failures**: Return HTTP 500, clean up created files
+- **Mount Preparation Errors**: Return HTTP 502 (Bad Gateway)
+- **SLURM Submission Failures**: Clean up job directory, return error
+- **File Permission Errors**: Log warnings but continue execution
+
+### Monitoring and Debugging
+
+#### View Generated Scripts
+
+The plugin creates all scripts in the data root folder:
+```bash
+ls -la /slurm-data/-/
+cat /slurm-data/-/mesh.sh
+cat /slurm-data/-/job.slurm
+cat /slurm-data/-/job.sh
+```
+
+#### Check Job Output
+
+```bash
+# View SLURM job output
+cat /slurm-data/-/job.out
+
+# View container outputs
+cat /slurm-data/-/run-.out
+
+# Check container exit codes
+cat /slurm-data/-/run-.status
+```
+
+## Example: Complete Configuration
+
+```yaml
+virtualNode:
+ image: ghcr.io/interlink-hq/interlink/virtual-kubelet:latest
+ resources:
+ CPUs: 4
+ memGiB: 16
+ pods: 50
+
+ network:
+ # Enable full mesh networking
+ fullMesh: true
+
+ # Cluster network configuration
+ serviceCIDR: "10.105.0.0/16"
+ podCIDRCluster: "10.244.0.0/16"
+ dnsService: "10.244.0.99"
+
+ # WireGuard configuration
+ wgMTU: 1280
+ keepaliveSecs: 25
+
+ # Unshare mode
+ unshareMode: "auto"
+
+ # Binary URLs (optional - uses defaults if not specified)
+ wireguardGoURL: "https://github.com/interlink-hq/interlink-artifacts/raw/main/wireguard-go/v0.0.20201118/linux-amd64/wireguard-go"
+ wgToolURL: "https://github.com/interlink-hq/interlink-artifacts/raw/main/wgtools/v1.0.20210914/linux-amd64/wg"
+ wstunnelExecutableURL: "https://github.com/interlink-hq/interlink-artifacts/raw/main/wstunnel/v10.4.4/linux-amd64/wstunnel"
+ slirp4netnsURL: "https://github.com/interlink-hq/interlink-artifacts/raw/main/slirp4netns/v1.2.3/linux-amd64/slirp4netns"
+
+ # Tunnel configuration
+ enableTunnel: true
+ tunnelImage: "ghcr.io/erebe/wstunnel:latest"
+ wildcardDNS: "example.com"
+```
+
+## Comparison: Full Mesh vs. Port Forwarding
+
+| Feature | Full Mesh | Port Forwarding (Non-Mesh) |
+|---------|-----------|---------------------------|
+| **Connectivity** | Full cluster access | Specific exposed ports only |
+| **Service Discovery** | Native DNS | Manual port mapping |
+| **Protocols** | TCP, UDP, ICMP | TCP only (typically) |
+| **Complexity** | Higher setup | Simpler setup |
+| **Use Case** | Complex multi-service apps | Simple web services |
+| **Performance** | Slight overhead (VPN) | Direct forwarding |
+
+## References
+
+### Related Technologies
+
+- **WireGuard**: https://www.wireguard.com/
+- **wstunnel**: https://github.com/erebe/wstunnel
+- **slirp4netns**: https://github.com/rootless-containers/slirp4netns
+
+### RFCs and Standards
+
+- RFC 7748: Elliptic Curves for Security (X25519)
+- RFC 1123: Requirements for Internet Hosts
+- RFC 1918: Address Allocation for Private Internets
+
+### Source Code References
+
+- `mesh.go`: Core mesh networking implementation
+- `templates/mesh.sh`: Default mesh setup script template
+- `virtualkubelet.go`: Main Virtual Kubelet provider implementation
diff --git a/docs/versioned_docs/version-0.6.x/guides/_category_.json b/docs/versioned_docs/version-0.6.x/guides/_category_.json
new file mode 100644
index 00000000..d7405889
--- /dev/null
+++ b/docs/versioned_docs/version-0.6.x/guides/_category_.json
@@ -0,0 +1,8 @@
+{
+ "label": "Admin guides",
+ "position": 4,
+ "link": {
+ "type": "generated-index",
+ "description": "Learn how to deploy and adapt interLink plugins for your use case."
+ }
+}
diff --git a/docs/versioned_docs/version-0.6.x/guides/img/dashboard.png b/docs/versioned_docs/version-0.6.x/guides/img/dashboard.png
new file mode 100644
index 00000000..fa7fa612
Binary files /dev/null and b/docs/versioned_docs/version-0.6.x/guides/img/dashboard.png differ
diff --git a/docs/versioned_docs/version-0.6.x/guides/img/docsVersionDropdown.png b/docs/versioned_docs/version-0.6.x/guides/img/docsVersionDropdown.png
new file mode 100644
index 00000000..97e41646
Binary files /dev/null and b/docs/versioned_docs/version-0.6.x/guides/img/docsVersionDropdown.png differ
diff --git a/docs/versioned_docs/version-0.6.x/guides/img/high-level-architecture-diagram.png b/docs/versioned_docs/version-0.6.x/guides/img/high-level-architecture-diagram.png
new file mode 100644
index 00000000..dbf25d03
Binary files /dev/null and b/docs/versioned_docs/version-0.6.x/guides/img/high-level-architecture-diagram.png differ
diff --git a/docs/versioned_docs/version-0.6.x/guides/img/iam-client0.png b/docs/versioned_docs/version-0.6.x/guides/img/iam-client0.png
new file mode 100644
index 00000000..dcbf94ff
Binary files /dev/null and b/docs/versioned_docs/version-0.6.x/guides/img/iam-client0.png differ
diff --git a/docs/versioned_docs/version-0.6.x/guides/img/iam-client1.png b/docs/versioned_docs/version-0.6.x/guides/img/iam-client1.png
new file mode 100644
index 00000000..c65dbe17
Binary files /dev/null and b/docs/versioned_docs/version-0.6.x/guides/img/iam-client1.png differ
diff --git a/docs/versioned_docs/version-0.6.x/guides/img/iam-client2.png b/docs/versioned_docs/version-0.6.x/guides/img/iam-client2.png
new file mode 100644
index 00000000..431c488a
Binary files /dev/null and b/docs/versioned_docs/version-0.6.x/guides/img/iam-client2.png differ
diff --git a/docs/versioned_docs/version-0.6.x/guides/img/localeDropdown.png b/docs/versioned_docs/version-0.6.x/guides/img/localeDropdown.png
new file mode 100644
index 00000000..e257edc1
Binary files /dev/null and b/docs/versioned_docs/version-0.6.x/guides/img/localeDropdown.png differ
diff --git a/docs/versioned_docs/version-0.6.x/guides/img/mesh-overlay-network.png b/docs/versioned_docs/version-0.6.x/guides/img/mesh-overlay-network.png
new file mode 100644
index 00000000..f363fd81
Binary files /dev/null and b/docs/versioned_docs/version-0.6.x/guides/img/mesh-overlay-network.png differ
diff --git a/docs/versioned_docs/version-0.6.x/guides/img/vk_tracing.png b/docs/versioned_docs/version-0.6.x/guides/img/vk_tracing.png
new file mode 100644
index 00000000..42a37443
Binary files /dev/null and b/docs/versioned_docs/version-0.6.x/guides/img/vk_tracing.png differ
diff --git a/docs/versioned_docs/version-0.6.x/intro.mdx b/docs/versioned_docs/version-0.6.x/intro.mdx
new file mode 100644
index 00000000..9b895c8c
--- /dev/null
+++ b/docs/versioned_docs/version-0.6.x/intro.mdx
@@ -0,0 +1,192 @@
+---
+sidebar_position: 1
+---
+
+import ThemedImage from "@theme/ThemedImage";
+import useBaseUrl from "@docusaurus/useBaseUrl";
+
+# Introduction
+
+[](https://img.shields.io/github/license/interlink-hq/interlink)
+
+
+
+
+[](https://goreportcard.com/report/github.com/interlink-hq/interlink)
+
+[](https://join.slack.com/t/intertwin/shared_invite/zt-2cs67h9wz-2DFQ6EiSQGS1vlbbbJHctA)
+
+:::warning
+
+interLink is in early development phase, thus subject to breaking changes with
+no guarantee of backward compatibility.
+
+:::
+
+## Overview
+
+interLink enables whoever is running a Kubernetes cluster (referred to as
+"local" here) to offload some of its containers for execution on other (remote)
+systems, e.g. another Kubernetes cluster, an HPC cluster, any other batch
+system, or VMs.
+
+Usually the best fit for such an architecture are batch-like (or "job") payloads
+-- with a pre-defined lifecycle, non-interactive containers (see
+[Targets](#target-applications)). The dispatching to the other (remote) system
+is done through a combination of [Virtual Kubelets](https://virtual-kubelet.io/)
+interface and [interLink API plugins spec](./guides/03-api-reference.mdx).
+Plugins will define how the containers will run on the remote system (see
+[Target providers](#providers)).
+
+InterLink API and the plugin can be arranged in three different ways across the
+local cluster and the remote part:
+
+- both deployed remote (**[Edge-node](#edge-node)**)
+- both deployed local (**[In-cluster](#in-cluster)**)
+- API local, plugin remote (**[Tunneled](#tunneled)**)
+
+```
++---------------------------+ +----------------------------+
+| Virtual K8s Node | | Containers Runtime |
+| | | |
+| | | |
+| | | |
+| +-----------------------------------------+ |
+| | (API + plugin) interLink | |
+| | (API) interLink (plugin) | |
+| | interLink (API + plugin) | |
+| +-----------------------------------------+ |
+| | | |
+| | | |
+| | | |
+| | | |
+| | | |
+| | | |
++---------------------------+ +----------------------------+
+```
+
+For more information visit the [architecture page](arch)
+
+## Target applications
+
+- **K8s applications with tasks to be executed on HPC systems**: This target
+ focuses on Kubernetes applications that require high-performance computing
+ (HPC) resources for executing tasks (AI training and inference, ML algorithm
+ optimizations etc). These tasks might involve complex computations,
+ simulations, or data processing that benefit from the specialized hardware and
+ optimized performance of HPC systems.
+
+- **Remote "runner"-like application for heavy payload execution requiring
+ GPUs**: This target is designed for applications that need to execute heavy
+ computational payloads, particularly those requiring GPU resources. These
+ applications can be run remotely, leveraging powerful GPU hardware to handle
+ tasks such as machine learning model training, data analysis, or rendering.
+
+- **Lambda-like functions calling on external resources**: This target involves
+ running containers on demand with specific computing needs. Now these
+ resources might also be outside of the Kubernetes cluster thanks to interLink
+ functionality.
+
+## Providers
+
+interLink is designed to ease the work required to include new remote providers.
+It already targets a wide range of providers with container execution
+capabilities, including but not limited to:
+
+- **SLURM or HTCondor batch systems with Apptainer, Enroot, or Singularity**:
+ These batch systems are widely used in high-performance computing environments
+ to manage and schedule jobs. By integrating with container runtimes like
+ Apptainer, Enroot, or Singularity, our solution can efficiently execute
+ containerized tasks on these systems.
+- **On-demand virtual machines with any container runtime**: This includes
+ virtual machines that can be provisioned on-demand and support container
+ runtimes such as Docker, Podman, or others. This flexibility allows for
+ scalable and dynamic resource allocation based on workload requirements.
+- **Remote Kubernetes clusters**: Our solution can extend the capabilities of
+ existing Kubernetes clusters, enabling them to offload workloads to another
+ remote cluster. This is particularly useful for distributing workloads across
+ multiple clusters for better resource utilization and fault tolerance.
+- **Lambda-like services**: These are serverless computing services that execute
+ code in response to events and automatically manage the underlying compute
+ resources. By targeting these services, our solution can leverage the
+ scalability and efficiency of serverless architectures for containerized
+ workloads. All of this, while exposing a bare Kubernetes API kind of
+ orchestration.
+
+## NOT a target
+
+- **Long-running services**: Our solution is not designed for services that need
+ to run continuously for extended periods. It is optimized for tasks that have
+ a defined start and end, rather than persistent services exposing
+ intra-cluster communication endpoints.
+- **Kubernetes Federation**: We do not aim to support Kubernetes Federation,
+ which involves managing multiple Kubernetes clusters as a single entity. Our
+ focus is on enabling Kubernetes pods to execute on remote resources, not on
+  federating all kinds of resources across multiple clusters.
+
+## Deployment scenarios
+
+### Edge-node
+
+In this scenario, the Virtual Kubelet communicates with remote services deployed
+on a dedicated edge node exposing authenticated interLink APIs and its
+associated plugin. This setup is ideal for scenarios where edge computing
+resources are utilized for controlled communication between the Kubernetes
+cluster and the remote resources.
+
+
+
+:::note
+
+Get started with edge-node deployment [here](./cookbook/1-edge.mdx)
+
+:::
+
+### In-cluster
+
+This scenario involves deploying a Virtual Kubelet along with the interLink API
+server and the plugin to interact with a remote API. This setup allows
+Kubernetes pods to be executed on remote resources while all other components
+sit inside the Kubernetes cluster.
+
+
+
+:::note
+
+Get started with in-cluster deployment [here](./cookbook/2-incluster.mdx)
+
+:::
+
+### Tunneled
+
+This deployment involves the Virtual Kubelet connecting to a remote interLink
+API server and its plugin through a secure tunnel. This setup ensures secure
+communication between the Kubernetes cluster and the remote resources, making it
+suitable for environments with strict security requirements or to host services
+on a multi user host like a login node.
+
+
+
+:::note
+
+Get started with tunneled deployment [here](./cookbook/3-tunneled.mdx)
+
+:::
diff --git a/docs/versioned_sidebars/version-0.6.x-sidebars.json b/docs/versioned_sidebars/version-0.6.x-sidebars.json
new file mode 100644
index 00000000..caea0c03
--- /dev/null
+++ b/docs/versioned_sidebars/version-0.6.x-sidebars.json
@@ -0,0 +1,8 @@
+{
+ "tutorialSidebar": [
+ {
+ "type": "autogenerated",
+ "dirName": "."
+ }
+ ]
+}
diff --git a/docs/versions.json b/docs/versions.json
index 75062ec7..5ccfa7ce 100644
--- a/docs/versions.json
+++ b/docs/versions.json
@@ -1,4 +1,5 @@
[
+ "0.6.x",
"0.5.x",
"0.4.x"
]
diff --git a/docs/yarn.lock b/docs/yarn.lock
index 3afebb0f..fe6430ad 100644
--- a/docs/yarn.lock
+++ b/docs/yarn.lock
@@ -84,7 +84,7 @@
"@algolia/requester-common" "4.22.0"
"@algolia/transporter" "4.22.0"
-"@algolia/client-search@>= 4.9.1 < 6", "@algolia/client-search@4.22.0":
+"@algolia/client-search@4.22.0":
version "4.22.0"
resolved "https://registry.npmjs.org/@algolia/client-search/-/client-search-4.22.0.tgz"
integrity sha512-bn4qQiIdRPBGCwsNuuqB8rdHhGKKWIij9OqidM1UkQxnSG8yzxHdb7CujM30pvp5EnV7jTqDZRbxacbjYVW20Q==
@@ -159,7 +159,7 @@
resolved "https://registry.npmjs.org/@babel/compat-data/-/compat-data-7.23.5.tgz"
integrity sha512-uU27kfDRlhfKl+w1U6vp16IuvSLtjAxdArVXPa9BvLkrr7CYIsxH5adpHObeAGY/41+syctUWOZ140a2Rvkgjw==
-"@babel/core@^7.0.0", "@babel/core@^7.0.0-0", "@babel/core@^7.0.0-0 || ^8.0.0-0 <8.0.0", "@babel/core@^7.12.0", "@babel/core@^7.13.0", "@babel/core@^7.19.6", "@babel/core@^7.23.3", "@babel/core@^7.4.0 || ^8.0.0-0 <8.0.0":
+"@babel/core@^7.19.6", "@babel/core@^7.23.3":
version "7.23.6"
resolved "https://registry.npmjs.org/@babel/core/-/core-7.23.6.tgz"
integrity sha512-FxpRyGjrMJXh7X3wGLGhNDCRiwpWEF74sKjTLDJSG5Kyvow3QZaG0Adbqzi9ZrVjTWpsX+2cxWXD71NMg93kdw==
@@ -1543,7 +1543,7 @@
tslib "^2.6.0"
utility-types "^3.10.0"
-"@docusaurus/theme-common@^3.0.0", "@docusaurus/theme-common@3.0.1":
+"@docusaurus/theme-common@3.0.1":
version "3.0.1"
resolved "https://registry.npmjs.org/@docusaurus/theme-common/-/theme-common-3.0.1.tgz"
integrity sha512-cr9TOWXuIOL0PUfuXv6L5lPlTgaphKP+22NdVBOYah5jSq5XAAulJTjfe+IfLsEG4L7lJttLbhW7LXDFSAI7Ag==
@@ -1599,7 +1599,7 @@
resolved "https://registry.npmjs.org/@docusaurus/tsconfig/-/tsconfig-3.0.1.tgz"
integrity sha512-hT2HCdNE3pWTzXV/7cSsowfmaOxXVOTFOXmkqaYjBWjaxjJ3FO0nHbdJ8rF6Da7PvWmIPbUekdP5gep1XCJ7Vg==
-"@docusaurus/types@*", "@docusaurus/types@3.0.1":
+"@docusaurus/types@3.0.1":
version "3.0.1"
resolved "https://registry.npmjs.org/@docusaurus/types/-/types-3.0.1.tgz"
integrity sha512-plyX2iU1tcUsF46uQ01pAd4JhexR7n0iiQ5MSnBFX6M6NSJgDYdru/i1/YNPKOnQHBoXGLHv0dNT6OAlDWNjrg==
@@ -1631,7 +1631,7 @@
js-yaml "^4.1.0"
tslib "^2.6.0"
-"@docusaurus/utils@^3.0.0", "@docusaurus/utils@3.0.1":
+"@docusaurus/utils@3.0.1":
version "3.0.1"
resolved "https://registry.npmjs.org/@docusaurus/utils/-/utils-3.0.1.tgz"
integrity sha512-TwZ33Am0q4IIbvjhUOs+zpjtD/mXNmLmEgeTGuRq01QzulLHuPhaBTTAC/DHu6kFx3wDgmgpAlaRuCHfTcXv8g==
@@ -1678,31 +1678,6 @@
dependencies:
eslint-visitor-keys "^3.3.0"
-"@eslint-community/regexpp@^4.6.1":
- version "4.11.1"
- resolved "https://registry.npmjs.org/@eslint-community/regexpp/-/regexpp-4.11.1.tgz"
- integrity sha512-m4DVN9ZqskZoLU5GlWZadwDnYo3vAEydiUayB9widCl9ffWx2IvPnp6n3on5rJmziJSw9Bv+Z3ChDVdMwXCY8Q==
-
-"@eslint/eslintrc@^2.1.4":
- version "2.1.4"
- resolved "https://registry.npmjs.org/@eslint/eslintrc/-/eslintrc-2.1.4.tgz"
- integrity sha512-269Z39MS6wVJtsoUl10L60WdkhJVdPG24Q4eZTH3nnF6lpvSShEK3wQjDX9JRWAUPvPh7COouPpU9IrqaZFvtQ==
- dependencies:
- ajv "^6.12.4"
- debug "^4.3.2"
- espree "^9.6.0"
- globals "^13.19.0"
- ignore "^5.2.0"
- import-fresh "^3.2.1"
- js-yaml "^4.1.0"
- minimatch "^3.1.2"
- strip-json-comments "^3.1.1"
-
-"@eslint/js@8.57.1":
- version "8.57.1"
- resolved "https://registry.npmjs.org/@eslint/js/-/js-8.57.1.tgz"
- integrity sha512-d9zaMRSTIKDLhctzH12MtXvJKSSUhaHcjV+2Z+GK+EEY7XKpP5yR4x+N3TAcHTcu963nIr+TMcCb4DBCYX1z6Q==
-
"@exodus/schemasafe@^1.0.0-rc.2":
version "1.3.0"
resolved "https://registry.npmjs.org/@exodus/schemasafe/-/schemasafe-1.3.0.tgz"
@@ -1720,25 +1695,6 @@
dependencies:
"@hapi/hoek" "^9.0.0"
-"@humanwhocodes/config-array@^0.13.0":
- version "0.13.0"
- resolved "https://registry.npmjs.org/@humanwhocodes/config-array/-/config-array-0.13.0.tgz"
- integrity sha512-DZLEEqFWQFiyK6h5YIeynKx7JlvCYWL0cImfSRXZ9l4Sg2efkFGTuFf6vzXjK1cq6IYkU+Eg/JizXw+TD2vRNw==
- dependencies:
- "@humanwhocodes/object-schema" "^2.0.3"
- debug "^4.3.1"
- minimatch "^3.0.5"
-
-"@humanwhocodes/module-importer@^1.0.1":
- version "1.0.1"
- resolved "https://registry.npmjs.org/@humanwhocodes/module-importer/-/module-importer-1.0.1.tgz"
- integrity sha512-bxveV4V8v5Yb4ncFTT3rPSgZBOpCkjfK0y4oVVVJwIuDVBRMDXrPyXRL988i5ap9m9bnyEEjWfm5WkBmtffLfA==
-
-"@humanwhocodes/object-schema@^2.0.3":
- version "2.0.3"
- resolved "https://registry.npmjs.org/@humanwhocodes/object-schema/-/object-schema-2.0.3.tgz"
- integrity sha512-93zYdMES/c1D69yZiKDBj0V24vqNzB/koF26KPaagAfd3P/4gUlh3Dys5ogAK+Exi9QyzlD8x/08Zt7wIKcDcA==
-
"@jest/schemas@^29.6.3":
version "29.6.3"
resolved "https://registry.npmjs.org/@jest/schemas/-/schemas-29.6.3.tgz"
@@ -1847,12 +1803,12 @@
"@nodelib/fs.stat" "2.0.5"
run-parallel "^1.1.9"
-"@nodelib/fs.stat@^2.0.2", "@nodelib/fs.stat@2.0.5":
+"@nodelib/fs.stat@2.0.5", "@nodelib/fs.stat@^2.0.2":
version "2.0.5"
resolved "https://registry.npmjs.org/@nodelib/fs.stat/-/fs.stat-2.0.5.tgz"
integrity sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A==
-"@nodelib/fs.walk@^1.2.3", "@nodelib/fs.walk@^1.2.8":
+"@nodelib/fs.walk@^1.2.3":
version "1.2.8"
resolved "https://registry.npmjs.org/@nodelib/fs.walk/-/fs.walk-1.2.8.tgz"
integrity sha512-oGB+UxlgWcgQkgwo8GcEGwemoTFt3FIO9ababBmaGwXIoBKZ+GTy0pP185beGg7Llih/NSHSV2XAs1lnznocSg==
@@ -1901,7 +1857,7 @@
resolved "https://registry.npmjs.org/@redocly/config/-/config-0.6.3.tgz"
integrity sha512-hGWJgCsXRw0Ow4rplqRlUQifZvoSwZipkYnt11e3SeH1Eb23VUIDBcRuaQOUqy1wn0eevXkU2GzzQ8fbKdQ7Mg==
-"@redocly/openapi-core@^1.4.0", "@redocly/openapi-core@1.16.0":
+"@redocly/openapi-core@1.16.0", "@redocly/openapi-core@^1.4.0":
version "1.16.0"
resolved "https://registry.npmjs.org/@redocly/openapi-core/-/openapi-core-1.16.0.tgz"
integrity sha512-z06h+svyqbUcdAaePq8LPSwTPlm6Ig7j2VlL8skPBYnJvyaQ2IN7x/JkOvRL4ta+wcOCBdAex5JWnZbKaNktJg==
@@ -2022,7 +1978,7 @@
"@svgr/babel-plugin-transform-react-native-svg" "^6.5.1"
"@svgr/babel-plugin-transform-svg-component" "^6.5.1"
-"@svgr/core@*", "@svgr/core@^6.0.0", "@svgr/core@^6.5.1":
+"@svgr/core@^6.5.1":
version "6.5.1"
resolved "https://registry.npmjs.org/@svgr/core/-/core-6.5.1.tgz"
integrity sha512-/xdLSWxK5QkqG524ONSjvg3V/FkNyCv538OIBdQqPNaAta3AsXj/Bd2FbvR87yMbXO2hFSWiAe/Q6IkVPDw+mw==
@@ -2333,7 +2289,7 @@
"@types/history" "^4.7.11"
"@types/react" "*"
-"@types/react@*", "@types/react@>= 16.8.0 < 19.0.0", "@types/react@>=16":
+"@types/react@*":
version "18.2.45"
resolved "https://registry.npmjs.org/@types/react/-/react-18.2.45.tgz"
integrity sha512-TtAxCNrlrBp8GoeEp1npd5g+d/OejJHFxS3OWmrPBMFaVQMSN0OFySozJio5BHxTuTeug00AVXVAjfDSfk+lUg==
@@ -2477,12 +2433,12 @@
"@typescript-eslint/types" "5.62.0"
eslint-visitor-keys "^3.3.0"
-"@ungap/structured-clone@^1.0.0", "@ungap/structured-clone@^1.2.0":
+"@ungap/structured-clone@^1.0.0":
version "1.2.0"
resolved "https://registry.npmjs.org/@ungap/structured-clone/-/structured-clone-1.2.0.tgz"
integrity sha512-zuVdFrMJiuCDQUMCzQaD6KL28MjnqqN8XnAqiEq9PNm/hCPTSGfrXCOfwj1ow4LFb/tNymJPwsNbVePc1xFqrQ==
-"@webassemblyjs/ast@^1.11.5", "@webassemblyjs/ast@1.11.6":
+"@webassemblyjs/ast@1.11.6", "@webassemblyjs/ast@^1.11.5":
version "1.11.6"
resolved "https://registry.npmjs.org/@webassemblyjs/ast/-/ast-1.11.6.tgz"
integrity sha512-IN1xI7PwOvLPgjcf180gC1bqn3q/QaOCwYUahIOhbYUu8KA/3tw2RT/T0Gidi1l7Hhj5D/INhJxiICObqpMu4Q==
@@ -2583,7 +2539,7 @@
"@webassemblyjs/wasm-gen" "1.11.6"
"@webassemblyjs/wasm-parser" "1.11.6"
-"@webassemblyjs/wasm-parser@^1.11.5", "@webassemblyjs/wasm-parser@1.11.6":
+"@webassemblyjs/wasm-parser@1.11.6", "@webassemblyjs/wasm-parser@^1.11.5":
version "1.11.6"
resolved "https://registry.npmjs.org/@webassemblyjs/wasm-parser/-/wasm-parser-1.11.6.tgz"
integrity sha512-6ZwPeGzMJM3Dqp3hCsLgESxBGtT/OeCvCZ4TA1JUPYgmhAx38tTPR9JaKy0S5H3evQpO/h2uWs2j6Yc/fjkpTQ==
@@ -2626,7 +2582,7 @@ acorn-import-assertions@^1.9.0:
resolved "https://registry.npmjs.org/acorn-import-assertions/-/acorn-import-assertions-1.9.0.tgz"
integrity sha512-cmMwop9x+8KFhxvKrKfPYmN6/pKTYYHBqLa0DfvVZcKMJWNyWLnaqND7dx/qn66R7ewM1UX5XMaDVP5wlVTaVA==
-acorn-jsx@^5.0.0, acorn-jsx@^5.3.2:
+acorn-jsx@^5.0.0:
version "5.3.2"
resolved "https://registry.npmjs.org/acorn-jsx/-/acorn-jsx-5.3.2.tgz"
integrity sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ==
@@ -2636,7 +2592,7 @@ acorn-walk@^8.0.0:
resolved "https://registry.npmjs.org/acorn-walk/-/acorn-walk-8.3.1.tgz"
integrity sha512-TgUZgYvqZprrl7YldZNoa9OciCAyZR+Ejm9eXzKCmjsF5IKp/wgQ7Z/ZpjpGTIUPwrHQIcYeI8qDh4PsEwxMbw==
-"acorn@^6.0.0 || ^7.0.0 || ^8.0.0", acorn@^8, acorn@^8.0.0, acorn@^8.0.4, acorn@^8.7.1, acorn@^8.8.2, acorn@^8.9.0:
+acorn@^8.0.0, acorn@^8.0.4, acorn@^8.7.1, acorn@^8.8.2:
version "8.11.2"
resolved "https://registry.npmjs.org/acorn/-/acorn-8.11.2.tgz"
integrity sha512-nc0Axzp/0FILLEVsm4fNwLCwMttvhEI263QtVPQcbpfZZ3ts0hLsZGOpE6czNlid7CJ9MlyH8reXkpsf3YUY4w==
@@ -2668,12 +2624,7 @@ ajv-formats@^2.1.1:
dependencies:
ajv "^8.0.0"
-ajv-keywords@^3.4.1:
- version "3.5.2"
- resolved "https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-3.5.2.tgz"
- integrity sha512-5p6WTN0DdTGVQk6VjcEju19IgaHudalcfabD7yhDGeA6bcQnmL+CpveLJq/3hvfwd1aof6L386Ougkx6RfyMIQ==
-
-ajv-keywords@^3.5.2:
+ajv-keywords@^3.4.1, ajv-keywords@^3.5.2:
version "3.5.2"
resolved "https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-3.5.2.tgz"
integrity sha512-5p6WTN0DdTGVQk6VjcEju19IgaHudalcfabD7yhDGeA6bcQnmL+CpveLJq/3hvfwd1aof6L386Ougkx6RfyMIQ==
@@ -2685,17 +2636,7 @@ ajv-keywords@^5.1.0:
dependencies:
fast-deep-equal "^3.1.3"
-ajv@^6.12.2, ajv@^6.12.5, ajv@^6.9.1:
- version "6.12.6"
- resolved "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz"
- integrity sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==
- dependencies:
- fast-deep-equal "^3.1.1"
- fast-json-stable-stringify "^2.0.0"
- json-schema-traverse "^0.4.1"
- uri-js "^4.2.2"
-
-ajv@^6.12.4:
+ajv@^6.12.2, ajv@^6.12.5:
version "6.12.6"
resolved "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz"
integrity sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==
@@ -2705,7 +2646,7 @@ ajv@^6.12.4:
json-schema-traverse "^0.4.1"
uri-js "^4.2.2"
-ajv@^8.0.0, ajv@^8.8.2, ajv@^8.9.0:
+ajv@^8.0.0, ajv@^8.9.0:
version "8.12.0"
resolved "https://registry.npmjs.org/ajv/-/ajv-8.12.0.tgz"
integrity sha512-sRu1kpcO9yLtYxBKvqfTeh9KzZEwO3STyX1HT+4CaDzC6HpTGYhIhPIzj9XuKU7KYDwnaeh5hcOwjy1QuJzBPA==
@@ -2722,7 +2663,7 @@ algoliasearch-helper@^3.13.3:
dependencies:
"@algolia/events" "^4.0.1"
-algoliasearch@^4.18.0, algoliasearch@^4.19.1, "algoliasearch@>= 3.1 < 6", "algoliasearch@>= 4.9.1 < 6":
+algoliasearch@^4.18.0, algoliasearch@^4.19.1:
version "4.22.0"
resolved "https://registry.npmjs.org/algoliasearch/-/algoliasearch-4.22.0.tgz"
integrity sha512-gfceltjkwh7PxXwtkS8KVvdfK+TSNQAWUeNSxf4dA29qW5tf2EGwa8jkJujlT9jLm17cixMVoGNc+GJFO1Mxhg==
@@ -2816,43 +2757,21 @@ array-buffer-byte-length@^1.0.1:
call-bind "^1.0.5"
is-array-buffer "^3.0.4"
-array-flatten@^2.1.2:
- version "2.1.2"
- resolved "https://registry.npmjs.org/array-flatten/-/array-flatten-2.1.2.tgz"
- integrity sha512-hNfzcOV8W4NdualtqBFPyVO+54DSJuZGY9qT4pRroB6S9e3iiido2ISIC5h9R2sPJ8H3FHCIiEnsv1lPXO3KtQ==
-
array-flatten@1.1.1:
version "1.1.1"
resolved "https://registry.npmjs.org/array-flatten/-/array-flatten-1.1.1.tgz"
integrity sha512-PCVAQswWemu6UdxsDFFX/+gVeYqKAod3D3UVm91jHwynguOwAvYPhx8nNlM++NqRcK6CxxpUafjmhIdKiHibqg==
+array-flatten@^2.1.2:
+ version "2.1.2"
+ resolved "https://registry.npmjs.org/array-flatten/-/array-flatten-2.1.2.tgz"
+ integrity sha512-hNfzcOV8W4NdualtqBFPyVO+54DSJuZGY9qT4pRroB6S9e3iiido2ISIC5h9R2sPJ8H3FHCIiEnsv1lPXO3KtQ==
+
array-union@^2.1.0:
version "2.1.0"
resolved "https://registry.npmjs.org/array-union/-/array-union-2.1.0.tgz"
integrity sha512-HGyxoOTYUyCM6stUe6EJgnd4EoewAI7zMdfqO+kGjnlZmBDz/cR5pf8r/cR4Wq60sL/p0IkcjUEEPwS3GFrIyw==
-array.prototype.filter@^1.0.0:
- version "1.0.4"
- resolved "https://registry.npmjs.org/array.prototype.filter/-/array.prototype.filter-1.0.4.tgz"
- integrity sha512-r+mCJ7zXgXElgR4IRC+fkvNCeoaavWBs6EdCso5Tbcf+iEMKzBU/His60lt34WEZ9vlb8wDkZvQGcVI5GwkfoQ==
- dependencies:
- call-bind "^1.0.7"
- define-properties "^1.2.1"
- es-abstract "^1.23.2"
- es-array-method-boxes-properly "^1.0.0"
- es-object-atoms "^1.0.0"
- is-string "^1.0.7"
-
-array.prototype.flat@^1.2.3:
- version "1.3.2"
- resolved "https://registry.npmjs.org/array.prototype.flat/-/array.prototype.flat-1.3.2.tgz"
- integrity sha512-djYB+Zx2vLewY8RWlNCUdHjDXs2XOgm602S9E7P/UpHgfeHL00cRiIF+IN/G/aUJ7kGPb6yO/ErDI5V2s8iycA==
- dependencies:
- call-bind "^1.0.2"
- define-properties "^1.2.0"
- es-abstract "^1.22.1"
- es-shim-unscopables "^1.0.0"
-
arraybuffer.prototype.slice@^1.0.3:
version "1.0.3"
resolved "https://registry.npmjs.org/arraybuffer.prototype.slice/-/arraybuffer.prototype.slice-1.0.3.tgz"
@@ -3043,7 +2962,7 @@ braces@^3.0.2, braces@~3.0.2:
dependencies:
fill-range "^7.0.1"
-browserslist@^4.0.0, browserslist@^4.14.5, browserslist@^4.18.1, browserslist@^4.21.10, browserslist@^4.21.4, browserslist@^4.22.2, "browserslist@>= 4.21.0":
+browserslist@^4.0.0, browserslist@^4.14.5, browserslist@^4.18.1, browserslist@^4.21.10, browserslist@^4.21.4, browserslist@^4.22.2:
version "4.22.2"
resolved "https://registry.npmjs.org/browserslist/-/browserslist-4.22.2.tgz"
integrity sha512-0UgcrvQmBDvZHFGdYUehrCNIazki7/lUP3kkoi/r3YB2amZbFM9J43ZRkJTXBUZK4gmx56+Sqk9+Vs9mwZx9+A==
@@ -3209,7 +3128,7 @@ cheerio-select@^2.1.0:
domhandler "^5.0.3"
domutils "^3.0.1"
-cheerio@^1.0.0-rc.12, cheerio@^1.0.0-rc.3:
+cheerio@^1.0.0-rc.12:
version "1.0.0-rc.12"
resolved "https://registry.npmjs.org/cheerio/-/cheerio-1.0.0-rc.12.tgz"
integrity sha512-VqR8m68vM46BNnuZ5NtnGBKIE/DfN0cRIzg9n40EIq9NOv90ayxLBXA8fXC5gquFRGJSTRqBq25Jt2ECLR431Q==
@@ -3325,16 +3244,16 @@ color-convert@^2.0.1:
dependencies:
color-name "~1.1.4"
-color-name@~1.1.4:
- version "1.1.4"
- resolved "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz"
- integrity sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==
-
color-name@1.1.3:
version "1.1.3"
resolved "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz"
integrity sha512-72fSenhMw2HZMTVHeCA9KCmpEIbzWiQsjN+BHcBbS9vr1mtt+vJjPdksIBNUmKAW8TFUDPJK5SUU3QhE9NEXDw==
+color-name@~1.1.4:
+ version "1.1.4"
+ resolved "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz"
+ integrity sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==
+
colord@^2.9.1:
version "2.9.3"
resolved "https://registry.npmjs.org/colord/-/colord-2.9.3.tgz"
@@ -3365,11 +3284,6 @@ commander@^10.0.0:
resolved "https://registry.npmjs.org/commander/-/commander-10.0.1.tgz"
integrity sha512-y4Mg2tXshplEbSGzx7amzPwKKOCGuoSRP/CjEdwwk0FOGlUbq6lKuoyDZTNZkmxHdJtp54hdfY/JUrdL7Xfdug==
-commander@^2.19.0:
- version "2.20.3"
- resolved "https://registry.npmjs.org/commander/-/commander-2.20.3.tgz"
- integrity sha512-GpVkmM8vF2vQUkj2LvZmD35JxeJOLCwJ9cUkugyk2nuhbv3+mJvpLYYt+0+USMxE+oj+ey/lJEnhZw75x/OMcQ==
-
commander@^2.20.0:
version "2.20.3"
resolved "https://registry.npmjs.org/commander/-/commander-2.20.3.tgz"
@@ -3510,7 +3424,7 @@ core-js-pure@^3.30.2:
resolved "https://registry.npmjs.org/core-js-pure/-/core-js-pure-3.34.0.tgz"
integrity sha512-pmhivkYXkymswFfbXsANmBAewXx86UBfmagP+w0wkK06kLsLlTK5oQmsURPivzMkIBQiYq2cjamcZExIwlFQIg==
-core-js@^3.1.4, core-js@^3.31.1:
+core-js@^3.31.1:
version "3.34.0"
resolved "https://registry.npmjs.org/core-js/-/core-js-3.34.0.tgz"
integrity sha512-aDdvlDder8QmY91H88GzNi9EtQi2TjvQhpCX6B1v/dAZHU1AuLgHvRh54RiOerpEhEW46Tkf+vgAViB/CWC0ag==
@@ -3552,7 +3466,7 @@ cosmiconfig@^8.2.0:
parse-json "^5.2.0"
path-type "^4.0.0"
-cross-spawn@^7.0.2, cross-spawn@^7.0.3:
+cross-spawn@^7.0.3:
version "7.0.3"
resolved "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.3.tgz"
integrity sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w==
@@ -3721,7 +3635,7 @@ csso@^4.2.0:
dependencies:
css-tree "^1.1.2"
-csstype@^3.0.2, csstype@3.1.3:
+csstype@3.1.3, csstype@^3.0.2:
version "3.1.3"
resolved "https://registry.npmjs.org/csstype/-/csstype-3.1.3.tgz"
integrity sha512-M1uQkMl8rQK/szD0LNhtqxIPLpimGm8sOBwU7lLnCpSbTyY3yeU1Vc7l4KT5zT4s/yOxHH5O7tIuuLOCnLADRw==
@@ -3758,27 +3672,20 @@ debounce@^1.2.1:
resolved "https://registry.npmjs.org/debounce/-/debounce-1.2.1.tgz"
integrity sha512-XRRe6Glud4rd/ZGQfiV1ruXSfbvfJedlV9Y6zOlP+2K04vBYiJEte6stfFkCP03aMnY5tsipamumUjL14fofug==
-debug@^2.6.0:
+debug@2.6.9, debug@^2.6.0:
version "2.6.9"
resolved "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz"
integrity sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==
dependencies:
ms "2.0.0"
-debug@^4.0.0, debug@^4.1.0, debug@^4.1.1, debug@^4.3.1, debug@^4.3.2, debug@^4.3.4, debug@4:
+debug@4, debug@^4.0.0, debug@^4.1.0, debug@^4.1.1, debug@^4.3.1, debug@^4.3.4:
version "4.3.4"
resolved "https://registry.npmjs.org/debug/-/debug-4.3.4.tgz"
integrity sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==
dependencies:
ms "2.1.2"
-debug@2.6.9:
- version "2.6.9"
- resolved "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz"
- integrity sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==
- dependencies:
- ms "2.0.0"
-
decko@^1.2.0:
version "1.2.0"
resolved "https://registry.npmjs.org/decko/-/decko-1.2.0.tgz"
@@ -3803,11 +3710,6 @@ deep-extend@^0.6.0:
resolved "https://registry.npmjs.org/deep-extend/-/deep-extend-0.6.0.tgz"
integrity sha512-LOHxIOaPYdHlJRtCQfDIVZtfw/ufM8+rVj649RIHzcm/vGwQRXFt6OPqIFWsm2XEMrNIEtWR64sY1LEKD2vAOA==
-deep-is@^0.1.3:
- version "0.1.4"
- resolved "https://registry.npmjs.org/deep-is/-/deep-is-0.1.4.tgz"
- integrity sha512-oIPzksmTg4/MriiaYGO+okXDT7ztn/w3Eptv/+gSIdMdKsJo0u4CfYNFJPy+4SKMuCqGw2wxnA+URMg3t8a/bQ==
-
deepmerge@^4.2.2:
version "4.3.1"
resolved "https://registry.npmjs.org/deepmerge/-/deepmerge-4.3.1.tgz"
@@ -3862,16 +3764,16 @@ del@^6.1.1:
rimraf "^3.0.2"
slash "^3.0.0"
-depd@~1.1.2:
- version "1.1.2"
- resolved "https://registry.npmjs.org/depd/-/depd-1.1.2.tgz"
- integrity sha512-7emPTl6Dpo6JRXOXjLRxck+FlLRX5847cLKEn00PLAgc3g2hTZZgr+e4c2v6QpSmLeFP3n5yUo7ft6avBK/5jQ==
-
depd@2.0.0:
version "2.0.0"
resolved "https://registry.npmjs.org/depd/-/depd-2.0.0.tgz"
integrity sha512-g7nH6P6dyDioJogAAGprGpCtVImJhpPk/roCzdb3fIh61/s/nPsfR6onyMwkCAR/OlC3yBC0lESvUoQEAssIrw==
+depd@~1.1.2:
+ version "1.1.2"
+ resolved "https://registry.npmjs.org/depd/-/depd-1.1.2.tgz"
+ integrity sha512-7emPTl6Dpo6JRXOXjLRxck+FlLRX5847cLKEn00PLAgc3g2hTZZgr+e4c2v6QpSmLeFP3n5yUo7ft6avBK/5jQ==
+
dequal@^2.0.0:
version "2.0.3"
resolved "https://registry.npmjs.org/dequal/-/dequal-2.0.3.tgz"
@@ -3917,11 +3819,6 @@ dir-glob@^3.0.1:
dependencies:
path-type "^4.0.0"
-discontinuous-range@1.0.0:
- version "1.0.0"
- resolved "https://registry.npmjs.org/discontinuous-range/-/discontinuous-range-1.0.0.tgz"
- integrity sha512-c68LpLbO+7kP/b1Hr1qs8/BJ09F5khZGTxqxZuhzxpmwJKOgRFHJWIb9/KmqnqHhLdO55aOxFH/EGBvUQbL/RQ==
-
dns-equal@^1.0.0:
version "1.0.0"
resolved "https://registry.npmjs.org/dns-equal/-/dns-equal-1.0.0.tgz"
@@ -3934,13 +3831,6 @@ dns-packet@^5.2.2:
dependencies:
"@leichtgewicht/ip-codec" "^2.0.1"
-doctrine@^3.0.0:
- version "3.0.0"
- resolved "https://registry.npmjs.org/doctrine/-/doctrine-3.0.0.tgz"
- integrity sha512-yS+Q5i3hBf7GBkd4KG8a7eBNNWNGLTaEwwYWUijIYM7zrlYDM0BFXHjjPWlWZ1Rg7UaddZeIDmi9jF3HmqiQ2w==
- dependencies:
- esutils "^2.0.2"
-
docusaurus-plugin-redoc@2.1.1:
version "2.1.1"
resolved "https://registry.npmjs.org/docusaurus-plugin-redoc/-/docusaurus-plugin-redoc-2.1.1.tgz"
@@ -4113,7 +4003,7 @@ entities@^4.2.0, entities@^4.4.0:
resolved "https://registry.npmjs.org/entities/-/entities-4.5.0.tgz"
integrity sha512-V0hjH4dGPh9Ao5p0MoRY6BVqtwCjhz6vI5LT8AJ55H+4g9/4vbHx1I54fS0XuclLhDHArPQCiMjDxjaL8fPxhw==
-enzyme-shallow-equal@^1.0.0, enzyme-shallow-equal@^1.0.1:
+enzyme-shallow-equal@^1.0.0:
version "1.0.7"
resolved "https://registry.npmjs.org/enzyme-shallow-equal/-/enzyme-shallow-equal-1.0.7.tgz"
integrity sha512-/um0GFqUXnpM9SvKtje+9Tjoz3f1fpBC3eXRFrNs8kpYn69JljciYP7KZTqM/YQbUY9KUjvKB4jo/q+L6WGGvg==
@@ -4121,34 +4011,6 @@ enzyme-shallow-equal@^1.0.0, enzyme-shallow-equal@^1.0.1:
hasown "^2.0.0"
object-is "^1.1.5"
-enzyme@^3.11.0:
- version "3.11.0"
- resolved "https://registry.npmjs.org/enzyme/-/enzyme-3.11.0.tgz"
- integrity sha512-Dw8/Gs4vRjxY6/6i9wU0V+utmQO9kvh9XLnz3LIudviOnVYDEe2ec+0k+NQoMamn1VrjKgCUOWj5jG/5M5M0Qw==
- dependencies:
- array.prototype.flat "^1.2.3"
- cheerio "^1.0.0-rc.3"
- enzyme-shallow-equal "^1.0.1"
- function.prototype.name "^1.1.2"
- has "^1.0.3"
- html-element-map "^1.2.0"
- is-boolean-object "^1.0.1"
- is-callable "^1.1.5"
- is-number-object "^1.0.4"
- is-regex "^1.0.5"
- is-string "^1.0.5"
- is-subset "^0.1.1"
- lodash.escape "^4.0.1"
- lodash.isequal "^4.5.0"
- object-inspect "^1.7.0"
- object-is "^1.0.2"
- object.assign "^4.1.0"
- object.entries "^1.1.1"
- object.values "^1.1.1"
- raf "^3.4.1"
- rst-selector-parser "^2.2.3"
- string.prototype.trim "^1.2.1"
-
error-ex@^1.3.1:
version "1.3.2"
resolved "https://registry.npmjs.org/error-ex/-/error-ex-1.3.2.tgz"
@@ -4156,7 +4018,7 @@ error-ex@^1.3.1:
dependencies:
is-arrayish "^0.2.1"
-es-abstract@^1.22.1, es-abstract@^1.22.3, es-abstract@^1.23.0, es-abstract@^1.23.2:
+es-abstract@^1.22.1, es-abstract@^1.22.3, es-abstract@^1.23.0:
version "1.23.3"
resolved "https://registry.npmjs.org/es-abstract/-/es-abstract-1.23.3.tgz"
integrity sha512-e+HfNH61Bj1X9/jLc5v1owaLYuHdeHHSQlkhCBiTK8rBvKaULl/beGMxwrMXjpYrv4pz22BlY570vVePA2ho4A==
@@ -4208,11 +4070,6 @@ es-abstract@^1.22.1, es-abstract@^1.22.3, es-abstract@^1.23.0, es-abstract@^1.23
unbox-primitive "^1.0.2"
which-typed-array "^1.1.15"
-es-array-method-boxes-properly@^1.0.0:
- version "1.0.0"
- resolved "https://registry.npmjs.org/es-array-method-boxes-properly/-/es-array-method-boxes-properly-1.0.0.tgz"
- integrity sha512-wd6JXUmyHmt8T5a2xreUwKcGPq6f1f+WwIJkijUqiGcJz1qqnZgP6XIK+QyIWU5lT7imeNxUll48bziG+TSYcA==
-
es-define-property@^1.0.0:
version "1.0.0"
resolved "https://registry.npmjs.org/es-define-property/-/es-define-property-1.0.0.tgz"
@@ -4246,13 +4103,6 @@ es-set-tostringtag@^2.0.3:
has-tostringtag "^1.0.2"
hasown "^2.0.1"
-es-shim-unscopables@^1.0.0:
- version "1.0.2"
- resolved "https://registry.npmjs.org/es-shim-unscopables/-/es-shim-unscopables-1.0.2.tgz"
- integrity sha512-J3yBRXCzDu4ULnQwxyToo/OjdMx6akgVC7K6few0a7F/0wLtmKKN7I73AH5T2836UuXRqN7Qg+IIUw/+YJksRw==
- dependencies:
- hasown "^2.0.0"
-
es-to-primitive@^1.2.1:
version "1.2.1"
resolved "https://registry.npmjs.org/es-to-primitive/-/es-to-primitive-1.2.1.tgz"
@@ -4297,7 +4147,7 @@ escape-string-regexp@^5.0.0:
resolved "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-5.0.0.tgz"
integrity sha512-/veY75JbMK4j1yjvuUxuVsiS/hr/4iHs9FTT6cgTexxdE0Ly/glccBAkloH/DofkjRbZU3bnoj38mOmhkZ0lHw==
-eslint-scope@^5.1.1, eslint-scope@5.1.1:
+eslint-scope@5.1.1, eslint-scope@^5.1.1:
version "5.1.1"
resolved "https://registry.npmjs.org/eslint-scope/-/eslint-scope-5.1.1.tgz"
integrity sha512-2NxwbF/hZ0KpepYN0cNbo+FN6XoK7GaHlQhgx/hIZl6Va0bF45RQOOwhLIy8lQDbuCiadSLCBnH2CFYquit5bw==
@@ -4305,84 +4155,16 @@ eslint-scope@^5.1.1, eslint-scope@5.1.1:
esrecurse "^4.3.0"
estraverse "^4.1.1"
-eslint-scope@^7.2.2:
- version "7.2.2"
- resolved "https://registry.npmjs.org/eslint-scope/-/eslint-scope-7.2.2.tgz"
- integrity sha512-dOt21O7lTMhDM+X9mB4GX+DZrZtCUJPL/wlcTqxyrx5IvO0IYtILdtrQGQp+8n5S0gwSVmOf9NQrjMOgfQZlIg==
- dependencies:
- esrecurse "^4.3.0"
- estraverse "^5.2.0"
-
-eslint-visitor-keys@^3.3.0, eslint-visitor-keys@^3.4.1, eslint-visitor-keys@^3.4.3:
+eslint-visitor-keys@^3.3.0:
version "3.4.3"
resolved "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-3.4.3.tgz"
integrity sha512-wpc+LXeiyiisxPlEkUzU6svyS1frIO3Mgxj1fdy7Pm8Ygzguax2N3Fa/D/ag1WqbOprdI+uY6wMUl8/a2G+iag==
-"eslint@^6.0.0 || ^7.0.0 || ^8.0.0", "eslint@^6.0.0 || ^7.0.0 || >=8.0.0", "eslint@>= 6", eslint@>=6:
- version "8.57.1"
- resolved "https://registry.npmjs.org/eslint/-/eslint-8.57.1.tgz"
- integrity sha512-ypowyDxpVSYpkXr9WPv2PAZCtNip1Mv5KTW0SCurXv/9iOpcrH9PaqUElksqEB6pChqHGDRCFTyrZlGhnLNGiA==
- dependencies:
- "@eslint-community/eslint-utils" "^4.2.0"
- "@eslint-community/regexpp" "^4.6.1"
- "@eslint/eslintrc" "^2.1.4"
- "@eslint/js" "8.57.1"
- "@humanwhocodes/config-array" "^0.13.0"
- "@humanwhocodes/module-importer" "^1.0.1"
- "@nodelib/fs.walk" "^1.2.8"
- "@ungap/structured-clone" "^1.2.0"
- ajv "^6.12.4"
- chalk "^4.0.0"
- cross-spawn "^7.0.2"
- debug "^4.3.2"
- doctrine "^3.0.0"
- escape-string-regexp "^4.0.0"
- eslint-scope "^7.2.2"
- eslint-visitor-keys "^3.4.3"
- espree "^9.6.1"
- esquery "^1.4.2"
- esutils "^2.0.2"
- fast-deep-equal "^3.1.3"
- file-entry-cache "^6.0.1"
- find-up "^5.0.0"
- glob-parent "^6.0.2"
- globals "^13.19.0"
- graphemer "^1.4.0"
- ignore "^5.2.0"
- imurmurhash "^0.1.4"
- is-glob "^4.0.0"
- is-path-inside "^3.0.3"
- js-yaml "^4.1.0"
- json-stable-stringify-without-jsonify "^1.0.1"
- levn "^0.4.1"
- lodash.merge "^4.6.2"
- minimatch "^3.1.2"
- natural-compare "^1.4.0"
- optionator "^0.9.3"
- strip-ansi "^6.0.1"
- text-table "^0.2.0"
-
-espree@^9.6.0, espree@^9.6.1:
- version "9.6.1"
- resolved "https://registry.npmjs.org/espree/-/espree-9.6.1.tgz"
- integrity sha512-oruZaFkjorTpF32kDSI5/75ViwGeZginGGy2NoOSg3Q9bnwlnmDm4HLnkl0RE3n+njDXR037aY1+x58Z/zFdwQ==
- dependencies:
- acorn "^8.9.0"
- acorn-jsx "^5.3.2"
- eslint-visitor-keys "^3.4.1"
-
esprima@^4.0.0:
version "4.0.1"
resolved "https://registry.npmjs.org/esprima/-/esprima-4.0.1.tgz"
integrity sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A==
-esquery@^1.4.2:
- version "1.6.0"
- resolved "https://registry.npmjs.org/esquery/-/esquery-1.6.0.tgz"
- integrity sha512-ca9pw9fomFcKPvFLXhBKUK90ZvGibiGOvRJNbjljY7s7uq/5YO4BOzcYtJqExdx99rF6aAcnRxHmcUHcz6sQsg==
- dependencies:
- estraverse "^5.1.0"
-
esrecurse@^4.3.0:
version "4.3.0"
resolved "https://registry.npmjs.org/esrecurse/-/esrecurse-4.3.0.tgz"
@@ -4395,11 +4177,6 @@ estraverse@^4.1.1:
resolved "https://registry.npmjs.org/estraverse/-/estraverse-4.3.0.tgz"
integrity sha512-39nnKffWz8xN1BU/2c79n9nB9HDzo0niYUqx6xyqUnyoAnQyyWpOTdZEeiCch8BBu515t4wp9ZmgVfVhn9EBpw==
-estraverse@^5.1.0:
- version "5.3.0"
- resolved "https://registry.npmjs.org/estraverse/-/estraverse-5.3.0.tgz"
- integrity sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==
-
estraverse@^5.2.0:
version "5.3.0"
resolved "https://registry.npmjs.org/estraverse/-/estraverse-5.3.0.tgz"
@@ -4582,11 +4359,6 @@ fast-json-stable-stringify@^2.0.0:
resolved "https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz"
integrity sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==
-fast-levenshtein@^2.0.6:
- version "2.0.6"
- resolved "https://registry.npmjs.org/fast-levenshtein/-/fast-levenshtein-2.0.6.tgz"
- integrity sha512-DCXu6Ifhqcks7TZKY3Hxp3y6qphY5SJZmrWMDrKcERSOXWQdMhU9Ig/PYrzyw/ul9jOIyh0N4M0tbC5hodg8dw==
-
fast-safe-stringify@^2.0.7:
version "2.1.1"
resolved "https://registry.npmjs.org/fast-safe-stringify/-/fast-safe-stringify-2.1.1.tgz"
@@ -4627,14 +4399,7 @@ feed@^4.2.2:
dependencies:
xml-js "^1.6.11"
-file-entry-cache@^6.0.1:
- version "6.0.1"
- resolved "https://registry.npmjs.org/file-entry-cache/-/file-entry-cache-6.0.1.tgz"
- integrity sha512-7Gps/XWymbLk2QLYK4NzpMOrYjMhdIxXuIvy2QBsLE6ljuodKvdkWs/cpyJJ3CVIVpH0Oi1Hvg1ovbMzLdFBBg==
- dependencies:
- flat-cache "^3.0.4"
-
-file-loader@*, file-loader@^6.2.0:
+file-loader@^6.2.0:
version "6.2.0"
resolved "https://registry.npmjs.org/file-loader/-/file-loader-6.2.0.tgz"
integrity sha512-qo3glqyTa61Ytg4u73GultjHGjdRyig3tG6lPtyX/jOEJvHif9uB0/OCI2Kif6ctF3caQTW2G5gym21oAsI4pw==
@@ -4698,25 +4463,11 @@ find-up@^6.3.0:
locate-path "^7.1.0"
path-exists "^5.0.0"
-flat-cache@^3.0.4:
- version "3.2.0"
- resolved "https://registry.npmjs.org/flat-cache/-/flat-cache-3.2.0.tgz"
- integrity sha512-CYcENa+FtcUKLmhhqyctpclsq7QF38pKjZHsGNiSQF5r4FtoKDWabFDl3hzaEQMvT1LHEysw5twgLvpYYb4vbw==
- dependencies:
- flatted "^3.2.9"
- keyv "^4.5.3"
- rimraf "^3.0.2"
-
flat@^5.0.2:
version "5.0.2"
resolved "https://registry.npmjs.org/flat/-/flat-5.0.2.tgz"
integrity sha512-b6suED+5/3rTpUBdG1gupIl8MPFCAMA0QXwmljLhvCUKcUvdE4gWky9zpuGCcXHOsz4J9wPGNWq6OKpmIzz3hQ==
-flatted@^3.2.9:
- version "3.3.1"
- resolved "https://registry.npmjs.org/flatted/-/flatted-3.3.1.tgz"
- integrity sha512-X8cqMLLie7KsNUDSdzeN8FYK9rEt4Dt67OsG/DNGnYTSDBG4uFAJFBnUeiV+zCVAvwFy56IjM9sH51jVaEhNxw==
-
follow-redirects@^1.0.0:
version "1.15.3"
resolved "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.15.3.tgz"
@@ -4807,12 +4558,17 @@ fs.realpath@^1.0.0:
resolved "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz"
integrity sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==
+fsevents@~2.3.2:
+ version "2.3.3"
+ resolved "https://registry.yarnpkg.com/fsevents/-/fsevents-2.3.3.tgz#cac6407785d03675a2a5e1a5305c697b347d90d6"
+ integrity sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==
+
function-bind@^1.1.2:
version "1.1.2"
resolved "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz"
integrity sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==
-function.prototype.name@^1.1.2, function.prototype.name@^1.1.6:
+function.prototype.name@^1.1.6:
version "1.1.6"
resolved "https://registry.npmjs.org/function.prototype.name/-/function.prototype.name-1.1.6.tgz"
integrity sha512-Z5kx79swU5P27WEayXM1tBi5Ze/lbIyiNgU3qyXUOf9b2rgXYyF9Dy9Cx+IQv/Lc8WCG6L82zwUPpSS9hGehIg==
@@ -4886,13 +4642,6 @@ glob-parent@^6.0.1:
dependencies:
is-glob "^4.0.3"
-glob-parent@^6.0.2:
- version "6.0.2"
- resolved "https://registry.npmjs.org/glob-parent/-/glob-parent-6.0.2.tgz"
- integrity sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A==
- dependencies:
- is-glob "^4.0.3"
-
glob-to-regexp@^0.4.1:
version "0.4.1"
resolved "https://registry.npmjs.org/glob-to-regexp/-/glob-to-regexp-0.4.1.tgz"
@@ -4938,13 +4687,6 @@ globals@^11.1.0:
resolved "https://registry.npmjs.org/globals/-/globals-11.12.0.tgz"
integrity sha512-WOBp/EEGUiIsJSp7wcv/y6MO+lV9UoncWqxuFfm8eBwzWNgyfBd6Gz+IeKQ9jCmyhoH99g15M3T+QaVHFjizVA==
-globals@^13.19.0:
- version "13.24.0"
- resolved "https://registry.npmjs.org/globals/-/globals-13.24.0.tgz"
- integrity sha512-AhO5QUcj8llrbG09iWhPU2B204J1xnPeL8kQmVorSsy+Sjj1sk8gIyh6cUocGmH4L0UuhAJy+hJMRA4mgA4mFQ==
- dependencies:
- type-fest "^0.20.2"
-
globalthis@^1.0.3:
version "1.0.4"
resolved "https://registry.npmjs.org/globalthis/-/globalthis-1.0.4.tgz"
@@ -5000,20 +4742,15 @@ got@^12.1.0:
p-cancelable "^3.0.0"
responselike "^3.0.0"
-graceful-fs@^4.1.2, graceful-fs@^4.1.6, graceful-fs@^4.2.0, graceful-fs@^4.2.4, graceful-fs@^4.2.6, graceful-fs@^4.2.9:
- version "4.2.11"
- resolved "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.11.tgz"
- integrity sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ==
-
graceful-fs@4.2.10:
version "4.2.10"
resolved "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.10.tgz"
integrity sha512-9ByhssR2fPVsNZj478qUUbKfmL0+t5BDVyjShtyZZLiK7ZDAArFFfopyOTj0M05wE2tJPisA4iTnnXl2YoPvOA==
-graphemer@^1.4.0:
- version "1.4.0"
- resolved "https://registry.npmjs.org/graphemer/-/graphemer-1.4.0.tgz"
- integrity sha512-EtKwoO6kxCL9WO5xipiHTZlSzBm7WLT627TqC/uVRd0HKmq8NXyebnNYxDoBi7wt8eTWrUrKXCOVaFq9x1kgag==
+graceful-fs@^4.1.2, graceful-fs@^4.1.6, graceful-fs@^4.2.0, graceful-fs@^4.2.4, graceful-fs@^4.2.6, graceful-fs@^4.2.9:
+ version "4.2.11"
+ resolved "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.11.tgz"
+ integrity sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ==
gray-matter@^4.0.3:
version "4.0.3"
@@ -5081,7 +4818,7 @@ has-yarn@^3.0.0:
resolved "https://registry.npmjs.org/has-yarn/-/has-yarn-3.0.0.tgz"
integrity sha512-IrsVwUHhEULx3R8f/aA8AHuEzAorplsab/v8HBzEiIukwq5i/EC+xmOW+HfP1OaDP+2JkgT1yILHN2O3UFIbcA==
-has@^1.0.3, has@^1.0.4:
+has@^1.0.4:
version "1.0.4"
resolved "https://registry.npmjs.org/has/-/has-1.0.4.tgz"
integrity sha512-qdSAmqLF6209RFj4VVItywPMbm3vWylknmB3nvNiUIs72xAimcM8nVYxYr7ncvZq5qzk9MKIZR8ijqD/1QuYjQ==
@@ -5241,14 +4978,6 @@ hpack.js@^2.1.6:
readable-stream "^2.0.1"
wbuf "^1.1.0"
-html-element-map@^1.2.0:
- version "1.3.1"
- resolved "https://registry.npmjs.org/html-element-map/-/html-element-map-1.3.1.tgz"
- integrity sha512-6XMlxrAFX4UEEGxctfFnmrFaaZFNf9i5fNuV5wZ3WWQ4FVaNP1aX1LkX9j2mfEx1NpjeE/rL3nmgEn23GdFmrg==
- dependencies:
- array.prototype.filter "^1.0.0"
- call-bind "^1.0.2"
-
html-entities@^2.3.2:
version "2.4.0"
resolved "https://registry.npmjs.org/html-entities/-/html-entities-2.4.0.tgz"
@@ -5336,16 +5065,6 @@ http-deceiver@^1.2.7:
resolved "https://registry.npmjs.org/http-deceiver/-/http-deceiver-1.2.7.tgz"
integrity sha512-LmpOGxTfbpgtGVxJrj5k7asXHCgNZp5nLfp+hWc8QQRqtb7fUy6kRY3BO1h9ddF6yIPYUARgxGOwB42DnxIaNw==
-http-errors@~1.6.2:
- version "1.6.3"
- resolved "https://registry.npmjs.org/http-errors/-/http-errors-1.6.3.tgz"
- integrity sha512-lks+lVC8dgGyh97jxvxeYTWQFvh4uw4yC12gVl63Cg30sjPX4wuGcdkICVXDAESr6OJGjqGA8Iz5mkeN6zlD7A==
- dependencies:
- depd "~1.1.2"
- inherits "2.0.3"
- setprototypeof "1.1.0"
- statuses ">= 1.4.0 < 2"
-
http-errors@2.0.0:
version "2.0.0"
resolved "https://registry.npmjs.org/http-errors/-/http-errors-2.0.0.tgz"
@@ -5357,6 +5076,16 @@ http-errors@2.0.0:
statuses "2.0.1"
toidentifier "1.0.1"
+http-errors@~1.6.2:
+ version "1.6.3"
+ resolved "https://registry.npmjs.org/http-errors/-/http-errors-1.6.3.tgz"
+ integrity sha512-lks+lVC8dgGyh97jxvxeYTWQFvh4uw4yC12gVl63Cg30sjPX4wuGcdkICVXDAESr6OJGjqGA8Iz5mkeN6zlD7A==
+ dependencies:
+ depd "~1.1.2"
+ inherits "2.0.3"
+ setprototypeof "1.1.0"
+ statuses ">= 1.4.0 < 2"
+
http-parser-js@>=0.5.1:
version "0.5.8"
resolved "https://registry.npmjs.org/http-parser-js/-/http-parser-js-0.5.8.tgz"
@@ -5473,7 +5202,7 @@ inflight@^1.0.4:
once "^1.3.0"
wrappy "1"
-inherits@^2.0.1, inherits@^2.0.3, inherits@~2.0.3, inherits@2, inherits@2.0.4:
+inherits@2, inherits@2.0.4, inherits@^2.0.1, inherits@^2.0.3, inherits@~2.0.3:
version "2.0.4"
resolved "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz"
integrity sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==
@@ -5483,16 +5212,16 @@ inherits@2.0.3:
resolved "https://registry.npmjs.org/inherits/-/inherits-2.0.3.tgz"
integrity sha512-x00IRNXNy63jwGkJmzPigoySHbaqpNuzKbBOmzK+g2OdZpQ9w+sxCN+VSB3ja7IAge2OP2qpfxTjeNcyjmW1uw==
-ini@^1.3.4, ini@^1.3.5, ini@~1.3.0:
- version "1.3.8"
- resolved "https://registry.npmjs.org/ini/-/ini-1.3.8.tgz"
- integrity sha512-JV/yugV2uzW5iMRSiZAyDtQd+nxtUnjeLt0acNdw98kKLrvuRVyB80tsREOE7yvGVgalhZ6RNXCmEHkUKBKxew==
-
ini@2.0.0:
version "2.0.0"
resolved "https://registry.npmjs.org/ini/-/ini-2.0.0.tgz"
integrity sha512-7PnF4oN3CvZF23ADhA5wRaYEQpJ8qygSkbtTXWBeXWXmEVRXK+1ITciHWwHhsjv1TmW0MgacIv6hEi5pX5NQdA==
+ini@^1.3.4, ini@^1.3.5, ini@~1.3.0:
+ version "1.3.8"
+ resolved "https://registry.npmjs.org/ini/-/ini-1.3.8.tgz"
+ integrity sha512-JV/yugV2uzW5iMRSiZAyDtQd+nxtUnjeLt0acNdw98kKLrvuRVyB80tsREOE7yvGVgalhZ6RNXCmEHkUKBKxew==
+
inline-style-parser@0.1.1:
version "0.1.1"
resolved "https://registry.npmjs.org/inline-style-parser/-/inline-style-parser-0.1.1.tgz"
@@ -5524,16 +5253,16 @@ invariant@^2.2.4:
dependencies:
loose-envify "^1.0.0"
-ipaddr.js@^2.0.1:
- version "2.1.0"
- resolved "https://registry.npmjs.org/ipaddr.js/-/ipaddr.js-2.1.0.tgz"
- integrity sha512-LlbxQ7xKzfBusov6UMi4MFpEg0m+mAm9xyNGEduwXMEDuf4WfzB/RZwMVYEd7IKGvh4IUkEXYxtAVu9T3OelJQ==
-
ipaddr.js@1.9.1:
version "1.9.1"
resolved "https://registry.npmjs.org/ipaddr.js/-/ipaddr.js-1.9.1.tgz"
integrity sha512-0KI/607xoxSToH7GjN1FfSbLoU0+btTicjsQSWQlh/hZykN8KpmMf7uYwPW3R+akZ6R/w18ZlXSHBYXiYUPO3g==
+ipaddr.js@^2.0.1:
+ version "2.1.0"
+ resolved "https://registry.npmjs.org/ipaddr.js/-/ipaddr.js-2.1.0.tgz"
+ integrity sha512-LlbxQ7xKzfBusov6UMi4MFpEg0m+mAm9xyNGEduwXMEDuf4WfzB/RZwMVYEd7IKGvh4IUkEXYxtAVu9T3OelJQ==
+
is-alphabetical@^2.0.0:
version "2.0.1"
resolved "https://registry.npmjs.org/is-alphabetical/-/is-alphabetical-2.0.1.tgz"
@@ -5574,7 +5303,7 @@ is-binary-path@~2.1.0:
dependencies:
binary-extensions "^2.0.0"
-is-boolean-object@^1.0.1, is-boolean-object@^1.1.0:
+is-boolean-object@^1.1.0:
version "1.1.2"
resolved "https://registry.npmjs.org/is-boolean-object/-/is-boolean-object-1.1.2.tgz"
integrity sha512-gDYaKHJmnj4aWxyj6YHyXVpdQawtVLHU5cb+eztPGczf6cjuTdwve5ZIEfgXqH4e57An1D1AKf8CZ3kYrQRqYA==
@@ -5582,7 +5311,7 @@ is-boolean-object@^1.0.1, is-boolean-object@^1.1.0:
call-bind "^1.0.2"
has-tostringtag "^1.0.0"
-is-callable@^1.1.3, is-callable@^1.1.4, is-callable@^1.1.5, is-callable@^1.2.7:
+is-callable@^1.1.3, is-callable@^1.1.4, is-callable@^1.2.7:
version "1.2.7"
resolved "https://registry.npmjs.org/is-callable/-/is-callable-1.2.7.tgz"
integrity sha512-1BC0BVFhS/p0qtw6enp8e+8OD0UrK0oFLztSjNzhcKA3WDuJxxAPXzPuPtKkjEY9UUoEWlX/8fgKeu2S8i9JTA==
@@ -5640,7 +5369,7 @@ is-fullwidth-code-point@^3.0.0:
resolved "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz"
integrity sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==
-is-glob@^4.0.0, is-glob@^4.0.1, is-glob@^4.0.3, is-glob@~4.0.1:
+is-glob@^4.0.1, is-glob@^4.0.3, is-glob@~4.0.1:
version "4.0.3"
resolved "https://registry.npmjs.org/is-glob/-/is-glob-4.0.3.tgz"
integrity sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==
@@ -5697,7 +5426,7 @@ is-path-cwd@^2.2.0:
resolved "https://registry.npmjs.org/is-path-cwd/-/is-path-cwd-2.2.0.tgz"
integrity sha512-w942bTcih8fdJPJmQHFzkS76NEP8Kzzvmw92cXsazb8intwLqPibPPdXf4ANdKV3rYMuuQYGIWtvz9JilB3NFQ==
-is-path-inside@^3.0.2, is-path-inside@^3.0.3:
+is-path-inside@^3.0.2:
version "3.0.3"
resolved "https://registry.npmjs.org/is-path-inside/-/is-path-inside-3.0.3.tgz"
integrity sha512-Fd4gABb+ycGAmKou8eMftCupSir5lRxqf4aD/vd0cD2qc4HL07OjCeuHMr8Ro4CoMaeCKDB0/ECBOVWjTwUvPQ==
@@ -5731,7 +5460,7 @@ is-reference@^3.0.0:
dependencies:
"@types/estree" "*"
-is-regex@^1.0.5, is-regex@^1.1.4:
+is-regex@^1.1.4:
version "1.1.4"
resolved "https://registry.npmjs.org/is-regex/-/is-regex-1.1.4.tgz"
integrity sha512-kvRdxDsxZjhzUX07ZnLydzS1TU/TJlTUHHY4YLL87e37oUA49DfkLqgy+VjFocowy29cKvcSiu+kIv728jTTVg==
@@ -5768,11 +5497,6 @@ is-string@^1.0.5, is-string@^1.0.7:
dependencies:
has-tostringtag "^1.0.0"
-is-subset@^0.1.1:
- version "0.1.1"
- resolved "https://registry.npmjs.org/is-subset/-/is-subset-0.1.1.tgz"
- integrity sha512-6Ybun0IkarhmEqxXCNw/C0bna6Zb/TkfUX9UbwJtK6ObwAVCxmAP308WWTHviM/zAqXk05cdhYsUsZeGQh99iw==
-
is-symbol@^1.0.2, is-symbol@^1.0.3:
version "1.0.4"
resolved "https://registry.npmjs.org/is-symbol/-/is-symbol-1.0.4.tgz"
@@ -5811,6 +5535,11 @@ is-yarn-global@^0.4.0:
resolved "https://registry.npmjs.org/is-yarn-global/-/is-yarn-global-0.4.1.tgz"
integrity sha512-/kppl+R+LO5VmhYSEWARUFjodS25D68gvj8W7z0I7OWhUla5xWu8KL6CtB2V0R6yqhnRgbcaREMr4EEM6htLPQ==
+isarray@0.0.1:
+ version "0.0.1"
+ resolved "https://registry.npmjs.org/isarray/-/isarray-0.0.1.tgz"
+ integrity sha512-D2S+3GLxWH+uhrNEcoh/fnmYeP8E8/zHl644d/jdA0g2uyXvy3sb0qxotE+ne0LtccHknQzWwZEzhak7oJ0COQ==
+
isarray@^2.0.5:
version "2.0.5"
resolved "https://registry.npmjs.org/isarray/-/isarray-2.0.5.tgz"
@@ -5821,11 +5550,6 @@ isarray@~1.0.0:
resolved "https://registry.npmjs.org/isarray/-/isarray-1.0.0.tgz"
integrity sha512-VLghIWNM6ELQzo7zwmcg0NmTVyWKYjvIeM83yjp0wRDTmUnrM678fQbcKBo6n2CJEF0szoG//ytg+TKla89ALQ==
-isarray@0.0.1:
- version "0.0.1"
- resolved "https://registry.npmjs.org/isarray/-/isarray-0.0.1.tgz"
- integrity sha512-D2S+3GLxWH+uhrNEcoh/fnmYeP8E8/zHl644d/jdA0g2uyXvy3sb0qxotE+ne0LtccHknQzWwZEzhak7oJ0COQ==
-
isexe@^2.0.0:
version "2.0.0"
resolved "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz"
@@ -5928,7 +5652,7 @@ json-parse-even-better-errors@^2.3.0, json-parse-even-better-errors@^2.3.1:
resolved "https://registry.npmjs.org/json-parse-even-better-errors/-/json-parse-even-better-errors-2.3.1.tgz"
integrity sha512-xyFwyhro/JEof6Ghe2iz2NcXoj2sloNsWr/XsERDK/oiPCfaNhl5ONfp+jQdAZRQQ0IJWNzH9zIZF7li91kh2w==
-json-pointer@^0.6.2, json-pointer@0.6.2:
+json-pointer@0.6.2, json-pointer@^0.6.2:
version "0.6.2"
resolved "https://registry.npmjs.org/json-pointer/-/json-pointer-0.6.2.tgz"
integrity sha512-vLWcKbOaXlO+jvRy4qNd+TI1QUPZzfJj1tpJ3vAXDych5XJf93ftpUKe5pKCrzyIIwgBJcOcCVRUfqQP25afBw==
@@ -5945,11 +5669,6 @@ json-schema-traverse@^1.0.0:
resolved "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz"
integrity sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==
-json-stable-stringify-without-jsonify@^1.0.1:
- version "1.0.1"
- resolved "https://registry.npmjs.org/json-stable-stringify-without-jsonify/-/json-stable-stringify-without-jsonify-1.0.1.tgz"
- integrity sha512-Bdboy+l7tA3OGW6FjyFHWkP5LuByj1Tk33Ljyq0axyzdk9//JSi2u3fP1QSmd1KNwq6VOKYGlAu87CisVir6Pw==
-
json5@^2.1.2, json5@^2.2.3:
version "2.2.3"
resolved "https://registry.npmjs.org/json5/-/json5-2.2.3.tgz"
@@ -6001,14 +5720,6 @@ leven@^3.1.0:
resolved "https://registry.npmjs.org/leven/-/leven-3.1.0.tgz"
integrity sha512-qsda+H8jTaUaN/x5vzW2rzc+8Rw4TAQ/4KjB46IwK5VH+IlVeeeje/EoZRpiXvIqjFgK84QffqPztGI3VBLG1A==
-levn@^0.4.1:
- version "0.4.1"
- resolved "https://registry.npmjs.org/levn/-/levn-0.4.1.tgz"
- integrity sha512-+bT2uH4E5LGE7h/n3evcS/sQlJXCpIp6ym8OWJ5eV6+67Dsql/LaaT7qJBAt2rzfoa/5QBGBhxDix1dMt2kQKQ==
- dependencies:
- prelude-ls "^1.2.1"
- type-check "~0.4.0"
-
lilconfig@^2.0.3:
version "2.1.0"
resolved "https://registry.npmjs.org/lilconfig/-/lilconfig-2.1.0.tgz"
@@ -6065,16 +5776,6 @@ lodash.debounce@^4.0.8:
resolved "https://registry.npmjs.org/lodash.debounce/-/lodash.debounce-4.0.8.tgz"
integrity sha512-FT1yDzDYEoYWhnSGnpE/4Kj1fLZkDFyqRb7fNt6FdYOSxlUWAtp42Eh6Wb0rGIv/m9Bgo7x4GhQbm5Ys4SG5ow==
-lodash.escape@^4.0.1:
- version "4.0.1"
- resolved "https://registry.npmjs.org/lodash.escape/-/lodash.escape-4.0.1.tgz"
- integrity sha512-nXEOnb/jK9g0DYMr1/Xvq6l5xMD7GDG55+GSYIYmS0G4tBk/hURD4JR9WCavs04t33WmJx9kCyp9vJ+mr4BOUw==
-
-lodash.flattendeep@^4.4.0:
- version "4.4.0"
- resolved "https://registry.npmjs.org/lodash.flattendeep/-/lodash.flattendeep-4.4.0.tgz"
- integrity sha512-uHaJFihxmJcEX3kT4I23ABqKKalJ/zDrDg0lsFtc1h+3uw49SIJ5beyhx5ExVRti3AvKoOJngIj7xz3oylPdWQ==
-
lodash.isequal@^4.5.0:
version "4.5.0"
resolved "https://registry.npmjs.org/lodash.isequal/-/lodash.isequal-4.5.0.tgz"
@@ -6085,11 +5786,6 @@ lodash.memoize@^4.1.2:
resolved "https://registry.npmjs.org/lodash.memoize/-/lodash.memoize-4.1.2.tgz"
integrity sha512-t7j+NzmgnQzTAYXcsHYLgimltOV1MXHtlOWf6GjL9Kj8GK5FInw5JotxvbOs+IvV1/Dzo04/fCGfLVs7aXb4Ag==
-lodash.merge@^4.6.2:
- version "4.6.2"
- resolved "https://registry.npmjs.org/lodash.merge/-/lodash.merge-4.6.2.tgz"
- integrity sha512-0KpjqXRVvrYyCsX1swR/XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4+KlKj8YS0ZUCtRT/YUuhyYDujIQ==
-
lodash.uniq@^4.5.0:
version "4.5.0"
resolved "https://registry.npmjs.org/lodash.uniq/-/lodash.uniq-4.5.0.tgz"
@@ -6840,7 +6536,7 @@ micromatch@^4.0.2, micromatch@^4.0.4, micromatch@^4.0.5:
braces "^3.0.2"
picomatch "^2.3.1"
-"mime-db@>= 1.43.0 < 2":
+mime-db@1.52.0, "mime-db@>= 1.43.0 < 2":
version "1.52.0"
resolved "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz"
integrity sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==
@@ -6850,40 +6546,14 @@ mime-db@~1.33.0:
resolved "https://registry.npmjs.org/mime-db/-/mime-db-1.33.0.tgz"
integrity sha512-BHJ/EKruNIqJf/QahvxwQZXKygOQ256myeN/Ew+THcAa5q+PjyTTMMeNQC4DZw5AwfvelsUrA6B67NKMqXDbzQ==
-mime-db@1.52.0:
- version "1.52.0"
- resolved "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz"
- integrity sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==
-
-mime-types@^2.1.27:
- version "2.1.35"
- resolved "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz"
- integrity sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==
- dependencies:
- mime-db "1.52.0"
-
-mime-types@^2.1.31:
- version "2.1.35"
- resolved "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz"
- integrity sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==
- dependencies:
- mime-db "1.52.0"
-
-mime-types@~2.1.17, mime-types@2.1.18:
+mime-types@2.1.18, mime-types@~2.1.17:
version "2.1.18"
resolved "https://registry.npmjs.org/mime-types/-/mime-types-2.1.18.tgz"
integrity sha512-lc/aahn+t4/SWV/qcmumYjymLsWfN3ELhpmVuUFjgsORruuZPVSwAQryq+HHGvO/SI2KVX26bx+En+zhM8g8hQ==
dependencies:
mime-db "~1.33.0"
-mime-types@~2.1.24:
- version "2.1.35"
- resolved "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz"
- integrity sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==
- dependencies:
- mime-db "1.52.0"
-
-mime-types@~2.1.34:
+mime-types@^2.1.27, mime-types@^2.1.31, mime-types@~2.1.24, mime-types@~2.1.34:
version "2.1.35"
resolved "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz"
integrity sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==
@@ -6922,7 +6592,7 @@ minimalistic-assert@^1.0.0:
resolved "https://registry.npmjs.org/minimalistic-assert/-/minimalistic-assert-1.0.1.tgz"
integrity sha512-UtJcAD4yEaGtjPezWuO9wC4nwUnVH/8/Im3yEHQP4b67cXlD/Qr9hdITCU1xDbSEXg2XKNaP8jsReV7vQd00/A==
-minimatch@^3.0.4, minimatch@^3.0.5, minimatch@^3.1.1, minimatch@^3.1.2, minimatch@3.1.2:
+minimatch@3.1.2, minimatch@^3.0.4, minimatch@^3.0.5, minimatch@^3.1.1:
version "3.1.2"
resolved "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz"
integrity sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==
@@ -6955,16 +6625,11 @@ mobx-react@^9.1.1:
dependencies:
mobx-react-lite "^4.0.7"
-mobx@^6.0.4, mobx@^6.12.4, mobx@^6.9.0:
+mobx@^6.12.4:
version "6.13.3"
resolved "https://registry.npmjs.org/mobx/-/mobx-6.13.3.tgz"
integrity sha512-YtAS+ZMbdpbHYUU4ESht3na8KiX11KuMT1yOiKtbKlQ0GZkHDYPKyEw/Tdp7h7aHyLrTWj2TBaSNJ6bCr638iQ==
-moo@^0.5.0:
- version "0.5.2"
- resolved "https://registry.npmjs.org/moo/-/moo-0.5.2.tgz"
- integrity sha512-iSAJLHYKnX41mKcJKjqvnAN9sf0LMDTXDEvFv+ffuRR9a1MIuXLjMNL6EsnDHSkKLTWNqQQ5uo61P4EbU4NU+Q==
-
mrmime@^2.0.0:
version "2.0.0"
resolved "https://registry.npmjs.org/mrmime/-/mrmime-2.0.0.tgz"
@@ -6998,21 +6663,6 @@ nanoid@^3.3.7:
resolved "https://registry.npmjs.org/nanoid/-/nanoid-3.3.7.tgz"
integrity sha512-eSRppjcPIatRIMC1U6UngP8XFcz8MQWGQdt1MTBQ7NaAmvXDfvNxbvWV3x2y6CdEUciCSsDHDQZbhYaB8QEo2g==
-natural-compare@^1.4.0:
- version "1.4.0"
- resolved "https://registry.npmjs.org/natural-compare/-/natural-compare-1.4.0.tgz"
- integrity sha512-OWND8ei3VtNC9h7V60qff3SVobHr996CTwgxubgyQYEpg290h9J0buyECNNJexkFm5sOajh5G116RYA1c8ZMSw==
-
-nearley@^2.7.10:
- version "2.20.1"
- resolved "https://registry.npmjs.org/nearley/-/nearley-2.20.1.tgz"
- integrity sha512-+Mc8UaAebFzgV+KpI5n7DasuuQCHA89dmwm7JXw3TV43ukfNQ9DnBH3Mdb2g/I4Fdxc26pwimBWvjIw0UAILSQ==
- dependencies:
- commander "^2.19.0"
- moo "^0.5.0"
- railroad-diagrams "^1.0.0"
- randexp "0.4.6"
-
negotiator@0.6.3:
version "0.6.3"
resolved "https://registry.npmjs.org/negotiator/-/negotiator-0.6.3.tgz"
@@ -7162,12 +6812,12 @@ object-assign@^4.1.1:
resolved "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz"
integrity sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg==
-object-inspect@^1.13.1, object-inspect@^1.7.0, object-inspect@^1.9.0:
+object-inspect@^1.13.1, object-inspect@^1.9.0:
version "1.13.1"
resolved "https://registry.npmjs.org/object-inspect/-/object-inspect-1.13.1.tgz"
integrity sha512-5qoj1RUiKOMsCCNLV1CBiPYE10sziTsnmNxkAI/rZhiD63CF7IqdFGC/XzjWjpSgLf0LxXX3bDFIh0E18f6UhQ==
-object-is@^1.0.2, object-is@^1.1.5:
+object-is@^1.1.5:
version "1.1.6"
resolved "https://registry.npmjs.org/object-is/-/object-is-1.1.6.tgz"
integrity sha512-F8cZ+KfGlSGi09lJT7/Nd6KJZ9ygtvYC0/UYYLI9nmQKLMnydpB9yvbv9K1uSkEu7FU9vYPmVwLg328tX+ot3Q==
@@ -7190,24 +6840,6 @@ object.assign@^4.1.0, object.assign@^4.1.5:
has-symbols "^1.0.3"
object-keys "^1.1.1"
-object.entries@^1.1.1:
- version "1.1.8"
- resolved "https://registry.npmjs.org/object.entries/-/object.entries-1.1.8.tgz"
- integrity sha512-cmopxi8VwRIAw/fkijJohSfpef5PdN0pMQJN6VC/ZKvn0LIknWD8KtgY6KlQdEc4tIjcQ3HxSMmnvtzIscdaYQ==
- dependencies:
- call-bind "^1.0.7"
- define-properties "^1.2.1"
- es-object-atoms "^1.0.0"
-
-object.values@^1.1.1:
- version "1.2.0"
- resolved "https://registry.npmjs.org/object.values/-/object.values-1.2.0.tgz"
- integrity sha512-yBYjY9QX2hnRmZHAjG/f13MzmBzxzYgQhFrke06TTyKY5zSTEqkOeukBzIdVA3j3ulu8Qa3MbVFShV7T2RmGtQ==
- dependencies:
- call-bind "^1.0.7"
- define-properties "^1.2.1"
- es-object-atoms "^1.0.0"
-
obuf@^1.0.0, obuf@^1.1.2:
version "1.1.2"
resolved "https://registry.npmjs.org/obuf/-/obuf-1.1.2.tgz"
@@ -7261,18 +6893,6 @@ opener@^1.5.2:
resolved "https://registry.npmjs.org/opener/-/opener-1.5.2.tgz"
integrity sha512-ur5UIdyw5Y7yEj9wLzhqXiy6GZ3Mwx0yGI+5sMn2r0N0v3cKJvUmFH5yPP+WXh9e0xfyzyJX95D8l088DNFj7A==
-optionator@^0.9.3:
- version "0.9.4"
- resolved "https://registry.npmjs.org/optionator/-/optionator-0.9.4.tgz"
- integrity sha512-6IpQ7mKUxRcZNLIObR0hz7lxsapSSIYNZJwXPGeF0mTVqGKFIXj1DQcMoT22S3ROcLyY/rz0PWaWZ9ayWmad9g==
- dependencies:
- deep-is "^0.1.3"
- fast-levenshtein "^2.0.6"
- levn "^0.4.1"
- prelude-ls "^1.2.1"
- type-check "^0.4.0"
- word-wrap "^1.2.5"
-
p-cancelable@^3.0.0:
version "3.0.0"
resolved "https://registry.npmjs.org/p-cancelable/-/p-cancelable-3.0.0.tgz"
@@ -7462,13 +7082,6 @@ path-parse@^1.0.7:
resolved "https://registry.npmjs.org/path-parse/-/path-parse-1.0.7.tgz"
integrity sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==
-path-to-regexp@^1.7.0:
- version "1.8.0"
- resolved "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-1.8.0.tgz"
- integrity sha512-n43JRhlUKUAlibEJhPeir1ncUID16QnEjNpwzNdO3Lm4ywrBpBZ5oLD0I6br9evr1Y9JTqwRtAh7JLoOzAQdVA==
- dependencies:
- isarray "0.0.1"
-
path-to-regexp@0.1.7:
version "0.1.7"
resolved "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-0.1.7.tgz"
@@ -7479,6 +7092,13 @@ path-to-regexp@2.2.1:
resolved "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-2.2.1.tgz"
integrity sha512-gu9bD6Ta5bwGrrU8muHzVOBFFREpp2iRkVfhBJahwJ6p6Xw20SjT0MxLnwkjOibQmGSYhiUnf2FLe7k+jcFmGQ==
+path-to-regexp@^1.7.0:
+ version "1.8.0"
+ resolved "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-1.8.0.tgz"
+ integrity sha512-n43JRhlUKUAlibEJhPeir1ncUID16QnEjNpwzNdO3Lm4ywrBpBZ5oLD0I6br9evr1Y9JTqwRtAh7JLoOzAQdVA==
+ dependencies:
+ isarray "0.0.1"
+
path-type@^4.0.0:
version "4.0.0"
resolved "https://registry.npmjs.org/path-type/-/path-type-4.0.0.tgz"
@@ -7489,11 +7109,6 @@ perfect-scrollbar@^1.5.5:
resolved "https://registry.npmjs.org/perfect-scrollbar/-/perfect-scrollbar-1.5.5.tgz"
integrity sha512-dzalfutyP3e/FOpdlhVryN4AJ5XDVauVWxybSkLZmakFE2sS3y3pc4JnSprw8tGmHvkaG5Edr5T7LBTZ+WWU2g==
-performance-now@^2.1.0:
- version "2.1.0"
- resolved "https://registry.npmjs.org/performance-now/-/performance-now-2.1.0.tgz"
- integrity sha512-7EAHlyLHI56VEIdK57uwHdHKIaAGbnXPiw0yWbarQZOKaKpvUIgW0jWRVLiatnM+XXlSwsanIBH/hzGMJulMow==
-
periscopic@^3.0.0:
version "3.1.0"
resolved "https://registry.npmjs.org/periscopic/-/periscopic-3.1.0.tgz"
@@ -7830,7 +7445,7 @@ postcss-zindex@^5.1.0:
resolved "https://registry.npmjs.org/postcss-zindex/-/postcss-zindex-5.1.0.tgz"
integrity sha512-fgFMf0OtVSBR1va1JNHYgMxYk73yhn/qb4uQDq1DLGYolz8gHCyr/sesEuGUaYs58E3ZJRcpoGuPVoB7Meiq9A==
-"postcss@^7.0.0 || ^8.0.1", postcss@^8.0.9, postcss@^8.1.0, postcss@^8.2.15, postcss@^8.2.2, postcss@^8.4.16, postcss@^8.4.17, postcss@^8.4.21, postcss@^8.4.26, "postcss@>4 <9", postcss@8.4.38:
+postcss@8.4.38, postcss@^8.4.17, postcss@^8.4.21, postcss@^8.4.26:
version "8.4.38"
resolved "https://registry.npmjs.org/postcss/-/postcss-8.4.38.tgz"
integrity sha512-Wglpdk03BSfXkHoQa3b/oulrotAkwrlLDRSOb9D0bN86FdRyE9lppSp33aHNPgBa0JKCoB+drFLZkQoRRYae5A==
@@ -7848,11 +7463,6 @@ postcss@^8.4.45:
picocolors "^1.1.0"
source-map-js "^1.2.1"
-prelude-ls@^1.2.1:
- version "1.2.1"
- resolved "https://registry.npmjs.org/prelude-ls/-/prelude-ls-1.2.1.tgz"
- integrity sha512-vkcDPrRZo1QZLbn5RLGPpg/WmIQ65qoWWhcGKf/b5eplkkarX0m9z8ppCat4mlOqUsWpyNuYgO3VRyrYHSzX5g==
-
pretty-error@^4.0.0:
version "4.0.0"
resolved "https://registry.npmjs.org/pretty-error/-/pretty-error-4.0.0.tgz"
@@ -7960,26 +7570,6 @@ quick-lru@^5.1.1:
resolved "https://registry.npmjs.org/quick-lru/-/quick-lru-5.1.1.tgz"
integrity sha512-WuyALRjWPDGtt/wzJiadO5AXY+8hZ80hVpe6MyivgraREW751X3SbhRvG3eLKOYN+8VEvqLcf3wdnt44Z4S4SA==
-raf@^3.4.1:
- version "3.4.1"
- resolved "https://registry.npmjs.org/raf/-/raf-3.4.1.tgz"
- integrity sha512-Sq4CW4QhwOHE8ucn6J34MqtZCeWFP2aQSmrlroYgqAV1PjStIhJXxYuTgUIfkEk7zTLjmIjLmU5q+fbD1NnOJA==
- dependencies:
- performance-now "^2.1.0"
-
-railroad-diagrams@^1.0.0:
- version "1.0.0"
- resolved "https://registry.npmjs.org/railroad-diagrams/-/railroad-diagrams-1.0.0.tgz"
- integrity sha512-cz93DjNeLY0idrCNOH6PviZGRN9GJhsdm9hpn1YCS879fj4W+x5IFJhhkRZcwVgMmFF7R82UA/7Oh+R8lLZg6A==
-
-randexp@0.4.6:
- version "0.4.6"
- resolved "https://registry.npmjs.org/randexp/-/randexp-0.4.6.tgz"
- integrity sha512-80WNmd9DA0tmZrw9qQa62GPPWfuXJknrmVmLcxvq4uZBdYqb1wYoKTmnlGUchvVWe0XiLupYkBoXVOxz3C8DYQ==
- dependencies:
- discontinuous-range "1.0.0"
- ret "~0.1.10"
-
randombytes@^2.1.0:
version "2.1.0"
resolved "https://registry.npmjs.org/randombytes/-/randombytes-2.1.0.tgz"
@@ -7987,21 +7577,16 @@ randombytes@^2.1.0:
dependencies:
safe-buffer "^5.1.0"
-range-parser@^1.2.1:
- version "1.2.1"
- resolved "https://registry.npmjs.org/range-parser/-/range-parser-1.2.1.tgz"
- integrity sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg==
-
-range-parser@~1.2.1:
- version "1.2.1"
- resolved "https://registry.npmjs.org/range-parser/-/range-parser-1.2.1.tgz"
- integrity sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg==
-
range-parser@1.2.0:
version "1.2.0"
resolved "https://registry.npmjs.org/range-parser/-/range-parser-1.2.0.tgz"
integrity sha512-kA5WQoNVo4t9lNx2kQNFCxKeBl5IbbSNBl1M/tLkw9WCn+hxNBAW5Qh8gdhs63CJnhjJ2zQWFoqPJP2sK1AV5A==
+range-parser@^1.2.1, range-parser@~1.2.1:
+ version "1.2.1"
+ resolved "https://registry.npmjs.org/range-parser/-/range-parser-1.2.1.tgz"
+ integrity sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg==
+
raw-body@2.5.1:
version "2.5.1"
resolved "https://registry.npmjs.org/raw-body/-/raw-body-2.5.1.tgz"
@@ -8060,7 +7645,7 @@ react-dev-utils@^12.0.1:
strip-ansi "^6.0.1"
text-table "^0.2.0"
-react-dom@*, "react-dom@^16.6.0 || ^17.0.0 || ^18.0.0", "react-dom@^16.8.4 || ^17.0.0 || ^18.0.0", react-dom@^18.0.0, "react-dom@>= 16.8.0", "react-dom@>= 16.8.0 < 19.0.0", react-dom@>=18:
+react-dom@^18.0.0:
version "18.2.0"
resolved "https://registry.npmjs.org/react-dom/-/react-dom-18.2.0.tgz"
integrity sha512-6IMTriUmvsjHUjNtEDudZfuDQUoWXVxKHhlEGSk81n4YFS+r/Kl99wXiwlVXtPBtJenozv2P+hxDsw9eA7Xo6g==
@@ -8111,7 +7696,7 @@ react-loadable-ssr-addon-v5-slorber@^1.0.1:
dependencies:
"@babel/runtime" "^7.10.3"
-react-loadable@*, "react-loadable@npm:@docusaurus/react-loadable@5.5.2":
+"react-loadable@npm:@docusaurus/react-loadable@5.5.2":
version "5.5.2"
resolved "https://registry.npmjs.org/@docusaurus/react-loadable/-/react-loadable-5.5.2.tgz"
integrity sha512-A3dYjdBGuy0IGT+wyLIGIKLRE+sAk1iNk0f1HjNDysO7u8lhL4N3VEm+FAubmJbAztn94F7MxBTPmnixbiyFdQ==
@@ -8139,7 +7724,7 @@ react-router-dom@^5.3.4:
tiny-invariant "^1.0.2"
tiny-warning "^1.0.0"
-react-router@^5.3.4, react-router@>=5, react-router@5.3.4:
+react-router@5.3.4, react-router@^5.3.4:
version "5.3.4"
resolved "https://registry.npmjs.org/react-router/-/react-router-5.3.4.tgz"
integrity sha512-Ys9K+ppnJah3QuaRiLxk+jDWOR1MekYQrlytiXxC1RyfbdsZkS5pvKAzCCr031xHixZwpnsYNT5xysdFHQaYsA==
@@ -8170,7 +7755,7 @@ react-tabs@^6.0.2:
clsx "^2.0.0"
prop-types "^15.5.0"
-react@*, "react@^16.0.0 || ^17.0.0 || ^18.0.0", "react@^16.13.1 || ^17.0.0 || ^18.0.0", "react@^16.6.0 || ^17.0.0 || ^18.0.0", "react@^16.8.0 || ^17 || ^18", "react@^16.8.0 || ^17.0.0 || ^18.0.0", "react@^16.8.4 || ^17.0.0 || ^18.0.0", react@^18.0.0, react@^18.2.0, "react@>= 16.8.0", "react@>= 16.8.0 < 19.0.0", react@>=15, react@>=16, react@>=16.0.0, react@>=18:
+react@^18.0.0:
version "18.2.0"
resolved "https://registry.npmjs.org/react/-/react-18.2.0.tgz"
integrity sha512-/3IjMdb2L9QbBdWiW5e3P2/npwMBaU9mHCSCUzNln0ZCYbcfTsGbTJrU/kGemdH2IWmB2ioZ+zkxtmq6g09fGQ==
@@ -8490,11 +8075,6 @@ responselike@^3.0.0:
dependencies:
lowercase-keys "^3.0.0"
-ret@~0.1.10:
- version "0.1.15"
- resolved "https://registry.npmjs.org/ret/-/ret-0.1.15.tgz"
- integrity sha512-TTlYpa+OL+vMMNG24xSlQGEJ3B/RzEfUlLct7b5G/ytav+wPrplCpVMFuwzXbkecJrb6IYo1iFb0S9v37754mg==
-
retry@^0.13.1:
version "0.13.1"
resolved "https://registry.npmjs.org/retry/-/retry-0.13.1.tgz"
@@ -8512,14 +8092,6 @@ rimraf@^3.0.2:
dependencies:
glob "^7.1.3"
-rst-selector-parser@^2.2.3:
- version "2.2.3"
- resolved "https://registry.npmjs.org/rst-selector-parser/-/rst-selector-parser-2.2.3.tgz"
- integrity sha512-nDG1rZeP6oFTLN6yNDV/uiAvs1+FS/KlrEwh7+y7dpuApDBy6bI2HTBcc0/V8lv9OTqfyD34eF7au2pm8aBbhA==
- dependencies:
- lodash.flattendeep "^4.4.0"
- nearley "^2.7.10"
-
rtl-detect@^1.0.4:
version "1.1.2"
resolved "https://registry.npmjs.org/rtl-detect/-/rtl-detect-1.1.2.tgz"
@@ -8552,20 +8124,15 @@ safe-array-concat@^1.1.2:
has-symbols "^1.0.3"
isarray "^2.0.5"
-safe-buffer@^5.1.0, safe-buffer@>=5.1.0, safe-buffer@~5.2.0, safe-buffer@5.2.1:
- version "5.2.1"
- resolved "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz"
- integrity sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==
-
-safe-buffer@~5.1.0, safe-buffer@~5.1.1:
+safe-buffer@5.1.2, safe-buffer@~5.1.0, safe-buffer@~5.1.1:
version "5.1.2"
resolved "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz"
integrity sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==
-safe-buffer@5.1.2:
- version "5.1.2"
- resolved "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz"
- integrity sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==
+safe-buffer@5.2.1, safe-buffer@>=5.1.0, safe-buffer@^5.1.0, safe-buffer@~5.2.0:
+ version "5.2.1"
+ resolved "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz"
+ integrity sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==
safe-regex-test@^1.0.3:
version "1.0.3"
@@ -8593,25 +8160,16 @@ scheduler@^0.23.0:
dependencies:
loose-envify "^1.1.0"
-schema-utils@^3.0.0:
- version "3.3.0"
- resolved "https://registry.npmjs.org/schema-utils/-/schema-utils-3.3.0.tgz"
- integrity sha512-pN/yOAvcC+5rQ5nERGuwrjLlYvLTbCibnZ1I7B1LaiAz9BRBlE9GMgE/eqV30P7aJQUf7Ddimy/RsbYO/GrVGg==
- dependencies:
- "@types/json-schema" "^7.0.8"
- ajv "^6.12.5"
- ajv-keywords "^3.5.2"
-
-schema-utils@^3.1.1:
- version "3.3.0"
- resolved "https://registry.npmjs.org/schema-utils/-/schema-utils-3.3.0.tgz"
- integrity sha512-pN/yOAvcC+5rQ5nERGuwrjLlYvLTbCibnZ1I7B1LaiAz9BRBlE9GMgE/eqV30P7aJQUf7Ddimy/RsbYO/GrVGg==
+schema-utils@2.7.0:
+ version "2.7.0"
+ resolved "https://registry.npmjs.org/schema-utils/-/schema-utils-2.7.0.tgz"
+ integrity sha512-0ilKFI6QQF5nxDZLFn2dMjvc4hjg/Wkg7rHd3jK6/A4a1Hl9VFdQWvgB1UMGoU94pad1P/8N7fMcEnLnSiju8A==
dependencies:
- "@types/json-schema" "^7.0.8"
- ajv "^6.12.5"
- ajv-keywords "^3.5.2"
+ "@types/json-schema" "^7.0.4"
+ ajv "^6.12.2"
+ ajv-keywords "^3.4.1"
-schema-utils@^3.2.0:
+schema-utils@^3.0.0, schema-utils@^3.1.1, schema-utils@^3.2.0:
version "3.3.0"
resolved "https://registry.npmjs.org/schema-utils/-/schema-utils-3.3.0.tgz"
integrity sha512-pN/yOAvcC+5rQ5nERGuwrjLlYvLTbCibnZ1I7B1LaiAz9BRBlE9GMgE/eqV30P7aJQUf7Ddimy/RsbYO/GrVGg==
@@ -8630,20 +8188,6 @@ schema-utils@^4.0.0:
ajv-formats "^2.1.1"
ajv-keywords "^5.1.0"
-schema-utils@2.7.0:
- version "2.7.0"
- resolved "https://registry.npmjs.org/schema-utils/-/schema-utils-2.7.0.tgz"
- integrity sha512-0ilKFI6QQF5nxDZLFn2dMjvc4hjg/Wkg7rHd3jK6/A4a1Hl9VFdQWvgB1UMGoU94pad1P/8N7fMcEnLnSiju8A==
- dependencies:
- "@types/json-schema" "^7.0.4"
- ajv "^6.12.2"
- ajv-keywords "^3.4.1"
-
-"search-insights@>= 1 < 3":
- version "2.13.0"
- resolved "https://registry.npmjs.org/search-insights/-/search-insights-2.13.0.tgz"
- integrity sha512-Orrsjf9trHHxFRuo9/rzm0KIWmgzE8RMlZMzuhZOJ01Rnz3D0YBAe+V6473t6/H6c7irs6Lt48brULAiRWb3Vw==
-
section-matter@^1.0.0:
version "1.0.0"
resolved "https://registry.npmjs.org/section-matter/-/section-matter-1.0.0.tgz"
@@ -8786,7 +8330,7 @@ shallow-clone@^3.0.0:
dependencies:
kind-of "^6.0.2"
-shallowequal@^1.1.0, shallowequal@1.1.0:
+shallowequal@1.1.0, shallowequal@^1.1.0:
version "1.1.0"
resolved "https://registry.npmjs.org/shallowequal/-/shallowequal-1.1.0.tgz"
integrity sha512-y0m1JoUZSlPAjXVtPPW70aZWfIL/dSP7AFkRnniLCrK/8MDKog3TySTBmckD+RObVxH0v4Tox67+F14PdED2oQ==
@@ -8948,12 +8492,7 @@ source-map-support@~0.5.20:
buffer-from "^1.0.0"
source-map "^0.6.0"
-source-map@^0.6.0:
- version "0.6.1"
- resolved "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz"
- integrity sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==
-
-source-map@^0.6.1:
+source-map@^0.6.0, source-map@^0.6.1, source-map@~0.6.0:
version "0.6.1"
resolved "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz"
integrity sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==
@@ -8963,11 +8502,6 @@ source-map@^0.7.0:
resolved "https://registry.npmjs.org/source-map/-/source-map-0.7.4.tgz"
integrity sha512-l3BikUxvPOcn5E74dZiq5BGsTb5yEwhaTSzccU6t4sDOH8NWJCstKO5QT2CvtFoK6F0saL7p9xHAqHOlCPJygA==
-source-map@~0.6.0:
- version "0.6.1"
- resolved "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz"
- integrity sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==
-
space-separated-tokens@^2.0.0:
version "2.0.2"
resolved "https://registry.npmjs.org/space-separated-tokens/-/space-separated-tokens-2.0.2.tgz"
@@ -9011,16 +8545,16 @@ stable@^0.1.8:
resolved "https://registry.npmjs.org/stable/-/stable-0.1.8.tgz"
integrity sha512-ji9qxRnOVfcuLDySj9qzhGSEFVobyt1kIOSkj1qZzYLzq7Tos/oUUWvotUPQLlrsidqsK6tBH89Bc9kL5zHA6w==
-"statuses@>= 1.4.0 < 2":
- version "1.5.0"
- resolved "https://registry.npmjs.org/statuses/-/statuses-1.5.0.tgz"
- integrity sha512-OpZ3zP+jT1PI7I8nemJX4AKmAX070ZkYPVWV/AaKTJl+tXCTGyVdC1a4SL8RUQYEwk/f34ZX8UTykN68FwrqAA==
-
statuses@2.0.1:
version "2.0.1"
resolved "https://registry.npmjs.org/statuses/-/statuses-2.0.1.tgz"
integrity sha512-RwNA9Z/7PrK06rYLIzFMlaF+l73iwpzsqRIFgbMLbTcLD6cOao82TaWefPXQvB2fOC4AjuYSEndS7N/mTCbkdQ==
+"statuses@>= 1.4.0 < 2":
+ version "1.5.0"
+ resolved "https://registry.npmjs.org/statuses/-/statuses-1.5.0.tgz"
+ integrity sha512-OpZ3zP+jT1PI7I8nemJX4AKmAX070ZkYPVWV/AaKTJl+tXCTGyVdC1a4SL8RUQYEwk/f34ZX8UTykN68FwrqAA==
+
std-env@^3.0.1:
version "3.7.0"
resolved "https://registry.npmjs.org/std-env/-/std-env-3.7.0.tgz"
@@ -9031,30 +8565,7 @@ stickyfill@^1.1.1:
resolved "https://registry.npmjs.org/stickyfill/-/stickyfill-1.1.1.tgz"
integrity sha512-GCp7vHAfpao+Qh/3Flh9DXEJ/qSi0KJwJw6zYlZOtRYXWUIpMM6mC2rIep/dK8RQqwW0KxGJIllmjPIBOGN8AA==
-string_decoder@^1.1.1:
- version "1.3.0"
- resolved "https://registry.npmjs.org/string_decoder/-/string_decoder-1.3.0.tgz"
- integrity sha512-hkRX8U1WjJFd8LsDJ2yQ/wWWxaopEsABU1XfkM8A+j0+85JAGppt16cr1Whg6KIbb4okU6Mql6BOj+uup/wKeA==
- dependencies:
- safe-buffer "~5.2.0"
-
-string_decoder@~1.1.1:
- version "1.1.1"
- resolved "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz"
- integrity sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==
- dependencies:
- safe-buffer "~5.1.0"
-
-string-width@^4.1.0, string-width@^4.2.0:
- version "4.2.3"
- resolved "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz"
- integrity sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==
- dependencies:
- emoji-regex "^8.0.0"
- is-fullwidth-code-point "^3.0.0"
- strip-ansi "^6.0.1"
-
-string-width@^4.2.3:
+string-width@^4.1.0, string-width@^4.2.0, string-width@^4.2.3:
version "4.2.3"
resolved "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz"
integrity sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==
@@ -9072,7 +8583,7 @@ string-width@^5.0.1, string-width@^5.1.2:
emoji-regex "^9.2.2"
strip-ansi "^7.0.1"
-string.prototype.trim@^1.2.1, string.prototype.trim@^1.2.9:
+string.prototype.trim@^1.2.9:
version "1.2.9"
resolved "https://registry.npmjs.org/string.prototype.trim/-/string.prototype.trim-1.2.9.tgz"
integrity sha512-klHuCNxiMZ8MlsOihJhJEBJAiMVqU3Z2nEXWfWnIqjN0gEFS9J9+IxKozWWtQGcgoa1WUZzLjKPTr4ZHNFTFxw==
@@ -9100,6 +8611,20 @@ string.prototype.trimstart@^1.0.8:
define-properties "^1.2.1"
es-object-atoms "^1.0.0"
+string_decoder@^1.1.1:
+ version "1.3.0"
+ resolved "https://registry.npmjs.org/string_decoder/-/string_decoder-1.3.0.tgz"
+ integrity sha512-hkRX8U1WjJFd8LsDJ2yQ/wWWxaopEsABU1XfkM8A+j0+85JAGppt16cr1Whg6KIbb4okU6Mql6BOj+uup/wKeA==
+ dependencies:
+ safe-buffer "~5.2.0"
+
+string_decoder@~1.1.1:
+ version "1.1.1"
+ resolved "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz"
+ integrity sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==
+ dependencies:
+ safe-buffer "~5.1.0"
+
stringify-entities@^4.0.0:
version "4.0.3"
resolved "https://registry.npmjs.org/stringify-entities/-/stringify-entities-4.0.3.tgz"
@@ -9165,7 +8690,7 @@ style-to-object@^1.0.0:
dependencies:
inline-style-parser "0.2.2"
-"styled-components@^4.1.1 || ^5.1.1 || ^6.0.5", styled-components@^6.1.11:
+styled-components@^6.1.11:
version "6.1.13"
resolved "https://registry.npmjs.org/styled-components/-/styled-components-6.1.13.tgz"
integrity sha512-M0+N2xSnAtwcVAQeFEsGWFFxXDftHUD7XrKla06QbpUMmbmtFBMMTcKWvFXtWxuD5qQkB8iU5gk6QASlx2ZRMw==
@@ -9342,16 +8867,16 @@ trough@^2.0.0:
resolved "https://registry.npmjs.org/trough/-/trough-2.1.0.tgz"
integrity sha512-AqTiAOLcj85xS7vQ8QkAV41hPDIJ71XJB4RCUrzo/1GM2CQwhkJGaf9Hgr7BOugMRpgGUrqRg/DrBDl4H40+8g==
+tslib@2.6.2, tslib@^2.0.3, tslib@^2.6.0:
+ version "2.6.2"
+ resolved "https://registry.npmjs.org/tslib/-/tslib-2.6.2.tgz"
+ integrity sha512-AEYxH93jGFPn/a2iVAwW87VuUIkR1FVUKB77NwMF7nBTDkDrrT/Hpt/IrCJ0QXhW27jTBDcf5ZY7w6RiqTMw2Q==
+
tslib@^1.8.1:
version "1.14.1"
resolved "https://registry.npmjs.org/tslib/-/tslib-1.14.1.tgz"
integrity sha512-Xni35NKzjgMrwevysHTCArtLDpPvye8zV/0E4EyYn43P7/7qvQwPh9BGkHewbMulVntbigmcT7rdX3BNo9wRJg==
-tslib@^2.0.3, tslib@^2.6.0, tslib@2.6.2:
- version "2.6.2"
- resolved "https://registry.npmjs.org/tslib/-/tslib-2.6.2.tgz"
- integrity sha512-AEYxH93jGFPn/a2iVAwW87VuUIkR1FVUKB77NwMF7nBTDkDrrT/Hpt/IrCJ0QXhW27jTBDcf5ZY7w6RiqTMw2Q==
-
tsutils@^3.21.0:
version "3.21.0"
resolved "https://registry.npmjs.org/tsutils/-/tsutils-3.21.0.tgz"
@@ -9359,18 +8884,6 @@ tsutils@^3.21.0:
dependencies:
tslib "^1.8.1"
-type-check@^0.4.0, type-check@~0.4.0:
- version "0.4.0"
- resolved "https://registry.npmjs.org/type-check/-/type-check-0.4.0.tgz"
- integrity sha512-XleUoc9uwGXqjWwXaUTZAmzMcFZ5858QA2vvx1Ur5xIcixXIP+8LnFDgRplU30us6teqdlskFfu+ae4K79Ooew==
- dependencies:
- prelude-ls "^1.2.1"
-
-type-fest@^0.20.2:
- version "0.20.2"
- resolved "https://registry.npmjs.org/type-fest/-/type-fest-0.20.2.tgz"
- integrity sha512-Ne+eE4r0/iWnpAxD852z3A+N0Bt5RN//NjJwRd2VFHEmrywxf5vsZlh4R6lixl6B+wz/8d+maTSAkN1FIkI3LQ==
-
type-fest@^1.0.1:
version "1.4.0"
resolved "https://registry.npmjs.org/type-fest/-/type-fest-1.4.0.tgz"
@@ -9440,7 +8953,7 @@ typedarray-to-buffer@^3.1.5:
dependencies:
is-typedarray "^1.0.0"
-"typescript@>= 2.7", "typescript@>=2.8.0 || >= 3.2.0-dev || >= 3.3.0-dev || >= 3.4.0-dev || >= 3.5.0-dev || >= 3.6.0-dev || >= 3.6.0-beta || >= 3.7.0-dev || >= 3.7.0-beta", typescript@>=4.9.5, typescript@~5.2.2:
+typescript@~5.2.2:
version "5.2.2"
resolved "https://registry.npmjs.org/typescript/-/typescript-5.2.2.tgz"
integrity sha512-mI4WrpHsbCIcwT9cF4FZvr80QUeKvsUsUvKDoR+X/7XHQH98xYD8YHZg7ANtz2GtZt/CBq2QJ0thkGJMHfqc1w==
@@ -9566,7 +9079,7 @@ universalify@^2.0.0:
resolved "https://registry.npmjs.org/universalify/-/universalify-2.0.1.tgz"
integrity sha512-gptHNQghINnc/vTGIk0SOFGFNXw7JVrlRUtConJRlvaw6DuX0wO5Jeko9sWrMBhh+PsYAZ7oXAiOnf/UKogyiw==
-unpipe@~1.0.0, unpipe@1.0.0:
+unpipe@1.0.0, unpipe@~1.0.0:
version "1.0.0"
resolved "https://registry.npmjs.org/unpipe/-/unpipe-1.0.0.tgz"
integrity sha512-pjy2bYhSsufwWlKwPc+l3cN7+wuJlK6uz0YdJEOlQDbl6jo/YlPi4mb8agUkVC8BF7V8NuzeyPNqRksA3hztKQ==
@@ -9795,7 +9308,7 @@ webpack-sources@^3.2.2, webpack-sources@^3.2.3:
resolved "https://registry.npmjs.org/webpack-sources/-/webpack-sources-3.2.3.tgz"
integrity sha512-/DyMEOrDgLKKIG0fmvtz+4dUX/3Ghozwgm6iPp8KRhvn+eQf9+Q7GWxVNMk3+uCPWfdXYC4ExGBckIXdFEfH1w==
-"webpack@^4.0.0 || ^5.0.0", "webpack@^4.37.0 || ^5.0.0", webpack@^5.0.0, webpack@^5.1.0, webpack@^5.20.0, webpack@^5.88.1, "webpack@>= 4", "webpack@>=4.41.1 || 5.x", webpack@>=5, "webpack@3 || 4 || 5":
+webpack@^5.88.1:
version "5.89.0"
resolved "https://registry.npmjs.org/webpack/-/webpack-5.89.0.tgz"
integrity sha512-qyfIC10pOr70V+jkmud8tMfajraGCZMBWJtrmuBymQKCrLTRejBI8STDp1MCyZu/QTdZSeacCQYpYNQVOzX5kw==
@@ -9835,7 +9348,7 @@ webpackbar@^5.0.2:
pretty-time "^1.1.0"
std-env "^3.0.1"
-websocket-driver@^0.7.4, websocket-driver@>=0.5.1:
+websocket-driver@>=0.5.1, websocket-driver@^0.7.4:
version "0.7.4"
resolved "https://registry.npmjs.org/websocket-driver/-/websocket-driver-0.7.4.tgz"
integrity sha512-b17KeDIQVjvb0ssuSDF2cYXSg2iztliJ4B9WdsuB6J952qCPKmnVq4DyW5motImXHDC1cBT/1UezrJVsKw5zjg==
@@ -9905,11 +9418,6 @@ wildcard@^2.0.0:
resolved "https://registry.npmjs.org/wildcard/-/wildcard-2.0.1.tgz"
integrity sha512-CC1bOL87PIWSBhDcTrdeLo6eGT7mCFtrg0uIJtqJUFyK+eJnzl8A1niH56uu7KMa5XFrtiV+AQuHO3n7DsHnLQ==
-word-wrap@^1.2.5:
- version "1.2.5"
- resolved "https://registry.npmjs.org/word-wrap/-/word-wrap-1.2.5.tgz"
- integrity sha512-BN22B5eaMMI9UMtjrGd5g5eCYPpCPDUy0FJXbYsaT5zYxjFOckS53SQDE3pWkVoWpHXVb3BrYcEN4Twa55B5cA==
-
wrap-ansi@^7.0.0:
version "7.0.0"
resolved "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz"
diff --git a/go.mod b/go.mod
index 88491743..99ce781a 100644
--- a/go.mod
+++ b/go.mod
@@ -1,8 +1,6 @@
module github.com/interlink-hq/interlink
-go 1.24.0
-
-toolchain go1.24.3
+go 1.26.0
require (
github.com/containerd/containerd v1.7.6
diff --git a/pkg/interlink/api/handler.go b/pkg/interlink/api/handler.go
index 40656bed..52d6c8c3 100644
--- a/pkg/interlink/api/handler.go
+++ b/pkg/interlink/api/handler.go
@@ -7,8 +7,11 @@ import (
"bufio"
"context"
"fmt"
+ "html"
"io"
"net/http"
+ "net/url"
+ "strings"
"github.com/containerd/containerd/log"
"github.com/google/uuid"
@@ -18,6 +21,22 @@ import (
"github.com/interlink-hq/interlink/pkg/interlink"
)
+// isSafeURL allows only http(s) URLs and rejects localhost, loopback, link-local, and internal hosts (SSRF guard).
+func isSafeURL(rawurl string) bool {
+	u, err := url.Parse(rawurl)
+	if err != nil {
+		return false
+	}
+	if u.Scheme != "http" && u.Scheme != "https" {
+		return false
+	}
+	host := strings.ToLower(u.Hostname())
+	if host == "localhost" || host == "::1" || strings.HasPrefix(host, "127.") || strings.HasPrefix(host, "169.254.") || strings.HasSuffix(host, ".internal") {
+		return false
+	}
+	return true
+}
+
// InterLinkHandler handles HTTP requests for the interLink API server.
// It acts as a proxy between the Virtual Kubelet and sidecar plugins,
// forwarding requests and managing pod lifecycle operations.
@@ -87,7 +106,6 @@ func ReqWithError(
sessionContext string,
clientHTTP *http.Client,
) ([]byte, error) {
-
log.G(ctx).Infof("[ReqWithError] Starting request to %s | respondWithValues=%v | respondWithReturn=%v | session=%s",
req.URL.String(), respondWithValues, respondWithReturn, sessionContext)
@@ -99,7 +117,14 @@ func ReqWithError(
// Add session number for end-to-end trace
AddSessionContext(req, sessionContext)
- resp, err := clientHTTP.Do(req)
+	// SSRF protection: reject unsafe target URLs before performing the
+	// request so the proxy cannot be abused to reach internal services.
+	if !isSafeURL(req.URL.String()) {
+		return nil, fmt.Errorf("potential SSRF detected: %s", req.URL.String())
+	}
+	// The URL has been validated by isSafeURL above; the request itself
+	// is therefore safe to execute.
+	resp, err := clientHTTP.Do(req) // #nosec G704
if err != nil {
statusCode := http.StatusInternalServerError
log.G(ctx).Errorf("%s HTTP client.Do() failed: %v", sessionContextMessage, err)
@@ -145,7 +170,9 @@ func ReqWithError(
errHTTP := fmt.Errorf("%s call exit status: %d. Body: %s", sessionContextMessage, statusCode, ret)
log.G(ctx).Error(errHTTP)
- _, err = w.Write([]byte(errHTTP.Error()))
+ // Prevent XSS by escaping error message
+ safeErr := html.EscapeString(errHTTP.Error())
+ _, err = w.Write([]byte(safeErr))
if err != nil {
return nil, fmt.Errorf(sessionContextMessage+
"HTTP request in error and could not write all body response to InterLink Node error: %w", err)
diff --git a/pkg/interlink/config.go b/pkg/interlink/config.go
index ac3e9773..032b6c77 100644
--- a/pkg/interlink/config.go
+++ b/pkg/interlink/config.go
@@ -7,6 +7,8 @@ import (
"flag"
"fmt"
"os"
+ "path/filepath"
+ "strings"
"time"
"github.com/containerd/containerd/log"
@@ -15,7 +17,9 @@ import (
"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
"go.opentelemetry.io/otel/propagation"
"go.opentelemetry.io/otel/sdk/resource"
+
sdktrace "go.opentelemetry.io/otel/sdk/trace"
+
semconv "go.opentelemetry.io/otel/semconv/v1.21.0"
"google.golang.org/grpc"
"google.golang.org/grpc/connectivity"
@@ -184,7 +188,14 @@ func SetupTelemetry(ctx context.Context, serviceName string) (*sdktrace.TracerPr
log.G(ctx).Info("CA certificate provided, setting up mutual TLS")
- caCert, err := os.ReadFile(caCrtFilePath)
+	// Path traversal protection: the CA certificate path must be an
+	// absolute path and must not contain ".." segments.
+	if !filepath.IsAbs(caCrtFilePath) || strings.Contains(caCrtFilePath, "..") {
+		return nil, fmt.Errorf("invalid CA certificate file path")
+	}
+	// caCrtFilePath has been validated above, so this read cannot
+	// traverse outside the intended location.
+	caCert, err := os.ReadFile(caCrtFilePath) // #nosec G703
if err != nil {
return nil, fmt.Errorf("failed to load CA certificate: %w", err)
}
@@ -318,13 +329,24 @@ func NewInterLinkConfig() (Config, error) {
}
}
- if _, err := os.Stat(path); err != nil {
+	// Path traversal protection: the config path must be an absolute
+	// path and must not contain ".." segments.
+	if !filepath.IsAbs(path) || strings.Contains(path, "..") {
+		return Config{}, fmt.Errorf("invalid config file path")
+	}
+	// Stat first so a missing file yields the explicit log message and
+	// error below rather than an opaque read failure.
+	if _, err := os.Stat(path); err != nil { // #nosec G703
log.G(context.Background()).Error("File " + path + " doesn't exist. You can set a custom path by exporting INTERLINKCONFIGPATH. Exiting...")
return Config{}, err
}
log.G(context.Background()).Info("Loading InterLink config from " + path)
- yfile, err := os.ReadFile(path)
+ // Path traversal protection
+ if !filepath.IsAbs(path) || strings.Contains(path, "..") {
+ return Config{}, fmt.Errorf("invalid config file path")
+ }
+ yfile, err := os.ReadFile(path) // #nosec G703
if err != nil {
log.G(context.Background()).Error("Error opening config file, exiting...")
return Config{}, err
diff --git a/pkg/virtualkubelet/execute.go b/pkg/virtualkubelet/execute.go
index 3b6c994b..fee4f68a 100644
--- a/pkg/virtualkubelet/execute.go
+++ b/pkg/virtualkubelet/execute.go
@@ -11,25 +11,42 @@ import (
"io"
"math/rand"
"net/http"
+ "net/url"
"os"
"regexp"
"strconv"
"strings"
"time"
+ types "github.com/interlink-hq/interlink/pkg/interlink"
+ authenticationv1 "k8s.io/api/authentication/v1"
+ k8sTypes "k8s.io/apimachinery/pkg/types"
+
"github.com/containerd/containerd/log"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute"
trace "go.opentelemetry.io/otel/trace"
- authenticationv1 "k8s.io/api/authentication/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-
- types "github.com/interlink-hq/interlink/pkg/interlink"
- k8sTypes "k8s.io/apimachinery/pkg/types"
)
+// isSafeURL allows only http(s) URLs and rejects localhost, loopback, link-local, and internal hosts (SSRF guard).
+func isSafeURL(rawurl string) bool {
+	u, err := url.Parse(rawurl)
+	if err != nil {
+		return false
+	}
+	if u.Scheme != "http" && u.Scheme != "https" {
+		return false
+	}
+	host := strings.ToLower(u.Hostname())
+	if host == "localhost" || host == "::1" || strings.HasPrefix(host, "127.") || strings.HasPrefix(host, "169.254.") || strings.HasSuffix(host, ".internal") {
+		return false
+	}
+	return true
+}
+
const (
PodPhaseInitialize = "Initializing"
PodPhaseCompleted = "Completed"
@@ -107,7 +124,10 @@ func doRequestWithClient(req *http.Request, token string, httpClient *http.Clien
req.Header.Add("Authorization", "Bearer "+token)
}
req.Header.Set("Content-Type", "application/json")
- return httpClient.Do(req)
+ if !isSafeURL(req.URL.String()) {
+ return nil, fmt.Errorf("potential SSRF detected: %s", req.URL.String())
+ }
+ return httpClient.Do(req) // #nosec G704
}
func getSidecarEndpoint(ctx context.Context, interLinkURL string, interLinkPort string) string {