diff --git a/.check/VerifySourceCalls.py b/.check/VerifySourceCalls.py index 99d6d66..c93a12f 100644 --- a/.check/VerifySourceCalls.py +++ b/.check/VerifySourceCalls.py @@ -138,27 +138,47 @@ def parse_functions_in_file(filepath): def parse_script_for_includes_and_local_funcs(script_path): includes = set() + shellcheck_directives = set() local_funcs = parse_functions_in_file(script_path) if not os.path.isfile(script_path): return includes, local_funcs with open(script_path, "r", encoding="utf-8", newline='\n') as f: - for line in f: - line_stripped = line.strip() + lines = f.readlines() + + for i, line in enumerate(lines): + line_stripped = line.strip() + + # Check for shellcheck directive + sc_match = SHELLCHECK_SOURCE_REGEX.search(line_stripped) + if sc_match: + util_name = sc_match.group(1) + shellcheck_directives.add(util_name) - # Check for source statement - s_match = SOURCE_REGEX.search(line_stripped) - if s_match: - # e.g., "Prompts.sh" or "Cluster.sh" - includes.add(s_match.group(1)) + # Look for matching source statement in next few lines (max 5) + found_source = False + for j in range(i + 1, min(i + 6, len(lines))): + next_line = lines[j].strip() + s_match = SOURCE_REGEX.search(next_line) + if s_match and s_match.group(1) == util_name: + found_source = True + break + # Stop if we hit another shellcheck or source for different file + if SHELLCHECK_SOURCE_REGEX.search(next_line) or SOURCE_REGEX.search(next_line): + break + + if not found_source: + # Orphaned shellcheck directive - don't add to includes + print(f" [WARNING] Orphaned shellcheck directive for {util_name} at line {i+1}") + continue + + # Check for source statement + s_match = SOURCE_REGEX.search(line_stripped) + if s_match: + # e.g., "Prompts.sh" or "Cluster.sh" + includes.add(s_match.group(1)) - # Also check for shellcheck directive (for consistency checking) - sc_match = SHELLCHECK_SOURCE_REGEX.search(line_stripped) - if sc_match: - # Verify this matches an actual source line - 
includes.add(sc_match.group(1)) - return includes, local_funcs ############################################################################### diff --git a/.check/_RunChecks.sh b/.check/_RunChecks.sh index c48c230..36105ae 100644 --- a/.check/_RunChecks.sh +++ b/.check/_RunChecks.sh @@ -126,6 +126,33 @@ fi CHECKS_RUN=$((CHECKS_RUN + 1)) echo "" +# Check 1a: Basic shell syntax validation +echo "1a. Validating shell syntax (bash -n)..." +SYNTAX_ERRORS=0 +SYNTAX_FILES=() +while IFS= read -r -d '' file; do + if ! bash -n "$file" 2>/dev/null; then + SYNTAX_ERRORS=$((SYNTAX_ERRORS + 1)) + SYNTAX_FILES+=("$file") + fi +done < <(find . -name "*.sh" -not -path "*/.git/*" -not -path "*/.check/*" -print0) + +if [ $SYNTAX_ERRORS -eq 0 ]; then + echo "- All shell scripts have valid syntax" + CHECKS_PASSED=$((CHECKS_PASSED + 1)) +else + echo "- FAILED: $SYNTAX_ERRORS file(s) with syntax errors" + if [ "$VERBOSE" = true ] || [ $SYNTAX_ERRORS -le 5 ]; then + for file in "${SYNTAX_FILES[@]}"; do + echo " $file" + bash -n "$file" 2>&1 | head -3 | sed 's/^/ /' + done + fi + CHECKS_FAILED=$((CHECKS_FAILED + 1)) +fi +CHECKS_RUN=$((CHECKS_RUN + 1)) +echo "" + # Check 2: Update function indices echo "2. Updating function indices..." 
python3 .check/UpdateFunctionIndex.py ./ 2>&1 | tail -3 diff --git a/.gitignore b/.gitignore index 8e636cf..126d5ee 100644 --- a/.gitignore +++ b/.gitignore @@ -4,6 +4,7 @@ nodes.json TestConnectionInfo.json .d/ +.nfs* # Downloaded Proxmox documentation (keep scripts, ignore generated content) .docs/*.html diff --git a/.site/MultiView.png b/.site/MultiView.png new file mode 100644 index 0000000..ecdd95c Binary files /dev/null and b/.site/MultiView.png differ diff --git a/.site/MultiView2.png b/.site/MultiView2.png new file mode 100644 index 0000000..6e58447 Binary files /dev/null and b/.site/MultiView2.png differ diff --git a/CHANGELOG.md b/CHANGELOG.md index 772f798..13e7193 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,68 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). +## [2.1.6] - 2025-11-25 + +Bug fixes, username support, and validation improvements + +### Added +- **Username Configuration** - Support for specifying SSH usernames per node + - Added `username` field to nodes.json configuration + - Username prompts in all node configuration flows (manual entry, saved nodes, IP ranges, VMID ranges) + - Default username is "root" with option to specify alternatives + - Display format changed to `username@ip` throughout GUI +- **Dependency Checking** - Runtime validation before remote execution + - `__check_remote_dependencies__()` function checks for sshpass and jq + - Helpful error messages with installation commands for all major distros + - Notes that sshpass is not required when using SSH keys +- **Syntax Validation** - Basic shell syntax checking added to validation suite + - New Check 1a. 
in `_RunChecks.sh` runs `bash -n` on all .sh files + - Catches structural errors and orphaned code blocks + - Shows file names and line numbers for syntax errors +- **Enhanced Source Verification** - Improved validation of shellcheck directives + - `VerifySourceCalls.py` now validates shellcheck comments have matching source statements + - Detects orphaned shellcheck directives within 5 lines + - Prevents mismatched documentation and code + +### Changed +- **Remote Execution UI** - Scripts hidden in remote mode for better UX + - GUI.sh and CCPVE.sh hidden from root menu when in remote execution mode + - Prevents accidental execution of control scripts on remote nodes + - Scripts still shown in local mode and subdirectories +- **README.md** - Clarified dependency requirements + - Updated installation command to include `jq` and `sshpass` + - Documented that sshpass is only needed for password-based authentication + - Separated build-time tools from runtime dependencies + +### Fixed +- **Critical: Orphaned Error Handler** - Fixed syntax error in `Host/HostInfo.sh` + - Removed orphaned error handler code block (lines 34-36) + - File had error message without matching source statement + - Bug prevented script execution on remote nodes +- **Username Hardcoding** - Removed hardcoded "root@" from all remote operations + - Updated all SSH/SCP operations in `RemoteExecutor.sh` to use configured username + - `__ssh_exec__`, `__scp_exec__`, `__scp_exec_recursive__`, `__scp_download__` now accept username parameter + - `ConfigManager.sh` tracks username per node in `NODE_USERNAMES` associative array +- **Missing Validation** - Syntax check gap closed + - `_RunChecks.sh` previously did not validate basic shell syntax + - Now catches structural errors that bash -n would detect + - Prevents orphaned code and malformed control structures from entering repository + +### Technical Details +- `nodes.json.template` - Added username field with "root" default +- `Utilities/ConfigManager.sh` - Added 
NODE_USERNAMES tracking and __get_node_username__() function +- `Utilities/RemoteExecutor.sh` - All remote operations parameterized with username +- `GUI.sh` - Dependency checking, username prompts, and script filtering +- `.check/VerifySourceCalls.py` - Enhanced shellcheck directive validation +- `.check/_RunChecks.sh` - Added Check 1a. for syntax validation + +### Developer Notes +The orphaned error handler bug existed because: +- `bash -n` syntax check was not being run in validation suite +- `DeadCodeCheck.py` only checks unused functions/variables, not code structure +- `VerifySourceCalls.py` didn't validate orphaned error handlers +The fix adds syntax validation to prevent similar issues... + ## [2.1.5] - 2025-11-24 PVE documentation automation and repository cleanup diff --git a/GUI.sh b/GUI.sh index 626a180..c01d861 100644 --- a/GUI.sh +++ b/GUI.sh @@ -42,6 +42,7 @@ # - display_path # - show_common_footer # - process_common_input +# - __check_remote_dependencies__ # - select_execution_mode # - configure_single_remote # - configure_multi_remote @@ -57,6 +58,7 @@ # - change_branch # - view_branches # - navigate +# - __startup_check__ # set -euo pipefail @@ -349,6 +351,45 @@ process_common_input() { ############################################################################### # EXECUTION MODE SELECTION ############################################################################### + +# Check for required dependencies for remote execution +# Returns: 0 if all dependencies met, 1 if missing dependencies +__check_remote_dependencies__() { + local missing_deps=() + + # Check for sshpass (only needed if not using SSH keys) + if ! command -v sshpass &>/dev/null; then + missing_deps+=("sshpass") + fi + + # Check for jq (for nodes.json parsing) + if ! 
command -v jq &>/dev/null; then + missing_deps+=("jq") + fi + + if [[ ${#missing_deps[@]} -gt 0 ]]; then + echo + __line_rgb__ "⚠ Missing Dependencies for Remote Execution" 255 200 0 + echo + echo "The following tools are required but not installed:" + for dep in "${missing_deps[@]}"; do + echo " - $dep" + done + echo + echo "Installation commands:" + echo " Debian/Ubuntu: sudo apt install ${missing_deps[*]}" + echo " Arch Linux: sudo pacman -S ${missing_deps[*]}" + echo " RHEL/CentOS: sudo yum install ${missing_deps[*]}" + echo + echo "Note: If you have SSH keys configured, sshpass is not required." + echo + read -rp "Press Enter to continue..." + return 1 + fi + + return 0 +} + select_execution_mode() { while true; do clear @@ -381,11 +422,19 @@ select_execution_mode() { return 0 ;; 2) + # Check dependencies before proceeding + if ! __check_remote_dependencies__; then + continue + fi if configure_single_remote; then return 0 fi ;; 3) + # Check dependencies before proceeding + if ! __check_remote_dependencies__; then + continue + fi if configure_multi_remote; then return 0 fi @@ -416,15 +465,17 @@ configure_single_remote() { echo read -rp "Enter node IP manually: " manual_ip read -rp "Enter node name: " manual_name + read -rp "Enter username (default: root): " manual_user + manual_user="${manual_user:-root}" # Check for SSH key auth automatically (test since not in nodes.json) - local has_keys=$(__has_ssh_keys__ "$manual_ip" "") + local has_keys=$(__has_ssh_keys__ "$manual_ip" "$manual_user" "") if [[ "$has_keys" == "true" ]]; then echo __line_rgb__ "SSH key authentication detected" 0 255 0 export USE_SSH_KEYS=true __clear_remote_targets__ - __add_remote_target__ "$manual_name" "$manual_ip" "" + __add_remote_target__ "$manual_name" "$manual_ip" "" "$manual_user" __set_execution_mode__ "single-remote" __success__ "Using SSH key authentication (no password needed)" sleep 1 @@ -437,7 +488,7 @@ configure_single_remote() { echo __clear_remote_targets__ - 
__add_remote_target__ "$manual_name" "$manual_ip" "$manual_pass" + __add_remote_target__ "$manual_name" "$manual_ip" "$manual_pass" "$manual_user" __set_execution_mode__ "single-remote" return 0 fi @@ -448,17 +499,19 @@ configure_single_remote() { while IFS= read -r node_name; do local node_ip node_ip=$(__get_node_ip__ "$node_name") + local node_username + node_username=$(__get_node_username__ "$node_name") # Check if SSH keys work for this node (use cache) local key_indicator="" - local has_keys=$(__has_ssh_keys__ "$node_ip" "$node_name") + local has_keys=$(__has_ssh_keys__ "$node_ip" "$node_username" "$node_name") if [[ "$has_keys" == "true" ]]; then key_indicator=" [SSH Key]" else key_indicator=" [No Key]" fi - __line_rgb__ " $i) $node_name ($node_ip) $key_indicator" 0 200 200 + __line_rgb__ " $i) $node_name ($node_username@$node_ip) $key_indicator" 0 200 200 node_menu[$i]="$node_name:$node_ip" ((i += 1)) done < <(__get_available_nodes__) @@ -497,15 +550,17 @@ configure_single_remote() { if [[ "$node_choice" == "m" ]]; then read -rp "Enter node IP: " manual_ip read -rp "Enter node name: " manual_name + read -rp "Enter username (default: root): " manual_user + manual_user="${manual_user:-root}" # Check for SSH key auth automatically (use cache) - local has_keys=$(__has_ssh_keys__ "$manual_ip" "") + local has_keys=$(__has_ssh_keys__ "$manual_ip" "$manual_user" "") if [[ "$has_keys" == "true" ]]; then echo __line_rgb__ "SSH key authentication detected" 0 255 0 export USE_SSH_KEYS=true __clear_remote_targets__ - __add_remote_target__ "$manual_name" "$manual_ip" "" + __add_remote_target__ "$manual_name" "$manual_ip" "" "$manual_user" __set_execution_mode__ "single-remote" __success__ "Using SSH key authentication (no password needed)" sleep 1 @@ -518,20 +573,22 @@ configure_single_remote() { echo __clear_remote_targets__ - __add_remote_target__ "$manual_name" "$manual_ip" "$manual_pass" + __add_remote_target__ "$manual_name" "$manual_ip" "$manual_pass" "$manual_user" 
__set_execution_mode__ "single-remote" return 0 elif [[ -n "$node_choice" && -n "${node_menu[$node_choice]:-}" ]]; then IFS=':' read -r selected_name selected_ip <<<"${node_menu[$node_choice]}" + local selected_username + selected_username=$(__get_node_username__ "$selected_name") # Check for SSH key auth automatically (use cache) - local has_keys=$(__has_ssh_keys__ "$selected_ip" "$selected_name") + local has_keys=$(__has_ssh_keys__ "$selected_ip" "$selected_username" "$selected_name") if [[ "$has_keys" == "true" ]]; then echo __line_rgb__ "SSH key authentication detected" 0 255 0 export USE_SSH_KEYS=true __clear_remote_targets__ - __add_remote_target__ "$selected_name" "$selected_ip" "" + __add_remote_target__ "$selected_name" "$selected_ip" "" "$selected_username" __set_execution_mode__ "single-remote" __success__ "Using SSH key authentication (no password needed)" sleep 1 @@ -544,7 +601,7 @@ configure_single_remote() { echo __clear_remote_targets__ - __add_remote_target__ "$selected_name" "$selected_ip" "$node_pass" + __add_remote_target__ "$selected_name" "$selected_ip" "$node_pass" "$selected_username" __set_execution_mode__ "single-remote" return 0 else @@ -625,17 +682,19 @@ configure_multi_saved() { while IFS= read -r node_name; do local node_ip node_ip=$(__get_node_ip__ "$node_name") + local node_username + node_username=$(__get_node_username__ "$node_name") # Check if SSH keys work for this node (use cache) local key_indicator=" [No Key]" - local has_keys=$(__has_ssh_keys__ "$node_ip" "$node_name") + local has_keys=$(__has_ssh_keys__ "$node_ip" "$node_username" "$node_name") if [[ "$has_keys" == "true" ]]; then key_indicator=" [SSH Key]" else key_indicator=" [No Key]" fi - __line_rgb__ " $i) $node_name ($node_ip) $key_indicator" 0 200 200 + __line_rgb__ " $i) $node_name ($node_username@$node_ip) $key_indicator" 0 200 200 node_menu[$i]="$node_name:$node_ip" ((i += 1)) done < <(__get_available_nodes__) @@ -700,12 +759,17 @@ configure_multi_saved() { echo # 
Check if SSH keys are available (test first node) - if ssh -o BatchMode=yes -o ConnectTimeout=2 root@${REMOTE_TARGETS[0]##*:} echo "test" &>/dev/null 2>&1; then + # Get username for first node + local first_node_name="${REMOTE_TARGETS[0]%%:*}" + local first_node_username="${NODE_USERNAMES[$first_node_name]:-$DEFAULT_USERNAME}" + local first_node_ip="${REMOTE_TARGETS[0]##*:}" + if ssh -o BatchMode=yes -o ConnectTimeout=2 "${first_node_username}@${first_node_ip}" echo "test" &>/dev/null 2>&1; then # Verify all selected nodes have SSH keys configured local all_have_keys=true for target in "${REMOTE_TARGETS[@]}"; do IFS=':' read -r node_name node_ip <<<"$target" - if ! ssh -o BatchMode=yes -o ConnectTimeout=2 root@$node_ip echo "test" &>/dev/null 2>&1; then + local node_username="${NODE_USERNAMES[$node_name]:-$DEFAULT_USERNAME}" + if ! ssh -o BatchMode=yes -o ConnectTimeout=2 "${node_username}@$node_ip" echo "test" &>/dev/null 2>&1; then all_have_keys=false break fi @@ -730,14 +794,16 @@ configure_multi_saved() { echo "Nodes needing passwords:" for target in "${REMOTE_TARGETS[@]}"; do IFS=':' read -r node_name node_ip <<<"$target" - if ! ssh -o BatchMode=yes -o ConnectTimeout=2 root@$node_ip echo "test" &>/dev/null 2>&1; then - echo " - $node_name ($node_ip)" + local node_username="${NODE_USERNAMES[$node_name]:-$DEFAULT_USERNAME}" + if ! ssh -o BatchMode=yes -o ConnectTimeout=2 "${node_username}@$node_ip" echo "test" &>/dev/null 2>&1; then + echo " - $node_name ($node_username@$node_ip)" fi done echo for target in "${REMOTE_TARGETS[@]}"; do IFS=':' read -r node_name node_ip <<<"$target" - if ! ssh -o BatchMode=yes -o ConnectTimeout=2 root@$node_ip echo "test" &>/dev/null 2>&1; then + local node_username="${NODE_USERNAMES[$node_name]:-$DEFAULT_USERNAME}" + if ! 
ssh -o BatchMode=yes -o ConnectTimeout=2 "${node_username}@$node_ip" echo "test" &>/dev/null 2>&1; then read -rsp "Enter password for $node_name: " node_pass echo NODE_PASSWORDS["$node_name"]="$node_pass" @@ -809,6 +875,8 @@ configure_multi_ip_range() { fi echo + read -rp "Enter username for all nodes in range (default: root): " shared_user + shared_user="${shared_user:-root}" read -rsp "Enter password for all nodes in range: " shared_pass echo @@ -819,6 +887,7 @@ configure_multi_ip_range() { local node_name="temp-${ip//\./-}" REMOTE_TARGETS+=("$node_name:$ip") NODE_PASSWORDS["$node_name"]="$shared_pass" + NODE_USERNAMES["$node_name"]="$shared_user" done echo @@ -843,6 +912,8 @@ configure_multi_vmid_range() { return 1 fi + read -rp "Proxmox host username (default: root): " query_user + query_user="${query_user:-root}" read -rp "Start VMID: " start_vmid read -rp "End VMID: " end_vmid @@ -861,6 +932,8 @@ configure_multi_vmid_range() { echo read -rsp "Enter password for query host: " query_pass echo + read -rp "Enter username for all target nodes (default: root): " shared_user + shared_user="${shared_user:-root}" read -rsp "Enter password for all target nodes: " shared_pass echo @@ -874,7 +947,7 @@ configure_multi_vmid_range() { for ((vmid = start_vmid; vmid <= end_vmid; vmid++)); do # Query for VM/LXC IP address local ip=$(sshpass -p "$query_pass" ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null \ - root@"$query_host" \ + "${query_user}@${query_host}" \ "pvesh get /nodes/\$(hostname)/qemu/$vmid/agent/network-get-interfaces 2>/dev/null | grep -oP '(?<=\"ip-address\":\")[^\"]*' | grep -v '^127\\.' 
| grep -v '^::' | head -n1 || \ pvesh get /nodes/\$(hostname)/lxc/$vmid/interfaces 2>/dev/null | grep -oP '(?<=inet )[0-9.]+' | head -n1" 2>/dev/null) @@ -882,6 +955,7 @@ configure_multi_vmid_range() { local node_name="vmid-$vmid" REMOTE_TARGETS+=("$node_name:$ip") NODE_PASSWORDS["$node_name"]="$shared_pass" + NODE_USERNAMES["$node_name"]="$shared_user" echo " Found VMID $vmid: $ip" ((found_count += 1)) fi @@ -936,7 +1010,8 @@ manage_nodes_menu() { echo " (none)" else for node_name in "${!AVAILABLE_NODES[@]}"; do - echo " - $node_name: ${AVAILABLE_NODES[$node_name]}" + local node_username="${NODE_USERNAMES[$node_name]:-$DEFAULT_USERNAME}" + echo " - $node_name: $node_username@${AVAILABLE_NODES[$node_name]}" done fi echo @@ -946,13 +1021,17 @@ manage_nodes_menu() { echo read -rp "Node name: " new_name read -rp "Node IP: " new_ip + read -rp "Username (default: root): " new_username + new_username="${new_username:-root}" if command -v jq &>/dev/null; then - jq --arg name "$new_name" --arg ip "$new_ip" \ - '.nodes += [{"name": $name, "ip": $ip}]' \ + jq --arg name "$new_name" --arg ip "$new_ip" --arg username "$new_username" \ + '.nodes += [{"name": $name, "ip": $ip, "username": $username, "ssh_keys": false}]' \ "$NODES_FILE" >"${NODES_FILE}.tmp" \ && mv "${NODES_FILE}.tmp" "$NODES_FILE" AVAILABLE_NODES["$new_name"]="$new_ip" + NODE_USERNAMES["$new_name"]="$new_username" + NODE_SSH_KEYS["$new_name"]="false" echo "Added $new_name" else echo "jq not installed" @@ -1363,6 +1442,14 @@ navigate() { for s in "${scripts[@]}"; do local sname sname="$(basename "$s")" + + # Skip GUI.sh and CCPVE.sh in remote execution mode at root level + if [[ "$EXECUTION_MODE" != "local" ]] && [[ "$current_dir" == "$BASE_DIR" ]]; then + if [[ "$sname" == "GUI.sh" ]] || [[ "$sname" == "CCPVE.sh" ]]; then + continue + fi + fi + __line_rgb__ "$index) $sname" 100 200 100 menu_map[$index]="$s" ((index += 1)) @@ -1491,6 +1578,43 @@ navigate() { # MAIN 
############################################################################### +# Startup dependency check (informational only) +__startup_check__() { + local warnings=() + + # Check for jq (optional but recommended) + if ! command -v jq &>/dev/null; then + warnings+=("jq - Required for node management and remote execution configuration") + fi + + # Only show warnings if any exist + if [[ ${#warnings[@]} -gt 0 ]]; then + echo + echo "----------------------------------------" + __line_rgb__ "Optional Dependencies Not Found" 255 200 0 + echo "----------------------------------------" + echo + echo "The following optional tools are not installed:" + for warning in "${warnings[@]}"; do + echo " • $warning" + done + echo + echo "You can still use GUI.sh for local execution." + echo "For remote execution, install missing tools:" + echo + echo " Debian/Ubuntu: sudo apt install jq" + echo " Arch Linux: sudo pacman -S jq" + echo " RHEL/CentOS: sudo yum install jq" + echo + echo "----------------------------------------" + echo + sleep 3 + fi +} + +# Run startup check +__startup_check__ + # Make all scripts executable find . 
-type f -name "*.sh" -exec chmod +x {} \; diff --git a/Host/HostInfo.sh b/Host/HostInfo.sh index 128d854..63d0f1a 100644 --- a/Host/HostInfo.sh +++ b/Host/HostInfo.sh @@ -33,9 +33,6 @@ export UTILITYPATH="${UTILITYPATH:-$REPO_ROOT/Utilities}" source "$UTILITYPATH/ArgumentParser.sh" 2>/dev/null || { echo "Error: Cannot find ArgumentParser.sh" exit 1 -} - echo "Error: Cannot find Communication.sh" - exit 1 } # shellcheck source=Utilities/Prompts.sh source "$UTILITYPATH/Prompts.sh" 2>/dev/null || { diff --git a/README.md b/README.md index 9abbf1e..804fe90 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,7 @@ # Proxmox VE Management Scripts -[![Version](https://img.shields.io/badge/version-2.1.5-blue.svg)](https://github.com/coelacant1/ProxmoxScripts/releases) +[![Version](https://img.shields.io/badge/version-2.1.6-blue.svg)](https://github.com/coelacant1/ProxmoxScripts/releases) +[![Repository Checks](https://github.com/coelacant1/ProxmoxScripts/actions/workflows/checks.yml/badge.svg)](https://github.com/coelacant1/ProxmoxScripts/actions/workflows/checks.yml) [![Deploy static content to Pages](https://github.com/coelacant1/ProxmoxScripts/actions/workflows/static.yml/badge.svg?branch=main)](https://github.com/coelacant1/ProxmoxScripts/actions/workflows/static.yml) [![Release on .sh changes](https://github.com/coelacant1/ProxmoxScripts/actions/workflows/release.yml/badge.svg?branch=main)](https://github.com/coelacant1/ProxmoxScripts/actions/workflows/release.yml) @@ -52,8 +53,9 @@ These instructions will guide you on how to start using the scripts for managing **For Remote Execution from Any Linux System:** - Any major Linux distribution (Debian, Ubuntu, Fedora, RHEL, CentOS, Arch, openSUSE, etc.) 
-- `wget` and `unzip` (auto-installed via package manager) -- `sshpass` for remote execution (optional, prompted when needed) +- `wget` and `unzip` (for updating scripts from GitHub) +- `jq` (for parsing node configuration files) +- `sshpass` (for password-based SSH authentication, not needed if using SSH keys) - Network access to target Proxmox nodes ### Single Line Usage @@ -118,7 +120,7 @@ bash -c "$(wget -qLO - https://github.com/coelacant1/ProxmoxScripts/raw/main/CCP > - For repeated usage, clone the repo and call `GUI.sh` directly > - `UTILITYPATH` is exported automatically so scripts can source shared helpers -![Single Line Online Command](.site/SingleLineCommand.png) +![Single Line Online Command](.site/MultiView.png) ### Installation @@ -128,9 +130,11 @@ bash -c "$(wget -qLO - https://github.com/coelacant1/ProxmoxScripts/raw/main/CCP Requires git if you plan to download through cloning: ```bash apt update -apt install git wget unzip +apt install git wget unzip jq sshpass ``` +**Note:** `sshpass` is only needed if you plan to use password-based SSH authentication for remote execution. If you configure SSH keys, it's not required. + To execute these scripts offline on your system, follow these steps: 1. 
Clone the repository to your Proxmox server or your local machine and move the folder to your server: diff --git a/Utilities/ConfigManager.sh b/Utilities/ConfigManager.sh index 2b9dd0f..8069b8f 100644 --- a/Utilities/ConfigManager.sh +++ b/Utilities/ConfigManager.sh @@ -11,6 +11,7 @@ # - __add_remote_target__ # - __clear_remote_targets__ # - __get_node_ip__ +# - __get_node_username__ # - __node_exists__ # - __get_available_nodes__ # - __count_available_nodes__ @@ -28,9 +29,11 @@ declare -g TARGET_DISPLAY="This System" declare -ga REMOTE_TARGETS=() declare -gA NODE_PASSWORDS=() declare -gA AVAILABLE_NODES=() +declare -gA NODE_USERNAMES=() # Track username for each node declare -gA NODE_SSH_KEYS=() # Track SSH key status declare -g NODES_FILE="nodes.json" declare -g REMOTE_TEMP_DIR="/tmp/ProxmoxScripts_gui" +declare -g DEFAULT_USERNAME="root" # Default username for nodes # Only set default if not already set (e.g., from command-line flags) if [[ -z "${REMOTE_LOG_LEVEL:-}" ]]; then declare -g REMOTE_LOG_LEVEL="INFO" @@ -48,12 +51,14 @@ __init_config__() { if [[ -f "$NODES_FILE" ]] && command -v jq &>/dev/null; then while IFS= read -r line; do - local node_name node_ip ssh_keys + local node_name node_ip ssh_keys node_username node_name=$(echo "$line" | jq -r '.name') node_ip=$(echo "$line" | jq -r '.ip') ssh_keys=$(echo "$line" | jq -r 'if has("ssh_keys") then (.ssh_keys | tostring) else "unknown" end') + node_username=$(echo "$line" | jq -r 'if has("username") then .username else "'"$DEFAULT_USERNAME"'" end') AVAILABLE_NODES["$node_name"]="$node_ip" NODE_SSH_KEYS["$node_name"]="$ssh_keys" + NODE_USERNAMES["$node_name"]="$node_username" done < <(jq -c '.nodes[]' "$NODES_FILE" 2>/dev/null || true) fi } @@ -88,20 +93,23 @@ __set_execution_mode__() { } # Add remote target -# Args: node_name node_ip password +# Args: node_name node_ip password [username] __add_remote_target__() { local node_name="$1" local node_ip="$2" local password="$3" + local 
username="${4:-$DEFAULT_USERNAME}" REMOTE_TARGETS+=("$node_name:$node_ip") NODE_PASSWORDS["$node_name"]="$password" + NODE_USERNAMES["$node_name"]="$username" } # Clear all remote targets __clear_remote_targets__() { REMOTE_TARGETS=() NODE_PASSWORDS=() + NODE_USERNAMES=() } # Get node IP by name @@ -112,6 +120,14 @@ __get_node_ip__() { echo "${AVAILABLE_NODES[$node_name]:-}" } +# Get node username by name +# Args: node_name +# Returns: username or default username +__get_node_username__() { + local node_name="$1" + echo "${NODE_USERNAMES[$node_name]:-$DEFAULT_USERNAME}" +} + # Check if node exists # Args: node_name # Returns: 0 if exists, 1 if not @@ -131,11 +147,12 @@ __count_available_nodes__() { } # Check if node has SSH keys configured (from cache or test) -# Args: node_ip node_name +# Args: node_ip username node_name # Returns: "true" if keys work, "false" if not, "unknown" if not tested __has_ssh_keys__() { local node_ip="$1" - local node_name="${2:-}" + local username="${2:-$DEFAULT_USERNAME}" + local node_name="${3:-}" # Check cache first if node_name provided if [[ -n "$node_name" ]] && [[ "${NODE_SSH_KEYS[$node_name]:-unknown}" != "unknown" ]]; then @@ -144,7 +161,7 @@ __has_ssh_keys__() { fi # Test SSH connection - if ssh -o BatchMode=yes -o ConnectTimeout=2 "root@${node_ip}" echo "test" &>/dev/null 2>&1; then + if ssh -o BatchMode=yes -o ConnectTimeout=2 "${username}@${node_ip}" echo "test" &>/dev/null 2>&1; then # Update cache if node_name provided if [[ -n "$node_name" ]]; then NODE_SSH_KEYS["$node_name"]="true" @@ -187,10 +204,11 @@ __scan_ssh_keys__() { for node_name in "${!AVAILABLE_NODES[@]}"; do local node_ip="${AVAILABLE_NODES[$node_name]}" - echo -n " Checking $node_name ($node_ip)... " + local node_username="${NODE_USERNAMES[$node_name]:-$DEFAULT_USERNAME}" + echo -n " Checking $node_name ($node_username@$node_ip)... 
" local has_keys - has_keys=$(__has_ssh_keys__ "$node_ip" "$node_name") + has_keys=$(__has_ssh_keys__ "$node_ip" "$node_username" "$node_name") if [[ "$has_keys" == "true" ]]; then echo "[SSH]" diff --git a/Utilities/RemoteExecutor.sh b/Utilities/RemoteExecutor.sh index f844f6a..5173951 100644 --- a/Utilities/RemoteExecutor.sh +++ b/Utilities/RemoteExecutor.sh @@ -10,6 +10,7 @@ # - REMOTE_TEMP_DIR: Remote temporary directory path # - REMOTE_TARGETS: Array of target nodes (name:ip format) # - NODE_PASSWORDS: Associative array of node passwords +# - NODE_USERNAMES: Associative array of node usernames # - REMOTE_LOG_LEVEL: Log level for remote execution # # Function Index: @@ -95,80 +96,85 @@ __prompt_for_params__() { } # Helper: Execute SSH command with appropriate auth method -# Args: node_ip node_pass command +# Args: node_ip node_pass username command # Returns: output of ssh command __ssh_exec__() { local node_ip="$1" local node_pass="$2" - shift 2 + local username="$3" + shift 3 local command="$*" if [[ "$USE_SSH_KEYS" == "true" ]] || [[ -z "$node_pass" ]]; then - ssh -o StrictHostKeyChecking=no -o ConnectTimeout=5 "root@${node_ip}" "$command" + ssh -o StrictHostKeyChecking=no -o ConnectTimeout=5 "${username}@${node_ip}" "$command" else - sshpass -p "$node_pass" ssh -o StrictHostKeyChecking=no -o ConnectTimeout=5 "root@${node_ip}" "$command" + sshpass -p "$node_pass" ssh -o StrictHostKeyChecking=no -o ConnectTimeout=5 "${username}@${node_ip}" "$command" fi } # Helper: Execute SCP with appropriate auth method -# Args: node_ip node_pass source destination +# Args: node_ip node_pass username source destination # Returns: exit code of scp __scp_exec__() { local node_ip="$1" local node_pass="$2" - local source="$3" - local destination="$4" + local username="$3" + local source="$4" + local destination="$5" if [[ "$USE_SSH_KEYS" == "true" ]] || [[ -z "$node_pass" ]]; then - scp -q -o StrictHostKeyChecking=no "$source" "root@$node_ip:$destination" + scp -q -o 
StrictHostKeyChecking=no "$source" "${username}@$node_ip:$destination" else - sshpass -p "$node_pass" scp -q -o StrictHostKeyChecking=no "$source" "root@$node_ip:$destination" + sshpass -p "$node_pass" scp -q -o StrictHostKeyChecking=no "$source" "${username}@$node_ip:$destination" fi } # Helper: Execute SCP recursive with appropriate auth method -# Args: node_ip node_pass source destination +# Args: node_ip node_pass username source destination # Returns: exit code of scp __scp_exec_recursive__() { local node_ip="$1" local node_pass="$2" - local source="$3" - local destination="$4" + local username="$3" + local source="$4" + local destination="$5" if [[ "$USE_SSH_KEYS" == "true" ]] || [[ -z "$node_pass" ]]; then - scp -q -r -o StrictHostKeyChecking=no "$source" "root@$node_ip:$destination" + scp -q -r -o StrictHostKeyChecking=no "$source" "${username}@$node_ip:$destination" else - sshpass -p "$node_pass" scp -q -r -o StrictHostKeyChecking=no "$source" "root@$node_ip:$destination" + sshpass -p "$node_pass" scp -q -r -o StrictHostKeyChecking=no "$source" "${username}@$node_ip:$destination" fi } # Helper: Download file from remote with appropriate auth method -# Args: node_ip node_pass remote_path local_path +# Args: node_ip node_pass username remote_path local_path # Returns: exit code of scp __scp_download__() { local node_ip="$1" local node_pass="$2" - local remote_path="$3" - local local_path="$4" + local username="$3" + local remote_path="$4" + local local_path="$5" if [[ "$USE_SSH_KEYS" == "true" ]] || [[ -z "$node_pass" ]]; then - scp -q -o StrictHostKeyChecking=no "root@$node_ip:$remote_path" "$local_path" + scp -q -o StrictHostKeyChecking=no "${username}@$node_ip:$remote_path" "$local_path" else - sshpass -p "$node_pass" scp -q -o StrictHostKeyChecking=no "root@$node_ip:$remote_path" "$local_path" + sshpass -p "$node_pass" scp -q -o StrictHostKeyChecking=no "${username}@$node_ip:$remote_path" "$local_path" fi } # Execute workflow on single remote node -# 
Args: node_name node_ip node_pass script_path script_relative script_dir_relative param_line +# Args: node_name node_ip node_pass username script_path script_relative script_dir_relative param_line # Returns: 0 on success, 1 on failure __execute_on_remote_node__() { local node_name="$1" local node_ip="$2" local node_pass="$3" - local script_path="$4" - local script_relative="$5" - local script_dir_relative="$6" - local param_line="$7" + local username="$4" + local script_path="$5" + local script_relative="$6" + local script_dir_relative="$7" + local param_line="$8" echo "----------------------------------------" echo "Target: $node_name ($node_ip)" @@ -180,7 +186,7 @@ __execute_on_remote_node__() { __log_info__ "Cleaning and creating remote directory structure on $node_name" "REMOTE" local ssh_output - if ! ssh_output=$(__ssh_exec__ "$node_ip" "$node_pass" \ + if ! ssh_output=$(__ssh_exec__ "$node_ip" "$node_pass" "$username" \ "rm -rf $REMOTE_TEMP_DIR && mkdir -p $REMOTE_TEMP_DIR/{Utilities,Host,LXC,Storage,VirtualMachines,Networking,Cluster,Security,HighAvailability,Firewall,Resources,RemoteManagement}" 2>&1); then # Check if interrupted if [[ $REMOTE_INTERRUPTED -eq 1 ]]; then @@ -218,7 +224,7 @@ __execute_on_remote_node__() { return 1 fi - if ! __scp_exec_recursive__ "$node_ip" "$node_pass" "Utilities/*.sh" "$REMOTE_TEMP_DIR/Utilities/" 2>/dev/null; then + if ! __scp_exec_recursive__ "$node_ip" "$node_pass" "$username" "Utilities/*.sh" "$REMOTE_TEMP_DIR/Utilities/" 2>/dev/null; then # Check if interrupted or actual failure if [[ $REMOTE_INTERRUPTED -eq 1 ]]; then return 1 @@ -233,7 +239,7 @@ __execute_on_remote_node__() { return 1 fi - if ! __scp_exec__ "$node_ip" "$node_pass" "$script_path" "$REMOTE_TEMP_DIR/$script_dir_relative/" 2>/dev/null; then + if ! 
__scp_exec__ "$node_ip" "$node_pass" "$username" "$script_path" "$REMOTE_TEMP_DIR/$script_dir_relative/" 2>/dev/null; then # Check if interrupted or actual failure if [[ $REMOTE_INTERRUPTED -eq 1 ]]; then return 1 @@ -254,14 +260,14 @@ __execute_on_remote_node__() { # If tar succeeded, transfer and extract if [[ -f "$temp_tar" ]]; then - if __scp_exec__ "$node_ip" "$node_pass" "$temp_tar" "/tmp/" 2>/dev/null; then + if __scp_exec__ "$node_ip" "$node_pass" "$username" "$temp_tar" "/tmp/" 2>/dev/null; then # Check if interrupted if [[ $REMOTE_INTERRUPTED -eq 1 ]]; then rm -f "$temp_tar" return 1 fi - __ssh_exec__ "$node_ip" "$node_pass" \ + __ssh_exec__ "$node_ip" "$node_pass" "$username" \ "tar -xzf /tmp/$(basename "$temp_tar") -C $REMOTE_TEMP_DIR && rm /tmp/$(basename "$temp_tar")" 2>/dev/null __ok__ "Files transferred (tarball)" __log_info__ "Files transferred successfully (tarball)" "REMOTE" @@ -294,10 +300,10 @@ __execute_on_remote_node__() { local ssh_exit_code=0 if [[ -n "$param_line" ]]; then - __ssh_exec__ "$node_ip" "$node_pass" \ + __ssh_exec__ "$node_ip" "$node_pass" "$username" \ "export PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin NON_INTERACTIVE=1 DEBIAN_FRONTEND=noninteractive UTILITYPATH='$REMOTE_TEMP_DIR/Utilities' LOG_FILE='$remote_debug_log' LOG_LEVEL=$REMOTE_LOG_LEVEL LOG_CONSOLE=0 && cd $REMOTE_TEMP_DIR && echo '=== Remote Execution Start ===' > $remote_log 2>&1 && echo 'Script: bash $script_relative' >> $remote_log 2>&1 && echo 'Arguments: $param_line' >> $remote_log 2>&1 && echo 'Working directory: '\$(pwd) >> $remote_log 2>&1 && echo 'UTILITYPATH: '\$UTILITYPATH >> $remote_log 2>&1 && echo 'LOG_FILE: '\$LOG_FILE >> $remote_log 2>&1 && echo 'LOG_LEVEL: '\$LOG_LEVEL >> $remote_log 2>&1 && echo 'LOG_LEVEL (actual): $REMOTE_LOG_LEVEL' >> $remote_log 2>&1 && echo '===================================' >> $remote_log 2>&1 && eval bash $script_relative $param_line >> $remote_log 2>&1; echo \$? 
> ${remote_log}.exit" else - __ssh_exec__ "$node_ip" "$node_pass" \ + __ssh_exec__ "$node_ip" "$node_pass" "$username" \ "export PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin NON_INTERACTIVE=1 DEBIAN_FRONTEND=noninteractive UTILITYPATH='$REMOTE_TEMP_DIR/Utilities' LOG_FILE='$remote_debug_log' LOG_LEVEL=$REMOTE_LOG_LEVEL LOG_CONSOLE=0 && cd $REMOTE_TEMP_DIR && echo '=== Remote Execution Start ===' > $remote_log 2>&1 && echo 'Script: bash $script_relative' >> $remote_log 2>&1 && echo 'Working directory: '\$(pwd) >> $remote_log 2>&1 && echo 'UTILITYPATH: '\$UTILITYPATH >> $remote_log 2>&1 && echo 'LOG_FILE: '\$LOG_FILE >> $remote_log 2>&1 && echo 'LOG_LEVEL: '\$LOG_LEVEL >> $remote_log 2>&1 && echo 'LOG_LEVEL (actual): $REMOTE_LOG_LEVEL' >> $remote_log 2>&1 && echo '===================================' >> $remote_log 2>&1 && bash $script_relative >> $remote_log 2>&1; echo \$? > ${remote_log}.exit" fi @@ -305,7 +311,7 @@ __execute_on_remote_node__() { # Retrieve exit code local temp_exit_file="/tmp/remote_exit_$$" - if __scp_download__ "$node_ip" "$node_pass" "${remote_log}.exit" "$temp_exit_file" 2>/dev/null; then + if __scp_download__ "$node_ip" "$node_pass" "$username" "${remote_log}.exit" "$temp_exit_file" 2>/dev/null; then ssh_exit_code=$(cat "$temp_exit_file") rm -f "$temp_exit_file" else @@ -319,7 +325,7 @@ __execute_on_remote_node__() { local local_debug_log="/tmp/remote_${node_name}_$$.debug.log" # Retrieve stdout/stderr log - if __scp_download__ "$node_ip" "$node_pass" "$remote_log" "$local_remote_log" 2>/dev/null; then + if __scp_download__ "$node_ip" "$node_pass" "$username" "$remote_log" "$local_remote_log" 2>/dev/null; then __log_info__ "Retrieved remote execution log from $node_name" "REMOTE" echo echo "--- Output from $node_name ---" @@ -328,7 +334,7 @@ __execute_on_remote_node__() { echo # Retrieve debug log if it exists - if __scp_download__ "$node_ip" "$node_pass" "$remote_debug_log" "$local_debug_log" 2>/dev/null; then + if 
__scp_download__ "$node_ip" "$node_pass" "$username" "$remote_debug_log" "$local_debug_log" 2>/dev/null; then __log_info__ "Retrieved debug log from $node_name" "REMOTE" # Only show debug log if it has content @@ -357,7 +363,7 @@ __execute_on_remote_node__() { CURRENT_MESSAGE="Cleaning up..." __update__ "$CURRENT_MESSAGE" __log_info__ "Cleaning up remote directory: $REMOTE_TEMP_DIR" "REMOTE" - __ssh_exec__ "$node_ip" "$node_pass" \ + __ssh_exec__ "$node_ip" "$node_pass" "$username" \ "rm -rf $REMOTE_TEMP_DIR $remote_log $remote_debug_log ${remote_log}.exit" 2>/dev/null || __log_warn__ "Cleanup failed (non-critical)" "REMOTE" __ok__ "Cleanup complete" @@ -424,8 +430,11 @@ __execute_remote_script__() { continue fi + # Get username safely + local node_username="${NODE_USERNAMES[$node_name]:-$DEFAULT_USERNAME}" + # Execute on this node (always continue to next node regardless of result) - if __execute_on_remote_node__ "$node_name" "$node_ip" "$node_pass" "$script_path" "$script_relative" "$script_dir_relative" "$param_line"; then + if __execute_on_remote_node__ "$node_name" "$node_ip" "$node_pass" "$node_username" "$script_path" "$script_relative" "$script_dir_relative" "$param_line"; then ((success_count += 1)) else ((fail_count += 1)) diff --git a/Utilities/_Utilities.md b/Utilities/_Utilities.md index 1ee943d..a662fd3 100644 --- a/Utilities/_Utilities.md +++ b/Utilities/_Utilities.md @@ -1,13 +1,13 @@ # ProxmoxScripts Utility Functions Reference -**Auto-generated documentation** - Last updated: 2025-11-24 17:11:41 +**Auto-generated documentation** - Last updated: 2025-11-25 10:18:58 --- ## Overview -This reference provides comprehensive documentation for all utility functions in the ProxmoxScripts repository. -These utilities provide reusable functions for building automation scripts, +This reference provides comprehensive documentation for all utility functions in the ProxmoxScripts repository. 
+These utilities provide reusable functions for building automation scripts, management tools, and integration solutions for Proxmox VE environments. ## Utility Files Overview @@ -1159,6 +1159,7 @@ Shows "Selected script", top comments, and example invocations sections. - `__add_remote_target__` - `__clear_remote_targets__` - `__get_node_ip__` +- `__get_node_username__` - `__node_exists__` - `__get_available_nodes__` - `__count_available_nodes__` @@ -2583,7 +2584,7 @@ __require_root_and_proxmox__ --- # RemoteExecutor.sh -**Purpose**: !/bin/bash Handles all remote script execution logic including SSH, file transfer, and result collection. Supports both password-based (sshpass) and SSH key-based authentication. This utility is sourced by GUI.sh for remote node execution. It expects: - REMOTE_TEMP_DIR: Remote temporary directory path - REMOTE_TARGETS: Array of target nodes (name:ip format) - NODE_PASSWORDS: Associative array of node passwords - REMOTE_LOG_LEVEL: Log level for remote execution +**Purpose**: !/bin/bash Handles all remote script execution logic including SSH, file transfer, and result collection. Supports both password-based (sshpass) and SSH key-based authentication. This utility is sourced by GUI.sh for remote node execution. It expects: - REMOTE_TEMP_DIR: Remote temporary directory path - REMOTE_TARGETS: Array of target nodes (name:ip format) - NODE_PASSWORDS: Associative array of node passwords - NODE_USERNAMES: Associative array of node usernames - REMOTE_LOG_LEVEL: Log level for remote execution **Functions**: - `__remote_cleanup__` @@ -3024,5 +3025,5 @@ fi --- -**Note**: This documentation is automatically generated from source code comments. +**Note**: This documentation is automatically generated from source code comments. 
To update, run: `python3 .check/UpdateUtilityDocumentation.py` diff --git a/nodes.json.template b/nodes.json.template index c7a1fcc..b0d1370 100644 --- a/nodes.json.template +++ b/nodes.json.template @@ -3,11 +3,13 @@ { "name": "node1", "ip": "192.168.1.11", + "username": "root", "ssh_keys": true }, { "name": "node2", "ip": "192.168.1.12", + "username": "root", "ssh_keys": false } ]