- Add 12 section headers for better code navigation - Create 7 common helper functions (backup, directories, networks, etc.) - Break down monolithic functions: setup_ssh_key_to_core (140→31 lines), main (213→70 lines) - Consolidate redundant backup operations using common_backup() - Consolidate directory/network creation with common helpers - Extract menu handling and completion display into separate functions - Improve visual consistency with ║-bordered output formatting - Suppress verbose output from SSH and deployment operations - Add deployment-ready feedback to install-prerequisites.sh Total functions increased from ~50 to 58 for better modularity Script now 2,824 lines with clearer structure and reduced redundancy
2818 lines
107 KiB
Bash
Executable File
2818 lines
107 KiB
Bash
Executable File
#!/bin/bash
|
|
# EZ-Homelab Setup & Deployment Script
|
|
|
|
#═══════════════════════════════════════════════════════════
|
|
# SECTION 1: CONFIGURATION & CONSTANTS
|
|
#═══════════════════════════════════════════════════════════
|
|
|
|
# Debug logging configuration.
# DEBUG=true   -> append timestamped trace lines to DEBUG_LOG_FILE
# VERBOSE=true -> echo [INFO]/[SUCCESS]/[WARNING] messages to the terminal
# Both default to false and may be pre-set in the caller's environment.
DEBUG=${DEBUG:-false}
VERBOSE=${VERBOSE:-false}
# NOTE(review): fixed, predictable path in world-writable /tmp; if the log can
# ever contain sensitive values, a mktemp-generated path would be safer — TODO confirm.
DEBUG_LOG_FILE="/tmp/ez-homelab-debug.log"

# Colors for output (ANSI escape sequences, emitted via `echo -e` by the log helpers)
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color
|
|
|
|
#═══════════════════════════════════════════════════════════
|
|
# SECTION 2: LOGGING & UTILITY FUNCTIONS
|
|
#═══════════════════════════════════════════════════════════
|
|
|
|
# Append a timestamped line to the debug log.
# No-op unless DEBUG=true. Arguments: $1 - message text.
debug_log() {
    [ "$DEBUG" = true ] || return 0
    printf '%s [DEBUG] %s\n' "$(date '+%Y-%m-%d %H:%M:%S')" "$1" >> "$DEBUG_LOG_FILE"
}
|
|
|
|
# Initialize debug log: truncate (>) the log file and record session context.
# Runs once at script startup, only when DEBUG=true.
if [ "$DEBUG" = true ]; then
    echo "$(date '+%Y-%m-%d %H:%M:%S') [DEBUG] ===== EZ-HOMELAB DEBUG LOG STARTED =====" > "$DEBUG_LOG_FILE"
    debug_log "Script started with DEBUG=true"
    # Capture who/where the script runs as; useful when sudo changes EUID.
    debug_log "User: $USER, EUID: $EUID, PWD: $PWD"
fi
|
|
|
|
# Log functions
# Print an informational message (terminal output only when VERBOSE=true);
# always mirrored to the debug log.
log_info() {
    local msg="$1"
    [ "$VERBOSE" = true ] && echo -e "${BLUE}[INFO]${NC} $msg"
    debug_log "[INFO] $msg"
}
|
|
|
|
# Print a success message (terminal output only when VERBOSE=true);
# always mirrored to the debug log.
log_success() {
    local msg="$1"
    [ "$VERBOSE" = true ] && echo -e "${GREEN}[SUCCESS]${NC} $msg"
    debug_log "[SUCCESS] $msg"
}
|
|
|
|
# Print a warning message (terminal output only when VERBOSE=true);
# always mirrored to the debug log.
log_warning() {
    local msg="$1"
    [ "$VERBOSE" = true ] && echo -e "${YELLOW}[WARNING]${NC} $msg"
    debug_log "[WARNING] $msg"
}
|
|
|
|
# Print an error message. Unlike the other log helpers, errors are shown
# unconditionally (not gated on VERBOSE); also mirrored to the debug log.
log_error() {
    local msg="$1"
    echo -e "${RED}[ERROR]${NC} $msg"
    debug_log "[ERROR] $msg"
}
|
|
|
|
# Common helper: back up an existing regular file in place as
# <file>.backup.<timestamp>. A missing file is a silent no-op (returns 0).
common_backup() {
    local target="$1"
    [ -f "$target" ] || return 0
    local stamp
    stamp=$(date +%Y%m%d_%H%M%S)
    sudo cp "$target" "${target}.backup.${stamp}"
}
|
|
|
|
# Common helper: create each requested directory (sudo mkdir -p), failing fast
# on the first error. Afterwards, ownership of the fixed stack roots is handed
# to $ACTUAL_USER. NOTE: the chown targets are the hard-coded stack roots, not
# the directories passed in "$@" (matches original behavior).
common_create_directories() {
    local dir
    for dir in "$@"; do
        if ! sudo mkdir -p "$dir"; then
            log_error "Failed to create $dir"
            return 1
        fi
    done
    sudo chown -R "$ACTUAL_USER:$ACTUAL_USER" /opt/stacks /opt/dockge /opt/arcane 2>/dev/null || true
}
|
|
|
|
# Common helper: create the three shared Docker networks.
# Idempotent: "already exists" errors are suppressed and ignored.
common_create_networks() {
    local net
    for net in homelab-network traefik-network media-network; do
        docker network create "$net" 2>/dev/null || true
    done
}
|
|
|
|
# Run a command string, honoring the global DRY_RUN and TEST_MODE switches.
# Usage: run_cmd [--quiet] <command...>
#   --quiet   discard the command's stdout/stderr
# In dry-run/test mode the command is only echoed and 0 is returned.
# NOTE: the command is executed via eval, so callers must pass trusted input.
run_cmd() {
    local quiet=false
    case "$1" in
        --quiet)
            quiet=true
            shift
            ;;
    esac

    local cmd="$*"

    if [ "$DRY_RUN" = true ]; then
        echo "[DRY-RUN] Would execute: $cmd"
        return 0
    fi

    if [ "$TEST_MODE" = true ]; then
        echo "[TEST] Simulating: $cmd"
        return 0
    fi

    if [ "$quiet" = true ]; then
        eval "$cmd" >/dev/null 2>&1
    else
        eval "$cmd"
    fi
}
|
|
|
|
# Return 0 if the docker CLI is on PATH and responds to --version, else 1.
check_docker_installed() {
    if command -v docker &> /dev/null && docker --version &> /dev/null; then
        return 0
    fi
    return 1
}
|
|
|
|
# Warn (but never fail) when free memory < 512 MB or free disk on / < 10 GB.
# Reads "available" memory from `free -m` and available root-fs gigabytes
# from `df -BG`. Always returns 0.
check_system_resources() {
    local available_mem available_disk
    available_mem=$(free -m | awk '/^Mem:/{print $7}')
    available_disk=$(df -BG / | awk 'NR==2 {print $4}' | sed 's/G//')

    [ "$available_mem" -lt 512 ] && log_warning "Low memory detected: ${available_mem}MB available"
    [ "$available_disk" -lt 10 ] && log_warning "Low disk space: ${available_disk}GB available"
    return 0
}
|
|
|
|
# Forcibly kill any stuck Docker Compose processes (both the legacy
# "docker-compose" binary and the "docker compose" plugin form).
# Best-effort: missing matches are ignored.
cleanup_orphaned_processes() {
    debug_log "Checking for orphaned processes"
    local pattern
    for pattern in "docker-compose" "docker compose"; do
        pkill -9 -f "$pattern" 2>/dev/null || true
    done
}
|
|
|
|
#═══════════════════════════════════════════════════════════
|
|
# SECTION 3: ENVIRONMENT & CONFIGURATION MANAGEMENT
|
|
#═══════════════════════════════════════════════════════════
|
|
|
|
# Safely load environment variables from .env file.
# Two passes over the file:
#   1. export each KEY=VALUE line (comments/blanks skipped, values trimmed,
#      inline comments and trailing CR stripped)
#   2. re-read the file and eval-expand any ${VAR} references so variables
#      may reference earlier ones.
# Arguments: $1 - path to the .env file
# Returns:   1 if the file does not exist, 0 otherwise
load_env_file_safely() {
    local env_file="$1"
    debug_log "Loading env file safely: $env_file"

    if [ ! -f "$env_file" ]; then
        debug_log "Env file does not exist: $env_file"
        return 1
    fi

    # Read the .env file line by line and export variables safely
    while IFS= read -r line || [ -n "$line" ]; do
        # Skip comments and empty lines
        [[ $line =~ ^[[:space:]]*# ]] && continue
        [[ -z "$line" ]] && continue

        # Parse KEY=VALUE, handling quoted values
        if [[ $line =~ ^([^=]+)=(.*)$ ]]; then
            local key="${BASH_REMATCH[1]}"
            local value="${BASH_REMATCH[2]}"

            # Strip inline comments
            # NOTE(review): %%#* removes from the FIRST '#', even inside quoted
            # values — a password or URL containing '#' would be truncated here.
            value=${value%%#*}

            # Trim whitespace from key and value
            # (xargs also strips surrounding quotes from quoted values)
            key=$(echo "$key" | xargs)
            value=$(echo "$value" | xargs)

            # Strip carriage return if present (DOS line endings)
            value=${value%$'\r'}

            # Export the variable
            export "$key"="$value"

            debug_log "Exported $key=[HIDDEN]" # Don't log actual values for security
        fi
    done < "$env_file"

    # Second pass: expand any ${VAR} references in loaded variables
    while IFS= read -r line || [ -n "$line" ]; do
        # Skip comments and empty lines
        [[ $line =~ ^[[:space:]]*# ]] && continue
        [[ -z "$line" ]] && continue

        # Parse KEY=VALUE
        if [[ $line =~ ^([^=]+)=(.*)$ ]]; then
            local key="${BASH_REMATCH[1]}"
            key=$(echo "$key" | xargs)

            # Get current value (indirect expansion of the exported variable)
            local current_value="${!key}"

            # Check if value contains ${...} and expand it
            if [[ "$current_value" =~ \$\{[^}]+\} ]]; then
                # Use eval to expand the variable reference safely within quotes
                # NOTE(review): eval on file-sourced content — command
                # substitution like $(...) in a value would execute here;
                # the file is assumed trusted.
                local expanded_value=$(eval echo "\"$current_value\"")
                export "$key"="$expanded_value"
                debug_log "Expanded $key variable reference"
            fi
        fi
    done < "$env_file"

    debug_log "Env file loaded successfully"
}
|
|
# Generate a .env.global file from a source .env by stripping comment lines
# and blank/whitespace-only lines, leaving only KEY=VALUE lines.
# Arguments: $1 - source .env path, $2 - target path (overwritten)
# Returns:   1 if the source file is missing
generate_env_global() {
    local src="$1"
    local dst="$2"

    debug_log "Generating .env.global from $src to $dst"

    if [ ! -f "$src" ]; then
        log_error "Source .env file not found: $src"
        return 1
    fi

    # Drop lines that are comments or blank (single combined filter).
    grep -Ev "^[[:space:]]*(#|$)" "$src" > "$dst"

    debug_log ".env.global created at $dst"
}
|
|
|
|
# Process a stack's .env.example: copy it into the stack directory as .env,
# then overwrite each KEY whose name is set in the current environment with
# the live value (nested ${VAR} references expanded via eval).
# Arguments: $1 - deployed stack directory, $2 - repo stack directory
# Returns:   0 (also when there is no .env.example to process)
process_stack_env() {
    local stack_dir="$1"
    local repo_stack_dir="$2"

    debug_log "Processing stack .env for $stack_dir"

    # Check if .env.example exists in repo
    if [ ! -f "$repo_stack_dir/.env.example" ]; then
        debug_log "No .env.example found for stack, skipping"
        return 0
    fi

    # Copy .env.example to stack directory
    cp "$repo_stack_dir/.env.example" "$stack_dir/.env"
    debug_log "Copied .env.example to $stack_dir/.env"

    # Replace values in the .env file using values from loaded environment
    local temp_file="$stack_dir/.env.tmp"
    # BUGFIX: the loop below appends (>>); a stale .env.tmp left over from a
    # previous/interrupted run would get duplicated content. Truncate first.
    : > "$temp_file"

    local line key example_value actual_value expanded_value
    while IFS= read -r line || [ -n "$line" ]; do
        # Pass comments and empty lines through unchanged
        if [[ $line =~ ^[[:space:]]*# ]] || [[ -z "$line" ]]; then
            echo "$line" >> "$temp_file"
            continue
        fi

        # Parse KEY=VALUE
        if [[ $line =~ ^([^=]+)=(.*)$ ]]; then
            key="${BASH_REMATCH[1]}"
            example_value="${BASH_REMATCH[2]}"

            # Trim whitespace from key
            key=$(echo "$key" | xargs)

            # Get actual value from environment (indirect expansion)
            actual_value="${!key}"

            if [ -n "$actual_value" ]; then
                # Expand nested ${VAR} references.
                # NOTE(review): eval on environment-sourced content — values
                # are assumed trusted (they come from the operator's .env).
                expanded_value=$(eval echo "\"$actual_value\"")
                echo "$key=$expanded_value" >> "$temp_file"
            else
                # No live value: keep the example line as-is
                echo "$line" >> "$temp_file"
            fi
        else
            echo "$line" >> "$temp_file"
        fi
    done < "$stack_dir/.env"

    mv "$temp_file" "$stack_dir/.env"
    debug_log "Populated .env values for $stack_dir"
}
|
|
|
|
#═══════════════════════════════════════════════════════════
|
|
# SECTION 4: FILE PROCESSING & LOCALIZATION
|
|
#═══════════════════════════════════════════════════════════
|
|
|
|
# Localize a YAML file by substituting ${VAR} placeholders with environment
# variable values.
#   - docker-compose files: only `labels:` sections and the top-level
#     `x-dockge:` section are substituted (via an embedded Python pass);
#     everything else is intentionally left as ${VAR} for Compose itself.
#   - any other config file: the entire file is substituted repeatedly with
#     envsubst until a fixed point is reached (handles nested references).
# Arguments: $1 - file path
#            $2 - fail_on_missing (default false): exit 1 on tooling errors,
#                 and validate leftover placeholders in config files
localize_yml_file() {
    local file_path="$1"
    local fail_on_missing="${2:-false}"

    debug_log "localize_yml_file called for file: $file_path, fail_on_missing: $fail_on_missing"

    if [ ! -f "$file_path" ]; then
        log_warning "File $file_path does not exist, skipping YAML localization"
        debug_log "File $file_path does not exist"
        return
    fi

    # Check if file is writable (substitution rewrites it in place)
    if [ ! -w "$file_path" ]; then
        log_error "File $file_path is not writable, cannot localize"
        debug_log "Permission denied for $file_path"
        if [ "$fail_on_missing" = true ]; then
            exit 1
        fi
        return
    fi

    # Only process labels and x-dockge sections in docker-compose files
    # For other config files (traefik, authelia), process the entire file
    if [[ "$file_path" == */docker-compose.yml ]] || [[ "$file_path" == */docker-compose.yaml ]]; then
        debug_log "Processing docker-compose file - replacing variables only in labels and x-dockge sections"

        if ! command -v python3 >/dev/null 2>&1; then
            log_error "python3 is required for selective variable replacement"
            if [ "$fail_on_missing" = true ]; then
                exit 1
            fi
            return
        fi

        # Use Python to process only labels and x-dockge sections.
        # The target path is passed via the environment (COMPOSE_FILE_PATH);
        # the quoted 'PYEOF' delimiter keeps the Python source literal.
        COMPOSE_FILE_PATH="$file_path" python3 << 'PYEOF'
import sys
import re
import os

file_path = os.environ.get('COMPOSE_FILE_PATH')

with open(file_path, 'r') as f:
    lines = f.readlines()

output_lines = []
in_labels = False
in_xdockge = False
indent_level = 0

for i, line in enumerate(lines):
    # Check if we're entering a labels section
    if re.match(r'^(\s*)labels:\s*$', line):
        in_labels = True
        indent_level = len(re.match(r'^(\s*)', line).group(1))
        output_lines.append(line)
        continue

    # Check if we're entering x-dockge section
    if re.match(r'^x-dockge:\s*$', line):
        in_xdockge = True
        indent_level = 0
        output_lines.append(line)
        continue

    # Check if we're exiting labels or x-dockge section
    if in_labels or in_xdockge:
        current_indent = len(re.match(r'^(\s*)', line).group(1))

        # Exit if we've dedented or hit a new top-level key
        if (current_indent <= indent_level and line.strip() and not line.strip().startswith('#')):
            in_labels = False
            in_xdockge = False

    # Replace variables only in labels or x-dockge sections
    if in_labels or in_xdockge:
        # Replace ${VAR} with environment variable values
        def replace_var(match):
            var_name = match.group(1)
            return os.environ.get(var_name, match.group(0))

        line = re.sub(r'\$\{([^}]+)\}', replace_var, line)

    output_lines.append(line)

with open(file_path, 'w') as f:
    f.writelines(output_lines)
PYEOF

        debug_log "Replaced variables in labels and x-dockge sections of $file_path"

        # For docker-compose files, no validation needed since we intentionally leave
        # environment variables and volumes as ${VAR} for Docker Compose to handle
        return
    else
        # For non-docker-compose files, process the entire file as before
        debug_log "Processing config file - replacing variables in entire file"

        if ! command -v envsubst >/dev/null 2>&1; then
            log_warning "envsubst not available, cannot localize $file_path"
            if [ "$fail_on_missing" = true ]; then
                exit 1
            fi
            return
        fi

        # NOTE(review): temp_file/changed/new_content are not declared local,
        # so they leak into the caller's scope — presumably harmless, verify.
        temp_file="$file_path.tmp"
        cp "$file_path" "$temp_file"
        changed=true
        # Repeat envsubst until output stops changing (resolves nested ${VAR}).
        while [ "$changed" = true ]; do
            changed=false
            new_content=$(envsubst < "$temp_file")
            if [ "$new_content" != "$(cat "$temp_file")" ]; then
                changed=true
                echo "$new_content" > "$temp_file"
            fi
        done
        mv "$temp_file" "$file_path"
        debug_log "Replaced variables in $file_path using envsubst"

        # Post-replacement validation for config files only: collect any
        # ${VAR} still present outside comment lines and warn about those
        # not on the allow-list below.
        if [ "$fail_on_missing" = true ]; then
            local remaining_vars=$(grep -v '^[ \t]*#' "$file_path" | grep -o '\${[^}]*}' | sed 's/\${//' | sed 's/}//' | sort | uniq)
            local invalid_remaining=""
            for rvar in $remaining_vars; do
                rvar=$(echo "$rvar" | xargs)
                case "$rvar" in
                    # These are expected to remain unresolved at this stage.
                    "ACME_EMAIL"|"AUTHELIA_ADMIN_EMAIL"|"SMTP_USERNAME"|"SMTP_PASSWORD")
                        continue
                        ;;
                    *)
                        invalid_remaining="$invalid_remaining $rvar"
                        ;;
                esac
            done
            if [ -n "$invalid_remaining" ]; then
                log_warning "Some variables not replaced in $file_path: $invalid_remaining"
                debug_log "Unreplaced variables in config file: $invalid_remaining"
                # Don't exit - warn only
            fi
        fi
    fi
}
|
|
|
|
# Localize every docker-compose YAML under $REPO_DIR/docker-compose by running
# localize_yml_file on it, then report the processed count and any missing
# variables aggregated in GLOBAL_MISSING_VARS.
localize_deployment() {
    log_info "Starting deployment localization..."

    local processed_files=0
    local file_path
    GLOBAL_MISSING_VARS=""

    # Process docker-compose files
    if [ -d "$REPO_DIR/docker-compose" ]; then
        while IFS= read -r -d '' file_path; do
            if [ -f "$file_path" ]; then
                debug_log "Processing docker-compose file: $file_path"
                localize_yml_file "$file_path" false
                processed_files=$((processed_files + 1))
            fi
        # BUGFIX: without the parentheses, -print0 bound only to "*.yaml"
        # (find's implicit AND binds tighter than -o), so *.yml files were
        # silently skipped.
        done < <(find "$REPO_DIR/docker-compose" \( -name "*.yml" -o -name "*.yaml" \) -print0 2>/dev/null)
    fi

    log_success "Deployment localization completed - processed $processed_files files"
    debug_log "Localization completed for $processed_files files"

    # Report aggregated missing variables
    if [ -n "$GLOBAL_MISSING_VARS" ]; then
        log_warning "Aggregated missing environment variables across all files: $GLOBAL_MISSING_VARS"
        debug_log "Global missing vars: $GLOBAL_MISSING_VARS"
    fi
}
|
|
|
|
#═══════════════════════════════════════════════════════════
|
|
# SECTION 5: INFRASTRUCTURE SETUP (TLS, CA, Docker)
|
|
#═══════════════════════════════════════════════════════════
|
|
|
|
# Generate the shared CA used for multi-server TLS: a 4096-bit RSA key plus a
# self-signed certificate valid for 365 days, written to the fixed stack path
# and handed to $ACTUAL_USER.
generate_shared_ca() {
    local ca_dir="/opt/stacks/core/shared-ca"
    local subject="/C=US/ST=State/L=City/O=Homelab/CN=Homelab-CA"

    mkdir -p "$ca_dir"
    openssl genrsa -out "$ca_dir/ca-key.pem" 4096
    openssl req -new -x509 -days 365 -sha256 \
        -key "$ca_dir/ca-key.pem" \
        -out "$ca_dir/ca.pem" \
        -subj "$subject"
    chown -R "$ACTUAL_USER:$ACTUAL_USER" "$ca_dir"
    log_success "Shared CA generated"
}
|
|
|
|
#═══════════════════════════════════════════════════════════
|
|
# SECTION 6: SSH CONFIGURATION FOR REMOTE SERVERS
|
|
#═══════════════════════════════════════════════════════════
|
|
|
|
# Setup SSH key authentication to core server
|
|
# Check existing SSH key or cleanup.
# Prepares ~/.ssh, removes stale known_hosts entries for the core server, and
# probes any existing key with a hard 3-second timeout.
# Arguments: $1 - private key path
# Returns:   0 if an existing key authenticates (caller can skip setup);
#            1 if there is no usable key (a broken key pair is deleted first)
ssh_cleanup_and_check_existing() {
    local key_path="$1"

    # Ensure .ssh directory exists with the permissions sshd requires
    mkdir -p "/home/$ACTUAL_USER/.ssh"
    chmod 700 "/home/$ACTUAL_USER/.ssh"

    # Clean up any conflicting known_hosts entries for core server
    # (a changed host key would otherwise make BatchMode probes fail)
    log_info "Cleaning up known_hosts entries..."
    ssh-keygen -R "${CORE_SERVER_IP}" >/dev/null 2>&1 || true
    ssh-keygen -R "${CORE_SERVER_HOSTNAME}" >/dev/null 2>&1 || true

    # Check if key already exists
    if [ -f "$key_path" ]; then
        log_info "SSH key already exists: $key_path"

        # Test if key works with aggressive timeout: BatchMode forbids password
        # prompts, `timeout 3` caps the whole attempt, and the grep pipeline
        # filters locale noise before checking for the echoed token.
        log_info "Testing existing SSH key..."
        if timeout 3 bash -c "LC_ALL=C ssh -i '$key_path' -o BatchMode=yes -o ConnectTimeout=2 -o StrictHostKeyChecking=no \
            -o ServerAliveInterval=1 -o ServerAliveCountMax=1 -o LogLevel=ERROR \
            '${CORE_SERVER_USER}@${CORE_SERVER_IP}' 'echo test' 2>&1 | grep -v 'locale\|LC_ALL\|setlocale' | grep -q 'test'"; then
            log_success "Existing SSH key works, skipping key setup"
            return 0
        else
            # NOTE(review): a transient network failure also lands here and
            # deletes the key pair — key is regenerated on the next step.
            log_warning "Existing key doesn't work or connection failed, will regenerate and install"
            rm -f "$key_path" "$key_path.pub"
        fi
    fi

    return 1
}
|
|
|
|
# Generate a new 4096-bit RSA key pair with an empty passphrase.
# Arguments: $1 - private key path ("$1.pub" is created alongside)
# Returns:   0 on success, 1 if ssh-keygen fails
ssh_generate_key() {
    local key_path="$1"
    local key_comment="${SERVER_HOSTNAME}-to-core-${CORE_SERVER_HOSTNAME}"

    log_info "Generating SSH key: $key_path"
    # Suppress ssh-keygen's progress chatter; PIPESTATUS[0] keeps the
    # generator's own exit status despite the grep filter.
    ssh-keygen -t rsa -b 4096 -f "$key_path" -N "" -C "$key_comment" 2>&1 |
        grep -v "^Generating\|^Your identification\|^Your public key"

    if [ "${PIPESTATUS[0]}" -ne 0 ]; then
        log_error "Failed to generate SSH key"
        return 1
    fi

    log_success "SSH key generated"
    return 0
}
|
|
|
|
# Install SSH key on core server using password authentication.
# Installs sshpass if needed, verifies password auth works, then appends the
# public key to the core server's ~/.ssh/authorized_keys.
# Arguments: $1 - private key path ("$1.pub" must exist)
# Globals:   CORE_SERVER_USER/IP/PASSWORD (read)
# Returns:   0 on success, 1 on any failure
# NOTE(review): `sshpass -p` exposes the password in the process list (argv);
# sshpass -f / SSHPASS env would avoid that — consider hardening.
ssh_install_key_with_password() {
    local key_path="$1"

    log_info "Installing SSH key on core server ${CORE_SERVER_IP}..."

    # Ensure sshpass is installed (Debian/Ubuntu assumed: apt-get)
    if ! command -v sshpass &> /dev/null; then
        log_info "sshpass is not installed. Installing now..."
        sudo apt-get update -qq && sudo apt-get install -y sshpass >/dev/null 2>&1
    fi

    # Validate password is set
    if [ -z "$CORE_SERVER_PASSWORD" ]; then
        log_error "CORE_SERVER_PASSWORD is empty!"
        log_error "Check your .env file - ensure CORE_SERVER_PASSWORD is set correctly"
        return 1
    fi

    # Show password length for debugging (not actual password)
    log_info "Password length: ${#CORE_SERVER_PASSWORD} characters"

    # Test SSH connection with password first; locale warnings are filtered
    # out before checking for the echoed success marker.
    log_info "Testing SSH connection with password..."
    if ! LC_ALL=C sshpass -p "$CORE_SERVER_PASSWORD" ssh \
        -o StrictHostKeyChecking=no \
        -o ConnectTimeout=10 \
        -o LogLevel=ERROR \
        "${CORE_SERVER_USER}@${CORE_SERVER_IP}" "echo 'SSH connection successful'" 2>&1 | grep -v "locale\|LC_ALL\|setlocale" | grep -q "successful"; then
        log_error "SSH password authentication failed"
        log_error "Please verify the password in your .env file is correct"
        log_error "You can test manually: sshpass -p 'YOUR_PASSWORD' ssh ${CORE_SERVER_USER}@${CORE_SERVER_IP}"
        return 1
    fi

    log_success "SSH password authentication works"

    # Read the public key
    local pub_key=$(cat "${key_path}.pub")

    # Copy key to core server using direct SSH command (more reliable than ssh-copy-id)
    log_info "Adding public key to authorized_keys on core server..."
    LC_ALL=C sshpass -p "$CORE_SERVER_PASSWORD" ssh \
        -o StrictHostKeyChecking=no \
        -o ConnectTimeout=10 \
        -o LogLevel=ERROR \
        "${CORE_SERVER_USER}@${CORE_SERVER_IP}" \
        "mkdir -p ~/.ssh && chmod 700 ~/.ssh && echo '$pub_key' >> ~/.ssh/authorized_keys && chmod 600 ~/.ssh/authorized_keys" 2>&1 | grep -v "locale\|LC_ALL\|setlocale"

    # NOTE(review): $? here is the status of the trailing `grep -v`, not of
    # ssh itself — ${PIPESTATUS[0]} would check the ssh exit code precisely.
    if [ $? -ne 0 ]; then
        log_error "Failed to copy SSH key to core server"
        log_error "Please verify:"
        echo " 1. Core server IP is correct: ${CORE_SERVER_IP}"
        echo " 2. Username is correct: ${CORE_SERVER_USER}"
        echo " 3. Password is correct"
        echo " 4. SSH server is running on core server"
        return 1
    fi

    return 0
}
|
|
|
|
# Verify SSH key works and add config entry.
# Confirms key-based auth to the core server succeeds, then appends a Host
# block to the user's ~/.ssh/config (if one is not already present) so later
# ssh/scp calls pick up the key automatically.
# Arguments: $1 - private key path
# Returns:   0 on success, 1 if key verification fails
ssh_verify_and_add_config() {
    local key_path="$1"

    # Verify key works: BatchMode forbids password fallback, so success here
    # proves the key itself authenticates. Locale noise is filtered out.
    log_info "Verifying SSH key authentication..."
    if ! LC_ALL=C ssh -i "$key_path" -o BatchMode=yes -o ConnectTimeout=5 -o StrictHostKeyChecking=no -o LogLevel=ERROR \
        "${CORE_SERVER_USER}@${CORE_SERVER_IP}" "echo 'SSH key authentication successful'" 2>&1 | grep -v "locale\|LC_ALL\|setlocale" | grep -q "successful"; then
        log_error "SSH key verification failed"
        return 1
    fi

    log_success "SSH key authentication verified"

    # Add SSH config entry for automatic key usage
    log_info "Adding SSH config entry for core server..."
    local ssh_config="/home/$ACTUAL_USER/.ssh/config"

    # Create config file if it doesn't exist
    touch "$ssh_config"
    chmod 600 "$ssh_config"

    # Check if entry already exists (matched by the Host alias line only)
    if ! grep -q "Host ${CORE_SERVER_HOSTNAME}" "$ssh_config" 2>/dev/null; then
        # Unquoted heredoc delimiter: ${...} below expand at write time.
        cat >> "$ssh_config" <<SSHCONFIG

# Auto-generated by EZ-Homelab for remote server ${SERVER_HOSTNAME}
Host ${CORE_SERVER_HOSTNAME}
    HostName ${CORE_SERVER_IP}
    User ${CORE_SERVER_USER}
    IdentityFile ${key_path}
    StrictHostKeyChecking no
    UserKnownHostsFile /dev/null
    LogLevel ERROR
SSHCONFIG
        log_success "SSH config entry added for ${CORE_SERVER_HOSTNAME}"
    else
        log_info "SSH config entry already exists"
    fi

    return 0
}
|
|
|
|
# Main SSH key setup orchestration: reuse a working key if one exists,
# otherwise generate a fresh pair, install it on the core server via password
# auth, and verify key-based login. On success SSH_KEY_PATH is exported for
# downstream functions.
# Returns: 0 on success, 1 if any stage fails.
setup_ssh_key_to_core() {
    local key_name="id_rsa_${SERVER_HOSTNAME}_to_core"
    local key_path="/home/$ACTUAL_USER/.ssh/$key_name"

    log_info "Setting up SSH key authentication to core server..."

    # Fast path: an existing key that still authenticates.
    if ssh_cleanup_and_check_existing "$key_path"; then
        export SSH_KEY_PATH="$key_path"
        return 0
    fi

    # Slow path: generate, install via password auth, then verify + configure.
    ssh_generate_key "$key_path" || return 1
    ssh_install_key_with_password "$key_path" || return 1
    ssh_verify_and_add_config "$key_path" || return 1

    # Export key path for use by other functions
    export SSH_KEY_PATH="$key_path"
    return 0
}
|
|
|
|
# Function to setup multi-server TLS for remote servers.
# Locates the shared CA on the core server (preferred: shared-ca/, fallback:
# docker-tls/), copies ca.pem and ca-key.pem locally via scp, then delegates
# to setup_docker_tls. On failure, TLS_ISSUES_SUMMARY is populated with
# operator-facing remediation steps.
# Globals:   SSH_KEY_PATH (read, must be set by setup_ssh_key_to_core),
#            CORE_SERVER_USER/IP, ACTUAL_USER (read); SHARED_CA_PATH,
#            TLS_ISSUES_SUMMARY (written)
# Returns:   1 if the CA cannot be found or copied
# NOTE(review): setup_docker_tls is defined elsewhere in this file/scripts —
# verify it exists before relying on this function.
setup_multi_server_tls() {
    local ca_dir="/opt/stacks/core/shared-ca"

    # Use the SSH key path that was exported by setup_ssh_key_to_core
    if [ -z "$SSH_KEY_PATH" ]; then
        log_error "SSH_KEY_PATH not set. Please run setup_ssh_key_to_core first."
        return 1
    fi

    local key_path="$SSH_KEY_PATH"

    log_info "Using SSH key: $key_path"

    sudo mkdir -p "$ca_dir"
    sudo chown "$ACTUAL_USER:$ACTUAL_USER" "$ca_dir"

    log_info "Fetching shared CA from core server ${CORE_SERVER_IP}..."

    # Check if shared CA exists on core server
    log_info "Checking for shared CA on core server..."

    SHARED_CA_PATH=""

    # Test for shared-ca directory (preferred location): the remote test
    # echoes EXISTS only when both cert and key are present.
    if LC_ALL=C ssh -i "$key_path" -o StrictHostKeyChecking=no -o LogLevel=ERROR "${CORE_SERVER_USER}@${CORE_SERVER_IP}" \
        "test -f /opt/stacks/core/shared-ca/ca.pem && test -f /opt/stacks/core/shared-ca/ca-key.pem && echo 'EXISTS'" 2>/dev/null | grep -q "EXISTS"; then
        SHARED_CA_PATH="/opt/stacks/core/shared-ca"
        log_success "Found shared CA in: $SHARED_CA_PATH"
    # Test for docker-tls directory (alternative location)
    elif LC_ALL=C ssh -i "$key_path" -o StrictHostKeyChecking=no -o LogLevel=ERROR "${CORE_SERVER_USER}@${CORE_SERVER_IP}" \
        "test -f /opt/stacks/core/docker-tls/ca.pem && test -f /opt/stacks/core/docker-tls/ca-key.pem && echo 'EXISTS'" 2>/dev/null | grep -q "EXISTS"; then
        SHARED_CA_PATH="/opt/stacks/core/docker-tls"
        log_success "Found shared CA in: $SHARED_CA_PATH"
    fi

    if [ -z "$SHARED_CA_PATH" ]; then
        log_error "Shared CA not found on core server"
        log_error "Please ensure core server is fully deployed with Option 2 first"
        # Show the remote directory listing to help the operator diagnose.
        log_info "Checking what exists on core server..."
        LC_ALL=C ssh -i "$key_path" -o StrictHostKeyChecking=no "${CORE_SERVER_USER}@${CORE_SERVER_IP}" \
            "ls -la /opt/stacks/core/shared-ca/ /opt/stacks/core/docker-tls/ 2>&1" | grep -v "locale\|LC_ALL\|setlocale"
        TLS_ISSUES_SUMMARY="⚠️ TLS Configuration Issue: Shared CA not found on core server ${CORE_SERVER_IP}

To fix this:
1. Deploy core server first using Option 2
2. Verify CA exists: ssh ${CORE_SERVER_USER}@${CORE_SERVER_IP} 'ls -la /opt/stacks/core/shared-ca/'
3. Re-run Option 3 deployment"
        return 1
    fi

    # Copy shared CA from core server using SCP
    log_info "Copying shared CA certificates..."

    # Copy ca.pem
    if ! LC_ALL=C scp -i "$key_path" -o StrictHostKeyChecking=no -o LogLevel=ERROR \
        "${CORE_SERVER_USER}@${CORE_SERVER_IP}:${SHARED_CA_PATH}/ca.pem" \
        "$ca_dir/" 2>/dev/null; then
        log_error "Failed to copy ca.pem from core server"
        TLS_ISSUES_SUMMARY="⚠️ TLS Configuration Issue: Could not copy shared CA from ${CORE_SERVER_IP}

To fix this:
1. Verify SSH key works: ssh -i $key_path ${CORE_SERVER_USER}@${CORE_SERVER_IP}
2. Check file permissions: ssh ${CORE_SERVER_USER}@${CORE_SERVER_IP} 'ls -la ${SHARED_CA_PATH}/'
3. Manually copy if needed: scp -i $key_path ${CORE_SERVER_USER}@${CORE_SERVER_IP}:${SHARED_CA_PATH}/ca* $ca_dir/"
        return 1
    fi

    # Copy ca-key.pem
    if ! LC_ALL=C scp -i "$key_path" -o StrictHostKeyChecking=no -o LogLevel=ERROR \
        "${CORE_SERVER_USER}@${CORE_SERVER_IP}:${SHARED_CA_PATH}/ca-key.pem" \
        "$ca_dir/" 2>/dev/null; then
        log_error "Failed to copy ca-key.pem from core server"
        return 1
    fi

    log_success "Shared CA copied successfully"

    # Now setup Docker TLS using the shared CA
    setup_docker_tls
}
|
|
|
|
# Get script directory and repo directory.
# SCRIPT_DIR resolves to the directory holding this script; the repo root is
# assumed to be one level up.
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
REPO_DIR="$( cd "$SCRIPT_DIR/.." && pwd )"

# Source common functions shared with the other repo scripts.
# NOTE(review): no existence check — a missing scripts/common.sh surfaces as a
# source error here.
source "$REPO_DIR/scripts/common.sh"

# Get actual user: when running under sudo, prefer the invoking user
# (SUDO_USER) so created files end up owned by them, not root.
if [ "$EUID" -eq 0 ]; then
    ACTUAL_USER=${SUDO_USER:-$USER}
else
    ACTUAL_USER=$USER
fi

# Default values for configuration and deployment-mode flags; these are
# filled in later by .env loading and interactive prompting.
DOMAIN=""
SERVER_IP=""
ADMIN_USER=""
ADMIN_EMAIL=""
AUTHELIA_ADMIN_PASSWORD=""
DEPLOY_CORE=false
DEPLOY_INFRASTRUCTURE=false
DEPLOY_DASHBOARDS=false
SETUP_STACKS=false
DEPLOY_REMOTE_SERVER=false
TLS_ISSUES_SUMMARY=""
CORE_SERVER_IP=""
CORE_SERVER_HOSTNAME=""
CORE_SERVER_USER=""
CORE_SERVER_PASSWORD=""
SSH_KEY_PATH=""

# Required variables for configuration: validated and prompted for by
# validate_and_prompt_variables before deployment proceeds.
REQUIRED_VARS=("SERVER_IP" "SERVER_HOSTNAME" "DUCKDNS_SUBDOMAINS" "DUCKDNS_TOKEN" "DOMAIN" "DEFAULT_USER" "DEFAULT_PASSWORD" "DEFAULT_EMAIL")
|
|
|
|
#═══════════════════════════════════════════════════════════
|
|
# SECTION 7: VARIABLE VALIDATION & PROMPTING
|
|
#═══════════════════════════════════════════════════════════
|
|
|
|
# Load the repo-level .env into the current shell, if one exists.
# Returns 0 when a .env file was found (and loaded), 1 otherwise.
load_env_file() {
    local env_path="$REPO_DIR/.env"

    if [ ! -f "$env_path" ]; then
        log_info "No existing .env file found. We'll create one during setup."
        return 1
    fi

    log_info "Found existing .env file, loading current configuration..."
    load_env_file_safely "$env_path"
    return 0
}
|
|
|
|
# Validate the value supplied for a required configuration variable.
# Arguments: $1 - variable name, $2 - candidate value
# Returns:   0 if the value is acceptable for that variable, 1 otherwise
validate_variable() {
    local var_name="$1"
    local var_value="$2"

    case "$var_name" in
        "SERVER_IP")
            # Dotted-quad IPv4 with each octet in 0-255.
            # BUGFIX: the original pattern ^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$
            # accepted out-of-range addresses such as 999.999.999.999.
            if [[ $var_value =~ ^([0-9]{1,3})\.([0-9]{1,3})\.([0-9]{1,3})\.([0-9]{1,3})$ ]]; then
                local octet
                for octet in "${BASH_REMATCH[@]:1}"; do
                    # 10# forces decimal so leading zeros aren't parsed as octal
                    if (( 10#$octet > 255 )); then
                        return 1
                    fi
                done
                return 0
            fi
            return 1
            ;;
        "DOMAIN")
            # Basic domain validation: label characters plus a TLD of 2+ letters
            if [[ $var_value =~ ^[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}$ ]]; then
                return 0
            fi
            return 1
            ;;
        "DUCKDNS_SUBDOMAINS")
            # Non-empty after trimming; only hostname-safe characters allowed
            local trimmed_value
            trimmed_value=$(echo "$var_value" | sed 's/^[[:space:]]*//;s/[[:space:]]*$//')
            if [ -n "$trimmed_value" ] && [[ $trimmed_value =~ ^[a-zA-Z0-9.-]+$ ]]; then
                return 0
            fi
            return 1
            ;;
        "DEFAULT_PASSWORD")
            # Password must be at least 8 characters
            if [ ${#var_value} -ge 8 ]; then
                return 0
            fi
            return 1
            ;;
        "DEFAULT_EMAIL")
            # Basic email shape: local-part@domain.tld
            if [[ $var_value =~ ^[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}$ ]]; then
                return 0
            fi
            return 1
            ;;
        *)
            # Any other variable just needs a non-whitespace value
            local trimmed_value
            trimmed_value=$(echo "$var_value" | sed 's/^[[:space:]]*//;s/[[:space:]]*$//')
            if [ -n "$trimmed_value" ]; then
                return 0
            fi
            return 1
            ;;
    esac
}
|
|
|
|
# Prompt for a single variable.
# Interactively reads a value for the named variable, showing any current
# value as the default (Enter keeps it). Passwords are read silently and
# never echoed. Loops until a valid value is obtained; "q"/"Q" exits the
# whole script.
# Arguments: $1 - variable name (assigned in the caller's scope via eval)
prompt_for_variable() {
    local var="$1"
    local user_input=""
    local current_value="${!var:-}"
    local prompt_text=""

    while true; do
        # Build prompt text with current value if it exists
        if [ -n "$current_value" ]; then
            if [ "$var" = "DEFAULT_PASSWORD" ]; then
                prompt_text="║ 🔒 ${var} ([HIDDEN]): "
            else
                prompt_text="${var} (${current_value}): "
            fi
        else
            prompt_text="${var}: "
        fi

        # Add icon prefix (also supplies the "║" box border for the menu UI)
        case "$var" in
            "SERVER_IP")
                prompt_text="║ 🌐 ${prompt_text}"
                ;;
            "DOMAIN")
                prompt_text="║ 🌍 ${prompt_text}"
                ;;
            "DUCKDNS_SUBDOMAINS")
                prompt_text="║ 🦆 ${prompt_text}"
                ;;
            "DUCKDNS_TOKEN")
                prompt_text="║ 🔑 ${prompt_text}"
                ;;
            "DEFAULT_USER")
                prompt_text="║ 👤 ${prompt_text}"
                ;;
            "DEFAULT_PASSWORD")
                # Lock icon already added above for passwords
                ;;
            "DEFAULT_EMAIL")
                prompt_text="║ 📧 ${prompt_text}"
                ;;
            "SERVER_HOSTNAME")
                prompt_text="║ 🏠 ${prompt_text}"
                ;;
            "CORE_SERVER_IP")
                prompt_text="║ 🌐 ${prompt_text}"
                ;;
            "CORE_SERVER_HOSTNAME")
                prompt_text="║ 🏠 ${prompt_text}"
                ;;
            "CORE_SERVER_USER")
                prompt_text="║ 👤 ${prompt_text}"
                ;;
            "CORE_SERVER_PASSWORD")
                prompt_text="║ 🔑 ${prompt_text}"
                ;;
        esac

        # Get user input (-s suppresses echo for passwords)
        if [ "$var" = "DEFAULT_PASSWORD" ]; then
            read -s -p "$prompt_text" user_input
            echo ""
        else
            read -p "$prompt_text" user_input
        fi

        # Check for quit command
        # NOTE(review): a user whose intended value is literally "q"/"Q"
        # cannot enter it — it always cancels setup.
        if [ "$user_input" = "q" ] || [ "$user_input" = "Q" ]; then
            log_info "Setup cancelled by user"
            exit 0
        fi

        if [ -z "$user_input" ]; then
            if [ -n "$current_value" ]; then
                # Use existing value - overwrite prompt with status
                # (\033[1A moves the cursor up one line, \033[K clears it)
                if [ "$var" != "DEFAULT_PASSWORD" ]; then
                    echo -e "\033[1A\033[K║ ✅ ${var}: ${current_value}"
                fi
                return 0
            else
                log_warning "${var} cannot be empty. Please provide a value."
                continue
            fi
        fi

        if validate_variable "$var" "$user_input"; then
            # Assign into the caller-visible variable named by $var.
            # NOTE(review): eval with interpolated input — a value containing
            # `"` or `$` could be mis-parsed/executed; printf -v would be safer.
            eval "$var=\"$user_input\""
            # Overwrite prompt with status
            if [ "$var" != "DEFAULT_PASSWORD" ]; then
                echo -e "\033[1A\033[K║ ✅ ${var}: ${user_input}"
            else
                echo -e "\033[1A\033[K║ ✅ ${var}: [HIDDEN]"
            fi
            return 0
        else
            log_warning "Invalid value for ${var}. Please try again."
            continue
        fi
    done
}
|
|
|
|
# Validate and prompt for required variables with loop.
# Repeatedly checks every variable in REQUIRED_VARS; when all are valid,
# shows a summary and a Deploy/Modify/Quit menu. Missing or invalid variables
# are prompted for via prompt_for_variable; "Modify" re-prompts everything.
# Returns 0 when the user chooses to deploy; exits 0 on quit.
validate_and_prompt_variables() {
    local all_valid=false
    local user_wants_to_review=false
    local first_display=true
    local -a missing_vars=()
    local var display_value user_choice

    while true; do
        user_wants_to_review=false
        all_valid=true
        missing_vars=()

        # Check validity without showing initial summary
        for var in "${REQUIRED_VARS[@]}"; do
            # BUGFIX: the original prepended "║ " to the value before the
            # emptiness test, so [ -z ] could never fire; trim the raw value.
            display_value=$(echo "${!var:-}" | sed 's/^[[:space:]]*//;s/[[:space:]]*$//')
            if [ -z "$display_value" ] || ! validate_variable "$var" "${!var}"; then
                all_valid=false
                # BUGFIX: missing_vars was reported below but never populated.
                missing_vars+=("$var")
            fi
        done

        if [ "$all_valid" = true ]; then
            if [ "$first_display" = true ]; then
                for var in "${REQUIRED_VARS[@]}"; do
                    display_value=$(echo "${!var:-}" | sed 's/^[[:space:]]*//;s/[[:space:]]*$//')
                    if [ "$var" = "DEFAULT_PASSWORD" ]; then
                        echo "║ ✅ ${var}: [HIDDEN]"
                    else
                        echo "║ ✅ ${var}: ${display_value}"
                    fi
                done
                echo "║"
                first_display=false
            fi
            echo "║"
            echo "║ 1) Deploy 2) Modify q) Quit"
            echo "║"
            echo -n "╚═════════════════════════════════════════════ "
            read -p "Choose : " user_choice

            case "$user_choice" in
                1|"p"|"proceed")
                    log_info "Proceeding with current configuration..."
                    return 0
                    ;;
                2|"r"|"review"|"change")
                    user_wants_to_review=true
                    echo "║"
                    echo "║ Press Enter to keep current value:"
                    echo "║"
                    ;;
                [Qq]|[Qq]uit)
                    log_info "Setup cancelled by user"
                    exit 0
                    ;;
                *)
                    log_warning "Invalid choice. Please enter 1, 2, or q."
                    continue
                    ;;
            esac
        else
            echo "║ Missing variables: ${missing_vars[*]}"
        fi

        # Prompt for variables (either missing ones or all if reviewing)
        if [ "$user_wants_to_review" = true ]; then
            # Review all variables one by one
            for var in "${REQUIRED_VARS[@]}"; do
                prompt_for_variable "$var"
            done
            # After review, continue the loop to show menu again
            continue
        else
            # Only prompt for missing/invalid variables
            for var in "${REQUIRED_VARS[@]}"; do
                if [ -z "${!var:-}" ] || ! validate_variable "$var" "${!var}"; then
                    prompt_for_variable "$var"
                fi
            done
        fi
    done
}
|
|
|
|
# Save configuration to .env file.
# Writes every REQUIRED_VARS value (plus derived/generated Authelia and Arcane
# settings when DEPLOY_CORE=true) into $REPO_DIR/.env via in-place sed edits
# performed as $ACTUAL_USER. Reads/writes many globals (DOMAIN, SERVER_IP,
# ADMIN_USER, ADMIN_EMAIL, AUTHELIA_*, ARCANE_*). Exits 1 if the Authelia
# password hash cannot be generated.
save_env_file() {
    debug_log "save_env_file() called, DEPLOY_CORE=$DEPLOY_CORE"
    log_info "Saving configuration to .env file..."

    # Create .env file if it doesn't exist (seeded from the example file)
    if [ ! -f "$REPO_DIR/.env" ]; then
        sudo -u "$ACTUAL_USER" cp "$REPO_DIR/.env.example" "$REPO_DIR/.env"
    fi

    # Update only the required variables (anchored sed: only lines that
    # already start with "VAR=" are rewritten; nothing new is appended)
    for var in "${REQUIRED_VARS[@]}"; do
        if [ -n "${!var:-}" ]; then
            sudo -u "$ACTUAL_USER" sed -i "s|^${var}=.*|${var}=${!var}|" "$REPO_DIR/.env"
        fi
    done

    # Update HOMEPAGE_ALLOWED_HOSTS dynamically
    if [ -n "${DOMAIN:-}" ] && [ -n "${SERVER_IP:-}" ]; then
        # Extract Homepage port from compose file (host side of a "NNNN:3000"
        # port mapping); falls back to 3003 if the pattern is not found
        HOMEPAGE_PORT=$(grep -A1 'ports:' "$REPO_DIR/docker-compose/dashboards/docker-compose.yml" | grep -o '"[0-9]*:3000"' | cut -d'"' -f2 | cut -d: -f1)
        if [ -z "$HOMEPAGE_PORT" ]; then
            HOMEPAGE_PORT=3003 # Fallback
        fi
        HOMEPAGE_ALLOWED_HOSTS="homepage.${DOMAIN},${SERVER_IP}:${HOMEPAGE_PORT}"
        sudo -u "$ACTUAL_USER" sed -i "s|HOMEPAGE_ALLOWED_HOSTS=.*|HOMEPAGE_ALLOWED_HOSTS=$HOMEPAGE_ALLOWED_HOSTS|" "$REPO_DIR/.env"
    fi

    # Authelia settings (only generate secrets if deploying core)
    if [ "$DEPLOY_CORE" = true ]; then
        # Ensure we have admin credentials (fall back to DEFAULT_* values)
        if [ -z "$ADMIN_USER" ]; then
            ADMIN_USER="${DEFAULT_USER:-admin}"
        fi
        if [ -z "$ADMIN_EMAIL" ]; then
            ADMIN_EMAIL="${DEFAULT_EMAIL:-${ADMIN_USER}@${DOMAIN}}"
        fi
        if [ -z "$AUTHELIA_ADMIN_PASSWORD" ] || [ "$AUTHELIA_ADMIN_PASSWORD" = "generate-with-openssl-rand-hex-64" ]; then
            AUTHELIA_ADMIN_PASSWORD="${DEFAULT_PASSWORD}"
            if [ "$AUTHELIA_ADMIN_PASSWORD" = "changeme123" ]; then
                log_info "Using default admin password (changeme123) - please change this after setup!"
            fi
        fi

        # Generate each secret only if unset or still the placeholder value
        if [ -z "$AUTHELIA_JWT_SECRET" ] || [ "$AUTHELIA_JWT_SECRET" = "generate-with-openssl-rand-hex-64" ]; then
            AUTHELIA_JWT_SECRET=$(openssl rand -hex 64)
        fi
        if [ -z "$AUTHELIA_SESSION_SECRET" ] || [ "$AUTHELIA_SESSION_SECRET" = "generate-with-openssl-rand-hex-64" ]; then
            AUTHELIA_SESSION_SECRET=$(openssl rand -hex 64)
        fi
        if [ -z "$AUTHELIA_STORAGE_ENCRYPTION_KEY" ] || [ "$AUTHELIA_STORAGE_ENCRYPTION_KEY" = "generate-with-openssl-rand-hex-64" ]; then
            AUTHELIA_STORAGE_ENCRYPTION_KEY=$(openssl rand -hex 64)
        fi

        # Generate Arcane secrets
        if [ -z "$ARCANE_ENCRYPTION_KEY" ] || [ "$ARCANE_ENCRYPTION_KEY" = "generate-with-openssl-rand-hex-64" ]; then
            ARCANE_ENCRYPTION_KEY=$(openssl rand -hex 64)
        fi
        if [ -z "$ARCANE_JWT_SECRET" ] || [ "$ARCANE_JWT_SECRET" = "generate-with-openssl-rand-hex-64" ]; then
            ARCANE_JWT_SECRET=$(openssl rand -hex 64)
        fi

        # Save Authelia settings to .env (sed uses % as delimiter because the
        # secret values may contain characters such as /)
        sudo -u "$ACTUAL_USER" sed -i "s%AUTHELIA_JWT_SECRET=.*%AUTHELIA_JWT_SECRET=$AUTHELIA_JWT_SECRET%" "$REPO_DIR/.env"
        sudo -u "$ACTUAL_USER" sed -i "s%AUTHELIA_SESSION_SECRET=.*%AUTHELIA_SESSION_SECRET=$AUTHELIA_SESSION_SECRET%" "$REPO_DIR/.env"
        sudo -u "$ACTUAL_USER" sed -i "s%AUTHELIA_STORAGE_ENCRYPTION_KEY=.*%AUTHELIA_STORAGE_ENCRYPTION_KEY=$AUTHELIA_STORAGE_ENCRYPTION_KEY%" "$REPO_DIR/.env"

        # Save Arcane settings to .env. Each setting is written twice: the
        # first sed uncomments-and-sets a "# VAR=..." line, the second covers
        # the already-uncommented "VAR=..." form.
        sudo -u "$ACTUAL_USER" sed -i "s%# ARCANE_ENCRYPTION_KEY=.*%ARCANE_ENCRYPTION_KEY=$ARCANE_ENCRYPTION_KEY%" "$REPO_DIR/.env"
        sudo -u "$ACTUAL_USER" sed -i "s%ARCANE_ENCRYPTION_KEY=.*%ARCANE_ENCRYPTION_KEY=$ARCANE_ENCRYPTION_KEY%" "$REPO_DIR/.env"
        sudo -u "$ACTUAL_USER" sed -i "s%# ARCANE_JWT_SECRET=.*%ARCANE_JWT_SECRET=$ARCANE_JWT_SECRET%" "$REPO_DIR/.env"
        sudo -u "$ACTUAL_USER" sed -i "s%ARCANE_JWT_SECRET=.*%ARCANE_JWT_SECRET=$ARCANE_JWT_SECRET%" "$REPO_DIR/.env"

        # Admin identity — same commented/uncommented double-sed pattern
        sudo -u "$ACTUAL_USER" sed -i "s%# AUTHELIA_ADMIN_USER=.*%AUTHELIA_ADMIN_USER=$ADMIN_USER%" "$REPO_DIR/.env"
        sudo -u "$ACTUAL_USER" sed -i "s%AUTHELIA_ADMIN_USER=.*%AUTHELIA_ADMIN_USER=$ADMIN_USER%" "$REPO_DIR/.env"
        sudo -u "$ACTUAL_USER" sed -i "s%# AUTHELIA_ADMIN_EMAIL=.*%AUTHELIA_ADMIN_EMAIL=$ADMIN_EMAIL%" "$REPO_DIR/.env"
        sudo -u "$ACTUAL_USER" sed -i "s%AUTHELIA_ADMIN_EMAIL=.*%AUTHELIA_ADMIN_EMAIL=$ADMIN_EMAIL%" "$REPO_DIR/.env"
        sudo -u "$ACTUAL_USER" sed -i "s%# AUTHELIA_ADMIN_PASSWORD=.*%AUTHELIA_ADMIN_PASSWORD=$AUTHELIA_ADMIN_PASSWORD%" "$REPO_DIR/.env"
        sudo -u "$ACTUAL_USER" sed -i "s%AUTHELIA_ADMIN_PASSWORD=.*%AUTHELIA_ADMIN_PASSWORD=$AUTHELIA_ADMIN_PASSWORD%" "$REPO_DIR/.env"

        # Generate password hash if needed (argon2, via the Authelia CLI in a
        # throwaway container; the awk picks the "$argon2id..." token)
        if [ -z "$AUTHELIA_ADMIN_PASSWORD_HASH" ]; then
            log_info "Generating Authelia password hash..."
            # Pull Authelia image if needed
            if ! docker images | grep -q authelia/authelia; then
                docker pull authelia/authelia:latest > /dev/null 2>&1
            fi
            AUTHELIA_ADMIN_PASSWORD_HASH=$(docker run --rm authelia/authelia:latest authelia crypto hash generate argon2 --password "$AUTHELIA_ADMIN_PASSWORD" 2>&1 | awk '/\$argon2id/ {print $NF}')
            if [ -z "$AUTHELIA_ADMIN_PASSWORD_HASH" ]; then
                log_error "Failed to generate Authelia password hash. Please check that AUTHELIA_ADMIN_PASSWORD is set."
                exit 1
            fi
        fi

        # Save password hash
        sudo -u "$ACTUAL_USER" sed -i "s%# AUTHELIA_ADMIN_PASSWORD_HASH=.*%AUTHELIA_ADMIN_PASSWORD_HASH=$AUTHELIA_ADMIN_PASSWORD_HASH%" "$REPO_DIR/.env"
        sudo -u "$ACTUAL_USER" sed -i "s%AUTHELIA_ADMIN_PASSWORD_HASH=.*%AUTHELIA_ADMIN_PASSWORD_HASH=$AUTHELIA_ADMIN_PASSWORD_HASH%" "$REPO_DIR/.env"
    fi

    debug_log "Configuration saved to .env file"
    log_success "Configuration saved to .env file"
}
|
|
|
|
# Validate that required secrets are present for core deployment.
# No-op when DEPLOY_CORE=false; otherwise exits 1 if any critical variable
# (Authelia secrets, DOMAIN, SERVER_IP) is empty or unset.
validate_secrets() {
    debug_log "validate_secrets called, DEPLOY_CORE=$DEPLOY_CORE"

    if [ "$DEPLOY_CORE" = false ]; then
        debug_log "Core not being deployed, skipping secret validation"
        return 0
    fi

    log_info "Validating required secrets for core deployment..."
    debug_log "Checking Authelia secrets..."

    local missing_secrets=""
    local name

    # Check each critical variable via indirect expansion instead of one
    # hand-written if-block per variable.
    for name in AUTHELIA_JWT_SECRET AUTHELIA_SESSION_SECRET \
                AUTHELIA_STORAGE_ENCRYPTION_KEY AUTHELIA_ADMIN_PASSWORD_HASH \
                DOMAIN SERVER_IP; do
        if [ -z "${!name:-}" ]; then
            missing_secrets="$missing_secrets $name"
            debug_log "$name is missing"
        fi
    done

    if [ -n "$missing_secrets" ]; then
        log_error "Critical configuration missing: $missing_secrets"
        log_error "This will prevent Authelia and other services from starting correctly."
        debug_log "Failing deployment due to missing secrets: $missing_secrets"
        exit 1
    fi

    log_success "All required secrets validated"
    debug_log "Secret validation passed"
}
|
|
|
|
#═══════════════════════════════════════════════════════════
|
|
# SECTION 8: INDIVIDUAL STACK DEPLOYMENT FUNCTIONS
|
|
#═══════════════════════════════════════════════════════════
|
|
|
|
# Install NVIDIA drivers function.
# Requires root (re-execs via sudo otherwise); skips silently when lspci finds
# no NVIDIA device. Installs the driver from the graphics-drivers PPA and
# nvidia-docker2 for container GPU support, then restarts Docker.
install_nvidia() {
    log_info "Installing NVIDIA drivers and Docker support..."

    # Check if running as root
    if [ "$EUID" -ne 0 ]; then
        log_warning "NVIDIA installation requires root privileges. Running with sudo..."
        # NOTE(review): this re-execs the whole script from the top, not just
        # this function, and "$@" here expands to this FUNCTION's arguments,
        # not the script's original arguments — confirm that is intended.
        exec sudo "$0" "$@"
    fi

    # Check for NVIDIA GPU
    if ! lspci | grep -i nvidia > /dev/null; then
        log_warning "No NVIDIA GPU detected. Skipping NVIDIA driver installation."
        return
    fi

    # Add NVIDIA repository (Ubuntu graphics-drivers PPA)
    log_info "Adding NVIDIA repository..."
    apt-get update
    apt-get install -y software-properties-common
    add-apt-repository -y ppa:graphics-drivers/ppa
    apt-get update

    # Install NVIDIA drivers (latest)
    log_info "Installing NVIDIA drivers..."
    # NOTE(review): driver branch is pinned to 470; newer GPUs may need a
    # newer branch — confirm before relying on this on current hardware.
    apt-get install -y nvidia-driver-470 # Adjust version as needed

    # Install NVIDIA Docker support
    log_info "Installing NVIDIA Docker support..."
    # e.g. "ubuntu22.04" — used to select the matching nvidia-docker repo list
    distribution=$(. /etc/os-release;echo $ID$VERSION_ID)
    # NOTE(review): apt-key is deprecated on current Debian/Ubuntu releases;
    # the signed-by keyring mechanism is the recommended replacement.
    curl -s -L https://nvidia.github.io/nvidia-docker/gpgkey | apt-key add -
    curl -s -L https://nvidia.github.io/nvidia-docker/$distribution/nvidia-docker.list | tee /etc/apt/sources.list.d/nvidia-docker.list
    apt-get update && apt-get install -y nvidia-docker2
    # Docker must be restarted so it picks up the nvidia runtime
    systemctl restart docker

    log_success "NVIDIA drivers and Docker support installed. A reboot may be required."
}
|
|
|
|
#═══════════════════════════════════════════════════════════
|
|
# SECTION 8: STACK DEPLOYMENT FUNCTIONS
|
|
#═══════════════════════════════════════════════════════════
|
|
|
|
# Deploy Dockge function.
# Installs the Dockge (compose manager) stack into /opt/dockge, localizes its
# compose/.env files, and starts it with docker compose.
deploy_dockge() {
    local target="/opt/dockge"

    echo -n "║ Deploying Dockge... "
    log_info " - Dockge (Docker Compose Manager)"

    # Keep a timestamped copy of any previous compose file.
    common_backup "$target/docker-compose.yml"

    # Install the stack's compose file and hand it to the runtime user.
    sudo cp "$REPO_DIR/docker-compose/dockge/docker-compose.yml" "$target/docker-compose.yml"
    sudo chown "$ACTUAL_USER:$ACTUAL_USER" "$target/docker-compose.yml"

    # Derive the stack .env from its .env.example.
    process_stack_env "$target" "$REPO_DIR/docker-compose/dockge"
    sudo chown "$ACTUAL_USER:$ACTUAL_USER" "$target/.env"

    # Substitute placeholders (labels and x-dockge only).
    localize_yml_file "$target/docker-compose.yml"

    # Bring the stack up.
    cd "$target"
    if run_cmd --quiet docker compose up -d; then
        echo "Success"
    fi
}
|
|
|
|
# Deploy core stack function.
# Installs DuckDNS + Traefik + Authelia under /opt/stacks/core, localizes all
# config files, generates the shared CA for multi-server TLS, starts the stack,
# then deploys the Sablier lazy-loading stack.
deploy_core() {
    debug_log "deploy_core called"
    echo -n "║ Deploying Core Stack... "
    log_info " - DuckDNS (Dynamic DNS)"
    log_info " - Traefik (Reverse Proxy with SSL)"
    log_info " - Authelia (Single Sign-On)"

    # Copy core stack files
    debug_log "Copying core stack files"

    # Backup existing files if they exist. Uses common_backup for consistency
    # with the traefik/authelia backups below (previously this was a manual
    # `sudo cp ... .backup.$(date ...)`).
    common_backup "/opt/stacks/core/docker-compose.yml"

    sudo cp "$REPO_DIR/docker-compose/core/docker-compose.yml" /opt/stacks/core/docker-compose.yml
    sudo chown "$ACTUAL_USER:$ACTUAL_USER" /opt/stacks/core/docker-compose.yml

    # Process .env file from .env.example
    process_stack_env "/opt/stacks/core" "$REPO_DIR/docker-compose/core"
    sudo chown "$ACTUAL_USER:$ACTUAL_USER" /opt/stacks/core/.env

    # Fix multi-line secrets in .env file (merge split lines): a long secret
    # that was wrapped onto the next line is re-joined with its KEY= line.
    debug_log "Fixing multi-line secrets in .env file"
    python3 << 'PYFIX'
import sys
with open('/opt/stacks/core/.env', 'r') as f:
    lines = f.readlines()
new_lines = []
i = 0
while i < len(lines):
    if any(k in lines[i] for k in ['AUTHELIA_JWT_SECRET=', 'AUTHELIA_SESSION_SECRET=', 'AUTHELIA_STORAGE_ENCRYPTION_KEY=']):
        if i + 1 < len(lines) and '=' not in lines[i+1] and lines[i+1].strip() and not lines[i+1].strip().startswith('#'):
            new_lines.append(lines[i].rstrip('\n') + lines[i+1].lstrip())
            i += 2
            continue
    new_lines.append(lines[i])
    i += 1
with open('/opt/stacks/core/.env', 'w') as f:
    f.writelines(new_lines)
PYFIX

    # Escape $ characters in password hashes to prevent Docker Compose variable substitution
    sed -i '/^AUTHELIA_ADMIN_PASSWORD_HASH=/ s/\$/\\$/g' /opt/stacks/core/.env

    # Replace placeholders in core compose file (labels and x-dockge only,
    # fail on missing critical vars)
    localize_yml_file "/opt/stacks/core/docker-compose.yml" true

    # Copy and configure Traefik config
    debug_log "Setting up Traefik configuration"
    common_backup "/opt/stacks/core/traefik"
    cp -r "$REPO_DIR/docker-compose/core/traefik" /opt/stacks/core/
    sudo chown -R "$ACTUAL_USER:$ACTUAL_USER" /opt/stacks/core/traefik

    # Move Traefik config file to the correct location for Docker mount
    debug_log "Moving Traefik config file to config directory"
    mkdir -p /opt/stacks/core/traefik/config
    mv /opt/stacks/core/traefik/traefik.yml /opt/stacks/core/traefik/config/

    # Only copy external host files on core server (where Traefik runs)
    if [ "$DEPLOY_CORE" = true ]; then
        log_info "Core server detected - copying external host routing files"
        # Remove local-host-production.yml if no remote server hostname is set (single-server setup)
        if [ -z "${REMOTE_SERVER_HOSTNAME:-}" ]; then
            rm -f /opt/stacks/core/traefik/dynamic/local-host-production.yml
            # Remove remote server sections from sablier.yml for single-server setup.
            # NOTE(review): this deletes from a hard-coded line number (335) to
            # EOF — fragile if sablier.yml changes; verify on template updates.
            sed -i '335,$d' /opt/stacks/core/traefik/dynamic/sablier.yml
            log_info "Single-server setup - removed remote server sections from sablier.yml"
        fi
    else
        log_info "Remote server detected - removing external host routing files"
        rm -f /opt/stacks/core/traefik/dynamic/external-host-*.yml
    fi

    # Replace all placeholders in Traefik config files
    debug_log "Replacing placeholders in Traefik config files"
    for config_file in $(find /opt/stacks/core/traefik -name "*.yml" -type f); do
        # Don't fail on missing variables for external host files (they're optional)
        if [[ "$config_file" == *external-host* ]]; then
            localize_yml_file "$config_file" false
        else
            localize_yml_file "$config_file" true
        fi
    done

    # Rename external-host-production.yml to use remote server hostname (only for multi-server setups)
    if [ -n "${REMOTE_SERVER_HOSTNAME:-}" ] && [ -f "/opt/stacks/core/traefik/dynamic/external-host-production.yml" ]; then
        mv "/opt/stacks/core/traefik/dynamic/external-host-production.yml" "/opt/stacks/core/traefik/dynamic/external-host-${REMOTE_SERVER_HOSTNAME}.yml"
        log_info "Renamed external-host-production.yml to external-host-${REMOTE_SERVER_HOSTNAME}.yml"
    fi

    # Copy and configure Authelia config
    debug_log "Setting up Authelia configuration"
    common_backup "/opt/stacks/core/authelia"
    cp -r "$REPO_DIR/docker-compose/core/authelia" /opt/stacks/core/
    sudo chown -R "$ACTUAL_USER:$ACTUAL_USER" /opt/stacks/core/authelia

    # Replace all placeholders in Authelia config files (users_database.yml
    # needs the dedicated localizer because it carries the password hash)
    debug_log "Replacing placeholders in Authelia config files"
    for config_file in $(find /opt/stacks/core/authelia -name "*.yml" -type f); do
        if [[ "$config_file" == *"users_database.yml" ]]; then
            localize_users_database_file "$config_file"
        else
            localize_yml_file "$config_file" true
        fi
    done

    # Remove invalid session.cookies section from Authelia config (not supported in v4.37.5)
    debug_log "Removing invalid session.cookies section from Authelia config"
    sed -i '/^  cookies:/,/^$/d' /opt/stacks/core/authelia/config/configuration.yml

    # Ensure proper ownership of Authelia files
    sudo chown -R "$ACTUAL_USER:$ACTUAL_USER" /opt/stacks/core/authelia

    # Generate shared CA for multi-server TLS
    debug_log "Generating shared CA"
    log_info "Generating shared CA certificate for multi-server TLS..."
    generate_shared_ca

    # Deploy core stack
    debug_log "Deploying core stack with docker compose"
    cd /opt/stacks/core
    if run_cmd --quiet docker compose up -d; then
        echo "Success"
    fi

    # Deploy Sablier stack for lazy loading
    echo -n "║ Deploying Sablier... "
    deploy_sablier_stack
}
|
|
|
|
# Deploy infrastructure stack function.
# Installs Pi-hole, Watchtower, Dozzle, Glances and the Docker socket proxy
# under /opt/stacks/infrastructure, localizes configs, and starts the stack.
deploy_infrastructure() {
    echo -n "║ Deploying Infrastructure Stack... "
    log_info " - Pi-hole (DNS Ad Blocker)"
    log_info " - Watchtower (Container Updates)"
    log_info " - Dozzle (Log Viewer)"
    log_info " - Glances (System Monitor)"
    log_info " - Docker Proxy (Security)"

    # Backup existing files if they exist. Uses common_backup for consistency
    # with the other deploy_* functions (was a manual timestamped cp).
    common_backup "/opt/stacks/infrastructure/docker-compose.yml"

    # Copy infrastructure stack
    cp "$REPO_DIR/docker-compose/infrastructure/docker-compose.yml" /opt/stacks/infrastructure/docker-compose.yml
    sudo chown "$ACTUAL_USER:$ACTUAL_USER" /opt/stacks/infrastructure/docker-compose.yml

    # Process .env file from .env.example
    process_stack_env "/opt/stacks/infrastructure" "$REPO_DIR/docker-compose/infrastructure"
    sudo chown "$ACTUAL_USER:$ACTUAL_USER" /opt/stacks/infrastructure/.env

    # Copy any additional config directories
    for config_dir in "$REPO_DIR/docker-compose/infrastructure"/*/; do
        if [ -d "$config_dir" ] && [ "$(basename "$config_dir")" != "." ]; then
            cp -r "$config_dir" /opt/stacks/infrastructure/
        fi
    done

    # If core is not deployed, remove Authelia middleware references
    if [ "$DEPLOY_CORE" = false ]; then
        log_info "Core infrastructure not deployed - removing Authelia middleware references..."
        sed -i '/middlewares=authelia@docker/d' /opt/stacks/infrastructure/docker-compose.yml
    fi

    # Replace placeholders in infrastructure compose file (labels and x-dockge
    # only). Run once, after the optional middleware strip above — the previous
    # version called localize_yml_file twice on the same file redundantly.
    localize_yml_file "/opt/stacks/infrastructure/docker-compose.yml"

    # Deploy infrastructure stack
    cd /opt/stacks/infrastructure
    if run_cmd --quiet docker compose up -d; then
        echo "Success"
    fi
}
|
|
|
|
# Deploy dashboards stack function.
# Installs Homepage and Homarr under /opt/stacks/dashboards, localizes the
# homepage config (yaml + .template files), and starts the stack.
deploy_dashboards() {
    echo -n "║ Deploying Dashboard Stack... "
    log_info " - Homepage (Application Dashboard)"
    log_info " - Homarr (Modern Dashboard)"

    # Create dashboards directory
    sudo mkdir -p /opt/stacks/dashboards

    # Backup existing files if they exist
    common_backup "/opt/stacks/dashboards/docker-compose.yml"

    # Copy dashboards compose file
    cp "$REPO_DIR/docker-compose/dashboards/docker-compose.yml" /opt/stacks/dashboards/docker-compose.yml
    sudo chown "$ACTUAL_USER:$ACTUAL_USER" /opt/stacks/dashboards/docker-compose.yml

    # Process .env file from .env.example
    process_stack_env "/opt/stacks/dashboards" "$REPO_DIR/docker-compose/dashboards"
    sudo chown "$ACTUAL_USER:$ACTUAL_USER" /opt/stacks/dashboards/.env

    # Replace placeholders in dashboards compose file (labels and x-dockge
    # only). The previous version repeated this call after the homepage config
    # block; one pass is sufficient.
    localize_yml_file "/opt/stacks/dashboards/docker-compose.yml"

    # Copy homepage config
    if [ -d "$REPO_DIR/docker-compose/dashboards/homepage" ]; then
        cp -r "$REPO_DIR/docker-compose/dashboards/homepage" /opt/stacks/dashboards/
        sudo chown -R "$ACTUAL_USER:$ACTUAL_USER" /opt/stacks/dashboards/homepage

        # Replace placeholders in homepage config files
        find /opt/stacks/dashboards/homepage -name "*.yaml" -type f | while read -r config_file; do
            localize_yml_file "$config_file"
        done

        # Remove remote server entries from homepage services for single-server setup
        if [ -z "${REMOTE_SERVER_HOSTNAME:-}" ]; then
            sed -i '/\${REMOTE_SERVER_HOSTNAME}/d' /opt/stacks/dashboards/homepage/services.yaml
            log_info "Single-server setup - removed remote server entries from homepage services"
        fi

        # Process template files and rename them (strip the .template suffix)
        find /opt/stacks/dashboards/homepage -name "*.template" -type f | while read -r template_file; do
            localize_yml_file "$template_file"
            # Rename template file to remove .template extension
            new_file="${template_file%.template}"
            mv "$template_file" "$new_file"
            log_info "Processed and renamed $template_file to $new_file"
        done
    fi

    # Deploy dashboards stack
    cd /opt/stacks/dashboards
    if run_cmd --quiet docker compose up -d; then
        echo "Success"
    fi
}
|
|
|
|
# Deploy the Arcane (Docker management UI) stack into /opt/arcane, localize
# its compose/.env files, and start it with docker compose.
deploy_arcane() {
    local stack_dir="/opt/arcane"

    echo -n "║ Deploying Arcane... "
    log_info " - Arcane (Docker Management UI)"

    # Preserve any earlier compose file before overwriting.
    common_backup "$stack_dir/docker-compose.yml"

    # Install the compose file and hand it to the runtime user.
    sudo cp "$REPO_DIR/docker-compose/arcane/docker-compose.yml" "$stack_dir/docker-compose.yml"
    sudo chown "$ACTUAL_USER:$ACTUAL_USER" "$stack_dir/docker-compose.yml"

    # Build the stack .env from its .env.example.
    process_stack_env "$stack_dir" "$REPO_DIR/docker-compose/arcane"
    sudo chown "$ACTUAL_USER:$ACTUAL_USER" "$stack_dir/.env"

    # Substitute placeholders (labels and x-dockge only).
    localize_yml_file "$stack_dir/docker-compose.yml"

    # Start the stack.
    cd "$stack_dir"
    if run_cmd --quiet docker compose up -d; then
        echo "Success"
    fi
}
|
|
|
|
#═══════════════════════════════════════════════════════════
|
|
# SECTION 9: CORE DEPLOYMENT ORCHESTRATION
|
|
#═══════════════════════════════════════════════════════════
|
|
|
|
# Deployment function — top-level orchestrator.
# Drops root back to $ACTUAL_USER, loads .env, ensures the Authelia password
# hash exists, creates directories/networks, then runs the per-stack deploy
# functions selected by the DEPLOY_* / SETUP_STACKS flags.
perform_deployment() {
    debug_log "perform_deployment() called with DEPLOY_CORE=$DEPLOY_CORE, DEPLOY_INFRASTRUCTURE=$DEPLOY_INFRASTRUCTURE, DEPLOY_DASHBOARDS=$DEPLOY_DASHBOARDS, SETUP_STACKS=$SETUP_STACKS"
    log_info "Starting deployment..."

    # Initialize missing vars summary
    GLOBAL_MISSING_VARS=""
    TLS_ISSUES_SUMMARY=""

    # Switch back to regular user if we were running as root
    if [ "$EUID" -eq 0 ]; then
        ACTUAL_USER=${SUDO_USER:-$USER}
        debug_log "Running as root, switching to user $ACTUAL_USER"
        log_info "Switching to user $ACTUAL_USER for deployment..."
        exec sudo -u "$ACTUAL_USER" "$0" "$@"
    fi

    # Source the .env file safely
    debug_log "Sourcing .env file from $REPO_DIR/.env"
    load_env_file_safely "$REPO_DIR/.env"
    debug_log "Environment loaded, DOMAIN=$DOMAIN, SERVER_IP=$SERVER_IP"

    # Generate Authelia password hash if needed (placeholder or empty)
    if [ "$AUTHELIA_ADMIN_PASSWORD_HASH" = "generate-with-openssl-rand-hex-64" ] || [ -z "$AUTHELIA_ADMIN_PASSWORD_HASH" ]; then
        log_info "Generating Authelia password hash..."
        if ! docker images | grep -q authelia/authelia; then
            docker pull authelia/authelia:latest > /dev/null 2>&1
        fi
        AUTHELIA_ADMIN_PASSWORD_HASH=$(docker run --rm authelia/authelia:latest authelia crypto hash generate argon2 --password "$DEFAULT_PASSWORD" 2>&1 | awk '/\$argon2id/ {print $NF}')
        if [ -z "$AUTHELIA_ADMIN_PASSWORD_HASH" ]; then
            log_error "Failed to generate Authelia password hash."
            exit 1
        fi
        # Save it back to .env
        sed -i "s%AUTHELIA_ADMIN_PASSWORD_HASH=.*%AUTHELIA_ADMIN_PASSWORD_HASH=\"$AUTHELIA_ADMIN_PASSWORD_HASH\"%" "$REPO_DIR/.env"
        log_success "Authelia password hash generated and saved"
    fi

    # Reload .env to get updated secrets
    load_env_file_safely "$REPO_DIR/.env"

    # Step 1: Create required directories
    log_info "Step 1: Creating required directories..."
    sudo mkdir -p /opt/stacks/core || { log_error "Failed to create /opt/stacks/core"; exit 1; }
    sudo mkdir -p /opt/stacks/infrastructure || { log_error "Failed to create /opt/stacks/infrastructure"; exit 1; }
    sudo mkdir -p /opt/stacks/dashboards || { log_error "Failed to create /opt/stacks/dashboards"; exit 1; }
    sudo mkdir -p /opt/dockge || { log_error "Failed to create /opt/dockge"; exit 1; }
    sudo mkdir -p /opt/arcane || { log_error "Failed to create /opt/arcane"; exit 1; }
    sudo chown -R "$ACTUAL_USER:$ACTUAL_USER" /opt/stacks
    sudo chown -R "$ACTUAL_USER:$ACTUAL_USER" /opt/dockge
    sudo chown -R "$ACTUAL_USER:$ACTUAL_USER" /opt/arcane
    log_success "Directories created"

    # Generate .env.global file for all stacks (without comments and blank lines)
    log_info "Generating .env.global for all stacks..."
    generate_env_global "$REPO_DIR/.env" "/opt/stacks/.env.global"
    sudo chown "$ACTUAL_USER:$ACTUAL_USER" /opt/stacks/.env.global
    log_success ".env.global created"

    # Step 2: Setup multi-server TLS if needed (remote servers only)
    if [ "$DEPLOY_CORE" = false ]; then
        setup_multi_server_tls
    fi

    # Step 3: Create Docker networks (if they don't exist)
    echo "║"
    echo "║ Creating Docker networks..."

    docker network create homelab-network >/dev/null 2>&1 && echo "║ homelab-network created" || echo "║ homelab-network exists"
    docker network create traefik-network >/dev/null 2>&1 && echo "║ traefik-network created" || echo "║ traefik-network exists"
    docker network create media-network >/dev/null 2>&1 && echo "║ media-network created" || echo "║ media-network exists"
    echo "║"

    # Step 4: Deploy Dockge and Arcane (always deployed)
    deploy_dockge
    deploy_arcane

    # Deploy core stack
    if [ "$DEPLOY_CORE" = true ]; then
        deploy_core
    fi

    # Deploy infrastructure stack. (Dead step_num bookkeeping left over from
    # the pre-refactor "Step N" log messages has been removed.)
    if [ "$DEPLOY_INFRASTRUCTURE" = true ]; then
        deploy_infrastructure
    fi

    # Deploy dashboard stack
    if [ "$DEPLOY_DASHBOARDS" = true ]; then
        deploy_dashboards
    fi

    # Setup stacks for Dockge
    if [ "$SETUP_STACKS" = true ]; then
        setup_stacks_for_dockge
    fi

    # Report any missing variables
    if [ -n "$GLOBAL_MISSING_VARS" ]; then
        log_warning "The following environment variables were missing and may cause issues:"
        echo "$GLOBAL_MISSING_VARS"
        log_info "Please update your .env file and redeploy affected stacks."
    fi

    # TLS issues will be reported in the final summary
}
|
|
|
|
# Setup Docker TLS function.
# Exposes the Docker daemon over TCP 2376 with mutual TLS: generates (or
# reuses a shared) CA, server and client certificates under the repo's
# docker-tls dir, writes /etc/docker/daemon.json, patches the systemd unit,
# and restarts Docker. Idempotent: skips if TLS + the TCP socket are already
# configured. Returns 1 if Docker fails to come back after the restart.
setup_docker_tls() {
    local TLS_DIR="/home/$ACTUAL_USER/EZ-Homelab/docker-tls"

    # Check if TLS is already configured (both daemon.json and the unit's
    # TCP socket must be present to skip)
    if [ -f "/etc/docker/daemon.json" ] && grep -q '"tls": true' /etc/docker/daemon.json 2>/dev/null; then
        if systemctl cat docker.service | grep -q 'tcp://0.0.0.0:2376'; then
            log_info "Docker TLS already configured, skipping..."
            return 0
        fi
    fi

    # Create TLS directory
    sudo mkdir -p "$TLS_DIR"
    sudo chown "$ACTUAL_USER:$ACTUAL_USER" "$TLS_DIR"

    # Use shared CA if available, otherwise generate local CA
    if [ -f "/opt/stacks/core/shared-ca/ca.pem" ] && [ -f "/opt/stacks/core/shared-ca/ca-key.pem" ]; then
        log_info "Using shared CA certificate for Docker TLS..."
        cp "/opt/stacks/core/shared-ca/ca.pem" "$TLS_DIR/ca.pem"
        cp "/opt/stacks/core/shared-ca/ca-key.pem" "$TLS_DIR/ca-key.pem"
    else
        log_info "Generating local CA certificate for Docker TLS..."
        # Generate CA (4096-bit RSA, self-signed, 1-year validity)
        openssl genrsa -out "$TLS_DIR/ca-key.pem" 4096
        openssl req -new -x509 -days 365 -key "$TLS_DIR/ca-key.pem" -sha256 -out "$TLS_DIR/ca.pem" -subj "/C=US/ST=State/L=City/O=Organization/CN=Docker-CA"
    fi

    # Generate server key and cert.
    # NOTE(review): the SAN also lists $SERVER_IP as a DNS entry; harmless,
    # but only the IP entries matter when clients connect by IP.
    openssl genrsa -out "$TLS_DIR/server-key.pem" 4096
    openssl req -subj "/CN=$SERVER_IP" -new -key "$TLS_DIR/server-key.pem" -out "$TLS_DIR/server.csr"
    echo "subjectAltName = DNS:$SERVER_IP,IP:$SERVER_IP,IP:127.0.0.1" > "$TLS_DIR/extfile.cnf"
    openssl x509 -req -days 365 -in "$TLS_DIR/server.csr" -CA "$TLS_DIR/ca.pem" -CAkey "$TLS_DIR/ca-key.pem" -CAcreateserial -out "$TLS_DIR/server-cert.pem" -extfile "$TLS_DIR/extfile.cnf"

    # Generate client key and cert.
    # NOTE(review): no extendedKeyUsage=clientAuth extension is set here —
    # confirm the Docker daemon accepts this client cert for mutual TLS.
    openssl genrsa -out "$TLS_DIR/client-key.pem" 4096
    openssl req -subj "/CN=client" -new -key "$TLS_DIR/client-key.pem" -out "$TLS_DIR/client.csr"
    openssl x509 -req -days 365 -in "$TLS_DIR/client.csr" -CA "$TLS_DIR/ca.pem" -CAkey "$TLS_DIR/ca-key.pem" -CAcreateserial -out "$TLS_DIR/client-cert.pem"

    # Configure Docker daemon (tlsverify requires client certs signed by our CA)
    sudo tee /etc/docker/daemon.json > /dev/null <<EOF
{
  "tls": true,
  "tlsverify": true,
  "tlscacert": "$TLS_DIR/ca.pem",
  "tlscert": "$TLS_DIR/server-cert.pem",
  "tlskey": "$TLS_DIR/server-key.pem"
}
EOF

    # Update systemd service only if not already configured (idempotent)
    if ! systemctl cat docker.service | grep -q 'tcp://0.0.0.0:2376'; then
        log_info "Adding TCP socket to Docker service..."
        sudo sed -i 's|^ExecStart=/usr/bin/dockerd -H fd://|ExecStart=/usr/bin/dockerd -H fd:// -H tcp://0.0.0.0:2376|' /lib/systemd/system/docker.service
    else
        # Clean up any duplicate TCP socket entries.
        # NOTE(review): `grep -c` counts matching LINES, not occurrences —
        # duplicates on a single ExecStart line would count as 1; confirm
        # whether multi-occurrence-per-line cleanup is needed.
        if systemctl cat docker.service | grep -c 'tcp://0.0.0.0:2376' | grep -q '^[2-9]'; then
            log_warning "Found duplicate TCP socket entries, cleaning up..."
            # Extract the current ExecStart line and remove duplicates, then
            # re-append exactly one TCP socket flag
            local exec_start=$(systemctl cat docker.service | grep '^ExecStart=' | head -1)
            local cleaned_exec=$(echo "$exec_start" | sed 's|-H tcp://0.0.0.0:2376||g')
            cleaned_exec="${cleaned_exec} -H tcp://0.0.0.0:2376"
            sudo sed -i "s|^ExecStart=.*|${cleaned_exec}|" /lib/systemd/system/docker.service
        fi
    fi

    # Reload and restart Docker so the unit + daemon.json changes take effect
    sudo systemctl daemon-reload
    sudo systemctl restart docker

    # Wait for Docker to be ready
    sleep 3
    if ! docker ps &>/dev/null; then
        log_error "Docker failed to start after TLS configuration"
        return 1
    fi

    log_success "Docker TLS configured on port 2376"
}
|
|
# Copy every stack's compose file plus a sanitized .env into /opt/stacks so
# Dockge can manage them.
# Globals (read): REPO_DIR, ACTUAL_USER
# Calls: localize_yml_file (defined elsewhere in this file)
# Outputs: per-stack progress via log_* helpers plus ║-bordered status lines.
setup_stacks_for_dockge() {
    log_info "Setting up all stacks for Dockge..."
    echo -n "║ Copy & configure all stacks... "

    # List of stacks to setup
    STACKS=("vpn" "media" "media-management" "transcoders" "monitoring" "productivity" "wikis" "utilities" "alternatives" "homeassistant")

    # Variable prefixes that must never leak into a per-stack .env
    # (secrets and core-server-only settings). Consolidates what used to be
    # 23 separate `sed -i` invocations into a single pass per file.
    local env_strip_prefixes=(
        'AUTHELIA_ADMIN_PASSWORD_HASH=' 'AUTHELIA_JWT_SECRET='
        'AUTHELIA_SESSION_SECRET=' 'AUTHELIA_STORAGE_ENCRYPTION_KEY='
        'SURFSHARK_' 'SMTP_' 'REMOTE_SERVER_' 'PIHOLE_' 'WATCHTOWER_'
        'QBITTORRENT_' 'GRAFANA_' 'CODE_SERVER_' 'JUPYTER_' 'POSTGRES_'
        'PGADMIN_' 'NEXTCLOUD_' 'GITEA_' 'WORDPRESS_' 'BOOKSTACK_'
        'MEDIAWIKI_' 'BITWARDEN_' 'FORMIO_' 'HOMEPAGE_VAR_'
    )
    local sed_strip_args=()
    local prefix
    for prefix in "${env_strip_prefixes[@]}"; do
        sed_strip_args+=(-e "/^${prefix}/d")
    done

    for stack in "${STACKS[@]}"; do
        STACK_DIR="/opt/stacks/$stack"
        REPO_STACK_DIR="$REPO_DIR/docker-compose/$stack"

        if [ -d "$REPO_STACK_DIR" ]; then
            mkdir -p "$STACK_DIR"
            if [ -f "$REPO_STACK_DIR/docker-compose.yml" ]; then
                cp "$REPO_STACK_DIR/docker-compose.yml" "$STACK_DIR/docker-compose.yml"
                cp "$REPO_DIR/.env" "$STACK_DIR/.env"
                sudo chown "$ACTUAL_USER:$ACTUAL_USER" "$STACK_DIR/docker-compose.yml"
                sudo chown "$ACTUAL_USER:$ACTUAL_USER" "$STACK_DIR/.env"

                # Remove sensitive/unnecessary variables from stack .env
                # (one sed invocation, many -e expressions)
                sed -i "${sed_strip_args[@]}" "$STACK_DIR/.env"

                # Replace placeholders in the compose file
                localize_yml_file "$STACK_DIR/docker-compose.yml"

                # Copy any additional config directories
                for config_dir in "$REPO_STACK_DIR"/*/; do
                    if [ -d "$config_dir" ] && [ "$(basename "$config_dir")" != "." ]; then
                        cp -r "$config_dir" "$STACK_DIR/"
                        sudo chown -R "$ACTUAL_USER:$ACTUAL_USER" "$STACK_DIR/$(basename "$config_dir")"

                        # Replace placeholders in config files.
                        # Fix: \( ... \) groups the -o alternation so both
                        # name tests apply under the same search root.
                        find "$STACK_DIR/$(basename "$config_dir")" \( -name "*.yml" -o -name "*.yaml" \) | while read -r config_file; do
                            localize_yml_file "$config_file"
                        done
                    fi
                done

                log_success "Prepared $stack stack for Dockge"

            else
                log_warning "$stack stack docker-compose.yml not found, skipping..."
            fi
        else
            log_warning "$stack stack directory not found in repo, skipping..."
        fi
    done

    log_success "All stacks prepared for Dockge deployment"
    echo "Success"
    echo "║"
}
|
|
|
|
# Main menu
|
|
# Render the top-level interactive menu.
# Pure output — reads nothing, sets nothing; input handling lives in
# handle_menu_selection().
show_main_menu() {
    printf '%s\n' \
        "" \
        "╔═════════════════════════════════════════════════════════════╗" \
        "║ EZ-HOMELAB SETUP & DEPLOYMENT ║" \
        "║ ║" \
        "║ 1) Install Prerequisites ║" \
        "║ 2) Deploy Core Server ║" \
        "║ 3) Deploy Additional Server ║" \
        "║ 4) Install NVIDIA Drivers ║" \
        "║ ║" \
        "║ q) Quit ║" \
        "║"
}
|
|
|
|
# =============================================
|
|
# MULTI-SERVER DEPLOYMENT FUNCTIONS
|
|
# =============================================
|
|
|
|
# Clean up orphaned processes (important for resource-constrained servers)
|
|
# Clean up orphaned processes (important for resource-constrained servers)
#
# Best-effort housekeeping before a deployment:
#   1. Finds zombie processes and nudges their parents with SIGCHLD so the
#      parents reap them (zombies themselves cannot be killed).
#   2. Kills lingering `docker compose logs` processes.
# Every kill is `|| true`d, so this function never fails the caller.
#
# Globals: none written
# Outputs: warnings/success via log_* helpers; debug trace via debug_log
cleanup_orphaned_processes() {
    debug_log "Cleaning up orphaned processes"

    # Kill zombie processes by killing their parent if possible
    # ($8 of `ps aux` is the STAT column; 'Z' marks a zombie)
    local zombies=$(ps aux | awk '$8 ~ /Z/ {print $2}')
    if [ -n "$zombies" ]; then
        log_warning "Found zombie processes, attempting cleanup..."
        for zombie_pid in $zombies; do
            # xargs trims the whitespace ps pads PPID with
            local parent_pid=$(ps -o ppid= -p $zombie_pid 2>/dev/null | xargs)
            if [ -n "$parent_pid" ] && [ "$parent_pid" != "1" ]; then
                debug_log "Killing parent process $parent_pid to clean up zombie $zombie_pid"
                # SIGCHLD prompts the parent to wait() on its dead child;
                # PID 1 is skipped (init reaps on its own)
                sudo kill -SIGCHLD $parent_pid 2>/dev/null || true
            fi
        done
    fi

    # Kill any stuck docker compose logs processes older than 1 hour
    # NOTE(review): $10 of `ps aux` is the cumulative CPU TIME column, not
    # wall-clock age, and its H:MM:SS format varies by ps implementation —
    # confirm the "older than 1 hour" intent matches what this matches.
    local old_compose_logs=$(ps aux | grep 'docker compose logs' | grep -v grep | awk '$10 ~ /[0-9]+:[0-9]+:[0-9]+/ && $10 !~ /00:0[0-5]/ {print $2}')
    if [ -n "$old_compose_logs" ]; then
        log_warning "Found long-running docker compose logs processes, cleaning up..."
        for pid in $old_compose_logs; do
            debug_log "Killing docker compose logs process $pid"
            # SIGKILL directly — these processes are assumed stuck
            sudo kill -9 $pid 2>/dev/null || true
        done
    fi

    log_success "Process cleanup complete"
}
|
|
|
|
# Check system resources (important for resource-constrained servers)
|
|
# Check system resources (important for resource-constrained servers)
#
# Warns when free memory is below 20% or free disk below 20%; only a
# critically low root filesystem (<10% free) fails the check.
# Fix: the percentage math is now guarded so unparsable or zero totals from
# `free`/`df` can no longer cause a division-by-zero abort.
# Returns: 0 when deployment may proceed, 1 on critical disk shortage.
check_system_resources() {
    debug_log "Checking system resources"

    # Check available memory ($7 of the `free -m` Mem: row is "available")
    local mem_available mem_total mem_percent
    mem_available=$(free -m 2>/dev/null | awk '/^Mem:/ {print $7}')
    mem_total=$(free -m 2>/dev/null | awk '/^Mem:/ {print $2}')

    if [[ "$mem_total" =~ ^[0-9]+$ ]] && [ "$mem_total" -gt 0 ]; then
        mem_percent=$((mem_available * 100 / mem_total))
        if [ "$mem_percent" -lt 20 ]; then
            log_warning "Low memory available: ${mem_available}MB of ${mem_total}MB (${mem_percent}%)"
            log_info "Consider closing other applications before deployment"
        else
            log_success "Memory check passed: ${mem_available}MB available (${mem_percent}%)"
        fi
    else
        # free(1) missing or output not parsable — skip rather than divide by zero
        log_warning "Could not determine memory statistics; skipping memory check"
    fi

    # Check disk space on the root filesystem
    local disk_available disk_total disk_percent
    disk_available=$(df -m / 2>/dev/null | awk 'NR==2 {print $4}')
    disk_total=$(df -m / 2>/dev/null | awk 'NR==2 {print $2}')

    if [[ "$disk_total" =~ ^[0-9]+$ ]] && [ "$disk_total" -gt 0 ]; then
        disk_percent=$((disk_available * 100 / disk_total))
        if [ "$disk_percent" -lt 10 ]; then
            log_error "Critical: Low disk space available: ${disk_available}MB of ${disk_total}MB (${disk_percent}%)"
            log_error "Deployment may fail. Please free up disk space."
            return 1
        elif [ "$disk_percent" -lt 20 ]; then
            log_warning "Low disk space: ${disk_available}MB of ${disk_total}MB (${disk_percent}%)"
        else
            log_success "Disk space check passed: ${disk_available}MB available (${disk_percent}%)"
        fi
    else
        log_warning "Could not determine disk statistics; skipping disk check"
    fi

    return 0
}
|
|
|
|
# Check if Docker is installed and accessible
|
|
# Verify the docker CLI exists and the daemon is reachable by this user.
# Prints user-facing remediation hints on failure.
# Returns: 0 when docker is usable, 1 otherwise.
check_docker_installed() {
    debug_log "Checking if Docker is installed"

    # Guard 1: the docker command must exist at all
    command -v docker &> /dev/null || {
        log_error "Docker is not installed on this system"
        log_info "Please run Option 1 (Install Prerequisites) first"
        return 1
    }

    # Guard 2: the daemon must answer (common failure: user not in docker group)
    docker ps &> /dev/null || {
        log_error "Docker is installed but not accessible"
        log_info "Current user may not be in docker group. Try logging out and back in."
        return 1
    }

    debug_log "Docker is installed and accessible"
    return 0
}
|
|
|
|
# Set required variables based on deployment type
|
|
# Populate the global REQUIRED_VARS array for the chosen deployment type.
# Arguments: $1 - deployment type, "core" or "remote"
# Globals (written): REQUIRED_VARS
# Returns: 1 when the type is unknown.
set_required_vars_for_deployment() {
    local deployment_type="$1"
    debug_log "Setting required vars for deployment type: $deployment_type"

    if [ "$deployment_type" = "core" ]; then
        REQUIRED_VARS=("SERVER_IP" "SERVER_HOSTNAME" "DUCKDNS_SUBDOMAINS" "DUCKDNS_TOKEN" "DOMAIN" "DEFAULT_USER" "DEFAULT_PASSWORD" "DEFAULT_EMAIL")
        debug_log "Set REQUIRED_VARS for core deployment"
    elif [ "$deployment_type" = "remote" ]; then
        REQUIRED_VARS=("SERVER_IP" "SERVER_HOSTNAME" "DOMAIN" "DEFAULT_USER" "CORE_SERVER_IP" "CORE_SERVER_HOSTNAME" "CORE_SERVER_USER" "CORE_SERVER_PASSWORD")
        debug_log "Set REQUIRED_VARS for remote deployment"
    else
        log_error "Unknown deployment type: $deployment_type"
        return 1
    fi
}
|
|
|
|
#═══════════════════════════════════════════════════════════
|
|
# SECTION 10: REMOTE SERVER DEPLOYMENT
|
|
#═══════════════════════════════════════════════════════════
|
|
|
|
# Deploy remote server
|
|
# Deploy remote server
#
# Full workflow for an "additional" (non-core) server: SSH trust to the core
# server, directory/network scaffolding, stack copies, Traefik-label removal,
# local management containers (Dockge/Arcane/Sablier/Infrastructure), and
# finally registration of this host with the core server's Traefik.
#
# Globals (read): CORE_SERVER_IP, CORE_SERVER_HOSTNAME, CORE_SERVER_USER,
#   SERVER_HOSTNAME; (written): ACTUAL_USER, possibly SSH_KEY_PATH
# Interactive: prompts on SSH-setup failure (retry/skip/menu).
# Returns: 1 when a precondition fails or the user backs out to the menu.
deploy_remote_server() {
    log_info "Deploying Remote Server Configuration"

    # Set ACTUAL_USER if not already set (needed for SSH key paths);
    # under sudo the invoking user is SUDO_USER, not root
    if [ -z "$ACTUAL_USER" ]; then
        if [ "$EUID" -eq 0 ]; then
            ACTUAL_USER=${SUDO_USER:-$USER}
        else
            ACTUAL_USER=$USER
        fi
        export ACTUAL_USER
        debug_log "Set ACTUAL_USER=$ACTUAL_USER"
    fi

    # Clean up any orphaned processes before starting (important for resource-constrained servers)
    cleanup_orphaned_processes

    # Check system resources (non-fatal: result intentionally not checked here)
    check_system_resources

    # Check Docker is installed
    if ! check_docker_installed; then
        log_error "Docker must be installed before deploying remote server"
        return 1
    fi

    # Ensure we have core server information
    if [ -z "$CORE_SERVER_IP" ] || [ -z "$CORE_SERVER_HOSTNAME" ]; then
        log_error "Core server IP and hostname are required"
        return 1
    fi

    # Step 1: Setup SSH key authentication to core server
    log_info "Step 1: Setting up SSH key authentication to core server..."
    log_info "Using ACTUAL_USER=$ACTUAL_USER for SSH key path"
    log_info "Target: ${CORE_SERVER_USER}@${CORE_SERVER_IP}"

    # Retry loop for SSH setup: loop until success, skip, or back-to-menu
    while true; do
        if setup_ssh_key_to_core; then
            log_success "SSH key authentication setup complete"
            # echo ""
            break
        else
            echo "║"
            echo "║ ✗ SSH key authentication setup failed"
            echo "║"
            echo "║ 1) Retry 2) Skip 3) Main Menu"
            echo -n "╚═════════════════════════════════════════════ "
            read -p "Choose : " ssh_retry_choice

            case $ssh_retry_choice in
                1)
                    # echo ""
                    log_info "Retrying SSH setup..."
                    continue
                    ;;
                2)
                    # Skip path: print the manual steps the user must perform
                    echo "║"
                    echo "║ Skipping SSH setup - Manual configuration required:"
                    echo "║"
                    echo "║ 1. Generate SSH key:"
                    echo "║ ssh-keygen -t ed25519 -f ~/.ssh/ez-homelab-${SERVER_HOSTNAME}"
                    echo "║"
                    echo "║ 2. Copy to core server:"
                    echo "║ ssh-copy-id -i ~/.ssh/ez-homelab-${SERVER_HOSTNAME}.pub ${CORE_SERVER_USER}@${CORE_SERVER_IP}"
                    echo "║"
                    echo "║ 3. Test connection:"
                    echo "║ ssh -i ~/.ssh/ez-homelab-${SERVER_HOSTNAME} ${CORE_SERVER_USER}@${CORE_SERVER_IP}"
                    echo "║"
                    echo -n "╚═════════════════════════════════════════════ "
                    read -p "Press Enter to continue... "
                    # Set a minimal SSH_KEY_PATH for functions that need it
                    # NOTE(review): this path (ez-homelab-*) differs from the
                    # id_rsa_${SERVER_HOSTNAME}_to_core key expected by
                    # register_remote_server_with_core — confirm which name
                    # is canonical before relying on the skip path.
                    export SSH_KEY_PATH="/home/$ACTUAL_USER/.ssh/ez-homelab-${SERVER_HOSTNAME}"
                    break
                    ;;
                3)
                    log_info "Returning to main menu..."
                    return 1
                    ;;
                *)
                    log_error "Invalid option. Please choose 1, 2, or 3."
                    ;;
            esac
        fi
    done

    # Step 2: Create required directories
    echo "║"
    echo "║ Creating required directories..."
    common_create_directories "/opt/stacks" "/opt/dockge" "/opt/arcane" >/dev/null 2>&1
    echo "║ /opt/stacks created"
    echo "║ /opt/dockge created"
    echo "║ /opt/arcane created"

    # Step 3: Create required Docker networks
    echo "║"
    echo "║ Creating Docker networks..."
    common_create_networks "homelab-network" "traefik-network" >/dev/null 2>&1
    echo "║ homelab-network created"
    echo "║ traefik-network created"

    # Step 4: Install envsubst if not present (gettext-base provides it)
    if ! command -v envsubst &> /dev/null; then
        echo "║"
        echo -n "║ Installing envsubst... "
        sudo apt-get update -qq && sudo apt-get install -y gettext-base >/dev/null 2>&1
        echo "Success"
    fi

    # Step 5: Copy all stacks to remote server
    echo "║"
    echo -n "║ Copying stacks to remote server... "
    copy_all_stacks_for_remote >/dev/null 2>&1
    echo "Success"

    # Step 6: Configure services for additional server (remove Traefik labels)
    echo "║"
    echo -n "║ Configuring services... "
    configure_remote_server_routing >/dev/null 2>&1
    echo "Success"

    # Step 7: Deploy Dockge
    echo "║"
    deploy_dockge

    # Step 8: Deploy Arcane
    deploy_arcane

    # Step 9: Deploy Sablier stack for local lazy loading
    # (deploy_sablier_stack prints the completion word for this label)
    echo -n "║ Deploying Sablier... "
    deploy_sablier_stack

    # Step 10: Deploy Infrastructure stack
    deploy_infrastructure

    # Step 11: Register this remote server with core Traefik
    echo "║"
    echo -n "║ Registering with core Traefik... "
    register_remote_server_with_core

    log_success "Remote server deployment complete!"
}
|
|
|
|
# Register remote server with core Traefik
|
|
# Register remote server with core Traefik
#
# SSHes to the core server, sources its common.sh, and calls
# add_remote_server_to_traefik there so the core Traefik gains routes for
# this host, then restarts Traefik to pick them up.
#
# Globals (read): SERVER_IP, SERVER_HOSTNAME, ACTUAL_USER,
#   CORE_SERVER_IP, CORE_SERVER_USER
# Preconditions: the id_rsa_${SERVER_HOSTNAME}_to_core key produced by
#   setup_ssh_key_to_core() must exist and be installed on the core server.
# Outputs: "Success"/"Failed" completion word (caller prints the label).
# Returns: 1 on missing prerequisites, SSH failure, or remote-side failure.
register_remote_server_with_core() {
    debug_log "Registering remote server with core Traefik via SSH"

    local key_name="id_rsa_${SERVER_HOSTNAME}_to_core"
    local key_path="/home/$ACTUAL_USER/.ssh/$key_name"

    if [ -z "$CORE_SERVER_IP" ] || [ -z "$CORE_SERVER_USER" ]; then
        log_error "CORE_SERVER_IP and CORE_SERVER_USER are required"
        return 1
    fi

    # Verify SSH key exists
    if [ ! -f "$key_path" ]; then
        log_error "SSH key not found: $key_path"
        log_error "Please ensure setup_ssh_key_to_core() completed successfully"
        return 1
    fi

    log_info "Connecting to core server to register this remote server..."
    log_info "Using key: $key_path"

    # Test SSH connection first. BatchMode forbids password prompts so a
    # missing key fails fast; LC_ALL=C keeps output parseable.
    if ! LC_ALL=C ssh -i "$key_path" -o ConnectTimeout=5 -o StrictHostKeyChecking=no -o BatchMode=yes -o LogLevel=ERROR \
        "${CORE_SERVER_USER}@${CORE_SERVER_IP}" "echo 'test'" 2>&1 | grep -q "test"; then
        log_error "Cannot establish SSH connection to core server"
        log_error "Please verify:"
        echo " 1. SSH key is installed: ssh -i $key_path ${CORE_SERVER_USER}@${CORE_SERVER_IP}"
        echo " 2. Core server is reachable: ping ${CORE_SERVER_IP}"
        echo " 3. SSH service is running on core server"
        return 1
    fi

    log_success "SSH connection verified"

    # SSH to core server and run registration function.
    # The heredoc delimiter is unquoted, so ${SERVER_IP}/${SERVER_HOSTNAME}
    # expand LOCALLY before the script is sent to the core server.
    log_info "Running registration commands on core server..."
    local ssh_output=$(LC_ALL=C ssh -i "$key_path" -o ConnectTimeout=10 -o StrictHostKeyChecking=no -o LogLevel=ERROR \
        "${CORE_SERVER_USER}@${CORE_SERVER_IP}" bash <<EOF 2>&1
# Source common.sh to get registration function
if [ -f ~/EZ-Homelab/scripts/common.sh ]; then
    source ~/EZ-Homelab/scripts/common.sh
else
    echo "ERROR: common.sh not found"
    exit 1
fi

# Register this remote server
add_remote_server_to_traefik "${SERVER_IP}" "${SERVER_HOSTNAME}"

# Verify files were created
if [ -f "/opt/stacks/core/traefik/dynamic/${SERVER_HOSTNAME}-server-routes.yml" ]; then
    echo "SUCCESS: server routes file created"
else
    echo "ERROR: server routes file not created"
    exit 1
fi

if [ -f "/opt/stacks/core/traefik/dynamic/sablier-middleware-${SERVER_HOSTNAME}.yml" ]; then
    echo "SUCCESS: sablier-middleware file created"
else
    echo "ERROR: sablier-middleware file not created"
    exit 1
fi

# Restart Traefik to reload configs
cd /opt/stacks/core
docker compose restart traefik

echo "SUCCESS: Registration complete"
EOF
)

    local ssh_exit_code=$?

    # Success requires both a clean SSH exit and the sentinel line, since
    # remote stderr is folded into ssh_output
    if [ $ssh_exit_code -eq 0 ] && echo "$ssh_output" | grep -q "SUCCESS: Registration complete"; then
        echo "Success"
        echo "║"
        echo "║ Routes created on core server:"
        echo "║ - ${SERVER_HOSTNAME}-server-routes.yml"
        echo "║ - sablier-middleware-${SERVER_HOSTNAME}.yml"
        echo "║"
        return 0
    else
        echo "Failed"
        echo "║"
        echo "║ ✗ Registration failed - SSH output:"
        echo "$ssh_output" | sed 's/^/║ /'
        return 1
    fi
}
|
|
|
|
# Deploy Sablier stack
|
|
# Deploy the Sablier lazy-loading stack to /opt/stacks/sablier.
# Globals (read): REPO_DIR, ACTUAL_USER
# Outputs: "Success"/"Failed" completion word (the caller prints the
#   "Deploying Sablier... " label beforehand).
# Returns: 1 on any setup or deploy failure.
deploy_sablier_stack() {
    debug_log "Deploying Sablier stack"

    local sablier_dir="/opt/stacks/sablier"

    # Create sablier stack directory with sudo
    if [ ! -d "$sablier_dir" ]; then
        sudo mkdir -p "$sablier_dir" >/dev/null 2>&1 || { log_error "Failed to create $sablier_dir"; return 1; }
        sudo chown -R "$ACTUAL_USER:$ACTUAL_USER" "$sablier_dir" >/dev/null 2>&1
    fi

    # Check if source files exist
    if [ ! -f "$REPO_DIR/docker-compose/sablier/docker-compose.yml" ]; then
        log_error "Sablier docker-compose.yml not found in repo at $REPO_DIR/docker-compose/sablier/"
        return 1
    fi

    # Copy stack files silently
    cp "$REPO_DIR/docker-compose/sablier/docker-compose.yml" "$sablier_dir/" 2>/dev/null || { log_error "Failed to copy docker-compose.yml"; return 1; }
    sudo chown "$ACTUAL_USER:$ACTUAL_USER" "$sablier_dir/docker-compose.yml" 2>/dev/null

    # Process .env file from .env.example
    process_stack_env "$sablier_dir" "$REPO_DIR/docker-compose/sablier" >/dev/null 2>&1
    sudo chown "$ACTUAL_USER:$ACTUAL_USER" "$sablier_dir/.env" 2>/dev/null

    # Localize the docker-compose file (labels and x-dockge only)
    localize_yml_file "$sablier_dir/docker-compose.yml" >/dev/null 2>&1

    # Deploy.
    # Fix: guard the cd so a failure never runs compose from whatever
    # directory the caller happened to be in; report failure explicitly
    # instead of silently printing nothing.
    cd "$sablier_dir" || { log_error "Failed to enter $sablier_dir"; return 1; }
    if run_cmd --quiet docker compose up -d; then
        echo "Success"
    else
        echo "Failed"
        return 1
    fi
}
|
|
|
|
# Remove Traefik configuration from additional server services
|
|
# Additional servers don't run local Traefik - routing is handled by core server
|
|
# Remove Traefik configuration from additional server services.
# Additional servers don't run a local Traefik — routing is handled by the
# core server — so local compose files must not carry traefik labels, the
# service-level traefik-network entry, or the external network definition.
configure_remote_server_routing() {
    debug_log "Removing Traefik labels from additional server services"

    log_info "Configuring services for additional server (removing Traefik labels)..."

    local dockge_compose="/opt/dockge/docker-compose.yml"
    local infra_compose="/opt/stacks/infrastructure/docker-compose.yml"

    # Dockge: strip traefik labels + network wiring; it stays reachable on 5001
    if [ -f "$dockge_compose" ]; then
        sed -i "/- 'traefik\./d" "$dockge_compose" 2>/dev/null
        sed -i "/^ - traefik-network$/d" "$dockge_compose" 2>/dev/null
        sed -i '/^ traefik-network:$/,/^ external: true$/d' "$dockge_compose" 2>/dev/null
        log_info "✓ Dockge: Traefik labels removed (accessible via port 5001)"
    fi

    # Infrastructure: also strip sablier labels; services use direct ports
    if [ -f "$infra_compose" ]; then
        sed -i "/- 'traefik\./d" "$infra_compose" 2>/dev/null
        sed -i "/- 'sablier\./d" "$infra_compose" 2>/dev/null
        sed -i "/^ - traefik-network$/d" "$infra_compose" 2>/dev/null
        sed -i '/^ traefik-network:$/,/^ external: true$/d' "$infra_compose" 2>/dev/null
        log_info "✓ Infrastructure: Traefik labels removed (accessible via direct ports)"
    fi

    log_success "Services configured for additional server - routing via core Traefik"
}
|
|
|
|
# Copy all stacks for remote server (except core)
|
|
# Copy all stacks for remote server (except core)
#
# Mirrors every non-core stack from the repo into /opt/stacks, pairing each
# with a copy of the repo-root .env that has secrets stripped, and localizes
# each compose file for this host.
#
# Globals (read): REPO_DIR, ACTUAL_USER
# Outputs: count of copied stacks via log_success
copy_all_stacks_for_remote() {
    debug_log "Copying all stacks for remote server"

    # Create base stacks directory
    sudo mkdir -p /opt/stacks
    sudo mkdir -p /opt/dockge
    sudo chown -R "$ACTUAL_USER:$ACTUAL_USER" /opt/stacks
    sudo chown -R "$ACTUAL_USER:$ACTUAL_USER" /opt/dockge

    # List of stacks to copy (all except core, dockge, and traefik)
    local stacks=(
        "alternatives"
        "dashboards"
        "homeassistant"
        "infrastructure"
        "media"
        "media-management"
        "monitoring"
        "productivity"
        "sablier"
        "transcoders"
        "utilities"
        "vpn"
        "wikis"
    )

    local copied_count=0
    for stack in "${stacks[@]}"; do
        local src_dir="$REPO_DIR/docker-compose/$stack"
        local dest_dir="/opt/stacks/$stack"

        # Skip if source doesn't exist
        if [ ! -d "$src_dir" ]; then
            debug_log "Skipping $stack - source not found"
            continue
        fi

        # Create destination directory
        mkdir -p "$dest_dir"

        # Copy docker-compose.yml and any config directories
        if [ -f "$src_dir/docker-compose.yml" ]; then
            cp "$src_dir/docker-compose.yml" "$dest_dir/"
            cp "$REPO_DIR/.env" "$dest_dir/"

            # Copy any subdirectories (config, etc.)
            for item in "$src_dir"/*; do
                if [ -d "$item" ]; then
                    cp -r "$item" "$dest_dir/"
                fi
            done

            # Clean up sensitive data from .env
            sed -i '/^AUTHELIA_/d' "$dest_dir/.env"
            sed -i '/^DEFAULT_PASSWORD=/d' "$dest_dir/.env"
            sed -i '/^CORE_SERVER_PASSWORD=/d' "$dest_dir/.env"

            # Localize compose file
            # NOTE(review): this calls localize_compose_labels while
            # setup_stacks_for_dockge/deploy_sablier_stack call
            # localize_yml_file — confirm the two helpers are intentionally
            # different for the remote path. Failure is tolerated (|| true).
            localize_compose_labels "$dest_dir/docker-compose.yml" || true

            copied_count=$((copied_count + 1))
            debug_log "Copied $stack to $dest_dir"
        fi
    done

    log_success "Copied $copied_count stacks to /opt/stacks/"
}
|
|
|
|
# Deploy Traefik stack (standalone for remote servers)
|
|
# Deploy Traefik stack (standalone for remote servers)
#
# Creates config/dynamic dirs, seeds a dashboard route file once (idempotent),
# and starts Traefik from the compose file previously copied by
# copy_all_stacks_for_remote().
# Globals (read): SERVER_HOSTNAME, DOMAIN
# Returns: 1 when the compose file is missing or the stack dir is unreachable.
deploy_traefik_stack() {
    debug_log "Deploying Traefik stack"

    local traefik_dir="/opt/stacks/traefik"

    # Create required directories
    mkdir -p "$traefik_dir/config"
    mkdir -p "$traefik_dir/dynamic"

    # Create placeholder routes.yml file in dynamic directory (only once)
    if [ ! -f "$traefik_dir/dynamic/routes.yml" ]; then
        log_info "Creating Traefik dashboard route for remote server..."
        # Unquoted EOF: ${SERVER_HOSTNAME}/${DOMAIN} expand now; the backtick
        # is escaped so it lands literally in the Traefik rule.
        cat > "$traefik_dir/dynamic/routes.yml" <<EOF
# Traefik Dynamic Routes for Remote Server
# Auto-generated by EZ-Homelab
#
# This file is watched by Traefik and reloaded automatically
# Add custom routes here if needed

http:
  routers:
    traefik-dashboard:
      rule: "Host(\`traefik.${SERVER_HOSTNAME}.${DOMAIN}\`)"
      entryPoints:
        - web
      service: api@internal
EOF
        log_success "Created routes.yml with dashboard route"
    fi

    # Verify docker-compose.yml exists
    if [ ! -f "$traefik_dir/docker-compose.yml" ]; then
        log_error "Traefik docker-compose.yml not found at $traefik_dir"
        log_error "This should have been copied by copy_all_stacks_for_remote()"
        return 1
    fi

    # Deploy.
    # Fix: guard the cd so compose never runs from the wrong directory.
    log_info "Starting Traefik container..."
    cd "$traefik_dir" || { log_error "Cannot enter $traefik_dir"; return 1; }
    run_cmd --quiet docker compose up -d

    # Verify container started.
    # Fix: match container NAMES only — plain `docker ps | grep traefik`
    # also matched image names of unrelated containers.
    if docker ps --format '{{.Names}}' | grep -q "traefik"; then
        log_success "Traefik stack deployed and running at $traefik_dir"
    else
        log_warning "Traefik container may not be running, check: docker ps -a | grep traefik"
    fi
}
|
|
|
|
#═══════════════════════════════════════════════════════════
|
|
# SECTION 11: UI, MENU & HELP FUNCTIONS
|
|
#═══════════════════════════════════════════════════════════
|
|
|
|
# Handle menu selection and set deployment flags
|
|
# Present the main menu in a loop and set the global deployment flags from
# the user's choice.
# Globals (written): MAIN_CHOICE, FORCE_SYSTEM_SETUP, INSTALL_NVIDIA,
#   DEPLOY_CORE, DEPLOY_INFRASTRUCTURE, DEPLOY_DASHBOARDS, SETUP_STACKS,
#   DEPLOY_REMOTE_SERVER
# Exits: 0 when the user picks q/quit.
handle_menu_selection() {
    # Menu selection loop
    while true; do
        # Show main menu
        show_main_menu
        echo -n "╚═════════════════════════════════════════════ "
        read -p "Choose : " MAIN_CHOICE

        # Fix: reset EVERY deployment flag up front so no selection inherits
        # a flag another branch forgot to clear (the original only cleared a
        # subset per branch — e.g. option 1 never touched
        # DEPLOY_REMOTE_SERVER/INSTALL_NVIDIA).
        FORCE_SYSTEM_SETUP=false
        INSTALL_NVIDIA=false
        DEPLOY_CORE=false
        DEPLOY_INFRASTRUCTURE=false
        DEPLOY_DASHBOARDS=false
        SETUP_STACKS=false
        DEPLOY_REMOTE_SERVER=false

        case $MAIN_CHOICE in
            1)
                log_info "Selected: Install Prerequisites"
                FORCE_SYSTEM_SETUP=true
                break
                ;;
            2)
                log_info "Selected: Deploy Core Server"
                # Check Docker first; re-show the menu when missing
                if ! _menu_require_docker; then
                    continue
                fi

                DEPLOY_CORE=true
                DEPLOY_INFRASTRUCTURE=true
                DEPLOY_DASHBOARDS=true
                SETUP_STACKS=true

                # Set required variables for core deployment
                set_required_vars_for_deployment "core"
                break
                ;;
            3)
                log_info "Selected: Deploy Additional Server"
                # Check Docker first; re-show the menu when missing
                if ! _menu_require_docker; then
                    continue
                fi

                DEPLOY_REMOTE_SERVER=true

                # Set required variables for remote deployment
                set_required_vars_for_deployment "remote"
                break
                ;;
            4)
                log_info "Selected: Install NVIDIA Drivers"
                INSTALL_NVIDIA=true
                break
                ;;
            [Qq]|[Qq]uit)
                log_info "Exiting..."
                exit 0
                ;;
            *)
                log_warning "Invalid choice '$MAIN_CHOICE'. Please select 1-4 or q to quit."
                sleep 2
                continue
                ;;
        esac
    done
}

# Helper for handle_menu_selection: verify Docker before a deployment option;
# on failure prints guidance, waits for Enter, and returns 1 so the menu
# loop repeats.
_menu_require_docker() {
    if check_docker_installed; then
        return 0
    fi
    echo "║"
    echo "║ ✗ Docker is not installed"
    echo "║ Please run Option 1 (Install Prerequisites) first"
    echo "║"
    echo -n "╚═════════════════════════════════════════════ "
    read -p "Press Enter to return to menu..."
    return 1
}
|
|
|
|
# Show deployment completion message with warnings
|
|
# Print the end-of-deployment summary: service URLs, any warnings collected
# during deployment, and documentation pointers.
# Globals (read): DOMAIN, SERVER_IP, SERVER_HOSTNAME,
#   GLOBAL_MISSING_VARS, TLS_ISSUES_SUMMARY
show_deployment_completion() {
    # Service URL summary
    printf '%s\n' \
        "║═════════════════════════════════════════════════════════════" \
        "║" \
        "║ Deployment Complete!" \
        "║" \
        "║ SSL Certificates may take a few minutes to be issued." \
        "║" \
        "║ Dockge https://dockge.${DOMAIN}" \
        "║ http://${SERVER_IP}:5001" \
        "║" \
        "║ Arcane https://arcane.${SERVER_HOSTNAME}.${DOMAIN}" \
        "║ http://${SERVER_IP}:3552" \
        "║" \
        "║ Homepage https://homepage.${DOMAIN}" \
        "║ http://${SERVER_IP}:3003" \
        "║"

    # Consolidated warnings accumulated during deployment, if any
    if [ -n "$GLOBAL_MISSING_VARS" ] || [ -n "$TLS_ISSUES_SUMMARY" ]; then
        printf '%s\n' \
            "║═════════════════════════════════════════════════════════════╗" \
            "║ ⚠️ WARNING ⚠️ ║" \
            "║ The following variables were not defined ║" \
            "║ If something isn't working as expected check these first ║" \
            "║ ║"

        if [ -n "$GLOBAL_MISSING_VARS" ]; then
            log_warning "Missing Environment Variables:"
            echo "$GLOBAL_MISSING_VARS"
            echo "║ ║"
        fi

        if [ -n "$TLS_ISSUES_SUMMARY" ]; then
            log_warning "TLS Configuration Issues:"
            echo "$TLS_ISSUES_SUMMARY"
            echo "║ ║"
        fi
    fi

    # Documentation / resource pointers
    printf '%s\n' \
        "║═════════════════════════════════════════════════════════════" \
        "║ RESOURCES" \
        "║" \
        "║ https://github.com/kelinfoxy/EZ-Homelab/blob/main/docs/Arcane-Configuration-Guide.md" \
        "║" \
        "║ Documentation: ~/EZ-Homelab/docs" \
        "║" \
        "║ Repository: https://github.com/kelinfoxy/EZ-Homelab" \
        "║ Wiki: https://github.com/kelinfoxy/EZ-Homelab/wiki" \
        "║ " \
        "╚═════════════════════════════════════════════════════════════" \
        ""

    debug_log "Script completed successfully"
}
|
|
|
|
# Show help function
|
|
# Print command-line usage information to stdout.
show_help() {
    cat <<EOF

EZ-Homelab Setup & Deployment Script

Usage: $0 [OPTIONS]

Options:
 -h, --help Show this help message
 -d, --dry-run Enable dry-run mode (show commands without executing)
 -c, --config FILE Specify configuration file (default: .env)
 -t, --test Run in test mode (validate configs without deploying)
 -v, --validate-only Only validate configuration and exit
 --verbose Enable verbose console logging

If no options are provided, the interactive menu will be shown.

See https://github.com/kelinfoxy/EZ-Homelab for documentation

EOF
}
|
|
|
|
# Validate configuration function
|
|
# Validate configuration function
#
# Checks that .env exists and loads, that the critical variables are set,
# that the core compose file parses, and that the host has internet access.
# Fix: prefers the Compose v2 plugin (`docker compose`) used everywhere else
# in this script, falling back to the legacy `docker-compose` binary.
# Returns: 1 on any hard failure (missing file/vars, bad compose syntax).
validate_configuration() {
    log_info "Validating configuration..."

    # Check if .env file exists
    if [ ! -f ".env" ]; then
        log_error "Configuration file .env not found."
        return 1
    fi

    # Load and check required environment variables
    if ! load_env_file; then
        log_error "Failed to load .env file."
        return 1
    fi

    # Check for critical variables (${!var} = indirect expansion)
    local required_vars=("DOMAIN" "SERVER_IP" "DUCKDNS_TOKEN" "AUTHELIA_ADMIN_PASSWORD_HASH")
    local missing_vars=()
    for var in "${required_vars[@]}"; do
        if [ -z "${!var}" ]; then
            missing_vars+=("$var")
        fi
    done

    if [ ${#missing_vars[@]} -gt 0 ]; then
        log_error "Missing required environment variables: ${missing_vars[*]}"
        return 1
    fi

    # Check Docker Compose files syntax
    log_info "Checking Docker Compose file syntax..."
    local compose_cmd=()
    if docker compose version &> /dev/null; then
        compose_cmd=(docker compose)
    elif command -v docker-compose &> /dev/null; then
        compose_cmd=(docker-compose)
    fi

    if [ ${#compose_cmd[@]} -gt 0 ]; then
        if ! "${compose_cmd[@]}" -f docker-compose/core/docker-compose.yml config -q; then
            log_error "Invalid syntax in core docker-compose.yml"
            return 1
        fi
        log_success "Core docker-compose.yml syntax is valid"
    else
        log_warning "docker compose not available for syntax check"
    fi

    # Check network connectivity (basic)
    log_info "Checking network connectivity..."
    if ! ping -c 1 google.com &> /dev/null; then
        log_warning "No internet connectivity detected"
    else
        log_success "Internet connectivity confirmed"
    fi

    log_success "Configuration validation completed successfully"
}
|
|
|
|
# Parse command line arguments function
|
|
# Parse command line arguments into the global mode flags.
# Globals (written): DRY_RUN, CONFIG_FILE, TEST_MODE, VALIDATE_ONLY, VERBOSE
# Exits: 0 after --help; 1 on an unknown option or a missing option argument.
parse_args() {
    DRY_RUN=false
    CONFIG_FILE=".env"
    TEST_MODE=false
    VALIDATE_ONLY=false
    VERBOSE=false
    while [[ $# -gt 0 ]]; do
        case $1 in
            -h|--help)
                show_help
                exit 0
                ;;
            -d|--dry-run)
                DRY_RUN=true
                shift
                ;;
            -c|--config)
                # Fix: guard against a missing value so `shift 2` cannot fail
                if [[ $# -lt 2 ]]; then
                    echo "Option $1 requires an argument" >&2
                    show_help
                    exit 1
                fi
                CONFIG_FILE="$2"
                shift 2
                ;;
            -t|--test)
                TEST_MODE=true
                shift
                ;;
            -v|--validate-only)
                VALIDATE_ONLY=true
                shift
                ;;
            --verbose)
                VERBOSE=true
                shift
                ;;
            *)
                # Fix: diagnostics belong on stderr (was stdout)
                echo "Unknown option: $1" >&2
                show_help
                exit 1
                ;;
        esac
    done
}
|
|
|
|
#═══════════════════════════════════════════════════════════
|
|
# SECTION 12: MAIN EXECUTION & SETUP
|
|
#═══════════════════════════════════════════════════════════
|
|
|
|
# Prepare deployment environment
|
|
# Prepare deployment environment
#
# Routes the special menu options (prerequisites install, NVIDIA install) —
# both of which EXIT the script — then verifies Docker + group membership
# and creates the /opt directory scaffolding.
#
# Globals (read): FORCE_SYSTEM_SETUP, INSTALL_NVIDIA, EUID, USER, SUDO_USER
# Globals (written): DOCKER_INSTALLED, USER_IN_DOCKER_GROUP, ACTUAL_USER
# Exits: 0 after prerequisites/NVIDIA install or when a re-login is needed;
#   1 when Docker/group preconditions are unmet as a non-root user.
prepare_deployment() {
    # Handle special menu options
    if [ "$FORCE_SYSTEM_SETUP" = true ]; then
        log_info "Installing prerequisites..."
        # Run the prerequisites script as root
        if [ "$EUID" -eq 0 ]; then
            ./scripts/install-prerequisites.sh
        else
            sudo ./scripts/install-prerequisites.sh
        fi
        log_success "Prerequisites installed successfully."
        exit 0
    fi

    if [ "$INSTALL_NVIDIA" = true ]; then
        log_info "Installing NVIDIA drivers..."
        install_nvidia
        exit 0
    fi

    # Check if system setup is needed
    # Only run system setup if Docker is not installed OR if running as root and Docker setup hasn't been done
    DOCKER_INSTALLED=false
    if command -v docker &> /dev/null && docker --version &> /dev/null; then
        DOCKER_INSTALLED=true
    fi

    # Check if current user is in docker group (or if we're root and will add them)
    USER_IN_DOCKER_GROUP=false
    if groups "$USER" 2>/dev/null | grep -q docker; then
        USER_IN_DOCKER_GROUP=true
    fi

    if [ "$EUID" -eq 0 ]; then
        # Running as root - check if we need to do system setup
        if [ "$DOCKER_INSTALLED" = false ] || [ "$USER_IN_DOCKER_GROUP" = false ]; then
            log_info "Docker not fully installed or user not in docker group. Performing system setup..."
            ./scripts/install-prerequisites.sh
            #echo ""
            # Group membership only takes effect on a fresh login session
            log_info "System setup complete. Please log out and back in, then run this script again."
            exit 0
        else
            log_info "Docker is already installed and user is in docker group. Skipping system setup."
        fi
    else
        # Not running as root — we can only verify, not fix, the preconditions
        if [ "$DOCKER_INSTALLED" = false ]; then
            log_error "Docker is not installed. Please run this script with sudo to perform system setup."
            exit 1
        fi
        if [ "$USER_IN_DOCKER_GROUP" = false ]; then
            log_error "Current user is not in the docker group. Please log out and back in, or run with sudo to fix group membership."
            exit 1
        fi
    fi

    # Ensure required directories exist
    log_info "Ensuring required directories exist..."
    if [ "$EUID" -eq 0 ]; then
        # Under sudo, hand ownership of /opt back to the invoking user
        ACTUAL_USER=${SUDO_USER:-$USER}
        mkdir -p /opt/stacks /opt/dockge
        chown -R "$ACTUAL_USER:$ACTUAL_USER" /opt
    else
        mkdir -p /opt/stacks /opt/dockge
    fi
    log_success "Directories prepared"
}
|
|
|
|
# Execute a command, honoring dry-run/test modes and optional output
# suppression.
#
# Usage:    run_cmd [--quiet] <command> [args...]
# Globals:  DRY_RUN, TEST_MODE (read)
# Outputs:  in dry-run/test mode, prints the command that would have run;
#           with --quiet, discards the command's stdout/stderr.
# Returns:  0 on success (or dry-run), 1 if the command fails.
run_cmd() {
    local quiet=false
    if [ "$1" = "--quiet" ]; then
        quiet=true
        shift
    fi

    if [ "$DRY_RUN" = true ] || [ "$TEST_MODE" = true ]; then
        # Show the full command so the user can see what would execute
        # (previously only an empty "[DRY-RUN/TEST] " prefix was printed).
        echo "[DRY-RUN/TEST] $*"
        return 0
    fi

    if [ "$quiet" = true ]; then
        if "$@" > /dev/null 2>&1; then
            return 0
        fi
    else
        if "$@"; then
            return 0
        fi
    fi
    # $* (not $@): inside a quoted string, $@ splits into multiple words and
    # log_error would drop everything past the first (ShellCheck SC2145).
    log_error "Command failed: $*"
    return 1
}
|
|
|
|
# Main entry point: parse arguments, load configuration, run the interactive
# menu, and dispatch the selected deployment.
#
# Globals (written): ENV_EXISTS
# Globals (read):    DRY_RUN, VALIDATE_ONLY, TEST_MODE,
#                    DEPLOY_CORE, DEPLOY_REMOTE_SERVER
main() {
    # $* (not $@): inside a quoted string, $@ splits into several words and
    # debug_log only records $1, so extra CLI arguments were silently lost
    # from the debug log (ShellCheck SC2145).
    debug_log "main() called with arguments: $*"
    log_info "EZ-Homelab Unified Setup & Deployment Script"

    # Parse command line arguments (may exit for --help / unknown options).
    parse_args "$@"

    echo ""
    echo "Loading... Please wait"
    echo ""

    if [ "$DRY_RUN" = true ]; then
        log_info "Dry-run mode enabled. Commands will be displayed but not executed."
    fi

    # --validate-only short-circuits: check configuration and stop.
    if [ "$VALIDATE_ONLY" = true ]; then
        log_info "Validation mode enabled. Checking configuration..."
        validate_configuration
        exit 0
    fi

    if [ "$TEST_MODE" = true ]; then
        log_info "Test mode enabled. Will validate and simulate deployment."
    fi

    # Load existing configuration, remembering whether a .env file was found.
    ENV_EXISTS=false
    if load_env_file; then
        ENV_EXISTS=true
        debug_log "Existing .env file loaded"
    else
        debug_log "No existing .env file found"
    fi

    clear
    echo ""
    # Interactive menu: sets DEPLOY_CORE / DEPLOY_REMOTE_SERVER and related
    # mode flags consumed below.
    handle_menu_selection

    echo "║"
    if [ "$DEPLOY_CORE" = true ]; then
        echo "║ CORE SERVER DEPLOYMENT"
    fi
    if [ "$DEPLOY_REMOTE_SERVER" = true ]; then
        echo "║ ADDITIONAL SERVER DEPLOYMENT"
    fi
    echo "║"

    # Prepare deployment environment (handles special cases like
    # prerequisites installation; may exit the script).
    prepare_deployment

    # Prompt for configuration values and persist them.
    validate_and_prompt_variables
    save_env_file

    # Deploy based on server type.
    if [ "$DEPLOY_REMOTE_SERVER" = true ]; then
        deploy_remote_server
        log_success "Remote server deployment complete!"
        show_deployment_completion
    else
        validate_secrets
        perform_deployment
        show_deployment_completion
    fi
}
|
|
|
|
#═══════════════════════════════════════════════════════════
# SCRIPT EXECUTION
#═══════════════════════════════════════════════════════════

# Hand every command-line argument through to main() unchanged.
main "$@"