Refactor docker-compose configurations and add new services
- Reorganize Authelia configuration files
- Add new dynamic routing files for Traefik
- Update various service docker-compose files
- Remove outdated templates and scripts
This commit is contained in:
@@ -1,366 +0,0 @@
|
||||
# EZ-Homelab TUI Deployment Script
|
||||
|
||||
## Script Launch Options
|
||||
|
||||
**Command Line Arguments:**
|
||||
- No arguments: Interactive TUI mode
|
||||
- `--yes` or `-y`: Automated deployment using complete .env file
|
||||
- `--save-only`: Answer questions and save .env without deploying
|
||||
- `--help`: Show help information
|
||||
|
||||
## .env File Structure Enhancement
|
||||
|
||||
Add deployment configuration section to .env:
|
||||
|
||||
```bash
|
||||
# ... existing configuration ...
|
||||
|
||||
##################################################
|
||||
# DEPLOYMENT CONFIGURATION (Optional - for automated deployment)
|
||||
# Set these values to skip the TUI and use --yes for automated install
|
||||
##################################################
|
||||
|
||||
# Deployment Type: SINGLE_SERVER, CORE_SERVER, REMOTE_SERVER
|
||||
DEPLOYMENT_TYPE=SINGLE_SERVER
|
||||
|
||||
# Service Selection (true/false)
|
||||
DEPLOY_DOCKGE=true
|
||||
DEPLOY_CORE=true
|
||||
DEPLOY_INFRASTRUCTURE=true
|
||||
DEPLOY_DASHBOARDS=true
|
||||
PREPARE_VPN=true
|
||||
PREPARE_MEDIA=true
|
||||
PREPARE_MEDIA_MGMT=true
|
||||
PREPARE_TRANSCODERS=true
|
||||
PREPARE_HOMEASSISTANT=true
|
||||
PREPARE_PRODUCTIVITY=true
|
||||
PREPARE_MONITORING=true
|
||||
PREPARE_UTILITIES=true
|
||||
PREPARE_WIKIS=true
|
||||
PREPARE_ALTERNATIVES=false
|
||||
|
||||
# System Configuration
|
||||
INSTALL_DOCKER=true
|
||||
INSTALL_NVIDIA=true
|
||||
AUTO_REBOOT=true
|
||||
```
|
||||
|
||||
## Pre-Flight Checks (Before TUI)
|
||||
|
||||
**System Prerequisites Check:**
|
||||
- Check OS compatibility (Ubuntu/Debian)
|
||||
- Check if running as root or with sudo
|
||||
- Check internet connectivity
|
||||
- Check available disk space (>10GB)
|
||||
- Check system architecture (amd64/arm64)
|
||||
|
||||
**Docker Check:**
|
||||
- Check if Docker is installed and running
|
||||
- Check if user is in docker group
|
||||
- If not installed: Prompt to install Docker
|
||||
- If installed but user not in group: Add user to group
|
||||
|
||||
**NVIDIA GPU Detection:**
|
||||
- Check for NVIDIA GPU presence (`lspci | grep -i nvidia`)
|
||||
- If GPU detected: Check for existing drivers
|
||||
- Check for NVIDIA Container Toolkit
|
||||
- If missing: Prompt to install drivers and toolkit
|
||||
- Detect GPU model for correct driver version
|
||||
|
||||
**Dependency Installation:**
|
||||
- Install required packages: `curl wget git htop nano ufw fail2ban unattended-upgrades apt-listchanges sshpass`
|
||||
- Update system packages
|
||||
- Install Python dependencies for TUI: `rich questionary python-dotenv`
|
||||
|
||||
## Enhanced Question Flow
|
||||
|
||||
## Initial Setup Check
|
||||
|
||||
**Question 0: Environment File Check**
|
||||
- Type: `confirm`
|
||||
- Message: "Found existing .env file with configuration. Use existing values where available?"
|
||||
- Default: true
|
||||
- Condition: Only show if .env exists and has valid values
|
||||
|
||||
**Question 0.5: Complete Configuration Check**
|
||||
- Type: `confirm`
|
||||
- Message: "Your .env file appears to be complete. Skip questions and proceed with deployment?"
|
||||
- Default: true
|
||||
- Condition: Only show if all required values are present and valid
|
||||
|
||||
## System Setup Questions
|
||||
|
||||
**Question 0.6: Docker Installation**
|
||||
- Type: `confirm`
|
||||
- Message: "Docker is not installed. Install Docker now?"
|
||||
- Default: true
|
||||
- Condition: Only show if Docker not detected
|
||||
|
||||
**Question 0.7: NVIDIA Setup**
|
||||
- Type: `confirm`
|
||||
- Message: "NVIDIA GPU detected. Install NVIDIA drivers and Container Toolkit?"
|
||||
- Default: true
|
||||
- Condition: Only show if GPU detected but drivers/toolkit missing
|
||||
|
||||
**Question 0.8: Auto Reboot**
|
||||
- Type: `confirm`
|
||||
- Message: "Some installations require a system reboot. Reboot automatically when needed?"
|
||||
- Default: false
|
||||
- Note: Warns about potential logout requirement for docker group changes
|
||||
|
||||
|
||||
|
||||
## Deployment Scenario Selection
|
||||
|
||||
**Question 1: Deployment Type**
|
||||
- Type: `select` (single choice)
|
||||
- Message: "Choose your Deployment Scenario"
|
||||
- Choices:
|
||||
- "🚀 Single Server Full Deployment - Deploy everything (Dockge, Core, Infrastructure, Dashboards) and prepare all stacks for Dockge"
|
||||
- "🏗️ Core Server Deployment - Deploy only core infrastructure (Dockge, Core, Dashboards) and prepare all stacks for Dockge"
|
||||
- "🔧 Remote Server Deployment - Deploy infrastructure tools (Dockge, Infrastructure, Dashboards) without core services and prepare all stacks for Dockge"
|
||||
- Default: First option
|
||||
|
||||
## Basic Configuration (Conditional - skip if valid values exist)
|
||||
|
||||
**Question 2: Domain Setup**
|
||||
- Type: `text`
|
||||
- Message: "Enter your DuckDNS subdomain (without .duckdns.org)"
|
||||
- Default: From .env or "example"
|
||||
- Validation: Required, alphanumeric + hyphens only
|
||||
- Condition: Skip if valid DOMAIN exists in .env
|
||||
|
||||
**Question 3: DuckDNS Token**
|
||||
- Type: `password`
|
||||
- Message: "Enter your DuckDNS token"
|
||||
- Validation: Required
|
||||
- Condition: Skip if valid DUCKDNS_TOKEN exists in .env
|
||||
|
||||
**Question 4: Server IP Address**
|
||||
- Type: `text`
|
||||
- Message: "Enter this server's IP address"
|
||||
- Default: From .env or auto-detected local IP
|
||||
- Validation: Valid IP address format
|
||||
- Condition: Skip if valid SERVER_IP exists in .env
|
||||
|
||||
**Question 5: Server Hostname**
|
||||
- Type: `text`
|
||||
- Message: "Enter this server's hostname"
|
||||
- Default: From .env or auto-detected hostname
|
||||
- Validation: Required
|
||||
- Condition: Skip if valid SERVER_HOSTNAME exists in .env
|
||||
|
||||
**Question 6: Timezone**
|
||||
- Type: `text`
|
||||
- Message: "Enter your timezone"
|
||||
- Default: From .env or "America/New_York"
|
||||
- Validation: Valid timezone format
|
||||
- Condition: Skip if valid TZ exists in .env
|
||||
|
||||
## Admin Credentials (Conditional - only for deployments with Core, skip if valid)
|
||||
|
||||
**Question 7: Admin Username**
|
||||
- Type: `text`
|
||||
- Message: "Enter admin username for Authelia SSO"
|
||||
- Default: From .env or "admin"
|
||||
- Validation: Required, alphanumeric only
|
||||
- Condition: Only show if deployment includes core services AND no valid AUTHELIA_ADMIN_USER exists
|
||||
|
||||
**Question 8: Admin Email**
|
||||
- Type: `text`
|
||||
- Message: "Enter admin email for Authelia SSO"
|
||||
- Default: From .env or "admin@{domain}"
|
||||
- Validation: Valid email format
|
||||
- Condition: Only show if deployment includes core services AND no valid AUTHELIA_ADMIN_EMAIL exists
|
||||
|
||||
**Question 9: Admin Password**
|
||||
- Type: `password`
|
||||
- Message: "Enter admin password for Authelia SSO (will be hashed)"
|
||||
- Validation: Minimum 8 characters
|
||||
- Condition: Only show if deployment includes core services AND no valid AUTHELIA_ADMIN_PASSWORD exists
|
||||
|
||||
## Multi-Server Configuration (Conditional - only for Remote Server Deployment, skip if valid)
|
||||
|
||||
**Question 10: Core Server IP**
|
||||
- Type: `text`
|
||||
- Message: "Enter the IP address of your core server (for shared TLS CA)"
|
||||
- Default: From .env
|
||||
- Validation: Valid IP address format
|
||||
- Condition: Only show for Remote Server Deployment AND no valid REMOTE_SERVER_IP exists
|
||||
|
||||
**Question 11: Core Server SSH User**
|
||||
- Type: `text`
|
||||
- Message: "Enter SSH username for core server access"
|
||||
- Default: From .env or current user
|
||||
- Validation: Required
|
||||
- Condition: Only show for Remote Server Deployment AND no valid REMOTE_SERVER_USER exists
|
||||
|
||||
**Question 12: Core Server SSH Password**
|
||||
- Type: `password`
|
||||
- Message: "Enter SSH password for core server (leave empty if using SSH keys)"
|
||||
- Validation: Optional
|
||||
- Condition: Only show for Remote Server Deployment AND no valid REMOTE_SERVER_PASSWORD exists
|
||||
|
||||
## Optional Advanced Configuration (skip if valid values exist)
|
||||
|
||||
**Question 13: VPN Setup**
|
||||
- Type: `confirm`
|
||||
- Message: "Would you like to configure VPN for download services?"
|
||||
- Default: true if VPN credentials exist in .env, false otherwise
|
||||
- Condition: Skip if user explicitly chooses to configure later
|
||||
|
||||
**Question 14: Surfshark Username** (Conditional)
|
||||
- Type: `text`
|
||||
- Message: "Enter your Surfshark VPN username"
|
||||
- Default: From .env
|
||||
- Validation: Required
|
||||
- Condition: Only show if VPN setup = true AND no valid SURFSHARK_USERNAME exists
|
||||
|
||||
**Question 15: Surfshark Password** (Conditional)
|
||||
- Type: `password`
|
||||
- Message: "Enter your Surfshark VPN password"
|
||||
- Validation: Required
|
||||
- Condition: Only show if VPN setup = true AND no valid SURFSHARK_PASSWORD exists
|
||||
|
||||
**Question 16: VPN Server Country**
|
||||
- Type: `text`
|
||||
- Message: "Preferred VPN server country"
|
||||
- Default: From .env or "Netherlands"
|
||||
- Condition: Only show if VPN setup = true AND no valid VPN_SERVER_COUNTRIES exists
|
||||
|
||||
**Question 17: Custom User/Group IDs**
|
||||
- Type: `confirm`
|
||||
- Message: "Use custom PUID/PGID for file permissions? (Default: 1000/1000)"
|
||||
- Default: true if custom PUID/PGID exist in .env, false otherwise
|
||||
|
||||
**Question 18: PUID** (Conditional)
|
||||
- Type: `text`
|
||||
- Message: "Enter PUID (user ID)"
|
||||
- Default: From .env or "1000"
|
||||
- Validation: Numeric
|
||||
- Condition: Only show if custom IDs = true AND no valid PUID exists
|
||||
|
||||
**Question 19: PGID** (Conditional)
|
||||
- Type: `text`
|
||||
- Message: "Enter PGID (group ID)"
|
||||
- Default: From .env or "1000"
|
||||
- Validation: Numeric
|
||||
- Condition: Only show if custom IDs = true AND no valid PGID exists
|
||||
|
||||
## Service Selection Summary (for all deployment types)
|
||||
|
||||
**Question 20: Core Services Selection**
|
||||
- Type: `checkbox` (multi-select)
|
||||
- Message: "Select which core services to deploy:"
|
||||
- Choices: (based on deployment type)
|
||||
- Single Server: [✓] DuckDNS, [✓] Traefik, [✓] Authelia, [✓] Sablier, [✓] Dockge
|
||||
- Core Server: [✓] DuckDNS, [✓] Traefik, [✓] Authelia, [✓] Sablier, [✓] Dockge
|
||||
- Remote Server: [ ] DuckDNS, [ ] Traefik, [ ] Authelia, [ ] Sablier, [✓] Dockge
|
||||
- Default: All enabled for selected deployment type
|
||||
- Note: Core services are required for the selected deployment type
|
||||
|
||||
**Question 21: Infrastructure Services Selection**
|
||||
- Type: `checkbox` (multi-select)
|
||||
- Message: "Select which infrastructure services to deploy:"
|
||||
- Choices:
|
||||
- [✓] Pi-hole (DNS + Ad blocking)
|
||||
- [✓] Watchtower (Auto container updates)
|
||||
- [✓] Dozzle (Docker log viewer)
|
||||
- [✓] Glances (System monitoring)
|
||||
- [✓] Code Server (VS Code in browser)
|
||||
- [✓] Docker Proxy (Secure socket access)
|
||||
- Default: All enabled
|
||||
- Condition: Always shown, but some may be pre-selected based on deployment type
|
||||
|
||||
**Question 22: Dashboard Services Selection**
|
||||
- Type: `checkbox` (multi-select)
|
||||
- Message: "Select which dashboard services to deploy:"
|
||||
- Choices:
|
||||
- [✓] Homepage (App dashboard)
|
||||
- [ ] Homarr (Modern dashboard)
|
||||
- Default: Homepage enabled, Homarr disabled
|
||||
- Condition: Always shown
|
||||
|
||||
**Question 23: Additional Stacks to Prepare**
|
||||
- Type: `checkbox` (multi-select)
|
||||
- Message: "Select which additional service stacks to prepare for Dockge:"
|
||||
- Choices:
|
||||
- [✓] VPN (qBittorrent with VPN)
|
||||
- [✓] Media (Jellyfin, Calibre-Web)
|
||||
- [✓] Media Management (*arr services, Prowlarr)
|
||||
- [✓] Transcoders (Tdarr, Unmanic)
|
||||
- [✓] Home Automation (Home Assistant, Node-RED, Zigbee2MQTT)
|
||||
- [✓] Productivity (Nextcloud, Gitea, Mealie)
|
||||
- [✓] Monitoring (Prometheus, Grafana, Uptime Kuma)
|
||||
- [✓] Utilities (Vaultwarden, Backrest, Duplicati)
|
||||
- [✓] Wikis (DokuWiki, BookStack, MediaWiki)
|
||||
- [ ] Alternatives (Portainer, Authentik, Plex)
|
||||
- Default: All enabled except Alternatives
|
||||
- Note: These stacks will be copied to /opt/stacks/ but not started
|
||||
|
||||
## Confirmation and Summary
|
||||
|
||||
**Question 24: Configuration Review**
|
||||
- Type: `confirm`
|
||||
- Message: "Review and confirm the following configuration:\n\n[Display formatted summary of all settings and selected services]\n\nProceed with deployment?"
|
||||
- Default: true
|
||||
|
||||
**Question 25: Deployment Action**
|
||||
- Type: `select`
|
||||
- Message: "What would you like to do?"
|
||||
- Choices:
|
||||
- "🚀 Proceed with deployment"
|
||||
- "💾 Save configuration to .env and exit (no deployment)"
|
||||
- "🔄 Change configuration values"
|
||||
- "❌ Exit without saving"
|
||||
- Default: First option
|
||||
- Condition: Only show if user declines deployment confirmation in Question 24
|
||||
|
||||
**Question 26: Save Location** (Conditional)
|
||||
- Type: `text`
|
||||
- Message: "Enter filename to save configuration (leave empty for .env)"
|
||||
- Default: ".env"
|
||||
- Validation: Valid filename
|
||||
- Condition: Only show if user chooses "Save configuration" in Question 25
|
||||
|
||||
## Post-Deployment Options
|
||||
|
||||
**Auto-Reboot Handling:**
|
||||
- If AUTO_REBOOT=true and reboot required: Automatically reboot at end
|
||||
- If AUTO_REBOOT=false and reboot required: Display manual reboot instructions
|
||||
- If no reboot required: Display success message and access URLs
|
||||
|
||||
## One-Step Installation Strategy
|
||||
|
||||
**Installation Order (to minimize reboots):**
|
||||
1. System updates and package installation (no reboot needed)
|
||||
2. Docker installation and user group addition (may require logout)
|
||||
3. NVIDIA driver installation (requires reboot)
|
||||
4. NVIDIA Container Toolkit (no additional reboot)
|
||||
5. Python dependencies (no reboot)
|
||||
6. EZ-Homelab deployment (no reboot)
|
||||
|
||||
**Reboot Optimization:**
|
||||
- Detect what requires reboot vs logout vs nothing
|
||||
- Perform all non-reboot actions first
|
||||
- Group reboot-requiring actions together
|
||||
- Use `newgrp docker` or similar to avoid logout for group changes
|
||||
- Only reboot once at the end if needed
|
||||
|
||||
**Logout Avoidance Techniques:**
|
||||
- Use `sg docker -c "command"` to run commands as docker group member
|
||||
- Reload systemd without full reboot for some services
|
||||
- Update environment variables in current session
|
||||
- Use `exec su -l $USER` to reload user environment
|
||||
|
||||
This approach ensures maximum convenience for users while handling all the complex system setup requirements.
|
||||
|
||||
This question flow ensures:
|
||||
- **Logical progression**: Basic setup first, then conditional advanced options
|
||||
- **Clear validation**: Each question validates input appropriately
|
||||
- **Conditional logic**: Questions only appear when relevant to the selected deployment type
|
||||
- **Security**: Passwords are properly masked
|
||||
- **User experience**: Clear messages and sensible defaults
|
||||
- **Error prevention**: Validation prevents common configuration mistakes
|
||||
|
||||
The TUI would then proceed to perform the actual deployment based on the collected configuration.
|
||||
@@ -1,397 +0,0 @@
|
||||
# EZ-Homelab TUI Deployment Script - Product Requirements Document
|
||||
|
||||
## Executive Summary
|
||||
|
||||
The EZ-Homelab TUI Deployment Script is a modern, user-friendly replacement for the existing complex bash deployment script. It provides an interactive terminal user interface (TUI) for deploying and managing a comprehensive homelab infrastructure using Docker Compose stacks, with support for automated deployment via configuration files.
|
||||
|
||||
## Objectives
|
||||
|
||||
### Primary Objectives
|
||||
- Replace the complex 1000+ line bash script with a maintainable Python TUI application
|
||||
- Provide three distinct deployment scenarios: Single Server Full, Core Server, and Remote Server
|
||||
- Enable both interactive and fully automated deployment workflows
|
||||
- Handle complete system setup including Docker and NVIDIA GPU configuration
|
||||
- Ensure maximum user convenience by minimizing required logouts/reboots
|
||||
|
||||
### Secondary Objectives
|
||||
- Improve user experience with modern TUI design using Rich + Questionary
|
||||
- Provide flexible service selection and configuration options
|
||||
- Support save-only mode for configuration preparation
|
||||
- Include comprehensive validation and error handling
|
||||
- Maintain backward compatibility with existing .env configurations
|
||||
|
||||
## Target Users
|
||||
|
||||
### Primary Users
|
||||
- **Homelab Enthusiasts**: Users setting up personal server infrastructure
|
||||
- **Self-Hosters**: Individuals deploying media servers, productivity tools, and monitoring
|
||||
- **System Administrators**: Those managing small-scale server deployments
|
||||
|
||||
### User Personas
|
||||
1. **Alex the Homelab Beginner**: New to self-hosting, needs guided setup with sensible defaults
|
||||
2. **Jordan the Power User**: Experienced user who wants fine-grained control over service selection
|
||||
3. **Sam the DevOps Engineer**: Needs automated deployment for multiple servers, prefers configuration files
|
||||
|
||||
### Technical Requirements
|
||||
- Ubuntu/Debian Linux systems (primary target)
|
||||
- Basic command-line familiarity
|
||||
- Internet access for package downloads
|
||||
- Administrative privileges (sudo access)
|
||||
|
||||
## Functional Requirements
|
||||
|
||||
### Core Features
|
||||
|
||||
#### 1. Deployment Scenarios
|
||||
**FR-DEP-001**: Support three deployment scenarios
|
||||
- Single Server Full: Deploy all core, infrastructure, and dashboard services
|
||||
- Core Server: Deploy only core infrastructure and dashboards
|
||||
- Remote Server: Deploy infrastructure and dashboards without core services
|
||||
|
||||
**FR-DEP-002**: Automated scenario selection based on user choice
|
||||
- Pre-select appropriate services for each scenario
|
||||
- Allow user customization within scenario constraints
|
||||
|
||||
#### 2. Configuration Management
|
||||
**FR-CONF-001**: Load existing .env configuration
|
||||
- Parse existing .env file on startup
|
||||
- Validate configuration completeness
|
||||
- Pre-populate TUI defaults with existing values
|
||||
|
||||
**FR-CONF-002**: Support deployment configuration section in .env
|
||||
- Parse [DEPLOYMENT] section with service selections
|
||||
- Enable fully automated deployment with --yes flag
|
||||
- Validate deployment configuration completeness
|
||||
|
||||
**FR-CONF-003**: Interactive configuration collection
|
||||
- Skip questions for valid existing values
|
||||
- Provide sensible defaults for all settings
|
||||
- Validate user input in real-time
|
||||
|
||||
#### 3. System Setup & Prerequisites
|
||||
**FR-SYS-001**: Pre-flight system checks
|
||||
- OS compatibility (Ubuntu/Debian)
|
||||
- Available disk space (>10GB)
|
||||
- Internet connectivity
|
||||
- System architecture validation
|
||||
|
||||
**FR-SYS-002**: Docker installation and configuration
|
||||
- Detect existing Docker installation
|
||||
- Install Docker if missing
|
||||
- Add user to docker group
|
||||
- Avoid requiring logout through smart command execution
|
||||
|
||||
**FR-SYS-003**: NVIDIA GPU support
|
||||
- Detect NVIDIA GPU presence
|
||||
- Install official NVIDIA drivers using official installers
|
||||
- Install NVIDIA Container Toolkit
|
||||
- Handle reboot requirements intelligently
|
||||
|
||||
**FR-SYS-004**: Dependency management
|
||||
- Install required system packages
|
||||
- Install Python dependencies (Rich, Questionary, python-dotenv)
|
||||
- Update system packages as needed
|
||||
|
||||
#### 4. Service Selection & Customization
|
||||
**FR-SVC-001**: Core services selection
|
||||
- Display scenario-appropriate core services
|
||||
- Allow include/exclude for flexibility
|
||||
- Enforce minimum requirements for each scenario
|
||||
|
||||
**FR-SVC-002**: Infrastructure services selection
|
||||
- Provide checkbox interface for all infrastructure services
|
||||
- Include descriptions and default selections
|
||||
- Allow complete customization
|
||||
|
||||
**FR-SVC-003**: Additional stacks preparation
|
||||
- Multi-select interface for optional service stacks
|
||||
- Copy selected stacks to /opt/stacks/ without starting
|
||||
- Enable later deployment via Dockge
|
||||
|
||||
#### 5. User Interface & Experience
|
||||
**FR-UI-001**: Interactive TUI design
|
||||
- Use Rich + Questionary for modern terminal interface
|
||||
- Provide clear, descriptive prompts
|
||||
- Include help text and validation messages
|
||||
|
||||
**FR-UI-002**: Conditional question flow
|
||||
- Show questions only when relevant
|
||||
- Skip questions with valid existing values
|
||||
- Provide logical question progression
|
||||
|
||||
**FR-UI-003**: Configuration summary and confirmation
|
||||
- Display formatted summary of all settings
|
||||
- Allow review before proceeding
|
||||
- Provide options to save, change, or exit
|
||||
|
||||
#### 6. Deployment Execution
|
||||
**FR-DEP-003**: One-step deployment process
|
||||
- Handle all installation and deployment in single script run
|
||||
- Minimize required logouts/reboots
|
||||
- Provide clear progress indication
|
||||
|
||||
**FR-DEP-004**: Smart reboot handling
|
||||
- Detect what requires reboot vs logout vs nothing
|
||||
- Perform reboot-requiring actions last
|
||||
- Support both automatic and manual reboot options
|
||||
|
||||
**FR-DEP-005**: Error handling and recovery
|
||||
- Provide clear error messages
|
||||
- Allow recovery from partial failures
|
||||
- Maintain configuration state across retries
|
||||
|
||||
### Command Line Interface
|
||||
|
||||
#### Launch Options
|
||||
**FR-CLI-001**: Support multiple launch modes
|
||||
- Interactive mode (default): Full TUI experience
|
||||
- Automated mode (--yes): Use complete .env configuration
|
||||
- Save-only mode (--save-only): Collect configuration without deploying
|
||||
- Help mode (--help): Display usage information
|
||||
|
||||
#### Configuration Output
|
||||
**FR-CLI-002**: Flexible configuration saving
|
||||
- Save to .env by default
|
||||
- Allow custom filename specification
|
||||
- Preserve existing .env structure and comments
|
||||
|
||||
## Non-Functional Requirements
|
||||
|
||||
### Performance
|
||||
**NFR-PERF-001**: Fast startup and validation
|
||||
- Complete pre-flight checks within 30 seconds
|
||||
- Validate .env file parsing within 5 seconds
|
||||
- Provide responsive TUI interaction
|
||||
|
||||
**NFR-PERF-002**: Efficient deployment
|
||||
- Complete full deployment within 15-30 minutes
|
||||
- Provide real-time progress indication
|
||||
- Handle large downloads gracefully
|
||||
|
||||
### Reliability
|
||||
**NFR-REL-001**: Robust error handling
|
||||
- Graceful handling of network failures
|
||||
- Clear error messages with recovery suggestions
|
||||
- Maintain system stability during installation
|
||||
|
||||
**NFR-REL-002**: Configuration validation
|
||||
- Validate all user inputs before proceeding
|
||||
- Check for conflicting configurations
|
||||
- Prevent deployment with invalid settings
|
||||
|
||||
### Usability
|
||||
**NFR-USAB-001**: Intuitive interface design
|
||||
- Clear, descriptive prompts and help text
|
||||
- Logical question flow and grouping
|
||||
- Consistent terminology and formatting
|
||||
|
||||
**NFR-USAB-002**: Accessibility considerations
|
||||
- Support keyboard navigation
|
||||
- Provide clear visual feedback
|
||||
- Include progress indicators for long operations
|
||||
|
||||
### Security
|
||||
**NFR-SEC-001**: Secure credential handling
|
||||
- Mask password inputs in TUI
|
||||
- Store credentials securely in .env
|
||||
- Validate certificate and token formats
|
||||
|
||||
**NFR-SEC-002**: Safe system modifications
|
||||
- Require explicit user confirmation for system changes
|
||||
- Provide clear warnings for potentially disruptive actions
|
||||
- Maintain secure file permissions
|
||||
|
||||
### Compatibility
|
||||
**NFR-COMP-001**: OS compatibility
|
||||
- Primary support for Ubuntu 20.04+ and Debian 11+
|
||||
- Graceful handling of different package managers
|
||||
- Architecture support for amd64 and arm64
|
||||
|
||||
**NFR-COMP-002**: Backward compatibility
|
||||
- Read existing .env files without modification
|
||||
- Support legacy configuration formats
|
||||
- Provide migration path for old configurations
|
||||
|
||||
## Technical Requirements
|
||||
|
||||
### Technology Stack
|
||||
**TR-TECH-001**: Core technologies
|
||||
- Python 3.8+ as runtime environment
|
||||
- Rich library for terminal formatting
|
||||
- Questionary library for interactive prompts
|
||||
- python-dotenv for configuration parsing
|
||||
|
||||
**TR-TECH-002**: System integration
|
||||
- Docker and Docker Compose for container management
|
||||
- systemd for service management
|
||||
- apt/dpkg for package management
|
||||
- Official NVIDIA installation tools
|
||||
|
||||
### Architecture
|
||||
**TR-ARCH-001**: Modular design
|
||||
- Separate concerns for UI, validation, and deployment
|
||||
- Configurable question flow engine
|
||||
- Pluggable deployment modules
|
||||
|
||||
**TR-ARCH-002**: State management
|
||||
- Maintain configuration state throughout TUI flow
|
||||
- Support save/restore of partial configurations
|
||||
- Handle interruption and resumption gracefully
|
||||
|
||||
### Dependencies
|
||||
**TR-DEPS-001**: Python packages
|
||||
- rich>=12.0.0
|
||||
- questionary>=1.10.0
|
||||
- python-dotenv>=0.19.0
|
||||
- pyyaml>=6.0 (for configuration parsing)
|
||||
|
||||
**TR-DEPS-002**: System packages
|
||||
- curl, wget, git (for downloads and version control)
|
||||
- htop, nano, vim (system monitoring and editing)
|
||||
- ufw, fail2ban (security)
|
||||
- unattended-upgrades, apt-listchanges (system maintenance)
|
||||
- sshpass (for multi-server setup)
|
||||
|
||||
## User Experience Requirements
|
||||
|
||||
### Onboarding Flow
|
||||
**UX-ONB-001**: First-time user experience
|
||||
- Clear welcome message and overview
|
||||
- Guided setup with sensible defaults
|
||||
- Help text for each question
|
||||
|
||||
**UX-ONB-002**: Returning user experience
|
||||
- Load existing configuration automatically
|
||||
- Skip redundant questions
|
||||
- Provide quick confirmation for known setups
|
||||
|
||||
### Interaction Patterns
|
||||
**UX-INT-001**: Question flow optimization
|
||||
- Group related questions together
|
||||
- Provide progress indication
|
||||
- Allow backtracking and editing
|
||||
|
||||
**UX-INT-002**: Feedback and validation
|
||||
- Real-time input validation
|
||||
- Clear error messages with suggestions
|
||||
- Success confirmations for completed steps
|
||||
|
||||
### Error Recovery
|
||||
**UX-ERR-001**: Graceful error handling
|
||||
- Clear error descriptions
|
||||
- Suggested recovery actions
|
||||
- Option to retry or modify configuration
|
||||
|
||||
**UX-ERR-002**: Partial failure recovery
|
||||
- Save progress on interruption
|
||||
- Allow resumption from last completed step
|
||||
- Provide rollback options where possible
|
||||
|
||||
## Success Criteria
|
||||
|
||||
### Functional Completeness
|
||||
- [ ] All three deployment scenarios work correctly
|
||||
- [ ] Automated deployment with --yes flag functions
|
||||
- [ ] Save-only mode preserves configuration
|
||||
- [ ] Docker and NVIDIA installation work reliably
|
||||
- [ ] Service selection and customization work as specified
|
||||
|
||||
### User Experience
|
||||
- [ ] TUI is intuitive and responsive
|
||||
- [ ] Configuration validation prevents errors
|
||||
- [ ] Error messages are helpful and actionable
|
||||
- [ ] Deployment completes without requiring logout/reboot (except when absolutely necessary)
|
||||
|
||||
### Technical Quality
|
||||
- [ ] Code is well-structured and maintainable
|
||||
- [ ] Comprehensive error handling implemented
|
||||
- [ ] Configuration parsing is robust
|
||||
- [ ] System integration works reliably across Ubuntu/Debian versions
|
||||
|
||||
### Performance Targets
|
||||
- [ ] Pre-flight checks complete within 30 seconds
|
||||
- [ ] TUI startup within 5 seconds
|
||||
- [ ] Full deployment completes within 30 minutes
|
||||
- [ ] Memory usage remains under 200MB during execution
|
||||
|
||||
## Implementation Plan
|
||||
|
||||
### Phase 1: Core Infrastructure (Week 1-2)
|
||||
- Set up Python project structure
|
||||
- Implement basic TUI framework with Rich + Questionary
|
||||
- Create configuration parsing and validation
|
||||
- Implement pre-flight system checks
|
||||
|
||||
### Phase 2: System Setup (Week 3-4)
|
||||
- Implement Docker installation and configuration
|
||||
- Add NVIDIA GPU detection and official driver installation
|
||||
- Create dependency management system
|
||||
- Implement smart reboot/logout handling
|
||||
|
||||
### Phase 3: Configuration Management (Week 5-6)
|
||||
- Build dynamic question flow engine
|
||||
- Implement .env parsing and [DEPLOYMENT] section support
|
||||
- Create configuration validation system
|
||||
- Add save-only functionality
|
||||
|
||||
### Phase 4: Deployment Logic (Week 7-8)
|
||||
- Implement deployment scenario logic
|
||||
- Create service selection and preparation system
|
||||
- Build deployment execution engine
|
||||
- Add progress indication and error handling
|
||||
|
||||
### Phase 5: Testing & Polish (Week 9-10)
|
||||
- Comprehensive testing across Ubuntu/Debian versions
|
||||
- User experience testing and refinement
|
||||
- Documentation and help system
|
||||
- Performance optimization
|
||||
|
||||
## Dependencies & Constraints
|
||||
|
||||
### External Dependencies
|
||||
- **NVIDIA Official Installers**: Must use official NVIDIA installation methods
|
||||
- **Docker Official Installation**: Use official Docker installation scripts
|
||||
- **Ubuntu/Debian Package Repositories**: Rely on standard package sources
|
||||
|
||||
### Technical Constraints
|
||||
- **Python Version**: Minimum Python 3.8 required for modern type hints
|
||||
- **Terminal Compatibility**: Must work in standard Linux terminals
|
||||
- **Network Requirements**: Internet access required for downloads
|
||||
- **Privilege Requirements**: sudo access required for system modifications
|
||||
|
||||
### Business Constraints
|
||||
- **Open Source**: Must remain free and open source
|
||||
- **Backward Compatibility**: Should not break existing .env files
|
||||
- **Documentation**: Comprehensive documentation required
|
||||
- **Community Support**: Should be maintainable by community contributors
|
||||
|
||||
## Risk Assessment
|
||||
|
||||
### High Risk Items
|
||||
- **NVIDIA Installation**: Complex driver installation across different GPU models
|
||||
- **Reboot Handling**: Ensuring one-step installation without logout requirements
|
||||
- **Configuration Validation**: Complex validation logic for interdependent settings
|
||||
|
||||
### Mitigation Strategies
|
||||
- **Testing**: Extensive testing on multiple hardware configurations
|
||||
- **Fallback Options**: Provide manual installation instructions as backup
|
||||
- **Modular Design**: Allow components to be disabled/enabled independently
|
||||
- **User Communication**: Clear warnings and alternative options for complex scenarios
|
||||
|
||||
## Future Enhancements
|
||||
|
||||
### Planned Features
|
||||
- Support for additional Linux distributions
|
||||
- Web-based configuration interface
|
||||
- Integration with configuration management tools
|
||||
- Advanced deployment templates and presets
|
||||
|
||||
### Maintenance Considerations
|
||||
- Regular updates for new NVIDIA driver versions
|
||||
- Compatibility testing with new Ubuntu/Debian releases
|
||||
- Community contribution guidelines and testing frameworks
|
||||
|
||||
---
|
||||
|
||||
*This PRD serves as the authoritative specification for the EZ-Homelab TUI Deployment Script. All development decisions should reference this document to ensure alignment with user requirements and technical constraints.*
|
||||
|
||||
README-TUI.md
@@ -1,219 +0,0 @@
|
||||
# EZ-Homelab TUI Deployment Script
|
||||
|
||||
A modern, user-friendly Terminal User Interface (TUI) replacement for the complex bash deployment script. Built with Python, Rich, and Questionary for an intuitive setup experience.
|
||||
|
||||
## Features
|
||||
|
||||
- **Interactive TUI**: Beautiful terminal interface with conditional question flow
|
||||
- **Automated Deployment**: Use `--yes` flag for hands-free deployment with complete .env file
|
||||
- **Save-Only Mode**: Configure without deploying using `--save-only` flag
|
||||
- **Smart Validation**: Pre-flight checks ensure system readiness
|
||||
- **Three Deployment Scenarios**:
|
||||
- Single Server Full: Deploy everything (core + infrastructure + dashboards)
|
||||
- Core Server: Deploy only essential services
|
||||
- Remote Server: Deploy infrastructure for multi-server setups
|
||||
- **Flexible Service Selection**: Choose which services to deploy and prepare for Dockge
|
||||
|
||||
## Quick Start
|
||||
|
||||
### Prerequisites
|
||||
|
||||
- Ubuntu 20.04+ or Debian 11+
|
||||
- Python 3.8+
|
||||
- Internet connection
|
||||
- DuckDNS account (for dynamic DNS)
|
||||
|
||||
### Installation
|
||||
|
||||
1. **Clone the repository:**
|
||||
```bash
|
||||
git clone https://github.com/kelinfoxy/EZ-Homelab.git
|
||||
cd EZ-Homelab
|
||||
```
|
||||
|
||||
2. **Install dependencies:**
|
||||
```bash
|
||||
pip install -r requirements.txt
|
||||
```
|
||||
|
||||
3. **Copy environment template:**
|
||||
```bash
|
||||
cp .env.example .env
|
||||
```
|
||||
|
||||
### Usage
|
||||
|
||||
#### Interactive Setup (Recommended)
|
||||
```bash
|
||||
python scripts/ez-homelab.py
|
||||
```
|
||||
|
||||
#### Automated Deployment
|
||||
```bash
|
||||
# Complete your .env file first, then:
|
||||
python scripts/ez-homelab.py --yes
|
||||
```
|
||||
|
||||
#### Save Configuration Only
|
||||
```bash
|
||||
python scripts/ez-homelab.py --save-only
|
||||
```
|
||||
|
||||
## Command Line Options
|
||||
|
||||
- No flags: Interactive TUI mode
|
||||
- `--yes` or `-y`: Automated deployment using complete .env file
|
||||
- `--save-only`: Answer questions and save .env without deploying
|
||||
- `--help`: Show help message
|
||||
|
||||
## Deployment Scenarios
|
||||
|
||||
### 1. Single Server Full Deployment
|
||||
Deploys everything on one server:
|
||||
- Core services (DuckDNS, Traefik, Authelia, Sablier, Dockge)
|
||||
- Infrastructure services (Pi-hole, Dozzle, Glances, etc.)
|
||||
- Dashboard services (Homepage, Homarr)
|
||||
- Prepares all additional stacks for Dockge
|
||||
|
||||
### 2. Core Server Deployment
|
||||
Deploys only essential services:
|
||||
- Core services + Dashboards
|
||||
- Prepares all additional stacks for Dockge
|
||||
- Suitable for dedicated core server in multi-server setup
|
||||
|
||||
### 3. Remote Server Deployment
|
||||
Deploys infrastructure without core services:
|
||||
- Infrastructure services + Dashboards + Dockge
|
||||
- For application servers in multi-server setup
|
||||
- Requires core server to be set up first
|
||||
|
||||
## Configuration
|
||||
|
||||
The script uses a comprehensive `.env` file with two main sections:
|
||||
|
||||
### Required Configuration
|
||||
```bash
|
||||
# Basic server settings
|
||||
PUID=1000
|
||||
PGID=1000
|
||||
TZ=America/New_York
|
||||
SERVER_IP=192.168.1.100
|
||||
SERVER_HOSTNAME=debian
|
||||
|
||||
# Domain settings
|
||||
DUCKDNS_SUBDOMAINS=yourdomain
|
||||
DUCKDNS_TOKEN=your-token
|
||||
|
||||
# Admin credentials (for core servers)
|
||||
DEFAULT_USER=admin
|
||||
DEFAULT_PASSWORD=secure-password
|
||||
DEFAULT_EMAIL=admin@yourdomain.duckdns.org
|
||||
```
|
||||
|
||||
### Deployment Configuration (Optional)
|
||||
```bash
|
||||
# For automated deployment
|
||||
DEPLOYMENT_TYPE=SINGLE_SERVER
|
||||
AUTO_REBOOT=false
|
||||
INSTALL_DOCKER=true
|
||||
INSTALL_NVIDIA=true
|
||||
|
||||
# Service selection
|
||||
DEPLOY_DOCKGE=true
|
||||
DEPLOY_CORE=true
|
||||
DEPLOY_INFRASTRUCTURE=true
|
||||
DEPLOY_DASHBOARDS=true
|
||||
PREPARE_VPN=true
|
||||
PREPARE_MEDIA=true
|
||||
# ... etc
|
||||
```
|
||||
|
||||
## System Requirements
|
||||
|
||||
- **OS**: Ubuntu 20.04+ or Debian 11+
|
||||
- **Python**: 3.8 or higher
|
||||
- **RAM**: Minimum 4GB (8GB recommended)
|
||||
- **Disk**: 10GB free space minimum
|
||||
- **Network**: Internet connection for downloads
|
||||
|
||||
## What Gets Installed
|
||||
|
||||
### System Setup
|
||||
- Docker and Docker Compose
|
||||
- NVIDIA drivers and Container Toolkit (if GPU detected)
|
||||
- UFW firewall configuration
|
||||
- Automatic security updates
|
||||
- Required system packages
|
||||
|
||||
### Docker Networks
|
||||
- `traefik-network`: For services behind Traefik
|
||||
- `homelab-network`: General service communication
|
||||
- `media-network`: Media service isolation
|
||||
|
||||
### Services Deployed
|
||||
Based on your deployment scenario and selections.
|
||||
|
||||
## Post-Installation
|
||||
|
||||
After successful deployment:
|
||||
|
||||
1. **Access Dockge**: `https://dockge.yourdomain.duckdns.org`
|
||||
2. **Configure Authelia**: `https://auth.yourdomain.duckdns.org` (if core services deployed)
|
||||
3. **Start Additional Services**: Use Dockge web UI to deploy prepared stacks
|
||||
4. **Access Homepage**: `https://homepage.yourdomain.duckdns.org`
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### Common Issues
|
||||
|
||||
**"Python version 3.8+ required"**
|
||||
- Upgrade Python: `sudo apt install python3.10`
|
||||
|
||||
**"Missing required dependency"**
|
||||
- Install dependencies: `pip install -r requirements.txt`
|
||||
|
||||
**"Pre-flight checks failed"**
|
||||
- Ensure you're running on Ubuntu/Debian
|
||||
- Check internet connectivity
|
||||
- Verify sufficient disk space
|
||||
|
||||
**"Deployment failed"**
|
||||
- Check Docker installation: `docker --version`
|
||||
- Verify .env configuration
|
||||
- Review deployment logs
|
||||
|
||||
### Getting Help
|
||||
|
||||
- Check the [docs/](docs/) directory for detailed guides
|
||||
- Review [troubleshooting](docs/quick-reference.md) in the quick reference
|
||||
- Use the AI assistant in VS Code for EZ-Homelab specific help
|
||||
|
||||
## Development
|
||||
|
||||
### Running Tests
|
||||
```bash
|
||||
# Basic syntax check
|
||||
python -m py_compile scripts/ez-homelab.py
|
||||
|
||||
# YAML validation
|
||||
python -c "import yaml; yaml.safe_load(open('config-templates/traefik/dynamic/external-host-production.yml'))"
|
||||
```
|
||||
|
||||
### Code Structure
|
||||
- `EZHomelabTUI` class: Main application logic
|
||||
- Pre-flight checks and validation
|
||||
- Interactive question flow
|
||||
- Deployment orchestration
|
||||
- Configuration management
|
||||
|
||||
## Contributing
|
||||
|
||||
1. Fork the repository
|
||||
2. Create a feature branch
|
||||
3. Make your changes
|
||||
4. Test thoroughly
|
||||
5. Submit a pull request
|
||||
|
||||
## License
|
||||
|
||||
See [LICENSE](LICENSE) file for details.
|
||||
73
TASKS.md
73
TASKS.md
@@ -1,73 +0,0 @@
|
||||
# EZ-Homelab Script Refactoring Tasks
|
||||
|
||||
## Overview
|
||||
This document outlines the updated plan for refactoring `ez-homelab.sh` based on user requirements. Tasks are prioritized by impact and dependencies. All files are in the same repo. Dry-run output should be user-friendly.
|
||||
|
||||
## Task Categories
|
||||
|
||||
### 1. Menu and Workflow Updates (High Priority)
|
||||
- **1.1: Create `install-prerequisites.sh`**
|
||||
Extract `system_setup()` into a standalone script that must run as root/sudo. Update Option 1 to launch it with sudo if needed.
|
||||
*Effort*: 2-3 hours. *Files*: New `install-prerequisites.sh`, modify `ez-homelab.sh`.
|
||||
|
||||
- **1.2: Update Menu Option 3 Prompts**
|
||||
For Option 3, check if default values are valid. If yes, prompt to use defaults or not. If not, prompt for all REQUIRED_VARS to ensure easy deployment. Reword prompts to clarify REMOTE_SERVER_* vars are for core server cert copying.
|
||||
*Effort*: 1-2 hours. *Files*: `ez-homelab.sh` (`validate_and_prompt_variables()`, `prompt_for_variable()`).
|
||||
|
||||
- **1.3: Implement Menu Option 4 (NVIDIA Installation)**
|
||||
Use `nvidia-detect` to determine GPU and official installer. Install NVIDIA drivers and Container Toolkit. Handle no-GPU gracefully.
|
||||
*Effort*: 3-4 hours. *Files*: `ez-homelab.sh` or `install-prerequisites.sh`.
|
||||
|
||||
### 2. Bug Fixes (High Priority)
|
||||
- **2.1: Remove Hardcoded Values**
|
||||
Replace "kelin", "kelinreij", etc., with variables like `${DOMAIN}`, `${SERVER_IP}` in completion messages and examples.
|
||||
*Effort*: 1 hour. *Files*: `ez-homelab.sh`.
|
||||
|
||||
- **2.2: Fix HOMEPAGE_ALLOWED_HOSTS**
|
||||
Instead of hardcoding port (3003), extract the proper port from the Homepage compose file. Ensure line is `HOMEPAGE_ALLOWED_HOSTS="homepage.${DOMAIN},${SERVER_IP}:<extracted_port>"`.
|
||||
*Effort*: 30 minutes. *Files*: `ez-homelab.sh` (`save_env_file()`).
|
||||
|
||||
### 3. New Features and Enhancements (Medium Priority)
|
||||
- **3.1: Add Argument Parsing**
|
||||
Implement CLI args (e.g., `--deploy-core`, `--dry-run`, `--verbose`) using `getopts` to bypass menu.
|
||||
*Effort*: 2-3 hours. *Files*: `ez-homelab.sh` (`main()`).
|
||||
|
||||
- **3.2: Add Dry-Run Mode**
|
||||
`--dry-run` simulates deployment: validate configs, show actions, log verbosely without executing. Output user-friendly summaries.
|
||||
*Effort*: 2 hours. *Files*: `ez-homelab.sh` (`perform_deployment()`).
|
||||
|
||||
- **3.3: Enhance Console Logging for Verbose Mode**
|
||||
Update `log_*` functions to output to console when `VERBOSE=true`.
|
||||
*Effort*: 1 hour. *Files*: `ez-homelab.sh`.
|
||||
|
||||
- **3.4: Improve Error Handling**
|
||||
Remove `set -e`; log errors but continue where possible. Use `||` for non-critical failures.
|
||||
*Effort*: 2 hours. *Files*: `ez-homelab.sh`.
|
||||
|
||||
### 4. TLS and Multi-Server Logic Refinements (Medium Priority)
|
||||
- **4.1: Clarify Variable Usage**
|
||||
Ensure prompts distinguish: `SERVER_IP` for local machine, `REMOTE_SERVER_*` for core server. `${DOMAIN}` prompted even for additional servers (needed for configs).
|
||||
*Effort*: 1-2 hours. *Files*: `ez-homelab.sh`.
|
||||
|
||||
### 5. Function Organization and Code Quality (Low Priority)
|
||||
- **5.1: Audit and Improve Placeholder/Env Functions**
|
||||
Rename `replace_env_placeholders()` to `localize_yml_file()` and `enhance_placeholder_replacement()` to `localize_deployment()`. Add error aggregation in bulk function. Make single-file function robust (permissions, backups only for existing targets, no repo modifications). Add post-replacement validation for Traefik labels. Handle special characters in values (passwords, hashes).
|
||||
*Effort*: 2-3 hours. *Files*: `ez-homelab.sh`.
|
||||
|
||||
- **5.2: Modularize Code with More Functions**
|
||||
Break `main()` into `parse_args()`, `handle_menu_choice()`, `prepare_deployment()`. Extract repeated logic (env copying, dir creation).
|
||||
*Effort*: 3-4 hours. *Files*: `ez-homelab.sh`.
|
||||
|
||||
- **5.3: Fix Deployment Flow**
|
||||
Streamline `perform_deployment()`: consistent step numbering, better recovery, dry-run integration.
|
||||
*Effort*: 1 hour. *Files*: `ez-homelab.sh`.
|
||||
|
||||
## Implementation Order
|
||||
1. Start with Bug Fixes (2.1-2.2) and Menu Option 1 (1.1).
|
||||
2. Then New Features (3.1-3.4) and Menu Options (1.2-1.3).
|
||||
3. Refinements (4.1, 5.1-5.3).
|
||||
|
||||
## Notes
|
||||
- Test after each task: interactive menu, args, dry-run, multi-server.
|
||||
- Dependencies: NVIDIA tasks require `nvidia-detect`; dry-run depends on args.
|
||||
- Risks: Error handling changes may mask issues; validate thoroughly.
|
||||
@@ -16,7 +16,7 @@ services:
|
||||
- homelab-network
|
||||
- traefik-network
|
||||
ports:
|
||||
- "9000:9000"
|
||||
- '9000:9000'
|
||||
volumes:
|
||||
- /var/run/docker.sock:/var/run/docker.sock
|
||||
- portainer-data:/data
|
||||
@@ -26,15 +26,15 @@ services:
|
||||
# TRAEFIK CONFIGURATION
|
||||
# ==========================================
|
||||
# Service metadata
|
||||
- 'homelab.category=alternatives"
|
||||
- 'homelab.description=Docker container management UI (Alternative to Dockge)"
|
||||
- 'traefik.enable=true"
|
||||
- 'traefik.docker.network=traefik-network"
|
||||
- 'traefik.http.routers.portainer.rule=Host(`portainer.${DOMAIN}`)"
|
||||
- 'traefik.http.routers.portainer.entrypoints=websecure"
|
||||
- 'traefik.http.routers.portainer.tls.certresolver=letsencrypt"
|
||||
- 'traefik.http.routers.portainer.middlewares=authelia@docker"
|
||||
- 'traefik.http.services.portainer.loadbalancer.server.port=9000"
|
||||
- 'homelab.category=alternatives'
|
||||
- 'homelab.description=Docker container management UI (Alternative to Dockge)'
|
||||
- 'traefik.enable=true'
|
||||
- 'traefik.docker.network=traefik-network'
|
||||
- 'traefik.http.routers.portainer.rule=Host(`portainer.${DOMAIN}`)'
|
||||
- 'traefik.http.routers.portainer.entrypoints=websecure'
|
||||
- 'traefik.http.routers.portainer.tls.certresolver=letsencrypt'
|
||||
- 'traefik.http.routers.portainer.middlewares=authelia@docker'
|
||||
- 'traefik.http.services.portainer.loadbalancer.server.port=9000'
|
||||
|
||||
# Authentik - Alternative SSO/Identity Provider with Web UI
|
||||
# Access at: https://authentik.${DOMAIN}
|
||||
@@ -50,7 +50,7 @@ services:
|
||||
- homelab-network
|
||||
- traefik-network
|
||||
ports:
|
||||
- "9000:9000"
|
||||
- '9000:9000'
|
||||
volumes:
|
||||
- /opt/stacks/authentik/media:/media
|
||||
- /opt/stacks/authentik/custom-templates:/templates
|
||||
@@ -66,15 +66,15 @@ services:
|
||||
# TRAEFIK CONFIGURATION
|
||||
# ==========================================
|
||||
# Service metadata
|
||||
- 'homelab.category=alternatives"
|
||||
- 'homelab.description=SSO/Identity provider with web UI (Alternative to Authelia)"
|
||||
- 'traefik.enable=true"
|
||||
- 'traefik.docker.network=traefik-network"
|
||||
- 'traefik.http.routers.authentik.rule=Host(`authentik.${DOMAIN}`)"
|
||||
- 'traefik.http.routers.authentik.entrypoints=websecure"
|
||||
- 'traefik.http.routers.authentik.tls.certresolver=letsencrypt"
|
||||
- 'traefik.http.routers.authentik.middlewares=authelia@docker"
|
||||
- 'traefik.http.services.authentik.loadbalancer.server.port=9000"
|
||||
- 'homelab.category=alternatives'
|
||||
- 'homelab.description=SSO/Identity provider with web UI (Alternative to Authelia)'
|
||||
- 'traefik.enable=true'
|
||||
- 'traefik.docker.network=traefik-network'
|
||||
- 'traefik.http.routers.authentik.rule=Host(`authentik.${DOMAIN}`)'
|
||||
- 'traefik.http.routers.authentik.entrypoints=websecure'
|
||||
- 'traefik.http.routers.authentik.tls.certresolver=letsencrypt'
|
||||
- 'traefik.http.routers.authentik.middlewares=authelia@docker'
|
||||
- 'traefik.http.services.authentik.loadbalancer.server.port=9000'
|
||||
depends_on:
|
||||
- authentik-db
|
||||
- authentik-redis
|
||||
@@ -104,8 +104,8 @@ services:
|
||||
# TRAEFIK CONFIGURATION
|
||||
# ==========================================
|
||||
# Service metadata
|
||||
- 'homelab.category=alternatives"
|
||||
- 'homelab.description=Authentik background worker"
|
||||
- 'homelab.category=alternatives'
|
||||
- 'homelab.description=Authentik background worker'
|
||||
depends_on:
|
||||
- authentik-db
|
||||
- authentik-redis
|
||||
@@ -128,10 +128,10 @@ services:
|
||||
# TRAEFIK CONFIGURATION
|
||||
# ==========================================
|
||||
# Service metadata
|
||||
- 'homelab.category=alternatives"
|
||||
- 'homelab.description=Authentik database"
|
||||
- 'homelab.category=alternatives'
|
||||
- 'homelab.description=Authentik database'
|
||||
healthcheck:
|
||||
test: ["CMD-SHELL", "pg_isready -U ${AUTHENTIK_DB_USER}"]
|
||||
test: ['CMD-SHELL', 'pg_isready -U ${AUTHENTIK_DB_USER}']
|
||||
interval: 10s
|
||||
timeout: 5s
|
||||
retries: 5
|
||||
@@ -151,10 +151,10 @@ services:
|
||||
# TRAEFIK CONFIGURATION
|
||||
# ==========================================
|
||||
# Service metadata
|
||||
- 'homelab.category=alternatives"
|
||||
- 'homelab.description=Authentik cache and messaging"
|
||||
- 'homelab.category=alternatives'
|
||||
- 'homelab.description=Authentik cache and messaging'
|
||||
healthcheck:
|
||||
test: ["CMD-SHELL", "redis-cli ping | grep PONG"]
|
||||
test: ['CMD-SHELL', 'redis-cli ping | grep PONG']
|
||||
interval: 10s
|
||||
timeout: 3s
|
||||
retries: 5
|
||||
@@ -170,7 +170,7 @@ services:
|
||||
- homelab-network
|
||||
- traefik-network
|
||||
ports:
|
||||
- "32400:32400"
|
||||
- '32400:32400'
|
||||
volumes:
|
||||
- ./plex/config:/config
|
||||
- /mnt/media:/media:ro # Large media files on separate drive
|
||||
@@ -202,17 +202,17 @@ services:
|
||||
# TRAEFIK CONFIGURATION
|
||||
# ==========================================
|
||||
# Service metadata
|
||||
- 'homelab.category=alternatives"
|
||||
- 'homelab.description=Alternative media streaming server to Jellyfin"
|
||||
- 'homelab.category=alternatives'
|
||||
- 'homelab.description=Alternative media streaming server to Jellyfin'
|
||||
# Traefik labels - NO Authelia for app access
|
||||
- 'traefik.enable=true"
|
||||
- 'traefik.docker.network=traefik-network"
|
||||
- 'traefik.http.routers.plex.rule=Host(`plex.${DOMAIN}`)"
|
||||
- 'traefik.http.routers.plex.entrypoints=websecure"
|
||||
- 'traefik.http.routers.plex.tls.certresolver=letsencrypt"
|
||||
- 'traefik.http.services.plex.loadbalancer.server.port=32400"
|
||||
- "x-dockge.url=https://plex.${DOMAIN}"
|
||||
- "x-dockge.url=https://plex.${DOMAIN}"
|
||||
- 'traefik.enable=true'
|
||||
- 'traefik.docker.network=traefik-network'
|
||||
- 'traefik.http.routers.plex.rule=Host(`plex.${DOMAIN}`)'
|
||||
- 'traefik.http.routers.plex.entrypoints=websecure'
|
||||
- 'traefik.http.routers.plex.tls.certresolver=letsencrypt'
|
||||
- 'traefik.http.services.plex.loadbalancer.server.port=32400'
|
||||
- 'x-dockge.url=https://plex.${DOMAIN}'
|
||||
- 'x-dockge.url=https://plex.${DOMAIN}'
|
||||
|
||||
volumes:
|
||||
portainer-data:
|
||||
|
||||
@@ -1,236 +0,0 @@
|
||||
# Alternative Services Stack
|
||||
# This stack contains alternative/optional services that are not deployed by default
|
||||
# Deploy manually through Dockge if you want to use these alternatives
|
||||
# Place in /opt/stacks/alternatives/docker-compose.yml
|
||||
|
||||
# RESTART POLICY GUIDE:
|
||||
# - unless-stopped: Core infrastructure services that should always run
|
||||
# - no: Services with Sablier lazy loading (start on-demand)
|
||||
# - See individual service comments for specific reasoning
|
||||
|
||||
services:
|
||||
# Portainer - Docker management UI (Alternative to Dockge)
|
||||
# Access at: https://portainer.${DOMAIN}
|
||||
# NOTE: Dockge is the default Docker management UI. Deploy Portainer only if you prefer its interface
|
||||
# Docker management interface should always run when deployed
|
||||
portainer:
|
||||
image: portainer/portainer-ce:2.19.4
|
||||
container_name: portainer
|
||||
restart: unless-stopped
|
||||
networks:
|
||||
- homelab-network
|
||||
- traefik-network
|
||||
ports:
|
||||
- "9000:9000"
|
||||
volumes:
|
||||
- /var/run/docker.sock:/var/run/docker.sock
|
||||
- portainer-data:/data
|
||||
security_opt:
|
||||
- no-new-privileges:true
|
||||
labels:
|
||||
# TRAEFIK CONFIGURATION
|
||||
# ==========================================
|
||||
# Service metadata
|
||||
- "homelab.category=alternatives"
|
||||
- "homelab.description=Docker container management UI (Alternative to Dockge)"
|
||||
- "traefik.enable=true"
|
||||
- "traefik.http.routers.portainer.rule=Host(`portainer.${DOMAIN}`)"
|
||||
- "traefik.http.routers.portainer.entrypoints=websecure"
|
||||
- "traefik.http.routers.portainer.tls.certresolver=letsencrypt"
|
||||
- "traefik.http.routers.portainer.middlewares=authelia@docker"
|
||||
- "traefik.http.services.portainer.loadbalancer.server.port=9000"
|
||||
|
||||
# Authentik - Alternative SSO/Identity Provider with Web UI
|
||||
# Access at: https://authentik.${DOMAIN}
|
||||
# NOTE: Authelia is the default SSO. Deploy Authentik only if you need a web UI for user management
|
||||
# WARNING: Do not run both Authelia and Authentik at the same time
|
||||
# SSO service should always run when deployed as alternative to Authelia
|
||||
authentik-server:
|
||||
image: ghcr.io/goauthentik/server:2024.2.0
|
||||
container_name: authentik-server
|
||||
restart: unless-stopped
|
||||
command: server
|
||||
networks:
|
||||
- homelab-network
|
||||
- traefik-network
|
||||
ports:
|
||||
- "9000:9000"
|
||||
volumes:
|
||||
- /opt/stacks/authentik/media:/media
|
||||
- /opt/stacks/authentik/custom-templates:/templates
|
||||
environment:
|
||||
- AUTHENTIK_REDIS__HOST=authentik-redis
|
||||
- AUTHENTIK_POSTGRESQL__HOST=authentik-db
|
||||
- AUTHENTIK_POSTGRESQL__USER=${AUTHENTIK_DB_USER}
|
||||
- AUTHENTIK_POSTGRESQL__NAME=${AUTHENTIK_DB_NAME}
|
||||
- AUTHENTIK_POSTGRESQL__PASSWORD=${AUTHENTIK_DB_PASSWORD}
|
||||
- AUTHENTIK_SECRET_KEY=${AUTHENTIK_SECRET_KEY}
|
||||
- AUTHENTIK_ERROR_REPORTING__ENABLED=false
|
||||
labels:
|
||||
# TRAEFIK CONFIGURATION
|
||||
# ==========================================
|
||||
# Service metadata
|
||||
- "homelab.category=alternatives"
|
||||
- "homelab.description=SSO/Identity provider with web UI (Alternative to Authelia)"
|
||||
- "traefik.enable=true"
|
||||
- "traefik.http.routers.authentik.rule=Host(`authentik.${DOMAIN}`)"
|
||||
- "traefik.http.routers.authentik.entrypoints=websecure"
|
||||
- "traefik.http.routers.authentik.tls.certresolver=letsencrypt"
|
||||
- "traefik.http.routers.authentik.middlewares=authelia@docker"
|
||||
- "traefik.http.services.authentik.loadbalancer.server.port=9000"
|
||||
depends_on:
|
||||
- authentik-db
|
||||
- authentik-redis
|
||||
|
||||
# Authentik Worker - Background task processor
|
||||
# SSO background worker should always run when Authentik is deployed
|
||||
authentik-worker:
|
||||
image: ghcr.io/goauthentik/server:2024.2.0
|
||||
container_name: authentik-worker
|
||||
restart: unless-stopped
|
||||
command: worker
|
||||
networks:
|
||||
- homelab-network
|
||||
volumes:
|
||||
- /opt/stacks/authentik/media:/media
|
||||
- /opt/stacks/authentik/certs:/certs
|
||||
- /opt/stacks/authentik/custom-templates:/templates
|
||||
environment:
|
||||
- AUTHENTIK_REDIS__HOST=authentik-redis
|
||||
- AUTHENTIK_POSTGRESQL__HOST=authentik-db
|
||||
- AUTHENTIK_POSTGRESQL__USER=${AUTHENTIK_DB_USER}
|
||||
- AUTHENTIK_POSTGRESQL__NAME=${AUTHENTIK_DB_NAME}
|
||||
- AUTHENTIK_POSTGRESQL__PASSWORD=${AUTHENTIK_DB_PASSWORD}
|
||||
- AUTHENTIK_SECRET_KEY=${AUTHENTIK_SECRET_KEY}
|
||||
- AUTHENTIK_ERROR_REPORTING__ENABLED=false
|
||||
labels:
|
||||
# TRAEFIK CONFIGURATION
|
||||
# ==========================================
|
||||
# Service metadata
|
||||
- "homelab.category=alternatives"
|
||||
- "homelab.description=Authentik background worker"
|
||||
depends_on:
|
||||
- authentik-db
|
||||
- authentik-redis
|
||||
|
||||
# Authentik Database - PostgreSQL
|
||||
# Database must always run for Authentik to function
|
||||
authentik-db:
|
||||
image: postgres:16-alpine
|
||||
container_name: authentik-db
|
||||
restart: unless-stopped
|
||||
networks:
|
||||
- homelab-network
|
||||
volumes:
|
||||
- authentik-db-data:/var/lib/postgresql/data
|
||||
environment:
|
||||
- POSTGRES_USER=${AUTHENTIK_DB_USER}
|
||||
- POSTGRES_PASSWORD=${AUTHENTIK_DB_PASSWORD}
|
||||
- POSTGRES_DB=${AUTHENTIK_DB_NAME}
|
||||
labels:
|
||||
# TRAEFIK CONFIGURATION
|
||||
# ==========================================
|
||||
# Service metadata
|
||||
- "homelab.category=alternatives"
|
||||
- "homelab.description=Authentik database"
|
||||
healthcheck:
|
||||
test: ["CMD-SHELL", "pg_isready -U ${AUTHENTIK_DB_USER}"]
|
||||
interval: 10s
|
||||
timeout: 5s
|
||||
retries: 5
|
||||
|
||||
# Authentik Redis - Cache and message queue
|
||||
# Cache service must always run for Authentik performance
|
||||
authentik-redis:
|
||||
image: redis:7-alpine
|
||||
container_name: authentik-redis
|
||||
restart: unless-stopped
|
||||
networks:
|
||||
- homelab-network
|
||||
volumes:
|
||||
- authentik-redis-data:/data
|
||||
command: --save 60 1 --loglevel warning
|
||||
labels:
|
||||
# TRAEFIK CONFIGURATION
|
||||
# ==========================================
|
||||
# Service metadata
|
||||
- "homelab.category=alternatives"
|
||||
- "homelab.description=Authentik cache and messaging"
|
||||
healthcheck:
|
||||
test: ["CMD-SHELL", "redis-cli ping | grep PONG"]
|
||||
interval: 10s
|
||||
timeout: 3s
|
||||
retries: 5
|
||||
|
||||
# Plex Media Server - Alternative to Jellyfin
|
||||
# Access at: https://plex.yourdomain.duckdns.org
|
||||
# NOTE: No Authelia - allows app access from Roku, Fire TV, mobile, etc.
|
||||
# Media server should always run when deployed as alternative to Jellyfin
|
||||
plex:
|
||||
image: plexinc/pms-docker:1.40.0.7998-f68041501
|
||||
container_name: plex
|
||||
restart: unless-stopped
|
||||
networks:
|
||||
- homelab-network
|
||||
- homelab-network
|
||||
- traefik-network
|
||||
ports:
|
||||
- "32400:32400"
|
||||
volumes:
|
||||
- ./plex/config:/config
|
||||
- /mnt/media:/media:ro # Large media files on separate drive
|
||||
- plex-transcode:/transcode
|
||||
environment:
|
||||
- PUID=${PUID}
|
||||
- PGID=${PGID}
|
||||
- TZ=${TZ}
|
||||
- PLEX_CLAIM=${PLEX_CLAIM}
|
||||
# Hardware transcoding support
|
||||
# Uncomment ONE of the following options:
|
||||
|
||||
# Option 1: Intel QuickSync (most common)
|
||||
# devices:
|
||||
# - /dev/dri:/dev/dri
|
||||
|
||||
# Option 2: NVIDIA GPU (requires nvidia-container-toolkit installed)
|
||||
# runtime: nvidia
|
||||
# devices:
|
||||
# - /dev/nvidia0:/dev/nvidia0
|
||||
# - /dev/nvidiactl:/dev/nvidiactl
|
||||
# - /dev/nvidia-modeset:/dev/nvidia-modeset
|
||||
# - /dev/nvidia-uvm:/dev/nvidia-uvm
|
||||
# - /dev/nvidia-uvm-tools:/dev/nvidia-uvm-tools
|
||||
# environment:
|
||||
# - NVIDIA_VISIBLE_DEVICES=all
|
||||
# - NVIDIA_DRIVER_CAPABILITIES=compute,video,utility
|
||||
labels:
|
||||
# TRAEFIK CONFIGURATION
|
||||
# ==========================================
|
||||
# Service metadata
|
||||
- "homelab.category=alternatives"
|
||||
- "homelab.description=Alternative media streaming server to Jellyfin"
|
||||
# Traefik labels - NO Authelia for app access
|
||||
- "traefik.enable=true"
|
||||
- "traefik.http.routers.plex.rule=Host(`plex.${DOMAIN}`)"
|
||||
- "traefik.http.routers.plex.entrypoints=websecure"
|
||||
- "traefik.http.routers.plex.tls.certresolver=letsencrypt"
|
||||
- "traefik.http.services.plex.loadbalancer.server.port=32400"
|
||||
- "x-dockge.url=https://plex.${DOMAIN}"
|
||||
- "x-dockge.url=https://plex.${DOMAIN}"
|
||||
|
||||
volumes:
|
||||
portainer-data:
|
||||
driver: local
|
||||
authentik-db-data:
|
||||
driver: local
|
||||
authentik-redis-data:
|
||||
driver: local
|
||||
plex-transcode:
|
||||
driver: local
|
||||
|
||||
networks:
|
||||
homelab-network:
|
||||
external: true
|
||||
traefik-network:
|
||||
external: true
|
||||
|
||||
@@ -1,28 +0,0 @@
|
||||
# Environment Variables Template for Core Services
|
||||
# Copy this file to .env and fill in your values
|
||||
|
||||
# User and Group IDs for file permissions (get with: id -u and id -g)
|
||||
PUID=1000
|
||||
PGID=1000
|
||||
|
||||
TZ=America/New_York
|
||||
|
||||
SERVER_IP=192.168.1.100
|
||||
SERVER_HOSTNAME=your-server-name
|
||||
|
||||
# Domain & DuckDNS Configuration
|
||||
DUCKDNS_SUBDOMAINS=your-subdomain # Without .duckdns.org
|
||||
DOMAIN=your-subdomain.duckdns.org
|
||||
DUCKDNS_TOKEN=your-duckdns-token-here
|
||||
|
||||
# Default credentials (used by multiple services for easier setup)
|
||||
DEFAULT_USER=admin
|
||||
DEFAULT_PASSWORD=change-this-password
|
||||
|
||||
# Authelia Configuration
|
||||
AUTHELIA_JWT_SECRET=your-jwt-secret-here
|
||||
AUTHELIA_SESSION_SECRET=your-session-secret-here
|
||||
AUTHELIA_STORAGE_ENCRYPTION_KEY=your-encryption-key-here
|
||||
|
||||
# Let's Encrypt Configuration
|
||||
ACME_EMAIL=your-email@example.com
|
||||
@@ -1,6 +1,6 @@
|
||||
# Authelia Configuration
|
||||
# Copy to /opt/stacks/authelia/configuration.yml
|
||||
# IMPORTANT: Replace 'kelinreij.duckdns.org' with your actual DuckDNS domain
|
||||
# IMPORTANT: Replace 'your-domain.duckdns.org' with your actual DuckDNS domain
|
||||
|
||||
server:
|
||||
host: 0.0.0.0
|
||||
@@ -11,18 +11,18 @@ log:
|
||||
|
||||
theme: dark
|
||||
|
||||
jwt_secret: generate-with-openssl-rand-hex-64
|
||||
jwt_secret: ${AUTHELIA_JWT_SECRET}
|
||||
|
||||
default_redirection_url: https://auth.kelinreij.duckdns.org
|
||||
default_redirection_url: https://auth.${DOMAIN}
|
||||
|
||||
totp:
|
||||
issuer: kelinreij.duckdns.org
|
||||
issuer: ${DOMAIN}
|
||||
period: 30
|
||||
skew: 1
|
||||
|
||||
authentication_backend:
|
||||
file:
|
||||
path: /config/users_database.yml
|
||||
path: /secrets/users_database.yml
|
||||
password:
|
||||
algorithm: argon2id
|
||||
iterations: 1
|
||||
@@ -36,40 +36,34 @@ access_control:
|
||||
|
||||
rules:
|
||||
# Bypass Authelia for Jellyfin (allow app access)
|
||||
- domain: jellyfin.kelinreij.duckdns.org
|
||||
- domain: jellyfin.${DOMAIN}
|
||||
policy: bypass
|
||||
|
||||
# Bypass for Plex (allow app access)
|
||||
- domain: plex.kelinreij.duckdns.org
|
||||
- domain: plex.${DOMAIN}
|
||||
policy: bypass
|
||||
|
||||
# Bypass for Home Assistant (has its own auth)
|
||||
- domain: ha.kelinreij.duckdns.org
|
||||
policy: bypass
|
||||
|
||||
# Bypass for development services (they have their own auth or setup)
|
||||
- domain: pgadmin.kelinreij.duckdns.org
|
||||
policy: bypass
|
||||
- domain: gitlab.kelinreij.duckdns.org
|
||||
- domain: ha.${DOMAIN}
|
||||
policy: bypass
|
||||
|
||||
# Protected: All other services require authentication
|
||||
- domain: "*.kelinreij.duckdns.org"
|
||||
- domain: "*.${DOMAIN}"
|
||||
policy: one_factor
|
||||
|
||||
# Two-factor for admin services (optional)
|
||||
# - domain:
|
||||
# - "admin.kelinreij.duckdns.org"
|
||||
# - "portainer.kelinreij.duckdns.org"
|
||||
# - "admin.${DOMAIN}"
|
||||
# - "portainer.${DOMAIN}"
|
||||
# policy: two_factor
|
||||
|
||||
session:
|
||||
name: authelia_session
|
||||
secret: generate-with-openssl-rand-hex-64
|
||||
secret: ${AUTHELIA_SESSION_SECRET}
|
||||
expiration: 24h # Session expires after 24 hours
|
||||
inactivity: 24h # Session expires after 24 hours of inactivity
|
||||
remember_me_duration: 1M
|
||||
domain: kelinreij.duckdns.org
|
||||
domain: ${DOMAIN}
|
||||
|
||||
regulation:
|
||||
max_retries: 3
|
||||
@@ -77,11 +71,11 @@ regulation:
|
||||
ban_time: 5m
|
||||
|
||||
storage:
|
||||
encryption_key: generate-with-openssl-rand-hex-64
|
||||
encryption_key: ${AUTHELIA_STORAGE_ENCRYPTION_KEY}
|
||||
local:
|
||||
path: /data/db.sqlite3
|
||||
path: /config/db.sqlite3
|
||||
|
||||
notifier:
|
||||
# File-based notifications (for development/testing)
|
||||
filesystem:
|
||||
filename: /data/notification.txt
|
||||
filename: /config/notification.txt
|
||||
20
docker-compose/core/authelia/config/users_database.yml
Normal file
20
docker-compose/core/authelia/config/users_database.yml
Normal file
@@ -0,0 +1,20 @@
|
||||
# Authelia Users Database
|
||||
# Copy to /opt/stacks/authelia/users_database.yml
|
||||
# Generate password hashes with: docker run authelia/authelia:latest authelia crypto hash generate argon2 --password 'yourpassword'
|
||||
|
||||
users:
|
||||
${AUTHELIA_ADMIN_USER}:
|
||||
displayname: ${AUTHELIA_ADMIN_USER}
|
||||
password: "${AUTHELIA_ADMIN_PASSWORD_HASH}"
|
||||
email: ${AUTHELIA_ADMIN_EMAIL}
|
||||
groups:
|
||||
- admins
|
||||
- users
|
||||
|
||||
# Example: Additional user
|
||||
# user1:
|
||||
# displayname: "User One"
|
||||
# password: "$argon2id$v=19$m=65536,t=3,p=4$CHANGEME"
|
||||
# email: user1@example.com
|
||||
# groups:
|
||||
# - users
|
||||
@@ -10,4 +10,3 @@ users:
|
||||
groups:
|
||||
- admins
|
||||
- users
|
||||
- users
|
||||
@@ -1,12 +0,0 @@
|
||||
###############################################################
|
||||
# Users Database #
|
||||
###############################################################
|
||||
|
||||
users:
|
||||
${DEFAULT_USER}:
|
||||
displayname: "Admin User"
|
||||
password: "${AUTHELIA_ADMIN_PASSWORD_HASH}"
|
||||
email: ${DEFAULT_EMAIL}
|
||||
groups:
|
||||
- admins
|
||||
- users
|
||||
@@ -56,7 +56,7 @@ services:
|
||||
|
||||
authelia:
|
||||
# Single sign-on authentication service - must always run for user authentication
|
||||
image: authelia/authelia:latest
|
||||
image: authelia/authelia:4.37.5
|
||||
container_name: authelia
|
||||
restart: unless-stopped
|
||||
environment:
|
||||
|
||||
@@ -1,144 +0,0 @@
|
||||
# Core Infrastructure Services
|
||||
# These services form the foundation of the homelab and should always be running
|
||||
# Place in /opt/stacks/core/docker-compose.yml
|
||||
|
||||
# RESTART POLICY GUIDE:
|
||||
# - unless-stopped: Core infrastructure services that should always run
|
||||
# - no: Services with Sablier lazy loading (start on-demand)
|
||||
# - See individual service comments for specific reasoning
|
||||
|
||||
services:
|
||||
|
||||
duckdns:
|
||||
# Dynamic DNS service - must always run to maintain domain resolution
|
||||
image: lscr.io/linuxserver/duckdns:latest
|
||||
container_name: duckdns
|
||||
restart: unless-stopped
|
||||
environment:
|
||||
- PUID=${PUID}
|
||||
- PGID=${PGID}
|
||||
- TZ=${TZ}
|
||||
- SUBDOMAINS=${DUCKDNS_SUBDOMAINS}
|
||||
- TOKEN=${DUCKDNS_TOKEN}
|
||||
volumes:
|
||||
- ./duckdns/config:/config
|
||||
networks:
|
||||
- traefik-network
|
||||
|
||||
traefik:
|
||||
# Reverse proxy and SSL termination - core routing service, must always run
|
||||
# CONFIGURATION REQUIREMENT: traefik.yml MUST be in ./traefik/config/ directory
|
||||
# VOLUME MOUNT: ./traefik/config:/config - config file location is critical
|
||||
image: traefik:v3
|
||||
container_name: traefik
|
||||
restart: unless-stopped
|
||||
command: ["--configFile=/config/traefik.yml"]
|
||||
environment:
|
||||
- DUCKDNS_TOKEN=${DUCKDNS_TOKEN}
|
||||
ports:
|
||||
- 80:80
|
||||
- 443:443
|
||||
- 8080:8080
|
||||
volumes:
|
||||
- ./traefik/config:/config
|
||||
- ./traefik/letsencrypt:/letsencrypt
|
||||
- ./traefik/dynamic:/dynamic
|
||||
- /var/run/docker.sock:/var/run/docker.sock:ro
|
||||
networks:
|
||||
- traefik-network
|
||||
labels:
|
||||
# TRAEFIK CONFIGURATION
|
||||
# ==========================================
|
||||
# Service metadata
|
||||
- "homelab.category=core"
|
||||
- "homelab.description=Reverse proxy and SSL termination"
|
||||
# Traefik reverse proxy (comment/uncomment to disable/enable)
|
||||
# If Traefik is on a remote server: these labels are NOT USED;
|
||||
# configure external yml files in /traefik/dynamic folder instead.
|
||||
- "traefik.enable=true"
|
||||
- "traefik.http.routers.traefik.rule=Host(`traefik.${DOMAIN}`)"
|
||||
- "traefik.http.routers.traefik.entrypoints=websecure"
|
||||
- "traefik.http.routers.traefik.tls.certresolver=letsencrypt"
|
||||
- "traefik.http.routers.traefik.middlewares=authelia@docker"
|
||||
- "traefik.http.services.traefik.loadbalancer.server.port=8080"
|
||||
|
||||
authelia:
|
||||
# Single sign-on authentication service - must always run for user authentication
|
||||
# VERSION PINNING: Pinned to v4.37.5 due to breaking changes in v4.39.15+
|
||||
# BREAKING CHANGES: v4.39.15+ has incompatible configuration and database changes
|
||||
# UPGRADE NOTES: Test in separate environment before upgrading. Backup config and DB.
|
||||
image: authelia/authelia:4.37.5
|
||||
container_name: authelia
|
||||
restart: unless-stopped
|
||||
environment:
|
||||
- TZ=${TZ}
|
||||
ports:
|
||||
- "9091:9091"
|
||||
volumes:
|
||||
- ./authelia/config:/config
|
||||
- ./authelia/secrets:/secrets
|
||||
networks:
|
||||
- traefik-network
|
||||
depends_on:
|
||||
- traefik
|
||||
labels:
|
||||
# TRAEFIK CONFIGURATION
|
||||
# ==========================================
|
||||
# Service metadata
|
||||
- "homelab.category=core"
|
||||
- "homelab.description=Single sign-on authentication"
|
||||
# Traefik reverse proxy (comment/uncomment to disable/enable)
|
||||
# If Traefik is on a remote server: these labels are NOT USED;
|
||||
# configure external yml files in /traefik/dynamic folder instead.
|
||||
- "traefik.enable=true"
|
||||
- "traefik.http.routers.authelia.rule=Host(`auth.${DOMAIN}`)"
|
||||
- "traefik.http.routers.authelia.entrypoints=websecure"
|
||||
- "traefik.http.routers.authelia.tls.certresolver=letsencrypt"
|
||||
- "traefik.http.routers.authelia.service=authelia"
|
||||
- "traefik.http.services.authelia.loadbalancer.server.port=9091"
|
||||
# Authelia forward auth middleware configuration
|
||||
- "traefik.http.middlewares.authelia.forwardauth.address=http://authelia:9091/api/verify?rd=https://auth.${DOMAIN}/"
|
||||
- "traefik.http.middlewares.authelia.forwardauth.authResponseHeaders=X-Secret" # NOTE(review): Authelia docs list Remote-User,Remote-Groups,Remote-Name,Remote-Email here — confirm X-Secret is intentional
|
||||
- "traefik.http.middlewares.authelia.forwardauth.trustForwardHeader=true"
|
||||
|
||||
# Sablier - Lazy loading service for Docker containers
|
||||
# Controls startup/shutdown of lazy-loaded services, must always run
|
||||
# REQUIREMENTS FOR DOCKER API ACCESS:
|
||||
# 1. Docker daemon must be configured to listen on TCP port 2376 with TLS
|
||||
# 2. DOCKER_HOST environment variable must point to accessible Docker API endpoint
|
||||
# 3. Firewall must allow TCP connections to Docker API port (2376)
|
||||
# 4. TLS certificates must be mounted and environment variables set
|
||||
# 5. Ensure dockerproxy service is running and accessible
|
||||
sablier-service:
|
||||
image: sablierapp/sablier:latest
|
||||
container_name: sablier-service
|
||||
restart: unless-stopped
|
||||
networks:
|
||||
- traefik-network
|
||||
environment:
|
||||
- SABLIER_PROVIDER=docker
|
||||
- SABLIER_DOCKER_API_VERSION=1.51
|
||||
- SABLIER_DOCKER_NETWORK=traefik-network
|
||||
- SABLIER_LOG_LEVEL=debug
|
||||
- DOCKER_HOST=tcp://${SERVER_IP}:2376
|
||||
- DOCKER_TLS_VERIFY=1
|
||||
- DOCKER_CERT_PATH=/certs
|
||||
volumes:
|
||||
- ./shared-ca:/certs:ro
|
||||
ports:
|
||||
- 10000:10000
|
||||
labels:
|
||||
# Service metadata
|
||||
- "homelab.category=core"
|
||||
- "homelab.description=Lazy loading service for Docker containers"
|
||||
|
||||
networks:
|
||||
traefik-network:
|
||||
external: true
|
||||
|
||||
x-dockge:
|
||||
urls:
|
||||
- https://auth.${DOMAIN}
|
||||
- http://${SERVER_IP}:9091
|
||||
- https://traefik.${DOMAIN}
|
||||
- http://${SERVER_IP}:8080
|
||||
@@ -0,0 +1,19 @@
|
||||
http:
|
||||
routers:
|
||||
# Individual Services
|
||||
homeassistant:
|
||||
rule: "Host(`hass.${DOMAIN}`)"
|
||||
entryPoints:
|
||||
- websecure
|
||||
service: homeassistant
|
||||
tls:
|
||||
certResolver: letsencrypt
|
||||
middlewares:
|
||||
- authelia@docker
|
||||
services:
|
||||
# Individual Services
|
||||
homeassistant:
|
||||
loadBalancer:
|
||||
servers:
|
||||
- url: "http://${HOMEASSISTANT_IP}:8123"
|
||||
passHostHeader: true
|
||||
399
docker-compose/core/traefik/dynamic/local-host-production.yml
Normal file
399
docker-compose/core/traefik/dynamic/local-host-production.yml
Normal file
@@ -0,0 +1,399 @@
|
||||
http:
|
||||
routers:
|
||||
# Remote Server Services (${REMOTE_SERVER_HOSTNAME})
|
||||
dockge-${REMOTE_SERVER_HOSTNAME}:
|
||||
rule: "Host(`dockge.${REMOTE_SERVER_HOSTNAME}.${DOMAIN}`)"
|
||||
entryPoints:
|
||||
- websecure
|
||||
service: dockge-${REMOTE_SERVER_HOSTNAME}
|
||||
tls:
|
||||
certResolver: letsencrypt
|
||||
middlewares:
|
||||
- authelia@docker
|
||||
|
||||
dozzle-${REMOTE_SERVER_HOSTNAME}:
|
||||
rule: "Host(`dozzle.${REMOTE_SERVER_HOSTNAME}.${DOMAIN}`)"
|
||||
entryPoints:
|
||||
- websecure
|
||||
service: dozzle-${REMOTE_SERVER_HOSTNAME}
|
||||
tls:
|
||||
certResolver: letsencrypt
|
||||
middlewares:
|
||||
- authelia@docker
|
||||
|
||||
glances-${REMOTE_SERVER_HOSTNAME}:
|
||||
rule: "Host(`glances.${REMOTE_SERVER_HOSTNAME}.${DOMAIN}`)"
|
||||
entryPoints:
|
||||
- websecure
|
||||
service: glances-${REMOTE_SERVER_HOSTNAME}
|
||||
tls:
|
||||
certResolver: letsencrypt
|
||||
middlewares:
|
||||
- authelia@docker
|
||||
|
||||
backrest-${REMOTE_SERVER_HOSTNAME}:
|
||||
rule: "Host(`backrest.${REMOTE_SERVER_HOSTNAME}.${DOMAIN}`)"
|
||||
entryPoints:
|
||||
- websecure
|
||||
service: backrest-${REMOTE_SERVER_HOSTNAME}
|
||||
tls:
|
||||
certResolver: letsencrypt
|
||||
middlewares:
|
||||
- authelia@docker
|
||||
|
||||
duplicati-${REMOTE_SERVER_HOSTNAME}:
|
||||
rule: "Host(`duplicati.${REMOTE_SERVER_HOSTNAME}.${DOMAIN}`)"
|
||||
entryPoints:
|
||||
- websecure
|
||||
service: duplicati-${REMOTE_SERVER_HOSTNAME}
|
||||
tls:
|
||||
certResolver: letsencrypt
|
||||
middlewares:
|
||||
- authelia@docker
|
||||
|
||||
homepage-${REMOTE_SERVER_HOSTNAME}:
|
||||
rule: "Host(`homepage.${REMOTE_SERVER_HOSTNAME}.${DOMAIN}`)"
|
||||
entryPoints:
|
||||
- websecure
|
||||
service: homepage-${REMOTE_SERVER_HOSTNAME}
|
||||
tls:
|
||||
certResolver: letsencrypt
|
||||
middlewares:
|
||||
- authelia@docker
|
||||
|
||||
homarr-${REMOTE_SERVER_HOSTNAME}:
|
||||
rule: "Host(`homarr.${REMOTE_SERVER_HOSTNAME}.${DOMAIN}`)"
|
||||
entryPoints:
|
||||
- websecure
|
||||
service: homarr-${REMOTE_SERVER_HOSTNAME}
|
||||
tls:
|
||||
certResolver: letsencrypt
|
||||
middlewares:
|
||||
- authelia@docker
|
||||
|
||||
grafana-${REMOTE_SERVER_HOSTNAME}:
|
||||
rule: "Host(`grafana.${REMOTE_SERVER_HOSTNAME}.${DOMAIN}`)"
|
||||
entryPoints:
|
||||
- websecure
|
||||
service: grafana-${REMOTE_SERVER_HOSTNAME}
|
||||
tls:
|
||||
certResolver: letsencrypt
|
||||
middlewares:
|
||||
- authelia@docker
|
||||
|
||||
prometheus-${REMOTE_SERVER_HOSTNAME}:
|
||||
rule: "Host(`prometheus.${REMOTE_SERVER_HOSTNAME}.${DOMAIN}`)"
|
||||
entryPoints:
|
||||
- websecure
|
||||
service: prometheus-${REMOTE_SERVER_HOSTNAME}
|
||||
tls:
|
||||
certResolver: letsencrypt
|
||||
middlewares:
|
||||
- authelia@docker
|
||||
|
||||
uptime-kuma-${REMOTE_SERVER_HOSTNAME}:
|
||||
rule: "Host(`status.${REMOTE_SERVER_HOSTNAME}.${DOMAIN}`)"
|
||||
entryPoints:
|
||||
- websecure
|
||||
service: uptime-kuma-${REMOTE_SERVER_HOSTNAME}
|
||||
tls:
|
||||
certResolver: letsencrypt
|
||||
middlewares:
|
||||
- authelia@docker
|
||||
|
||||
# Service Definitions
|
||||
services:
|
||||
backrest-${SERVER_HOSTNAME}:
|
||||
loadBalancer:
|
||||
servers:
|
||||
- url: "http://${SERVER_IP}:9898"
|
||||
passHostHeader: true
|
||||
|
||||
vaultwarden-${SERVER_HOSTNAME}:
|
||||
loadBalancer:
|
||||
servers:
|
||||
- url: "http://${SERVER_IP}:8091"
|
||||
passHostHeader: true
|
||||
|
||||
bookstack-${SERVER_HOSTNAME}:
|
||||
loadbalancer:
|
||||
servers:
|
||||
- url: "http://${SERVER_IP}:6875"
|
||||
passHostHeader: true
|
||||
|
||||
calibre-web-${SERVER_HOSTNAME}:
|
||||
loadbalancer:
|
||||
servers:
|
||||
- url: "http://${SERVER_IP}:8083"
|
||||
passHostHeader: true
|
||||
|
||||
code-${SERVER_HOSTNAME}:
|
||||
loadbalancer:
|
||||
servers:
|
||||
- url: "http://${SERVER_IP}:8079"
|
||||
passHostHeader: true
|
||||
|
||||
dockge-${SERVER_HOSTNAME}:
|
||||
loadbalancer:
|
||||
servers:
|
||||
- url: "http://${SERVER_IP}:5001"
|
||||
passHostHeader: true
|
||||
|
||||
dockhand-${SERVER_HOSTNAME}:
|
||||
loadbalancer:
|
||||
servers:
|
||||
- url: "http://${SERVER_IP}:3003"
|
||||
passHostHeader: true
|
||||
|
||||
dokuwiki-${SERVER_HOSTNAME}:
|
||||
loadbalancer:
|
||||
servers:
|
||||
- url: "http://${SERVER_IP}:8087"
|
||||
passHostHeader: true
|
||||
|
||||
dozzle-${SERVER_HOSTNAME}:
|
||||
loadbalancer:
|
||||
servers:
|
||||
- url: "http://${SERVER_IP}:8085"
|
||||
passHostHeader: true
|
||||
|
||||
duplicati-${SERVER_HOSTNAME}:
|
||||
loadbalancer:
|
||||
servers:
|
||||
- url: "http://${SERVER_IP}:8200"
|
||||
passHostHeader: true
|
||||
|
||||
ez-assistant-${SERVER_HOSTNAME}:
|
||||
loadbalancer:
|
||||
servers:
|
||||
- url: "http://${SERVER_IP}:18789" # Internal IP of ${SERVER_HOSTNAME} server
|
||||
passHostHeader: true
|
||||
|
||||
formio-${SERVER_HOSTNAME}:
|
||||
loadbalancer:
|
||||
servers:
|
||||
- url: "http://${SERVER_IP}:3002"
|
||||
passHostHeader: true
|
||||
|
||||
gitea-${SERVER_HOSTNAME}:
|
||||
loadbalancer:
|
||||
servers:
|
||||
- url: "http://${SERVER_IP}:3010"
|
||||
passHostHeader: true
|
||||
|
||||
glances-${SERVER_HOSTNAME}:
|
||||
loadbalancer:
|
||||
servers:
|
||||
- url: "http://${SERVER_IP}:61208"
|
||||
passHostHeader: true
|
||||
|
||||
homarr-${SERVER_HOSTNAME}:
|
||||
loadbalancer:
|
||||
servers:
|
||||
- url: "http://${SERVER_IP}:7575"
|
||||
passHostHeader: true
|
||||
|
||||
homepage-${SERVER_HOSTNAME}:
|
||||
loadbalancer:
|
||||
servers:
|
||||
- url: "http://${SERVER_IP}:3000"
|
||||
passHostHeader: true
|
||||
|
||||
jellyfin-${SERVER_HOSTNAME}:
|
||||
loadbalancer:
|
||||
servers:
|
||||
- url: "http://${SERVER_IP}:8096"
|
||||
passHostHeader: true
|
||||
|
||||
jupyter-${SERVER_HOSTNAME}:
|
||||
loadbalancer:
|
||||
servers:
|
||||
- url: "http://${SERVER_IP}:8890"
|
||||
passHostHeader: true
|
||||
|
||||
kopia-${SERVER_HOSTNAME}:
|
||||
loadbalancer:
|
||||
servers:
|
||||
- url: "http://${SERVER_IP}:51515"
|
||||
passHostHeader: true
|
||||
|
||||
mealie-${SERVER_HOSTNAME}:
|
||||
loadbalancer:
|
||||
servers:
|
||||
- url: "http://${SERVER_IP}:9000"
|
||||
passHostHeader: true
|
||||
|
||||
mediawiki-${SERVER_HOSTNAME}:
|
||||
loadbalancer:
|
||||
servers:
|
||||
- url: "http://${SERVER_IP}:8086"
|
||||
passHostHeader: true
|
||||
|
||||
motioneye-${SERVER_HOSTNAME}:
|
||||
loadbalancer:
|
||||
servers:
|
||||
- url: "http://${SERVER_IP}:8081"
|
||||
passHostHeader: true
|
||||
|
||||
nextcloud-${SERVER_HOSTNAME}:
|
||||
loadbalancer:
|
||||
servers:
|
||||
- url: "http://${SERVER_IP}:8089"
|
||||
passHostHeader: true
|
||||
|
||||
openkm-${SERVER_HOSTNAME}:
|
||||
loadbalancer:
|
||||
servers:
|
||||
- url: "http://${SERVER_IP}:18080"
|
||||
passHostHeader: true
|
||||
|
||||
openwebui-${SERVER_HOSTNAME}:
|
||||
loadbalancer:
|
||||
servers:
|
||||
- url: "http://${SERVER_IP}:3000"
|
||||
passHostHeader: true
|
||||
|
||||
qbittorrent-${SERVER_HOSTNAME}:
|
||||
loadbalancer:
|
||||
servers:
|
||||
- url: "http://${SERVER_IP}:8081" # NOTE(review): port 8081 is also used by motioneye above — verify one of these is wrong
|
||||
passHostHeader: true
|
||||
|
||||
tdarr-${SERVER_HOSTNAME}:
|
||||
loadbalancer:
|
||||
servers:
|
||||
- url: "http://${SERVER_IP}:8265"
|
||||
passHostHeader: true
|
||||
|
||||
unmanic-${SERVER_HOSTNAME}:
|
||||
loadbalancer:
|
||||
servers:
|
||||
- url: "http://${SERVER_IP}:8889"
|
||||
passHostHeader: true
|
||||
|
||||
wordpress-${SERVER_HOSTNAME}:
|
||||
loadbalancer:
|
||||
servers:
|
||||
- url: "http://${SERVER_IP}:8088"
|
||||
passHostHeader: true
|
||||
|
||||
# Arr Services
|
||||
|
||||
jellyseerr-${SERVER_HOSTNAME}:
|
||||
loadbalancer:
|
||||
servers:
|
||||
- url: "http://${SERVER_IP}:5055"
|
||||
passHostHeader: true
|
||||
|
||||
prowlarr-${SERVER_HOSTNAME}:
|
||||
loadbalancer:
|
||||
servers:
|
||||
- url: "http://${SERVER_IP}:9696"
|
||||
passHostHeader: true
|
||||
|
||||
radarr-${SERVER_HOSTNAME}:
|
||||
loadbalancer:
|
||||
servers:
|
||||
- url: "http://${SERVER_IP}:7878"
|
||||
passHostHeader: true
|
||||
|
||||
sonarr-${SERVER_HOSTNAME}:
|
||||
loadbalancer:
|
||||
servers:
|
||||
- url: "http://${SERVER_IP}:8989"
|
||||
passHostHeader: true
|
||||
|
||||
lidarr-${SERVER_HOSTNAME}:
|
||||
loadbalancer:
|
||||
servers:
|
||||
- url: "http://${SERVER_IP}:8686"
|
||||
passHostHeader: true
|
||||
|
||||
readarr-${SERVER_HOSTNAME}:
|
||||
loadbalancer:
|
||||
servers:
|
||||
- url: "http://${SERVER_IP}:8787"
|
||||
passHostHeader: true
|
||||
|
||||
mylar3-${SERVER_HOSTNAME}:
|
||||
loadBalancer:
|
||||
servers:
|
||||
- url: "http://${SERVER_IP}:8090"
|
||||
passHostHeader: true
|
||||
|
||||
|
||||
|
||||
|
||||
# Remote Server Service Definitions (${REMOTE_SERVER_HOSTNAME})
|
||||
dockge-${REMOTE_SERVER_HOSTNAME}:
|
||||
loadbalancer:
|
||||
servers:
|
||||
- url: "http://${REMOTE_SERVER_IP}:5001"
|
||||
passHostHeader: true
|
||||
|
||||
dozzle-${REMOTE_SERVER_HOSTNAME}:
|
||||
loadbalancer:
|
||||
servers:
|
||||
- url: "http://${REMOTE_SERVER_IP}:8085"
|
||||
passHostHeader: true
|
||||
|
||||
glances-${REMOTE_SERVER_HOSTNAME}:
|
||||
loadbalancer:
|
||||
servers:
|
||||
- url: "http://${REMOTE_SERVER_IP}:61208"
|
||||
passHostHeader: true
|
||||
|
||||
backrest-${REMOTE_SERVER_HOSTNAME}:
|
||||
loadbalancer:
|
||||
servers:
|
||||
- url: "http://${REMOTE_SERVER_IP}:9898"
|
||||
passHostHeader: true
|
||||
|
||||
duplicati-${REMOTE_SERVER_HOSTNAME}:
|
||||
loadbalancer:
|
||||
servers:
|
||||
- url: "http://${REMOTE_SERVER_IP}:8200"
|
||||
passHostHeader: true
|
||||
|
||||
homepage-${REMOTE_SERVER_HOSTNAME}:
|
||||
loadbalancer:
|
||||
servers:
|
||||
- url: "http://${REMOTE_SERVER_IP}:3000"
|
||||
passHostHeader: true
|
||||
|
||||
homarr-${REMOTE_SERVER_HOSTNAME}:
|
||||
loadbalancer:
|
||||
servers:
|
||||
- url: "http://${REMOTE_SERVER_IP}:7575"
|
||||
passHostHeader: true
|
||||
|
||||
grafana-${REMOTE_SERVER_HOSTNAME}:
|
||||
loadbalancer:
|
||||
servers:
|
||||
- url: "http://${REMOTE_SERVER_IP}:3000"
|
||||
passHostHeader: true
|
||||
|
||||
prometheus-${REMOTE_SERVER_HOSTNAME}:
|
||||
loadbalancer:
|
||||
servers:
|
||||
- url: "http://${REMOTE_SERVER_IP}:9090"
|
||||
passHostHeader: true
|
||||
|
||||
uptime-kuma-${REMOTE_SERVER_HOSTNAME}:
|
||||
loadbalancer:
|
||||
servers:
|
||||
- url: "http://${REMOTE_SERVER_IP}:3001"
|
||||
passHostHeader: true
|
||||
|
||||
# Middleware Definitions
|
||||
middlewares:
|
||||
ez-assistant-websocket:
|
||||
headers:
|
||||
accessControlAllowHeaders:
|
||||
- "Connection"
|
||||
- "Upgrade"
|
||||
accessControlAllowMethods:
|
||||
- "GET"
|
||||
- "POST"
|
||||
- "OPTIONS"
|
||||
accessControlMaxAge: 86400
|
||||
31
docker-compose/core/traefik/dynamic/routes.yml
Normal file
31
docker-compose/core/traefik/dynamic/routes.yml
Normal file
@@ -0,0 +1,31 @@
|
||||
# Traefik Dynamic Configuration
|
||||
# Copy to /opt/stacks/traefik/dynamic/routes.yml
|
||||
# Add custom routes here that aren't defined via Docker labels
|
||||
|
||||
http:
|
||||
routers:
|
||||
# Example custom route
|
||||
# custom-service:
|
||||
# rule: "Host(`custom.example.com`)"
|
||||
# entryPoints:
|
||||
# - websecure
|
||||
# middlewares:
|
||||
# - authelia@docker
|
||||
# tls:
|
||||
# certResolver: letsencrypt
|
||||
# service: custom-service
|
||||
|
||||
services:
|
||||
# Example custom service
|
||||
# custom-service:
|
||||
# loadBalancer:
|
||||
# servers:
|
||||
# - url: "http://192.168.1.100:8080"
|
||||
|
||||
middlewares:
|
||||
# Additional middlewares can be defined here
|
||||
# Example: Rate limiting
|
||||
# rate-limit:
|
||||
# rateLimit:
|
||||
# average: 100
|
||||
# burst: 50
|
||||
454
docker-compose/core/traefik/dynamic/sablier.yml
Normal file
454
docker-compose/core/traefik/dynamic/sablier.yml
Normal file
@@ -0,0 +1,454 @@
|
||||
# Session duration set to 5m for testing. Increase to 30m for production.
|
||||
http:
|
||||
middlewares:
|
||||
authelia:
|
||||
forwardauth:
|
||||
address: http://authelia:9091/api/verify?rd=https://auth.${DOMAIN}/
|
||||
authResponseHeaders:
|
||||
- X-Secret
|
||||
trustForwardHeader: true
|
||||
|
||||
sablier-${SERVER_HOSTNAME}-arr:
|
||||
plugin:
|
||||
sablier:
|
||||
sablierUrl: http://sablier-service:10000
|
||||
group: ${SERVER_HOSTNAME}-arr
|
||||
sessionDuration: 5m
|
||||
ignoreUserAgent: curl
|
||||
dynamic:
|
||||
displayName: Arr Apps
|
||||
theme: ghost
|
||||
show-details-by-default: true
|
||||
|
||||
sablier-${SERVER_HOSTNAME}-backrest:
|
||||
plugin:
|
||||
sablier:
|
||||
sablierUrl: http://sablier-service:10000
|
||||
group: ${SERVER_HOSTNAME}-backrest
|
||||
sessionDuration: 5m
|
||||
ignoreUserAgent: curl
|
||||
dynamic:
|
||||
displayName: Backrest
|
||||
theme: ghost
|
||||
show-details-by-default: true
|
||||
|
||||
sablier-${SERVER_HOSTNAME}-vaultwarden:
|
||||
plugin:
|
||||
sablier:
|
||||
sablierUrl: http://sablier-service:10000
|
||||
group: ${SERVER_HOSTNAME}-vaultwarden
|
||||
sessionDuration: 5m
|
||||
ignoreUserAgent: curl
|
||||
dynamic:
|
||||
displayName: Vaultwarden
|
||||
theme: ghost
|
||||
show-details-by-default: true
|
||||
|
||||
sablier-${SERVER_HOSTNAME}-bookstack:
|
||||
plugin:
|
||||
sablier:
|
||||
sablierUrl: http://sablier-service:10000
|
||||
group: ${SERVER_HOSTNAME}-bookstack
|
||||
sessionDuration: 5m
|
||||
ignoreUserAgent: curl
|
||||
dynamic:
|
||||
displayName: Bookstack
|
||||
theme: ghost
|
||||
show-details-by-default: true
|
||||
|
||||
sablier-${SERVER_HOSTNAME}-calibre-web:
|
||||
plugin:
|
||||
sablier:
|
||||
sablierUrl: http://sablier-service:10000
|
||||
group: ${SERVER_HOSTNAME}-calibre-web
|
||||
sessionDuration: 5m
|
||||
ignoreUserAgent: curl
|
||||
dynamic:
|
||||
displayName: Calibre Web
|
||||
theme: ghost
|
||||
show-details-by-default: true
|
||||
|
||||
sablier-${SERVER_HOSTNAME}-code-server:
|
||||
plugin:
|
||||
sablier:
|
||||
sablierUrl: http://sablier-service:10000
|
||||
group: ${SERVER_HOSTNAME}-code-server
|
||||
sessionDuration: 5m
|
||||
ignoreUserAgent: curl
|
||||
dynamic:
|
||||
displayName: Code Server
|
||||
theme: ghost
|
||||
show-details-by-default: true
|
||||
|
||||
sablier-${SERVER_HOSTNAME}-dozzle:
|
||||
plugin:
|
||||
sablier:
|
||||
sablierUrl: http://sablier-service:10000
|
||||
group: ${SERVER_HOSTNAME}-dozzle
|
||||
sessionDuration: 5m
|
||||
ignoreUserAgent: curl
|
||||
dynamic:
|
||||
displayName: dozzle
|
||||
theme: ghost
|
||||
show-details-by-default: true
|
||||
|
||||
sablier-${SERVER_HOSTNAME}-dokuwiki:
|
||||
plugin:
|
||||
sablier:
|
||||
sablierUrl: http://sablier-service:10000
|
||||
group: ${SERVER_HOSTNAME}-dokuwiki
|
||||
sessionDuration: 5m
|
||||
ignoreUserAgent: curl
|
||||
dynamic:
|
||||
displayName: DokuWiki
|
||||
theme: ghost
|
||||
show-details-by-default: true
|
||||
|
||||
sablier-${SERVER_HOSTNAME}-duplicati:
|
||||
plugin:
|
||||
sablier:
|
||||
sablierUrl: http://sablier-service:10000
|
||||
group: ${SERVER_HOSTNAME}-duplicati
|
||||
sessionDuration: 5m
|
||||
ignoreUserAgent: curl
|
||||
dynamic:
|
||||
displayName: Duplicati
|
||||
theme: ghost
|
||||
show-details-by-default: true
|
||||
|
||||
sablier-${SERVER_HOSTNAME}-assistant:
|
||||
plugin:
|
||||
sablier:
|
||||
sablierUrl: http://sablier-service:10000
|
||||
group: ${SERVER_HOSTNAME}-assistant
|
||||
sessionDuration: 30m
|
||||
ignoreUserAgent: curl
|
||||
dynamic:
|
||||
displayName: EZ-Assistant
|
||||
theme: ghost
|
||||
show-details-by-default: true
|
||||
|
||||
sablier-${SERVER_HOSTNAME}-formio:
|
||||
plugin:
|
||||
sablier:
|
||||
sablierUrl: http://sablier-service:10000
|
||||
group: ${SERVER_HOSTNAME}-formio
|
||||
sessionDuration: 5m
|
||||
ignoreUserAgent: curl
|
||||
dynamic:
|
||||
displayName: FormIO
|
||||
theme: ghost
|
||||
show-details-by-default: true
|
||||
|
||||
sablier-${SERVER_HOSTNAME}-gitea:
|
||||
plugin:
|
||||
sablier:
|
||||
sablierUrl: http://sablier-service:10000
|
||||
group: ${SERVER_HOSTNAME}-gitea
|
||||
sessionDuration: 5m
|
||||
ignoreUserAgent: curl
|
||||
dynamic:
|
||||
displayName: Gitea
|
||||
theme: ghost
|
||||
show-details-by-default: true
|
||||
|
||||
sablier-${SERVER_HOSTNAME}-glances:
|
||||
plugin:
|
||||
sablier:
|
||||
sablierUrl: http://sablier-service:10000
|
||||
group: ${SERVER_HOSTNAME}-glances
|
||||
sessionDuration: 5m
|
||||
ignoreUserAgent: curl
|
||||
dynamic:
|
||||
displayName: Glances
|
||||
theme: ghost
|
||||
show-details-by-default: true
|
||||
|
||||
sablier-${SERVER_HOSTNAME}-homarr:
|
||||
plugin:
|
||||
sablier:
|
||||
sablierUrl: http://sablier-service:10000
|
||||
group: ${SERVER_HOSTNAME}-homarr
|
||||
sessionDuration: 5m
|
||||
ignoreUserAgent: curl
|
||||
dynamic:
|
||||
displayName: Homarr
|
||||
theme: ghost
|
||||
show-details-by-default: true
|
||||
|
||||
sablier-${SERVER_HOSTNAME}-jellyfin:
|
||||
plugin:
|
||||
sablier:
|
||||
sablierUrl: http://sablier-service:10000
|
||||
group: ${SERVER_HOSTNAME}-jellyfin
|
||||
sessionDuration: 5m
|
||||
ignoreUserAgent: curl
|
||||
dynamic:
|
||||
displayName: Jellyfin
|
||||
theme: ghost
|
||||
show-details-by-default: true
|
||||
|
||||
sablier-${SERVER_HOSTNAME}-jupyter:
|
||||
plugin:
|
||||
sablier:
|
||||
sablierUrl: http://sablier-service:10000
|
||||
group: ${SERVER_HOSTNAME}-jupyter
|
||||
sessionDuration: 5m
|
||||
ignoreUserAgent: curl
|
||||
dynamic:
|
||||
displayName: Jupyter
|
||||
theme: ghost
|
||||
show-details-by-default: true
|
||||
|
||||
sablier-${SERVER_HOSTNAME}-komodo:
|
||||
plugin:
|
||||
sablier:
|
||||
sablierUrl: http://sablier-service:10000
|
||||
group: ${SERVER_HOSTNAME}-komodo
|
||||
sessionDuration: 5m
|
||||
ignoreUserAgent: curl
|
||||
dynamic:
|
||||
displayName: Komodo
|
||||
theme: ghost
|
||||
show-details-by-default: true
|
||||
|
||||
sablier-${SERVER_HOSTNAME}-kopia:
|
||||
plugin:
|
||||
sablier:
|
||||
sablierUrl: http://sablier-service:10000
|
||||
group: ${SERVER_HOSTNAME}-kopia
|
||||
sessionDuration: 5m
|
||||
ignoreUserAgent: curl
|
||||
dynamic:
|
||||
displayName: Kopia
|
||||
theme: ghost
|
||||
show-details-by-default: true
|
||||
|
||||
sablier-${SERVER_HOSTNAME}-mealie:
|
||||
plugin:
|
||||
sablier:
|
||||
sablierUrl: http://sablier-service:10000
|
||||
group: ${SERVER_HOSTNAME}-mealie
|
||||
sessionDuration: 5m
|
||||
ignoreUserAgent: curl
|
||||
dynamic:
|
||||
displayName: Mealie
|
||||
theme: ghost
|
||||
show-details-by-default: true
|
||||
|
||||
sablier-${SERVER_HOSTNAME}-mediawiki:
|
||||
plugin:
|
||||
sablier:
|
||||
sablierUrl: http://sablier-service:10000
|
||||
group: ${SERVER_HOSTNAME}-mediawiki
|
||||
sessionDuration: 5m
|
||||
ignoreUserAgent: curl
|
||||
dynamic:
|
||||
displayName: mediawiki
|
||||
theme: ghost
|
||||
show-details-by-default: true
|
||||
|
||||
sablier-${SERVER_HOSTNAME}-nextcloud:
|
||||
plugin:
|
||||
sablier:
|
||||
sablierUrl: http://sablier-service:10000
|
||||
group: ${SERVER_HOSTNAME}-nextcloud
|
||||
sessionDuration: 5m
|
||||
ignoreUserAgent: curl
|
||||
dynamic:
|
||||
displayName: NextCloud
|
||||
theme: ghost
|
||||
show-details-by-default: true
|
||||
|
||||
sablier-${SERVER_HOSTNAME}-openkm:
|
||||
plugin:
|
||||
sablier:
|
||||
sablierUrl: http://sablier-service:10000
|
||||
group: ${SERVER_HOSTNAME}-openkm
|
||||
sessionDuration: 5m
|
||||
ignoreUserAgent: curl
|
||||
dynamic:
|
||||
displayName: OpenKM
|
||||
theme: ghost
|
||||
show-details-by-default: true
|
||||
|
||||
sablier-${SERVER_HOSTNAME}-openwebui:
|
||||
plugin:
|
||||
sablier:
|
||||
sablierUrl: http://sablier-service:10000
|
||||
group: ${SERVER_HOSTNAME}-openwebui
|
||||
sessionDuration: 5m
|
||||
ignoreUserAgent: curl
|
||||
dynamic:
|
||||
displayName: OpenWebUI
|
||||
theme: ghost
|
||||
show-details-by-default: true
|
||||
|
||||
sablier-${SERVER_HOSTNAME}-pulse:
|
||||
plugin:
|
||||
sablier:
|
||||
sablierUrl: http://sablier-service:10000
|
||||
group: ${SERVER_HOSTNAME}-pulse
|
||||
sessionDuration: 5m
|
||||
ignoreUserAgent: curl
|
||||
dynamic:
|
||||
displayName: Pulse
|
||||
theme: ghost
|
||||
show-details-by-default: true
|
||||
|
||||
sablier-${SERVER_HOSTNAME}-tdarr:
|
||||
plugin:
|
||||
sablier:
|
||||
sablierUrl: http://sablier-service:10000
|
||||
group: ${SERVER_HOSTNAME}-tdarr
|
||||
sessionDuration: 5m
|
||||
ignoreUserAgent: curl
|
||||
dynamic:
|
||||
displayName: Tdarr
|
||||
theme: ghost
|
||||
show-details-by-default: true
|
||||
|
||||
sablier-${SERVER_HOSTNAME}-unmanic:
|
||||
plugin:
|
||||
sablier:
|
||||
sablierUrl: http://sablier-service:10000
|
||||
group: ${SERVER_HOSTNAME}-unmanic
|
||||
sessionDuration: 5m
|
||||
ignoreUserAgent: curl
|
||||
dynamic:
|
||||
displayName: Unmanic
|
||||
theme: ghost
|
||||
show-details-by-default: true
|
||||
|
||||
sablier-${SERVER_HOSTNAME}-wordpress:
|
||||
plugin:
|
||||
sablier:
|
||||
sablierUrl: http://sablier-service:10000
|
||||
group: ${SERVER_HOSTNAME}-wordpress
|
||||
sessionDuration: 5m
|
||||
ignoreUserAgent: curl
|
||||
dynamic:
|
||||
displayName: wordpress
|
||||
theme: ghost
|
||||
show-details-by-default: true
|
||||
|
||||
# Remote Server (${REMOTE_SERVER_HOSTNAME}) Sablier Middlewares
|
||||
sablier-${REMOTE_SERVER_HOSTNAME}-dockge:
|
||||
plugin:
|
||||
sablier:
|
||||
sablierUrl: http://sablier-service:10000
|
||||
group: ${REMOTE_SERVER_HOSTNAME}-dockge
|
||||
sessionDuration: 5m
|
||||
ignoreUserAgent: curl
|
||||
dynamic:
|
||||
displayName: Dockge (${REMOTE_SERVER_HOSTNAME})
|
||||
theme: ghost
|
||||
show-details-by-default: true
|
||||
|
||||
sablier-${REMOTE_SERVER_HOSTNAME}-dozzle:
|
||||
plugin:
|
||||
sablier:
|
||||
sablierUrl: http://sablier-service:10000
|
||||
group: ${REMOTE_SERVER_HOSTNAME}-dozzle
|
||||
sessionDuration: 5m
|
||||
ignoreUserAgent: curl
|
||||
dynamic:
|
||||
displayName: Dozzle (${REMOTE_SERVER_HOSTNAME})
|
||||
theme: ghost
|
||||
show-details-by-default: true
|
||||
|
||||
sablier-${REMOTE_SERVER_HOSTNAME}-glances:
|
||||
plugin:
|
||||
sablier:
|
||||
sablierUrl: http://sablier-service:10000
|
||||
group: ${REMOTE_SERVER_HOSTNAME}-glances
|
||||
sessionDuration: 5m
|
||||
ignoreUserAgent: curl
|
||||
dynamic:
|
||||
displayName: Glances (${REMOTE_SERVER_HOSTNAME})
|
||||
theme: ghost
|
||||
show-details-by-default: true
|
||||
|
||||
sablier-${REMOTE_SERVER_HOSTNAME}-backrest:
|
||||
plugin:
|
||||
sablier:
|
||||
sablierUrl: http://sablier-service:10000
|
||||
group: ${REMOTE_SERVER_HOSTNAME}-backrest
|
||||
sessionDuration: 5m
|
||||
ignoreUserAgent: curl
|
||||
dynamic:
|
||||
displayName: Backrest (${REMOTE_SERVER_HOSTNAME})
|
||||
theme: ghost
|
||||
show-details-by-default: true
|
||||
|
||||
sablier-${REMOTE_SERVER_HOSTNAME}-duplicati:
|
||||
plugin:
|
||||
sablier:
|
||||
sablierUrl: http://sablier-service:10000
|
||||
group: ${REMOTE_SERVER_HOSTNAME}-duplicati
|
||||
sessionDuration: 5m
|
||||
ignoreUserAgent: curl
|
||||
dynamic:
|
||||
displayName: Duplicati (${REMOTE_SERVER_HOSTNAME})
|
||||
theme: ghost
|
||||
show-details-by-default: true
|
||||
|
||||
sablier-${REMOTE_SERVER_HOSTNAME}-homepage:
|
||||
plugin:
|
||||
sablier:
|
||||
sablierUrl: http://sablier-service:10000
|
||||
group: ${REMOTE_SERVER_HOSTNAME}-homepage
|
||||
sessionDuration: 5m
|
||||
ignoreUserAgent: curl
|
||||
dynamic:
|
||||
displayName: Homepage (${REMOTE_SERVER_HOSTNAME})
|
||||
theme: ghost
|
||||
show-details-by-default: true
|
||||
|
||||
sablier-${REMOTE_SERVER_HOSTNAME}-homarr:
|
||||
plugin:
|
||||
sablier:
|
||||
sablierUrl: http://sablier-service:10000
|
||||
group: ${REMOTE_SERVER_HOSTNAME}-homarr
|
||||
sessionDuration: 5m
|
||||
ignoreUserAgent: curl
|
||||
dynamic:
|
||||
displayName: Homarr (${REMOTE_SERVER_HOSTNAME})
|
||||
theme: ghost
|
||||
show-details-by-default: true
|
||||
|
||||
sablier-${REMOTE_SERVER_HOSTNAME}-grafana:
|
||||
plugin:
|
||||
sablier:
|
||||
sablierUrl: http://sablier-service:10000
|
||||
group: ${REMOTE_SERVER_HOSTNAME}-grafana
|
||||
sessionDuration: 5m
|
||||
ignoreUserAgent: curl
|
||||
dynamic:
|
||||
displayName: Grafana (${REMOTE_SERVER_HOSTNAME})
|
||||
theme: ghost
|
||||
show-details-by-default: true
|
||||
|
||||
sablier-${REMOTE_SERVER_HOSTNAME}-prometheus:
|
||||
plugin:
|
||||
sablier:
|
||||
sablierUrl: http://sablier-service:10000
|
||||
group: ${REMOTE_SERVER_HOSTNAME}-prometheus
|
||||
sessionDuration: 5m
|
||||
ignoreUserAgent: curl
|
||||
dynamic:
|
||||
displayName: Prometheus (${REMOTE_SERVER_HOSTNAME})
|
||||
theme: ghost
|
||||
show-details-by-default: true
|
||||
|
||||
sablier-${REMOTE_SERVER_HOSTNAME}-uptime-kuma:
|
||||
plugin:
|
||||
sablier:
|
||||
sablierUrl: http://sablier-service:10000
|
||||
group: ${REMOTE_SERVER_HOSTNAME}-uptime-kuma
|
||||
sessionDuration: 5m
|
||||
ignoreUserAgent: curl
|
||||
dynamic:
|
||||
displayName: Uptime Kuma (${REMOTE_SERVER_HOSTNAME})
|
||||
theme: ghost
|
||||
show-details-by-default: true
|
||||
35
docker-compose/dashboards/deploy-dashboards.sh
Executable file
35
docker-compose/dashboards/deploy-dashboards.sh
Executable file
@@ -0,0 +1,35 @@
|
||||
#!/bin/bash
# Deploy dashboards stack script.
# Run from /opt/stacks/dashboards/ — localizes compose labels and config
# files, brings the stack up, and verifies the homepage container is running.

set -e

# Source common functions (log_info, log_success, log_error, run_cmd,
# load_env_file_safely, localize_compose_labels, localize_config_file).
# The repo path is fixed because this script runs from /opt/stacks/dashboards,
# not from inside the repository; allow an environment override for testing.
REPO_DIR="${REPO_DIR:-/home/kelin/EZ-Homelab}"
# shellcheck source=/dev/null
source "$REPO_DIR/scripts/common.sh"

log_info "Deploying dashboards stack..."

# Load environment
load_env_file_safely .env

# Localize labels in compose file
localize_compose_labels docker-compose.yml

# Localize every YAML config file except the compose file itself.
# NUL-delimited find is safe for paths with spaces (the old unquoted
# $(find ...) loop word-split its output), the parentheses fix the
# -o operator precedence, and `! -name` matches the filename exactly
# (the old `grep -v` excluded any path merely containing the substring).
while IFS= read -r -d '' config_file; do
  localize_config_file "$config_file"
done < <(find . \( -name '*.yml' -o -name '*.yaml' \) ! -name 'docker-compose.yml' -print0)

# Deploy
run_cmd docker compose up -d

# Validate: check container names only, instead of grepping the whole
# `docker ps` table where image names or port strings could false-match.
if docker ps --format '{{.Names}}' | grep -q 'homepage'; then
  log_success "Dashboards stack deployed successfully"
  exit 0
else
  log_error "Dashboards stack deployment failed"
  exit 1
fi
|
||||
@@ -24,7 +24,7 @@ services:
|
||||
- homelab-network
|
||||
- traefik-network
|
||||
ports:
|
||||
- "3003:3000"
|
||||
- '3003:3000'
|
||||
volumes:
|
||||
- ./homepage:/app/config
|
||||
- /var/run/docker.sock:/var/run/docker.sock # For Docker integration do not mount RO
|
||||
@@ -38,24 +38,24 @@ services:
|
||||
# TRAEFIK CONFIGURATION
|
||||
# ==========================================
|
||||
# Service metadata
|
||||
- 'homelab.category=dashboard"
|
||||
- 'homelab.description=Application dashboard"
|
||||
- 'homelab.category=dashboard'
|
||||
- 'homelab.description=Application dashboard'
|
||||
# Traefik reverse proxy (comment/uncomment to disable/enable)
|
||||
# IMPORTANT: On REMOTE SERVERS (where Traefik runs elsewhere):
|
||||
# - COMMENT OUT all traefik.* labels below (don't delete them)
|
||||
# - Routes are configured via external YAML files on the core server
|
||||
# - This prevents conflicts between Docker labels and file provider
|
||||
- 'traefik.enable=true"
|
||||
- 'traefik.docker.network=traefik-network"
|
||||
- 'traefik.http.routers.homepage.rule=Host(`homepage.${DOMAIN}`)"
|
||||
- 'traefik.http.routers.homepage.entrypoints=websecure"
|
||||
- 'traefik.http.routers.homepage.tls=true"
|
||||
- 'traefik.http.routers.homepage.middlewares=authelia@docker"
|
||||
- 'traefik.http.services.homepage.loadbalancer.server.port=3000"
|
||||
- 'traefik.enable=true'
|
||||
- 'traefik.docker.network=traefik-network'
|
||||
- 'traefik.http.routers.homepage.rule=Host(`homepage.${DOMAIN}`)'
|
||||
- 'traefik.http.routers.homepage.entrypoints=websecure'
|
||||
- 'traefik.http.routers.homepage.tls=true'
|
||||
- 'traefik.http.routers.homepage.middlewares=authelia@docker'
|
||||
- 'traefik.http.services.homepage.loadbalancer.server.port=3000'
|
||||
# Sablier lazy loading (disabled by default - uncomment to enable)
|
||||
# - "sablier.enable=true"
|
||||
# - "sablier.group=jasper-homarr"
|
||||
# - "sablier.start-on-demand=true"
|
||||
# - 'sablier.enable=true'
|
||||
# - 'sablier.group=jasper-homarr'
|
||||
# - 'sablier.start-on-demand=true'
|
||||
|
||||
# Homarr - Modern dashboard
|
||||
# Uses Sablier lazy loading - starts on-demand, stops after 5min inactivity
|
||||
@@ -76,7 +76,7 @@ services:
|
||||
- homelab-network
|
||||
- traefik-network
|
||||
ports:
|
||||
- "7575:7575"
|
||||
- '7575:7575'
|
||||
volumes:
|
||||
- ./homarr/config:/app/config/configs
|
||||
- ./homarr/data:/data
|
||||
@@ -85,7 +85,7 @@ services:
|
||||
environment:
|
||||
- TZ=America/New_York
|
||||
healthcheck:
|
||||
test: ["CMD", "curl", "-f", "http://localhost:7575/"]
|
||||
test: ['CMD', 'curl', '-f', 'http://localhost:7575/']
|
||||
interval: 30s
|
||||
timeout: 10s
|
||||
retries: 3
|
||||
@@ -93,21 +93,21 @@ services:
|
||||
labels:
|
||||
# TRAEFIK CONFIGURATION
|
||||
# Service metadata
|
||||
- "com.centurylinklabs.watchtower.enable=true"
|
||||
- 'homelab.category=dashboard"
|
||||
- 'homelab.description=Modern homelab dashboard"
|
||||
- 'traefik.enable=true"
|
||||
- 'com.centurylinklabs.watchtower.enable=true'
|
||||
- 'homelab.category=dashboard'
|
||||
- 'homelab.description=Modern homelab dashboard'
|
||||
- 'traefik.enable=true'
|
||||
# Router configuration
|
||||
- 'traefik.http.routers.homarr.rule=Host(`homarr.${DOMAIN}`)"
|
||||
- 'traefik.http.routers.homarr.entrypoints=websecure"
|
||||
- 'traefik.http.routers.homarr.tls=true"
|
||||
- 'traefik.http.routers.homarr.middlewares=authelia@docker"
|
||||
- 'traefik.http.routers.homarr.rule=Host(`homarr.${DOMAIN}`)'
|
||||
- 'traefik.http.routers.homarr.entrypoints=websecure'
|
||||
- 'traefik.http.routers.homarr.tls=true'
|
||||
- 'traefik.http.routers.homarr.middlewares=authelia@docker'
|
||||
# Service configuration
|
||||
- 'traefik.http.services.homarr.loadbalancer.server.port=7575"
|
||||
- 'traefik.http.services.homarr.loadbalancer.server.port=7575'
|
||||
# Sablier configuration
|
||||
- "sablier.enable=true"
|
||||
- "sablier.group=jasper-homarr"
|
||||
- "sablier.start-on-demand=true"
|
||||
- 'sablier.enable=true'
|
||||
- 'sablier.group=jasper-homarr'
|
||||
- 'sablier.start-on-demand=true'
|
||||
|
||||
# DOCKGE URL CONFIGURATION
|
||||
x-dockge:
|
||||
|
||||
36
docker-compose/dockge/deploy-dockge.sh
Executable file
36
docker-compose/dockge/deploy-dockge.sh
Executable file
@@ -0,0 +1,36 @@
|
||||
#!/bin/bash
# Deploy Dockge stack script.
# Run from /opt/dockge/ — strips secrets Dockge does not need from .env,
# localizes compose labels, brings the stack up, and verifies it is running.

set -e

# Source common functions (log_info, log_success, log_error, run_cmd,
# load_env_file_safely, localize_compose_labels).
# The repo path is fixed because this script runs from /opt/dockge,
# not from inside the repository; allow an environment override for testing.
REPO_DIR="${REPO_DIR:-/home/kelin/EZ-Homelab}"
# shellcheck source=/dev/null
source "$REPO_DIR/scripts/common.sh"

log_info "Deploying Dockge stack..."

# Load environment
load_env_file_safely .env

# Remove sensitive variables from dockge .env (Dockge doesn't need them).
# One sed invocation with multiple expressions rewrites the file once
# instead of four times.
sed -i \
  -e '/^AUTHELIA_ADMIN_PASSWORD_HASH=/d' \
  -e '/^AUTHELIA_JWT_SECRET=/d' \
  -e '/^AUTHELIA_SESSION_SECRET=/d' \
  -e '/^AUTHELIA_STORAGE_ENCRYPTION_KEY=/d' \
  .env

# Localize labels in compose file
localize_compose_labels docker-compose.yml

# Deploy
run_cmd docker compose up -d

# Validate: check container names only, instead of grepping the whole
# `docker ps` table where image names or port strings could false-match.
if docker ps --format '{{.Names}}' | grep -q 'dockge'; then
  log_success "Dockge stack deployed successfully"
  exit 0
else
  log_error "Dockge stack deployment failed"
  exit 1
fi
|
||||
@@ -24,7 +24,7 @@ services:
|
||||
- homelab-network
|
||||
- traefik-network
|
||||
ports:
|
||||
- "5001:5001" # Optional: direct access
|
||||
- '5001:5001' # Optional: direct access
|
||||
volumes:
|
||||
- /var/run/docker.sock:/var/run/docker.sock
|
||||
- /opt/stacks:/opt/stacks # Dockge manages stacks in this directory
|
||||
@@ -37,18 +37,18 @@ services:
|
||||
# TRAEFIK CONFIGURATION
|
||||
# ==========================================
|
||||
# Service metadata
|
||||
- 'homelab.category=infrastructure"
|
||||
- 'homelab.description=Docker Compose stack manager (PRIMARY)"
|
||||
- 'homelab.category=infrastructure'
|
||||
- 'homelab.description=Docker Compose stack manager (PRIMARY)'
|
||||
# Traefik reverse proxy (comment/uncomment to disable/enable)
|
||||
# If Traefik is on a remote server: these labels are NOT USED;
|
||||
# configure external yml files in /traefik/dynamic folder instead.
|
||||
- 'traefik.enable=true"
|
||||
- 'traefik.docker.network=traefik-network"
|
||||
- 'traefik.http.routers.dockge.rule=Host(`dockge.${DOMAIN}`)"
|
||||
- 'traefik.http.routers.dockge.entrypoints=websecure"
|
||||
- 'traefik.http.routers.dockge.tls.certresolver=letsencrypt"
|
||||
- 'traefik.http.routers.dockge.middlewares=authelia@docker"
|
||||
- 'traefik.http.services.dockge.loadbalancer.server.port=5001"
|
||||
- 'traefik.enable=true'
|
||||
- 'traefik.docker.network=traefik-network'
|
||||
- 'traefik.http.routers.dockge.rule=Host(`dockge.${DOMAIN}`)'
|
||||
- 'traefik.http.routers.dockge.entrypoints=websecure'
|
||||
- 'traefik.http.routers.dockge.tls.certresolver=letsencrypt'
|
||||
- 'traefik.http.routers.dockge.middlewares=authelia@docker'
|
||||
- 'traefik.http.services.dockge.loadbalancer.server.port=5001'
|
||||
|
||||
networks:
|
||||
homelab-network:
|
||||
|
||||
@@ -28,8 +28,8 @@ services:
|
||||
- TZ=America/New_York
|
||||
privileged: true
|
||||
labels:
|
||||
- 'homelab.category=iot"
|
||||
- 'homelab.description=Home automation platform"
|
||||
- 'homelab.category=iot'
|
||||
- 'homelab.description=Home automation platform'
|
||||
# Note: network_mode: host means Traefik can't proxy this directly
|
||||
# Use Traefik's file provider or external host routing
|
||||
|
||||
@@ -51,7 +51,7 @@ services:
|
||||
- homelab-network
|
||||
- traefik-network
|
||||
ports:
|
||||
- "6052:6052"
|
||||
- '6052:6052'
|
||||
volumes:
|
||||
- ./esphome/config:/config
|
||||
- /etc/localtime:/etc/localtime:ro
|
||||
@@ -63,18 +63,18 @@ services:
|
||||
# TRAEFIK CONFIGURATION
|
||||
# ==========================================
|
||||
# Service metadata
|
||||
- 'homelab.category=iot"
|
||||
- 'homelab.description=ESP8266/ESP32 firmware manager"
|
||||
- 'homelab.category=iot'
|
||||
- 'homelab.description=ESP8266/ESP32 firmware manager'
|
||||
# Traefik reverse proxy (comment/uncomment to disable/enable)
|
||||
# If Traefik is on a remote server: these labels are NOT USED;
|
||||
# configure external yml files in /traefik/dynamic folder instead.
|
||||
- 'traefik.enable=true"
|
||||
- 'traefik.docker.network=traefik-network"
|
||||
- 'traefik.http.routers.esphome.rule=Host(`esphome.${DOMAIN}`)"
|
||||
- 'traefik.http.routers.esphome.entrypoints=websecure"
|
||||
- 'traefik.http.routers.esphome.tls.certresolver=letsencrypt"
|
||||
- 'traefik.http.routers.esphome.middlewares=authelia@docker"
|
||||
- 'traefik.http.services.esphome.loadbalancer.server.port=6052"
|
||||
- 'traefik.enable=true'
|
||||
- 'traefik.docker.network=traefik-network'
|
||||
- 'traefik.http.routers.esphome.rule=Host(`esphome.${DOMAIN}`)'
|
||||
- 'traefik.http.routers.esphome.entrypoints=websecure'
|
||||
- 'traefik.http.routers.esphome.tls.certresolver=letsencrypt'
|
||||
- 'traefik.http.routers.esphome.middlewares=authelia@docker'
|
||||
- 'traefik.http.services.esphome.loadbalancer.server.port=6052'
|
||||
|
||||
# TasmoAdmin - Tasmota device manager
|
||||
tasmoadmin:
|
||||
@@ -85,7 +85,7 @@ services:
|
||||
- homelab-network
|
||||
- traefik-network
|
||||
ports:
|
||||
- "8084:80"
|
||||
- '8084:80'
|
||||
volumes:
|
||||
- /opt/stacks/tasmoadmin/data:/data
|
||||
environment:
|
||||
@@ -94,18 +94,18 @@ services:
|
||||
# TRAEFIK CONFIGURATION
|
||||
# ==========================================
|
||||
# Service metadata
|
||||
- 'homelab.category=iot"
|
||||
- 'homelab.description=Tasmota device management"
|
||||
- 'homelab.category=iot'
|
||||
- 'homelab.description=Tasmota device management'
|
||||
# Traefik reverse proxy (comment/uncomment to disable/enable)
|
||||
# If Traefik is on a remote server: these labels are NOT USED;
|
||||
# configure external yml files in /traefik/dynamic folder instead.
|
||||
- 'traefik.enable=true"
|
||||
- 'traefik.docker.network=traefik-network"
|
||||
- 'traefik.http.routers.tasmoadmin.rule=Host(`tasmoadmin.${DOMAIN}`)"
|
||||
- 'traefik.http.routers.tasmoadmin.entrypoints=websecure"
|
||||
- 'traefik.http.routers.tasmoadmin.tls.certresolver=letsencrypt"
|
||||
- 'traefik.http.routers.tasmoadmin.middlewares=authelia@docker"
|
||||
- 'traefik.http.services.tasmoadmin.loadbalancer.server.port=80"
|
||||
- 'traefik.enable=true'
|
||||
- 'traefik.docker.network=traefik-network'
|
||||
- 'traefik.http.routers.tasmoadmin.rule=Host(`tasmoadmin.${DOMAIN}`)'
|
||||
- 'traefik.http.routers.tasmoadmin.entrypoints=websecure'
|
||||
- 'traefik.http.routers.tasmoadmin.tls.certresolver=letsencrypt'
|
||||
- 'traefik.http.routers.tasmoadmin.middlewares=authelia@docker'
|
||||
- 'traefik.http.services.tasmoadmin.loadbalancer.server.port=80'
|
||||
|
||||
# MotionEye - Video surveillance
|
||||
motioneye:
|
||||
@@ -116,7 +116,7 @@ services:
|
||||
- homelab-network
|
||||
- traefik-network
|
||||
ports:
|
||||
- "8765:8765"
|
||||
- '8765:8765'
|
||||
volumes:
|
||||
- ./$(basename $file .yml)/config:/etc/motioneye
|
||||
- /mnt/surveillance:/var/lib/motioneye # Large video files on separate drive
|
||||
@@ -126,18 +126,18 @@ services:
|
||||
# TRAEFIK CONFIGURATION
|
||||
# ==========================================
|
||||
# Service metadata
|
||||
- 'homelab.category=iot"
|
||||
- 'homelab.description=Video surveillance system"
|
||||
- 'homelab.category=iot'
|
||||
- 'homelab.description=Video surveillance system'
|
||||
# Traefik reverse proxy (comment/uncomment to disable/enable)
|
||||
# If Traefik is on a remote server: these labels are NOT USED;
|
||||
# configure external yml files in /traefik/dynamic folder instead.
|
||||
- 'traefik.enable=true"
|
||||
- 'traefik.docker.network=traefik-network"
|
||||
- 'traefik.http.routers.motioneye.rule=Host(`motioneye.${DOMAIN}`)"
|
||||
- 'traefik.http.routers.motioneye.entrypoints=websecure"
|
||||
- 'traefik.http.routers.motioneye.tls.certresolver=letsencrypt"
|
||||
- 'traefik.http.routers.motioneye.middlewares=authelia@docker"
|
||||
- 'traefik.http.services.motioneye.loadbalancer.server.port=8765"
|
||||
- 'traefik.enable=true'
|
||||
- 'traefik.docker.network=traefik-network'
|
||||
- 'traefik.http.routers.motioneye.rule=Host(`motioneye.${DOMAIN}`)'
|
||||
- 'traefik.http.routers.motioneye.entrypoints=websecure'
|
||||
- 'traefik.http.routers.motioneye.tls.certresolver=letsencrypt'
|
||||
- 'traefik.http.routers.motioneye.middlewares=authelia@docker'
|
||||
- 'traefik.http.services.motioneye.loadbalancer.server.port=8765'
|
||||
|
||||
# Node-RED - Flow-based automation (Home Assistant addon alternative)
|
||||
nodered:
|
||||
@@ -157,7 +157,7 @@ services:
|
||||
- homelab-network
|
||||
- traefik-network
|
||||
ports:
|
||||
- "1880:1880"
|
||||
- '1880:1880'
|
||||
volumes:
|
||||
- /opt/stacks/nodered/data:/data
|
||||
environment:
|
||||
@@ -166,18 +166,18 @@ services:
|
||||
# TRAEFIK CONFIGURATION
|
||||
# ==========================================
|
||||
# Service metadata
|
||||
- 'homelab.category=iot"
|
||||
- 'homelab.description=Flow-based automation programming"
|
||||
- 'homelab.category=iot'
|
||||
- 'homelab.description=Flow-based automation programming'
|
||||
# Traefik reverse proxy (comment/uncomment to disable/enable)
|
||||
# If Traefik is on a remote server: these labels are NOT USED;
|
||||
# configure external yml files in /traefik/dynamic folder instead.
|
||||
- 'traefik.enable=true"
|
||||
- 'traefik.docker.network=traefik-network"
|
||||
- 'traefik.http.routers.nodered.rule=Host(`nodered.${DOMAIN}`)"
|
||||
- 'traefik.http.routers.nodered.entrypoints=websecure"
|
||||
- 'traefik.http.routers.nodered.tls.certresolver=letsencrypt"
|
||||
- 'traefik.http.routers.nodered.middlewares=authelia@docker"
|
||||
- 'traefik.http.services.nodered.loadbalancer.server.port=1880"
|
||||
- 'traefik.enable=true'
|
||||
- 'traefik.docker.network=traefik-network'
|
||||
- 'traefik.http.routers.nodered.rule=Host(`nodered.${DOMAIN}`)'
|
||||
- 'traefik.http.routers.nodered.entrypoints=websecure'
|
||||
- 'traefik.http.routers.nodered.tls.certresolver=letsencrypt'
|
||||
- 'traefik.http.routers.nodered.middlewares=authelia@docker'
|
||||
- 'traefik.http.services.nodered.loadbalancer.server.port=1880'
|
||||
|
||||
# Mosquitto - MQTT broker (Home Assistant addon alternative)
|
||||
# Used by: Home Assistant, ESPHome, Tasmota devices
|
||||
@@ -188,15 +188,15 @@ services:
|
||||
networks:
|
||||
- homelab-network
|
||||
ports:
|
||||
- "1883:1883" # MQTT
|
||||
- "9001:9001" # Websockets
|
||||
- '1883:1883' # MQTT
|
||||
- '9001:9001' # Websockets
|
||||
volumes:
|
||||
- ./mosquitto/config:/mosquitto/config
|
||||
- ./mosquitto/data:/mosquitto/data
|
||||
- ./mosquitto/log:/mosquitto/log
|
||||
labels:
|
||||
- 'homelab.category=iot"
|
||||
- 'homelab.description=MQTT message broker"
|
||||
- 'homelab.category=iot'
|
||||
- 'homelab.description=MQTT message broker'
|
||||
|
||||
# Zigbee2MQTT - Zigbee to MQTT bridge (DISABLED - requires USB adapter)
|
||||
# NOTE: Requires USB Zigbee adapter (e.g., ConBee II, Sonoff ZBDongle)
|
||||
@@ -219,14 +219,14 @@ services:
|
||||
# environment:
|
||||
# - TZ=America/New_York
|
||||
# labels:
|
||||
# - 'homelab.category=iot"
|
||||
# - 'homelab.description=Zigbee to MQTT bridge"
|
||||
# - 'traefik.enable=true"
|
||||
# - 'traefik.http.routers.zigbee2mqtt.rule=Host(`zigbee2mqtt.${DOMAIN}`)"
|
||||
# - 'traefik.http.routers.zigbee2mqtt.entrypoints=websecure"
|
||||
# - 'traefik.http.routers.zigbee2mqtt.tls.certresolver=letsencrypt"
|
||||
# - 'traefik.http.routers.zigbee2mqtt.middlewares=authelia@docker"
|
||||
# - 'traefik.http.services.zigbee2mqtt.loadbalancer.server.port=8080"
|
||||
# - 'homelab.category=iot'
|
||||
# - 'homelab.description=Zigbee to MQTT bridge'
|
||||
# - 'traefik.enable=true'
|
||||
# - 'traefik.http.routers.zigbee2mqtt.rule=Host(`zigbee2mqtt.${DOMAIN}`)'
|
||||
# - 'traefik.http.routers.zigbee2mqtt.entrypoints=websecure'
|
||||
# - 'traefik.http.routers.zigbee2mqtt.tls.certresolver=letsencrypt'
|
||||
# - 'traefik.http.routers.zigbee2mqtt.middlewares=authelia@docker'
|
||||
# - 'traefik.http.services.zigbee2mqtt.loadbalancer.server.port=8080'
|
||||
|
||||
networks:
|
||||
homelab-network:
|
||||
|
||||
35
docker-compose/infrastructure/deploy-infrastructure.sh
Executable file
35
docker-compose/infrastructure/deploy-infrastructure.sh
Executable file
@@ -0,0 +1,35 @@
|
||||
#!/bin/bash
# Deploy infrastructure stack script.
# Run from /opt/stacks/infrastructure/ — localizes compose labels and config
# files, brings the stack up, and verifies pihole and watchtower are running.

set -e

# Source common functions (log_info, log_success, log_error, run_cmd,
# load_env_file_safely, localize_compose_labels, localize_config_file).
# The repo path is fixed because this script runs from
# /opt/stacks/infrastructure, not from inside the repository; allow an
# environment override for testing.
REPO_DIR="${REPO_DIR:-/home/kelin/EZ-Homelab}"
# shellcheck source=/dev/null
source "$REPO_DIR/scripts/common.sh"

log_info "Deploying infrastructure stack..."

# Load environment
load_env_file_safely .env

# Localize labels in compose file
localize_compose_labels docker-compose.yml

# Localize every YAML config file except the compose file itself.
# NUL-delimited find is safe for paths with spaces (the old unquoted
# $(find ...) loop word-split its output), the parentheses fix the
# -o operator precedence, and `! -name` matches the filename exactly
# (the old `grep -v` excluded any path merely containing the substring).
while IFS= read -r -d '' config_file; do
  localize_config_file "$config_file"
done < <(find . \( -name '*.yml' -o -name '*.yaml' \) ! -name 'docker-compose.yml' -print0)

# Deploy
run_cmd docker compose up -d

# Validate: snapshot container names once, then check both services —
# avoids two `docker ps` round trips and false substring matches against
# image names or port strings in the full table output.
running_containers="$(docker ps --format '{{.Names}}')"
if grep -q 'pihole' <<<"$running_containers" && grep -q 'watchtower' <<<"$running_containers"; then
  log_success "Infrastructure stack deployed successfully"
  exit 0
else
  log_error "Infrastructure stack deployment failed"
  exit 1
fi
|
||||
@@ -11,7 +11,7 @@ services:
|
||||
# REQUIREMENTS FOR SABLIER INTEGRATION:
|
||||
# 1. Docker daemon must be configured to listen on TCP port 2375 (not just unix socket)
|
||||
# 2. Firewall must allow access to port 2375 from Sablier service
|
||||
# 3. Docker daemon config should include: "hosts": ["tcp://0.0.0.0:2375", "unix:///var/run/docker.sock"]
|
||||
# 3. Docker daemon config should include: 'hosts': ['tcp://0.0.0.0:2375', 'unix:///var/run/docker.sock']
|
||||
# 4. For security, consider restricting access to specific IP ranges or using TLS
|
||||
# 5. dockerproxy runs for additional security but doesn't expose port 2375 (handled by Docker daemon)
|
||||
image: tecnativa/docker-socket-proxy:latest
|
||||
@@ -55,8 +55,8 @@ services:
|
||||
- homelab-network
|
||||
- traefik-network
|
||||
ports:
|
||||
- "53:53/tcp" # DNS TCP
|
||||
- "53:53/udp" # DNS UDP
|
||||
- '53:53/tcp' # DNS TCP
|
||||
- '53:53/udp' # DNS UDP
|
||||
volumes:
|
||||
- ./pihole/etc-pihole:/etc/pihole
|
||||
- ./pihole/etc-dnsmasq.d:/etc/dnsmasq.d
|
||||
@@ -73,20 +73,20 @@ services:
|
||||
# TRAEFIK CONFIGURATION
|
||||
# ==========================================
|
||||
# Service metadata
|
||||
- 'homelab.category=infrastructure"
|
||||
- 'homelab.description=Network-wide ad blocking and DNS"
|
||||
- 'homelab.category=infrastructure'
|
||||
- 'homelab.description=Network-wide ad blocking and DNS'
|
||||
# Traefik reverse proxy (comment/uncomment to disable/enable)
|
||||
# IMPORTANT: On REMOTE SERVERS (where Traefik runs elsewhere):
|
||||
# - COMMENT OUT all traefik.* labels below (don't delete them)
|
||||
# - Routes are configured via external YAML files on the core server
|
||||
# - This prevents conflicts between Docker labels and file provider
|
||||
- 'traefik.enable=true"
|
||||
- 'traefik.docker.network=traefik-network"
|
||||
- 'traefik.http.routers.pihole.rule=Host(`pihole.${DOMAIN}`)"
|
||||
- 'traefik.http.routers.pihole.entrypoints=websecure"
|
||||
- 'traefik.http.routers.pihole.tls.certresolver=letsencrypt"
|
||||
- 'traefik.http.routers.pihole.middlewares=authelia@docker"
|
||||
- 'traefik.http.services.pihole.loadbalancer.server.port=80"
|
||||
- 'traefik.enable=true'
|
||||
- 'traefik.docker.network=traefik-network'
|
||||
- 'traefik.http.routers.pihole.rule=Host(`pihole.${DOMAIN}`)'
|
||||
- 'traefik.http.routers.pihole.entrypoints=websecure'
|
||||
- 'traefik.http.routers.pihole.tls.certresolver=letsencrypt'
|
||||
- 'traefik.http.routers.pihole.middlewares=authelia@docker'
|
||||
- 'traefik.http.services.pihole.loadbalancer.server.port=80'
|
||||
|
||||
# Watchtower - Automatic container updates
|
||||
watchtower:
|
||||
@@ -105,8 +105,8 @@ services:
|
||||
- WATCHTOWER_NOTIFICATIONS=shoutrrr
|
||||
- WATCHTOWER_NOTIFICATION_URL=${WATCHTOWER_NOTIFICATION_URL}
|
||||
labels:
|
||||
- 'homelab.category=infrastructure"
|
||||
- 'homelab.description=Automatic Docker container updates"
|
||||
- 'homelab.category=infrastructure'
|
||||
- 'homelab.description=Automatic Docker container updates'
|
||||
|
||||
# Dozzle - Real-time Docker log viewer
|
||||
# Uses Sablier lazy loading - starts on-demand, stops after 5min inactivity
|
||||
@@ -127,7 +127,7 @@ services:
|
||||
- homelab-network
|
||||
- traefik-network
|
||||
ports:
|
||||
- "8085:8080"
|
||||
- '8085:8080'
|
||||
volumes:
|
||||
- /var/run/docker.sock:/var/run/docker.sock:ro
|
||||
environment:
|
||||
@@ -135,7 +135,7 @@ services:
|
||||
- DOZZLE_TAILSIZE=300
|
||||
- DOZZLE_FILTER=status=running
|
||||
healthcheck:
|
||||
test: ["CMD", "/dozzle", "healthcheck"]
|
||||
test: ['CMD', '/dozzle', 'healthcheck']
|
||||
interval: 30s
|
||||
timeout: 10s
|
||||
retries: 3
|
||||
@@ -143,22 +143,22 @@ services:
|
||||
labels:
|
||||
# TRAEFIK CONFIGURATION
|
||||
# Service metadata
|
||||
- "com.centurylinklabs.watchtower.enable=true"
|
||||
- 'homelab.category=infrastructure"
|
||||
- 'homelab.description=Real-time Docker log viewer"
|
||||
- 'traefik.enable=true"
|
||||
- 'traefik.docker.network=traefik-network"
|
||||
- 'com.centurylinklabs.watchtower.enable=true'
|
||||
- 'homelab.category=infrastructure'
|
||||
- 'homelab.description=Real-time Docker log viewer'
|
||||
- 'traefik.enable=true'
|
||||
- 'traefik.docker.network=traefik-network'
|
||||
# Router configuration
|
||||
- 'traefik.http.routers.dozzle.rule=Host(`dozzle.jasper.${DOMAIN}`)"
|
||||
- 'traefik.http.routers.dozzle.entrypoints=websecure"
|
||||
- 'traefik.http.routers.dozzle.tls=true"
|
||||
- 'traefik.http.routers.dozzle.middlewares=authelia@docker"
|
||||
- 'traefik.http.routers.dozzle.rule=Host(`dozzle.jasper.${DOMAIN}`)'
|
||||
- 'traefik.http.routers.dozzle.entrypoints=websecure'
|
||||
- 'traefik.http.routers.dozzle.tls=true'
|
||||
- 'traefik.http.routers.dozzle.middlewares=authelia@docker'
|
||||
# Service configuration
|
||||
- 'traefik.http.services.dozzle.loadbalancer.server.port=8085"
|
||||
- 'traefik.http.services.dozzle.loadbalancer.server.port=8085'
|
||||
# Sablier configuration
|
||||
- "sablier.enable=true"
|
||||
- "sablier.group=jasper-dozzle"
|
||||
- "sablier.start-on-demand=true"
|
||||
- 'sablier.enable=true'
|
||||
- 'sablier.group=jasper-dozzle'
|
||||
- 'sablier.start-on-demand=true'
|
||||
|
||||
# Glances - System monitoring
|
||||
# Uses Sablier lazy loading - starts on-demand, stops after 30min inactivity
|
||||
@@ -179,7 +179,7 @@ services:
|
||||
- homelab-network
|
||||
- traefik-network
|
||||
ports:
|
||||
- "61208:61208"
|
||||
- '61208:61208'
|
||||
pid: host
|
||||
volumes:
|
||||
- /var/run/docker.sock:/var/run/docker.sock:ro
|
||||
@@ -187,7 +187,7 @@ services:
|
||||
environment:
|
||||
- GLANCES_OPT=-w
|
||||
healthcheck:
|
||||
test: ["CMD", "curl", "-f", "http://localhost:61208/"]
|
||||
test: ['CMD', 'curl', '-f', 'http://localhost:61208/']
|
||||
interval: 30s
|
||||
timeout: 10s
|
||||
retries: 3
|
||||
@@ -195,22 +195,22 @@ services:
|
||||
labels:
|
||||
# TRAEFIK CONFIGURATION
|
||||
# Service metadata
|
||||
- "com.centurylinklabs.watchtower.enable=true"
|
||||
- 'homelab.category=infrastructure"
|
||||
- 'homelab.description=System and Docker monitoring"
|
||||
- 'traefik.enable=true"
|
||||
- 'traefik.docker.network=traefik-network"
|
||||
- 'com.centurylinklabs.watchtower.enable=true'
|
||||
- 'homelab.category=infrastructure'
|
||||
- 'homelab.description=System and Docker monitoring'
|
||||
- 'traefik.enable=true'
|
||||
- 'traefik.docker.network=traefik-network'
|
||||
# Router configuration
|
||||
- 'traefik.http.routers.glances.rule=Host(`glances.jasper.${DOMAIN}`)"
|
||||
- 'traefik.http.routers.glances.entrypoints=websecure"
|
||||
- 'traefik.http.routers.glances.tls=true"
|
||||
- 'traefik.http.routers.glances.middlewares=authelia@docker"
|
||||
- 'traefik.http.routers.glances.rule=Host(`glances.jasper.${DOMAIN}`)'
|
||||
- 'traefik.http.routers.glances.entrypoints=websecure'
|
||||
- 'traefik.http.routers.glances.tls=true'
|
||||
- 'traefik.http.routers.glances.middlewares=authelia@docker'
|
||||
# Service configuration
|
||||
- 'traefik.http.services.glances.loadbalancer.server.port=61208"
|
||||
- 'traefik.http.services.glances.loadbalancer.server.port=61208'
|
||||
# Sablier configuration
|
||||
- "sablier.enable=true"
|
||||
- "sablier.group=jasper-glances"
|
||||
- "sablier.start-on-demand=true"
|
||||
- 'sablier.enable=true'
|
||||
- 'sablier.group=jasper-glances'
|
||||
- 'sablier.start-on-demand=true'
|
||||
|
||||
# Code Server - VS Code in browser
|
||||
# Uses Sablier lazy loading - starts on-demand, stops after 30min inactivity
|
||||
@@ -231,7 +231,7 @@ services:
|
||||
- homelab-network
|
||||
- traefik-network
|
||||
ports:
|
||||
- "8079:8443"
|
||||
- '8079:8443'
|
||||
volumes:
|
||||
- ./code-server/config:/config
|
||||
- /opt/stacks:/opt/stacks # Access to all stacks
|
||||
@@ -243,7 +243,7 @@ services:
|
||||
- PASSWORD=${CODE_SERVER_PASSWORD}
|
||||
- SUDO_PASSWORD=${CODE_SERVER_SUDO_PASSWORD}
|
||||
healthcheck:
|
||||
test: ["CMD", "curl", "-f", "http://localhost:8443/"]
|
||||
test: ['CMD', 'curl', '-f', 'http://localhost:8443/']
|
||||
interval: 30s
|
||||
timeout: 10s
|
||||
retries: 3
|
||||
@@ -251,22 +251,22 @@ services:
|
||||
labels:
|
||||
# TRAEFIK CONFIGURATION
|
||||
# Service metadata
|
||||
- "com.centurylinklabs.watchtower.enable=true"
|
||||
- 'homelab.category=infrastructure"
|
||||
- 'homelab.description=VS Code in browser"
|
||||
- 'traefik.enable=true"
|
||||
- 'traefik.docker.network=traefik-network"
|
||||
- 'com.centurylinklabs.watchtower.enable=true'
|
||||
- 'homelab.category=infrastructure'
|
||||
- 'homelab.description=VS Code in browser'
|
||||
- 'traefik.enable=true'
|
||||
- 'traefik.docker.network=traefik-network'
|
||||
# Router configuration
|
||||
- 'traefik.http.routers.code-server.rule=Host(`code.${DOMAIN}`)"
|
||||
- 'traefik.http.routers.code-server.entrypoints=websecure"
|
||||
- 'traefik.http.routers.code-server.tls.certresolver=letsencrypt"
|
||||
- 'traefik.http.routers.code-server.middlewares=authelia@docker"
|
||||
- 'traefik.http.routers.code-server.rule=Host(`code.${DOMAIN}`)'
|
||||
- 'traefik.http.routers.code-server.entrypoints=websecure'
|
||||
- 'traefik.http.routers.code-server.tls.certresolver=letsencrypt'
|
||||
- 'traefik.http.routers.code-server.middlewares=authelia@docker'
|
||||
# Service configuration
|
||||
- 'traefik.http.services.code-server.loadbalancer.server.port=8443"
|
||||
- 'traefik.http.services.code-server.loadbalancer.server.port=8443'
|
||||
# Sablier configuration
|
||||
- "sablier.enable=true"
|
||||
- "sablier.group=jasper-code-server"
|
||||
- "sablier.start-on-demand=true"
|
||||
- 'sablier.enable=true'
|
||||
- 'sablier.group=jasper-code-server'
|
||||
- 'sablier.start-on-demand=true'
|
||||
|
||||
x-dockge:
|
||||
urls:
|
||||
|
||||
@@ -14,7 +14,7 @@ services:
|
||||
- homelab-network
|
||||
- traefik-network
|
||||
ports:
|
||||
- "8989:8989"
|
||||
- '8989:8989'
|
||||
volumes:
|
||||
- ./sonarr/config:/config
|
||||
- /mnt/media:/media
|
||||
@@ -24,7 +24,7 @@ services:
|
||||
- PGID=1000
|
||||
- TZ=America/New_York
|
||||
healthcheck:
|
||||
test: ["CMD", "curl", "-f", "http://localhost:8989/"]
|
||||
test: ['CMD', 'curl', '-f', 'http://localhost:8989/']
|
||||
interval: 30s
|
||||
timeout: 10s
|
||||
retries: 3
|
||||
@@ -33,22 +33,22 @@ services:
|
||||
# TRAEFIK CONFIGURATION
|
||||
# ==========================================
|
||||
# Service metadata
|
||||
- 'homelab.category=media"
|
||||
- 'homelab.description=TV show management and automation"
|
||||
- "com.centurylinklabs.watchtower.enable=true"
|
||||
- 'homelab.category=media'
|
||||
- 'homelab.description=TV show management and automation'
|
||||
- 'com.centurylinklabs.watchtower.enable=true'
|
||||
# Traefik reverse proxy (comment/uncomment to disable/enable)
|
||||
# If Traefik is on a remote server: these labels are NOT USED;
|
||||
# configure external yml files in /traefik/dynamic folder instead.
|
||||
- 'traefik.enable=true"
|
||||
- 'traefik.docker.network=traefik-network"
|
||||
- 'traefik.http.routers.sonarr.rule=Host(`sonarr.${DOMAIN}`)"
|
||||
- 'traefik.http.routers.sonarr.entrypoints=websecure"
|
||||
- 'traefik.http.routers.sonarr.tls.certresolver=letsencrypt"
|
||||
- 'traefik.http.routers.sonarr.middlewares=authelia@docker"
|
||||
- 'traefik.http.services.sonarr.loadbalancer.server.port=8989"
|
||||
- "sablier.enable=true"
|
||||
- "sablier.group=jasper-arr"
|
||||
- "sablier.start-on-demand=true"
|
||||
- 'traefik.enable=true'
|
||||
- 'traefik.docker.network=traefik-network'
|
||||
- 'traefik.http.routers.sonarr.rule=Host(`sonarr.${DOMAIN}`)'
|
||||
- 'traefik.http.routers.sonarr.entrypoints=websecure'
|
||||
- 'traefik.http.routers.sonarr.tls.certresolver=letsencrypt'
|
||||
- 'traefik.http.routers.sonarr.middlewares=authelia@docker'
|
||||
- 'traefik.http.services.sonarr.loadbalancer.server.port=8989'
|
||||
- 'sablier.enable=true'
|
||||
- 'sablier.group=jasper-arr'
|
||||
- 'sablier.start-on-demand=true'
|
||||
|
||||
# Radarr - Movie automation
|
||||
radarr:
|
||||
@@ -59,7 +59,7 @@ services:
|
||||
- homelab-network
|
||||
- traefik-network
|
||||
ports:
|
||||
- "7878:7878"
|
||||
- '7878:7878'
|
||||
volumes:
|
||||
- ./radarr/config:/config
|
||||
- /mnt/media:/media
|
||||
@@ -69,7 +69,7 @@ services:
|
||||
- PGID=1000
|
||||
- TZ=America/New_York
|
||||
healthcheck:
|
||||
test: ["CMD", "curl", "-f", "http://localhost:7878/"]
|
||||
test: ['CMD', 'curl', '-f', 'http://localhost:7878/']
|
||||
interval: 30s
|
||||
timeout: 10s
|
||||
retries: 3
|
||||
@@ -78,22 +78,22 @@ services:
|
||||
# TRAEFIK CONFIGURATION
|
||||
# ==========================================
|
||||
# Service metadata
|
||||
- 'homelab.category=media"
|
||||
- 'homelab.description=Movie management and automation"
|
||||
- "com.centurylinklabs.watchtower.enable=true"
|
||||
- 'homelab.category=media'
|
||||
- 'homelab.description=Movie management and automation'
|
||||
- 'com.centurylinklabs.watchtower.enable=true'
|
||||
# Traefik reverse proxy (comment/uncomment to disable/enable)
|
||||
# If Traefik is on a remote server: these labels are NOT USED;
|
||||
# configure external yml files in /traefik/dynamic folder instead.
|
||||
- 'traefik.enable=true"
|
||||
- 'traefik.docker.network=traefik-network"
|
||||
- 'traefik.http.routers.radarr.rule=Host(`radarr.${DOMAIN}`)"
|
||||
- 'traefik.http.routers.radarr.entrypoints=websecure"
|
||||
- 'traefik.http.routers.radarr.tls.certresolver=letsencrypt"
|
||||
- 'traefik.http.routers.radarr.middlewares=authelia@docker"
|
||||
- 'traefik.http.services.radarr.loadbalancer.server.port=7878"
|
||||
- "sablier.enable=true"
|
||||
- "sablier.group=jasper-arr"
|
||||
- "sablier.start-on-demand=true"
|
||||
- 'traefik.enable=true'
|
||||
- 'traefik.docker.network=traefik-network'
|
||||
- 'traefik.http.routers.radarr.rule=Host(`radarr.${DOMAIN}`)'
|
||||
- 'traefik.http.routers.radarr.entrypoints=websecure'
|
||||
- 'traefik.http.routers.radarr.tls.certresolver=letsencrypt'
|
||||
- 'traefik.http.routers.radarr.middlewares=authelia@docker'
|
||||
- 'traefik.http.services.radarr.loadbalancer.server.port=7878'
|
||||
- 'sablier.enable=true'
|
||||
- 'sablier.group=jasper-arr'
|
||||
- 'sablier.start-on-demand=true'
|
||||
|
||||
# Prowlarr - Indexer manager
|
||||
# Access at: https://prowlarr.yourdomain.duckdns.org
|
||||
@@ -105,7 +105,7 @@ services:
|
||||
- homelab-network
|
||||
- traefik-network
|
||||
ports:
|
||||
- "9696:9696"
|
||||
- '9696:9696'
|
||||
volumes:
|
||||
- ./prowlarr/config:/config
|
||||
environment:
|
||||
@@ -113,7 +113,7 @@ services:
|
||||
- PGID=1000
|
||||
- TZ=America/New_York
|
||||
healthcheck:
|
||||
test: ["CMD", "curl", "-f", "http://localhost:9696/"]
|
||||
test: ['CMD', 'curl', '-f', 'http://localhost:9696/']
|
||||
interval: 30s
|
||||
timeout: 10s
|
||||
retries: 3
|
||||
@@ -122,22 +122,22 @@ services:
|
||||
# TRAEFIK CONFIGURATION
|
||||
# ==========================================
|
||||
# Service metadata
|
||||
- 'homelab.category=media"
|
||||
- 'homelab.description=Indexer manager for Sonarr/Radarr"
|
||||
- "com.centurylinklabs.watchtower.enable=true"
|
||||
- 'homelab.category=media'
|
||||
- 'homelab.description=Indexer manager for Sonarr/Radarr'
|
||||
- 'com.centurylinklabs.watchtower.enable=true'
|
||||
# Traefik reverse proxy (comment/uncomment to disable/enable)
|
||||
# If Traefik is on a remote server: these labels are NOT USED;
|
||||
# configure external yml files in /traefik/dynamic folder instead.
|
||||
- 'traefik.enable=true"
|
||||
- 'traefik.docker.network=traefik-network"
|
||||
- 'traefik.http.routers.prowlarr.rule=Host(`prowlarr.${DOMAIN}`)"
|
||||
- 'traefik.http.routers.prowlarr.entrypoints=websecure"
|
||||
- 'traefik.http.routers.prowlarr.tls.certresolver=letsencrypt"
|
||||
- 'traefik.http.routers.prowlarr.middlewares=authelia@docker"
|
||||
- 'traefik.http.services.prowlarr.loadbalancer.server.port=9696"
|
||||
- "sablier.enable=true"
|
||||
- "sablier.group=jasper-arr"
|
||||
- "sablier.start-on-demand=true"
|
||||
- 'traefik.enable=true'
|
||||
- 'traefik.docker.network=traefik-network'
|
||||
- 'traefik.http.routers.prowlarr.rule=Host(`prowlarr.${DOMAIN}`)'
|
||||
- 'traefik.http.routers.prowlarr.entrypoints=websecure'
|
||||
- 'traefik.http.routers.prowlarr.tls.certresolver=letsencrypt'
|
||||
- 'traefik.http.routers.prowlarr.middlewares=authelia@docker'
|
||||
- 'traefik.http.services.prowlarr.loadbalancer.server.port=9696'
|
||||
- 'sablier.enable=true'
|
||||
- 'sablier.group=jasper-arr'
|
||||
- 'sablier.start-on-demand=true'
|
||||
|
||||
# Readarr - Ebook and audiobook management
|
||||
readarr:
|
||||
@@ -148,7 +148,7 @@ services:
|
||||
- homelab-network
|
||||
- traefik-network
|
||||
ports:
|
||||
- "8787:8787"
|
||||
- '8787:8787'
|
||||
volumes:
|
||||
- ./readarr/config:/config
|
||||
- /mnt/media/books:/books
|
||||
@@ -161,22 +161,22 @@ services:
|
||||
# TRAEFIK CONFIGURATION
|
||||
# ==========================================
|
||||
# Service metadata
|
||||
- 'homelab.category=media"
|
||||
- 'homelab.description=Ebook and audiobook management"
|
||||
- "com.centurylinklabs.watchtower.enable=true"
|
||||
- 'homelab.category=media'
|
||||
- 'homelab.description=Ebook and audiobook management'
|
||||
- 'com.centurylinklabs.watchtower.enable=true'
|
||||
# Traefik reverse proxy (comment/uncomment to disable/enable)
|
||||
# If Traefik is on a remote server: these labels are NOT USED;
|
||||
# configure external yml files in /traefik/dynamic folder instead.
|
||||
- 'traefik.enable=true"
|
||||
- 'traefik.docker.network=traefik-network"
|
||||
- 'traefik.http.routers.readarr.rule=Host(`readarr.${DOMAIN}`)"
|
||||
- 'traefik.http.routers.readarr.entrypoints=websecure"
|
||||
- 'traefik.http.routers.readarr.tls.certresolver=letsencrypt"
|
||||
- 'traefik.http.routers.readarr.middlewares=authelia@docker"
|
||||
- 'traefik.http.services.readarr.loadbalancer.server.port=8787"
|
||||
- "sablier.enable=true"
|
||||
- "sablier.group=jasper-arr"
|
||||
- "sablier.start-on-demand=true"
|
||||
- 'traefik.enable=true'
|
||||
- 'traefik.docker.network=traefik-network'
|
||||
- 'traefik.http.routers.readarr.rule=Host(`readarr.${DOMAIN}`)'
|
||||
- 'traefik.http.routers.readarr.entrypoints=websecure'
|
||||
- 'traefik.http.routers.readarr.tls.certresolver=letsencrypt'
|
||||
- 'traefik.http.routers.readarr.middlewares=authelia@docker'
|
||||
- 'traefik.http.services.readarr.loadbalancer.server.port=8787'
|
||||
- 'sablier.enable=true'
|
||||
- 'sablier.group=jasper-arr'
|
||||
- 'sablier.start-on-demand=true'
|
||||
|
||||
# Lidarr - Music collection manager
|
||||
lidarr:
|
||||
@@ -187,7 +187,7 @@ services:
|
||||
- homelab-network
|
||||
- traefik-network
|
||||
ports:
|
||||
- "8686:8686"
|
||||
- '8686:8686'
|
||||
volumes:
|
||||
- ./lidarr/config:/config
|
||||
- /mnt/media/music:/music
|
||||
@@ -200,22 +200,22 @@ services:
|
||||
# TRAEFIK CONFIGURATION
|
||||
# ==========================================
|
||||
# Service metadata
|
||||
- 'homelab.category=media"
|
||||
- 'homelab.description=Music collection manager"
|
||||
- "com.centurylinklabs.watchtower.enable=true"
|
||||
- 'homelab.category=media'
|
||||
- 'homelab.description=Music collection manager'
|
||||
- 'com.centurylinklabs.watchtower.enable=true'
|
||||
# Traefik reverse proxy (comment/uncomment to disable/enable)
|
||||
# If Traefik is on a remote server: these labels are NOT USED;
|
||||
# configure external yml files in /traefik/dynamic folder instead.
|
||||
- 'traefik.enable=true"
|
||||
- 'traefik.docker.network=traefik-network"
|
||||
- 'traefik.http.routers.lidarr.rule=Host(`lidarr.${DOMAIN}`)"
|
||||
- 'traefik.http.routers.lidarr.entrypoints=websecure"
|
||||
- 'traefik.http.routers.lidarr.tls.certresolver=letsencrypt"
|
||||
- 'traefik.http.routers.lidarr.middlewares=authelia@docker"
|
||||
- 'traefik.http.services.lidarr.loadbalancer.server.port=8686"
|
||||
- "sablier.enable=true"
|
||||
- "sablier.group=jasper-arr"
|
||||
- "sablier.start-on-demand=true"
|
||||
- 'traefik.enable=true'
|
||||
- 'traefik.docker.network=traefik-network'
|
||||
- 'traefik.http.routers.lidarr.rule=Host(`lidarr.${DOMAIN}`)'
|
||||
- 'traefik.http.routers.lidarr.entrypoints=websecure'
|
||||
- 'traefik.http.routers.lidarr.tls.certresolver=letsencrypt'
|
||||
- 'traefik.http.routers.lidarr.middlewares=authelia@docker'
|
||||
- 'traefik.http.services.lidarr.loadbalancer.server.port=8686'
|
||||
- 'sablier.enable=true'
|
||||
- 'sablier.group=jasper-arr'
|
||||
- 'sablier.start-on-demand=true'
|
||||
|
||||
# Lazy Librarian - Book manager
|
||||
lazylibrarian:
|
||||
@@ -226,7 +226,7 @@ services:
|
||||
- homelab-network
|
||||
- traefik-network
|
||||
ports:
|
||||
- "5299:5299"
|
||||
- '5299:5299'
|
||||
volumes:
|
||||
- ./lazylibrarian/config:/config
|
||||
- /mnt/media/books:/books
|
||||
@@ -240,22 +240,22 @@ services:
|
||||
# TRAEFIK CONFIGURATION
|
||||
# ==========================================
|
||||
# Service metadata
|
||||
- 'homelab.category=media"
|
||||
- 'homelab.description=Book download automation"
|
||||
- "com.centurylinklabs.watchtower.enable=true"
|
||||
- 'homelab.category=media'
|
||||
- 'homelab.description=Book download automation'
|
||||
- 'com.centurylinklabs.watchtower.enable=true'
|
||||
# Traefik reverse proxy (comment/uncomment to disable/enable)
|
||||
# If Traefik is on a remote server: these labels are NOT USED;
|
||||
# configure external yml files in /traefik/dynamic folder instead.
|
||||
- 'traefik.enable=true"
|
||||
- 'traefik.docker.network=traefik-network"
|
||||
- 'traefik.http.routers.lazylibrarian.rule=Host(`lazylibrarian.${DOMAIN}`)"
|
||||
- 'traefik.http.routers.lazylibrarian.entrypoints=websecure"
|
||||
- 'traefik.http.routers.lazylibrarian.tls.certresolver=letsencrypt"
|
||||
- 'traefik.http.routers.lazylibrarian.middlewares=authelia@docker"
|
||||
- 'traefik.http.services.lazylibrarian.loadbalancer.server.port=5299"
|
||||
- "sablier.enable=true"
|
||||
- "sablier.group=jasper-arr"
|
||||
- "sablier.start-on-demand=true"
|
||||
- 'traefik.enable=true'
|
||||
- 'traefik.docker.network=traefik-network'
|
||||
- 'traefik.http.routers.lazylibrarian.rule=Host(`lazylibrarian.${DOMAIN}`)'
|
||||
- 'traefik.http.routers.lazylibrarian.entrypoints=websecure'
|
||||
- 'traefik.http.routers.lazylibrarian.tls.certresolver=letsencrypt'
|
||||
- 'traefik.http.routers.lazylibrarian.middlewares=authelia@docker'
|
||||
- 'traefik.http.services.lazylibrarian.loadbalancer.server.port=5299'
|
||||
- 'sablier.enable=true'
|
||||
- 'sablier.group=jasper-arr'
|
||||
- 'sablier.start-on-demand=true'
|
||||
|
||||
# Mylar3 - Comic book manager
|
||||
mylar3:
|
||||
@@ -266,7 +266,7 @@ services:
|
||||
- homelab-network
|
||||
- traefik-network
|
||||
ports:
|
||||
- "8090:8090"
|
||||
- '8090:8090'
|
||||
volumes:
|
||||
- ./mylar3/config:/config
|
||||
- /mnt/media/comics:/comics
|
||||
@@ -279,22 +279,22 @@ services:
|
||||
# TRAEFIK CONFIGURATION
|
||||
# ==========================================
|
||||
# Service metadata
|
||||
- 'homelab.category=media"
|
||||
- 'homelab.description=Comic book collection manager"
|
||||
- "com.centurylinklabs.watchtower.enable=true"
|
||||
- 'homelab.category=media'
|
||||
- 'homelab.description=Comic book collection manager'
|
||||
- 'com.centurylinklabs.watchtower.enable=true'
|
||||
# Traefik reverse proxy (comment/uncomment to disable/enable)
|
||||
# If Traefik is on a remote server: these labels are NOT USED;
|
||||
# configure external yml files in /traefik/dynamic folder instead.
|
||||
- 'traefik.enable=true"
|
||||
- 'traefik.docker.network=traefik-network"
|
||||
- 'traefik.http.routers.mylar.rule=Host(`mylar.${DOMAIN}`)"
|
||||
- 'traefik.http.routers.mylar.entrypoints=websecure"
|
||||
- 'traefik.http.routers.mylar.tls.certresolver=letsencrypt"
|
||||
- 'traefik.http.routers.mylar.middlewares=authelia@docker"
|
||||
- 'traefik.http.services.mylar.loadbalancer.server.port=8090"
|
||||
- "sablier.enable=true"
|
||||
- "sablier.group=jasper-arr"
|
||||
- "sablier.start-on-demand=true"
|
||||
- 'traefik.enable=true'
|
||||
- 'traefik.docker.network=traefik-network'
|
||||
- 'traefik.http.routers.mylar.rule=Host(`mylar.${DOMAIN}`)'
|
||||
- 'traefik.http.routers.mylar.entrypoints=websecure'
|
||||
- 'traefik.http.routers.mylar.tls.certresolver=letsencrypt'
|
||||
- 'traefik.http.routers.mylar.middlewares=authelia@docker'
|
||||
- 'traefik.http.services.mylar.loadbalancer.server.port=8090'
|
||||
- 'sablier.enable=true'
|
||||
- 'sablier.group=jasper-arr'
|
||||
- 'sablier.start-on-demand=true'
|
||||
|
||||
# Jellyseerr - Request management for Jellyfin/Plex
|
||||
jellyseerr:
|
||||
@@ -305,14 +305,14 @@ services:
|
||||
- homelab-network
|
||||
- traefik-network
|
||||
ports:
|
||||
- "5055:5055"
|
||||
- '5055:5055'
|
||||
volumes:
|
||||
- ./jellyseerr/config:/app/config
|
||||
environment:
|
||||
- LOG_LEVEL=info
|
||||
- TZ=America/New_York
|
||||
healthcheck:
|
||||
test: ["CMD", "wget", "--quiet", "--tries=1", "--spider", "http://localhost:5055/"]
|
||||
test: ['CMD', 'wget', '--quiet', '--tries=1', '--spider', 'http://localhost:5055/']
|
||||
interval: 30s
|
||||
timeout: 10s
|
||||
retries: 3
|
||||
@@ -321,22 +321,22 @@ services:
|
||||
# TRAEFIK CONFIGURATION
|
||||
# ==========================================
|
||||
# Service metadata
|
||||
- 'homelab.category=media"
|
||||
- 'homelab.description=Media request management"
|
||||
- "com.centurylinklabs.watchtower.enable=true"
|
||||
- 'homelab.category=media'
|
||||
- 'homelab.description=Media request management'
|
||||
- 'com.centurylinklabs.watchtower.enable=true'
|
||||
# Traefik reverse proxy (comment/uncomment to disable/enable)
|
||||
# If Traefik is on a remote server: these labels are NOT USED;
|
||||
# configure external yml files in /traefik/dynamic folder instead.
|
||||
- 'traefik.enable=true"
|
||||
- 'traefik.docker.network=traefik-network"
|
||||
- 'traefik.http.routers.jellyseerr.rule=Host(`jellyseerr.${DOMAIN}`)"
|
||||
- 'traefik.http.routers.jellyseerr.entrypoints=websecure"
|
||||
- 'traefik.http.routers.jellyseerr.tls.certresolver=letsencrypt"
|
||||
- 'traefik.http.routers.jellyseerr.middlewares=authelia@docker"
|
||||
- 'traefik.http.services.jellyseerr.loadbalancer.server.port=5055"
|
||||
- "sablier.enable=true"
|
||||
- "sablier.group=jasper-arr"
|
||||
- "sablier.start-on-demand=true"
|
||||
- 'traefik.enable=true'
|
||||
- 'traefik.docker.network=traefik-network'
|
||||
- 'traefik.http.routers.jellyseerr.rule=Host(`jellyseerr.${DOMAIN}`)'
|
||||
- 'traefik.http.routers.jellyseerr.entrypoints=websecure'
|
||||
- 'traefik.http.routers.jellyseerr.tls.certresolver=letsencrypt'
|
||||
- 'traefik.http.routers.jellyseerr.middlewares=authelia@docker'
|
||||
- 'traefik.http.services.jellyseerr.loadbalancer.server.port=5055'
|
||||
- 'sablier.enable=true'
|
||||
- 'sablier.group=jasper-arr'
|
||||
- 'sablier.start-on-demand=true'
|
||||
|
||||
# FlareSolverr - Cloudflare bypass for Prowlarr
|
||||
# No web UI - used by Prowlarr
|
||||
@@ -352,9 +352,9 @@ services:
|
||||
labels:
|
||||
- homelab.category=media
|
||||
- homelab.description=Cloudflare bypass for indexers
|
||||
- "sablier.enable=true"
|
||||
- "sablier.group=jasper-arr"
|
||||
- "sablier.start-on-demand=true"
|
||||
- 'sablier.enable=true'
|
||||
- 'sablier.group=jasper-arr'
|
||||
- 'sablier.start-on-demand=true'
|
||||
|
||||
x-dockge:
|
||||
urls:
|
||||
|
||||
@@ -28,7 +28,7 @@ services:
|
||||
- homelab-network
|
||||
- traefik-network
|
||||
ports:
|
||||
- "8096:8096"
|
||||
- '8096:8096'
|
||||
volumes:
|
||||
- ./jellyfin/config:/config
|
||||
- ./jellyfin/cache:/cache
|
||||
@@ -38,7 +38,7 @@ services:
|
||||
- PGID=1000
|
||||
- TZ=America/New_York
|
||||
healthcheck:
|
||||
test: ["CMD", "curl", "-f", "http://localhost:8096/"]
|
||||
test: ['CMD', 'curl', '-f', 'http://localhost:8096/']
|
||||
interval: 30s
|
||||
timeout: 10s
|
||||
retries: 3
|
||||
@@ -49,23 +49,23 @@ services:
|
||||
# TRAEFIK CONFIGURATION
|
||||
labels:
|
||||
# Service metadata
|
||||
- "com.centurylinklabs.watchtower.enable=true"
|
||||
- 'homelab.category=media"
|
||||
- 'homelab.description=Open-source media streaming server"
|
||||
- 'traefik.enable=true"
|
||||
- 'traefik.docker.network=traefik-network"
|
||||
- 'com.centurylinklabs.watchtower.enable=true'
|
||||
- 'homelab.category=media'
|
||||
- 'homelab.description=Open-source media streaming server'
|
||||
- 'traefik.enable=true'
|
||||
- 'traefik.docker.network=traefik-network'
|
||||
# Router configuration
|
||||
- 'traefik.http.routers.jellyfin.rule=Host(`jellyfin.${DOMAIN}`)"
|
||||
- 'traefik.http.routers.jellyfin.entrypoints=websecure"
|
||||
- 'traefik.http.routers.jellyfin.tls=true"
|
||||
- 'traefik.http.routers.jellyfin.tls.certresolver=letsencrypt"
|
||||
- 'traefik.http.routers.jellyfin.rule=Host(`jellyfin.${DOMAIN}`)'
|
||||
- 'traefik.http.routers.jellyfin.entrypoints=websecure'
|
||||
- 'traefik.http.routers.jellyfin.tls=true'
|
||||
- 'traefik.http.routers.jellyfin.tls.certresolver=letsencrypt'
|
||||
# Service configuration
|
||||
- 'traefik.http.services.jellyfin.loadbalancer.server.port=8096"
|
||||
- 'traefik.http.services.jellyfin.loadbalancer.server.port=8096'
|
||||
# Sablier configuration
|
||||
- "sablier.enable=true"
|
||||
- "sablier.group=jasper-jellyfin"
|
||||
- "sablier.start-on-demand=true"
|
||||
- "sablier.theme=hacker-terminal"
|
||||
- 'sablier.enable=true'
|
||||
- 'sablier.group=jasper-jellyfin'
|
||||
- 'sablier.start-on-demand=true'
|
||||
- 'sablier.theme=hacker-terminal'
|
||||
|
||||
# Calibre-Web - Ebook reader and server
|
||||
calibre-web:
|
||||
@@ -85,7 +85,7 @@ services:
|
||||
- homelab-network
|
||||
- traefik-network
|
||||
ports:
|
||||
- "8083:8083"
|
||||
- '8083:8083'
|
||||
volumes:
|
||||
- ./calibre-web/config:/config
|
||||
- /mnt/media/books:/books
|
||||
@@ -97,22 +97,22 @@ services:
|
||||
# TRAEFIK CONFIGURATION
|
||||
labels:
|
||||
# Service metadata
|
||||
- "com.centurylinklabs.watchtower.enable=true"
|
||||
- 'homelab.category=media"
|
||||
- 'homelab.description=Ebook reader and library management"
|
||||
- 'traefik.enable=true"
|
||||
- 'traefik.docker.network=traefik-network"
|
||||
- 'com.centurylinklabs.watchtower.enable=true'
|
||||
- 'homelab.category=media'
|
||||
- 'homelab.description=Ebook reader and library management'
|
||||
- 'traefik.enable=true'
|
||||
- 'traefik.docker.network=traefik-network'
|
||||
# Router configuration
|
||||
- 'traefik.http.routers.calibre.rule=Host(`calibre.${DOMAIN}`)"
|
||||
- 'traefik.http.routers.calibre.entrypoints=websecure"
|
||||
- 'traefik.http.routers.calibre.tls.certresolver=letsencrypt"
|
||||
- 'traefik.http.routers.calibre.middlewares=authelia@docker"
|
||||
- 'traefik.http.routers.calibre.rule=Host(`calibre.${DOMAIN}`)'
|
||||
- 'traefik.http.routers.calibre.entrypoints=websecure'
|
||||
- 'traefik.http.routers.calibre.tls.certresolver=letsencrypt'
|
||||
- 'traefik.http.routers.calibre.middlewares=authelia@docker'
|
||||
# Service configuration
|
||||
- 'traefik.http.services.calibre.loadbalancer.server.port=8083"
|
||||
- 'traefik.http.services.calibre.loadbalancer.server.port=8083'
|
||||
# Sablier configuration (disabled by default)
|
||||
- "sablier.enable=true"
|
||||
- "sablier.group=jasper-calibre-web"
|
||||
- "sablier.start-on-demand=true"
|
||||
- 'sablier.enable=true'
|
||||
- 'sablier.group=jasper-calibre-web'
|
||||
- 'sablier.start-on-demand=true'
|
||||
|
||||
x-dockge:
|
||||
urls:
|
||||
|
||||
@@ -1,35 +0,0 @@
|
||||
server:
|
||||
http_listen_port: 3100
|
||||
grpc_listen_port: 9096
|
||||
|
||||
common:
|
||||
path_prefix: /loki
|
||||
storage:
|
||||
filesystem:
|
||||
chunks_directory: /loki/chunks
|
||||
rules_directory: /loki/rules
|
||||
replication_factor: 1
|
||||
ring:
|
||||
instance_addr: 127.0.0.1
|
||||
kvstore:
|
||||
store: inmemory
|
||||
|
||||
query_range:
|
||||
results_cache:
|
||||
cache:
|
||||
embedded_cache:
|
||||
enabled: true
|
||||
max_size_mb: 100
|
||||
|
||||
schema_config:
|
||||
configs:
|
||||
- from: 2020-10-24
|
||||
store: tsdb
|
||||
object_store: filesystem
|
||||
schema: v13
|
||||
index:
|
||||
prefix: index_
|
||||
period: 24h
|
||||
|
||||
ruler:
|
||||
alertmanager_url: http://localhost:9093
|
||||
@@ -1,16 +0,0 @@
|
||||
global:
|
||||
scrape_interval: 15s
|
||||
evaluation_interval: 15s
|
||||
|
||||
scrape_configs:
|
||||
- job_name: 'prometheus'
|
||||
static_configs:
|
||||
- targets: ['localhost:9090']
|
||||
|
||||
- job_name: 'node-exporter'
|
||||
static_configs:
|
||||
- targets: ['node-exporter:9100']
|
||||
|
||||
- job_name: 'cadvisor'
|
||||
static_configs:
|
||||
- targets: ['cadvisor:8080']
|
||||
@@ -1,18 +0,0 @@
|
||||
server:
|
||||
http_listen_port: 9080
|
||||
grpc_listen_port: 0
|
||||
|
||||
positions:
|
||||
filename: /tmp/positions.yaml
|
||||
|
||||
clients:
|
||||
- url: http://loki:3100/loki/api/v1/push
|
||||
|
||||
scrape_configs:
|
||||
- job_name: system
|
||||
static_configs:
|
||||
- targets:
|
||||
- localhost
|
||||
labels:
|
||||
job: varlogs
|
||||
__path__: /var/log/*log
|
||||
@@ -23,7 +23,7 @@ services:
|
||||
- homelab-network
|
||||
- traefik-network
|
||||
ports:
|
||||
- "9090:9090"
|
||||
- '9090:9090'
|
||||
volumes:
|
||||
- ./config/prometheus:/etc/prometheus
|
||||
- prometheus-data:/prometheus
|
||||
@@ -38,19 +38,19 @@ services:
|
||||
# TRAEFIK CONFIGURATION
|
||||
# ==========================================
|
||||
# Service metadata
|
||||
- 'homelab.category=monitoring"
|
||||
- 'homelab.description=Metrics collection and time-series database"
|
||||
- 'homelab.category=monitoring'
|
||||
- 'homelab.description=Metrics collection and time-series database'
|
||||
# Traefik reverse proxy (comment/uncomment to disable/enable)
|
||||
# If Traefik is on a remote server: these labels are NOT USED;
|
||||
# configure external yml files in /traefik/dynamic folder instead.
|
||||
- 'traefik.enable=true"
|
||||
- 'traefik.docker.network=traefik-network"
|
||||
- 'traefik.http.routers.prometheus.rule=Host(`prometheus.${DOMAIN}`)"
|
||||
- 'traefik.http.routers.prometheus.entrypoints=websecure"
|
||||
- 'traefik.http.routers.prometheus.tls=true"
|
||||
- 'traefik.http.routers.prometheus.tls.certresolver=letsencrypt"
|
||||
- 'traefik.http.routers.prometheus.middlewares=authelia@docker"
|
||||
- 'traefik.http.services.prometheus.loadbalancer.server.port=9090"
|
||||
- 'traefik.enable=true'
|
||||
- 'traefik.docker.network=traefik-network'
|
||||
- 'traefik.http.routers.prometheus.rule=Host(`prometheus.${DOMAIN}`)'
|
||||
- 'traefik.http.routers.prometheus.entrypoints=websecure'
|
||||
- 'traefik.http.routers.prometheus.tls=true'
|
||||
- 'traefik.http.routers.prometheus.tls.certresolver=letsencrypt'
|
||||
- 'traefik.http.routers.prometheus.middlewares=authelia@docker'
|
||||
- 'traefik.http.services.prometheus.loadbalancer.server.port=9090'
|
||||
|
||||
# Grafana - Metrics visualization
|
||||
# Default credentials: admin / admin (change on first login)
|
||||
@@ -71,7 +71,7 @@ services:
|
||||
- homelab-network
|
||||
- traefik-network
|
||||
ports:
|
||||
- "3000:3000"
|
||||
- '3000:3000'
|
||||
volumes:
|
||||
- grafana-data:/var/lib/grafana
|
||||
- ./config/grafana/provisioning:/etc/grafana/provisioning
|
||||
@@ -80,26 +80,26 @@ services:
|
||||
- GF_USERS_ALLOW_SIGN_UP=false
|
||||
- GF_SERVER_ROOT_URL=https://grafana.${DOMAIN}
|
||||
- GF_INSTALL_PLUGINS=grafana-clock-panel,grafana-simple-json-datasource,grafana-piechart-panel
|
||||
user: "1000:1000"
|
||||
user: '1000:1000'
|
||||
depends_on:
|
||||
- prometheus
|
||||
labels:
|
||||
# TRAEFIK CONFIGURATION
|
||||
# ==========================================
|
||||
# Service metadata
|
||||
- 'homelab.category=monitoring"
|
||||
- 'homelab.description=Metrics visualization and dashboards"
|
||||
- 'homelab.category=monitoring'
|
||||
- 'homelab.description=Metrics visualization and dashboards'
|
||||
# Traefik reverse proxy (comment/uncomment to disable/enable)
|
||||
# If Traefik is on a remote server: these labels are NOT USED;
|
||||
# configure external yml files in /traefik/dynamic folder instead.
|
||||
- 'traefik.enable=true"
|
||||
- 'traefik.docker.network=traefik-network"
|
||||
- 'traefik.http.routers.grafana.rule=Host(`grafana.${DOMAIN}`)"
|
||||
- 'traefik.http.routers.grafana.entrypoints=websecure"
|
||||
- 'traefik.http.routers.grafana.tls=true"
|
||||
- 'traefik.http.routers.grafana.tls.certresolver=letsencrypt"
|
||||
- 'traefik.http.routers.grafana.middlewares=authelia@docker"
|
||||
- 'traefik.http.services.grafana.loadbalancer.server.port=3000"
|
||||
- 'traefik.enable=true'
|
||||
- 'traefik.docker.network=traefik-network'
|
||||
- 'traefik.http.routers.grafana.rule=Host(`grafana.${DOMAIN}`)'
|
||||
- 'traefik.http.routers.grafana.entrypoints=websecure'
|
||||
- 'traefik.http.routers.grafana.tls=true'
|
||||
- 'traefik.http.routers.grafana.tls.certresolver=letsencrypt'
|
||||
- 'traefik.http.routers.grafana.middlewares=authelia@docker'
|
||||
- 'traefik.http.services.grafana.loadbalancer.server.port=3000'
|
||||
|
||||
# Node Exporter - Host metrics exporter
|
||||
# Metrics at: http://192.168.4.4:9100/metrics
|
||||
@@ -110,7 +110,7 @@ services:
|
||||
networks:
|
||||
- homelab-network
|
||||
ports:
|
||||
- "9100:9100"
|
||||
- '9100:9100'
|
||||
volumes:
|
||||
- /proc:/host/proc:ro
|
||||
- /sys:/host/sys:ro
|
||||
@@ -121,8 +121,8 @@ services:
|
||||
- '--path.sysfs=/host/sys'
|
||||
- '--collector.filesystem.mount-points-exclude=^/(sys|proc|dev|host|etc)($$|/)'
|
||||
labels:
|
||||
- 'homelab.category=monitoring"
|
||||
- 'homelab.description=Hardware and OS metrics exporter"
|
||||
- 'homelab.category=monitoring'
|
||||
- 'homelab.description=Hardware and OS metrics exporter'
|
||||
|
||||
# cAdvisor - Container metrics exporter
|
||||
# Access at: http://192.168.4.4:8082
|
||||
@@ -134,7 +134,7 @@ services:
|
||||
- homelab-network
|
||||
- traefik-network
|
||||
ports:
|
||||
- "8082:8080"
|
||||
- '8082:8080'
|
||||
volumes:
|
||||
- /:/rootfs:ro
|
||||
- /var/run:/var/run:ro
|
||||
@@ -148,19 +148,19 @@ services:
|
||||
# TRAEFIK CONFIGURATION
|
||||
# ==========================================
|
||||
# Service metadata
|
||||
- 'homelab.category=monitoring"
|
||||
- 'homelab.description=Container metrics and performance monitoring"
|
||||
- 'homelab.category=monitoring'
|
||||
- 'homelab.description=Container metrics and performance monitoring'
|
||||
# Traefik reverse proxy (comment/uncomment to disable/enable)
|
||||
# If Traefik is on a remote server: these labels are NOT USED;
|
||||
# configure external yml files in /traefik/dynamic folder instead.
|
||||
- 'traefik.enable=true"
|
||||
- 'traefik.docker.network=traefik-network"
|
||||
- 'traefik.http.routers.cadvisor.rule=Host(`cadvisor.${DOMAIN}`)"
|
||||
- 'traefik.http.routers.cadvisor.entrypoints=websecure"
|
||||
- 'traefik.http.routers.cadvisor.tls=true"
|
||||
- 'traefik.http.routers.cadvisor.tls.certresolver=letsencrypt"
|
||||
- 'traefik.http.routers.cadvisor.middlewares=authelia@docker"
|
||||
- 'traefik.http.services.cadvisor.loadbalancer.server.port=8080"
|
||||
- 'traefik.enable=true'
|
||||
- 'traefik.docker.network=traefik-network'
|
||||
- 'traefik.http.routers.cadvisor.rule=Host(`cadvisor.${DOMAIN}`)'
|
||||
- 'traefik.http.routers.cadvisor.entrypoints=websecure'
|
||||
- 'traefik.http.routers.cadvisor.tls=true'
|
||||
- 'traefik.http.routers.cadvisor.tls.certresolver=letsencrypt'
|
||||
- 'traefik.http.routers.cadvisor.middlewares=authelia@docker'
|
||||
- 'traefik.http.services.cadvisor.loadbalancer.server.port=8080'
|
||||
|
||||
# Uptime Kuma - Uptime monitoring
|
||||
uptime-kuma:
|
||||
@@ -180,7 +180,7 @@ services:
|
||||
- homelab-network
|
||||
- traefik-network
|
||||
ports:
|
||||
- "3001:3001"
|
||||
- '3001:3001'
|
||||
volumes:
|
||||
- uptime-kuma-data:/app/data
|
||||
- /var/run/docker.sock:/var/run/docker.sock:ro
|
||||
@@ -188,19 +188,19 @@ services:
|
||||
# TRAEFIK CONFIGURATION
|
||||
# ==========================================
|
||||
# Service metadata
|
||||
- 'homelab.category=monitoring"
|
||||
- 'homelab.description=Service uptime monitoring and alerts"
|
||||
- 'homelab.category=monitoring'
|
||||
- 'homelab.description=Service uptime monitoring and alerts'
|
||||
# Traefik reverse proxy (comment/uncomment to disable/enable)
|
||||
# If Traefik is on a remote server: these labels are NOT USED;
|
||||
# configure external yml files in /traefik/dynamic folder instead.
|
||||
- 'traefik.enable=true"
|
||||
- 'traefik.docker.network=traefik-network"
|
||||
- 'traefik.http.routers.uptime-kuma.rule=Host(`uptime-kuma.${DOMAIN}`)"
|
||||
- 'traefik.http.routers.uptime-kuma.entrypoints=websecure"
|
||||
- 'traefik.http.routers.uptime-kuma.tls=true"
|
||||
- 'traefik.http.routers.uptime-kuma.tls.certresolver=letsencrypt"
|
||||
- 'traefik.http.routers.uptime-kuma.middlewares=authelia@docker"
|
||||
- 'traefik.http.services.uptime-kuma.loadbalancer.server.port=3001"
|
||||
- 'traefik.enable=true'
|
||||
- 'traefik.docker.network=traefik-network'
|
||||
- 'traefik.http.routers.uptime-kuma.rule=Host(`uptime-kuma.${DOMAIN}`)'
|
||||
- 'traefik.http.routers.uptime-kuma.entrypoints=websecure'
|
||||
- 'traefik.http.routers.uptime-kuma.tls=true'
|
||||
- 'traefik.http.routers.uptime-kuma.tls.certresolver=letsencrypt'
|
||||
- 'traefik.http.routers.uptime-kuma.middlewares=authelia@docker'
|
||||
- 'traefik.http.services.uptime-kuma.loadbalancer.server.port=3001'
|
||||
|
||||
# Loki - Log aggregation
|
||||
# Access at: http://192.168.4.4:3100
|
||||
@@ -221,7 +221,7 @@ services:
|
||||
- homelab-network
|
||||
- traefik-network
|
||||
ports:
|
||||
- "3100:3100"
|
||||
- '3100:3100'
|
||||
volumes:
|
||||
- ./config/loki:/etc/loki
|
||||
- loki-data:/loki
|
||||
@@ -230,19 +230,19 @@ services:
|
||||
# TRAEFIK CONFIGURATION
|
||||
# ==========================================
|
||||
# Service metadata
|
||||
- 'homelab.category=monitoring"
|
||||
- 'homelab.description=Log aggregation system"
|
||||
- 'homelab.category=monitoring'
|
||||
- 'homelab.description=Log aggregation system'
|
||||
# Traefik reverse proxy (comment/uncomment to disable/enable)
|
||||
# If Traefik is on a remote server: these labels are NOT USED;
|
||||
# configure external yml files in /traefik/dynamic folder instead.
|
||||
- 'traefik.enable=true"
|
||||
- 'traefik.docker.network=traefik-network"
|
||||
- 'traefik.http.routers.loki.rule=Host(`loki.${DOMAIN}`)"
|
||||
- 'traefik.http.routers.loki.entrypoints=websecure"
|
||||
- 'traefik.http.routers.loki.tls=true"
|
||||
- 'traefik.http.routers.loki.tls.certresolver=letsencrypt"
|
||||
- 'traefik.http.routers.loki.middlewares=authelia@docker"
|
||||
- 'traefik.http.services.loki.loadbalancer.server.port=3100"
|
||||
- 'traefik.enable=true'
|
||||
- 'traefik.docker.network=traefik-network'
|
||||
- 'traefik.http.routers.loki.rule=Host(`loki.${DOMAIN}`)'
|
||||
- 'traefik.http.routers.loki.entrypoints=websecure'
|
||||
- 'traefik.http.routers.loki.tls=true'
|
||||
- 'traefik.http.routers.loki.tls.certresolver=letsencrypt'
|
||||
- 'traefik.http.routers.loki.middlewares=authelia@docker'
|
||||
- 'traefik.http.services.loki.loadbalancer.server.port=3100'
|
||||
|
||||
# Promtail - Log shipper for Loki
|
||||
# Ships Docker container logs to Loki
|
||||
@@ -260,8 +260,8 @@ services:
|
||||
depends_on:
|
||||
- loki
|
||||
labels:
|
||||
- 'homelab.category=monitoring"
|
||||
- 'homelab.description=Log collector for Loki"
|
||||
- 'homelab.category=monitoring'
|
||||
- 'homelab.description=Log collector for Loki'
|
||||
|
||||
volumes:
|
||||
prometheus-data:
|
||||
|
||||
46
docker-compose/monitoring/loki/loki-config.yml
Normal file
46
docker-compose/monitoring/loki/loki-config.yml
Normal file
@@ -0,0 +1,46 @@
|
||||
# Loki Configuration Template
|
||||
# Copy this file to ./config/loki/loki-config.yml
|
||||
|
||||
auth_enabled: false
|
||||
|
||||
server:
|
||||
http_listen_port: 3100
|
||||
grpc_listen_port: 9096
|
||||
|
||||
common:
|
||||
path_prefix: /loki
|
||||
storage:
|
||||
filesystem:
|
||||
chunks_directory: /loki/chunks
|
||||
rules_directory: /loki/rules
|
||||
replication_factor: 1
|
||||
ring:
|
||||
instance_addr: 127.0.0.1
|
||||
kvstore:
|
||||
store: inmemory
|
||||
|
||||
schema_config:
|
||||
configs:
|
||||
- from: 2020-10-24
|
||||
store: boltdb-shipper
|
||||
object_store: filesystem
|
||||
schema: v11
|
||||
index:
|
||||
prefix: index_
|
||||
period: 24h
|
||||
|
||||
ruler:
|
||||
alertmanager_url: http://localhost:9093
|
||||
|
||||
# Retention configuration (delete logs older than 30 days)
|
||||
limits_config:
|
||||
retention_period: 720h # 30 days
|
||||
|
||||
# Compactor to delete old data
|
||||
compactor:
|
||||
working_directory: /loki/compactor
|
||||
shared_store: filesystem
|
||||
compaction_interval: 10m
|
||||
retention_enabled: true
|
||||
retention_delete_delay: 2h
|
||||
retention_delete_worker_count: 150
|
||||
49
docker-compose/monitoring/prometheus/prometheus.yml
Normal file
49
docker-compose/monitoring/prometheus/prometheus.yml
Normal file
@@ -0,0 +1,49 @@
|
||||
# Prometheus Configuration Template
|
||||
# Copy this file to ./config/prometheus/prometheus.yml
|
||||
|
||||
global:
|
||||
scrape_interval: 15s
|
||||
evaluation_interval: 15s
|
||||
external_labels:
|
||||
monitor: 'homelab'
|
||||
|
||||
# Alertmanager configuration (optional)
|
||||
# alerting:
|
||||
# alertmanagers:
|
||||
# - static_configs:
|
||||
# - targets:
|
||||
# - alertmanager:9093
|
||||
|
||||
# Load rules once and periodically evaluate them
|
||||
# rule_files:
|
||||
# - "alerts/*.yml"
|
||||
|
||||
# Scrape configurations
|
||||
scrape_configs:
|
||||
# Prometheus itself
|
||||
- job_name: 'prometheus'
|
||||
static_configs:
|
||||
- targets: ['localhost:9090']
|
||||
|
||||
# Node Exporter - System metrics
|
||||
- job_name: 'node-exporter'
|
||||
static_configs:
|
||||
- targets: ['node-exporter:9100']
|
||||
labels:
|
||||
instance: 'homelab-server'
|
||||
|
||||
# cAdvisor - Container metrics
|
||||
- job_name: 'cadvisor'
|
||||
static_configs:
|
||||
- targets: ['cadvisor:8080']
|
||||
labels:
|
||||
instance: 'homelab-server'
|
||||
|
||||
# Add your own services here
|
||||
# Example: Monitor a service with /metrics endpoint
|
||||
# - job_name: 'my-service'
|
||||
# static_configs:
|
||||
# - targets: ['my-service:8080']
|
||||
# labels:
|
||||
# instance: 'homelab-server'
|
||||
# service: 'my-service'
|
||||
53
docker-compose/monitoring/promtail/promtail-config.yml
Normal file
53
docker-compose/monitoring/promtail/promtail-config.yml
Normal file
@@ -0,0 +1,53 @@
|
||||
# Promtail Configuration Template
|
||||
# Copy this file to ./config/promtail/promtail-config.yml
|
||||
|
||||
server:
|
||||
http_listen_port: 9080
|
||||
grpc_listen_port: 0
|
||||
|
||||
positions:
|
||||
filename: /tmp/positions.yaml
|
||||
|
||||
clients:
|
||||
- url: http://loki:3100/loki/api/v1/push
|
||||
|
||||
scrape_configs:
|
||||
# Docker container logs
|
||||
- job_name: docker
|
||||
static_configs:
|
||||
- targets:
|
||||
- localhost
|
||||
labels:
|
||||
job: docker
|
||||
__path__: /var/lib/docker/containers/*/*-json.log
|
||||
|
||||
pipeline_stages:
|
||||
# Parse Docker JSON logs
|
||||
- json:
|
||||
expressions:
|
||||
output: log
|
||||
stream: stream
|
||||
attrs: attrs
|
||||
|
||||
# Extract container name from path
|
||||
- regex:
|
||||
expression: '/var/lib/docker/containers/(?P<container_id>[^/]+)/.*'
|
||||
source: filename
|
||||
|
||||
# Add labels
|
||||
- labels:
|
||||
stream:
|
||||
container_id:
|
||||
|
||||
# Output the log line
|
||||
- output:
|
||||
source: output
|
||||
|
||||
# System logs (optional)
|
||||
# - job_name: system
|
||||
# static_configs:
|
||||
# - targets:
|
||||
# - localhost
|
||||
# labels:
|
||||
# job: varlogs
|
||||
# __path__: /var/log/*.log
|
||||
42
docker-compose/monitoring/redis/redis.conf
Normal file
42
docker-compose/monitoring/redis/redis.conf
Normal file
@@ -0,0 +1,42 @@
|
||||
# Redis Configuration Template
|
||||
# Copy this file to ./config/redis/redis.conf
|
||||
|
||||
# Network
|
||||
bind 0.0.0.0
|
||||
protected-mode yes
|
||||
port 6379
|
||||
|
||||
# General
|
||||
daemonize no
|
||||
supervised no
|
||||
pidfile /var/run/redis_6379.pid
|
||||
loglevel notice
|
||||
logfile ""
|
||||
|
||||
# Persistence - AOF (Append Only File)
|
||||
appendonly yes
|
||||
appendfilename "appendonly.aof"
|
||||
appendfsync everysec
|
||||
no-appendfsync-on-rewrite no
|
||||
auto-aof-rewrite-percentage 100
|
||||
auto-aof-rewrite-min-size 64mb
|
||||
|
||||
# Persistence - RDB (Snapshotting)
|
||||
save 900 1
|
||||
save 300 10
|
||||
save 60 10000
|
||||
stop-writes-on-bgsave-error yes
|
||||
rdbcompression yes
|
||||
rdbchecksum yes
|
||||
dbfilename dump.rdb
|
||||
dir /data
|
||||
|
||||
# Memory Management
|
||||
maxmemory 256mb
|
||||
maxmemory-policy allkeys-lru
|
||||
|
||||
# Security
|
||||
# requirepass yourpassword # Uncomment and set a strong password
|
||||
|
||||
# Limits
|
||||
maxclients 10000
|
||||
@@ -25,7 +25,7 @@ services:
|
||||
- homelab-network
|
||||
- traefik-network
|
||||
ports:
|
||||
- "8089:80"
|
||||
- '8089:80'
|
||||
volumes:
|
||||
- ./nextcloud/html:/var/www/html
|
||||
- /mnt/nextcloud-data:/var/www/html/data # Large data on separate drive
|
||||
@@ -41,7 +41,7 @@ services:
|
||||
- OVERWRITEPROTOCOL=https
|
||||
- OVERWRITEHOST=nextcloud.${DOMAIN}
|
||||
healthcheck:
|
||||
test: ["CMD", "curl", "-f", "http://localhost/status.php"]
|
||||
test: ['CMD', 'curl', '-f', 'http://localhost/status.php']
|
||||
interval: 30s
|
||||
timeout: 10s
|
||||
retries: 3
|
||||
@@ -51,22 +51,22 @@ services:
|
||||
labels:
|
||||
# TRAEFIK CONFIGURATION
|
||||
# Service metadata
|
||||
- "com.centurylinklabs.watchtower.enable=true"
|
||||
- 'homelab.category=productivity"
|
||||
- 'homelab.description=File sync and collaboration"
|
||||
- 'traefik.enable=true"
|
||||
- 'traefik.docker.network=traefik-network"
|
||||
- 'com.centurylinklabs.watchtower.enable=true'
|
||||
- 'homelab.category=productivity'
|
||||
- 'homelab.description=File sync and collaboration'
|
||||
- 'traefik.enable=true'
|
||||
- 'traefik.docker.network=traefik-network'
|
||||
# Router configuration
|
||||
- 'traefik.http.routers.nextcloud.rule=Host(`nextcloud.${DOMAIN}`)"
|
||||
- 'traefik.http.routers.nextcloud.entrypoints=websecure"
|
||||
- 'traefik.http.routers.nextcloud.tls.certresolver=letsencrypt"
|
||||
- 'traefik.http.routers.nextcloud.middlewares=authelia@docker"
|
||||
- 'traefik.http.routers.nextcloud.rule=Host(`nextcloud.${DOMAIN}`)'
|
||||
- 'traefik.http.routers.nextcloud.entrypoints=websecure'
|
||||
- 'traefik.http.routers.nextcloud.tls.certresolver=letsencrypt'
|
||||
- 'traefik.http.routers.nextcloud.middlewares=authelia@docker'
|
||||
# Service configuration
|
||||
- 'traefik.http.services.nextcloud.loadbalancer.server.port=8089"
|
||||
- 'traefik.http.services.nextcloud.loadbalancer.server.port=8089'
|
||||
# Sablier configuration
|
||||
- "sablier.enable=true"
|
||||
- "sablier.group=jasper-nextcloud"
|
||||
- "sablier.start-on-demand=true"
|
||||
- 'sablier.enable=true'
|
||||
- 'sablier.group=jasper-nextcloud'
|
||||
- 'sablier.start-on-demand=true'
|
||||
|
||||
nextcloud-db:
|
||||
image: mariadb:10.11
|
||||
@@ -83,8 +83,8 @@ services:
|
||||
- MYSQL_PASSWORD=${NEXTCLOUD_DB_PASSWORD}
|
||||
command: --transaction-isolation=READ-COMMITTED --log-bin=binlog --binlog-format=ROW
|
||||
labels:
|
||||
- 'homelab.category=productivity"
|
||||
- 'homelab.description=Nextcloud database"
|
||||
- 'homelab.category=productivity'
|
||||
- 'homelab.description=Nextcloud database'
|
||||
|
||||
# Mealie - Recipe manager
|
||||
mealie:
|
||||
@@ -95,7 +95,7 @@ services:
|
||||
- homelab-network
|
||||
- traefik-network
|
||||
ports:
|
||||
- "9000:9000"
|
||||
- '9000:9000'
|
||||
volumes:
|
||||
- ./mealie/data:/app/data
|
||||
environment:
|
||||
@@ -107,22 +107,22 @@ services:
|
||||
labels:
|
||||
# TRAEFIK CONFIGURATION
|
||||
# Service metadata
|
||||
- "com.centurylinklabs.watchtower.enable=true"
|
||||
- 'homelab.category=productivity"
|
||||
- 'homelab.description=Recipe manager and meal planner"
|
||||
- 'traefik.enable=true"
|
||||
- 'traefik.docker.network=traefik-network"
|
||||
- 'com.centurylinklabs.watchtower.enable=true'
|
||||
- 'homelab.category=productivity'
|
||||
- 'homelab.description=Recipe manager and meal planner'
|
||||
- 'traefik.enable=true'
|
||||
- 'traefik.docker.network=traefik-network'
|
||||
# Router configuration
|
||||
- 'traefik.http.routers.mealie.rule=Host(`mealie.${DOMAIN}`)"
|
||||
- 'traefik.http.routers.mealie.entrypoints=websecure"
|
||||
- 'traefik.http.routers.mealie.tls.certresolver=letsencrypt"
|
||||
- 'traefik.http.routers.mealie.middlewares=authelia@docker"
|
||||
- 'traefik.http.routers.mealie.rule=Host(`mealie.${DOMAIN}`)'
|
||||
- 'traefik.http.routers.mealie.entrypoints=websecure'
|
||||
- 'traefik.http.routers.mealie.tls.certresolver=letsencrypt'
|
||||
- 'traefik.http.routers.mealie.middlewares=authelia@docker'
|
||||
# Service configuration
|
||||
- 'traefik.http.services.mealie.loadbalancer.server.port=9000"
|
||||
- 'traefik.http.services.mealie.loadbalancer.server.port=9000'
|
||||
# Sablier configuration
|
||||
- "sablier.enable=true"
|
||||
- "sablier.group=jasper-mealie"
|
||||
- "sablier.start-on-demand=true"
|
||||
- 'sablier.enable=true'
|
||||
- 'sablier.group=jasper-mealie'
|
||||
- 'sablier.start-on-demand=true'
|
||||
|
||||
# WordPress - Blog/website platform
|
||||
wordpress:
|
||||
@@ -133,7 +133,7 @@ services:
|
||||
- homelab-network
|
||||
- traefik-network
|
||||
ports:
|
||||
- "8088:80"
|
||||
- '8088:80'
|
||||
volumes:
|
||||
- ./wordpress/html:/var/www/html
|
||||
environment:
|
||||
@@ -142,7 +142,7 @@ services:
|
||||
- WORDPRESS_DB_PASSWORD=${WORDPRESS_DB_PASSWORD}
|
||||
- WORDPRESS_DB_NAME=wordpress
|
||||
healthcheck:
|
||||
test: ["CMD", "curl", "-f", "http://localhost/"]
|
||||
test: ['CMD', 'curl', '-f', 'http://localhost/']
|
||||
interval: 30s
|
||||
timeout: 10s
|
||||
retries: 3
|
||||
@@ -152,22 +152,22 @@ services:
|
||||
labels:
|
||||
# TRAEFIK CONFIGURATION
|
||||
# Service metadata
|
||||
- "com.centurylinklabs.watchtower.enable=true"
|
||||
- 'homelab.category=productivity"
|
||||
- 'homelab.description=Blog and website platform"
|
||||
- 'traefik.enable=true"
|
||||
- 'traefik.docker.network=traefik-network"
|
||||
- 'com.centurylinklabs.watchtower.enable=true'
|
||||
- 'homelab.category=productivity'
|
||||
- 'homelab.description=Blog and website platform'
|
||||
- 'traefik.enable=true'
|
||||
- 'traefik.docker.network=traefik-network'
|
||||
# Router configuration
|
||||
- 'traefik.http.routers.wordpress.rule=Host(`wordpress.${DOMAIN}`)"
|
||||
- 'traefik.http.routers.wordpress.entrypoints=websecure"
|
||||
- 'traefik.http.routers.wordpress.tls.certresolver=letsencrypt"
|
||||
- 'traefik.http.routers.wordpress.middlewares=authelia@docker"
|
||||
- 'traefik.http.routers.wordpress.rule=Host(`wordpress.${DOMAIN}`)'
|
||||
- 'traefik.http.routers.wordpress.entrypoints=websecure'
|
||||
- 'traefik.http.routers.wordpress.tls.certresolver=letsencrypt'
|
||||
- 'traefik.http.routers.wordpress.middlewares=authelia@docker'
|
||||
# Service configuration
|
||||
- 'traefik.http.services.wordpress.loadbalancer.server.port=8088"
|
||||
- 'traefik.http.services.wordpress.loadbalancer.server.port=8088'
|
||||
# Sablier configuration
|
||||
- "sablier.enable=true"
|
||||
- "sablier.group=jasper-wordpress"
|
||||
- "sablier.start-on-demand=true"
|
||||
- 'sablier.enable=true'
|
||||
- 'sablier.group=jasper-wordpress'
|
||||
- 'sablier.start-on-demand=true'
|
||||
|
||||
wordpress-db:
|
||||
image: mariadb:10.11
|
||||
@@ -183,8 +183,8 @@ services:
|
||||
- MYSQL_USER=wordpress
|
||||
- MYSQL_PASSWORD=${WORDPRESS_DB_PASSWORD}
|
||||
labels:
|
||||
- 'homelab.category=productivity"
|
||||
- 'homelab.description=WordPress database"
|
||||
- 'homelab.category=productivity'
|
||||
- 'homelab.description=WordPress database'
|
||||
|
||||
# Gitea - Self-hosted Git service
|
||||
gitea:
|
||||
@@ -204,7 +204,7 @@ services:
|
||||
- homelab-network
|
||||
- traefik-network
|
||||
ports:
|
||||
- "3010:3000"
|
||||
- '3010:3000'
|
||||
volumes:
|
||||
- ./gitea/data:/data
|
||||
- /etc/timezone:/etc/timezone:ro
|
||||
@@ -218,7 +218,7 @@ services:
|
||||
- GITEA__database__USER=gitea
|
||||
- GITEA__database__PASSWD=${GITEA_DB_PASSWORD}
|
||||
healthcheck:
|
||||
test: ["CMD", "curl", "-f", "http://localhost:3000/"]
|
||||
test: ['CMD', 'curl', '-f', 'http://localhost:3000/']
|
||||
interval: 30s
|
||||
timeout: 10s
|
||||
retries: 3
|
||||
@@ -228,22 +228,22 @@ services:
|
||||
labels:
|
||||
# TRAEFIK CONFIGURATION
|
||||
# Service metadata
|
||||
- "com.centurylinklabs.watchtower.enable=true"
|
||||
- 'homelab.category=productivity"
|
||||
- 'homelab.description=Self-hosted Git service"
|
||||
- 'traefik.enable=true"
|
||||
- 'traefik.docker.network=traefik-network"
|
||||
- 'com.centurylinklabs.watchtower.enable=true'
|
||||
- 'homelab.category=productivity'
|
||||
- 'homelab.description=Self-hosted Git service'
|
||||
- 'traefik.enable=true'
|
||||
- 'traefik.docker.network=traefik-network'
|
||||
# Router configuration
|
||||
- 'traefik.http.routers.gitea.rule=Host(`gitea.${DOMAIN}`)"
|
||||
- 'traefik.http.routers.gitea.entrypoints=websecure"
|
||||
- 'traefik.http.routers.gitea.tls.certresolver=letsencrypt"
|
||||
- 'traefik.http.routers.gitea.middlewares=authelia@docker"
|
||||
- 'traefik.http.routers.gitea.rule=Host(`gitea.${DOMAIN}`)'
|
||||
- 'traefik.http.routers.gitea.entrypoints=websecure'
|
||||
- 'traefik.http.routers.gitea.tls.certresolver=letsencrypt'
|
||||
- 'traefik.http.routers.gitea.middlewares=authelia@docker'
|
||||
# Service configuration
|
||||
- 'traefik.http.services.gitea.loadbalancer.server.port=3010"
|
||||
- 'traefik.http.services.gitea.loadbalancer.server.port=3010'
|
||||
# Sablier configuration
|
||||
- "sablier.enable=true"
|
||||
- "sablier.group=jasper-gitea"
|
||||
- "sablier.start-on-demand=true"
|
||||
- 'sablier.enable=true'
|
||||
- 'sablier.group=jasper-gitea'
|
||||
- 'sablier.start-on-demand=true'
|
||||
|
||||
gitea-db:
|
||||
image: postgres:14-alpine
|
||||
@@ -258,8 +258,8 @@ services:
|
||||
- POSTGRES_PASSWORD=${GITEA_DB_PASSWORD}
|
||||
- POSTGRES_DB=gitea
|
||||
labels:
|
||||
- 'homelab.category=productivity"
|
||||
- 'homelab.description=Gitea database"
|
||||
- 'homelab.category=productivity'
|
||||
- 'homelab.description=Gitea database'
|
||||
|
||||
|
||||
# Jupyter Lab - Interactive computing notebooks
|
||||
@@ -272,7 +272,7 @@ services:
|
||||
- homelab-network
|
||||
- traefik-network
|
||||
ports:
|
||||
- "8890:8888"
|
||||
- '8890:8888'
|
||||
volumes:
|
||||
- ./config/jupyter:/home/jovyan/work
|
||||
environment:
|
||||
@@ -292,22 +292,22 @@ services:
|
||||
# TRAEFIK CONFIGURATION
|
||||
# ==========================================
|
||||
# Service metadata
|
||||
- 'homelab.category=productivity"
|
||||
- 'homelab.description=Jupyter Lab for data science and ML"
|
||||
- 'homelab.category=productivity'
|
||||
- 'homelab.description=Jupyter Lab for data science and ML'
|
||||
# Traefik reverse proxy (comment/uncomment to disable/enable)
|
||||
# If Traefik is on a remote server: these labels are NOT USED;
|
||||
# configure external yml files in /traefik/dynamic folder instead.
|
||||
- 'traefik.enable=true"
|
||||
- 'traefik.docker.network=traefik-network"
|
||||
- 'traefik.http.routers.jupyter.rule=Host(`jupyter.${DOMAIN}`)"
|
||||
- 'traefik.http.routers.jupyter.entrypoints=websecure"
|
||||
- 'traefik.http.routers.jupyter.tls.certresolver=letsencrypt"
|
||||
- 'traefik.http.routers.jupyter.middlewares=authelia@docker"
|
||||
- 'traefik.http.services.jupyter.loadbalancer.server.port=8890"
|
||||
- 'traefik.enable=true'
|
||||
- 'traefik.docker.network=traefik-network'
|
||||
- 'traefik.http.routers.jupyter.rule=Host(`jupyter.${DOMAIN}`)'
|
||||
- 'traefik.http.routers.jupyter.entrypoints=websecure'
|
||||
- 'traefik.http.routers.jupyter.tls.certresolver=letsencrypt'
|
||||
- 'traefik.http.routers.jupyter.middlewares=authelia@docker'
|
||||
- 'traefik.http.services.jupyter.loadbalancer.server.port=8890'
|
||||
# Sablier configuration
|
||||
- "sablier.enable=true"
|
||||
- "sablier.group=jasper-jupyter"
|
||||
- "sablier.start-on-demand=true"
|
||||
- 'sablier.enable=true'
|
||||
- 'sablier.group=jasper-jupyter'
|
||||
- 'sablier.start-on-demand=true'
|
||||
|
||||
volumes:
|
||||
nextcloud-db-data:
|
||||
|
||||
@@ -33,22 +33,22 @@ services:
|
||||
# TRAEFIK CONFIGURATION
|
||||
# ==========================================
|
||||
# Service metadata
|
||||
- 'homelab.category=media"
|
||||
- 'homelab.description=Distributed transcoding server"
|
||||
- "com.centurylinklabs.watchtower.enable=true"
|
||||
- 'homelab.category=media'
|
||||
- 'homelab.description=Distributed transcoding server'
|
||||
- 'com.centurylinklabs.watchtower.enable=true'
|
||||
# Traefik reverse proxy (comment/uncomment to disable/enable)
|
||||
# If Traefik is on a remote server: these labels are NOT USED;
|
||||
# configure external yml files in /traefik/dynamic folder instead.
|
||||
- 'traefik.enable=true"
|
||||
- 'traefik.docker.network=traefik-network"
|
||||
- 'traefik.http.routers.tdarr.rule=Host(`tdarr.${DOMAIN}`)"
|
||||
- 'traefik.http.routers.tdarr.entrypoints=websecure"
|
||||
- 'traefik.http.routers.tdarr.tls.certresolver=letsencrypt"
|
||||
- 'traefik.http.routers.tdarr.middlewares=authelia@docker"
|
||||
- 'traefik.http.services.tdarr.loadbalancer.server.port=8265"
|
||||
- "sablier.enable=true"
|
||||
- "sablier.group=jasper-tdarr"
|
||||
- "sablier.start-on-demand=true"
|
||||
- 'traefik.enable=true'
|
||||
- 'traefik.docker.network=traefik-network'
|
||||
- 'traefik.http.routers.tdarr.rule=Host(`tdarr.${DOMAIN}`)'
|
||||
- 'traefik.http.routers.tdarr.entrypoints=websecure'
|
||||
- 'traefik.http.routers.tdarr.tls.certresolver=letsencrypt'
|
||||
- 'traefik.http.routers.tdarr.middlewares=authelia@docker'
|
||||
- 'traefik.http.services.tdarr.loadbalancer.server.port=8265'
|
||||
- 'sablier.enable=true'
|
||||
- 'sablier.group=jasper-tdarr'
|
||||
- 'sablier.start-on-demand=true'
|
||||
|
||||
# Tdarr Node - Transcoding worker
|
||||
# No web UI - controlled by server
|
||||
@@ -75,9 +75,9 @@ services:
|
||||
labels:
|
||||
- homelab.category=media
|
||||
- homelab.description=Tdarr transcoding worker node
|
||||
- "sablier.enable=true"
|
||||
- "sablier.group=jasper-tdarr"
|
||||
- "sablier.start-on-demand=true"
|
||||
- 'sablier.enable=true'
|
||||
- 'sablier.group=jasper-tdarr'
|
||||
- 'sablier.start-on-demand=true'
|
||||
|
||||
# Unmanic - Another transcoding option
|
||||
unmanic:
|
||||
@@ -88,7 +88,7 @@ services:
|
||||
- homelab-network
|
||||
- traefik-network
|
||||
ports:
|
||||
- "8889:8888"
|
||||
- '8889:8888'
|
||||
volumes:
|
||||
- ./unmanic/config:/config
|
||||
- /mnt/media:/library
|
||||
@@ -101,22 +101,22 @@ services:
|
||||
# TRAEFIK CONFIGURATION
|
||||
# ==========================================
|
||||
# Service metadata
|
||||
- 'homelab.category=media"
|
||||
- 'homelab.description=Library optimization and transcoding"
|
||||
- "com.centurylinklabs.watchtower.enable=true"
|
||||
- 'homelab.category=media'
|
||||
- 'homelab.description=Library optimization and transcoding'
|
||||
- 'com.centurylinklabs.watchtower.enable=true'
|
||||
# Traefik reverse proxy (comment/uncomment to disable/enable)
|
||||
# If Traefik is on a remote server: these labels are NOT USED;
|
||||
# configure external yml files in /traefik/dynamic folder instead.
|
||||
- 'traefik.enable=true"
|
||||
- 'traefik.docker.network=traefik-network"
|
||||
- 'traefik.http.routers.unmanic.rule=Host(`unmanic.${DOMAIN}`)"
|
||||
- 'traefik.http.routers.unmanic.entrypoints=websecure"
|
||||
- 'traefik.http.routers.unmanic.tls.certresolver=letsencrypt"
|
||||
- 'traefik.http.routers.unmanic.middlewares=authelia@docker"
|
||||
- 'traefik.http.services.unmanic.loadbalancer.server.port=8889"
|
||||
- "sablier.enable=true"
|
||||
- "sablier.group=jasper-unmanic"
|
||||
- "sablier.start-on-demand=true"
|
||||
- 'traefik.enable=true'
|
||||
- 'traefik.docker.network=traefik-network'
|
||||
- 'traefik.http.routers.unmanic.rule=Host(`unmanic.${DOMAIN}`)'
|
||||
- 'traefik.http.routers.unmanic.entrypoints=websecure'
|
||||
- 'traefik.http.routers.unmanic.tls.certresolver=letsencrypt'
|
||||
- 'traefik.http.routers.unmanic.middlewares=authelia@docker'
|
||||
- 'traefik.http.services.unmanic.loadbalancer.server.port=8889'
|
||||
- 'sablier.enable=true'
|
||||
- 'sablier.group=jasper-unmanic'
|
||||
- 'sablier.start-on-demand=true'
|
||||
|
||||
networks:
|
||||
homelab-network:
|
||||
|
||||
@@ -15,7 +15,7 @@ services:
|
||||
- homelab-network
|
||||
- traefik-network
|
||||
ports:
|
||||
- "9898:9898"
|
||||
- '9898:9898'
|
||||
volumes:
|
||||
- ./backrest/data:/data
|
||||
- ./backrest/config:/config
|
||||
@@ -27,7 +27,7 @@ services:
|
||||
- BACKREST_CONFIG=/config/config.json
|
||||
- TZ=America/New_York
|
||||
healthcheck:
|
||||
test: ["CMD", "wget", "--quiet", "--tries=1", "--spider", "http://localhost:9898/"]
|
||||
test: ['CMD', 'wget', '--quiet', '--tries=1', '--spider', 'http://localhost:9898/']
|
||||
interval: 30s
|
||||
timeout: 10s
|
||||
retries: 3
|
||||
@@ -35,22 +35,22 @@ services:
|
||||
labels:
|
||||
# TRAEFIK CONFIGURATION
|
||||
# Service metadata
|
||||
- "com.centurylinklabs.watchtower.enable=true"
|
||||
- 'homelab.category=utilities"
|
||||
- 'homelab.description=Backup management with restic"
|
||||
- 'traefik.enable=true"
|
||||
- 'traefik.docker.network=traefik-network"
|
||||
- 'com.centurylinklabs.watchtower.enable=true'
|
||||
- 'homelab.category=utilities'
|
||||
- 'homelab.description=Backup management with restic'
|
||||
- 'traefik.enable=true'
|
||||
- 'traefik.docker.network=traefik-network'
|
||||
# Router configuration
|
||||
- 'traefik.http.routers.backrest.rule=Host(`backrest.${DOMAIN}`)"
|
||||
- 'traefik.http.routers.backrest.entrypoints=websecure"
|
||||
- 'traefik.http.routers.backrest.tls.certresolver=letsencrypt"
|
||||
- 'traefik.http.routers.backrest.middlewares=authelia@docker"
|
||||
- 'traefik.http.routers.backrest.rule=Host(`backrest.${DOMAIN}`)'
|
||||
- 'traefik.http.routers.backrest.entrypoints=websecure'
|
||||
- 'traefik.http.routers.backrest.tls.certresolver=letsencrypt'
|
||||
- 'traefik.http.routers.backrest.middlewares=authelia@docker'
|
||||
# Service configuration
|
||||
- 'traefik.http.services.backrest.loadbalancer.server.port=9898"
|
||||
- 'traefik.http.services.backrest.loadbalancer.server.port=9898'
|
||||
# Sablier configuration
|
||||
- "sablier.enable=true"
|
||||
- "sablier.group=jasper-backrest"
|
||||
- "sablier.start-on-demand=true"
|
||||
- 'sablier.enable=true'
|
||||
- 'sablier.group=jasper-backrest'
|
||||
- 'sablier.start-on-demand=true'
|
||||
|
||||
# Duplicati - Backup solution
|
||||
duplicati:
|
||||
@@ -61,7 +61,7 @@ services:
|
||||
- homelab-network
|
||||
- traefik-network
|
||||
ports:
|
||||
- "8200:8200"
|
||||
- '8200:8200'
|
||||
volumes:
|
||||
- ./duplicati/config:/config
|
||||
- /opt/stacks:/source/stacks:ro
|
||||
@@ -72,7 +72,7 @@ services:
|
||||
- PGID=1000
|
||||
- TZ=America/New_York
|
||||
healthcheck:
|
||||
test: ["CMD", "curl", "-f", "http://localhost:8200/"]
|
||||
test: ['CMD', 'curl', '-f', 'http://localhost:8200/']
|
||||
interval: 30s
|
||||
timeout: 10s
|
||||
retries: 3
|
||||
@@ -80,22 +80,22 @@ services:
|
||||
labels:
|
||||
# TRAEFIK CONFIGURATION
|
||||
# Service metadata
|
||||
- "com.centurylinklabs.watchtower.enable=true"
|
||||
- 'homelab.category=utilities"
|
||||
- 'homelab.description=Backup software with encryption"
|
||||
- 'traefik.enable=true"
|
||||
- 'traefik.docker.network=traefik-network"
|
||||
- 'com.centurylinklabs.watchtower.enable=true'
|
||||
- 'homelab.category=utilities'
|
||||
- 'homelab.description=Backup software with encryption'
|
||||
- 'traefik.enable=true'
|
||||
- 'traefik.docker.network=traefik-network'
|
||||
# Router configuration
|
||||
- 'traefik.http.routers.duplicati.rule=Host(`duplicati.${DOMAIN}`)"
|
||||
- 'traefik.http.routers.duplicati.entrypoints=websecure"
|
||||
- 'traefik.http.routers.duplicati.tls.certresolver=letsencrypt"
|
||||
- 'traefik.http.routers.duplicati.middlewares=authelia@docker"
|
||||
- 'traefik.http.routers.duplicati.rule=Host(`duplicati.${DOMAIN}`)'
|
||||
- 'traefik.http.routers.duplicati.entrypoints=websecure'
|
||||
- 'traefik.http.routers.duplicati.tls.certresolver=letsencrypt'
|
||||
- 'traefik.http.routers.duplicati.middlewares=authelia@docker'
|
||||
# Service configuration
|
||||
- 'traefik.http.services.duplicati.loadbalancer.server.port=8200"
|
||||
- 'traefik.http.services.duplicati.loadbalancer.server.port=8200'
|
||||
# Sablier configuration
|
||||
- "sablier.enable=true"
|
||||
- "sablier.group=jasper-duplicati"
|
||||
- "sablier.start-on-demand=true"
|
||||
- 'sablier.enable=true'
|
||||
- 'sablier.group=jasper-duplicati'
|
||||
- 'sablier.start-on-demand=true'
|
||||
|
||||
# Form.io - Form builder
|
||||
# Uncomment and configure if formio/formio image becomes available
|
||||
@@ -107,13 +107,13 @@ services:
|
||||
- homelab-network
|
||||
- traefik-network
|
||||
ports:
|
||||
- "3002:3001"
|
||||
- '3002:3001'
|
||||
environment:
|
||||
- MONGO=mongodb://formio-mongo:27017/formio
|
||||
- JWT_SECRET=${FORMIO_JWT_SECRET}
|
||||
- DB_SECRET=${FORMIO_DB_SECRET}
|
||||
healthcheck:
|
||||
test: ["CMD", "wget", "--quiet", "--tries=1", "--spider", "http://localhost:3001/"]
|
||||
test: ['CMD', 'wget', '--quiet', '--tries=1', '--spider', 'http://localhost:3001/']
|
||||
interval: 30s
|
||||
timeout: 10s
|
||||
retries: 3
|
||||
@@ -124,22 +124,22 @@ services:
|
||||
# TRAEFIK CONFIGURATION
|
||||
# ==========================================
|
||||
# Service metadata
|
||||
- "com.centurylinklabs.watchtower.enable=true"
|
||||
- 'homelab.category=utilities"
|
||||
- 'homelab.description=Form builder platform"
|
||||
- 'com.centurylinklabs.watchtower.enable=true'
|
||||
- 'homelab.category=utilities'
|
||||
- 'homelab.description=Form builder platform'
|
||||
# Traefik labels
|
||||
- 'traefik.enable=true"
|
||||
- 'traefik.enable=true'
|
||||
# Router configuration
|
||||
- 'traefik.http.routers.formio.rule=Host(`forms.${DOMAIN}`)"
|
||||
- 'traefik.http.routers.formio.entrypoints=websecure"
|
||||
- 'traefik.http.routers.formio.tls.certresolver=letsencrypt"
|
||||
- 'traefik.http.routers.formio.middlewares=authelia@docker"
|
||||
- 'traefik.http.routers.formio.rule=Host(`forms.${DOMAIN}`)'
|
||||
- 'traefik.http.routers.formio.entrypoints=websecure'
|
||||
- 'traefik.http.routers.formio.tls.certresolver=letsencrypt'
|
||||
- 'traefik.http.routers.formio.middlewares=authelia@docker'
|
||||
# Service configuration
|
||||
- 'traefik.http.services.formio.loadbalancer.server.port=3001"
|
||||
- 'traefik.http.services.formio.loadbalancer.server.port=3001'
|
||||
# Sablier configuration
|
||||
- "sablier.enable=true"
|
||||
- "sablier.group=jasper-formio"
|
||||
- "sablier.start-on-demand=true"
|
||||
- 'sablier.enable=true'
|
||||
- 'sablier.group=jasper-formio'
|
||||
- 'sablier.start-on-demand=true'
|
||||
|
||||
formio-mongo:
|
||||
image: mongo:4.4
|
||||
@@ -148,8 +148,8 @@ services:
|
||||
networks:
|
||||
- homelab-network
|
||||
labels:
|
||||
- 'homelab.category=utilities"
|
||||
- 'homelab.description=Form.io database"
|
||||
- 'homelab.category=utilities'
|
||||
- 'homelab.description=Form.io database'
|
||||
|
||||
# Bitwarden (Vaultwarden) - Password manager
|
||||
# Note: SSO disabled for browser extension and mobile app compatibility
|
||||
@@ -162,7 +162,7 @@ services:
|
||||
- homelab-network
|
||||
- traefik-network
|
||||
ports:
|
||||
- "8091:80"
|
||||
- '8091:80'
|
||||
volumes:
|
||||
- ./vaultwarden/data:/data
|
||||
environment:
|
||||
@@ -178,7 +178,7 @@ services:
|
||||
# - SMTP_USERNAME=${SMTP_USERNAME}
|
||||
# - SMTP_PASSWORD=${SMTP_PASSWORD}
|
||||
healthcheck:
|
||||
test: ["CMD", "curl", "-f", "http://localhost:80/"]
|
||||
test: ['CMD', 'curl', '-f', 'http://localhost:80/']
|
||||
interval: 30s
|
||||
timeout: 10s
|
||||
retries: 3
|
||||
@@ -187,23 +187,23 @@ services:
|
||||
# TRAEFIK CONFIGURATION
|
||||
# ==========================================
|
||||
# Service metadata
|
||||
- 'homelab.category=utilities"
|
||||
- 'homelab.description=Self-hosted password manager (Bitwarden)"
|
||||
- 'homelab.category=utilities'
|
||||
- 'homelab.description=Self-hosted password manager (Bitwarden)'
|
||||
# Traefik reverse proxy (comment/uncomment to disable/enable)
|
||||
# If Traefik is on a remote server: these labels are NOT USED;
|
||||
# configure external yml files in /traefik/dynamic folder instead.
|
||||
- 'traefik.enable=true"
|
||||
- 'traefik.http.routers.vaultwarden.rule=Host(`vault.${DOMAIN}`)"
|
||||
- 'traefik.http.routers.vaultwarden.entrypoints=websecure"
|
||||
- 'traefik.http.routers.vaultwarden.tls=true"
|
||||
- 'traefik.http.routers.vaultwarden.tls.certresolver=letsencrypt"
|
||||
- 'traefik.enable=true'
|
||||
- 'traefik.http.routers.vaultwarden.rule=Host(`vault.${DOMAIN}`)'
|
||||
- 'traefik.http.routers.vaultwarden.entrypoints=websecure'
|
||||
- 'traefik.http.routers.vaultwarden.tls=true'
|
||||
- 'traefik.http.routers.vaultwarden.tls.certresolver=letsencrypt'
|
||||
# SSO disabled for browser extension and mobile app compatibility
|
||||
# - 'traefik.http.routers.vaultwarden.middlewares=authelia@docker"
|
||||
- 'traefik.http.services.vaultwarden.loadbalancer.server.port=80"
|
||||
# - 'traefik.http.routers.vaultwarden.middlewares=authelia@docker'
|
||||
- 'traefik.http.services.vaultwarden.loadbalancer.server.port=80'
|
||||
# Sablier configuration
|
||||
- "sablier.enable=true"
|
||||
- "sablier.group=jasper-vaultwarden"
|
||||
- "sablier.start-on-demand=true"
|
||||
- 'sablier.enable=true'
|
||||
- 'sablier.group=jasper-vaultwarden'
|
||||
- 'sablier.start-on-demand=true'
|
||||
|
||||
# Authelia Redis - Session storage for Authelia
|
||||
# No web UI - backend service
|
||||
|
||||
@@ -19,12 +19,12 @@ services:
|
||||
- homelab-network
|
||||
- traefik-network
|
||||
ports:
|
||||
- "8888:8888/tcp" # HTTP proxy
|
||||
- "8388:8388/tcp" # Shadowsocks
|
||||
- "8388:8388/udp" # Shadowsocks
|
||||
- "8081:8080" # qBittorrent web UI
|
||||
- "6881:6881" # qBittorrent
|
||||
- "6881:6881/udp" # qBittorrent
|
||||
- '8888:8888/tcp' # HTTP proxy
|
||||
- '8388:8388/tcp' # Shadowsocks
|
||||
- '8388:8388/udp' # Shadowsocks
|
||||
- '8081:8080' # qBittorrent web UI
|
||||
- '6881:6881' # qBittorrent
|
||||
- '6881:6881/udp' # qBittorrent
|
||||
volumes:
|
||||
- ./gluetun:/gluetun
|
||||
environment:
|
||||
@@ -37,22 +37,22 @@ services:
|
||||
# TRAEFIK CONFIGURATION
|
||||
labels:
|
||||
# Service metadata
|
||||
- "com.centurylinklabs.watchtower.enable=true"
|
||||
- 'homelab.category=downloaders"
|
||||
- 'homelab.description=VPN client for secure downloads"
|
||||
- 'traefik.enable=true"
|
||||
- 'traefik.docker.network=traefik-network"
|
||||
- 'com.centurylinklabs.watchtower.enable=true'
|
||||
- 'homelab.category=downloaders'
|
||||
- 'homelab.description=VPN client for secure downloads'
|
||||
- 'traefik.enable=true'
|
||||
- 'traefik.docker.network=traefik-network'
|
||||
# Router configuration
|
||||
- 'traefik.http.routers.qbittorrent.rule=Host(`qbit.${DOMAIN}`)"
|
||||
- 'traefik.http.routers.qbittorrent.entrypoints=websecure"
|
||||
- 'traefik.http.routers.qbittorrent.tls=true"
|
||||
- 'traefik.http.routers.qbittorrent.middlewares=authelia@docker"
|
||||
- 'traefik.http.routers.qbittorrent.rule=Host(`qbit.${DOMAIN}`)'
|
||||
- 'traefik.http.routers.qbittorrent.entrypoints=websecure'
|
||||
- 'traefik.http.routers.qbittorrent.tls=true'
|
||||
- 'traefik.http.routers.qbittorrent.middlewares=authelia@docker'
|
||||
# Service configuration
|
||||
- 'traefik.http.services.qbittorrent.loadbalancer.server.port=8081"
|
||||
- 'traefik.http.services.qbittorrent.loadbalancer.server.port=8081'
|
||||
# Sablier configuration
|
||||
- "sablier.enable=true"
|
||||
- "sablier.group=jasper-qbittorrent"
|
||||
- "sablier.sessionDuration=1h"
|
||||
- 'sablier.enable=true'
|
||||
- 'sablier.group=jasper-qbittorrent'
|
||||
- 'sablier.sessionDuration=1h'
|
||||
|
||||
# qBittorrent - Torrent client
|
||||
qbittorrent:
|
||||
@@ -68,7 +68,7 @@ services:
|
||||
memory: 256M
|
||||
container_name: qbittorrent
|
||||
restart: unless-stopped
|
||||
network_mode: "service:gluetun" # Routes through VPN in same compose file
|
||||
network_mode: 'service:gluetun' # Routes through VPN in same compose file
|
||||
volumes:
|
||||
- ./qbittorrent/config:/config
|
||||
- /mnt/downloads:/downloads
|
||||
|
||||
@@ -15,7 +15,7 @@ services:
|
||||
- homelab-network
|
||||
- traefik-network
|
||||
ports:
|
||||
- "8087:80"
|
||||
- '8087:80'
|
||||
volumes:
|
||||
- ./dokuwiki/config:/config
|
||||
environment:
|
||||
@@ -25,22 +25,22 @@ services:
|
||||
labels:
|
||||
# TRAEFIK CONFIGURATION
|
||||
# Service metadata
|
||||
- "com.centurylinklabs.watchtower.enable=true"
|
||||
- 'homelab.category=productivity"
|
||||
- 'homelab.description=File-based wiki"
|
||||
- 'traefik.enable=true"
|
||||
- 'traefik.docker.network=traefik-network"
|
||||
- 'com.centurylinklabs.watchtower.enable=true'
|
||||
- 'homelab.category=productivity'
|
||||
- 'homelab.description=File-based wiki'
|
||||
- 'traefik.enable=true'
|
||||
- 'traefik.docker.network=traefik-network'
|
||||
# Router configuration
|
||||
- 'traefik.http.routers.dokuwiki.rule=Host(`dokuwiki.${DOMAIN}`)"
|
||||
- 'traefik.http.routers.dokuwiki.entrypoints=websecure"
|
||||
- 'traefik.http.routers.dokuwiki.tls.certresolver=letsencrypt"
|
||||
- 'traefik.http.routers.dokuwiki.middlewares=authelia@docker"
|
||||
- 'traefik.http.routers.dokuwiki.rule=Host(`dokuwiki.${DOMAIN}`)'
|
||||
- 'traefik.http.routers.dokuwiki.entrypoints=websecure'
|
||||
- 'traefik.http.routers.dokuwiki.tls.certresolver=letsencrypt'
|
||||
- 'traefik.http.routers.dokuwiki.middlewares=authelia@docker'
|
||||
# Service configuration
|
||||
- 'traefik.http.services.dokuwiki.loadbalancer.server.port=8087"
|
||||
- 'traefik.http.services.dokuwiki.loadbalancer.server.port=8087'
|
||||
# Sablier configuration
|
||||
- "sablier.enable=true"
|
||||
- "sablier.group=jasper-dokuwiki"
|
||||
- "sablier.start-on-demand=true"
|
||||
- 'sablier.enable=true'
|
||||
- 'sablier.group=jasper-dokuwiki'
|
||||
- 'sablier.start-on-demand=true'
|
||||
|
||||
# BookStack - Documentation platform
|
||||
# Uses Sablier lazy loading - starts on-demand, stops after 5min inactivity
|
||||
@@ -52,7 +52,7 @@ services:
|
||||
- homelab-network
|
||||
- traefik-network
|
||||
ports:
|
||||
- "6875:80"
|
||||
- '6875:80'
|
||||
volumes:
|
||||
- ./bookstack/config:/config
|
||||
environment:
|
||||
@@ -66,7 +66,7 @@ services:
|
||||
- DB_PASSWORD=${BOOKSTACK_DB_PASSWORD}
|
||||
- APP_KEY=base64:NsYD8+8MAvtBhK8xw9p8pxQDy4x8aOQi/78M3CsseAw=
|
||||
healthcheck:
|
||||
test: ["CMD", "curl", "-f", "http://localhost/"]
|
||||
test: ['CMD', 'curl', '-f', 'http://localhost/']
|
||||
interval: 30s
|
||||
timeout: 10s
|
||||
retries: 3
|
||||
@@ -76,22 +76,22 @@ services:
|
||||
labels:
|
||||
# TRAEFIK CONFIGURATION
|
||||
# Service metadata
|
||||
- "com.centurylinklabs.watchtower.enable=true"
|
||||
- 'homelab.category=productivity"
|
||||
- 'homelab.description=Documentation and wiki platform"
|
||||
- 'traefik.enable=true"
|
||||
- 'traefik.docker.network=traefik-network"
|
||||
- 'com.centurylinklabs.watchtower.enable=true'
|
||||
- 'homelab.category=productivity'
|
||||
- 'homelab.description=Documentation and wiki platform'
|
||||
- 'traefik.enable=true'
|
||||
- 'traefik.docker.network=traefik-network'
|
||||
# Router configuration
|
||||
- 'traefik.http.routers.bookstack.rule=Host(`bookstack.${DOMAIN}`)"
|
||||
- 'traefik.http.routers.bookstack.entrypoints=websecure"
|
||||
- 'traefik.http.routers.bookstack.tls.certresolver=letsencrypt"
|
||||
- 'traefik.http.routers.bookstack.middlewares=authelia@docker"
|
||||
- 'traefik.http.routers.bookstack.rule=Host(`bookstack.${DOMAIN}`)'
|
||||
- 'traefik.http.routers.bookstack.entrypoints=websecure'
|
||||
- 'traefik.http.routers.bookstack.tls.certresolver=letsencrypt'
|
||||
- 'traefik.http.routers.bookstack.middlewares=authelia@docker'
|
||||
# Service configuration
|
||||
- 'traefik.http.services.bookstack.loadbalancer.server.port=6875"
|
||||
- 'traefik.http.services.bookstack.loadbalancer.server.port=6875'
|
||||
# Sablier configuration
|
||||
- "sablier.enable=true"
|
||||
- "sablier.group=jasper-bookstack"
|
||||
- "sablier.start-on-demand=true"
|
||||
- 'sablier.enable=true'
|
||||
- 'sablier.group=jasper-bookstack'
|
||||
- 'sablier.start-on-demand=true'
|
||||
|
||||
bookstack-db:
|
||||
image: mariadb:10.11
|
||||
@@ -107,8 +107,8 @@ services:
|
||||
- MYSQL_USER=bookstack
|
||||
- MYSQL_PASSWORD=${BOOKSTACK_DB_PASSWORD}
|
||||
labels:
|
||||
- 'homelab.category=productivity"
|
||||
- 'homelab.description=BookStack database"
|
||||
- 'homelab.category=productivity'
|
||||
- 'homelab.description=BookStack database'
|
||||
|
||||
# MediaWiki - Wiki platform
|
||||
mediawiki:
|
||||
@@ -119,7 +119,7 @@ services:
|
||||
- homelab-network
|
||||
- traefik-network
|
||||
ports:
|
||||
- "8086:80"
|
||||
- '8086:80'
|
||||
volumes:
|
||||
- ./mediawiki/images:/var/www/html/images
|
||||
- ./mediawiki/LocalSettings.php:/var/www/html/LocalSettings.php
|
||||
@@ -129,7 +129,7 @@ services:
|
||||
- MEDIAWIKI_DB_USER=mediawiki
|
||||
- MEDIAWIKI_DB_PASSWORD=${MEDIAWIKI_DB_PASSWORD}
|
||||
healthcheck:
|
||||
test: ["CMD", "curl", "-f", "http://localhost/"]
|
||||
test: ['CMD', 'curl', '-f', 'http://localhost/']
|
||||
interval: 30s
|
||||
timeout: 10s
|
||||
retries: 3
|
||||
@@ -139,22 +139,22 @@ services:
|
||||
labels:
|
||||
# TRAEFIK CONFIGURATION
|
||||
# Service metadata
|
||||
- "com.centurylinklabs.watchtower.enable=true"
|
||||
- 'homelab.category=productivity"
|
||||
- 'homelab.description=MediaWiki platform"
|
||||
- 'traefik.enable=true"
|
||||
- 'traefik.docker.network=traefik-network"
|
||||
- 'com.centurylinklabs.watchtower.enable=true'
|
||||
- 'homelab.category=productivity'
|
||||
- 'homelab.description=MediaWiki platform'
|
||||
- 'traefik.enable=true'
|
||||
- 'traefik.docker.network=traefik-network'
|
||||
# Router configuration
|
||||
- 'traefik.http.routers.mediawiki.rule=Host(`mediawiki.${DOMAIN}`)"
|
||||
- 'traefik.http.routers.mediawiki.entrypoints=websecure"
|
||||
- 'traefik.http.routers.mediawiki.tls.certresolver=letsencrypt"
|
||||
- 'traefik.http.routers.mediawiki.middlewares=authelia@docker"
|
||||
- 'traefik.http.routers.mediawiki.rule=Host(`mediawiki.${DOMAIN}`)'
|
||||
- 'traefik.http.routers.mediawiki.entrypoints=websecure'
|
||||
- 'traefik.http.routers.mediawiki.tls.certresolver=letsencrypt'
|
||||
- 'traefik.http.routers.mediawiki.middlewares=authelia@docker'
|
||||
# Service configuration
|
||||
- 'traefik.http.services.mediawiki.loadbalancer.server.port=8086"
|
||||
- 'traefik.http.services.mediawiki.loadbalancer.server.port=8086'
|
||||
# Sablier configuration
|
||||
- "sablier.enable=true"
|
||||
- "sablier.group=jasper-mediawiki"
|
||||
- "sablier.start-on-demand=true"
|
||||
- 'sablier.enable=true'
|
||||
- 'sablier.group=jasper-mediawiki'
|
||||
- 'sablier.start-on-demand=true'
|
||||
|
||||
mediawiki-db:
|
||||
image: mariadb:10.11
|
||||
@@ -170,8 +170,8 @@ services:
|
||||
- MYSQL_USER=mediawiki
|
||||
- MYSQL_PASSWORD=${MEDIAWIKI_DB_PASSWORD}
|
||||
labels:
|
||||
- 'homelab.category=productivity"
|
||||
- 'homelab.description=MediaWiki database"
|
||||
- 'homelab.category=productivity'
|
||||
- 'homelab.description=MediaWiki database'
|
||||
|
||||
volumes:
|
||||
bookstack-db-data:
|
||||
|
||||
@@ -1,146 +0,0 @@
|
||||
I've reviewed the full script and compared it against the show_main_menu() function and the overall logic flow. The script appears to be a comprehensive setup and deployment tool for the EZ-Homelab project, but there are several inconsistencies, bugs, and areas that need updating to align with the new menu options and intended behavior. Below is a detailed audit, focusing on the menu-related changes and broader issues.
|
||||
|
||||
1. Menu Structure and Logic Alignment
|
||||
Current Menu Options (from show_main_menu()):
|
||||
Install Prerequisites
|
||||
Deploy Core Server
|
||||
Deploy Additional Server
|
||||
Install NVIDIA Drivers
|
||||
The menu is clean and logical, with clear separation between system setup, core deployment, additional server setup, and optional NVIDIA support.
|
||||
Code Handling:
|
||||
Option 1: Sets FORCE_SYSTEM_SETUP=true, skips deployment, calls system_setup(), and exits. This is consistent.
|
||||
Option 2: Sets DEPLOY_CORE=true, DEPLOY_INFRASTRUCTURE=true, DEPLOY_DASHBOARDS=true, SETUP_STACKS=true. Deploys everything for a core server. Consistent.
|
||||
Option 3: Sets DEPLOY_CORE=false, DEPLOY_INFRASTRUCTURE=true, DEPLOY_DASHBOARDS=false, SETUP_STACKS=true. Deploys infrastructure and stacks (no core/dashboards) for additional servers. This makes sense for multi-server setups.
|
||||
Option 4: Sets INSTALL_NVIDIA=true, skips deployment, logs a warning ("not yet implemented"), and exits. Placeholder only.
|
||||
Issues:
|
||||
The menu assumes Option 3 ("Deploy Additional Server") is for multi-server scenarios, but the script still prompts for all REQUIRED_VARS (e.g., DOMAIN, SERVER_IP) even for additional servers. For additional servers, some variables (like DOMAIN if it's core-specific) might not be needed, or SERVER_IP should be clarified as the additional server's IP. The code doesn't differentiate prompts based on deployment type, which could confuse users.
|
||||
No command-line argument parsing (e.g., --debug, --verbose, or direct flags for deployment modes). The script logs args but doesn't use them, forcing interactive menu use.
|
||||
The menu loop (while true) correctly handles invalid inputs and quits, but there's no way to bypass the menu (e.g., via env vars or args) for automated/scripted runs.
|
||||
2. Bugs and Inconsistencies
|
||||
Hardcoded Values in Completion Message:
|
||||
Lines ~1520-1530: The success message hardcodes URLs like https://dockge.kelinreij.duckdns.org and https://homepage.kelinreij.duckdns.org. These should use ${DOMAIN} and ${SERVER_IP} for dynamic replacement. This is a critical bug as it won't work for other users/domains.
|
||||
Double Setting of HOMEPAGE_ALLOWED_HOSTS:
|
||||
In save_env_file() (lines ~630-633 and ~647-649): First sets HOMEPAGE_ALLOWED_HOSTS="${HOMEPAGE_SUBDOMAIN:-homepage}.${DOMAIN},${SERVER_IP}:3003", then overwrites it with HOMEPAGE_ALLOWED_HOSTS="homepage.${DOMAIN},${SERVER_IP}:3003". The second line ignores HOMEPAGE_SUBDOMAIN, causing inconsistency. Remove or merge the logic.
|
||||
NVIDIA Driver Installation:
|
||||
Option 4 is a placeholder with no implementation. The script should either implement basic NVIDIA setup (e.g., install drivers, CUDA) or remove the option to avoid user confusion.
|
||||
Environment Variable Handling:
|
||||
DEBUG and VERBOSE are set from env but not from command-line args. Add arg parsing (e.g., using getopts) to allow --debug, --verbose, etc.
|
||||
In load_env_file_safely(), it exports vars but doesn't handle arrays or complex values well. No major issues, but could be improved for security (e.g., avoid logging secrets even in debug).
|
||||
TLS and Multi-Server Logic:
|
||||
For Option 3, setup_multi_server_tls() is called, which prompts for CORE_SERVER_IP if unset. This is good, but the script assumes SERVER_IP is the additional server's IP for TLS certs. Clarify in prompts or docs.
|
||||
REQUIRED_VARS includes DOMAIN, but for additional servers, the domain might be inherited from core. The code doesn't skip prompts intelligently.
|
||||
3. Code Structure and Messiness
|
||||
Function Organization:
|
||||
The script is long (1542 lines) and could be modularized (e.g., extract menu logic, validation, and deployment into separate functions or files). main() is bloated with inline logic.
|
||||
Repeated patterns (e.g., copying .env files, replacing placeholders, chowning dirs) could be refactored into reusable functions.
|
||||
Error Handling and Logging:
|
||||
Good use of log_* functions, but debug_log only writes to file if DEBUG=true. Add console debug output for VERBOSE=true.
|
||||
set -e is good, but some commands (e.g., docker network create) use || to ignore failures, which is inconsistent.
|
||||
Deployment Flow:
|
||||
After menu selection, the flow is: check system, prompt vars, save, validate, deploy. For Option 1/4, it exits early, which is fine. But for Options 2/3, it always runs enhance_placeholder_replacement() and validate_secrets(), even if not deploying core. validate_secrets() skips if DEPLOY_CORE=false, which is correct.
|
||||
perform_deployment() has dynamic step numbering, but it's minor.
|
||||
Security and Best Practices:
|
||||
Uses sudo appropriately, but ACTUAL_USER logic is solid.
|
||||
Secrets (e.g., passwords) are handled carefully, but AUTHELIA_ADMIN_PASSWORD generation could fail silently if Docker isn't available.
|
||||
No input sanitization beyond basic validation in validate_variable().
|
||||
4. Recommendations for Updates
|
||||
Align with Menu Changes:
|
||||
Update prompts in validate_and_prompt_variables() to skip irrelevant vars for Option 3 (e.g., don't prompt for DOMAIN if deploying additional server, or clarify it's for core).
|
||||
Add command-line support: Parse args to set flags directly (e.g., .[ez-homelab.sh](http://_vscodecontentref_/1) --deploy-core --verbose to bypass menu).
|
||||
Implement Option 4: Add NVIDIA driver installation logic in system_setup() or a new function (check GPU, install drivers, reboot if needed).
|
||||
Fix Bugs:
|
||||
Replace hardcoded URLs with variables (e.g., https://dockge.${DOMAIN}).
|
||||
Fix HOMEPAGE_ALLOWED_HOSTS duplication.
|
||||
Add validation to ensure DOMAIN and SERVER_IP are set before deployment.
|
||||
General Cleanup:
|
||||
Break main() into smaller functions (e.g., handle_menu_choice(), prepare_deployment()).
|
||||
Add a --help option and better error messages.
|
||||
Test multi-server flow thoroughly, as setup_multi_server_tls() has complex SSH logic.
|
||||
Testing:
|
||||
The script lacks automated tests. Add checks for each menu path (e.g., dry-run mode).
|
||||
Validate YAML syntax in perform_deployment() before deploying.
|
||||
Overall, the script is functional but messy, with the menu changes exposing inconsistencies in variable prompting and hardcoded values. The core logic aligns with the menu, but updates are needed for robustness, especially for multi-server setups. If you provide more details on what specific "menu changes" broke, I can refine this audit.
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
Overview of Requested Changes
|
||||
Based on your notes, this is a comprehensive refactoring of ez-homelab.sh to improve usability, fix bugs, and enhance functionality. The changes focus on modularizing the script, improving user experience (e.g., better prompts, error handling), and adding new features like argument parsing and dry-run mode. Key themes include separating concerns (e.g., moving system setup to a new script), clarifying multi-server logic, and making the script more robust and user-friendly.
|
||||
|
||||
Scope and Impact:
|
||||
|
||||
Lines of Code: The script is ~1542 lines; changes will add/modify ~200-400 lines, potentially splitting into multiple files.
|
||||
Breaking Changes: Menu Option 1 now requires a separate script; variable prompts and logic for Option 3/multi-server will change; hardcoded values will be removed.
|
||||
Testing Needs: High—test all menu options, argument parsing, dry-run, error scenarios, and multi-server TLS. Validate on different systems (with/without NVIDIA, core vs. additional servers).
|
||||
Dependencies: Requires nvidia-detect (for Option 4), access to NVIDIA repos, and ensuring the new install-prerequisites.sh works across distros.
|
||||
Estimated Effort: 10-15 hours for implementation, plus testing. Prioritize bugs and core menu fixes first.
|
||||
Risks: Error handling changes could mask real issues; multi-server logic is complex—ensure backward compatibility.
|
||||
The tasks are broken down into categories below, with priorities (High/Medium/Low) based on impact and dependencies.
|
||||
|
||||
Task Breakdown
|
||||
1. Menu and Workflow Updates (High Priority - Core User Experience)
|
||||
Task 1.1: Create install-prerequisites.sh
|
||||
Extract system_setup() and related logic into a new standalone script. Ensure it must run as root/sudo. Update Option 1 in ez-homelab.sh to detect if running as root; if not, use sudo to launch the new script. Handle cases where sudo isn't available (e.g., log error and exit).
|
||||
Effort: 2-3 hours. Dependencies: None. Files: New install-prerequisites.sh, modify ez-homelab.sh.
|
||||
|
||||
Task 1.2: Update Menu Option 3 Prompts
|
||||
Ensure Option 3 prompts for all REQUIRED_VARS (no skipping). Reword prompts to clarify that variables like DOMAIN and SERVER_IP are for the local (additional) server, while REMOTE_SERVER_* vars are for the core server (e.g., "Enter the IP of the core server where Traefik is running"). Keep existing variable names but improve descriptions.
|
||||
Effort: 1-2 hours. Dependencies: Review .env.example for REMOTE_SERVER context. Files: Modify validate_and_prompt_variables() and prompt_for_variable() in ez-homelab.sh.
|
||||
|
||||
Task 1.3: Implement Menu Option 4 (NVIDIA Installation)
|
||||
Use nvidia-detect to identify the GPU and determine the official NVIDIA installer. Install NVIDIA drivers and the NVIDIA Container Toolkit (for Docker GPU support). Handle errors gracefully (e.g., if no GPU detected, skip). Integrate into system_setup() or a new function.
|
||||
Effort: 3-4 hours. Dependencies: Install nvidia-detect if needed; test on systems with/without NVIDIA GPUs. Files: Modify/add functions in ez-homelab.sh or install-prerequisites.sh.
|
||||
|
||||
2. Bug Fixes (High Priority - Prevents User Errors)
|
||||
Task 2.1: Remove Hardcoded Values
|
||||
Replace all instances of "kelin", "kelinreij", "kelin-casa", etc., with appropriate variables (e.g., ${DOMAIN}, ${SERVER_IP}). Update completion messages, URLs, and any examples.
|
||||
Effort: 1 hour. Dependencies: Search the script for these strings. Files: ez-homelab.sh.
|
||||
|
||||
Task 2.2: Fix HOMEPAGE_ALLOWED_HOSTS
|
||||
Remove HOMEPAGE_SUBDOMAIN references. Ensure the line is exactly HOMEPAGE_ALLOWED_HOSTS="homepage.${DOMAIN},${SERVER_IP}:3003" (no duplication or overwrites).
|
||||
Effort: 30 minutes. Dependencies: Check save_env_file(). Files: ez-homelab.sh.
|
||||
|
||||
3. New Features and Enhancements (Medium Priority - Improves Script Flexibility)
|
||||
Task 3.1: Add Argument Parsing
|
||||
Implement command-line argument support (e.g., using getopts or argparse if Bash allows). Support flags like --deploy-core, --deploy-additional, --install-nvidia, --dry-run, --verbose, --debug, --help. Allow bypassing the menu for automated runs.
|
||||
Effort: 2-3 hours. Dependencies: None. Files: Add to main() in ez-homelab.sh.
|
||||
|
||||
Task 3.2: Add Dry-Run Mode
|
||||
Implement --dry-run flag: Simulate deployment without making changes (e.g., validate configs, show what would be done, but don't run docker compose up). Log actions verbosely.
|
||||
Effort: 2 hours. Dependencies: Argument parsing. Files: Modify perform_deployment() and related functions in ez-homelab.sh.
|
||||
|
||||
Task 3.3: Enhance Console Logging for Verbose Mode
|
||||
Update log_* functions to output to console when VERBOSE=true (not just debug file). Ensure debug logs go to both console and file.
|
||||
Effort: 1 hour. Dependencies: None. Files: ez-homelab.sh.
|
||||
|
||||
Task 3.4: Improve Error Handling
|
||||
Change set -e to allow continuation on errors (log warnings/errors but proceed). Use || consistently for non-critical failures (e.g., network creation). Add try-catch-like logic where possible.
|
||||
Effort: 2 hours. Dependencies: Review all exit 1 points. Files: ez-homelab.sh.
|
||||
|
||||
4. TLS and Multi-Server Logic Refinements (Medium Priority - Complex but Critical for Multi-Server)
|
||||
Task 4.1: Clarify Variable Usage
|
||||
Update prompts and logic to distinguish: IP_ADDRESS/SERVER_IP for local machine; REMOTE_SERVER_* for core server. Ensure ${DOMAIN} is prompted even for additional servers (as it's needed for configs). Reword setup_multi_server_tls() to emphasize cert copying from core.
|
||||
Effort: 1-2 hours. Dependencies: Task 1.2. Files: ez-homelab.sh.
|
||||
5. Function Organization and Code Quality (Low Priority - Refactoring for Maintainability)
|
||||
Task 5.1: Audit and Improve Placeholder/Env Functions
|
||||
Review replace_env_placeholders() and enhance_placeholder_replacement() for quality (e.g., handle edge cases, improve performance). Refactor if needed.
|
||||
Effort: 1-2 hours. Dependencies: None. Files: ez-homelab.sh.
|
||||
|
||||
Task 5.2: Modularize Code with More Functions
|
||||
Break main() into smaller functions (e.g., parse_args(), handle_menu_choice(), prepare_deployment()). Extract repeated logic (e.g., env copying, dir creation) into helpers.
|
||||
Effort: 3-4 hours. Dependencies: All other tasks. Files: ez-homelab.sh.
|
||||
|
||||
Task 5.3: Fix Deployment Flow
|
||||
Streamline perform_deployment(): Ensure consistent step numbering, better error recovery, and integration with dry-run.
|
||||
Effort: 1 hour. Dependencies: Tasks 3.2, 3.4. Files: ez-homelab.sh.
|
||||
|
||||
Implementation Order and Next Steps
|
||||
Start with High-Priority Tasks: Bugs (2.1-2.2) and Menu Option 1 (1.1) to fix immediate issues.
|
||||
Then Core Features: Argument parsing (3.1), dry-run (3.2), and Option 4 (1.3).
|
||||
Refinements: Error handling (3.4), logging (3.3), and multi-server (4.1).
|
||||
Cleanup: Function organization (5.x) last, as it depends on other changes.
|
||||
Testing Plan: After each task, test interactively and with args. Use dry-run for validation.
|
||||
Questions for You: Confirm if install-prerequisites.sh should be in the same repo. Any specific NVIDIA installer versions to target? Should dry-run output be machine-readable (e.g., JSON)?
|
||||
Let me know if you'd like me to start implementing any tasks or need more details!
|
||||
764
get-docker.sh
764
get-docker.sh
@@ -1,764 +0,0 @@
|
||||
#!/bin/sh
|
||||
set -e
|
||||
# Docker Engine for Linux installation script.
|
||||
#
|
||||
# This script is intended as a convenient way to configure docker's package
|
||||
# repositories and to install Docker Engine, This script is not recommended
|
||||
# for production environments. Before running this script, make yourself familiar
|
||||
# with potential risks and limitations, and refer to the installation manual
|
||||
# at https://docs.docker.com/engine/install/ for alternative installation methods.
|
||||
#
|
||||
# The script:
|
||||
#
|
||||
# - Requires `root` or `sudo` privileges to run.
|
||||
# - Attempts to detect your Linux distribution and version and configure your
|
||||
# package management system for you.
|
||||
# - Doesn't allow you to customize most installation parameters.
|
||||
# - Installs dependencies and recommendations without asking for confirmation.
|
||||
# - Installs the latest stable release (by default) of Docker CLI, Docker Engine,
|
||||
# Docker Buildx, Docker Compose, containerd, and runc. When using this script
|
||||
# to provision a machine, this may result in unexpected major version upgrades
|
||||
# of these packages. Always test upgrades in a test environment before
|
||||
# deploying to your production systems.
|
||||
# - Isn't designed to upgrade an existing Docker installation. When using the
|
||||
# script to update an existing installation, dependencies may not be updated
|
||||
# to the expected version, resulting in outdated versions.
|
||||
#
|
||||
# Source code is available at https://github.com/docker/docker-install/
|
||||
#
|
||||
# Usage
|
||||
# ==============================================================================
|
||||
#
|
||||
# To install the latest stable versions of Docker CLI, Docker Engine, and their
|
||||
# dependencies:
|
||||
#
|
||||
# 1. download the script
|
||||
#
|
||||
# $ curl -fsSL https://get.docker.com -o install-docker.sh
|
||||
#
|
||||
# 2. verify the script's content
|
||||
#
|
||||
# $ cat install-docker.sh
|
||||
#
|
||||
# 3. run the script with --dry-run to verify the steps it executes
|
||||
#
|
||||
# $ sh install-docker.sh --dry-run
|
||||
#
|
||||
# 4. run the script either as root, or using sudo to perform the installation.
|
||||
#
|
||||
# $ sudo sh install-docker.sh
|
||||
#
|
||||
# Command-line options
|
||||
# ==============================================================================
|
||||
#
|
||||
# --version <VERSION>
|
||||
# Use the --version option to install a specific version, for example:
|
||||
#
|
||||
# $ sudo sh install-docker.sh --version 23.0
|
||||
#
|
||||
# --channel <stable|test>
|
||||
#
|
||||
# Use the --channel option to install from an alternative installation channel.
|
||||
# The following example installs the latest versions from the "test" channel,
|
||||
# which includes pre-releases (alpha, beta, rc):
|
||||
#
|
||||
# $ sudo sh install-docker.sh --channel test
|
||||
#
|
||||
# Alternatively, use the script at https://test.docker.com, which uses the test
|
||||
# channel as default.
|
||||
#
|
||||
# --mirror <Aliyun|AzureChinaCloud>
|
||||
#
|
||||
# Use the --mirror option to install from a mirror supported by this script.
|
||||
# Available mirrors are "Aliyun" (https://mirrors.aliyun.com/docker-ce), and
|
||||
# "AzureChinaCloud" (https://mirror.azure.cn/docker-ce), for example:
|
||||
#
|
||||
# $ sudo sh install-docker.sh --mirror AzureChinaCloud
|
||||
#
|
||||
# --setup-repo
|
||||
#
|
||||
# Use the --setup-repo option to configure Docker's package repositories without
|
||||
# installing Docker packages. This is useful when you want to add the repository
|
||||
# but install packages separately:
|
||||
#
|
||||
# $ sudo sh install-docker.sh --setup-repo
|
||||
#
|
||||
# Automatic Service Start
|
||||
#
|
||||
# By default, this script automatically starts the Docker daemon and enables the docker
|
||||
# service after installation if systemd is used as init.
|
||||
#
|
||||
# If you prefer to start the service manually, use the --no-autostart option:
|
||||
#
|
||||
# $ sudo sh install-docker.sh --no-autostart
|
||||
#
|
||||
# Note: Starting the service requires appropriate privileges to manage system services.
|
||||
#
|
||||
# ==============================================================================
|
||||
|
||||
|
||||
# Git commit from https://github.com/docker/docker-install when
|
||||
# the script was uploaded (Should only be modified by upload job):
|
||||
SCRIPT_COMMIT_SHA="f381ee68b32e515bb4dc034b339266aff1fbc460"
|
||||
|
||||
# strip "v" prefix if present
|
||||
VERSION="${VERSION#v}"
|
||||
|
||||
# The channel to install from:
|
||||
# * stable
|
||||
# * test
|
||||
DEFAULT_CHANNEL_VALUE="stable"
|
||||
if [ -z "$CHANNEL" ]; then
|
||||
CHANNEL=$DEFAULT_CHANNEL_VALUE
|
||||
fi
|
||||
|
||||
DEFAULT_DOWNLOAD_URL="https://download.docker.com"
|
||||
if [ -z "$DOWNLOAD_URL" ]; then
|
||||
DOWNLOAD_URL=$DEFAULT_DOWNLOAD_URL
|
||||
fi
|
||||
|
||||
DEFAULT_REPO_FILE="docker-ce.repo"
|
||||
if [ -z "$REPO_FILE" ]; then
|
||||
REPO_FILE="$DEFAULT_REPO_FILE"
|
||||
# Automatically default to a staging repo fora
|
||||
# a staging download url (download-stage.docker.com)
|
||||
case "$DOWNLOAD_URL" in
|
||||
*-stage*) REPO_FILE="docker-ce-staging.repo";;
|
||||
esac
|
||||
fi
|
||||
|
||||
mirror=''
|
||||
DRY_RUN=${DRY_RUN:-}
|
||||
REPO_ONLY=${REPO_ONLY:-0}
|
||||
NO_AUTOSTART=${NO_AUTOSTART:-0}
|
||||
while [ $# -gt 0 ]; do
|
||||
case "$1" in
|
||||
--channel)
|
||||
CHANNEL="$2"
|
||||
shift
|
||||
;;
|
||||
--dry-run)
|
||||
DRY_RUN=1
|
||||
;;
|
||||
--mirror)
|
||||
mirror="$2"
|
||||
shift
|
||||
;;
|
||||
--version)
|
||||
VERSION="${2#v}"
|
||||
shift
|
||||
;;
|
||||
--setup-repo)
|
||||
REPO_ONLY=1
|
||||
shift
|
||||
;;
|
||||
--no-autostart)
|
||||
NO_AUTOSTART=1
|
||||
;;
|
||||
--*)
|
||||
echo "Illegal option $1"
|
||||
;;
|
||||
esac
|
||||
shift $(( $# > 0 ? 1 : 0 ))
|
||||
done
|
||||
|
||||
case "$mirror" in
|
||||
Aliyun)
|
||||
DOWNLOAD_URL="https://mirrors.aliyun.com/docker-ce"
|
||||
;;
|
||||
AzureChinaCloud)
|
||||
DOWNLOAD_URL="https://mirror.azure.cn/docker-ce"
|
||||
;;
|
||||
"")
|
||||
;;
|
||||
*)
|
||||
>&2 echo "unknown mirror '$mirror': use either 'Aliyun', or 'AzureChinaCloud'."
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
|
||||
case "$CHANNEL" in
|
||||
stable|test)
|
||||
;;
|
||||
*)
|
||||
>&2 echo "unknown CHANNEL '$CHANNEL': use either stable or test."
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
|
||||
command_exists() {
|
||||
command -v "$@" > /dev/null 2>&1
|
||||
}
|
||||
|
||||
# version_gte checks if the version specified in $VERSION is at least the given
|
||||
# SemVer (Maj.Minor[.Patch]), or CalVer (YY.MM) version.It returns 0 (success)
|
||||
# if $VERSION is either unset (=latest) or newer or equal than the specified
|
||||
# version, or returns 1 (fail) otherwise.
|
||||
#
|
||||
# examples:
|
||||
#
|
||||
# VERSION=23.0
|
||||
# version_gte 23.0 // 0 (success)
|
||||
# version_gte 20.10 // 0 (success)
|
||||
# version_gte 19.03 // 0 (success)
|
||||
# version_gte 26.1 // 1 (fail)
|
||||
version_gte() {
|
||||
if [ -z "$VERSION" ]; then
|
||||
return 0
|
||||
fi
|
||||
version_compare "$VERSION" "$1"
|
||||
}
|
||||
|
||||
# version_compare compares two version strings (either SemVer (Major.Minor.Path),
|
||||
# or CalVer (YY.MM) version strings. It returns 0 (success) if version A is newer
|
||||
# or equal than version B, or 1 (fail) otherwise. Patch releases and pre-release
|
||||
# (-alpha/-beta) are not taken into account
|
||||
#
|
||||
# examples:
|
||||
#
|
||||
# version_compare 23.0.0 20.10 // 0 (success)
|
||||
# version_compare 23.0 20.10 // 0 (success)
|
||||
# version_compare 20.10 19.03 // 0 (success)
|
||||
# version_compare 20.10 20.10 // 0 (success)
|
||||
# version_compare 19.03 20.10 // 1 (fail)
|
||||
version_compare() (
|
||||
set +x
|
||||
|
||||
yy_a="$(echo "$1" | cut -d'.' -f1)"
|
||||
yy_b="$(echo "$2" | cut -d'.' -f1)"
|
||||
if [ "$yy_a" -lt "$yy_b" ]; then
|
||||
return 1
|
||||
fi
|
||||
if [ "$yy_a" -gt "$yy_b" ]; then
|
||||
return 0
|
||||
fi
|
||||
mm_a="$(echo "$1" | cut -d'.' -f2)"
|
||||
mm_b="$(echo "$2" | cut -d'.' -f2)"
|
||||
|
||||
# trim leading zeros to accommodate CalVer
|
||||
mm_a="${mm_a#0}"
|
||||
mm_b="${mm_b#0}"
|
||||
|
||||
if [ "${mm_a:-0}" -lt "${mm_b:-0}" ]; then
|
||||
return 1
|
||||
fi
|
||||
|
||||
return 0
|
||||
)
|
||||
|
||||
is_dry_run() {
|
||||
if [ -z "$DRY_RUN" ]; then
|
||||
return 1
|
||||
else
|
||||
return 0
|
||||
fi
|
||||
}
|
||||
|
||||
is_wsl() {
|
||||
case "$(uname -r)" in
|
||||
*microsoft* ) true ;; # WSL 2
|
||||
*Microsoft* ) true ;; # WSL 1
|
||||
* ) false;;
|
||||
esac
|
||||
}
|
||||
|
||||
is_darwin() {
|
||||
case "$(uname -s)" in
|
||||
*darwin* ) true ;;
|
||||
*Darwin* ) true ;;
|
||||
* ) false;;
|
||||
esac
|
||||
}
|
||||
|
||||
deprecation_notice() {
|
||||
distro=$1
|
||||
distro_version=$2
|
||||
echo
|
||||
printf "\033[91;1mDEPRECATION WARNING\033[0m\n"
|
||||
printf " This Linux distribution (\033[1m%s %s\033[0m) reached end-of-life and is no longer supported by this script.\n" "$distro" "$distro_version"
|
||||
echo " No updates or security fixes will be released for this distribution, and users are recommended"
|
||||
echo " to upgrade to a currently maintained version of $distro."
|
||||
echo
|
||||
printf "Press \033[1mCtrl+C\033[0m now to abort this script, or wait for the installation to continue."
|
||||
echo
|
||||
sleep 10
|
||||
}
|
||||
|
||||
get_distribution() {
|
||||
lsb_dist=""
|
||||
# Every system that we officially support has /etc/os-release
|
||||
if [ -r /etc/os-release ]; then
|
||||
lsb_dist="$(. /etc/os-release && echo "$ID")"
|
||||
fi
|
||||
# Returning an empty string here should be alright since the
|
||||
# case statements don't act unless you provide an actual value
|
||||
echo "$lsb_dist"
|
||||
}
|
||||
|
||||
# Enable and start the Docker daemon via systemd when systemctl is
# available; otherwise (e.g. inside a container with no init system) just
# report that the packages were installed without starting the daemon.
# Globals read: sh_c (privilege-escalating shell wrapper set by do_install).
# All progress messages go to stderr so stdout stays clean.
start_docker_daemon() {
	# Use systemctl if available (for systemd-based systems)
	if command_exists systemctl; then
		is_dry_run || >&2 echo "Using systemd to manage Docker service"
		# Subshell so the `set -x` trace does not leak into the caller.
		if (
			is_dry_run || set -x
			$sh_c systemctl enable --now docker.service 2>/dev/null
		); then
			is_dry_run || echo "INFO: Docker daemon enabled and started" >&2
		else
			# Non-fatal: the packages are installed even if enabling failed.
			is_dry_run || echo "WARNING: unable to enable the docker service" >&2
		fi
	else
		# No service management available (container environment)
		if ! is_dry_run; then
			>&2 echo "Note: Running in a container environment without service management"
			>&2 echo "Docker daemon cannot be started automatically in this environment"
			>&2 echo "The Docker packages have been installed successfully"
		fi
	fi
	>&2 echo
}
|
||||
|
||||
# Print post-install guidance for running Docker without root: rootless
# mode (for docker >= 20.10) and the daemon-access documentation, plus the
# remote-API attack-surface warning. Also smoke-tests `docker version`
# when the daemon socket exists. No-op under DRY_RUN.
# Globals read: sh_c (privilege wrapper set by do_install).
echo_docker_as_nonroot() {
	if is_dry_run; then
		return
	fi
	if command_exists docker && [ -e /var/run/docker.sock ]; then
		# Best-effort sanity check; `|| true` keeps a failing daemon from
		# aborting the script under set -e.
		(
			set -x
			$sh_c 'docker version'
		) || true
	fi

	# Post-install guidance banner for non-root usage.
	echo
	echo "================================================================================"
	echo
	if version_gte "20.10"; then
		echo "To run Docker as a non-privileged user, consider setting up the"
		echo "Docker daemon in rootless mode for your user:"
		echo
		echo "    dockerd-rootless-setuptool.sh install"
		echo
		echo "Visit https://docs.docker.com/go/rootless/ to learn about rootless mode."
		echo
	fi
	echo
	echo "To run the Docker daemon as a fully privileged service, but granting non-root"
	echo "users access, refer to https://docs.docker.com/go/daemon-access/"
	echo
	echo "WARNING: Access to the remote API on a privileged Docker daemon is equivalent"
	echo "         to root access on the host. Refer to the 'Docker daemon attack surface'"
	echo "         documentation for details: https://docs.docker.com/go/attack-surface/"
	echo
	echo "================================================================================"
	echo
}
|
||||
|
||||
# Check if this is a forked Linux distro (e.g. Linux Mint, OSMC) and, if
# so, rewrite the detection globals to point at the upstream release.
# Globals read/written: lsb_dist, dist_version.
# Side effects: prints current and upstream distro info when a fork is
# detected via `lsb_release -u`.
check_forked() {

	# Check for lsb_release command existence, it usually exists in forked distros
	if command_exists lsb_release; then
		# Check if the `-u` option is supported.
		# set +e/-e toggled because a failing probe here is expected and the
		# script presumably runs under `set -e` (set elsewhere in the file).
		set +e
		lsb_release -a -u > /dev/null 2>&1
		lsb_release_exit_code=$?
		set -e

		# Check if the command has exited successfully, it means we're in a forked distro
		if [ "$lsb_release_exit_code" = "0" ]; then
			# Print info about current distro
			cat <<-EOF
			You're using '$lsb_dist' version '$dist_version'.
			EOF

			# Get the upstream release info
			lsb_dist=$(lsb_release -a -u 2>&1 | tr '[:upper:]' '[:lower:]' | grep -E 'id' | cut -d ':' -f 2 | tr -d '[:space:]')
			dist_version=$(lsb_release -a -u 2>&1 | tr '[:upper:]' '[:lower:]' | grep -E 'codename' | cut -d ':' -f 2 | tr -d '[:space:]')

			# Print info about upstream distro
			cat <<-EOF
			Upstream release is '$lsb_dist' version '$dist_version'.
			EOF
		else
			# lsb_release -u unsupported: fall back to /etc/debian_version for
			# Debian derivatives that don't identify themselves correctly.
			if [ -r /etc/debian_version ] && [ "$lsb_dist" != "ubuntu" ] && [ "$lsb_dist" != "raspbian" ]; then
				if [ "$lsb_dist" = "osmc" ]; then
					# OSMC runs Raspbian
					lsb_dist=raspbian
				else
					# We're Debian and don't even know it!
					lsb_dist=debian
				fi
				# Strip "/sid" suffixes and minor versions, then map the major
				# number (or testing codename) to the release codename.
				dist_version="$(sed 's/\/.*//' /etc/debian_version | sed 's/\..*//')"
				case "$dist_version" in
					13|14|forky)
						dist_version="trixie"
					;;
					12)
						dist_version="bookworm"
					;;
					11)
						dist_version="bullseye"
					;;
					10)
						dist_version="buster"
					;;
					9)
						dist_version="stretch"
					;;
					8)
						dist_version="jessie"
					;;
				esac
			fi
		fi
	fi
}
|
||||
|
||||
# Top-level installer entry point. Never returns: every code path ends in
# an explicit `exit`.
# Environment variables read (set elsewhere in the script):
#   SCRIPT_COMMIT_SHA - commit id printed for traceability
#   DRY_RUN           - when non-empty, echo commands instead of running them
#   VERSION           - optional docker version to pin
#   CHANNEL           - release channel (stable/test)
#   DOWNLOAD_URL      - base URL of the package repositories
#   REPO_FILE         - repo definition filename for rpm-based distros
#   REPO_ONLY         - when "1", configure the repo and exit without installing
#   NO_AUTOSTART      - when "1", skip starting the Docker daemon
# Globals written: sh_c, user, lsb_dist, dist_version (the latter two are
# also consumed by check_forked and deprecation_notice).
do_install() {
	echo "# Executing docker install script, commit: $SCRIPT_COMMIT_SHA"

	# Warn (and give a 20 s Ctrl+C window) if docker already exists.
	if command_exists docker; then
		cat >&2 <<-'EOF'
			Warning: the "docker" command appears to already exist on this system.

			If you already have Docker installed, this script can cause trouble, which is
			why we're displaying this warning and provide the opportunity to cancel the
			installation.

			If you installed the current Docker package using this script and are using it
			again to update Docker, you can ignore this message, but be aware that the
			script resets any custom changes in the deb and rpm repo configuration
			files to match the parameters passed to the script.

			You may press Ctrl+C now to abort this script.
		EOF
		( set -x; sleep 20 )
	fi

	user="$(id -un 2>/dev/null || true)"

	# Choose a privilege-escalation wrapper: sudo preferred, su fallback.
	sh_c='sh -c'
	if [ "$user" != 'root' ]; then
		if command_exists sudo; then
			sh_c='sudo -E sh -c'
		elif command_exists su; then
			sh_c='su -c'
		else
			cat >&2 <<-'EOF'
			Error: this installer needs the ability to run commands as root.
			We are unable to find either "sudo" or "su" available to make this happen.
			EOF
			exit 1
		fi
	fi

	# In dry-run mode every privileged command is merely echoed.
	if is_dry_run; then
		sh_c="echo"
	fi

	# perform some very rudimentary platform detection
	lsb_dist=$( get_distribution )
	lsb_dist="$(echo "$lsb_dist" | tr '[:upper:]' '[:lower:]')"

	# On WSL, recommend Docker Desktop but allow the install to proceed
	# after a 20 s Ctrl+C window.
	if is_wsl; then
		echo
		echo "WSL DETECTED: We recommend using Docker Desktop for Windows."
		echo "Please get Docker Desktop from https://www.docker.com/products/docker-desktop/"
		echo
		cat >&2 <<-'EOF'

			You may press Ctrl+C now to abort this script.
		EOF
		( set -x; sleep 20 )
	fi

	# Resolve dist_version per distro family.
	case "$lsb_dist" in

		ubuntu)
			if command_exists lsb_release; then
				dist_version="$(lsb_release --codename | cut -f2)"
			fi
			if [ -z "$dist_version" ] && [ -r /etc/lsb-release ]; then
				dist_version="$(. /etc/lsb-release && echo "$DISTRIB_CODENAME")"
			fi
		;;

		debian|raspbian)
			# Map the numeric Debian version to its release codename.
			dist_version="$(sed 's/\/.*//' /etc/debian_version | sed 's/\..*//')"
			case "$dist_version" in
				13)
					dist_version="trixie"
				;;
				12)
					dist_version="bookworm"
				;;
				11)
					dist_version="bullseye"
				;;
				10)
					dist_version="buster"
				;;
				9)
					dist_version="stretch"
				;;
				8)
					dist_version="jessie"
				;;
			esac
		;;

		centos|rhel)
			if [ -z "$dist_version" ] && [ -r /etc/os-release ]; then
				dist_version="$(. /etc/os-release && echo "$VERSION_ID")"
			fi
		;;

		*)
			if command_exists lsb_release; then
				dist_version="$(lsb_release --release | cut -f2)"
			fi
			if [ -z "$dist_version" ] && [ -r /etc/os-release ]; then
				dist_version="$(. /etc/os-release && echo "$VERSION_ID")"
			fi
		;;

	esac

	# Check if this is a forked Linux distro
	check_forked

	# Print deprecation warnings for distro versions that recently reached EOL,
	# but may still be commonly used (especially LTS versions).
	case "$lsb_dist.$dist_version" in
		centos.8|centos.7|rhel.7)
			deprecation_notice "$lsb_dist" "$dist_version"
			;;
		debian.buster|debian.stretch|debian.jessie)
			deprecation_notice "$lsb_dist" "$dist_version"
			;;
		raspbian.buster|raspbian.stretch|raspbian.jessie)
			deprecation_notice "$lsb_dist" "$dist_version"
			;;
		ubuntu.focal|ubuntu.bionic|ubuntu.xenial|ubuntu.trusty)
			deprecation_notice "$lsb_dist" "$dist_version"
			;;
		ubuntu.oracular|ubuntu.mantic|ubuntu.lunar|ubuntu.kinetic|ubuntu.impish|ubuntu.hirsute|ubuntu.groovy|ubuntu.eoan|ubuntu.disco|ubuntu.cosmic)
			deprecation_notice "$lsb_dist" "$dist_version"
			;;
		fedora.*)
			if [ "$dist_version" -lt 41 ]; then
				deprecation_notice "$lsb_dist" "$dist_version"
			fi
			;;
	esac

	# Run setup for each distro accordingly
	case "$lsb_dist" in
		ubuntu|debian|raspbian)
			# Configure the apt repository with the signing key pinned via
			# signed-by, then install the package set.
			pre_reqs="ca-certificates curl"
			apt_repo="deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.asc] $DOWNLOAD_URL/linux/$lsb_dist $dist_version $CHANNEL"
			(
				if ! is_dry_run; then
					set -x
				fi
				$sh_c 'apt-get -qq update >/dev/null'
				$sh_c "DEBIAN_FRONTEND=noninteractive apt-get -y -qq install $pre_reqs >/dev/null"
				$sh_c 'install -m 0755 -d /etc/apt/keyrings'
				$sh_c "curl -fsSL \"$DOWNLOAD_URL/linux/$lsb_dist/gpg\" -o /etc/apt/keyrings/docker.asc"
				$sh_c "chmod a+r /etc/apt/keyrings/docker.asc"
				$sh_c "echo \"$apt_repo\" > /etc/apt/sources.list.d/docker.list"
				$sh_c 'apt-get -qq update >/dev/null'
			)

			if [ "$REPO_ONLY" = "1" ]; then
				exit 0
			fi

			# Resolve an exact package version when VERSION is pinned.
			pkg_version=""
			if [ -n "$VERSION" ]; then
				if is_dry_run; then
					echo "# WARNING: VERSION pinning is not supported in DRY_RUN"
				else
					# Will work for incomplete versions IE (17.12), but may not actually grab the "latest" if in the test channel
					pkg_pattern="$(echo "$VERSION" | sed 's/-ce-/~ce~.*/g' | sed 's/-/.*/g')"
					search_command="apt-cache madison docker-ce | grep '$pkg_pattern' | head -1 | awk '{\$1=\$1};1' | cut -d' ' -f 3"
					pkg_version="$($sh_c "$search_command")"
					echo "INFO: Searching repository for VERSION '$VERSION'"
					echo "INFO: $search_command"
					if [ -z "$pkg_version" ]; then
						echo
						echo "ERROR: '$VERSION' not found amongst apt-cache madison results"
						echo
						exit 1
					fi
					if version_gte "18.09"; then
						search_command="apt-cache madison docker-ce-cli | grep '$pkg_pattern' | head -1 | awk '{\$1=\$1};1' | cut -d' ' -f 3"
						echo "INFO: $search_command"
						cli_pkg_version="=$($sh_c "$search_command")"
					fi
					pkg_version="=$pkg_version"
				fi
			fi
			(
				# Build the package list according to the target version's
				# packaging layout (cli/containerd split, plugins, ...).
				pkgs="docker-ce${pkg_version%=}"
				if version_gte "18.09"; then
					# older versions didn't ship the cli and containerd as separate packages
					pkgs="$pkgs docker-ce-cli${cli_pkg_version%=} containerd.io"
				fi
				if version_gte "20.10"; then
					pkgs="$pkgs docker-compose-plugin docker-ce-rootless-extras$pkg_version"
				fi
				if version_gte "23.0"; then
					pkgs="$pkgs docker-buildx-plugin"
				fi
				if version_gte "28.2"; then
					pkgs="$pkgs docker-model-plugin"
				fi
				if ! is_dry_run; then
					set -x
				fi
				$sh_c "DEBIAN_FRONTEND=noninteractive apt-get -y -qq install $pkgs >/dev/null"
			)
			if [ "$NO_AUTOSTART" != "1" ]; then
				start_docker_daemon
			fi
			echo_docker_as_nonroot
			exit 0
			;;
		centos|fedora|rhel)
			if [ "$(uname -m)" = "s390x" ]; then
				echo "Effective v27.5, please consult RHEL distro statement for s390x support."
				exit 1
			fi
			repo_file_url="$DOWNLOAD_URL/linux/$lsb_dist/$REPO_FILE"
			(
				if ! is_dry_run; then
					set -x
				fi
				# Three repo-setup paths: dnf5 (new CLI), classic dnf, yum.
				if command_exists dnf5; then
					$sh_c "dnf -y -q --setopt=install_weak_deps=False install dnf-plugins-core"
					$sh_c "dnf5 config-manager addrepo --overwrite --save-filename=docker-ce.repo --from-repofile='$repo_file_url'"

					if [ "$CHANNEL" != "stable" ]; then
						$sh_c "dnf5 config-manager setopt \"docker-ce-*.enabled=0\""
						$sh_c "dnf5 config-manager setopt \"docker-ce-$CHANNEL.enabled=1\""
					fi
					$sh_c "dnf makecache"
				elif command_exists dnf; then
					$sh_c "dnf -y -q --setopt=install_weak_deps=False install dnf-plugins-core"
					$sh_c "rm -f /etc/yum.repos.d/docker-ce.repo /etc/yum.repos.d/docker-ce-staging.repo"
					$sh_c "dnf config-manager --add-repo $repo_file_url"

					if [ "$CHANNEL" != "stable" ]; then
						$sh_c "dnf config-manager --set-disabled \"docker-ce-*\""
						$sh_c "dnf config-manager --set-enabled \"docker-ce-$CHANNEL\""
					fi
					$sh_c "dnf makecache"
				else
					$sh_c "yum -y -q install yum-utils"
					$sh_c "rm -f /etc/yum.repos.d/docker-ce.repo /etc/yum.repos.d/docker-ce-staging.repo"
					$sh_c "yum-config-manager --add-repo $repo_file_url"

					if [ "$CHANNEL" != "stable" ]; then
						$sh_c "yum-config-manager --disable \"docker-ce-*\""
						$sh_c "yum-config-manager --enable \"docker-ce-$CHANNEL\""
					fi
					$sh_c "yum makecache"
				fi
			)

			if [ "$REPO_ONLY" = "1" ]; then
				exit 0
			fi

			pkg_version=""
			if command_exists dnf; then
				pkg_manager="dnf"
				pkg_manager_flags="-y -q --best"
			else
				pkg_manager="yum"
				pkg_manager_flags="-y -q"
			fi
			if [ -n "$VERSION" ]; then
				if is_dry_run; then
					echo "# WARNING: VERSION pinning is not supported in DRY_RUN"
				else
					# Build an rpm version pattern including the dist suffix
					# (fcNN for Fedora, el for CentOS/RHEL).
					if [ "$lsb_dist" = "fedora" ]; then
						pkg_suffix="fc$dist_version"
					else
						pkg_suffix="el"
					fi
					pkg_pattern="$(echo "$VERSION" | sed 's/-ce-/\\\\.ce.*/g' | sed 's/-/.*/g').*$pkg_suffix"
					search_command="$pkg_manager list --showduplicates docker-ce | grep '$pkg_pattern' | tail -1 | awk '{print \$2}'"
					pkg_version="$($sh_c "$search_command")"
					echo "INFO: Searching repository for VERSION '$VERSION'"
					echo "INFO: $search_command"
					if [ -z "$pkg_version" ]; then
						echo
						echo "ERROR: '$VERSION' not found amongst $pkg_manager list results"
						echo
						exit 1
					fi
					if version_gte "18.09"; then
						# older versions don't support a cli package
						search_command="$pkg_manager list --showduplicates docker-ce-cli | grep '$pkg_pattern' | tail -1 | awk '{print \$2}'"
						cli_pkg_version="$($sh_c "$search_command" | cut -d':' -f 2)"
					fi
					# Cut out the epoch and prefix with a '-'
					pkg_version="-$(echo "$pkg_version" | cut -d':' -f 2)"
				fi
			fi
			(
				pkgs="docker-ce$pkg_version"
				if version_gte "18.09"; then
					# older versions didn't ship the cli and containerd as separate packages
					if [ -n "$cli_pkg_version" ]; then
						pkgs="$pkgs docker-ce-cli-$cli_pkg_version containerd.io"
					else
						pkgs="$pkgs docker-ce-cli containerd.io"
					fi
				fi
				if version_gte "20.10"; then
					pkgs="$pkgs docker-compose-plugin docker-ce-rootless-extras$pkg_version"
				fi
				if version_gte "23.0"; then
					pkgs="$pkgs docker-buildx-plugin docker-model-plugin"
				fi
				if ! is_dry_run; then
					set -x
				fi
				$sh_c "$pkg_manager $pkg_manager_flags install $pkgs"
			)
			if [ "$NO_AUTOSTART" != "1" ]; then
				start_docker_daemon
			fi
			echo_docker_as_nonroot
			exit 0
			;;
		sles)
			echo "Effective v27.5, please consult SLES distro statement for s390x support."
			exit 1
			;;
		*)
			if [ -z "$lsb_dist" ]; then
				if is_darwin; then
					echo
					echo "ERROR: Unsupported operating system 'macOS'"
					echo "Please get Docker Desktop from https://www.docker.com/products/docker-desktop"
					echo
					exit 1
				fi
			fi
			echo
			echo "ERROR: Unsupported distribution '$lsb_dist'"
			echo
			exit 1
			;;
	esac
	# Unreachable safety net: every branch above exits explicitly.
	exit 1
}
|
||||
|
||||
# wrapped up in a function so that we have some protection against only getting
|
||||
# half the file during "curl | sh"
|
||||
do_install
|
||||
124
notes.md
124
notes.md
@@ -1,124 +0,0 @@
|
||||
User: Follow these instructions. Don't modify the file.
|
||||
|
||||
GitHub Copilot: Understood. I will not modify any files as per your instructions. If you have a specific task or question related to the EZ-Homelab project or any other coding assistance, please let me know how I can help!
|
||||
|
||||
User: The repository is in excellent shape atm. I want to create a release. I made some release notes but I haven't done that before. I could use suggestions.
|
||||
|
||||
GitHub Copilot: Based on your description, it sounds like you're ready to formalize a release for the EZ-Homelab repository—great timing since you mentioned it's in excellent shape! Since this is your first time, I'll provide clear, step-by-step suggestions tailored to a GitHub repository like this one (which appears to be a Docker-based homelab setup with scripts, configs, and documentation). I'll focus on best practices for open-source projects, keeping things simple and actionable.
|
||||
|
||||
I'll break this down into **preparation**, **release notes improvements**, **creation process**, and **post-release tips**. These are based on standard GitHub release workflows, but adapted for a project like EZ-Homelab (e.g., emphasizing Docker compatibility, user setup, and community-friendly notes).
|
||||
|
||||
### 1. **Preparation Before Creating the Release**
|
||||
- **Decide on Versioning**: Use [semantic versioning](https://semver.org/) (e.g., MAJOR.MINOR.PATCH). Your existing file is release-notes-v0.1.md, so:
|
||||
- If this is a minor update (new features or improvements without breaking changes), go with **v0.1.1** or **v0.2.0**.
|
||||
- If it's a major milestone (e.g., stable production-ready), consider **v1.0.0**.
|
||||
- Suggestion: Start with **v0.1.0** if this is the first official release, to align with your notes.
|
||||
- **Test Thoroughly**: Since EZ-Homelab involves Docker stacks and scripts, run a final test:
|
||||
- Deploy on a fresh environment (e.g., a VM or container) using your `ez-homelab.sh` script.
|
||||
- Verify core functionality: Traefik routing, Authelia SSO, Sablier lazy loading, and a few services from different stacks (e.g., Homepage, Dockge).
|
||||
- Check for any breaking changes (e.g., environment variables or network requirements).
|
||||
- **Update Documentation**: Ensure docs like README.md, getting-started.md, and services-overview.md reflect the latest state. If you've made changes, update version references.
|
||||
- **Commit and Push Changes**: Make sure all your release notes and any last-minute fixes are committed and pushed to the main branch.
|
||||
|
||||
### 2. **Improving Your Release Notes**
|
||||
Your release-notes-v0.1.md is a solid start—it's concise and covers key areas like setup, security, and services. Here's how to enhance it for a professional release (GitHub releases support Markdown, so format it nicely with headings, lists, and links):
|
||||
|
||||
- **Structure It Like a Changelog**:
|
||||
- **Overview/Summary**: Start with a 1-2 sentence high-level summary (e.g., "This release introduces automated SSL, SSO, and lazy loading for 50+ Docker services in a production-ready homelab setup.").
|
||||
- **What's New**: List new features, services, or improvements (e.g., "Added Sablier lazy loading to reduce resource usage" or "Integrated DuckDNS and Let's Encrypt for wildcard SSL").
|
||||
- **Bug Fixes**: If applicable, note any issues resolved (e.g., "Fixed port conflicts in ez-homelab.sh options 1 & 2").
|
||||
- **Breaking Changes**: Highlight anything that might break existing setups (e.g., "Updated network requirements; users must recreate traefik-network").
|
||||
- **Known Issues/Limitations**: Be transparent (e.g., "Sablier may cause occasional timeouts; refresh the page to resolve").
|
||||
- **Installation/Upgrade Instructions**: Provide clear steps (e.g., "For fresh installs: Run `./ez-homelab.sh`. For upgrades: Pull latest images and redeploy stacks via Dockge.").
|
||||
- **Credits/Thanks**: If community contributions exist, acknowledge them.
|
||||
|
||||
- **Make It User-Friendly**:
|
||||
- Use bullet points and subheadings for scannability.
|
||||
- Include links to docs (e.g., "[Getting Started](docs/getting-started.md)").
|
||||
- Add emojis or icons for visual appeal (e.g., 🚀 for new features, 🐛 for fixes).
|
||||
- Keep it concise—aim for 300-500 words. Focus on value for users (e.g., "Saves power bills with lazy loading").
|
||||
|
||||
- **Example Enhanced Version** (based on your notes):
|
||||
```
|
||||
# Release Notes v0.1.0
|
||||
|
||||
## Overview
|
||||
EZ-Homelab v0.1.0 brings automated SSL, SSO, and resource-efficient lazy loading to your Docker homelab, supporting 50+ services out of the box.
|
||||
|
||||
## What's New
|
||||
- 🚀 **Sablier Lazy Loading**: Reduces resource usage and power costs by starting services on-demand.
|
||||
- 🔒 **Security Enhancements**: Authelia SSO enabled by default (with 2FA support) and TLS certificates for Docker proxy.
|
||||
- 🌐 **DNS & Proxy**: DuckDNS integration with Let's Encrypt wildcard certificates; Traefik routing for local and remote services.
|
||||
- 📊 **Dashboards**: Preconfigured Homepage at `homepage.yoursubdomain.duckdns.org` for easy service access.
|
||||
|
||||
## Services Included
|
||||
- Core, Infrastructure, Dashboards, Media, Media Management, Productivity, Transcoders, Utilities, VPN, and Wikis stacks—all with Traefik/Sablier support.
|
||||
- Note: Monitoring stack not yet configured; Alternatives stack untested.
|
||||
|
||||
## Installation
|
||||
- Fresh OS: Follow Getting Started and run `./ez-homelab.sh`.
|
||||
- Upgrade: Pull images and redeploy via Dockge.
|
||||
|
||||
## Known Issues
|
||||
- ez-homelab.sh options 1 & 2 need testing.
|
||||
- Sablier may cause brief delays or timeouts—refresh the page.
|
||||
|
||||
## Thanks
|
||||
Thanks to the community for feedback on the GitHub Wiki!
|
||||
```
|
||||
|
||||
- **Suggestion**: Save this as `release-notes-v0.1.0.md` (or similar) in the repo root, and reference it in the GitHub release. You can also auto-generate notes from commits if using GitHub's tools.
|
||||
|
||||
### 3. **Creating the Release on GitHub**
|
||||
- **Step-by-Step Process**:
|
||||
1. **Create a Git Tag Locally**:
|
||||
- Open a terminal in your repo: `cd /path/to/EZ-Homelab`.
|
||||
- Create an annotated tag: `git tag -a v0.1.0 -m "Release v0.1.0: Automated SSL, SSO, and lazy loading for Docker homelab"`.
|
||||
- Push the tag: `git push origin v0.1.0`.
|
||||
|
||||
2. **Create the Release on GitHub**:
|
||||
- Go to your repo on GitHub.com → **Releases** → **Create a new release**.
|
||||
- **Tag version**: Select `v0.1.0` (or create it here if you skipped the local step).
|
||||
- **Release title**: "EZ-Homelab v0.1.0 - Production-Ready Docker Homelab".
|
||||
- **Description**: Copy-paste your enhanced release notes (or link to the file).
|
||||
- **Attach Assets**: If applicable, add binaries/scripts (e.g., a zipped version of `ez-homelab.sh` for non-Git users).
|
||||
- **Mark as Pre-Release**: If this isn't fully stable, check this box.
|
||||
- Click **Publish Release**.
|
||||
|
||||
- **Automation Tip**: Set up a GitHub Action for releases (e.g., using `actions/create-release`) to automate tagging and publishing when you push a tag. This is great for future releases.
|
||||
|
||||
### 4. **Post-Release Tips**
|
||||
- **Announce It**: Share on relevant communities (e.g., Reddit's r/homelab, Docker forums) with a link to the release.
|
||||
- **Monitor Feedback**: Watch for issues or PRs post-release. Use GitHub's release discussions if enabled.
|
||||
- **Plan Next Release**: Start a changelog for v0.1.1 (e.g., track new services or fixes).
|
||||
- **Security/Compliance**: Since this involves security (SSO, TLS), consider adding a security policy file (`SECURITY.md`) for reporting vulnerabilities.
|
||||
- **If Issues Arise**: You can edit the release notes or create a patch release (e.g., v0.1.1) quickly.
|
||||
|
||||
If you'd like me to help refine your release notes (e.g., by suggesting specific wording or formatting), review your current `release-notes-v0.1.md` file, or assist with any of these steps (without modifying files), just let me know! For example, I can generate a sample release notes template or walk through the Git commands. What's your next step?
|
||||
@@ -1,169 +0,0 @@
|
||||
# Raspberry Pi 4 Test Run Summary - EZ-Homelab Python Script
|
||||
## Date: 29 January 2026
|
||||
## Hardware: Raspberry Pi 4 4GB RAM, Raspberry Pi OS (Debian-based)
|
||||
## Architecture: ARM64
|
||||
|
||||
## Executive Summary
|
||||
|
||||
Testing the EZ-Homelab Python TUI deployment script on Raspberry Pi revealed several architecture-specific challenges and script deficiencies. While the script works on AMD64 systems, ARM64 deployment requires additional considerations and the script needs improvements in configuration file processing and deployment logic.
|
||||
|
||||
## Key Findings
|
||||
|
||||
### 1. Python Environment Management
|
||||
**Issue**: Direct `pip install` commands fail on modern Debian/Raspberry Pi OS due to PEP 668
|
||||
**Solution**: Must use virtual environments for all Python package installations
|
||||
**Lesson**: Script should automatically create and use virtual environments on ARM64 systems
|
||||
|
||||
### 2. Docker Installation and Permissions
|
||||
**Issue**: Docker installation requires user group changes and system reboot
|
||||
**Solution**: Manual Docker installation + `usermod` + reboot required
|
||||
**Lesson**: Script needs to handle Docker installation with proper user permissions and reboot detection
|
||||
|
||||
### 3. Package Architecture Compatibility
|
||||
**Issue**: Python packages must be compatible with ARM64
|
||||
**Solution**: PiWheels repository automatically provides ARM64-compatible packages
|
||||
**Lesson**: No script changes needed, but testing should include ARM64 validation
|
||||
|
||||
### 4. Configuration File Processing
|
||||
**Issue**: Script failed to substitute environment variables in YAML configuration files
|
||||
**Solution**: Manual `sed` commands required to replace `${VARIABLE}` placeholders
|
||||
**Lesson**: Script needs robust template processing with environment variable substitution
|
||||
|
||||
### 5. Service Deployment Logic
|
||||
**Issue**: "Core Server Only" deployment didn't include infrastructure services (Dockge)
|
||||
**Solution**: Manual deployment of infrastructure stack required
|
||||
**Lesson**: Deployment type logic needs review and testing across all scenarios
|
||||
|
||||
### 6. Docker Network Management
|
||||
**Issue**: Required networks (`traefik-network`, `homelab-network`) not created automatically
|
||||
**Solution**: Manual `docker network create` commands required
|
||||
**Lesson**: Script should pre-create all required networks before service deployment
|
||||
|
||||
### 7. Service-Specific Configuration Issues
|
||||
|
||||
#### Authelia
|
||||
**Issue**: Missing required `cookies` configuration in session section
|
||||
**Solution**: Manual addition of cookies configuration
|
||||
**Lesson**: Template files need to be complete and validated against service requirements
|
||||
|
||||
#### Sablier
|
||||
**Issue**: Configured for multi-server TLS but running on single server
|
||||
**Solution**: Changed from TCP+TLS to local socket connection
|
||||
**Lesson**: Configuration should be deployment-scenario aware (single vs multi-server)
|
||||
|
||||
## Technical Challenges Specific to ARM64
|
||||
|
||||
### 1. System Package Management
|
||||
- Raspberry Pi OS follows Debian's strict package management policies
|
||||
- PEP 668 prevents system-wide pip installs
|
||||
- Virtual environments are mandatory for user-space Python packages
|
||||
|
||||
### 2. Hardware Resource Constraints
|
||||
- Limited RAM (4GB) vs typical AMD64 systems (16GB+)
|
||||
- Slower I/O and processing compared to x86_64 systems
|
||||
- Docker image pulls are slower on ARM64
|
||||
|
||||
### 3. Architecture-Specific Dependencies
|
||||
- Some software may not have ARM64 builds
|
||||
- PiWheels provides most Python packages but not all
|
||||
- Cross-compilation issues for complex packages
|
||||
|
||||
## Script Improvements Required
|
||||
|
||||
### 1. Environment Setup
|
||||
```python
|
||||
# Add to script initialization
|
||||
def setup_python_environment():
|
||||
"""Create virtual environment and install dependencies"""
|
||||
if platform.machine() == 'aarch64': # ARM64 detection
|
||||
# Force virtual environment usage
|
||||
# Install dependencies within venv
|
||||
```
|
||||
|
||||
### 2. Docker Management
|
||||
```python
|
||||
def ensure_docker_ready():
|
||||
"""Ensure Docker is installed, running, and user has permissions"""
|
||||
# Check installation
|
||||
# Add user to docker group
|
||||
# Detect if reboot required
|
||||
# Provide clear instructions
|
||||
```
|
||||
|
||||
### 3. Configuration Processing
|
||||
```python
|
||||
def process_config_templates():
|
||||
"""Process all config templates with environment variable substitution"""
|
||||
# Find all template files
|
||||
# Replace ${VARIABLE} with os.environ.get('VARIABLE')
|
||||
# Validate resulting YAML/JSON
|
||||
```
|
||||
|
||||
### 4. Network Management
|
||||
```python
|
||||
def create_required_networks():
|
||||
"""Create all Docker networks required by services"""
|
||||
networks = ['traefik-network', 'homelab-network']
|
||||
for network in networks:
|
||||
# docker network create if not exists
|
||||
```
|
||||
|
||||
### 5. Deployment Validation
|
||||
```python
|
||||
def validate_deployment_scenario():
|
||||
"""Ensure selected services match deployment type capabilities"""
|
||||
# Core: core services only
|
||||
# Single: core + infrastructure + dashboards
|
||||
# Remote: infrastructure + dashboards only
|
||||
```
|
||||
|
||||
## Testing Recommendations
|
||||
|
||||
### 1. Multi-Architecture Testing
|
||||
- Include ARM64 in CI/CD pipeline
|
||||
- Test on actual Raspberry Pi hardware
|
||||
- Validate PiWheels compatibility
|
||||
|
||||
### 2. Pre-Flight Checks Enhancement
|
||||
- Add ARM64-specific system requirements
|
||||
- Check available disk space (ARM64 systems often have smaller storage)
|
||||
- Validate network connectivity for Docker pulls
|
||||
|
||||
### 3. Error Handling Improvements
|
||||
- Better error messages for common ARM64 issues
|
||||
- Graceful handling of permission errors
|
||||
- Clear instructions for manual intervention steps
|
||||
|
||||
## Performance Considerations
|
||||
|
||||
### ARM64 vs AMD64 Performance
|
||||
- Docker image pulls: ~2-3x slower on ARM64
|
||||
- Python package installation: Similar performance
|
||||
- Service startup: Comparable once images are cached
|
||||
- Memory usage: More critical on ARM64 due to hardware limits
|
||||
|
||||
### Optimization Strategies
|
||||
- Pre-pull commonly used Docker images
|
||||
- Minimize service startup time
|
||||
- Implement lazy loading where possible (Sablier)
|
||||
- Use resource limits to prevent memory exhaustion
|
||||
|
||||
## Conclusion
|
||||
|
||||
The EZ-Homelab Python script is functional on Raspberry Pi but requires significant improvements for production use on ARM64 systems. The main issues stem from inadequate configuration file processing and incomplete deployment logic rather than fundamental architecture incompatibilities.
|
||||
|
||||
**Priority Improvements:**
|
||||
1. Implement proper template processing with environment variable substitution
|
||||
2. Fix deployment type logic to include all selected services
|
||||
3. Add automatic Docker network creation
|
||||
4. Enhance error handling for ARM64-specific issues
|
||||
5. Add comprehensive pre-flight checks
|
||||
|
||||
**Testing Requirements:**
|
||||
- Dedicated ARM64 test environment
|
||||
- Automated testing of all deployment scenarios
|
||||
- Validation of all configuration file processing
|
||||
- Performance benchmarking against AMD64 systems
|
||||
|
||||
This testing has identified critical gaps that must be addressed before the script can be considered production-ready for ARM64 deployment.</content>
|
||||
<parameter name="filePath">/home/kelin/EZ-Homelab/raspberry-pi-test-run-summary.md
|
||||
@@ -1,154 +0,0 @@
|
||||
# EZ-Homelab Enhanced Setup System
|
||||
|
||||
A comprehensive, modular bash-based setup and management system for EZ-Homelab, replacing the complex Python TUI with robust, cross-platform scripts.
|
||||
|
||||
## 🚀 Quick Start
|
||||
|
||||
### For Fresh Installs (Recommended)
|
||||
```bash
|
||||
# Clone the repository
|
||||
git clone https://github.com/kelinfoxy/EZ-Homelab.git
|
||||
cd EZ-Homelab
|
||||
|
||||
# Start the interactive menu system
|
||||
cd scripts/enhanced-setup
|
||||
./menu.sh
|
||||
```
|
||||
|
||||
The menu provides guided access to all functionality with real-time system status.
|
||||
|
||||
### Manual Usage
|
||||
```bash
|
||||
cd scripts/enhanced-setup
|
||||
|
||||
# Phase 1: System Setup
|
||||
./preflight.sh # Validate system requirements
|
||||
./setup.sh # Install Docker and dependencies
|
||||
|
||||
# Phase 2: Configuration
|
||||
./pre-deployment-wizard.sh # Interactive service selection
|
||||
./localize.sh # Apply environment variables
|
||||
./validate.sh # Validate configurations
|
||||
|
||||
# Phase 3: Deployment
|
||||
./deploy.sh core # Deploy core services
|
||||
./deploy.sh infrastructure # Deploy infrastructure
|
||||
./deploy.sh monitoring # Deploy monitoring stack
|
||||
|
||||
# Phase 4: Management
|
||||
./service.sh list # List all services
|
||||
./monitor.sh dashboard # System monitoring
|
||||
./backup.sh config # Backup configurations
|
||||
./update.sh check # Check for updates
|
||||
```
|
||||
|
||||
## 📋 System Architecture
|
||||
|
||||
### Phase 1: Core Infrastructure
|
||||
- **preflight.sh**: System requirement validation
|
||||
- **setup.sh**: Docker installation and configuration
|
||||
|
||||
### Phase 2: Configuration Management
|
||||
- **pre-deployment-wizard.sh**: Interactive service selection and configuration
|
||||
- **localize.sh**: Template variable substitution
|
||||
- **generalize.sh**: Reverse template processing
|
||||
- **validate.sh**: Multi-purpose validation (environment, compose, network, SSL)
|
||||
|
||||
### Phase 3: Deployment Engine
|
||||
- **deploy.sh**: Orchestrated service deployment with health checks and rollback
|
||||
|
||||
### Phase 4: Service Orchestration & Management
|
||||
- **service.sh**: Individual service management (start/stop/restart/logs/exec)
|
||||
- **monitor.sh**: Real-time monitoring and alerting
|
||||
- **backup.sh**: Automated backup orchestration
|
||||
- **update.sh**: Service update management with zero-downtime
|
||||
|
||||
### Shared Libraries
|
||||
- **lib/common.sh**: Shared utilities, logging, validation functions
|
||||
- **lib/ui.sh**: Text-based UI components and progress indicators
|
||||
|
||||
## 🎯 Key Features
|
||||
|
||||
- **🔧 Template-Based Configuration**: Environment variable substitution system
|
||||
- **🚀 Smart Deployment**: Dependency-ordered deployment with health verification
|
||||
- **📊 Real-Time Monitoring**: System resources, service health, and alerting
|
||||
- **💾 Automated Backups**: Configuration, volumes, logs with retention policies
|
||||
- **⬆️ Safe Updates**: Rolling updates with rollback capabilities
|
||||
- **🔍 Comprehensive Validation**: Multi-layer checks for reliability
|
||||
- **📝 Detailed Logging**: Structured logging to `~/.ez-homelab/logs/`
|
||||
- **🔄 Cross-Platform**: Works on Linux, macOS, and other Unix-like systems
|
||||
|
||||
## 📖 Documentation
|
||||
|
||||
- **[PRD](prd.md)**: Product Requirements Document
|
||||
- **[Standards](standards.md)**: Development standards and guidelines
|
||||
- **[Traefik Guide](../docs/Traefik%20Routing%20Quick%20Reference.md)**: Traefik configuration reference
|
||||
|
||||
## 🛠️ Environment Variables
|
||||
|
||||
Create a `.env` file in the EZ-Homelab root directory:
|
||||
|
||||
```bash
|
||||
# Domain and SSL
|
||||
DOMAIN=yourdomain.com
|
||||
EMAIL=your@email.com
|
||||
|
||||
# Timezone
|
||||
TZ=America/New_York
|
||||
|
||||
# User IDs (auto-detected if not set)
|
||||
EZ_USER=yourusername
|
||||
EZ_UID=1000
|
||||
EZ_GID=1000
|
||||
|
||||
# Service-specific variables
|
||||
# Add as needed for your services
|
||||
```
|
||||
|
||||
## 🏗️ Development
|
||||
|
||||
### Prerequisites
|
||||
- Bash 4.0+
|
||||
- Docker and Docker Compose
|
||||
- Standard Unix tools (curl, wget, jq, git)
|
||||
|
||||
### Adding New Services
|
||||
1. Create docker-compose.yml in appropriate category directory
|
||||
2. Add template variables with `${VAR_NAME}` syntax
|
||||
3. Update service discovery in common.sh if needed
|
||||
4. Test with validation scripts
|
||||
|
||||
### Script Standards
|
||||
- Follow the established patterns in existing scripts
|
||||
- Use shared libraries for common functionality
|
||||
- Include comprehensive error handling and logging
|
||||
- Add help text with `--help` flag
|
||||
|
||||
## 🤝 Contributing
|
||||
|
||||
1. Follow the development standards in `standards.md`
|
||||
2. Test thoroughly on multiple platforms
|
||||
3. Update documentation as needed
|
||||
4. Submit pull requests with clear descriptions
|
||||
|
||||
## 📄 License
|
||||
|
||||
This project is part of EZ-Homelab. See the main repository for licensing information.
|
||||
|
||||
## 🆘 Troubleshooting
|
||||
|
||||
### Common Issues
|
||||
- **Permission denied**: Run `chmod +x *.sh lib/*.sh`
|
||||
- **Docker not found**: Run `./setup.sh` first
|
||||
- **Template errors**: Check `.env` file and run `./validate.sh`
|
||||
- **Service failures**: Check logs with `./service.sh logs <service>`
|
||||
|
||||
### Getting Help
|
||||
- Check the logs in `~/.ez-homelab/logs/`
|
||||
- Run individual scripts with `--help` for usage
|
||||
- Use the troubleshooting tools in the Advanced menu
|
||||
- Check the documentation files for detailed guides
|
||||
|
||||
---
|
||||
|
||||
**Happy Homelabbing!** 🏠💻
|
||||
@@ -1,686 +0,0 @@
|
||||
#!/bin/bash
|
||||
# EZ-Homelab Enhanced Setup Scripts - Backup Management
|
||||
# Automated backup orchestration and restore operations
|
||||
|
||||
SCRIPT_NAME="backup"
|
||||
SCRIPT_VERSION="1.0.0"
|
||||
|
||||
# Load common library
|
||||
source "$(dirname "${BASH_SOURCE[0]}")/lib/common.sh"
|
||||
source "$(dirname "${BASH_SOURCE[0]}")/lib/ui.sh"
|
||||
|
||||
# =============================================================================
|
||||
# BACKUP CONFIGURATION
|
||||
# =============================================================================
|
||||
|
||||
# Backup directories
|
||||
BACKUP_ROOT="${BACKUP_ROOT:-$HOME/.ez-homelab/backups}"
|
||||
BACKUP_CONFIG="${BACKUP_CONFIG:-$BACKUP_ROOT/config}"
|
||||
BACKUP_DATA="${BACKUP_DATA:-$BACKUP_ROOT/data}"
|
||||
BACKUP_LOGS="${BACKUP_LOGS:-$BACKUP_ROOT/logs}"
|
||||
|
||||
# Backup retention (days)
|
||||
CONFIG_RETENTION_DAYS=30
|
||||
DATA_RETENTION_DAYS=7
|
||||
LOG_RETENTION_DAYS=7
|
||||
|
||||
# Backup schedule (cron format)
|
||||
CONFIG_BACKUP_SCHEDULE="0 2 * * *" # Daily at 2 AM
|
||||
DATA_BACKUP_SCHEDULE="0 3 * * 0" # Weekly on Sunday at 3 AM
|
||||
LOG_BACKUP_SCHEDULE="0 1 * * *" # Daily at 1 AM
|
||||
|
||||
# Compression settings
|
||||
COMPRESSION_LEVEL=6
|
||||
COMPRESSION_TYPE="gzip"
|
||||
|
||||
# =============================================================================
|
||||
# BACKUP UTILITY FUNCTIONS
|
||||
# =============================================================================
|
||||
|
||||
# Initialize backup directories
|
||||
init_backup_dirs() {
|
||||
mkdir -p "$BACKUP_CONFIG" "$BACKUP_DATA" "$BACKUP_LOGS"
|
||||
|
||||
# Create .gitkeep files to ensure directories are tracked
|
||||
touch "$BACKUP_CONFIG/.gitkeep" "$BACKUP_DATA/.gitkeep" "$BACKUP_LOGS/.gitkeep"
|
||||
}
|
||||
|
||||
# Generate backup filename with timestamp
|
||||
generate_backup_filename() {
|
||||
local prefix="$1"
|
||||
local timestamp
|
||||
timestamp=$(date +%Y%m%d_%H%M%S)
|
||||
echo "${prefix}_${timestamp}.tar.${COMPRESSION_TYPE}"
|
||||
}
|
||||
|
||||
# Compress directory
|
||||
compress_directory() {
|
||||
local source_dir="$1"
|
||||
local archive_path="$2"
|
||||
|
||||
print_info "Compressing $source_dir to $archive_path"
|
||||
|
||||
case "$COMPRESSION_TYPE" in
|
||||
gzip)
|
||||
tar -czf "$archive_path" -C "$(dirname "$source_dir")" "$(basename "$source_dir")"
|
||||
;;
|
||||
bzip2)
|
||||
tar -cjf "$archive_path" -C "$(dirname "$source_dir")" "$(basename "$source_dir")"
|
||||
;;
|
||||
xz)
|
||||
tar -cJf "$archive_path" -C "$(dirname "$source_dir")" "$(basename "$source_dir")"
|
||||
;;
|
||||
*)
|
||||
print_error "Unsupported compression type: $COMPRESSION_TYPE"
|
||||
return 1
|
||||
;;
|
||||
esac
|
||||
}
|
||||
|
||||
# Extract archive
|
||||
extract_archive() {
|
||||
local archive_path="$1"
|
||||
local dest_dir="$2"
|
||||
|
||||
print_info "Extracting $archive_path to $dest_dir"
|
||||
|
||||
mkdir -p "$dest_dir"
|
||||
|
||||
case "$COMPRESSION_TYPE" in
|
||||
gzip)
|
||||
tar -xzf "$archive_path" -C "$dest_dir"
|
||||
;;
|
||||
bzip2)
|
||||
tar -xjf "$archive_path" -C "$dest_dir"
|
||||
;;
|
||||
xz)
|
||||
tar -xJf "$archive_path" -C "$dest_dir"
|
||||
;;
|
||||
*)
|
||||
print_error "Unsupported compression type: $COMPRESSION_TYPE"
|
||||
return 1
|
||||
;;
|
||||
esac
|
||||
}
|
||||
|
||||
# Clean old backups
|
||||
cleanup_old_backups() {
|
||||
local backup_dir="$1"
|
||||
local retention_days="$2"
|
||||
local prefix="$3"
|
||||
|
||||
print_info "Cleaning up backups older than ${retention_days} days in $backup_dir"
|
||||
|
||||
# Find and remove old backups
|
||||
find "$backup_dir" -name "${prefix}_*.tar.${COMPRESSION_TYPE}" -type f -mtime +"$retention_days" -exec rm {} \; -print
|
||||
}
|
||||
|
||||
# Get backup size
|
||||
get_backup_size() {
|
||||
local backup_path="$1"
|
||||
|
||||
if [[ -f "$backup_path" ]]; then
|
||||
du -h "$backup_path" | cut -f1
|
||||
else
|
||||
echo "N/A"
|
||||
fi
|
||||
}
|
||||
|
||||
# List backups
|
||||
list_backups() {
|
||||
local backup_dir="$1"
|
||||
local prefix="$2"
|
||||
|
||||
echo "Backups in $backup_dir:"
|
||||
echo "----------------------------------------"
|
||||
|
||||
local count=0
|
||||
while IFS= read -r -d '' file; do
|
||||
local size
|
||||
size=$(get_backup_size "$file")
|
||||
local mtime
|
||||
mtime=$(stat -c %y "$file" 2>/dev/null | cut -d'.' -f1 || echo "Unknown")
|
||||
|
||||
printf " %-40s %-8s %s\n" "$(basename "$file")" "$size" "$mtime"
|
||||
((count++))
|
||||
done < <(find "$backup_dir" -name "${prefix}_*.tar.${COMPRESSION_TYPE}" -type f -print0 | sort -z)
|
||||
|
||||
if (( count == 0 )); then
|
||||
echo " No backups found"
|
||||
fi
|
||||
echo
|
||||
}
|
||||
|
||||
# =============================================================================
|
||||
# CONFIGURATION BACKUP FUNCTIONS
|
||||
# =============================================================================
|
||||
|
||||
# Backup configuration files
|
||||
backup_config() {
|
||||
print_info "Starting configuration backup"
|
||||
|
||||
local temp_dir
|
||||
temp_dir=$(mktemp -d)
|
||||
local config_dir="$temp_dir/config"
|
||||
|
||||
mkdir -p "$config_dir"
|
||||
|
||||
# Backup EZ-Homelab configuration
|
||||
if [[ -d "$EZ_HOME" ]]; then
|
||||
print_info "Backing up EZ-Homelab configuration"
|
||||
cp -r "$EZ_HOME/docker-compose" "$config_dir/" 2>/dev/null || true
|
||||
cp -r "$EZ_HOME/templates" "$config_dir/" 2>/dev/null || true
|
||||
cp "$EZ_HOME/.env" "$config_dir/" 2>/dev/null || true
|
||||
fi
|
||||
|
||||
# Backup Docker daemon config
|
||||
if [[ -f "/etc/docker/daemon.json" ]]; then
|
||||
print_info "Backing up Docker daemon configuration"
|
||||
mkdir -p "$config_dir/docker"
|
||||
cp "/etc/docker/daemon.json" "$config_dir/docker/" 2>/dev/null || true
|
||||
fi
|
||||
|
||||
# Backup system configuration
|
||||
print_info "Backing up system configuration"
|
||||
mkdir -p "$config_dir/system"
|
||||
cp "/etc/hostname" "$config_dir/system/" 2>/dev/null || true
|
||||
cp "/etc/hosts" "$config_dir/system/" 2>/dev/null || true
|
||||
cp "/etc/resolv.conf" "$config_dir/system/" 2>/dev/null || true
|
||||
|
||||
# Create backup archive
|
||||
local backup_file
|
||||
backup_file=$(generate_backup_filename "config")
|
||||
local backup_path="$BACKUP_CONFIG/$backup_file"
|
||||
|
||||
if compress_directory "$config_dir" "$backup_path"; then
|
||||
print_success "Configuration backup completed: $backup_file"
|
||||
print_info "Backup size: $(get_backup_size "$backup_path")"
|
||||
|
||||
# Cleanup old backups
|
||||
cleanup_old_backups "$BACKUP_CONFIG" "$CONFIG_RETENTION_DAYS" "config"
|
||||
|
||||
# Cleanup temp directory
|
||||
rm -rf "$temp_dir"
|
||||
return 0
|
||||
else
|
||||
print_error "Configuration backup failed"
|
||||
rm -rf "$temp_dir"
|
||||
return 1
|
||||
fi
|
||||
}
|
||||
|
||||
# Restore configuration
|
||||
restore_config() {
|
||||
local backup_file="$1"
|
||||
|
||||
if [[ -z "$backup_file" ]]; then
|
||||
print_error "Backup file name required"
|
||||
return 1
|
||||
fi
|
||||
|
||||
local backup_path="$BACKUP_CONFIG/$backup_file"
|
||||
|
||||
if [[ ! -f "$backup_path" ]]; then
|
||||
print_error "Backup file not found: $backup_path"
|
||||
return 1
|
||||
fi
|
||||
|
||||
print_warning "This will overwrite existing configuration files. Continue? (y/N)"
|
||||
read -r response
|
||||
if [[ ! "$response" =~ ^[Yy]$ ]]; then
|
||||
print_info "Configuration restore cancelled"
|
||||
return 0
|
||||
fi
|
||||
|
||||
print_info "Restoring configuration from $backup_file"
|
||||
|
||||
local temp_dir
|
||||
temp_dir=$(mktemp -d)
|
||||
|
||||
if extract_archive "$backup_path" "$temp_dir"; then
|
||||
local extracted_dir="$temp_dir/config"
|
||||
|
||||
# Restore EZ-Homelab configuration
|
||||
if [[ -d "$extracted_dir/docker-compose" ]]; then
|
||||
print_info "Restoring EZ-Homelab configuration"
|
||||
cp -r "$extracted_dir/docker-compose" "$EZ_HOME/" 2>/dev/null || true
|
||||
fi
|
||||
|
||||
if [[ -d "$extracted_dir/templates" ]]; then
|
||||
cp -r "$extracted_dir/templates" "$EZ_HOME/" 2>/dev/null || true
|
||||
fi
|
||||
|
||||
if [[ -f "$extracted_dir/.env" ]]; then
|
||||
cp "$extracted_dir/.env" "$EZ_HOME/" 2>/dev/null || true
|
||||
fi
|
||||
|
||||
# Restore Docker configuration
|
||||
if [[ -f "$extracted_dir/docker/daemon.json" ]]; then
|
||||
print_info "Restoring Docker daemon configuration"
|
||||
sudo cp "$extracted_dir/docker/daemon.json" "/etc/docker/" 2>/dev/null || true
|
||||
fi
|
||||
|
||||
# Restore system configuration
|
||||
if [[ -f "$extracted_dir/system/hostname" ]]; then
|
||||
print_info "Restoring system hostname"
|
||||
sudo cp "$extracted_dir/system/hostname" "/etc/" 2>/dev/null || true
|
||||
fi
|
||||
|
||||
print_success "Configuration restore completed"
|
||||
rm -rf "$temp_dir"
|
||||
return 0
|
||||
else
|
||||
print_error "Configuration restore failed"
|
||||
rm -rf "$temp_dir"
|
||||
return 1
|
||||
fi
|
||||
}
|
||||
|
||||
# =============================================================================
|
||||
# DATA BACKUP FUNCTIONS
|
||||
# =============================================================================
|
||||
|
||||
# Backup Docker volumes
|
||||
backup_docker_volumes() {
|
||||
print_info "Starting Docker volumes backup"
|
||||
|
||||
local temp_dir
|
||||
temp_dir=$(mktemp -d)
|
||||
local volumes_dir="$temp_dir/volumes"
|
||||
|
||||
mkdir -p "$volumes_dir"
|
||||
|
||||
# Get all Docker volumes
|
||||
local volumes
|
||||
mapfile -t volumes < <(docker volume ls --format "{{.Name}}" 2>/dev/null | grep -E "^ez-homelab|^homelab" || true)
|
||||
|
||||
if [[ ${#volumes[@]} -eq 0 ]]; then
|
||||
print_warning "No EZ-Homelab volumes found to backup"
|
||||
rm -rf "$temp_dir"
|
||||
return 0
|
||||
fi
|
||||
|
||||
print_info "Found ${#volumes[@]} volumes to backup"
|
||||
|
||||
for volume in "${volumes[@]}"; do
|
||||
print_info "Backing up volume: $volume"
|
||||
|
||||
# Create a temporary container to backup the volume
|
||||
local container_name="ez_backup_${volume}_$(date +%s)"
|
||||
|
||||
if docker run --rm -d --name "$container_name" -v "$volume:/data" alpine sleep 30 >/dev/null 2>&1; then
|
||||
# Copy volume data
|
||||
mkdir -p "$volumes_dir/$volume"
|
||||
docker cp "$container_name:/data/." "$volumes_dir/$volume/" 2>/dev/null || true
|
||||
|
||||
# Clean up container
|
||||
docker stop "$container_name" >/dev/null 2>&1 || true
|
||||
else
|
||||
print_warning "Failed to backup volume: $volume"
|
||||
fi
|
||||
done
|
||||
|
||||
# Create backup archive
|
||||
local backup_file
|
||||
backup_file=$(generate_backup_filename "volumes")
|
||||
local backup_path="$BACKUP_DATA/$backup_file"
|
||||
|
||||
if compress_directory "$volumes_dir" "$backup_path"; then
|
||||
print_success "Docker volumes backup completed: $backup_file"
|
||||
print_info "Backup size: $(get_backup_size "$backup_path")"
|
||||
|
||||
# Cleanup old backups
|
||||
cleanup_old_backups "$BACKUP_DATA" "$DATA_RETENTION_DAYS" "volumes"
|
||||
|
||||
rm -rf "$temp_dir"
|
||||
return 0
|
||||
else
|
||||
print_error "Docker volumes backup failed"
|
||||
rm -rf "$temp_dir"
|
||||
return 1
|
||||
fi
|
||||
}
|
||||
|
||||
# Restore Docker volumes
|
||||
restore_docker_volumes() {
|
||||
local backup_file="$1"
|
||||
|
||||
if [[ -z "$backup_file" ]]; then
|
||||
print_error "Backup file name required"
|
||||
return 1
|
||||
fi
|
||||
|
||||
local backup_path="$BACKUP_DATA/$backup_file"
|
||||
|
||||
if [[ ! -f "$backup_path" ]]; then
|
||||
print_error "Backup file not found: $backup_path"
|
||||
return 1
|
||||
fi
|
||||
|
||||
print_warning "This will overwrite existing Docker volumes. Continue? (y/N)"
|
||||
read -r response
|
||||
if [[ ! "$response" =~ ^[Yy]$ ]]; then
|
||||
print_info "Docker volumes restore cancelled"
|
||||
return 0
|
||||
fi
|
||||
|
||||
print_info "Restoring Docker volumes from $backup_file"
|
||||
|
||||
local temp_dir
|
||||
temp_dir=$(mktemp -d)
|
||||
|
||||
if extract_archive "$backup_path" "$temp_dir"; then
|
||||
local volumes_dir="$temp_dir/volumes"
|
||||
|
||||
# Restore each volume
|
||||
for volume_dir in "$volumes_dir"/*/; do
|
||||
if [[ -d "$volume_dir" ]]; then
|
||||
local volume_name
|
||||
volume_name=$(basename "$volume_dir")
|
||||
|
||||
print_info "Restoring volume: $volume_name"
|
||||
|
||||
# Create volume if it doesn't exist
|
||||
docker volume create "$volume_name" >/dev/null 2>&1 || true
|
||||
|
||||
# Create temporary container to restore data
|
||||
local container_name="ez_restore_${volume_name}_$(date +%s)"
|
||||
|
||||
if docker run --rm -d --name "$container_name" -v "$volume_name:/data" alpine sleep 30 >/dev/null 2>&1; then
|
||||
# Copy data back
|
||||
docker cp "$volume_dir/." "$container_name:/data/" 2>/dev/null || true
|
||||
|
||||
# Clean up container
|
||||
docker stop "$container_name" >/dev/null 2>&1 || true
|
||||
|
||||
print_success "Volume restored: $volume_name"
|
||||
else
|
||||
print_error "Failed to restore volume: $volume_name"
|
||||
fi
|
||||
fi
|
||||
done
|
||||
|
||||
print_success "Docker volumes restore completed"
|
||||
rm -rf "$temp_dir"
|
||||
return 0
|
||||
else
|
||||
print_error "Docker volumes restore failed"
|
||||
rm -rf "$temp_dir"
|
||||
return 1
|
||||
fi
|
||||
}
|
||||
|
||||
# =============================================================================
|
||||
# LOG BACKUP FUNCTIONS
|
||||
# =============================================================================
|
||||
|
||||
# Backup logs
|
||||
backup_logs() {
|
||||
print_info "Starting logs backup"
|
||||
|
||||
local temp_dir
|
||||
temp_dir=$(mktemp -d)
|
||||
local logs_dir="$temp_dir/logs"
|
||||
|
||||
mkdir -p "$logs_dir"
|
||||
|
||||
# Backup EZ-Homelab logs
|
||||
if [[ -d "$LOG_DIR" ]]; then
|
||||
print_info "Backing up EZ-Homelab logs"
|
||||
cp -r "$LOG_DIR"/* "$logs_dir/" 2>/dev/null || true
|
||||
fi
|
||||
|
||||
# Backup Docker logs
|
||||
print_info "Backing up Docker container logs"
|
||||
mkdir -p "$logs_dir/docker"
|
||||
|
||||
# Get logs from running containers
|
||||
local containers
|
||||
mapfile -t containers < <(docker ps --format "{{.Names}}" 2>/dev/null || true)
|
||||
|
||||
for container in "${containers[@]}"; do
|
||||
docker logs "$container" > "$logs_dir/docker/${container}.log" 2>&1 || true
|
||||
done
|
||||
|
||||
# Backup system logs
|
||||
print_info "Backing up system logs"
|
||||
mkdir -p "$logs_dir/system"
|
||||
cp "/var/log/syslog" "$logs_dir/system/" 2>/dev/null || true
|
||||
cp "/var/log/auth.log" "$logs_dir/system/" 2>/dev/null || true
|
||||
cp "/var/log/kern.log" "$logs_dir/system/" 2>/dev/null || true
|
||||
|
||||
# Create backup archive
|
||||
local backup_file
|
||||
backup_file=$(generate_backup_filename "logs")
|
||||
local backup_path="$BACKUP_LOGS/$backup_file"
|
||||
|
||||
if compress_directory "$logs_dir" "$backup_path"; then
|
||||
print_success "Logs backup completed: $backup_file"
|
||||
print_info "Backup size: $(get_backup_size "$backup_path")"
|
||||
|
||||
# Cleanup old backups
|
||||
cleanup_old_backups "$BACKUP_LOGS" "$LOG_RETENTION_DAYS" "logs"
|
||||
|
||||
rm -rf "$temp_dir"
|
||||
return 0
|
||||
else
|
||||
print_error "Logs backup failed"
|
||||
rm -rf "$temp_dir"
|
||||
return 1
|
||||
fi
|
||||
}
|
||||
|
||||
# =============================================================================
|
||||
# SCHEDULED BACKUP FUNCTIONS
|
||||
# =============================================================================
|
||||
|
||||
# Setup cron jobs for automated backups
|
||||
setup_backup_schedule() {
|
||||
print_info "Setting up automated backup schedule"
|
||||
|
||||
# Create backup script
|
||||
local backup_script="$BACKUP_ROOT/backup.sh"
|
||||
cat > "$backup_script" << 'EOF'
|
||||
#!/bin/bash
|
||||
# Automated backup script for EZ-Homelab
|
||||
|
||||
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/../scripts/enhanced-setup" && pwd)"
|
||||
|
||||
# Run backups
|
||||
"$SCRIPT_DIR/backup.sh" config --quiet
|
||||
"$SCRIPT_DIR/backup.sh" volumes --quiet
|
||||
"$SCRIPT_DIR/backup.sh" logs --quiet
|
||||
|
||||
# Log completion
|
||||
echo "$(date): Automated backup completed" >> "$HOME/.ez-homelab/logs/backup.log"
|
||||
EOF
|
||||
|
||||
chmod +x "$backup_script"
|
||||
|
||||
# Add to crontab
|
||||
local cron_entry
|
||||
|
||||
# Config backup (daily at 2 AM)
|
||||
cron_entry="$CONFIG_BACKUP_SCHEDULE $backup_script config"
|
||||
if ! crontab -l 2>/dev/null | grep -q "$backup_script config"; then
|
||||
(crontab -l 2>/dev/null; echo "$cron_entry") | crontab -
|
||||
print_info "Added config backup to crontab: $cron_entry"
|
||||
fi
|
||||
|
||||
# Data backup (weekly on Sunday at 3 AM)
|
||||
cron_entry="$DATA_BACKUP_SCHEDULE $backup_script volumes"
|
||||
if ! crontab -l 2>/dev/null | grep -q "$backup_script volumes"; then
|
||||
(crontab -l 2>/dev/null; echo "$cron_entry") | crontab -
|
||||
print_info "Added volumes backup to crontab: $cron_entry"
|
||||
fi
|
||||
|
||||
# Logs backup (daily at 1 AM)
|
||||
cron_entry="$LOG_BACKUP_SCHEDULE $backup_script logs"
|
||||
if ! crontab -l 2>/dev/null | grep -q "$backup_script logs"; then
|
||||
(crontab -l 2>/dev/null; echo "$cron_entry") | crontab -
|
||||
print_info "Added logs backup to crontab: $cron_entry"
|
||||
fi
|
||||
|
||||
print_success "Automated backup schedule configured"
|
||||
}
|
||||
|
||||
# Remove backup schedule
|
||||
remove_backup_schedule() {
|
||||
print_info "Removing automated backup schedule"
|
||||
|
||||
# Remove from crontab
|
||||
crontab -l 2>/dev/null | grep -v "backup.sh" | crontab -
|
||||
|
||||
print_success "Automated backup schedule removed"
|
||||
}
|
||||
|
||||
# =============================================================================
|
||||
# MAIN FUNCTION
|
||||
# =============================================================================
|
||||
|
||||
main() {
|
||||
local action=""
|
||||
local backup_file=""
|
||||
local quiet=false
|
||||
|
||||
# Parse command line arguments
|
||||
while [[ $# -gt 0 ]]; do
|
||||
case $1 in
|
||||
-h|--help)
|
||||
cat << EOF
|
||||
EZ-Homelab Backup Management
|
||||
|
||||
USAGE:
|
||||
backup [OPTIONS] <ACTION> [BACKUP_FILE]
|
||||
|
||||
ACTIONS:
|
||||
config Backup/restore configuration files
|
||||
volumes Backup/restore Docker volumes
|
||||
logs Backup logs
|
||||
list List available backups
|
||||
schedule Setup automated backup schedule
|
||||
unschedule Remove automated backup schedule
|
||||
all Run all backup types
|
||||
|
||||
OPTIONS:
|
||||
-q, --quiet Suppress non-error output
|
||||
--restore Restore from backup (requires BACKUP_FILE)
|
||||
|
||||
EXAMPLES:
|
||||
backup config # Backup configuration
|
||||
backup config --restore config_20240129_020000.tar.gz
|
||||
backup volumes # Backup Docker volumes
|
||||
backup logs # Backup logs
|
||||
backup list config # List config backups
|
||||
backup schedule # Setup automated backups
|
||||
backup all # Run all backup types
|
||||
|
||||
EOF
|
||||
exit 0
|
||||
;;
|
||||
-q|--quiet)
|
||||
quiet=true
|
||||
shift
|
||||
;;
|
||||
--restore)
|
||||
action="restore"
|
||||
shift
|
||||
;;
|
||||
config|volumes|logs|list|schedule|unschedule|all)
|
||||
action="$1"
|
||||
shift
|
||||
break
|
||||
;;
|
||||
*)
|
||||
if [[ -z "$backup_file" ]]; then
|
||||
backup_file="$1"
|
||||
else
|
||||
print_error "Too many arguments"
|
||||
exit 1
|
||||
fi
|
||||
shift
|
||||
;;
|
||||
esac
|
||||
done
|
||||
|
||||
# Handle remaining arguments
|
||||
while [[ $# -gt 0 ]]; do
|
||||
if [[ -z "$backup_file" ]]; then
|
||||
backup_file="$1"
|
||||
else
|
||||
print_error "Too many arguments"
|
||||
exit 1
|
||||
fi
|
||||
shift
|
||||
done
|
||||
|
||||
# Initialize script
|
||||
init_script "$SCRIPT_NAME" "$SCRIPT_VERSION"
|
||||
init_logging "$SCRIPT_NAME"
|
||||
init_backup_dirs
|
||||
|
||||
# Check prerequisites
|
||||
if ! command_exists "tar"; then
|
||||
print_error "tar command not found. Please install tar."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Execute action
|
||||
case "$action" in
|
||||
config)
|
||||
if [[ "$action" == "restore" ]]; then
|
||||
restore_config "$backup_file"
|
||||
else
|
||||
backup_config
|
||||
fi
|
||||
;;
|
||||
volumes)
|
||||
if [[ "$action" == "restore" ]]; then
|
||||
restore_docker_volumes "$backup_file"
|
||||
else
|
||||
backup_docker_volumes
|
||||
fi
|
||||
;;
|
||||
logs)
|
||||
backup_logs
|
||||
;;
|
||||
list)
|
||||
case "$backup_file" in
|
||||
config|"")
|
||||
list_backups "$BACKUP_CONFIG" "config"
|
||||
;;
|
||||
volumes)
|
||||
list_backups "$BACKUP_DATA" "volumes"
|
||||
;;
|
||||
logs)
|
||||
list_backups "$BACKUP_LOGS" "logs"
|
||||
;;
|
||||
*)
|
||||
print_error "Unknown backup type: $backup_file"
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
;;
|
||||
schedule)
|
||||
setup_backup_schedule
|
||||
;;
|
||||
unschedule)
|
||||
remove_backup_schedule
|
||||
;;
|
||||
all)
|
||||
print_info "Running all backup types"
|
||||
backup_config && backup_docker_volumes && backup_logs
|
||||
;;
|
||||
"")
|
||||
print_error "No action specified. Use --help for usage information."
|
||||
exit 1
|
||||
;;
|
||||
*)
|
||||
print_error "Unknown action: $action"
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
}
|
||||
|
||||
# Run main function
|
||||
main "$@"
|
||||
@@ -1,440 +0,0 @@
|
||||
#!/bin/bash
|
||||
# EZ-Homelab Enhanced Setup Scripts - Deployment Engine
|
||||
# Orchestrated deployment of services with proper sequencing and health checks
|
||||
|
||||
SCRIPT_NAME="deploy"
|
||||
SCRIPT_VERSION="1.0.0"
|
||||
|
||||
# Load common library
|
||||
source "$(dirname "${BASH_SOURCE[0]}")/lib/common.sh"
|
||||
source "$(dirname "${BASH_SOURCE[0]}")/lib/ui.sh"
|
||||
|
||||
# =============================================================================
|
||||
# DEPLOYMENT CONFIGURATION
|
||||
# =============================================================================
|
||||
|
||||
# Service deployment order (dependencies must come first)
DEPLOYMENT_ORDER=(
    "core"              # Infrastructure services (Traefik, Authelia, etc.)
    "infrastructure"    # Development tools (code-server, etc.)
    "dashboards"        # Homepage, monitoring dashboards
    "monitoring"        # Grafana, Prometheus, Loki
    "media"             # Plex, Jellyfin, etc.
    "media-management"  # Sonarr, Radarr, etc.
    "home"              # Home Assistant, Node-RED
    "productivity"      # Nextcloud, Gitea, etc.
    "utilities"         # Duplicati, FreshRSS, etc.
    "vpn"               # VPN services
    "alternatives"      # Alternative services
    "wikis"             # Wiki services
)

# Core services that must be running for the system to function
CORE_SERVICES=("traefik" "authelia" "duckdns")

# Service health check timeouts (seconds)
HEALTH_CHECK_TIMEOUT=300   # polling budget per service in check_service_health
SERVICE_STARTUP_TIMEOUT=60 # fixed wait per service in deploy_stack before health polling
|
||||
|
||||
# =============================================================================
|
||||
# DEPLOYMENT FUNCTIONS
|
||||
# =============================================================================
|
||||
|
||||
# Print the name of every deployable stack (a directory directly under
# $EZ_HOME/docker-compose containing a docker-compose.yml), one per line.
get_available_stacks() {
    local found=()
    local root="$EZ_HOME/docker-compose"

    if [[ -d "$root" ]]; then
        local entry
        while IFS= read -r -d '' entry; do
            [[ -f "$entry/docker-compose.yml" ]] && found+=("$(basename "$entry")")
        done < <(find "$root" -mindepth 1 -maxdepth 1 -type d -print0 2>/dev/null)
    fi

    printf '%s\n' "${found[@]}"
}

# True when the named stack directory exists and holds a docker-compose.yml.
stack_exists() {
    local name="$1"
    local dir="$EZ_HOME/docker-compose/$name"

    [[ -d "$dir" && -f "$dir/docker-compose.yml" ]]
}

# Emit the service names declared in the stack's docker-compose.yml, one per
# line; returns 1 when the compose file is missing.
get_stack_services() {
    local stack="$1"
    local compose="$EZ_HOME/docker-compose/$stack/docker-compose.yml"

    [[ -f "$compose" ]] || return 1

    # Grab the top-level `services:` mapping, then keep the 2-space-indented
    # keys inside it and strip indentation plus the trailing colon/value.
    sed -n '/^services:/,/^[^ ]/p' "$compose" 2>/dev/null \
        | grep '^  [a-zA-Z0-9_-]\+:' \
        | sed -e 's/^\s*//' -e 's/:.*$//' || true
}

# True when a container named exactly NAME is currently running.
is_service_running() {
    local name="$1"

    # `--filter name=` matches substrings, so anchor with grep for exactness.
    docker ps --filter "name=$name" --filter "status=running" --format "{{.Names}}" \
        | grep -q "^${name}$"
}
|
||||
|
||||
# Poll until the named container is running or TIMEOUT seconds elapse.
# Arguments: $1 - service/container name; $2 - timeout in seconds
#            (defaults to HEALTH_CHECK_TIMEOUT).
# Returns:   0 when the service is running, 1 on timeout.
check_service_health() {
    local svc="$1"
    local timeout="${2:-$HEALTH_CHECK_TIMEOUT}"
    local deadline=$(( $(date +%s) + timeout ))

    print_info "Checking health of service: $svc"

    while (( $(date +%s) < deadline )); do
        if is_service_running "$svc"; then
            # Additional health checks could be added here; for now a
            # running container counts as healthy.
            print_success "Service $svc is healthy"
            return 0
        fi
        sleep 5
    done

    print_error "Service $svc failed health check (timeout: ${timeout}s)"
    return 1
}
|
||||
|
||||
# Deploy one stack: validate its compose file, pull images, start the
# services with `docker compose up -d`, then poll every service until
# healthy.
# Arguments: $1 - stack name (directory under $EZ_HOME/docker-compose)
# Returns:   0 on success, 1 on any failure.
#
# Fixes over the previous revision:
#   * `service` is a local now; the loop used to leak it into global scope.
#   * The unconditional `sleep "$SERVICE_STARTUP_TIMEOUT"` per service was
#     removed: check_service_health already polls for up to
#     HEALTH_CHECK_TIMEOUT, so the fixed sleep only added ~60s of dead time
#     per service.
#   * An empty service list is expanded safely — under `set -u` with
#     bash < 4.4, "${services[@]}" on an empty array aborts the script.
deploy_stack() {
    local stack="$1"
    local compose_file="$EZ_HOME/docker-compose/$stack/docker-compose.yml"

    if [[ ! -f "$compose_file" ]]; then
        print_error "Compose file not found: $compose_file"
        return 1
    fi

    print_info "Deploying stack: $stack"

    # Validate the compose file before touching Docker.
    if ! validate_yaml "$compose_file"; then
        print_error "Invalid YAML in $compose_file"
        return 1
    fi

    # Pull images first; a partial pull is not fatal (up -d will retry).
    print_info "Pulling images for stack: $stack"
    if ! docker compose -f "$compose_file" pull; then
        print_warning "Failed to pull some images for $stack, continuing..."
    fi

    print_info "Starting services in stack: $stack"
    if ! docker compose -f "$compose_file" up -d; then
        print_error "Failed to deploy stack: $stack"
        return 1
    fi

    # Wait for every service in the stack to report healthy.
    local services service
    mapfile -t services < <(get_stack_services "$stack")

    for service in ${services[@]+"${services[@]}"}; do
        print_info "Waiting for service to start: $service"
        if ! check_service_health "$service"; then
            print_error "Service $service in stack $stack failed health check"
            return 1
        fi
    done

    print_success "Successfully deployed stack: $stack"
    return 0
}
|
||||
|
||||
# Bring one stack down with `docker compose down`.
# Arguments: $1 - stack name.
# Returns:   0 when stopped or when no compose file exists (nothing to do),
#            1 when docker compose fails.
stop_stack() {
    local stack="$1"
    local compose_file="$EZ_HOME/docker-compose/$stack/docker-compose.yml"

    # A missing compose file means there is nothing to stop.
    if [[ ! -f "$compose_file" ]]; then
        print_warning "Compose file not found: $compose_file"
        return 0
    fi

    print_info "Stopping stack: $stack"

    if ! docker compose -f "$compose_file" down; then
        print_error "Failed to stop stack: $stack"
        return 1
    fi

    print_success "Successfully stopped stack: $stack"
    return 0
}
|
||||
|
||||
# Roll back a failed deployment: stop the failed stack, then stop every
# previously deployed stack in reverse order.
# Arguments: $1    - name of the stack that failed
#            $2... - stacks already deployed, in deployment order.
#
# Fix: main()'s --no-rollback flag sets NO_ROLLBACK=true but nothing ever
# consumed it — the option was dead. It is honored here; the default
# (unset/false) preserves the original behavior exactly.
rollback_deployment() {
    local failed_stack="$1"
    local deployed_stacks=("${@:2}")

    if [[ "${NO_ROLLBACK:-false}" == "true" ]]; then
        print_warning "Rollback skipped (--no-rollback) after failure in: $failed_stack"
        return 0
    fi

    print_warning "Rolling back deployment due to failure in: $failed_stack"

    # Stop the failed stack first; ignore individual stop failures so the
    # rollback always runs to completion.
    stop_stack "$failed_stack" || true

    # Stop all previously deployed stacks in reverse order.
    local i
    for ((i=${#deployed_stacks[@]}-1; i>=0; i--)); do
        local stack="${deployed_stacks[i]}"
        if [[ "$stack" != "$failed_stack" ]]; then
            stop_stack "$stack" || true
        fi
    done

    print_info "Rollback completed"
}
|
||||
|
||||
# Deploy every stack in DEPLOYMENT_ORDER, rolling back on the first failure.
# Missing stacks are skipped with a warning. Reads $non_interactive from the
# caller's scope via bash dynamic scoping.
deploy_all() {
    local done_stacks=()
    local total=${#DEPLOYMENT_ORDER[@]}
    local idx=0
    local stack

    print_info "Starting full deployment of $total stacks"

    for stack in "${DEPLOYMENT_ORDER[@]}"; do
        idx=$((idx + 1))
        local pct=$(( idx * 100 / total ))

        if ui_available && ! $non_interactive; then
            ui_gauge "Deploying $stack... ($idx/$total)" "$pct"
        fi

        print_info "[$idx/$total] Deploying stack: $stack"

        if ! stack_exists "$stack"; then
            print_warning "Stack $stack not found, skipping"
            continue
        fi

        if ! deploy_stack "$stack"; then
            print_error "Failed to deploy stack: $stack"
            rollback_deployment "$stack" "${done_stacks[@]}"
            return 1
        fi
        done_stacks+=("$stack")
    done

    print_success "All stacks deployed successfully!"
    return 0
}

# Deploy only the stacks named as arguments; unlike deploy_all, a missing
# stack is treated as an error and triggers a rollback.
deploy_specific() {
    local wanted=("$@")
    local done_stacks=()
    local total=${#wanted[@]}
    local idx=0
    local stack

    print_info "Starting deployment of $total specific stacks"

    for stack in "${wanted[@]}"; do
        idx=$((idx + 1))
        local pct=$(( idx * 100 / total ))

        if ui_available && ! $non_interactive; then
            ui_gauge "Deploying $stack... ($idx/$total)" "$pct"
        fi

        print_info "[$idx/$total] Deploying stack: $stack"

        if ! stack_exists "$stack"; then
            print_error "Stack $stack not found"
            rollback_deployment "$stack" "${done_stacks[@]}"
            return 1
        fi

        if ! deploy_stack "$stack"; then
            print_error "Failed to deploy stack: $stack"
            rollback_deployment "$stack" "${done_stacks[@]}"
            return 1
        fi
        done_stacks+=("$stack")
    done

    print_success "Specified stacks deployed successfully!"
    return 0
}
|
||||
|
||||
# Stop every available stack; individual failures are ignored so one bad
# stack cannot block the rest. Reads $non_interactive from the caller.
stop_all() {
    local all=()
    mapfile -t all < <(get_available_stacks)
    local total=${#all[@]}
    local idx=0
    local stack

    print_info "Stopping all $total stacks"

    for stack in "${all[@]}"; do
        idx=$((idx + 1))
        local pct=$(( idx * 100 / total ))

        if ui_available && ! $non_interactive; then
            ui_gauge "Stopping $stack... ($idx/$total)" "$pct"
        fi

        stop_stack "$stack" || true
    done

    print_success "All stacks stopped"
}

# Print a per-stack, per-service running/stopped report to stdout.
show_status() {
    print_info "EZ-Homelab Deployment Status"
    echo

    local all=()
    mapfile -t all < <(get_available_stacks)

    local stack svc
    for stack in "${all[@]}"; do
        echo "Stack: $stack"

        local services=()
        mapfile -t services < <(get_stack_services "$stack")

        for svc in "${services[@]}"; do
            if is_service_running "$svc"; then
                echo "  ✅ $svc - Running"
            else
                echo "  ❌ $svc - Stopped"
            fi
        done
        echo
    done
}
|
||||
|
||||
# =============================================================================
|
||||
# MAIN FUNCTION
|
||||
# =============================================================================
|
||||
|
||||
# Entry point for the deployment engine: parse CLI flags, pick an action
# (deploy/stop/status/restart), then dispatch. Any non-flag argument before
# or after the action word is collected as a stack name.
main() {
    local action="deploy"
    local stacks=()
    local non_interactive=false   # read by deploy_all/stop_all via dynamic scoping
    local verbose=false           # NOTE(review): set but never consumed here — confirm intent

    # Parse command line arguments; the first recognized action word stops
    # this loop (break) so trailing words fall through to the stacks loop.
    while [[ $# -gt 0 ]]; do
        case $1 in
            -h|--help)
                cat << EOF
EZ-Homelab Deployment Engine

USAGE:
    deploy [OPTIONS] [ACTION] [STACKS...]

ACTIONS:
    deploy              Deploy all stacks (default)
    stop                Stop all stacks
    status              Show deployment status
    restart             Restart all stacks

ARGUMENTS:
    STACKS              Specific stacks to deploy (optional, deploys all if not specified)

OPTIONS:
    -h, --help          Show this help message
    -v, --verbose       Enable verbose logging
    --no-ui             Run without interactive UI
    --no-rollback       Skip rollback on deployment failure

EXAMPLES:
    deploy                    # Deploy all stacks
    deploy core media         # Deploy only core and media stacks
    deploy stop               # Stop all stacks
    deploy status             # Show status of all services

EOF
                exit 0
                ;;
            -v|--verbose)
                verbose=true
                shift
                ;;
            --no-ui)
                non_interactive=true
                shift
                ;;
            --no-rollback)
                # NOTE(review): NO_ROLLBACK is set here but no function in
                # this file reads it — the flag appears to be dead; confirm.
                NO_ROLLBACK=true
                shift
                ;;
            deploy|stop|status|restart)
                action="$1"
                shift
                break
                ;;
            *)
                stacks+=("$1")
                shift
                ;;
        esac
    done

    # Handle remaining arguments (everything after the action word) as stacks.
    while [[ $# -gt 0 ]]; do
        stacks+=("$1")
        shift
    done

    # Initialize script
    # NOTE(review): init_script already calls init_logging (see common.sh),
    # so the explicit init_logging below looks redundant — confirm.
    init_script "$SCRIPT_NAME" "$SCRIPT_VERSION"
    init_logging "$SCRIPT_NAME"

    # Check prerequisites
    if ! docker_available; then
        print_error "Docker is not available. Please run setup.sh first."
        exit 1
    fi

    # Execute action. NOTE(review): `restart` ignores any STACKS arguments
    # and always restarts everything — confirm that is intended.
    case "$action" in
        deploy)
            if [[ ${#stacks[@]} -eq 0 ]]; then
                deploy_all
            else
                deploy_specific "${stacks[@]}"
            fi
            ;;
        stop)
            stop_all
            ;;
        status)
            show_status
            ;;
        restart)
            print_info "Restarting all stacks..."
            stop_all
            sleep 5
            deploy_all
            ;;
        *)
            print_error "Unknown action: $action"
            exit 1
            ;;
    esac
}
|
||||
|
||||
# Run main function
|
||||
main "$@"
|
||||
@@ -1,399 +0,0 @@
|
||||
#!/bin/bash
|
||||
# EZ-Homelab Enhanced Setup Scripts - Configuration Generalization
|
||||
# Reverse localization by restoring template variables from backups
|
||||
|
||||
SCRIPT_NAME="generalize"
|
||||
SCRIPT_VERSION="1.0.0"
|
||||
|
||||
# Load common library
|
||||
source "$(dirname "${BASH_SOURCE[0]}")/lib/common.sh"
|
||||
source "$(dirname "${BASH_SOURCE[0]}")/lib/ui.sh"
|
||||
|
||||
# =============================================================================
|
||||
# SCRIPT CONFIGURATION
|
||||
# =============================================================================
|
||||
|
||||
# Template variables that were replaced during localization; generalization
# substitutes each variable's current value back to its literal ${VAR} form.
TEMPLATE_VARS=("DOMAIN" "TZ" "PUID" "PGID" "DUCKDNS_TOKEN" "JWT_SECRET" "SESSION_SECRET" "ENCRYPTION_KEY" "AUTHELIA_ADMIN_PASSWORD" "AUTHELIA_ADMIN_EMAIL" "PLEX_CLAIM_TOKEN" "DEPLOYMENT_TYPE" "SERVER_HOSTNAME" "DOCKER_SOCKET_PATH")
|
||||
|
||||
# =============================================================================
|
||||
# GENERALIZATION FUNCTIONS
|
||||
# =============================================================================
|
||||
|
||||
# Source $EZ_HOME/.env with every assignment exported (set -a) so template
# values are visible to generalization; fails when the file is missing.
load_environment() {
    local env_file="$EZ_HOME/.env"

    if [[ ! -f "$env_file" ]]; then
        print_error ".env file not found at $env_file"
        print_error "Cannot generalize without environment context"
        return 1
    fi

    # set -a marks every variable the file assigns for export.
    set -a
    source "$env_file"
    set +a

    print_success "Environment loaded from $env_file"
    return 0
}

# List *.template backup files for one service, or for every service when
# SERVICE is empty; errors when a named service directory does not exist.
find_backup_files() {
    local service="${1:-}"

    # No service given: scan the whole docker-compose tree.
    if [[ -z "$service" ]]; then
        find "$EZ_HOME/docker-compose" -name "*.template" -type f 2>/dev/null
        return
    fi

    local service_dir="$EZ_HOME/docker-compose/$service"
    if [[ ! -d "$service_dir" ]]; then
        print_error "Service directory not found: $service_dir"
        return 1
    fi
    find "$service_dir" -name "*.template" -type f 2>/dev/null
}
|
||||
|
||||
# Restore a config file from its .template backup, asking for confirmation
# when a UI is available and saving a timestamped copy of the current file
# first.
# Arguments: $1 - path to the .template backup.
# Returns:   0 on restore or user skip, 1 when the backup is missing.
restore_template_file() {
    local backup="$1"
    local target="${backup%.template}"

    print_info "Restoring: $target"

    if [[ ! -f "$backup" ]]; then
        print_error "Backup file not found: $backup"
        return 1
    fi

    # Confirm the destructive overwrite when running interactively.
    if ui_available; then
        if ! ui_yesno "Restore $target from backup? This will overwrite current changes."; then
            print_info "Skipped $target"
            return 0
        fi
    fi

    # Keep a safety copy of the current version before overwriting it.
    backup_file "$target"

    cp "$backup" "$target"

    print_success "Restored $target from $backup"
    return 0
}
|
||||
|
||||
# Replace concrete configuration values in FILE with their ${VAR} template
# placeholders, creating FILE.template as a backup of the pre-generalized
# content when one does not already exist.
# Arguments: $1 - file to generalize.
# Returns:   0 on success, 1 when the file is missing.
#
# Fix: the previous implementation piped each value through sed with a
# hand-rolled escape list (`[[\.*^$()+?{|]`). In GNU basic regular
# expressions, escaping `(`, `|`, `+`, `?`, `{` *activates* grouping,
# alternation and repetition rather than neutralizing them, so values
# containing those characters were mis-matched. Pure-bash literal
# substitution avoids regex semantics entirely.
generalize_processed_file() {
    local file="$1"
    local backup_file="${file}.template"

    print_info "Generalizing: $file"

    if [[ ! -f "$file" ]]; then
        print_error "File not found: $file"
        return 1
    fi

    # Create backup if it doesn't exist
    if [[ ! -f "$backup_file" ]]; then
        cp "$file" "$backup_file"
        print_info "Created backup: $backup_file"
    fi

    # Read the whole file, then substitute every known value literally.
    # NOTE: $(<file) drops trailing newlines; printf '%s\n' restores one.
    local content
    content=$(<"$file")

    local var value placeholder
    for var in "${TEMPLATE_VARS[@]}"; do
        value="${!var:-}"
        if [[ -n "$value" ]]; then
            placeholder="\${$var}"
            # Literal, regex-free replacement of every occurrence.
            content=${content//"$value"/$placeholder}
            log_debug "Generalized \${$var} in $file"
        fi
    done

    # Write atomically via a temp file, then move into place.
    local temp_file
    temp_file=$(mktemp)
    printf '%s\n' "$content" > "$temp_file"
    mv "$temp_file" "$file"

    print_success "Generalized $file"
    return 0
}
|
||||
|
||||
# Delete *.template backups for SERVICE (all services when empty). Prompts
# per file when a UI is available; otherwise deletes silently and logs.
#
# NOTE(review): this shadows common.sh's cleanup_backups(file) helper, which
# prunes timestamped .bak copies instead — confirm the collision is intended.
#
# Fix: the counter used `((cleaned++))`, whose exit status is 1 when the
# pre-increment value is 0; under common.sh's `set -e` that aborted the
# whole script right after the first deletion. Plain arithmetic assignment
# always succeeds.
cleanup_backups() {
    local service="${1:-}"

    print_info "Cleaning up backup files..."

    local backup_files
    mapfile -t backup_files < <(find_backup_files "$service")

    if [[ ${#backup_files[@]} -eq 0 ]]; then
        print_info "No backup files to clean up"
        return 0
    fi

    local cleaned=0
    local backup
    for backup in "${backup_files[@]}"; do
        if ui_available; then
            # Interactive: confirm each deletion individually.
            if ui_yesno "Delete backup file: $backup?"; then
                rm -f "$backup"
                cleaned=$((cleaned + 1))
                print_info "Deleted: $backup"
            fi
        else
            rm -f "$backup"
            cleaned=$((cleaned + 1))
            log_info "Deleted backup: $backup"
        fi
    done

    print_success "Cleaned up $cleaned backup file(s)"
}
|
||||
|
||||
# =============================================================================
|
||||
# UI FUNCTIONS
|
||||
# =============================================================================
|
||||
|
||||
# Present the generalization-method radio list; echoes the selected tag
# (restore/reverse/cleanup) via ui_radiolist.
show_generalization_menu() {
    local prompt="Select generalization method:"
    local choices=(
        "restore" "Restore from .template backups" "on"
        "reverse" "Reverse engineer from current files" "off"
        "cleanup" "Clean up backup files" "off"
    )

    ui_radiolist "$prompt" "$UI_HEIGHT" "$UI_WIDTH" "${choices[@]}"
}

# Refresh the progress gauge while files are being generalized.
# Arguments: $1 - total count, $2 - files done so far, $3 - current file
#            (currently unused; kept for call-site compatibility).
show_generalization_progress() {
    local total="$1"
    local current="$2"
    local file="$3"

    ui_gauge "Generalizing configurations... ($current/$total)" "$(( current * 100 / total ))"
}
|
||||
|
||||
# =============================================================================
|
||||
# MAIN FUNCTION
|
||||
# =============================================================================
|
||||
|
||||
# Entry point for configuration generalization: parse CLI flags, load the
# .env context, choose a method (restore/reverse/cleanup) interactively or
# from --method, then execute it.
main() {
    local service=""
    local method=""
    local non_interactive=false
    local verbose=false
    local cleanup=false

    # Parse command line arguments (single shared `shift` at loop bottom).
    while [[ $# -gt 0 ]]; do
        case $1 in
            -h|--help)
                cat << EOF
EZ-Homelab Configuration Generalization

USAGE:
    $SCRIPT_NAME [OPTIONS] [SERVICE]

ARGUMENTS:
    SERVICE             Specific service to generalize (optional, processes all if not specified)

OPTIONS:
    -h, --help          Show this help message
    -v, --verbose       Enable verbose logging
    --method METHOD     Generalization method: restore, reverse, cleanup
    --cleanup           Clean up backup files after generalization
    --no-ui             Run without interactive UI

METHODS:
    restore             Restore files from .template backups (safe)
    reverse             Reverse engineer template variables from current values (advanced)
    cleanup             Remove .template backup files

EXAMPLES:
    $SCRIPT_NAME --method restore           # Restore all from backups
    $SCRIPT_NAME --method reverse traefik   # Reverse engineer Traefik
    $SCRIPT_NAME --cleanup                  # Clean up all backups

WARNING:
    Generalization can be destructive. Always backup important data first.

EOF
                exit 0
                ;;
            -v|--verbose)
                verbose=true
                ;;
            --method)
                # Consume the option's value; the loop-bottom shift then
                # consumes the value token itself.
                shift
                method="$1"
                ;;
            --cleanup)
                cleanup=true
                ;;
            --no-ui)
                non_interactive=true
                ;;
            -*)
                print_error "Unknown option: $1"
                echo "Use --help for usage information"
                exit 1
                ;;
            *)
                # First bare word is the service; a second is an error.
                if [[ -z "$service" ]]; then
                    service="$1"
                else
                    print_error "Multiple services specified. Use only one service name."
                    exit 1
                fi
                ;;
        esac
        shift
    done

    # Initialize script
    init_script "$SCRIPT_NAME"

    if $verbose; then
        set -x
    fi

    print_info "Starting EZ-Homelab configuration generalization..."

    # Load environment (required: supplies the values being generalized).
    if ! load_environment; then
        exit 1
    fi

    # Determine method: ask via UI when possible, otherwise require --method.
    if [[ -z "$method" ]]; then
        if ui_available && ! $non_interactive; then
            method=$(show_generalization_menu) || exit 1
        else
            print_error "Method must be specified with --method when running non-interactively"
            echo "Available methods: restore, reverse, cleanup"
            exit 1
        fi
    fi

    case "$method" in
        "restore")
            # Find backup files
            local backup_files
            mapfile -t backup_files < <(find_backup_files "$service")
            if [[ ${#backup_files[@]} -eq 0 ]]; then
                print_warning "No backup files found"
                exit 0
            fi

            print_info "Found ${#backup_files[@]} backup file(s)"

            # Restore files
            local restored=0
            local total=${#backup_files[@]}

            for backup in "${backup_files[@]}"; do
                if ui_available && ! $non_interactive; then
                    show_generalization_progress "$total" "$restored" "$backup"
                fi

                if restore_template_file "$backup"; then
                    # NOTE(review): ((restored++)) returns status 1 when the
                    # old value is 0; under common.sh's `set -e` this aborts
                    # the script on the first success — consider
                    # restored=$((restored + 1)).
                    ((restored++))
                fi
            done

            # Close progress gauge
            if ui_available && ! $non_interactive; then
                ui_gauge "Restoration complete!" 100
                sleep 1
            fi

            print_success "Restored $restored file(s) from backups"
            ;;

        "reverse")
            print_warning "Reverse engineering is experimental and may not be perfect"
            print_warning "Make sure you have backups of important data"

            if ui_available && ! $non_interactive; then
                if ! ui_yesno "Continue with reverse engineering? This may modify your configuration files."; then
                    print_info "Operation cancelled"
                    exit 0
                fi
            fi

            # Find processed files (those with actual values instead of
            # templates). NOTE(review): the bare -o chain has no -type f and
            # relies on find's implicit-print-over-whole-expression rule —
            # confirm directories can never match these name patterns.
            local processed_files
            mapfile -t processed_files < <(find "$EZ_HOME/docker-compose${service:+/$service}" -name "*.yml" -o -name "*.yaml" -o -name "*.json" -o -name "*.conf" -o -name "*.cfg" -o -name "*.env" 2>/dev/null)

            if [[ ${#processed_files[@]} -eq 0 ]]; then
                print_warning "No configuration files found"
                exit 0
            fi

            print_info "Found ${#processed_files[@]} file(s) to generalize"

            # Generalize files
            local generalized=0
            local total=${#processed_files[@]}

            for file in "${processed_files[@]}"; do
                if ui_available && ! $non_interactive; then
                    show_generalization_progress "$total" "$generalized" "$file"
                fi

                if generalize_processed_file "$file"; then
                    # NOTE(review): same ((…++)) set -e hazard as above.
                    ((generalized++))
                fi
            done

            # Close progress gauge
            if ui_available && ! $non_interactive; then
                ui_gauge "Generalization complete!" 100
                sleep 1
            fi

            print_success "Generalized $generalized file(s)"
            ;;

        "cleanup")
            cleanup_backups "$service"
            ;;

        *)
            print_error "Unknown method: $method"
            echo "Available methods: restore, reverse, cleanup"
            exit 1
            ;;
    esac

    # Optional cleanup after restore/reverse.
    if $cleanup && [[ "$method" != "cleanup" ]]; then
        cleanup_backups "$service"
    fi

    echo ""
    print_success "Configuration generalization complete!"
    print_info "Use ./validate.sh to check the results"

    exit 0
}
|
||||
|
||||
# Run main function
|
||||
main "$@"
|
||||
@@ -1,373 +0,0 @@
|
||||
#!/bin/bash
|
||||
# EZ-Homelab Enhanced Setup Scripts - Common Library
|
||||
# Shared variables, utility functions, and constants
|
||||
|
||||
set -euo pipefail

# =============================================================================
# SHARED VARIABLES
# =============================================================================

# Repository and paths (all overridable from the environment)
# NOTE(review): the default EZ_HOME hardcodes a personal home directory
# (/home/kelin) — confirm whether it should derive from $HOME instead.
EZ_HOME="${EZ_HOME:-/home/kelin/EZ-Homelab}"
STACKS_DIR="${STACKS_DIR:-/opt/stacks}"
LOG_DIR="${LOG_DIR:-$HOME/.ez-homelab/logs}"
# Absolute path of the directory *above* this lib/ directory.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"

# User and system
EZ_USER="${EZ_USER:-$USER}"
EZ_UID="${EZ_UID:-$(id -u)}"
EZ_GID="${EZ_GID:-$(id -g)}"

# Architecture detection
ARCH="$(uname -m)"
IS_ARM64=false
[[ "$ARCH" == "aarch64" ]] && IS_ARM64=true

# System information (lsb_release may be absent: fall back to "Unknown")
OS_NAME="$(lsb_release -si 2>/dev/null | tail -1 || echo "Unknown")"
OS_VERSION="$(lsb_release -sr 2>/dev/null | tail -1 || echo "Unknown")"
KERNEL_VERSION="$(uname -r)"

# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color
|
||||
|
||||
# =============================================================================
|
||||
# LOGGING FUNCTIONS
|
||||
# =============================================================================
|
||||
|
||||
# Create the log directory and the per-script log file; sets the global
# LOG_FILE used by log().
init_logging() {
    local name="${1:-unknown}"
    mkdir -p "$LOG_DIR"
    LOG_FILE="$LOG_DIR/${name}.log"
    touch "$LOG_FILE"
}

# Append a timestamped, levelled message to $LOG_FILE and echo the same
# line to stderr.
# Arguments: $1 - level label (INFO/WARN/ERROR/DEBUG); $2 - message text.
log() {
    local level="$1"
    local message="$2"
    local stamp
    stamp=$(date '+%Y-%m-%d %H:%M:%S')
    local line="$stamp [$SCRIPT_NAME] $level: $message"
    echo "$line" >> "$LOG_FILE"
    echo "$line" >&2
}

# Level-specific shorthands around log().
log_info()  { log "INFO" "$1"; }
log_warn()  { log "WARN" "$1"; }
log_error() { log "ERROR" "$1"; }
log_debug() { log "DEBUG" "$1"; }
|
||||
|
||||
# =============================================================================
|
||||
# UTILITY FUNCTIONS
|
||||
# =============================================================================
|
||||
|
||||
# True when NAME resolves to an executable command.
command_exists() {
    command -v "$1" >/dev/null 2>&1
}

# True when the current process runs with effective UID 0.
is_root() {
    (( EUID == 0 ))
}

# Free space (whole GB) on the filesystem holding PATH (default /).
get_disk_space() {
    local mount="${1:-/}"
    df -BG "$mount" 2>/dev/null | tail -1 | awk '{print $4}' | sed 's/G//' || echo "0"
}

# Total physical memory in MB (0 when `free` is unavailable).
get_total_memory() {
    free -m 2>/dev/null | awk 'NR==2{printf "%.0f", $2}' || echo "0"
}

# Currently available memory in MB (0 when `free` is unavailable).
get_available_memory() {
    free -m 2>/dev/null | awk 'NR==2{printf "%.0f", $7}' || echo "0"
}

# True when the named systemd unit is active.
service_running() {
    systemctl is-active --quiet "$1" 2>/dev/null
}

# True when the docker CLI exists AND the docker systemd unit is active.
docker_available() {
    command_exists docker && service_running docker
}

# True when the host can reach the internet (single ping to 8.8.8.8).
check_network() {
    ping -c 1 -W 5 8.8.8.8 >/dev/null 2>&1
}
|
||||
|
||||
# Validate the syntax of a YAML file with the best available tool, in order:
# python3 + PyYAML, yq, `docker compose config`. When none is installed the
# file is assumed valid (return 0).
# Arguments: $1 - path to the YAML file.
#
# Fix: the filename used to be interpolated directly into the generated
# Python program text (open('$file')), so any path containing a quote broke
# the program — and effectively allowed code injection via the filename.
# The path is now passed out-of-band through argv.
validate_yaml() {
    local file="$1"

    if command_exists python3 && python3 -c "import yaml" 2>/dev/null; then
        python3 -c 'import sys, yaml; yaml.safe_load(open(sys.argv[1]))' "$file" 2>/dev/null
    elif command_exists yq; then
        yq eval '.' "$file" >/dev/null 2>/dev/null
    elif command_exists docker && docker compose version >/dev/null 2>&1; then
        # Fallback to docker compose config; it resolves paths relative to
        # the project directory, so cd there first.
        local dir base
        dir=$(dirname "$file")
        base=$(basename "$file")
        (cd "$dir" && docker compose -f "$base" config >/dev/null 2>&1)
    else
        # No validation tools available, assume valid.
        return 0
    fi
}
|
||||
|
||||
# Copy FILE to FILE.bak.<timestamp> and record the action in the log.
backup_file() {
    local src="$1"
    local dest="${src}.bak.$(date +%Y%m%d_%H%M%S)"
    cp "$src" "$dest"
    log_info "Backed up $src to $dest"
}

# Keep only the five most recent FILE.bak.* copies; delete everything older.
cleanup_backups() {
    local src="$1"
    local stale_list
    # ls -t sorts newest first, so entries from the 6th onward are stale.
    mapfile -t stale_list < <(ls -t "${src}.bak."* 2>/dev/null | tail -n +6)
    local stale
    for stale in "${stale_list[@]}"; do
        rm -f "$stale"
        log_debug "Cleaned up old backup: $stale"
    done
}
|
||||
|
||||
# Echo MESSAGE wrapped in the given ANSI color code, terminated by NC.
# Arguments: $1 - ANSI color sequence; $2 - message text.
print_color() {
    local color="$1"
    local message="$2"
    echo -e "${color}${message}${NC}"
}

# Symbol-prefixed convenience wrappers around print_color.
print_success() { print_color "$GREEN" "✓ $1"; }
print_warning() { print_color "$YELLOW" "⚠ $1"; }
print_error()   { print_color "$RED" "✗ $1"; }
print_info()    { print_color "$BLUE" "ℹ $1"; }
|
||||
|
||||
# =============================================================================
|
||||
# VALIDATION FUNCTIONS
|
||||
# =============================================================================
|
||||
|
||||
# True for supported distributions: Ubuntu 20/22/24.x, Debian 11/12/13, or
# any Raspbian release. Reads the OS_NAME / OS_VERSION globals.
validate_os() {
    case "$OS_NAME" in
        Ubuntu)
            [[ "$OS_VERSION" =~ ^(20|22|24) ]] && return 0
            ;;
        Debian)
            [[ "$OS_VERSION" =~ ^(11|12|13) ]] && return 0
            ;;
        Raspbian)
            return 0
            ;;
    esac
    return 1
}

# True for supported CPU architectures (reads the ARCH global).
validate_arch() {
    case "$ARCH" in
        x86_64|aarch64) return 0 ;;
        *) return 1 ;;
    esac
}

# Verify minimum disk (20 GB free) and memory (1024 MB total) requirements;
# logs the shortfall and returns non-zero when either is not met.
validate_requirements() {
    local min_disk=20   # GB
    local min_memory=1024 # MB

    local disk_space
    disk_space=$(get_disk_space)
    local total_memory
    total_memory=$(get_total_memory)

    if (( disk_space < min_disk )); then
        log_error "Insufficient disk space: ${disk_space}GB available, ${min_disk}GB required"
        return 1
    fi

    if (( total_memory < min_memory )); then
        log_error "Insufficient memory: ${total_memory}MB available, ${min_memory}MB required"
        return 1
    fi

    return 0
}
|
||||
|
||||
# =============================================================================
|
||||
# DEPENDENCY CHECKS
|
||||
# =============================================================================
|
||||
|
||||
# Check if required packages are installed
|
||||
# Return 0 when curl, wget, jq and git are all available; otherwise log
# the missing tools and return 1.
check_dependencies() {
  local tool
  local missing=()

  for tool in curl wget jq git; do
    command_exists "$tool" || missing+=("$tool")
  done

  if (( ${#missing[@]} > 0 )); then
    log_warn "Missing dependencies: ${missing[*]}"
    return 1
  fi

  return 0
}
|
||||
|
||||
# Install missing dependencies
|
||||
# Install curl/wget/jq/git via apt when check_dependencies reports gaps.
# Prefixes apt with sudo when not already running as root.
install_dependencies() {
  if check_dependencies; then
    return 0
  fi

  log_info "Installing missing dependencies..."
  if is_root; then
    apt update && apt install -y curl wget jq git
  else
    sudo apt update && sudo apt install -y curl wget jq git
  fi
}
|
||||
|
||||
# =============================================================================
|
||||
# SCRIPT INITIALIZATION
|
||||
# =============================================================================
|
||||
|
||||
# Initialize script environment
|
||||
# Initialise common script state: record the script name, start logging,
# install an interrupt trap, and validate OS/architecture support.
# Exits the process (status 1) when the platform is unsupported.
init_script() {
  local script_name=$1
  local err

  SCRIPT_NAME=$script_name
  init_logging "$script_name"
  log_info "Starting $script_name on $OS_NAME $OS_VERSION ($ARCH)"

  # Abort cleanly if the user interrupts the run.
  trap 'log_error "Script interrupted"; exit 1' INT TERM

  if ! validate_os; then
    err="Unsupported OS: $OS_NAME $OS_VERSION"
    print_error "$err"
    log_error "$err"
    exit 1
  fi

  if ! validate_arch; then
    err="Unsupported architecture: $ARCH"
    print_error "$err"
    log_error "$err"
    exit 1
  fi
}
|
||||
|
||||
# =============================================================================
|
||||
# DOCKER UTILITIES
|
||||
# =============================================================================
|
||||
|
||||
# Check if Docker is available
|
||||
# True when the docker CLI is on PATH and the daemon answers `docker info`.
docker_available() {
  command_exists "docker" || return 1
  docker info >/dev/null 2>&1
}
|
||||
|
||||
# Get services in a stack
|
||||
# Print the service names defined in a stack's docker-compose.yml, one
# per line.
# Arguments: $1 - stack directory name under $EZ_HOME/docker-compose
# Returns:   1 when the compose file does not exist; otherwise 0
get_stack_services() {
  local stack="$1"
  local compose_file="$EZ_HOME/docker-compose/$stack/docker-compose.yml"

  if [[ ! -f "$compose_file" ]]; then
    return 1
  fi

  # Extract service names from docker-compose.yml
  # Look for lines that start at column 0 followed by a service name
  # NOTE(review): the grep pattern anchors on a single leading space; a
  # standard two-space YAML indent ("  web:") would not match because the
  # second character is a space. Confirm the repo's compose indentation —
  # this may need '^  ' instead.
  # NOTE(review): '\s' in sed is a GNU extension — fine on Debian/Ubuntu
  # targets, not portable to BSD sed.
  sed -n '/^services:/,/^[^ ]/p' "$compose_file" 2>/dev/null | \
    grep '^ [a-zA-Z0-9_-]\+:' | \
    sed 's/^\s*//' | sed 's/:.*$//' || true
}
|
||||
|
||||
# Check if a service is running
|
||||
# True when docker reports a running container named exactly $1.
is_service_running() {
  local name=$1

  docker ps --filter "name=$name" --filter "status=running" --format "{{.Names}}" \
    | grep -q "^${name}$"
}
|
||||
|
||||
# Find all available services across all stacks
|
||||
# Emit the union of all service names across every stack under
# $EZ_HOME/docker-compose, deduplicated and sorted, one per line.
find_all_services() {
  local dir service
  local -a seen=() compose_dirs stack_services

  # Every directory that contains a docker-compose.yml is a stack.
  mapfile -t compose_dirs < <(find "$EZ_HOME/docker-compose" -name "docker-compose.yml" -type f -exec dirname {} \; 2>/dev/null)

  for dir in "${compose_dirs[@]}"; do
    mapfile -t stack_services < <(get_stack_services "$(basename "$dir")")

    for service in "${stack_services[@]}"; do
      # Skip names already recorded.
      case " ${seen[*]} " in
        *" ${service} "*) ;;
        *) seen+=("$service") ;;
      esac
    done
  done

  printf '%s\n' "${seen[@]}" | sort
}
|
||||
|
||||
# Find which stack a service belongs to
|
||||
# Print the directory of the stack whose compose file defines service $1.
# Returns 1 (printing nothing) when no stack contains it.
find_service_stack() {
  local wanted=$1
  local dir candidate
  local -a dirs names

  mapfile -t dirs < <(find "$EZ_HOME/docker-compose" -name "docker-compose.yml" -type f -exec dirname {} \; 2>/dev/null)

  for dir in "${dirs[@]}"; do
    mapfile -t names < <(get_stack_services "$(basename "$dir")")

    for candidate in "${names[@]}"; do
      if [[ "$candidate" == "$wanted" ]]; then
        echo "$dir"
        return 0
      fi
    done
  done

  return 1
}
|
||||
|
||||
# Get service compose file
|
||||
# Print the docker-compose.yml path of the stack owning service $1.
# Returns 1 (printing nothing) when the service is unknown.
get_service_compose_file() {
  local stack_dir

  stack_dir=$(find_service_stack "$1")
  [[ -n "$stack_dir" ]] || return 1
  echo "$stack_dir/docker-compose.yml"
}
|
||||
@@ -1,324 +0,0 @@
|
||||
#!/bin/bash
|
||||
# EZ-Homelab Enhanced Setup Scripts - UI Library
|
||||
# Dialog/whiptail helper functions for consistent user interface
|
||||
|
||||
# Detect available UI tool
|
||||
# Detect available UI tool: prefer whiptail, fall back to dialog.
# UI_TOOL is consumed by every ui_* helper below.
if command_exists whiptail; then
  UI_TOOL="whiptail"
elif command_exists dialog; then
  UI_TOOL="dialog"
else
  echo "Error: Neither whiptail nor dialog is installed. Please install one of them."
  exit 1
fi

# UI configuration: default box geometry and chrome shared by all helpers.
UI_HEIGHT=20
UI_WIDTH=70
UI_TITLE="EZ-Homelab Setup"
UI_BACKTITLE="EZ-Homelab Enhanced Setup Scripts v1.0"

# Colors (for dialog) — whiptail does not read DIALOGRC.
# NOTE(review): assumes SCRIPT_DIR is set by the sourcing script — confirm.
if [[ "$UI_TOOL" == "dialog" ]]; then
  export DIALOGRC="$SCRIPT_DIR/lib/dialogrc"
fi
|
||||
|
||||
# =============================================================================
|
||||
# BASIC UI FUNCTIONS
|
||||
# =============================================================================
|
||||
|
||||
# Display a message box
|
||||
# Show a modal message box.
# $1 text; $2/$3 optional height/width (default UI_HEIGHT/UI_WIDTH).
ui_msgbox() {
  local body=$1
  local h=${2:-$UI_HEIGHT}
  local w=${3:-$UI_WIDTH}

  "$UI_TOOL" --backtitle "$UI_BACKTITLE" --title "$UI_TITLE" \
    --msgbox "$body" "$h" "$w"
}
|
||||
|
||||
# Display a yes/no question
|
||||
# Show a yes/no question; exit status reflects the user's answer.
# $1 text; $2/$3 optional height/width.
ui_yesno() {
  local question=$1
  local h=${2:-$UI_HEIGHT}
  local w=${3:-$UI_WIDTH}

  "$UI_TOOL" --backtitle "$UI_BACKTITLE" --title "$UI_TITLE" \
    --yesno "$question" "$h" "$w"
}
|
||||
|
||||
# Get user input
|
||||
# Prompt for free-form input; echoes the entered value.
# $1 prompt; $2 optional default; $3/$4 optional height/width.
ui_inputbox() {
  local prompt=$1
  local preset=${2:-}
  local h=${3:-$UI_HEIGHT}
  local w=${4:-$UI_WIDTH}

  "$UI_TOOL" --backtitle "$UI_BACKTITLE" --title "$UI_TITLE" \
    --inputbox "$prompt" "$h" "$w" "$preset" 2>&1
}
|
||||
|
||||
# Display a menu
|
||||
# Show a single-choice menu; echoes the selected tag.
# $1 text; $2 height; $3 width; remaining args: tag/description pairs.
# BUG FIX: was `shift 2`, which left $width in the item list — the menu
# gained a spurious first entry and the computed menu height was wrong.
ui_menu() {
  local text="$1"
  local height="${2:-$UI_HEIGHT}"
  local width="${3:-$UI_WIDTH}"
  shift 3

  local menu_items=("$@")
  # Two array entries (tag + description) per visible menu row.
  local menu_height=$(( ${#menu_items[@]} / 2 ))

  "$UI_TOOL" --backtitle "$UI_BACKTITLE" --title "$UI_TITLE" \
    --menu "$text" "$height" "$width" "$menu_height" \
    "${menu_items[@]}" 2>&1
}
|
||||
|
||||
# Display a checklist
|
||||
# Show a multi-select checklist; echoes the chosen tags.
# $1 text; $2 height; $3 width; remaining args: tag/description/state triples.
# BUG FIX: was `shift 2`, which left $width in the item list — items were
# shifted off by one and the list height was miscomputed.
ui_checklist() {
  local text="$1"
  local height="${2:-$UI_HEIGHT}"
  local width="${3:-$UI_WIDTH}"
  shift 3

  local checklist_items=("$@")
  # Three array entries (tag + description + on/off) per visible row.
  local list_height=$(( ${#checklist_items[@]} / 3 ))

  "$UI_TOOL" --backtitle "$UI_BACKTITLE" --title "$UI_TITLE" \
    --checklist "$text" "$height" "$width" "$list_height" \
    "${checklist_items[@]}" 2>&1
}
|
||||
|
||||
# Display a radiolist
|
||||
# Show a single-select radiolist; echoes the chosen tag.
# $1 text; $2 height; $3 width; remaining args: tag/description/state triples.
# BUG FIX: was `shift 2`, which left $width in the item list — items were
# shifted off by one and the list height was miscomputed.
ui_radiolist() {
  local text="$1"
  local height="${2:-$UI_HEIGHT}"
  local width="${3:-$UI_WIDTH}"
  shift 3

  local radiolist_items=("$@")
  # Three array entries (tag + description + on/off) per visible row.
  local list_height=$(( ${#radiolist_items[@]} / 3 ))

  "$UI_TOOL" --backtitle "$UI_BACKTITLE" --title "$UI_TITLE" \
    --radiolist "$text" "$height" "$width" "$list_height" \
    "${radiolist_items[@]}" 2>&1
}
|
||||
|
||||
# Display progress gauge
|
||||
# Render a one-shot progress gauge showing $2 percent.
# $1 label; $2 percent (default 0); $3/$4 optional height/width.
ui_gauge() {
  local label=$1
  local pct=${2:-0}
  local h=${3:-$UI_HEIGHT}
  local w=${4:-$UI_WIDTH}

  # The gauge widget reads percentage updates from stdin.
  printf '%s\n%s\n' "$pct" "$label" | \
    "$UI_TOOL" --backtitle "$UI_BACKTITLE" --title "$UI_TITLE" \
      --gauge "$label" "$h" "$w" 0
}
|
||||
|
||||
# =============================================================================
|
||||
# ADVANCED UI FUNCTIONS
|
||||
# =============================================================================
|
||||
|
||||
# Display progress with updating percentage
|
||||
# Run a shell command (given as a string, eval'd) while showing a gauge.
# Any "NN%" found in the command's output updates the gauge percentage;
# the gauge is driven to 100 when the command finishes.
# $1 title; $2 command string; $3/$4 optional height/width.
ui_progress() {
  local title="$1"
  local command="$2"
  local height="${3:-$UI_HEIGHT}"
  local width="${4:-$UI_WIDTH}"

  {
    eval "$command" | while IFS= read -r line; do
      # Try to extract percentage from output
      if [[ "$line" =~ ([0-9]+)% ]]; then
        echo "${BASH_REMATCH[1]}"
      fi
      echo "$line" >&2
    done
    echo "100"
  } 2>&1 | "$UI_TOOL" --backtitle "$UI_BACKTITLE" --title "$UI_TITLE" \
    --gauge "$title" "$height" "$width" 0
  # NOTE(review): the `2>&1` folds the raw output lines (sent to stderr
  # above) into the gauge's stdin alongside the extracted percentages;
  # non-numeric lines reaching the gauge protocol look unintended — confirm.
}
|
||||
|
||||
# Display a form with multiple fields
|
||||
# Show a multi-field form; echoes the entered values, one per line.
# $1 text; $2 height; $3 width; remaining args: dialog --form field tuples.
# BUG FIX: was `shift 2`, which left $width in the field list — every
# field tuple was shifted off by one.
ui_form() {
  local text="$1"
  local height="${2:-$UI_HEIGHT}"
  local width="${3:-$UI_WIDTH}"
  shift 3

  local form_items=("$@")
  # NOTE(review): dialog --form fields are 8-value tuples; dividing by 2
  # yields an oversized (but accepted) visible-rows value. Confirm whether
  # this should be / 8. Also: whiptail has no --form — dialog only.
  local form_height=$(( ${#form_items[@]} / 2 ))

  "$UI_TOOL" --backtitle "$UI_BACKTITLE" --title "$UI_TITLE" \
    --form "$text" "$height" "$width" "$form_height" \
    "${form_items[@]}" 2>&1
}
|
||||
|
||||
# Display password input (hidden)
|
||||
# Prompt for a password with hidden input; echoes the entered value.
# $1 prompt; $2/$3 optional height/width.
ui_password() {
  local prompt=$1
  local h=${2:-$UI_HEIGHT}
  local w=${3:-$UI_WIDTH}

  "$UI_TOOL" --backtitle "$UI_BACKTITLE" --title "$UI_TITLE" \
    --passwordbox "$prompt" "$h" "$w" 2>&1
}
|
||||
|
||||
# =============================================================================
|
||||
# EZ-HOMELAB SPECIFIC UI FUNCTIONS
|
||||
# =============================================================================
|
||||
|
||||
# Display deployment type selection
|
||||
# Radiolist of deployment modes; echoes "core", "single" or "remote".
ui_select_deployment_type() {
  local choices=(
    "core" "Core Only" "off"
    "single" "Single Server (Core + Infrastructure + Services)" "on"
    "remote" "Remote Server (Infrastructure + Services only)" "off"
  )

  ui_radiolist "Select your deployment type:" "$UI_HEIGHT" "$UI_WIDTH" "${choices[@]}"
}
|
||||
|
||||
# Display service selection checklist
|
||||
# Checklist of deployable service groups appropriate for the given
# deployment type ("core", "single" or "remote"); echoes the chosen tags.
ui_select_services() {
  local mode="$1"
  local choices=()

  case "$mode" in
    "core")
      choices=(
        "duckdns" "DuckDNS (Dynamic DNS)" "on"
        "traefik" "Traefik (Reverse Proxy)" "on"
        "authelia" "Authelia (SSO Authentication)" "on"
        "gluetun" "Gluetun (VPN Client)" "on"
        "sablier" "Sablier (Lazy Loading)" "on"
      )
      ;;
    "single")
      choices=(
        "core" "Core Services" "on"
        "infrastructure" "Infrastructure (Dockge, Pi-hole)" "on"
        "dashboards" "Dashboards (Homepage, Homarr)" "on"
        "media" "Media Services (Plex, Jellyfin)" "off"
        "media-management" "Media Management (*arr services)" "off"
        "homeassistant" "Home Assistant Stack" "off"
        "productivity" "Productivity (Nextcloud, Gitea)" "off"
        "monitoring" "Monitoring (Grafana, Prometheus)" "off"
        "utilities" "Utilities (Duplicati, FreshRSS)" "off"
      )
      ;;
    "remote")
      choices=(
        "infrastructure" "Infrastructure (Dockge, Pi-hole)" "on"
        "dashboards" "Dashboards (Homepage, Homarr)" "on"
        "media" "Media Services (Plex, Jellyfin)" "off"
        "media-management" "Media Management (*arr services)" "off"
        "homeassistant" "Home Assistant Stack" "off"
        "productivity" "Productivity (Nextcloud, Gitea)" "off"
        "monitoring" "Monitoring (Grafana, Prometheus)" "off"
        "utilities" "Utilities (Duplicati, FreshRSS)" "off"
      )
      ;;
  esac

  ui_checklist "Select services to deploy:" "$UI_HEIGHT" "$UI_WIDTH" "${choices[@]}"
}
|
||||
|
||||
# Display environment configuration form
|
||||
# Form collecting the base environment values (domain, timezone, uid/gid);
# echoes the entered values one per line.
ui_configure_environment() {
  # dialog --form tuples: label, label-y, label-x, value, value-y, value-x,
  # field length, input limit.
  local fields=(
    "Domain" 1 1 "" 1 20 50 0
    "Timezone" 2 1 "America/New_York" 2 20 50 0
    "PUID" 3 1 "1000" 3 20 50 0
    "PGID" 4 1 "1000" 4 20 50 0
  )

  ui_form "Configure your environment:" "$UI_HEIGHT" "$UI_WIDTH" "${fields[@]}"
}
|
||||
|
||||
# Display confirmation dialog
|
||||
# Ask "Confirm <action>?", appending optional details ($2) after a blank
# line; exit status reflects the user's answer.
ui_confirm_action() {
  local prompt="Confirm $1?"
  local details="${2:-}"

  [[ -n "$details" ]] && prompt="$prompt\n\n$details"

  ui_yesno "$prompt"
}
|
||||
|
||||
# Display error and offer retry
|
||||
# Report an error (with optional suggestion $2) and ask whether to retry;
# exit status reflects the user's answer.
ui_error_retry() {
  local message="Error: $1"
  local hint="${2:-}"

  [[ -n "$hint" ]] && message="$message\n\nSuggestion: $hint"
  message="$message\n\nWould you like to retry?"

  ui_yesno "$message"
}
|
||||
|
||||
# Display success message
|
||||
# Show a success message box with the given message.
ui_success() {
  ui_msgbox "Success!\n\n$1"
}
|
||||
|
||||
# =============================================================================
|
||||
# UTILITY FUNCTIONS
|
||||
# =============================================================================
|
||||
|
||||
# Check if UI is available (for non-interactive mode)
|
||||
# True when an interactive UI can be shown: a display or TERM is present
# and TERM is not "dumb".
# Fixes: `$TERM` was unguarded, so an unset TERM aborted the script under
# `set -u`; the implicit (DISPLAY || TERM) && TERM-check grouping from
# `||`/`&&` left-to-right evaluation is now written explicitly.
ui_available() {
  { [[ -n "${DISPLAY:-}" ]] || [[ -n "${TERM:-}" ]]; } && [[ "${TERM:-}" != "dumb" ]]
}
|
||||
|
||||
# Run command with UI progress if available
|
||||
# Run $2 (a shell command string) behind a progress gauge when an
# interactive UI is usable; otherwise announce $1 and run it plainly.
run_with_progress() {
  local label=$1
  local cmd=$2

  if ui_available; then
    ui_progress "$label" "$cmd"
    return
  fi

  print_info "$label"
  eval "$cmd"
}
|
||||
|
||||
# Display help text
|
||||
# Show standard usage help for a script inside a message box.
# BUG FIX: the help text used to be piped into ui_msgbox, but ui_msgbox
# ignores stdin — its first argument is the box text. The title string was
# displayed instead and the help body was never shown. The help text is
# now passed as the text argument.
ui_show_help() {
  local script_name="$1"
  local help_text="
EZ-Homelab $script_name

USAGE:
  $script_name [OPTIONS]

OPTIONS:
  -h, --help     Show this help message
  -v, --verbose  Enable verbose logging
  -y, --yes      Assume yes for all prompts
  --no-ui        Run without interactive UI

EXAMPLES:
  $script_name           # Interactive mode
  $script_name --no-ui   # Non-interactive mode
  $script_name --help    # Show help

For more information, visit:
https://github.com/your-repo/EZ-Homelab
"

  ui_msgbox "$help_text" 20 70
}
|
||||
@@ -1,296 +0,0 @@
|
||||
#!/bin/bash
|
||||
# EZ-Homelab Enhanced Setup Scripts - Configuration Localization
|
||||
# Replace template variables in service configurations with environment values
|
||||
|
||||
# Script identity used in logging and help output.
SCRIPT_NAME="localize"
SCRIPT_VERSION="1.0.0"

# Load common library
source "$(dirname "${BASH_SOURCE[0]}")/lib/common.sh"
source "$(dirname "${BASH_SOURCE[0]}")/lib/ui.sh"

# =============================================================================
# SCRIPT CONFIGURATION
# =============================================================================

# Template variables to replace: every ${VAR} occurrence of these names in
# the processed config files is substituted with the variable's value from
# the sourced .env.
TEMPLATE_VARS=("DOMAIN" "TZ" "PUID" "PGID" "DUCKDNS_TOKEN" "DUCKDNS_SUBDOMAINS" "AUTHELIA_JWT_SECRET" "AUTHELIA_SESSION_SECRET" "AUTHELIA_STORAGE_ENCRYPTION_KEY" "DEFAULT_EMAIL" "SERVER_IP" "JWT_SECRET" "SESSION_SECRET" "ENCRYPTION_KEY" "AUTHELIA_ADMIN_PASSWORD" "AUTHELIA_ADMIN_EMAIL" "PLEX_CLAIM_TOKEN" "DEPLOYMENT_TYPE" "SERVER_HOSTNAME" "DOCKER_SOCKET_PATH")

# File extensions to process when searching for template files.
TEMPLATE_EXTENSIONS=("yml" "yaml" "json" "conf" "cfg" "env")
|
||||
|
||||
# =============================================================================
|
||||
# LOCALIZATION FUNCTIONS
|
||||
# =============================================================================
|
||||
|
||||
# Load environment variables
|
||||
# Source $EZ_HOME/.env, exporting every variable it defines (set -a).
# Returns 1 with guidance when the file is missing.
load_environment() {
  local env_file="$EZ_HOME/.env"

  [[ -f "$env_file" ]] || {
    print_error ".env file not found at $env_file"
    print_error "Run ./pre-deployment-wizard.sh first"
    return 1
  }

  # Export everything the file assigns.
  set -a
  # shellcheck disable=SC1090
  source "$env_file"
  set +a

  print_success "Environment loaded from $env_file"
  return 0
}
|
||||
|
||||
# Find template files
|
||||
# Print template candidate files (one per line) for one service, or for
# every service under $EZ_HOME/docker-compose when $1 is omitted.
#
# Fixes over the previous version:
#   * the find -name patterns are built from TEMPLATE_EXTENSIONS instead
#     of a hard-coded list duplicated in both branches (which could drift
#     out of sync with the configured extensions);
#   * nothing is printed when no files match — the bare `printf '%s\n'`
#     used to emit one empty line, which mapfile-based callers counted as
#     a (nonexistent) file;
#   * the missing-directory error goes to stderr so callers capturing
#     stdout via command substitution don't treat it as a file name.
find_template_files() {
  local service="${1:-}"
  local search_root="$EZ_HOME/docker-compose"

  if [[ -n "$service" ]]; then
    search_root="$EZ_HOME/docker-compose/$service"
    if [[ ! -d "$search_root" ]]; then
      print_error "Service directory not found: $search_root" >&2
      return 1
    fi
  fi

  # Assemble: \( -name "*.yml" -o -name "*.yaml" ... \)
  local -a name_args=()
  local ext
  for ext in "${TEMPLATE_EXTENSIONS[@]}"; do
    (( ${#name_args[@]} )) && name_args+=(-o)
    name_args+=(-name "*.${ext}")
  done

  local -a files=()
  local file
  while IFS= read -r -d '' file; do
    files+=("$file")
  done < <(find "$search_root" -type f \( "${name_args[@]}" \) -print0 2>/dev/null)

  (( ${#files[@]} )) && printf '%s\n' "${files[@]}"
  return 0
}
|
||||
|
||||
# Check if file contains template variables
|
||||
# True when file $1 contains at least one ${VAR} placeholder for any
# variable listed in TEMPLATE_VARS.
file_has_templates() {
  local target=$1
  local var

  for var in "${TEMPLATE_VARS[@]}"; do
    grep -q "\${$var}" "$target" 2>/dev/null && return 0
  done

  return 1
}
|
||||
|
||||
# Process template file
|
||||
# Substitute ${VAR} placeholders in file $1 with values from the current
# environment (for every VAR in TEMPLATE_VARS that is set and non-empty).
# The untouched original is preserved alongside as "<file>.template" the
# first time the file is processed.
#
# BUG FIX: the replacement value is now escaped for sed, so values that
# contain '|' (the s||| delimiter), '&' (whole-match backreference) or
# backslashes no longer corrupt the substitution or abort sed.
process_template_file() {
  local file="$1"
  local backup="${file}.template"

  print_info "Processing: $file"

  # Nothing to do when the file carries no known placeholders.
  if ! file_has_templates "$file"; then
    print_info "No templates found in $file"
    return 0
  fi

  # Keep a pristine copy the first time we touch this file.
  if [[ ! -f "$backup" ]]; then
    cp "$file" "$backup"
    print_info "Backed up original to $backup"
  fi

  # Work on a temporary copy so a partial failure can't leave the target
  # half-substituted.
  local temp_file
  temp_file=$(mktemp)
  cp "$file" "$temp_file"

  local var value escaped
  for var in "${TEMPLATE_VARS[@]}"; do
    value="${!var:-}"
    if [[ -n "$value" ]]; then
      # Escape sed-special characters in the replacement text.
      escaped=${value//\\/\\\\}
      escaped=${escaped//|/\\|}
      escaped=${escaped//&/\\&}
      sed -i "s|\${$var}|$escaped|g" "$temp_file"
      log_debug "Replaced \${$var} with $value in $file"
    else
      log_warn "Variable $var not set, leaving template as-is"
    fi
  done

  # Atomically install the processed copy.
  mv "$temp_file" "$file"

  print_success "Processed $file"
  return 0
}
|
||||
|
||||
# Validate processed files
|
||||
# Run validate_yaml over every *.yml/*.yaml in the argument list; report
# each failure and return 1 when any file is invalid.
validate_processed_files() {
  local -a targets=("$@")
  local bad=0
  local f

  print_info "Validating processed files..."

  for f in "${targets[@]}"; do
    case "$f" in
      *.yml|*.yaml)
        if ! validate_yaml "$f"; then
          print_error "Invalid YAML in $f"
          bad=$((bad + 1))
        fi
        ;;
    esac
  done

  if (( bad > 0 )); then
    print_error "Validation failed for $bad file(s)"
    return 1
  fi

  print_success "All files validated successfully"
  return 0
}
|
||||
|
||||
# =============================================================================
|
||||
# UI FUNCTIONS
|
||||
# =============================================================================
|
||||
|
||||
# Show progress for batch processing
|
||||
# Update the progress gauge during batch processing.
# $1 total files; $2 files completed so far; $3 current file (unused in
# the gauge text but kept for the call signature).
show_localization_progress() {
  local total=$1
  local done_count=$2
  local current=$3

  ui_gauge "Processing templates... ($done_count/$total)" $(( done_count * 100 / total ))
}
|
||||
|
||||
# =============================================================================
|
||||
# MAIN FUNCTION
|
||||
# =============================================================================
|
||||
|
||||
# Entry point for the localization script: parse CLI flags, load the
# .env environment, locate template files (optionally for one service),
# substitute their placeholders, then validate the results.
main() {
  local service=""
  local non_interactive=false
  local verbose=false
  local dry_run=false

  # Parse command line arguments
  while [[ $# -gt 0 ]]; do
    case $1 in
      -h|--help)
        cat << EOF
EZ-Homelab Configuration Localization

USAGE:
  $SCRIPT_NAME [OPTIONS] [SERVICE]

ARGUMENTS:
  SERVICE        Specific service to localize (optional, processes all if not specified)

OPTIONS:
  -h, --help     Show this help message
  -v, --verbose  Enable verbose logging
  --dry-run      Show what would be processed without making changes
  --no-ui        Run without interactive UI

EXAMPLES:
  $SCRIPT_NAME           # Process all services
  $SCRIPT_NAME traefik   # Process only Traefik
  $SCRIPT_NAME --dry-run # Show what would be changed

EOF
        exit 0
        ;;
      -v|--verbose)
        verbose=true
        ;;
      --dry-run)
        dry_run=true
        ;;
      --no-ui)
        non_interactive=true
        ;;
      -*)
        print_error "Unknown option: $1"
        echo "Use --help for usage information"
        exit 1
        ;;
      *)
        # First bare argument is the service name; a second is an error.
        if [[ -z "$service" ]]; then
          service="$1"
        else
          print_error "Multiple services specified. Use only one service name."
          exit 1
        fi
        ;;
    esac
    shift
  done

  # Initialize script
  init_script "$SCRIPT_NAME"

  if $verbose; then
    set -x
  fi

  print_info "Starting EZ-Homelab configuration localization..."

  # Load environment
  if ! load_environment; then
    exit 1
  fi

  # Find template files
  # NOTE(review): find_template_files emits a bare blank line when nothing
  # matches, so this count can be 1 with a single empty entry — confirm.
  local files
  mapfile -t files < <(find_template_files "$service")
  if [[ ${#files[@]} -eq 0 ]]; then
    print_warning "No template files found"
    exit 0
  fi

  print_info "Found ${#files[@]} template file(s) to process"

  if $dry_run; then
    print_info "DRY RUN - Would process the following files:"
    printf '%s\n' "${files[@]}"
    exit 0
  fi

  # Process files
  local processed=0
  local total=${#files[@]}

  for file in "${files[@]}"; do
    # Show the gauge only when interactive and a UI is usable.
    if ui_available && ! $non_interactive; then
      show_localization_progress "$total" "$processed" "$file"
    fi

    if process_template_file "$file"; then
      processed=$((processed + 1))
    fi
  done

  # Close progress gauge
  if ui_available && ! $non_interactive; then
    ui_gauge "Processing complete!" 100
    sleep 1
  fi

  # Validate processed files
  if ! validate_processed_files "${files[@]}"; then
    print_error "Some processed files failed validation"
    print_error "Check the log file: $LOG_FILE"
    exit 1
  fi

  echo ""
  print_success "Configuration localization complete!"
  print_info "Processed $processed file(s)"
  print_info "Templates backed up with .template extension"
  print_info "Next step: ./validate.sh"

  exit 0
}
|
||||
|
||||
# Run main function
|
||||
main "$@"
|
||||
@@ -1,732 +0,0 @@
|
||||
#!/bin/bash
|
||||
# EZ-Homelab Enhanced Setup - Main Menu
|
||||
# Unified interface for all EZ-Homelab setup and management operations
|
||||
|
||||
# Script identity used in logging and help output.
SCRIPT_NAME="ez-homelab"
SCRIPT_VERSION="1.0.0"

# Load common library
source "$(dirname "${BASH_SOURCE[0]}")/lib/common.sh"
source "$(dirname "${BASH_SOURCE[0]}")/lib/ui.sh"
|
||||
|
||||
# =============================================================================
|
||||
# MENU CONFIGURATION
|
||||
# =============================================================================
|
||||
|
||||
# Menu options
|
||||
# Menu options: each array lists the entries of one menu screen; the
# entry's 1-based index is the choice number handled by the corresponding
# handle_*_menu function, and the last entry always returns/exits.
MAIN_MENU_OPTIONS=(
  "System Setup & Validation"
  "Configuration Management"
  "Deployment & Services"
  "Monitoring & Maintenance"
  "Backup & Recovery"
  "Updates & Maintenance"
  "Advanced Options"
  "Exit"
)

# System Setup & Validation submenu.
SYSTEM_MENU_OPTIONS=(
  "Run System Preflight Check"
  "Install & Configure Docker"
  "Validate Docker Installation"
  "Check System Resources"
  "Back to Main Menu"
)

# Configuration Management submenu.
CONFIG_MENU_OPTIONS=(
  "Interactive Pre-deployment Wizard"
  "Localize Configuration Templates"
  "Generalize Configuration Files"
  "Validate All Configurations"
  "Show Current Configuration Status"
  "Back to Main Menu"
)

# Deployment & Services submenu.
DEPLOY_MENU_OPTIONS=(
  "Deploy Core Services"
  "Deploy Infrastructure Services"
  "Deploy Monitoring Stack"
  "Deploy Media Services"
  "Deploy Productivity Services"
  "Deploy All Services"
  "Show Deployment Status"
  "Back to Main Menu"
)

# Monitoring & Maintenance submenu.
MONITOR_MENU_OPTIONS=(
  "Show Monitoring Dashboard"
  "Monitor Service Health"
  "Monitor System Resources"
  "View Service Logs"
  "Continuous Monitoring Mode"
  "Back to Main Menu"
)

# Backup & Recovery submenu.
BACKUP_MENU_OPTIONS=(
  "Backup Configuration Files"
  "Backup Docker Volumes"
  "Backup System Logs"
  "Backup Everything"
  "List Available Backups"
  "Restore from Backup"
  "Setup Automated Backups"
  "Back to Main Menu"
)

# Updates & Maintenance submenu.
UPDATE_MENU_OPTIONS=(
  "Check for Service Updates"
  "Update Individual Service"
  "Update All Services"
  "Show Update History"
  "Monitor Update Progress"
  "Setup Automated Updates"
  "Back to Main Menu"
)

# Advanced Options submenu.
ADVANCED_MENU_OPTIONS=(
  "Service Management Console"
  "Docker Compose Operations"
  "Network Configuration"
  "SSL Certificate Management"
  "System Maintenance"
  "Troubleshooting Tools"
  "View Logs"
  "Back to Main Menu"
)
|
||||
|
||||
# =============================================================================
|
||||
# MENU DISPLAY FUNCTIONS
|
||||
# =============================================================================
|
||||
|
||||
# Show main menu
|
||||
# Render the static banner shown above the main menu.
show_main_menu() {
  cat <<'EOF'

╔══════════════════════════════════════════════════════════════╗
║                         EZ-HOMELAB SETUP                     ║
║                    Enhanced Management System                ║
╠══════════════════════════════════════════════════════════════╣
║  Welcome to EZ-Homelab! Choose an option below:              ║
╚══════════════════════════════════════════════════════════════╝

EOF
}
|
||||
|
||||
# Show system status header
|
||||
# Print a quick status summary above the menus: Docker daemon health,
# presence of the .env configuration, and running/total service counts.
show_system_status() {
  echo "System Status:"
  echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"

  # Docker status
  if docker_available; then
    echo "✅ Docker: Running"
  else
    echo "❌ Docker: Not available"
  fi

  # EZ-Homelab status
  if [[ -f "$EZ_HOME/.env" ]]; then
    echo "✅ Configuration: Found"
  else
    echo "⚠️ Configuration: Not found (run wizard first)"
  fi

  # Service count
  local service_count=0
  local running_count=0

  # Safely count services with error handling
  # NOTE(review): service_list is assigned without `local`, so it leaks
  # into the global scope — confirm nothing else relies on that.
  if service_list=$(find_all_services 2>/dev/null); then
    # wc -l counts lines; an empty list still yields 1 blank line, which
    # the digit-filtering below does not correct for.
    service_count=$(echo "$service_list" | wc -l 2>/dev/null || echo "0")
    # Remove whitespace and ensure it's a number
    service_count=$(echo "$service_count" | tr -d '[:space:]' | sed 's/[^0-9]*//g')
    if [[ "$service_count" =~ ^[0-9]+$ ]] && (( service_count > 0 )); then
      # Emit one line per running service and count them.
      running_count=$(echo "$service_list" | while read -r service; do is_service_running "$service" 2>/dev/null && echo "1"; done | wc -l 2>/dev/null || echo "0")
      running_count=$(echo "$running_count" | tr -d '[:space:]' | sed 's/[^0-9]*//g')
    fi
  fi

  if [[ "$service_count" =~ ^[0-9]+$ ]] && (( service_count > 0 )); then
    echo "✅ Services: $running_count/$service_count running"
  else
    echo "ℹ️ Services: None deployed yet"
  fi

  echo
}
|
||||
|
||||
# Generic menu display function
|
||||
# Draw a boxed, numbered menu: $1 is the title, remaining args are the
# menu entries (numbered from 1).
show_menu() {
  local heading=$1
  shift
  local entries=("$@")

  echo "╔══════════════════════════════════════════════════════════════╗"
  printf "║ %-60s ║\n" "$heading"
  echo "╠══════════════════════════════════════════════════════════════╣"

  local idx=1
  local entry
  for entry in "${entries[@]}"; do
    printf "║ %-2d. %-55s ║\n" "$idx" "$entry"
    idx=$((idx + 1))
  done

  echo "╚══════════════════════════════════════════════════════════════╝"
  echo
}
|
||||
|
||||
# =============================================================================
|
||||
# MENU HANDLER FUNCTIONS
|
||||
# =============================================================================
|
||||
|
||||
# Handle main menu selection
|
||||
# Dispatch a main-menu selection ($1) to its submenu.
# Choice 8 prints a farewell and exits the program; an unknown choice
# reports an error and returns 1.
handle_main_menu() {
  case "$1" in
    1) show_system_menu ;;
    2) show_config_menu ;;
    3) show_deploy_menu ;;
    4) show_monitor_menu ;;
    5) show_backup_menu ;;
    6) show_update_menu ;;
    7) show_advanced_menu ;;
    8)
      echo
      print_info "Thank you for using EZ-Homelab!"
      echo "For documentation, visit: https://github.com/kelinfoxy/EZ-Homelab"
      echo
      exit 0
      ;;
    *)
      print_error "Invalid choice. Please select 1-8."
      return 1
      ;;
  esac
}
|
||||
|
||||
# Handle system menu
|
||||
# Execute a System Setup submenu action ($1) and pause for the user.
# Choice 5 returns immediately (back to main menu); an unknown choice
# reports an error and returns 1 without pausing.
handle_system_menu() {
  case "$1" in
    1)
      print_info "Running system preflight check..."
      # preflight may exit non-zero; don't let `set -e` kill the menu.
      set +e
      ./preflight.sh
      set -e
      ;;
    2)
      print_info "Installing and configuring Docker..."
      ./setup.sh --no-ui --skip-nvidia
      ;;
    3)
      print_info "Validating Docker installation..."
      if docker_available; then
        print_success "Docker is properly installed and running"
        docker --version
        docker compose version
      else
        print_error "Docker is not available"
      fi
      ;;
    4)
      print_info "Checking system resources..."
      echo "CPU Cores: $(nproc)"
      echo "Total Memory: $(get_total_memory) MB"
      echo "Available Memory: $(get_available_memory) MB"
      echo "Disk Space: $(get_disk_space) GB available"
      echo "Architecture: $ARCH"
      echo "OS: $OS_NAME $OS_VERSION"
      ;;
    5) return 0 ;; # Back to main menu
    *)
      print_error "Invalid choice. Please select 1-5."
      return 1
      ;;
  esac

  echo
  read -rp "Press Enter to continue..."
}
|
||||
|
||||
# Handle configuration menu
|
||||
# Execute a Configuration Management submenu action ($1) and pause.
# Choice 6 returns immediately (back to main menu); an unknown choice
# reports an error and returns 1 without pausing.
# BUG FIX: the missing-whiptail path used `continue`, which is invalid
# outside a loop inside a function body (bash warns; POSIX behavior is
# unspecified). It now returns to the caller's menu loop with `return 0`.
handle_config_menu() {
  local choice="$1"

  case "$choice" in
    1)
      print_info "Starting interactive pre-deployment wizard..."
      print_info "The wizard will run and return to this menu when complete."
      echo
      # The wizard needs whiptail or dialog; bail out early otherwise.
      if ! command_exists whiptail && ! command_exists dialog; then
        print_error "Pre-deployment wizard requires whiptail or dialog to be installed"
        print_info "Install with: sudo apt install whiptail"
        read -rp "Press Enter to continue..."
        return 0
      fi
      read -rp "Press Enter to continue..."
      if ./pre-deployment-wizard.sh; then
        print_success "Pre-deployment wizard completed successfully"
      else
        print_error "Pre-deployment wizard exited with an error"
      fi
      ;;
    2)
      print_info "Localizing configuration templates..."
      ./localize.sh || true
      ;;
    3)
      print_info "Generalizing configuration files..."
      ./generalize.sh --method restore --no-ui || true
      ;;
    4)
      print_info "Validating all configurations..."
      ./validate.sh || true
      ;;
    5)
      print_info "Current configuration status:"
      if [[ -f "$EZ_HOME/.env" ]]; then
        echo "✅ Environment file found"
        grep -E "^[A-Z_]+" "$EZ_HOME/.env" | head -10
        echo "... (showing first 10 variables)"
      else
        echo "❌ No environment file found"
      fi

      if [[ -d "$EZ_HOME/docker-compose" ]]; then
        echo "✅ Docker compose directory found"
        local template_count
        template_count=$(find "$EZ_HOME/docker-compose" -name "*.template" | wc -l)
        echo "📄 Template files: $template_count"
      else
        echo "❌ Docker compose directory not found"
      fi
      ;;
    6) return 0 ;; # Back to main menu
    *)
      print_error "Invalid choice. Please select 1-6."
      return 1
      ;;
  esac

  echo
  read -rp "Press Enter to continue..."
}
|
||||
|
||||
# Handle deployment menu
|
||||
handle_deploy_menu() {
|
||||
local choice="$1"
|
||||
|
||||
case "$choice" in
|
||||
1)
|
||||
print_info "Deploying core services..."
|
||||
./deploy.sh core
|
||||
;;
|
||||
2)
|
||||
print_info "Deploying infrastructure services..."
|
||||
./deploy.sh infrastructure
|
||||
;;
|
||||
3)
|
||||
print_info "Deploying monitoring stack..."
|
||||
./deploy.sh monitoring
|
||||
;;
|
||||
4)
|
||||
print_info "Deploying media services..."
|
||||
./deploy.sh media
|
||||
;;
|
||||
5)
|
||||
print_info "Deploying productivity services..."
|
||||
./deploy.sh productivity
|
||||
;;
|
||||
6)
|
||||
print_info "Deploying all services..."
|
||||
./deploy.sh all
|
||||
;;
|
||||
7)
|
||||
print_info "Showing deployment status..."
|
||||
./deploy.sh status
|
||||
;;
|
||||
8) return 0 ;; # Back to main menu
|
||||
*)
|
||||
print_error "Invalid choice. Please select 1-8."
|
||||
return 1
|
||||
;;
|
||||
esac
|
||||
|
||||
echo
|
||||
read -rp "Press Enter to continue..."
|
||||
}
|
||||
|
||||
# Handle monitoring menu
|
||||
handle_monitor_menu() {
|
||||
local choice="$1"
|
||||
|
||||
case "$choice" in
|
||||
1)
|
||||
print_info "Showing monitoring dashboard..."
|
||||
./monitor.sh dashboard || true
|
||||
;;
|
||||
2)
|
||||
print_info "Monitoring service health..."
|
||||
./monitor.sh check || true
|
||||
;;
|
||||
3)
|
||||
print_info "Monitoring system resources..."
|
||||
echo "System Resources:"
|
||||
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
|
||||
./monitor.sh check | grep -E "(CPU|Memory|Disk)" || true
|
||||
;;
|
||||
4)
|
||||
echo "Available services:"
|
||||
./service.sh list | grep "✅ Running" | head -10 || true
|
||||
echo
|
||||
read -rp "Enter service name to view logs (or press Enter to skip): " service_name
|
||||
if [[ -n "$service_name" ]]; then
|
||||
./service.sh logs "$service_name" -n 20 || true
|
||||
fi
|
||||
;;
|
||||
5)
|
||||
print_info "Starting continuous monitoring (Ctrl+C to stop)..."
|
||||
./monitor.sh watch -i 30 || true
|
||||
;;
|
||||
6) return 0 ;; # Back to main menu
|
||||
*)
|
||||
print_error "Invalid choice. Please select 1-6."
|
||||
return 1
|
||||
;;
|
||||
esac
|
||||
|
||||
echo
|
||||
read -rp "Press Enter to continue..."
|
||||
}
|
||||
|
||||
# Handle backup menu
|
||||
handle_backup_menu() {
|
||||
local choice="$1"
|
||||
|
||||
case "$choice" in
|
||||
1)
|
||||
print_info "Backing up configuration files..."
|
||||
./backup.sh config || true
|
||||
;;
|
||||
2)
|
||||
print_info "Backing up Docker volumes..."
|
||||
./backup.sh volumes || true
|
||||
;;
|
||||
3)
|
||||
print_info "Backing up system logs..."
|
||||
./backup.sh logs || true
|
||||
;;
|
||||
4)
|
||||
print_info "Backing up everything..."
|
||||
./backup.sh all || true
|
||||
;;
|
||||
5)
|
||||
echo "Available backups:"
|
||||
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
|
||||
./backup.sh list config || true
|
||||
./backup.sh list volumes || true
|
||||
./backup.sh list logs || true
|
||||
;;
|
||||
6)
|
||||
echo "Available backups:"
|
||||
./backup.sh list config | grep -E "\.tar\.gzip$" | tail -5 || true
|
||||
echo
|
||||
read -rp "Enter backup filename to restore (or press Enter to skip): " backup_file
|
||||
if [[ -n "$backup_file" ]]; then
|
||||
./backup.sh config --restore "$backup_file" || true
|
||||
fi
|
||||
;;
|
||||
7)
|
||||
print_info "Setting up automated backups..."
|
||||
./backup.sh schedule || true
|
||||
;;
|
||||
8) return 0 ;; # Back to main menu
|
||||
*)
|
||||
print_error "Invalid choice. Please select 1-8."
|
||||
return 1
|
||||
;;
|
||||
esac
|
||||
|
||||
echo
|
||||
read -rp "Press Enter to continue..."
|
||||
}
|
||||
|
||||
# Handle update menu
|
||||
handle_update_menu() {
|
||||
local choice="$1"
|
||||
|
||||
case "$choice" in
|
||||
1)
|
||||
print_info "Checking for service updates..."
|
||||
./update.sh check || true
|
||||
;;
|
||||
2)
|
||||
echo "Available services:"
|
||||
./service.sh list | grep "✅ Running" | head -10 || true
|
||||
echo
|
||||
read -rp "Enter service name to update (or press Enter to skip): " service_name
|
||||
if [[ -n "$service_name" ]]; then
|
||||
./update.sh update "$service_name" || true
|
||||
fi
|
||||
;;
|
||||
3)
|
||||
print_warning "This will update all services. Continue? (y/N): "
|
||||
read -r response
|
||||
if [[ "$response" =~ ^[Yy]$ ]]; then
|
||||
./update.sh update all || true
|
||||
fi
|
||||
;;
|
||||
4)
|
||||
print_info "Showing update history..."
|
||||
./update.sh status || true
|
||||
;;
|
||||
5)
|
||||
print_info "Monitoring update progress..."
|
||||
./update.sh monitor || true
|
||||
;;
|
||||
6)
|
||||
print_info "Setting up automated updates..."
|
||||
./update.sh schedule || true
|
||||
;;
|
||||
7) return 0 ;; # Back to main menu
|
||||
*)
|
||||
print_error "Invalid choice. Please select 1-7."
|
||||
return 1
|
||||
;;
|
||||
esac
|
||||
|
||||
echo
|
||||
read -rp "Press Enter to continue..."
|
||||
}
|
||||
|
||||
# Handle advanced menu
|
||||
handle_advanced_menu() {
|
||||
local choice="$1"
|
||||
|
||||
case "$choice" in
|
||||
1)
|
||||
print_info "Service Management Console"
|
||||
echo "Available commands: start, stop, restart, status, logs, exec"
|
||||
echo "Example: start traefik, logs pihole, exec authelia bash"
|
||||
echo
|
||||
while true; do
|
||||
read -rp "service> " cmd
|
||||
if [[ "$cmd" == "exit" || "$cmd" == "quit" ]]; then
|
||||
break
|
||||
fi
|
||||
if [[ -n "$cmd" ]]; then
|
||||
./service.sh $cmd || true
|
||||
fi
|
||||
done
|
||||
;;
|
||||
2)
|
||||
print_info "Docker Compose Operations"
|
||||
echo "Available stacks:"
|
||||
find "$EZ_HOME/docker-compose" -name "docker-compose.yml" -exec dirname {} \; | xargs basename -a | sort
|
||||
echo
|
||||
read -rp "Enter stack name for compose operations (or press Enter to skip): " stack_name
|
||||
if [[ -n "$stack_name" ]]; then
|
||||
echo "Available operations: up, down, restart, logs, ps"
|
||||
read -rp "Operation: " operation
|
||||
if [[ -n "$operation" ]]; then
|
||||
(cd "$EZ_HOME/docker-compose/$stack_name" && docker compose "$operation")
|
||||
fi
|
||||
fi
|
||||
;;
|
||||
3)
|
||||
print_info "Network Configuration"
|
||||
echo "Docker Networks:"
|
||||
docker network ls
|
||||
echo
|
||||
echo "Container Ports:"
|
||||
docker ps --format "table {{.Names}}\t{{.Ports}}" | head -10
|
||||
;;
|
||||
4)
|
||||
print_info "SSL Certificate Management"
|
||||
if docker ps | grep -q traefik; then
|
||||
echo "Traefik SSL Certificates:"
|
||||
docker exec traefik traefik healthcheck 2>/dev/null || echo "Traefik health check failed"
|
||||
else
|
||||
echo "Traefik is not running"
|
||||
fi
|
||||
;;
|
||||
5)
|
||||
print_info "System Maintenance"
|
||||
echo "Docker System Cleanup:"
|
||||
docker system df
|
||||
echo
|
||||
read -rp "Run cleanup? (y/N): " response
|
||||
if [[ "$response" =~ ^[Yy]$ ]]; then
|
||||
./service.sh cleanup || true
|
||||
fi
|
||||
;;
|
||||
6)
|
||||
print_info "Troubleshooting Tools"
|
||||
echo "1. Test network connectivity"
|
||||
echo "2. Check Docker logs"
|
||||
echo "3. Validate configurations"
|
||||
echo "4. Show system information"
|
||||
read -rp "Choose option (1-4): " tool_choice
|
||||
case "$tool_choice" in
|
||||
1) ping -c 3 8.8.8.8 ;;
|
||||
2) docker logs $(docker ps -q | head -1) 2>/dev/null || echo "No containers running" ;;
|
||||
3) ./validate.sh || true ;;
|
||||
4) uname -a && docker --version ;;
|
||||
esac
|
||||
;;
|
||||
7)
|
||||
print_info "Viewing logs..."
|
||||
echo "Available log files:"
|
||||
ls -la "$LOG_DIR"/*.log 2>/dev/null || echo "No log files found"
|
||||
echo
|
||||
read -rp "Enter log file to view (or press Enter to skip): " log_file
|
||||
if [[ -n "$log_file" && -f "$LOG_DIR/$log_file" ]]; then
|
||||
tail -50 "$LOG_DIR/$log_file"
|
||||
fi
|
||||
;;
|
||||
8) return 0 ;; # Back to main menu
|
||||
*)
|
||||
print_error "Invalid choice. Please select 1-8."
|
||||
return 1
|
||||
;;
|
||||
esac
|
||||
|
||||
echo
|
||||
read -rp "Press Enter to continue..."
|
||||
}
|
||||
|
||||
# =============================================================================
|
||||
# MENU NAVIGATION FUNCTIONS
|
||||
# =============================================================================
|
||||
|
||||
# Show system menu
|
||||
show_system_menu() {
|
||||
while true; do
|
||||
show_menu "System Setup & Validation" "${SYSTEM_MENU_OPTIONS[@]}"
|
||||
read -rp "Choose an option (1-${#SYSTEM_MENU_OPTIONS[@]}): " choice
|
||||
|
||||
if [[ "$choice" == "${#SYSTEM_MENU_OPTIONS[@]}" ]]; then
|
||||
break
|
||||
fi
|
||||
|
||||
handle_system_menu "$choice" || true
|
||||
done
|
||||
}
|
||||
|
||||
# Show configuration menu
|
||||
show_config_menu() {
|
||||
while true; do
|
||||
show_menu "Configuration Management" "${CONFIG_MENU_OPTIONS[@]}"
|
||||
read -rp "Choose an option (1-${#CONFIG_MENU_OPTIONS[@]}): " choice
|
||||
|
||||
if [[ "$choice" == "${#CONFIG_MENU_OPTIONS[@]}" ]]; then
|
||||
break
|
||||
fi
|
||||
|
||||
handle_config_menu "$choice" || true
|
||||
done
|
||||
}
|
||||
|
||||
# Show deployment menu
|
||||
show_deploy_menu() {
|
||||
while true; do
|
||||
show_menu "Deployment & Services" "${DEPLOY_MENU_OPTIONS[@]}"
|
||||
read -rp "Choose an option (1-${#DEPLOY_MENU_OPTIONS[@]}): " choice
|
||||
|
||||
if [[ "$choice" == "${#DEPLOY_MENU_OPTIONS[@]}" ]]; then
|
||||
break
|
||||
fi
|
||||
|
||||
handle_deploy_menu "$choice" || true
|
||||
done
|
||||
}
|
||||
|
||||
# Show monitoring menu
|
||||
show_monitor_menu() {
|
||||
while true; do
|
||||
show_menu "Monitoring & Maintenance" "${MONITOR_MENU_OPTIONS[@]}"
|
||||
read -rp "Choose an option (1-${#MONITOR_MENU_OPTIONS[@]}): " choice
|
||||
|
||||
if [[ "$choice" == "${#MONITOR_MENU_OPTIONS[@]}" ]]; then
|
||||
break
|
||||
fi
|
||||
|
||||
handle_monitor_menu "$choice" || true
|
||||
done
|
||||
}
|
||||
|
||||
# Show backup menu
|
||||
show_backup_menu() {
|
||||
while true; do
|
||||
show_menu "Backup & Recovery" "${BACKUP_MENU_OPTIONS[@]}"
|
||||
read -rp "Choose an option (1-${#BACKUP_MENU_OPTIONS[@]}): " choice
|
||||
|
||||
if [[ "$choice" == "${#BACKUP_MENU_OPTIONS[@]}" ]]; then
|
||||
break
|
||||
fi
|
||||
|
||||
handle_backup_menu "$choice" || true
|
||||
done
|
||||
}
|
||||
|
||||
# Show update menu
|
||||
show_update_menu() {
|
||||
while true; do
|
||||
show_menu "Updates & Maintenance" "${UPDATE_MENU_OPTIONS[@]}"
|
||||
read -rp "Choose an option (1-${#UPDATE_MENU_OPTIONS[@]}): " choice
|
||||
|
||||
if [[ "$choice" == "${#UPDATE_MENU_OPTIONS[@]}" ]]; then
|
||||
break
|
||||
fi
|
||||
|
||||
handle_update_menu "$choice" || true
|
||||
done
|
||||
}
|
||||
|
||||
# Show advanced menu
|
||||
show_advanced_menu() {
|
||||
while true; do
|
||||
show_menu "Advanced Options" "${ADVANCED_MENU_OPTIONS[@]}"
|
||||
read -rp "Choose an option (1-${#ADVANCED_MENU_OPTIONS[@]}): " choice
|
||||
|
||||
if [[ "$choice" == "${#ADVANCED_MENU_OPTIONS[@]}" ]]; then
|
||||
break
|
||||
fi
|
||||
|
||||
handle_advanced_menu "$choice" || true
|
||||
done
|
||||
}
|
||||
|
||||
# =============================================================================
|
||||
# MAIN FUNCTION
|
||||
# =============================================================================
|
||||
|
||||
main() {
|
||||
# Initialize script
|
||||
init_script "$SCRIPT_NAME" "$SCRIPT_VERSION"
|
||||
init_logging "$SCRIPT_NAME"
|
||||
|
||||
# Clear screen for clean menu display
|
||||
clear
|
||||
|
||||
# Main menu loop
|
||||
while true; do
|
||||
show_main_menu
|
||||
show_system_status
|
||||
show_menu "Main Menu" "${MAIN_MENU_OPTIONS[@]}"
|
||||
|
||||
read -rp "Choose an option (1-${#MAIN_MENU_OPTIONS[@]}): " choice
|
||||
|
||||
if ! [[ "$choice" =~ ^[0-9]+$ ]] || (( choice < 1 || choice > ${#MAIN_MENU_OPTIONS[@]} )); then
|
||||
print_error "Invalid choice. Please enter a number between 1 and ${#MAIN_MENU_OPTIONS[@]}."
|
||||
echo
|
||||
read -rp "Press Enter to continue..."
|
||||
continue
|
||||
fi
|
||||
|
||||
handle_main_menu "$choice"
|
||||
done
|
||||
}
|
||||
|
||||
# Run main function
|
||||
main "$@"
|
||||
@@ -1,577 +0,0 @@
|
||||
#!/bin/bash
|
||||
# EZ-Homelab Enhanced Setup Scripts - Service Monitoring
|
||||
# Real-time service monitoring and alerting
|
||||
|
||||
SCRIPT_NAME="monitor"
|
||||
SCRIPT_VERSION="1.0.0"
|
||||
|
||||
# Load common library
|
||||
source "$(dirname "${BASH_SOURCE[0]}")/lib/common.sh"
|
||||
source "$(dirname "${BASH_SOURCE[0]}")/lib/ui.sh"
|
||||
|
||||
# =============================================================================
|
||||
# MONITORING CONFIGURATION
|
||||
# =============================================================================
|
||||
|
||||
# Monitoring intervals (seconds)
|
||||
HEALTH_CHECK_INTERVAL=30
|
||||
RESOURCE_CHECK_INTERVAL=60
|
||||
LOG_CHECK_INTERVAL=300
|
||||
|
||||
# Alert thresholds
|
||||
CPU_THRESHOLD=80
|
||||
MEMORY_THRESHOLD=80
|
||||
DISK_THRESHOLD=90
|
||||
|
||||
# Alert cooldown (seconds) - prevent alert spam
|
||||
ALERT_COOLDOWN=300
|
||||
|
||||
# Monitoring state file
|
||||
MONITOR_STATE_FILE="$LOG_DIR/monitor_state.json"
|
||||
|
||||
# =============================================================================
|
||||
# MONITORING STATE MANAGEMENT
|
||||
# =============================================================================
|
||||
|
||||
# Initialize monitoring state
|
||||
init_monitor_state() {
|
||||
if [[ ! -f "$MONITOR_STATE_FILE" ]]; then
|
||||
cat > "$MONITOR_STATE_FILE" << EOF
|
||||
{
|
||||
"services": {},
|
||||
"alerts": {},
|
||||
"last_check": $(date +%s),
|
||||
"system_stats": {}
|
||||
}
|
||||
EOF
|
||||
fi
|
||||
}
|
||||
|
||||
# Update service state
|
||||
update_service_state() {
|
||||
local service="$1"
|
||||
local status="$2"
|
||||
local timestamp
|
||||
timestamp=$(date +%s)
|
||||
|
||||
# Use jq if available, otherwise use sed
|
||||
if command_exists "jq"; then
|
||||
jq --arg service "$service" --arg status "$status" --argjson timestamp "$timestamp" \
|
||||
'.services[$service] = {"status": $status, "last_update": $timestamp}' \
|
||||
"$MONITOR_STATE_FILE" > "${MONITOR_STATE_FILE}.tmp" && mv "${MONITOR_STATE_FILE}.tmp" "$MONITOR_STATE_FILE"
|
||||
else
|
||||
# Simple fallback without jq
|
||||
log_warn "jq not available, using basic state tracking"
|
||||
fi
|
||||
}
|
||||
|
||||
# Check if alert should be sent (cooldown check)
|
||||
should_alert() {
|
||||
local alert_key="$1"
|
||||
local current_time
|
||||
current_time=$(date +%s)
|
||||
|
||||
if command_exists "jq"; then
|
||||
local last_alert
|
||||
last_alert=$(jq -r ".alerts[\"$alert_key\"] // 0" "$MONITOR_STATE_FILE")
|
||||
local time_diff=$((current_time - last_alert))
|
||||
|
||||
if (( time_diff >= ALERT_COOLDOWN )); then
|
||||
# Update last alert time
|
||||
jq --arg alert_key "$alert_key" --argjson timestamp "$current_time" \
|
||||
'.alerts[$alert_key] = $timestamp' \
|
||||
"$MONITOR_STATE_FILE" > "${MONITOR_STATE_FILE}.tmp" && mv "${MONITOR_STATE_FILE}.tmp" "$MONITOR_STATE_FILE"
|
||||
return 0
|
||||
else
|
||||
return 1
|
||||
fi
|
||||
else
|
||||
# Without jq, always alert (no cooldown)
|
||||
return 0
|
||||
fi
|
||||
}
|
||||
|
||||
# =============================================================================
|
||||
# HEALTH MONITORING FUNCTIONS
|
||||
# =============================================================================
|
||||
|
||||
# Check service health
|
||||
check_service_health() {
|
||||
local service="$1"
|
||||
|
||||
if ! is_service_running "$service"; then
|
||||
if should_alert "service_down_$service"; then
|
||||
print_error "ALERT: Service '$service' is down"
|
||||
log_error "Service '$service' is down"
|
||||
fi
|
||||
update_service_state "$service" "down"
|
||||
return 1
|
||||
fi
|
||||
|
||||
# Check container health status
|
||||
local health_status
|
||||
health_status=$(docker inspect "$service" --format '{{.State.Health.Status}}' 2>/dev/null || echo "unknown")
|
||||
|
||||
case "$health_status" in
|
||||
"healthy")
|
||||
update_service_state "$service" "healthy"
|
||||
;;
|
||||
"unhealthy")
|
||||
if should_alert "service_unhealthy_$service"; then
|
||||
print_warning "ALERT: Service '$service' is unhealthy"
|
||||
log_warn "Service '$service' is unhealthy"
|
||||
fi
|
||||
update_service_state "$service" "unhealthy"
|
||||
return 1
|
||||
;;
|
||||
"starting")
|
||||
update_service_state "$service" "starting"
|
||||
;;
|
||||
*)
|
||||
update_service_state "$service" "unknown"
|
||||
;;
|
||||
esac
|
||||
|
||||
return 0
|
||||
}
|
||||
|
||||
# Check all services health
|
||||
check_all_services_health() {
|
||||
print_info "Checking service health..."
|
||||
|
||||
local services
|
||||
mapfile -t services < <(find_all_services)
|
||||
local unhealthy_count=0
|
||||
|
||||
for service in "${services[@]}"; do
|
||||
if ! check_service_health "$service"; then
|
||||
((unhealthy_count++))
|
||||
fi
|
||||
done
|
||||
|
||||
if (( unhealthy_count == 0 )); then
|
||||
print_success "All services are healthy"
|
||||
else
|
||||
print_warning "$unhealthy_count service(s) have issues"
|
||||
fi
|
||||
}
|
||||
|
||||
# =============================================================================
|
||||
# RESOURCE MONITORING FUNCTIONS
|
||||
# =============================================================================
|
||||
|
||||
# Check system resources
|
||||
check_system_resources() {
|
||||
print_info "Checking system resources..."
|
||||
|
||||
# CPU usage
|
||||
local cpu_usage
|
||||
cpu_usage=$(top -bn1 | grep "Cpu(s)" | sed "s/.*, *\([0-9.]*\)%* id.*/\1/" | awk '{print 100 - $1}')
|
||||
cpu_usage=$(printf "%.0f" "$cpu_usage")
|
||||
|
||||
if (( cpu_usage > CPU_THRESHOLD )); then
|
||||
if should_alert "high_cpu"; then
|
||||
print_error "ALERT: High CPU usage: ${cpu_usage}% (threshold: ${CPU_THRESHOLD}%)"
|
||||
log_error "High CPU usage: ${cpu_usage}%"
|
||||
fi
|
||||
fi
|
||||
|
||||
# Memory usage
|
||||
local memory_usage
|
||||
memory_usage=$(free | grep Mem | awk '{printf "%.0f", $3/$2 * 100.0}')
|
||||
|
||||
if (( memory_usage > MEMORY_THRESHOLD )); then
|
||||
if should_alert "high_memory"; then
|
||||
print_error "ALERT: High memory usage: ${memory_usage}% (threshold: ${MEMORY_THRESHOLD}%)"
|
||||
log_error "High memory usage: ${memory_usage}%"
|
||||
fi
|
||||
fi
|
||||
|
||||
# Disk usage
|
||||
local disk_usage
|
||||
disk_usage=$(df / | tail -1 | awk '{print $5}' | sed 's/%//')
|
||||
|
||||
if (( disk_usage > DISK_THRESHOLD )); then
|
||||
if should_alert "high_disk"; then
|
||||
print_error "ALERT: High disk usage: ${disk_usage}% (threshold: ${DISK_THRESHOLD}%)"
|
||||
log_error "High disk usage: ${disk_usage}%"
|
||||
fi
|
||||
fi
|
||||
|
||||
print_info "CPU: ${cpu_usage}%, Memory: ${memory_usage}%, Disk: ${disk_usage}%"
|
||||
}
|
||||
|
||||
# Check Docker resource usage
|
||||
check_docker_resources() {
|
||||
print_info "Checking Docker resources..."
|
||||
|
||||
# Get container resource usage
|
||||
if command_exists "docker" && docker_available; then
|
||||
local containers
|
||||
mapfile -t containers < <(docker ps --format "{{.Names}}")
|
||||
|
||||
for container in "${containers[@]}"; do
|
||||
local stats
|
||||
stats=$(docker stats --no-stream --format "table {{.Container}}\t{{.CPUPerc}}\t{{.MemPerc}}" "$container" 2>/dev/null | tail -n 1)
|
||||
|
||||
if [[ -n "$stats" ]]; then
|
||||
local cpu_perc mem_perc
|
||||
cpu_perc=$(echo "$stats" | awk '{print $2}' | sed 's/%//')
|
||||
mem_perc=$(echo "$stats" | awk '{print $3}' | sed 's/%//')
|
||||
|
||||
# Convert to numbers for comparison
|
||||
cpu_perc=${cpu_perc%.*}
|
||||
mem_perc=${mem_perc%.*}
|
||||
|
||||
if [[ "$cpu_perc" =~ ^[0-9]+$ ]] && (( cpu_perc > CPU_THRESHOLD )); then
|
||||
if should_alert "container_high_cpu_$container"; then
|
||||
print_warning "ALERT: Container '$container' high CPU: ${cpu_perc}%"
|
||||
log_warn "Container '$container' high CPU: ${cpu_perc}%"
|
||||
fi
|
||||
fi
|
||||
|
||||
if [[ "$mem_perc" =~ ^[0-9]+$ ]] && (( mem_perc > MEMORY_THRESHOLD )); then
|
||||
if should_alert "container_high_memory_$container"; then
|
||||
print_warning "ALERT: Container '$container' high memory: ${mem_perc}%"
|
||||
log_warn "Container '$container' high memory: ${mem_perc}%"
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
done
|
||||
fi
|
||||
}
|
||||
|
||||
# =============================================================================
|
||||
# LOG MONITORING FUNCTIONS
|
||||
# =============================================================================
|
||||
|
||||
# Check service logs for errors
|
||||
check_service_logs() {
|
||||
local service="$1"
|
||||
local since="${2:-1m}" # Default to last minute
|
||||
|
||||
if ! is_service_running "$service"; then
|
||||
return 0
|
||||
fi
|
||||
|
||||
local compose_file
|
||||
compose_file=$(get_service_compose_file "$service")
|
||||
if [[ -z "$compose_file" ]]; then
|
||||
return 1
|
||||
fi
|
||||
|
||||
local compose_dir=$(dirname "$compose_file")
|
||||
local compose_base=$(basename "$compose_file")
|
||||
|
||||
# Check for error patterns in recent logs
|
||||
local error_patterns=("ERROR" "error" "Exception" "failed" "Failed" "panic" "PANIC")
|
||||
local errors_found=()
|
||||
|
||||
for pattern in "${error_patterns[@]}"; do
|
||||
local error_count
|
||||
error_count=$(cd "$compose_dir" && docker compose logs --since="$since" "$service" 2>&1 | grep -c "$pattern" || true)
|
||||
|
||||
if (( error_count > 0 )); then
|
||||
errors_found+=("$pattern: $error_count")
|
||||
fi
|
||||
done
|
||||
|
||||
if [[ ${#errors_found[@]} -gt 0 ]]; then
|
||||
if should_alert "log_errors_$service"; then
|
||||
print_warning "ALERT: Service '$service' has errors in logs: ${errors_found[*]}"
|
||||
log_warn "Service '$service' log errors: ${errors_found[*]}"
|
||||
fi
|
||||
fi
|
||||
}
|
||||
|
||||
# Check all services logs
|
||||
check_all_logs() {
|
||||
print_info "Checking service logs for errors..."
|
||||
|
||||
local services
|
||||
mapfile -t services < <(find_all_services)
|
||||
|
||||
for service in "${services[@]}"; do
|
||||
check_service_logs "$service"
|
||||
done
|
||||
}
|
||||
|
||||
# =============================================================================
|
||||
# MONITORING DISPLAY FUNCTIONS
|
||||
# =============================================================================
|
||||
|
||||
# Display monitoring dashboard
|
||||
show_monitoring_dashboard() {
|
||||
print_info "EZ-Homelab Monitoring Dashboard"
|
||||
echo
|
||||
|
||||
# System resources
|
||||
echo "=== System Resources ==="
|
||||
local cpu_usage memory_usage disk_usage
|
||||
cpu_usage=$(top -bn1 | grep "Cpu(s)" | sed "s/.*, *\([0-9.]*\)%* id.*/\1/" | awk '{print 100 - $1}' || echo "0")
|
||||
memory_usage=$(free | grep Mem | awk '{printf "%.0f", $3/$2 * 100.0}' || echo "0")
|
||||
disk_usage=$(df / | tail -1 | awk '{print $5}' | sed 's/%//' || echo "0")
|
||||
|
||||
echo "CPU Usage: ${cpu_usage}%"
|
||||
echo "Memory Usage: ${memory_usage}%"
|
||||
echo "Disk Usage: ${disk_usage}%"
|
||||
echo
|
||||
|
||||
# Service status summary
|
||||
echo "=== Service Status ==="
|
||||
local services=()
|
||||
mapfile -t services < <(find_all_services)
|
||||
local total_services=${#services[@]}
|
||||
local running_services=0
|
||||
local unhealthy_services=0
|
||||
|
||||
for service in "${services[@]}"; do
|
||||
if is_service_running "$service"; then
|
||||
running_services=$((running_services + 1))
|
||||
|
||||
local health_status
|
||||
health_status=$(docker inspect "$service" --format '{{.State.Health.Status}}' 2>/dev/null || echo "unknown")
|
||||
if [[ "$health_status" == "unhealthy" ]]; then
|
||||
unhealthy_services=$((unhealthy_services + 1))
|
||||
fi
|
||||
fi
|
||||
done
|
||||
|
||||
echo "Total Services: $total_services"
|
||||
echo "Running: $running_services"
|
||||
echo "Unhealthy: $unhealthy_services"
|
||||
echo
|
||||
|
||||
# Recent alerts
|
||||
echo "=== Recent Alerts ==="
|
||||
if command_exists "jq" && [[ -f "$MONITOR_STATE_FILE" ]]; then
|
||||
local recent_alerts
|
||||
recent_alerts=$(jq -r '.alerts | to_entries[] | select(.value > (now - 3600)) | "\(.key): \(.value | strftime("%H:%M:%S"))"' "$MONITOR_STATE_FILE" 2>/dev/null || echo "")
|
||||
|
||||
if [[ -n "$recent_alerts" ]]; then
|
||||
echo "$recent_alerts"
|
||||
else
|
||||
echo "No recent alerts (last hour)"
|
||||
fi
|
||||
else
|
||||
echo "Alert history not available (jq not installed)"
|
||||
fi
|
||||
}
|
||||
|
||||
# Display detailed service status
|
||||
show_detailed_status() {
|
||||
local service="$1"
|
||||
|
||||
if [[ -z "$service" ]]; then
|
||||
print_error "Service name required"
|
||||
return 1
|
||||
fi
|
||||
|
||||
print_info "Detailed Status for: $service"
|
||||
echo
|
||||
|
||||
if ! is_service_running "$service"; then
|
||||
echo "Status: ❌ Stopped"
|
||||
return 0
|
||||
fi
|
||||
|
||||
echo "Status: ✅ Running"
|
||||
|
||||
# Container details
|
||||
local container_info
|
||||
container_info=$(docker ps --filter "name=^${service}$" --format "table {{.Image}}\t{{.Status}}\t{{.Ports}}" | tail -n +2)
|
||||
if [[ -n "$container_info" ]]; then
|
||||
echo "Container: $container_info"
|
||||
fi
|
||||
|
||||
# Health status
|
||||
local health_status
|
||||
health_status=$(docker inspect "$service" --format '{{.State.Health.Status}}' 2>/dev/null || echo "N/A")
|
||||
echo "Health: $health_status"
|
||||
|
||||
# Resource usage
|
||||
local stats
|
||||
stats=$(docker stats --no-stream --format "table {{.CPUPerc}}\t{{.MemPerc}}\t{{.NetIO}}\t{{.BlockIO}}" "$service" 2>/dev/null | tail -n +2)
|
||||
if [[ -n "$stats" ]]; then
|
||||
echo "Resources: $stats"
|
||||
fi
|
||||
|
||||
# Recent logs
|
||||
echo
|
||||
echo "Recent Logs:"
|
||||
local compose_file
|
||||
compose_file=$(get_service_compose_file "$service")
|
||||
if [[ -n "$compose_file" ]]; then
|
||||
local compose_dir=$(dirname "$compose_file")
|
||||
local compose_base=$(basename "$compose_file")
|
||||
(cd "$compose_dir" && docker compose logs --tail=5 "$service" 2>/dev/null || echo "No logs available")
|
||||
fi
|
||||
}
|
||||
|
||||
# =============================================================================
|
||||
# CONTINUOUS MONITORING FUNCTIONS
|
||||
# =============================================================================
|
||||
|
||||
# Run continuous monitoring
|
||||
run_continuous_monitoring() {
|
||||
local interval="${1:-$HEALTH_CHECK_INTERVAL}"
|
||||
|
||||
print_info "Starting continuous monitoring (interval: ${interval}s)"
|
||||
print_info "Press Ctrl+C to stop"
|
||||
|
||||
# Initialize state
|
||||
init_monitor_state
|
||||
|
||||
# Main monitoring loop
|
||||
while true; do
|
||||
local start_time
|
||||
start_time=$(date +%s)
|
||||
|
||||
# Run all checks
|
||||
check_all_services_health
|
||||
check_system_resources
|
||||
check_docker_resources
|
||||
check_all_logs
|
||||
|
||||
# Update timestamp
|
||||
if command_exists "jq"; then
|
||||
jq --argjson timestamp "$(date +%s)" '.last_check = $timestamp' \
|
||||
"$MONITOR_STATE_FILE" > "${MONITOR_STATE_FILE}.tmp" && mv "${MONITOR_STATE_FILE}.tmp" "$MONITOR_STATE_FILE"
|
||||
fi
|
||||
|
||||
local end_time
|
||||
end_time=$(date +%s)
|
||||
local duration=$((end_time - start_time))
|
||||
|
||||
print_info "Monitoring cycle completed in ${duration}s. Next check in $((interval - duration))s..."
|
||||
|
||||
# Sleep for remaining time
|
||||
local sleep_time=$((interval - duration))
|
||||
if (( sleep_time > 0 )); then
|
||||
sleep "$sleep_time"
|
||||
fi
|
||||
done
|
||||
}
|
||||
|
||||
# =============================================================================
|
||||
# MAIN FUNCTION
|
||||
# =============================================================================
|
||||
|
||||
main() {
|
||||
local action=""
|
||||
local service=""
|
||||
local interval="$HEALTH_CHECK_INTERVAL"
|
||||
local continuous=false
|
||||
|
||||
# Parse command line arguments
|
||||
while [[ $# -gt 0 ]]; do
|
||||
case $1 in
|
||||
-h|--help)
|
||||
cat << EOF
|
||||
EZ-Homelab Service Monitoring
|
||||
|
||||
USAGE:
|
||||
monitor [OPTIONS] <ACTION> [SERVICE]
|
||||
|
||||
ACTIONS:
|
||||
dashboard Show monitoring dashboard
|
||||
status Show detailed status for a service
|
||||
check Run all monitoring checks once
|
||||
watch Continuous monitoring mode
|
||||
|
||||
OPTIONS:
|
||||
-i, --interval SEC Monitoring interval in seconds (default: $HEALTH_CHECK_INTERVAL)
|
||||
-c, --continuous Run in continuous mode (same as 'watch')
|
||||
|
||||
EXAMPLES:
|
||||
monitor dashboard # Show monitoring dashboard
|
||||
monitor status traefik # Show detailed status for Traefik
|
||||
monitor check # Run all checks once
|
||||
monitor watch # Start continuous monitoring
|
||||
monitor watch -i 60 # Continuous monitoring every 60 seconds
|
||||
|
||||
EOF
|
||||
exit 0
|
||||
;;
|
||||
-i|--interval)
|
||||
interval="$2"
|
||||
shift 2
|
||||
;;
|
||||
-c|--continuous)
|
||||
continuous=true
|
||||
shift
|
||||
;;
|
||||
dashboard|status|check|watch)
|
||||
action="$1"
|
||||
shift
|
||||
break
|
||||
;;
|
||||
*)
|
||||
if [[ -z "$service" ]]; then
|
||||
service="$1"
|
||||
else
|
||||
print_error "Too many arguments"
|
||||
exit 1
|
||||
fi
|
||||
shift
|
||||
;;
|
||||
esac
|
||||
done
|
||||
|
||||
# Handle remaining arguments
|
||||
while [[ $# -gt 0 ]]; do
|
||||
if [[ -z "$service" ]]; then
|
||||
service="$1"
|
||||
else
|
||||
print_error "Too many arguments"
|
||||
exit 1
|
||||
fi
|
||||
shift
|
||||
done
|
||||
|
||||
# Initialize script
|
||||
init_script "$SCRIPT_NAME" "$SCRIPT_VERSION"
|
||||
init_logging "$SCRIPT_NAME"
|
||||
init_monitor_state
|
||||
|
||||
# Check prerequisites
|
||||
if ! docker_available; then
|
||||
print_error "Docker is not available"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Execute action
|
||||
case "$action" in
|
||||
dashboard)
|
||||
show_monitoring_dashboard
|
||||
;;
|
||||
status)
|
||||
if [[ -n "$service" ]]; then
|
||||
show_detailed_status "$service"
|
||||
else
|
||||
print_error "Service name required for status action"
|
||||
exit 1
|
||||
fi
|
||||
;;
|
||||
check)
|
||||
check_all_services_health
|
||||
check_system_resources
|
||||
check_docker_resources
|
||||
check_all_logs
|
||||
;;
|
||||
watch)
|
||||
run_continuous_monitoring "$interval"
|
||||
;;
|
||||
"")
|
||||
# Default action: show dashboard
|
||||
show_monitoring_dashboard
|
||||
;;
|
||||
*)
|
||||
print_error "Unknown action: $action"
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
}
|
||||
|
||||
# Run main function
|
||||
main "$@"
|
||||
@@ -1,352 +0,0 @@
|
||||
# EZ-Homelab Enhanced Setup Scripts - Product Requirements Document
|
||||
|
||||
## Document Information
|
||||
- **Project**: EZ-Homelab Enhanced Setup Scripts
|
||||
- **Version**: 1.0
|
||||
- **Date**: January 29, 2026
|
||||
- **Author**: EZ-Homelab Development Team
|
||||
- **Location**: `scripts/enhanced-setup/`
|
||||
|
||||
## Executive Summary
|
||||
|
||||
The EZ-Homelab Enhanced Setup Scripts project aims to replace the complex Python TUI deployment system with a modular, bash-based suite of scripts that provide automated, user-friendly deployment of the EZ-Homelab infrastructure. This approach prioritizes simplicity, minimal manual intervention, and cross-architecture compatibility (AMD64/ARM64) while maintaining the project's file-based, AI-manageable architecture.
|
||||
|
||||
The solution consists of 11 specialized scripts that handle different aspects of homelab deployment, from pre-flight checks to ongoing management and monitoring.
|
||||
|
||||
## Objectives
|
||||
|
||||
### Primary Objectives
|
||||
- **Simplify Deployment**: Reduce manual steps for inexperienced users to near-zero
|
||||
- **Cross-Platform Support**: Ensure seamless operation on AMD64 and ARM64 architectures
|
||||
- **Modular Design**: Create reusable, focused scripts instead of monolithic solutions
|
||||
- **Error Resilience**: Provide clear error messages and recovery options
|
||||
- **Maintainability**: Keep code AI-manageable and file-based
|
||||
|
||||
### Secondary Objectives
|
||||
- **User Experience**: Implement text-based UI with dynamic menus using dialog/whiptail
|
||||
- **Automation**: Support both interactive and non-interactive (scripted) execution
|
||||
- **Monitoring**: Provide status reporting tools for ongoing management
|
||||
- **Security**: Maintain security-first principles with proper permission handling
|
||||
|
||||
## Target Users
|
||||
|
||||
### Primary Users
|
||||
- **Inexperienced Homelab Enthusiasts**: Users new to Docker/homelab concepts
|
||||
- **Raspberry Pi Users**: ARM64 users with resource constraints
|
||||
- **Single-Server Deployers**: Users setting up complete homelabs on one machine
|
||||
|
||||
### Secondary Users
|
||||
- **Advanced Users**: Those who want granular control over deployment
|
||||
- **Multi-Server Administrators**: Users managing distributed homelab setups
|
||||
- **Developers**: Contributors to EZ-Homelab who need to test changes
|
||||
|
||||
## Requirements
|
||||
|
||||
### Functional Requirements
|
||||
|
||||
#### FR-1: Pre-Flight System Validation (`preflight.sh`)
|
||||
- **Description**: Perform comprehensive system checks before deployment
|
||||
- **Requirements**:
|
||||
- Check OS compatibility (Debian/Ubuntu-based systems)
|
||||
- Verify architecture support (AMD64/ARM64)
|
||||
- Assess available disk space (minimum 20GB for core deployment)
|
||||
- Check network connectivity and DNS resolution
|
||||
- Validate CPU and memory resources
|
||||
- Detect existing Docker installation
|
||||
- Check for NVIDIA GPU presence
|
||||
- **Output**: Detailed report with pass/fail status and recommendations
|
||||
- **UI**: Progress bar with whiptail/dialog
|
||||
|
||||
#### FR-2: System Setup and Prerequisites (`setup.sh`)
|
||||
- **Description**: Install and configure Docker and system prerequisites
|
||||
- **Requirements**:
|
||||
- Install Docker Engine (version 24.0+)
|
||||
- Configure Docker daemon for Traefik
|
||||
- Add user to docker group
|
||||
- Install required system packages (curl, jq, git)
|
||||
- Set up virtual environments for Python dependencies (ARM64 compatibility)
|
||||
- Handle system reboot requirements gracefully
|
||||
- **Output**: Installation log with success confirmation
|
||||
- **UI**: Progress indicators and user prompts for reboots
|
||||
|
||||
#### FR-3: NVIDIA GPU Setup (`nvidia.sh`)
|
||||
- **Description**: Install NVIDIA drivers and configure GPU support
|
||||
- **Requirements**:
|
||||
- Detect NVIDIA GPU presence
|
||||
- Install official NVIDIA drivers (version 525+ for current GPUs)
|
||||
- Configure Docker NVIDIA runtime
|
||||
- Validate GPU functionality with nvidia-smi
|
||||
- Handle driver conflicts and updates
|
||||
- **Output**: GPU detection and installation status
|
||||
- **UI**: Confirmation prompts and progress tracking
|
||||
|
||||
#### FR-4: Pre-Deployment Configuration Wizard (`pre-deployment-wizard.sh`)
|
||||
- **Description**: Interactive setup of deployment options and environment
|
||||
- **Requirements**:
|
||||
- Create required Docker networks (traefik-network, homelab-network)
|
||||
- Guide user through deployment type selection (Core, Single Server, Remote)
|
||||
- Service selection with checkboxes (dynamic based on deployment type)
|
||||
- Environment variable collection (.env file creation)
|
||||
- Domain configuration (DuckDNS setup)
|
||||
- Architecture-specific option handling
|
||||
- **Output**: Generated .env file and network configurations
|
||||
- **UI**: Dynamic dialog menus with conditional questions
|
||||
|
||||
#### FR-5: Multi-Purpose Validation (`validate.sh`)
|
||||
- **Description**: Validate configurations, compose files, and deployment readiness
|
||||
- **Requirements**:
|
||||
- Validate .env file completeness and syntax
|
||||
- Check Docker Compose file syntax (`docker compose config`)
|
||||
- Verify network availability
|
||||
- Validate service dependencies
|
||||
- Check SSL certificate readiness
|
||||
- Perform architecture-specific validations
|
||||
- **Output**: Validation report with error details and fixes
|
||||
- **UI**: Optional progress display, detailed error messages
|
||||
|
||||
#### FR-6: Configuration Localization (`localize.sh`)
|
||||
- **Description**: Replace template variables in service configurations
|
||||
- **Requirements**:
|
||||
- Process per-service configuration files
|
||||
- Replace ${VARIABLE} placeholders with environment values
|
||||
- Handle nested configurations (YAML, JSON, conf files)
|
||||
- Support selective localization (single service or all)
|
||||
- Preserve original templates for generalization
|
||||
- **Output**: Localized configuration files ready for deployment
|
||||
- **UI**: Progress for batch operations
|
||||
|
||||
#### FR-7: Configuration Generalization (`generalize.sh`)
|
||||
- **Description**: Reverse localization for template maintenance
|
||||
- **Requirements**:
|
||||
- Extract environment values back to ${VARIABLE} format
|
||||
- Update template files from localized versions
|
||||
- Support selective generalization
|
||||
- Maintain configuration integrity
|
||||
- **Output**: Updated template files
|
||||
- **UI**: Confirmation prompts for destructive operations
|
||||
|
||||
#### FR-8: Service Deployment (`deploy.sh`)
|
||||
- **Description**: Deploy single stacks or complete homelab
|
||||
- **Requirements**:
|
||||
- Support deployment of individual services/stacks
|
||||
- Enforce deployment order (core first, then others)
|
||||
- Handle service dependencies and health checks
|
||||
- Provide rollback options for failed deployments
|
||||
- Support both interactive and automated modes
|
||||
- Log deployment progress and errors
|
||||
- **Output**: Deployment status and access URLs
|
||||
- **UI**: Progress bars and real-time status updates
|
||||
|
||||
#### FR-9: Uninstall and Cleanup (`uninstall.sh`)
|
||||
- **Description**: Remove services, stacks, or complete homelab
|
||||
- **Requirements**:
|
||||
- Support selective uninstall (service, stack, or full)
|
||||
- Preserve user data with confirmation
|
||||
- Clean up Docker networks and volumes
|
||||
- Remove generated configurations
|
||||
- Provide safety confirmations
|
||||
- **Output**: Cleanup report with remaining resources
|
||||
- **UI**: Confirmation dialogs and progress tracking
|
||||
|
||||
#### FR-10: Proxy Configuration Status (`proxy-status.sh`)
|
||||
- **Description**: Generate comprehensive proxy configuration report
|
||||
- **Requirements**:
|
||||
- Analyze Docker Compose labels for Traefik routing
|
||||
- Check external host configurations in Traefik dynamic files
|
||||
- Validate Sablier lazy loading configurations
|
||||
- Support local and remote server analysis
|
||||
- Include all stacks (deployed and not deployed)
|
||||
- Generate table-format reports
|
||||
- **Output**: HTML/PDF report with configuration status
|
||||
- **UI**: Table display with color-coded status
|
||||
|
||||
#### FR-11: DNS and SSL Status (`dns-status.sh`)
|
||||
- **Description**: Report on DuckDNS and Let's Encrypt certificate status
|
||||
- **Requirements**:
|
||||
- Check DuckDNS subdomain resolution
|
||||
- Validate SSL certificate validity and expiration
|
||||
- Monitor certificate renewal status
|
||||
- Report on DNS propagation
|
||||
- Include wildcard certificate coverage
|
||||
- **Output**: Certificate and DNS health report
|
||||
- **UI**: Status dashboard with alerts
|
||||
|
||||
### Non-Functional Requirements
|
||||
|
||||
#### NFR-1: Performance
|
||||
- **Startup Time**: Scripts should complete pre-flight checks in <30 seconds
|
||||
- **Deployment Time**: Core services deployment in <5 minutes on standard hardware
|
||||
- **Memory Usage**: <100MB RAM for script execution
|
||||
- **Disk Usage**: <500MB for script and temporary files
|
||||
|
||||
#### NFR-2: Reliability
|
||||
- **Error Recovery**: Scripts should handle common failures gracefully
|
||||
- **Idempotency**: Safe to re-run scripts without side effects
|
||||
- **Logging**: Comprehensive logging to `/var/log/ez-homelab/`
|
||||
- **Backup**: Automatic backup of configurations before modifications
|
||||
|
||||
#### NFR-3: Usability
|
||||
- **User Guidance**: Clear error messages with suggested fixes
|
||||
- **Documentation**: Inline help (`--help`) for all scripts
|
||||
- **Localization**: English language with clear technical terms
|
||||
- **Accessibility**: Keyboard-only navigation for text UI
|
||||
|
||||
#### NFR-4: Security
|
||||
- **Permission Handling**: Proper sudo usage with minimal privilege escalation
|
||||
- **Secret Management**: Secure handling of passwords and API keys
|
||||
- **Network Security**: No unnecessary port exposures during setup
|
||||
- **Audit Trail**: Log all configuration changes
|
||||
|
||||
#### NFR-5: Compatibility
|
||||
- **OS Support**: Debian 11+, Ubuntu 20.04+, Raspberry Pi OS
|
||||
- **Architecture**: AMD64 and ARM64
|
||||
- **Docker**: Version 20.10+ with Compose V2
|
||||
- **Dependencies**: Use only widely available packages
|
||||
|
||||
## Technical Specifications
|
||||
|
||||
### Software Dependencies
|
||||
- **Core System**:
|
||||
- bash 5.0+
|
||||
- curl 7.68+
|
||||
- jq 1.6+
|
||||
- git 2.25+
|
||||
- dialog 1.3+ (or whiptail 0.52+)
|
||||
- **Docker Ecosystem**:
|
||||
- Docker Engine 24.0+
|
||||
- Docker Compose V2 (docker compose plugin)
|
||||
- Docker Buildx for multi-architecture builds
|
||||
- **NVIDIA (Optional)**:
|
||||
- NVIDIA Driver 525+
|
||||
- nvidia-docker2 2.12+
|
||||
- **Python (Virtual Environment)**:
|
||||
- Python 3.9+
|
||||
- pip 21.0+
|
||||
- virtualenv 20.0+
|
||||
|
||||
### Architecture Considerations
|
||||
- **AMD64**: Full feature support, optimized performance
|
||||
- **ARM64**: PiWheels integration, resource-aware deployment
|
||||
- **Multi-Server**: TLS certificate management for remote access
|
||||
|
||||
### File Structure
|
||||
```
|
||||
scripts/enhanced-setup/
|
||||
├── prd.md # This document
|
||||
├── preflight.sh # System validation
|
||||
├── setup.sh # Docker installation
|
||||
├── nvidia.sh # GPU setup
|
||||
├── pre-deployment-wizard.sh # Configuration wizard
|
||||
├── validate.sh # Multi-purpose validation
|
||||
├── localize.sh # Template processing
|
||||
├── generalize.sh # Template reversal
|
||||
├── deploy.sh # Service deployment
|
||||
├── uninstall.sh # Cleanup operations
|
||||
├── proxy-status.sh # Proxy configuration report
|
||||
├── dns-status.sh # DNS/SSL status report
|
||||
├── lib/ # Shared functions
|
||||
│ ├── common.sh # Utility functions
|
||||
│ ├── ui.sh # Dialog/whiptail helpers
|
||||
│ └── validation.sh # Validation logic
|
||||
├── templates/ # Configuration templates
|
||||
└── logs/ # Execution logs
|
||||
```
|
||||
|
||||
### Integration Points
|
||||
- **EZ-Homelab Repository**: Located in `~/EZ-Homelab/`
|
||||
- **Runtime Location**: Deploys to `/opt/stacks/`
|
||||
- **Configuration Source**: Uses `.env` files and templates
|
||||
- **Service Definitions**: Leverages existing `docker-compose/` directory
|
||||
|
||||
## User Stories
|
||||
|
||||
### US-1: First-Time Raspberry Pi Setup
|
||||
**As a** Raspberry Pi user new to homelabs
|
||||
**I want** a guided setup process
|
||||
**So that** I can deploy EZ-Homelab without Docker knowledge
|
||||
|
||||
**Acceptance Criteria**:
|
||||
- Pre-flight detects ARM64 and guides Pi-specific setup
|
||||
- Setup script handles Docker installation on Raspbian
|
||||
- Wizard provides Pi-optimized service selections
|
||||
- Deployment completes without manual intervention
|
||||
|
||||
### US-2: Multi-Server Homelab Administrator
|
||||
**As a** homelab administrator with multiple servers
|
||||
**I want** to deploy services across servers
|
||||
**So that** I can manage distributed infrastructure
|
||||
|
||||
**Acceptance Criteria**:
|
||||
- Proxy-status reports configuration across all servers
|
||||
- Deploy script supports remote server targeting
|
||||
- DNS-status validates certificates for all subdomains
|
||||
- Uninstall handles cross-server cleanup
|
||||
|
||||
### US-3: Development and Testing
|
||||
**As a** developer contributing to EZ-Homelab
|
||||
**I want** to validate changes before deployment
|
||||
**So that** I can ensure quality and compatibility
|
||||
|
||||
**Acceptance Criteria**:
|
||||
- Validate script checks all configurations
|
||||
- Localize/generalize supports template development
|
||||
- Deploy script allows single-service testing
|
||||
- Status scripts provide detailed diagnostic information
|
||||
|
||||
## Implementation Plan
|
||||
|
||||
### Phase 1: Core Infrastructure (Week 1-2)
|
||||
- Implement preflight.sh and setup.sh
|
||||
- Create shared library functions
|
||||
- Set up basic dialog UI framework
|
||||
|
||||
### Phase 2: Configuration Management (Week 3-4)
|
||||
- Build pre-deployment-wizard.sh
|
||||
- Implement localize.sh and generalize.sh
|
||||
- Add validation.sh framework
|
||||
|
||||
### Phase 3: Deployment Engine (Week 5-6)
|
||||
- Create deploy.sh with service orchestration
|
||||
- Implement uninstall.sh
|
||||
- Add comprehensive error handling
|
||||
|
||||
### Phase 4: Monitoring and Reporting (Week 7-8)
|
||||
- Build proxy-status.sh and dns-status.sh
|
||||
- Add nvidia.sh for GPU support
|
||||
- Comprehensive testing across architectures
|
||||
|
||||
### Phase 5: Polish and Documentation (Week 9-10)
|
||||
- UI/UX improvements
|
||||
- Documentation and help systems
|
||||
- Performance optimization
|
||||
|
||||
## Risk Assessment
|
||||
|
||||
### Technical Risks
|
||||
- **ARM64 Compatibility**: Mitigated by early testing on Raspberry Pi
|
||||
- **Dialog/Whiptail Availability**: Low risk - included in Debian/Ubuntu
|
||||
- **Docker API Changes**: Mitigated by using stable Docker versions
|
||||
|
||||
### Operational Risks
|
||||
- **User Adoption**: Addressed through clear documentation and UI
|
||||
- **Maintenance Overhead**: Mitigated by modular design
|
||||
- **Security Vulnerabilities**: Addressed through regular updates and audits
|
||||
|
||||
## Success Metrics
|
||||
|
||||
### Quantitative Metrics
|
||||
- **Deployment Success Rate**: >95% first-time success
|
||||
- **Setup Time**: <15 minutes for basic deployment
|
||||
- **Error Rate**: <5% user-reported issues
|
||||
- **Architecture Coverage**: Full AMD64/ARM64 support
|
||||
|
||||
### Qualitative Metrics
|
||||
- **User Satisfaction**: Positive feedback on simplicity
|
||||
- **Community Adoption**: Increased GitHub stars and contributors
|
||||
- **Maintainability**: Easy to add new services and features
|
||||
|
||||
## Conclusion
|
||||
|
||||
The EZ-Homelab Enhanced Setup Scripts project will provide a robust, user-friendly deployment system that addresses the limitations of the previous Python approach while maintaining the project's core principles of simplicity and automation. The modular script design ensures maintainability and extensibility for future homelab needs.
|
||||
|
||||
This PRD serves as the foundation for implementation and will be updated as development progresses.
|
||||
@@ -1,375 +0,0 @@
|
||||
#!/bin/bash
|
||||
# EZ-Homelab Enhanced Setup Scripts - Pre-Deployment Configuration Wizard
|
||||
# Interactive setup of deployment options and environment configuration
|
||||
|
||||
SCRIPT_NAME="pre-deployment-wizard"
|
||||
SCRIPT_VERSION="1.0.0"
|
||||
|
||||
# Load common library
|
||||
source "$(dirname "${BASH_SOURCE[0]}")/lib/common.sh"
|
||||
source "$(dirname "${BASH_SOURCE[0]}")/lib/ui.sh"
|
||||
|
||||
# =============================================================================
|
||||
# SCRIPT CONFIGURATION
|
||||
# =============================================================================
|
||||
|
||||
# Default values
|
||||
DEFAULT_DOMAIN="example.duckdns.org"
|
||||
DEFAULT_TIMEZONE="America/New_York"
|
||||
DEFAULT_PUID=1000
|
||||
DEFAULT_PGID=1000
|
||||
|
||||
# Service stacks
|
||||
CORE_STACKS=("duckdns" "traefik" "authelia" "gluetun" "sablier")
|
||||
INFRA_STACKS=("dockge" "pihole")
|
||||
DASHBOARD_STACKS=("homepage" "homarr")
|
||||
MEDIA_STACKS=("plex" "jellyfin" "calibre-web" "qbittorrent")
|
||||
MEDIA_MGMT_STACKS=("sonarr" "radarr" "bazarr" "lidarr" "readarr" "prowlarr")
|
||||
HOME_STACKS=("homeassistant" "nodered" "zigbee2mqtt")
|
||||
PRODUCTIVITY_STACKS=("nextcloud" "gitea" "bookstack")
|
||||
MONITORING_STACKS=("grafana" "prometheus" "uptimekuma")
|
||||
UTILITY_STACKS=("duplicati" "freshrss" "wallabag")
|
||||
|
||||
# =============================================================================
|
||||
# CONFIGURATION FUNCTIONS
|
||||
# =============================================================================
|
||||
|
||||
# Create required Docker networks
|
||||
create_docker_networks() {
|
||||
print_info "Creating required Docker networks..."
|
||||
|
||||
local networks=("traefik-network" "homelab-network")
|
||||
for network in "${networks[@]}"; do
|
||||
if ! docker network ls --format "{{.Name}}" | grep -q "^${network}$"; then
|
||||
docker network create "$network" || {
|
||||
print_error "Failed to create network: $network"
|
||||
return 1
|
||||
}
|
||||
print_success "Created network: $network"
|
||||
else
|
||||
print_info "Network already exists: $network"
|
||||
fi
|
||||
done
|
||||
|
||||
return 0
|
||||
}
|
||||
|
||||
# Generate .env file
|
||||
generate_env_file() {
|
||||
local domain="$1"
|
||||
local timezone="$2"
|
||||
local puid="$3"
|
||||
local pgid="$4"
|
||||
local deployment_type="$5"
|
||||
|
||||
print_info "Generating .env file..."
|
||||
|
||||
local env_file="$EZ_HOME/.env"
|
||||
local temp_file
|
||||
temp_file=$(mktemp)
|
||||
|
||||
# Generate secrets
|
||||
local jwt_secret
|
||||
jwt_secret=$(openssl rand -hex 64 2>/dev/null || echo "change-me-jwt-secret")
|
||||
local session_secret
|
||||
session_secret=$(openssl rand -hex 64 2>/dev/null || echo "change-me-session-secret")
|
||||
local encryption_key
|
||||
encryption_key=$(openssl rand -hex 64 2>/dev/null || echo "change-me-encryption-key")
|
||||
local duckdns_token="your-duckdns-token-here"
|
||||
|
||||
# Write environment variables
|
||||
cat > "$temp_file" << EOF
|
||||
# EZ-Homelab Environment Configuration
|
||||
# Generated by pre-deployment-wizard.sh on $(date)
|
||||
|
||||
# Domain and Networking
|
||||
DOMAIN=$domain
|
||||
TZ=$timezone
|
||||
PUID=$puid
|
||||
PGID=$pgid
|
||||
|
||||
# DuckDNS Configuration
|
||||
DUCKDNS_TOKEN=$duckdns_token
|
||||
|
||||
# Authelia Secrets (Change these in production!)
|
||||
JWT_SECRET=$jwt_secret
|
||||
SESSION_SECRET=$session_secret
|
||||
ENCRYPTION_KEY=$encryption_key
|
||||
|
||||
# Deployment Configuration
|
||||
DEPLOYMENT_TYPE=$deployment_type
|
||||
SERVER_HOSTNAME=$(hostname)
|
||||
|
||||
# Docker Configuration
|
||||
DOCKER_SOCKET_PATH=/var/run/docker.sock
|
||||
|
||||
# Default Credentials (Change these!)
|
||||
AUTHELIA_ADMIN_PASSWORD=admin
|
||||
AUTHELIA_ADMIN_EMAIL=admin@example.com
|
||||
|
||||
# Service-specific settings
|
||||
PLEX_CLAIM_TOKEN=your-plex-claim-token
|
||||
EOF
|
||||
|
||||
# Backup existing .env if it exists
|
||||
if [[ -f "$env_file" ]]; then
|
||||
backup_file "$env_file"
|
||||
fi
|
||||
|
||||
# Move to final location
|
||||
mv "$temp_file" "$env_file"
|
||||
chmod 600 "$env_file"
|
||||
|
||||
print_success ".env file created at $env_file"
|
||||
print_warning "IMPORTANT: Edit the .env file to set your actual secrets and tokens!"
|
||||
}
|
||||
|
||||
# Get service selection based on deployment type
|
||||
get_service_selection() {
|
||||
local deployment_type="$1"
|
||||
local selected_services=()
|
||||
|
||||
case "$deployment_type" in
|
||||
"core")
|
||||
selected_services=("${CORE_STACKS[@]}")
|
||||
;;
|
||||
"single")
|
||||
# Show all categories for single server
|
||||
local all_services=(
|
||||
"${CORE_STACKS[@]}"
|
||||
"${INFRA_STACKS[@]}"
|
||||
"${DASHBOARD_STACKS[@]}"
|
||||
"${MEDIA_STACKS[@]}"
|
||||
"${MEDIA_MGMT_STACKS[@]}"
|
||||
"${HOME_STACKS[@]}"
|
||||
"${PRODUCTIVITY_STACKS[@]}"
|
||||
"${MONITORING_STACKS[@]}"
|
||||
"${UTILITY_STACKS[@]}"
|
||||
)
|
||||
selected_services=("${all_services[@]}")
|
||||
;;
|
||||
"remote")
|
||||
# Remote servers get infrastructure + services (no core)
|
||||
local remote_services=(
|
||||
"${INFRA_STACKS[@]}"
|
||||
"${DASHBOARD_STACKS[@]}"
|
||||
"${MEDIA_STACKS[@]}"
|
||||
"${MEDIA_MGMT_STACKS[@]}"
|
||||
"${HOME_STACKS[@]}"
|
||||
"${PRODUCTIVITY_STACKS[@]}"
|
||||
"${MONITORING_STACKS[@]}"
|
||||
"${UTILITY_STACKS[@]}"
|
||||
)
|
||||
selected_services=("${remote_services[@]}")
|
||||
;;
|
||||
esac
|
||||
|
||||
echo "${selected_services[@]}"
|
||||
}
|
||||
|
||||
# Validate configuration
|
||||
validate_configuration() {
|
||||
local domain="$1"
|
||||
local deployment_type="$2"
|
||||
|
||||
print_info "Validating configuration..."
|
||||
|
||||
# Validate domain format
|
||||
if [[ ! "$domain" =~ ^[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}$ ]]; then
|
||||
print_error "Invalid domain format: $domain"
|
||||
return 1
|
||||
fi
|
||||
|
||||
# Validate deployment type
|
||||
case "$deployment_type" in
|
||||
"core"|"single"|"remote")
|
||||
;;
|
||||
*)
|
||||
print_error "Invalid deployment type: $deployment_type"
|
||||
return 1
|
||||
;;
|
||||
esac
|
||||
|
||||
print_success "Configuration validation passed"
|
||||
return 0
|
||||
}
|
||||
|
||||
# =============================================================================
|
||||
# UI FUNCTIONS
|
||||
# =============================================================================
|
||||
|
||||
# Show welcome screen
|
||||
show_welcome() {
|
||||
ui_msgbox "Welcome to EZ-Homelab Setup Wizard!
|
||||
|
||||
This wizard will help you configure your EZ-Homelab deployment by:
|
||||
• Setting up Docker networks
|
||||
• Configuring environment variables
|
||||
• Selecting services to deploy
|
||||
• Generating configuration files
|
||||
|
||||
Press OK to continue or ESC to cancel."
|
||||
}
|
||||
|
||||
# Get domain configuration
|
||||
get_domain_config() {
|
||||
local domain
|
||||
domain=$(ui_inputbox "Enter your domain (e.g., yourname.duckdns.org):" "$DEFAULT_DOMAIN")
|
||||
[[ -z "$domain" ]] && return 1
|
||||
echo "$domain"
|
||||
}
|
||||
|
||||
# Get timezone
|
||||
get_timezone() {
|
||||
local timezone
|
||||
timezone=$(ui_inputbox "Enter your timezone (e.g., America/New_York):" "$DEFAULT_TIMEZONE")
|
||||
[[ -z "$timezone" ]] && return 1
|
||||
echo "$timezone"
|
||||
}
|
||||
|
||||
# Get PUID/PGID
|
||||
get_user_ids() {
|
||||
local puid pgid
|
||||
|
||||
puid=$(ui_inputbox "Enter PUID (User ID for Docker containers):" "$DEFAULT_PUID")
|
||||
[[ -z "$puid" ]] && return 1
|
||||
|
||||
pgid=$(ui_inputbox "Enter PGID (Group ID for Docker containers):" "$DEFAULT_PGID")
|
||||
[[ -z "$pgid" ]] && return 1
|
||||
|
||||
echo "$puid $pgid"
|
||||
}
|
||||
|
||||
# Get deployment type
|
||||
get_deployment_type() {
|
||||
ui_select_deployment_type
|
||||
}
|
||||
|
||||
# Confirm configuration
|
||||
confirm_configuration() {
|
||||
local domain="$1"
|
||||
local timezone="$2"
|
||||
local puid="$3"
|
||||
local pgid="$4"
|
||||
local deployment_type="$5"
|
||||
local services="$6"
|
||||
|
||||
local message="Configuration Summary:
|
||||
|
||||
Domain: $domain
|
||||
Timezone: $timezone
|
||||
PUID/PGID: $puid/$pgid
|
||||
Deployment Type: $deployment_type
|
||||
Services: $services
|
||||
|
||||
Do you want to proceed with this configuration?"
|
||||
|
||||
ui_yesno "$message"
|
||||
}
|
||||
|
||||
# =============================================================================
|
||||
# MAIN FUNCTION
|
||||
# =============================================================================
|
||||
|
||||
main() {
|
||||
local non_interactive=false
|
||||
local verbose=false
|
||||
|
||||
# Parse command line arguments
|
||||
while [[ $# -gt 0 ]]; do
|
||||
case $1 in
|
||||
-h|--help)
|
||||
ui_show_help "$SCRIPT_NAME"
|
||||
exit 0
|
||||
;;
|
||||
--no-ui)
|
||||
non_interactive=true
|
||||
;;
|
||||
-v|--verbose)
|
||||
verbose=true
|
||||
;;
|
||||
*)
|
||||
print_error "Unknown option: $1"
|
||||
echo "Use --help for usage information"
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
shift
|
||||
done
|
||||
|
||||
# Initialize script
|
||||
init_script "$SCRIPT_NAME"
|
||||
|
||||
if $verbose; then
|
||||
set -x
|
||||
fi
|
||||
|
||||
print_info "Starting EZ-Homelab pre-deployment configuration wizard..."
|
||||
|
||||
# Check if running interactively
|
||||
if ! ui_available || $non_interactive; then
|
||||
print_error "This script requires an interactive terminal with dialog/whiptail"
|
||||
print_error "Run without --no-ui or in a proper terminal"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Show welcome screen
|
||||
show_welcome || exit 1
|
||||
|
||||
# Get configuration interactively
|
||||
local domain
|
||||
domain=$(get_domain_config) || exit 1
|
||||
|
||||
local timezone
|
||||
timezone=$(get_timezone) || exit 1
|
||||
|
||||
local puid pgid
|
||||
read -r puid pgid <<< "$(get_user_ids)" || exit 1
|
||||
|
||||
local deployment_type
|
||||
deployment_type=$(get_deployment_type) || exit 1
|
||||
|
||||
# Get service selection
|
||||
local services
|
||||
services=$(get_service_selection "$deployment_type")
|
||||
|
||||
# Confirm configuration
|
||||
if ! confirm_configuration "$domain" "$timezone" "$puid" "$pgid" "$deployment_type" "$services"; then
|
||||
print_info "Configuration cancelled by user"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
# Validate configuration
|
||||
if ! validate_configuration "$domain" "$deployment_type"; then
|
||||
print_error "Configuration validation failed"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Create Docker networks
|
||||
if ! create_docker_networks; then
|
||||
print_error "Failed to create Docker networks"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Generate .env file
|
||||
if ! generate_env_file "$domain" "$timezone" "$puid" "$pgid" "$deployment_type"; then
|
||||
print_error "Failed to generate .env file"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo ""
|
||||
print_success "EZ-Homelab configuration complete!"
|
||||
print_info "Next steps:"
|
||||
print_info "1. Edit $EZ_HOME/.env to set your actual secrets"
|
||||
print_info "2. Run: ./validate.sh"
|
||||
print_info "3. Run: ./localize.sh"
|
||||
print_info "4. Run: ./deploy.sh core"
|
||||
|
||||
echo ""
|
||||
print_info "Returning to EZ-Homelab menu..."
|
||||
|
||||
exit 0
|
||||
}
|
||||
|
||||
# Run main function
|
||||
main "$@"
|
||||
@@ -1,375 +0,0 @@
|
||||
#!/bin/bash
|
||||
# EZ-Homelab Enhanced Setup Scripts - Pre-Flight System Validation
|
||||
# Performs comprehensive system checks before EZ-Homelab deployment
|
||||
|
||||
SCRIPT_NAME="preflight"
|
||||
SCRIPT_VERSION="1.0.0"
|
||||
|
||||
# Load common library
|
||||
source "$(dirname "${BASH_SOURCE[0]}")/lib/common.sh"
|
||||
source "$(dirname "${BASH_SOURCE[0]}")/lib/ui.sh"
|
||||
|
||||
# =============================================================================
|
||||
# SCRIPT CONFIGURATION
|
||||
# =============================================================================
|
||||
|
||||
# Minimum requirements
|
||||
MIN_DISK_SPACE=20 # GB
|
||||
MIN_MEMORY=1024 # MB
|
||||
MIN_CPU_CORES=2
|
||||
|
||||
# Required packages (will be installed by setup.sh if missing)
|
||||
REQUIRED_PACKAGES=("curl" "wget" "git" "jq")
|
||||
|
||||
# Optional packages (recommended but not required)
|
||||
OPTIONAL_PACKAGES=("htop" "ncdu" "tmux" "unzip")
|
||||
|
||||
# =============================================================================
|
||||
# VALIDATION FUNCTIONS
|
||||
# =============================================================================
|
||||
|
||||
# Check OS compatibility
|
||||
check_os_compatibility() {
|
||||
print_info "Checking OS compatibility..."
|
||||
|
||||
if ! validate_os; then
|
||||
print_error "Unsupported OS: $OS_NAME $OS_VERSION"
|
||||
print_error "Supported: Ubuntu 20.04+, Debian 11+, Raspberry Pi OS"
|
||||
return 1
|
||||
fi
|
||||
|
||||
print_success "OS: $OS_NAME $OS_VERSION ($ARCH)"
|
||||
return 0
|
||||
}
|
||||
|
||||
# Check system resources
|
||||
check_system_resources() {
|
||||
print_info "Checking system resources..."
|
||||
|
||||
local errors=0
|
||||
|
||||
# Check disk space
|
||||
local disk_space
|
||||
disk_space=$(get_disk_space)
|
||||
if (( disk_space < MIN_DISK_SPACE )); then
|
||||
print_error "Insufficient disk space: ${disk_space}GB available, ${MIN_DISK_SPACE}GB required"
|
||||
((errors++))
|
||||
else
|
||||
print_success "Disk space: ${disk_space}GB available"
|
||||
fi
|
||||
|
||||
# Check memory
|
||||
local total_memory
|
||||
total_memory=$(get_total_memory)
|
||||
if (( total_memory < MIN_MEMORY )); then
|
||||
print_error "Insufficient memory: ${total_memory}MB available, ${MIN_MEMORY}MB required"
|
||||
((errors++))
|
||||
else
|
||||
print_success "Memory: ${total_memory}MB total"
|
||||
fi
|
||||
|
||||
# Check CPU cores
|
||||
local cpu_cores
|
||||
cpu_cores=$(nproc)
|
||||
if (( cpu_cores < MIN_CPU_CORES )); then
|
||||
print_warning "Low CPU cores: ${cpu_cores} available, ${MIN_CPU_CORES} recommended"
|
||||
else
|
||||
print_success "CPU cores: $cpu_cores"
|
||||
fi
|
||||
|
||||
return $errors
|
||||
}
|
||||
|
||||
# Check network connectivity
|
||||
check_network_connectivity() {
|
||||
print_info "Checking network connectivity..."
|
||||
|
||||
if ! check_network; then
|
||||
print_error "No internet connection detected"
|
||||
print_error "Please check your network configuration"
|
||||
return 1
|
||||
fi
|
||||
|
||||
print_success "Internet connection available"
|
||||
|
||||
# Check DNS resolution
|
||||
if ! nslookup github.com >/dev/null 2>&1; then
|
||||
print_warning "DNS resolution may be slow or failing"
|
||||
else
|
||||
print_success "DNS resolution working"
|
||||
fi
|
||||
|
||||
return 0
|
||||
}
|
||||
|
||||
# Check required packages (will be installed by setup.sh if missing)
|
||||
check_required_packages() {
|
||||
print_info "Checking required packages..."
|
||||
|
||||
local missing=()
|
||||
for package in "${REQUIRED_PACKAGES[@]}"; do
|
||||
if ! command_exists "$package"; then
|
||||
missing+=("$package")
|
||||
fi
|
||||
done
|
||||
|
||||
if [[ ${#missing[@]} -gt 0 ]]; then
|
||||
print_warning "Required packages missing: ${missing[*]}"
|
||||
print_info "These will be installed automatically by setup.sh"
|
||||
return 2 # Warning, not error
|
||||
fi
|
||||
|
||||
print_success "All required packages installed"
|
||||
return 0
|
||||
}
|
||||
|
||||
# Check optional packages
|
||||
check_optional_packages() {
|
||||
print_info "Checking optional packages..."
|
||||
|
||||
local missing=()
|
||||
for package in "${OPTIONAL_PACKAGES[@]}"; do
|
||||
if ! command_exists "$package"; then
|
||||
missing+=("$package")
|
||||
fi
|
||||
done
|
||||
|
||||
if [[ ${#missing[@]} -gt 0 ]]; then
|
||||
print_warning "Optional packages not installed: ${missing[*]}"
|
||||
print_info "Consider installing for better experience: sudo apt install -y ${missing[*]}"
|
||||
else
|
||||
print_success "All optional packages available"
|
||||
fi
|
||||
|
||||
return 0
|
||||
}
|
||||
|
||||
# Check Docker installation
|
||||
check_docker_installation() {
|
||||
print_info "Checking Docker installation..."
|
||||
|
||||
if ! command_exists docker; then
|
||||
print_warning "Docker not installed"
|
||||
print_info "Docker will be installed by setup.sh"
|
||||
return 2 # Warning
|
||||
fi
|
||||
|
||||
if ! service_running docker; then
|
||||
print_warning "Docker service not running"
|
||||
print_info "Docker will be started by setup.sh"
|
||||
return 2
|
||||
fi
|
||||
|
||||
# Check Docker version
|
||||
local docker_version
|
||||
docker_version=$(docker --version | grep -oP 'Docker version \K[^,]+')
|
||||
if [[ -z "$docker_version" ]]; then
|
||||
print_warning "Could not determine Docker version"
|
||||
return 2
|
||||
fi
|
||||
|
||||
# Compare version (simplified check)
|
||||
if [[ "$docker_version" =~ ^([0-9]+)\.([0-9]+) ]]; then
|
||||
local major="${BASH_REMATCH[1]}"
|
||||
local minor="${BASH_REMATCH[2]}"
|
||||
if (( major < 20 || (major == 20 && minor < 10) )); then
|
||||
print_warning "Docker version $docker_version may be outdated (20.10+ recommended)"
|
||||
return 2
|
||||
fi
|
||||
fi
|
||||
|
||||
print_success "Docker $docker_version installed and running"
|
||||
return 0
|
||||
}
|
||||
|
||||
# Check NVIDIA GPU
|
||||
check_nvidia_gpu() {
|
||||
print_info "Checking for NVIDIA GPU..."
|
||||
|
||||
if ! command_exists nvidia-smi; then
|
||||
print_info "No NVIDIA GPU detected or drivers not installed"
|
||||
return 0
|
||||
fi
|
||||
|
||||
local gpu_info
|
||||
gpu_info=$(nvidia-smi --query-gpu=name --format=csv,noheader,nounits | head -1)
|
||||
if [[ -z "$gpu_info" ]]; then
|
||||
print_warning "NVIDIA GPU detected but not accessible"
|
||||
return 2
|
||||
fi
|
||||
|
||||
print_success "NVIDIA GPU: $gpu_info"
|
||||
return 0
|
||||
}
|
||||
|
||||
# Check EZ-Homelab repository
|
||||
check_repository() {
|
||||
print_info "Checking EZ-Homelab repository..."
|
||||
|
||||
if [[ ! -d "$EZ_HOME" ]]; then
|
||||
print_error "EZ-Homelab repository not found at $EZ_HOME"
|
||||
print_error "Please clone the repository first"
|
||||
return 1
|
||||
fi
|
||||
|
||||
if [[ ! -f "$EZ_HOME/docker-compose/core/docker-compose.yml" ]]; then
|
||||
print_error "Repository structure incomplete"
|
||||
print_error "Please ensure you have the full EZ-Homelab repository"
|
||||
return 1
|
||||
fi
|
||||
|
||||
print_success "EZ-Homelab repository found at $EZ_HOME"
|
||||
return 0
|
||||
}
|
||||
|
||||
# Check user permissions
|
||||
check_user_permissions() {
|
||||
print_info "Checking user permissions..."
|
||||
|
||||
if is_root; then
|
||||
print_warning "Running as root - not recommended for normal usage"
|
||||
print_info "Consider running as regular user with sudo access"
|
||||
return 2
|
||||
fi
|
||||
|
||||
if ! sudo -n true 2>/dev/null && ! sudo -l >/dev/null 2>&1; then
|
||||
print_error "User does not have sudo access"
|
||||
print_error "Please ensure your user can run sudo commands"
|
||||
return 1
|
||||
fi
|
||||
|
||||
print_success "User has appropriate permissions"
|
||||
return 0
|
||||
}
|
||||
|
||||
# =============================================================================
|
||||
# REPORT GENERATION
|
||||
# =============================================================================
|
||||
|
||||
# Generate validation report
|
||||
generate_report() {
|
||||
local report_file="$LOG_DIR/preflight-report-$(date +%Y%m%d-%H%M%S).txt"
|
||||
|
||||
{
|
||||
echo "EZ-Homelab Pre-Flight Validation Report"
|
||||
echo "======================================="
|
||||
echo "Date: $(date)"
|
||||
echo "System: $OS_NAME $OS_VERSION ($ARCH)"
|
||||
echo "Kernel: $KERNEL_VERSION"
|
||||
echo "User: $EZ_USER (UID: $EZ_UID, GID: $EZ_GID)"
|
||||
echo ""
|
||||
echo "Results:"
|
||||
echo "- OS Compatibility: $(check_os_compatibility >/dev/null 2>&1 && echo "PASS" || echo "FAIL")"
|
||||
echo "- System Resources: $(check_system_resources >/dev/null 2>&1 && echo "PASS" || echo "WARN/FAIL")"
|
||||
echo "- Network: $(check_network_connectivity >/dev/null 2>&1 && echo "PASS" || echo "FAIL")"
|
||||
echo "- Required Packages: $(check_required_packages >/dev/null 2>&1 && echo "PASS" || echo "FAIL")"
|
||||
echo "- Docker: $(check_docker_installation >/dev/null 2>&1; case $? in 0) echo "PASS";; 1) echo "FAIL";; 2) echo "WARN";; esac)"
|
||||
echo "- NVIDIA GPU: $(check_nvidia_gpu >/dev/null 2>&1 && echo "PASS" || echo "N/A")"
|
||||
echo "- Repository: $(check_repository >/dev/null 2>&1 && echo "PASS" || echo "FAIL")"
|
||||
echo "- Permissions: $(check_user_permissions >/dev/null 2>&1; case $? in 0) echo "PASS";; 1) echo "FAIL";; 2) echo "WARN";; esac)"
|
||||
echo ""
|
||||
echo "Log file: $LOG_FILE"
|
||||
} > "$report_file"
|
||||
|
||||
print_info "Report saved to: $report_file"
|
||||
}
|
||||
|
||||
# =============================================================================
|
||||
# MAIN FUNCTION
|
||||
# =============================================================================
|
||||
|
||||
main() {
|
||||
local non_interactive=false
|
||||
local verbose=false
|
||||
|
||||
# Parse command line arguments
|
||||
while [[ $# -gt 0 ]]; do
|
||||
case $1 in
|
||||
-h|--help)
|
||||
ui_show_help "$SCRIPT_NAME"
|
||||
exit 0
|
||||
;;
|
||||
--no-ui)
|
||||
non_interactive=true
|
||||
;;
|
||||
-v|--verbose)
|
||||
verbose=true
|
||||
;;
|
||||
*)
|
||||
print_error "Unknown option: $1"
|
||||
echo "Use --help for usage information"
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
shift
|
||||
done
|
||||
|
||||
# Initialize script
|
||||
init_script "$SCRIPT_NAME"
|
||||
|
||||
if $verbose; then
|
||||
set -x
|
||||
fi
|
||||
|
||||
print_info "Starting EZ-Homelab pre-flight validation..."
|
||||
print_info "This will check your system readiness for EZ-Homelab deployment."
|
||||
|
||||
local total_checks=9
|
||||
local passed=0
|
||||
local warnings=0
|
||||
local failed=0
|
||||
|
||||
# Run all checks (disable strict error checking for this loop)
|
||||
set +e
|
||||
local checks=(
|
||||
"check_os_compatibility"
|
||||
"check_system_resources"
|
||||
"check_network_connectivity"
|
||||
"check_required_packages"
|
||||
"check_optional_packages"
|
||||
"check_docker_installation"
|
||||
"check_nvidia_gpu"
|
||||
"check_repository"
|
||||
"check_user_permissions"
|
||||
)
|
||||
|
||||
for check in "${checks[@]}"; do
|
||||
echo ""
|
||||
# Run check and capture exit code
|
||||
local exit_code=0
|
||||
$check || exit_code=$?
|
||||
|
||||
if [[ $exit_code -eq 0 ]]; then
|
||||
((passed++))
|
||||
elif [[ $exit_code -eq 2 ]]; then
|
||||
((warnings++))
|
||||
else
|
||||
((failed++))
|
||||
fi
|
||||
done
|
||||
set -e # Re-enable strict error checking
|
||||
|
||||
echo ""
|
||||
print_info "Pre-flight validation complete!"
|
||||
print_info "Summary: $passed passed, $warnings warnings, $failed failed"
|
||||
|
||||
# Generate report
|
||||
generate_report
|
||||
|
||||
# Determine exit code
|
||||
if [[ $failed -gt 0 ]]; then
|
||||
print_error "Critical issues found. Please resolve before proceeding."
|
||||
print_info "Check the log file: $LOG_FILE"
|
||||
print_info "Run this script again after fixing issues."
|
||||
exit 1
|
||||
elif [[ $warnings -gt 0 ]]; then
|
||||
print_warning "Some warnings detected. You may proceed but consider addressing them."
|
||||
exit 2
|
||||
else
|
||||
print_success "All checks passed! Your system is ready for EZ-Homelab deployment."
|
||||
exit 0
|
||||
fi
|
||||
}
|
||||
|
||||
# Run main function
|
||||
main "$@"
|
||||
@@ -1,556 +0,0 @@
|
||||
#!/bin/bash
|
||||
# EZ-Homelab Enhanced Setup Scripts - Service Management
|
||||
# Individual service control, monitoring, and maintenance
|
||||
|
||||
SCRIPT_NAME="service"
|
||||
SCRIPT_VERSION="1.0.0"
|
||||
|
||||
# Load common library
|
||||
source "$(dirname "${BASH_SOURCE[0]}")/lib/common.sh"
|
||||
source "$(dirname "${BASH_SOURCE[0]}")/lib/ui.sh"
|
||||
|
||||
# =============================================================================
|
||||
# SERVICE MANAGEMENT CONFIGURATION
|
||||
# =============================================================================
|
||||
|
||||
# Service action timeouts (seconds)
|
||||
SERVICE_START_TIMEOUT=60
|
||||
SERVICE_STOP_TIMEOUT=30
|
||||
LOG_TAIL_LINES=100
|
||||
HEALTH_CHECK_RETRIES=3
|
||||
|
||||
# =============================================================================
|
||||
# SERVICE DISCOVERY FUNCTIONS
|
||||
# =============================================================================
|
||||
|
||||
# Find all available services across all stacks
|
||||
find_all_services() {
|
||||
local services=()
|
||||
|
||||
# Get all docker-compose directories
|
||||
local compose_dirs
|
||||
mapfile -t compose_dirs < <(find "$EZ_HOME/docker-compose" -name "docker-compose.yml" -type f -exec dirname {} \; 2>/dev/null)
|
||||
|
||||
for dir in "${compose_dirs[@]}"; do
|
||||
local stack_services
|
||||
mapfile -t stack_services < <(get_stack_services "$(basename "$dir")")
|
||||
|
||||
for service in "${stack_services[@]}"; do
|
||||
# Avoid duplicates
|
||||
if [[ ! " ${services[*]} " =~ " ${service} " ]]; then
|
||||
services+=("$service")
|
||||
fi
|
||||
done
|
||||
done
|
||||
|
||||
printf '%s\n' "${services[@]}" | sort
|
||||
}
|
||||
|
||||
# Find which stack a service belongs to
|
||||
find_service_stack() {
|
||||
local service="$1"
|
||||
|
||||
local compose_dirs
|
||||
mapfile -t compose_dirs < <(find "$EZ_HOME/docker-compose" -name "docker-compose.yml" -type f -exec dirname {} \; 2>/dev/null)
|
||||
|
||||
for dir in "${compose_dirs[@]}"; do
|
||||
local stack_services
|
||||
mapfile -t stack_services < <(get_stack_services "$(basename "$dir")")
|
||||
|
||||
for stack_service in "${stack_services[@]}"; do
|
||||
if [[ "$stack_service" == "$service" ]]; then
|
||||
echo "$dir"
|
||||
return 0
|
||||
fi
|
||||
done
|
||||
done
|
||||
|
||||
return 1
|
||||
}
|
||||
|
||||
# Get service compose file
|
||||
get_service_compose_file() {
|
||||
local service="$1"
|
||||
local stack_dir
|
||||
|
||||
stack_dir=$(find_service_stack "$service")
|
||||
[[ -n "$stack_dir" ]] && echo "$stack_dir/docker-compose.yml"
|
||||
}
|
||||
|
||||
# =============================================================================
|
||||
# SERVICE CONTROL FUNCTIONS
|
||||
# =============================================================================
|
||||
|
||||
# Start a specific service
|
||||
start_service() {
|
||||
local service="$1"
|
||||
local compose_file
|
||||
|
||||
compose_file=$(get_service_compose_file "$service")
|
||||
if [[ -z "$compose_file" ]]; then
|
||||
print_error "Service '$service' not found"
|
||||
return 1
|
||||
fi
|
||||
|
||||
if is_service_running "$service"; then
|
||||
print_warning "Service '$service' is already running"
|
||||
return 0
|
||||
fi
|
||||
|
||||
print_info "Starting service: $service"
|
||||
|
||||
local compose_dir=$(dirname "$compose_file")
|
||||
local compose_base=$(basename "$compose_file")
|
||||
|
||||
if (cd "$compose_dir" && docker compose -f "$compose_base" up -d "$service"); then
|
||||
print_info "Waiting for service to start..."
|
||||
sleep "$SERVICE_START_TIMEOUT"
|
||||
|
||||
if is_service_running "$service"; then
|
||||
print_success "Service '$service' started successfully"
|
||||
return 0
|
||||
else
|
||||
print_error "Service '$service' failed to start"
|
||||
return 1
|
||||
fi
|
||||
else
|
||||
print_error "Failed to start service '$service'"
|
||||
return 1
|
||||
fi
|
||||
}
|
||||
|
||||
# Stop a specific service
|
||||
stop_service() {
|
||||
local service="$1"
|
||||
local compose_file
|
||||
|
||||
compose_file=$(get_service_compose_file "$service")
|
||||
if [[ -z "$compose_file" ]]; then
|
||||
print_error "Service '$service' not found"
|
||||
return 1
|
||||
fi
|
||||
|
||||
if ! is_service_running "$service"; then
|
||||
print_warning "Service '$service' is not running"
|
||||
return 0
|
||||
fi
|
||||
|
||||
print_info "Stopping service: $service"
|
||||
|
||||
local compose_dir=$(dirname "$compose_file")
|
||||
local compose_base=$(basename "$compose_file")
|
||||
|
||||
if (cd "$compose_dir" && docker compose -f "$compose_base" stop "$service"); then
|
||||
local count=0
|
||||
while ((count < SERVICE_STOP_TIMEOUT)) && is_service_running "$service"; do
|
||||
sleep 1
|
||||
((count++))
|
||||
done
|
||||
|
||||
if ! is_service_running "$service"; then
|
||||
print_success "Service '$service' stopped successfully"
|
||||
return 0
|
||||
else
|
||||
print_warning "Service '$service' did not stop gracefully, forcing..."
|
||||
(cd "$compose_dir" && docker compose -f "$compose_base" kill "$service")
|
||||
return 0
|
||||
fi
|
||||
else
|
||||
print_error "Failed to stop service '$service'"
|
||||
return 1
|
||||
fi
|
||||
}
|
||||
|
||||
# Restart a specific service
|
||||
restart_service() {
|
||||
local service="$1"
|
||||
|
||||
print_info "Restarting service: $service"
|
||||
|
||||
if stop_service "$service" && start_service "$service"; then
|
||||
print_success "Service '$service' restarted successfully"
|
||||
return 0
|
||||
else
|
||||
print_error "Failed to restart service '$service'"
|
||||
return 1
|
||||
fi
|
||||
}
|
||||
|
||||
# Get service logs
|
||||
show_service_logs() {
|
||||
local service="$1"
|
||||
local lines="${2:-$LOG_TAIL_LINES}"
|
||||
local follow="${3:-false}"
|
||||
|
||||
local compose_file
|
||||
compose_file=$(get_service_compose_file "$service")
|
||||
if [[ -z "$compose_file" ]]; then
|
||||
print_error "Service '$service' not found"
|
||||
return 1
|
||||
fi
|
||||
|
||||
print_info "Showing logs for service: $service"
|
||||
|
||||
local compose_dir=$(dirname "$compose_file")
|
||||
local compose_base=$(basename "$compose_file")
|
||||
|
||||
if $follow; then
|
||||
(cd "$compose_dir" && docker compose -f "$compose_base" logs -f --tail="$lines" "$service")
|
||||
else
|
||||
(cd "$compose_dir" && docker compose -f "$compose_base" logs --tail="$lines" "$service")
|
||||
fi
|
||||
}
|
||||
|
||||
# Check service health
|
||||
check_service_status() {
|
||||
local service="$1"
|
||||
|
||||
local compose_file
|
||||
compose_file=$(get_service_compose_file "$service")
|
||||
if [[ -z "$compose_file" ]]; then
|
||||
print_error "Service '$service' not found"
|
||||
return 1
|
||||
fi
|
||||
|
||||
echo "Service: $service"
|
||||
|
||||
if is_service_running "$service"; then
|
||||
echo "Status: ✅ Running"
|
||||
|
||||
# Get container info
|
||||
local container_info
|
||||
container_info=$(docker ps --filter "name=^${service}$" --format "table {{.Image}}\t{{.Status}}\t{{.Ports}}" | tail -n +2)
|
||||
if [[ -n "$container_info" ]]; then
|
||||
echo "Container: $container_info"
|
||||
fi
|
||||
|
||||
# Get health status if available
|
||||
local health_status
|
||||
health_status=$(docker inspect "$service" --format '{{.State.Health.Status}}' 2>/dev/null || echo "N/A")
|
||||
if [[ "$health_status" != "N/A" ]]; then
|
||||
echo "Health: $health_status"
|
||||
fi
|
||||
else
|
||||
echo "Status: ❌ Stopped"
|
||||
fi
|
||||
|
||||
# Show stack info
|
||||
local stack_dir
|
||||
stack_dir=$(find_service_stack "$service")
|
||||
if [[ -n "$stack_dir" ]]; then
|
||||
echo "Stack: $(basename "$stack_dir")"
|
||||
fi
|
||||
|
||||
echo
|
||||
}
|
||||
|
||||
# Execute command in service container
|
||||
exec_service_command() {
|
||||
local service="$1"
|
||||
shift
|
||||
local command="$*"
|
||||
|
||||
if ! is_service_running "$service"; then
|
||||
print_error "Service '$service' is not running"
|
||||
return 1
|
||||
fi
|
||||
|
||||
print_info "Executing command in $service: $command"
|
||||
docker exec -it "$service" $command
|
||||
}
|
||||
|
||||
# =============================================================================
|
||||
# BULK OPERATIONS
|
||||
# =============================================================================
|
||||
|
||||
# Start all services in a stack
|
||||
start_stack_services() {
|
||||
local stack="$1"
|
||||
local compose_file="$EZ_HOME/docker-compose/$stack/docker-compose.yml"
|
||||
|
||||
if [[ ! -f "$compose_file" ]]; then
|
||||
print_error "Stack '$stack' not found"
|
||||
return 1
|
||||
fi
|
||||
|
||||
print_info "Starting all services in stack: $stack"
|
||||
|
||||
if docker compose -f "$compose_file" up -d; then
|
||||
print_success "Stack '$stack' started successfully"
|
||||
return 0
|
||||
else
|
||||
print_error "Failed to start stack '$stack'"
|
||||
return 1
|
||||
fi
|
||||
}
|
||||
|
||||
# Stop all services in a stack
|
||||
stop_stack_services() {
|
||||
local stack="$1"
|
||||
local compose_file="$EZ_HOME/docker-compose/$stack/docker-compose.yml"
|
||||
|
||||
if [[ ! -f "$compose_file" ]]; then
|
||||
print_error "Stack '$stack' not found"
|
||||
return 1
|
||||
fi
|
||||
|
||||
print_info "Stopping all services in stack: $stack"
|
||||
|
||||
if docker compose -f "$compose_file" down; then
|
||||
print_success "Stack '$stack' stopped successfully"
|
||||
return 0
|
||||
else
|
||||
print_error "Failed to stop stack '$stack'"
|
||||
return 1
|
||||
fi
|
||||
}
|
||||
|
||||
# Show status of all services
|
||||
show_all_status() {
|
||||
print_info "EZ-Homelab Service Status"
|
||||
echo
|
||||
|
||||
local services
|
||||
mapfile -t services < <(find_all_services)
|
||||
|
||||
for service in "${services[@]}"; do
|
||||
check_service_status "$service"
|
||||
done
|
||||
}
|
||||
|
||||
# =============================================================================
|
||||
# UTILITY FUNCTIONS
|
||||
# =============================================================================
|
||||
|
||||
# List all available services
|
||||
list_services() {
|
||||
print_info "Available Services:"
|
||||
|
||||
local services
|
||||
mapfile -t services < <(find_all_services)
|
||||
|
||||
for service in "${services[@]}"; do
|
||||
local stack_dir=""
|
||||
stack_dir=$(find_service_stack "$service")
|
||||
local stack_name=""
|
||||
[[ -n "$stack_dir" ]] && stack_name="($(basename "$stack_dir"))"
|
||||
|
||||
local status="❌ Stopped"
|
||||
is_service_running "$service" && status="✅ Running"
|
||||
|
||||
printf " %-20s %-12s %s\n" "$service" "$status" "$stack_name"
|
||||
done
|
||||
}
|
||||
|
||||
# Clean up stopped containers and unused images
|
||||
cleanup_services() {
|
||||
print_info "Cleaning up Docker resources..."
|
||||
|
||||
# Remove stopped containers
|
||||
local stopped_containers
|
||||
stopped_containers=$(docker ps -aq -f status=exited)
|
||||
if [[ -n "$stopped_containers" ]]; then
|
||||
print_info "Removing stopped containers..."
|
||||
echo "$stopped_containers" | xargs docker rm
|
||||
fi
|
||||
|
||||
# Remove unused images
|
||||
print_info "Removing unused images..."
|
||||
docker image prune -f
|
||||
|
||||
# Remove unused volumes
|
||||
print_info "Removing unused volumes..."
|
||||
docker volume prune -f
|
||||
|
||||
print_success "Cleanup completed"
|
||||
}
|
||||
|
||||
# =============================================================================
|
||||
# MAIN FUNCTION
|
||||
# =============================================================================
|
||||
|
||||
main() {
|
||||
local action=""
|
||||
local service=""
|
||||
local stack=""
|
||||
local follow_logs=false
|
||||
local log_lines="$LOG_TAIL_LINES"
|
||||
local non_interactive=false
|
||||
|
||||
# Parse command line arguments
|
||||
while [[ $# -gt 0 ]]; do
|
||||
case $1 in
|
||||
-h|--help)
|
||||
cat << EOF
|
||||
EZ-Homelab Service Management
|
||||
|
||||
USAGE:
|
||||
service [OPTIONS] <ACTION> [SERVICE|STACK]
|
||||
|
||||
ACTIONS:
|
||||
start Start a service or all services in a stack
|
||||
stop Stop a service or all services in a stack
|
||||
restart Restart a service or all services in a stack
|
||||
status Show status of a service or all services
|
||||
logs Show logs for a service
|
||||
exec Execute command in a running service container
|
||||
list List all available services
|
||||
cleanup Clean up stopped containers and unused resources
|
||||
|
||||
ARGUMENTS:
|
||||
SERVICE Service name (for service-specific actions)
|
||||
STACK Stack name (for stack-wide actions)
|
||||
|
||||
OPTIONS:
|
||||
-f, --follow Follow logs (for logs action)
|
||||
-n, --lines NUM Number of log lines to show (default: $LOG_TAIL_LINES)
|
||||
--no-ui Run without interactive UI
|
||||
|
||||
EXAMPLES:
|
||||
service list # List all services
|
||||
service status # Show all service statuses
|
||||
service start traefik # Start Traefik service
|
||||
service stop core # Stop all core services
|
||||
service restart pihole # Restart Pi-hole service
|
||||
service logs traefik # Show Traefik logs
|
||||
service logs traefik --follow # Follow Traefik logs
|
||||
service exec authelia bash # Execute bash in Authelia container
|
||||
service cleanup # Clean up Docker resources
|
||||
|
||||
EOF
|
||||
exit 0
|
||||
;;
|
||||
-f|--follow)
|
||||
follow_logs=true
|
||||
shift
|
||||
;;
|
||||
-n|--lines)
|
||||
log_lines="$2"
|
||||
shift 2
|
||||
;;
|
||||
--no-ui)
|
||||
non_interactive=true
|
||||
shift
|
||||
;;
|
||||
start|stop|restart|status|logs|exec|list|cleanup)
|
||||
if [[ -z "$action" ]]; then
|
||||
action="$1"
|
||||
else
|
||||
if [[ -z "$service" ]]; then
|
||||
service="$1"
|
||||
else
|
||||
print_error "Too many arguments"
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
shift
|
||||
;;
|
||||
*)
|
||||
if [[ -z "$service" ]]; then
|
||||
service="$1"
|
||||
else
|
||||
print_error "Too many arguments"
|
||||
exit 1
|
||||
fi
|
||||
shift
|
||||
;;
|
||||
esac
|
||||
done
|
||||
|
||||
# Initialize script
|
||||
init_script "$SCRIPT_NAME" "$SCRIPT_VERSION"
|
||||
init_logging "$SCRIPT_NAME"
|
||||
|
||||
# Check prerequisites
|
||||
if ! docker_available; then
|
||||
print_error "Docker is not available"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Execute action
|
||||
case "$action" in
|
||||
start)
|
||||
if [[ -n "$service" ]]; then
|
||||
# Check if it's a stack or service
|
||||
if [[ -d "$EZ_HOME/docker-compose/$service" ]]; then
|
||||
start_stack_services "$service"
|
||||
else
|
||||
start_service "$service"
|
||||
fi
|
||||
else
|
||||
print_error "Service or stack name required"
|
||||
exit 1
|
||||
fi
|
||||
;;
|
||||
stop)
|
||||
if [[ -n "$service" ]]; then
|
||||
# Check if it's a stack or service
|
||||
if [[ -d "$EZ_HOME/docker-compose/$service" ]]; then
|
||||
stop_stack_services "$service"
|
||||
else
|
||||
stop_service "$service"
|
||||
fi
|
||||
else
|
||||
print_error "Service or stack name required"
|
||||
exit 1
|
||||
fi
|
||||
;;
|
||||
restart)
|
||||
if [[ -n "$service" ]]; then
|
||||
# Check if it's a stack or service
|
||||
if [[ -d "$EZ_HOME/docker-compose/$service" ]]; then
|
||||
stop_stack_services "$service" && start_stack_services "$service"
|
||||
else
|
||||
restart_service "$service"
|
||||
fi
|
||||
else
|
||||
print_error "Service or stack name required"
|
||||
exit 1
|
||||
fi
|
||||
;;
|
||||
status)
|
||||
if [[ -n "$service" ]]; then
|
||||
check_service_status "$service"
|
||||
else
|
||||
show_all_status
|
||||
fi
|
||||
;;
|
||||
logs)
|
||||
if [[ -n "$service" ]]; then
|
||||
show_service_logs "$service" "$log_lines" "$follow_logs"
|
||||
else
|
||||
print_error "Service name required"
|
||||
exit 1
|
||||
fi
|
||||
;;
|
||||
exec)
|
||||
if [[ -n "$service" ]]; then
|
||||
if [[ $# -gt 0 ]]; then
|
||||
exec_service_command "$service" "$@"
|
||||
else
|
||||
exec_service_command "$service" bash
|
||||
fi
|
||||
else
|
||||
print_error "Service name required"
|
||||
exit 1
|
||||
fi
|
||||
;;
|
||||
list)
|
||||
list_services
|
||||
;;
|
||||
cleanup)
|
||||
cleanup_services
|
||||
;;
|
||||
"")
|
||||
print_error "No action specified. Use --help for usage information."
|
||||
exit 1
|
||||
;;
|
||||
*)
|
||||
print_error "Unknown action: $action"
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
}
|
||||
|
||||
# Run main function
|
||||
main "$@"
|
||||
@@ -1,387 +0,0 @@
|
||||
#!/bin/bash
|
||||
# EZ-Homelab Enhanced Setup Scripts - System Setup and Prerequisites
|
||||
# Installs Docker and configures system prerequisites for EZ-Homelab
|
||||
|
||||
SCRIPT_NAME="setup"
|
||||
SCRIPT_VERSION="1.0.0"
|
||||
|
||||
# Load common library
|
||||
source "$(dirname "${BASH_SOURCE[0]}")/lib/common.sh"
|
||||
source "$(dirname "${BASH_SOURCE[0]}")/lib/ui.sh"
|
||||
|
||||
# =============================================================================
|
||||
# SCRIPT CONFIGURATION
|
||||
# =============================================================================
|
||||
|
||||
# Docker version requirements
|
||||
MIN_DOCKER_VERSION="20.10.0"
|
||||
RECOMMENDED_DOCKER_VERSION="24.0.0"
|
||||
|
||||
# Required system packages
|
||||
SYSTEM_PACKAGES=("curl" "wget" "git" "jq" "unzip" "software-properties-common" "apt-transport-https" "ca-certificates" "gnupg" "lsb-release")
|
||||
|
||||
# Python packages (for virtual environment) - LEGACY, no longer used
|
||||
# PYTHON_PACKAGES=("docker-compose" "pyyaml" "requests")
|
||||
|
||||
# =============================================================================
|
||||
# DOCKER INSTALLATION FUNCTIONS
|
||||
# =============================================================================
|
||||
|
||||
# Remove old Docker installations
# Stops Docker-related services, uninstalls distro-packaged Docker
# variants, and deletes leftover on-disk state so the official engine
# can be installed cleanly. Stop/remove steps are best-effort: absent
# services or packages are ignored.
# WARNING: deletes /var/lib/docker — all existing images, containers and
# volumes on this host are destroyed.
remove_old_docker() {
    print_info "Removing old Docker installations..."

    # Stop services (ignore failures if they are not installed/running)
    sudo systemctl stop docker docker.socket containerd 2>/dev/null || true

    # Remove packages (legacy names shipped by Debian/Ubuntu repos)
    sudo apt remove -y docker docker-engine docker.io containerd runc 2>/dev/null || true

    # Remove Docker data
    sudo rm -rf /var/lib/docker /var/lib/containerd

    print_success "Old Docker installations removed"
}
|
||||
|
||||
# Install Docker Engine from Docker's official apt repository.
# Adds the distro-specific signing key and repo, then installs
# docker-ce, the CLI, containerd, and the Compose V2 plugin.
# Returns: 0 on success, 1 if key download, apt update, or install fails.
install_docker_official() {
    print_info "Installing Docker Engine (official method)..."

    local distro
    distro=$(lsb_release -si | tr '[:upper:]' '[:lower:]')

    # Add Docker's official GPG key.
    # The pipeline's exit status is gpg's, so a failed curl would have
    # produced an empty keyring silently — check the pipeline explicitly.
    # --yes lets gpg overwrite a stale keyring on re-runs instead of failing.
    sudo mkdir -p /usr/share/keyrings
    if ! curl -fsSL "https://download.docker.com/linux/${distro}/gpg" \
        | sudo gpg --dearmor --yes -o /usr/share/keyrings/docker-archive-keyring.gpg; then
        print_error "Failed to download/install Docker GPG key"
        return 1
    fi

    # Add Docker repository
    echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/${distro} $(lsb_release -cs) stable" \
        | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null

    # Update package index
    sudo apt update || return 1

    # Install Docker Engine + Compose V2 plugin
    sudo apt install -y docker-ce docker-ce-cli containerd.io docker-compose-plugin || return 1

    print_success "Docker Engine installed"
}
|
||||
|
||||
# Fallback Docker install via the upstream get.docker.com script.
# Downloads to a private temp file (not the current directory) and
# removes it even when the install fails.
# Returns: 0 on success, 1 if download or install fails.
install_docker_convenience() {
    print_info "Installing Docker using convenience script..."

    local installer
    installer=$(mktemp) || return 1

    if ! curl -fsSL https://get.docker.com -o "$installer"; then
        print_error "Failed to download Docker convenience script"
        rm -f "$installer"
        return 1
    fi

    if ! sudo sh "$installer"; then
        print_error "Docker convenience script failed"
        rm -f "$installer"
        return 1
    fi
    rm -f "$installer"

    print_success "Docker installed via convenience script"
}
|
||||
|
||||
# Write /etc/docker/daemon.json with EZ-Homelab's logging, storage and
# networking settings. Creates /etc/docker if it does not exist yet and
# backs up any pre-existing configuration before overwriting it.
configure_docker_daemon() {
    print_info "Configuring Docker daemon..."

    local daemon_config='{
    "log-driver": "json-file",
    "log-opts": {
        "max-size": "10m",
        "max-file": "3"
    },
    "storage-driver": "overlay2",
    "iptables": false,
    "bridge": "none",
    "ip-masq": false
}'

    # /etc/docker may not exist before Docker's first start; without this
    # the tee below fails with "No such file or directory".
    sudo mkdir -p /etc/docker

    # Preserve any existing daemon config for manual recovery.
    if [[ -f /etc/docker/daemon.json ]]; then
        sudo cp /etc/docker/daemon.json /etc/docker/daemon.json.bak
    fi

    echo "$daemon_config" | sudo tee /etc/docker/daemon.json > /dev/null

    print_success "Docker daemon configured"
}
|
||||
|
||||
# Start and enable Docker service
# Enables Docker at boot, starts it now, and polls `docker info` for up
# to ~30 seconds until the daemon answers.
# Returns: 0 when the daemon is responsive, 1 if it never comes up.
start_docker_service() {
    print_info "Starting Docker service..."

    sudo systemctl enable docker
    sudo systemctl start docker

    # Wait for Docker to be ready — `docker info` only succeeds once the
    # daemon socket is accepting connections
    local retries=30
    while ! docker info >/dev/null 2>&1 && (( retries > 0 )); do
        sleep 1
        ((retries--))
    done

    if ! docker info >/dev/null 2>&1; then
        print_error "Docker service failed to start"
        return 1
    fi

    print_success "Docker service started and enabled"
}
|
||||
|
||||
# Ensure $EZ_USER belongs to the docker group so Docker can be used
# without sudo. Group changes only take effect after re-login (or
# `newgrp docker`), so the user is warned accordingly.
configure_user_permissions() {
    print_info "Configuring user permissions..."

    # Nothing to do when membership is already in place.
    if groups "$EZ_USER" | grep -q docker; then
        print_success "User already in docker group"
        return 0
    fi

    sudo usermod -aG docker "$EZ_USER"
    print_warning "User added to docker group. A reboot may be required for changes to take effect."
    print_info "Alternatively, run: newgrp docker"
}
|
||||
|
||||
# Test Docker installation
# Smoke-tests Docker by running hello-world, then reports the installed
# Docker and Compose V2 versions.
# Returns: 0 on success,
#          1 if the hello-world run fails,
#          2 (warning) when the Docker version string cannot be parsed.
test_docker_installation() {
    print_info "Testing Docker installation..."

    # Run hello-world container (output discarded; only exit code matters)
    if ! docker run --rm hello-world >/dev/null 2>&1; then
        print_error "Docker test failed"
        return 1
    fi

    # Check Docker version — extracts e.g. "24.0.5" from
    # "Docker version 24.0.5, build ..." (\K keeps only the match tail)
    local docker_version
    docker_version=$(docker --version | grep -oP 'Docker version \K[^,]+')

    if [[ -z "$docker_version" ]]; then
        print_warning "Could not determine Docker version"
        return 2
    fi

    print_success "Docker $docker_version installed and working"

    # Check Docker Compose V2 (the `docker compose` plugin, not the
    # legacy standalone docker-compose binary)
    if docker compose version >/dev/null 2>&1; then
        local compose_version
        compose_version=$(docker compose version | grep -oP 'v\K[^ ]+')
        print_success "Docker Compose V2 $compose_version available"
    else
        print_warning "Docker Compose V2 not available"
    fi
}
|
||||
|
||||
# =============================================================================
|
||||
# SYSTEM SETUP FUNCTIONS
|
||||
# =============================================================================
|
||||
|
||||
# Install the apt packages EZ-Homelab depends on.
# Verifies non-interactive sudo access, refreshes the package index,
# then installs only the SYSTEM_PACKAGES entries not yet installed.
# Returns: 0 on success, 1 on sudo/apt failure.
install_system_packages() {
    print_info "Installing system packages..."

    # Check if user has sudo access (non-interactively, so we fail fast
    # instead of hanging on a hidden password prompt)
    if ! sudo -n true 2>/dev/null; then
        print_error "This script requires sudo access to install system packages."
        print_error "Please run this script as a user with sudo privileges, or install the required packages manually:"
        print_error "  sudo apt update && sudo apt install -y ${SYSTEM_PACKAGES[*]}"
        return 1
    fi

    # Update package lists
    print_info "Updating package lists..."
    if ! sudo apt update; then
        print_error "Failed to update package lists. Please check your internet connection and apt configuration."
        return 1
    fi

    # Collect packages that are not fully installed. Note: `dpkg -l`
    # exits 0 even for removed-but-not-purged ('rc') packages, so query
    # the actual dpkg status instead.
    local missing_packages=()
    local package
    for package in "${SYSTEM_PACKAGES[@]}"; do
        if ! dpkg-query -W -f='${Status}' "$package" 2>/dev/null | grep -q "install ok installed"; then
            missing_packages+=("$package")
        fi
    done

    if [[ ${#missing_packages[@]} -gt 0 ]]; then
        print_info "Installing missing packages: ${missing_packages[*]}"
        if ! sudo apt install -y "${missing_packages[@]}"; then
            print_error "Failed to install required packages: ${missing_packages[*]}"
            print_error "Please install them manually: sudo apt install -y ${missing_packages[*]}"
            return 1
        fi
    else
        print_info "All required packages are already installed"
    fi

    print_success "System packages installed"
}
|
||||
|
||||
# Apply host-level tuning: raise the inotify watch limit (large compose
# deployments watch many files) and configure persistent, size-capped
# journald logging. Safe to re-run: the sysctl line is appended at most
# once instead of stacking duplicates on every invocation.
configure_system_settings() {
    print_info "Configuring system settings..."

    # Increase file watchers (for large deployments) — append only when
    # not already present, so repeated runs stay idempotent.
    if ! grep -q '^fs.inotify.max_user_watches=524288$' /etc/sysctl.conf 2>/dev/null; then
        echo "fs.inotify.max_user_watches=524288" | sudo tee -a /etc/sysctl.conf >/dev/null
    fi
    sudo sysctl -p >/dev/null 2>&1

    # Configure journald for better logging (persistent, bounded size)
    sudo mkdir -p /etc/systemd/journald.conf.d
    cat << EOF | sudo tee /etc/systemd/journald.conf.d/ez-homelab.conf >/dev/null
[Journal]
Storage=persistent
SystemMaxUse=100M
RuntimeMaxUse=50M
EOF

    print_success "System settings configured"
}
|
||||
|
||||
# Create required directories
# Creates the stacks root (root-created, then handed to $EZ_USER) and
# the log directory ($LOG_DIR, from lib/common.sh).
create_directories() {
    print_info "Creating required directories..."

    # /opt/stacks needs root to create under /opt, then is chowned so
    # the homelab user can manage stacks without sudo
    sudo mkdir -p /opt/stacks
    sudo chown "$EZ_USER:$EZ_USER" /opt/stacks

    mkdir -p "$LOG_DIR"

    print_success "Directories created"
}
|
||||
|
||||
# =============================================================================
|
||||
# NVIDIA GPU SETUP (OPTIONAL)
|
||||
# =============================================================================
|
||||
|
||||
# Check if NVIDIA setup is needed
# True only when nvidia-smi exists AND can actually talk to a GPU — a
# present-but-broken driver install (nvidia-smi erroring out) is treated
# as "no NVIDIA setup".
check_nvidia_setup_needed() {
    command_exists nvidia-smi && nvidia-smi >/dev/null 2>&1
}
|
||||
|
||||
# Interactively install NVIDIA drivers and Docker GPU support.
# Skipped silently in non-interactive mode or when the user declines.
# NOTE(review): relies on $non_interactive being a local of main() that
# is visible here via dynamic scoping — confirm callers set it.
# Returns: 0 on success or skip, 1 on download/install failure.
install_nvidia_drivers() {
    if $non_interactive; then
        print_info "Skipping NVIDIA setup (non-interactive mode)"
        return 0
    fi

    if ! ui_yesno "NVIDIA GPU detected. Install NVIDIA drivers and Docker GPU support?"; then
        print_info "Skipping NVIDIA setup"
        return 0
    fi

    print_info "Installing NVIDIA drivers..."

    # Build the distro-specific CUDA repo path, e.g. "ubuntu2204".
    local distro_id distro_rel keyring_url keyring_deb
    distro_id=$(lsb_release -si | tr '[:upper:]' '[:lower:]')
    distro_rel=$(lsb_release -sr | tr -d '.')
    keyring_url="https://developer.download.nvidia.com/compute/cuda/repos/${distro_id}${distro_rel}/x86_64/cuda-keyring_1.0-1_all.deb"

    # Download to a temp file so a failed run leaves nothing in the CWD,
    # and surface download/install errors instead of ignoring them.
    keyring_deb=$(mktemp --suffix=.deb) || return 1
    if ! wget -q -O "$keyring_deb" "$keyring_url"; then
        print_error "Failed to download NVIDIA CUDA keyring package"
        rm -f "$keyring_deb"
        return 1
    fi
    if ! sudo dpkg -i "$keyring_deb"; then
        print_error "Failed to install NVIDIA CUDA keyring package"
        rm -f "$keyring_deb"
        return 1
    fi
    rm -f "$keyring_deb"

    sudo apt update

    # Install NVIDIA driver and container support.
    # NOTE(review): nvidia-docker2 is deprecated upstream in favour of
    # nvidia-container-toolkit; kept here for behavioural compatibility.
    sudo apt install -y nvidia-driver-525 nvidia-docker2 || return 1

    # Restart Docker so it picks up the NVIDIA runtime
    sudo systemctl restart docker

    print_success "NVIDIA drivers installed"
}
|
||||
|
||||
# =============================================================================
|
||||
# MAIN FUNCTION
|
||||
# =============================================================================
|
||||
|
||||
# Entry point for the system-setup script.
# Parses flags (--skip-docker, --skip-nvidia, --no-ui, -v), runs
# pre-flight checks, installs system packages, configures the host,
# installs Docker (and optionally NVIDIA support), then prints
# next-step guidance. Exits 0 on success, 1 on fatal errors.
main() {
    local skip_docker=false
    local skip_nvidia=false
    local non_interactive=false      # also read by install_nvidia_drivers
    local verbose=false

    # Parse command line arguments
    while [[ $# -gt 0 ]]; do
        case $1 in
            -h|--help)
                ui_show_help "$SCRIPT_NAME"
                exit 0
                ;;
            --skip-docker)
                skip_docker=true
                ;;
            --skip-nvidia)
                skip_nvidia=true
                ;;
            --no-ui)
                non_interactive=true
                ;;
            -v|--verbose)
                verbose=true
                ;;
            *)
                print_error "Unknown option: $1"
                echo "Use --help for usage information"
                exit 1
                ;;
        esac
        shift
    done

    # Initialize script (logging etc. — from lib/common.sh)
    init_script "$SCRIPT_NAME"

    # Verbose mode traces every command for debugging
    if $verbose; then
        set -x
    fi

    print_info "Starting EZ-Homelab system setup..."
    print_info "This will install Docker and configure your system for EZ-Homelab."

    # Run pre-flight checks first (exit 1 = critical, 2 = warnings only)
    local preflight_exit=0
    "$(dirname "${BASH_SOURCE[0]}")/preflight.sh" --no-ui || preflight_exit=$?
    if [[ $preflight_exit -eq 1 ]]; then
        print_error "Pre-flight checks failed with critical errors. Please resolve issues before proceeding."
        exit 1
    elif [[ $preflight_exit -eq 2 ]]; then
        print_warning "Pre-flight checks completed with warnings. Setup will proceed and install missing dependencies."
    fi

    # Install system packages — hard requirement for everything below
    if ! run_with_progress "Installing system packages" "install_system_packages"; then
        print_error "Failed to install system packages. This is required for Docker installation."
        print_error "Please resolve the issue and re-run this script."
        print_error "Common solutions:"
        print_error " - Ensure you have sudo access: sudo -l"
        print_error " - Check internet connection: ping 8.8.8.8"
        print_error " - Update package lists: sudo apt update"
        print_error " - Install packages manually: sudo apt install -y ${SYSTEM_PACKAGES[*]}"
        exit 1
    fi

    # Configure system settings
    run_with_progress "Configuring system settings" "configure_system_settings"

    # Create directories
    run_with_progress "Creating directories" "create_directories"

    # Install Docker (unless skipped)
    if ! $skip_docker; then
        run_with_progress "Removing old Docker installations" "remove_old_docker"
        run_with_progress "Installing Docker" "install_docker_official"
        run_with_progress "Configuring Docker daemon" "configure_docker_daemon"
        run_with_progress "Starting Docker service" "start_docker_service"
        run_with_progress "Configuring user permissions" "configure_user_permissions"
        run_with_progress "Testing Docker installation" "test_docker_installation"
    else
        print_info "Skipping Docker installation (--skip-docker)"
    fi

    # NVIDIA setup (only if a working GPU is detected and not skipped)
    if ! $skip_nvidia && check_nvidia_setup_needed; then
        run_with_progress "Installing NVIDIA drivers" "install_nvidia_drivers"
    fi

    echo ""
    print_success "EZ-Homelab system setup complete!"

    # Docker group membership needs re-login/reboot to take effect
    if ! $skip_docker && ! groups "$EZ_USER" | grep -q docker; then
        print_warning "IMPORTANT: Please reboot your system for Docker group changes to take effect."
        print_info "Alternatively, run: newgrp docker"
        print_info "Then re-run this script or proceed to the next step."
    else
        print_info "You can now proceed to the pre-deployment wizard:"
        print_info "  ./pre-deployment-wizard.sh"
    fi

    exit 0
}
|
||||
|
||||
# Run main function
|
||||
main "$@"
|
||||
@@ -1,150 +0,0 @@
|
||||
# EZ-Homelab Enhanced Setup Scripts - Standards & Conventions
|
||||
|
||||
## Script Communication & Standards
|
||||
|
||||
### Exit Codes
|
||||
- **0**: Success - Script completed without issues
|
||||
- **1**: Error - Script failed, requires user intervention
|
||||
- **2**: Warning - Script completed but with non-critical issues
|
||||
- **3**: Skipped - Script skipped due to conditions (e.g., already installed)
|
||||
|
||||
### Logging
|
||||
- **Location**: `/var/log/ez-homelab/` (created by setup.sh)
|
||||
- **Format**: `YYYY-MM-DD HH:MM:SS [SCRIPT_NAME] LEVEL: MESSAGE`
|
||||
- **Levels**: INFO, WARN, ERROR, DEBUG
|
||||
- **Rotation**: Use logrotate with weekly rotation, keep 4 weeks
|
||||
|
||||
### Shared Variables (lib/common.sh)
|
||||
```bash
|
||||
# Repository and paths
|
||||
EZ_HOME="${EZ_HOME:-$HOME/EZ-Homelab}"
|
||||
STACKS_DIR="${STACKS_DIR:-/opt/stacks}"
|
||||
LOG_DIR="${LOG_DIR:-/var/log/ez-homelab}"
|
||||
|
||||
# User and system
|
||||
EZ_USER="${EZ_USER:-$USER}"
|
||||
EZ_UID="${EZ_UID:-$(id -u)}"
|
||||
EZ_GID="${EZ_GID:-$(id -g)}"
|
||||
|
||||
# Architecture detection
|
||||
ARCH="$(uname -m)"
|
||||
IS_ARM64=false
|
||||
[[ "$ARCH" == "aarch64" ]] && IS_ARM64=true
|
||||
|
||||
# Colors for output
|
||||
RED='\033[0;31m'
|
||||
GREEN='\033[0;32m'
|
||||
YELLOW='\033[1;33m'
|
||||
BLUE='\033[0;34m'
|
||||
NC='\033[0m' # No Color
|
||||
```
|
||||
|
||||
### Configuration Files
|
||||
- **Format**: Use YAML for complex configurations, .env for environment variables
|
||||
- **Location**: `scripts/enhanced-setup/config/` for script configs
|
||||
- **Validation**: All configs validated with `yq` (YAML) or `dotenv` (env)
|
||||
|
||||
### Function Naming
|
||||
- **Prefix**: Use script name (e.g., `preflight_check_disk()`)
|
||||
- **Style**: snake_case for functions, UPPER_CASE for constants
|
||||
- **Documentation**: All functions have header comments with purpose, parameters, return values
|
||||
|
||||
## UI/UX Design
|
||||
|
||||
### Dialog/Whiptail Theme
|
||||
- **Colors**: Blue headers (#0000FF), Green success (#00FF00), Red errors (#FF0000)
|
||||
- **Size**: Auto-size based on content, minimum 80x24
|
||||
- **Title**: "EZ-Homelab Setup - [Script Name]"
|
||||
- **Backtitle**: "EZ-Homelab Enhanced Setup Scripts v1.0"
|
||||
|
||||
### Menu Flow
|
||||
- **Navigation**: Tab/Arrow keys, Enter to select, Esc to cancel
|
||||
- **Progress**: Use `--gauge` for long operations with percentage
|
||||
- **Confirmation**: Always confirm destructive actions with "Are you sure? (y/N)"
|
||||
- **Help**: F1 key shows context help, `--help` flag for command-line usage
|
||||
|
||||
### User Prompts
|
||||
- **Style**: Clear, action-oriented (e.g., "Press Enter to continue" not "OK")
|
||||
- **Defaults**: Safe defaults (e.g., N for destructive actions)
|
||||
- **Validation**: Real-time input validation with error messages
|
||||
|
||||
## Error Handling & Recovery
|
||||
|
||||
### Error Types
|
||||
- **Critical**: Script cannot continue (exit 1)
|
||||
- **Warning**: Issue noted but script continues (exit 2)
|
||||
- **Recoverable**: User can fix and retry
|
||||
|
||||
### Recovery Mechanisms
|
||||
- **Backups**: Automatic backup of modified files (`.bak` extension)
|
||||
- **Rollback**: `--rollback` flag to undo last operation
|
||||
- **Resume**: Scripts detect partial completion and offer to resume
|
||||
- **Cleanup**: `--cleanup` flag removes temporary files and partial installs
|
||||
|
||||
### User Guidance
|
||||
- **Error Messages**: Include suggested fix (e.g., "Run 'sudo apt update' and retry")
|
||||
- **Logs**: Point to log file location for detailed errors
|
||||
- **Support**: Include link to documentation or issue tracker
|
||||
|
||||
## Testing & Validation
|
||||
|
||||
### Unit Testing
|
||||
- **Tool**: ShellCheck for syntax validation
|
||||
- **Coverage**: All scripts pass ShellCheck with no warnings
|
||||
- **Mocks**: Use `mktemp` and environment variables to mock external calls
|
||||
|
||||
### Integration Testing
|
||||
- **Environments**:
|
||||
- AMD64: Ubuntu 22.04 LTS VM
|
||||
- ARM64: Raspberry Pi OS (64-bit) on Pi 4
|
||||
- **Scenarios**: Clean install, partial install recovery, network failures
|
||||
- **Automation**: Use GitHub Actions for CI/CD with matrix testing
|
||||
|
||||
### Validation Checks
|
||||
- **Pre-run**: Scripts validate dependencies and environment
|
||||
- **Post-run**: Verify expected files, services, and configurations
|
||||
- **Cross-script**: Ensure scripts don't conflict (e.g., multiple network creations)
|
||||
|
||||
## Integration Points
|
||||
|
||||
### Existing EZ-Homelab Structure
|
||||
- **Repository**: Scripts read from `$EZ_HOME/docker-compose/` and `$EZ_HOME/.env`
|
||||
- **Runtime**: Deploy to `$STACKS_DIR/` matching current structure
|
||||
- **Services**: Leverage existing compose files without modification
|
||||
- **Secrets**: Use existing `.env` pattern, never commit secrets
|
||||
|
||||
### Service Dependencies
|
||||
- **Core First**: All scripts enforce core stack deployment before others
|
||||
- **Network Requirements**: Scripts create `traefik-network` and `homelab-network` as needed
|
||||
- **Port Conflicts**: Validate no conflicts before deployment
|
||||
- **Health Checks**: Use Docker health checks where available
|
||||
|
||||
### Version Compatibility
|
||||
- **Docker**: Support 20.10+ with Compose V2
|
||||
- **OS**: Debian 11+, Ubuntu 20.04+, Raspbian/Raspberry Pi OS
|
||||
- **Architecture**: AMD64 and ARM64 with PiWheels for Python packages
|
||||
|
||||
## Development Workflow
|
||||
|
||||
### Branching Strategy
|
||||
- **Main**: Production-ready code
|
||||
- **Develop**: Integration branch
|
||||
- **Feature**: `feature/script-name` for individual scripts
|
||||
- **Hotfix**: `hotfix/issue-description` for urgent fixes
|
||||
|
||||
### Code Reviews
|
||||
- **Required**: All PRs need review from at least one maintainer
|
||||
- **Checklist**: Standards compliance, testing, documentation
|
||||
- **Automation**: GitHub Actions for basic checks (ShellCheck, YAML validation)
|
||||
|
||||
### Documentation
|
||||
- **Inline**: All functions and complex logic documented
|
||||
- **README**: Each script has usage examples
|
||||
- **Updates**: PRD updated with implemented features
|
||||
- **Changelog**: Maintain `CHANGELOG.md` with version history
|
||||
|
||||
### Release Process
|
||||
- **Versioning**: Semantic versioning (MAJOR.MINOR.PATCH)
|
||||
- **Testing**: Full integration test before release
|
||||
- **Packaging**: Scripts distributed as part of EZ-Homelab repository
|
||||
- **Announcement**: Release notes with breaking changes highlighted
|
||||
@@ -1,600 +0,0 @@
|
||||
#!/bin/bash
|
||||
# EZ-Homelab Enhanced Setup Scripts - Update Management
|
||||
# Service update management with zero-downtime deployments
|
||||
|
||||
SCRIPT_NAME="update"
|
||||
SCRIPT_VERSION="1.0.0"
|
||||
|
||||
# Load common library
|
||||
source "$(dirname "${BASH_SOURCE[0]}")/lib/common.sh"
|
||||
source "$(dirname "${BASH_SOURCE[0]}")/lib/ui.sh"
|
||||
|
||||
# =============================================================================
|
||||
# UPDATE CONFIGURATION
|
||||
# =============================================================================
|
||||
|
||||
# Update settings
|
||||
UPDATE_CHECK_INTERVAL=86400 # 24 hours in seconds
|
||||
UPDATE_TIMEOUT=300 # 5 minutes timeout for updates
|
||||
ROLLBACK_TIMEOUT=180 # 3 minutes for rollback
|
||||
|
||||
# Update sources
|
||||
DOCKER_HUB_API="https://registry.hub.docker.com/v2"
|
||||
GITHUB_API="https://api.github.com"
|
||||
|
||||
# Update strategies
|
||||
UPDATE_STRATEGY_ROLLING="rolling" # Update one service at a time
|
||||
UPDATE_STRATEGY_BLUE_GREEN="blue-green" # Deploy new version alongside old
|
||||
UPDATE_STRATEGY_CANARY="canary" # Update subset of instances first
|
||||
|
||||
# Default update strategy
|
||||
DEFAULT_UPDATE_STRATEGY="$UPDATE_STRATEGY_ROLLING"
|
||||
|
||||
# =============================================================================
|
||||
# UPDATE STATE MANAGEMENT
|
||||
# =============================================================================
|
||||
|
||||
# Update state file
|
||||
UPDATE_STATE_FILE="$LOG_DIR/update_state.json"
|
||||
|
||||
# Create the JSON update-state file on first run.
# Ensures the containing directory exists first — on a fresh system the
# log directory may not have been created yet, in which case the
# redirection below would fail.
init_update_state() {
    if [[ ! -f "$UPDATE_STATE_FILE" ]]; then
        mkdir -p "$(dirname "$UPDATE_STATE_FILE")"
        cat > "$UPDATE_STATE_FILE" << EOF
{
    "last_check": 0,
    "updates_available": {},
    "update_history": [],
    "current_updates": {}
}
EOF
    fi
}
|
||||
|
||||
# Record update attempt
# Appends one entry to .update_history in the JSON state file.
# Arguments: $1 service, $2 old version, $3 new version, $4 status
# No-op when jq is unavailable (history is best-effort).
record_update_attempt() {
    local service="$1"
    local old_version="$2"
    local new_version="$3"
    local status="$4"
    local timestamp
    timestamp=$(date +%s)

    if command_exists "jq"; then
        # Write to a temp file first so a failed jq run cannot truncate
        # the state file; mv then swaps it in atomically.
        jq --arg service "$service" --arg old_version "$old_version" --arg new_version "$new_version" \
           --arg status "$status" --argjson timestamp "$timestamp" \
           '.update_history |= . + [{"service": $service, "old_version": $old_version, "new_version": $new_version, "status": $status, "timestamp": $timestamp}]' \
           "$UPDATE_STATE_FILE" > "${UPDATE_STATE_FILE}.tmp" && mv "${UPDATE_STATE_FILE}.tmp" "$UPDATE_STATE_FILE"
    fi
}
|
||||
|
||||
# Print a service's current update status: the tracked value from the
# state file, "idle" when the service is untracked, or "unknown" when
# jq or the state file is unavailable.
get_service_update_status() {
    local svc="$1"

    # Without jq or a state file there is nothing to query.
    if ! command_exists "jq" || [[ ! -f "$UPDATE_STATE_FILE" ]]; then
        echo "unknown"
        return
    fi

    jq -r ".current_updates[\"$svc\"] // \"idle\"" "$UPDATE_STATE_FILE"
}
|
||||
|
||||
# Set service update status
# Writes .current_updates[<service>] = <status> in the state file.
# Arguments: $1 service, $2 status string
# No-op when jq is unavailable.
set_service_update_status() {
    local service="$1"
    local status="$2"

    if command_exists "jq"; then
        # Temp-file + mv keeps the state file intact if jq fails mid-write.
        jq --arg service "$service" --arg status "$status" \
           '.current_updates[$service] = $status' \
           "$UPDATE_STATE_FILE" > "${UPDATE_STATE_FILE}.tmp" && mv "${UPDATE_STATE_FILE}.tmp" "$UPDATE_STATE_FILE"
    fi
}
|
||||
|
||||
# =============================================================================
|
||||
# VERSION CHECKING FUNCTIONS
|
||||
# =============================================================================
|
||||
|
||||
# Print the image tag of a running service's container.
# Correctly handles registries with ports: only a colon appearing AFTER
# the last '/' is a tag separator, so "registry:5000/app" yields
# "latest", not "5000/app".
# Outputs: tag on stdout, or "unknown".
# Returns: 0 on success, 1 when the service is not running or the image
#          cannot be determined.
get_current_version() {
    local service="$1"

    if ! is_service_running "$service"; then
        echo "unknown"
        return 1
    fi

    # Get image reference from the running container
    local image
    image=$(docker inspect "$service" --format '{{.Config.Image}}' 2>/dev/null || echo "")

    if [[ -z "$image" ]]; then
        echo "unknown"
        return 1
    fi

    # Only the last path segment can carry the tag colon.
    local last_segment="${image##*/}"
    if [[ "$last_segment" == *":"* ]]; then
        echo "${image##*:}"
    else
        echo "latest"
    fi
}
|
||||
|
||||
# Check whether a newer ":latest" image exists for a running service.
# Returns: 0 when an update is available, 1 otherwise (up to date, not
# running, or check failed).
# NOTE(review): this always compares against ":latest", so a service
# pinned to a versioned tag will report "update available" whenever
# latest diverges — confirm that is the intended policy.
check_docker_updates() {
    local service="$1"

    if ! is_service_running "$service"; then
        return 1
    fi

    local current_image
    current_image=$(docker inspect "$service" --format '{{.Config.Image}}' 2>/dev/null || echo "")

    if [[ -z "$current_image" ]]; then
        return 1
    fi

    # Split "repo[:tag]". A colon only counts as the tag separator when
    # it appears after the last '/', so registry ports
    # ("registry:5000/app") are not mistaken for tags.
    local repo tag
    if [[ "${current_image##*/}" == *":"* ]]; then
        repo="${current_image%:*}"
        tag="${current_image##*:}"
    else
        repo="$current_image"
        tag="latest"
    fi

    print_info "Checking updates for $service ($repo:$tag)"

    # Pull latest image to check for updates
    if docker pull "$repo:latest" >/dev/null 2>&1; then
        # Compare image IDs of the local tag vs the freshly pulled latest
        local current_id latest_id
        current_id=$(docker inspect "$repo:$tag" --format '{{.Id}}' 2>/dev/null || echo "")
        latest_id=$(docker inspect "$repo:latest" --format '{{.Id}}' 2>/dev/null || echo "")

        if [[ "$current_id" != "$latest_id" ]]; then
            print_info "Update available for $service: $tag -> latest"
            return 0
        else
            print_info "Service $service is up to date"
            return 1
        fi
    else
        print_warning "Failed to check updates for $service"
        return 1
    fi
}
|
||||
|
||||
# Scan every known service and report which ones have image updates.
# Returns: 0 when at least one update is available, 1 when none are.
check_all_updates() {
    print_info "Checking for service updates"

    local all_services=()
    mapfile -t all_services < <(find_all_services)

    # Collect the names of services with a newer image available.
    local pending=()
    local svc
    for svc in "${all_services[@]}"; do
        check_docker_updates "$svc" && pending+=("$svc")
    done

    if (( ${#pending[@]} == 0 )); then
        print_info "All services are up to date"
        return 1
    fi

    print_info "Updates available for: ${pending[*]}"
    return 0
}
|
||||
|
||||
# =============================================================================
|
||||
# UPDATE EXECUTION FUNCTIONS
|
||||
# =============================================================================
|
||||
|
||||
# Update a single service in place ("rolling" strategy): back up config,
# pull the new image, recreate the container, wait for it to come up,
# then health-check; rolls back on failure.
# Arguments: $1 - service name, $2 - new image reference (repo:tag)
# Returns: 0 on successful update, 1 on any failure (the outcome is
#          recorded in the update-state file either way).
update_service_rolling() {
    local service="$1"
    local new_image="$2"

    print_info "Updating service $service with rolling strategy"
    set_service_update_status "$service" "updating"

    local old_version
    old_version=$(get_current_version "$service")

    # Locate the compose file that defines this service
    local compose_file
    compose_file=$(get_service_compose_file "$service")

    if [[ -z "$compose_file" ]]; then
        print_error "Cannot find compose file for service $service"
        set_service_update_status "$service" "failed"
        return 1
    fi

    # Declare and assign separately so command-substitution failures are
    # not masked by `local` always succeeding (SC2155).
    local compose_dir compose_base
    compose_dir=$(dirname "$compose_file")
    compose_base=$(basename "$compose_file")

    # Backup current configuration
    print_info "Creating backup before update"
    "$SCRIPT_DIR/backup.sh" config --quiet

    # Update the service
    print_info "Pulling new image: $new_image"
    if ! docker pull "$new_image"; then
        print_error "Failed to pull new image: $new_image"
        set_service_update_status "$service" "failed"
        return 1
    fi

    print_info "Restarting service with new image"
    if (cd "$compose_dir" && docker compose -f "$compose_base" up -d "$service"); then
        # Wait up to UPDATE_TIMEOUT seconds for the container to start
        local count=0
        while (( count < UPDATE_TIMEOUT )) && ! is_service_running "$service"; do
            sleep 5
            ((count += 5))
        done

        if is_service_running "$service"; then
            # Give the service a moment to settle, then verify health
            sleep 10
            if check_service_health "$service"; then
                local new_version
                new_version=$(get_current_version "$service")
                print_success "Service $service updated successfully: $old_version -> $new_version"
                record_update_attempt "$service" "$old_version" "$new_version" "success"
                set_service_update_status "$service" "completed"
                return 0
            else
                print_error "Service $service failed health check after update"
                rollback_service "$service"
                return 1
            fi
        else
            print_error "Service $service failed to start after update"
            rollback_service "$service"
            return 1
        fi
    else
        print_error "Failed to update service $service"
        set_service_update_status "$service" "failed"
        return 1
    fi
}
|
||||
|
||||
# Roll a service back after a failed update.
# Currently a best-effort restart with the existing compose config;
# restoring from the pre-update backup is a future enhancement.
# Returns: 0 when the service is healthy again, 1 otherwise.
rollback_service() {
    local service="$1"

    print_warning "Rolling back service $service"
    set_service_update_status "$service" "rolling_back"

    # For now, just restart with current configuration
    # In a more advanced implementation, this would restore from backup
    local compose_file
    compose_file=$(get_service_compose_file "$service")

    if [[ -n "$compose_file" ]]; then
        # Split declaration and assignment so dirname/basename failures
        # are not masked by `local` (SC2155).
        local compose_dir compose_base
        compose_dir=$(dirname "$compose_file")
        compose_base=$(basename "$compose_file")

        if (cd "$compose_dir" && docker compose -f "$compose_base" restart "$service"); then
            # Let the container settle before the health probe
            sleep 10
            if check_service_health "$service"; then
                print_success "Service $service rolled back successfully"
                set_service_update_status "$service" "rolled_back"
                return 0
            fi
        fi
    fi

    print_error "Failed to rollback service $service"
    set_service_update_status "$service" "rollback_failed"
    return 1
}
|
||||
|
||||
# Check every known service and run a rolling update on each one with a
# newer image available.
# Arguments: $1 - strategy name (optional; only "rolling" is implemented,
#            the value is currently informational)
# Returns: 0 when no update failed, 1 when at least one failed.
update_all_services() {
    local strategy="${1:-$DEFAULT_UPDATE_STRATEGY}"

    print_info "Updating all services with $strategy strategy"

    local services
    mapfile -t services < <(find_all_services)
    local updated=0
    local failed=0

    local service
    for service in "${services[@]}"; do
        if check_docker_updates "$service"; then
            print_info "Updating service: $service"

            # Image currently used by the running container
            local current_image
            current_image=$(docker inspect "$service" --format '{{.Config.Image}}' 2>/dev/null || echo "")

            if [[ -n "$current_image" ]]; then
                # Strip the tag only when the last path segment carries
                # one, so registry ports ("registry:5000/app") are not
                # truncated the way `cut -d: -f1` would.
                local repo
                if [[ "${current_image##*/}" == *":"* ]]; then
                    repo="${current_image%:*}"
                else
                    repo="$current_image"
                fi
                local new_image="$repo:latest"

                if update_service_rolling "$service" "$new_image"; then
                    ((updated++))
                else
                    ((failed++))
                fi
            fi
        fi
    done

    print_info "Update summary: $updated updated, $failed failed"

    if (( failed > 0 )); then
        return 1
    else
        return 0
    fi
}
|
||||
|
||||
# =============================================================================
|
||||
# UPDATE MONITORING FUNCTIONS
|
||||
# =============================================================================
|
||||
|
||||
# Show update status
|
||||
# Print a formatted table of every service's update status and current
# version, then the five most recent update-history entries (requires jq
# and a persisted state file).
show_update_status() {
    print_info "Update Status"
    echo

    local all_services=()
    mapfile -t all_services < <(find_all_services)

    echo "Service Update Status:"
    echo "----------------------------------------"

    local svc state ver
    for svc in "${all_services[@]}"; do
        state=$(get_service_update_status "$svc")
        ver=$(get_current_version "$svc")
        printf "  %-20s %-12s %s\n" "$svc" "$state" "$ver"
    done
    echo

    # History display needs jq and the state file; silently omit otherwise.
    if command_exists "jq" && [[ -f "$UPDATE_STATE_FILE" ]]; then
        echo "Recent Update History:"
        echo "----------------------------------------"
        jq -r '.update_history | reverse | .[0:5][] | "\(.timestamp | strftime("%Y-%m-%d %H:%M")) \(.service) \(.old_version)->\(.new_version) [\(.status)]"' "$UPDATE_STATE_FILE" 2>/dev/null || echo "No update history available"
    fi
}
|
||||
|
||||
# Monitor ongoing updates
|
||||
# Continuously redraw the update-status screen every 10 seconds until the
# user interrupts with Ctrl+C (the loop has no other exit path).
monitor_updates() {
    print_info "Monitoring ongoing updates (Ctrl+C to stop)"

    while :; do
        clear
        show_update_status
        echo
        echo "Press Ctrl+C to stop monitoring"
        sleep 10
    done
}
|
||||
|
||||
# =============================================================================
|
||||
# AUTOMATED UPDATE FUNCTIONS
|
||||
# =============================================================================
|
||||
|
||||
# Setup automated updates
|
||||
# Generate a small wrapper script and install a cron entry that runs it.
# Arguments:
#   $1 - cron expression (default: "0 3 * * 0", weekly on Sunday at 3 AM)
# Globals read: SCRIPT_DIR (baked into the wrapper), LOG_DIR, HOME.
setup_automated_updates() {
    local schedule="${1:-0 3 * * 0}" # Weekly on Sunday at 3 AM

    print_info "Setting up automated updates with schedule: $schedule"

    # Create update script. Ensure the target directory exists first —
    # previously the redirection failed on a fresh system where
    # ~/.ez-homelab had not been created yet.
    local update_script="$HOME/.ez-homelab/update.sh"
    mkdir -p "$(dirname "$update_script")"
    cat > "$update_script" << EOF
#!/bin/bash
# Automated update script for EZ-Homelab

SCRIPT_DIR="$SCRIPT_DIR"

# Run updates
"\$SCRIPT_DIR/update.sh" all --quiet

# Log completion
echo "\$(date): Automated update completed" >> "$LOG_DIR/update.log"
EOF

    chmod +x "$update_script"

    # Add to crontab unless an entry for this exact script is present.
    # Match the full generated path as a fixed string (-F) instead of any
    # line containing "update.sh", which could collide with unrelated jobs.
    local cron_entry="$schedule $update_script"
    if ! crontab -l 2>/dev/null | grep -qF "$update_script"; then
        (crontab -l 2>/dev/null; echo "$cron_entry") | crontab -
        print_info "Added automated updates to crontab: $cron_entry"
    fi

    print_success "Automated updates configured"
}
|
||||
|
||||
# Remove automated updates
|
||||
# Drop every crontab entry that references update.sh.
remove_automated_updates() {
    print_info "Removing automated updates"

    # Re-install the crontab minus any update.sh lines. '|| true' keeps the
    # no-matches case (grep exit 1) from aborting under 'set -e'.
    local kept
    kept=$(crontab -l 2>/dev/null | grep -v "update.sh") || true
    crontab - <<< "$kept"

    print_success "Automated updates removed"
}
|
||||
|
||||
# =============================================================================
|
||||
# MAIN FUNCTION
|
||||
# =============================================================================
|
||||
|
||||
# Entry point: parse arguments, initialise logging and persisted update
# state, verify Docker is reachable, then dispatch to the requested action.
main() {
    local action=""
    local service=""
    local strategy="$DEFAULT_UPDATE_STRATEGY"
    local schedule=""
    # NOTE(review): 'quiet' is parsed (-q/--quiet) but never read below —
    # confirm whether output suppression was meant to be wired up.
    local quiet=false

    # Parse command line arguments. Hitting an action keyword breaks out of
    # this loop; words after the action are collected further down.
    while [[ $# -gt 0 ]]; do
        case $1 in
            -h|--help)
                cat << EOF
EZ-Homelab Update Management

USAGE:
    update [OPTIONS] <ACTION> [SERVICE]

ACTIONS:
    check          Check for available updates
    update         Update a service or all services
    status         Show update status and history
    monitor        Monitor ongoing updates
    rollback       Rollback a service
    schedule       Setup automated updates
    unschedule     Remove automated updates

OPTIONS:
    -s, --strategy STRATEGY    Update strategy (rolling, blue-green, canary)
    --schedule CRON            Cron schedule for automated updates
    -q, --quiet                Suppress non-error output

STRATEGIES:
    rolling        Update one service at a time (default)
    blue-green     Deploy new version alongside old
    canary         Update subset of instances first

EXAMPLES:
    update check                   # Check for updates
    update update traefik          # Update Traefik service
    update update all              # Update all services
    update status                  # Show update status
    update rollback traefik        # Rollback Traefik
    update monitor                 # Monitor updates
    update schedule "0 3 * * 0"    # Weekly updates Sunday 3 AM

EOF
                exit 0
                ;;
            -s|--strategy)
                strategy="$2"
                shift 2
                ;;
            --schedule)
                schedule="$2"
                shift 2
                ;;
            -q|--quiet)
                quiet=true
                shift
                ;;
            check|update|status|monitor|rollback|schedule|unschedule)
                action="$1"
                shift
                break
                ;;
            *)
                # A bare word seen before the action keyword is taken as the
                # service name; a second one is an error.
                if [[ -z "$service" ]]; then
                    service="$1"
                else
                    print_error "Too many arguments"
                    exit 1
                fi
                shift
                ;;
        esac
    done

    # Handle remaining arguments (words following the action keyword).
    while [[ $# -gt 0 ]]; do
        if [[ -z "$service" ]]; then
            service="$1"
        else
            print_error "Too many arguments"
            exit 1
        fi
        shift
    done

    # Initialize script environment, logging, and persisted update state.
    init_script "$SCRIPT_NAME" "$SCRIPT_VERSION"
    init_logging "$SCRIPT_NAME"
    init_update_state

    # Check prerequisites — every action below talks to Docker.
    if ! docker_available; then
        print_error "Docker is not available"
        exit 1
    fi

    # Execute action
    case "$action" in
        check)
            # Single-service check when a name was given, else scan all.
            if [[ -n "$service" ]]; then
                check_docker_updates "$service"
            else
                check_all_updates
            fi
            ;;
        update)
            # "update all" (or no name) walks every service; otherwise bump
            # the named service to its repository's :latest tag.
            if [[ "$service" == "all" || -z "$service" ]]; then
                update_all_services "$strategy"
            else
                # Get latest image for the service
                local current_image
                current_image=$(docker inspect "$service" --format '{{.Config.Image}}' 2>/dev/null || echo "")

                if [[ -n "$current_image" ]]; then
                    local repo
                    repo=$(echo "$current_image" | cut -d: -f1)
                    local new_image="$repo:latest"

                    update_service_rolling "$service" "$new_image"
                else
                    print_error "Cannot determine current image for service $service"
                    exit 1
                fi
            fi
            ;;
        status)
            show_update_status
            ;;
        monitor)
            monitor_updates
            ;;
        rollback)
            # Rollback is destructive; refuse to guess the target service.
            if [[ -n "$service" ]]; then
                rollback_service "$service"
            else
                print_error "Service name required for rollback"
                exit 1
            fi
            ;;
        schedule)
            # Empty $schedule lets setup_automated_updates use its default.
            setup_automated_updates "$schedule"
            ;;
        unschedule)
            remove_automated_updates
            ;;
        "")
            print_error "No action specified. Use --help for usage information."
            exit 1
            ;;
        *)
            print_error "Unknown action: $action"
            exit 1
            ;;
    esac
}

# Run main function
main "$@"
|
||||
@@ -1,372 +0,0 @@
|
||||
#!/bin/bash
# EZ-Homelab Enhanced Setup Scripts - Multi-Purpose Validation
# Validate configurations, compose files, and deployment readiness

# Script identity — consumed by init_script and log-file naming.
SCRIPT_NAME="validate"
SCRIPT_VERSION="1.0.0"

# Load common library (print_*, docker_available, command_exists, …)
# and the interactive UI helpers, resolved relative to this script.
source "$(dirname "${BASH_SOURCE[0]}")/lib/common.sh"
source "$(dirname "${BASH_SOURCE[0]}")/lib/ui.sh"
|
||||
|
||||
# =============================================================================
|
||||
# VALIDATION FUNCTIONS
|
||||
# =============================================================================
|
||||
|
||||
# Validate .env file
|
||||
# Confirm the homelab .env file exists; later checks depend on it.
# Returns 0 when present, 1 (with an error message) when missing.
validate_env_file() {
    local env_path="$EZ_HOME/.env"

    [[ -f "$env_path" ]] && return 0

    print_error ".env file not found at $env_path"
    return 1
}
|
||||
|
||||
# Validate Docker Compose files
|
||||
# Validate Docker Compose files for one service ($1) or, with no argument,
# every docker-compose.yml under $EZ_HOME/docker-compose.
# Each file gets a YAML syntax check and, when the Docker CLI with the
# compose plugin is available, a 'docker compose config' dry run.
# Returns 0 when every file validates, 1 otherwise.
validate_compose_files() {
    local service="${1:-}"

    print_info "Validating Docker Compose files..."

    local compose_files
    if [[ -n "$service" ]]; then
        # Single-service path builds the expected filename directly; its
        # existence is verified inside the loop below.
        compose_files=("$EZ_HOME/docker-compose/$service/docker-compose.yml")
    else
        mapfile -t compose_files < <(find "$EZ_HOME/docker-compose" -name "docker-compose.yml" -type f 2>/dev/null)
    fi

    if [[ ${#compose_files[@]} -eq 0 ]]; then
        print_error "No Docker Compose files found"
        return 1
    fi

    # Count errors instead of aborting so every file gets reported.
    local errors=0
    for file in "${compose_files[@]}"; do
        if [[ ! -f "$file" ]]; then
            print_error "Compose file not found: $file"
            errors=$((errors + 1))
            continue
        fi

        # Validate YAML syntax
        if ! validate_yaml "$file"; then
            print_error "Invalid YAML in $file"
            errors=$((errors + 1))
            continue
        fi

        # Validate with docker compose config (skipped silently when the
        # Docker CLI or its compose plugin is unavailable).
        if command_exists docker && docker compose version >/dev/null 2>&1; then
            if ! docker compose -f "$file" config >/dev/null 2>&1; then
                print_error "Invalid Docker Compose configuration in $file"
                errors=$((errors + 1))
                continue
            fi
        fi

        print_success "Validated: $file"
    done

    if [[ $errors -gt 0 ]]; then
        print_error "Found $errors error(s) in compose files"
        return 1
    fi

    print_success "All compose files validated"
    return 0
}
|
||||
|
||||
# Validate Docker networks
|
||||
# Verify that the Docker networks required by the stacks exist.
# Returns 0 when all exist, 1 when any are missing, 2 (soft skip) when
# Docker itself is unavailable.
validate_networks() {
    print_info "Validating Docker networks..."

    if ! docker_available; then
        print_warning "Docker not available, skipping network validation"
        return 2
    fi

    local required_networks=("traefik-network" "homelab-network")
    local missing_networks=()

    # List existing networks once, instead of spawning 'docker network ls'
    # for every required network inside the loop.
    local existing_networks
    existing_networks=$(docker network ls --format "{{.Name}}")

    local network
    for network in "${required_networks[@]}"; do
        # -x: whole-line match, -F: fixed string (no regex interpretation).
        if ! grep -qxF -- "$network" <<< "$existing_networks"; then
            missing_networks+=("$network")
        fi
    done

    if [[ ${#missing_networks[@]} -gt 0 ]]; then
        print_error "Missing Docker networks: ${missing_networks[*]}"
        print_error "Run ./pre-deployment-wizard.sh to create networks"
        return 1
    fi

    print_success "All required networks exist"
    return 0
}
|
||||
|
||||
# Validate SSL certificates
|
||||
# Check that Traefik's ACME certificate store exists.
# Returns 0 when found, 2 (soft skip) when Docker or Traefik is down or
# acme.json is absent — certificates are created on Traefik's first run.
validate_ssl_certificates() {
    print_info "Validating SSL certificates..."

    # Certificate checks only make sense with Docker and Traefik running.
    if ! docker_available; then
        print_warning "Docker not available, skipping SSL validation"
        return 2
    fi
    if ! service_running traefik 2>/dev/null; then
        print_warning "Traefik not running, skipping SSL validation"
        return 2
    fi

    local cert_store="$STACKS_DIR/core/traefik/acme.json"
    if [[ -f "$cert_store" ]]; then
        print_success "SSL certificate file found"
        return 0
    fi

    print_warning "SSL certificate file not found: $cert_store"
    print_warning "Certificates will be obtained on first Traefik run"
    return 2
}
|
||||
|
||||
# Validate service dependencies
|
||||
# Validate that a named service's directory and compose file exist.
# With no argument the check is a reported no-op success.
validate_service_dependencies() {
    local target="${1:-}"

    print_info "Validating service dependencies..."

    # Basic implementation — could be expanded to check per-service
    # requirements beyond directory/compose-file presence.

    if [[ -z "$target" ]]; then
        print_success "Service dependencies validation skipped (no specific service)"
        return 0
    fi

    local svc_dir="$EZ_HOME/docker-compose/$target"
    local svc_compose="$svc_dir/docker-compose.yml"

    if [[ ! -d "$svc_dir" ]]; then
        print_error "Service directory not found: $svc_dir"
        return 1
    fi
    if [[ ! -f "$svc_compose" ]]; then
        print_error "Compose file not found: $svc_compose"
        return 1
    fi

    print_success "Service $target dependencies validated"
    return 0
}
|
||||
|
||||
# =============================================================================
|
||||
# REPORT GENERATION
|
||||
# =============================================================================
|
||||
|
||||
# Generate validation report
|
||||
# Write a timestamped validation report under $LOG_DIR.
# NOTE: each result line re-runs its validation function with output
# discarded, so generating the report repeats work already done by main().
# Globals read: LOG_DIR, OS_NAME, OS_VERSION, ARCH, LOG_FILE.
generate_validation_report() {
    local report_file="$LOG_DIR/validation-report-$(date +%Y%m%d-%H%M%S).txt"

    {
        echo "EZ-Homelab Validation Report"
        echo "============================"
        echo "Date: $(date)"
        echo "System: $OS_NAME $OS_VERSION ($ARCH)"
        echo ""
        echo "Validation Results:"
        # Two-state checks: exit 0 => PASS, anything else => FAIL.
        echo "- Environment: $(validate_env_file >/dev/null 2>&1 && echo "PASS" || echo "FAIL")"
        echo "- Compose Files: $(validate_compose_files >/dev/null 2>&1 && echo "PASS" || echo "FAIL")"
        # Three-state checks: 0 => PASS, 1 => FAIL, 2 => SKIP (soft warning).
        echo "- Networks: $(validate_networks >/dev/null 2>&1; case $? in 0) echo "PASS";; 1) echo "FAIL";; 2) echo "SKIP";; esac)"
        echo "- SSL Certificates: $(validate_ssl_certificates >/dev/null 2>&1; case $? in 0) echo "PASS";; 1) echo "FAIL";; 2) echo "SKIP";; esac)"
        echo ""
        echo "Log file: $LOG_FILE"
    } > "$report_file"

    print_info "Report saved to: $report_file"
}
|
||||
|
||||
# =============================================================================
|
||||
# MAIN FUNCTION
|
||||
# =============================================================================
|
||||
|
||||
# Entry point: parse flags, run the selected validations, tally results,
# write a report, and exit 0 (all pass) / 1 (failures) / 2 (warnings only).
main() {
    local service=""
    local check_env=true
    local check_compose=true
    local check_networks=true
    local check_ssl=true
    # NOTE(review): parsed (--no-ui) but never read below — confirm intent.
    local non_interactive=false
    local verbose=false

    # Parse command line arguments
    while [[ $# -gt 0 ]]; do
        case $1 in
            -h|--help)
                cat << EOF
EZ-Homelab Multi-Purpose Validation

USAGE:
    $SCRIPT_NAME [OPTIONS] [SERVICE]

ARGUMENTS:
    SERVICE    Specific service to validate (optional)

OPTIONS:
    -h, --help       Show this help message
    -v, --verbose    Enable verbose logging
    --no-env         Skip .env file validation
    --no-compose     Skip compose file validation
    --no-networks    Skip network validation
    --no-ssl         Skip SSL certificate validation
    --no-ui          Run without interactive UI

EXAMPLES:
    $SCRIPT_NAME              # Validate everything
    $SCRIPT_NAME traefik      # Validate only Traefik
    $SCRIPT_NAME --no-ssl     # Skip SSL validation

EOF
                exit 0
                ;;
            -v|--verbose)
                verbose=true
                ;;
            --no-env)
                check_env=false
                ;;
            --no-compose)
                check_compose=false
                ;;
            --no-networks)
                check_networks=false
                ;;
            --no-ssl)
                check_ssl=false
                ;;
            --no-ui)
                non_interactive=true
                ;;
            -*)
                print_error "Unknown option: $1"
                echo "Use --help for usage information"
                exit 1
                ;;
            *)
                if [[ -z "$service" ]]; then
                    service="$1"
                else
                    print_error "Multiple services specified. Use only one service name."
                    exit 1
                fi
                ;;
        esac
        shift
    done

    # Initialize script
    init_script "$SCRIPT_NAME"

    if $verbose; then
        set -x
    fi

    print_info "Starting EZ-Homelab validation..."

    local total_checks=0
    local passed=0
    local warnings=0
    local failed=0

    # Run validations. Counters use plain arithmetic assignment throughout:
    # ((var++)) returns non-zero while var is 0 and would trip 'set -e';
    # the original inconsistently mixed both forms.
    if $check_env; then
        total_checks=$((total_checks + 1))
        # Run check and capture exit code without aborting under errexit.
        local exit_code=0
        validate_env_file || exit_code=$?

        if [[ $exit_code -eq 0 ]]; then
            passed=$((passed + 1))
        else
            failed=$((failed + 1))
        fi
    fi

    if $check_compose; then
        total_checks=$((total_checks + 1))
        local exit_code=0
        validate_compose_files "$service" || exit_code=$?

        if [[ $exit_code -eq 0 ]]; then
            passed=$((passed + 1))
        else
            failed=$((failed + 1))
        fi
    fi

    if $check_networks; then
        total_checks=$((total_checks + 1))
        local exit_code=0
        validate_networks || exit_code=$?

        # Three-state result: 0 pass, 1 fail, 2 soft warning (skipped).
        case $exit_code in
            0) passed=$((passed + 1)) ;;
            1) failed=$((failed + 1)) ;;
            2) warnings=$((warnings + 1)) ;;
        esac
    fi

    if $check_ssl; then
        total_checks=$((total_checks + 1))
        local exit_code=0
        validate_ssl_certificates || exit_code=$?

        case $exit_code in
            0) passed=$((passed + 1)) ;;
            1) failed=$((failed + 1)) ;;
            2) warnings=$((warnings + 1)) ;;
        esac
    fi

    # Service-specific validation (only when a service name was given).
    if [[ -n "$service" ]]; then
        total_checks=$((total_checks + 1))
        local exit_code=0
        validate_service_dependencies "$service" || exit_code=$?

        if [[ $exit_code -eq 0 ]]; then
            passed=$((passed + 1))
        else
            failed=$((failed + 1))
        fi
    fi

    echo ""
    print_info "Validation complete: $passed passed, $warnings warnings, $failed failed"

    # Generate report
    generate_validation_report

    # Exit code mirrors the worst observed result.
    if [[ $failed -gt 0 ]]; then
        print_error "Validation failed. Check the log file: $LOG_FILE"
        exit 1
    elif [[ $warnings -gt 0 ]]; then
        print_warning "Validation passed with warnings"
        exit 2
    else
        print_success "All validations passed!"
        exit 0
    fi
}

# Run main function
main "$@"
|
||||
File diff suppressed because it is too large
Load Diff
Reference in New Issue
Block a user