---
# deploy.yml — One-command host rebuild
#
# Rebuilds a host from bare metal to fully configured using repo state.
# Assumes: SSH access via Tailscale, root user, host is in inventory.
#
# Usage:
#   Full fleet:  ansible-playbook deploy.yml
#   Single host: ansible-playbook deploy.yml --limit helsinki-a
#   Dry run:     ansible-playbook deploy.yml --check --diff
#
# Prerequisites:
#   - Target host has SSH access via Tailscale
#   - Target host has a base OS installed (Debian/FreeBSD)
#   - ansible-galaxy install -r requirements.yml

# ──────────────────────────────────────────────
# Stage 1: Common baseline — all hosts
# ──────────────────────────────────────────────
- name: "Stage 1: Common baseline"
  hosts: all
  tags: [common, baseline]
  roles:
    - role: common
    - role: dotfiles

# ──────────────────────────────────────────────
# Stage 2: Docker engine — hosts that run containers
# ──────────────────────────────────────────────
- name: "Stage 2: Docker engine"
  hosts: docker_hosts
  tags: [docker]
  roles:
    - role: docker

# ──────────────────────────────────────────────
# Stage 4: Per-host services
# ──────────────────────────────────────────────
# NOTE(review): stage numbering jumps from 2 to 4 — confirm whether a
# Stage 3 was removed intentionally or lost from this file.

# helsinki-a: Caddy reverse proxy + status page
- name: "Stage 4a: Caddy + status page (helsinki-a)"
  hosts: helsinki-a
  tags: [services, caddy, status_page]
  roles:
    - role: caddy
    - role: status_page
    - role: systemd_services

# All docker hosts: deploy compose services
- name: "Stage 4b: Docker services"
  hosts: docker_hosts
  tags: [services, docker_services]
  roles:
    - role: docker_services

# nuremberg-a: Mail server
# NOTE(review): this play defines no roles or tasks — as written it only
# gathers facts on nuremberg-a. Presumably a mail role/task list was lost;
# confirm against the roles directory before relying on this stage.
- name: "Stage 4c: Mail server (nuremberg-a)"
  hosts: nuremberg-a
  tags: [services, mail]

# london-b: Media stack + backups
- name: "Stage 4d: Media stack + backups (london-b)"
  hosts: london-b
  tags: [services, london-b]
  roles:
    - role: media_stack
    - role: backup

# copenhagen-a: Gaming servers
- name: "Stage 4e: Gaming servers (copenhagen-a)"
  hosts: copenhagen-a
  tags: [services, gaming]
  roles:
    - role: systemd_services
    - role: mariadb

# london-a: Cockpit VM host (Debian)
- name: "Stage 4f: Cockpit VM host (london-a)"
  hosts: london-a
  tags: [services, cockpit]
  tasks:
    - name: Install cockpit and cockpit-machines
      ansible.builtin.apt:
        name:
          - cockpit
          - cockpit-machines
        state: present
        update_cache: true

    - name: Enable and start cockpit
      ansible.builtin.service:
        name: cockpit
        state: started
        enabled: true

# ──────────────────────────────────────────────
# Stage 4g: ZFS scrub scheduling — zfs_hosts
# ──────────────────────────────────────────────
- name: "Stage 4g: ZFS scrub scheduling"
  hosts: zfs_hosts
  tags: [services, zfs]
  roles:
    - role: zfs

# ──────────────────────────────────────────────
# Stage 5: Verification
# ──────────────────────────────────────────────
- name: "Stage 5: Post-deploy verification"
  hosts: all
  tags: [verify]
  tasks:
    - name: Check SSH is working
      ansible.builtin.ping:

    - name: Gather uptime
      ansible.builtin.command: uptime
      changed_when: false
      register: uptime_result

    # Best-effort: failed_when false so hosts without Docker (or with a
    # stopped daemon) don't fail the verification play. The '{{' / '}}'
    # literals keep Jinja from consuming Docker's Go-template braces.
    - name: Check Docker containers (where applicable)
      ansible.builtin.command: >-
        docker ps --format
        "table {{ '{{' }}.Names{{ '}}' }}\t{{ '{{' }}.Status{{ '}}' }}"
      changed_when: false
      register: docker_status
      when: "'docker_hosts' in group_names"
      failed_when: false

    - name: Report host status
      ansible.builtin.debug:
        msg: |
          Host: {{ inventory_hostname }} ({{ host_description | default('no description') }})
          Uptime: {{ uptime_result.stdout }}
          Docker: {{ docker_status.stdout_lines | default(['N/A']) | join('\n') }}