1.0.0 Release IaaS

This commit is contained in:
2026-03-15 04:41:02 +09:00
commit a7365da431
292 changed files with 36059 additions and 0 deletions

7
.gitignore vendored Normal file
View File

@@ -0,0 +1,7 @@
data/bin/*
data/volumes/*
data/images/*
docs/archives/textfiles/
docs/notes/*
*.sql
!.gitkeep

26
README.md Normal file
View File

@@ -0,0 +1,26 @@
# ilnmors homelab README
This homelab project implements a single-node, on-premise IaaS system. The homelab contains virtual machines divided by role, such as private firewall, DNS, PKI, LDAP and database, and SSO \(OIDC\). A standard domain is used to implement this system without depending on specific vendors. All components are defined as code and provisioned by IaC \(Ansible\), except for the hypervisor's initial configuration.
## RTO times
- Feb/25/2026 - Reprovisioning Hypervisor and vms
- RTO: 1 hour 30 min - verified
- Manual install and set vmm: 20 min
- Create and reprovision fw including services: 15 min
- Create and reprovision infra including services: 20 min
- Create and reprovision auth including services: 10 min
- Create and reprovision app except services: 10 min
- Intermediate tasks (ACME issuance, DNS propagation, etc.): 15 min
- Mar/5/2026 - Reprovisioning Hardware and Hypervisor and vms
- RTO: 2 hours 20 min
- console: 15 min - verified
- certificate: 0 min \(When it needs to be created, RTO will be 20 min) - not verified
- wireguard: 0 min \(When it needs to be created, RTO will be 1 min) - not verified
- hypervisor\(+fw\): 45 min - verified
- switch: 1 min - verified
- dsm: 30 min - verified
- kopia: 0 min \(When it needs to be created, RTO will be 10 min) - verified
- Extra vms: 30 min - verified
- Etc: 30 min

57
ansible/ansible.cfg Normal file
View File

@@ -0,0 +1,57 @@
[defaults]
# (boolean) This controls whether an Ansible playbook should prompt for a login password. If using SSH keys for authentication, you probably do not need to change this setting.
ask_pass=False
# (boolean) This controls whether an Ansible playbook should prompt for a vault password.
ask_vault_pass=True
# (pathlist) Comma-separated list of Ansible inventory sources
inventory=./inventory
# (pathspec) Colon-separated paths in which Ansible will search for Roles.
roles_path=./roles
# (string) Set the main callback used to display Ansible output. You can only have one at a time.
# You can have many other callbacks, but just one can be in charge of stdout.
# See :ref:`callback_plugins` for a list of available options.
stdout_callback=default
# (boolean) Set this to "False" if you want to avoid host key checking by the underlying connection plugin Ansible uses to connect to the host.
# Please read the documentation of the specific connection plugin used for details.
host_key_checking=True
# (string) Path to the Python interpreter to be used for module execution on remote targets, or an automatic discovery mode. Supported discovery modes are ``auto`` (the default), ``auto_silent``, ``auto_legacy``, and ``auto_legacy_silent``. All discovery modes match against an ordered list of well-known Python interpreter locations. The fallback behavior will issue a warning that the interpreter should be set explicitly (since interpreters installed later may change which one is used). This warning behavior can be disabled by setting ``auto_silent``. The ``auto_legacy`` modes are deprecated and behave the same as their respective ``auto`` modes. They exist for backward-compatibility with older Ansible releases that always defaulted to ``/usr/bin/python3``, which will use that interpreter if present.
interpreter_python=auto_silent
# (path) A number of non-playbook CLIs have a ``--playbook-dir`` argument; this sets the default value for it.
playbook_dir=./playbooks
# (bool) This controls whether a failed Ansible playbook should create a .retry file.
retry_files_enabled=False
# (boolean) This option controls if notified handlers run on a host even if a failure occurs on that host.
# When false, the handlers will not run if a failure has occurred on a host.
# This can also be set per play or on the command line. See Handlers and Failure for more details.
force_handlers=True
[privilege_escalation]
# (boolean) Toggles the use of privilege escalation, allowing you to 'become' another user after login.
become=True
# (boolean) Toggle to prompt for privilege escalation password.
become_ask_pass=False
# (string) Privilege escalation method to use when `become` is enabled.;
become_method=sudo
# (string) The user your login/remote user 'becomes' when using privilege escalation, most systems will use 'root' when no user is specified.;
become_user=root
[connection]
# (boolean) This is a global option, each connection plugin can override either by having more specific options or not supporting pipelining at all.
# Pipelining, if supported by the connection plugin, reduces the number of network operations required to execute a module on the remote server, by executing many Ansible modules without actual file transfer.
# It can result in a very significant performance improvement when enabled.
# However this conflicts with privilege escalation (become). For example, when using 'sudo:' operations you must first disable 'requiretty' in /etc/sudoers on all managed hosts, which is why it is disabled by default.
# This setting will be disabled if ``ANSIBLE_KEEP_REMOTE_FILES`` is enabled.
pipelining=True

39
ansible/convention.yaml Normal file
View File

@@ -0,0 +1,39 @@
---
# `""` should be used for every string value
# except name, boolean, variable (environment) names, and numbers (decimal only)
- name: Convention task
delegate_to: "host"
delegate_facts: true
environment:
env: "environment"
ansible.builtin.file:
# Identifier: name:, src, dest, url
path: "/path/of/example"
# State
state: "directory"
owner: "root"
group: "root"
mode: "0640"
# optional data
recurse: true
loop:
- "list"
loop_control:
label: "{{ item }}"
become: true
# become_user: "root"
when: condition.stat.exists
changed_when: condition.rc != 0
failed_when: condition.rc == 0
register: "convention_task"
notify: "notification_example"
listen: "notification_example"
no_log: true
run_once: true
tags:
- "always"
- "init"
- "upgrade"
- "update"
# when: "'tags' is not in ansible_run_tags"

View File

@@ -0,0 +1,74 @@
---
# Global vars
ansible_ssh_private_key_file: "/etc/secrets/{{ hostvars['console']['node']['uid'] }}/id_console"
# URL information. You can use {{ infra_uri['services'] | split(':') | first|last }} to separate the domain and ports
infra_uri:
crowdsec:
domain: "crowdsec.ilnmors.internal"
ports:
https: "8080"
bind:
domain: "bind.ilnmors.internal"
ports:
dns: "53"
blocky:
domain: "blocky.ilnmors.internal"
ports:
https: "443"
dns: "53"
postgresql:
domain: "postgresql.ilnmors.internal"
ports:
tcp: "5432" # postgresql db connection port
ldap:
domain: "ldap.ilnmors.internal"
ports:
http: "17170"
ldaps: "636"
ca:
domain: "ca.ilnmors.internal"
ports:
https: "9000"
prometheus:
domain: "prometheus.ilnmors.internal"
ports:
https: "9090"
loki:
domain: "loki.ilnmors.internal"
ports:
https: "3100"
nas:
domain: "nas.ilnmors.internal"
ports:
https: "5001"
kopia:
domain: "nas.ilnmors.internal"
ports:
https: "51515"
version:
packages:
sops: "3.12.1"
step: "0.29.0"
kopia: "0.22.3"
blocky: "0.28.2"
alloy: "1.13.0"
# telegraf: "1.37.1"
containers:
# common
caddy: "2.10.2"
# infra
step: "0.29.0"
ldap: "v0.6.2"
x509-exporter: "3.19.1"
prometheus: "v3.9.1"
loki: "3.6.5"
grafana: "12.3.3"
## Postgresql
postgresql: "18.2"
# For immich - https://github.com/immich-app/base-images/blob/main/postgres/versions.yaml
# pgvector: "v0.8.1"
vectorchord: "0.5.3"
# Auth
authelia: "4.39.15"

View File

@@ -0,0 +1,7 @@
---
node:
name: "vmm"
uid: 2000
home_path: "/home/vmm"
ssh_san: "vmm,vmm_init,vmm.ilnmors.internal,init.vmm.ilnmors.internal"
local_san: "localhost vmm.ilnmors.internal"

View File

@@ -0,0 +1,41 @@
---
# Node Factors
node:
name: "app"
uid: 2004
home_path: "/home/app"
ssh_san: "app,app.ilnmors.internal"
local_san: "localhost app.ilnmors.internal"
# VM Factors
vm:
name: "app"
cpu: 4
shares: 1024
memory: 16
storage: 256
lan_mac: "0a:49:6e:4d:03:00"
lan_net: "lan-net"
lan_group: "vlan10-access"
# PCIe passthrough address
# parsed from the output of `lspci | grep -i -e "sata controller" -e "vga"`.
# Ex) 04:00.0 > domain: "0x0000", bus: "0x04", slot: "0x00", function: "0x0"
pass_through:
igpu:
address: "0000:00:02.0"
domain: "0x0000"
bus: "0x00"
slot: "0x02"
function: "0x0"
sata_controller: # Additional SATA Controller
address: "0000:04:00.0"
domain: "0x0000"
bus: "0x04"
slot: "0x00"
function: "0x0"
# BTRFS configuration for hdd which is passthroughed
storage:
btrfs:
label: "APP_DATA"
level: "raid10"
mount_point: "/home/app/data"

View File

@@ -0,0 +1,18 @@
---
# Node Factors
node:
name: "auth"
uid: 2003
home_path: "/home/auth"
ssh_san: "auth,auth.ilnmors.internal"
local_san: "localhost auth.ilnmors.internal"
# VM Factors
vm:
name: "auth"
cpu: 2
shares: 512
memory: 2
storage: 64
lan_mac: "0a:49:6e:4d:02:00"
lan_net: "lan-net"
lan_group: "vlan10-access"

View File

@@ -0,0 +1,25 @@
---
# Secret management
age_key: !vault |
$ANSIBLE_VAULT;1.1;AES256
32343637656262323931313061323861393737663736626538396165613563323531316639346637
3766363134663963396634353764323166353936626633300a356338363036373165363335333665
38316638386661623265306538303739616638316565373864316364623539383736343366646463
3464613666663836310a666531386136326439616637393035333534303661373433343830323665
66613736613133616439393163653166306261376231646163323266616431623531313964326132
33653638373537323363316363646534323362353836373665636265663939353862663532313230
30643038313138626464626161373835646665326334393834616234643931656536343130316238
61656264643830616639
# Node Factors
node:
name: "console"
uid: 2999
home_path: "/home/console"
workspace_path: "{{ node.home_path }}/workspace"
homelab_path: "{{ node.home_path }}/workspace/homelab"
data_path: "{{ node.homelab_path }}/data"
config_path: "{{ node.homelab_path }}/config"
ssh_san: "console,console.ilnmors.internal"
ssh_users: "vmm,fw,infra,auth,app"
local_san: "localhost console.ilnmors.internal"
# ansible_python_interpreter: "{{ ansible_playbook_python }}"

View File

@@ -0,0 +1,98 @@
---
# Node Factors
node:
name: "fw"
uid: 2001
home_path: "/home/fw"
ssh_san: "fw,fw.ilnmors.internal"
local_san: "localhost fw.ilnmors.internal"
# VM Factors
vm:
name: "fw"
cpu: 2
shares: 2048
memory: 4
storage: 64
wan_mac: "0a:49:6e:4d:00:00"
lan_mac: "0a:49:6e:4d:00:01"
wan_net: "wan-net"
lan_net: "lan-net"
lan_group: "vlan-trunk"
# Network Factors
# An LLA (link-local address) works like a MAC address at L3 (network layer). Usually, subnets are used to separate networks.
network4:
subnet:
client: "192.168.1.0/24"
server: "192.168.10.0/24"
user: "192.168.20.0/24"
wg: "192.168.99.0/24"
lla: "169.254.0.0/16"
# You can use "{{ hostvars['fw']['network4']['firewall'].values() | join(', ') }}" for all
firewall:
client: "192.168.1.1"
server: "192.168.10.1"
user: "192.168.20.1"
wg: "192.168.99.1"
blocky:
server: "192.168.10.2"
bind:
server: "192.168.10.3"
console:
client: "192.168.1.20"
wg: "192.168.99.20"
vmm:
client: "192.168.1.10"
server: "192.168.10.10"
infra:
server: "192.168.10.11"
auth:
server: "192.168.10.12"
app:
server: "192.168.10.13"
switch:
client: "192.168.1.2"
nas:
client: "192.168.1.11"
printer:
client: "192.168.1.101"
network6:
subnet:
client: "fd00:1::/64"
server: "fd00:10::/64"
wg: "fd00:99::/64"
lla: "fe80::/10"
firewall:
client: "fd00:1::1"
server: "fd00:10::1"
wg: "fd00:99::1"
blocky:
server: "fd00:10::2"
bind:
server: "fd00:10::3"
console:
client: "fd00:1::20"
wg: "fd00:99::20"
vmm:
client: "fd00:1::10"
server: "fd00:10::10"
infra:
server: "fd00:10::11"
auth:
server: "fd00:10::12"
app:
server: "fd00:10::13"
switch:
client: "fd00:1::2"
nas:
client: "fd00:1::11"
printer:
client: "fd00:1::101"
# Suricata Factors
# suricata_home_net: '[10.0.0.0/8,172.16.0.0/12,192.168.0.0/16]'
suricata:
home_net: '[10.0.0.0/8,172.16.0.0/12,192.168.0.0/16,fd00::/8,fe80::/8]'
interfaces: ["wan", "client", "server", "user"]

View File

@@ -0,0 +1,19 @@
---
# Node Factors
node:
name: "infra"
uid: 2002
home_path: "/home/infra"
ssh_san: "infra,infra.ilnmors.internal"
local_san: "localhost infra.ilnmors.internal"
# VM Factors
vm:
name: "infra"
cpu: 2
shares: 1024
memory: 6
storage: 256
lan_mac: "0a:49:6e:4d:01:00"
lan_net: "lan-net"
lan_group: "vlan10-access"

View File

@@ -0,0 +1,14 @@
# --- console ---
console ansible_connection=local ansible_user=console
# --- Hypervisor ---
[hypervisor]
vmm_init ansible_host=init.vmm.ilnmors.internal ansible_user=vmm
vmm ansible_host=vmm.ilnmors.internal ansible_user=vmm
# --- Virtual Machines ---
[vms]
fw ansible_host=fw.ilnmors.internal ansible_user=fw
infra ansible_host=infra.ilnmors.internal ansible_user=infra
auth ansible_host=auth.ilnmors.internal ansible_user=auth
app ansible_host=app.ilnmors.internal ansible_user=app

View File

@@ -0,0 +1,185 @@
---
- name: Load secret values
hosts: "console"
gather_facts: false
become: false
tasks:
- name: Load secret from secrets.yaml
ansible.builtin.include_role:
name: "console"
tasks_from: "node/load_secret_vars"
apply:
tags: ["always"]
tags: ["always"]
- name: Site app
hosts: "app"
gather_facts: false
become: false
pre_tasks:
- name: Set become password
ansible.builtin.set_fact:
ansible_become_pass: "{{ hostvars['console']['sudo']['password']['app'] }}"
tags: ["always"]
tasks:
- name: Set timezone to Asia/Seoul
community.general.timezone:
name: Asia/Seoul
become: true
tags: ["init", "timezone"]
- name: Deploy root_ca certificate
ansible.builtin.include_role:
name: "common"
tasks_from: "node/deploy_root_ca"
apply:
tags: ["init", "root_crt"]
tags: ["init", "root_crt"]
- name: Deploy hosts file
ansible.builtin.include_role:
name: "common"
tasks_from: "node/deploy_hosts"
apply:
tags: ["init", "hosts"]
tags: ["init", "hosts"]
- name: Create default directory
ansible.builtin.include_role:
name: "common"
tasks_from: "node/create_default_dir"
apply:
tags: ["init", "default_dir"]
tags: ["init", "default_dir"]
- name: Set ssh host
ansible.builtin.include_role:
name: "common"
tasks_from: "node/set_ssh_host"
apply:
tags: ["init", "ssh_host"]
tags: ["init", "ssh_host"]
- name: Set networkd
ansible.builtin.include_role:
name: "common"
tasks_from: "node/set_networkd"
apply:
tags: ["init", "networkd"]
tags: ["init", "networkd"]
- name: Set resolved
ansible.builtin.include_role:
name: "common"
tasks_from: "node/set_resolved"
apply:
tags: ["init", "resolved"]
tags: ["init", "resolved"]
- name: Update and upgrade apt
ansible.builtin.apt:
upgrade: "dist"
update_cache: true
cache_valid_time: 3600
become: true
tags: ["init", "site", "upgrade-packages"]
- name: Install common packages
ansible.builtin.apt:
name:
- "acl"
- "curl"
- "jq"
- "netcat-openbsd"
- "dbus-user-session"
state: "present"
become: true
tags: ["init", "install-packages"]
- name: Set raid
ansible.builtin.include_role:
name: "app"
tasks_from: "node/set_raid"
apply:
tags: ["init", "raid"]
tags: ["init", "raid"]
- name: Set linger
ansible.builtin.include_role:
name: "common"
tasks_from: "node/set_linger"
apply:
tags: ["init", "linger"]
tags: ["init", "linger"]
- name: Set podman
ansible.builtin.include_role:
name: "common"
tasks_from: "services/set_podman"
apply:
tags: ["init", "podman"]
tags: ["init", "podman"]
- name: Set nftables
ansible.builtin.include_role:
name: "common"
tasks_from: "node/set_nftables"
apply:
tags: ["init", "nftables"]
tags: ["init", "nftables"]
- name: Set crowdsec
ansible.builtin.include_role:
name: "common"
tasks_from: "services/set_crowdsec"
apply:
tags: ["site", "crowdsec"]
tags: ["site", "crowdsec"]
- name: Set alloy
ansible.builtin.include_role:
name: "common"
tasks_from: "services/set_alloy"
apply:
tags: ["init", "update", "alloy"]
tags: ["init", "update", "alloy"]
- name: Set kopia
ansible.builtin.include_role:
name: "common"
tasks_from: "services/set_kopia"
apply:
tags: ["site", "kopia"]
tags: ["site", "kopia"]
- name: Flush handlers right now
ansible.builtin.meta: "flush_handlers"
# Only update iGPU firmware
- name: Install iGPU Firmware
ansible.builtin.apt:
name:
- "firmware-intel-graphics"
- "intel-media-va-driver-non-free"
update_cache: true
state: "present"
become: true
notify:
- "notification_update_initramfs"
- "notification_reboot_app"
tags: ["init"]
handlers:
- name: Update initramfs
ansible.builtin.command:
update-initramfs -u
become: true
changed_when: false
listen: "notification_update_initramfs"
ignore_errors: true # noqa: ignore-errors
- name: Reboot app vm
ansible.builtin.reboot:
reboot_timeout: 300
become: true
listen: "notification_reboot_app"
ignore_errors: true # noqa: ignore-errors

View File

@@ -0,0 +1,154 @@
---
- name: Load secret values
hosts: "console"
gather_facts: false
become: false
tasks:
- name: Load secret from secrets.yaml
ansible.builtin.include_role:
name: "console"
tasks_from: "node/load_secret_vars"
apply:
tags: ["always"]
tags: ["always"]
- name: Site auth
hosts: "auth"
gather_facts: false
become: false
pre_tasks:
- name: Set become password
ansible.builtin.set_fact:
ansible_become_pass: "{{ hostvars['console']['sudo']['password']['auth'] }}"
tags: ["always"]
tasks:
- name: Set timezone to Asia/Seoul
community.general.timezone:
name: Asia/Seoul
become: true
tags: ["init", "timezone"]
- name: Deploy root_ca certificate
ansible.builtin.include_role:
name: "common"
tasks_from: "node/deploy_root_ca"
apply:
tags: ["init", "root_crt"]
tags: ["init", "root_crt"]
- name: Deploy hosts file
ansible.builtin.include_role:
name: "common"
tasks_from: "node/deploy_hosts"
apply:
tags: ["init", "hosts"]
tags: ["init", "hosts"]
- name: Create default directory
ansible.builtin.include_role:
name: "common"
tasks_from: "node/create_default_dir"
apply:
tags: ["init", "default_dir"]
tags: ["init", "default_dir"]
- name: Set ssh host
ansible.builtin.include_role:
name: "common"
tasks_from: "node/set_ssh_host"
apply:
tags: ["init", "ssh_host"]
tags: ["init", "ssh_host"]
- name: Set networkd
ansible.builtin.include_role:
name: "common"
tasks_from: "node/set_networkd"
apply:
tags: ["init", "networkd"]
tags: ["init", "networkd"]
- name: Set resolved
ansible.builtin.include_role:
name: "common"
tasks_from: "node/set_resolved"
apply:
tags: ["init", "resolved"]
tags: ["init", "resolved"]
- name: Update and upgrade apt
ansible.builtin.apt:
upgrade: "dist"
update_cache: true
cache_valid_time: 3600
become: true
tags: ["init", "site", "upgrade-packages"]
- name: Install common packages
ansible.builtin.apt:
name:
- "acl"
- "curl"
- "jq"
- "netcat-openbsd"
- "dbus-user-session"
state: "present"
become: true
tags: ["init", "site", "install-packages"]
- name: Set linger
ansible.builtin.include_role:
name: "common"
tasks_from: "node/set_linger"
apply:
tags: ["init", "linger"]
tags: ["init", "linger"]
- name: Set podman
ansible.builtin.include_role:
name: "common"
tasks_from: "services/set_podman"
apply:
tags: ["init", "podman"]
tags: ["init", "podman"]
- name: Set nftables
ansible.builtin.include_role:
name: "common"
tasks_from: "node/set_nftables"
apply:
tags: ["init", "nftables"]
tags: ["init", "nftables"]
- name: Set crowdsec
ansible.builtin.include_role:
name: "common"
tasks_from: "services/set_crowdsec"
apply:
tags: ["site", "crowdsec"]
tags: ["site", "crowdsec"]
- name: Set caddy
ansible.builtin.include_role:
name: "common"
tasks_from: "services/set_caddy"
apply:
tags: ["site", "caddy"]
tags: ["site", "caddy"]
- name: Set authelia
ansible.builtin.include_role:
name: "auth"
tasks_from: "services/set_authelia"
apply:
tags: ["site", "authelia"]
tags: ["site", "authelia"]
- name: Set alloy
ansible.builtin.include_role:
name: "common"
tasks_from: "services/set_alloy"
apply:
tags: ["site", "alloy"]
tags: ["site", "alloy"]

View File

@@ -0,0 +1,132 @@
---
- name: Load secret values
hosts: "console"
gather_facts: false
become: false
tasks:
- name: Load secret from secrets.yaml
ansible.builtin.include_role:
name: "console"
tasks_from: "node/load_secret_vars"
apply:
tags: ["always"]
tags: ["always"]
- name: Site console
hosts: "console"
gather_facts: false
become: false
pre_tasks:
- name: Set become password
ansible.builtin.set_fact:
ansible_become_pass: "{{ hostvars['console']['sudo']['password']['console'] }}"
tags: ["always"]
tasks:
# init
- name: Set timezone to Asia/Seoul
community.general.timezone:
name: Asia/Seoul
become: true
tags: ["init", "timezone"]
- name: Deploy root_ca certificate
ansible.builtin.include_role:
name: "common"
tasks_from: "node/deploy_root_ca"
apply:
tags: ["init", "root_crt"]
tags: ["init", "root_crt"]
- name: Deploy hosts file
ansible.builtin.include_role:
name: "common"
tasks_from: "node/deploy_hosts"
apply:
tags: ["init", "hosts"]
tags: ["init", "hosts"]
- name: Create default directory
ansible.builtin.include_role:
name: "common"
tasks_from: "node/create_default_dir"
apply:
tags: ["init", "default_dir"]
tags: ["init", "default_dir"]
- name: Update and upgrade apt
ansible.builtin.apt:
upgrade: "dist"
update_cache: true
cache_valid_time: 3600
become: true
tags: ["init", "site", "upgrade-packages"]
- name: Set ssh client
ansible.builtin.include_role:
name: "console"
tasks_from: "node/set_ssh_client"
apply:
tags: ["init", "ssh_client"]
tags: ["init", "ssh_client"]
- name: Check file permissions
ansible.builtin.file:
path: "{{ node['workspace_path'] }}/{{ item }}"
state: "directory"
owner: "{{ ansible_user }}"
group: "svadmins"
mode: "u=rwX,g=,o="
recurse: true
loop:
- "homelab/ansible"
- "homelab/config"
- "homelab/docs"
- "university"
tags: ["init", "site", "file_permission"]
# kopia snapshots are mounted on homelab/data/volumes.
# NEVER CHANGE their permissions and owners
- name: Download vm cloud-init
ansible.builtin.get_url:
url: "https://cdimage.debian.org/images/cloud/trixie/latest/debian-13-generic-amd64.qcow2"
dest: "{{ node['data_path'] }}/images/debian-13-generic-amd64.qcow2"
owner: "console"
group: "svadmins"
mode: "0600"
tags: ["init", "site", "cloud-init-image"]
- name: Install packages
ansible.builtin.apt:
name:
- "git"
- "gnupg"
- "acl"
- "curl"
- "jq"
- "cloud-image-utils"
- "logrotate"
- "nftables"
- "build-essential"
- "g++"
- "gcc"
- "fuse3"
state: "present"
become: true
tags: ["init", "site", "install-packages"]
- name: Install CLI tools
ansible.builtin.include_role:
name: "console"
tasks_from: "services/set_cli_tools"
apply:
tags: ["init", "site", "tools"]
tags: ["init", "site", "tools"]
- name: Install chromium with font
ansible.builtin.include_role:
name: "console"
tasks_from: "services/set_chromium"
apply:
tags: ["init", "site", "chromium"]
tags: ["init", "site", "chromium"]

View File

@@ -0,0 +1,190 @@
---
- name: Load secret values
hosts: "console"
gather_facts: false
become: false
tasks:
- name: Load secret from secrets.yaml
ansible.builtin.include_role:
name: "console"
tasks_from: "node/load_secret_vars"
apply:
tags: ["always"]
tags: ["always"]
- name: Site fw
hosts: "fw"
gather_facts: false
become: false
pre_tasks:
- name: Set become password
ansible.builtin.set_fact:
ansible_become_pass: "{{ hostvars['console']['sudo']['password']['fw'] }}"
tags: ["always"]
tasks:
- name: Set timezone to Asia/Seoul
community.general.timezone:
name: Asia/Seoul
become: true
tags: ["init", "timezone"]
- name: Deploy root_ca certificate
ansible.builtin.include_role:
name: "common"
tasks_from: "node/deploy_root_ca"
apply:
tags: ["init", "root_crt"]
tags: ["init", "root_crt"]
- name: Deploy hosts file
ansible.builtin.include_role:
name: "common"
tasks_from: "node/deploy_hosts"
apply:
tags: ["init", "hosts"]
tags: ["init", "hosts"]
- name: Create default directory
ansible.builtin.include_role:
name: "common"
tasks_from: "node/create_default_dir"
apply:
tags: ["init", "default_dir"]
tags: ["init", "default_dir"]
- name: Set ssh host
ansible.builtin.include_role:
name: "common"
tasks_from: "node/set_ssh_host"
apply:
tags: ["init", "ssh_host"]
tags: ["init", "ssh_host"]
- name: Set networkd
ansible.builtin.include_role:
name: "common"
tasks_from: "node/set_networkd"
apply:
tags: ["init", "networkd"]
tags: ["init", "networkd"]
- name: Set wireguard
ansible.builtin.include_role:
name: "common"
tasks_from: "node/set_wireguard"
apply:
tags: ["init", "wireguard"]
tags: ["init", "wireguard"]
- name: Set resolved
ansible.builtin.include_role:
name: "common"
tasks_from: "node/set_resolved"
apply:
tags: ["init", "resolved"]
tags: ["init", "resolved"]
- name: Update and upgrade apt
ansible.builtin.apt:
upgrade: "dist"
update_cache: true
cache_valid_time: 3600
become: true
tags: ["init", "site", "upgrade-packages"]
- name: Install common packages
ansible.builtin.apt:
name:
- "acl"
- "curl"
- "jq"
- "wireguard-tools"
- "dnsutils"
- "conntrack"
- "logrotate"
- "netcat-openbsd"
- "dbus-user-session"
state: "present"
become: true
tags: ["init", "site", "install-packages"]
- name: Set linger
ansible.builtin.include_role:
name: "common"
tasks_from: "node/set_linger"
apply:
tags: ["init", "linger"]
tags: ["init", "linger"]
- name: Set chrony
ansible.builtin.include_role:
name: "fw"
tasks_from: "services/set_chrony"
apply:
tags: ["init", "chrony"]
tags: ["init", "chrony"]
- name: Set ddns
ansible.builtin.include_role:
name: "fw"
tasks_from: "services/set_ddns"
apply:
tags: ["init", "ddns"]
tags: ["init", "ddns"]
- name: Set nftables
ansible.builtin.include_role:
name: "common"
tasks_from: "node/set_nftables"
apply:
tags: ["init", "site", "nftables"]
tags: ["init", "site", "nftables"]
- name: Set suricata
ansible.builtin.include_role:
name: "fw"
tasks_from: "services/set_suricata"
apply:
tags: ["site", "suricata"]
tags: ["site", "suricata"]
- name: Set crowdsec
ansible.builtin.include_role:
name: "common"
tasks_from: "services/set_crowdsec"
apply:
tags: ["site", "crowdsec"]
tags: ["site", "crowdsec"]
- name: Set bind
ansible.builtin.include_role:
name: "fw"
tasks_from: "services/set_bind"
apply:
tags: ["init", "update", "bind"]
tags: ["init", "update", "bind"]
- name: Set blocky
ansible.builtin.include_role:
name: "fw"
tasks_from: "services/set_blocky"
apply:
tags: ["site", "blocky"]
tags: ["site", "blocky"]
- name: Set kea
ansible.builtin.include_role:
name: "fw"
tasks_from: "services/set_kea"
apply:
tags: ["site", "kea"]
tags: ["site", "kea"]
- name: Set alloy
ansible.builtin.include_role:
name: "common"
tasks_from: "services/set_alloy"
apply:
tags: ["site", "alloy"]
tags: ["site", "alloy"]

View File

@@ -0,0 +1,210 @@
---
- name: Load secret values
hosts: "console"
gather_facts: false
become: false
tasks:
- name: Load secret from secrets.yaml
ansible.builtin.include_role:
name: "console"
tasks_from: "node/load_secret_vars"
apply:
tags: ["always"]
tags: ["always"]
- name: Site infra
hosts: infra
gather_facts: false
become: false
pre_tasks:
- name: Set become password
ansible.builtin.set_fact:
ansible_become_pass: "{{ hostvars['console']['sudo']['password']['infra'] }}"
tags: ["always"]
tasks:
- name: Set timezone to Asia/Seoul
community.general.timezone:
name: Asia/Seoul
become: true
tags: ["init", "timezone"]
- name: Deploy root_ca certificate
ansible.builtin.include_role:
name: "common"
tasks_from: "node/deploy_root_ca"
apply:
tags: ["init", "root_crt"]
tags: ["init", "root_crt"]
- name: Deploy hosts file
ansible.builtin.include_role:
name: "common"
tasks_from: "node/deploy_hosts"
apply:
tags: ["init", "hosts"]
tags: ["init", "hosts"]
- name: Create default directory
ansible.builtin.include_role:
name: "common"
tasks_from: "node/create_default_dir"
apply:
tags: ["init", "default_dir"]
tags: ["init", "default_dir"]
- name: Set ssh host
ansible.builtin.include_role:
name: "common"
tasks_from: "node/set_ssh_host"
apply:
tags: ["init", "ssh_host"]
tags: ["init", "ssh_host"]
- name: Set networkd
ansible.builtin.include_role:
name: "common"
tasks_from: "node/set_networkd"
apply:
tags: ["init", "networkd"]
tags: ["init", "networkd"]
- name: Set resolved
ansible.builtin.include_role:
name: "common"
tasks_from: "node/set_resolved"
apply:
tags: ["init", "resolved"]
tags: ["init", "resolved"]
- name: Update and upgrade apt
ansible.builtin.apt:
upgrade: "dist"
update_cache: true
cache_valid_time: 3600
become: true
tags: ["init", "site", "upgrade-packages"]
- name: Install common packages
ansible.builtin.apt:
name:
- "acl"
- "curl"
- "jq"
- "netcat-openbsd"
- "dbus-user-session"
state: "present"
become: true
tags: ["init", "site", "install-packages"]
- name: Set linger
ansible.builtin.include_role:
name: "common"
tasks_from: "node/set_linger"
apply:
tags: ["init", "linger"]
tags: ["init", "linger"]
- name: Set podman
ansible.builtin.include_role:
name: "common"
tasks_from: "services/set_podman"
apply:
tags: ["init", "podman"]
tags: ["init", "podman"]
- name: Set nftables
ansible.builtin.include_role:
name: "common"
tasks_from: "node/set_nftables"
apply:
tags: ["init", "nftables"]
tags: ["init", "nftables"]
- name: Set crowdsec
ansible.builtin.include_role:
name: "common"
tasks_from: "services/set_crowdsec"
apply:
tags: ["site", "crowdsec"]
tags: ["site", "crowdsec"]
- name: Set ca
ansible.builtin.include_role:
name: "infra"
tasks_from: "services/set_ca_server"
apply:
tags: ["site", "ca"]
tags: ["site", "ca"]
- name: Set postgresql
ansible.builtin.include_role:
name: "infra"
tasks_from: "services/set_postgresql"
apply:
tags: ["site", "postgresql"]
tags: ["site", "postgresql"]
- name: Set caddy
ansible.builtin.include_role:
name: "common"
tasks_from: "services/set_caddy"
apply:
tags: ["site", "caddy"]
tags: ["site", "caddy"]
- name: Set ldap
ansible.builtin.include_role:
name: "infra"
tasks_from: "services/set_ldap"
apply:
tags: ["site", "ldap"]
tags: ["site", "ldap"]
- name: Set x509 exporter
ansible.builtin.include_role:
name: "infra"
tasks_from: "services/set_x509-exporter"
apply:
tags: ["site", "x509-exporter"]
tags: ["site", "x509-exporter"]
- name: Set prometheus
ansible.builtin.include_role:
name: "infra"
tasks_from: "services/set_prometheus"
apply:
tags: ["site", "prometheus"]
tags: ["site", "prometheus"]
- name: Set loki
ansible.builtin.include_role:
name: "infra"
tasks_from: "services/set_loki"
apply:
tags: ["site", "loki"]
tags: ["site", "loki"]
- name: Set alloy
ansible.builtin.include_role:
name: "common"
tasks_from: "services/set_alloy"
apply:
tags: ["site", "alloy"]
tags: ["site", "alloy"]
- name: Set grafana
ansible.builtin.include_role:
name: "infra"
tasks_from: "services/set_grafana"
apply:
tags: ["site", "grafana"]
tags: ["site", "grafana"]
- name: Set kopia
ansible.builtin.include_role:
name: "common"
tasks_from: "services/set_kopia"
apply:
tags: ["site", "kopia"]
tags: ["site", "kopia"]

View File

@@ -0,0 +1,61 @@
---
- name: Load secret values
hosts: "console"
gather_facts: false
become: false
tasks:
- name: Load secret from secrets.yaml
ansible.builtin.include_role:
name: "console"
tasks_from: "node/load_secret_vars"
apply:
tags: ["always"]
tags: ["always"]
- name: Create vm
hosts: vmm_init
gather_facts: false
become: false
vars:
valid_vm_names:
- "fw"
- "infra"
- "auth"
- "app"
tasks:
- name: Set vm name depends on tags
ansible.builtin.set_fact:
target_vm: "{{ ansible_run_tags[0] }}"
when: (ansible_run_tags | length) == 1
- name: Check VM name
ansible.builtin.fail:
msg: "invalid vm name. vm name should be included in \"{{ valid_vm_names | join(', ') }}\""
when: (target_vm | default("none")) not in valid_vm_names
- name: Set become password
ansible.builtin.set_fact:
ansible_become_pass: "{{ hostvars['console']['sudo']['password']['vmm'] }}"
- name: Create seed file
ansible.builtin.include_role:
name: "vmm"
tasks_from: "vm/create_seed"
apply:
delegate_to: "console"
tags: ["always"]
- name: Deploy vm init files
ansible.builtin.include_role:
name: "vmm"
tasks_from: "vm/deploy_vm_init"
apply:
tags: ["always"]
- name: Register vm
ansible.builtin.include_role:
name: "vmm"
tasks_from: "vm/register_vm"
apply:
tags: ["always"]
tags: ["always"]

View File

@@ -0,0 +1,165 @@
---
- name: Set host and load secret values
hosts: "console"
gather_facts: false
become: false
tasks:
- name: Set host as vmm
ansible.builtin.set_fact:
vmm_host: "vmm"
when: "'init' is not in ansible_run_tags"
tags: ["always"]
- name: Load secret from secrets.yaml
ansible.builtin.include_role:
name: "console"
tasks_from: "node/load_secret_vars"
apply:
tags: ["always"]
tags: ["always"]
# Play 2: provision the hypervisor node ("vmm" normally, "vmm_init" during
# first-time init). Tagging pattern throughout: the task-level `tags` decide
# whether the include itself runs; the `apply: tags:` propagate the same tags
# onto the tasks inside the included file.
- name: Site vmm
  hosts: "{{ hostvars['console']['vmm_host'] | default('vmm_init') }}"
  gather_facts: false
  become: false
  pre_tasks:
    - name: Set become password
      ansible.builtin.set_fact:
        ansible_become_pass: "{{ hostvars['console']['sudo']['password']['vmm'] }}"
      tags: ["always"]
  tasks:
    # init
    - name: Set timezone to Asia/Seoul
      community.general.timezone:
        name: Asia/Seoul
      become: true
      tags: ["init", "timezone"]
    - name: Deploy root_ca certificate
      ansible.builtin.include_role:
        name: "common"
        tasks_from: "node/deploy_root_ca"
        apply:
          tags: ["init", "root_crt"]
      tags: ["init", "root_crt"]
    - name: Deploy hosts file
      ansible.builtin.include_role:
        name: "common"
        tasks_from: "node/deploy_hosts"
        apply:
          tags: ["init", "hosts"]
      tags: ["init", "hosts"]
    - name: Create default directory
      ansible.builtin.include_role:
        name: "common"
        tasks_from: "node/create_default_dir"
        apply:
          tags: ["init", "default_dir"]
      tags: ["init", "default_dir"]
    - name: Set ssh host
      ansible.builtin.include_role:
        name: "common"
        tasks_from: "node/set_ssh_host"
        apply:
          tags: ["init", "ssh_host"]
      tags: ["init", "ssh_host"]
    - name: Set networkd
      ansible.builtin.include_role:
        name: "common"
        tasks_from: "node/set_networkd"
        apply:
          tags: ["init", "networkd"]
      tags: ["init", "networkd"]
    - name: Set resolved
      ansible.builtin.include_role:
        name: "common"
        tasks_from: "node/set_resolved"
        apply:
          tags: ["init", "resolved"]
      tags: ["init", "resolved"]
    - name: Set timesyncd
      ansible.builtin.include_role:
        name: "common"
        tasks_from: "node/set_timesyncd"
        apply:
          tags: ["init", "timesyncd"]
      tags: ["init", "timesyncd"]
    - name: Set linger # vmm has dbus-user-session in it
      ansible.builtin.include_role:
        name: "common"
        tasks_from: "node/set_linger"
        apply:
          tags: ["init", "linger"]
      tags: ["init", "linger"]
    - name: Set libvirt
      ansible.builtin.include_role:
        name: "vmm"
        tasks_from: "node/set_libvirt"
        apply:
          tags: ["init", "libvirt"]
      tags: ["init", "libvirt"]
    # nftables runs on both init and site passes.
    - name: Set nftables
      ansible.builtin.include_role:
        name: "common"
        tasks_from: "node/set_nftables"
        apply:
          tags: ["init", "site", "nftables"]
      tags: ["init", "site", "nftables"]
    - name: Update and upgrade apt # init roles has no internet (airgap statement)
      ansible.builtin.apt:
        update_cache: true
        upgrade: "dist"
        cache_valid_time: 3600
      when: inventory_hostname != "vmm_init"
      become: true
      tags: ["site", "upgrade-packages"]
    - name: Set crowdsec
      ansible.builtin.include_role:
        name: "common"
        tasks_from: "services/set_crowdsec"
        apply:
          tags: ["site", "crowdsec"]
      when: inventory_hostname != "vmm_init"
      tags: ["site", "crowdsec"]
    - name: Set alloy
      ansible.builtin.include_role:
        name: "common"
        tasks_from: "services/set_alloy"
        apply:
          tags: ["site", "alloy"]
      when: inventory_hostname != "vmm_init"
      tags: ["site", "alloy"]
    # "never" tag: only runs when explicitly requested via --tags install-packages.
    - name: Install packages # they are already installed in prerequisite step
      ansible.builtin.apt:
        name:
          - acl
          - curl
          - jq
          - crowdsec
          - systemd-resolved
          - qemu-system-x86
          - ksmtuned
          - libvirt-daemon-system
          - virt-top
          - python3
          - python3-apt
          - python3-libvirt
          - python3-lxml
        state: "present"
      become: true
      when: "'init' is not in ansible_run_tags"
      tags: ["never", "install-packages"]

View File

@@ -0,0 +1,70 @@
---
# Create (first run only) and mount a btrfs RAID10 data volume built from the
# disks behind the passed-through SATA controller, then grant the service
# admin group access to the mount point.
- name: Check btrfs installation
  ansible.builtin.shell: |
    command -v btrfs
  become: true # btrfs is located in /usr/sbin, which means root permission is needed.
  changed_when: false
  failed_when: false
  register: "is_btrfs_installed"
  ignore_errors: true
- name: Install btrfs
  ansible.builtin.apt:
    name: "btrfs-progs"
    state: "present"
  become: true
  when: is_btrfs_installed.rc != 0
# Resolve stable by-path device names for the controller's disks; '\.0$'
# entries are filtered out and the list is sorted for deterministic ordering.
- name: Set hard disk path
  ansible.builtin.shell: |
    set -o pipefail
    ls -1 /dev/disk/by-path/*{{ vm['pass_through']['sata_controller']['address'] }}* | \
    grep -v '\.0$' | \
    sort
  changed_when: false
  register: "hdd_path_list"
# rc == 0 means a filesystem with the target label already exists.
- name: Check app_hdd filesystem already exists
  ansible.builtin.command: |
    blkid -L {{ storage['btrfs']['label'] }}
  register: is_app_data
  changed_when: false
  failed_when: false
  become: true
# Only enforce the 4-disk minimum when the filesystem must be CREATED.
# An already-existing array may legitimately show fewer devices (a failed
# disk); the mount below uses "degraded,nofail" precisely to tolerate that,
# so aborting here would contradict it.
- name: Check disk number
  ansible.builtin.fail:
    msg: "At least 4 disks are required for RAID10, found {{ hdd_path_list.stdout_lines | length }}"
  when:
    - is_app_data.rc != 0
    - (hdd_path_list.stdout_lines | length) < 4
# mkfs only when no labeled filesystem exists and enough disks are present.
- name: Set btrfs raid10 volume
  ansible.builtin.shell: |
    mkfs.btrfs -f \
    -L {{ storage['btrfs']['label'] }} \
    -d {{ storage['btrfs']['level'] }} \
    -m {{ storage['btrfs']['level'] }} \
    {{ hdd_path_list.stdout_lines | join(' ') }}
  become: true
  when:
    - is_app_data.rc != 0
    - (hdd_path_list.stdout_lines | length) >= 4
  changed_when: is_mkfs.rc == 0
  register: "is_mkfs"
- name: Mount btrfs raid10 volume
  ansible.posix.mount:
    path: "{{ storage['btrfs']['mount_point'] }}"
    src: "LABEL={{ storage['btrfs']['label'] }}"
    state: "mounted"
    fstype: "btrfs"
    opts: "defaults,noatime,compress=zstd:3,autodefrag,degraded,nofail"
  become: true
- name: Set hard disk path permissions
  ansible.builtin.file:
    path: "{{ storage['btrfs']['mount_point'] }}"
    state: "directory"
    owner: "{{ ansible_user }}"
    group: "svadmins"
    mode: "0770"
  become: true

View File

@@ -0,0 +1,11 @@
---
# Handler: restart the rootless (user-scope) authelia quadlet service.
# changed_when: false keeps handler runs out of the change count;
# ignore_errors keeps a failed restart from aborting the play.
- name: Restart authelia
  ansible.builtin.systemd:
    name: "authelia.service"
    state: "restarted"
    enabled: true
    scope: "user"
    daemon_reload: true
  changed_when: false
  listen: "notification_restart_authelia"
  ignore_errors: true # noqa: ignore-errors

View File

@@ -0,0 +1,78 @@
---
# Deploy authelia as a rootless podman quadlet: directories, config template,
# root CA, podman secrets, .container unit, then start the user service.
- name: Create authelia directory
  ansible.builtin.file:
    path: "{{ node['home_path'] }}/containers/{{ item }}"
    state: "directory"
    owner: "{{ ansible_user }}"
    group: "svadmins"
    mode: "0770"
  loop:
    - "authelia"
    - "authelia/config"
    - "authelia/certs"
  become: true
- name: Deploy authelia configuration file
  ansible.builtin.template:
    src: "{{ hostvars['console']['node']['config_path'] }}/services/containers/auth/authelia/config/authelia.yaml.j2"
    dest: "{{ node['home_path'] }}/containers/authelia/config/authelia.yaml"
    owner: "{{ ansible_user }}"
    group: "svadmins"
    mode: "0600"
  become: true
  notify: "notification_restart_authelia"
  no_log: true
- name: Deploy certificates
  ansible.builtin.copy:
    content: |
      {{ hostvars['console']['ca']['root']['crt'] }}
    dest: "{{ node['home_path'] }}/containers/authelia/certs/ilnmors_root_ca.crt"
    owner: "{{ ansible_user }}"
    group: "svadmins"
    mode: "0440"
  become: true
  no_log: true
# force: true re-creates each secret so rotated values take effect.
- name: Register secret value to podman secret
  containers.podman.podman_secret:
    name: "{{ item.name }}"
    data: "{{ item.value }}"
    state: "present"
    force: true
  loop:
    - name: "AUTHELIA_JWT_SECRET"
      value: "{{ hostvars['console']['authelia']['jwt_secret'] }}"
    - name: "AUTHELIA_SESSION_SECRET"
      value: "{{ hostvars['console']['authelia']['session_secret'] }}"
    - name: "AUTHELIA_STORAGE_SECRET"
      value: "{{ hostvars['console']['authelia']['storage_secret'] }}"
    - name: "AUTHELIA_HMAC_SECRET"
      value: "{{ hostvars['console']['authelia']['hmac_secret'] }}"
    - name: "AUTHELIA_JWKS_RS256"
      value: "{{ hostvars['console']['authelia']['jwk_rs256'] }}"
    - name: "AUTHELIA_JWKS_ES256"
      value: "{{ hostvars['console']['authelia']['jwk_es256'] }}"
    - name: "AUTHELIA_LDAP_PASSWORD"
      value: "{{ hostvars['console']['ldap']['password']['authelia'] }}"
    - name: "POSTGRES_AUTHELIA_PASSWORD"
      value: "{{ hostvars['console']['postgresql']['password']['authelia'] }}"
  notify: "notification_restart_authelia"
  no_log: true
# Quadlet .container file; systemd user generator turns it into authelia.service.
- name: Deploy container file
  ansible.builtin.template:
    src: "{{ hostvars['console']['node']['config_path'] }}/services/containers/auth/authelia/authelia.container.j2"
    dest: "{{ node['home_path'] }}/.config/containers/systemd/authelia.container"
    owner: "{{ ansible_user }}"
    group: "svadmins"
    mode: "0644"
  notify: "notification_restart_authelia"
- name: Enable authelia.service
  ansible.builtin.systemd:
    name: "authelia.service"
    state: "started"
    enabled: true
    daemon_reload: true
    scope: "user"

View File

@@ -0,0 +1,101 @@
---
# Shared handlers for the common role. Convention across all of them:
# changed_when: false keeps handler activity out of the change summary, and
# ignore_errors lets the rest of the run proceed past a failed restart.
- name: Restart ca certificate
  ansible.builtin.command: |
    update-ca-certificates
  become: true
  changed_when: false
  listen: "notification_update_ca"
  ignore_errors: true # noqa: ignore-errors
- name: Restart sshd
  ansible.builtin.systemd:
    name: "sshd.service"
    state: "restarted"
    enabled: true
  become: true
  changed_when: false
  listen: "notification_restart_sshd"
  ignore_errors: true # noqa: ignore-errors
- name: Reload systemd-networkd
  ansible.builtin.systemd:
    name: "systemd-networkd.service"
    state: "reloaded"
    enabled: true
  become: true
  changed_when: false
  listen: "notification_reload_networkctl"
  ignore_errors: true # noqa: ignore-errors
- name: Reload systemd-resolved.service
  ansible.builtin.systemd:
    name: "systemd-resolved.service"
    state: "reloaded"
    enabled: true
  become: true
  changed_when: false
  listen: "notification_reload_resolved"
  ignore_errors: true # noqa: ignore-errors
- name: Restart systemd-timesyncd
  ansible.builtin.systemd:
    name: "systemd-timesyncd.service"
    state: "restarted"
    enabled: true
  become: true
  changed_when: false
  listen: "notification_restart_timesyncd"
  ignore_errors: true # noqa: ignore-errors
# Re-applies the ruleset file directly rather than restarting the unit.
- name: Update nftables
  ansible.builtin.command: |
    nft -f /etc/nftables.conf
  become: true
  changed_when: false
  listen: "notification_update_nftables"
  ignore_errors: true # noqa: ignore-errors
- name: Restart crowdsec
  ansible.builtin.systemd:
    name: "crowdsec.service"
    state: "restarted"
    enabled: true
    daemon_reload: true
  become: true
  changed_when: false
  listen: "notification_restart_crowdsec"
  ignore_errors: true # noqa: ignore-errors
# The firewall bouncer only exists on the fw node.
- name: Restart crowdsec bouncer
  ansible.builtin.systemd:
    name: "crowdsec-firewall-bouncer.service"
    state: "restarted"
    enabled: true
    daemon_reload: true
  become: true
  when: node['name'] == 'fw'
  changed_when: false
  listen: "notification_restart_crowdsec_bouncer"
  ignore_errors: true # noqa: ignore-errors
# caddy runs as a rootless user service (quadlet), hence scope: user.
- name: Restart caddy
  ansible.builtin.systemd:
    name: "caddy.service"
    state: "restarted"
    enabled: true
    scope: "user"
    daemon_reload: true
  changed_when: false
  listen: "notification_restart_caddy"
  ignore_errors: true # noqa: ignore-errors
- name: Restart alloy
  ansible.builtin.systemd:
    name: "alloy.service"
    state: "restarted"
    enabled: true
    daemon_reload: true
  become: true
  changed_when: false
  listen: "notification_restart_alloy"
  ignore_errors: true # noqa: ignore-errors

View File

@@ -0,0 +1,34 @@
---
# Create the baseline directory layout every node needs: the /etc/secrets
# tree and the per-user systemd/quadlet config directories.
- name: Create common secret directory
  ansible.builtin.file:
    path: "/etc/secrets"
    state: "directory"
    owner: "root"
    group: "root"
    mode: "0711" # traversable by anyone, listable only by root
  become: true
- name: Create user secret directory
  ansible.builtin.file:
    path: "/etc/secrets/{{ node['uid'] }}"
    state: "directory"
    owner: "{{ ansible_user }}"
    group: "root"
    mode: "0500" # read-only, owner only
  become: true
- name: Create user systemd directory
  ansible.builtin.file:
    path: "{{ node['home_path'] }}/.config/systemd/user"
    state: "directory"
    owner: "{{ ansible_user }}"
    group: "svadmins"
    mode: "0700"
# Quadlet units placed here are picked up by the user systemd generator.
- name: Create quadlet directory
  ansible.builtin.file:
    path: "{{ node['home_path'] }}/.config/containers/systemd"
    state: "directory"
    owner: "{{ ansible_user }}"
    group: "svadmins"
    mode: "0700"

View File

@@ -0,0 +1,9 @@
---
- name: Deploy /etc/hosts
ansible.builtin.template:
src: "{{ hostvars['console']['node']['config_path'] }}/node/common/hosts.j2"
dest: "/etc/hosts"
owner: "root"
group: "root"
mode: "0644"
become: true

View File

@@ -0,0 +1,10 @@
---
- name: Deploy root_ca.crt
ansible.builtin.copy:
content: "{{ hostvars['console']['ca']['root']['crt'] }}"
dest: "/usr/local/share/ca-certificates/ilnmors_root_ca.crt"
owner: "root"
group: "root"
mode: "0644"
become: true
notify: "notification_update_ca"

View File

@@ -0,0 +1,20 @@
---
- name: Checking linger
ansible.builtin.stat:
path: "/var/lib/systemd/linger/{{ ansible_user }}"
register: "is_linger_file"
- name: Activate linger
when: not is_linger_file.stat.exists
block:
- name: Enable linger
ansible.builtin.command: |
loginctl enable-linger {{ ansible_user }}
become: true
changed_when: true
- name: Reboot system to ensure DBUS socket activation
ansible.builtin.reboot:
reboot_timeout: 300
post_reboot_delay: 3
become: true

View File

@@ -0,0 +1,23 @@
---
- name: Set network files directory
ansible.builtin.set_fact:
directory_name: "{{ node['name'] }}"
when: node['name'] in ["vmm", "fw"]
- name: Set target vm
ansible.builtin.set_fact:
target_vm: "{{ node['name'] }}"
- name: Deploy networkd files
ansible.builtin.template:
src: "{{ item }}"
dest: "/etc/systemd/network/{{ item | basename }}"
owner: "root"
group: "systemd-network"
mode: "0640"
loop: "{{ query('fileglob', hostvars['console']['node']['config_path'] + '/node/' + (directory_name | default('common')) + '/networkd/*') | sort }}"
become: true
notify:
- "notification_reload_networkctl"
- "notification_restart_crowdsec"
no_log: true

View File

@@ -0,0 +1,36 @@
---
- name: Check nftables installation
ansible.builtin.shell: |
command -v nft
become: true # nftables is located in /usr/sbin, which means root permission is needed.
changed_when: false
failed_when: false
register: "is_nftables_installed"
ignore_errors: true
- name: Install nftables
ansible.builtin.apt:
name: "nftables"
state: "present"
become: true
when: is_nftables_installed.rc != 0
- name: Enable nftables.service
ansible.builtin.systemd:
name: "nftables.service"
state: "started"
enabled: true
become: true
- name: Deploy nftables.conf
ansible.builtin.template:
src: "{{ hostvars['console']['node']['config_path'] }}/node/{{ node['name'] }}/nftables.conf.j2"
dest: "/etc/nftables.conf"
owner: "root"
group: "root"
mode: "0700"
validate: "/usr/sbin/nft -c -f %s"
become: true
notify:
- "notification_update_nftables"
- "notification_restart_crowdsec_bouncer"

View File

@@ -0,0 +1,39 @@
---
- name: Enable systemd-resolved.service
ansible.builtin.systemd:
name: "systemd-resolved.service"
state: "started"
enabled: true
become: true
- name: Check global.conf
ansible.builtin.stat:
path: "/etc/systemd/resolved.conf.d/global.conf"
register: "is_global_conf"
- name: Create resolved directory
ansible.builtin.file:
path: "/etc/systemd/resolved.conf.d"
state: "directory"
owner: "root"
group: "root"
mode: "0755"
become: true
- name: Deploy global conf file
ansible.builtin.template:
src: "{{ hostvars['console']['node']['config_path'] }}/node/common/resolved/global.conf.j2"
dest: "/etc/systemd/resolved.conf.d/global.conf"
owner: "root"
group: "systemd-resolve"
mode: "0640"
become: true
notify: "notification_reload_resolved"
- name: Restart systemd-resolved.service when it is initiated
ansible.builtin.systemd:
name: "systemd-resolved.service"
state: "restarted"
enabled: true
become: true
when: not is_global_conf.stat.exists

View File

@@ -0,0 +1,119 @@
---
- name: Deploy /etc/ssh/local_ssh_ca.pub
ansible.builtin.copy:
content: |
{{ hostvars['console']['ssh']['ca']['pub'] }}
dest: "/etc/ssh/local_ssh_ca.pub"
owner: "root"
group: "root"
mode: "0644"
become: true
no_log: true
- name: Check ssh_host_key-cert.pub
ansible.builtin.stat:
path: "/etc/ssh/ssh_host_ed25519_key-cert.pub"
register: "is_signed_ca_key"
- name: Get current ssh_host_key-cert.pub Key ID
ansible.builtin.shell: |
set -o pipefail
ssh-keygen -L -f /etc/ssh/ssh_host_ed25519_key-cert.pub | \
grep "Key ID" | \
sed -E 's/.*Key ID: "(.*)"/\1/'
when: is_signed_ca_key.stat.exists
changed_when: false
register: "current_key_id"
no_log: true
- name: Get current ssh_host_key-cert.pub san
ansible.builtin.shell: |
set -o pipefail
ssh-keygen -L -f /etc/ssh/ssh_host_ed25519_key-cert.pub | \
sed -n '/Principals:/,/Critical Options:/p' | \
sed '1d;$d' | \
sed 's/^[[:space:]]*//'
when: is_signed_ca_key.stat.exists
changed_when: false
register: "current_san_id"
no_log: true
- name: Set current key informations
ansible.builtin.set_fact:
current_id_key: "{{ current_key_id.stdout }}"
current_san_list: "{{ current_san_id.stdout_lines }}"
when: is_signed_ca_key.stat.exists
no_log: true
- name: Compare key values between current information and defined information
ansible.builtin.set_fact:
is_certificate_info_different: true
when: (current_id_key | default("")) != node['name'] or (current_san_list | default([])) != (node['ssh_san'].split(',') | map('trim') | list)
- name: Get SSH CA and signing
when: not is_signed_ca_key.stat.exists or (is_certificate_info_different | default(false))
block:
- name: Get ssh_host_key.pub from remote server
ansible.builtin.fetch:
src: "/etc/ssh/ssh_host_ed25519_key.pub"
dest: "/run/user/{{ hostvars['console']['node']['uid'] }}/{{ node['name'] }}_ssh_host_ed25519_key.pub"
flat: true
become: true
- name: Get SSH CA
delegate_to: "console"
ansible.builtin.copy:
content: |
{{ hostvars['console']['ssh']['ca']['key'] }}
dest: "/run/user/{{ hostvars['console']['node']['uid'] }}/local_ssh_ca_private_key"
owner: "console"
group: "svadmins"
mode: "0400"
no_log: true
- name: Sign on ssh host keys (pub file)
delegate_to: "console"
ansible.builtin.command: |
ssh-keygen -s /run/user/{{ hostvars['console']['node']['uid'] }}/local_ssh_ca_private_key \
-h \
-I "{{ node['name'] }}" \
-n "{{ node['ssh_san'] }}" \
/run/user/{{ hostvars['console']['node']['uid'] }}/{{ node['name'] }}_ssh_host_ed25519_key.pub
changed_when: not is_signed_ca_key.stat.exists or (is_certificate_info_different | default(false))
no_log: true
- name: Deploy signed pub file
ansible.builtin.copy:
src: "/run/user/{{ hostvars['console']['node']['uid'] }}/{{ node['name'] }}_ssh_host_ed25519_key-cert.pub"
dest: "/etc/ssh/ssh_host_ed25519_key-cert.pub"
owner: "root"
group: "root"
mode: "0644"
become: true
notify: "notification_restart_sshd"
always:
- name: Clean temporary files
delegate_to: "console"
ansible.builtin.file:
path: "/run/user/{{ hostvars['console']['node']['uid'] }}/{{ item }}"
state: "absent"
loop:
- "{{ node['name'] }}_ssh_host_ed25519_key.pub"
- "{{ node['name'] }}_ssh_host_ed25519_key-cert.pub"
- "local_ssh_ca_private_key"
no_log: true
- name: Set sshd_config.d files
ansible.builtin.copy:
src: "{{ hostvars['console']['node']['config_path'] }}/node/common/ssh/{{ item }}"
dest: "/etc/ssh/sshd_config.d/{{ item }}"
owner: "root"
group: "root"
mode: "0644"
loop:
- "prohibit_root.conf"
- "ssh_ca.conf"
- "host_certificate.conf"
become: true
notify: "notification_restart_sshd"

View File

@@ -0,0 +1,20 @@
---
- name: Create timesyncd.conf.d
ansible.builtin.file:
path: "/etc/systemd/timesyncd.conf.d"
state: "directory"
owner: "root"
group: "root"
mode: "0755"
become: true
- name: Deploy timesyncd.conf.d/local-ntp.conf
ansible.builtin.copy:
src: "{{ hostvars['console']['node']['config_path'] }}/node/common/timesyncd/local-ntp.conf"
dest: "/etc/systemd/timesyncd.conf.d/local-ntp.conf"
owner: "root"
group: "systemd-timesync"
mode: "0640"
become: true
notify: "notification_restart_timesyncd"
no_log: true

View File

@@ -0,0 +1,15 @@
---
- name: Create wg0 files
ansible.builtin.template:
src: "{{ hostvars['console']['node']['config_path'] }}/node/fw/wireguard/{{ item }}"
dest: "/etc/systemd/network/{{ item }}"
owner: "root"
group: "systemd-network"
mode: "0640"
loop:
- "30-fw-wg0.netdev"
- "31-fw-wg0.network"
become: true
when: node['name'] == 'fw'
notify: "notification_reload_networkctl"
no_log: true

View File

@@ -0,0 +1,73 @@
---
- name: Gather system facts (hardware)
ansible.builtin.setup:
gather_subset:
- hardware
become: true
- name: Deploy alloy deb file (x86_64)
ansible.builtin.copy:
src: "{{ hostvars['console']['node']['data_path'] }}/bin/alloy-{{ version['packages']['alloy'] }}-amd64.deb"
dest: "/var/cache/apt/archives/alloy-{{ version['packages']['alloy'] }}.deb"
owner: "root"
group: "root"
mode: "0644"
become: true
when: ansible_facts['architecture'] == "x86_64"
- name: Deploy alloy deb file (aarch64)
ansible.builtin.copy:
src: "{{ hostvars['console']['node']['data_path'] }}/bin/alloy-{{ version['packages']['alloy'] }}-arm64.deb"
dest: "/var/cache/apt/archives/alloy-{{ version['packages']['alloy'] }}.deb"
owner: "root"
group: "root"
mode: "0644"
become: true
when: ansible_facts['architecture'] == "aarch64"
- name: Install alloy
ansible.builtin.apt:
deb: "/var/cache/apt/archives/alloy-{{ version['packages']['alloy'] }}.deb"
state: "present"
become: true
- name: Deploy alloy config
ansible.builtin.template:
src: "{{ hostvars['console']['node']['config_path'] }}/services/systemd/common/alloy/config.alloy.j2"
dest: "/etc/alloy/config.alloy"
owner: "root"
group: "root"
mode: "0644"
become: true
notify: "notification_restart_alloy"
no_log: true
- name: Create alloy.service.d
ansible.builtin.file:
path: "/etc/systemd/system/alloy.service.d"
state: "directory"
owner: "root"
group: "root"
mode: "0755"
become: true
- name: Set alloy.service.d/override.conf
ansible.builtin.copy:
dest: "/etc/systemd/system/alloy.service.d/override.conf"
content: |
[Service]
Restart=always
RestartSec=60
owner: "root"
group: "root"
mode: "0644"
become: true
notify: "notification_restart_alloy"
- name: Enable alloy service
ansible.builtin.systemd:
name: "alloy.service"
state: "started"
enabled: true
daemon_reload: true
become: true

View File

@@ -0,0 +1,99 @@
---
# infra, auth, app (vmm, fw has no podman in it)
- name: Create caddy directory
ansible.builtin.file:
path: "{{ node['home_path'] }}/containers/{{ item }}"
owner: "{{ ansible_user }}"
group: "svadmins"
state: "directory"
mode: "0770"
loop:
- "caddy"
- "caddy/etc"
- "caddy/data"
- "caddy/build"
become: true
- name: Create caddy log directory for auth
ansible.builtin.file:
path: /var/log/caddy
owner: "{{ ansible_user }}"
group: "svadmins"
state: "directory"
mode: "0755"
become: true
when: node['name'] == "auth"
- name: Register acme key to podman secret
containers.podman.podman_secret:
name: "CADDY_ACME_KEY"
data: "{{ hostvars['console']['ca']['acme_key'] }}"
state: "present"
force: true
notify: "notification_restart_caddy"
no_log: true
- name: Register crowdsec bouncer key to podman secret
containers.podman.podman_secret:
name: "CADDY_CROWDSEC_KEY"
data: "{{ hostvars['console']['crowdsec']['bouncer']['caddy'] }}"
state: "present"
force: true
when: node['name'] == "auth"
notify: "notification_restart_caddy"
no_log: true
- name: Deploy containerfile for build
ansible.builtin.template:
src: "{{ hostvars['console']['node']['config_path'] }}/services/containers/common/caddy/build/caddy.containerfile.j2"
dest: "{{ node['home_path'] }}/containers/caddy/build/Containerfile"
owner: "{{ ansible_user }}"
group: "svadmins"
mode: "0640"
- name: Deploy root crt for build
ansible.builtin.copy:
content: "{{ hostvars['console']['ca']['root']['crt'] }}"
dest: "{{ node['home_path'] }}/containers/caddy/build/ilnmors_root_ca.crt"
owner: "{{ ansible_user }}"
group: "svadmins"
mode: "0640"
no_log: true
- name: Build caddy container image
containers.podman.podman_image:
name: "ilnmors.internal/{{ node['name'] }}/caddy"
# check tags from container file
tag: "{{ version['containers']['caddy'] }}"
state: "build"
path: "{{ node['home_path'] }}/containers/caddy/build"
- name: Prune caddy dangling images
containers.podman.podman_prune:
image: true
- name: Deploy caddyfile
ansible.builtin.template:
src: "{{ hostvars['console']['node']['config_path'] }}/services/containers/common/caddy/etc/{{ node['name'] }}/Caddyfile.j2"
dest: "{{ node['home_path'] }}/containers/caddy/etc/Caddyfile"
owner: "{{ ansible_user }}"
group: "svadmins"
mode: "0600"
notify: "notification_restart_caddy"
- name: Deploy container file
ansible.builtin.template:
src: "{{ hostvars['console']['node']['config_path'] }}/services/containers/common/caddy/caddy.container.j2"
dest: "{{ node['home_path'] }}/.config/containers/systemd/caddy.container"
owner: "{{ ansible_user }}"
group: "svadmins"
mode: "0644"
notify: "notification_restart_caddy"
- name: Enable caddy
ansible.builtin.systemd:
name: "caddy.service"
state: "started"
enabled: true
daemon_reload: true
scope: "user"

View File

@@ -0,0 +1,304 @@
---
# Crowdsec provisioning, part 1: install the agent (and, on fw, the firewall
# bouncer), then deploy the base configuration and LAPI credentials.
- name: Check crowdsec installed
  ansible.builtin.shell: |
    command -v crowdsec
  changed_when: false
  failed_when: false
  register: "is_crowdsec_installed"
  ignore_errors: true
- name: Check crowdsec bouncer installed
  ansible.builtin.shell: |
    command -v crowdsec-firewall-bouncer
  when: node['name'] == "fw"
  changed_when: false
  failed_when: false
  register: "is_crowdsec_bouncer_installed"
  ignore_errors: true
- name: Install crowdsec
  ansible.builtin.apt:
    name: "crowdsec"
    state: "present"
  become: true
  when: is_crowdsec_installed.rc != 0
- name: Install crowdsec bouncers
  ansible.builtin.apt:
    name: "crowdsec-firewall-bouncer"
    state: "present"
  become: true
  when:
    - node['name'] == "fw"
    - is_crowdsec_bouncer_installed.rc != 0
# Per-node log acquisition: fw parses suricata, auth parses caddy logs.
- name: Set acquis.d list for bouncer
  ansible.builtin.set_fact:
    acquisd_list:
      fw:
        collection: "crowdsecurity/suricata"
        config: "suricata.yaml"
      auth:
        collection: "crowdsecurity/caddy"
        config: "caddy.yaml"
- name: Deploy crowdsec-update service files
  ansible.builtin.copy:
    src: "{{ hostvars['console']['node']['config_path'] }}/services/systemd/common/crowdsec/{{ item }}"
    dest: "/etc/systemd/system/{{ item }}"
    owner: "root"
    group: "root"
    mode: "0644"
    validate: "/usr/bin/systemd-analyze verify %s" # reject broken unit files
  loop:
    - "crowdsec-update.service"
    - "crowdsec-update.timer"
  become: true
- name: Deploy crowdsec config.yaml
  ansible.builtin.template:
    src: "{{ hostvars['console']['node']['config_path'] }}/services/systemd/common/crowdsec/etc/config.yaml.j2"
    dest: "/etc/crowdsec/config.yaml"
    owner: "root"
    group: "root"
    mode: "0644"
  become: true
  notify: "notification_restart_crowdsec"
  no_log: true
- name: Deploy crowdsec local_api_credentials.yaml
  ansible.builtin.template:
    src: "{{ hostvars['console']['node']['config_path'] }}/services/systemd/common/crowdsec/etc/local_api_credentials.yaml.j2"
    dest: "/etc/crowdsec/local_api_credentials.yaml"
    owner: "root"
    group: "root"
    mode: "0600"
  become: true
  notify: "notification_restart_crowdsec"
  no_log: true
# Crowdsec provisioning, part 2 (fw only): the fw node hosts the LAPI server.
# Deploy its TLS material, then reconcile the registered machines and
# bouncers against the fixed lists defined below.
- name: Set Crowdsec LAPI configuration
  when: node['name'] == "fw"
  block:
    - name: Create crowdsec ssl directory
      ansible.builtin.file:
        path: "/etc/crowdsec/ssl"
        state: "directory"
        owner: "root"
        group: "root"
        mode: "0700"
      become: true
    # Leaf cert + intermediate concatenated into one chain file.
    - name: Deploy crowdsec lapi ssl certificate
      ansible.builtin.copy:
        content: |
          {{ hostvars['console']['crowdsec']['crt'] | trim }}
          {{ hostvars['console']['ca']['intermediate']['crt'] }}
        dest: "/etc/crowdsec/ssl/crowdsec.crt"
        owner: "root"
        group: "root"
        mode: "0644"
      become: true
      notify: "notification_restart_crowdsec"
      no_log: true
    - name: Deploy crowdsec lapi ssl key
      ansible.builtin.copy:
        content: |
          {{ hostvars['console']['crowdsec']['key'] }}
        dest: "/etc/crowdsec/ssl/crowdsec.key"
        owner: "root"
        group: "root"
        mode: "0400"
      become: true
      notify: "notification_restart_crowdsec"
      no_log: true
    - name: Get existing machines list
      ansible.builtin.command:
        cmd: "cscli machines list -o json"
      become: true
      changed_when: false
      register: "existing_crowdsec_machines_list"
    - name: Set existing machines' name
      ansible.builtin.set_fact:
        existing_machines_name: "{{ existing_crowdsec_machines_list.stdout | from_json | map(attribute='machineId') | list }}"
    # Desired state: exactly these five machines may talk to the LAPI.
    - name: Set goal machines' name
      ansible.builtin.set_fact:
        machines_name: ["fw", "vmm", "infra", "auth", "app"]
      no_log: true
    - name: Prune unknown (random) machines
      ansible.builtin.command:
        cmd: "cscli machines delete {{ item }}"
      loop: "{{ existing_machines_name | difference(machines_name) }}"
      become: true
      changed_when: true
    # --force re-registers with the managed password each run; -f /dev/null
    # discards the credentials file cscli would otherwise write.
    - name: Register crowdsec machines to LAPI server
      ansible.builtin.command:
        cmd: "cscli machines add {{ item }} --password {{ hostvars['console']['crowdsec']['machine'][item] }} --force -f /dev/null"
      loop: "{{ machines_name }}"
      become: true
      changed_when: false
      no_log: true
    - name: Get existing bouncers list
      ansible.builtin.command:
        cmd: "cscli bouncers list -o json"
      become: true
      register: "existing_crowdsec_bouncers_list"
      changed_when: false
    - name: Set existing bouncers' name
      ansible.builtin.set_fact:
        existing_bouncers_name: "{{ existing_crowdsec_bouncers_list.stdout | from_json | map(attribute='name') | list }}"
    # Bouncers are deleted and re-added wholesale (keys are managed secrets).
    - name: Flush bouncers
      ansible.builtin.command:
        cmd: "cscli bouncers delete {{ item }}"
      loop: "{{ existing_bouncers_name }}"
      become: true
      changed_when: true
    - name: Set bouncers' name
      ansible.builtin.set_fact:
        bouncers_name: ["fw", "caddy"]
    - name: Register Firewall Bouncer to LAPI
      ansible.builtin.command:
        cmd: "cscli bouncers add {{ item }}-bouncer -k {{ hostvars['console']['crowdsec']['bouncer'][item] }}"
      loop: "{{ bouncers_name }}"
      become: true
      changed_when: true
      notify: "notification_restart_crowdsec_bouncer"
      no_log: true
# Crowdsec provisioning, part 3: per-node log acquisition (fw/auth only),
# firewall-bouncer config (fw only), restart-on-failure drop-ins, and the
# periodic rules-update timer.
- name: Set crowdsec bouncer
  when: node['name'] in acquisd_list
  block:
    # Install the node's parser collection; treat "already installed" as ok
    # and only report changed when cscli did not merely offer an overwrite.
    - name: Install crowdsec collection
      ansible.builtin.command:
        cmd: "cscli collections install {{ acquisd_list[node['name']]['collection'] }}"
      become: true
      changed_when: "'overwrite' not in is_collection_installed.stderr"
      failed_when:
        - is_collection_installed.rc != 0
        - "'already installed' not in is_collection_installed.stderr"
      register: "is_collection_installed"
    - name: Create crowdsec acquis.d directory
      ansible.builtin.file:
        path: "/etc/crowdsec/acquis.d"
        state: "directory"
        owner: "root"
        group: "root"
        mode: "0755"
      become: true
    - name: Create whitelists.yaml
      ansible.builtin.template:
        src: "{{ hostvars['console']['node']['config_path'] }}/services/systemd/common/crowdsec/bouncers/whitelists.yaml.j2"
        dest: "/etc/crowdsec/parsers/s02-enrich/whitelists.yaml"
        owner: "root"
        group: "root"
        mode: "0644"
      become: true
      notify:
        - "notification_restart_crowdsec"
        - "notification_restart_crowdsec_bouncer"
      no_log: true
    - name: Deploy acquis.d file
      ansible.builtin.copy:
        src: "{{ hostvars['console']['node']['config_path'] }}/services/systemd/common/crowdsec/acquis.d/{{ acquisd_list[node['name']]['config'] }}"
        dest: "/etc/crowdsec/acquis.d/{{ acquisd_list[node['name']]['config'] }}"
        owner: "root"
        group: "root"
        mode: "0644"
      become: true
      notify: "notification_restart_crowdsec"
- name: Set Crowdsec-Firewall-Bouncer
  when: node['name'] == "fw"
  block:
    - name: Deploy crowdsec-firewall-bouncer.yaml
      ansible.builtin.template:
        src: "{{ hostvars['console']['node']['config_path'] }}/services/systemd/common/crowdsec/bouncers/crowdsec-firewall-bouncer.yaml.j2"
        dest: "/etc/crowdsec/bouncers/crowdsec-firewall-bouncer.yaml"
        owner: "root"
        group: "root"
        mode: "0600"
      become: true
      notify: "notification_restart_crowdsec_bouncer"
    # Remove auto-generated override files so only the template applies.
    - name: Delete crowdsec-firewall-bouncer.yaml subfiles (.id, .local)
      ansible.builtin.file:
        path: "/etc/crowdsec/bouncers/crowdsec-firewall-bouncer.yaml.{{ item }}"
        state: "absent"
      loop:
        - "local"
        - "id"
      become: true
      notify: "notification_restart_crowdsec_bouncer"
    - name: Create crowdsec-firewall-bouncer.service.d
      ansible.builtin.file:
        path: "/etc/systemd/system/crowdsec-firewall-bouncer.service.d"
        state: "directory"
        owner: "root"
        group: "root"
        mode: "0755"
      become: true
    # Drop-in: generous start timeout plus restart-on-failure policy.
    - name: Set crowdsec-firewall-bouncer.service.d/override.conf
      ansible.builtin.copy:
        dest: "/etc/systemd/system/crowdsec-firewall-bouncer.service.d/override.conf"
        content: |
          [Service]
          Type=simple
          TimeoutStartSec=600
          Restart=always
          RestartSec=60
        owner: "root"
        group: "root"
        mode: "0644"
      become: true
      notify: "notification_restart_crowdsec_bouncer"
- name: Create crowdsec.service.d
  ansible.builtin.file:
    path: "/etc/systemd/system/crowdsec.service.d"
    state: "directory"
    owner: "root"
    group: "root"
    mode: "0755"
  become: true
- name: Set crowdsec.service.d/override.conf
  ansible.builtin.copy:
    dest: "/etc/systemd/system/crowdsec.service.d/override.conf"
    content: |
      [Service]
      Restart=always
      RestartSec=60
    owner: "root"
    group: "root"
    mode: "0644"
  become: true
  notify: "notification_restart_crowdsec"
- name: Enable auto crowdsec rules update
  ansible.builtin.systemd:
    name: "crowdsec-update.timer"
    state: "started"
    enabled: true
    daemon_reload: true
  become: true
# Handy verification commands:
# cscli bouncers list
# cscli machines list
# cscli metrics

View File

@@ -0,0 +1,137 @@
---
# Kopia backup client setup. The console node connects to the repository
# server; infra/app nodes get a dedicated system user, the packaged binary,
# and a timer-driven backup service.
- name: Gather system facts (hardware)
  ansible.builtin.setup:
    gather_subset:
      - hardware
  become: true
- name: Check kopia installation
  ansible.builtin.shell: |
    command -v kopia
  changed_when: false
  failed_when: false
  register: "is_kopia_installed"
  ignore_errors: true
- name: Set console kopia
  when: node['name'] == 'console'
  block:
    - name: Apply cli tools (x86_64)
      ansible.builtin.apt:
        deb: "{{ node['data_path'] }}/bin/kopia-{{ version['packages']['kopia'] }}-amd64.deb"
        state: "present"
      become: true
      when:
        - ansible_facts['architecture'] == "x86_64"
        - is_kopia_installed.rc != 0
    - name: Apply cli tools (aarch64)
      ansible.builtin.apt:
        deb: "{{ node['data_path'] }}/bin/kopia-{{ version['packages']['kopia'] }}-arm64.deb"
        state: "present"
      become: true
      when:
        - ansible_facts['architecture'] == "aarch64"
        - is_kopia_installed.rc != 0
    # Connects the console to the kopia repository server; failure of the
    # connect command fails the task (rc check on its own register).
    - name: Connect kopia server
      environment:
        KOPIA_PASSWORD: "{{ hostvars['console']['kopia']['user']['console'] }}"
      ansible.builtin.shell: |
        /usr/bin/kopia repository connect server \
          --url=https://{{ infra_uri['kopia']['domain'] }}:{{ infra_uri['kopia']['ports']['https'] }} \
          --override-username=console \
          --override-hostname=console.ilnmors.internal
      changed_when: false
      failed_when: is_kopia_connected.rc != 0
      register: "is_kopia_connected"
      no_log: true
- name: Set infra/app kopia
  when: node['name'] in ['infra', 'app']
  block:
    # Fixed UID/GID (951) so backup file ownership stays stable across
    # node rebuilds.
    - name: Set kopia uid
      ansible.builtin.set_fact:
        kopia_uid: 951
    - name: Deploy kopia deb file (x86_64)
      ansible.builtin.copy:
        src: "{{ hostvars['console']['node']['data_path'] }}/bin/kopia-{{ version['packages']['kopia'] }}-amd64.deb"
        dest: "/var/cache/apt/archives/kopia-{{ version['packages']['kopia'] }}.deb"
        owner: "root"
        group: "root"
        mode: "0644"
      become: true
      when: ansible_facts['architecture'] == "x86_64"
    - name: Deploy kopia deb file (aarch64)
      ansible.builtin.copy:
        src: "{{ hostvars['console']['node']['data_path'] }}/bin/kopia-{{ version['packages']['kopia'] }}-arm64.deb"
        dest: "/var/cache/apt/archives/kopia-{{ version['packages']['kopia'] }}.deb"
        owner: "root"
        group: "root"
        mode: "0644"
      become: true
      when: ansible_facts['architecture'] == "aarch64"
    - name: Create kopia group
      ansible.builtin.group:
        name: "kopia"
        gid: "{{ kopia_uid }}"
        state: "present"
      become: true
    - name: Create kopia user
      ansible.builtin.user:
        name: "kopia"
        uid: "{{ kopia_uid }}"
        group: "kopia"
        shell: "/usr/sbin/nologin"
        password_lock: true
        comment: "Kopia backup User"
        state: "present"
      become: true
    # /etc/secrets/951 is the per-UID secrets directory; path is hard-coded
    # to the same 951 as kopia_uid above.
    - name: Create kopia directory
      ansible.builtin.file:
        path: "{{ item.name }}"
        state: "directory"
        owner: "kopia"
        group: "root"
        mode: "{{ item.mode }}"
      loop:
        - name: "/etc/kopia"
          mode: "0700"
        - name: "/etc/secrets/951"
          mode: "0500"
        - name: "/var/cache/kopia"
          mode: "0700"
      become: true
      no_log: true
    - name: Install kopia
      ansible.builtin.apt:
        deb: "/var/cache/apt/archives/kopia-{{ version['packages']['kopia'] }}.deb"
        state: "present"
      become: true
      when: is_kopia_installed.rc != 0
    - name: Deploy kopia env
      ansible.builtin.template:
        src: "{{ hostvars['console']['node']['config_path'] }}/services/systemd/common/kopia/kopia.env.j2"
        dest: "/etc/secrets/{{ kopia_uid }}/kopia.env"
        owner: "{{ kopia_uid }}"
        group: "root"
        mode: "0400"
      become: true
      no_log: true
    # Unit files are syntax-checked before install.
    - name: Deploy kopia service files
      ansible.builtin.template:
        src: "{{ hostvars['console']['node']['config_path'] }}/services/systemd/common/kopia/{{ item }}.j2"
        dest: "/etc/systemd/system/{{ item }}"
        owner: "root"
        group: "root"
        mode: "0644"
        validate: "/usr/bin/systemd-analyze verify %s"
      loop:
        - "kopia-backup.service"
        - "kopia-backup.timer"
      become: true
    - name: Enable auto kopia rules update
      ansible.builtin.systemd:
        name: "kopia-backup.timer"
        state: "started"
        enabled: true
        daemon_reload: true
      become: true

View File

@@ -0,0 +1,46 @@
---
# Podman presence check plus the per-node container directory layout.
- name: Check podman installation
  ansible.builtin.shell: |
    command -v podman
  changed_when: false
  failed_when: false
  register: "is_podman_installed"
  ignore_errors: true
- name: Create container directory
  ansible.builtin.file:
    path: "{{ node['home_path'] }}/containers"
    owner: "{{ ansible_user }}"
    group: "svadmins"
    state: "directory"
    mode: "0700"
# Fix: task name typo ("contaienr" -> "container").
- name: Create container data directory for app
  ansible.builtin.file:
    path: "{{ node['home_path'] }}/data/containers"
    owner: "{{ ansible_user }}"
    group: "svadmins"
    state: "directory"
    mode: "0770"
  when: node['name'] == "app"
# First-time podman install. Rootless podman needs subordinate UID/GID
# ranges for the service user before first use.
# NOTE(review): the block name mentions resetting the ssh connection, but no
# reset task is visible here — confirm whether a meta: reset_connection task
# follows in the original file.
- name: Install podman and reset ssh connection for initiating
  when: is_podman_installed.rc != 0
  become: true
  block:
    # Overwrites /etc/subuid and /etc/subgid with a single 65536-wide
    # mapping for the service user.
    - name: Set subid scope (Overwrite)
      ansible.builtin.copy:
        content: |
          {{ ansible_user }}:100000:65536
        dest: "/etc/sub{{ item }}"
        owner: "root"
        group: "root"
        mode: "0644"
      loop:
        - "uid"
        - "gid"
    - name: Install podman
      ansible.builtin.apt:
        name:
          - "podman"
        state: "present"

View File

@@ -0,0 +1,8 @@
---
# Handler: rebuild the fontconfig cache after new fonts are installed.
# Uses ansible.builtin.command (idiomatic; no shell features are needed for
# a plain fc-cache invocation), matching the style of the other handlers.
# Best-effort: a cache failure must not abort the play.
- name: Register font
  ansible.builtin.command:
    fc-cache -f -v
  become: true
  changed_when: false
  listen: "notification_update_font"
  ignore_errors: true # noqa: ignore-errors

View File

@@ -0,0 +1,29 @@
---
# sops must already be installed on the controller; all secrets are decrypted
# once on console and exported as facts for the rest of the play.
- name: Check sops installation (Prerequisite)
  ansible.builtin.shell: |
    command -v sops
  changed_when: false
  failed_when: false
  register: "is_sops_installed"
  ignore_errors: true
- name: Failure when sops is missing
  ansible.builtin.fail:
    msg: "sops is not installed. Please install sops manually as described in README.md before running this playbook"
  when: is_sops_installed.rc != 0
# Decryption key comes from hostvars; output is never logged.
- name: Decrypt secret values in console
  environment:
    SOPS_AGE_KEY: "{{ hostvars['console']['age_key'] }}"
  ansible.builtin.command: |
    sops -d --output-type yaml {{ hostvars['console']['node']['config_path'] }}/secrets/secrets.yaml
  changed_when: false
  register: "decrypted_secrets"
  run_once: true
  no_log: true
# Fix: task name typo ("vaules" -> "values").
- name: Load decrypted secret values in console
  ansible.builtin.set_fact:
    "{{ item.key }}": "{{ item.value }}"
  loop: "{{ decrypted_secrets.stdout | from_yaml | dict2items }}"
  no_log: true

View File

@@ -0,0 +1,109 @@
---
# Distribute the console SSH client key, trust the internal SSH CA for
# *.ilnmors.internal host keys, and (re)sign the client certificate when the
# allowed-principal list differs from node['ssh_users'].
- name: Create ssh id_console
  ansible.builtin.copy:
    content: "{{ hostvars['console']['ssh']['console']['key'] }}"
    dest: "/etc/secrets/{{ node['uid'] }}/id_console"
    owner: "{{ ansible_user }}"
    group: "root"
    mode: "0400"
  become: true
  no_log: true
- name: Create ssh id_console.pub
  ansible.builtin.copy:
    content: "{{ hostvars['console']['ssh']['console']['pub'] }}"
    dest: "/etc/secrets/{{ node['uid'] }}/id_console.pub"
    owner: "{{ ansible_user }}"
    group: "root"
    mode: "0400"
  become: true
  no_log: true
- name: Create ssh_known_hosts
  become: true
  ansible.builtin.copy:
    content: |
      @cert-authority *.ilnmors.internal {{ hostvars['console']['ssh']['ca']['pub'] }}
    dest: "/etc/ssh/ssh_known_hosts"
    owner: "root"
    group: "root"
    mode: "0644"
  no_log: true
- name: Check id_console-cert.pub
  ansible.builtin.stat:
    path: "/etc/secrets/{{ node['uid'] }}/id_console-cert.pub"
  register: "is_signed_console_key"
# Extract the "Principals:" section of the existing certificate so it can be
# compared with the configured user list.
- name: Get current id_console-cert.pub allow users
  ansible.builtin.shell: |
    set -o pipefail
    ssh-keygen -L -f /etc/secrets/{{ node['uid'] }}/id_console-cert.pub | \
    sed -n '/Principals:/,/Critical Options:/p' | \
    sed '1d;$d' | \
    sed 's/^[[:space:]]*//'
  when: is_signed_console_key.stat.exists
  changed_when: false
  register: "current_allow_users"
  no_log: true
# Fix: task name typo ("informations" -> "information").
- name: Set key information
  ansible.builtin.set_fact:
    current_user_list: "{{ current_allow_users.stdout_lines }}"
  when: is_signed_console_key.stat.exists
  no_log: true
- name: Compare key values between current information and defined information
  ansible.builtin.set_fact:
    is_certificate_info_different: true
  when: (current_user_list | default([])) != (node['ssh_users'].split(',') | map('trim') | list)
- name: Get SSH CA and signing
  when: not is_signed_console_key.stat.exists or (is_certificate_info_different | default(false))
  block:
    # The CA private key is only materialized under /run while signing and
    # is removed again in the always: section below.
    - name: Get SSH CA
      ansible.builtin.copy:
        content: |
          {{ hostvars['console']['ssh']['ca']['key'] }}
        dest: "/run/user/{{ node['uid'] }}/local_ssh_ca_private_key"
        owner: "console"
        group: "svadmins"
        mode: "0400"
      no_log: true
    - name: Sign on ssh console key (pub file)
      ansible.builtin.command: |
        ssh-keygen -s /run/user/{{ node['uid'] }}/local_ssh_ca_private_key \
        -I "{{ node['name'] }}" \
        -n "{{ node['ssh_users'] }}" \
        /etc/secrets/{{ node['uid'] }}/id_console.pub
      become: true
      changed_when: not is_signed_console_key.stat.exists or (is_certificate_info_different | default(false))
      no_log: true
  always:
    - name: Clean temporary files
      ansible.builtin.file:
        path: "/run/user/{{ node['uid'] }}/local_ssh_ca_private_key"
        state: "absent"
      no_log: true
- name: Create .ssh directory
  ansible.builtin.file:
    path: "{{ node['home_path'] }}/.ssh"
    state: "directory"
    owner: "{{ ansible_user }}"
    group: "svadmins"
    mode: "0700"
# Generate one Host entry per managed node (console itself excluded).
- name: Create ssh config file
  ansible.builtin.copy:
    content: |
      {% for host in groups['all'] if host != 'console' %}
      Host {{ host }}
      HostName {{ hostvars[host]['ansible_host'] }}
      User {{ hostvars[host]['ansible_user'] }}
      IdentityFile /etc/secrets/{{ node['uid'] }}/id_console
      {% endfor %}
    dest: "{{ node['home_path'] }}/.ssh/config"
    owner: "{{ ansible_user }}"
    group: "svadmins"
    mode: "0600"

View File

@@ -0,0 +1,31 @@
---
# Install chromium and the Korean (Nanum) fonts; a font install triggers an
# fc-cache rebuild via the notification_update_font handler.
- name: Check chromium installation
  ansible.builtin.shell: |
    command -v chromium
  changed_when: false
  failed_when: false
  register: "is_chromium_installed"
  ignore_errors: true
# grep exits non-zero when no Nanum font is registered.
- name: Check korean font installation
  ansible.builtin.shell: |
    fc-list | grep -i "nanum"
  changed_when: false
  failed_when: false
  register: "is_font_installed"
  ignore_errors: true
- name: Install chromium
  ansible.builtin.apt:
    name: "chromium"
    state: "present"
  become: true
  when: is_chromium_installed.rc != 0
- name: Install font
  ansible.builtin.apt:
    name: "fonts-nanum"
    state: "present"
  become: true
  when: is_font_installed.rc != 0
  notify: "notification_update_font"

View File

@@ -0,0 +1,108 @@
---
# Console bootstrap: refresh Ansible collections and pre-download the .deb /
# tarball artifacts that are later pushed to the other nodes, then install
# the CLI tools locally for console's own architecture.
- name: Gather system facts (hardware)
  ansible.builtin.setup:
    gather_subset:
      - hardware
  become: true
- name: Check ansible installation
  ansible.builtin.shell: |
    command -v ansible
  changed_when: false
  failed_when: false
  register: "is_ansible_installed"
  ignore_errors: true
- name: Upgrade ansible module
  community.general.ansible_galaxy_install:
    type: "collection"
    name: "{{ item }}"
    state: "latest"
  loop:
    - "ansible.posix"
    - "community.libvirt"
    - "community.general"
    - "containers.podman"
  when: is_ansible_installed.rc == 0
# Both architectures are fetched so any node type can be provisioned later.
- name: Download sops
  ansible.builtin.get_url:
    url: "https://github.com/getsops/sops/releases/download/v{{ version['packages']['sops'] }}/\
sops_{{ version['packages']['sops'] }}_{{ item }}.deb"
    dest: "{{ node['data_path'] }}/bin/sops-{{ version['packages']['sops'] }}-{{ item }}.deb"
    owner: "{{ ansible_user }}"
    group: "svadmins"
    mode: "0600"
  loop:
    - "amd64"
    - "arm64"
- name: Download step-cli
  ansible.builtin.get_url:
    url: "https://dl.smallstep.com/gh-release/cli/gh-release-header/v{{ version['packages']['step'] }}/\
step-cli_{{ version['packages']['step'] }}-1_{{ item }}.deb"
    dest: "{{ node['data_path'] }}/bin/step-{{ version['packages']['step'] }}-{{ item }}.deb"
    owner: "{{ ansible_user }}"
    group: "svadmins"
    mode: "0600"
  loop:
    - "amd64"
    - "arm64"
- name: Download kopia
  ansible.builtin.get_url:
    url: "https://github.com/kopia/kopia/releases/download/v{{ version['packages']['kopia'] }}/\
kopia_{{ version['packages']['kopia'] }}_linux_{{ item }}.deb"
    dest: "{{ node['data_path'] }}/bin/kopia-{{ version['packages']['kopia'] }}-{{ item }}.deb"
    owner: "{{ ansible_user }}"
    group: "svadmins"
    mode: "0600"
  loop:
    - "amd64"
    - "arm64"
# blocky's release assets use "x86_64"/"arm64" naming (unlike the deb-based
# downloads above, which use "amd64").
- name: Download blocky
  ansible.builtin.get_url:
    url: "https://github.com/0xERR0R/blocky/releases/download/v{{ version['packages']['blocky'] }}/\
blocky_v{{ version['packages']['blocky'] }}_Linux_{{ item }}.tar.gz"
    dest: "{{ node['data_path'] }}/bin/blocky-{{ version['packages']['blocky'] }}-{{ item }}.tar.gz"
    owner: "{{ ansible_user }}"
    group: "svadmins"
    mode: "0600" # noqa: line-length
  loop:
    - "x86_64"
    - "arm64"
- name: Download alloy
  ansible.builtin.get_url:
    url: "https://github.com/grafana/alloy/releases/download/v{{ version['packages']['alloy'] }}/\
alloy-{{ version['packages']['alloy'] }}-1.{{ item }}.deb"
    dest: "{{ node['data_path'] }}/bin/alloy-{{ version['packages']['alloy'] }}-{{ item }}.deb"
    owner: "{{ ansible_user }}"
    group: "svadmins"
    mode: "0600"
  loop:
    - "amd64"
    - "arm64"
- name: Apply cli tools (x86_64)
  ansible.builtin.apt:
    deb: "{{ node['data_path'] }}/bin/{{ item }}"
    state: "present"
  loop:
    - "sops-{{ version['packages']['sops'] }}-amd64.deb"
    - "step-{{ version['packages']['step'] }}-amd64.deb"
    - "kopia-{{ version['packages']['kopia'] }}-amd64.deb"
  become: true
  when: ansible_facts['architecture'] == "x86_64"
- name: Apply cli tools (aarch64)
  ansible.builtin.apt:
    deb: "{{ node['data_path'] }}/bin/{{ item }}"
    state: "present"
  loop:
    - "sops-{{ version['packages']['sops'] }}-arm64.deb"
    - "step-{{ version['packages']['step'] }}-arm64.deb"
    - "kopia-{{ version['packages']['kopia'] }}-arm64.deb"
  become: true
  when: ansible_facts['architecture'] == "aarch64"

View File

@@ -0,0 +1,63 @@
---
# Handlers for the fw node services. All are best-effort (ignore_errors) so a
# failed restart never aborts the play; changed_when: false keeps handler
# runs out of the change count.
# Fix: "Restart blocky" used the string "true" for enabled; normalized to a
# boolean like every other handler here.
- name: Restart chrony
  ansible.builtin.systemd:
    name: "chrony.service"
    state: "restarted"
    enabled: true
    daemon_reload: true
  become: true
  changed_when: false
  listen: "notification_restart_chrony"
  ignore_errors: true # noqa: ignore-errors
- name: Update suricata rules
  ansible.builtin.command:
    suricata-update --disable-conf /etc/suricata/disable.conf --enable-conf /etc/suricata/enable.conf --local /etc/suricata/rules/local.rules
  become: true
  changed_when: false
  listen: "notification_update_suricata_rules"
  ignore_errors: true # noqa: ignore-errors
- name: Restart suricata
  ansible.builtin.systemd:
    name: "suricata.service"
    state: "restarted"
    enabled: true
    daemon_reload: true
  become: true
  changed_when: false
  listen: "notification_restart_suricata"
  ignore_errors: true # noqa: ignore-errors
- name: Restart bind9
  ansible.builtin.systemd:
    name: "named.service"
    state: "restarted"
    enabled: true
    daemon_reload: true
  become: true
  changed_when: false
  listen: "notification_restart_bind"
  ignore_errors: true # noqa: ignore-errors
- name: Restart blocky
  ansible.builtin.systemd:
    name: "blocky.service"
    state: "restarted"
    enabled: true
    daemon_reload: true
  become: true
  changed_when: false
  listen: "notification_restart_blocky"
  ignore_errors: true # noqa: ignore-errors
- name: Restart kea-dhcp4
  ansible.builtin.systemd:
    name: "kea-dhcp4-server.service"
    state: "restarted"
    enabled: true
    daemon_reload: true
  become: true
  changed_when: false
  listen: "notification_restart_kea4"
  ignore_errors: true # noqa: ignore-errors

View File

@@ -0,0 +1,103 @@
---
# BIND9 authoritative DNS for the internal forward and reverse zones.
- name: Check bind9 installation
  ansible.builtin.shell: |
    command -v named
  become: true # named is located in /usr/sbin, which means root permission is needed.
  changed_when: false
  failed_when: false
  register: "is_bind_installed"
  ignore_errors: true
- name: Set bind9 zone files
  ansible.builtin.set_fact:
    bind_zone_files:
      - "db.ilnmors.internal"
      - "db.ilnmors.com"
      - "db.1.168.192.in-addr.arpa"
      - "db.10.168.192.in-addr.arpa"
      - "db.1.00df.ip6.arpa"
      - "db.10.00df.ip6.arpa"
- name: Install bind9
  ansible.builtin.apt:
    name: "bind9"
    state: "present"
  become: true
  when: is_bind_installed.rc != 0
# Fix: task name typo ("acem.key" -> "acme.key"); the deployed file was
# already /etc/bind/acme.key.
- name: Deploy acme.key
  ansible.builtin.copy:
    content: "{{ hostvars['console']['bind']['acme_key'] }}"
    dest: "/etc/bind/acme.key"
    owner: "bind"
    group: "bind"
    mode: "0640"
  become: true
  notify: "notification_restart_bind"
  no_log: true
- name: Deploy db files
  ansible.builtin.copy:
    src: "{{ hostvars['console']['node']['config_path'] }}/services/systemd/fw/bind/lib/{{ item }}"
    dest: "/var/lib/bind/{{ item }}"
    owner: "bind"
    group: "bind"
    mode: "0640"
  loop: "{{ bind_zone_files }}"
  become: true
  notify: "notification_restart_bind"
  no_log: true
# Stale journals would conflict with the freshly copied zone files.
- name: Clean BIND journal files
  ansible.builtin.file:
    path: "/var/lib/bind/{{ item }}.jnl"
    state: absent
  loop: "{{ bind_zone_files }}"
  become: true
  notify: "notification_restart_bind"
  no_log: true
- name: Deploy named.conf
  ansible.builtin.template:
    src: "{{ hostvars['console']['node']['config_path'] }}/services/systemd/fw/bind/etc/named.conf.j2"
    dest: "/etc/bind/named.conf"
    owner: "root"
    group: "bind"
    mode: "0640"
    validate: "/usr/bin/named-checkconf -z %s"
  become: true
  notify: "notification_restart_bind"
  no_log: true
- name: Create named.service.d
  ansible.builtin.file:
    path: "/etc/systemd/system/named.service.d"
    state: "directory"
    owner: "root"
    group: "root"
    mode: "0755"
  become: true
- name: Set named.service.d/override.conf
  ansible.builtin.copy:
    dest: "/etc/systemd/system/named.service.d/override.conf"
    content: |
      [Service]
      Restart=always
      RestartSec=60
    owner: "root"
    group: "root"
    mode: "0644"
  become: true
  notify: "notification_restart_bind"
- name: Enable bind9 service
  ansible.builtin.systemd:
    name: "named.service"
    state: "started"
    enabled: true
  become: true
# Verify working
# dig A fw.ilnmors.internal @fd00:10::3
# dig AAAA fw.ilnmors.internal @fd00:10::3

View File

@@ -0,0 +1,117 @@
---
# Blocky DNS proxy: dedicated system user, binary from the pre-downloaded
# tarball, TLS material, and a systemd unit.
- name: Gather system facts (hardware)
  ansible.builtin.setup:
    gather_subset:
      - hardware
  become: true
- name: Create blocky group
  ansible.builtin.group:
    name: "blocky"
    gid: 953
    state: "present"
  become: true
- name: Create blocky user
  ansible.builtin.user:
    name: "blocky"
    uid: 953
    group: "blocky"
    shell: "/usr/sbin/nologin"
    password_lock: true
    comment: "Blocky DNS User"
    state: "present"
  become: true
- name: Create blocky etc directory
  ansible.builtin.file:
    path: "{{ item }}"
    owner: "blocky"
    group: "blocky"
    mode: "0750"
    state: "directory"
  loop:
    - "/etc/blocky"
    - "/etc/blocky/ssl"
  become: true
# Only the "blocky" member is extracted from the release tarball.
- name: Deploy blocky binary file (x86_64)
  ansible.builtin.unarchive:
    src: "{{ hostvars['console']['node']['data_path'] }}/bin/blocky-{{ version['packages']['blocky'] }}-x86_64.tar.gz"
    dest: "/usr/local/bin/"
    owner: "root"
    group: "root"
    mode: "0755"
    extra_opts:
      - "--strip-components=0"
      - "--wildcards"
      - "blocky"
  become: true
  when: ansible_facts['architecture'] == "x86_64"
  notify: "notification_restart_blocky"
- name: Deploy blocky binary file (aarch64)
  ansible.builtin.unarchive:
    src: "{{ hostvars['console']['node']['data_path'] }}/bin/blocky-{{ version['packages']['blocky'] }}-arm64.tar.gz"
    dest: "/usr/local/bin/"
    owner: "root"
    group: "root"
    mode: "0755"
    extra_opts:
      - "--strip-components=0"
      - "--wildcards"
      - "blocky"
  become: true
  when: ansible_facts['architecture'] == "aarch64"
  notify: "notification_restart_blocky"
- name: Deploy blocky config
  ansible.builtin.template:
    src: "{{ hostvars['console']['node']['config_path'] }}/services/systemd/fw/blocky/etc/config.yaml.j2"
    dest: "/etc/blocky/config.yaml"
    owner: "blocky"
    group: "blocky"
    mode: "0640"
  become: true
  notify: "notification_restart_blocky"
  no_log: true
# The served certificate is the leaf concatenated with the intermediate CA.
- name: Deploy blocky certificate and key
  ansible.builtin.copy:
    content: |
      {{ item.value }}
    dest: "/etc/blocky/ssl/{{ item.name }}"
    owner: "blocky"
    group: "blocky"
    mode: "{{ item.mode }}"
  loop:
    - name: "blocky.crt"
      value: |
        {{ hostvars['console']['blocky']['crt'] | trim }}
        {{ hostvars['console']['ca']['intermediate']['crt'] }}
      mode: "0440"
    - name: "blocky.key"
      value: "{{ hostvars['console']['blocky']['key'] }}"
      mode: "0400"
  become: true
  notify: "notification_restart_blocky"
  no_log: true
- name: Deploy blocky service
  ansible.builtin.copy:
    src: "{{ hostvars['console']['node']['config_path'] }}/services/systemd/fw/blocky/blocky.service"
    dest: "/etc/systemd/system/blocky.service"
    owner: "root"
    group: "root"
    mode: "0644"
    validate: "/usr/bin/systemd-analyze verify %s"
  become: true
  notify: "notification_restart_blocky"
- name: Enable blocky service
  ansible.builtin.systemd:
    name: "blocky.service"
    state: "started"
    enabled: true
    daemon_reload: true
  become: true

View File

@@ -0,0 +1,55 @@
---
# Chrony NTP server for the LAN with an always-restart systemd override.
# Fix: task name typo ("chrnoy" -> "chrony").
- name: Check chrony installation
  ansible.builtin.shell: |
    command -v chronyc
  changed_when: false
  failed_when: false
  register: "is_chrony_installed"
  ignore_errors: true
- name: Install chrony
  ansible.builtin.apt:
    name: "chrony"
    state: "present"
  become: true
  when: is_chrony_installed.rc != 0
- name: Deploy local acl file
  ansible.builtin.template:
    src: "{{ hostvars['console']['node']['config_path'] }}/services/systemd/fw/chrony/local-acl.conf.j2"
    dest: "/etc/chrony/conf.d/local-acl.conf"
    owner: "root"
    group: "root"
    mode: "0644"
  become: true
  notify: "notification_restart_chrony"
- name: Create chrony.service.d
  ansible.builtin.file:
    path: "/etc/systemd/system/chrony.service.d"
    state: "directory"
    owner: "root"
    group: "root"
    mode: "0755"
  become: true
- name: Set chrony.service.d/override.conf
  ansible.builtin.copy:
    dest: "/etc/systemd/system/chrony.service.d/override.conf"
    content: |
      [Service]
      Restart=always
      RestartSec=60
    owner: "root"
    group: "root"
    mode: "0644"
  become: true
  notify: "notification_restart_chrony"
- name: Enable chrony service
  ansible.builtin.systemd:
    name: "chrony.service"
    state: "started"
    enabled: true
    daemon_reload: true
  become: true

View File

@@ -0,0 +1,41 @@
---
# DDNS updater: secrets env file, update script, and a user-scoped systemd
# timer that runs it periodically.
- name: Create ddns secret env file
  ansible.builtin.copy:
    content: |
      ZONE_ID={{ hostvars['console']['ddns']['zone_id'] }}
      API_KEY={{ hostvars['console']['ddns']['api_key'] }}
    dest: "/etc/secrets/{{ node['uid'] }}/ddns.env"
    owner: "{{ ansible_user }}"
    group: "svadmins"
    mode: "0400"
  become: true
  no_log: true
# NOTE(review): dest is the directory, so the script lands as
# /usr/local/bin/ddns.sh — an explicit file path would be clearer; confirm
# before changing.
- name: Deploy ddns script
  ansible.builtin.copy:
    src: "{{ hostvars['console']['node']['config_path'] }}/services/systemd/fw/ddns/ddns.sh"
    dest: "/usr/local/bin"
    owner: "{{ ansible_user }}"
    group: "svadmins"
    mode: "0711"
  become: true
# User units live in ~/.config/systemd/user; no become needed.
- name: Deploy ddns service files
  ansible.builtin.copy:
    src: "{{ hostvars['console']['node']['config_path'] }}/services/systemd/fw/ddns/{{ item }}"
    dest: "{{ node['home_path'] }}/.config/systemd/user/{{ item }}"
    owner: "{{ ansible_user }}"
    group: "svadmins"
    mode: "0600"
    validate: "/usr/bin/systemd-analyze verify %s"
  loop:
    - "ddns.service"
    - "ddns.timer"
- name: Register ddns timer
  ansible.builtin.systemd:
    name: "ddns.timer"
    state: "started"
    enabled: true
    daemon_reload: true
    scope: "user"

View File

@@ -0,0 +1,57 @@
---
# Kea DHCPv4 server: install, configure, always-restart override, enable.
- name: Check Kea dhcp4 installation
  ansible.builtin.shell: |
    command -v kea-dhcp4
  become: true # kea-dhcp4 is located in /usr/sbin, which means root permission is needed.
  changed_when: false
  failed_when: false
  register: "is_kea4_installed"
  ignore_errors: true
- name: Install kea dhcp 4
  ansible.builtin.apt:
    name:
      - "kea-dhcp4-server"
    state: "present"
  become: true
  when: is_kea4_installed.rc != 0
- name: Deploy kea dhcp4 conf
  ansible.builtin.template:
    src: "{{ hostvars['console']['node']['config_path'] }}/services/systemd/fw/kea/kea-dhcp4.conf.j2"
    dest: "/etc/kea/kea-dhcp4.conf"
    owner: "_kea"
    group: "_kea"
    mode: "0600"
  become: true
  notify: "notification_restart_kea4"
# Fix: these two task names said "kea-dhcp-server" while the managed path is
# kea-dhcp4-server.service.d; names now match the actual unit.
- name: Create kea-dhcp4-server.service.d
  ansible.builtin.file:
    path: "/etc/systemd/system/kea-dhcp4-server.service.d"
    state: "directory"
    owner: "root"
    group: "root"
    mode: "0755"
  become: true
- name: Set kea-dhcp4-server.service.d/override.conf
  ansible.builtin.copy:
    dest: "/etc/systemd/system/kea-dhcp4-server.service.d/override.conf"
    content: |
      [Service]
      Restart=always
      RestartSec=60
    owner: "root"
    group: "root"
    mode: "0644"
  become: true
  notify:
    - "notification_restart_kea4"
- name: Enable kea service
  ansible.builtin.systemd:
    name: "kea-dhcp4-server.service"
    state: "started"
    enabled: true
  become: true

View File

@@ -0,0 +1,141 @@
---
# Suricata IDS: packages, rule management (suricata-update + timer), main
# config, always-restart override, and log rotation.
- name: Check suricata installation
  ansible.builtin.shell: |
    command -v suricata
  changed_when: false
  failed_when: false
  register: "is_suricata_installed"
  ignore_errors: true
- name: Install suricata
  ansible.builtin.apt:
    name:
      - "suricata"
      - "suricata-update"
    state: "present"
  become: true
  when: is_suricata_installed.rc != 0
- name: Deploy suricata-update service files
  ansible.builtin.copy:
    src: "{{ hostvars['console']['node']['config_path'] }}/services/systemd/fw/suricata/{{ item }}"
    dest: "/etc/systemd/system/{{ item }}"
    owner: "root"
    group: "root"
    mode: "0644"
    validate: "/usr/bin/systemd-analyze verify %s"
  loop:
    - "suricata-update.service"
    - "suricata-update.timer"
  become: true
- name: Deploy suricata custom configurations
  ansible.builtin.copy:
    src: "{{ hostvars['console']['node']['config_path'] }}/services/systemd/fw/suricata/etc/{{ item }}"
    dest: "/etc/suricata/{{ item }}"
    owner: "root"
    group: "root"
    mode: "0644"
  loop:
    - "disable.conf"
    - "enable.conf"
  become: true
  notify:
    - "notification_update_suricata_rules"
    - "notification_restart_suricata"
- name: Deploy suricata custom rules
  ansible.builtin.copy:
    src: "{{ hostvars['console']['node']['config_path'] }}/services/systemd/fw/suricata/etc/local.rules"
    dest: "/etc/suricata/rules/local.rules"
    owner: "root"
    group: "root"
    mode: "0644"
  become: true
  notify:
    - "notification_update_suricata_rules"
    - "notification_restart_suricata"
- name: Check suricata rules
  ansible.builtin.stat:
    path: "/var/lib/suricata/rules/suricata.rules"
  register: "is_suricata_rules_file"
# Bootstrap-only plain run when no ruleset exists yet; subsequent refreshes
# go through the handler, which also applies disable/enable/local overrides.
- name: Update suricata rules
  ansible.builtin.command:
    suricata-update
  become: true
  when: not is_suricata_rules_file.stat.exists
  changed_when: true
- name: Enable auto suricata rules update
  ansible.builtin.systemd:
    name: "suricata-update.timer"
    state: "started"
    enabled: true
    daemon_reload: true
  become: true
- name: Deploy suricata.yaml
  ansible.builtin.template:
    src: "{{ hostvars['console']['node']['config_path'] }}/services/systemd/fw/suricata/etc/suricata.yaml.j2"
    dest: "/etc/suricata/suricata.yaml"
    owner: "root"
    group: "root"
    mode: "0644"
    validate: "/usr/bin/suricata -T -c %s"
  become: true
  notify: "notification_restart_suricata"
- name: Create suricata.service.d
  ansible.builtin.file:
    path: "/etc/systemd/system/suricata.service.d"
    state: "directory"
    owner: "root"
    group: "root"
    mode: "0755"
  become: true
- name: Set suricata.service.d/override.conf
  ansible.builtin.copy:
    dest: "/etc/systemd/system/suricata.service.d/override.conf"
    content: |
      [Service]
      Restart=always
      RestartSec=60
    owner: "root"
    group: "root"
    mode: "0644"
  become: true
  notify:
    - "notification_restart_suricata"
- name: Enable suricata service
  ansible.builtin.systemd:
    name: "suricata.service"
    state: "started"
    enabled: true
    daemon_reload: true
  become: true
# Cap eve.json / logs growth; reload (not restart) suricata after rotation.
- name: Set suricata logs logrotate
  ansible.builtin.copy:
    content: |
      /var/log/suricata/*.log /var/log/suricata/*.json {
          weekly
          missingok
          rotate 4
          compress
          delaycompress
          notifempty
          maxsize 500M
          sharedscripts
          postrotate
              /usr/bin/systemctl reload suricata > /dev/null 2>/dev/null || true
          endscript
      }
    dest: "/etc/logrotate.d/suricata"
    owner: "root"
    group: "root"
    mode: "0644"
  become: true

View File

@@ -0,0 +1,85 @@
---
# Handlers for the infra node's user-scoped (rootless podman) services.
# All are best-effort (ignore_errors) so a failed restart never aborts the
# play. The postgresql handlers are skipped while the initial DB bootstrap
# flag is set.
# Fix: added the missing YAML document-start marker (all sibling files have
# one) and normalized enabled: "true" (string) to a boolean in "Restart ca".
- name: Restart ca
  ansible.builtin.systemd:
    name: "ca.service"
    state: "restarted"
    enabled: true
    daemon_reload: true
    scope: "user"
  changed_when: false
  listen: "notification_restart_ca"
  ignore_errors: true # noqa: ignore-errors
- name: Reload postgresql
  ansible.builtin.command:
    /usr/bin/podman exec -u postgres postgresql sh -c "pg_ctl reload"
  when: not (is_postgresql_init_run | default(false))
  changed_when: false
  listen: "notification_reload_postgresql"
  ignore_errors: true # noqa: ignore-errors
- name: Restart postgresql
  ansible.builtin.systemd:
    name: "postgresql.service"
    state: "restarted"
    enabled: true
    daemon_reload: true
    scope: "user"
  when: not (is_postgresql_init_run | default(false))
  changed_when: false
  listen: "notification_restart_postgresql"
  ignore_errors: true # noqa: ignore-errors
- name: Restart ldap
  ansible.builtin.systemd:
    name: "ldap.service"
    state: "restarted"
    enabled: true
    daemon_reload: true
    scope: "user"
  changed_when: false
  listen: "notification_restart_ldap"
  ignore_errors: true # noqa: ignore-errors
- name: Restart prometheus
  ansible.builtin.systemd:
    name: "prometheus.service"
    state: "restarted"
    enabled: true
    daemon_reload: true
    scope: "user"
  changed_when: false
  listen: "notification_restart_prometheus"
  ignore_errors: true # noqa: ignore-errors
- name: Restart loki
  ansible.builtin.systemd:
    name: "loki.service"
    state: "restarted"
    enabled: true
    daemon_reload: true
    scope: "user"
  changed_when: false
  listen: "notification_restart_loki"
  ignore_errors: true # noqa: ignore-errors
- name: Restart grafana
  ansible.builtin.systemd:
    name: "grafana.service"
    state: "restarted"
    enabled: true
    daemon_reload: true
    scope: "user"
  changed_when: false
  listen: "notification_restart_grafana"
  ignore_errors: true # noqa: ignore-errors
- name: Enable x509-exporter.service
  ansible.builtin.systemd:
    name: "x509-exporter.service"
    state: "started"
    enabled: true
    daemon_reload: true
    scope: "user"
  changed_when: false
  listen: "notification_restart_x509-exporter"
  ignore_errors: true # noqa: ignore-errors

View File

@@ -0,0 +1,84 @@
---
# Internal CA container (rootless podman, quadlet .container unit): directory
# layout, CA password as a podman secret, configs, and certificate material.
- name: Set ca container subuid
  ansible.builtin.set_fact:
    ca_subuid: "100999"
- name: Create ca directory
  ansible.builtin.file:
    path: "{{ node['home_path'] }}/containers/{{ item }}"
    owner: "{{ ca_subuid }}"
    group: "svadmins"
    state: "directory"
    mode: "0770"
  loop:
    - "ca"
    - "ca/certs"
    - "ca/secrets"
    - "ca/config"
    - "ca/db"
    - "ca/templates"
  become: true
- name: Register secret value to podman secret
  containers.podman.podman_secret:
    name: "STEP_CA_PASSWORD"
    data: "{{ hostvars['console']['ca']['intermediate']['password'] }}"
    state: "present"
    force: true
  notify: "notification_restart_ca"
  no_log: true
- name: Deploy ca config files
  ansible.builtin.template:
    src: "{{ hostvars['console']['node']['config_path'] }}/services/containers/infra/ca/config/{{ item }}.j2"
    dest: "{{ node['home_path'] }}/containers/ca/config/{{ item }}"
    owner: "{{ ca_subuid }}"
    group: "svadmins"
    mode: "0400"
  loop:
    - "ca.json"
    - "defaults.json"
  become: true
  notify: "notification_restart_ca"
# Root/intermediate certs go to certs/, the intermediate key to secrets/.
- name: Deploy ca certificate and key
  ansible.builtin.copy:
    content: |
      {{ item.value }}
    dest: "{{ item.path }}/{{ item.name }}"
    owner: "{{ ca_subuid }}"
    group: "svadmins"
    mode: "{{ item.mode }}"
  loop:
    - name: "ilnmors_root_ca.crt"
      value: "{{ hostvars['console']['ca']['root']['crt'] }}"
      path: "{{ node['home_path'] }}/containers/ca/certs"
      mode: "0440"
    - name: "ilnmors_intermediate_ca.crt"
      value: "{{ hostvars['console']['ca']['intermediate']['crt'] }}"
      path: "{{ node['home_path'] }}/containers/ca/certs"
      mode: "0440"
    - name: "ilnmors_intermediate_ca.key"
      value: "{{ hostvars['console']['ca']['intermediate']['key'] }}"
      path: "{{ node['home_path'] }}/containers/ca/secrets"
      mode: "0400"
  become: true
  notify: "notification_restart_ca"
  no_log: true
- name: Deploy container file
  ansible.builtin.template:
    src: "{{ hostvars['console']['node']['config_path'] }}/services/containers/infra/ca/ca.container.j2"
    dest: "{{ node['home_path'] }}/.config/containers/systemd/ca.container"
    owner: "{{ ansible_user }}"
    group: "svadmins"
    mode: "0644"
  notify: "notification_restart_ca"
- name: Enable ca
  ansible.builtin.systemd:
    name: "ca.service"
    state: "started"
    enabled: true
    daemon_reload: true
    scope: "user"

View File

@@ -0,0 +1,89 @@
---
# Grafana container: directory layout, trust root, podman secrets,
# configuration/provisioning files, and the quadlet unit.
- name: Set grafana container subuid
  ansible.builtin.set_fact:
    grafana_subuid: "100471"
- name: Create grafana directory
  ansible.builtin.file:
    path: "{{ node['home_path'] }}/containers/{{ item }}"
    owner: "{{ grafana_subuid }}"
    group: "svadmins"
    state: "directory"
    mode: "0770"
  loop:
    - "grafana"
    - "grafana/data"
    - "grafana/etc"
    - "grafana/etc/provisioning"
    - "grafana/etc/dashboards"
    - "grafana/ssl"
  become: true
- name: Deploy root certificate and key
  ansible.builtin.copy:
    content: |
      {{ hostvars['console']['ca']['root']['crt'] }}
    dest: "{{ node['home_path'] }}/containers/grafana/ssl/ilnmors_root_ca.crt"
    owner: "{{ grafana_subuid }}"
    group: "svadmins"
    mode: "0400"
  become: true
  notify: "notification_restart_grafana"
  no_log: true
- name: Register secret value to podman secret
  containers.podman.podman_secret:
    name: "{{ item.name }}"
    data: "{{ item.value }}"
    state: "present"
    force: true
  loop:
    - name: "GF_DB_PASSWORD"
      value: "{{ hostvars['console']['postgresql']['password']['grafana'] }}"
    - name: "LDAP_BIND_PASSWORD"
      value: "{{ hostvars['console']['ldap']['password']['grafana'] }}"
    - name: "GF_ADMIN_PASSWORD"
      value: "{{ hostvars['console']['grafana']['user']['password'] }}"
  notify: "notification_restart_grafana"
  no_log: true
# Fix: task name typo ("configruation" -> "configuration").
- name: Deploy configuration files
  ansible.builtin.template:
    src: "{{ hostvars['console']['node']['config_path'] }}/services/containers/infra/grafana/etc/{{ item }}.j2"
    dest: "{{ node['home_path'] }}/containers/grafana/etc/{{ item }}"
    owner: "{{ grafana_subuid }}"
    group: "svadmins"
    mode: "0400"
  loop:
    - "grafana.ini"
    - "ldap.toml"
  become: true
  notify: "notification_restart_grafana"
  no_log: true
# Fix: task name typo ("provisioing" -> "provisioning").
- name: Deploy provisioning and dashboard files
  ansible.builtin.copy:
    src: "{{ hostvars['console']['node']['config_path'] }}/services/containers/infra/grafana/etc/provisioning/"
    dest: "{{ node['home_path'] }}/containers/grafana/etc/provisioning/"
    owner: "{{ grafana_subuid }}"
    group: "svadmins"
    mode: "0400"
  become: true
  notify: "notification_restart_grafana"
- name: Deploy container file
  ansible.builtin.template:
    src: "{{ hostvars['console']['node']['config_path'] }}/services/containers/infra/grafana/grafana.container.j2"
    dest: "{{ node['home_path'] }}/.config/containers/systemd/grafana.container"
    owner: "{{ ansible_user }}"
    group: "svadmins"
    mode: "0644"
  notify: "notification_restart_grafana"
- name: Enable grafana
  ansible.builtin.systemd:
    name: "grafana.service"
    state: "started"
    enabled: true
    daemon_reload: true
    scope: "user"

View File

@@ -0,0 +1,112 @@
---
# Fixed host-side uid that maps to the ldap container's internal account.
- name: Set ldap container subuid
  ansible.builtin.set_fact:
    ldap_subuid: "100999"
- name: Create ldap directory
  ansible.builtin.file:
    path: "{{ node['home_path'] }}/containers/{{ item }}"
    owner: "{{ ldap_subuid }}"
    group: "svadmins"
    state: "directory"
    mode: "0770"
  loop:
    - "ldap"
    - "ldap/data"
    - "ldap/ssl"
  become: true
# Install TLS material; ldap.crt carries the leaf plus the intermediate CA so
# clients receive a complete chain.
- name: Deploy ldap certificate and key
  ansible.builtin.copy:
    content: |
      {{ item.value }}
    dest: "{{ node['home_path'] }}/containers/ldap/ssl/{{ item.name }}"
    owner: "{{ ldap_subuid }}"
    group: "svadmins"
    mode: "{{ item.mode }}"
  loop:
    - name: "ilnmors_root_ca.crt"
      value: "{{ hostvars['console']['ca']['root']['crt'] }}"
      mode: "0440"
    - name: "ldap.crt"
      value: |
        {{ hostvars['console']['ldap']['crt'] | trim }}
        {{ hostvars['console']['ca']['intermediate']['crt'] }}
      mode: "0440"
    - name: "ldap.key"
      value: "{{ hostvars['console']['ldap']['key'] }}"
      mode: "0400"
  become: true
  notify: "notification_restart_ldap"
  no_log: true
- name: Register secret value to podman secret
  containers.podman.podman_secret:
    name: "{{ item.name }}"
    data: "{{ item.value }}"
    state: "present"
    force: true
  loop:
    # Jinja's urlencode filter leaves `/` unescaped, so `%2F` must be substituted manually.
    - name: "LLDAP_DATABASE_URL"
      value: "postgres://ldap:{{ hostvars['console']['postgresql']['password']['ldap'] | urlencode | replace('/', '%2F') }}\
        @{{ infra_uri['postgresql']['domain'] }}/ldap_db?sslmode=verify-full&sslrootcert=/etc/ssl/ldap/ilnmors_root_ca.crt"
    - name: "LLDAP_KEY_SEED"
      value: "{{ hostvars['console']['ldap']['seed_key'] }}"
    - name: "LLDAP_JWT_SECRET"
      value: "{{ hostvars['console']['ldap']['jwt_secret'] }}"
  notify: "notification_restart_ldap"
  no_log: true
# One-time bootstrap: flip `when: false` to true only when postgresql holds no
# lldap data yet; the init container seeds the database and exits (detach: false).
- name: Initiate ldap (When = false, If DB data does not exist in postgresql, activate this block)
  when: false
  become: true
  block:
    - name: Register extra secret value to podman secret
      containers.podman.podman_secret:
        name: "LLDAP_LDAP_USER_PASSWORD"
        data: "{{ hostvars['console']['ldap']['password']['user'] }}"
        state: "present"
        force: true
    # You must check the image version first (following container file on data/config/containers/infra/ldap/ldap.container)
    - name: Initiate ldap
      containers.podman.podman_container:
        name: "init_LLDAP"
        image: "docker.io/lldap/lldap:{{ version['containers']['ldap'] }}"
        rm: true
        detach: false
        env:
          TZ: "Asia/Seoul"
          LLDAP_LDAP_BASE_DN: "dc=ilnmors,dc=internal"
        secrets:
          - "LLDAP_DATABASE_URL,type=env"
          - "LLDAP_KEY_SEED,type=env"
          - "LLDAP_JWT_SECRET,type=env"
          - "LLDAP_LDAP_USER_PASSWORD,type=env"
        volumes:
          - "{{ node['home_path'] }}/containers/ldap/data:/data:rw"
          - "{{ node['home_path'] }}/containers/ldap/ssl:/etc/ssl/ldap:ro"
  always:
    # The bootstrap-only admin password secret is removed even when init fails.
    - name: Clean extra secret value from podman secret
      containers.podman.podman_secret:
        name: "LLDAP_LDAP_USER_PASSWORD"
        state: "absent"
# Install the Quadlet unit; systemd generates ldap.service from it on daemon-reload.
- name: Deploy container file
  ansible.builtin.template:
    src: "{{ hostvars['console']['node']['config_path'] }}/services/containers/infra/ldap/ldap.container.j2"
    dest: "{{ node['home_path'] }}/.config/containers/systemd/ldap.container"
    owner: "{{ ansible_user }}"
    group: "svadmins"
    mode: "0644"
  notify: "notification_restart_ldap"
- name: Enable ldap
  ansible.builtin.systemd:
    name: "ldap.service"
    state: "started"
    enabled: true
    daemon_reload: true
    scope: "user"

View File

@@ -0,0 +1,70 @@
---
# Deploy the loki log-aggregation container: directories, config, TLS, Quadlet unit.
- name: Set loki container subuid
  ansible.builtin.set_fact:
    loki_subuid: "110000" # 10001
- name: Create loki directory
  ansible.builtin.file:
    path: "{{ node['home_path'] }}/containers/{{ item }}"
    state: "directory"
    owner: "{{ loki_subuid }}"
    group: "svadmins"
    mode: "0770"
  loop:
    - "loki"
    - "loki/etc"
    - "loki/data"
    - "loki/ssl"
  become: true
- name: Deploy loki configuration file
  ansible.builtin.copy:
    src: "{{ hostvars['console']['node']['config_path'] }}/services/containers/infra/loki/etc/loki.yaml"
    dest: "{{ node['home_path'] }}/containers/loki/etc/loki.yaml"
    owner: "{{ loki_subuid }}"
    group: "svadmins"
    mode: "0600"
  become: true
  notify: "notification_restart_loki"
  no_log: true
# loki.crt carries the leaf plus the intermediate CA so clients get a full chain.
- name: Deploy loki certificate and key
  ansible.builtin.copy:
    content: |
      {{ item.value }}
    dest: "{{ node['home_path'] }}/containers/loki/ssl/{{ item.name }}"
    owner: "{{ loki_subuid }}"
    group: "svadmins"
    mode: "{{ item.mode }}"
  loop:
    - name: "ilnmors_root_ca.crt"
      value: "{{ hostvars['console']['ca']['root']['crt'] }}"
      mode: "0440"
    - name: "loki.crt"
      value: |
        {{ hostvars['console']['loki']['crt'] | trim }}
        {{ hostvars['console']['ca']['intermediate']['crt'] }}
      mode: "0440"
    - name: "loki.key"
      value: "{{ hostvars['console']['loki']['key'] }}"
      mode: "0400"
  become: true
  notify: "notification_restart_loki"
  no_log: true
- name: Deploy container file
  ansible.builtin.template:
    src: "{{ hostvars['console']['node']['config_path'] }}/services/containers/infra/loki/loki.container.j2"
    dest: "{{ node['home_path'] }}/.config/containers/systemd/loki.container"
    owner: "{{ ansible_user }}"
    group: "svadmins"
    mode: "0644"
  notify: "notification_restart_loki"
- name: Enable loki
  ansible.builtin.systemd:
    name: "loki.service"
    state: "started"
    enabled: true
    daemon_reload: true
    scope: "user"

View File

@@ -0,0 +1,169 @@
---
- name: Set postgresql container subuid
  ansible.builtin.set_fact:
    postgresql_subuid: "100998"
# Services with their own database; drives init-sql ordering and per-db backup timers.
- name: Set connected services list
  ansible.builtin.set_fact:
    # telegraf has no database
    connected_services:
      - "ldap"
      - "authelia"
      - "grafana"
- name: Create postgresql directory
  ansible.builtin.file:
    path: "{{ node['home_path'] }}/containers/{{ item }}"
    state: "directory"
    owner: "{{ postgresql_subuid }}"
    group: "svadmins"
    mode: "0770"
  loop:
    - "postgresql"
    - "postgresql/data"
    - "postgresql/config"
    - "postgresql/ssl"
    - "postgresql/init"
    - "postgresql/backups"
    - "postgresql/build"
  become: true
# The image is built locally so the vectorchord extension can be layered on postgres.
- name: Deploy containerfile for build
  ansible.builtin.template:
    src: "{{ hostvars['console']['node']['config_path'] }}/services/containers/infra/postgresql/build/postgresql.containerfile.j2"
    dest: "{{ node['home_path'] }}/containers/postgresql/build/Containerfile"
    owner: "{{ ansible_user }}"
    group: "svadmins"
    mode: "0640"
- name: Build postgresql container image
  containers.podman.podman_image:
    name: "ilnmors.internal/{{ node['name'] }}/postgres"
    # check tags from container file
    tag: "pg{{ version['containers']['postgresql'] }}-vectorchord{{ version['containers']['vectorchord'] }}"
    state: "build"
    path: "{{ node['home_path'] }}/containers/postgresql/build"
# Drop dangling layers left behind by rebuilds.
- name: Prune postgresql dangling images
  containers.podman.podman_prune:
    image: true
# Config changes only need a reload, not a restart (see the reload handler).
- name: Deploy postgresql configuration files
  ansible.builtin.template:
    src: "{{ hostvars['console']['node']['config_path'] }}/services/containers/infra/postgresql/config/{{ item }}.j2"
    dest: "{{ node['home_path'] }}/containers/postgresql/config/{{ item }}"
    owner: "{{ postgresql_subuid }}"
    group: "svadmins"
    mode: "0600"
  loop:
    - "postgresql.conf"
    - "pg_hba.conf"
  become: true
  notify: "notification_reload_postgresql"
  no_log: true
# postgresql.crt carries the leaf plus the intermediate CA so clients get a full chain.
- name: Deploy postgresql certificate and key
  ansible.builtin.copy:
    content: |
      {{ item.value }}
    dest: "{{ node['home_path'] }}/containers/postgresql/ssl/{{ item.name }}"
    owner: "{{ postgresql_subuid }}"
    group: "svadmins"
    mode: "{{ item.mode }}"
  loop:
    - name: "ilnmors_root_ca.crt"
      value: "{{ hostvars['console']['ca']['root']['crt'] }}"
      mode: "0440"
    - name: "postgresql.crt"
      value: |
        {{ hostvars['console']['postgresql']['crt'] | trim }}
        {{ hostvars['console']['ca']['intermediate']['crt'] }}
      mode: "0440"
    - name: "postgresql.key"
      value: "{{ hostvars['console']['postgresql']['key'] }}"
      mode: "0400"
  become: true
  notify: "notification_reload_postgresql"
  no_log: true
# matched == 0 means an empty data dir, which gates the one-shot init block below.
- name: Check data directory empty
  ansible.builtin.find:
    paths: "{{ node['home_path'] }}/containers/postgresql/data/"
    # hidden: true also counts dotfiles, so a dir with only dotfiles is not "empty".
    hidden: true
    file_type: "any"
  become: true
  register: "is_data_dir_empty"
# One-shot restore preparation; runs only against an empty data directory.
# Fix: task name typo "resoring" -> "restoring".
- name: Prepare initiating DB
  when: is_data_dir_empty.matched == 0
  become: true
  block:
    # `init/pg_cluster.sql` should be fetched from postgresql's backup directory before running initiating
    - name: Deploy init cluster sql file
      ansible.builtin.copy:
        src: "{{ hostvars['console']['node']['config_path'] }}/services/containers/infra/postgresql/init/pg_cluster.sql"
        dest: "{{ node['home_path'] }}/containers/postgresql/init/0_pg_cluster.sql"
        owner: "{{ postgresql_subuid }}"
        group: "svadmins"
        mode: "0600"
    # Numeric prefixes keep the restore order deterministic: cluster first, then per-service dumps.
    - name: Deploy restoring data sql files
      ansible.builtin.copy:
        src: "{{ hostvars['console']['node']['config_path'] }}/services/containers/infra/postgresql/init/pg_{{ item }}.sql"
        dest: "{{ node['home_path'] }}/containers/postgresql/init/{{ index_num + 1 }}_pg_{{ item }}.sql"
        owner: "{{ postgresql_subuid }}"
        group: "svadmins"
        mode: "0600"
      loop: "{{ connected_services }}"
      loop_control:
        index_var: index_num
    - name: Set is_postgresql_init_run
      ansible.builtin.set_fact:
        is_postgresql_init_run: true
# Install the Quadlet unit; systemd generates postgresql.service from it.
- name: Deploy container file
  ansible.builtin.template:
    src: "{{ hostvars['console']['node']['config_path'] }}/services/containers/infra/postgresql/postgresql.container.j2"
    dest: "{{ node['home_path'] }}/.config/containers/systemd/postgresql.container"
    owner: "{{ ansible_user }}"
    group: "svadmins"
    mode: "0644"
  notify: "notification_restart_postgresql"
# Cluster-wide and per-database backup units; the templated (@) pair is
# instantiated once per connected service below.
- name: Deploy backup service files
  ansible.builtin.copy:
    src: "{{ hostvars['console']['node']['config_path'] }}/services/containers/infra/postgresql/services/{{ item }}"
    dest: "{{ node['home_path'] }}/.config/systemd/user/{{ item }}"
    owner: "{{ ansible_user }}"
    group: "svadmins"
    mode: "0644"
  loop:
    - "postgresql-cluster-backup.service"
    - "postgresql-cluster-backup.timer"
    - "postgresql-data-backup@.service"
    - "postgresql-data-backup@.timer"
- name: Enable postgresql
  ansible.builtin.systemd:
    name: "postgresql.service"
    state: "started"
    enabled: true
    daemon_reload: true
    scope: "user"
- name: Enable cluster backup timer
  ansible.builtin.systemd:
    name: "postgresql-cluster-backup.timer"
    state: "started"
    enabled: true
    daemon_reload: true
    scope: "user"
- name: Enable data backup timer
  ansible.builtin.systemd:
    name: "postgresql-data-backup@{{ item }}.timer"
    state: "started"
    enabled: true
    daemon_reload: true
    scope: "user"
  loop: "{{ connected_services }}"

View File

@@ -0,0 +1,74 @@
---
# Deploy the prometheus container: directories, config, TLS, Quadlet unit.
- name: Set prometheus container subuid
  ansible.builtin.set_fact:
    prometheus_subuid: "165533" # nobody - 65534
- name: Create prometheus directory
  ansible.builtin.file:
    path: "{{ node['home_path'] }}/containers/{{ item }}"
    state: "directory"
    owner: "{{ prometheus_subuid }}"
    group: "svadmins"
    mode: "0770"
  loop:
    - "prometheus"
    - "prometheus/etc"
    - "prometheus/data"
    - "prometheus/ssl"
  become: true
- name: Deploy prometheus configuration file
  ansible.builtin.template:
    src: "{{ hostvars['console']['node']['config_path'] }}/services/containers/infra/prometheus/etc/{{ item }}.j2"
    dest: "{{ node['home_path'] }}/containers/prometheus/etc/{{ item }}"
    owner: "{{ prometheus_subuid }}"
    group: "svadmins"
    mode: "0600"
  loop:
    - "prometheus.yaml"
    - "rules.yaml"
    - "web-config.yaml"
  become: true
  notify: "notification_restart_prometheus"
  no_log: true
# prometheus.crt carries the leaf plus the intermediate CA so clients get a full chain.
- name: Deploy prometheus certificate and key
  ansible.builtin.copy:
    content: |
      {{ item.value }}
    dest: "{{ node['home_path'] }}/containers/prometheus/ssl/{{ item.name }}"
    owner: "{{ prometheus_subuid }}"
    group: "svadmins"
    mode: "{{ item.mode }}"
  loop:
    - name: "ilnmors_root_ca.crt"
      value: "{{ hostvars['console']['ca']['root']['crt'] }}"
      mode: "0440"
    - name: "prometheus.crt"
      value: |
        {{ hostvars['console']['prometheus']['crt'] | trim }}
        {{ hostvars['console']['ca']['intermediate']['crt'] }}
      mode: "0440"
    - name: "prometheus.key"
      value: "{{ hostvars['console']['prometheus']['key'] }}"
      mode: "0400"
  become: true
  notify: "notification_restart_prometheus"
  no_log: true
- name: Deploy container file
  ansible.builtin.template:
    src: "{{ hostvars['console']['node']['config_path'] }}/services/containers/infra/prometheus/prometheus.container.j2"
    dest: "{{ node['home_path'] }}/.config/containers/systemd/prometheus.container"
    owner: "{{ ansible_user }}"
    group: "svadmins"
    mode: "0644"
  notify: "notification_restart_prometheus"
- name: Enable prometheus
  ansible.builtin.systemd:
    name: "prometheus.service"
    state: "started"
    enabled: true
    daemon_reload: true
    scope: "user"

View File

@@ -0,0 +1,63 @@
---
# Deploy the x509 certificate-expiry exporter. Only public certificates are
# mounted — no private keys.
- name: Set x509-exporter container subuid
  ansible.builtin.set_fact:
    x509_exporter_subuid: "165533" # nobody - 65534
- name: Create x509-exporter directory
  ansible.builtin.file:
    path: "{{ node['home_path'] }}/containers/{{ item }}"
    state: "directory"
    owner: "{{ x509_exporter_subuid }}"
    group: "svadmins"
    mode: "0770"
  loop:
    - "x509-exporter"
    - "x509-exporter/certs"
  become: true
# Every certificate the exporter should watch for expiry.
- name: Deploy certificates
  ansible.builtin.copy:
    content: |
      {{ item.value }}
    dest: "{{ node['home_path'] }}/containers/x509-exporter/certs/{{ item.name }}"
    owner: "{{ x509_exporter_subuid }}"
    group: "svadmins"
    mode: "0440"
  loop:
    - name: "root.crt"
      value: "{{ hostvars['console']['ca']['root']['crt'] }}"
    - name: "intermediate.crt"
      value: "{{ hostvars['console']['ca']['intermediate']['crt'] }}"
    - name: "crowdsec.crt"
      value: "{{ hostvars['console']['crowdsec']['crt'] }}"
    - name: "blocky.crt"
      value: "{{ hostvars['console']['blocky']['crt'] }}"
    - name: "postgresql.crt"
      value: "{{ hostvars['console']['postgresql']['crt'] }}"
    - name: "ldap.crt"
      value: "{{ hostvars['console']['ldap']['crt'] }}"
    - name: "prometheus.crt"
      value: "{{ hostvars['console']['prometheus']['crt'] }}"
    - name: "loki.crt"
      value: "{{ hostvars['console']['loki']['crt'] }}"
    - name: "dsm.crt"
      value: "{{ hostvars['console']['dsm']['crt'] }}"
  become: true
  no_log: true
- name: Deploy container file
  ansible.builtin.template:
    src: "{{ hostvars['console']['node']['config_path'] }}/services/containers/infra/x509-exporter/x509-exporter.container.j2"
    dest: "{{ node['home_path'] }}/.config/containers/systemd/x509-exporter.container"
    owner: "{{ ansible_user }}"
    group: "svadmins"
    mode: "0644"
  notify: "notification_restart_x509-exporter"
- name: Enable x509-exporter.service
  ansible.builtin.systemd:
    name: "x509-exporter.service"
    state: "started"
    enabled: true
    daemon_reload: true
    scope: "user"

View File

@@ -0,0 +1,92 @@
---
# Grant the ansible user libvirt/KVM access without root.
- name: Add user in libvirt group
  ansible.builtin.user:
    name: "{{ ansible_user }}"
    state: "present"
    # A YAML list avoids the pitfall of a comma-separated string with spaces,
    # which can be split into group names carrying leading whitespace.
    groups:
      - "libvirt"
      - "kvm"
      - "libvirt-qemu"
    # append keeps the user's existing supplementary groups.
    append: true
  become: true
- name: Check libvirt directory
  ansible.builtin.stat:
    path: "/var/lib/libvirt/{{ item }}"
  loop:
    - "images"
    - "seeds"
  register: "is_libvirt_dir"
# item.item is the original loop value inside the registered results list.
- name: Create libvirt directory
  ansible.builtin.file:
    path: "/var/lib/libvirt/{{ item.item }}"
    state: "directory"
    owner: "root"
    group: "root"
    mode: "0711"
  loop: "{{ is_libvirt_dir.results }}"
  when: not item.stat.exists
  become: true
  no_log: true
# Make virsh default to the system connection for interactive use.
- name: Set LIBVIRT_DEFAULT_URI
  ansible.builtin.lineinfile:
    path: "{{ node['home_path'] }}/.bashrc"
    state: "present"
    line: "export LIBVIRT_DEFAULT_URI='qemu:///system'"
    regexp: '^export LIBVIRT_DEFAULT_URI='
# Define -> start -> autostart, for both networks and storage pools.
- name: Define virtual networks
  community.libvirt.virt_net:
    name: "{{ item }}"
    xml: "{{ lookup('file', hostvars['console']['node']['config_path'] + '/services/systemd/vmm/libvirt/xml/networks/' + item + '.xml') }}"
    uri: "qemu:///system"
    command: "define"
  loop:
    - "wan-net"
    - "lan-net"
- name: Start virtual networks
  community.libvirt.virt_net:
    name: "{{ item }}"
    state: "active"
    uri: "qemu:///system"
    autostart: true
  loop:
    - "wan-net"
    - "lan-net"
# NOTE(review): the task above already sets autostart, so this task is redundant
# (harmless; it mirrors the storage-pool task layout below).
- name: Autostart virtual networks
  community.libvirt.virt_net:
    name: "{{ item }}"
    uri: "qemu:///system"
    autostart: true
  loop:
    - "wan-net"
    - "lan-net"
- name: Define virtual storage pool
  community.libvirt.virt_pool:
    name: "{{ item }}"
    xml: "{{ lookup('file', hostvars['console']['node']['config_path'] + '/services/systemd/vmm/libvirt/xml/storages/' + item + '.xml') }}"
    uri: "qemu:///system"
    command: "define"
  loop:
    - "images-pool"
    - "seeds-pool"
- name: Start virtual storage pool
  community.libvirt.virt_pool:
    name: "{{ item }}"
    state: "active"
    uri: "qemu:///system"
  loop:
    - "images-pool"
    - "seeds-pool"
- name: Autostart virtual storage pool
  community.libvirt.virt_pool:
    name: "{{ item }}"
    uri: "qemu:///system"
    autostart: true
  loop:
    - "images-pool"
    - "seeds-pool"

View File

@@ -0,0 +1,59 @@
---
# This task is located in vmm roles because of its attributes,
# but all process should be run in "console".
# At the playbook, `delegate_to: "console"` option is applied by `apply:`.
- name: Create images directory
  ansible.builtin.file:
    path: "{{ hostvars['console']['node']['data_path'] }}/images/seeds/{{ target_vm }}"
    state: "directory"
    owner: "console"
    group: "svadmins"
    mode: "0700"
# cloud-init NoCloud metadata: stable instance-id and hostname for the target vm.
- name: Create temp meta-data
  ansible.builtin.copy:
    content: |
      instance-id: vm-{{ target_vm }}
      local-hostname: {{ target_vm }}
    dest: "{{ hostvars['console']['node']['data_path'] }}/images/seeds/{{ target_vm }}/meta-data"
    owner: "console"
    group: "svadmins"
    mode: "0600"
  register: "vm_meta_data"
  no_log: true
- name: Create temp user-data
  ansible.builtin.template:
    src: "{{ hostvars['console']['node']['config_path'] }}/services/systemd/vmm/libvirt/seeds/user-data.j2"
    dest: "{{ hostvars['console']['node']['data_path'] }}/images/seeds/{{ target_vm }}/user-data"
    owner: "console"
    group: "svadmins"
    mode: "0600"
  register: "vm_user_data"
  no_log: true
# Networking is handled by systemd-networkd, so cloud-init's own net config is disabled.
- name: Create temp network-config
  ansible.builtin.copy:
    content: |
      network: {config: disabled}
    dest: "{{ hostvars['console']['node']['data_path'] }}/images/seeds/{{ target_vm }}/network-config"
    owner: "console"
    group: "svadmins"
    mode: "0600"
  register: "vm_network_config"
  no_log: true
- name: Check seed.iso
  ansible.builtin.stat:
    path: "{{ hostvars['console']['node']['data_path'] }}/images/seeds/{{ target_vm }}/seed.iso"
  register: "is_seediso"
# Rebuild the ISO when any input changed or when it does not exist yet.
- name: Create seed.iso
  ansible.builtin.shell:
    cmd: |
      cloud-localds -N {{ hostvars['console']['node']['data_path'] }}/images/seeds/{{ target_vm }}/network-config \
      {{ hostvars['console']['node']['data_path'] }}/images/seeds/{{ target_vm }}/seed.iso \
      {{ hostvars['console']['node']['data_path'] }}/images/seeds/{{ target_vm }}/user-data \
      {{ hostvars['console']['node']['data_path'] }}/images/seeds/{{ target_vm }}/meta-data
  when: vm_meta_data.changed or vm_user_data.changed or vm_network_config.changed or not is_seediso.stat.exists
  changed_when: true

View File

@@ -0,0 +1,55 @@
---
- name: Check vm cloud-init
  ansible.builtin.stat:
    path: "/var/lib/libvirt/images/debian-13.qcow2"
  become: true
  register: is_cloud_init_file
# Push the base Debian cloud image only when it is missing on the hypervisor.
- name: Deploy vm cloud-init
  ansible.builtin.copy:
    src: "{{ hostvars['console']['node']['data_path'] }}/images/debian-13-generic-amd64.qcow2"
    dest: "/var/lib/libvirt/images/debian-13.qcow2"
    owner: "{{ ansible_user }}"
    group: "svadmins"
    mode: "0644"
    force: false
  become: true
  when: not is_cloud_init_file.stat.exists
# Per-vm disk starts as a copy of the base image; force: false keeps an existing disk.
- name: Remote copy vm cloud-init file
  ansible.builtin.copy:
    src: "/var/lib/libvirt/images/debian-13.qcow2"
    dest: "/var/lib/libvirt/images/{{ target_vm }}.qcow2"
    owner: "{{ ansible_user }}"
    group: "svadmins"
    mode: "0644"
    remote_src: true
    force: false
  become: true
# A running vm holds a lock on its image; a "lock" error is tolerated here and
# the resize/seed steps below are skipped via the `rc == 0` guards.
- name: Check deployed cloud-init file info
  ansible.builtin.command:
    cmd: "qemu-img info /var/lib/libvirt/images/{{ target_vm }}.qcow2 --output json"
  changed_when: false
  failed_when:
    - deployed_cloudfile_info.rc != 0
    - ("lock") not in deployed_cloudfile_info.stderr
  register: "deployed_cloudfile_info"
# Grow the disk only when the configured size exceeds the current virtual size.
- name: Resize deployed cloud-init file
  ansible.builtin.command:
    cmd: "qemu-img resize /var/lib/libvirt/images/{{ target_vm }}.qcow2 {{ hostvars[target_vm]['vm']['storage'] }}G"
  when:
    - deployed_cloudfile_info.rc == 0
    - (deployed_cloudfile_info.stdout | from_json)['virtual-size'] < (hostvars[target_vm]['vm']['storage'] | int * 1024 * 1024 * 1024)
  changed_when: true
- name: Deploy vm seed.iso
  ansible.builtin.copy:
    src: "{{ hostvars['console']['node']['data_path'] }}/images/seeds/{{ target_vm }}/seed.iso"
    dest: "/var/lib/libvirt/seeds/{{ target_vm }}_seed.iso"
    owner: "{{ ansible_user }}"
    group: "svadmins"
    mode: "0644"
  become: true
  when: deployed_cloudfile_info.rc == 0

View File

@@ -0,0 +1,24 @@
---
- name: Register VM xml file
  community.libvirt.virt:
    name: "{{ target_vm }}"
    xml: |
      {{ lookup('template', hostvars['console']['node']['config_path'] + '/services/systemd/vmm/libvirt/xml/vms/vms.xml.j2') }}
    uri: "qemu:///system"
    command: define
# The vm lifecycle is driven by a per-vm user systemd unit instead of libvirt autostart.
- name: Deploy VM systemd file
  ansible.builtin.copy:
    src: "{{ hostvars['console']['node']['config_path'] }}/services/systemd/vmm/libvirt/services/{{ target_vm }}.service"
    dest: "{{ node['home_path'] }}/.config/systemd/user/{{ target_vm }}.service"
    owner: "{{ ansible_user }}"
    group: "svadmins"
    mode: "0400"
- name: Register VM service
  ansible.builtin.systemd:
    name: "{{ target_vm }}.service"
    state: "started"
    enabled: true
    daemon_reload: true
    scope: "user"

View File

@@ -0,0 +1,38 @@
#!/usr/sbin/nft -f
# Host firewall for the APP vm: default-drop input, SSH only from console hosts.
flush ruleset
define NET4_SERVER = {{ hostvars['fw']['network4']['subnet']['server'] }}
define NET6_SERVER = {{ hostvars['fw']['network6']['subnet']['server'] }}
define HOSTS4_CONSOLE = { {{ hostvars['fw']['network4']['console'].values() | join(', ') }} }
define HOSTS6_CONSOLE = { {{ hostvars['fw']['network6']['console'].values() | join(', ') }} }
define PORTS_SSH = 22
table inet nat {
    chain prerouting {
        type nat hook prerouting priority dstnat; policy accept;
    }
    chain postrouting {
    }
    chain output {
        # NOTE(review): the textual priority "dstnat" is documented for the
        # prerouting hook; confirm nft accepts it on the output hook here
        # (numeric -100 otherwise).
        type nat hook output priority dstnat; policy accept;
    }
}
table inet filter {
    chain input {
        type filter hook input priority 0; policy drop;
        ct state invalid drop comment "deny invalid connection"
        ct state established, related accept comment "allow all connection already existing"
        iifname "lo" accept comment "allow local connection"
        meta l4proto { icmp, icmpv6 } accept comment "allow icmp connection"
        ip saddr $HOSTS4_CONSOLE tcp dport $PORTS_SSH accept comment "allow ipv4 ssh connection: CONSOLE > APP"
        ip6 saddr $HOSTS6_CONSOLE tcp dport $PORTS_SSH accept comment "allow ipv6 ssh connection: CONSOLE > APP"
    }
    chain forward {
        type filter hook forward priority 0; policy drop;
    }
    chain output {
        type filter hook output priority 0; policy accept;
    }
}

View File

@@ -0,0 +1,48 @@
#!/usr/sbin/nft -f
# Host firewall for the AUTH vm. Web traffic arriving on 80/443 is redirected to
# the unprivileged forward ports (2080/2443) so the proxy can run rootless; the
# input chain then matches on the original destination port via ct.
flush ruleset
define NET4_SERVER = {{ hostvars['fw']['network4']['subnet']['server'] }}
define NET6_SERVER = {{ hostvars['fw']['network6']['subnet']['server'] }}
define HOSTS4_CONSOLE = { {{ hostvars['fw']['network4']['console'].values() | join(', ') }} }
define HOSTS6_CONSOLE = { {{ hostvars['fw']['network6']['console'].values() | join(', ') }} }
define PORTS_SSH = 22
define PORTS_HTTP = 80
define PORTS_HTTP_FORWARD = 2080
define PORTS_HTTPS = 443
define PORTS_HTTPS_FORWARD = 2443
table inet nat {
    chain prerouting {
        type nat hook prerouting priority dstnat; policy accept;
        tcp dport $PORTS_HTTP dnat to :$PORTS_HTTP_FORWARD comment "dnat http ports to $PORTS_HTTP_FORWARD"
        tcp dport $PORTS_HTTPS dnat to :$PORTS_HTTPS_FORWARD comment "dnat https ports to $PORTS_HTTPS_FORWARD"
    }
    chain postrouting {
    }
    chain output {
        # NOTE(review): textual priority "dstnat" is documented for the
        # prerouting hook; confirm nft accepts it on the output hook.
        type nat hook output priority dstnat; policy accept;
        oifname "lo" tcp dport $PORTS_HTTP dnat to :$PORTS_HTTP_FORWARD comment "dnat http ports to $PORTS_HTTP_FORWARD out of LOCALHOST"
        oifname "lo" tcp dport $PORTS_HTTPS dnat to :$PORTS_HTTPS_FORWARD comment "dnat https ports to $PORTS_HTTPS_FORWARD out of LOCALHOST"
    }
}
table inet filter {
    chain input {
        type filter hook input priority 0; policy drop;
        ct state invalid drop comment "deny invalid connection"
        ct state established, related accept comment "allow all connection already existing"
        iifname "lo" accept comment "allow local connection: AUTH > AUTH"
        meta l4proto { icmp, icmpv6 } accept comment "allow icmp connection: AUTH"
        ip saddr $HOSTS4_CONSOLE tcp dport $PORTS_SSH accept comment "allow ipv4 ssh connection: CONSOLE > AUTH"
        ip6 saddr $HOSTS6_CONSOLE tcp dport $PORTS_SSH accept comment "allow ipv6 ssh connection: CONSOLE > AUTH"
        tcp dport $PORTS_HTTP_FORWARD ct original proto-dst $PORTS_HTTP accept comment "allow ipv4, 6 http connection: > AUTH"
        tcp dport $PORTS_HTTPS_FORWARD ct original proto-dst $PORTS_HTTPS accept comment "allow ipv4, 6 https connection: > AUTH"
    }
    chain forward {
        type filter hook forward priority 0; policy drop;
    }
    chain output {
        type filter hook output priority 0; policy accept;
    }
}

View File

@@ -0,0 +1,34 @@
# Static name resolution template: console gets the full host map, other vms
# get per-service aliases pointing at the fw/infra addresses.
# localhost
127.0.0.1 {{ node['local_san'] }}
::1 {{ node['local_san'] }}
{% if node['name'] == 'console' %}
# Hosts IPv4
{{ hostvars['fw']['network4']['firewall']['server'] }} fw.ilnmors.internal
{{ hostvars['fw']['network4']['vmm']['client'] }} init.vmm.ilnmors.internal
{{ hostvars['fw']['network4']['vmm']['server'] }} vmm.ilnmors.internal
{{ hostvars['fw']['network4']['infra']['server'] }} infra.ilnmors.internal
{{ hostvars['fw']['network4']['auth']['server'] }} auth.ilnmors.internal
{{ hostvars['fw']['network4']['app']['server'] }} app.ilnmors.internal
# Hosts IPv6
{{ hostvars['fw']['network6']['firewall']['server'] }} fw.ilnmors.internal
{{ hostvars['fw']['network6']['vmm']['client'] }} init.vmm.ilnmors.internal
{{ hostvars['fw']['network6']['vmm']['server'] }} vmm.ilnmors.internal
{{ hostvars['fw']['network6']['infra']['server'] }} infra.ilnmors.internal
{{ hostvars['fw']['network6']['auth']['server'] }} auth.ilnmors.internal
{{ hostvars['fw']['network6']['app']['server'] }} app.ilnmors.internal
{% else %}
# IPv4
# Crowdsec, blocky, bind(fw)
{{ hostvars['fw']['network4']['firewall']['server'] }} ntp.ilnmors.internal crowdsec.ilnmors.internal
{{ hostvars['fw']['network4']['blocky']['server'] }} blocky.ilnmors.internal
{{ hostvars['fw']['network4']['bind']['server'] }} bind.ilnmors.internal
# DB, LDAP, CA, Prometheus, Loki, mail (infra)
{{ hostvars['fw']['network4']['infra']['server'] }} postgresql.ilnmors.internal ldap.ilnmors.internal prometheus.ilnmors.internal loki.ilnmors.internal mail.ilnmors.internal ca.ilnmors.internal
# IPv6
# Crowdsec, blocky, bind(fw)
{{ hostvars['fw']['network6']['firewall']['server'] }} ntp.ilnmors.internal crowdsec.ilnmors.internal
{{ hostvars['fw']['network6']['blocky']['server'] }} blocky.ilnmors.internal
{{ hostvars['fw']['network6']['bind']['server'] }} bind.ilnmors.internal
# DB, LDAP, CA, Prometheus, Loki, mail (infra)
{{ hostvars['fw']['network6']['infra']['server'] }} postgresql.ilnmors.internal ldap.ilnmors.internal prometheus.ilnmors.internal loki.ilnmors.internal mail.ilnmors.internal ca.ilnmors.internal
{% endif %}

View File

@@ -0,0 +1,5 @@
# Rename the VM's LAN NIC (matched by MAC) to the stable name "eth0".
[Match]
MACAddress={{ hostvars[target_vm]['vm']['lan_mac'] }}
[Link]
Name=eth0

View File

@@ -0,0 +1,13 @@
# Static addressing for a VM on the server segment; gateway and DNS point at the fw VM.
[Match]
Name=eth0
[Network]
# IPv4
Address={{ hostvars['fw']['network4'][target_vm]['server'] }}/24
Gateway={{ hostvars['fw']['network4']['firewall']['server'] }}
DNS={{ hostvars['fw']['network4']['blocky']['server'] }}
# IPv6
IPv6AcceptRA=false
Address={{ hostvars['fw']['network6'][target_vm]['server'] }}/64
Gateway={{ hostvars['fw']['network6']['firewall']['server'] }}
DNS={{ hostvars['fw']['network6']['blocky']['server'] }}

View File

@@ -0,0 +1,6 @@
# systemd-resolved drop-in: fw/vmm resolve upstream directly; everything else
# relies on the per-link DNS (blocky). Local caching is disabled everywhere.
[Resolve]
{% if node['name'] in ['vmm', 'fw'] %}
DNS=1.1.1.2 1.0.0.2
DNS=2606:4700:4700::1112 2606:4700:4700::1002
{% endif %}
# Fix: resolved.conf keys are case-sensitive — lowercase "cache=" is ignored
# with an "Unknown key" warning, leaving the cache enabled.
Cache=false

View File

@@ -0,0 +1,2 @@
# Use the ed25519 host key together with a CA-signed host certificate.
HostKey /etc/ssh/ssh_host_ed25519_key
HostCertificate /etc/ssh/ssh_host_ed25519_key-cert.pub

View File

@@ -0,0 +1 @@
# Disable direct root SSH logins.
PermitRootLogin no

View File

@@ -0,0 +1 @@
# Accept user certificates signed by the local SSH CA.
TrustedUserCAKeys /etc/ssh/local_ssh_ca.pub

View File

@@ -0,0 +1,3 @@
# Prefer the internal NTP server; fall back to the Debian pool.
[Time]
NTP=ntp.ilnmors.internal
FallbackNTP=0.debian.pool.ntp.org 1.debian.pool.ntp.org 2.debian.pool.ntp.org 3.debian.pool.ntp.org

View File

@@ -0,0 +1,5 @@
# Rename the firewall VM's WAN-facing NIC (matched by MAC) to "wan".
[Match]
MACAddress={{ hostvars['fw']['vm']['wan_mac'] }}
[Link]
Name=wan

View File

@@ -0,0 +1,5 @@
# Rename the firewall VM's LAN-facing NIC (matched by MAC) to "client".
[Match]
MACAddress={{ hostvars['fw']['vm']['lan_mac'] }}
[Link]
Name=client

View File

@@ -0,0 +1,6 @@
[NetDev]
Name=server
Kind=vlan
[VLAN]
Id=10

View File

@@ -0,0 +1,6 @@
[NetDev]
Name=user
Kind=vlan
[VLAN]
Id=20

View File

@@ -0,0 +1,16 @@
# WAN uplink: DHCP for v4, RA + DHCPv6 prefix delegation for v6; upstream DNS ignored.
[Match]
Name=wan
[Network]
DHCP=true
IPv6AcceptRA=true
# NOTE(review): IPForward= is the legacy spelling; newer systemd prefers
# IPv4Forwarding=/IPv6Forwarding= — confirm on the deployed version.
IPForward=true
RequiredForOnline=false
[DHCPv4]
UseDNS=false
[DHCPv6]
WithoutRA=solicit
# NOTE(review): PrefixDelegationHint= is documented to take an IPv6 prefix
# (e.g. ::/56), not a boolean — verify "yes" is honored here.
PrefixDelegationHint=yes
UseDNS=false

View File

@@ -0,0 +1,16 @@
# LAN trunk interface; VLANs "server" and "user" ride on top of it.
[Match]
Name=client
[Network]
# General
IPForward=true
IPv6SendRA=false
IPv6AcceptRA=false
VLAN=server
VLAN=user
# IPv4
Address={{ hostvars['fw']['network4']['firewall']['client'] }}/24
DNS={{ hostvars['fw']['network4']['blocky']['server'] }}
# IPv6
Address={{ hostvars['fw']['network6']['firewall']['client'] }}/64
DNS={{ hostvars['fw']['network6']['blocky']['server'] }}

View File

@@ -0,0 +1,24 @@
# Server VLAN interface; also hosts the blocky and bind service addresses.
[Match]
Name=server
[Network]
IPForward=true
IPv6SendRA=false
IPv6AcceptRA=false
# IPv4
Address={{ hostvars['fw']['network4']['firewall']['server'] }}/24
DNS={{ hostvars['fw']['network4']['blocky']['server'] }}
# IPv6
Address={{ hostvars['fw']['network6']['firewall']['server'] }}/64
DNS={{ hostvars['fw']['network6']['blocky']['server'] }}
# Extra service addresses bound to the same interface.
[Address]
Address={{ hostvars['fw']['network4']['blocky']['server'] }}/24
[Address]
Address={{ hostvars['fw']['network4']['bind']['server'] }}/24
[Address]
Address={{ hostvars['fw']['network6']['blocky']['server'] }}/64
# PreferredLifetime=0 deprecates the address so it is not chosen as a source address.
PreferredLifetime=0
[Address]
Address={{ hostvars['fw']['network6']['bind']['server'] }}/64
PreferredLifetime=0

View File

@@ -0,0 +1,25 @@
# User VLAN interface: delegated-prefix subnet 20 announced via SLAAC-only RA.
[Match]
Name=user
[Network]
IPForward=true
# NOTE(review): IPv6PrefixDelegation= is the pre-v247 spelling of IPv6SendRA=;
# both are set here — confirm which one the deployed systemd honors.
IPv6PrefixDelegation=true
IPv6SendRA=true
# NOTE(review): IPv6SendRAExtension= is not a documented systemd.network key — verify.
IPv6SendRAExtension=false
# IPv4
Address={{ hostvars['fw']['network4']['firewall']['user'] }}/24
DNS={{ hostvars['fw']['network4']['blocky']['server'] }}
# NOTE(review): in current systemd, SubnetId= lives in [DHCPPrefixDelegation] and
# AddressAutoconfiguration=/OnLink= in [IPv6Prefix]; confirm these section/key
# names match the deployed systemd version.
[IPv6PrefixDelegation]
SubnetId=20
# A-Flag: Enable SLAAC
AddressAutoconfiguration=true
OnLink=true
[IPv6SendRA]
# M-Flag: Client IP from DHCPv6
Managed=false
# O-Flag: Other information from DHCPv6
OtherInformation=false
EmitDNS=true
DNS={{ hostvars['fw']['network6']['blocky']['server'] }}

View File

@@ -0,0 +1,186 @@
#!/usr/sbin/nft -f
# Convention
# iifname oifname saddr daddr proto dport ct state action / Ellipsis if you can something
flush ruleset
define IF_WAN = "wan"
define IF_CLIENT = "client"
define IF_SERVER = "server"
define IF_USER = "user"
define IF_WG = "wg0"
define NET4_CLIENT = {{ hostvars['fw']['network4']['subnet']['client'] }}
define NET4_SERVER = {{ hostvars['fw']['network4']['subnet']['server'] }}
define NET4_USER = {{ hostvars['fw']['network4']['subnet']['user'] }}
define NET4_WG = {{ hostvars['fw']['network4']['subnet']['wg'] }}
define NET4_LLA = {{ hostvars['fw']['network4']['subnet']['lla'] }}
define NET4_RFC1918 = { 10.0.0.0/8, 172.16.0.0/12, 192.168.0.0/16 }
define NET6_CLIENT = {{ hostvars['fw']['network6']['subnet']['client'] }}
define NET6_SERVER = {{ hostvars['fw']['network6']['subnet']['server'] }}
define NET6_WG = {{ hostvars['fw']['network6']['subnet']['wg'] }}
define NET6_LLA = {{ hostvars['fw']['network6']['subnet']['lla'] }}
define HOSTS4_FW = { {{ hostvars['fw']['network4']['firewall'].values() | join(', ') }} }
define HOSTS4_BLOCKY = {{ hostvars['fw']['network4']['blocky']['server'] }}
define HOSTS4_BIND = {{ hostvars['fw']['network4']['bind']['server'] }}
define HOSTS4_CONSOLE = { {{ hostvars['fw']['network4']['console'].values() | join(', ') }} }
define HOSTS4_VMM = { {{ hostvars['fw']['network4']['vmm'].values() | join(', ') }} }
define HOSTS4_INFRA = {{ hostvars['fw']['network4']['infra']['server'] }}
define HOSTS4_AUTH = {{ hostvars['fw']['network4']['auth']['server'] }}
define HOSTS4_APP = {{ hostvars['fw']['network4']['app']['server'] }}
define HOSTS4_NAS = {{ hostvars['fw']['network4']['nas']['client'] }}
define HOSTS6_FW = { {{ hostvars['fw']['network6']['firewall'].values() | join(', ') }} }
define HOSTS6_BLOCKY = {{ hostvars['fw']['network6']['blocky']['server'] }}
define HOSTS6_BIND = {{ hostvars['fw']['network6']['bind']['server'] }}
define HOSTS6_CONSOLE = { {{ hostvars['fw']['network6']['console'].values() | join(', ') }} }
define HOSTS6_VMM = { {{ hostvars['fw']['network6']['vmm'].values() | join(', ') }} }
define HOSTS6_INFRA = {{ hostvars['fw']['network6']['infra']['server'] }}
define HOSTS6_AUTH = {{ hostvars['fw']['network6']['auth']['server'] }}
define HOSTS6_APP = {{ hostvars['fw']['network6']['app']['server'] }}
define HOSTS6_NAS = {{ hostvars['fw']['network6']['nas']['client'] }}
define PORTS_SSH = 22
define PORTS_WEB = { 80, 443 }
define PORTS_DHCP = { 67, 68, 546, 547 }
define PORTS_DNS = 53
define PORTS_NTP = 123
define PORTS_VPN = 11290
define PORTS_CROWDSEC = 8080
define PORTS_NAS = { 5000, 5001 }
define PORTS_KOPIA = 51515
# NAT table: forward inbound web traffic from WAN to the AUTH reverse proxy,
# and masquerade outbound traffic leaving via WAN.
table inet nat {
chain prerouting {
type nat hook prerouting priority dstnat; policy accept;
# After prerouting, accept forward chain WAN
# (DNATed packets must still be accepted by the filter forward chain.)
iifname $IF_WAN meta nfproto ipv4 tcp dport $PORTS_WEB dnat to $HOSTS4_AUTH comment "DNAT44 ipv4 web connection: WAN > FW > SERVER AUTH"
iifname $IF_WAN meta nfproto ipv6 tcp dport $PORTS_WEB dnat to $HOSTS6_AUTH comment "DNAT66 ipv6 web connection: WAN > FW > SERVER AUTH"
}
chain postrouting {
type nat hook postrouting priority srcnat; policy accept;
# Masquerade the packet
oifname $IF_WAN meta nfproto ipv4 masquerade comment "masquerade ipv4 wan connection: > FW > WAN"
# $IF_USER uses GUA on IPv6, so USER traffic is routed without NAT66.
iifname { $IF_CLIENT, $IF_SERVER, $IF_WG } oifname $IF_WAN meta nfproto ipv6 masquerade comment "masquerade ipv6 wan connection: CLIENT/SERVER/WG > FW > WAN"
}
# Hook-less placeholder chain: contains no rules, so it has no effect.
chain output {
}
}
# Main packet filter. A hook-less "global" chain holds the shared
# drop/anti-spoofing rules and is jumped to from both input and forward;
# forward then dispatches to one regular chain per ingress interface.
table inet filter {
# Dynamic IPv4 block list, populated externally (crowdsec bouncer).
set crowdsec-blacklists {
type ipv4_addr
flags timeout
}
# Dynamic IPv6 block list, populated externally (crowdsec bouncer).
set crowdsec6-blacklists {
type ipv6_addr
flags timeout
}
# Shared pre-checks; a "return" here resumes evaluation in the caller.
chain global {
# invalid packets
ct state invalid drop comment "deny invalid connection"
# crowdsec
ip saddr @crowdsec-blacklists counter drop comment "deny all crowdsec blacklist"
ip6 saddr @crowdsec6-blacklists counter drop comment "deny all ipv6 crowdsec blacklist"
# fw
ct state established, related accept comment "allow all connection already existing"
# IPv6 link-local is exempted from the per-interface source checks below.
ip6 saddr $NET6_LLA return comment "return ipv6 linklocaladdress to input and forward chain"
iifname $IF_WAN tcp dport $PORTS_SSH drop comment "deny ssh connection: WAN !> "
iifname $IF_WAN udp dport $PORTS_DNS drop comment "deny udp dns connection: WAN !> "
iifname $IF_WAN tcp dport $PORTS_DNS drop comment "deny tcp dns connection: WAN !> "
iifname $IF_WAN icmp type echo-request drop comment "deny icmp echo connection (Ping): WAN !>"
iifname $IF_WAN icmpv6 type echo-request drop comment "deny icmpv6 echo connection (Ping): WAN !>"
iifname $IF_WAN meta l4proto { icmp, icmpv6 } accept comment "allow icmp, icmpv6 connection: WAN >"
# Bogon filtering on WAN.
iifname $IF_WAN ip saddr $NET4_RFC1918 drop comment "deny ipv4 all connection: WAN RFC1918 !>"
# NOTE(review): $NET4_LLA is not among the defines visible in this chunk;
# presumably 169.254.0.0/16 defined earlier in the template — confirm.
iifname $IF_WAN ip saddr $NET4_LLA drop comment "deny ipv4 all connection: WAN APIPA(bogon) !>"
iifname { $IF_CLIENT, $IF_SERVER, $IF_USER } udp dport $PORTS_DHCP accept comment "allow dhcp4, dhcp6 connection: CLIENT/SERVER/USER > FW"
# Anti-spoofing: each segment may only source addresses from its own subnet.
iifname $IF_CLIENT ip saddr != $NET4_CLIENT drop comment "deny ipv4 all connection: CLIENT !CLIENT !>"
iifname $IF_CLIENT ip6 saddr != $NET6_CLIENT drop comment "deny ipv6 all connection: CLIENT !CLIENT !>"
iifname $IF_SERVER ip saddr != $NET4_SERVER drop comment "deny ipv4 all connection: SERVER !SERVER !>"
iifname $IF_SERVER ip6 saddr != $NET6_SERVER drop comment "deny ipv6 all connection: SERVER !SERVER !>"
# IF_USER uses GUA on ipv6, so ipv6 rule is not needed
iifname $IF_USER ip saddr != $NET4_USER drop comment "deny ipv4 all connection: USER !USER !>"
iifname $IF_WG ip saddr != $NET4_WG drop comment "deny all ipv4 connection: WG !WG !>"
iifname $IF_WG ip6 saddr != $NET6_WG drop comment "deny all ipv6 connection: WG !WG !>"
}
# Traffic addressed to the firewall host itself.
chain input {
type filter hook input priority filter; policy drop;
jump global comment "set global condition"
iifname "lo" accept comment "allow local connection: FW > FW"
udp dport $PORTS_VPN accept comment "allow vpn connection: > FW"
iifname { $IF_CLIENT, $IF_SERVER, $IF_USER, $IF_WG } meta l4proto { icmp, icmpv6 } accept comment "allow icmp, icmpv6 connection: CLIENT/SERVER/USER/WG > FW"
iifname { $IF_CLIENT, $IF_SERVER, $IF_USER, $IF_WG } udp dport $PORTS_NTP accept comment "allow ntp connection: CLIENT/SERVER/USER/WG > FW"
# Global chain contains "WAN !> :SSH_PORT"
ip saddr $HOSTS4_CONSOLE tcp dport $PORTS_SSH accept comment "allow ipv4 ssh connection: CONSOLE > FW"
ip6 saddr $HOSTS6_CONSOLE tcp dport $PORTS_SSH accept comment "allow ipv6 ssh connection: CONSOLE > FW"
ip saddr { $HOSTS4_VMM, $HOSTS4_INFRA, $HOSTS4_AUTH, $HOSTS4_APP } tcp dport $PORTS_CROWDSEC accept comment "allow ipv4 crowdsec lapi connection: SERVER > FW"
ip6 saddr { $HOSTS6_VMM, $HOSTS6_INFRA, $HOSTS6_AUTH, $HOSTS6_APP } tcp dport $PORTS_CROWDSEC accept comment "allow ipv6 crowdsec lapi connection: SERVER > FW"
# Global chain contains "WAN !> :DNS_PORT"
# Blocky (forwarding resolver) answers everyone except WAN; BIND only
# accepts the server vms that push nsupdate records.
ip daddr $HOSTS4_BLOCKY udp dport $PORTS_DNS accept comment "allow ipv4 udp dns connection: !WAN > SERVER BLOCKY(FW)"
ip daddr $HOSTS4_BLOCKY tcp dport $PORTS_DNS accept comment "allow ipv4 tcp dns connection: !WAN > SERVER BLOCKY(FW)"
ip6 daddr $HOSTS6_BLOCKY udp dport $PORTS_DNS accept comment "allow ipv6 udp dns connection: !WAN > SERVER BLOCKY(FW)"
ip6 daddr $HOSTS6_BLOCKY tcp dport $PORTS_DNS accept comment "allow ipv6 tcp dns connection: !WAN > SERVER BLOCKY(FW)"
ip saddr { $HOSTS4_INFRA, $HOSTS4_AUTH, $HOSTS4_APP } ip daddr $HOSTS4_BIND udp dport $PORTS_DNS accept comment "allow ipv4 udp dns connection (nsupdate): SERVER INFRA/AUTH/APP > BIND9(FW)"
ip saddr { $HOSTS4_INFRA, $HOSTS4_AUTH, $HOSTS4_APP } ip daddr $HOSTS4_BIND tcp dport $PORTS_DNS accept comment "allow ipv4 tcp dns connection (nsupdate): SERVER INFRA/AUTH/APP > BIND9(FW)"
ip6 saddr { $HOSTS6_INFRA, $HOSTS6_AUTH, $HOSTS6_APP } ip6 daddr $HOSTS6_BIND udp dport $PORTS_DNS accept comment "allow ipv6 udp dns connection (nsupdate): SERVER INFRA/AUTH/APP > BIND9(FW)"
ip6 saddr { $HOSTS6_INFRA, $HOSTS6_AUTH, $HOSTS6_APP } ip6 daddr $HOSTS6_BIND tcp dport $PORTS_DNS accept comment "allow ipv6 tcp dns connection (nsupdate): SERVER INFRA/AUTH/APP > BIND9(FW)"
}
# Routed traffic between segments; interface-specific rules follow the
# shared cross-segment rules below.
chain forward {
type filter hook forward priority filter; policy drop;
jump global comment "set global condition"
# ICMP
ip saddr $HOSTS4_CONSOLE meta l4proto icmp accept comment "allow icmp connection: CONSOLE > FW >"
ip6 saddr $HOSTS6_CONSOLE meta l4proto icmpv6 accept comment "allow icmpv6 connection: CONSOLE > FW >"
# SSH connection
ip saddr $HOSTS4_CONSOLE tcp dport $PORTS_SSH accept comment "allow ipv4 ssh connection: CONSOLE > FW >"
ip6 saddr $HOSTS6_CONSOLE tcp dport $PORTS_SSH accept comment "allow ipv6 ssh connection: CONSOLE > FW >"
# Reverse proxy (WAN) — matches the DNATed web traffic from the nat table.
oifname $IF_SERVER ip daddr $HOSTS4_AUTH tcp dport $PORTS_WEB accept comment "allow ipv4 web connection: > FW > SERVER AUTH"
oifname $IF_SERVER ip6 daddr $HOSTS6_AUTH tcp dport $PORTS_WEB accept comment "allow ipv6 web connection: > FW > SERVER AUTH"
# Reverse proxy (SERVER)
oifname $IF_SERVER ip saddr $HOSTS4_CONSOLE ip daddr { $HOSTS4_INFRA, $HOSTS4_APP } tcp dport $PORTS_WEB accept comment "allow ipv4 web connection: CONSOLE > FW > SERVER INFRA/APP"
oifname $IF_SERVER ip6 saddr $HOSTS6_CONSOLE ip6 daddr { $HOSTS6_INFRA, $HOSTS6_APP } tcp dport $PORTS_WEB accept comment "allow ipv6 web connection: CONSOLE > FW > SERVER INFRA/APP"
# Kopia/NAS Console > NAS
oifname $IF_CLIENT ip saddr $HOSTS4_CONSOLE ip daddr $HOSTS4_NAS tcp dport { $PORTS_NAS, $PORTS_KOPIA } accept comment "allow ipv4 web connection (DSM, KOPIA): CONSOLE > FW > CLIENT NAS"
oifname $IF_CLIENT ip6 saddr $HOSTS6_CONSOLE ip6 daddr $HOSTS6_NAS tcp dport { $PORTS_NAS, $PORTS_KOPIA } accept comment "allow ipv6 web connection (DSM, KOPIA): CONSOLE > FW > CLIENT NAS"
# Per-ingress-interface dispatch; each chain returns to hit policy drop.
iifname $IF_WAN jump wan comment "set WAN interface rules"
iifname $IF_CLIENT jump client comment "set CLIENT interface rules"
iifname $IF_SERVER jump server comment "set SERVER interface rules"
iifname $IF_USER jump user comment "set USER interface rules"
iifname $IF_WG jump wg comment "set WG interface rules"
}
# WAN-ingress forward rules: nothing beyond the shared rules above.
chain wan {
return
}
# CLIENT-ingress forward rules: only console and NAS may reach the internet.
chain client {
oifname $IF_WAN ip saddr { $HOSTS4_CONSOLE, $HOSTS4_NAS } accept comment "allow ipv4 internet connection: CLIENT CONSOLE/NAS > FW > WAN"
oifname $IF_WAN ip6 saddr { $HOSTS6_CONSOLE, $HOSTS6_NAS } accept comment "allow ipv6 internet connection: CLIENT CONSOLE/NAS > FW > WAN"
return
}
# SERVER-ingress forward rules: NAS/backup paths plus internet egress.
chain server {
# reverse proxy AUTH > NAS
oifname $IF_CLIENT ip saddr $HOSTS4_AUTH ip daddr $HOSTS4_NAS tcp dport $PORTS_NAS accept comment "allow ipv4 web connection(DSM): SERVER AUTH > FW > CLIENT NAS"
oifname $IF_CLIENT ip6 saddr $HOSTS6_AUTH ip6 daddr $HOSTS6_NAS tcp dport $PORTS_NAS accept comment "allow ipv6 web connection(DSM): SERVER AUTH > FW > CLIENT NAS"
# Kopia INFRA, APP > NAS
oifname $IF_CLIENT ip saddr { $HOSTS4_INFRA, $HOSTS4_APP } ip daddr $HOSTS4_NAS tcp dport $PORTS_KOPIA accept comment "allow ipv4 web connection(kopia): SERVER INFRA/APP > FW > CLIENT NAS"
oifname $IF_CLIENT ip6 saddr { $HOSTS6_INFRA, $HOSTS6_APP } ip6 daddr $HOSTS6_NAS tcp dport $PORTS_KOPIA accept comment "allow ipv6 web connection(kopia): SERVER INFRA/APP > FW > CLIENT NAS"
oifname $IF_WAN ip saddr { $HOSTS4_VMM, $HOSTS4_INFRA, $HOSTS4_AUTH, $HOSTS4_APP } accept comment "allow ipv4 internet connection: SERVER VMM/INFRA/AUTH/APP > FW > WAN"
oifname $IF_WAN ip6 saddr { $HOSTS6_VMM, $HOSTS6_INFRA, $HOSTS6_AUTH, $HOSTS6_APP } accept comment "allow ipv6 internet connection: SERVER VMM/INFRA/AUTH/APP > FW > WAN"
return
}
# USER-ingress forward rules: unrestricted internet egress.
chain user {
oifname $IF_WAN accept comment "allow internet connection: USER > FW > WAN"
return
}
# WG-ingress forward rules: unrestricted internet egress for VPN peers.
chain wg {
oifname $IF_WAN accept comment "allow internet connection: WG > FW > WAN"
return
}
# Locally generated traffic is unrestricted.
chain output {
type filter hook output priority filter; policy accept;
}
}

View File

@@ -0,0 +1,10 @@
# systemd-networkd .netdev template (Ansible/Jinja2): WireGuard server
# device wg0 with a single peer (the console host).
[NetDev]
Name=wg0
Kind=wireguard
[WireGuard]
# UDP listen port; matches PORTS_VPN (11290) in the firewall ruleset.
ListenPort=11290
PrivateKey={{ hostvars['console']['wireguard']['server']['private_key'] }}
[WireGuardPeer]
PublicKey={{ hostvars['console']['wireguard']['console']['public_key'] }}
PresharedKey={{ hostvars['console']['wireguard']['console']['preshared_key'] }}
# Only the console peer's single tunnel address (v4 /32 and v6 /128) is
# accepted from / routed to this peer.
AllowedIPs={{ hostvars['fw']["network4"]["console"]["wg"] }}/32, {{ hostvars['fw']["network6"]["console"]["wg"] }}/128

View File

@@ -0,0 +1,6 @@
# systemd-networkd .network template: addresses the wg0 tunnel interface
# and enables forwarding so VPN peers can be routed onward.
[Match]
Name=wg0
[Network]
Address={{ hostvars['fw']["network4"]["firewall"]["wg"] }}/24
Address={{ hostvars['fw']["network6"]["firewall"]["wg"] }}/64
# NOTE(review): IPForward= is deprecated in newer systemd in favor of
# IPv4Forwarding=/IPv6Forwarding=, but is still honored — confirm the
# systemd version on the target before migrating.
IPForward=yes

View File

@@ -0,0 +1,70 @@
#!/usr/sbin/nft -f
# nftables ruleset template (Ansible/Jinja2) for the INFRA vm:
# default-drop input; services are reached on redirected high ports.
# Convention
# iifname oifname saddr daddr proto dport ct state action / omit any field that does not apply
flush ruleset
# Subnets and console hosts rendered from Ansible hostvars of host 'fw'.
define NET4_SERVER = {{ hostvars['fw']['network4']['subnet']['server'] }}
define NET6_SERVER = {{ hostvars['fw']['network6']['subnet']['server'] }}
define HOSTS4_CONSOLE = { {{ hostvars['fw']['network4']['console'].values() | join(', ') }} }
define HOSTS6_CONSOLE = { {{ hostvars['fw']['network6']['console'].values() | join(', ') }} }
# Service ports. The *_FORWARD ports are the unprivileged ports the
# daemons actually bind to; the nat table below redirects the
# well-known ports onto them.
define PORTS_SSH = 22
define PORTS_DB = 5432
define PORTS_CA = 9000
define PORTS_LDAPS = 636
define PORTS_LDAPS_FORWARD = 6360
define PORTS_HTTP = 80
define PORTS_HTTP_FORWARD = 2080
define PORTS_HTTPS = 443
define PORTS_HTTPS_FORWARD = 2443
define PORTS_PROMETHEUS = 9090
define PORTS_LOKI = 3100
# NAT table: redirect privileged service ports (80/443/636) to the
# unprivileged ports the daemons listen on — for incoming traffic
# (prerouting) and for locally generated loopback traffic (output).
table inet nat {
chain prerouting {
type nat hook prerouting priority dstnat; policy accept;
tcp dport $PORTS_HTTP dnat to :$PORTS_HTTP_FORWARD comment "DNAT http ports to $PORTS_HTTP_FORWARD"
tcp dport $PORTS_HTTPS dnat to :$PORTS_HTTPS_FORWARD comment "DNAT https ports to $PORTS_HTTPS_FORWARD"
tcp dport $PORTS_LDAPS dnat to :$PORTS_LDAPS_FORWARD comment "DNAT ldaps ports to $PORTS_LDAPS_FORWARD"
}
# Hook-less placeholder chain: no rules, no effect.
chain postrouting {
}
chain output {
# NOTE(review): the symbolic priority "dstnat" is documented for the
# prerouting hook; confirm the installed nft accepts it on the output
# hook (numeric -100 would be the unambiguous spelling).
type nat hook output priority dstnat; policy accept;
oifname "lo" tcp dport $PORTS_HTTP dnat to :$PORTS_HTTP_FORWARD comment "DNAT http ports to $PORTS_HTTP_FORWARD out of LOCALHOST"
oifname "lo" tcp dport $PORTS_HTTPS dnat to :$PORTS_HTTPS_FORWARD comment "DNAT https ports to $PORTS_HTTPS_FORWARD out of LOCALHOST"
oifname "lo" tcp dport $PORTS_LDAPS dnat to :$PORTS_LDAPS_FORWARD comment "DNAT ldaps ports to $PORTS_LDAPS_FORWARD out of LOCALHOST"
}
}
# Packet filter: default-drop input. Console hosts get ssh/web; the
# server subnet gets CA, DB, LDAPS and monitoring endpoints.
table inet filter {
chain input {
type filter hook input priority 0; policy drop;
ct state invalid drop comment "deny invalid connection"
ct state established, related accept comment "allow all connection already existing"
iifname "lo" accept comment "allow local connection: INFRA > INFRA"
meta l4proto { icmp, icmpv6 } accept comment "allow icmp connection: > INFRA"
# Management access from the console hosts only.
ip saddr $HOSTS4_CONSOLE tcp dport $PORTS_SSH accept comment "allow ipv4 ssh connection: CONSOLE > INFRA"
ip6 saddr $HOSTS6_CONSOLE tcp dport $PORTS_SSH accept comment "allow ipv6 ssh connection: CONSOLE > INFRA"
# Shared services for the whole server subnet.
ip saddr $NET4_SERVER tcp dport $PORTS_CA accept comment "allow ipv4 ca connection: SERVER > INFRA"
ip6 saddr $NET6_SERVER tcp dport $PORTS_CA accept comment "allow ipv6 ca connection: SERVER > INFRA"
ip saddr $NET4_SERVER tcp dport $PORTS_DB accept comment "allow ipv4 db connection: SERVER > INFRA"
ip6 saddr $NET6_SERVER tcp dport $PORTS_DB accept comment "allow ipv6 db connection: SERVER > INFRA"
# Redirected ports: match the DNATed (forward) port but require that the
# connection's original destination was the well-known port, so direct
# hits on the high port are still dropped.
ip saddr $HOSTS4_CONSOLE tcp dport $PORTS_HTTP_FORWARD ct original proto-dst $PORTS_HTTP accept comment "allow ipv4 http connection: CONSOLE > INFRA"
ip6 saddr $HOSTS6_CONSOLE tcp dport $PORTS_HTTP_FORWARD ct original proto-dst $PORTS_HTTP accept comment "allow ipv6 http connection: CONSOLE > INFRA"
ip saddr $HOSTS4_CONSOLE tcp dport $PORTS_HTTPS_FORWARD ct original proto-dst $PORTS_HTTPS accept comment "allow ipv4 https connection: CONSOLE > INFRA"
ip6 saddr $HOSTS6_CONSOLE tcp dport $PORTS_HTTPS_FORWARD ct original proto-dst $PORTS_HTTPS accept comment "allow ipv6 https connection: CONSOLE > INFRA"
ip saddr $NET4_SERVER tcp dport $PORTS_LDAPS_FORWARD ct original proto-dst $PORTS_LDAPS accept comment "allow ipv4 ldaps connection: SERVER > INFRA"
ip6 saddr $NET6_SERVER tcp dport $PORTS_LDAPS_FORWARD ct original proto-dst $PORTS_LDAPS accept comment "allow ipv6 ldaps connection: SERVER > INFRA"
# Monitoring endpoints (Prometheus / Loki) for the server subnet.
ip saddr $NET4_SERVER tcp dport $PORTS_PROMETHEUS accept comment "allow ipv4 prometheus connection: SERVER > INFRA"
ip6 saddr $NET6_SERVER tcp dport $PORTS_PROMETHEUS accept comment "allow ipv6 prometheus connection: SERVER > INFRA"
ip saddr $NET4_SERVER tcp dport $PORTS_LOKI accept comment "allow ipv4 loki connection: SERVER > INFRA"
ip6 saddr $NET6_SERVER tcp dport $PORTS_LOKI accept comment "allow ipv6 loki connection: SERVER > INFRA"
}
# This host does not route traffic.
chain forward {
type filter hook forward priority 0; policy drop;
}
chain output {
type filter hook output priority 0; policy accept;
}
}

View File

@@ -0,0 +1,5 @@
# systemd .link file: pin the NIC with this MAC address to the stable
# interface name eth0 (this port is later enslaved to bridge br0).
[Match]
MACAddress=c8:ff:bf:05:aa:b0
[Link]
Name=eth0

View File

@@ -0,0 +1,5 @@
# systemd .link file: pin the NIC with this MAC address to the stable
# interface name eth1 (this port is later enslaved to VLAN bridge br1).
[Match]
MACAddress=c8:ff:bf:05:aa:b1
[Link]
Name=eth1

View File

@@ -0,0 +1,3 @@
# Bridge br0: plain bridge (no VLAN filtering); eth0 is attached to it
# and the bridge itself carries no L3 configuration.
[NetDev]
Name=br0
Kind=bridge

View File

@@ -0,0 +1,7 @@
# Bridge br1 with VLAN filtering: trunk port eth1 and the vlan1/10/20
# interfaces attach to it; untagged frames land in VLAN 1 by default.
[NetDev]
Name=br1
Kind=bridge
[Bridge]
VLANFiltering=true
DefaultPVID=1

View File

@@ -0,0 +1,6 @@
# VLAN sub-interface for VLAN ID 1 (native/management VLAN on br1).
[NetDev]
Name=vlan1
Kind=vlan
[VLAN]
Id=1

View File

@@ -0,0 +1,6 @@
[NetDev]
Name=vlan10
Kind=vlan
[VLAN]
Id=10

View File

@@ -0,0 +1,6 @@
[NetDev]
Name=vlan20
Kind=vlan
[VLAN]
Id=20

View File

@@ -0,0 +1,6 @@
# Enslave eth0 into bridge br0; the port itself gets no addresses,
# not even link-local.
[Match]
Name=eth0
[Network]
Bridge=br0
LinkLocalAddressing=false

View File

@@ -0,0 +1,15 @@
# Trunk port eth1, enslaved into the VLAN-filtering bridge br1.
[Match]
Name=eth1

[Network]
Bridge=br1
# No L3 on the port itself; addressing lives on the vlan* interfaces.
LinkLocalAddressing=false

# VLAN 1 is the native VLAN on this port: ingress untagged frames are
# assigned to it (PVID) and egress frames leave untagged.
[BridgeVLAN]
VLAN=1
PVID=true
EgressUntagged=true

# Tagged VLANs: one [BridgeVLAN] section per VLAN ID. Before systemd
# v256, repeated VLAN= assignments inside a single [BridgeVLAN] section
# are not merged into a list — the last one wins, silently removing the
# earlier VLAN from the trunk. Separate sections behave identically on
# all systemd versions.
[BridgeVLAN]
VLAN=10

[BridgeVLAN]
VLAN=20

View File

@@ -0,0 +1,5 @@
# br0 carries no L3 configuration at all (pure L2 bridge); link-local
# addressing is disabled as well.
[Match]
Name=br0
[Network]
LinkLocalAddressing=false

View File

@@ -0,0 +1,17 @@
# br1 bridge interface: stack the vlan1/10/20 sub-interfaces on top of
# it and register the bridge's own VLAN membership on the bridge port.
[Match]
Name=br1

[Network]
# VLAN= in [Network] is list-valued: one stacked VLAN netdev per line.
VLAN=vlan1
VLAN=vlan10
VLAN=vlan20
# No L3 on the bridge itself; addressing lives on the vlan* interfaces.
LinkLocalAddressing=false

# VLAN 1 is the native VLAN (PVID, egress untagged) — mirrors eth1.
[BridgeVLAN]
VLAN=1
PVID=true
EgressUntagged=true

# Tagged VLANs: one [BridgeVLAN] section per VLAN ID. Before systemd
# v256, repeated VLAN= assignments inside a single [BridgeVLAN] section
# are not merged into a list — the last one wins, silently dropping the
# earlier VLAN. Separate sections behave identically on all versions.
[BridgeVLAN]
VLAN=10

[BridgeVLAN]
VLAN=20

View File

@@ -0,0 +1,28 @@
# vlan1: static addressing plus source-based policy routing. Traffic
# sourced from this interface's addresses is resolved via dedicated
# routing tables (1 for IPv4, 61 for IPv6) that hold only the on-link
# route, keeping replies on the interface they arrived on.
[Match]
Name=vlan1
[Network]
# IPv4
Address=192.168.1.10/24
# IPv6
Address=fd00:1::10/64
[RoutingPolicyRule]
From=192.168.1.10/32
Table=1
Priority=100
[Route]
Destination=192.168.1.0/24
Scope=link
Table=1
[RoutingPolicyRule]
From=fd00:1::10/128
Table=61
Priority=100
[Route]
Destination=fd00:1::/64
Scope=link
Table=61

View File

@@ -0,0 +1,32 @@
# vlan10: static addressing with gateway/DNS, plus source-based policy
# routing — traffic sourced from this interface's addresses uses
# dedicated tables (2 for IPv4, 62 for IPv6) holding a default route via
# this VLAN's gateway, so replies egress on the same VLAN.
[Match]
Name=vlan10
[Network]
# Do not block boot/network-online on this VLAN coming up.
RequiredForOnline=false
# IPv4
Address=192.168.10.10/24
Gateway=192.168.10.1
DNS=192.168.10.2
# IPv6
Address=fd00:10::10/64
Gateway=fd00:10::1
DNS=fd00:10::2
[RoutingPolicyRule]
From=192.168.10.10/32
Table=2
Priority=100
[Route]
Destination=0.0.0.0/0
Gateway=192.168.10.1
Table=2
[RoutingPolicyRule]
From=fd00:10::10/128
Table=62
Priority=100
[Route]
Destination=::/0
Gateway=fd00:10::1
Table=62

View File

@@ -0,0 +1,26 @@
#!/usr/sbin/nft -f
# nftables ruleset template (Ansible/Jinja2) for the hypervisor (VMM):
# default-drop input, ssh from the console hosts only.
# Convention
# iifname oifname saddr daddr proto dport ct state action / omit any field that does not apply
flush ruleset
# Console host addresses rendered from Ansible hostvars of host 'fw'.
define HOSTS4_CONSOLE = { {{ hostvars['fw']['network4']['console'].values() | join(', ') }} }
define HOSTS6_CONSOLE = { {{ hostvars['fw']['network6']['console'].values() | join(', ') }} }
define PORTS_SSH = 22
table inet filter {
chain input {
type filter hook input priority 0; policy drop;
ct state invalid drop comment "deny invalid connection"
ct state established, related accept comment "allow all connection already existing"
iifname "lo" accept comment "allow local connection"
meta l4proto { icmp, icmpv6 } accept comment "allow icmp connection: > VMM"
ip saddr $HOSTS4_CONSOLE tcp dport $PORTS_SSH accept comment "allow ipv4 ssh connection: CONSOLE > VMM"
ip6 saddr $HOSTS6_CONSOLE tcp dport $PORTS_SSH accept comment "allow ipv6 ssh connection: CONSOLE > VMM"
}
# The hypervisor does not filter forwarded traffic itself; VM traffic is
# bridged/routed according to the network setup, and this drop applies
# only to packets traversing this host's forward hook.
chain forward {
type filter hook forward priority 0; policy drop;
}
chain output {
type filter hook output priority 0; policy accept;
}
}

View File

@@ -0,0 +1,3 @@
# sops configuration: encrypt any file matching secrets.yaml to the
# homelab's age recipient (the matching private key decrypts them).
creation_rules:
- path_regex: secrets\.yaml$
age: age120wuwcmsm845ztsvsz46pswj5je53uc2n35vadklrfqudu6cxuusxetk7y

BIN
config/secrets/age-key.gpg Normal file

Binary file not shown.

Some files were not shown because too many files have changed in this diff Show More