From a7365da43101b7abd734499cbe16c5e0b7b31ecb Mon Sep 17 00:00:00 2001 From: il Date: Sun, 15 Mar 2026 04:41:02 +0900 Subject: [PATCH] 1.0.0 Release IaaS --- .gitignore | 7 + README.md | 26 + ansible/ansible.cfg | 57 + ansible/convention.yaml | 39 + ansible/inventory/group_vars/all.yaml | 74 + ansible/inventory/group_vars/hypervisor.yaml | 7 + ansible/inventory/host_vars/app.yaml | 41 + ansible/inventory/host_vars/auth.yaml | 18 + ansible/inventory/host_vars/console.yaml | 25 + ansible/inventory/host_vars/fw.yaml | 98 + ansible/inventory/host_vars/infra.yaml | 19 + ansible/inventory/hosts.ini | 14 + ansible/playbooks/app/site.yaml | 185 + ansible/playbooks/auth/site.yaml | 154 + ansible/playbooks/console/site.yaml | 132 + ansible/playbooks/fw/site.yaml | 190 + ansible/playbooks/infra/site.yaml | 210 + ansible/playbooks/vmm/create_vm.yaml | 61 + ansible/playbooks/vmm/site.yaml | 165 + ansible/roles/app/tasks/node/set_raid.yaml | 70 + ansible/roles/auth/handlers/main.yaml | 11 + .../auth/tasks/services/set_authelia.yaml | 78 + ansible/roles/common/handlers/main.yaml | 101 + .../common/tasks/node/create_default_dir.yaml | 34 + .../roles/common/tasks/node/deploy_hosts.yaml | 9 + .../common/tasks/node/deploy_root_ca.yaml | 10 + .../roles/common/tasks/node/set_linger.yaml | 20 + .../roles/common/tasks/node/set_networkd.yaml | 23 + .../roles/common/tasks/node/set_nftables.yaml | 36 + .../roles/common/tasks/node/set_resolved.yaml | 39 + .../roles/common/tasks/node/set_ssh_host.yaml | 119 + .../common/tasks/node/set_timesyncd.yaml | 20 + .../common/tasks/node/set_wireguard.yaml | 15 + .../common/tasks/services/set_alloy.yaml | 73 + .../common/tasks/services/set_caddy.yaml | 99 + .../common/tasks/services/set_crowdsec.yaml | 304 + .../common/tasks/services/set_kopia.yaml | 137 + .../common/tasks/services/set_podman.yaml | 46 + ansible/roles/console/handlers/main.yaml | 8 + .../console/tasks/node/load_secret_vars.yaml | 29 + .../console/tasks/node/set_ssh_client.yaml | 109 + 
.../console/tasks/services/set_chromium.yaml | 31 + .../console/tasks/services/set_cli_tools.yaml | 108 + ansible/roles/fw/handlers/main.yaml | 63 + ansible/roles/fw/tasks/services/set_bind.yaml | 103 + .../roles/fw/tasks/services/set_blocky.yaml | 117 + .../roles/fw/tasks/services/set_chrony.yaml | 55 + ansible/roles/fw/tasks/services/set_ddns.yaml | 41 + ansible/roles/fw/tasks/services/set_kea.yaml | 57 + .../roles/fw/tasks/services/set_suricata.yaml | 141 + ansible/roles/infra/handlers/main.yaml | 85 + .../infra/tasks/services/set_ca_server.yaml | 84 + .../infra/tasks/services/set_grafana.yaml | 89 + .../roles/infra/tasks/services/set_ldap.yaml | 112 + .../roles/infra/tasks/services/set_loki.yaml | 70 + .../infra/tasks/services/set_postgresql.yaml | 169 + .../infra/tasks/services/set_prometheus.yaml | 74 + .../tasks/services/set_x509-exporter.yaml | 63 + ansible/roles/vmm/tasks/node/set_libvirt.yaml | 92 + ansible/roles/vmm/tasks/vm/create_seed.yaml | 59 + .../roles/vmm/tasks/vm/deploy_vm_init.yaml | 55 + ansible/roles/vmm/tasks/vm/register_vm.yaml | 24 + config/node/app/nftables.conf.j2 | 38 + config/node/auth/nftables.conf.j2 | 48 + config/node/common/hosts.j2 | 34 + config/node/common/networkd/00-eth0.link | 5 + config/node/common/networkd/20-eth0.network | 13 + config/node/common/resolved/global.conf.j2 | 6 + config/node/common/ssh/host_certificate.conf | 2 + config/node/common/ssh/prohibit_root.conf | 1 + config/node/common/ssh/ssh_ca.conf | 1 + config/node/common/timesyncd/local-ntp.conf | 3 + config/node/fw/networkd/00-fw-wan.link | 5 + config/node/fw/networkd/01-fw-client.link | 5 + config/node/fw/networkd/10-fw-server.netdev | 6 + config/node/fw/networkd/11-fw-user.netdev | 6 + config/node/fw/networkd/20-fw-wan.network | 16 + config/node/fw/networkd/21-fw-client.network | 16 + config/node/fw/networkd/22-fw-server.network | 24 + config/node/fw/networkd/23-fw-user.network | 25 + config/node/fw/nftables.conf.j2 | 186 + 
config/node/fw/wireguard/30-fw-wg0.netdev | 10 + config/node/fw/wireguard/31-fw-wg0.network | 6 + config/node/infra/nftables.conf.j2 | 70 + config/node/vmm/networkd/00-vmm-eth0.link | 5 + config/node/vmm/networkd/01-vmm-eth1.link | 5 + config/node/vmm/networkd/10-vmm-br0.netdev | 3 + config/node/vmm/networkd/11-vmm-br1.netdev | 7 + config/node/vmm/networkd/12-vmm-vlan1.netdev | 6 + config/node/vmm/networkd/13-vmm-vlan10.netdev | 6 + config/node/vmm/networkd/14-vmm-vlan20.netdev | 6 + config/node/vmm/networkd/20-vmm-eth0.network | 6 + config/node/vmm/networkd/21-vmm-eth1.network | 15 + config/node/vmm/networkd/22-vmm-br0.network | 5 + config/node/vmm/networkd/23-vmm-br1.network | 17 + config/node/vmm/networkd/24-vmm-vlan1.network | 28 + .../node/vmm/networkd/25-vmm-vlan10.network | 32 + config/node/vmm/nftables.conf.j2 | 26 + config/secrets/.sops.yaml | 3 + config/secrets/age-key.gpg | Bin 0 -> 255 bytes config/secrets/edit_secret.sh | 86 + config/secrets/extract_secret.sh | 151 + config/secrets/secrets.yaml | 205 + .../auth/authelia/authelia.container.j2 | 67 + .../auth/authelia/config/authelia.yaml.j2 | 133 + .../common/caddy/build/caddy.containerfile.j2 | 17 + .../common/caddy/caddy.container.j2 | 49 + .../common/caddy/etc/auth/Caddyfile.j2 | 62 + .../common/caddy/etc/infra/Caddyfile.j2 | 40 + .../containers/infra/ca/ca.container.j2 | 35 + .../containers/infra/ca/config/ca.json.j2 | 61 + .../infra/ca/config/defaults.json.j2 | 6 + .../containers/infra/ca/templates/ca.tpl | 8 + .../infra/grafana/etc/grafana.ini.j2 | 54 + .../containers/infra/grafana/etc/ldap.toml.j2 | 47 + .../provisioning/datasources/datasources.yaml | 29 + .../infra/grafana/grafana.container.j2 | 43 + .../containers/infra/ldap/ldap.container.j2 | 64 + .../containers/infra/loki/etc/loki.yaml | 46 + .../containers/infra/loki/loki.container.j2 | 32 + .../build/postgresql.containerfile.j2 | 12 + .../infra/postgresql/config/pg_hba.conf.j2 | 28 + .../postgresql/config/postgresql.conf.j2 | 41 + 
.../containers/infra/postgresql/init/.gitkeep | 0 .../infra/postgresql/postgresql.container.j2 | 36 + .../postgresql-cluster-backup.service | 18 + .../services/postgresql-cluster-backup.timer | 17 + .../services/postgresql-data-backup@.service | 19 + .../services/postgresql-data-backup@.timer | 17 + .../infra/prometheus/etc/prometheus.yaml.j2 | 32 + .../infra/prometheus/etc/rules.yaml.j2 | 38 + .../infra/prometheus/etc/web-config.yaml.j2 | 9 + .../infra/prometheus/prometheus.container.j2 | 38 + .../x509-exporter/x509-exporter.container.j2 | 26 + .../systemd/common/alloy/config.alloy.j2 | 299 + .../common/crowdsec/acquis.d/caddy.yaml | 5 + .../common/crowdsec/acquis.d/suricata.yaml | 5 + .../crowdsec-firewall-bouncer.yaml.j2 | 56 + .../crowdsec/bouncers/whitelists.yaml.j2 | 11 + .../common/crowdsec/crowdsec-update.service | 10 + .../common/crowdsec/crowdsec-update.timer | 10 + .../common/crowdsec/etc/config.yaml.j2 | 66 + .../etc/local_api_credentials.yaml.j2 | 3 + .../common/kopia/kopia-backup.service.j2 | 49 + .../common/kopia/kopia-backup.timer.j2 | 10 + .../systemd/common/kopia/kopia.env.j2 | 5 + .../systemd/fw/bind/etc/named.conf.j2 | 68 + .../systemd/fw/bind/lib/db.1.00df.ip6.arpa | 13 + .../fw/bind/lib/db.1.168.192.in-addr.arpa | 13 + .../systemd/fw/bind/lib/db.10.00df.ip6.arpa | 17 + .../fw/bind/lib/db.10.168.192.in-addr.arpa | 17 + .../systemd/fw/bind/lib/db.ilnmors.com | 12 + .../systemd/fw/bind/lib/db.ilnmors.internal | 40 + .../services/systemd/fw/blocky/blocky.service | 23 + .../systemd/fw/blocky/etc/config.yaml.j2 | 67 + .../systemd/fw/chrony/local-acl.conf.j2 | 9 + config/services/systemd/fw/ddns/ddns.service | 15 + config/services/systemd/fw/ddns/ddns.sh | 299 + config/services/systemd/fw/ddns/ddns.timer | 10 + .../services/systemd/fw/kea/kea-dhcp4.conf.j2 | 105 + .../systemd/fw/suricata/etc/disable.conf | 7 + .../systemd/fw/suricata/etc/enable.conf | 0 .../systemd/fw/suricata/etc/local.rules | 0 .../systemd/fw/suricata/etc/suricata.yaml.j2 | 518 + 
.../fw/suricata/suricata-update.service | 9 + .../systemd/fw/suricata/suricata-update.timer | 10 + .../systemd/vmm/libvirt/seeds/user-data.j2 | 79 + .../systemd/vmm/libvirt/services/app.service | 23 + .../systemd/vmm/libvirt/services/auth.service | 23 + .../systemd/vmm/libvirt/services/fw.service | 23 + .../vmm/libvirt/services/infra.service | 23 + .../vmm/libvirt/xml/networks/lan-net.xml | 19 + .../vmm/libvirt/xml/networks/wan-net.xml | 7 + .../vmm/libvirt/xml/storages/images-pool.xml | 8 + .../vmm/libvirt/xml/storages/seeds-pool.xml | 8 + .../systemd/vmm/libvirt/xml/vms/vms.xml.j2 | 78 + data/bin/.gitkeep | 0 data/create_all_structure.sh | 56 + data/ilnmors_root_ca.crt | 11 + data/images/.gitkeep | 0 data/vmm_init/grub.d/iommu.cfg | 1 + data/vmm_init/modprobe.d/vfio.conf | 3 + data/vmm_init/network/00-vmm-eth0.link | 5 + data/vmm_init/network/01-vmm-eth1.link | 5 + data/vmm_init/network/10-vmm-br0.netdev | 3 + data/vmm_init/network/11-vmm-br1.netdev | 7 + data/vmm_init/network/12-vmm-vlan1.netdev | 6 + data/vmm_init/network/13-vmm-vlan10.netdev | 6 + data/vmm_init/network/14-vmm-vlan20.netdev | 6 + data/vmm_init/network/20-vmm-eth0.network | 6 + data/vmm_init/network/21-vmm-eth1.network | 15 + data/vmm_init/network/22-vmm-br0.network | 5 + data/vmm_init/network/23-vmm-br1.network | 17 + data/vmm_init/network/24-vmm-vlan1.network | 28 + data/vmm_init/network/25-vmm-vlan10.network | 32 + data/vmm_init/nftables.conf | 25 + data/vmm_init/ssh/local_ssh_ca.pub | 1 + .../ssh/sshd_config.d/prohibit_root.conf | 1 + data/vmm_init/ssh/sshd_config.d/ssh_ca.conf | 1 + data/vmm_init/sysctl.d/bridge.conf | 3 + data/volumes/.gitkeep | 0 docs/adr/001-architecture.md | 72 + docs/adr/002-network.md | 63 + docs/adr/003-pki.md | 57 + docs/adr/004-dns.md | 52 + docs/adr/005-ids-ips.md | 54 + docs/adr/006-secrets.md | 60 + docs/adr/007-backup.md | 61 + docs/adr/008-passthrough.md | 43 + docs/adr/009-isolation.md | 49 + docs/adr/010-provisioning.md | 35 + 
docs/adr/011-tls-communication.md | 33 + docs/adr/012-alerting.md | 45 + docs/archives/2025-06/on-premise.txt | 12151 ++++++++++++++++ docs/archives/2025-06/홈 서버 구축 계획.txt | 102 + docs/archives/2025-12/01_plans/01_01_plans.md | 814 ++ .../2025-12/01_plans/01_02_milestone.md | 167 + docs/archives/2025-12/02_theory/02_01_dns.md | 101 + docs/archives/2025-12/02_theory/02_02_dhcp.md | 55 + docs/archives/2025-12/02_theory/02_03_pki.md | 103 + docs/archives/2025-12/02_theory/02_04_tls.md | 92 + docs/archives/2025-12/02_theory/02_05_sso.md | 131 + .../archives/2025-12/02_theory/02_06_email.md | 242 + .../03_common/03_01_debian_configuration.md | 657 + .../2025-12/03_common/03_02_iptables.md | 273 + .../2025-12/03_common/03_03_podman.md | 306 + .../2025-12/03_common/03_04_crowdsec.md | 321 + .../archives/2025-12/03_common/03_05_redis.md | 107 + .../archives/2025-12/03_common/03_06_btrfs.md | 130 + .../2025-12/04_hypervisor/04_01_hypervisor.md | 277 + .../2025-12/05_firewall/05_01_opnsense_vm.md | 130 + .../05_firewall/05_02_opnsense_general.md | 91 + .../05_firewall/05_03_opnsense_interface.md | 159 + .../05_firewall/05_04_opnsense_rules.md | 79 + .../05_firewall/05_05_opnsense_suricata.md | 69 + .../05_firewall/05_06_opnsense_acme.md | 133 + .../2025-12/05_firewall/05_07_opnsense_kea.md | 40 + .../2025-12/06_network/06_01_net_vm.md | 107 + .../2025-12/06_network/06_02_net_ddns.md | 387 + .../2025-12/06_network/06_03_net_bind.md | 338 + .../06_network/06_04_net_adguard_home.md | 293 + .../2025-12/06_network/06_05_net_kea.md | 630 + .../2025-12/07_authorization/07_01_auth_vm.md | 148 + .../07_authorization/07_02_auth_step-ca.md | 320 + .../07_authorization/07_03_auth_main_caddy.md | 231 + .../07_authorization/07_04_auth_authentik.md | 315 + .../07_authorization/07_04_auth_lldap.md | 386 + .../07_authorization/07_05_auth_authelia.md | 651 + .../2025-12/08_development/08_01_dev_vm.md | 149 + .../08_development/08_02_dev_postgresql.md | 534 + 
.../08_development/08_03_dev_sidecar_caddy.md | 259 + .../08_development/08_04_dev_code-server.md | 341 + .../2025-12/09_application/09_01_app_vm.md | 253 + docs/archives/2025-12/console.md | 360 + docs/archives/2025-12/scripts.md | 452 + docs/notes/.gitkeep | 0 docs/runbook/00-operate.md | 104 + docs/runbook/01-windows.md | 255 + docs/runbook/02-certificates.md | 169 + docs/runbook/03-wireguard.md | 23 + docs/runbook/04-hypervisor.md | 162 + docs/runbook/05-hardwares.md | 220 + docs/runbook/06-kopia.md | 248 + docs/runbook/07-git.md | 71 + docs/services/app/igpu_firmware.md | 12 + docs/services/common/alloy.md | 35 + docs/services/common/caddy.md | 45 + docs/services/common/crowdsec.md | 233 + docs/services/common/kopia.md | 14 + docs/services/console/git.md | 0 docs/services/fw/kea.md | 29 + docs/services/infra/ca.md | 146 + docs/services/infra/grafana.md | 20 + docs/services/infra/ldap.md | 154 + docs/services/infra/loki.md | 12 + docs/services/infra/postgresql.md | 64 + docs/services/infra/prometheus.md | 12 + docs/services/systemd/systemd-networkd.md | 35 + docs/services/systemd/systemd-quadlet.md | 67 + docs/services/vmm/libvirt/cloud-init.md | 125 + docs/services/vmm/libvirt/undefine.md | 18 + docs/specifications/environments.md | 154 + docs/specifications/hardwares.md | 67 + docs/specifications/matrix.md | 117 + docs/theories/network/dhcp.md | 53 + docs/theories/network/dns.md | 99 + docs/theories/network/email.md | 241 + docs/theories/network/link-local.md | 30 + docs/theories/network/tls.md | 90 + docs/theories/pki/pki.md | 102 + docs/theories/pki/sso.md | 129 + docs/theories/virtualization/passthrough.md | 15 + 292 files changed, 36059 insertions(+) create mode 100644 .gitignore create mode 100644 README.md create mode 100644 ansible/ansible.cfg create mode 100644 ansible/convention.yaml create mode 100644 ansible/inventory/group_vars/all.yaml create mode 100644 ansible/inventory/group_vars/hypervisor.yaml create mode 100644 
ansible/inventory/host_vars/app.yaml create mode 100644 ansible/inventory/host_vars/auth.yaml create mode 100644 ansible/inventory/host_vars/console.yaml create mode 100644 ansible/inventory/host_vars/fw.yaml create mode 100644 ansible/inventory/host_vars/infra.yaml create mode 100644 ansible/inventory/hosts.ini create mode 100644 ansible/playbooks/app/site.yaml create mode 100644 ansible/playbooks/auth/site.yaml create mode 100644 ansible/playbooks/console/site.yaml create mode 100644 ansible/playbooks/fw/site.yaml create mode 100644 ansible/playbooks/infra/site.yaml create mode 100644 ansible/playbooks/vmm/create_vm.yaml create mode 100644 ansible/playbooks/vmm/site.yaml create mode 100644 ansible/roles/app/tasks/node/set_raid.yaml create mode 100644 ansible/roles/auth/handlers/main.yaml create mode 100644 ansible/roles/auth/tasks/services/set_authelia.yaml create mode 100644 ansible/roles/common/handlers/main.yaml create mode 100644 ansible/roles/common/tasks/node/create_default_dir.yaml create mode 100644 ansible/roles/common/tasks/node/deploy_hosts.yaml create mode 100644 ansible/roles/common/tasks/node/deploy_root_ca.yaml create mode 100644 ansible/roles/common/tasks/node/set_linger.yaml create mode 100644 ansible/roles/common/tasks/node/set_networkd.yaml create mode 100644 ansible/roles/common/tasks/node/set_nftables.yaml create mode 100644 ansible/roles/common/tasks/node/set_resolved.yaml create mode 100644 ansible/roles/common/tasks/node/set_ssh_host.yaml create mode 100644 ansible/roles/common/tasks/node/set_timesyncd.yaml create mode 100644 ansible/roles/common/tasks/node/set_wireguard.yaml create mode 100644 ansible/roles/common/tasks/services/set_alloy.yaml create mode 100644 ansible/roles/common/tasks/services/set_caddy.yaml create mode 100644 ansible/roles/common/tasks/services/set_crowdsec.yaml create mode 100644 ansible/roles/common/tasks/services/set_kopia.yaml create mode 100644 ansible/roles/common/tasks/services/set_podman.yaml create mode 
100644 ansible/roles/console/handlers/main.yaml create mode 100644 ansible/roles/console/tasks/node/load_secret_vars.yaml create mode 100644 ansible/roles/console/tasks/node/set_ssh_client.yaml create mode 100644 ansible/roles/console/tasks/services/set_chromium.yaml create mode 100644 ansible/roles/console/tasks/services/set_cli_tools.yaml create mode 100644 ansible/roles/fw/handlers/main.yaml create mode 100644 ansible/roles/fw/tasks/services/set_bind.yaml create mode 100644 ansible/roles/fw/tasks/services/set_blocky.yaml create mode 100644 ansible/roles/fw/tasks/services/set_chrony.yaml create mode 100644 ansible/roles/fw/tasks/services/set_ddns.yaml create mode 100644 ansible/roles/fw/tasks/services/set_kea.yaml create mode 100644 ansible/roles/fw/tasks/services/set_suricata.yaml create mode 100644 ansible/roles/infra/handlers/main.yaml create mode 100644 ansible/roles/infra/tasks/services/set_ca_server.yaml create mode 100644 ansible/roles/infra/tasks/services/set_grafana.yaml create mode 100644 ansible/roles/infra/tasks/services/set_ldap.yaml create mode 100644 ansible/roles/infra/tasks/services/set_loki.yaml create mode 100644 ansible/roles/infra/tasks/services/set_postgresql.yaml create mode 100644 ansible/roles/infra/tasks/services/set_prometheus.yaml create mode 100644 ansible/roles/infra/tasks/services/set_x509-exporter.yaml create mode 100644 ansible/roles/vmm/tasks/node/set_libvirt.yaml create mode 100644 ansible/roles/vmm/tasks/vm/create_seed.yaml create mode 100644 ansible/roles/vmm/tasks/vm/deploy_vm_init.yaml create mode 100644 ansible/roles/vmm/tasks/vm/register_vm.yaml create mode 100644 config/node/app/nftables.conf.j2 create mode 100644 config/node/auth/nftables.conf.j2 create mode 100644 config/node/common/hosts.j2 create mode 100644 config/node/common/networkd/00-eth0.link create mode 100644 config/node/common/networkd/20-eth0.network create mode 100644 config/node/common/resolved/global.conf.j2 create mode 100644 
config/node/common/ssh/host_certificate.conf create mode 100644 config/node/common/ssh/prohibit_root.conf create mode 100644 config/node/common/ssh/ssh_ca.conf create mode 100644 config/node/common/timesyncd/local-ntp.conf create mode 100644 config/node/fw/networkd/00-fw-wan.link create mode 100644 config/node/fw/networkd/01-fw-client.link create mode 100644 config/node/fw/networkd/10-fw-server.netdev create mode 100644 config/node/fw/networkd/11-fw-user.netdev create mode 100644 config/node/fw/networkd/20-fw-wan.network create mode 100644 config/node/fw/networkd/21-fw-client.network create mode 100644 config/node/fw/networkd/22-fw-server.network create mode 100644 config/node/fw/networkd/23-fw-user.network create mode 100644 config/node/fw/nftables.conf.j2 create mode 100644 config/node/fw/wireguard/30-fw-wg0.netdev create mode 100644 config/node/fw/wireguard/31-fw-wg0.network create mode 100644 config/node/infra/nftables.conf.j2 create mode 100644 config/node/vmm/networkd/00-vmm-eth0.link create mode 100644 config/node/vmm/networkd/01-vmm-eth1.link create mode 100644 config/node/vmm/networkd/10-vmm-br0.netdev create mode 100644 config/node/vmm/networkd/11-vmm-br1.netdev create mode 100644 config/node/vmm/networkd/12-vmm-vlan1.netdev create mode 100644 config/node/vmm/networkd/13-vmm-vlan10.netdev create mode 100644 config/node/vmm/networkd/14-vmm-vlan20.netdev create mode 100644 config/node/vmm/networkd/20-vmm-eth0.network create mode 100644 config/node/vmm/networkd/21-vmm-eth1.network create mode 100644 config/node/vmm/networkd/22-vmm-br0.network create mode 100644 config/node/vmm/networkd/23-vmm-br1.network create mode 100644 config/node/vmm/networkd/24-vmm-vlan1.network create mode 100644 config/node/vmm/networkd/25-vmm-vlan10.network create mode 100644 config/node/vmm/nftables.conf.j2 create mode 100644 config/secrets/.sops.yaml create mode 100644 config/secrets/age-key.gpg create mode 100755 config/secrets/edit_secret.sh create mode 100644 
config/secrets/extract_secret.sh create mode 100644 config/secrets/secrets.yaml create mode 100644 config/services/containers/auth/authelia/authelia.container.j2 create mode 100644 config/services/containers/auth/authelia/config/authelia.yaml.j2 create mode 100644 config/services/containers/common/caddy/build/caddy.containerfile.j2 create mode 100644 config/services/containers/common/caddy/caddy.container.j2 create mode 100644 config/services/containers/common/caddy/etc/auth/Caddyfile.j2 create mode 100644 config/services/containers/common/caddy/etc/infra/Caddyfile.j2 create mode 100644 config/services/containers/infra/ca/ca.container.j2 create mode 100644 config/services/containers/infra/ca/config/ca.json.j2 create mode 100644 config/services/containers/infra/ca/config/defaults.json.j2 create mode 100644 config/services/containers/infra/ca/templates/ca.tpl create mode 100644 config/services/containers/infra/grafana/etc/grafana.ini.j2 create mode 100644 config/services/containers/infra/grafana/etc/ldap.toml.j2 create mode 100644 config/services/containers/infra/grafana/etc/provisioning/datasources/datasources.yaml create mode 100644 config/services/containers/infra/grafana/grafana.container.j2 create mode 100644 config/services/containers/infra/ldap/ldap.container.j2 create mode 100644 config/services/containers/infra/loki/etc/loki.yaml create mode 100644 config/services/containers/infra/loki/loki.container.j2 create mode 100644 config/services/containers/infra/postgresql/build/postgresql.containerfile.j2 create mode 100644 config/services/containers/infra/postgresql/config/pg_hba.conf.j2 create mode 100644 config/services/containers/infra/postgresql/config/postgresql.conf.j2 create mode 100644 config/services/containers/infra/postgresql/init/.gitkeep create mode 100644 config/services/containers/infra/postgresql/postgresql.container.j2 create mode 100644 config/services/containers/infra/postgresql/services/postgresql-cluster-backup.service create mode 100644 
config/services/containers/infra/postgresql/services/postgresql-cluster-backup.timer create mode 100644 config/services/containers/infra/postgresql/services/postgresql-data-backup@.service create mode 100644 config/services/containers/infra/postgresql/services/postgresql-data-backup@.timer create mode 100644 config/services/containers/infra/prometheus/etc/prometheus.yaml.j2 create mode 100644 config/services/containers/infra/prometheus/etc/rules.yaml.j2 create mode 100644 config/services/containers/infra/prometheus/etc/web-config.yaml.j2 create mode 100644 config/services/containers/infra/prometheus/prometheus.container.j2 create mode 100644 config/services/containers/infra/x509-exporter/x509-exporter.container.j2 create mode 100644 config/services/systemd/common/alloy/config.alloy.j2 create mode 100644 config/services/systemd/common/crowdsec/acquis.d/caddy.yaml create mode 100644 config/services/systemd/common/crowdsec/acquis.d/suricata.yaml create mode 100644 config/services/systemd/common/crowdsec/bouncers/crowdsec-firewall-bouncer.yaml.j2 create mode 100644 config/services/systemd/common/crowdsec/bouncers/whitelists.yaml.j2 create mode 100644 config/services/systemd/common/crowdsec/crowdsec-update.service create mode 100644 config/services/systemd/common/crowdsec/crowdsec-update.timer create mode 100644 config/services/systemd/common/crowdsec/etc/config.yaml.j2 create mode 100644 config/services/systemd/common/crowdsec/etc/local_api_credentials.yaml.j2 create mode 100644 config/services/systemd/common/kopia/kopia-backup.service.j2 create mode 100644 config/services/systemd/common/kopia/kopia-backup.timer.j2 create mode 100644 config/services/systemd/common/kopia/kopia.env.j2 create mode 100644 config/services/systemd/fw/bind/etc/named.conf.j2 create mode 100644 config/services/systemd/fw/bind/lib/db.1.00df.ip6.arpa create mode 100644 config/services/systemd/fw/bind/lib/db.1.168.192.in-addr.arpa create mode 100644 
config/services/systemd/fw/bind/lib/db.10.00df.ip6.arpa create mode 100644 config/services/systemd/fw/bind/lib/db.10.168.192.in-addr.arpa create mode 100644 config/services/systemd/fw/bind/lib/db.ilnmors.com create mode 100644 config/services/systemd/fw/bind/lib/db.ilnmors.internal create mode 100644 config/services/systemd/fw/blocky/blocky.service create mode 100644 config/services/systemd/fw/blocky/etc/config.yaml.j2 create mode 100644 config/services/systemd/fw/chrony/local-acl.conf.j2 create mode 100644 config/services/systemd/fw/ddns/ddns.service create mode 100644 config/services/systemd/fw/ddns/ddns.sh create mode 100644 config/services/systemd/fw/ddns/ddns.timer create mode 100644 config/services/systemd/fw/kea/kea-dhcp4.conf.j2 create mode 100644 config/services/systemd/fw/suricata/etc/disable.conf create mode 100644 config/services/systemd/fw/suricata/etc/enable.conf create mode 100644 config/services/systemd/fw/suricata/etc/local.rules create mode 100644 config/services/systemd/fw/suricata/etc/suricata.yaml.j2 create mode 100644 config/services/systemd/fw/suricata/suricata-update.service create mode 100644 config/services/systemd/fw/suricata/suricata-update.timer create mode 100644 config/services/systemd/vmm/libvirt/seeds/user-data.j2 create mode 100644 config/services/systemd/vmm/libvirt/services/app.service create mode 100644 config/services/systemd/vmm/libvirt/services/auth.service create mode 100644 config/services/systemd/vmm/libvirt/services/fw.service create mode 100644 config/services/systemd/vmm/libvirt/services/infra.service create mode 100644 config/services/systemd/vmm/libvirt/xml/networks/lan-net.xml create mode 100644 config/services/systemd/vmm/libvirt/xml/networks/wan-net.xml create mode 100644 config/services/systemd/vmm/libvirt/xml/storages/images-pool.xml create mode 100644 config/services/systemd/vmm/libvirt/xml/storages/seeds-pool.xml create mode 100644 config/services/systemd/vmm/libvirt/xml/vms/vms.xml.j2 create mode 100644 
data/bin/.gitkeep create mode 100644 data/create_all_structure.sh create mode 100644 data/ilnmors_root_ca.crt create mode 100644 data/images/.gitkeep create mode 100644 data/vmm_init/grub.d/iommu.cfg create mode 100644 data/vmm_init/modprobe.d/vfio.conf create mode 100644 data/vmm_init/network/00-vmm-eth0.link create mode 100644 data/vmm_init/network/01-vmm-eth1.link create mode 100644 data/vmm_init/network/10-vmm-br0.netdev create mode 100644 data/vmm_init/network/11-vmm-br1.netdev create mode 100644 data/vmm_init/network/12-vmm-vlan1.netdev create mode 100644 data/vmm_init/network/13-vmm-vlan10.netdev create mode 100644 data/vmm_init/network/14-vmm-vlan20.netdev create mode 100644 data/vmm_init/network/20-vmm-eth0.network create mode 100644 data/vmm_init/network/21-vmm-eth1.network create mode 100644 data/vmm_init/network/22-vmm-br0.network create mode 100644 data/vmm_init/network/23-vmm-br1.network create mode 100644 data/vmm_init/network/24-vmm-vlan1.network create mode 100644 data/vmm_init/network/25-vmm-vlan10.network create mode 100644 data/vmm_init/nftables.conf create mode 100644 data/vmm_init/ssh/local_ssh_ca.pub create mode 100644 data/vmm_init/ssh/sshd_config.d/prohibit_root.conf create mode 100644 data/vmm_init/ssh/sshd_config.d/ssh_ca.conf create mode 100644 data/vmm_init/sysctl.d/bridge.conf create mode 100644 data/volumes/.gitkeep create mode 100644 docs/adr/001-architecture.md create mode 100644 docs/adr/002-network.md create mode 100644 docs/adr/003-pki.md create mode 100644 docs/adr/004-dns.md create mode 100644 docs/adr/005-ids-ips.md create mode 100644 docs/adr/006-secrets.md create mode 100644 docs/adr/007-backup.md create mode 100644 docs/adr/008-passthrough.md create mode 100644 docs/adr/009-isolation.md create mode 100644 docs/adr/010-provisioning.md create mode 100644 docs/adr/011-tls-communication.md create mode 100644 docs/adr/012-alerting.md create mode 100644 docs/archives/2025-06/on-premise.txt create mode 100644 
docs/archives/2025-06/홈 서버 구축 계획.txt create mode 100644 docs/archives/2025-12/01_plans/01_01_plans.md create mode 100644 docs/archives/2025-12/01_plans/01_02_milestone.md create mode 100644 docs/archives/2025-12/02_theory/02_01_dns.md create mode 100644 docs/archives/2025-12/02_theory/02_02_dhcp.md create mode 100644 docs/archives/2025-12/02_theory/02_03_pki.md create mode 100644 docs/archives/2025-12/02_theory/02_04_tls.md create mode 100644 docs/archives/2025-12/02_theory/02_05_sso.md create mode 100644 docs/archives/2025-12/02_theory/02_06_email.md create mode 100644 docs/archives/2025-12/03_common/03_01_debian_configuration.md create mode 100644 docs/archives/2025-12/03_common/03_02_iptables.md create mode 100644 docs/archives/2025-12/03_common/03_03_podman.md create mode 100644 docs/archives/2025-12/03_common/03_04_crowdsec.md create mode 100644 docs/archives/2025-12/03_common/03_05_redis.md create mode 100644 docs/archives/2025-12/03_common/03_06_btrfs.md create mode 100644 docs/archives/2025-12/04_hypervisor/04_01_hypervisor.md create mode 100644 docs/archives/2025-12/05_firewall/05_01_opnsense_vm.md create mode 100644 docs/archives/2025-12/05_firewall/05_02_opnsense_general.md create mode 100644 docs/archives/2025-12/05_firewall/05_03_opnsense_interface.md create mode 100644 docs/archives/2025-12/05_firewall/05_04_opnsense_rules.md create mode 100644 docs/archives/2025-12/05_firewall/05_05_opnsense_suricata.md create mode 100644 docs/archives/2025-12/05_firewall/05_06_opnsense_acme.md create mode 100644 docs/archives/2025-12/05_firewall/05_07_opnsense_kea.md create mode 100644 docs/archives/2025-12/06_network/06_01_net_vm.md create mode 100644 docs/archives/2025-12/06_network/06_02_net_ddns.md create mode 100644 docs/archives/2025-12/06_network/06_03_net_bind.md create mode 100644 docs/archives/2025-12/06_network/06_04_net_adguard_home.md create mode 100644 docs/archives/2025-12/06_network/06_05_net_kea.md create mode 100644 
docs/archives/2025-12/07_authorization/07_01_auth_vm.md create mode 100644 docs/archives/2025-12/07_authorization/07_02_auth_step-ca.md create mode 100644 docs/archives/2025-12/07_authorization/07_03_auth_main_caddy.md create mode 100644 docs/archives/2025-12/07_authorization/07_04_auth_authentik.md create mode 100644 docs/archives/2025-12/07_authorization/07_04_auth_lldap.md create mode 100644 docs/archives/2025-12/07_authorization/07_05_auth_authelia.md create mode 100644 docs/archives/2025-12/08_development/08_01_dev_vm.md create mode 100644 docs/archives/2025-12/08_development/08_02_dev_postgresql.md create mode 100644 docs/archives/2025-12/08_development/08_03_dev_sidecar_caddy.md create mode 100644 docs/archives/2025-12/08_development/08_04_dev_code-server.md create mode 100644 docs/archives/2025-12/09_application/09_01_app_vm.md create mode 100644 docs/archives/2025-12/console.md create mode 100644 docs/archives/2025-12/scripts.md create mode 100644 docs/notes/.gitkeep create mode 100644 docs/runbook/00-operate.md create mode 100644 docs/runbook/01-windows.md create mode 100644 docs/runbook/02-certificates.md create mode 100644 docs/runbook/03-wireguard.md create mode 100644 docs/runbook/04-hypervisor.md create mode 100644 docs/runbook/05-hardwares.md create mode 100644 docs/runbook/06-kopia.md create mode 100644 docs/runbook/07-git.md create mode 100644 docs/services/app/igpu_firmware.md create mode 100644 docs/services/common/alloy.md create mode 100644 docs/services/common/caddy.md create mode 100644 docs/services/common/crowdsec.md create mode 100644 docs/services/common/kopia.md create mode 100644 docs/services/console/git.md create mode 100644 docs/services/fw/kea.md create mode 100644 docs/services/infra/ca.md create mode 100644 docs/services/infra/grafana.md create mode 100644 docs/services/infra/ldap.md create mode 100644 docs/services/infra/loki.md create mode 100644 docs/services/infra/postgresql.md create mode 100644 
docs/services/infra/prometheus.md create mode 100644 docs/services/systemd/systemd-networkd.md create mode 100644 docs/services/systemd/systemd-quadlet.md create mode 100644 docs/services/vmm/libvirt/cloud-init.md create mode 100644 docs/services/vmm/libvirt/undefine.md create mode 100644 docs/specifications/environments.md create mode 100644 docs/specifications/hardwares.md create mode 100644 docs/specifications/matrix.md create mode 100644 docs/theories/network/dhcp.md create mode 100644 docs/theories/network/dns.md create mode 100644 docs/theories/network/email.md create mode 100644 docs/theories/network/link-local.md create mode 100644 docs/theories/network/tls.md create mode 100644 docs/theories/pki/pki.md create mode 100644 docs/theories/pki/sso.md create mode 100644 docs/theories/virtualization/passthrough.md diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..bae35de --- /dev/null +++ b/.gitignore @@ -0,0 +1,7 @@ +data/bin/* +data/volumes/* +data/images/* +docs/archives/textfiles/ +docs/notes/* +*.sql +!.gitkeep diff --git a/README.md b/README.md new file mode 100644 index 0000000..b0050a6 --- /dev/null +++ b/README.md @@ -0,0 +1,26 @@ +# ilnmors homelab README + +This homelab project implements single-node On-premise IaaS system. The homelab contains virtual machines which are divided by their roles, such as private firewall, DNS, PKI, LDAP and database, SSO\(OIDC\). The standard domain is used to implement this system without specific vendors. All components are defined as code and initiated by IaC \(Ansible\) except hypervisor initial configuration. 
+ +## RTO times +- Feb/25/2026 - Reprovisioning Hypervisor and vms + - RTO: 1 hour 30 min - verified + - Manual install and set vmm: 20 min + - Create and reprovision fw including services: 15 min + - Create and reprovision infra including services: 20 min + - Create and reprovision auth including services: 10 min + - Create and reprovision app except services: 10 min + - Intermediate tasks (ACME issuance, DNS propagation, etc.): 15 min + +- Mar/5/2026 - Reprovisioning Hardware and Hypervisor and vms + - RTO: 2 hour 20 min + - console: 15min - verified + - certificate: 0 min \(When it needs to be created, RTO will be 20 min) - not verified + - wireguard: 0 min \(When it needs to be created, RTO will be 1 min) - not verified + - hypervisor\(+fw\): 45 min - verified + - switch: 1 min - verified + - dsm: 30 min - verified + - kopia: 0 min \(When it needs to be created, RTO will be 10 min) - verified + - Extra vms: 30 min - verified + - Etc: 30 min + diff --git a/ansible/ansible.cfg b/ansible/ansible.cfg new file mode 100644 index 0000000..378d462 --- /dev/null +++ b/ansible/ansible.cfg @@ -0,0 +1,57 @@ +[defaults] + +# (boolean) This controls whether an Ansible playbook should prompt for a login password. If using SSH keys for authentication, you probably do not need to change this setting. +ask_pass=False + +# (boolean) This controls whether an Ansible playbook should prompt for a vault password. +ask_vault_pass=True + +# (pathlist) Comma-separated list of Ansible inventory sources +inventory=./inventory + +# (pathspec) Colon-separated paths in which Ansible will search for Roles. +roles_path=./roles + +# (string) Set the main callback used to display Ansible output. You can only have one at a time. +# You can have many other callbacks, but just one can be in charge of stdout. +# See :ref:`callback_plugins` for a list of available options. 
+stdout_callback=default + +# (boolean) Set this to "False" if you want to avoid host key checking by the underlying connection plugin Ansible uses to connect to the host. +# Please read the documentation of the specific connection plugin used for details. +host_key_checking=True + +# (string) Path to the Python interpreter to be used for module execution on remote targets, or an automatic discovery mode. Supported discovery modes are ``auto`` (the default), ``auto_silent``, ``auto_legacy``, and ``auto_legacy_silent``. All discovery modes match against an ordered list of well-known Python interpreter locations. The fallback behavior will issue a warning that the interpreter should be set explicitly (since interpreters installed later may change which one is used). This warning behavior can be disabled by setting ``auto_silent``. The ``auto_legacy`` modes are deprecated and behave the same as their respective ``auto`` modes. They exist for backward-compatibility with older Ansible releases that always defaulted to ``/usr/bin/python3``, which will use that interpreter if present. +interpreter_python=auto_silent + +# (path) A number of non-playbook CLIs have a ``--playbook-dir`` argument; this sets the default value for it. +playbook_dir=./playbooks + +# (bool) This controls whether a failed Ansible playbook should create a .retry file. +retry_files_enabled=False + +# (boolean) This option controls if notified handlers run on a host even if a failure occurs on that host. +# When false, the handlers will not run if a failure has occurred on a host. +# This can also be set per play or on the command line. See Handlers and Failure for more details. +force_handlers=True + +[privilege_escalation] +# (boolean) Toggles the use of privilege escalation, allowing you to 'become' another user after login. +become=True + +# (boolean) Toggle to prompt for privilege escalation password. 
+become_ask_pass=False + +# (string) Privilege escalation method to use when `become` is enabled.; +become_method=sudo + +# (string) The user your login/remote user 'becomes' when using privilege escalation, most systems will use 'root' when no user is specified.; +become_user=root + +[connection] +# (boolean) This is a global option, each connection plugin can override either by having more specific options or not supporting pipelining at all. +# Pipelining, if supported by the connection plugin, reduces the number of network operations required to execute a module on the remote server, by executing many Ansible modules without actual file transfer. +# It can result in a very significant performance improvement when enabled. +# However this conflicts with privilege escalation (become). For example, when using 'sudo:' operations you must first disable 'requiretty' in /etc/sudoers on all managed hosts, which is why it is disabled by default. +# This setting will be disabled if ``ANSIBLE_KEEP_REMOTE_FILES`` is enabled. 
+pipelining=True diff --git a/ansible/convention.yaml b/ansible/convention.yaml new file mode 100644 index 0000000..8c2cb32 --- /dev/null +++ b/ansible/convention.yaml @@ -0,0 +1,39 @@ +--- +# `""` should be used for every string value +# except name, boolean, variable (environment) name, number (only for decimal) +- name: Convention task + delegate_to: "host" + delegate_facts: true + environment: + env: "environment" + ansible.builtin.file: + # Identifier: name:, src, dest, url + path: "/path/of/example" + # State + state: "directory" + owner: "root" + group: "root" + mode: "0640" + # optional data + recurse: true + loop: + - "list" + loop_control: + label: "{{ item }}" + become: true + # become_user: "root" + when: condition.stat.exists + changed_when: condition.rc != 0 + failed_when: condition.rc == 0 + register: "convention_task" + notify: "notification_example" + listen: "notification_example" + no_log: true + run_once: true + tags: + - "always" + - "init" + - "upgrade" + - "update" + +# when: "'tags' is not in ansible_run_tags" diff --git a/ansible/inventory/group_vars/all.yaml b/ansible/inventory/group_vars/all.yaml new file mode 100644 index 0000000..b09553c --- /dev/null +++ b/ansible/inventory/group_vars/all.yaml @@ -0,0 +1,74 @@ +--- +# Global vars +ansible_ssh_private_key_file: "/etc/secrets/{{ hostvars['console']['node']['uid'] }}/id_console" + +# URL information, you can use {{ infra_uri['services'] | split(':') | first|last }} to separate domain and ports +infra_uri: + crowdsec: + domain: "crowdsec.ilnmors.internal" + ports: + https: "8080" + bind: + domain: "bind.ilnmors.internal" + ports: + dns: "53" + blocky: + domain: "blocky.ilnmors.internal" + ports: + https: "443" + dns: "53" + postgresql: + domain: "postgresql.ilnmors.internal" + ports: + tcp: "5432" # postgresql db connection port + ldap: + domain: "ldap.ilnmors.internal" + ports: + http: "17170" + ldaps: "636" + ca: + domain: "ca.ilnmors.internal" + ports: + https: "9000" + prometheus: + 
domain: "prometheus.ilnmors.internal" + ports: + https: "9090" + loki: + domain: "loki.ilnmors.internal" + ports: + https: "3100" + nas: + domain: "nas.ilnmors.internal" + ports: + https: "5001" + kopia: + domain: "nas.ilnmors.internal" + ports: + https: "51515" + +version: + packages: + sops: "3.12.1" + step: "0.29.0" + kopia: "0.22.3" + blocky: "0.28.2" + alloy: "1.13.0" + # telegraf: "1.37.1" + containers: + # common + caddy: "2.10.2" + # infra + step: "0.29.0" + ldap: "v0.6.2" + x509-exporter: "3.19.1" + prometheus: "v3.9.1" + loki: "3.6.5" + grafana: "12.3.3" + ## Postgresql + postgresql: "18.2" + # For immich - https://github.com/immich-app/base-images/blob/main/postgres/versions.yaml +# pgvector: "v0.8.1" + vectorchord: "0.5.3" + # Auth + authelia: "4.39.15" diff --git a/ansible/inventory/group_vars/hypervisor.yaml b/ansible/inventory/group_vars/hypervisor.yaml new file mode 100644 index 0000000..27793fe --- /dev/null +++ b/ansible/inventory/group_vars/hypervisor.yaml @@ -0,0 +1,7 @@ +--- +node: + name: "vmm" + uid: 2000 + home_path: "/home/vmm" + ssh_san: "vmm,vmm_init,vmm.ilnmors.internal,init.vmm.ilnmors.internal" + local_san: "localhost vmm.ilnmors.internal" diff --git a/ansible/inventory/host_vars/app.yaml b/ansible/inventory/host_vars/app.yaml new file mode 100644 index 0000000..13e4f51 --- /dev/null +++ b/ansible/inventory/host_vars/app.yaml @@ -0,0 +1,41 @@ +--- +# Node Factors +node: + name: "app" + uid: 2004 + home_path: "/home/app" + ssh_san: "app,app.ilnmors.internal" + local_san: "localhost app.ilnmors.internal" +# VM Factors +vm: + name: "app" + cpu: 4 + shares: 1024 + memory: 16 + storage: 256 + lan_mac: "0a:49:6e:4d:03:00" + lan_net: "lan-net" + lan_group: "vlan10-access" + # PCIe passthrough address + # result of `lspci | grep -i -e "sata controller" -e "vga"` and parse it. 
+ # Ex) 04:00.0 > domain: "0x0000", bus: "0x04", slot: "0x00", function: "0x0" + pass_through: + igpu: + address: "0000:00:02.0" + domain: "0x0000" + bus: "0x00" + slot: "0x02" + function: "0x0" + sata_controller: # Additional SATA Controller + address: "0000:04:00.0" + domain: "0x0000" + bus: "0x04" + slot: "0x00" + function: "0x0" + +# BTRFS configuration for hdd which is passthroughed +storage: + btrfs: + label: "APP_DATA" + level: "raid10" + mount_point: "/home/app/data" diff --git a/ansible/inventory/host_vars/auth.yaml b/ansible/inventory/host_vars/auth.yaml new file mode 100644 index 0000000..aee9496 --- /dev/null +++ b/ansible/inventory/host_vars/auth.yaml @@ -0,0 +1,18 @@ +--- +# Node Factors +node: + name: "auth" + uid: 2003 + home_path: "/home/auth" + ssh_san: "auth,auth.ilnmors.internal" + local_san: "localhost auth.ilnmors.internal" +# VM Factors +vm: + name: "auth" + cpu: 2 + shares: 512 + memory: 2 + storage: 64 + lan_mac: "0a:49:6e:4d:02:00" + lan_net: "lan-net" + lan_group: "vlan10-access" diff --git a/ansible/inventory/host_vars/console.yaml b/ansible/inventory/host_vars/console.yaml new file mode 100644 index 0000000..fc46ece --- /dev/null +++ b/ansible/inventory/host_vars/console.yaml @@ -0,0 +1,25 @@ +--- +# Secret management +age_key: !vault | + $ANSIBLE_VAULT;1.1;AES256 + 32343637656262323931313061323861393737663736626538396165613563323531316639346637 + 3766363134663963396634353764323166353936626633300a356338363036373165363335333665 + 38316638386661623265306538303739616638316565373864316364623539383736343366646463 + 3464613666663836310a666531386136326439616637393035333534303661373433343830323665 + 66613736613133616439393163653166306261376231646163323266616431623531313964326132 + 33653638373537323363316363646534323362353836373665636265663939353862663532313230 + 30643038313138626464626161373835646665326334393834616234643931656536343130316238 + 61656264643830616639 +# Node Factors +node: + name: "console" + uid: 2999 + home_path: "/home/console" 
+ workspace_path: "{{ node.home_path }}/workspace" + homelab_path: "{{ node.home_path }}/workspace/homelab" + data_path: "{{ node.homelab_path }}/data" + config_path: "{{ node.homelab_path }}/config" + ssh_san: "console,console.ilnmors.internal" + ssh_users: "vmm,fw,infra,auth,app" + local_san: "localhost console.ilnmors.internal" +# ansible_python_interpreter: "{{ ansible_playbook_python }}" diff --git a/ansible/inventory/host_vars/fw.yaml b/ansible/inventory/host_vars/fw.yaml new file mode 100644 index 0000000..af0bdeb --- /dev/null +++ b/ansible/inventory/host_vars/fw.yaml @@ -0,0 +1,98 @@ +--- +# Node Factors +node: + name: "fw" + uid: 2001 + home_path: "/home/fw" + ssh_san: "fw,fw.ilnmors.internal" + local_san: "localhost fw.ilnmors.internal" + +# VM Factors +vm: + name: "fw" + cpu: 2 + shares: 2048 + memory: 4 + storage: 64 + wan_mac: "0a:49:6e:4d:00:00" + lan_mac: "0a:49:6e:4d:00:01" + wan_net: "wan-net" + lan_net: "lan-net" + lan_group: "vlan-trunk" + +# Network Factors +# An LLA is like a MAC address for L3 (network layer). Usually, a subnet is used to separate networks. 
+network4: + subnet: + client: "192.168.1.0/24" + server: "192.168.10.0/24" + user: "192.168.20.0/24" + wg: "192.168.99.0/24" + lla: "169.254.0.0/16" + # You can use "{{ hostvars['fw']['network4']['firewall'].values() | join(', ') }}" for all + firewall: + client: "192.168.1.1" + server: "192.168.10.1" + user: "192.168.20.1" + wg: "192.168.99.1" + blocky: + server: "192.168.10.2" + bind: + server: "192.168.10.3" + console: + client: "192.168.1.20" + wg: "192.168.99.20" + vmm: + client: "192.168.1.10" + server: "192.168.10.10" + infra: + server: "192.168.10.11" + auth: + server: "192.168.10.12" + app: + server: "192.168.10.13" + switch: + client: "192.168.1.2" + nas: + client: "192.168.1.11" + printer: + client: "192.168.1.101" + +network6: + subnet: + client: "fd00:1::/64" + server: "fd00:10::/64" + wg: "fd00:99::/64" + lla: "fe80::/10" + firewall: + client: "fd00:1::1" + server: "fd00:10::1" + wg: "fd00:99::1" + blocky: + server: "fd00:10::2" + bind: + server: "fd00:10::3" + console: + client: "fd00:1::20" + wg: "fd00:99::20" + vmm: + client: "fd00:1::10" + server: "fd00:10::10" + infra: + server: "fd00:10::11" + auth: + server: "fd00:10::12" + app: + server: "fd00:10::13" + switch: + client: "fd00:1::2" + nas: + client: "fd00:1::11" + printer: + client: "fd00:1::101" + +# Suricata Factors +# suricata_home_net: '[10.0.0.0/8,172.16.0.0/12,192.168.0.0/16]' +suricata: + home_net: '[10.0.0.0/8,172.16.0.0/12,192.168.0.0/16,fd00::/8,fe80::/8]' + interfaces: ["wan", "client", "server", "user"] diff --git a/ansible/inventory/host_vars/infra.yaml b/ansible/inventory/host_vars/infra.yaml new file mode 100644 index 0000000..e7243e2 --- /dev/null +++ b/ansible/inventory/host_vars/infra.yaml @@ -0,0 +1,19 @@ +--- +# Node Factors +node: + name: "infra" + uid: 2002 + home_path: "/home/infra" + ssh_san: "infra,infra.ilnmors.internal" + local_san: "localhost infra.ilnmors.internal" + +# VM Factors +vm: + name: "infra" + cpu: 2 + shares: 1024 + memory: 6 + storage: 256 + lan_mac: 
"0a:49:6e:4d:01:00" + lan_net: "lan-net" + lan_group: "vlan10-access" diff --git a/ansible/inventory/hosts.ini b/ansible/inventory/hosts.ini new file mode 100644 index 0000000..227ae78 --- /dev/null +++ b/ansible/inventory/hosts.ini @@ -0,0 +1,14 @@ +# --- console --- +console ansible_connection=local ansible_user=console + +# --- Hypervisor --- +[hypervisor] +vmm_init ansible_host=init.vmm.ilnmors.internal ansible_user=vmm +vmm ansible_host=vmm.ilnmors.internal ansible_user=vmm + +# --- Virtual Machines --- +[vms] +fw ansible_host=fw.ilnmors.internal ansible_user=fw +infra ansible_host=infra.ilnmors.internal ansible_user=infra +auth ansible_host=auth.ilnmors.internal ansible_user=auth +app ansible_host=app.ilnmors.internal ansible_user=app \ No newline at end of file diff --git a/ansible/playbooks/app/site.yaml b/ansible/playbooks/app/site.yaml new file mode 100644 index 0000000..3df2a37 --- /dev/null +++ b/ansible/playbooks/app/site.yaml @@ -0,0 +1,185 @@ +--- +- name: Load secret values + hosts: "console" + gather_facts: false + become: false + tasks: + - name: Load secret from secrets.yaml + ansible.builtin.include_role: + name: "console" + tasks_from: "node/load_secret_vars" + apply: + tags: ["always"] + tags: ["always"] + +- name: Site app + hosts: "app" + gather_facts: false + become: false + pre_tasks: + - name: Set become password + ansible.builtin.set_fact: + ansible_become_pass: "{{ hostvars['console']['sudo']['password']['app'] }}" + tags: ["always"] + + tasks: + - name: Set timezone to Asia/Seoul + community.general.timezone: + name: Asia/Seoul + become: true + tags: ["init", "timezone"] + + - name: Deploy root_ca certificate + ansible.builtin.include_role: + name: "common" + tasks_from: "node/deploy_root_ca" + apply: + tags: ["init", "root_crt"] + tags: ["init", "root_crt"] + + - name: Deploy hosts file + ansible.builtin.include_role: + name: "common" + tasks_from: "node/deploy_hosts" + apply: + tags: ["init", "hosts"] + tags: ["init", "hosts"] + + - 
name: Create default directory + ansible.builtin.include_role: + name: "common" + tasks_from: "node/create_default_dir" + apply: + tags: ["init", "default_dir"] + tags: ["init", "default_dir"] + + - name: Set ssh host + ansible.builtin.include_role: + name: "common" + tasks_from: "node/set_ssh_host" + apply: + tags: ["init", "ssh_host"] + tags: ["init", "ssh_host"] + + - name: Set networkd + ansible.builtin.include_role: + name: "common" + tasks_from: "node/set_networkd" + apply: + tags: ["init", "networkd"] + tags: ["init", "networkd"] + + - name: Set resolved + ansible.builtin.include_role: + name: "common" + tasks_from: "node/set_resolved" + apply: + tags: ["init", "resolved"] + tags: ["init", "resolved"] + + - name: Update and upgrade apt + ansible.builtin.apt: + upgrade: "dist" + update_cache: true + cache_valid_time: 3600 + become: true + tags: ["init", "site", "upgrade-packages"] + + - name: Install common packages + ansible.builtin.apt: + name: + - "acl" + - "curl" + - "jq" + - "netcat-openbsd" + - "dbus-user-session" + state: "present" + become: true + tags: ["init", "install-packages"] + + - name: Set raid + ansible.builtin.include_role: + name: "app" + tasks_from: "node/set_raid" + apply: + tags: ["init", "raid"] + tags: ["init", "raid"] + + - name: Set linger + ansible.builtin.include_role: + name: "common" + tasks_from: "node/set_linger" + apply: + tags: ["init", "linger"] + tags: ["init", "linger"] + + - name: Set podman + ansible.builtin.include_role: + name: "common" + tasks_from: "services/set_podman" + apply: + tags: ["init", "podman"] + tags: ["init", "podman"] + + - name: Set nftables + ansible.builtin.include_role: + name: "common" + tasks_from: "node/set_nftables" + apply: + tags: ["init", "nftables"] + tags: ["init", "nftables"] + + - name: Set crowdsec + ansible.builtin.include_role: + name: "common" + tasks_from: "services/set_crowdsec" + apply: + tags: ["site", "crowdsec"] + tags: ["site", "crowdsec"] + + - name: Set alloy + 
ansible.builtin.include_role: + name: "common" + tasks_from: "services/set_alloy" + apply: + tags: ["init", "update", "alloy"] + tags: ["init", "update", "alloy"] + + - name: Set kopia + ansible.builtin.include_role: + name: "common" + tasks_from: "services/set_kopia" + apply: + tags: ["site", "kopia"] + tags: ["site", "kopia"] + + - name: Flush handlers right now + ansible.builtin.meta: "flush_handlers" + + # Only update iGPU firmware + - name: Install iGPU Firmware + ansible.builtin.apt: + name: + - "firmware-intel-graphics" + - "intel-media-va-driver-non-free" + update_cache: true + state: "present" + become: true + notify: + - "notification_update_initramfs" + - "notification_reboot_app" + tags: ["init"] + handlers: + - name: Update initramfs + ansible.builtin.command: + update-initramfs -u + become: true + changed_when: false + listen: "notification_update_initramfs" + ignore_errors: true # noqa: ignore-errors + - name: Reboot app vm + ansible.builtin.reboot: + reboot_timeout: 300 + become: true + listen: "notification_reboot_app" + ignore_errors: true # noqa: ignore-errors diff --git a/ansible/playbooks/auth/site.yaml b/ansible/playbooks/auth/site.yaml new file mode 100644 index 0000000..380d33c --- /dev/null +++ b/ansible/playbooks/auth/site.yaml @@ -0,0 +1,154 @@ +--- +- name: Load secret values + hosts: "console" + gather_facts: false + become: false + tasks: + - name: Load secret from secrets.yaml + ansible.builtin.include_role: + name: "console" + tasks_from: "node/load_secret_vars" + apply: + tags: ["always"] + tags: ["always"] + +- name: Site auth + hosts: "auth" + gather_facts: false + become: false + pre_tasks: + - name: Set become password + ansible.builtin.set_fact: + ansible_become_pass: "{{ hostvars['console']['sudo']['password']['auth'] }}" + tags: ["always"] + + tasks: + - name: Set timezone to Asia/Seoul + community.general.timezone: + name: Asia/Seoul + become: true + tags: ["init", "timezone"] + + - name: Deploy root_ca certificate + 
ansible.builtin.include_role: + name: "common" + tasks_from: "node/deploy_root_ca" + apply: + tags: ["init", "root_crt"] + tags: ["init", "root_crt"] + + - name: Deploy hosts file + ansible.builtin.include_role: + name: "common" + tasks_from: "node/deploy_hosts" + apply: + tags: ["init", "hosts"] + tags: ["init", "hosts"] + + - name: Create default directory + ansible.builtin.include_role: + name: "common" + tasks_from: "node/create_default_dir" + apply: + tags: ["init", "default_dir"] + tags: ["init", "default_dir"] + + - name: Set ssh host + ansible.builtin.include_role: + name: "common" + tasks_from: "node/set_ssh_host" + apply: + tags: ["init", "ssh_host"] + tags: ["init", "ssh_host"] + + - name: Set networkd + ansible.builtin.include_role: + name: "common" + tasks_from: "node/set_networkd" + apply: + tags: ["init", "networkd"] + tags: ["init", "networkd"] + + - name: Set resolved + ansible.builtin.include_role: + name: "common" + tasks_from: "node/set_resolved" + apply: + tags: ["init", "resolved"] + tags: ["init", "resolved"] + + - name: Update and upgrade apt + ansible.builtin.apt: + upgrade: "dist" + update_cache: true + cache_valid_time: 3600 + become: true + tags: ["init", "site", "upgrade-packages"] + + - name: Install common packages + ansible.builtin.apt: + name: + - "acl" + - "curl" + - "jq" + - "netcat-openbsd" + - "dbus-user-session" + state: "present" + become: true + tags: ["init", "site", "install-packages"] + + - name: Set linger + ansible.builtin.include_role: + name: "common" + tasks_from: "node/set_linger" + apply: + tags: ["init", "linger"] + tags: ["init", "linger"] + + - name: Set podman + ansible.builtin.include_role: + name: "common" + tasks_from: "services/set_podman" + apply: + tags: ["init", "podman"] + tags: ["init", "podman"] + + - name: Set nftables + ansible.builtin.include_role: + name: "common" + tasks_from: "node/set_nftables" + apply: + tags: ["init", "nftables"] + tags: ["init", "nftables"] + + - name: Set crowdsec + 
ansible.builtin.include_role: + name: "common" + tasks_from: "services/set_crowdsec" + apply: + tags: ["site", "crowdsec"] + tags: ["site", "crowdsec"] + + - name: Set caddy + ansible.builtin.include_role: + name: "common" + tasks_from: "services/set_caddy" + apply: + tags: ["site", "caddy"] + tags: ["site", "caddy"] + + - name: Set authelia + ansible.builtin.include_role: + name: "auth" + tasks_from: "services/set_authelia" + apply: + tags: ["site", "authelia"] + tags: ["site", "authelia"] + + - name: Set alloy + ansible.builtin.include_role: + name: "common" + tasks_from: "services/set_alloy" + apply: + tags: ["site", "alloy"] + tags: ["site", "alloy"] diff --git a/ansible/playbooks/console/site.yaml b/ansible/playbooks/console/site.yaml new file mode 100644 index 0000000..8f8a5f3 --- /dev/null +++ b/ansible/playbooks/console/site.yaml @@ -0,0 +1,132 @@ +--- +- name: Load secret values + hosts: "console" + gather_facts: false + become: false + tasks: + - name: Load secret from secrets.yaml + ansible.builtin.include_role: + name: "console" + tasks_from: "node/load_secret_vars" + apply: + tags: ["always"] + tags: ["always"] + +- name: Site console + hosts: "console" + gather_facts: false + become: false + pre_tasks: + - name: Set become password + ansible.builtin.set_fact: + ansible_become_pass: "{{ hostvars['console']['sudo']['password']['console'] }}" + tags: ["always"] + + tasks: + # init + - name: Set timezone to Asia/Seoul + community.general.timezone: + name: Asia/Seoul + become: true + tags: ["init", "timezone"] + + - name: Deploy root_ca certificate + ansible.builtin.include_role: + name: "common" + tasks_from: "node/deploy_root_ca" + apply: + tags: ["init", "root_crt"] + tags: ["init", "root_crt"] + + - name: Deploy hosts file + ansible.builtin.include_role: + name: "common" + tasks_from: "node/deploy_hosts" + apply: + tags: ["init", "hosts"] + tags: ["init", "hosts"] + + - name: Create default directory + ansible.builtin.include_role: + name: "common" + 
tasks_from: "node/create_default_dir" + apply: + tags: ["init", "default_dir"] + tags: ["init", "default_dir"] + + - name: Update and upgrade apt + ansible.builtin.apt: + upgrade: "dist" + update_cache: true + cache_valid_time: 3600 + become: true + tags: ["init", "site", "upgrade-packages"] + + - name: Set ssh client + ansible.builtin.include_role: + name: "console" + tasks_from: "node/set_ssh_client" + apply: + tags: ["init", "ssh_client"] + tags: ["init", "ssh_client"] + + - name: Check file permissions + ansible.builtin.file: + path: "{{ node['workspace_path'] }}/{{ item }}" + state: "directory" + owner: "{{ ansible_user }}" + group: "svadmins" + mode: "u=rwX,g=,o=" + recurse: true + loop: + - "homelab/ansible" + - "homelab/config" + - "homelab/docs" + - "university" + tags: ["init", "site", "file_permission"] + # kopia snapshot is mounted on homelab/data/volumes. + # NEVER CHANGE permissions and owners + + - name: Download vm cloud-init + ansible.builtin.get_url: + url: "https://cdimage.debian.org/images/cloud/trixie/latest/debian-13-generic-amd64.qcow2" + dest: "{{ node['data_path'] }}/images/debian-13-generic-amd64.qcow2" + owner: "console" + group: "svadmins" + mode: "0600" + tags: ["init", "site", "cloud-init-image"] + + - name: Install packages + ansible.builtin.apt: + name: + - "git" + - "gnupg" + - "acl" + - "curl" + - "jq" + - "cloud-image-utils" + - "logrotate" + - "nftables" + - "build-essential" + - "g++" + - "gcc" + - "fuse3" + state: "present" + become: true + tags: ["init", "site", "install-packages"] + + - name: Install CLI tools + ansible.builtin.include_role: + name: "console" + tasks_from: "services/set_cli_tools" + apply: + tags: ["init", "site", "tools"] + tags: ["init", "site", "tools"] + + - name: Install chromium with font + ansible.builtin.include_role: + name: "console" + tasks_from: "services/set_chromium" + apply: + tags: ["init", "site", "chromium"] + tags: ["init", "site", "chromium"] diff --git a/ansible/playbooks/fw/site.yaml 
b/ansible/playbooks/fw/site.yaml new file mode 100644 index 0000000..38333f6 --- /dev/null +++ b/ansible/playbooks/fw/site.yaml @@ -0,0 +1,190 @@ +--- +- name: Load secret values + hosts: "console" + gather_facts: false + become: false + tasks: + - name: Load secret from secrets.yaml + ansible.builtin.include_role: + name: "console" + tasks_from: "node/load_secret_vars" + apply: + tags: ["always"] + tags: ["always"] + +- name: Site fw + hosts: "fw" + gather_facts: false + become: false + pre_tasks: + - name: Set become password + ansible.builtin.set_fact: + ansible_become_pass: "{{ hostvars['console']['sudo']['password']['fw'] }}" + tags: ["always"] + + tasks: + - name: Set timezone to Asia/Seoul + community.general.timezone: + name: Asia/Seoul + become: true + tags: ["init", "timezone"] + + - name: Deploy root_ca certificate + ansible.builtin.include_role: + name: "common" + tasks_from: "node/deploy_root_ca" + apply: + tags: ["init", "root_crt"] + tags: ["init", "root_crt"] + + - name: Deploy hosts file + ansible.builtin.include_role: + name: "common" + tasks_from: "node/deploy_hosts" + apply: + tags: ["init", "hosts"] + tags: ["init", "hosts"] + + - name: Create default directory + ansible.builtin.include_role: + name: "common" + tasks_from: "node/create_default_dir" + apply: + tags: ["init", "default_dir"] + tags: ["init", "default_dir"] + + - name: Set ssh host + ansible.builtin.include_role: + name: "common" + tasks_from: "node/set_ssh_host" + apply: + tags: ["init", "ssh_host"] + tags: ["init", "ssh_host"] + + - name: Set networkd + ansible.builtin.include_role: + name: "common" + tasks_from: "node/set_networkd" + apply: + tags: ["init", "networkd"] + tags: ["init", "networkd"] + + - name: Set wireguard + ansible.builtin.include_role: + name: "common" + tasks_from: "node/set_wireguard" + apply: + tags: ["init", "wireguard"] + tags: ["init", "wireguard"] + + - name: Set resolved + ansible.builtin.include_role: + name: "common" + tasks_from: "node/set_resolved" 
+ apply: + tags: ["init", "resolved"] + tags: ["init", "resolved"] + + - name: Update and upgrade apt + ansible.builtin.apt: + upgrade: "dist" + update_cache: true + cache_valid_time: 3600 + become: true + tags: ["init", "site", "upgrade-packages"] + + - name: Install common packages + ansible.builtin.apt: + name: + - "acl" + - "curl" + - "jq" + - "wireguard-tools" + - "dnsutils" + - "conntrack" + - "logrotate" + - "netcat-openbsd" + - "dbus-user-session" + state: "present" + become: true + tags: ["init", "site", "install-packages"] + + - name: Set linger + ansible.builtin.include_role: + name: "common" + tasks_from: "node/set_linger" + apply: + tags: ["init", "linger"] + tags: ["init", "linger"] + + - name: Set chrony + ansible.builtin.include_role: + name: "fw" + tasks_from: "services/set_chrony" + apply: + tags: ["init", "chrony"] + tags: ["init", "chrony"] + + - name: Set ddns + ansible.builtin.include_role: + name: "fw" + tasks_from: "services/set_ddns" + apply: + tags: ["init", "ddns"] + tags: ["init", "ddns"] + + - name: Set nftables + ansible.builtin.include_role: + name: "common" + tasks_from: "node/set_nftables" + apply: + tags: ["init", "site", "nftables"] + tags: ["init", "site", "nftables"] + + - name: Set suricata + ansible.builtin.include_role: + name: "fw" + tasks_from: "services/set_suricata" + apply: + tags: ["site", "suricata"] + tags: ["site", "suricata"] + + - name: Set crowdsec + ansible.builtin.include_role: + name: "common" + tasks_from: "services/set_crowdsec" + apply: + tags: ["site", "crowdsec"] + tags: ["site", "crowdsec"] + + - name: Set bind + ansible.builtin.include_role: + name: "fw" + tasks_from: "services/set_bind" + apply: + tags: ["init", "update", "bind"] + tags: ["init", "update", "bind"] + + - name: Set blocky + ansible.builtin.include_role: + name: "fw" + tasks_from: "services/set_blocky" + apply: + tags: ["site", "blocky"] + tags: ["site", "blocky"] + + - name: Set kea + ansible.builtin.include_role: + name: "fw" + 
tasks_from: "services/set_kea" + apply: + tags: ["site", "kea"] + tags: ["site", "kea"] + + - name: Set alloy + ansible.builtin.include_role: + name: "common" + tasks_from: "services/set_alloy" + apply: + tags: ["site", "alloy"] + tags: ["site", "alloy"] diff --git a/ansible/playbooks/infra/site.yaml b/ansible/playbooks/infra/site.yaml new file mode 100644 index 0000000..b7895ba --- /dev/null +++ b/ansible/playbooks/infra/site.yaml @@ -0,0 +1,210 @@ +--- +- name: Load secret values + hosts: "console" + gather_facts: false + become: false + tasks: + - name: Load secret from secrets.yaml + ansible.builtin.include_role: + name: "console" + tasks_from: "node/load_secret_vars" + apply: + tags: ["always"] + tags: ["always"] + +- name: Site infra + hosts: infra + gather_facts: false + become: false + pre_tasks: + - name: Set become password + ansible.builtin.set_fact: + ansible_become_pass: "{{ hostvars['console']['sudo']['password']['infra'] }}" + tags: ["always"] + + tasks: + - name: Set timezone to Asia/Seoul + community.general.timezone: + name: Asia/Seoul + become: true + tags: ["init", "timezone"] + + - name: Deploy root_ca certificate + ansible.builtin.include_role: + name: "common" + tasks_from: "node/deploy_root_ca" + apply: + tags: ["init", "root_crt"] + tags: ["init", "root_crt"] + + - name: Deploy hosts file + ansible.builtin.include_role: + name: "common" + tasks_from: "node/deploy_hosts" + apply: + tags: ["init", "hosts"] + tags: ["init", "hosts"] + + - name: Create default directory + ansible.builtin.include_role: + name: "common" + tasks_from: "node/create_default_dir" + apply: + tags: ["init", "default_dir"] + tags: ["init", "default_dir"] + + - name: Set ssh host + ansible.builtin.include_role: + name: "common" + tasks_from: "node/set_ssh_host" + apply: + tags: ["init", "ssh_host"] + tags: ["init", "ssh_host"] + + - name: Set networkd + ansible.builtin.include_role: + name: "common" + tasks_from: "node/set_networkd" + apply: + tags: ["init", "networkd"] 
+ tags: ["init", "networkd"] + + - name: Set resolved + ansible.builtin.include_role: + name: "common" + tasks_from: "node/set_resolved" + apply: + tags: ["init", "resolved"] + tags: ["init", "resolved"] + + - name: Update and upgrade apt + ansible.builtin.apt: + upgrade: "dist" + update_cache: true + cache_valid_time: 3600 + become: true + tags: ["init", "site", "upgrade-packages"] + + - name: Install common packages + ansible.builtin.apt: + name: + - "acl" + - "curl" + - "jq" + - "netcat-openbsd" + - "dbus-user-session" + state: "present" + become: true + tags: ["init", "site", "install-packages"] + + - name: Set linger + ansible.builtin.include_role: + name: "common" + tasks_from: "node/set_linger" + apply: + tags: ["init", "linger"] + tags: ["init", "linger"] + + - name: Set podman + ansible.builtin.include_role: + name: "common" + tasks_from: "services/set_podman" + apply: + tags: ["init", "podman"] + tags: ["init", "podman"] + + - name: Set nftables + ansible.builtin.include_role: + name: "common" + tasks_from: "node/set_nftables" + apply: + tags: ["init", "nftables"] + tags: ["init", "nftables"] + + - name: Set crowdsec + ansible.builtin.include_role: + name: "common" + tasks_from: "services/set_crowdsec" + apply: + tags: ["site", "crowdsec"] + tags: ["site", "crowdsec"] + + - name: Set ca + ansible.builtin.include_role: + name: "infra" + tasks_from: "services/set_ca_server" + apply: + tags: ["site", "ca"] + tags: ["site", "ca"] + + - name: Set postgresql + ansible.builtin.include_role: + name: "infra" + tasks_from: "services/set_postgresql" + apply: + tags: ["site", "postgresql"] + tags: ["site", "postgresql"] + + - name: Set caddy + ansible.builtin.include_role: + name: "common" + tasks_from: "services/set_caddy" + apply: + tags: ["site", "caddy"] + tags: ["site", "caddy"] + + - name: Set ldap + ansible.builtin.include_role: + name: "infra" + tasks_from: "services/set_ldap" + apply: + tags: ["site", "ldap"] + tags: ["site", "ldap"] + + - name: Set x509 
exporter + ansible.builtin.include_role: + name: "infra" + tasks_from: "services/set_x509-exporter" + apply: + tags: ["site", "x509-exporter"] + tags: ["site", "x509-exporter"] + + - name: Set prometheus + ansible.builtin.include_role: + name: "infra" + tasks_from: "services/set_prometheus" + apply: + tags: ["site", "prometheus"] + tags: ["site", "prometheus"] + + - name: Set loki + ansible.builtin.include_role: + name: "infra" + tasks_from: "services/set_loki" + apply: + tags: ["site", "loki"] + tags: ["site", "loki"] + + - name: Set alloy + ansible.builtin.include_role: + name: "common" + tasks_from: "services/set_alloy" + apply: + tags: ["site", "alloy"] + tags: ["site", "alloy"] + + - name: Set grafana + ansible.builtin.include_role: + name: "infra" + tasks_from: "services/set_grafana" + apply: + tags: ["site", "grafana"] + tags: ["site", "grafana"] + + - name: Set kopia + ansible.builtin.include_role: + name: "common" + tasks_from: "services/set_kopia" + apply: + tags: ["site", "kopia"] + tags: ["site", "kopia"] diff --git a/ansible/playbooks/vmm/create_vm.yaml b/ansible/playbooks/vmm/create_vm.yaml new file mode 100644 index 0000000..28695eb --- /dev/null +++ b/ansible/playbooks/vmm/create_vm.yaml @@ -0,0 +1,61 @@ +--- +- name: Load secret values + hosts: "console" + gather_facts: false + become: false + tasks: + - name: Load secret from secrets.yaml + ansible.builtin.include_role: + name: "console" + tasks_from: "node/load_secret_vars" + apply: + tags: ["always"] + tags: ["always"] + +- name: Create vm + hosts: vmm_init + gather_facts: false + become: false + vars: + valid_vm_names: + - "fw" + - "infra" + - "auth" + - "app" + tasks: + - name: Set vm name depends on tags + ansible.builtin.set_fact: + target_vm: "{{ ansible_run_tags[0] }}" + when: (ansible_run_tags | length) == 1 + + - name: Check VM name + ansible.builtin.fail: + msg: "invalid vm name. 
vm name should be included in \"{{ valid_vm_names | join(', ') }}\"" + when: (target_vm | default("none")) not in valid_vm_names + + - name: Set become password + ansible.builtin.set_fact: + ansible_become_pass: "{{ hostvars['console']['sudo']['password']['vmm'] }}" + + - name: Create seed file + ansible.builtin.include_role: + name: "vmm" + tasks_from: "vm/create_seed" + apply: + delegate_to: "console" + tags: ["always"] + + - name: Deploy vm init files + ansible.builtin.include_role: + name: "vmm" + tasks_from: "vm/deploy_vm_init" + apply: + tags: ["always"] + + - name: Register vm + ansible.builtin.include_role: + name: "vmm" + tasks_from: "vm/register_vm" + apply: + tags: ["always"] + tags: ["always"] diff --git a/ansible/playbooks/vmm/site.yaml b/ansible/playbooks/vmm/site.yaml new file mode 100644 index 0000000..2963636 --- /dev/null +++ b/ansible/playbooks/vmm/site.yaml @@ -0,0 +1,165 @@ +--- +- name: Set host and load secret values + hosts: "console" + gather_facts: false + become: false + tasks: + - name: Set host as vmm + ansible.builtin.set_fact: + vmm_host: "vmm" + when: "'init' is not in ansible_run_tags" + tags: ["always"] + + - name: Load secret from secrets.yaml + ansible.builtin.include_role: + name: "console" + tasks_from: "node/load_secret_vars" + apply: + tags: ["always"] + tags: ["always"] + + +- name: Site vmm + hosts: "{{ hostvars['console']['vmm_host'] | default('vmm_init') }}" + gather_facts: false + become: false + pre_tasks: + - name: Set become password + ansible.builtin.set_fact: + ansible_become_pass: "{{ hostvars['console']['sudo']['password']['vmm'] }}" + tags: ["always"] + tasks: + # init + - name: Set timezone to Asia/Seoul + community.general.timezone: + name: Asia/Seoul + become: true + tags: ["init", "timezone"] + + - name: Deploy root_ca certificate + ansible.builtin.include_role: + name: "common" + tasks_from: "node/deploy_root_ca" + apply: + tags: ["init", "root_crt"] + tags: ["init", "root_crt"] + + - name: Deploy hosts file 
+ ansible.builtin.include_role: + name: "common" + tasks_from: "node/deploy_hosts" + apply: + tags: ["init", "hosts"] + tags: ["init", "hosts"] + + - name: Create default directory + ansible.builtin.include_role: + name: "common" + tasks_from: "node/create_default_dir" + apply: + tags: ["init", "default_dir"] + tags: ["init", "default_dir"] + + - name: Set ssh host + ansible.builtin.include_role: + name: "common" + tasks_from: "node/set_ssh_host" + apply: + tags: ["init", "ssh_host"] + tags: ["init", "ssh_host"] + + - name: Set networkd + ansible.builtin.include_role: + name: "common" + tasks_from: "node/set_networkd" + apply: + tags: ["init", "networkd"] + tags: ["init", "networkd"] + + - name: Set resolved + ansible.builtin.include_role: + name: "common" + tasks_from: "node/set_resolved" + apply: + tags: ["init", "resolved"] + tags: ["init", "resolved"] + + - name: Set timesyncd + ansible.builtin.include_role: + name: "common" + tasks_from: "node/set_timesyncd" + apply: + tags: ["init", "timesyncd"] + tags: ["init", "timesyncd"] + + - name: Set linger # vmm has dbus-user-session in it + ansible.builtin.include_role: + name: "common" + tasks_from: "node/set_linger" + apply: + tags: ["init", "linger"] + tags: ["init", "linger"] + + - name: Set libvirt + ansible.builtin.include_role: + name: "vmm" + tasks_from: "node/set_libvirt" + apply: + tags: ["init", "libvirt"] + tags: ["init", "libvirt"] + + - name: Set nftables + ansible.builtin.include_role: + name: "common" + tasks_from: "node/set_nftables" + apply: + tags: ["init", "site", "nftables"] + tags: ["init", "site", "nftables"] + + - name: Update and upgrade apt # init roles has no internet (airgap statement) + ansible.builtin.apt: + update_cache: true + upgrade: "dist" + cache_valid_time: 3600 + when: inventory_hostname != "vmm_init" + become: true + tags: ["site", "upgrade-packages"] + + - name: Set crowdsec + ansible.builtin.include_role: + name: "common" + tasks_from: "services/set_crowdsec" + apply: + tags: 
["site", "crowdsec"] + when: inventory_hostname != "vmm_init" + tags: ["site", "crowdsec"] + + - name: Set alloy + ansible.builtin.include_role: + name: "common" + tasks_from: "services/set_alloy" + apply: + tags: ["site", "alloy"] + when: inventory_hostname != "vmm_init" + tags: ["site", "alloy"] + + - name: Install packages # they are already installed in prerequisite step + ansible.builtin.apt: + name: + - acl + - curl + - jq + - crowdsec + - systemd-resolved + - qemu-system-x86 + - ksmtuned + - libvirt-daemon-system + - virt-top + - python3 + - python3-apt + - python3-libvirt + - python3-lxml + state: "present" + become: true + when: "'init' is not in ansible_run_tags" + tags: ["never", "install-packages"] diff --git a/ansible/roles/app/tasks/node/set_raid.yaml b/ansible/roles/app/tasks/node/set_raid.yaml new file mode 100644 index 0000000..e10992f --- /dev/null +++ b/ansible/roles/app/tasks/node/set_raid.yaml @@ -0,0 +1,70 @@ +--- +- name: Check btrfs installation + ansible.builtin.shell: | + command -v btrfs + become: true # btrfs is located in /usr/sbin, which means root permission is needed. 
+ changed_when: false + failed_when: false + register: "is_btrfs_installed" + ignore_errors: true + +- name: Install btrfs + ansible.builtin.apt: + name: "btrfs-progs" + state: "present" + become: true + when: is_btrfs_installed.rc != 0 + +- name: Set hard disk path + ansible.builtin.shell: | + set -o pipefail + ls -1 /dev/disk/by-path/*{{ vm['pass_through']['sata_controller']['address'] }}* | \ + grep -v '\.0$' | \ + sort + changed_when: false + register: "hdd_path_list" + +- name: Check app_hdd filesystem already exists + ansible.builtin.command: | + blkid -L {{ storage['btrfs']['label'] }} + register: is_app_data + changed_when: false + failed_when: false + become: true + +- name: Check disk number + ansible.builtin.fail: + msg: "Below 4 disks for RAID10, found {{ hdd_path_list.stdout_lines | length }}" + when: (hdd_path_list.stdout_lines | length) < 4 + +- name: Set btrfs raid10 volume + ansible.builtin.shell: | + mkfs.btrfs -f \ + -L {{ storage['btrfs']['label'] }} \ + -d {{ storage['btrfs']['level'] }} \ + -m {{ storage['btrfs']['level'] }} \ + {{ hdd_path_list.stdout_lines | join(' ') }} + become: true + when: + - is_app_data.rc != 0 + - (hdd_path_list.stdout_lines | length) >= 4 + changed_when: is_mkfs.rc == 0 + register: "is_mkfs" + +- name: Mount btrfs raid10 volume + ansible.posix.mount: + path: "{{ storage['btrfs']['mount_point'] }}" + src: "LABEL={{ storage['btrfs']['label'] }}" + state: "mounted" + fstype: "btrfs" + opts: "defaults,noatime,compress=zstd:3,autodefrag,degraded,nofail" + become: true + +- name: Set hard disk path permissions + ansible.builtin.file: + path: "{{ storage['btrfs']['mount_point'] }}" + state: "directory" + owner: "{{ ansible_user }}" + group: "svadmins" + mode: "0770" + become: true diff --git a/ansible/roles/auth/handlers/main.yaml b/ansible/roles/auth/handlers/main.yaml new file mode 100644 index 0000000..b3c34a8 --- /dev/null +++ b/ansible/roles/auth/handlers/main.yaml @@ -0,0 +1,11 @@ +--- +- name: Restart authelia + 
ansible.builtin.systemd: + name: "authelia.service" + state: "restarted" + enabled: true + scope: "user" + daemon_reload: true + changed_when: false + listen: "notification_restart_authelia" + ignore_errors: true # noqa: ignore-errors diff --git a/ansible/roles/auth/tasks/services/set_authelia.yaml b/ansible/roles/auth/tasks/services/set_authelia.yaml new file mode 100644 index 0000000..adbdcf6 --- /dev/null +++ b/ansible/roles/auth/tasks/services/set_authelia.yaml @@ -0,0 +1,78 @@ +--- +- name: Create authelia directory + ansible.builtin.file: + path: "{{ node['home_path'] }}/containers/{{ item }}" + state: "directory" + owner: "{{ ansible_user }}" + group: "svadmins" + mode: "0770" + loop: + - "authelia" + - "authelia/config" + - "authelia/certs" + become: true + +- name: Deploy authelia configuration file + ansible.builtin.template: + src: "{{ hostvars['console']['node']['config_path'] }}/services/containers/auth/authelia/config/authelia.yaml.j2" + dest: "{{ node['home_path'] }}/containers/authelia/config/authelia.yaml" + owner: "{{ ansible_user }}" + group: "svadmins" + mode: "0600" + become: true + notify: "notification_restart_authelia" + no_log: true + +- name: Deploy certificates + ansible.builtin.copy: + content: | + {{ hostvars['console']['ca']['root']['crt'] }} + dest: "{{ node['home_path'] }}/containers/authelia/certs/ilnmors_root_ca.crt" + owner: "{{ ansible_user }}" + group: "svadmins" + mode: "0440" + become: true + no_log: true + +- name: Register secret value to podman secret + containers.podman.podman_secret: + name: "{{ item.name }}" + data: "{{ item.value }}" + state: "present" + force: true + loop: + - name: "AUTHELIA_JWT_SECRET" + value: "{{ hostvars['console']['authelia']['jwt_secret'] }}" + - name: "AUTHELIA_SESSION_SECRET" + value: "{{ hostvars['console']['authelia']['session_secret'] }}" + - name: "AUTHELIA_STORAGE_SECRET" + value: "{{ hostvars['console']['authelia']['storage_secret'] }}" + - name: "AUTHELIA_HMAC_SECRET" + value: "{{ 
hostvars['console']['authelia']['hmac_secret'] }}" + - name: "AUTHELIA_JWKS_RS256" + value: "{{ hostvars['console']['authelia']['jwk_rs256'] }}" + - name: "AUTHELIA_JWKS_ES256" + value: "{{ hostvars['console']['authelia']['jwk_es256'] }}" + - name: "AUTHELIA_LDAP_PASSWORD" + value: "{{ hostvars['console']['ldap']['password']['authelia'] }}" + - name: "POSTGRES_AUTHELIA_PASSWORD" + value: "{{ hostvars['console']['postgresql']['password']['authelia'] }}" + notify: "notification_restart_authelia" + no_log: true + +- name: Deploy container file + ansible.builtin.template: + src: "{{ hostvars['console']['node']['config_path'] }}/services/containers/auth/authelia/authelia.container.j2" + dest: "{{ node['home_path'] }}/.config/containers/systemd/authelia.container" + owner: "{{ ansible_user }}" + group: "svadmins" + mode: "0644" + notify: "notification_restart_authelia" + +- name: Enable authelia.service + ansible.builtin.systemd: + name: "authelia.service" + state: "started" + enabled: true + daemon_reload: true + scope: "user" diff --git a/ansible/roles/common/handlers/main.yaml b/ansible/roles/common/handlers/main.yaml new file mode 100644 index 0000000..d9a65a1 --- /dev/null +++ b/ansible/roles/common/handlers/main.yaml @@ -0,0 +1,101 @@ +--- +- name: Restart ca certificate + ansible.builtin.command: | + update-ca-certificates + become: true + changed_when: false + listen: "notification_update_ca" + ignore_errors: true # noqa: ignore-errors + +- name: Restart sshd + ansible.builtin.systemd: + name: "sshd.service" + state: "restarted" + enabled: true + become: true + changed_when: false + listen: "notification_restart_sshd" + ignore_errors: true # noqa: ignore-errors + +- name: Reload systemd-networkd + ansible.builtin.systemd: + name: "systemd-networkd.service" + state: "reloaded" + enabled: true + become: true + changed_when: false + listen: "notification_reload_networkctl" + ignore_errors: true # noqa: ignore-errors + +- name: Reload systemd-resolved.service + 
ansible.builtin.systemd: + name: "systemd-resolved.service" + state: "reloaded" + enabled: true + become: true + changed_when: false + listen: "notification_reload_resolved" + ignore_errors: true # noqa: ignore-errors + +- name: Restart systemd-timesyncd + ansible.builtin.systemd: + name: "systemd-timesyncd.service" + state: "restarted" + enabled: true + become: true + changed_when: false + listen: "notification_restart_timesyncd" + ignore_errors: true # noqa: ignore-errors + +- name: Update nftables + ansible.builtin.command: | + nft -f /etc/nftables.conf + become: true + changed_when: false + listen: "notification_update_nftables" + ignore_errors: true # noqa: ignore-errors + +- name: Restart crowdsec + ansible.builtin.systemd: + name: "crowdsec.service" + state: "restarted" + enabled: true + daemon_reload: true + become: true + changed_when: false + listen: "notification_restart_crowdsec" + ignore_errors: true # noqa: ignore-errors + +- name: Restart crowdsec bouncer + ansible.builtin.systemd: + name: "crowdsec-firewall-bouncer.service" + state: "restarted" + enabled: true + daemon_reload: true + become: true + when: node['name'] == 'fw' + changed_when: false + listen: "notification_restart_crowdsec_bouncer" + ignore_errors: true # noqa: ignore-errors + +- name: Restart caddy + ansible.builtin.systemd: + name: "caddy.service" + state: "restarted" + enabled: true + scope: "user" + daemon_reload: true + changed_when: false + listen: "notification_restart_caddy" + ignore_errors: true # noqa: ignore-errors + +- name: Restart alloy + ansible.builtin.systemd: + name: "alloy.service" + state: "restarted" + enabled: true + daemon_reload: true + become: true + changed_when: false + listen: "notification_restart_alloy" + ignore_errors: true # noqa: ignore-errors diff --git a/ansible/roles/common/tasks/node/create_default_dir.yaml b/ansible/roles/common/tasks/node/create_default_dir.yaml new file mode 100644 index 0000000..467fc38 --- /dev/null +++ 
b/ansible/roles/common/tasks/node/create_default_dir.yaml @@ -0,0 +1,34 @@ +--- +- name: Create common secret directory + ansible.builtin.file: + path: "/etc/secrets" + state: "directory" + owner: "root" + group: "root" + mode: "0711" + become: true + +- name: Create user secret directory + ansible.builtin.file: + path: "/etc/secrets/{{ node['uid'] }}" + state: "directory" + owner: "{{ ansible_user }}" + group: "root" + mode: "0500" + become: true + +- name: Create user systemd directory + ansible.builtin.file: + path: "{{ node['home_path'] }}/.config/systemd/user" + state: "directory" + owner: "{{ ansible_user }}" + group: "svadmins" + mode: "0700" + +- name: Create quadlet directory + ansible.builtin.file: + path: "{{ node['home_path'] }}/.config/containers/systemd" + state: "directory" + owner: "{{ ansible_user }}" + group: "svadmins" + mode: "0700" diff --git a/ansible/roles/common/tasks/node/deploy_hosts.yaml b/ansible/roles/common/tasks/node/deploy_hosts.yaml new file mode 100644 index 0000000..132a6a0 --- /dev/null +++ b/ansible/roles/common/tasks/node/deploy_hosts.yaml @@ -0,0 +1,9 @@ +--- +- name: Deploy /etc/hosts + ansible.builtin.template: + src: "{{ hostvars['console']['node']['config_path'] }}/node/common/hosts.j2" + dest: "/etc/hosts" + owner: "root" + group: "root" + mode: "0644" + become: true diff --git a/ansible/roles/common/tasks/node/deploy_root_ca.yaml b/ansible/roles/common/tasks/node/deploy_root_ca.yaml new file mode 100644 index 0000000..87e793d --- /dev/null +++ b/ansible/roles/common/tasks/node/deploy_root_ca.yaml @@ -0,0 +1,10 @@ +--- +- name: Deploy root_ca.crt + ansible.builtin.copy: + content: "{{ hostvars['console']['ca']['root']['crt'] }}" + dest: "/usr/local/share/ca-certificates/ilnmors_root_ca.crt" + owner: "root" + group: "root" + mode: "0644" + become: true + notify: "notification_update_ca" diff --git a/ansible/roles/common/tasks/node/set_linger.yaml b/ansible/roles/common/tasks/node/set_linger.yaml new file mode 100644 index 
0000000..1c2a6aa --- /dev/null +++ b/ansible/roles/common/tasks/node/set_linger.yaml @@ -0,0 +1,20 @@ +--- +- name: Checking linger + ansible.builtin.stat: + path: "/var/lib/systemd/linger/{{ ansible_user }}" + register: "is_linger_file" + +- name: Activate linger + when: not is_linger_file.stat.exists + block: + - name: Enable linger + ansible.builtin.command: | + loginctl enable-linger {{ ansible_user }} + become: true + changed_when: true + + - name: Reboot system to ensure DBUS socket activation + ansible.builtin.reboot: + reboot_timeout: 300 + post_reboot_delay: 3 + become: true diff --git a/ansible/roles/common/tasks/node/set_networkd.yaml b/ansible/roles/common/tasks/node/set_networkd.yaml new file mode 100644 index 0000000..beaf97f --- /dev/null +++ b/ansible/roles/common/tasks/node/set_networkd.yaml @@ -0,0 +1,23 @@ +--- +- name: Set network files directory + ansible.builtin.set_fact: + directory_name: "{{ node['name'] }}" + when: node['name'] in ["vmm", "fw"] + +- name: Set target vm + ansible.builtin.set_fact: + target_vm: "{{ node['name'] }}" + +- name: Deploy networkd files + ansible.builtin.template: + src: "{{ item }}" + dest: "/etc/systemd/network/{{ item | basename }}" + owner: "root" + group: "systemd-network" + mode: "0640" + loop: "{{ query('fileglob', hostvars['console']['node']['config_path'] + '/node/' + (directory_name | default('common')) + '/networkd/*') | sort }}" + become: true + notify: + - "notification_reload_networkctl" + - "notification_restart_crowdsec" + no_log: true diff --git a/ansible/roles/common/tasks/node/set_nftables.yaml b/ansible/roles/common/tasks/node/set_nftables.yaml new file mode 100644 index 0000000..a267231 --- /dev/null +++ b/ansible/roles/common/tasks/node/set_nftables.yaml @@ -0,0 +1,36 @@ +--- +- name: Check nftables installation + ansible.builtin.shell: | + command -v nft + become: true # nftables is located in /usr/sbin, which means root permission is needed. 
+ changed_when: false + failed_when: false + register: "is_nftables_installed" + ignore_errors: true + +- name: Install nftables + ansible.builtin.apt: + name: "nftables" + state: "present" + become: true + when: is_nftables_installed.rc != 0 + +- name: Enable nftables.service + ansible.builtin.systemd: + name: "nftables.service" + state: "started" + enabled: true + become: true + +- name: Deploy nftables.conf + ansible.builtin.template: + src: "{{ hostvars['console']['node']['config_path'] }}/node/{{ node['name'] }}/nftables.conf.j2" + dest: "/etc/nftables.conf" + owner: "root" + group: "root" + mode: "0700" + validate: "/usr/sbin/nft -c -f %s" + become: true + notify: + - "notification_update_nftables" + - "notification_restart_crowdsec_bouncer" diff --git a/ansible/roles/common/tasks/node/set_resolved.yaml b/ansible/roles/common/tasks/node/set_resolved.yaml new file mode 100644 index 0000000..d403839 --- /dev/null +++ b/ansible/roles/common/tasks/node/set_resolved.yaml @@ -0,0 +1,39 @@ +--- +- name: Enable systemd-resolved.service + ansible.builtin.systemd: + name: "systemd-resolved.service" + state: "started" + enabled: true + become: true + +- name: Check global.conf + ansible.builtin.stat: + path: "/etc/systemd/resolved.conf.d/global.conf" + register: "is_global_conf" + +- name: Create resolved directory + ansible.builtin.file: + path: "/etc/systemd/resolved.conf.d" + state: "directory" + owner: "root" + group: "root" + mode: "0755" + become: true + +- name: Deploy global conf file + ansible.builtin.template: + src: "{{ hostvars['console']['node']['config_path'] }}/node/common/resolved/global.conf.j2" + dest: "/etc/systemd/resolved.conf.d/global.conf" + owner: "root" + group: "systemd-resolve" + mode: "0640" + become: true + notify: "notification_reload_resolved" + +- name: Restart systemd-resolved.service when it is initiated + ansible.builtin.systemd: + name: "systemd-resolved.service" + state: "restarted" + enabled: true + become: true + when: not 
is_global_conf.stat.exists diff --git a/ansible/roles/common/tasks/node/set_ssh_host.yaml b/ansible/roles/common/tasks/node/set_ssh_host.yaml new file mode 100644 index 0000000..8be6529 --- /dev/null +++ b/ansible/roles/common/tasks/node/set_ssh_host.yaml @@ -0,0 +1,119 @@ +--- +- name: Deploy /etc/ssh/local_ssh_ca.pub + ansible.builtin.copy: + content: | + {{ hostvars['console']['ssh']['ca']['pub'] }} + dest: "/etc/ssh/local_ssh_ca.pub" + owner: "root" + group: "root" + mode: "0644" + become: true + no_log: true + +- name: Check ssh_host_key-cert.pub + ansible.builtin.stat: + path: "/etc/ssh/ssh_host_ed25519_key-cert.pub" + register: "is_signed_ca_key" + +- name: Get current ssh_host_key-cert.pub Key ID + ansible.builtin.shell: | + set -o pipefail + ssh-keygen -L -f /etc/ssh/ssh_host_ed25519_key-cert.pub | \ + grep "Key ID" | \ + sed -E 's/.*Key ID: "(.*)"/\1/' + when: is_signed_ca_key.stat.exists + changed_when: false + register: "current_key_id" + no_log: true + +- name: Get current ssh_host_key-cert.pub san + ansible.builtin.shell: | + set -o pipefail + ssh-keygen -L -f /etc/ssh/ssh_host_ed25519_key-cert.pub | \ + sed -n '/Principals:/,/Critical Options:/p' | \ + sed '1d;$d' | \ + sed 's/^[[:space:]]*//' + when: is_signed_ca_key.stat.exists + changed_when: false + register: "current_san_id" + no_log: true + +- name: Set current key informations + ansible.builtin.set_fact: + current_id_key: "{{ current_key_id.stdout }}" + current_san_list: "{{ current_san_id.stdout_lines }}" + when: is_signed_ca_key.stat.exists + no_log: true + +- name: Compare key values between current information and defined information + ansible.builtin.set_fact: + is_certificate_info_different: true + when: (current_id_key | default("")) != node['name'] or (current_san_list | default([])) != (node['ssh_san'].split(',') | map('trim') | list) + +- name: Get SSH CA and signing + when: not is_signed_ca_key.stat.exists or (is_certificate_info_different | default(false)) + block: + - name: Get 
ssh_host_key.pub from remote server + ansible.builtin.fetch: + src: "/etc/ssh/ssh_host_ed25519_key.pub" + dest: "/run/user/{{ hostvars['console']['node']['uid'] }}/{{ node['name'] }}_ssh_host_ed25519_key.pub" + flat: true + become: true + + - name: Get SSH CA + delegate_to: "console" + ansible.builtin.copy: + content: | + {{ hostvars['console']['ssh']['ca']['key'] }} + dest: "/run/user/{{ hostvars['console']['node']['uid'] }}/local_ssh_ca_private_key" + owner: "console" + group: "svadmins" + mode: "0400" + no_log: true + + - name: Sign on ssh host keys (pub file) + delegate_to: "console" + ansible.builtin.command: | + ssh-keygen -s /run/user/{{ hostvars['console']['node']['uid'] }}/local_ssh_ca_private_key \ + -h \ + -I "{{ node['name'] }}" \ + -n "{{ node['ssh_san'] }}" \ + /run/user/{{ hostvars['console']['node']['uid'] }}/{{ node['name'] }}_ssh_host_ed25519_key.pub + changed_when: not is_signed_ca_key.stat.exists or (is_certificate_info_different | default(false)) + no_log: true + + - name: Deploy signed pub file + ansible.builtin.copy: + src: "/run/user/{{ hostvars['console']['node']['uid'] }}/{{ node['name'] }}_ssh_host_ed25519_key-cert.pub" + dest: "/etc/ssh/ssh_host_ed25519_key-cert.pub" + owner: "root" + group: "root" + mode: "0644" + become: true + notify: "notification_restart_sshd" + + always: + - name: Clean temporary files + delegate_to: "console" + ansible.builtin.file: + path: "/run/user/{{ hostvars['console']['node']['uid'] }}/{{ item }}" + state: "absent" + loop: + - "{{ node['name'] }}_ssh_host_ed25519_key.pub" + - "{{ node['name'] }}_ssh_host_ed25519_key-cert.pub" + - "local_ssh_ca_private_key" + no_log: true + +- name: Set sshd_config.d files + ansible.builtin.copy: + src: "{{ hostvars['console']['node']['config_path'] }}/node/common/ssh/{{ item }}" + dest: "/etc/ssh/sshd_config.d/{{ item }}" + owner: "root" + group: "root" + mode: "0644" + loop: + - "prohibit_root.conf" + - "ssh_ca.conf" + - "host_certificate.conf" + become: true + notify: 
"notification_restart_sshd" diff --git a/ansible/roles/common/tasks/node/set_timesyncd.yaml b/ansible/roles/common/tasks/node/set_timesyncd.yaml new file mode 100644 index 0000000..6b78325 --- /dev/null +++ b/ansible/roles/common/tasks/node/set_timesyncd.yaml @@ -0,0 +1,20 @@ +--- +- name: Create timesyncd.conf.d + ansible.builtin.file: + path: "/etc/systemd/timesyncd.conf.d" + state: "directory" + owner: "root" + group: "root" + mode: "0755" + become: true + +- name: Deploy timesyncd.conf.d/local-ntp.conf + ansible.builtin.copy: + src: "{{ hostvars['console']['node']['config_path'] }}/node/common/timesyncd/local-ntp.conf" + dest: "/etc/systemd/timesyncd.conf.d/local-ntp.conf" + owner: "root" + group: "systemd-timesync" + mode: "0640" + become: true + notify: "notification_restart_timesyncd" + no_log: true diff --git a/ansible/roles/common/tasks/node/set_wireguard.yaml b/ansible/roles/common/tasks/node/set_wireguard.yaml new file mode 100644 index 0000000..f3a7b28 --- /dev/null +++ b/ansible/roles/common/tasks/node/set_wireguard.yaml @@ -0,0 +1,15 @@ +--- +- name: Create wg0 files + ansible.builtin.template: + src: "{{ hostvars['console']['node']['config_path'] }}/node/fw/wireguard/{{ item }}" + dest: "/etc/systemd/network/{{ item }}" + owner: "root" + group: "systemd-network" + mode: "0640" + loop: + - "30-fw-wg0.netdev" + - "31-fw-wg0.network" + become: true + when: node['name'] == 'fw' + notify: "notification_reload_networkctl" + no_log: true diff --git a/ansible/roles/common/tasks/services/set_alloy.yaml b/ansible/roles/common/tasks/services/set_alloy.yaml new file mode 100644 index 0000000..bf6fd54 --- /dev/null +++ b/ansible/roles/common/tasks/services/set_alloy.yaml @@ -0,0 +1,73 @@ +--- +- name: Gather system facts (hardware) + ansible.builtin.setup: + gather_subset: + - hardware + become: true + +- name: Deploy alloy deb file (x86_64) + ansible.builtin.copy: + src: "{{ hostvars['console']['node']['data_path'] }}/bin/alloy-{{ version['packages']['alloy'] 
}}-amd64.deb" + dest: "/var/cache/apt/archives/alloy-{{ version['packages']['alloy'] }}.deb" + owner: "root" + group: "root" + mode: "0644" + become: true + when: ansible_facts['architecture'] == "x86_64" + +- name: Deploy alloy deb file (aarch64) + ansible.builtin.copy: + src: "{{ hostvars['console']['node']['data_path'] }}/bin/alloy-{{ version['packages']['alloy'] }}-arm64.deb" + dest: "/var/cache/apt/archives/alloy-{{ version['packages']['alloy'] }}.deb" + owner: "root" + group: "root" + mode: "0644" + become: true + when: ansible_facts['architecture'] == "aarch64" + +- name: Install alloy + ansible.builtin.apt: + deb: "/var/cache/apt/archives/alloy-{{ version['packages']['alloy'] }}.deb" + state: "present" + become: true + +- name: Deploy alloy config + ansible.builtin.template: + src: "{{ hostvars['console']['node']['config_path'] }}/services/systemd/common/alloy/config.alloy.j2" + dest: "/etc/alloy/config.alloy" + owner: "root" + group: "root" + mode: "0644" + become: true + notify: "notification_restart_alloy" + no_log: true + +- name: Create alloy.service.d + ansible.builtin.file: + path: "/etc/systemd/system/alloy.service.d" + state: "directory" + owner: "root" + group: "root" + mode: "0755" + become: true + +- name: Set alloy.service.d/override.conf + ansible.builtin.copy: + dest: "/etc/systemd/system/alloy.service.d/override.conf" + content: | + [Service] + Restart=always + RestartSec=60 + owner: "root" + group: "root" + mode: "0644" + become: true + notify: "notification_restart_alloy" + +- name: Enable alloy service + ansible.builtin.systemd: + name: "alloy.service" + state: "started" + enabled: true + daemon_reload: true + become: true diff --git a/ansible/roles/common/tasks/services/set_caddy.yaml b/ansible/roles/common/tasks/services/set_caddy.yaml new file mode 100644 index 0000000..75b8c44 --- /dev/null +++ b/ansible/roles/common/tasks/services/set_caddy.yaml @@ -0,0 +1,99 @@ +--- +# infra, auth, app (vmm, fw has no podman in it) +- name: Create 
caddy directory + ansible.builtin.file: + path: "{{ node['home_path'] }}/containers/{{ item }}" + owner: "{{ ansible_user }}" + group: "svadmins" + state: "directory" + mode: "0770" + loop: + - "caddy" + - "caddy/etc" + - "caddy/data" + - "caddy/build" + become: true + +- name: Create caddy log directory for auth + ansible.builtin.file: + path: /var/log/caddy + owner: "{{ ansible_user }}" + group: "svadmins" + state: "directory" + mode: "0755" + become: true + when: node['name'] == "auth" + +- name: Register acme key to podman secret + containers.podman.podman_secret: + name: "CADDY_ACME_KEY" + data: "{{ hostvars['console']['ca']['acme_key'] }}" + state: "present" + force: true + notify: "notification_restart_caddy" + no_log: true + +- name: Register crowdsec bouncer key to podman secret + containers.podman.podman_secret: + name: "CADDY_CROWDSEC_KEY" + data: "{{ hostvars['console']['crowdsec']['bouncer']['caddy'] }}" + state: "present" + force: true + when: node['name'] == "auth" + notify: "notification_restart_caddy" + no_log: true + +- name: Deploy containerfile for build + ansible.builtin.template: + src: "{{ hostvars['console']['node']['config_path'] }}/services/containers/common/caddy/build/caddy.containerfile.j2" + dest: "{{ node['home_path'] }}/containers/caddy/build/Containerfile" + owner: "{{ ansible_user }}" + group: "svadmins" + mode: "0640" + +- name: Deploy root crt for build + ansible.builtin.copy: + content: "{{ hostvars['console']['ca']['root']['crt'] }}" + dest: "{{ node['home_path'] }}/containers/caddy/build/ilnmors_root_ca.crt" + owner: "{{ ansible_user }}" + group: "svadmins" + mode: "0640" + no_log: true + +- name: Build caddy container image + containers.podman.podman_image: + name: "ilnmors.internal/{{ node['name'] }}/caddy" + # check tags from container file + tag: "{{ version['containers']['caddy'] }}" + state: "build" + path: "{{ node['home_path'] }}/containers/caddy/build" + +- name: Prune caddy dangling images + 
containers.podman.podman_prune: + image: true + +- name: Deploy caddyfile + ansible.builtin.template: + src: "{{ hostvars['console']['node']['config_path'] }}/services/containers/common/caddy/etc/{{ node['name'] }}/Caddyfile.j2" + dest: "{{ node['home_path'] }}/containers/caddy/etc/Caddyfile" + owner: "{{ ansible_user }}" + group: "svadmins" + mode: "0600" + notify: "notification_restart_caddy" + +- name: Deploy container file + ansible.builtin.template: + src: "{{ hostvars['console']['node']['config_path'] }}/services/containers/common/caddy/caddy.container.j2" + dest: "{{ node['home_path'] }}/.config/containers/systemd/caddy.container" + owner: "{{ ansible_user }}" + group: "svadmins" + mode: "0644" + notify: "notification_restart_caddy" + +- name: Enable caddy + ansible.builtin.systemd: + name: "caddy.service" + state: "started" + enabled: true + daemon_reload: true + scope: "user" diff --git a/ansible/roles/common/tasks/services/set_crowdsec.yaml b/ansible/roles/common/tasks/services/set_crowdsec.yaml new file mode 100644 index 0000000..04e6720 --- /dev/null +++ b/ansible/roles/common/tasks/services/set_crowdsec.yaml @@ -0,0 +1,304 @@ +--- +- name: Check crowdsec installed + ansible.builtin.shell: | + command -v crowdsec + changed_when: false + failed_when: false + register: "is_crowdsec_installed" + ignore_errors: true + +- name: Check crowdsec bouncer installed + ansible.builtin.shell: | + command -v crowdsec-firewall-bouncer + when: node['name'] == "fw" + changed_when: false + failed_when: false + register: "is_crowdsec_bouncer_installed" + ignore_errors: true + +- name: Install crowdsec + ansible.builtin.apt: + name: "crowdsec" + state: "present" + become: true + when: is_crowdsec_installed.rc != 0 + +- name: Install crowdsec bouncers + ansible.builtin.apt: + name: "crowdsec-firewall-bouncer" + state: "present" + become: true + when: + - node['name'] == "fw" + - is_crowdsec_bouncer_installed.rc != 0 + +- name: Set acquis.d list for bouncer + 
ansible.builtin.set_fact: + acquisd_list: + fw: + collection: "crowdsecurity/suricata" + config: "suricata.yaml" + auth: + collection: "crowdsecurity/caddy" + config: "caddy.yaml" + +- name: Deploy crowdsec-update service files + ansible.builtin.copy: + src: "{{ hostvars['console']['node']['config_path'] }}/services/systemd/common/crowdsec/{{ item }}" + dest: "/etc/systemd/system/{{ item }}" + owner: "root" + group: "root" + mode: "0644" + validate: "/usr/bin/systemd-analyze verify %s" + loop: + - "crowdsec-update.service" + - "crowdsec-update.timer" + become: true + +- name: Deploy crowdsec config.yaml + ansible.builtin.template: + src: "{{ hostvars['console']['node']['config_path'] }}/services/systemd/common/crowdsec/etc/config.yaml.j2" + dest: "/etc/crowdsec/config.yaml" + owner: "root" + group: "root" + mode: "0644" + become: true + notify: "notification_restart_crowdsec" + no_log: true + +- name: Deploy crowdsec local_api_credentials.yaml + ansible.builtin.template: + src: "{{ hostvars['console']['node']['config_path'] }}/services/systemd/common/crowdsec/etc/local_api_credentials.yaml.j2" + dest: "/etc/crowdsec/local_api_credentials.yaml" + owner: "root" + group: "root" + mode: "0600" + become: true + notify: "notification_restart_crowdsec" + no_log: true + +- name: Set Crowdsec LAPI configuration + when: node['name'] == "fw" + block: + - name: Create crowdsec ssl directory + ansible.builtin.file: + path: "/etc/crowdsec/ssl" + state: "directory" + owner: "root" + group: "root" + mode: "0700" + become: true + + - name: Deploy crowdsec lapi ssl certificate + ansible.builtin.copy: + content: | + {{ hostvars['console']['crowdsec']['crt'] | trim }} + {{ hostvars['console']['ca']['intermediate']['crt'] }} + dest: "/etc/crowdsec/ssl/crowdsec.crt" + owner: "root" + group: "root" + mode: "0644" + become: true + notify: "notification_restart_crowdsec" + no_log: true + + - name: Deploy crowdsec lapi ssl key + ansible.builtin.copy: + content: | + {{ 
hostvars['console']['crowdsec']['key'] }} + dest: "/etc/crowdsec/ssl/crowdsec.key" + owner: "root" + group: "root" + mode: "0400" + become: true + notify: "notification_restart_crowdsec" + no_log: true + + - name: Get existing machines list + ansible.builtin.command: + cmd: "cscli machines list -o json" + become: true + changed_when: false + register: "existing_crowdsec_machines_list" + + - name: Set existing machines' name + ansible.builtin.set_fact: + existing_machines_name: "{{ existing_crowdsec_machines_list.stdout | from_json | map(attribute='machineId') | list }}" + + - name: Set goal machines' name + ansible.builtin.set_fact: + machines_name: ["fw", "vmm", "infra", "auth", "app"] + no_log: true + + - name: Prune unknown (random) machines + ansible.builtin.command: + cmd: "cscli machines delete {{ item }}" + loop: "{{ existing_machines_name | difference(machines_name) }}" + become: true + changed_when: true + + - name: Register crowdsec machines to LAPI server + ansible.builtin.command: + cmd: "cscli machines add {{ item }} --password {{ hostvars['console']['crowdsec']['machine'][item] }} --force -f /dev/null" + loop: "{{ machines_name }}" + become: true + changed_when: false + no_log: true + + - name: Get existing bouncers list + ansible.builtin.command: + cmd: "cscli bouncers list -o json" + become: true + register: "existing_crowdsec_bouncers_list" + changed_when: false + + - name: Set existing bouncers' name + ansible.builtin.set_fact: + existing_bouncers_name: "{{ existing_crowdsec_bouncers_list.stdout | from_json | map(attribute='name') | list }}" + + - name: Flush bouncers + ansible.builtin.command: + cmd: "cscli bouncers delete {{ item }}" + loop: "{{ existing_bouncers_name }}" + become: true + changed_when: true + + - name: Set bouncers' name + ansible.builtin.set_fact: + bouncers_name: ["fw", "caddy"] + + - name: Register Firewall Bouncer to LAPI + ansible.builtin.command: + cmd: "cscli bouncers add {{ item }}-bouncer -k {{ 
hostvars['console']['crowdsec']['bouncer'][item] }}" + loop: "{{ bouncers_name }}" + become: true + changed_when: true + notify: "notification_restart_crowdsec_bouncer" + no_log: true + +- name: Set crowdsec bouncer + when: node['name'] in acquisd_list + block: + - name: Install crowdsec collection + ansible.builtin.command: + cmd: "cscli collections install {{ acquisd_list[node['name']]['collection'] }}" + become: true + changed_when: "'overwrite' not in is_collection_installed.stderr" + failed_when: + - is_collection_installed.rc != 0 + - "'already installed' not in is_collection_installed.stderr" + register: "is_collection_installed" + + - name: Create crowdsec acquis.d directory + ansible.builtin.file: + path: "/etc/crowdsec/acquis.d" + state: "directory" + owner: "root" + group: "root" + mode: "0755" + become: true + + - name: Create whitelists.yaml + ansible.builtin.template: + src: "{{ hostvars['console']['node']['config_path'] }}/services/systemd/common/crowdsec/bouncers/whitelists.yaml.j2" + dest: "/etc/crowdsec/parsers/s02-enrich/whitelists.yaml" + owner: "root" + group: "root" + mode: "0644" + become: true + notify: + - "notification_restart_crowdsec" + - "notification_restart_crowdsec_bouncer" + no_log: true + + - name: Deploy acquis.d file + ansible.builtin.copy: + src: "{{ hostvars['console']['node']['config_path'] }}/services/systemd/common/crowdsec/acquis.d/{{ acquisd_list[node['name']]['config'] }}" + dest: "/etc/crowdsec/acquis.d/{{ acquisd_list[node['name']]['config'] }}" + owner: "root" + group: "root" + mode: "0644" + become: true + notify: "notification_restart_crowdsec" + + - name: Set Crowdsec-Firewall-Bouncer + when: node['name'] == "fw" + block: + - name: Deploy crowdsec-firewall-bouncer.yaml + ansible.builtin.template: + src: "{{ hostvars['console']['node']['config_path'] }}/services/systemd/common/crowdsec/bouncers/crowdsec-firewall-bouncer.yaml.j2" + dest: "/etc/crowdsec/bouncers/crowdsec-firewall-bouncer.yaml" + owner: "root" + group: 
"root" + mode: "0600" + become: true + notify: "notification_restart_crowdsec_bouncer" + + - name: Delete crowdsec-firewall-bouncer.yaml subfiles (.id, .local) + ansible.builtin.file: + path: "/etc/crowdsec/bouncers/crowdsec-firewall-bouncer.yaml.{{ item }}" + state: "absent" + loop: + - "local" + - "id" + become: true + notify: "notification_restart_crowdsec_bouncer" + + - name: Create crowdsec-firewall-bouncer.service.d + ansible.builtin.file: + path: "/etc/systemd/system/crowdsec-firewall-bouncer.service.d" + state: "directory" + owner: "root" + group: "root" + mode: "0755" + become: true + + - name: Set crowdsec-firewall-bouncer.service.d/override.conf + ansible.builtin.copy: + dest: "/etc/systemd/system/crowdsec-firewall-bouncer.service.d/override.conf" + content: | + [Service] + Type=simple + TimeoutStartSec=600 + Restart=always + RestartSec=60 + owner: "root" + group: "root" + mode: "0644" + become: true + notify: "notification_restart_crowdsec_bouncer" + + +- name: Create crowdsec.service.d + ansible.builtin.file: + path: "/etc/systemd/system/crowdsec.service.d" + state: "directory" + owner: "root" + group: "root" + mode: "0755" + become: true + +- name: Set crowdsec.service.d/override.conf + ansible.builtin.copy: + dest: "/etc/systemd/system/crowdsec.service.d/override.conf" + content: | + [Service] + Restart=always + RestartSec=60 + owner: "root" + group: "root" + mode: "0644" + become: true + notify: "notification_restart_crowdsec" + +- name: Enable auto crowdsec rules update + ansible.builtin.systemd: + name: "crowdsec-update.timer" + state: "started" + enabled: true + daemon_reload: true + become: true + +# cscli bouncers list +# cscli machines list +# cscli metrics diff --git a/ansible/roles/common/tasks/services/set_kopia.yaml b/ansible/roles/common/tasks/services/set_kopia.yaml new file mode 100644 index 0000000..643d3fc --- /dev/null +++ b/ansible/roles/common/tasks/services/set_kopia.yaml @@ -0,0 +1,137 @@ +--- +- name: Gather system facts 
(hardware) + ansible.builtin.setup: + gather_subset: + - hardware + become: true + +- name: Check kopia installation + ansible.builtin.shell: | + command -v kopia + changed_when: false + failed_when: false + register: "is_kopia_installed" + ignore_errors: true + +- name: Set console kopia + when: node['name'] == 'console' + block: + - name: Apply cli tools (x86_64) + ansible.builtin.apt: + deb: "{{ node['data_path'] }}/bin/kopia-{{ version['packages']['kopia'] }}-amd64.deb" + state: "present" + become: true + when: + - ansible_facts['architecture'] == "x86_64" + - is_kopia_installed.rc != 0 + - name: Apply cli tools (aarch64) + ansible.builtin.apt: + deb: "{{ node['data_path'] }}/bin/kopia-{{ version['packages']['kopia'] }}-arm64.deb" + state: "present" + become: true + when: + - ansible_facts['architecture'] == "aarch64" + - is_kopia_installed.rc != 0 + - name: Connect kopia server + environment: + KOPIA_PASSWORD: "{{ hostvars['console']['kopia']['user']['console'] }}" + ansible.builtin.shell: | + /usr/bin/kopia repository connect server \ + --url=https://{{ infra_uri['kopia']['domain'] }}:{{ infra_uri['kopia']['ports']['https'] }} \ + --override-username=console \ + --override-hostname=console.ilnmors.internal + changed_when: false + failed_when: is_kopia_connected.rc != 0 + register: "is_kopia_connected" + no_log: true + +- name: Set infra/app kopia + when: node['name'] in ['infra', 'app'] + block: + - name: Set kopia uid + ansible.builtin.set_fact: + kopia_uid: 951 + - name: Deploy kopia deb file (x86_64) + ansible.builtin.copy: + src: "{{ hostvars['console']['node']['data_path'] }}/bin/kopia-{{ version['packages']['kopia'] }}-amd64.deb" + dest: "/var/cache/apt/archives/kopia-{{ version['packages']['kopia'] }}.deb" + owner: "root" + group: "root" + mode: "0644" + become: true + when: ansible_facts['architecture'] == "x86_64" + - name: Deploy kopia deb file (aarch64) + ansible.builtin.copy: + src: "{{ hostvars['console']['node']['data_path'] }}/bin/kopia-{{ 
version['packages']['kopia'] }}-arm64.deb" + dest: "/var/cache/apt/archives/kopia-{{ version['packages']['kopia'] }}.deb" + owner: "root" + group: "root" + mode: "0644" + become: true + when: ansible_facts['architecture'] == "aarch64" + - name: Create kopia group + ansible.builtin.group: + name: "kopia" + gid: "{{ kopia_uid }}" + state: "present" + become: true + - name: Create kopia user + ansible.builtin.user: + name: "kopia" + uid: "{{ kopia_uid }}" + group: "kopia" + shell: "/usr/sbin/nologin" + password_lock: true + comment: "Kopia backup User" + state: "present" + become: true + - name: Create kopia directory + ansible.builtin.file: + path: "{{ item.name }}" + state: "directory" + owner: "kopia" + group: "root" + mode: "{{ item.mode }}" + loop: + - name: "/etc/kopia" + mode: "0700" + - name: "/etc/secrets/951" + mode: "0500" + - name: "/var/cache/kopia" + mode: "0700" + become: true + no_log: true + - name: Install kopia + ansible.builtin.apt: + deb: "/var/cache/apt/archives/kopia-{{ version['packages']['kopia'] }}.deb" + state: "present" + become: true + when: is_kopia_installed.rc != 0 + - name: Deploy kopia env + ansible.builtin.template: + src: "{{ hostvars['console']['node']['config_path'] }}/services/systemd/common/kopia/kopia.env.j2" + dest: "/etc/secrets/{{ kopia_uid }}/kopia.env" + owner: "{{ kopia_uid }}" + group: "root" + mode: "0400" + become: true + no_log: true + - name: Deploy kopia service files + ansible.builtin.template: + src: "{{ hostvars['console']['node']['config_path'] }}/services/systemd/common/kopia/{{ item }}.j2" + dest: "/etc/systemd/system/{{ item }}" + owner: "root" + group: "root" + mode: "0644" + validate: "/usr/bin/systemd-analyze verify %s" + loop: + - "kopia-backup.service" + - "kopia-backup.timer" + become: true + - name: Enable auto kopia rules update + ansible.builtin.systemd: + name: "kopia-backup.timer" + state: "started" + enabled: true + daemon_reload: true + become: true diff --git 
a/ansible/roles/common/tasks/services/set_podman.yaml b/ansible/roles/common/tasks/services/set_podman.yaml new file mode 100644 index 0000000..02dc863 --- /dev/null +++ b/ansible/roles/common/tasks/services/set_podman.yaml @@ -0,0 +1,46 @@ +--- +- name: Check podman installation + ansible.builtin.shell: | + command -v podman + changed_when: false + failed_when: false + register: "is_podman_installed" + ignore_errors: true + +- name: Create container directory + ansible.builtin.file: + path: "{{ node['home_path'] }}/containers" + owner: "{{ ansible_user }}" + group: "svadmins" + state: "directory" + mode: "0700" + +- name: Create container data directory for app + ansible.builtin.file: + path: "{{ node['home_path'] }}/data/containers" + owner: "{{ ansible_user }}" + group: "svadmins" + state: "directory" + mode: "0770" + when: node['name'] == "app" + +- name: Install podman and reset ssh connection for initiating + when: is_podman_installed.rc != 0 + become: true + block: + - name: Set subid scope (Overwrite) + ansible.builtin.copy: + content: | + {{ ansible_user }}:100000:65536 + dest: "/etc/sub{{ item }}" + owner: "root" + group: "root" + mode: "0644" + loop: + - "uid" + - "gid" + - name: Install podman + ansible.builtin.apt: + name: + - "podman" + state: "present" diff --git a/ansible/roles/console/handlers/main.yaml b/ansible/roles/console/handlers/main.yaml new file mode 100644 index 0000000..237327f --- /dev/null +++ b/ansible/roles/console/handlers/main.yaml @@ -0,0 +1,8 @@ +--- +- name: Register font + ansible.builtin.shell: | + fc-cache -f -v + become: true + changed_when: false + listen: "notification_update_font" + ignore_errors: true # noqa: ignore-errors diff --git a/ansible/roles/console/tasks/node/load_secret_vars.yaml b/ansible/roles/console/tasks/node/load_secret_vars.yaml new file mode 100644 index 0000000..6048b9b --- /dev/null +++ b/ansible/roles/console/tasks/node/load_secret_vars.yaml @@ -0,0 +1,29 @@ +--- +- name: Check sops installation 
(Prerequisite) + ansible.builtin.shell: | + command -v sops + changed_when: false + failed_when: false + register: "is_sops_installed" + ignore_errors: true + +- name: Failure when sops is missing + ansible.builtin.fail: + msg: "sops is not installed. Please install sops manually as described in README.md before running this playbook" + when: is_sops_installed.rc != 0 + +- name: Decrypt secret values in console + environment: + SOPS_AGE_KEY: "{{ hostvars['console']['age_key'] }}" + ansible.builtin.command: | + sops -d --output-type yaml {{ hostvars['console']['node']['config_path'] }}/secrets/secrets.yaml + changed_when: false + register: "decrypted_secrets" + run_once: true + no_log: true + +- name: Load decrypted secret values in console + ansible.builtin.set_fact: + "{{ item.key }}": "{{ item.value }}" + loop: "{{ decrypted_secrets.stdout | from_yaml | dict2items }}" + no_log: true diff --git a/ansible/roles/console/tasks/node/set_ssh_client.yaml b/ansible/roles/console/tasks/node/set_ssh_client.yaml new file mode 100644 index 0000000..87312f3 --- /dev/null +++ b/ansible/roles/console/tasks/node/set_ssh_client.yaml @@ -0,0 +1,109 @@ +--- +- name: Create ssh id_console + ansible.builtin.copy: + content: "{{ hostvars['console']['ssh']['console']['key'] }}" + dest: "/etc/secrets/{{ node['uid'] }}/id_console" + owner: "{{ ansible_user }}" + group: "root" + mode: "0400" + become: true + no_log: true + +- name: Create ssh id_console.pub + ansible.builtin.copy: + content: "{{ hostvars['console']['ssh']['console']['pub'] }}" + dest: "/etc/secrets/{{ node['uid'] }}/id_console.pub" + owner: "{{ ansible_user }}" + group: "root" + mode: "0400" + become: true + no_log: true + +- name: Create ssh_known_hosts + become: true + ansible.builtin.copy: + content: | + @cert-authority *.ilnmors.internal {{ hostvars['console']['ssh']['ca']['pub'] }} + dest: "/etc/ssh/ssh_known_hosts" + owner: "root" + group: "root" + mode: "0644" + no_log: true + +- name: Check id_console-cert.pub + 
ansible.builtin.stat: + path: "/etc/secrets/{{ node['uid'] }}/id_console-cert.pub" + register: "is_signed_console_key" + +- name: Get current id_console-cert.pub allow users + ansible.builtin.shell: | + set -o pipefail + ssh-keygen -L -f /etc/secrets/{{ node['uid'] }}/id_console-cert.pub | \ + sed -n '/Principals:/,/Critical Options:/p' | \ + sed '1d;$d' | \ + sed 's/^[[:space:]]*//' + when: is_signed_console_key.stat.exists + changed_when: false + register: "current_allow_users" + no_log: true + +- name: Set key informations + ansible.builtin.set_fact: + current_user_list: "{{ current_allow_users.stdout_lines }}" + when: is_signed_console_key.stat.exists + no_log: true + +- name: Compare key values between current information and defined information + ansible.builtin.set_fact: + is_certificate_info_different: true + when: (current_user_list | default([])) != (node['ssh_users'].split(',') | map('trim') | list) + +- name: Get SSH CA and signing + when: not is_signed_console_key.stat.exists or (is_certificate_info_different | default(false)) + block: + - name: Get SSH CA + ansible.builtin.copy: + content: | + {{ hostvars['console']['ssh']['ca']['key'] }} + dest: "/run/user/{{ node['uid'] }}/local_ssh_ca_private_key" + owner: "console" + group: "svadmins" + mode: "0400" + no_log: true + - name: Sign on ssh console key (pub file) + ansible.builtin.command: | + ssh-keygen -s /run/user/{{ node['uid'] }}/local_ssh_ca_private_key \ + -I "{{ node['name'] }}" \ + -n "{{ node['ssh_users'] }}" \ + /etc/secrets/{{ node['uid'] }}/id_console.pub + become: true + changed_when: not is_signed_console_key.stat.exists or (is_certificate_info_different | default(false)) + no_log: true + always: + - name: Clean temporary files + ansible.builtin.file: + path: "/run/user/{{ node['uid'] }}/local_ssh_ca_private_key" + state: "absent" + no_log: true + +- name: Create .ssh directory + ansible.builtin.file: + path: "{{ node['home_path'] }}/.ssh" + state: "directory" + owner: "{{ ansible_user 
}}" + group: "svadmins" + mode: "0700" + +- name: Create ssh config file + ansible.builtin.copy: + content: | + {% for host in groups['all'] if host != 'console' %} + Host {{ host }} + HostName {{ hostvars[host]['ansible_host'] }} + User {{ hostvars[host]['ansible_user'] }} + IdentityFile /etc/secrets/{{ node['uid'] }}/id_console + {% endfor %} + dest: "{{ node['home_path'] }}/.ssh/config" + owner: "{{ ansible_user }}" + group: "svadmins" + mode: "0600" diff --git a/ansible/roles/console/tasks/services/set_chromium.yaml b/ansible/roles/console/tasks/services/set_chromium.yaml new file mode 100644 index 0000000..e559f85 --- /dev/null +++ b/ansible/roles/console/tasks/services/set_chromium.yaml @@ -0,0 +1,31 @@ +--- +- name: Check chromium installation + ansible.builtin.shell: | + command -v chromium + changed_when: false + failed_when: false + register: "is_chromium_installed" + ignore_errors: true + +- name: Check korean font installation + ansible.builtin.shell: | + fc-list | grep -i "nanum" + changed_when: false + failed_when: false + register: "is_font_installed" + ignore_errors: true + +- name: Install chromium + ansible.builtin.apt: + name: "chromium" + state: "present" + become: true + when: is_chromium_installed.rc != 0 + +- name: Install font + ansible.builtin.apt: + name: "fonts-nanum" + state: "present" + become: true + when: is_font_installed.rc != 0 + notify: "notification_update_font" diff --git a/ansible/roles/console/tasks/services/set_cli_tools.yaml b/ansible/roles/console/tasks/services/set_cli_tools.yaml new file mode 100644 index 0000000..73aa5d4 --- /dev/null +++ b/ansible/roles/console/tasks/services/set_cli_tools.yaml @@ -0,0 +1,108 @@ +--- +- name: Gather system facts (hardware) + ansible.builtin.setup: + gather_subset: + - hardware + become: true + +- name: Check ansible installation + ansible.builtin.shell: | + command -v ansible + changed_when: false + failed_when: false + register: "is_ansible_installed" + ignore_errors: true + +- name: 
Upgrade ansible module + community.general.ansible_galaxy_install: + type: "collection" + name: "{{ item }}" + state: "latest" + loop: + - "ansible.posix" + - "community.libvirt" + - "community.general" + - "containers.podman" + when: is_ansible_installed.rc == 0 + +- name: Download sops + ansible.builtin.get_url: + url: "https://github.com/getsops/sops/releases/download/v{{ version['packages']['sops'] }}/\ + sops_{{ version['packages']['sops'] }}_{{ item }}.deb" + dest: "{{ node['data_path'] }}/bin/sops-{{ version['packages']['sops'] }}-{{ item }}.deb" + owner: "{{ ansible_user }}" + group: "svadmins" + mode: "0600" + loop: + - "amd64" + - "arm64" + +- name: Download step-cli + ansible.builtin.get_url: + url: "https://dl.smallstep.com/gh-release/cli/gh-release-header/v{{ version['packages']['step'] }}/\ + step-cli_{{ version['packages']['step'] }}-1_{{ item }}.deb" + dest: "{{ node['data_path'] }}/bin/step-{{ version['packages']['step'] }}-{{ item }}.deb" + owner: "{{ ansible_user }}" + group: "svadmins" + mode: "0600" + loop: + - "amd64" + - "arm64" + +- name: Download kopia + ansible.builtin.get_url: + url: "https://github.com/kopia/kopia/releases/download/v{{ version['packages']['kopia'] }}/\ + kopia_{{ version['packages']['kopia'] }}_linux_{{ item }}.deb" + dest: "{{ node['data_path'] }}/bin/kopia-{{ version['packages']['kopia'] }}-{{ item }}.deb" + owner: "{{ ansible_user }}" + group: "svadmins" + mode: "0600" + loop: + - "amd64" + - "arm64" + +- name: Download blocky + ansible.builtin.get_url: + url: "https://github.com/0xERR0R/blocky/releases/download/v{{ version['packages']['blocky'] }}/\ + blocky_v{{ version['packages']['blocky'] }}_Linux_{{ item }}.tar.gz" + dest: "{{ node['data_path'] }}/bin/blocky-{{ version['packages']['blocky'] }}-{{ item }}.tar.gz" + owner: "{{ ansible_user }}" + group: "svadmins" + mode: "0600" # noqa: line-length + loop: + - "x86_64" + - "arm64" + +- name: Download alloy + ansible.builtin.get_url: + url: 
"https://github.com/grafana/alloy/releases/download/v{{ version['packages']['alloy'] }}/\ + alloy-{{ version['packages']['alloy'] }}-1.{{ item }}.deb" + dest: "{{ node['data_path'] }}/bin/alloy-{{ version['packages']['alloy'] }}-{{ item }}.deb" + owner: "{{ ansible_user }}" + group: "svadmins" + mode: "0600" + loop: + - "amd64" + - "arm64" + +- name: Apply cli tools (x86_64) + ansible.builtin.apt: + deb: "{{ node['data_path'] }}/bin/{{ item }}" + state: "present" + loop: + - "sops-{{ version['packages']['sops'] }}-amd64.deb" + - "step-{{ version['packages']['step'] }}-amd64.deb" + - "kopia-{{ version['packages']['kopia'] }}-amd64.deb" + become: true + when: ansible_facts['architecture'] == "x86_64" + +- name: Apply cli tools (aarch64) + ansible.builtin.apt: + deb: "{{ node['data_path'] }}/bin/{{ item }}" + state: "present" + loop: + - "sops-{{ version['packages']['sops'] }}-arm64.deb" + - "step-{{ version['packages']['step'] }}-arm64.deb" + - "kopia-{{ version['packages']['kopia'] }}-arm64.deb" + become: true + when: ansible_facts['architecture'] == "aarch64" diff --git a/ansible/roles/fw/handlers/main.yaml b/ansible/roles/fw/handlers/main.yaml new file mode 100644 index 0000000..58c79e6 --- /dev/null +++ b/ansible/roles/fw/handlers/main.yaml @@ -0,0 +1,63 @@ +--- +- name: Restart chrony + ansible.builtin.systemd: + name: "chrony.service" + state: "restarted" + enabled: true + daemon_reload: true + become: true + changed_when: false + listen: "notification_restart_chrony" + ignore_errors: true # noqa: ignore-errors + +- name: Update suricata rules + ansible.builtin.command: + suricata-update --disable-conf /etc/suricata/disable.conf --enable-conf /etc/suricata/enable.conf --local /etc/suricata/rules/local.rules + become: true + changed_when: false + listen: "notification_update_suricata_rules" + ignore_errors: true # noqa: ignore-errors + +- name: Restart suricata + ansible.builtin.systemd: + name: "suricata.service" + state: "restarted" + enabled: true + 
daemon_reload: true + become: true + changed_when: false + listen: "notification_restart_suricata" + ignore_errors: true # noqa: ignore-errors + +- name: Restart bind9 + ansible.builtin.systemd: + name: "named.service" + state: "restarted" + enabled: true + daemon_reload: true + become: true + changed_when: false + listen: "notification_restart_bind" + ignore_errors: true # noqa: ignore-errors + +- name: Restart blocky + ansible.builtin.systemd: + name: "blocky.service" + state: "restarted" + enabled: "true" + daemon_reload: true + become: true + changed_when: false + listen: "notification_restart_blocky" + ignore_errors: true # noqa: ignore-errors + +- name: Restart kea-dhcp4 + ansible.builtin.systemd: + name: "kea-dhcp4-server.service" + state: "restarted" + enabled: true + daemon_reload: true + become: true + changed_when: false + listen: "notification_restart_kea4" + ignore_errors: true # noqa: ignore-errors diff --git a/ansible/roles/fw/tasks/services/set_bind.yaml b/ansible/roles/fw/tasks/services/set_bind.yaml new file mode 100644 index 0000000..6ba6334 --- /dev/null +++ b/ansible/roles/fw/tasks/services/set_bind.yaml @@ -0,0 +1,103 @@ +--- +- name: Check bind9 installation + ansible.builtin.shell: | + command -v named + become: true # named is located in /usr/sbin, which means root permission is needed. 
+ changed_when: false + failed_when: false + register: "is_bind_installed" + ignore_errors: true + +- name: Set bind9 zone files + ansible.builtin.set_fact: + bind_zone_files: + - "db.ilnmors.internal" + - "db.ilnmors.com" + - "db.1.168.192.in-addr.arpa" + - "db.10.168.192.in-addr.arpa" + - "db.1.00df.ip6.arpa" + - "db.10.00df.ip6.arpa" + +- name: Install bind9 + ansible.builtin.apt: + name: "bind9" + state: "present" + become: true + when: is_bind_installed.rc != 0 + +- name: Deploy acme.key + ansible.builtin.copy: + content: "{{ hostvars['console']['bind']['acme_key'] }}" + dest: "/etc/bind/acme.key" + owner: "bind" + group: "bind" + mode: "0640" + become: true + notify: "notification_restart_bind" + no_log: true + +- name: Deploy db files + ansible.builtin.copy: + src: "{{ hostvars['console']['node']['config_path'] }}/services/systemd/fw/bind/lib/{{ item }}" + dest: "/var/lib/bind/{{ item }}" + owner: "bind" + group: "bind" + mode: "0640" + loop: "{{ bind_zone_files }}" + become: true + notify: "notification_restart_bind" + no_log: true + +- name: Clean BIND journal files + ansible.builtin.file: + path: "/var/lib/bind/{{ item }}.jnl" + state: absent + loop: "{{ bind_zone_files }}" + become: true + notify: "notification_restart_bind" + no_log: true + +- name: Deploy named.conf + ansible.builtin.template: + src: "{{ hostvars['console']['node']['config_path'] }}/services/systemd/fw/bind/etc/named.conf.j2" + dest: "/etc/bind/named.conf" + owner: "root" + group: "bind" + mode: "0640" + validate: "/usr/bin/named-checkconf -z %s" + become: true + notify: "notification_restart_bind" + no_log: true + +- name: Create named.service.d + ansible.builtin.file: + path: "/etc/systemd/system/named.service.d" + state: "directory" + owner: "root" + group: "root" + mode: "0755" + become: true + +- name: Set named.service.d/override.conf + ansible.builtin.copy: + dest: "/etc/systemd/system/named.service.d/override.conf" + content: | + [Service] + Restart=always + RestartSec=60 + 
owner: "root" + group: "root" + mode: "0644" + become: true + notify: "notification_restart_bind" + +- name: Enable bind9 service + ansible.builtin.systemd: + name: "named.service" + state: "started" + enabled: true + become: true + +# Verify working +# dig A fw.ilnmors.internal @fd00:10::3 +# dig AAAA fw.ilnmors.internal @fd00:10::3 diff --git a/ansible/roles/fw/tasks/services/set_blocky.yaml b/ansible/roles/fw/tasks/services/set_blocky.yaml new file mode 100644 index 0000000..a8cd927 --- /dev/null +++ b/ansible/roles/fw/tasks/services/set_blocky.yaml @@ -0,0 +1,117 @@ +--- +- name: Gather system facts (hardware) + ansible.builtin.setup: + gather_subset: + - hardware + become: true + +- name: Create blocky group + ansible.builtin.group: + name: "blocky" + gid: 953 + state: "present" + become: true + +- name: Create blocky user + ansible.builtin.user: + name: "blocky" + uid: 953 + group: "blocky" + shell: "/usr/sbin/nologin" + password_lock: true + comment: "Blocky DNS User" + state: "present" + become: true + +- name: Create blocky etc directory + ansible.builtin.file: + path: "{{ item }}" + owner: "blocky" + group: "blocky" + mode: "0750" + state: "directory" + loop: + - "/etc/blocky" + - "/etc/blocky/ssl" + become: true + +- name: Deploy blocky binary file (x86_64) + ansible.builtin.unarchive: + src: "{{ hostvars['console']['node']['data_path'] }}/bin/blocky-{{ version['packages']['blocky'] }}-x86_64.tar.gz" + dest: "/usr/local/bin/" + owner: "root" + group: "root" + mode: "0755" + extra_opts: + - "--strip-components=0" + - "--wildcards" + - "blocky" + become: true + when: ansible_facts['architecture'] == "x86_64" + notify: "notification_restart_blocky" + +- name: Deploy blocky binary file (aarch64) + ansible.builtin.unarchive: + src: "{{ hostvars['console']['node']['data_path'] }}/bin/blocky-{{ version['packages']['blocky'] }}-arm64.tar.gz" + dest: "/usr/local/bin/" + owner: "root" + group: "root" + mode: "0755" + extra_opts: + - "--strip-components=0" + - 
"--wildcards" + - "blocky" + become: true + when: ansible_facts['architecture'] == "aarch64" + notify: "notification_restart_blocky" + +- name: Deploy blocky config + ansible.builtin.template: + src: "{{ hostvars['console']['node']['config_path'] }}/services/systemd/fw/blocky/etc/config.yaml.j2" + dest: "/etc/blocky/config.yaml" + owner: "blocky" + group: "blocky" + mode: "0640" + become: true + notify: "notification_restart_blocky" + no_log: true + +- name: Deploy blocky certificate and key + ansible.builtin.copy: + content: | + {{ item.value }} + dest: "/etc/blocky/ssl/{{ item.name }}" + owner: "blocky" + group: "blocky" + mode: "{{ item.mode }}" + loop: + - name: "blocky.crt" + value: | + {{ hostvars['console']['blocky']['crt'] | trim }} + {{ hostvars['console']['ca']['intermediate']['crt'] }} + mode: "0440" + - name: "blocky.key" + value: "{{ hostvars['console']['blocky']['key'] }}" + mode: "0400" + become: true + notify: "notification_restart_blocky" + no_log: true + +- name: Deploy blocky service + ansible.builtin.copy: + src: "{{ hostvars['console']['node']['config_path'] }}/services/systemd/fw/blocky/blocky.service" + dest: "/etc/systemd/system/blocky.service" + owner: "root" + group: "root" + mode: "0644" + validate: "/usr/bin/systemd-analyze verify %s" + become: true + notify: "notification_restart_blocky" + +- name: Enable blocky service + ansible.builtin.systemd: + name: "blocky.service" + state: "started" + enabled: true + daemon_reload: true + become: true diff --git a/ansible/roles/fw/tasks/services/set_chrony.yaml b/ansible/roles/fw/tasks/services/set_chrony.yaml new file mode 100644 index 0000000..054d477 --- /dev/null +++ b/ansible/roles/fw/tasks/services/set_chrony.yaml @@ -0,0 +1,55 @@ +--- +- name: Check chrnoy installation + ansible.builtin.shell: | + command -v chronyc + changed_when: false + failed_when: false + register: "is_chrony_installed" + ignore_errors: true + +- name: Install chrony + ansible.builtin.apt: + name: "chrony" + state: 
"present" + become: true + when: is_chrony_installed.rc != 0 + +- name: Deploy local acl file + ansible.builtin.template: + src: "{{ hostvars['console']['node']['config_path'] }}/services/systemd/fw/chrony/local-acl.conf.j2" + dest: "/etc/chrony/conf.d/local-acl.conf" + owner: "root" + group: "root" + mode: "0644" + become: true + notify: "notification_restart_chrony" + +- name: Create chrony.service.d + ansible.builtin.file: + path: "/etc/systemd/system/chrony.service.d" + state: "directory" + owner: "root" + group: "root" + mode: "0755" + become: true + +- name: Set chrony.service.d/override.conf + ansible.builtin.copy: + dest: "/etc/systemd/system/chrony.service.d/override.conf" + content: | + [Service] + Restart=always + RestartSec=60 + owner: "root" + group: "root" + mode: "0644" + become: true + notify: "notification_restart_chrony" + +- name: Enable chrony service + ansible.builtin.systemd: + name: "chrony.service" + state: "started" + enabled: true + daemon_reload: true + become: true diff --git a/ansible/roles/fw/tasks/services/set_ddns.yaml b/ansible/roles/fw/tasks/services/set_ddns.yaml new file mode 100644 index 0000000..83ca96e --- /dev/null +++ b/ansible/roles/fw/tasks/services/set_ddns.yaml @@ -0,0 +1,41 @@ +--- +- name: Create ddns secret env file + ansible.builtin.copy: + content: | + ZONE_ID={{ hostvars['console']['ddns']['zone_id'] }} + API_KEY={{ hostvars['console']['ddns']['api_key'] }} + dest: "/etc/secrets/{{ node['uid'] }}/ddns.env" + owner: "{{ ansible_user }}" + group: "svadmins" + mode: "0400" + become: true + no_log: true + +- name: Deploy ddns script + ansible.builtin.copy: + src: "{{ hostvars['console']['node']['config_path'] }}/services/systemd/fw/ddns/ddns.sh" + dest: "/usr/local/bin" + owner: "{{ ansible_user }}" + group: "svadmins" + mode: "0711" + become: true + +- name: Deploy ddns service files + ansible.builtin.copy: + src: "{{ hostvars['console']['node']['config_path'] }}/services/systemd/fw/ddns/{{ item }}" + dest: "{{ 
node['home_path'] }}/.config/systemd/user/{{ item }}" + owner: "{{ ansible_user }}" + group: "svadmins" + mode: "0600" + validate: "/usr/bin/systemd-analyze verify %s" + loop: + - "ddns.service" + - "ddns.timer" + +- name: Register ddns timer + ansible.builtin.systemd: + name: "ddns.timer" + state: "started" + enabled: true + daemon_reload: true + scope: "user" diff --git a/ansible/roles/fw/tasks/services/set_kea.yaml b/ansible/roles/fw/tasks/services/set_kea.yaml new file mode 100644 index 0000000..7d23006 --- /dev/null +++ b/ansible/roles/fw/tasks/services/set_kea.yaml @@ -0,0 +1,57 @@ +--- +- name: Check Kea dhcp4 installation + ansible.builtin.shell: | + command -v kea-dhcp4 + become: true # kea-dhcp4 is located in /usr/sbin, which means root permission is needed. + changed_when: false + failed_when: false + register: "is_kea4_installed" + ignore_errors: true + +- name: Install kea dhcp 4 + ansible.builtin.apt: + name: + - "kea-dhcp4-server" + state: "present" + become: true + when: is_kea4_installed.rc != 0 + +- name: Deploy kea dhcp4 conf + ansible.builtin.template: + src: "{{ hostvars['console']['node']['config_path'] }}/services/systemd/fw/kea/kea-dhcp4.conf.j2" + dest: "/etc/kea/kea-dhcp4.conf" + owner: "_kea" + group: "_kea" + mode: "0600" + become: true + notify: "notification_restart_kea4" + +- name: Create kea-dhcp-server.service.d + ansible.builtin.file: + path: "/etc/systemd/system/kea-dhcp4-server.service.d" + state: "directory" + owner: "root" + group: "root" + mode: "0755" + become: true + +- name: Set kea-dhcp-server.service.d/override.conf + ansible.builtin.copy: + dest: "/etc/systemd/system/kea-dhcp4-server.service.d/override.conf" + content: | + [Service] + Restart=always + RestartSec=60 + owner: "root" + group: "root" + mode: "0644" + become: true + notify: + - "notification_restart_kea4" + +- name: Enable kea service + ansible.builtin.systemd: + name: "kea-dhcp4-server.service" + state: "started" + enabled: true + become: true diff --git 
a/ansible/roles/fw/tasks/services/set_suricata.yaml b/ansible/roles/fw/tasks/services/set_suricata.yaml new file mode 100644 index 0000000..91ff15b --- /dev/null +++ b/ansible/roles/fw/tasks/services/set_suricata.yaml @@ -0,0 +1,141 @@ +--- +- name: Check suricata installation + ansible.builtin.shell: | + command -v suricata + changed_when: false + failed_when: false + register: "is_suricata_installed" + ignore_errors: true + +- name: Install suricata + ansible.builtin.apt: + name: + - "suricata" + - "suricata-update" + state: "present" + become: true + when: is_suricata_installed.rc != 0 + +- name: Deploy suricata-update service files + ansible.builtin.copy: + src: "{{ hostvars['console']['node']['config_path'] }}/services/systemd/fw/suricata/{{ item }}" + dest: "/etc/systemd/system/{{ item }}" + owner: "root" + group: "root" + mode: "0644" + validate: "/usr/bin/systemd-analyze verify %s" + loop: + - "suricata-update.service" + - "suricata-update.timer" + become: true + +- name: Deploy suricata custom configurations + ansible.builtin.copy: + src: "{{ hostvars['console']['node']['config_path'] }}/services/systemd/fw/suricata/etc/{{ item }}" + dest: "/etc/suricata/{{ item }}" + owner: "root" + group: "root" + mode: "0644" + loop: + - "disable.conf" + - "enable.conf" + become: true + notify: + - "notification_update_suricata_rules" + - "notification_restart_suricata" + +- name: Deploy suricata custom rules + ansible.builtin.copy: + src: "{{ hostvars['console']['node']['config_path'] }}/services/systemd/fw/suricata/etc/local.rules" + dest: "/etc/suricata/rules/local.rules" + owner: "root" + group: "root" + mode: "0644" + become: true + notify: + - "notification_update_suricata_rules" + - "notification_restart_suricata" + +- name: Check suricata rules + ansible.builtin.stat: + path: "/var/lib/suricata/rules/suricata.rules" + register: "is_suricata_rules_file" + +- name: Update suricata rules + ansible.builtin.command: + suricata-update + become: true + when: not 
is_suricata_rules_file.stat.exists + changed_when: true + +- name: Enable auto suricata rules update + ansible.builtin.systemd: + name: "suricata-update.timer" + state: "started" + enabled: true + daemon_reload: true + become: true + +- name: Deploy suricata.yaml + ansible.builtin.template: + src: "{{ hostvars['console']['node']['config_path'] }}/services/systemd/fw/suricata/etc/suricata.yaml.j2" + dest: "/etc/suricata/suricata.yaml" + owner: "root" + group: "root" + mode: "0644" + validate: "/usr/bin/suricata -T -c %s" + become: true + notify: "notification_restart_suricata" + +- name: Create suricata.service.d + ansible.builtin.file: + path: "/etc/systemd/system/suricata.service.d" + state: "directory" + owner: "root" + group: "root" + mode: "0755" + become: true + +- name: Set suricata.service.d/override.conf + ansible.builtin.copy: + dest: "/etc/systemd/system/suricata.service.d/override.conf" + content: | + [Service] + Restart=always + RestartSec=60 + owner: "root" + group: "root" + mode: "0644" + become: true + notify: + - "notification_restart_suricata" + +- name: Enable suricata service + ansible.builtin.systemd: + name: "suricata.service" + state: "started" + enabled: true + daemon_reload: true + become: true + +- name: Set suricata logs logrotate + ansible.builtin.copy: + content: | + /var/log/suricata/*.log /var/log/suricata/*.json { + weekly + missingok + rotate 4 + compress + delaycompress + notifempty + maxsize 500M + sharedscripts + postrotate + /usr/bin/systemctl reload suricata > /dev/null 2>/dev/null || true + endscript + } + dest: "/etc/logrotate.d/suricata" + owner: "root" + group: "root" + mode: "0644" + become: true diff --git a/ansible/roles/infra/handlers/main.yaml b/ansible/roles/infra/handlers/main.yaml new file mode 100644 index 0000000..ec14d7f --- /dev/null +++ b/ansible/roles/infra/handlers/main.yaml @@ -0,0 +1,85 @@ +- name: Restart ca + ansible.builtin.systemd: + name: "ca.service" + state: "restarted" + enabled: "true" + 
daemon_reload: true + scope: "user" + changed_when: false + listen: "notification_restart_ca" + ignore_errors: true # noqa: ignore-errors + +- name: Reload postgresql + ansible.builtin.command: + /usr/bin/podman exec -u postgres postgresql sh -c "pg_ctl reload" + when: not (is_postgresql_init_run | default(false)) + changed_when: false + listen: "notification_reload_postgresql" + ignore_errors: true # noqa: ignore-errors + +- name: Restart postgresql + ansible.builtin.systemd: + name: "postgresql.service" + state: "restarted" + enabled: true + daemon_reload: true + scope: "user" + when: not (is_postgresql_init_run | default(false)) + changed_when: false + listen: "notification_restart_postgresql" + ignore_errors: true # noqa: ignore-errors + +- name: Restart ldap + ansible.builtin.systemd: + name: "ldap.service" + state: "restarted" + enabled: true + daemon_reload: true + scope: "user" + changed_when: false + listen: "notification_restart_ldap" + ignore_errors: true # noqa: ignore-errors + +- name: Restart prometheus + ansible.builtin.systemd: + name: "prometheus.service" + state: "restarted" + enabled: true + daemon_reload: true + scope: "user" + changed_when: false + listen: "notification_restart_prometheus" + ignore_errors: true # noqa: ignore-errors + +- name: Restart loki + ansible.builtin.systemd: + name: "loki.service" + state: "restarted" + enabled: true + daemon_reload: true + scope: "user" + changed_when: false + listen: "notification_restart_loki" + ignore_errors: true # noqa: ignore-errors + +- name: Restart grafana + ansible.builtin.systemd: + name: "grafana.service" + state: "restarted" + enabled: true + daemon_reload: true + scope: "user" + changed_when: false + listen: "notification_restart_grafana" + ignore_errors: true # noqa: ignore-errors + +- name: Enable x509-exporter.service + ansible.builtin.systemd: + name: "x509-exporter.service" + state: "started" + enabled: true + daemon_reload: true + scope: "user" + changed_when: false + listen: 
"notification_restart_x509-exporter" + ignore_errors: true # noqa: ignore-errors diff --git a/ansible/roles/infra/tasks/services/set_ca_server.yaml b/ansible/roles/infra/tasks/services/set_ca_server.yaml new file mode 100644 index 0000000..22435bd --- /dev/null +++ b/ansible/roles/infra/tasks/services/set_ca_server.yaml @@ -0,0 +1,84 @@ +--- +- name: Set ca container subuid + ansible.builtin.set_fact: + ca_subuid: "100999" + +- name: Create ca directory + ansible.builtin.file: + path: "{{ node['home_path'] }}/containers/{{ item }}" + owner: "{{ ca_subuid }}" + group: "svadmins" + state: "directory" + mode: "0770" + loop: + - "ca" + - "ca/certs" + - "ca/secrets" + - "ca/config" + - "ca/db" + - "ca/templates" + become: true + +- name: Register secret value to podman secret + containers.podman.podman_secret: + name: "STEP_CA_PASSWORD" + data: "{{ hostvars['console']['ca']['intermediate']['password'] }}" + state: "present" + force: true + notify: "notification_restart_ca" + no_log: true + +- name: Deploy ca config files + ansible.builtin.template: + src: "{{ hostvars['console']['node']['config_path'] }}/services/containers/infra/ca/config/{{ item }}.j2" + dest: "{{ node['home_path'] }}/containers/ca/config/{{ item }}" + owner: "{{ ca_subuid }}" + group: "svadmins" + mode: "0400" + loop: + - "ca.json" + - "defaults.json" + become: true + notify: "notification_restart_ca" + +- name: Deploy ca certificate and key + ansible.builtin.copy: + content: | + {{ item.value }} + dest: "{{ item.path }}/{{ item.name }}" + owner: "{{ ca_subuid }}" + group: "svadmins" + mode: "{{ item.mode }}" + loop: + - name: "ilnmors_root_ca.crt" + value: "{{ hostvars['console']['ca']['root']['crt'] }}" + path: "{{ node['home_path'] }}/containers/ca/certs" + mode: "0440" + - name: "ilnmors_intermediate_ca.crt" + value: "{{ hostvars['console']['ca']['intermediate']['crt'] }}" + path: "{{ node['home_path'] }}/containers/ca/certs" + mode: "0440" + - name: "ilnmors_intermediate_ca.key" + value: "{{ 
hostvars['console']['ca']['intermediate']['key'] }}" + path: "{{ node['home_path'] }}/containers/ca/secrets" + mode: "0400" + become: true + notify: "notification_restart_ca" + no_log: true + +- name: Deploy container file + ansible.builtin.template: + src: "{{ hostvars['console']['node']['config_path'] }}/services/containers/infra/ca/ca.container.j2" + dest: "{{ node['home_path'] }}/.config/containers/systemd/ca.container" + owner: "{{ ansible_user }}" + group: "svadmins" + mode: "0644" + notify: "notification_restart_ca" + +- name: Enable ca + ansible.builtin.systemd: + name: "ca.service" + state: "started" + enabled: true + daemon_reload: true + scope: "user" diff --git a/ansible/roles/infra/tasks/services/set_grafana.yaml b/ansible/roles/infra/tasks/services/set_grafana.yaml new file mode 100644 index 0000000..4bfd735 --- /dev/null +++ b/ansible/roles/infra/tasks/services/set_grafana.yaml @@ -0,0 +1,89 @@ +--- +- name: Set grafana container subuid + ansible.builtin.set_fact: + grafana_subuid: "100471" + +- name: Create grafana directory + ansible.builtin.file: + path: "{{ node['home_path'] }}/containers/{{ item }}" + owner: "{{ grafana_subuid }}" + group: "svadmins" + state: "directory" + mode: "0770" + loop: + - "grafana" + - "grafana/data" + - "grafana/etc" + - "grafana/etc/provisioning" + - "grafana/etc/dashboards" + - "grafana/ssl" + become: true + +- name: Deploy root certificate and key + ansible.builtin.copy: + content: | + {{ hostvars['console']['ca']['root']['crt'] }} + dest: "{{ node['home_path'] }}/containers/grafana/ssl/ilnmors_root_ca.crt" + owner: "{{ grafana_subuid }}" + group: "svadmins" + mode: "0400" + become: true + notify: "notification_restart_grafana" + no_log: true + +- name: Register secret value to podman secret + containers.podman.podman_secret: + name: "{{ item.name }}" + data: "{{ item.value }}" + state: "present" + force: true + loop: + - name: "GF_DB_PASSWORD" + value: "{{ hostvars['console']['postgresql']['password']['grafana'] 
}}" + - name: "LDAP_BIND_PASSWORD" + value: "{{ hostvars['console']['ldap']['password']['grafana'] }}" + - name: "GF_ADMIN_PASSWORD" + value: "{{ hostvars['console']['grafana']['user']['password'] }}" + notify: "notification_restart_grafana" + no_log: true + +- name: Deploy configruation files + ansible.builtin.template: + src: "{{ hostvars['console']['node']['config_path'] }}/services/containers/infra/grafana/etc/{{ item }}.j2" + dest: "{{ node['home_path'] }}/containers/grafana/etc/{{ item }}" + owner: "{{ grafana_subuid }}" + group: "svadmins" + mode: "0400" + loop: + - "grafana.ini" + - "ldap.toml" + become: true + notify: "notification_restart_grafana" + no_log: true + +- name: Deploy provisioing and dashboard files + ansible.builtin.copy: + src: "{{ hostvars['console']['node']['config_path'] }}/services/containers/infra/grafana/etc/provisioning/" + dest: "{{ node['home_path'] }}/containers/grafana/etc/provisioning/" + owner: "{{ grafana_subuid }}" + group: "svadmins" + mode: "0400" + become: true + notify: "notification_restart_grafana" + +- name: Deploy container file + ansible.builtin.template: + src: "{{ hostvars['console']['node']['config_path'] }}/services/containers/infra/grafana/grafana.container.j2" + dest: "{{ node['home_path'] }}/.config/containers/systemd/grafana.container" + owner: "{{ ansible_user }}" + group: "svadmins" + mode: "0644" + notify: "notification_restart_grafana" + +- name: Enable grafana + ansible.builtin.systemd: + name: "grafana.service" + state: "started" + enabled: true + daemon_reload: true + scope: "user" diff --git a/ansible/roles/infra/tasks/services/set_ldap.yaml b/ansible/roles/infra/tasks/services/set_ldap.yaml new file mode 100644 index 0000000..16a1002 --- /dev/null +++ b/ansible/roles/infra/tasks/services/set_ldap.yaml @@ -0,0 +1,112 @@ +--- +- name: Set ldap container subuid + ansible.builtin.set_fact: + ldap_subuid: "100999" + +- name: Create ldap directory + ansible.builtin.file: + path: "{{ node['home_path'] 
}}/containers/{{ item }}" + owner: "{{ ldap_subuid }}" + group: "svadmins" + state: "directory" + mode: "0770" + loop: + - "ldap" + - "ldap/data" + - "ldap/ssl" + become: true + +- name: Deploy ldap certificate and key + ansible.builtin.copy: + content: | + {{ item.value }} + dest: "{{ node['home_path'] }}/containers/ldap/ssl/{{ item.name }}" + owner: "{{ ldap_subuid }}" + group: "svadmins" + mode: "{{ item.mode }}" + loop: + - name: "ilnmors_root_ca.crt" + value: "{{ hostvars['console']['ca']['root']['crt'] }}" + mode: "0440" + - name: "ldap.crt" + value: | + {{ hostvars['console']['ldap']['crt'] | trim }} + {{ hostvars['console']['ca']['intermediate']['crt'] }} + mode: "0440" + - name: "ldap.key" + value: "{{ hostvars['console']['ldap']['key'] }}" + mode: "0400" + become: true + notify: "notification_restart_ldap" + no_log: true + +- name: Register secret value to podman secret + containers.podman.podman_secret: + name: "{{ item.name }}" + data: "{{ item.value }}" + state: "present" + force: true + loop: + # urlencode doesn't fix `/` as `%2F`. 
It needs replace + - name: "LLDAP_DATABASE_URL" + value: "postgres://ldap:{{ hostvars['console']['postgresql']['password']['ldap'] | urlencode | replace('/', '%2F') }}\ + @{{ infra_uri['postgresql']['domain'] }}/ldap_db?sslmode=verify-full&sslrootcert=/etc/ssl/ldap/ilnmors_root_ca.crt" + - name: "LLDAP_KEY_SEED" + value: "{{ hostvars['console']['ldap']['seed_key'] }}" + - name: "LLDAP_JWT_SECRET" + value: "{{ hostvars['console']['ldap']['jwt_secret'] }}" + notify: "notification_restart_ldap" + no_log: true + +- name: Initiate ldap (When = false, If DB data does not exist in postgresql, activate this block) + when: false + become: true + block: + - name: Register extra secret value to podman secret + containers.podman.podman_secret: + name: "LLDAP_LDAP_USER_PASSWORD" + data: "{{ hostvars['console']['ldap']['password']['user'] }}" + state: "present" + force: true + # You must check the image version first (following container file on data/config/containers/infra/ldap/ldap.container) + + - name: Initiate ldap + containers.podman.podman_container: + name: "init_LLDAP" + image: "docker.io/lldap/lldap:{{ version['containers']['ldap'] }}" + rm: true + detach: false + env: + TZ: "Asia/Seoul" + LLDAP_LDAP_BASE_DN: "dc=ilnmors,dc=internal" + secrets: + - "LLDAP_DATABASE_URL,type=env" + - "LLDAP_KEY_SEED,type=env" + - "LLDAP_JWT_SECRET,type=env" + - "LLDAP_LDAP_USER_PASSWORD,type=env" + volumes: + - "{{ node['home_path'] }}/containers/ldap/data:/data:rw" + - "{{ node['home_path'] }}/containers/ldap/ssl:/etc/ssl/ldap:ro" + + always: + - name: Clean extra secret value from podman secret + containers.podman.podman_secret: + name: "LLDAP_LDAP_USER_PASSWORD" + state: "absent" + +- name: Deploy container file + ansible.builtin.template: + src: "{{ hostvars['console']['node']['config_path'] }}/services/containers/infra/ldap/ldap.container.j2" + dest: "{{ node['home_path'] }}/.config/containers/systemd/ldap.container" + owner: "{{ ansible_user }}" + group: "svadmins" + mode: "0644" + 
notify: "notification_restart_ldap" + +- name: Enable ldap + ansible.builtin.systemd: + name: "ldap.service" + state: "started" + enabled: true + daemon_reload: true + scope: "user" diff --git a/ansible/roles/infra/tasks/services/set_loki.yaml b/ansible/roles/infra/tasks/services/set_loki.yaml new file mode 100644 index 0000000..2d0f29e --- /dev/null +++ b/ansible/roles/infra/tasks/services/set_loki.yaml @@ -0,0 +1,70 @@ +--- +- name: Set loki container subuid + ansible.builtin.set_fact: + loki_subuid: "110000" # 10001 + +- name: Create loki directory + ansible.builtin.file: + path: "{{ node['home_path'] }}/containers/{{ item }}" + state: "directory" + owner: "{{ loki_subuid }}" + group: "svadmins" + mode: "0770" + loop: + - "loki" + - "loki/etc" + - "loki/data" + - "loki/ssl" + become: true + +- name: Deploy loki configuration file + ansible.builtin.copy: + src: "{{ hostvars['console']['node']['config_path'] }}/services/containers/infra/loki/etc/loki.yaml" + dest: "{{ node['home_path'] }}/containers/loki/etc/loki.yaml" + owner: "{{ loki_subuid }}" + group: "svadmins" + mode: "0600" + become: true + notify: "notification_restart_loki" + no_log: true + +- name: Deploy loki certificate and key + ansible.builtin.copy: + content: | + {{ item.value }} + dest: "{{ node['home_path'] }}/containers/loki/ssl/{{ item.name }}" + owner: "{{ loki_subuid }}" + group: "svadmins" + mode: "{{ item.mode }}" + loop: + - name: "ilnmors_root_ca.crt" + value: "{{ hostvars['console']['ca']['root']['crt'] }}" + mode: "0440" + - name: "loki.crt" + value: | + {{ hostvars['console']['loki']['crt'] | trim }} + {{ hostvars['console']['ca']['intermediate']['crt'] }} + mode: "0440" + - name: "loki.key" + value: "{{ hostvars['console']['loki']['key'] }}" + mode: "0400" + become: true + notify: "notification_restart_loki" + no_log: true + +- name: Deploy container file + ansible.builtin.template: + src: "{{ hostvars['console']['node']['config_path'] 
}}/services/containers/infra/loki/loki.container.j2" + dest: "{{ node['home_path'] }}/.config/containers/systemd/loki.container" + owner: "{{ ansible_user }}" + group: "svadmins" + mode: "0644" + notify: "notification_restart_loki" + +- name: Enable loki + ansible.builtin.systemd: + name: "loki.service" + state: "started" + enabled: true + daemon_reload: true + scope: "user" diff --git a/ansible/roles/infra/tasks/services/set_postgresql.yaml b/ansible/roles/infra/tasks/services/set_postgresql.yaml new file mode 100644 index 0000000..e68f90f --- /dev/null +++ b/ansible/roles/infra/tasks/services/set_postgresql.yaml @@ -0,0 +1,169 @@ +--- +- name: Set postgresql container subuid + ansible.builtin.set_fact: + postgresql_subuid: "100998" + +- name: Set connected services list + ansible.builtin.set_fact: + # telegraf has no database + connected_services: + - "ldap" + - "authelia" + - "grafana" + +- name: Create postgresql directory + ansible.builtin.file: + path: "{{ node['home_path'] }}/containers/{{ item }}" + state: "directory" + owner: "{{ postgresql_subuid }}" + group: "svadmins" + mode: "0770" + loop: + - "postgresql" + - "postgresql/data" + - "postgresql/config" + - "postgresql/ssl" + - "postgresql/init" + - "postgresql/backups" + - "postgresql/build" + become: true + +- name: Deploy containerfile for build + ansible.builtin.template: + src: "{{ hostvars['console']['node']['config_path'] }}/services/containers/infra/postgresql/build/postgresql.containerfile.j2" + dest: "{{ node['home_path'] }}/containers/postgresql/build/Containerfile" + owner: "{{ ansible_user }}" + group: "svadmins" + mode: "0640" + +- name: Build postgresql container image + containers.podman.podman_image: + name: "ilnmors.internal/{{ node['name'] }}/postgres" + # check tags from container file + tag: "pg{{ version['containers']['postgresql'] }}-vectorchord{{ version['containers']['vectorchord'] }}" + state: "build" + path: "{{ node['home_path'] }}/containers/postgresql/build" + +- name: Prune 
postgresql dangling images + containers.podman.podman_prune: + image: true + +- name: Deploy postgresql configuration files + ansible.builtin.template: + src: "{{ hostvars['console']['node']['config_path'] }}/services/containers/infra/postgresql/config/{{ item }}.j2" + dest: "{{ node['home_path'] }}/containers/postgresql/config/{{ item }}" + owner: "{{ postgresql_subuid }}" + group: "svadmins" + mode: "0600" + loop: + - "postgresql.conf" + - "pg_hba.conf" + become: true + notify: "notification_reload_postgresql" + no_log: true + +- name: Deploy postgresql certificate and key + ansible.builtin.copy: + content: | + {{ item.value }} + dest: "{{ node['home_path'] }}/containers/postgresql/ssl/{{ item.name }}" + owner: "{{ postgresql_subuid }}" + group: "svadmins" + mode: "{{ item.mode }}" + loop: + - name: "ilnmors_root_ca.crt" + value: "{{ hostvars['console']['ca']['root']['crt'] }}" + mode: "0440" + - name: "postgresql.crt" + value: | + {{ hostvars['console']['postgresql']['crt'] | trim }} + {{ hostvars['console']['ca']['intermediate']['crt'] }} + mode: "0440" + - name: "postgresql.key" + value: "{{ hostvars['console']['postgresql']['key'] }}" + mode: "0400" + become: true + notify: "notification_reload_postgresql" + no_log: true + +- name: Check data directory empty + ansible.builtin.find: + paths: "{{ node['home_path'] }}/containers/postgresql/data/" + hidden: true + file_type: "any" + become: true + register: "is_data_dir_empty" + +- name: Prepare initiating DB + when: is_data_dir_empty.matched == 0 + become: true + block: + # `init/pg_cluster.sql` should be fetched from postgresql's backup directory before running initiating + - name: Deploy init cluster sql file + ansible.builtin.copy: + src: "{{ hostvars['console']['node']['config_path'] }}/services/containers/infra/postgresql/init/pg_cluster.sql" + dest: "{{ node['home_path'] }}/containers/postgresql/init/0_pg_cluster.sql" + owner: "{{ postgresql_subuid }}" + group: "svadmins" + mode: "0600" + + - name: Deploy 
restoring data sql files + ansible.builtin.copy: + src: "{{ hostvars['console']['node']['config_path'] }}/services/containers/infra/postgresql/init/pg_{{ item }}.sql" + dest: "{{ node['home_path'] }}/containers/postgresql/init/{{ index_num + 1 }}_pg_{{ item }}.sql" + owner: "{{ postgresql_subuid }}" + group: "svadmins" + mode: "0600" + loop: "{{ connected_services }}" + loop_control: + index_var: index_num + - name: Set is_postgresql_init_run + ansible.builtin.set_fact: + is_postgresql_init_run: true + +- name: Deploy container file + ansible.builtin.template: + src: "{{ hostvars['console']['node']['config_path'] }}/services/containers/infra/postgresql/postgresql.container.j2" + dest: "{{ node['home_path'] }}/.config/containers/systemd/postgresql.container" + owner: "{{ ansible_user }}" + group: "svadmins" + mode: "0644" + notify: "notification_restart_postgresql" + +- name: Deploy backup service files + ansible.builtin.copy: + src: "{{ hostvars['console']['node']['config_path'] }}/services/containers/infra/postgresql/services/{{ item }}" + dest: "{{ node['home_path'] }}/.config/systemd/user/{{ item }}" + owner: "{{ ansible_user }}" + group: "svadmins" + mode: "0644" + loop: + - "postgresql-cluster-backup.service" + - "postgresql-cluster-backup.timer" + - "postgresql-data-backup@.service" + - "postgresql-data-backup@.timer" + +- name: Enable postgresql + ansible.builtin.systemd: + name: "postgresql.service" + state: "started" + enabled: true + daemon_reload: true + scope: "user" + +- name: Enable cluster backup timer + ansible.builtin.systemd: + name: "postgresql-cluster-backup.timer" + state: "started" + enabled: true + daemon_reload: true + scope: "user" + +- name: Enable data backup timer + ansible.builtin.systemd: + name: "postgresql-data-backup@{{ item }}.timer" + state: "started" + enabled: true + daemon_reload: true + scope: "user" + loop: "{{ connected_services }}" diff --git a/ansible/roles/infra/tasks/services/set_prometheus.yaml 
b/ansible/roles/infra/tasks/services/set_prometheus.yaml new file mode 100644 index 0000000..0edf970 --- /dev/null +++ b/ansible/roles/infra/tasks/services/set_prometheus.yaml @@ -0,0 +1,74 @@ +--- +- name: Set prometheus container subuid + ansible.builtin.set_fact: + prometheus_subuid: "165533" # nobody - 65534 + +- name: Create prometheus directory + ansible.builtin.file: + path: "{{ node['home_path'] }}/containers/{{ item }}" + state: "directory" + owner: "{{ prometheus_subuid }}" + group: "svadmins" + mode: "0770" + loop: + - "prometheus" + - "prometheus/etc" + - "prometheus/data" + - "prometheus/ssl" + become: true + +- name: Deploy prometheus configuration file + ansible.builtin.template: + src: "{{ hostvars['console']['node']['config_path'] }}/services/containers/infra/prometheus/etc/{{ item }}.j2" + dest: "{{ node['home_path'] }}/containers/prometheus/etc/{{ item }}" + owner: "{{ prometheus_subuid }}" + group: "svadmins" + mode: "0600" + loop: + - "prometheus.yaml" + - "rules.yaml" + - "web-config.yaml" + become: true + notify: "notification_restart_prometheus" + no_log: true + +- name: Deploy prometheus certificate and key + ansible.builtin.copy: + content: | + {{ item.value }} + dest: "{{ node['home_path'] }}/containers/prometheus/ssl/{{ item.name }}" + owner: "{{ prometheus_subuid }}" + group: "svadmins" + mode: "{{ item.mode }}" + loop: + - name: "ilnmors_root_ca.crt" + value: "{{ hostvars['console']['ca']['root']['crt'] }}" + mode: "0440" + - name: "prometheus.crt" + value: | + {{ hostvars['console']['prometheus']['crt'] | trim }} + {{ hostvars['console']['ca']['intermediate']['crt'] }} + mode: "0440" + - name: "prometheus.key" + value: "{{ hostvars['console']['prometheus']['key'] }}" + mode: "0400" + become: true + notify: "notification_restart_prometheus" + no_log: true + +- name: Deploy container file + ansible.builtin.template: + src: "{{ hostvars['console']['node']['config_path'] }}/services/containers/infra/prometheus/prometheus.container.j2" + 
dest: "{{ node['home_path'] }}/.config/containers/systemd/prometheus.container" + owner: "{{ ansible_user }}" + group: "svadmins" + mode: "0644" + notify: "notification_restart_prometheus" + +- name: Enable prometheus + ansible.builtin.systemd: + name: "prometheus.service" + state: "started" + enabled: true + daemon_reload: true + scope: "user" diff --git a/ansible/roles/infra/tasks/services/set_x509-exporter.yaml b/ansible/roles/infra/tasks/services/set_x509-exporter.yaml new file mode 100644 index 0000000..7d1a1d8 --- /dev/null +++ b/ansible/roles/infra/tasks/services/set_x509-exporter.yaml @@ -0,0 +1,63 @@ +--- +- name: Set x509-exporter container subuid + ansible.builtin.set_fact: + x509_exporter_subuid: "165533" # nobody - 65534 + +- name: Create x509-exporter directory + ansible.builtin.file: + path: "{{ node['home_path'] }}/containers/{{ item }}" + state: "directory" + owner: "{{ x509_exporter_subuid }}" + group: "svadmins" + mode: "0770" + loop: + - "x509-exporter" + - "x509-exporter/certs" + become: true + +- name: Deploy certificates + ansible.builtin.copy: + content: | + {{ item.value }} + dest: "{{ node['home_path'] }}/containers/x509-exporter/certs/{{ item.name }}" + owner: "{{ x509_exporter_subuid }}" + group: "svadmins" + mode: "0440" + loop: + - name: "root.crt" + value: "{{ hostvars['console']['ca']['root']['crt'] }}" + - name: "intermediate.crt" + value: "{{ hostvars['console']['ca']['intermediate']['crt'] }}" + - name: "crowdsec.crt" + value: "{{ hostvars['console']['crowdsec']['crt'] }}" + - name: "blocky.crt" + value: "{{ hostvars['console']['blocky']['crt'] }}" + - name: "postgresql.crt" + value: "{{ hostvars['console']['postgresql']['crt'] }}" + - name: "ldap.crt" + value: "{{ hostvars['console']['ldap']['crt'] }}" + - name: "prometheus.crt" + value: "{{ hostvars['console']['prometheus']['crt'] }}" + - name: "loki.crt" + value: "{{ hostvars['console']['loki']['crt'] }}" + - name: "dsm.crt" + value: "{{ hostvars['console']['dsm']['crt'] }}" + 
become: true + no_log: true + +- name: Deploy container file + ansible.builtin.template: + src: "{{ hostvars['console']['node']['config_path'] }}/services/containers/infra/x509-exporter/x509-exporter.container.j2" + dest: "{{ node['home_path'] }}/.config/containers/systemd/x509-exporter.container" + owner: "{{ ansible_user }}" + group: "svadmins" + mode: "0644" + notify: "notification_restart_x509-exporter" + +- name: Enable x509-exporter.service + ansible.builtin.systemd: + name: "x509-exporter.service" + state: "started" + enabled: true + daemon_reload: true + scope: "user" diff --git a/ansible/roles/vmm/tasks/node/set_libvirt.yaml b/ansible/roles/vmm/tasks/node/set_libvirt.yaml new file mode 100644 index 0000000..7f34aee --- /dev/null +++ b/ansible/roles/vmm/tasks/node/set_libvirt.yaml @@ -0,0 +1,92 @@ +--- +- name: Add user in libvirt group + ansible.builtin.user: + name: "{{ ansible_user }}" + state: "present" + groups: "libvirt, kvm, libvirt-qemu" + append: true + become: true + +- name: Check libvirt directory + ansible.builtin.stat: + path: "/var/lib/libvirt/{{ item }}" + loop: + - "images" + - "seeds" + register: "is_libvirt_dir" + +- name: Create libvirt directory + ansible.builtin.file: + path: "/var/lib/libvirt/{{ item.item }}" + state: "directory" + owner: "root" + group: "root" + mode: "0711" + loop: "{{ is_libvirt_dir.results }}" + when: not item.stat.exists + become: true + no_log: true + +- name: Set LIBVIRT_DEFAULT_URI + ansible.builtin.lineinfile: + path: "{{ node['home_path'] }}/.bashrc" + state: "present" + line: "export LIBVIRT_DEFAULT_URI='qemu:///system'" + regexp: '^export LIBVIRT_DEFAULT_URI=' + +- name: Define virtual networks + community.libvirt.virt_net: + name: "{{ item }}" + xml: "{{ lookup('file', hostvars['console']['node']['config_path'] + '/services/systemd/vmm/libvirt/xml/networks/' + item + '.xml') }}" + uri: "qemu:///system" + command: "define" + loop: + - "wan-net" + - "lan-net" + +- name: Start virtual networks + 
community.libvirt.virt_net: + name: "{{ item }}" + state: "active" + uri: "qemu:///system" + autostart: true + loop: + - "wan-net" + - "lan-net" + +- name: Autostart virtual networks + community.libvirt.virt_net: + name: "{{ item }}" + uri: "qemu:///system" + autostart: true + loop: + - "wan-net" + - "lan-net" + +- name: Define virtual storage pool + community.libvirt.virt_pool: + name: "{{ item }}" + xml: "{{ lookup('file', hostvars['console']['node']['config_path'] + '/services/systemd/vmm/libvirt/xml/storages/' + item + '.xml') }}" + uri: "qemu:///system" + command: "define" + loop: + - "images-pool" + - "seeds-pool" + +- name: Start virtual storage pool + community.libvirt.virt_pool: + name: "{{ item }}" + state: "active" + uri: "qemu:///system" + loop: + - "images-pool" + - "seeds-pool" + +- name: Autostart virtual storage pool + community.libvirt.virt_pool: + name: "{{ item }}" + uri: "qemu:///system" + autostart: true + loop: + - "images-pool" + - "seeds-pool" diff --git a/ansible/roles/vmm/tasks/vm/create_seed.yaml b/ansible/roles/vmm/tasks/vm/create_seed.yaml new file mode 100644 index 0000000..29ff167 --- /dev/null +++ b/ansible/roles/vmm/tasks/vm/create_seed.yaml @@ -0,0 +1,59 @@ +--- +# This task is located in vmm roles because of its attributes, +# but all process should be run in "console". +# At the playbook, `delegate_to: "console"` option is applied by `apply:`. 
+- name: Create images directory + ansible.builtin.file: + path: "{{ hostvars['console']['node']['data_path'] }}/images/seeds/{{ target_vm }}" + state: "directory" + owner: "console" + group: "svadmins" + mode: "0700" + +- name: Create temp meta-data + ansible.builtin.copy: + content: | + instance-id: vm-{{ target_vm }} + local-hostname: {{ target_vm }} + dest: "{{ hostvars['console']['node']['data_path'] }}/images/seeds/{{ target_vm }}/meta-data" + owner: "console" + group: "svadmins" + mode: "0600" + register: "vm_meta_data" + no_log: true + +- name: Create temp user-data + ansible.builtin.template: + src: "{{ hostvars['console']['node']['config_path'] }}/services/systemd/vmm/libvirt/seeds/user-data.j2" + dest: "{{ hostvars['console']['node']['data_path'] }}/images/seeds/{{ target_vm }}/user-data" + owner: "console" + group: "svadmins" + mode: "0600" + register: "vm_user_data" + no_log: true + +- name: Create temp network-config + ansible.builtin.copy: + content: | + network: {config: disabled} + dest: "{{ hostvars['console']['node']['data_path'] }}/images/seeds/{{ target_vm }}/network-config" + owner: "console" + group: "svadmins" + mode: "0600" + register: "vm_network_config" + no_log: true + +- name: Check seed.iso + ansible.builtin.stat: + path: "{{ hostvars['console']['node']['data_path'] }}/images/seeds/{{ target_vm }}/seed.iso" + register: "is_seediso" + +- name: Create seed.iso + ansible.builtin.shell: + cmd: | + cloud-localds -N {{ hostvars['console']['node']['data_path'] }}/images/seeds/{{ target_vm }}/network-config \ + {{ hostvars['console']['node']['data_path'] }}/images/seeds/{{ target_vm }}/seed.iso \ + {{ hostvars['console']['node']['data_path'] }}/images/seeds/{{ target_vm }}/user-data \ + {{ hostvars['console']['node']['data_path'] }}/images/seeds/{{ target_vm }}/meta-data + when: vm_meta_data.changed or vm_user_data.changed or vm_network_config.changed or not is_seediso.stat.exists + changed_when: true diff --git 
a/ansible/roles/vmm/tasks/vm/deploy_vm_init.yaml b/ansible/roles/vmm/tasks/vm/deploy_vm_init.yaml new file mode 100644 index 0000000..97b46b4 --- /dev/null +++ b/ansible/roles/vmm/tasks/vm/deploy_vm_init.yaml @@ -0,0 +1,55 @@ +--- +- name: Check vm cloud-init + ansible.builtin.stat: + path: "/var/lib/libvirt/images/debian-13.qcow2" + become: true + register: is_cloud_init_file + +- name: Deploy vm cloud-init + ansible.builtin.copy: + src: "{{ hostvars['console']['node']['data_path'] }}/images/debian-13-generic-amd64.qcow2" + dest: "/var/lib/libvirt/images/debian-13.qcow2" + owner: "{{ ansible_user }}" + group: "svadmins" + mode: "0644" + force: false + become: true + when: not is_cloud_init_file.stat.exists + +- name: Remote copy vm cloud-init file + ansible.builtin.copy: + src: "/var/lib/libvirt/images/debian-13.qcow2" + dest: "/var/lib/libvirt/images/{{ target_vm }}.qcow2" + owner: "{{ ansible_user }}" + group: "svadmins" + mode: "0644" + remote_src: true + force: false + become: true + +- name: Check deployed cloud-init file info + ansible.builtin.command: + cmd: "qemu-img info /var/lib/libvirt/images/{{ target_vm }}.qcow2 --output json" + changed_when: false + failed_when: + - deployed_cloudfile_info.rc != 0 + - ("lock") not in deployed_cloudfile_info.stderr + register: "deployed_cloudfile_info" + +- name: Resize deployed cloud-init file + ansible.builtin.command: + cmd: "qemu-img resize /var/lib/libvirt/images/{{ target_vm }}.qcow2 {{ hostvars[target_vm]['vm']['storage'] }}G" + when: + - deployed_cloudfile_info.rc == 0 + - (deployed_cloudfile_info.stdout | from_json)['virtual-size'] < (hostvars[target_vm]['vm']['storage'] | int * 1024 * 1024 * 1024) + changed_when: true + +- name: Deploy vm seed.iso + ansible.builtin.copy: + src: "{{ hostvars['console']['node']['data_path'] }}/images/seeds/{{ target_vm }}/seed.iso" + dest: "/var/lib/libvirt/seeds/{{ target_vm }}_seed.iso" + owner: "{{ ansible_user }}" + group: "svadmins" + mode: "0644" + become: true + when: 
deployed_cloudfile_info.rc == 0 diff --git a/ansible/roles/vmm/tasks/vm/register_vm.yaml b/ansible/roles/vmm/tasks/vm/register_vm.yaml new file mode 100644 index 0000000..1d0f6af --- /dev/null +++ b/ansible/roles/vmm/tasks/vm/register_vm.yaml @@ -0,0 +1,24 @@ +--- +- name: Register VM xml file + community.libvirt.virt: + name: "{{ target_vm }}" + xml: | + {{ lookup('template', hostvars['console']['node']['config_path'] + '/services/systemd/vmm/libvirt/xml/vms/vms.xml.j2') }} + uri: "qemu:///system" + command: define + +- name: Deploy VM systemd file + ansible.builtin.copy: + src: "{{ hostvars['console']['node']['config_path'] }}/services/systemd/vmm/libvirt/services/{{ target_vm }}.service" + dest: "{{ node['home_path'] }}/.config/systemd/user/{{ target_vm }}.service" + owner: "{{ ansible_user }}" + group: "svadmins" + mode: "0400" + +- name: Register VM service + ansible.builtin.systemd: + name: "{{ target_vm }}.service" + state: "started" + enabled: true + daemon_reload: true + scope: "user" diff --git a/config/node/app/nftables.conf.j2 b/config/node/app/nftables.conf.j2 new file mode 100644 index 0000000..6a33d3d --- /dev/null +++ b/config/node/app/nftables.conf.j2 @@ -0,0 +1,38 @@ +#!/usr/sbin/nft -f +flush ruleset + +define NET4_SERVER = {{ hostvars['fw']['network4']['subnet']['server'] }} +define NET6_SERVER = {{ hostvars['fw']['network6']['subnet']['server'] }} +define HOSTS4_CONSOLE = { {{ hostvars['fw']['network4']['console'].values() | join(', ') }} } +define HOSTS6_CONSOLE = { {{ hostvars['fw']['network6']['console'].values() | join(', ') }} } +define PORTS_SSH = 22 + +table inet nat { + chain prerouting { + type nat hook prerouting priority dstnat; policy accept; + } + chain postrouting { + + } + chain output { + type nat hook output priority dstnat; policy accept; + } +} + +table inet filter { + chain input { + type filter hook input priority 0; policy drop; + ct state invalid drop comment "deny invalid connection" + ct state established, related 
accept comment "allow all connection already existing" + iifname "lo" accept comment "allow local connection" + meta l4proto { icmp, icmpv6 } accept comment "allow icmp connection" + ip saddr $HOSTS4_CONSOLE tcp dport $PORTS_SSH accept comment "allow ipv4 ssh connection: CONSOLE > APP" + ip6 saddr $HOSTS6_CONSOLE tcp dport $PORTS_SSH accept comment "allow ipv6 ssh connection: CONSOLE > APP" + } + chain forward { + type filter hook forward priority 0; policy drop; + } + chain output { + type filter hook output priority 0; policy accept; + } +} diff --git a/config/node/auth/nftables.conf.j2 b/config/node/auth/nftables.conf.j2 new file mode 100644 index 0000000..6b336a5 --- /dev/null +++ b/config/node/auth/nftables.conf.j2 @@ -0,0 +1,48 @@ +#!/usr/sbin/nft -f +flush ruleset + +define NET4_SERVER = {{ hostvars['fw']['network4']['subnet']['server'] }} +define NET6_SERVER = {{ hostvars['fw']['network6']['subnet']['server'] }} +define HOSTS4_CONSOLE = { {{ hostvars['fw']['network4']['console'].values() | join(', ') }} } +define HOSTS6_CONSOLE = { {{ hostvars['fw']['network6']['console'].values() | join(', ') }} } +define PORTS_SSH = 22 +define PORTS_HTTP = 80 +define PORTS_HTTP_FORWARD = 2080 +define PORTS_HTTPS = 443 +define PORTS_HTTPS_FORWARD = 2443 + +table inet nat { + chain prerouting { + type nat hook prerouting priority dstnat; policy accept; + tcp dport $PORTS_HTTP dnat to :$PORTS_HTTP_FORWARD comment "dnat http ports to $PORTS_HTTP_FORWARD" + tcp dport $PORTS_HTTPS dnat to :$PORTS_HTTPS_FORWARD comment "dnat https ports to $PORTS_HTTPS_FORWARD" + } + chain postrouting { + + } + chain output { + type nat hook output priority dstnat; policy accept; + oifname "lo" tcp dport $PORTS_HTTP dnat to :$PORTS_HTTP_FORWARD comment "dnat http ports to $PORTS_HTTP_FORWARD out of LOCALHOST" + oifname "lo" tcp dport $PORTS_HTTPS dnat to :$PORTS_HTTPS_FORWARD comment "dnat https ports to $PORTS_HTTPS_FORWARD out of LOCALHOST" + } +} + +table inet filter { + chain input { + type 
filter hook input priority 0; policy drop; + ct state invalid drop comment "deny invalid connection" + ct state established, related accept comment "allow all connection already existing" + iifname "lo" accept comment "allow local connection: AUTH > AUTH" + meta l4proto { icmp, icmpv6 } accept comment "allow icmp connection: AUTH" + ip saddr $HOSTS4_CONSOLE tcp dport $PORTS_SSH accept comment "allow ipv4 ssh connection: CONSOLE > AUTH" + ip6 saddr $HOSTS6_CONSOLE tcp dport $PORTS_SSH accept comment "allow ipv6 ssh connection: CONSOLE > AUTH" + tcp dport $PORTS_HTTP_FORWARD ct original proto-dst $PORTS_HTTP accept comment "allow ipv4, 6 http connection: > AUTH" + tcp dport $PORTS_HTTPS_FORWARD ct original proto-dst $PORTS_HTTPS accept comment "allow ipv4, 6 https connection: > AUTH" + } + chain forward { + type filter hook forward priority 0; policy drop; + } + chain output { + type filter hook output priority 0; policy accept; + } +} diff --git a/config/node/common/hosts.j2 b/config/node/common/hosts.j2 new file mode 100644 index 0000000..00f79e0 --- /dev/null +++ b/config/node/common/hosts.j2 @@ -0,0 +1,34 @@ +# localhost +127.0.0.1 {{ node['local_san'] }} +::1 {{ node['local_san'] }} +{% if node['name'] == 'console' %} +# Hosts IPv4 +{{ hostvars['fw']['network4']['firewall']['server'] }} fw.ilnmors.internal +{{ hostvars['fw']['network4']['vmm']['client'] }} init.vmm.ilnmors.internal +{{ hostvars['fw']['network4']['vmm']['server'] }} vmm.ilnmors.internal +{{ hostvars['fw']['network4']['infra']['server'] }} infra.ilnmors.internal +{{ hostvars['fw']['network4']['auth']['server'] }} auth.ilnmors.internal +{{ hostvars['fw']['network4']['app']['server'] }} app.ilnmors.internal +# Hosts IPv6 +{{ hostvars['fw']['network6']['firewall']['server'] }} fw.ilnmors.internal +{{ hostvars['fw']['network6']['vmm']['client'] }} init.vmm.ilnmors.internal +{{ hostvars['fw']['network6']['vmm']['server'] }} vmm.ilnmors.internal +{{ hostvars['fw']['network6']['infra']['server'] }} 
infra.ilnmors.internal +{{ hostvars['fw']['network6']['auth']['server'] }} auth.ilnmors.internal +{{ hostvars['fw']['network6']['app']['server'] }} app.ilnmors.internal +{% else %} +# IPv4 +# Crowdsec, blocky, bind(fw) +{{ hostvars['fw']['network4']['firewall']['server'] }} ntp.ilnmors.internal crowdsec.ilnmors.internal +{{ hostvars['fw']['network4']['blocky']['server'] }} blocky.ilnmors.internal +{{ hostvars['fw']['network4']['bind']['server'] }} bind.ilnmors.internal +# DB, LDAP, CA, Prometheus, Loki, mail (infra) +{{ hostvars['fw']['network4']['infra']['server'] }} postgresql.ilnmors.internal ldap.ilnmors.internal prometheus.ilnmors.internal loki.ilnmors.internal mail.ilnmors.internal ca.ilnmors.internal +# IPv6 +# Crowdsec, blocky, bind(fw) +{{ hostvars['fw']['network6']['firewall']['server'] }} ntp.ilnmors.internal crowdsec.ilnmors.internal +{{ hostvars['fw']['network6']['blocky']['server'] }} blocky.ilnmors.internal +{{ hostvars['fw']['network6']['bind']['server'] }} bind.ilnmors.internal +# DB, LDAP, CA, Prometheus, Loki, mail (infra) +{{ hostvars['fw']['network6']['infra']['server'] }} postgresql.ilnmors.internal ldap.ilnmors.internal prometheus.ilnmors.internal loki.ilnmors.internal mail.ilnmors.internal ca.ilnmors.internal +{% endif %} diff --git a/config/node/common/networkd/00-eth0.link b/config/node/common/networkd/00-eth0.link new file mode 100644 index 0000000..b8bce6a --- /dev/null +++ b/config/node/common/networkd/00-eth0.link @@ -0,0 +1,5 @@ +[Match] +MACAddress={{ hostvars[target_vm]['vm']['lan_mac'] }} + +[Link] +Name=eth0 \ No newline at end of file diff --git a/config/node/common/networkd/20-eth0.network b/config/node/common/networkd/20-eth0.network new file mode 100644 index 0000000..2fb5337 --- /dev/null +++ b/config/node/common/networkd/20-eth0.network @@ -0,0 +1,13 @@ +[Match] +Name=eth0 + +[Network] +# IPv4 +Address={{ hostvars['fw']['network4'][target_vm]['server'] }}/24 +Gateway={{ hostvars['fw']['network4']['firewall']['server'] }} 
+DNS={{ hostvars['fw']['network4']['blocky']['server'] }} +# IPv6 +IPv6AcceptRA=false +Address={{ hostvars['fw']['network6'][target_vm]['server'] }}/64 +Gateway={{ hostvars['fw']['network6']['firewall']['server'] }} +DNS={{ hostvars['fw']['network6']['blocky']['server'] }} diff --git a/config/node/common/resolved/global.conf.j2 b/config/node/common/resolved/global.conf.j2 new file mode 100644 index 0000000..4a68fab --- /dev/null +++ b/config/node/common/resolved/global.conf.j2 @@ -0,0 +1,6 @@ +[Resolve] +{% if node['name'] in ['vmm', 'fw'] %} +DNS=1.1.1.2 1.0.0.2 +DNS=2606:4700:4700::1112 2606:4700:4700::1002 +{% endif %} +cache=false \ No newline at end of file diff --git a/config/node/common/ssh/host_certificate.conf b/config/node/common/ssh/host_certificate.conf new file mode 100644 index 0000000..0529da4 --- /dev/null +++ b/config/node/common/ssh/host_certificate.conf @@ -0,0 +1,2 @@ +HostKey /etc/ssh/ssh_host_ed25519_key +HostCertificate /etc/ssh/ssh_host_ed25519_key-cert.pub diff --git a/config/node/common/ssh/prohibit_root.conf b/config/node/common/ssh/prohibit_root.conf new file mode 100644 index 0000000..7ff52c7 --- /dev/null +++ b/config/node/common/ssh/prohibit_root.conf @@ -0,0 +1 @@ +PermitRootLogin no diff --git a/config/node/common/ssh/ssh_ca.conf b/config/node/common/ssh/ssh_ca.conf new file mode 100644 index 0000000..10c8405 --- /dev/null +++ b/config/node/common/ssh/ssh_ca.conf @@ -0,0 +1 @@ +TrustedUserCAKeys /etc/ssh/local_ssh_ca.pub diff --git a/config/node/common/timesyncd/local-ntp.conf b/config/node/common/timesyncd/local-ntp.conf new file mode 100644 index 0000000..8aae667 --- /dev/null +++ b/config/node/common/timesyncd/local-ntp.conf @@ -0,0 +1,3 @@ +[Time] +NTP=ntp.ilnmors.internal +FallbackNTP=0.debian.pool.ntp.org 1.debian.pool.ntp.org 2.debian.pool.ntp.org 3.debian.pool.ntp.org diff --git a/config/node/fw/networkd/00-fw-wan.link b/config/node/fw/networkd/00-fw-wan.link new file mode 100644 index 0000000..3676e5e --- /dev/null +++ 
b/config/node/fw/networkd/00-fw-wan.link @@ -0,0 +1,5 @@ +[Match] +MACAddress={{ hostvars['fw']['vm']['wan_mac'] }} + +[Link] +Name=wan diff --git a/config/node/fw/networkd/01-fw-client.link b/config/node/fw/networkd/01-fw-client.link new file mode 100644 index 0000000..5f35dc3 --- /dev/null +++ b/config/node/fw/networkd/01-fw-client.link @@ -0,0 +1,5 @@ +[Match] +MACAddress={{ hostvars['fw']['vm']['lan_mac'] }} + +[Link] +Name=client diff --git a/config/node/fw/networkd/10-fw-server.netdev b/config/node/fw/networkd/10-fw-server.netdev new file mode 100644 index 0000000..b982dea --- /dev/null +++ b/config/node/fw/networkd/10-fw-server.netdev @@ -0,0 +1,6 @@ +[NetDev] +Name=server +Kind=vlan + +[VLAN] +Id=10 diff --git a/config/node/fw/networkd/11-fw-user.netdev b/config/node/fw/networkd/11-fw-user.netdev new file mode 100644 index 0000000..4b0333a --- /dev/null +++ b/config/node/fw/networkd/11-fw-user.netdev @@ -0,0 +1,6 @@ +[NetDev] +Name=user +Kind=vlan + +[VLAN] +Id=20 diff --git a/config/node/fw/networkd/20-fw-wan.network b/config/node/fw/networkd/20-fw-wan.network new file mode 100644 index 0000000..38cf170 --- /dev/null +++ b/config/node/fw/networkd/20-fw-wan.network @@ -0,0 +1,16 @@ +[Match] +Name=wan + +[Network] +DHCP=true +IPv6AcceptRA=true +IPForward=true +RequiredForOnline=false + +[DHCPv4] +UseDNS=false + +[DHCPv6] +WithoutRA=solicit +PrefixDelegationHint=yes +UseDNS=false diff --git a/config/node/fw/networkd/21-fw-client.network b/config/node/fw/networkd/21-fw-client.network new file mode 100644 index 0000000..c73d274 --- /dev/null +++ b/config/node/fw/networkd/21-fw-client.network @@ -0,0 +1,16 @@ +[Match] +Name=client + +[Network] +# General +IPForward=true +IPv6SendRA=false +IPv6AcceptRA=false +VLAN=server +VLAN=user +# IPv4 +Address={{ hostvars['fw']['network4']['firewall']['client'] }}/24 +DNS={{ hostvars['fw']['network4']['blocky']['server'] }} +# IPv6 +Address={{ hostvars['fw']['network6']['firewall']['client'] }}/64 +DNS={{ 
hostvars['fw']['network6']['blocky']['server'] }} diff --git a/config/node/fw/networkd/22-fw-server.network b/config/node/fw/networkd/22-fw-server.network new file mode 100644 index 0000000..f4924f5 --- /dev/null +++ b/config/node/fw/networkd/22-fw-server.network @@ -0,0 +1,24 @@ +[Match] +Name=server + +[Network] +IPForward=true +IPv6SendRA=false +IPv6AcceptRA=false +# IPv4 +Address={{ hostvars['fw']['network4']['firewall']['server'] }}/24 +DNS={{ hostvars['fw']['network4']['blocky']['server'] }} +# IPv6 +Address={{ hostvars['fw']['network6']['firewall']['server'] }}/64 +DNS={{ hostvars['fw']['network6']['blocky']['server'] }} + +[Address] +Address={{ hostvars['fw']['network4']['blocky']['server'] }}/24 +[Address] +Address={{ hostvars['fw']['network4']['bind']['server'] }}/24 +[Address] +Address={{ hostvars['fw']['network6']['blocky']['server'] }}/64 +PreferredLifetime=0 +[Address] +Address={{ hostvars['fw']['network6']['bind']['server'] }}/64 +PreferredLifetime=0 diff --git a/config/node/fw/networkd/23-fw-user.network b/config/node/fw/networkd/23-fw-user.network new file mode 100644 index 0000000..47f3b4b --- /dev/null +++ b/config/node/fw/networkd/23-fw-user.network @@ -0,0 +1,25 @@ +[Match] +Name=user + +[Network] +IPForward=true +IPv6PrefixDelegation=true +IPv6SendRA=true +IPv6SendRAExtension=false +# IPv4 +Address={{ hostvars['fw']['network4']['firewall']['user'] }}/24 +DNS={{ hostvars['fw']['network4']['blocky']['server'] }} + +[IPv6PrefixDelegation] +SubnetId=20 +# A-Flag: Enable SLAAC +AddressAutoconfiguration=true +OnLink=true + +[IPv6SendRA] +# M-Flag: Client IP from DHCPv6 +Managed=false +# O-Flag: Other information from DHCPv6 +OtherInformation=false +EmitDNS=true +DNS={{ hostvars['fw']['network6']['blocky']['server'] }} diff --git a/config/node/fw/nftables.conf.j2 b/config/node/fw/nftables.conf.j2 new file mode 100644 index 0000000..f5d6242 --- /dev/null +++ b/config/node/fw/nftables.conf.j2 @@ -0,0 +1,186 @@ +#!/usr/sbin/nft -f +# Convention +#
iifname oifname saddr daddr proto dport ct state action / Ellipsis if you can something +flush ruleset + +define IF_WAN = "wan" +define IF_CLIENT = "client" +define IF_SERVER = "server" +define IF_USER = "user" +define IF_WG = "wg0" + +define NET4_CLIENT = {{ hostvars['fw']['network4']['subnet']['client'] }} +define NET4_SERVER = {{ hostvars['fw']['network4']['subnet']['server'] }} +define NET4_USER = {{ hostvars['fw']['network4']['subnet']['user'] }} +define NET4_WG = {{ hostvars['fw']['network4']['subnet']['wg'] }} +define NET4_LLA = {{ hostvars['fw']['network4']['subnet']['lla'] }} +define NET4_RFC1918 = { 10.0.0.0/8, 172.16.0.0/12, 192.168.0.0/16 } + +define NET6_CLIENT = {{ hostvars['fw']['network6']['subnet']['client'] }} +define NET6_SERVER = {{ hostvars['fw']['network6']['subnet']['server'] }} +define NET6_WG = {{ hostvars['fw']['network6']['subnet']['wg'] }} +define NET6_LLA = {{ hostvars['fw']['network6']['subnet']['lla'] }} + +define HOSTS4_FW = { {{ hostvars['fw']['network4']['firewall'].values() | join(', ') }} } +define HOSTS4_BLOCKY = {{ hostvars['fw']['network4']['blocky']['server'] }} +define HOSTS4_BIND = {{ hostvars['fw']['network4']['bind']['server'] }} +define HOSTS4_CONSOLE = { {{ hostvars['fw']['network4']['console'].values() | join(', ') }} } +define HOSTS4_VMM = { {{ hostvars['fw']['network4']['vmm'].values() | join(', ') }} } +define HOSTS4_INFRA = {{ hostvars['fw']['network4']['infra']['server'] }} +define HOSTS4_AUTH = {{ hostvars['fw']['network4']['auth']['server'] }} +define HOSTS4_APP = {{ hostvars['fw']['network4']['app']['server'] }} +define HOSTS4_NAS = {{ hostvars['fw']['network4']['nas']['client'] }} + +define HOSTS6_FW = { {{ hostvars['fw']['network6']['firewall'].values() | join(', ') }} } +define HOSTS6_BLOCKY = {{ hostvars['fw']['network6']['blocky']['server'] }} +define HOSTS6_BIND = {{ hostvars['fw']['network6']['bind']['server'] }} +define HOSTS6_CONSOLE = { {{ hostvars['fw']['network6']['console'].values() | join(', ') }} 
} +define HOSTS6_VMM = { {{ hostvars['fw']['network6']['vmm'].values() | join(', ') }} } +define HOSTS6_INFRA = {{ hostvars['fw']['network6']['infra']['server'] }} +define HOSTS6_AUTH = {{ hostvars['fw']['network6']['auth']['server'] }} +define HOSTS6_APP = {{ hostvars['fw']['network6']['app']['server'] }} +define HOSTS6_NAS = {{ hostvars['fw']['network6']['nas']['client'] }} + +define PORTS_SSH = 22 +define PORTS_WEB = { 80, 443 } +define PORTS_DHCP = { 67, 68, 546, 547 } +define PORTS_DNS = 53 +define PORTS_NTP = 123 +define PORTS_VPN = 11290 +define PORTS_CROWDSEC = 8080 +define PORTS_NAS = { 5000, 5001 } +define PORTS_KOPIA = 51515 + +table inet nat { + chain prerouting { + type nat hook prerouting priority dstnat; policy accept; + # After prerouting, accept forward chain WAN + iifname $IF_WAN meta nfproto ipv4 tcp dport $PORTS_WEB dnat to $HOSTS4_AUTH comment "DNAT44 ipv4 web connection: WAN > FW > SERVER AUTH" + iifname $IF_WAN meta nfproto ipv6 tcp dport $PORTS_WEB dnat to $HOSTS6_AUTH comment "DNAT66 ipv6 web connection: WAN > FW > SERVER AUTH" + } + chain postrouting { + type nat hook postrouting priority srcnat; policy accept; + # Masquerade the packet + oifname $IF_WAN meta nfproto ipv4 masquerade comment "masquerade ipv4 wan connection: > FW > WAN" + # $IF_USER uses GUA on IPv6 + iifname { $IF_CLIENT, $IF_SERVER, $IF_WG } oifname $IF_WAN meta nfproto ipv6 masquerade comment "masquerade ipv6 wan connection: CLIENT/SERVER/WG > FW > WAN" + } + chain output { + } +} + +table inet filter { + set crowdsec-blacklists { + type ipv4_addr + flags timeout + } + set crowdsec6-blacklists { + type ipv6_addr + flags timeout + } + chain global { + # invalid packets + ct state invalid drop comment "deny invalid connection" + # crowdsec + ip saddr @crowdsec-blacklists counter drop comment "deny all crowdsec blacklist" + ip6 saddr @crowdsec6-blacklists counter drop comment "deny all ipv6 crowdsec blacklist" + # fw + ct state established, related accept comment "allow all 
connection already existing" + ip6 saddr $NET6_LLA return comment "return ipv6 linklocaladdress to input and forward chain" + iifname $IF_WAN tcp dport $PORTS_SSH drop comment "deny ssh connection: WAN !> " + iifname $IF_WAN udp dport $PORTS_DNS drop comment "deny udp dns connection: WAN !> " + iifname $IF_WAN tcp dport $PORTS_DNS drop comment "deny tcp dns connection: WAN !> " + iifname $IF_WAN icmp type echo-request drop comment "deny icmp echo connection (Ping): WAN !>" + iifname $IF_WAN icmpv6 type echo-request drop comment "deny icmpv6 echo connection (Ping): WAN !>" + iifname $IF_WAN meta l4proto { icmp, icmpv6 } accept comment "allow icmp, icmpv6 connection: WAN >" + iifname $IF_WAN ip saddr $NET4_RFC1918 drop comment "deny ipv4 all connection: WAN RFC1918 !>" + iifname $IF_WAN ip saddr $NET4_LLA drop comment "deny ipv4 all connection: WAN APIPA(bogon) !>" + iifname { $IF_CLIENT, $IF_SERVER, $IF_USER } udp dport $PORTS_DHCP accept comment "allow dhcp4, dhcp6 connection: CLIENT/SERVER/USER > FW" + iifname $IF_CLIENT ip saddr != $NET4_CLIENT drop comment "deny ipv4 all connection: CLIENT !CLIENT !>" + iifname $IF_CLIENT ip6 saddr != $NET6_CLIENT drop comment "deny ipv6 all connection: CLIENT !CLIENT !>" + iifname $IF_SERVER ip saddr != $NET4_SERVER drop comment "deny ipv4 all connection: SERVER !SERVER !>" + iifname $IF_SERVER ip6 saddr != $NET6_SERVER drop comment "deny ipv6 all connection: SERVER !SERVER !>" + # IF_USER uses GUA on ipv6, so ipv6 rule is not needed + iifname $IF_USER ip saddr != $NET4_USER drop comment "deny ipv4 all connection: USER !USER !>" + iifname $IF_WG ip saddr != $NET4_WG drop comment "deny all ipv4 connection: WG !WG !>" + iifname $IF_WG ip6 saddr != $NET6_WG drop comment "deny all ipv6 connection: WG !WG !>" + } + chain input { + type filter hook input priority filter; policy drop; + jump global comment "set global condition" + iifname "lo" accept comment "allow local connection: FW > FW" + udp dport $PORTS_VPN accept comment 
"allow vpn connection: > FW" + iifname { $IF_CLIENT, $IF_SERVER, $IF_USER, $IF_WG } meta l4proto { icmp, icmpv6 } accept comment "allow icmp, icmpv6 connection: CLIENT/SERVER/USER/WG > FW" + iifname { $IF_CLIENT, $IF_SERVER, $IF_USER, $IF_WG } udp dport $PORTS_NTP accept comment "allow ntp connection: CLIENT/SERVER/USER/WG > FW" + # Global chain contains "WAN !> :SSH_PORT" + ip saddr $HOSTS4_CONSOLE tcp dport $PORTS_SSH accept comment "allow ipv4 ssh connection: CONSOLE > FW" + ip6 saddr $HOSTS6_CONSOLE tcp dport $PORTS_SSH accept comment "allow ipv6 ssh connection: CONSOLE > FW" + ip saddr { $HOSTS4_VMM, $HOSTS4_INFRA, $HOSTS4_AUTH, $HOSTS4_APP } tcp dport $PORTS_CROWDSEC accept comment "allow ipv4 crowdsec lapi connection: SERVER > FW" + ip6 saddr { $HOSTS6_VMM, $HOSTS6_INFRA, $HOSTS6_AUTH, $HOSTS6_APP } tcp dport $PORTS_CROWDSEC accept comment "allow ipv6 crowdsec lapi connection: SERVER > FW" + # Global chain contains "WAN !> :DNS_PORT" + ip daddr $HOSTS4_BLOCKY udp dport $PORTS_DNS accept comment "allow ipv4 udp dns connection: !WAN > SERVER BLOCKY(FW)" + ip daddr $HOSTS4_BLOCKY tcp dport $PORTS_DNS accept comment "allow ipv4 tcp dns connection: !WAN > SERVER BLOCKY(FW)" + ip6 daddr $HOSTS6_BLOCKY udp dport $PORTS_DNS accept comment "allow ipv6 udp dns connection: !WAN > SERVER BLOCKY(FW)" + ip6 daddr $HOSTS6_BLOCKY tcp dport $PORTS_DNS accept comment "allow ipv6 tcp dns connection: !WAN > SERVER BLOCKY(FW)" + ip saddr { $HOSTS4_INFRA, $HOSTS4_AUTH, $HOSTS4_APP } ip daddr $HOSTS4_BIND udp dport $PORTS_DNS accept comment "allow ipv4 udp dns connection (nsupdate): SERVER INFRA/AUTH/APP > BIND9(FW)" + ip saddr { $HOSTS4_INFRA, $HOSTS4_AUTH, $HOSTS4_APP } ip daddr $HOSTS4_BIND tcp dport $PORTS_DNS accept comment "allow ipv4 tcp dns connection (nsupdate): SERVER INFRA/AUTH/APP > BIND9(FW)" + ip6 saddr { $HOSTS6_INFRA, $HOSTS6_AUTH, $HOSTS6_APP } ip6 daddr $HOSTS6_BIND udp dport $PORTS_DNS accept comment "allow ipv6 udp dns connection (nsupdate): SERVER 
INFRA/AUTH/APP > BIND9(FW)" + ip6 saddr { $HOSTS6_INFRA, $HOSTS6_AUTH, $HOSTS6_APP } ip6 daddr $HOSTS6_BIND tcp dport $PORTS_DNS accept comment "allow ipv6 tcp dns connection (nsupdate): SERVER INFRA/AUTH/APP > BIND9(FW)" + } + chain forward { + type filter hook forward priority filter; policy drop; + + jump global comment "set global condition" + # ICMP + ip saddr $HOSTS4_CONSOLE meta l4proto icmp accept comment "allow icmp connection: CONSOLE > FW >" + ip6 saddr $HOSTS6_CONSOLE meta l4proto icmpv6 accept comment "allow icmpv6 connection: CONSOLE > FW >" + # SSH connection + ip saddr $HOSTS4_CONSOLE tcp dport $PORTS_SSH accept comment "allow ipv4 ssh connection: CONSOLE > FW >" + ip6 saddr $HOSTS6_CONSOLE tcp dport $PORTS_SSH accept comment "allow ipv6 ssh connection: CONSOLE > FW >" + # Reverse proxy (WAN) + oifname $IF_SERVER ip daddr $HOSTS4_AUTH tcp dport $PORTS_WEB accept comment "allow ipv4 web connection: > FW > SERVER AUTH" + oifname $IF_SERVER ip6 daddr $HOSTS6_AUTH tcp dport $PORTS_WEB accept comment "allow ipv6 web connection: > FW > SERVER AUTH" + # Reverse proxy (SERVER) + oifname $IF_SERVER ip saddr $HOSTS4_CONSOLE ip daddr { $HOSTS4_INFRA, $HOSTS4_APP } tcp dport $PORTS_WEB accept comment "allow ipv4 web connection: CONSOLE > FW > SERVER INFRA/APP" + oifname $IF_SERVER ip6 saddr $HOSTS6_CONSOLE ip6 daddr { $HOSTS6_INFRA, $HOSTS6_APP } tcp dport $PORTS_WEB accept comment "allow ipv6 web connection: CONSOLE > FW > SERVER INFRA/APP" + # Kopia/NAS Console > NAS + oifname $IF_CLIENT ip saddr $HOSTS4_CONSOLE ip daddr $HOSTS4_NAS tcp dport { $PORTS_NAS, $PORTS_KOPIA } accept comment "allow ipv4 web connection (DSM, KOPIA): CONSOLE > FW > CLIENT NAS" + oifname $IF_CLIENT ip6 saddr $HOSTS6_CONSOLE ip6 daddr $HOSTS6_NAS tcp dport { $PORTS_NAS, $PORTS_KOPIA } accept comment "allow ipv6 web connection (DSM, KOPIA): CONSOLE > FW > CLIENT NAS" + + iifname $IF_WAN jump wan comment "set WAN interface rules" + iifname $IF_CLIENT jump client comment "set CLIENT 
interface rules" + iifname $IF_SERVER jump server comment "set SERVER interface rules" + iifname $IF_USER jump user comment "set USER interface rules" + iifname $IF_WG jump wg comment "set WG interface rules" + } + chain wan { + return + } + chain client { + oifname $IF_WAN ip saddr { $HOSTS4_CONSOLE, $HOSTS4_NAS } accept comment "allow ipv4 internet connection: CLIENT CONSOLE/NAS > FW > WAN" + oifname $IF_WAN ip6 saddr { $HOSTS6_CONSOLE, $HOSTS6_NAS } accept comment "allow ipv6 internet connection: CLIENT CONSOLE/NAS > FW > WAN" + return + } + chain server { + # reverse proxy AUTH > NAS + oifname $IF_CLIENT ip saddr $HOSTS4_AUTH ip daddr $HOSTS4_NAS tcp dport $PORTS_NAS accept comment "allow ipv4 web connection(DSM): SERVER AUTH > FW > CLIENT NAS" + oifname $IF_CLIENT ip6 saddr $HOSTS6_AUTH ip6 daddr $HOSTS6_NAS tcp dport $PORTS_NAS accept comment "allow ipv6 web connection(DSM): SERVER AUTH > FW > CLIENT NAS" + # Kopia INFRA, APP > NAS + oifname $IF_CLIENT ip saddr { $HOSTS4_INFRA, $HOSTS4_APP } ip daddr $HOSTS4_NAS tcp dport $PORTS_KOPIA accept comment "allow ipv4 web connection(kopia): SERVER INFRA/APP > FW > CLIENT NAS" + oifname $IF_CLIENT ip6 saddr { $HOSTS6_INFRA, $HOSTS6_APP } ip6 daddr $HOSTS6_NAS tcp dport $PORTS_KOPIA accept comment "allow ipv6 web connection(kopia): SERVER INFRA/APP > FW > CLIENT NAS" + oifname $IF_WAN ip saddr { $HOSTS4_VMM, $HOSTS4_INFRA, $HOSTS4_AUTH, $HOSTS4_APP } accept comment "allow ipv4 internet connection: SERVER VMM/INFRA/AUTH/APP > FW > WAN" + oifname $IF_WAN ip6 saddr { $HOSTS6_VMM, $HOSTS6_INFRA, $HOSTS6_AUTH, $HOSTS6_APP } accept comment "allow ipv6 internet connection: SERVER VMM/INFRA/AUTH/APP > FW > WAN" + return + } + chain user { + oifname $IF_WAN accept comment "allow internet connection: USER > FW > WAN" + return + } + chain wg { + oifname $IF_WAN accept comment "allow internet connection: WG > FW > WAN" + return + } + chain output { + type filter hook output priority filter; policy accept; + } +} diff --git 
a/config/node/fw/wireguard/30-fw-wg0.netdev b/config/node/fw/wireguard/30-fw-wg0.netdev new file mode 100644 index 0000000..9569f01 --- /dev/null +++ b/config/node/fw/wireguard/30-fw-wg0.netdev @@ -0,0 +1,10 @@ +[NetDev] +Name=wg0 +Kind=wireguard +[WireGuard] +ListenPort=11290 +PrivateKey={{ hostvars['console']['wireguard']['server']['private_key'] }} +[WireGuardPeer] +PublicKey={{ hostvars['console']['wireguard']['console']['public_key'] }} +PresharedKey={{ hostvars['console']['wireguard']['console']['preshared_key'] }} +AllowedIPs={{ hostvars['fw']["network4"]["console"]["wg"] }}/32, {{ hostvars['fw']["network6"]["console"]["wg"] }}/128 diff --git a/config/node/fw/wireguard/31-fw-wg0.network b/config/node/fw/wireguard/31-fw-wg0.network new file mode 100644 index 0000000..8ca8b10 --- /dev/null +++ b/config/node/fw/wireguard/31-fw-wg0.network @@ -0,0 +1,6 @@ +[Match] +Name=wg0 +[Network] +Address={{ hostvars['fw']["network4"]["firewall"]["wg"] }}/24 +Address={{ hostvars['fw']["network6"]["firewall"]["wg"] }}/64 +IPForward=yes diff --git a/config/node/infra/nftables.conf.j2 b/config/node/infra/nftables.conf.j2 new file mode 100644 index 0000000..66b8dd4 --- /dev/null +++ b/config/node/infra/nftables.conf.j2 @@ -0,0 +1,70 @@ +#!/usr/sbin/nft -f +# Convention +# iifname oifname saddr daddr proto dport ct state action / Ellipsis if you can something +flush ruleset + +define NET4_SERVER = {{ hostvars['fw']['network4']['subnet']['server'] }} +define NET6_SERVER = {{ hostvars['fw']['network6']['subnet']['server'] }} +define HOSTS4_CONSOLE = { {{ hostvars['fw']['network4']['console'].values() | join(', ') }} } +define HOSTS6_CONSOLE = { {{ hostvars['fw']['network6']['console'].values() | join(', ') }} } +define PORTS_SSH = 22 +define PORTS_DB = 5432 +define PORTS_CA = 9000 +define PORTS_LDAPS = 636 +define PORTS_LDAPS_FORWARD = 6360 +define PORTS_HTTP = 80 +define PORTS_HTTP_FORWARD = 2080 +define PORTS_HTTPS = 443 +define PORTS_HTTPS_FORWARD = 2443 +define 
PORTS_PROMETHEUS = 9090 +define PORTS_LOKI = 3100 + +table inet nat { + chain prerouting { + type nat hook prerouting priority dstnat; policy accept; + tcp dport $PORTS_HTTP dnat to :$PORTS_HTTP_FORWARD comment "DNAT http ports to $PORTS_HTTP_FORWARD" + tcp dport $PORTS_HTTPS dnat to :$PORTS_HTTPS_FORWARD comment "DNAT https ports to $PORTS_HTTPS_FORWARD" + tcp dport $PORTS_LDAPS dnat to :$PORTS_LDAPS_FORWARD comment "DNAT ldaps ports to $PORTS_LDAPS_FORWARD" + } + chain postrouting { + + } + chain output { + type nat hook output priority dstnat; policy accept; + oifname "lo" tcp dport $PORTS_HTTP dnat to :$PORTS_HTTP_FORWARD comment "DNAT http ports to $PORTS_HTTP_FORWARD out of LOCALHOST" + oifname "lo" tcp dport $PORTS_HTTPS dnat to :$PORTS_HTTPS_FORWARD comment "DNAT https ports to $PORTS_HTTPS_FORWARD out of LOCALHOST" + oifname "lo" tcp dport $PORTS_LDAPS dnat to :$PORTS_LDAPS_FORWARD comment "DNAT ldaps ports to $PORTS_LDAPS_FORWARD out of LOCALHOST" + } +} + +table inet filter { + chain input { + type filter hook input priority 0; policy drop; + ct state invalid drop comment "deny invalid connection" + ct state established, related accept comment "allow all connection already existing" + iifname "lo" accept comment "allow local connection: INFRA > INFRA" + meta l4proto { icmp, icmpv6 } accept comment "allow icmp connection: > INFRA" + ip saddr $HOSTS4_CONSOLE tcp dport $PORTS_SSH accept comment "allow ipv4 ssh connection: CONSOLE > INFRA" + ip6 saddr $HOSTS6_CONSOLE tcp dport $PORTS_SSH accept comment "allow ipv6 ssh connection: CONSOLE > INFRA" + ip saddr $NET4_SERVER tcp dport $PORTS_CA accept comment "allow ipv4 ca connection: SERVER > INFRA" + ip6 saddr $NET6_SERVER tcp dport $PORTS_CA accept comment "allow ipv6 ca connection: SERVER > INFRA" + ip saddr $NET4_SERVER tcp dport $PORTS_DB accept comment "allow ipv4 db connection: SERVER > INFRA" + ip6 saddr $NET6_SERVER tcp dport $PORTS_DB accept comment "allow ipv6 db connection: SERVER > INFRA" + ip 
saddr $HOSTS4_CONSOLE tcp dport $PORTS_HTTP_FORWARD ct original proto-dst $PORTS_HTTP accept comment "allow ipv4 http connection: CONSOLE > INFRA" + ip6 saddr $HOSTS6_CONSOLE tcp dport $PORTS_HTTP_FORWARD ct original proto-dst $PORTS_HTTP accept comment "allow ipv6 http connection: CONSOLE > INFRA" + ip saddr $HOSTS4_CONSOLE tcp dport $PORTS_HTTPS_FORWARD ct original proto-dst $PORTS_HTTPS accept comment "allow ipv4 https connection: CONSOLE > INFRA" + ip6 saddr $HOSTS6_CONSOLE tcp dport $PORTS_HTTPS_FORWARD ct original proto-dst $PORTS_HTTPS accept comment "allow ipv6 https connection: CONSOLE > INFRA" + ip saddr $NET4_SERVER tcp dport $PORTS_LDAPS_FORWARD ct original proto-dst $PORTS_LDAPS accept comment "allow ipv4 ldaps connection: SERVER > INFRA" + ip6 saddr $NET6_SERVER tcp dport $PORTS_LDAPS_FORWARD ct original proto-dst $PORTS_LDAPS accept comment "allow ipv6 ldaps connection: SERVER > INFRA" + ip saddr $NET4_SERVER tcp dport $PORTS_PROMETHEUS accept comment "allow ipv4 prometheus connection: SERVER > INFRA" + ip6 saddr $NET6_SERVER tcp dport $PORTS_PROMETHEUS accept comment "allow ipv6 prometheus connection: SERVER > INFRA" + ip saddr $NET4_SERVER tcp dport $PORTS_LOKI accept comment "allow ipv4 loki connection: SERVER > INFRA" + ip6 saddr $NET6_SERVER tcp dport $PORTS_LOKI accept comment "allow ipv6 loki connection: SERVER > INFRA" + } + chain forward { + type filter hook forward priority 0; policy drop; + } + chain output { + type filter hook output priority 0; policy accept; + } +} diff --git a/config/node/vmm/networkd/00-vmm-eth0.link b/config/node/vmm/networkd/00-vmm-eth0.link new file mode 100644 index 0000000..1119415 --- /dev/null +++ b/config/node/vmm/networkd/00-vmm-eth0.link @@ -0,0 +1,5 @@ +[Match] +MACAddress=c8:ff:bf:05:aa:b0 + +[Link] +Name=eth0 diff --git a/config/node/vmm/networkd/01-vmm-eth1.link b/config/node/vmm/networkd/01-vmm-eth1.link new file mode 100644 index 0000000..5f72662 --- /dev/null +++ 
b/config/node/vmm/networkd/01-vmm-eth1.link @@ -0,0 +1,5 @@ +[Match] +MACAddress=c8:ff:bf:05:aa:b1 + +[Link] +Name=eth1 diff --git a/config/node/vmm/networkd/10-vmm-br0.netdev b/config/node/vmm/networkd/10-vmm-br0.netdev new file mode 100644 index 0000000..6ec2b6d --- /dev/null +++ b/config/node/vmm/networkd/10-vmm-br0.netdev @@ -0,0 +1,3 @@ +[NetDev] +Name=br0 +Kind=bridge diff --git a/config/node/vmm/networkd/11-vmm-br1.netdev b/config/node/vmm/networkd/11-vmm-br1.netdev new file mode 100644 index 0000000..3f00292 --- /dev/null +++ b/config/node/vmm/networkd/11-vmm-br1.netdev @@ -0,0 +1,7 @@ +[NetDev] +Name=br1 +Kind=bridge + +[Bridge] +VLANFiltering=true +DefaultPVID=1 diff --git a/config/node/vmm/networkd/12-vmm-vlan1.netdev b/config/node/vmm/networkd/12-vmm-vlan1.netdev new file mode 100644 index 0000000..bbe3596 --- /dev/null +++ b/config/node/vmm/networkd/12-vmm-vlan1.netdev @@ -0,0 +1,6 @@ +[NetDev] +Name=vlan1 +Kind=vlan + +[VLAN] +Id=1 diff --git a/config/node/vmm/networkd/13-vmm-vlan10.netdev b/config/node/vmm/networkd/13-vmm-vlan10.netdev new file mode 100644 index 0000000..70257b8 --- /dev/null +++ b/config/node/vmm/networkd/13-vmm-vlan10.netdev @@ -0,0 +1,6 @@ +[NetDev] +Name=vlan10 +Kind=vlan + +[VLAN] +Id=10 diff --git a/config/node/vmm/networkd/14-vmm-vlan20.netdev b/config/node/vmm/networkd/14-vmm-vlan20.netdev new file mode 100644 index 0000000..01c2af6 --- /dev/null +++ b/config/node/vmm/networkd/14-vmm-vlan20.netdev @@ -0,0 +1,6 @@ +[NetDev] +Name=vlan20 +Kind=vlan + +[VLAN] +Id=20 diff --git a/config/node/vmm/networkd/20-vmm-eth0.network b/config/node/vmm/networkd/20-vmm-eth0.network new file mode 100644 index 0000000..f26bfe6 --- /dev/null +++ b/config/node/vmm/networkd/20-vmm-eth0.network @@ -0,0 +1,6 @@ +[Match] +Name=eth0 + +[Network] +Bridge=br0 +LinkLocalAddressing=false diff --git a/config/node/vmm/networkd/21-vmm-eth1.network b/config/node/vmm/networkd/21-vmm-eth1.network new file mode 100644 index 0000000..19339dd --- /dev/null +++ 
b/config/node/vmm/networkd/21-vmm-eth1.network @@ -0,0 +1,15 @@ +[Match] +Name=eth1 + +[Network] +Bridge=br1 +LinkLocalAddressing=false + +[BridgeVLAN] +VLAN=1 +PVID=true +EgressUntagged=true + +[BridgeVLAN] +VLAN=10 +VLAN=20 diff --git a/config/node/vmm/networkd/22-vmm-br0.network b/config/node/vmm/networkd/22-vmm-br0.network new file mode 100644 index 0000000..1eae45a --- /dev/null +++ b/config/node/vmm/networkd/22-vmm-br0.network @@ -0,0 +1,5 @@ +[Match] +Name=br0 + +[Network] +LinkLocalAddressing=false diff --git a/config/node/vmm/networkd/23-vmm-br1.network b/config/node/vmm/networkd/23-vmm-br1.network new file mode 100644 index 0000000..ac0e65a --- /dev/null +++ b/config/node/vmm/networkd/23-vmm-br1.network @@ -0,0 +1,17 @@ +[Match] +Name=br1 + +[Network] +VLAN=vlan1 +VLAN=vlan10 +VLAN=vlan20 +LinkLocalAddressing=false + +[BridgeVLAN] +VLAN=1 +PVID=yes +EgressUntagged=true + +[BridgeVLAN] +VLAN=10 +VLAN=20 diff --git a/config/node/vmm/networkd/24-vmm-vlan1.network b/config/node/vmm/networkd/24-vmm-vlan1.network new file mode 100644 index 0000000..3ebb2cd --- /dev/null +++ b/config/node/vmm/networkd/24-vmm-vlan1.network @@ -0,0 +1,28 @@ +[Match] +Name=vlan1 + +[Network] +# IPv4 +Address=192.168.1.10/24 +# IPv6 +Address=fd00:1::10/64 + +[RoutingPolicyRule] +From=192.168.1.10/32 +Table=1 +Priority=100 + +[Route] +Destination=192.168.1.0/24 +Scope=link +Table=1 + +[RoutingPolicyRule] +From=fd00:1::10/128 +Table=61 +Priority=100 + +[Route] +Destination=fd00:1::/64 +Scope=link +Table=61 diff --git a/config/node/vmm/networkd/25-vmm-vlan10.network b/config/node/vmm/networkd/25-vmm-vlan10.network new file mode 100644 index 0000000..62cc6e8 --- /dev/null +++ b/config/node/vmm/networkd/25-vmm-vlan10.network @@ -0,0 +1,32 @@ +[Match] +Name=vlan10 +[Network] +RequiredForOnline=false +# IPv4 +Address=192.168.10.10/24 +Gateway=192.168.10.1 +DNS=192.168.10.2 +# IPv6 +Address=fd00:10::10/64 +Gateway=fd00:10::1 +DNS=fd00:10::2 + +[RoutingPolicyRule] +From=192.168.10.10/32 
+Table=2 +Priority=100 + +[Route] +Destination=0.0.0.0/0 +Gateway=192.168.10.1 +Table=2 + +[RoutingPolicyRule] +From=fd00:10::10/128 +Table=62 +Priority=100 + +[Route] +Destination=::/0 +Gateway=fd00:10::1 +Table=62 diff --git a/config/node/vmm/nftables.conf.j2 b/config/node/vmm/nftables.conf.j2 new file mode 100644 index 0000000..99f0666 --- /dev/null +++ b/config/node/vmm/nftables.conf.j2 @@ -0,0 +1,26 @@ +#!/usr/sbin/nft -f +# Convention +# iifname oifname saddr daddr proto dport ct state action / Ellipsis if you can something +flush ruleset + +define HOSTS4_CONSOLE = { {{ hostvars['fw']['network4']['console'].values() | join(', ') }} } +define HOSTS6_CONSOLE = { {{ hostvars['fw']['network6']['console'].values() | join(', ') }} } +define PORTS_SSH = 22 + +table inet filter { + chain input { + type filter hook input priority 0; policy drop; + ct state invalid drop comment "deny invalid connection" + ct state established, related accept comment "allow all connection already existing" + iifname "lo" accept comment "allow local connection" + meta l4proto { icmp, icmpv6 } accept comment "allow icmp connection: > VMM" + ip saddr $HOSTS4_CONSOLE tcp dport $PORTS_SSH accept comment "allow ipv4 ssh connection: CONSOLE > VMM" + ip6 saddr $HOSTS6_CONSOLE tcp dport $PORTS_SSH accept comment "allow ipv6 ssh connection: CONSOLE > VMM" + } + chain forward { + type filter hook forward priority 0; policy drop; + } + chain output { + type filter hook output priority 0; policy accept; + } +} diff --git a/config/secrets/.sops.yaml b/config/secrets/.sops.yaml new file mode 100644 index 0000000..e00a01b --- /dev/null +++ b/config/secrets/.sops.yaml @@ -0,0 +1,3 @@ +creation_rules: + - path_regex: secrets\.yaml$ + age: age120wuwcmsm845ztsvsz46pswj5je53uc2n35vadklrfqudu6cxuusxetk7y diff --git a/config/secrets/age-key.gpg b/config/secrets/age-key.gpg new file mode 100644 index 0000000000000000000000000000000000000000..0c5d3a8cc56d8eb07de7b3861c5d93661976141b GIT binary patch literal 255 
zcmVVDS^$uk9 zWSvM0cU)wx-ZRl(i6(S}JX(1-+Abz4Y7_sdLiECOkB@@aqbpJmtuuFmniJB$)N1FV z&JeEciv==bLz8nCmpdyvpx+e+OeN(uU9_?27Z)RMf$MYo$Sh`2hCcH{g6cNo2^L<2 z4ze%-SCD}>+HGAu~L{A-*sVf{X)Pt&wjLQ#WQEAba)at%9 z*69q|=Wr79W_yeqiKr6vY+dJ0BI@h?5^&2 + exit 1 +} + +# Log function +log() { + local timestamp=$(date -u +"%Y-%m-%dT%H:%M:%SZ") + local level="$1" + local msg="$2" + echo "time=\"$timestamp\" level=\"$level\" msg=\"$msg\" source=\"edit_secret.sh\"">&2 +} + +# Secret file check +if [ -z "$SECRET_FILE" -o ! -f "$SECRET_FILE" ]; then + log "error" "Secret file path is required" + usage + exit 1 +fi + +# age-key file check +if [ ! -f "$KEY_PATH/age-key.gpg" ]; then + log "error" "age key path is required" + exit 1 +fi + +# Dependency check +if ! command -v sops >/dev/null; then + log "error" "sops is required" + exit 1 +fi + +if ! command -v gpg >/dev/null; then + log "error" "gnupg is required" + exit 1 +fi + +# Cleanup function for trap +cleanup() { + if [ -f "$TMP_PATH/age-key" ]; then + rm -f "$TMP_PATH/age-key" + log "info" "age key is deleted" + fi +} + +# Trap +trap cleanup EXIT + +# Get GPG password from prompt +echo -n "Enter GPG passphrase: " >&2 +read -s GPG_PASSPHRASE +echo "" >&2 + +# Decrypt age-key on the tmpfs (memory) +echo "$GPG_PASSPHRASE" | gpg --batch --yes --passphrase-fd 0 \ +--output "$TMP_PATH/age-key" \ +--decrypt "$KEY_PATH/age-key.gpg" &&\ +chmod 600 "$TMP_PATH/age-key" + +# Unset environment varibles +unset GPG_PASSPHRASE + +# Check the key on memory +if [ ! 
-f "$TMP_PATH/age-key" ]; then + log "error" "age key does not exist" + exit 1 +fi + +# Kill the gpg session value +gpgconf --kill gpg-agent + +# Open sops editor and delete the key +SOPS_AGE_KEY_FILE="$TMP_PATH/age-key" sops "$SECRET_FILE" + +exit 0 diff --git a/config/secrets/extract_secret.sh b/config/secrets/extract_secret.sh new file mode 100644 index 0000000..2561470 --- /dev/null +++ b/config/secrets/extract_secret.sh @@ -0,0 +1,151 @@ +#!/bin/bash +# extract_secret.sh /path/of/secret/secret.yaml [-n] (-f|-e ) + +set -e + +# Varibles +KEY_PATH="$HOME/workspace/homelab/config/secrets" +TMP_PATH="/run/user/$UID" +SECRET_FILE="$1" +VALUE="" +TYPE="" +NEWLINE="true" + +# Remove $1 and shift $(n-1) < $n +shift + +# Usage function + +usage () { + echo "Usage: $0 \"/path/of/secret/secret.yaml\" [-n] (-f|-e \"yaml section name\")" + echo "-n: remove the newline" + echo "-f : Print secret file" + echo "-e : Print secret env file" + exit 1 +} + +# Log function +log() { + local timestamp=$(date -u +"%Y-%m-%dT%H:%M:%SZ") + local level="$1" + local msg="$2" + echo "time=\"$timestamp\" level=\"$level\" msg=\"$msg\" source=\"extract_secret.sh\"">&2 +} + +# getops to get parameters +while getopts "f:e:n" opt; do + case $opt in + f) + VALUE="$OPTARG" + TYPE="FILE" + ;; + e) + VALUE="$OPTARG" + TYPE="ENV" + ;; + n) + NEWLINE="false" + ;; + \?) + log "error" "Invalid option: -$OPTARG" + usage + ;; + :) + log "error" "Option -$OPTARG requires an argument" + usage + ;; + esac +done + +# Secret file check +if [ -z "$SECRET_FILE" -o ! -f "$SECRET_FILE" ]; then + log "error" "Secret file path is required" + usage + exit 1 +fi + +# -f or -e option check +if [ -z "$TYPE" ]; then + log "error" "-f or -e option requires" + usage + exit 1 +fi + +# age-key file check +if [ ! -f "$KEY_PATH/age-key.gpg" ]; then + log "error" "Key file is required: $KEY_PATH/age-key.gpg" + exit 1 +fi + +# Dependency check +if ! 
command -v sops >/dev/null; then + log "error" "sops is required" + exit 1 +fi + +if ! command -v gpg >/dev/null; then + log "error" "gnupg is required" + exit 1 +fi + +# Cleanup function for trap +cleanup() { + if [ -f "$TMP_PATH/age-key" ]; then + rm -f "$TMP_PATH/age-key" + log "info" "age-key was deleted" + fi +} + +# Trap +trap cleanup EXIT + +# Get GPG password from prompt +echo -n "Enter GPG passphrase: " >&2 +read -s GPG_PASSPHRASE +echo "" >&2 + +# Decrypt age-key on the tmpfs (memory) +echo "$GPG_PASSPHRASE" | gpg --batch --yes --passphrase-fd 0 \ +--output "$TMP_PATH/age-key" \ +--decrypt "$KEY_PATH/age-key.gpg" &&\ +chmod 600 "$TMP_PATH/age-key" + +# Unset environment varibles +unset GPG_PASSPHRASE + +# Check the key on memory +if [ ! -f "$TMP_PATH/age-key" ]; then + log "error" "age key file does not exist" + exit 1 +fi + +# Kill the gpg session value +gpgconf --kill gpg-agent + +if [ "$TYPE" == "FILE" ]; then + if RESULT=$(SOPS_AGE_KEY_FILE="$TMP_PATH/age-key" sops --decrypt --extract "[\"$VALUE\"]" --output-type binary "$SECRET_FILE") ; then + if [ "$NEWLINE" == "true" ]; then + echo "$RESULT" + else + echo -n "$RESULT" + fi + exit 0 + else + log "error" "SOPS extract error" + exit 1 + fi +fi + +if [ "$TYPE" == "ENV" ]; then + if RESULT=$(SOPS_AGE_KEY_FILE="$TMP_PATH/age-key" sops --decrypt --extract "[\"$VALUE\"]" --output-type dotenv "$SECRET_FILE") ; then + if [ "$NEWLINE" == "true" ]; then + echo "$RESULT" + else + echo -n "$RESULT" + fi + exit 0 + else + log "error" "SOPS extract error" + exit 1 + fi +fi \ No newline at end of file diff --git a/config/secrets/secrets.yaml b/config/secrets/secrets.yaml new file mode 100644 index 0000000..c5eaca9 --- /dev/null +++ b/config/secrets/secrets.yaml @@ -0,0 +1,205 @@ +#ENC[AES256_GCM,data:p5q9g2YX1hb7yIKFsuHhO6EVsTU00Q3JopqUX7Gr63z5SmQQaK2f+73FGXBjW5c=,iv:P895qJTjJioxM1OpXg7xTscA4UBQRyDxTUnHXb17dhA=,tag:TWmvv/0gD78mE0zbp4Av7w==,type:comment] 
+#ENC[AES256_GCM,data:LHFLuYEf6f3spreglxE445d3BZeUfOlJSIKcqByoWePJ+BJcDi2VUKUQJBGz+KPYSFqBNlucgGdeXNajgeC0SgIkEWDi3Cg3ORX2RBE=,iv:A17KfTM9AxfAdOlCtpxOXRft6+2fYqNCEOSgaob8Upc=,tag:IZOzealCJ4M2KQuzSCek7Q==,type:comment] +# +#ENC[AES256_GCM,data:U/qLcosbBJIVoIv4d4Zjb+c=,iv:rdAsPzdZgu5UVxWTiOmYglCBWBsyq6HktO2+/Dqmudw=,tag:K7Kci7xGrRXCKsSG5UkHEA==,type:comment] +sudo: + password: + console: ENC[AES256_GCM,data:RdvzU4VOU+ww1OVvLanD9KbCYom/yPHQ1DdjUBfG6l9nPHEapYqqvHpr6Oi3nDRfc2n5cR4R40i0DHCh01Eo,iv:X8RxKloim0jsDj8U/aY+rHARtrlTrvwNMqMcTmdPfes=,tag:eaMrWFdyU9jifh3SvBrE+A==,type:str] + vmm: ENC[AES256_GCM,data:CqEAeHMgRkmsFYFGOyHic2TGYXlw/1VO4mMAaPSHqzGDoW6u6Gz29uho9EY=,iv:5fc0LkIQ/gVFTK7YEjnmA5ye2EEyh5/61VvTVarGOQA=,tag:yLwK2HchZvGZBFL4ZnxNaw==,type:str] + fw: ENC[AES256_GCM,data:Iz9zt+M+ug6vxzjMAdGuSBSQ16+JrrXpfbsWUpoA184JEV+nC0xZi4eawLI=,iv:4lbQCgFdo5WzY5vFbn9dbkLN6iCoJNJ/5UXw74qOv7s=,tag:eCRh07yL5/D3Kzh6xgmDWg==,type:str] + infra: ENC[AES256_GCM,data:2KEo16JyMbcySxkPUA1qvQZP5YnfKlT/lJS8FnFWSoXEL508dwiWslSDPfI=,iv:xLah1BYaNuMlSWUufhdWupzPaXbLVgVwVfXO7dKSCcY=,tag:wqkJdWNf2h5ecSBDf3T8xg==,type:str] + auth: ENC[AES256_GCM,data:Gv73925NEyxQEXmVC3YImOfzj99ZVDH+Jt3iEl0yEN69E/1VeqH67uG5kyQ=,iv:+Dd4kY0nDZIBnB+Ft7e8GD1v9a9NjRjU/QnwnXJLJ/w=,tag:X/hOwq1fxHPPhp5VtENWbw==,type:str] + app: ENC[AES256_GCM,data:/e7LF628qBL1+dPtfqV0myEp9MYBx4s1R6LG+HD9P/7LSsF2qG4eUl885GE=,iv:oU23xu+QTtYbT9/7Z8Q+CrCrpOJXs+Cgstvf8RASEZc=,tag:AvYzJuTKFt+dHm/uca+TGg==,type:str] + hash: + fw: ENC[AES256_GCM,data:cI7ZkkxkNypzTRtChxC0HVp0YxwUHXDlcs+Mm1A2OxuGekr4t2IoXnF3OieQVB8LfjH14iJDhBAQxAjww9O3dsaKy1QQx8zCZ5IabDmBfLEsdhJVGHxTBw6PJHzhlW4+1QAYnxXcWqH6Tg==,iv:s/A8I30BP4PYBf+5lBxBXySx2atYn1MYdclA1Cb/VOY=,tag:tKBs1udztZV0T3/UfAiL5A==,type:str] + infra: ENC[AES256_GCM,data:Ifk0g0CZOIzyTeTgSVlOj5J0TsIBNQTtZYsM+MNsZ/BGhNb/NADotrBRwpgK7Lts13TQ9eNZtcm4VgWVyWVIcM2HJ+1hAKMHcgHsOlaZNZa3vbiGS/X6xvD01vFeNWC1P0A7GDsvNS0B0A==,iv:PbwSvKNl4EmLBJ/la3ePx7hzJodtLGn57XvgM3X3xtc=,tag:Qahp1EARyRqmWr2BZ/pLJw==,type:str] + auth: 
ENC[AES256_GCM,data:ezk7MNsSRkF/dI8YhKB9CX+M8j876cRCkDqx64mZPaYNIWD7/tpbwqSTuTvoFsGJ9ikh3ZbNA4n3qMxMSsWstckHfcSgjUF56Bz9lCRvNqNCk/tDyj5aqxeHPXrbad4lwrgl0dPNFuPqxg==,iv:JEJnP+HnaxaLWpgsVmsLuTcxTMIA28l/ESCDnJcM1oQ=,tag:flDCxuD3L5IEfAX5BGu+YA==,type:str] + app: ENC[AES256_GCM,data:xRr+kc1cvB2BK6DhQZQ21TvsgIgAxAJlWVMM7fbFTn9HObUIuz+a9MROGnduHKe8dp5bOkL97jlwZvk4qT8n51rYp4YvgKU/CQ7n89MqQO248hJp2MRMRWmom9Gh6HlzY4IHeMhceb/oYQ==,iv:BG4tPfwp5Qa9/H0h517fHjaE/ZIMy4vnAAQ4Fvqj2H8=,tag:meE8ZxXB8E4Pi/bS4dKMZA==,type:str] +#ENC[AES256_GCM,data:ODXFUxxxdQ==,iv:s9zJVx6wo6x517tbNvC+FZ0dFzqbjqeLI6rXBq72hQA=,tag:bXoV2I3LbpmQyddJrtS3Qg==,type:comment] +# +# +#ENC[AES256_GCM,data:5Xa+rIwa0hT/RCE=,iv:WwgE6FmE+hRqtAmmnnznMiCK9ApuZRm6pwILOCJNQeQ=,tag:TaVHDrAO4rV29Z2yFDdLOw==,type:comment] +ca: + root: + key: ENC[AES256_GCM,data:plvrcOhFujtAJv9BHZIQwpryUmVevxI/LYAOmkp9CRyb7jxymGXFSRrUd9zWa3mT4z8lonuK9wkEkINzxG+hjFODDhnQqKpXllZ2EFNiTB/x4wEgedcsx2J/qf6TreBwXOlPCFY0GcYC4UdQFU4ZmmBmFFxe0X0T8l5Fcj27cNRvU6i6p03wXUHVHoDF4pP2NXFFqgepdkPFtvexEpSQDbnTo3unKOgwgqXXvaG9dBflU4nE4ulhiDbziqGOAFqb8Iojx0EwSYaAUG1s/QgwkLFcOSyPYoSH0VdHVlfkiyQnQC/Csbn3uj8bWLO7PhY9cz4WTd2+Z2YJELHnBEvbUFvonD61/KbXE3Zn/j9Mevoe2qizszRujyEuQD6Gw+F9mfTo7aX01XKqlj5alX+H7In3VLyKIJ1M43Y=,iv:fPv2FoE7LA/ImDQiIpwA9NFO/5MIXMcfLJxVALirFu8=,tag:I0Ya9yZ0seTpjaCbY0kxuw==,type:str] + crt: 
ENC[AES256_GCM,data:t+yAyGjsbGUFfa/3wKqAHVgV15trtFrFHXRgBN5y1iJNXvqsHzdt4OO07o/d8lT2bh/UXZEHQDmx0VW+gKSouVzr+ZF2ENgXAW+d1cWWup7cLW+oC2LOf22gP3js53dGNS4+qRhPTi5M7XZztsAJgaI3+LAiDMoxoZn8jZZ2OKOB2038w8ZHCDGMUmvCMXJkk5u4o9zf8qLggdPk/MoAtQ4g4/5Q/gJsf5v6+ufZbSXnWWfwtBce7FStb3Bg06RIGG3LtDGzBxvMFpDJe3z0bidJtOlYyAJhOZF/knOTUwVlEt/Jhd9KoxRKYCMETj0eKO88bIyxZx9gmli7OuETWRyK+A8Y7xAdAoZxgeDJZjX4kcPsMy3vkGKSGRKDg+FMwX/PGwUDQmG+UyfmGqZUyc5fJWHKI8eY55jZoHR2U47tRvV3DcRlvfq8x1Ror1aMtaHfAYcTtxmhQxaWTBhpEQDMSBfMS85UMtNATQd9FXaIh3L6NB1hXCEa/3Tb03ULjkV9Ij92l041CdVUGhRbgz7iC7gy2aGap8fK4PopuqPeEsz306S1Mc1+FLrBz0Y4+545k+cNcEueKRXkColtdsTGU0JhftF3tXxynmx8W0kGKfG3o12VAN4Q8+yYPvE4XtTNVoJtzPeC+ksCem+w57gIAP+PbSdmnaEjsM/GrfTfGzJzcuCUKVDtSl8jh3B8f7f6dk0GSXftMnofdHlSXcAq3rG8oHoqCzS+k/VMz8o22uqcVHTiE0HKVK9h0jghDkkEYcGyiBsLPMlUxmkBmE8uwA==,iv:FyUTqDvjMajaHljcwnQZ9usEaL46hpvGOjNvIVUTMEA=,tag:K3h0hZfr0yjVhxpaG6bi3Q==,type:str] + password: ENC[AES256_GCM,data:ElXnqYyR9qAiBjPqR0HVsMcknp/qqCsBbs1zLfRVqYHSX68RkNVDWOWhrIo=,iv:xiBAV2J52aQbaA9VxdUvjiYg7JTnQyeuBukTFfDCjig=,tag:lZXVek8TSTcNtcu5VyUMUQ==,type:str] + intermediate: + key: ENC[AES256_GCM,data:xZB2VgzLgTleX/E27lie9Cy0Ham6iGZtSfmw/wfOqH/h/HBv754uYV5XPQyenj6Mst06WiiNl/gvzCZjNAoLiKPkGWvpWZeUjeclsb3QuWjIIArarjcOaA1XX0wxsizfAZ79FU4qjY+alJlN2EnWgiyYIq/bCdhMorYi9nn3z63Q2CetzDpMILfacv4aUzIqu6iX9sgJ4cwpzlAdl0vs7Q4jUZ1eNu00zb9Lbf3NSFUuafzDNSn4FFJ5H4nFqC/okLtFjfMrWv+XPXKR/EqA77cS2gW0g8bGtpZX51cNGYlIMdXj9Coet5tgj+UG4KzJSMoTFdiXHdR1jFAvmYUcFbX1wtbGtIqK3vKcVT1UBtkxQnj8dURzuetyj21qABR6WfPXVOfgIQrTdG4bUZEw1LZbEYd8w6L57NE=,iv:nspy2dXUIGvuyTkETzMxmeEPh0u4saxc5jHfsUMC93M=,tag:v7mMF2+asasRwqIyTSlgQQ==,type:str] + crt: 
ENC[AES256_GCM,data:4RrGKeUS+9oZN08WINaW61C7yGs6Atu/vA/w9GNGhC21wMWHoB3FXl6sxDkNAO+qZn0YhINXW6MF6VvmPh0Lu2nQ5wdLlhsgFat1fXrWcatqvHoD08Q6pWSVTXwN9ZLM038ucOj26Xs4C2mBCl2wddII59YzCKRcXIYoK7GWF3m3x5uWY+sP+8IZ2fQiHEaITFX1hg9tNItmWoRh7gCQ15yec6KTOuM9Ph2HI32oP1Lm4h3fW2SvIcgJqHcFObtequGFXpvZbSedesdR3v+gNVSr5oTVsEGoGwyALt/gGpK3zptTDsavARzsQPUXu3+QILyuNYHXP653g+VnSLMwEMuFdeQJaBeG4TF0Zo76dPuy8lPSawhGK1BJsEgUb++23q87nW/O5UdJsUJ7gIejrQQpH266nq8cw1Thyskypk0TrsMdrTWwOniWOLSUVf+5GZi9mLqNse89Qe9xyJlvocTdpZg7Bl9kDVFjLRO2IC8+r7NqD8PeOjxT8ojJNYXG3iDbdqyQtHtpETjLUN4ZHCo8EOdGIBAqnutV0gAxnKIcran86gtk6QIeYL62x0AgkDDEClCrQMZEOIMbhzPMml+4Y0SdcoA8DErrFiPeufDxak5gNLRJxOT/okOBi5/QK4deJ2Cbx0lSxB79d69Ol9OAKUNW855o/lcGD+Tx3bNXYzN+EaFmpku3Sv8Bvl3QloVQ4832w0bbwcqEoviuB8hDMqsmWXZS8D+cqL9uYZVObRJS8rR8QGPfPkSPy8NePWsuR1vznlyBT35eyRcKwVp+qweDIjHHgAIcSvSAVPoQ3EYT01WCpRPsiTIxh4IbLIYeqImFrb6Vn7S6tMmv3E3eCnuL/LfxiV1t3Q==,iv:bUDkP5wRWEqi9n9QEN9ETHFeWdAiEk8GUwTD4X7asRA=,tag:xYYYPlGQfhcK97tSPtM7dQ==,type:str] + password: ENC[AES256_GCM,data:aijMXt3hTo19xVChh1gplU8sUQjmX4Y+dSjS69Kw2cEqmEJB/pelihLH96c=,iv:zP1yDLGsbL0sxJq2PF+qe3c8rNT7Knz6vKaGwMVB2xs=,tag:Oazypz0OfAYI/eptqgaINw==,type:str] + acme_key: ENC[AES256_GCM,data:vir3SKMdU8FsZ3fBNoSMrev3vreFG4Vsex7R4hIR7Ty68x9wqsO/YpJSNkw=,iv:e7AqVbmCOkVIaje6xF8QzISU0E3JfyU4GM7/361ybdc=,tag:HC/kMa/jmVqBwCsvR31WHQ==,type:str] +#ENC[AES256_GCM,data:ODXFUxxxdQ==,iv:s9zJVx6wo6x517tbNvC+FZ0dFzqbjqeLI6rXBq72hQA=,tag:bXoV2I3LbpmQyddJrtS3Qg==,type:comment] +# +# +#ENC[AES256_GCM,data:EcolN4koQ/STbbgJMw==,iv:VY3FneEP+SyYh/uWXdvnDgcY79op+j7RiEgTND5+KNY=,tag:35bxqy3l6ZSf/PoQRvUubA==,type:comment] +ssh: + ca: + key: 
ENC[AES256_GCM,data:5xMsfRvz2SmtPI+bsWpKY5u4P++DIOyuSLK4vMuTVpRu2r0X1b35rhoB3xdN65oEwH0CKvBEeZQuNdmgSbWJtkbXKxAQlli3m0myi+zXvKZiDS/bCvPLjSTi8wQ5dhILmqIagBIdWFIpf/5BmWYaWg2XHnfYZklhRlHLyKtkCY57kDdVco0fkhmc2SX28yA+5vF55dNU5ouxCg+K90dXRQaM2ZpQgZBNPy/6dF1ZWSv3oU2IvEp02tRuIZggaqiqBW6D1cxnOm/PVhvMjCtBy+EdC/+iWpGIJZqn0RkPAZ4wff90EniR3yF4p1BgQA9QkeGRdDXqoZgxWbRZl1S52Kbb2XtC9oJRle1gKJAU99bLOQVFyitW91QZ7rVTNsXJpxUk61x83QAH9qBU6CA+Sawt+fqoO3omlN5H1xOaXEqTRZgbh8GRdJKF4nGbczHR0UeYOoVrO5x8LeoE8ARYVu2ZKlRJ3nRruoBWNxxwooGsInJKqpSEoJ3MBZ0vRz/bPsABhSPB30WX4ZUovYqj,iv:mpch3JeRjhOmJU+O3KcLXq2wzRKwNQXV1pjh7HRkh4Q=,tag:y07Qj/IGHBgqLxgLmlGpWQ==,type:str] + pub: ENC[AES256_GCM,data:rTZrT/8k4fdjm8VGCvGD4jT2DF6E41dytBVCicJEANxlYs3ByXWgQIxpLPQH2Fc3yCtJYKgERF9ugd7HMsQ1G54VgfuEOUyMyW6i/XTfU/KIa8yEFg9KhMP09hXSeA==,iv:xy1Bv45o63/oqzoB7E4U3CnbiN/Z8N8eJ6diKXdJtM8=,tag:szxdwQzusxVwWm2uq6ZjfA==,type:str] + console: + key: ENC[AES256_GCM,data:cnSKedN0VglTn3QwTQOIFFQM/ASROvPrYINZX7AxLbcY2UI0JSq34eN5LnOL76Fol5Gl9PUaTsg26D8dG5AlS/Egu4p/u19s14UMzNLg33r2C9eiJDO056z/oRko1gDZPIq3LWRoGkyXJFzASJ0RpVbGwoilhoCgFOS4PZjBEZO9g4pGX1OAqvhDg4/kdRJqnh6N4hAOhphP8lS8vsf/9iEQEiyEoBHepd40BsQ9NC6l1SZm2Jk32Pe1IkHZ0kJoDweY/9zdDpRdr51uTzvg1xLM9QlfiLYhZMb5R3Q3SjNV/CKpIhkZvfdNdCrsH4pF1N7UuzBmp0bfhgSeBsAUr//acs2HW+mRPoRXUyAsBeA+znX8/v3OIjr+0iBmqoIP7qhHKdSKn87JpZ7ef3QMO2gpKoYCUMurpC1eXffIB1BRxZkt+w5656CN+T1uF8tZI0WGfM2VormYmRRyAqfTWkIIseIieDV4/CZWSllICTNaWXJ6fSAscENHnAP3mXTCijVTTkNZhhVISoKvAJYsgMQChqcVUm8IXeWT,iv:nislvsSMKf/fjnmksNr3NUj0mJVeBqx+YuKkCdGar5E=,tag:IFiOxkGYC/oMqwcoMLmEXw==,type:str] + #ENC[AES256_GCM,data:Lc10ZCXfOJjKp/f5B3ktF6VwJ3Xt5S+yOUhGjvy11/m67uVzVDriCbUIZzD48EBym+qnRytWl1jf3fDx8Eeio/GgTUyEXy2JpQnwQp0ZvT9G4w9RLWuY+ceiYx4=,iv:vIiNQ0doLXveoSbwZ1GbGNUh39coFYOGLWlOpe/RNF4=,tag:TN/9NYlwU95r6JKSCOJnWQ==,type:comment] + pub: 
ENC[AES256_GCM,data:kBzNL05tQB9pqLuxHhv+YTuJG9mBkc0DUF+xoUHT4l8lvRMDxwOsw2MIcBefwFmaNLIQ6g7Cr+XomhN+muXHchSuoc2FBqoODeVxuiqz3XExviFEMSjPo4PFQN6/tumHDlCvSlI=,iv:UCdm6KOq3ycaP0PrTwYUNkyg60s67rRp/GyWlol8Ay0=,tag:Jas29pUb7fSzvtIbsdn8cA==,type:str] +#ENC[AES256_GCM,data:ODXFUxxxdQ==,iv:s9zJVx6wo6x517tbNvC+FZ0dFzqbjqeLI6rXBq72hQA=,tag:bXoV2I3LbpmQyddJrtS3Qg==,type:comment] +# +# +#ENC[AES256_GCM,data:N1PD5xQeHX/nrlM68FOw+6c1,iv:b9AEHsj3CnXtdczYUhY8Vsd6niDoI2b+SPNFpuKcShc=,tag:c2kXzTx3QNI/5k6PgUdPAw==,type:comment] +wireguard: + server: + private_key: ENC[AES256_GCM,data:rxnsBjeYxgVJfSuthDYZAnslReXexc+/Z+/31tDFSGOhECcZBskFaYpsLKY=,iv:GFyeG07iGdixmCkL55lHpZl9/eFIE4kEcXN/avgfNgI=,tag:30IHC4wXbvk1U+aBx62pZg==,type:str] + public_key: ENC[AES256_GCM,data:7iOIIBp3l7PwEbgBj7gF1uipExHWMwi5Bu2heT8c4G6hBa7CB+TtlvLI8m0=,iv:LrvLL9ZpZ+mUa/7FjHSuVHYpzomcne1Ac0u1oexqMnE=,tag:Ek0sYfqz85bwASVbKf0NDg==,type:str] + console: + private_key: ENC[AES256_GCM,data:OGC0SqqlvoVlDnkWhyCjojBBVaZEcLuNwp9JrmLH4vCkq6LQysaHeXJBp4k=,iv:Yke+FRt+vaicDfOo6WViDBoeESrHa+4L0nu6cy9FW+M=,tag:jIf2G3/tvku+Q8FZJQmaow==,type:str] + public_key: ENC[AES256_GCM,data:Q2rPqu/vSJFLe4t4CDGy7tWT/BRAmFclSFUQ9CQYqV7UTt1ZJH5Ju4XFYkA=,iv:clrb6d9XQNgM55Ki/8guOb1H+t/GiDVJxuP/QARO5zY=,tag:FBGsAQShTVmnnIMTJEva2w==,type:str] + preshared_key: ENC[AES256_GCM,data:ZiAROExgHCWwMyKdOY/FRgpNK/it7F3PxSD5yRr7Xhd6H18NUtujt6OSqY4=,iv:uNw8wIhafXQrzQ7o+O1PQfEBodEXYVP9Abth4COI/dY=,tag:5zn6i/8/sRv9qlFTejEQ9g==,type:str] +#ENC[AES256_GCM,data:ODXFUxxxdQ==,iv:s9zJVx6wo6x517tbNvC+FZ0dFzqbjqeLI6rXBq72hQA=,tag:bXoV2I3LbpmQyddJrtS3Qg==,type:comment] +# +# +#ENC[AES256_GCM,data:+kiy17Z7q3EiPgo4Ykk=,iv:vOi54XHHLNIO9U0ybpU9Cn68Ymj5W4vtQ2OhTkGAmRo=,tag:h3wdCo5UfNGlHqMpeeBf/A==,type:comment] +kopia: + repository: ENC[AES256_GCM,data:16V50bVVtecbF+TJcXHCx46ZPGNMgAhUOA0YbZHVTsCrOWQvTvcK2k8Iq3E=,iv:/wNbrE5GaFiQPNEp8wZ0JehbXkmah/gPm5ywNE6WVVM=,tag:vW82KTKtJ44++tqXON/SYw==,type:str] + user: + console: 
ENC[AES256_GCM,data:LZQEz7JS8CMMSlGB2/FS15VHAbko6YpCaWDZDa9tXVnmYLwFs8F39/u8kLI=,iv:WrYox/3LweqafWl96fxmezTFhKHB/2XSDSLwHljdpw0=,tag:bPwqkdeexXItkbpPQQ6klg==,type:str] + infra: ENC[AES256_GCM,data:fwvu96Zhao5RthZptxhuv2NobY1riQzpvD4Ardc8UTwN6+HyfFVR31yTTqk=,iv:jqFEKsP2npZgeRL3E3Va7oLzbHShBO4FxLRYwnfLtys=,tag:qXescWki6oPaaSrhMsUw9g==,type:str] + app: ENC[AES256_GCM,data:VVs8lfo8ZsbYfeKIwtaFCrpV82SguR/xxNnMZXXp8YOBP+6F2Vs5jeDtTTs=,iv:L2mS2RhiUlB9Oq4gwVs8WNF4r8vYv6zg6Fqnpe9hSoM=,tag:WYjtPdD2Lny9Kc+dWVupjw==,type:str] +#ENC[AES256_GCM,data:ODXFUxxxdQ==,iv:s9zJVx6wo6x517tbNvC+FZ0dFzqbjqeLI6rXBq72hQA=,tag:bXoV2I3LbpmQyddJrtS3Qg==,type:comment] +# +# +#ENC[AES256_GCM,data:gfsEXIOqhoTsiCI4uDjDeEY=,iv:SfJwBdIFY8Z4Nt2CxjuSY8PYKebQvKcg9n2xL3w7rsU=,tag:yrTNPBjrvnxydGK2X32Zlg==,type:comment] +crowdsec: + key: ENC[AES256_GCM,data:C7bMfJikh20PFQTe7UtP48SDUlSKKDMOul7dt9USZIVKq7LLBJV83vqbOc3xbteqPzwIYqBEndfqypSevpy/cLAPstQXL634fQGbel6trqbCApq/xjucUvnaqRKyUjk/TH3oRP9qXG8XDsgQ1o+I4b4E1alxLljMPN12wd2KIHfDGp+TURcC9DkK+NESIyQMLQg40cBaps1HcxXvg7FB1oJImM6AjqP9iybDfgkCaNAqUHxLyCHJI4rdYFflALHFkorl8jaQTazqWfuSu70x8j5tUJREksOS8arN3YIwXp4UGHQ=,iv:sbF2ZD6xS3Z3EYc6/5m3oKMghkR1q/IDzVwBX0OuTTI=,tag:NIuO8x3rP47oE/Y0EiNOvw==,type:str] + crt: 
ENC[AES256_GCM,data:0Olvh8chXMx0tri9NeiWhka/bl5XRwdAQf55EwhVa2kTedETOBIjdgDqdsXhoREhannWK/1cXcxP7vpvRFMt5e06FsmPh8mSuuyRboVs9ys5p8KPYQcnlOZxfBU1J6DzE8Zq/tOf9cpdlcUVpPJ038tI7cokqhBvPGZIR2Q4/2BKWQnL8d0GvQoEfXOpNw63qQDynSvaSs/asfZf7yeYH6v3Tc4tNRBB5nl8sCq8GjTvm4xleRWax9qS0oroB/uY7S+5OCGLtJFo0A4eweOcEwWuqvpGqsOgs6s8bToeaSmPOUgsCQ2abiMtBWnyCxMxMCFNB+79lpByasf1iGRWlrhjQeUpIoWWAWU6oUbd4pMRy+j+rKV1/Cq4wF7cc7kSTh8BlFAK3Yr8DIDppiUUo53utPAl179ZkycNUzHY5Auh/jA9ZU1UZRbtIOBlIN6mVr/Y53EzigtVYlZRQ6aaQFutM8jGBnegfPhp3m+XY2WanYk1GKKj46Wh11g1YqIWVYpk5hlR1RdQoPsgZPf0yChblbdfQ14iRJqw/0pVPBvX6bKS6P2OaDHpCIBSpTE98VSEjXTkK0XU0Mu5YzyUWxIe5/BoRiF9gRFGHEDWwFWprTuFk3eUEaEZ9qoyWr0igmX73Be7y8Bre0jX1xSMjepSLV/0dSUnimhfAvT1TlhwNlxq+FGR0ZMsu8QmbYp1CqLcH93rQxoloHIs2T1+KfKAENwsjm2PshdqmOOoUkzANUmdilYCelkbXyUKXkLqFb8qnSV0jK6hG/ZVeXTzZPnODJ9Tow9bXKdxOwk16XpuMSHjCoIol2E0yD/0YKVBHAnYuz/Uz8Dl/ko//3+OaDD4TudV/nRyy4Ge4JI/kckiovMLma4dJSmADgOAXa+Ao+3N/vfUMF1F1DyRXUXsqHMygtlZZpGiswabq/MrjEvBPomzFWUmu9clWiPJ,iv:SQrBnbzApJscyGuNPE40uKsUdQCf4W73uUwanfyCGbU=,tag:KK+a0tLB+mmn4YWl5Yc0tQ==,type:str] + bouncer: + fw: ENC[AES256_GCM,data:wplc8z6YNuQi7bN6r27wSOmx7alVDWUBgU+pgvnH0exmRhHCHjbS9zX+4P0=,iv:fbBg0AlqOtbKgYjbiSPopJ0QwEryAEXfcc+YaUWI6xI=,tag:Ul5+MwKZyMevBIyvZH/hAQ==,type:str] + caddy: ENC[AES256_GCM,data:fRyTH9/7OZ7oaz2n3o7DtXJCnH5l89fGSZtM3FH8sWzw8Iqj14t/qXsX5Vs=,iv:YQbozgWTmU+facWKeC+2ab2fvm47CwLbrprRiWKDRyg=,tag:Ki3hGXPJ4lWMXJYaDF0kaA==,type:str] + machine: + vmm: ENC[AES256_GCM,data:CF49CU6iwC8LiUMPaGMFMNGjF0n9gpSplnGkkJNk14qEkvYFTgfoUPqc964=,iv:Qi6cTieniO++EltyUkabXqsPAEb0AEmdR1ramlisthI=,tag:G85rEh6STS9pDr1zbuNUdg==,type:str] + fw: ENC[AES256_GCM,data:uA3AfA394F8b0a2jGjU8ABHvKGNTsZM0O1woQc7gLdHte5Cn8SgfuyNgWu0=,iv:TCZ4NwJuTVPAbiWn6QWM7cRta2uXODHb/41Q1qslJsU=,tag:A7fGrietJuxyEUik3eVZLQ==,type:str] + infra: ENC[AES256_GCM,data:rNpo77YAdoRedzJonScaT6Fhr18+T6u6/bKEzC7PYOIzEpuKh/MqzIS6Np4=,iv:D8QND39D2eqBG0USHhcc2UCxpTSuGn4SCpTCOBO0RAE=,tag:1X8IK8DNsD2m6DdtxMQTyA==,type:str] + auth: 
ENC[AES256_GCM,data:neUWugEoUU+LrXijmUjd6063pvtyMuvBhiLtKR0BdAqkl126LN776x4cibs=,iv:OSu3S3AQNqk0PRR2kITKgLcniEsNhrNbpPIix3UIwDw=,tag:5OpA77ayasIQAYx7tYFD9Q==,type:str] + app: ENC[AES256_GCM,data:mMO7DZ/oVKoNgLNGGbhIeKDfjlwxlni9c2EoULZ41WeqDEnkdp32ZPV/LS0=,iv:ex200YXZOxA+nGFnv8OxjI6hTlg0GNiIc9TIopNZujg=,tag:Bp+W7FsOgZBtKcrC6qnx5Q==,type:str] +#ENC[AES256_GCM,data:q9DPl4EKv0g2B7cpy4YlxRk=,iv:g0tCbPnr/xqWG6MNaxjb9zSYFx2nxQPQiRRJjbsIR1w=,tag:pP47LKjzDziPwHV/0Wv0RQ==,type:comment] +# +# +#ENC[AES256_GCM,data:t/KEdBnDGbzSMAml3g==,iv:5CJufEHKR/dUsTxHyPqi8Q5Sfl6xXlY4qAviKh4YSWY=,tag:AkxRCxzLw9CjqPxNiSV1xw==,type:comment] +ddns: + zone_id: ENC[AES256_GCM,data:zor6X7gMJ5B3ZmCk/HMxfSuVBBBmvsnlNKbXIFhSwz4=,iv:borj6+JvYPFOx/nRIRceKBo2Spkblc4ieQ3atOSARvw=,tag:FO6ugJsz3qzLa9adj8GOUw==,type:str] + api_key: ENC[AES256_GCM,data:JyM+sZSF2uga+B+0+gofpJ7UN922TSslKRmnm5RHHdld12rdwBpUYQ==,iv:5AHzpbaMfJIMAJnYqBaOL9k/QKGblTF0MGrGG2lTgAI=,tag:Ra16GIKDtdGTr0NXQ+VwEA==,type:str] +#ENC[AES256_GCM,data:ODXFUxxxdQ==,iv:s9zJVx6wo6x517tbNvC+FZ0dFzqbjqeLI6rXBq72hQA=,tag:bXoV2I3LbpmQyddJrtS3Qg==,type:comment] +# +# +#ENC[AES256_GCM,data:avhAPSvxfnseTd2OGg==,iv:yhKB6gjtztcncgm7hAnXC7PpN76dCYsJaLt85/2Zm0M=,tag:DMCROfsM3abh3U+XGvbydA==,type:comment] +bind: + acme_key: ENC[AES256_GCM,data:XdFARjEVV5jLrOt/Ul1eO+s4xPzsmmmlHf+hAInTic8BDJ4DvbA4u18ZqLeAOQNNEf29HkJd9Zv7yrZ1c4KYLaQc/OIZAszD4WaLVsWmRlNJLUKQFYdBo8OWXM5Ac5c6NQoWRPzZ,iv:qNrbZz4PniDaXDdxTXrKdiJsoY59iwjMw1tCL8q26A0=,tag:gyNDdXRlJhlRt5kTj2XhBw==,type:str] +#ENC[AES256_GCM,data:ODXFUxxxdQ==,iv:s9zJVx6wo6x517tbNvC+FZ0dFzqbjqeLI6rXBq72hQA=,tag:bXoV2I3LbpmQyddJrtS3Qg==,type:comment] +# +# +#ENC[AES256_GCM,data:FNnIRyidUh2urEgGgM85,iv:w6Z7/VLY9BtH4CNqchc/3GkWb4ez2Nb0TxdZF6RdND4=,tag:14vEc2qpBoOApdnci8ZTug==,type:comment] +blocky: + key: 
ENC[AES256_GCM,data:3hIpfdbwUzIvFhhVOl9TYoLvVLtFS0jdKSC9FGT2dSmYJP9f0qqrKEtH9+52GF3YUXvf0fkPoqAxxDf3iNh2ynAS7PMBOth3LS5mnY+NpRCZuod8PkyQOWCzkAeVla/JeAD3n3da4f8layWHY47u37XgeojQpyijCWKifEnpbjJdXVBKi440mSaFaORgDJkvSMRGcgF1o1HWz8/YricUIfv3xniQA3Ke8gg4/L9gHHFAqAZiKUBDDppHzuY5ua82FDWTgipPtoNdi+9xyeTmNdunrVAF3OrHwkNQqfLJrGOfrhU=,iv:TGbO2lfqjnsu6ZzcO5qAeQKSgE5qr2lQZi+3YuAlAGs=,tag:tE8bTnPpBXwDXYyB9YsOPA==,type:str] + crt: ENC[AES256_GCM,data:vvT9evMqMW8isY3N0tvQCW0XFWIW98vVz0aO0o3habCbLJlB5wjF6TMA1u1xbJ9zQWX8kHTt62BxBngCHXtKFnnVN2q1hNFK/sGFpAJz/WXD/sJW8kHOAv00dwGl2/r1josB9hFmn52omxpryY2h3ZTgqLxoCpzYPoB1LvJzPKo0bS4ig+/qYbAyPe9pNbTmPzGjXutoGbOKXKAQvfZg0YNsR/nbdd0Y5eEdHL+0syH2wEZZ1ZBBNxnd9wiEmRoQHDDkDJ6VHAWfcgZcuX47jKak6LFBAOfj2lVIza78o7MSQsCT1M5DJMZu/K5mV9jJhftljnZ8MB9rX3rFcApHnFX9luLn5b7pHRwieWCNHM71gy3wHS9DFZXjYRpFqFTiXmAxtKxgL3wDIu7M3Ajrc1o1JMlsWMGVUa/InfF73NIMJMKrAPgSWXb4JgwF9HmGjbO5GiSAjRXvuFFmQBArvA7NRn6XFaRyHsig8T/5nQ4t+E0C1HjCkzMFVnUwq5C/3aEJij+cv2YaT5jVTZ5tvX/x0lykMLxd5Psy2Mqvw1++NjS1eOBmg/4q0Pero6zOl+s83fyoSCiXD+K8XHHe24M2IBITABC/Wf4L4yNrwE5S8qn7M6vbFZns1C4BpnIUqbtGeMbAx5reM30nXTWO9pI8Xl5zAPrb4gd7zdl5c6HLge3k8wdNqw2kBD/z4EDCNCPar3g4f1XMhcA1aB3FrHuLvHOkCkCmNbvZ4D4+Nams8HDQ7aNNStshLGt2Rdh8fzQ/KlQslJvgk0oLupnGuZzNWP+TO4tuR4DC/pfX9Uw3+1SDeeTFH/X4iaoSJJxqwC0D3pTvu4SUXilZ+MBE+mDEkB+PbNTuyuFkbUi9OyRFy3W175Mbr9BAdlSIAd70SpUiARqn7k3S5twV5pomxYyOaK+cuAJub/Gp8I0Y102P2rNqcASmA/Ocnogp,iv:m3r2kOi1XUVzLcKo/x9gWT22IDrnOlDYFaGxY2wd6Oo=,tag:6qOgMHBC4mrOTRJ61jhGAQ==,type:str] +#ENC[AES256_GCM,data:ODXFUxxxdQ==,iv:s9zJVx6wo6x517tbNvC+FZ0dFzqbjqeLI6rXBq72hQA=,tag:bXoV2I3LbpmQyddJrtS3Qg==,type:comment] +# +# +#ENC[AES256_GCM,data:xUxguG12uiQAzJGMXaaqdgTfCw==,iv:kh2nqSVtBXaAhEywAdDPbbG0p2ljX4mtjVKYXCNL20c=,tag:XGaBtxPBHSaLRYZa97+kmw==,type:comment] +postgresql: + key: 
ENC[AES256_GCM,data:XCBWU7Y/BG5kOeKqmNpqwyjePWrMzXUW2RUc+d3r8kfTMqVT2Hxe3Hh8qSCilsZOKoeEfraDKCMKBLmmaX/WGXtjnhOF87Y43i11jDPeMv4g0b3f3oh+ekckxG43pFpPUR8/IW3xB3otTdrp7Sw93xh1s6BcYVutL+opOVLwdB6JNlnx6ZAi+ppjmlHxinEa1UpnOkLC0JNIyp0i5xp5KMizNs4iNcduI3eWWZmj1z/lW1Q7dkH6rPIjtNQt0Ce8wpdsuJvPaZHHAZZ31KkSj/6n/sNhSQsgAvJNYBIsqZRG5lw=,iv:/1keHEeQzFuxaZD/Pw51IBIaBy3J/5fK7BcgF+A41Mc=,tag:Cx2AX0sPTwkmHODbuZNu3A==,type:str] + crt: ENC[AES256_GCM,data:cnvAtbrzS3xpa+RQKhSO72xsPLAiloQZrbUL5asql6Zf2B4aNzepfZa9vlTObd4pG/nw1lIFIY4N1oh+L22j9snDXRX4BX5tANJBCiZrnqhvfDkT97xaK/86D9J6kyWmIYjlQ6bXBkvDhDII0eqigV3A/NDl8V9YApGHyAMuryWB69I4wjvVnlok2HSOgZxq0F/3Epp4hk8BuUTNvAcSeIAfpWePVHYtHRk0X3IT3KhR+Q16Ahm3fqDXFkAhs86NBaCl+kEFunGLyBqxecU8uYuU7gmofISqSDytI5ZGVt+oygHSkcQKrW/NBxY2yYs++8m5N0sLPfmtE8NFIMktpjHrUGEKnZmdUEjg7sFFRGhvpGyo77x8TPC3bD1rZJVgDwYCmKjyCO3VT9jPonV66rVOq5S8svm4pQlaT+BDPZXIqDQn4UHD7grKOdUA+zgvdBuVsUnngR/D34cPPm0Y2x4CLPmz4PBGZQu5Q4sCU+8vq/n8kNX1z2T1K/d40YBPlSunOZIZk6H5XMoy4Njftty2sIjraVy7udYEwqmUwfm9XlYqBa6QeofoxY7BaUrRjVUIcPrKLUZ13qddOoPV//gLtEDwcD37kyLw5ZbPKVdR/5rU48ENvP1UPFC47HDyhtA9/FzYweEZ1jEXSuxr6r5xJVcH5/2eyhW3Eo6ewwWeT26oVPj0zm6faeY6WVPXJX3fhMwNonAtTHzkfZCfFlDgizMtSMsMadzzTut8rlziEea9cuMS0EacIdzafoZgQsYq2ZrRhsYDbQNGAE/DzK3Hkrtp8eqJDNyRyCIsn1+B0A0ci8hADSHPS3q4tQD2JPb5zj6/0PNt0oIIBRcYuXgJW1nathtP718i3Hhi+cmqvZtza7XKUkxNYNYLO9hGnVTNY+mHD8HW94QdAQ8ikfF8tSBv0N4/ebyVIkaS9Nes8MyOQJbzfGhmmpeuo8jineQsf6w=,iv:6gXqS7C8lypzUvjExURswSwnUXG1bTC2+BxUkK0D36w=,tag:cd6URiL6TxM8GxyYirvYDw==,type:str] + #ENC[AES256_GCM,data:Zu3CNVkCUlW6BKGh/rhTTqrGv/7H2BG7,iv:vW4cAVvla/Ya0S89DTvo9k3do9517F+AHMLb36Ey19A=,tag:neDpBA97R77p88Im92xn8w==,type:comment] + password: + postgres: ENC[AES256_GCM,data:XlpsKT0/3VgNesMdifvHlYZ2dh7mlMEnxi0MJpQZiQ+1tZ0/3zMxQ7+vwec=,iv:akcPRmDZQfFLxelG7V87UnNB8ez28PPLCmY1MYfzStU=,tag:14jvIKP7Htfh+/oMwLYC2Q==,type:str] + alloy: 
ENC[AES256_GCM,data:ifgJH2e7MTAJ/+ppdWHEmLC3DAsKHWNi8j4w1ZVjUh4tN1cDLDJR3EvJTl0=,iv:rDYUkVe6idIf5Si5E9V+mvBaIdYQTJJCB2rOV0Suvr0=,tag:xDCL25NwuTp6H1DOGTIm4w==,type:str] + ldap: ENC[AES256_GCM,data:mJrxIhXynHxJhncw3upHpOkXIw+Ka9bmDBJwkDjYl+D9Pg4RDvL6WzBjthw=,iv:y8MUYo6VhgTzbWh/+n7/hf1Jw+L2KcdxKvulPJ67xn8=,tag:4ZFpj1UdOwXmaZjYvC/s3A==,type:str] + grafana: ENC[AES256_GCM,data:P9okJ7bcsqmeGstkSwbDq/RgnG+lFrgAOvcj8A5lOTpmHaSlXGiKG+ybXa0=,iv:Di1ghnxIbAb/u7uo/mJCC3QYVjdweTHaQDZmXTx8OG4=,tag:DT3a1zgU9sTr0BXpyoZ/SQ==,type:str] + authelia: ENC[AES256_GCM,data:OqyloAADO6KKEaBjGLsJc9GTe77wn6IvA1VCD2dfCWxx+zgzUYh87fK1XX8=,iv:QIOHNTdNnzcY/f3Co8dPdNHykhBnYRhm43nt35hbALM=,tag:DLQq58GrZd+Ul7MSn6s9uQ==,type:str] +#ENC[AES256_GCM,data:ODXFUxxxdQ==,iv:s9zJVx6wo6x517tbNvC+FZ0dFzqbjqeLI6rXBq72hQA=,tag:bXoV2I3LbpmQyddJrtS3Qg==,type:comment] +# +# +#ENC[AES256_GCM,data:VuOcF5INAvCNDvXhcAc=,iv:zdTFTKaII606RjjDJbdUyv8jGba8abL7lJNTsPH5C74=,tag:cYSefZHKIuvHmAzoxierQg==,type:comment] +ldap: + key: ENC[AES256_GCM,data:OCje8b2x62vVQDV2DXvy28UxY9IIMtRoSLyXVFwNe+sRWA5prW67Vp0XcwwL00nP6hagkXGNRxy0eXx0VYXiktQBeQQe7TC5bUpzJlDCyGmi9BsbDhzShSoSM0v/TxX4UYZ16PbdVxyEIxlD5J/SpbQdg/VvVA6oJgLU4SfZCYmKRZ/gC3I4cKGzFJAT7NLAnC74/Wngz2nE6ipzXiQaDHnSuitLHF/RGoAgE5XyelyXag5wGaB8EAUgmuM8woI5g7RES9885bPOdt/Q1MUNNmEz7jIBH4AVGo8stxRioYmAcZ0=,iv:6Fi0DyvzmrQN+OVclRFZZyXCW8v1nfizj0saGkV7qTY=,tag:AkMtsUaRIgxNKZG1oYPTTg==,type:str] + crt: 
ENC[AES256_GCM,data:1SJlTokgS71mW3rEQG1fTLs1cp1Mraww58stg87Bf2PryFsKw88va4HB+bsKzkCRgykQoj8iDSqM7tc7Wze1HWz+bgFTgIqluRQpZtbcmfYVlqefdIUcJPAntqe/eEaA8Hj3ozTNMlKFspv13vPwjSJHGrHcektwlcEI58+S3lhu/IrIfdxryBm97E2UQczznRE3zuP3MiBYUV0oj9Bcm8KWDKqa3DLwudp5wwyw/OG6DGg48isEvNxaMJhuEkiMwMNR+zZXgAvYcYteMaW0p1UXNFHzxMpOWshU1zYL6U9vZkKfpVMoMoTPW3+SO+vxlJVafevStEipUFd48XmOo31uMQH9zWh/hbLM8j/fiTaW+kY89XwkQ9ZUOx8+YXgcfBb04FAbtLEXMbC/eF0meVdjAHv0TEBPF1a/F4m9Kqv1GkJ4RA8jHKFng7vWsRGl7WhmxrCxjrJrYu9ptP7iMvbaaqMWG1YOSwwZUe2HvrM7WTlJxeRukwvW3rOglAUdFODJhsWbATpobuu716fKLqDcprKa+vA5l7VMghqLf7tWRGXmT3O3RgTGDwqjokx0q0+4ib10cgzrRCkwuTBDiLYRdxd2eZFWumLOlKKQyVAMJ9YZmUnH2VNULazcN7Tznpqm3vdA8pC6Em7ELTxYTUvkaye27jRVyilkZvoX8oLiWyEpMpPAajoVntmPnGTF5ppChQ0EmyuFXlv0tsU3K+bxmM5VYVNy4S+1cMOvEwGDuSGJ9nZfS0BFm8NXDp+BbEIAa/DVc6yOjVEwi1Wh0khxbplkW9feWfkLqrdlxF2khNOTePIyEDMzs5/CRq+kzLy0U+7BeyX28jleRpeAQL6nlOZ6Ha3AyQUhxC1/KVOK/kgAlGVi0gyaxZy0yuF4whFKN7+oXB+CrJoCyX3mjrOVqQIOeNIK6tLac3hW/cATu43uDQ==,iv:BmdsX6o02n/ekhnTQrpTUgM3lqXCyQNYtMbexI1Ziho=,tag:7AI4lOFSXDxR7n1zoqcE1g==,type:str] + seed_key: ENC[AES256_GCM,data:q6kodsHzwDuf0UHPSk9VbEI9pCwePJWcA7o6dQiZqyM=,iv:EbozH0UpLThEXjmWkNhkpHQxabH/K3/lJ8xki3lLGZo=,tag:rKO8csevxpH5kzzFWQoang==,type:str] + jwt_secret: ENC[AES256_GCM,data:AfPcqKAwdsHfiYHrZ6yKqOxD93gBcLEvZ4N3M8nhWMQ=,iv:awfSXZbc7Tzg8sRqUkp71TXivLb663ZjaxNkskE1Heg=,tag:e541/ejk7x+GOhJeoUPMGA==,type:str] + #ENC[AES256_GCM,data:/5Po+8hjJg74ZBepaNWTzZy7JrTjiwo2DNFT76hZTUFKEdchNd4sfdy3tbVwL5y9QPC/kC3kvBZL2oA0+WzcUmd51Ht4/xS6CnUCpodBrSWNHu4H+ohdmz/jMplREeJVWAFHuTZROqB9v3hw3rYJ5Gb+5XROAuzT4pKfrPoJ1B+ysGUpuvybhrC1O5Tg6IPs5fvIiR3yExGcIFuywZBiiA==,iv:gupWDnXb2TD1N8GhHwpiYuz0sDciLX8V0ww9cFKtFNI=,tag:7eDx5RZOrNXOUshvdijV5w==,type:comment] + #ENC[AES256_GCM,data:+Q0n+z/P2NwlY6/aOzMNe2yZaoN7uSctcH8DlTUR6dYgNTnZlGQNNFAHs416XVMcYHyBqPwkSvdQLGl5TcJReZLm+8ESeYeQhjWgHZWKnGJK0e46cdMhznXKjQ==,iv:dayGt24JAY3+LV3OS0JUxM7vxfOUAoTl7O33d6y4wVI=,tag:KcfHxeqjmyAUfNDELBfx/w==,type:comment] + 
#ENC[AES256_GCM,data:6AE31A6F6mflPwwac4jVxY5kJPYaKlYzNjtUC9/VmCPeS8iuvoFmxHNQ9cwxLa20Nw==,iv:N7tQ9RyBYvSCKzbLGWgH8NgyKCJpNM5cupGK9e9pQbM=,tag:nAduK50DDywtkkQ03psKRA==,type:comment] + #ENC[AES256_GCM,data:o4Uc0IfSKbSCHAh00aHkN1g/+FoK/Ffm26iT//ZGc9WTncusMdZslfX9AQWXXOtRwOP9hjpw7KZA5FWsYEGzyIJNcvq0nayEgwBmCB1RmmDVcvVT2jEjKLGCIqYMHT10ZUdFuls+SSKtE6Kk6GMrrV/MEGzn7rKIHSlflv3+Iav8p2cblRoEFqAxrbNgbvcMzSDpWH9XCInhD0Sw5ABViNvBc6CVb0ZfZEuyoBF4EhQ=,iv:bs9iS3fmJtb0K5fc/DXKYWho9+1GBWmN8jOBEhGr8Bw=,tag:Ry77miT/7UKA2pLE3kMcag==,type:comment] + password: + user: ENC[AES256_GCM,data:DT9h51o6z70A4QeoKvCbUo98JFxILgB5tkHeX60CZuCl0GHTa//mRpIUEvo=,iv:Pa9iqXeKDHHBz524zeRImYEkXmY50s7Z+Z4KYt3sNT4=,tag:UTS5VIqbRA66UVDrDXwuPQ==,type:str] + authelia: ENC[AES256_GCM,data:G8ZGsLKqEmMzQ5NMAgirF5BQraHNqixtI6dyyaeNhTdXebjJZML52xL36p4=,iv:ZtHAsFYmrQxr+qoQLPW/eme0+nsT148KRsXmW/LNLlU=,tag:Pvjs/eylkgxJpmGBsRmjcw==,type:str] + grafana: ENC[AES256_GCM,data:vWmU3ZKcolETWAY74C3OMD8gMXDeYk+DqssACL0xefIPi5IkbrhYWmnWAnA=,iv:wcRms3Zp8kPM4USRPVa0UHpCTK36SWhK9C8yHSWu2Cs=,tag:gU5S/6fdMZVd/ih3Yd5uJA==,type:str] + il: ENC[AES256_GCM,data:/CyMeo1+rIUAYiB25nI0,iv:jsyiiRN5z9GqcUnTZ0CZo4s+umTc2zeY2FPp+tVOC9o=,tag:cwOHcqMysCxX57w3a+Pzpg==,type:str] + morsalin: null +#ENC[AES256_GCM,data:ODXFUxxxdQ==,iv:s9zJVx6wo6x517tbNvC+FZ0dFzqbjqeLI6rXBq72hQA=,tag:bXoV2I3LbpmQyddJrtS3Qg==,type:comment] +# +# +#ENC[AES256_GCM,data:NronfCPkbPQLilb38VmliNn0Yg==,iv:AbC7xk46CJ5dqU55IbyPX+OZVEROz+OGnJfxoZmaKwE=,tag:GEsziOOFhDF16Vjqx59SNw==,type:comment] +prometheus: + key: ENC[AES256_GCM,data:MNmUv4J5qVRB2NH3vFcIPGsx8ZB7ggYLK/4HyI2hBE+KhBGgh/9GeIauN6VDxO0xPqVe6xsY41vMDE8K5wmQyvTOi2fyXzL6LDxPHKVZVUKMjRgiYKJJh9hghzVeQwKwTjfZ+eXFgSKf+ENpSgh4+zbFqm0eVhhZy6EQ38494IlwS/UsojC831xg0OeeHNF5QrE7rIG8zo+rjsntpWZbJzSM9BgzFAAfW38Ywi03lstbJi8IEq/gFmZjQxU3CFHicD0X/jKY/uBxZB8lu8Km2ZevGDLFGwGX3zpcYxou3667m9M=,iv:1Px2X4RKD1sbQ9NhFW9rf0Q8cu0ex8P2RjmR5wUmFbA=,tag:rKkYDav55LfUwUueaA9Qfg==,type:str] + crt: 
ENC[AES256_GCM,data:0eNafjanGCWtqCiavj5vAQKi9nczPShx0w9dfULmP03sPZTh74FCZwZvu2vQ8i0GbKNZdHMKYAP/Utyd1XdttaVr4Kvf6ggkQXj5a0AOakRmTOjkO2WhZtWUVd/fJSfcsifa1gG+x5O32yVZXx4pWSVtcV57g+BnksTyt5JBnPz+u5pEBK0VZKRSjIjlvanLM+l/Dp1tyrmKarFX+28bCDUxB3I6wZUEnF35wUNbu6pVMcR8JofQG6KrmsjixMWPRDz+u1w8b/+nTGOXyykUUjxH7a6wEf7/RTGNBwT44CrSACxybJMCed2MBjY42jA0Lm4ISwE8epvOkpptu7NRyEgMiVke5mNSH9ZGaUe6mCBpbUVkL+tUQG5rwclsL6ijaCyqsWSD5bb2i6wGhmPe5/ERKxmLjOWNeAQSf1HiJc/bSXOSpt15DuGgwwJ4jfebOqdshUy7CMc7KclQj7ppqhTYhRHbM1mJwCJr6QGVDh5xSzFDSH1qbCRdeXkPEkm7WxRJbAtzY73UmO4eqhsp5n6+PtiFNNqQg66mOGF8N7PlEKh2vcoxgtjnTTVHVlgiAHuT2TMWNuhO3qwJeF6G3zApFSPuJWcEH7PGAeezL+qUF2TzHVthOHQMUNFr1m8i4ms/hCE9DGSv5oo+X7YKO6xcIe00xNQEzMNk+HqmAKed9pl/eBHhD62UvQNZh66e1VjWpOE/ig0hP6vgKSwyQL6xUtJWWzYl184KcWokw+O9Vyw2FCR++0iu+Zn+PIYYQg4Vwpqd5VM0cj0tIB7W1+aLz8hGIOgQKDjQUshWeMxEjESILgb3+7lNyZkKT/y48ocHL/dedHRLXuxUcSQ6ZUGhWf33so7763Gimo2hK3jmIaprnx4LGyiBKPPxkSl+Bt7seUiFJ041olDmO+fUDvi0ko166CA+3mUxeoeNRh74J2RPHR8dmy1okp1NLEQztyHw4Ps=,iv:14bMOT+Fn8j2kFDQxTuwtsTyYnp4fIF0WRpQntz4afE=,tag:fWnktLbiQzG8UExa3nspUA==,type:str] +#ENC[AES256_GCM,data:ODXFUxxxdQ==,iv:s9zJVx6wo6x517tbNvC+FZ0dFzqbjqeLI6rXBq72hQA=,tag:bXoV2I3LbpmQyddJrtS3Qg==,type:comment] +# +# +#ENC[AES256_GCM,data:8Pk3De3AWQPtW+o18w==,iv:O05B1JyftUqN/0/pCi5HReEOltp5jJ6GRER5+NXkoMY=,tag:Tl4duRnv+EoZ4xWVwMfEWA==,type:comment] +loki: + key: ENC[AES256_GCM,data:gtQ3j59WJd3YxreCMoR70VZBJubaR8UfoeAEYgZR8KGIYkEGPnBCMEmBlu/L4qZO0fHg0DLSN/ixLbmvbOluTMNF1rbj/5W4F0O79YI2RztXZ3oxrtYf4dRPxIoCuOQi9PtD5VrduBUGGhvDW9yae9QxzQ+Ca7KwMHgOLu/5+VW6YdQRBdgEQycSDsdMvRTmFwnQuSxuQhWxhu9GqR2WGx3cinQIZ8ewr147aOiZNgUjnHLfYjtzRXqQtK0HEvjNVKBWPuOWLYRa+h10TQlKMQMZn7x7QJlDZhsF/aRqVRNwYc0=,iv:0vVdSLKOgFTwvUbW1uU2vINYwnXMIYIzOl+2BqsJ7Vk=,tag:nm34OwNXseUTzO/KUmT2Vg==,type:str] + crt: 
ENC[AES256_GCM,data:O1if/oi5wJQNhHftNEwRhSvdnloaiE8EIJ0dlY9DO2Lml1rz5+2LfwGnN/mHGFOkIZri2AhizPUqXios5kKYgdh7ib4jD2C4DC8crKO9XiNiiChzqE0WqP/sda9UJL9zzmmm3WtkGljcL1iyr4zuCvH0uuIMX6W+NohIu19JT9wqamwMHBCEYuQ6gFAVOBnsz5Rsa6v2LBsIsUBadAzngUcrIztT1UO0cHiplBq5s+TkC96wWrlAUiaV1Lcf1aABDrI6o2u4uv6Vwv/d4AQrEpaNvH1N3un/M3lOIek53atwMYw598/BAioJfZXRTQ4qeqcv9+WxFJSpG6XrWxx941St6N3ITJcAMVmKQFfupj257oyazHF4oqyJKsgXQBTXlMdLPDLH/HTRVfv5k1qD5RQ6AXJrMy4NQrE4BlLBPkLU1wxyg2REqd6eKMgw1PQdqTeZHZrQCGzxWeY+rv/0v5jwSFzC/YY+hOZRz3BRpfufWC9mZQZpNoH8yccVhPybVT22YMMVljLUEg/0BICm9Q4mSMpc9hEJ7VwHZo5pT4DuNiVjComTpcnnrEgK3yYGBB6jsAAkU+zNmOZ6AkCvKAma2DVbOhwUx1ZuBruc4GsfJcHGWqWogr4zBpgDg9FOtLF28yQpfKDWZEdx1eq1Bu9gFhK1FoBxrzd7mX5ADn0XeRbFDn/G+C24ShA+D+rmhARAdCtz6M4uk5IZ3w+L8NA/qWyZ+bgaXX8KrpDOV+/7KFStD4FXkVk20+IaMpb6OSYskqPq1ICI0MIFsONajO8gCd026B6WD/hLsGvkLL/58IwtAd+bRBceXHgoqXeTjyqNgKhhiqmKejZUphzrIsMx+GWdP70GOdcj41I5PSN0GbMnXQTRPGJo/GYN4pP3TbtmVrEvs5rMuGe0WiDiXQGM2hAreGMnt43Ge7DjyB+BPAx6ZA==,iv:1E1u18aWSRZFKewRnH+pJKN9xCpFUcJ0Dy4kq4yMprY=,tag:nmI9X0IrT545vUWhoIdhHg==,type:str] +#ENC[AES256_GCM,data:ODXFUxxxdQ==,iv:s9zJVx6wo6x517tbNvC+FZ0dFzqbjqeLI6rXBq72hQA=,tag:bXoV2I3LbpmQyddJrtS3Qg==,type:comment] +# +# +#ENC[AES256_GCM,data:yHgd8KBDPYeMjKzUOimdOA==,iv:ozyRymtPEII2luBads2GHyLf4Vdhf4EXNaF82SNmQm4=,tag:1mOJAmR5kVRBjm0SbQkqlw==,type:comment] +grafana: + user: + id: ENC[AES256_GCM,data:32LNqOb8pzi0/18=,iv:YJrYaQRE+veDhaEUefGWLbNJo1toHGmEOpqrNV3IDfg=,tag:ENryvH9ec3EbDhBB2bdHug==,type:str] + password: ENC[AES256_GCM,data:MFw7TAvzpL0EW2o+w6hF5qrLYVkn0HsQOXqBk9AQq7GxK4uAEDCi/YL3goI=,iv:HSpJL2CZKXHQYhOJLy6HzWARootl8Y1yMGhaMT0XRfM=,tag:t0VHDYHCZJwDcR1VcRFdCw==,type:str] +#ENC[AES256_GCM,data:ODXFUxxxdQ==,iv:s9zJVx6wo6x517tbNvC+FZ0dFzqbjqeLI6rXBq72hQA=,tag:bXoV2I3LbpmQyddJrtS3Qg==,type:comment] +# +# +#ENC[AES256_GCM,data:k+Y8/BDL9G+C1EnnJjy7gz4=,iv:IU/ZlXVvtX+zaeU7Zcz5baYk8ceDz95dZgzpl5/L9lg=,tag:t0OzLQCM+Up0wLkc3B/R/Q==,type:comment] +authelia: + jwt_secret: 
ENC[AES256_GCM,data:NOsbV8poM41bodRZeNJ6vnRYIl7vGKXm2BpBNeXWVeE=,iv:1G4PnaoopJMIt7D+8x0x3ui5tefMPRnSI5KJRv4IuTo=,tag:EkEHgeJ3ta4htrgOCnYldg==,type:str] + session_secret: ENC[AES256_GCM,data:tPudIXLrAKMhR3VPFj8U1g/+FRvYgBo0wXLNf/a72x0=,iv:atfRSZNUYEYFK92+0WOvsD5Gi4tMEKv+dwsFcRDrEtM=,tag:cJu5G4EYQ9EnYOQ4w1OSpA==,type:str] + storage_secret: ENC[AES256_GCM,data:g3Z8t/2xFZJMz4baIfY7pGoAOdoKsr+Xn4POBl1lQCk=,iv:i9Owc4w76qC1rFSUXLv4NuW6/Y2rmgSoTr/EnZuRVE0=,tag:zitZ/UAgXiUSuyWDhvHa9A==,type:str] + hmac_secret: ENC[AES256_GCM,data:c6Xn0QceaQWIFDCFr6UAQcr9M/Np2aGCkcNiXJfM79vFakT5L8q27+38Mvs=,iv:QSpnN/ZVYNogHfFq10E1rR8jXnI/qLs3Z7k2qKHFt4A=,tag:N6Wx52yCmGx2kJlyQXl7bQ==,type:str] + jwk_rs256: ENC[AES256_GCM,data:e0rbiDCUadbuRL3dFZ//LMMYimQ3THJtZo1OqkKntwayh10DbvdNrEhs6HF0VAMtVh5nkAaL69V3tqZnwLUadSg0Oomum8v9GkTE527iupAMjWRrXzAR/nsjvjI8gPUL3NJk++v0VA6ew/Gb9WfGlhUlmkf3K72Ab+gGiD8+xfAFeqRE/k7vHkTxkNwD1hRFnE+XhkUa4Q021nSd7xrW80sHwBWTKKUqA08xuouQN8Gkbjj22NgicT5rhy4VJiV8OnLCERLWc8F6LmDM9vLrTiPBvsGn1eX09fvVRtw40Wo0VSgklm9+REQtjefBhXuVxVDR3DXNzPliLNL3MOrvlcweDU8uxFVuml23FBBkJypJLJwbWvxwFQk1OWjiQ9jPel9+9gYu1xHg/dKeEsDf8GyMVJz60BDEQH5YnEwI1R6rPnJLZnmZVhRtoxmKRE8/oE3lP30tvOVKwRL/f3rXCyB1PYDR/2NsWtj61inzgn1BuMDl/tHDKjGhsCu2+ECThD756p6cjQnGftSSoGlwhmMCqA8I+6WUjMPqEsqietWKbMCRk000ufLp57wU4i7BvUVPSpiVO58lf6n/zs6hHBYDWNfoxEa2gha4csmCTkhCrzf+N+UJQmoMDOIQe/gSqPNcwcW/+q8K8zr2b4cMIyLbwZIyiez0RvPRnM/KuM9UmFAKfW1xdtyxalNLMOBjpqeZFnCIPFRQy6COP/dZNA9xUQX4TXXsL+qTk0vlU41/gcORgw9HQFRkLiRtrnqvRUMXipIDU6SIRipbBJ1GawH5nPTdHdbjdr7Wh0s2RUjIzLNvT0Lz0VRii2dlVRdURbtFl8R8frijuE9LG9xKY2Z4f/ojo71h6HcJvrvMOGm4iE8V5+cdthlwn4Hpyl8nLinJWLyVjhc4bzmIlo+VZvbq9o/keSfCb3wrI39L3Wy/bpoKzXx/U3eTVIIXRDrBpK+4fmYeSEi2JRJjAHHvs8De7c0wKLq9qOctIFvELwueisDIkyNGYykX1+JXa4YbuTYaOSLJNbk9B6UE17Vbr6FD9N/8n5JveXzfd2dTROQBbWmHyKa0qR7MC9foPdyUVImj1Z06w91OwRyAi+/GBiUE8ivKD9AmnTjrvX1uZhp9qGSzP9f3RkCd6lFDQ0YhSCVHQAYOrDsdBX50zqqmZEPPevIXXPnaJpSgIaOCamBLIHqrxX9KOFM3ApicE2BhXX4xN2QWFl8HGu2cVD1e9bINKrpLGqR+rQQE822OQqMqw3kZsi7lo46YPOrpXLsetxPJlLftQ7bd
aCPONZk3M18xvxESYclDeGk7LJ8Lp5CjtBterF3Ka/0r7cY4OxWusTypOY3jmVDILZTMlVOKoYiJgjIHxwXFdoqbtGMeGi83CY2/SfoHn7BSrU2669O3liX0L7hzGOLerkiVozbmC3TPq+JxGNw7dA79mXCqRDQEeqAjeha6J3wmvTvdmkuPIbO7ryFtDg9VwbULfgl07WzkES5nVyQDeamKv76ERRF/EIjQGgiWRY5MDyXAhVnzy9ATEbOnBTXeks6863CTKa/3lhhtDUGeo+9tTXcEUqLPkq1jh9xYo80OUp99J1483n5iE7EEBfB13McRwsJPufViyMWIQX9KDmRZmbCjhe/Jrj6joapQcY1Fw5MaZJgfIIfzz83GnekgTG9sz7Vfq2p4qiPDONLhY6qL9ENl8Cm/tWq/Xjvk+v/gaT65L9uvGLtkYKv7OpbPtTzieGHtYmddLZNr34Ly06PHxajLL+X6LeB5wmeC7jvanLh9vW6VqwNRn6kstS9reM4kHRhYTNKlDB9pOOz/cK0p0wcNzVvqetoYtZAX9TgPcYumhZQUPsXYsuC2Gdo88seihGKND9jtepvu2OwKRT3raSpsG+rum86Q95ulRsE/rhg4BvBXs8U62E16gL1KamtRkPcjXT/2aO1t0gh6JiBbQAYemnOaJCanycbnQcZal5oSvqPLAxzFpcjTkPNVFhYyiGe0ekcaYkCs9PYuM3hqP+qNAwjQmK7bfpK+eZNGT1gwikmlUtokTvRvQjmFynfbQ5oUoOHQuxuqWOa8KLi2NwMMmaJ4LXuzozUY8KD2EXmlmrxteCZBtQNfa7BxulUMlPRHG5bqKsBc9l380I1XU+aR2qmqHxdrd9698sIXxVK5zM1vhwI0fb6V61u86iYIAdSoitzhRQu8lK8prJXxcZWOQWehV03wvDnqkU4F2Qm/XYCXJgTx8PJyGKP2w90K7TtkW1YMWrBXmnwr,iv:av1JOLQd7WoZr4O8e9PU8nEZlaf+Bs7LLbGZs/4hm88=,tag:sZ8zyjRmhyYRdwntXTOfPg==,type:str] + jwk_rs256_public: ENC[AES256_GCM,data:2IP+w7X5ZfYUH+upr7qtjno45RXXUT71hG9Xs/VMY9aWOv6KGLpK17OEo7hVUaXwmOWOzAQl0V79bGZjIowabAoba15+IEi38L973i6F44kM9pb4N29Z2LjmLoZ/blNpl82C0ZIO/+OQ+OCRxVV8b9eZ+E1ckpvEk24NmOV2o9H2E1EU6QBOQGmYt39mnD/tQKNtE0/Q0+HbW2bz9dWqTScjH6CcwBwhtn3dhU/kbXjLc27XG6pxMKInKLo6GZmjLA3PDRQY9O2cYnFN1nDRKdOqHpbEnAPU4DiUSronJZGq6R6Z+3JGkWXH2OUTXaP8itLpXYBioY8BKH02QA+YVxnzxC0cBooNW9BxFb/GQdeLqFawNZ2lH9+743svXIRjjKp3jyn1Bn0TnDxgirZSfmvBdrgOy1fKl0qpMBYrBVLaAP1+T4MxNNa1kQqnmt9wke7aYrjVroi3/33TE9oU/xW6SW6Y+3lLEB6kJIGYb/m+qhwksOrqXRDom8G5hocUqMX3Q1fkiv8Jt/qvmuz8oSFNsjVF/4AZFESacfAF9pyerPUemyti6Y+rVjeY1yUv3+BHGgu8OiF7lqlIUm5O7NGhNQ==,iv:41P+xnnSOQK4H1dm5EohB5FmXsuaKXfSOFr+ROgoTWw=,tag:7a0+DsaDDK1DlkggQQd6vQ==,type:str] + jwk_es256: 
ENC[AES256_GCM,data:Q/ZXBPopDOD/sook4a50CYUMCcjNuPfHHqxWekRrCtLzFL7EaXdko6HFbL4vI5IRNOw/uzN0tuxQNZHR3ln+cVL5OnT8x+IiOVXoz8k6fKJVIWSWReiaQxHWmLSHILHodPH2dWOPj7+ffrK8wmRGcyCAS4QfXSFwChTl43wA3H+aI9yhGAaWV1YzTbb87r16724sJGTPIR59ZJhuAJlz+LYF54fnc2J5oyaFBTGtoSRdYAnCkO4fUOWPqWpIKR1ETY40xOMyjOMpvKP8Nv6ZTwYOubPxLmpclFO/hj4R9SDowsH2ZP4PRViU1RmE2gbRnQ==,iv:5BNxP8ALgoo58lvlIdO/cwTKu0xI50wK4xo0uYU3kZc=,tag:Tc1CtmV2NSmDwKjl8NgO5w==,type:str] + jwk_es256_public: ENC[AES256_GCM,data:utXk4AXeVQuNkImDWNQvA0JAEZAOyA4RMxix7xI2x3g0D4W3dyoTDD18LqLW5v6aA9cZmvcPtl0iSvrkxOrNJiDGJLHBNSa11q2HVAyJq7FTIRZkJOJz7WP+V7qjfmpiPw/PCS4GSWE8U5dvPcocxlPq13jmeCupz01ZRky8SIHdPiBN9a6EGMCxgTRhWUisIkV4i+1JTyb6jAR45ul6KFttxLZr7VPdu0M+pNRlJGSRPA==,iv:S448BFJqSfdnD7NcfMsq/Rb7w88IV1JhvrGlBrKW9zQ=,tag:yVDSLkUa2rMLZc5KpDC+2A==,type:str] +#ENC[AES256_GCM,data:ODXFUxxxdQ==,iv:s9zJVx6wo6x517tbNvC+FZ0dFzqbjqeLI6rXBq72hQA=,tag:bXoV2I3LbpmQyddJrtS3Qg==,type:comment] +# +# +#ENC[AES256_GCM,data:T4Wtn49AAxPd2QUFTR+q,iv:bH5goGWBDqumAat9dUv2OwfCUJUpuVqncTMqMBZUXhI=,tag:G+W6hHA+yftQ+4RJpXrxHg==,type:comment] +switch: + password: ENC[AES256_GCM,data:qu0f9L7A0eFq/UCpaRs=,iv:W8LLOp3MSfd/+EfNEZNf91K8GgI5eUfVPoWTRES2C0Y=,tag:Q5FlAOfwqwJwPvd7k6i+0g==,type:str] +#ENC[AES256_GCM,data:ODXFUxxxdQ==,iv:s9zJVx6wo6x517tbNvC+FZ0dFzqbjqeLI6rXBq72hQA=,tag:bXoV2I3LbpmQyddJrtS3Qg==,type:comment] +# +# +#ENC[AES256_GCM,data:UfrFHblh4F20kzKZ,iv:+gouH3z5BXLnALq1Fcs0H6SQ8Gnz6noeeRhltWSgzJQ=,tag:JXqIQjRZGtnx4y+/83CFFA==,type:comment] +dsm: + key: ENC[AES256_GCM,data:jxQRPNuioZbznadJa7sdhbntABsi+nc6Y5TJsyMXwKRvVUbQw8p3snp75d9WVmOfq4uj8wBGen0noa/6xtWo4ZiHsIg1Svh/5xAN37zueB6NmK/z+BW2y8HaIkNhjcinusnT2SWE2ahguizMNM90FpXgmTt9Zv7wVwRkifDLWpH+rs5zmyMWPtpT2RN3+k7tVh8XWVevek/DxG4h+jtoYhTVzndMC5oe/A60MHBJCj6kaksGlX3vIDbRo5A0FEKX8uWI6k6U2osFc4SHIvpljYmPagj3EwPaTc3mbl8IJsXFVH4=,iv:7O5oQa1fELjIt6f3nQ+fWDFirAexgioS36/Nozh656o=,tag:t1RHmY8QC6mE9PVPJg426Q==,type:str] + crt: 
ENC[AES256_GCM,data:N32+8CItiwbW8drpnL65weS35SY8uP8vcw9wBDFOxGFQjs6EyEwsMUVh1ndMpmDSUKWz7c7ji0HisC710RqnZw9l2NMg6R57DeXR/WsLW4LiAT7sFuDBcqorTB+6Nk0K9wLLdnsKpUQgl8oV9ffnWXZdfI5skKCDMNB40WJAYY/3wx9iT3wDQWaunq/1NT0SZHrtXXTj1JYiLbfDaTwTvkuqqUv1cXvOjonL2Cp4aNT/zjwK6c/TRbKdhIs/7tnUUE/lJx74eARfrMQy/3y6h2xK9pgruASuJxMw9Tyaj6Ya0FMNMIezO3GPQbFCE/IRfmCE6vX2RXUjVqX8wd/d4m4KNuMsN3hbwC3vQ6r+DcYdaTUfGIu96xfKSiSax/ZPQ4JhTyqthkGgpE0Ob67aVELrZkyhc/kvvSxafVSvP63C/nHPQnXMgitdRddYFq7487Vbq03frxiyID1WkqeUavc+Tn3dyA8c7aqVvbW6vZCAwUzbKD0nwPuBUtKZQpJKxHvtx2390PW7a214zkPLcw5LH3FnNacopiNfs6dNEeW/UsGysYkoBRXfrAPMX4UL53TvUTrpuCBhthJ4rgqjhgTfq9GtomP7piuf/tnMAO33qSHPM3w2ocLWeLyOiqYESFmXoBAsiu33LVzuVryFVw75pgrGAsc2wwKhcu4JC0cNgUE44ENIIjffVQX/mcvPvHxuI05vtzM0N8kOFPwG7iAVrO1eUEpwUjZwUwkk8QRsuIhZ6W7aYx7TLdPDpjmy6XCYGNHZgHg6jU7v18KkPep1YarAnsVYFzHQwfZIMOzW7DHlHq38NJW8lWq7zkg+kY6JDVy0nttlAUSskWNeCHdI5Xy3Rz3t7V6t1+UxkSy3roQCZOYjiaknwrXSctsgqucKaJCq4w0Tty36kaIXwOQYVm9JqDIztOmer4fW8Es7OEbLwA==,iv:26fGb288JEbtA0UcAmiYcN+UunrUOw2S6fKjGMetgwE=,tag:GmUJb9LJ186HNOF4AhfmLg==,type:str] + il: + password: ENC[AES256_GCM,data:6LiNif7aIGgGBBdVYpl4K8L8ev6EPru5x6HvX2yJ0oiDQb6G9AI+ZsaKamc=,iv:Y+eijt1PmNZR9c9uCzBnzJT9BDc3w/P4VzsbvrPRU64=,tag:2KKRtTSYzrJKy9VX/2J5eQ==,type:str] + oidc: + secret: ENC[AES256_GCM,data:IXJNy9PmMPn9uowSMMrNPYqaW5PZ13JTeu1t7eNlapxmnV3EAl3I3xbMK5A=,iv:UcG2E6zgoKkPfUiHr0A+gvnApCkBJG2GROfacwD634Q=,tag:nSFn9yaCadJGlGy/aCvc2A==,type:str] + hash: ENC[AES256_GCM,data:Y2/FNYbID0trRS5UpP2MX6UcLWxlRoXu9VLeU9DU9whEwNLbf4gYrxI7Q7etQe6HidBuoIJNxKajf3t0fI0ptbXnJaxzMh/v/CsiDg2p568o3KIyfB1PruGRKUrTuJneREUHg5jRXMYCwcRUQ4C5WgAyjd2spGlTQO0OLEqh0wkTtvs=,iv:YTYoaBy3QwlSrmx/K5woiXAHrh0RkawF+uQ4rQuP7WU=,tag:Ej6ZjTFvKjw6BrXbd0lf0g==,type:str] +#ENC[AES256_GCM,data:ODXFUxxxdQ==,iv:s9zJVx6wo6x517tbNvC+FZ0dFzqbjqeLI6rXBq72hQA=,tag:bXoV2I3LbpmQyddJrtS3Qg==,type:comment] +# +# +sops: + age: + - recipient: age120wuwcmsm845ztsvsz46pswj5je53uc2n35vadklrfqudu6cxuusxetk7y + enc: | + -----BEGIN AGE ENCRYPTED FILE----- + 
YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSB0TVVta2V6UHB4V3NmV0tR + V0hmbUw1bUpSSk4zQXZ4MHlaVzdJU0VSWm1VCnZaMUg1cjJyOEFyOWxPNm9lWkVm + QlRBK1kyQ2JnQ1ZZdlF6b3UvVEtrTFEKLS0tIFpVelk3MDg4QXBJSWwzRlIzSTIx + UmliaFNxVTBqRkI1QWJpWGpTRWxETW8KEY/8AfU73UOzCGhny1cNnd5dCNv7bHXt + k+uyWPPi+enFkVaceSwMFrA66uaWWrwAj11sXEB7yzvGFPrnAGezjQ== + -----END AGE ENCRYPTED FILE----- + lastmodified: "2026-03-14T19:40:47Z" + mac: ENC[AES256_GCM,data:EUVSxs6FPhKMSSmHe8P/d0IyBZsNb3q7AYj06j98bklAMYYVOludVePdh45MSvn92lDn712Muy6pqcJzDpsPWyxgXngywTu2SGV1yRCyA7U7RloRxlNROuDiugMkJWOtHcKArytVChUHT2PnzagAJR2kBSApbjUsC/xUTMBpsNM=,iv:SsJW2fMNEJHT2M+gjW5TKu6AYoxsf9jKf5T9KgJoF40=,tag:ItVweaSxts2Cm1VKkLp0/w==,type:str] + unencrypted_suffix: _unencrypted + version: 3.12.1 diff --git a/config/services/containers/auth/authelia/authelia.container.j2 b/config/services/containers/auth/authelia/authelia.container.j2 new file mode 100644 index 0000000..03d0f8a --- /dev/null +++ b/config/services/containers/auth/authelia/authelia.container.j2 @@ -0,0 +1,67 @@ +[Quadlet] +DefaultDependencies=false + +[Unit] +Description=Authelia + +After=caddy.service +Wants=caddy.service + +[Container] + +Image=docker.io/authelia/authelia:{{ version['containers']['authelia'] }} + +ContainerName=authelia +HostName=authelia + +# Web UI +PublishPort=9091:9091/tcp + + +Volume=%h/containers/authelia/config:/config:rw +Volume=%h/containers/authelia/certs:/etc/ssl/authelia:ro + +# Default +Environment="TZ=Asia/Seoul" +# Enable Go template engine +# !CAUTION! +{% raw %}# If this environment were enabled, you would have to use {{/* ... /*}} for {{ go_filter }} options. Go engine always processes its own grammar first. 
+{% endraw %} +Environment="X_AUTHELIA_CONFIG_FILTERS=template" +# Encryption +## JWT +Environment="AUTHELIA_IDENTITY_VALIDATION_RESET_PASSWORD_JWT_SECRET_FILE=/run/secrets/AUTHELIA_JWT_SECRET" +Secret=AUTHELIA_JWT_SECRET,target=/run/secrets/AUTHELIA_JWT_SECRET +## Session +Environment="AUTHELIA_SESSION_SECRET_FILE=/run/secrets/AUTHELIA_SESSION_SECRET" +Secret=AUTHELIA_SESSION_SECRET,target=/run/secrets/AUTHELIA_SESSION_SECRET +## Storage +Environment="AUTHELIA_STORAGE_ENCRYPTION_KEY_FILE=/run/secrets/AUTHELIA_STORAGE_SECRET" +Secret=AUTHELIA_STORAGE_SECRET,target=/run/secrets/AUTHELIA_STORAGE_SECRET +# OIDC (HMAC, JWKS), This part needs the clients to integrate with Authelia in order for it to activate. +Environment="AUTHELIA_IDENTITY_PROVIDERS_OIDC_HMAC_SECRET_FILE=/run/secrets/AUTHELIA_HMAC_SECRET" +Secret=AUTHELIA_HMAC_SECRET,target=/run/secrets/AUTHELIA_HMAC_SECRET +Secret=AUTHELIA_JWKS_RS256,target=/run/secrets/AUTHELIA_JWKS_RS256 +Secret=AUTHELIA_JWKS_ES256,target=/run/secrets/AUTHELIA_JWKS_ES256 +# LDAP +Environment="AUTHELIA_AUTHENTICATION_BACKEND_LDAP_PASSWORD_FILE=/run/secrets/AUTHELIA_LDAP_PASSWORD" +Secret=AUTHELIA_LDAP_PASSWORD,target=/run/secrets/AUTHELIA_LDAP_PASSWORD +# Database +Environment="AUTHELIA_STORAGE_POSTGRES_PASSWORD_FILE=/run/secrets/POSTGRES_AUTHELIA_PASSWORD" +Secret=POSTGRES_AUTHELIA_PASSWORD,target=/run/secrets/POSTGRES_AUTHELIA_PASSWORD + +Exec=--config /config/authelia.yaml + +[Service] +# Wait for dependency +# They run as rootless podman container, so their port is not opened until they are normaly running +# Check their ports with nc command +ExecStartPre=/usr/bin/nc -zv {{ infra_uri['postgresql']['domain'] }} {{ infra_uri['postgresql']['ports']['tcp'] }} +ExecStartPre=/usr/bin/nc -zv {{ infra_uri['ldap']['domain'] }} {{ infra_uri['ldap']['ports']['ldaps'] }} +ExecStartPre=sleep 5 +Restart=always +RestartSec=10s +TimeoutStopSec=120 + +[Install] +WantedBy=default.target diff --git 
a/config/services/containers/auth/authelia/config/authelia.yaml.j2 b/config/services/containers/auth/authelia/config/authelia.yaml.j2 new file mode 100644 index 0000000..c282a3e --- /dev/null +++ b/config/services/containers/auth/authelia/config/authelia.yaml.j2 @@ -0,0 +1,133 @@ +--- +# https://github.com/lldap/lldap/blob/main/example_configs/authelia.md +# authelia.yaml +# certificates setting +certificates_directory: '/etc/ssl/authelia/' + +# them setting - light, dark, grey, auto. +theme: 'auto' + +# Server configuration +server: + # TLS will be applied on caddy + address: 'tcp://:9091/' + +# Log configuration +log: + level: 'info' + #file_path: 'path/of/log/file' - without this option, using stdout + +# TOTP configuration +totp: + # issure option is for 2FA app. It works as identifier. "My homelab' or 'ilnmors.internal', 'Authelia - ilnmors' + issuer: 'ilnmors.internal' + +# Identity validation confituration +identity_validation: + reset_password: + jwt_secret: '' # $AUTHELIA_IDENTITY_VALIDATION_RESET_PASSWORD_JWT_SECRET_FILE option is designated in container file + +# Authentication backend provider configuration +authentication_backend: + ldap: + # ldaps uses 636 -> NAT automatically change port 636 in output packet -> 2636 which lldap server uses. 
+ address: 'ldaps://ldap.ilnmors.internal' + implementation: 'lldap' + # tls configruation, it uses certificates_directory's /etc/ssl/authelia/ilnmors_root_ca.crt + tls: + server_name: 'ldap.ilnmors.internal' + skip_verify: false + # LLDAP base DN + base_dn: 'dc=ilnmors,dc=internal' + additional_users_dn: 'ou=people' + additional_groups_dn: 'ou=groups' + # LLDAP filters + users_filter: '(&(|({username_attribute}={input})({mail_attribute}={input}))(objectClass=person))' + groups_filter: '(&(member={dn})(objectClass=groupOfNames))' + # LLDAP bind account configuration + user: 'uid=authelia,ou=people,dc=ilnmors,dc=internal' + password: '' # $AUTHELIA_AUTHENTICATION_BACKEND_LDAP_PASSWORD_FILE option is designated in container file + +# Access control configuration +access_control: + default_policy: 'deny' + rules: + # authelia portal + - domain: 'authelia.ilnmors.internal' + policy: 'bypass' + - domain: 'authelia.ilnmors.com' + policy: 'bypass' + - domain: 'test.ilnmors.com' + policy: 'one_factor' + subject: + - 'group:admins' +# Session provider configuration +session: + secret: '' # $AUTHELIA_SESSION_SECRET_FILE is designated in container file + expiration: '24 hours' # Session maintains for 24 hours + inactivity: '24 hours' # Session maintains for 24 hours without actions + cookies: + - name: 'authelia_public_session' + domain: 'ilnmors.com' + authelia_url: 'https://authelia.ilnmors.com' + same_site: 'lax' + +# This authelia doesn't use Redis. 
+ +# Storage provider configuration +storage: + encryption_key: '' # $AUTHELIA_STORAGE_ENCRYPTION_KEY_FILE is designated in container file + postgres: + address: 'tcp://{{ infra_uri['postgresql']['domain'] }}:{{ infra_uri['postgresql']['ports']['tcp'] }}' + database: 'authelia_db' + username: 'authelia' + password: '' # $AUTHELIA_STORAGE_POSTGRES_PASSWORD_FILE is designated in container file + tls: + server_name: '{{ infra_uri['postgresql']['domain'] }}' + skip_verify: false + +# Notification provider +notifier: + filesystem: + filename: '/config/notification.txt' + +# This part needs the clients to integrate with Authelia in order for it to activate. +identity_providers: + oidc: + hmac_secret: '' # $AUTHELIA_IDENTITY_PROVIDERS_OIDC_HMAC_SECRET_FILE + jwks:{% raw %} + - algorithm: 'RS256' + use: 'sig' + key: {{ secret "/run/secrets/AUTHELIA_JWKS_RS256" | mindent 10 "|" | msquote }} + - algorithm: 'ES256' + use: 'sig' + key: {{ secret "/run/secrets/AUTHELIA_JWKS_ES256" | mindent 10 "|" | msquote }}{% endraw %} + clients: + # https://www.authelia.com/integration/openid-connect/clients/synology-dsm/ + - client_id: 'dsm' + client_name: 'dsm' + # It depends on application + # hash vaule generate: + # podman exec -it authelia sh + # authelia crypto hash generate pbkdf2 --password 'password' + client_secret: '{{ hostvars['console']['dsm']['oidc']['hash'] }}' + # If there were not client secret, public should be `true` [true | false] + public: false + authorization_policy: 'one_factor' + require_pkce: false + pkce_challenge_method: '' + redirect_uris: + - 'https://{{ infra_uri['nas']['domain'] }}:{{ infra_uri['nas']['ports']['https'] }}' + scopes: + - 'openid' + - 'profile' + - 'groups' + - 'email' + response_types: + - 'code' + grant_types: + - 'authorization_code' + access_token_signed_response_alg: 'none' + userinfo_signed_response_alg: 'none' + # [ client_secret_post | client_secret_basic ] + token_endpoint_auth_method: 'client_secret_post' diff --git 
a/config/services/containers/common/caddy/build/caddy.containerfile.j2 b/config/services/containers/common/caddy/build/caddy.containerfile.j2 new file mode 100644 index 0000000..b6eb133 --- /dev/null +++ b/config/services/containers/common/caddy/build/caddy.containerfile.j2 @@ -0,0 +1,17 @@ +FROM docker.io/library/caddy:{{ version['containers']['caddy'] }}-builder-alpine AS builder + +RUN xcaddy build \ +{% if node['name'] == 'auth' %} +--with github.com/caddy-dns/rfc2136 \ +--with github.com/hslatman/caddy-crowdsec-bouncer/crowdsec \ +--with github.com/hslatman/caddy-crowdsec-bouncer/http +{% else %} +--with github.com/caddy-dns/rfc2136 +{% endif %} + +FROM docker.io/library/caddy:{{ version['containers']['caddy'] }} + +COPY --from=builder /usr/bin/caddy /usr/bin/caddy +COPY ./ilnmors_root_ca.crt /usr/local/share/ca-certificates/ilnmors_root_ca.crt + +RUN update-ca-certificates diff --git a/config/services/containers/common/caddy/caddy.container.j2 b/config/services/containers/common/caddy/caddy.container.j2 new file mode 100644 index 0000000..9cf0132 --- /dev/null +++ b/config/services/containers/common/caddy/caddy.container.j2 @@ -0,0 +1,49 @@ +[Quadlet] +DefaultDependencies=false + +[Unit] +Description=Caddy + +{% if node['name'] == "infra" %} +After=ca.service +Requires=ca.service +{% else %} +After=network-online.target +Wants=network-online.target +{% endif %} + + +[Container] +Image=ilnmors.internal/{{ node['name'] }}/caddy:{{ version['containers']['caddy'] }} + +ContainerName=caddy_{{ node['name'] }} +HostName=caddy_{{ node['name'] }} +{% if node['name'] == 'infra' %} +AddHost={{ infra_uri['ca']['domain'] }}:host-gateway +AddHost={{ infra_uri['prometheus']['domain'] }}:host-gateway +AddHost={{ infra_uri['loki']['domain'] }}:host-gateway +{% endif %} + +PublishPort=2080:80/tcp +PublishPort=2443:443/tcp + +Volume=%h/containers/caddy/etc:/etc/caddy:ro +Volume=%h/containers/caddy/data:/data:rw +{% if node['name'] == 'auth' %} +Volume=/var/log/caddy:/log:rw +{% 
endif %} + +Environment="TZ=Asia/Seoul" + +Secret=CADDY_ACME_KEY,target=/run/secrets/CADDY_ACME_KEY +{% if node['name'] == 'auth' %} +Secret=CADDY_CROWDSEC_KEY,target=/run/secrets/CADDY_CROWDSEC_KEY +{% endif %} + +[Service] +Restart=always +RestartSec=10s +TimeoutStopSec=120 + +[Install] +WantedBy=default.target diff --git a/config/services/containers/common/caddy/etc/auth/Caddyfile.j2 b/config/services/containers/common/caddy/etc/auth/Caddyfile.j2 new file mode 100644 index 0000000..a140f9a --- /dev/null +++ b/config/services/containers/common/caddy/etc/auth/Caddyfile.j2 @@ -0,0 +1,62 @@ +{ + # CrowdSec LAPI connection + crowdsec { + api_url https://{{ infra_uri['crowdsec']['domain'] }}:{{ infra_uri['crowdsec']['ports']['https'] }} + api_key "{file./run/secrets/CADDY_CROWDSEC_KEY}" + } +} + +# Snippets +# CrowdSec log for parser +(crowdsec_log) { + log { + output file /log/access.log { + mode 0644 + roll_size 100MiB + roll_keep 1 + } + format json + } +} +# Private TLS ACME with DNS-01-challenge +(private_tls) { + tls { + issuer acme { + dir https://{{ infra_uri['ca']['domain'] }}:{{ infra_uri['ca']['ports']['https'] }}/acme/acme@ilnmors.internal/directory + dns rfc2136 { + server {{ infra_uri['bind']['domain'] }}:{{ infra_uri['bind']['ports']['dns'] }} + key_name acme-key + key_alg hmac-sha256 + key "{file./run/secrets/CADDY_ACME_KEY}" + } + } + } +} + +# Public domain +authelia.ilnmors.com { + import crowdsec_log + route { + crowdsec + reverse_proxy host.containers.internal:9091 + } +} +test.ilnmors.com { + import crowdsec_log + route { + crowdsec + forward_auth host.containers.internal:9091 { + # Authelia Forward Auth endpoint URI + uri /api/authz/forward-auth + copy_headers Remote-User Remote-Groups Remote-Email Remote-Name + } + root * /usr/share/caddy + file_server + } +} + +# Internal domain +auth.ilnmors.internal { + import private_tls + metrics +} diff --git a/config/services/containers/common/caddy/etc/infra/Caddyfile.j2 
b/config/services/containers/common/caddy/etc/infra/Caddyfile.j2 new file mode 100644 index 0000000..7d32911 --- /dev/null +++ b/config/services/containers/common/caddy/etc/infra/Caddyfile.j2 @@ -0,0 +1,40 @@ +# Private TLS ACME with DNS-01-challenge +(private_tls) { + tls { + issuer acme { + dir https://{{ infra_uri['ca']['domain'] }}:{{ infra_uri['ca']['ports']['https'] }}/acme/acme@ilnmors.internal/directory + dns rfc2136 { + server {{ infra_uri['bind']['domain'] }}:{{ infra_uri['bind']['ports']['dns'] }} + key_name acme-key + key_alg hmac-sha256 + key "{file./run/secrets/CADDY_ACME_KEY}" + } + } + } +} + +infra.ilnmors.internal { + import private_tls + metrics +} + +{{ infra_uri['ldap']['domain'] }} { + import private_tls + route { + reverse_proxy host.containers.internal:{{ infra_uri['ldap']['ports']['http'] }} + } +} + +{{ infra_uri['prometheus']['domain'] }} { + import private_tls + route { + reverse_proxy https://{{ infra_uri['prometheus']['domain'] }}:{{ infra_uri['prometheus']['ports']['https'] }} + } +} + +grafana.ilnmors.internal { + import private_tls + route { + reverse_proxy host.containers.internal:3000 + } +} diff --git a/config/services/containers/infra/ca/ca.container.j2 b/config/services/containers/infra/ca/ca.container.j2 new file mode 100644 index 0000000..95fb817 --- /dev/null +++ b/config/services/containers/infra/ca/ca.container.j2 @@ -0,0 +1,35 @@ +[Quadlet] +DefaultDependencies=false + +[Unit] +Description=CA + +After=network-online.target +Wants=network-online.target + +[Container] +Image=docker.io/smallstep/step-ca:{{ version['containers']['step'] }} + +ContainerName=ca +HostName=ca + +PublishPort=9000:9000/tcp + +Volume=%h/containers/ca/certs:/home/step/certs:ro +Volume=%h/containers/ca/secrets:/home/step/secrets:ro +Volume=%h/containers/ca/config:/home/step/config:rw +Volume=%h/containers/ca/db:/home/step/db:rw +Volume=%h/containers/ca/templates:/home/step/templates:rw + +Environment="TZ=Asia/Seoul" 
+Environment="PWDPATH=/run/secrets/STEP_CA_PASSWORD" + +Secret=STEP_CA_PASSWORD,target=/run/secrets/STEP_CA_PASSWORD + +[Service] +Restart=always +RestartSec=10s +TimeoutStopSec=120 + +[Install] +WantedBy=default.target \ No newline at end of file diff --git a/config/services/containers/infra/ca/config/ca.json.j2 b/config/services/containers/infra/ca/config/ca.json.j2 new file mode 100644 index 0000000..ce14d1a --- /dev/null +++ b/config/services/containers/infra/ca/config/ca.json.j2 @@ -0,0 +1,61 @@ +{ + "root": "/home/step/certs/ilnmors_root_ca.crt", + "federatedRoots": null, + "crt": "/home/step/certs/ilnmors_intermediate_ca.crt", + "key": "/home/step/secrets/ilnmors_intermediate_ca.key", + "address": ":9000", + "insecureAddress": "", + "dnsNames": [ + "{{ infra_uri['ca']['domain'] }}" + ], + "logger": { + "format": "text" + }, + "db": { + "type": "badgerv2", + "dataSource": "/home/step/db", + "badgerFileLoadingMode": "" + }, + "authority": { + "policy": { + "x509": { + "allow": { + "dns": [ + "ilnmors.internal", + "*.ilnmors.internal" + ] + }, + "allowWildcardNames": true + } + }, + "provisioners": [ + { + "type": "ACME", + "name": "acme@ilnmors.internal", + "claims": { + "defaultTLSCertDuration": "2160h0m0s", + "enableSSHCA": true, + "disableRenewal": false, + "allowRenewalAfterExpiry": false, + "disableSmallstepExtensions": false + }, + "options": { + "x509": {}, + "ssh": {} + } + } + ], + "template": {}, + "backdate": "1m0s" + }, + "tls": { + "cipherSuites": [ + "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256", + "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256" + ], + "minVersion": 1.2, + "maxVersion": 1.3, + "renegotiation": false + }, + "commonName": "ilnmors Online CA" +} diff --git a/config/services/containers/infra/ca/config/defaults.json.j2 b/config/services/containers/infra/ca/config/defaults.json.j2 new file mode 100644 index 0000000..b53c767 --- /dev/null +++ b/config/services/containers/infra/ca/config/defaults.json.j2 @@ -0,0 +1,6 @@ +{ + "ca-url": 
"https://{{ infra_uri['ca']['domain'] }}:{{ infra_uri['ca']['ports']['https'] }}", + "ca-config": "/home/step/config/ca.json", + "fingerprint": "215c851d2d0d2dbf90fc3507425207c29696ffd587c640c94a68dddb1d84d8e8", + "root": "/home/step/certs/ilnmors_root_ca.crt" +} diff --git a/config/services/containers/infra/ca/templates/ca.tpl b/config/services/containers/infra/ca/templates/ca.tpl new file mode 100644 index 0000000..4bb8f08 --- /dev/null +++ b/config/services/containers/infra/ca/templates/ca.tpl @@ -0,0 +1,8 @@ +{ + "subject": {{ toJson .Subject }}, + "keyUsage": ["certSign", "crlSign"], + "basicConstraints": { + "isCA": true, + "maxPathLen": 0 + } +} diff --git a/config/services/containers/infra/grafana/etc/grafana.ini.j2 b/config/services/containers/infra/grafana/etc/grafana.ini.j2 new file mode 100644 index 0000000..fed1a52 --- /dev/null +++ b/config/services/containers/infra/grafana/etc/grafana.ini.j2 @@ -0,0 +1,54 @@ +# https://github.com/grafana/grafana/blob/main/conf/defaults.ini +[paths] +data = /var/lib/grafana +logs = /var/log/grafana +plugins = /var/lib/grafana/plugins +provisioning = /etc/grafana/provisioning + +[server] +protocol = http +http_port = 3000 +domain = grafana.ilnmors.internal +root_url = http://grafana.ilnmors.internal/ +router_logging = false + +[database] +type = postgres +host = {{ infra_uri['postgresql']['domain'] }}:{{ infra_uri['postgresql']['ports']['tcp'] }} +name = grafana_db +user = grafana +password = $__file{/run/secrets/GF_DB_PASSWORD} +ssl_mode = verify-full +ca_cert_path = /etc/ssl/grafana/ilnmors_root_ca.crt + +[auth.ldap] +enabled = true +config_file = /etc/grafana/ldap.toml +allow_sign_up = true + +[auth] +disable_login_form = false +allow_anonymous_device_id_auth = false + +[security] +# local admin +admin_user = local_admin +# local password +admin_password = $__file{/run/secrets/GF_ADMIN_PASSWORD} +cookie_secure = true +cookie_samesite = lax +allow_embedding = false + +# [smtp] +# enabled = true +# host = localhost:25 
+# from_address = alert@ilnmors.internal +# from_name = Grafana-Infra + +[analytics] +reporting_enabled = false +check_for_updates = false + +[log] +mode = console +level = info diff --git a/config/services/containers/infra/grafana/etc/ldap.toml.j2 b/config/services/containers/infra/grafana/etc/ldap.toml.j2 new file mode 100644 index 0000000..8834493 --- /dev/null +++ b/config/services/containers/infra/grafana/etc/ldap.toml.j2 @@ -0,0 +1,47 @@ +# https://github.com/lldap/lldap/blob/main/example_configs/grafana_ldap_config.toml +[[servers]] +host = "{{ infra_uri['ldap']['domain'] }}" +port = {{ infra_uri['ldap']['ports']['ldaps'] }} +# Activate STARTTLS or LDAPS +use_ssl = true +# true = STARTTLS, false = LDAPS +start_tls = false +tls_ciphers = [] +min_tls_version = "" +ssl_skip_verify = false +root_ca_cert = "/etc/ssl/grafana/ilnmors_root_ca.crt" +# mTLS option, it is not needed +# client_cert = "/path/to/client.crt" +# client_key = "/path/to/client.key" + +bind_dn = "uid=grafana,ou=people,dc=ilnmors,dc=internal" +bind_password = "$__file{/run/secrets/LDAP_BIND_PASSWORD}" + +search_filter = "(|(uid=%s)(mail=%s))" +search_base_dns = ["dc=ilnmors,dc=internal"] + +[servers.attributes] +member_of = "memberOf" +email = "mail" +name = "displayName" +surname = "sn" +username = "uid" + +group_search_filter = "(&(objectClass=groupOfUniqueNames)(uniqueMember=%s))" +group_search_base_dns = ["ou=groups,dc=ilnmors,dc=internal"] +group_search_filter_user_attribute = "uid" + +[[servers.group_mappings]] +group_dn = "cn=lldap_admin,ou=groups,dc=ilnmors,dc=internal" +org_role = "Admin" +grafana_admin = true + +[[servers.group_mappings]] +group_dn = "cn=admins,ou=groups,dc=ilnmors,dc=internal" +org_role = "Editor" +grafana_admin = false + +[[servers.group_mappings]] +group_dn = "cn=users,ou=groups,dc=ilnmors,dc=internal" +org_role = "Viewer" +grafana_admin = false diff --git a/config/services/containers/infra/grafana/etc/provisioning/datasources/datasources.yaml 
b/config/services/containers/infra/grafana/etc/provisioning/datasources/datasources.yaml new file mode 100644 index 0000000..cd10a56 --- /dev/null +++ b/config/services/containers/infra/grafana/etc/provisioning/datasources/datasources.yaml @@ -0,0 +1,29 @@ +# https://github.com/grafana/grafana/blob/main/conf/provisioning/datasources/sample.yaml +apiVersion: 1 + +datasources: +  - name: Prometheus +    type: prometheus +    url: https://prometheus.ilnmors.internal:9090 +    access: proxy +    isDefault: true +    jsonData: +      tlsAuth: false +      tlsAuthWithCACert: true +      httpMethod: POST +    secureJsonData: +      tlsCACert: "$__file{/etc/ssl/grafana/ilnmors_root_ca.crt}" + +  - name: Loki
    type: loki +    url: https://loki.ilnmors.internal:3100 +    access: proxy +    jsonData: +      tlsAuth: false +      tlsAuthWithCACert: true +      # Tenant header, set to avoid Loki's "no org id" error +      httpHeaderName1: "X-Scope-OrgID" +      maxLines: 1000 +    secureJsonData: +      tlsCACert: "$__file{/etc/ssl/grafana/ilnmors_root_ca.crt}" +      httpHeaderValue1: "ilnmors.internal" diff --git a/config/services/containers/infra/grafana/grafana.container.j2 b/config/services/containers/infra/grafana/grafana.container.j2 new file mode 100644 index 0000000..e71f224 --- /dev/null +++ b/config/services/containers/infra/grafana/grafana.container.j2 @@ -0,0 +1,43 @@ +[Quadlet] +DefaultDependencies=false + +[Unit] +Description=Grafana + +After=postgresql.service ldap.service +Requires=postgresql.service ldap.service + +[Container] +Image=docker.io/grafana/grafana:{{ version['containers']['grafana'] }} + +ContainerName=grafana +HostName=grafana + +AddHost={{ infra_uri['postgresql']['domain'] }}:host-gateway +AddHost={{ infra_uri['ldap']['domain'] }}:host-gateway +AddHost={{ infra_uri['prometheus']['domain'] }}:host-gateway +AddHost={{ infra_uri['loki']['domain'] }}:host-gateway + +PublishPort=3000:3000/tcp + +Volume=%h/containers/grafana/data:/var/lib/grafana:rw +Volume=%h/containers/grafana/etc:/etc/grafana:ro +Volume=%h/containers/grafana/ssl:/etc/ssl/grafana:ro
+ +Environment="TZ=Asia/Seoul" +Environment="GF_PATHS_CONFIG=/etc/grafana/grafana.ini" +# plugin +# Environment="GF_INSTALL_PLUGINS=grafana-clock-panel,grafana-simple-json-datasource" +Environment="GF_FEATURE_TOGGLES_EXPAND_ENV_VARS=true" + +Secret=GF_DB_PASSWORD,target=/run/secrets/GF_DB_PASSWORD +Secret=LDAP_BIND_PASSWORD,target=/run/secrets/LDAP_BIND_PASSWORD +Secret=GF_ADMIN_PASSWORD,target=/run/secrets/GF_ADMIN_PASSWORD + +[Service] +Restart=always +RestartSec=10s +TimeoutStopSec=120 + +[Install] +WantedBy=default.target diff --git a/config/services/containers/infra/ldap/ldap.container.j2 b/config/services/containers/infra/ldap/ldap.container.j2 new file mode 100644 index 0000000..56414a7 --- /dev/null +++ b/config/services/containers/infra/ldap/ldap.container.j2 @@ -0,0 +1,64 @@ +[Quadlet] +DefaultDependencies=false + +[Unit] +Description=LDAP + +After=postgresql.service +Requires=postgresql.service + +[Container] +Image=docker.io/lldap/lldap:{{ version['containers']['ldap'] }} + +ContainerName=ldap +HostName=ldap +# They are at the same host (for Pasta, it is needed) +AddHost={{ infra_uri['postgresql']['domain'] }}:host-gateway +# For LDAPS - 636 > 6360 nftables +PublishPort=6360:6360/tcp +# Web UI +PublishPort=17170:17170/tcp + + +Volume=%h/containers/ldap/data:/data:rw +Volume=%h/containers/ldap/ssl:/etc/ssl/ldap:ro + +# Default +Environment="TZ=Asia/Seoul" + +# Domain +Environment="LLDAP_LDAP_BASE_DN=dc=ilnmors,dc=internal" + +# LDAPS +Environment="LLDAP_LDAPS_OPTIONS__ENABLED=true" +Environment="LLDAP_LDAPS_OPTIONS__CERT_FILE=/etc/ssl/ldap/ldap.crt" +Environment="LLDAP_LDAPS_OPTIONS__KEY_FILE=/etc/ssl/ldap/ldap.key" +# Secret files' Path +Environment="LLDAP_KEY_SEED_FILE=/run/secrets/LLDAP_KEY_SEED" +Environment="LLDAP_JWT_SECRET_FILE=/run/secrets/LLDAP_JWT_SECRET" + +# SMTP options > you can set all of these at the /data/config.toml instead of Environment +# Only `LLDAP_SMTP_OPTIONS__PASSWORD` will be injected by secret +# 
LLDAP_SMTP_OPTIONS__ENABLE_PASSWORD_RESET=true +# LLDAP_SMTP_OPTIONS__SERVER=smtp.example.com +# LLDAP_SMTP_OPTIONS__PORT=465 +# LLDAP_SMTP_OPTIONS__SMTP_ENCRYPTION=TLS +# LLDAP_SMTP_OPTIONS__USER=no-reply@example.com +# LLDAP_SMTP_OPTIONS__PASSWORD=PasswordGoesHere +# LLDAP_SMTP_OPTIONS__FROM=no-reply +# LLDAP_SMTP_OPTIONS__TO=admin + +# Database +Secret=LLDAP_DATABASE_URL,type=env + +# Secrets +Secret=LLDAP_KEY_SEED,target=/run/secrets/LLDAP_KEY_SEED +Secret=LLDAP_JWT_SECRET,target=/run/secrets/LLDAP_JWT_SECRET + +[Service] +Restart=always +RestartSec=10s +TimeoutStopSec=120 + +[Install] +WantedBy=default.target diff --git a/config/services/containers/infra/loki/etc/loki.yaml b/config/services/containers/infra/loki/etc/loki.yaml new file mode 100644 index 0000000..95f93ab --- /dev/null +++ b/config/services/containers/infra/loki/etc/loki.yaml @@ -0,0 +1,46 @@ +--- +server: +  http_listen_address: "::" +  http_listen_port: 3100 +  http_tls_config: +    cert_file: /etc/ssl/loki/loki.crt +    key_file: /etc/ssl/loki/loki.key + +#memberlist: +#  join_members: ["localhost"] +#  bind_addr: ['::'] +#  bind_port: 7946 + +schema_config: +  configs: +    - from: "2023-01-01" +      store: tsdb +      object_store: filesystem +      schema: v13 +      index: +        prefix: index_ +        period: 24h + +limits_config: +  retention_period: 30d +  reject_old_samples: true +  reject_old_samples_max_age: 168h + +common: +  instance_addr: localhost +  path_prefix: /loki +  replication_factor: 1 +  storage: +    filesystem: +      chunks_directory: /loki/chunks +      rules_directory: /loki/rules +  ring: +    kvstore: +      store: inmemory + +compactor: +  working_directory: /loki/compactor +  delete_request_store: filesystem +  compaction_interval: 10m +  retention_enabled: true +  retention_delete_delay: 2h diff --git a/config/services/containers/infra/loki/loki.container.j2 b/config/services/containers/infra/loki/loki.container.j2 new file mode 100644 index 0000000..456bb2c --- /dev/null +++ b/config/services/containers/infra/loki/loki.container.j2 @@
-0,0 +1,32 @@ +[Quadlet] +DefaultDependencies=false + +[Unit] +Description=Loki + +After=network-online.target +Wants=network-online.target + +[Container] +Image=docker.io/grafana/loki:{{ version['containers']['loki'] }} + +ContainerName=loki +HostName=loki + +PublishPort=3100:3100/tcp + +Volume=%h/containers/loki/data:/loki:rw +Volume=%h/containers/loki/etc:/etc/loki:ro +Volume=%h/containers/loki/ssl:/etc/ssl/loki:ro + +Environment="TZ=Asia/Seoul" + +Exec=--config.file=/etc/loki/loki.yaml + +[Service] +Restart=always +RestartSec=10s +TimeoutStopSec=120 + +[Install] +WantedBy=default.target diff --git a/config/services/containers/infra/postgresql/build/postgresql.containerfile.j2 b/config/services/containers/infra/postgresql/build/postgresql.containerfile.j2 new file mode 100644 index 0000000..9299395 --- /dev/null +++ b/config/services/containers/infra/postgresql/build/postgresql.containerfile.j2 @@ -0,0 +1,12 @@ +ARG PG_VER={{ version['containers']['postgresql'] }} + +FROM docker.io/library/postgres:${PG_VER} + +ARG VECTORCHORD_VER={{ version['containers']['vectorchord'] }} + +RUN apt update && \ + apt install -y wget postgresql-${PG_MAJOR}-pgvector && \ + wget -nv -O /tmp/vchord.deb https://github.com/tensorchord/VectorChord/releases/download/${VECTORCHORD_VER}/postgresql-${PG_MAJOR}-vchord_${VECTORCHORD_VER}-1_amd64.deb && \ + apt install -y /tmp/vchord.deb && \ + apt purge -y wget && apt autoremove -y && \ + rm -rf /tmp/vchord.deb /var/lib/apt/lists/* diff --git a/config/services/containers/infra/postgresql/config/pg_hba.conf.j2 b/config/services/containers/infra/postgresql/config/pg_hba.conf.j2 new file mode 100644 index 0000000..63b0495 --- /dev/null +++ b/config/services/containers/infra/postgresql/config/pg_hba.conf.j2 @@ -0,0 +1,28 @@ +# @authcomment@ +# TYPE DATABASE USER ADDRESS METHOD +# Local host `trust` +local all all trust + +# Local monitoring connection (host - infra VM) `trust` +hostssl postgres alloy {{ 
hostvars['fw']['network4']['infra']['server'] }}/32 trust +hostssl postgres alloy {{ hostvars['fw']['network6']['infra']['server'] }}/128 trust +hostssl postgres alloy {{ hostvars['fw']['network4']['subnet']['lla'] }} trust +hostssl postgres alloy {{ hostvars['fw']['network6']['subnet']['lla'] }} trust + +# Local connection (in postgresql container) needs password (127.0.0.1 - container loopback) +host all all 127.0.0.1/32 scram-sha-256 +host all all ::1/128 scram-sha-256 + +# Local connection (host - infra VM) needs password (169.254.1.0/24 - link_local subnet for containers in pasta mode) +hostssl all all {{ hostvars['fw']['network4']['infra']['server'] }}/32 scram-sha-256 +hostssl all all {{ hostvars['fw']['network6']['infra']['server'] }}/128 scram-sha-256 +hostssl all all {{ hostvars['fw']['network4']['subnet']['lla'] }} scram-sha-256 +hostssl all all {{ hostvars['fw']['network6']['subnet']['lla'] }} scram-sha-256 + +# auth VM +hostssl all all {{ hostvars['fw']['network4']['auth']['server'] }}/32 scram-sha-256 +hostssl all all {{ hostvars['fw']['network6']['auth']['server'] }}/128 scram-sha-256 + +# app VM (Applications, 192.168.10.13) +hostssl all all {{ hostvars['fw']['network4']['app']['server'] }}/32 scram-sha-256 +hostssl all all {{ hostvars['fw']['network6']['app']['server'] }}/128 scram-sha-256 diff --git a/config/services/containers/infra/postgresql/config/postgresql.conf.j2 b/config/services/containers/infra/postgresql/config/postgresql.conf.j2 new file mode 100644 index 0000000..eb87ce6 --- /dev/null +++ b/config/services/containers/infra/postgresql/config/postgresql.conf.j2 @@ -0,0 +1,41 @@ +#------------------------------------------------------------------------------ +# CUSTOMIZED OPTIONS +#------------------------------------------------------------------------------ + +# Add settings for extensions here +# Listen_address +listen_addresses = '*' +# Max connections +max_connections = 250 +# listen_port +port = 5432 + +# SSL +ssl = on +ssl_ca_file 
= '/etc/ssl/postgresql/ilnmors_root_ca.crt' +ssl_cert_file = '/etc/ssl/postgresql/postgresql.crt' +ssl_key_file = '/etc/ssl/postgresql/postgresql.key' +ssl_ciphers = 'HIGH:!aNULL:!MD5' +ssl_prefer_server_ciphers = on + +# log +log_destination = 'stderr' +log_checkpoints = on +log_temp_files = 0 +log_min_duration_statement = 500 + +# IO +track_io_timing = on + +## immich_config +shared_preload_libraries = 'vchord.so' +search_path = '"$user", public' +max_wal_size = 5GB +shared_buffers = 512MB +wal_compression = on +work_mem = 16MB +autovacuum_vacuum_scale_factor = 0.1 +autovacuum_analyze_scale_factor = 0.05 +autovacuum_vacuum_cost_limit = 1000 +effective_io_concurrency = 200 +random_page_cost = 1.2 diff --git a/config/services/containers/infra/postgresql/init/.gitkeep b/config/services/containers/infra/postgresql/init/.gitkeep new file mode 100644 index 0000000..e69de29 diff --git a/config/services/containers/infra/postgresql/postgresql.container.j2 b/config/services/containers/infra/postgresql/postgresql.container.j2 new file mode 100644 index 0000000..6c9bacd --- /dev/null +++ b/config/services/containers/infra/postgresql/postgresql.container.j2 @@ -0,0 +1,36 @@ +[Quadlet] +DefaultDependencies=false + +[Unit] +Description=PostgreSQL + +After=network-online.target +Wants=network-online.target + +[Container] +Image=ilnmors.internal/{{ node['name'] }}/postgres:pg{{ version['containers']['postgresql'] }}-vectorchord{{ version['containers']['vectorchord'] }} + +ContainerName=postgresql +HostName=postgresql + +PublishPort=5432:5432/tcp + +Volume=%h/containers/postgresql/data:/var/lib/postgresql:rw +Volume=%h/containers/postgresql/config:/config:ro +Volume=%h/containers/postgresql/ssl:/etc/ssl/postgresql:ro +Volume=%h/containers/postgresql/init:/docker-entrypoint-initdb.d/:ro +Volume=%h/containers/postgresql/backups:/backups:rw + +Environment="TZ=Asia/Seoul" +# This option is only for init process, after init custom config file `pg_hba.conf` will control this option. 
+Environment="POSTGRES_HOST_AUTH_METHOD=trust" + +Exec=postgres -c 'config_file=/config/postgresql.conf' -c 'hba_file=/config/pg_hba.conf' + +[Service] +Restart=always +RestartSec=10s +TimeoutStopSec=120 + +[Install] +WantedBy=default.target diff --git a/config/services/containers/infra/postgresql/services/postgresql-cluster-backup.service b/config/services/containers/infra/postgresql/services/postgresql-cluster-backup.service new file mode 100644 index 0000000..91405d3 --- /dev/null +++ b/config/services/containers/infra/postgresql/services/postgresql-cluster-backup.service @@ -0,0 +1,18 @@ +[Unit] +Description=PostgreSQL Cluster Backup Service +After=postgresql.service +BindsTo=postgresql.service + +[Service] +Type=oneshot + +# logging +StandardOutput=journal +StandardError=journal + +ExecStartPre=/usr/bin/podman exec postgresql sh -c "mkdir -p /backups/cluster && chown postgres:root /backups/cluster && chmod 770 /backups/cluster" + +# Run the script +ExecStart=/usr/bin/podman exec -u postgres postgresql sh -c 'pg_dumpall -U postgres --schema-only | grep -v -E "CREATE ROLE postgres" > /backups/cluster/pg_cluster_$(date "+%%Y-%%m-%%d").sql' +ExecStart=/usr/bin/podman exec -u postgres postgresql sh -c "find /backups/cluster -maxdepth 1 -type f -mtime +7 -delete" +ExecStart=/usr/bin/podman exec postgresql sh -c "chown -R postgres:root /backups/cluster && chmod 660 /backups/cluster/*" diff --git a/config/services/containers/infra/postgresql/services/postgresql-cluster-backup.timer b/config/services/containers/infra/postgresql/services/postgresql-cluster-backup.timer new file mode 100644 index 0000000..822b513 --- /dev/null +++ b/config/services/containers/infra/postgresql/services/postgresql-cluster-backup.timer @@ -0,0 +1,17 @@ +[Unit] +Description=Run PostgreSQL Cluster Backup service every day + +[Timer] +# Execute service after 1 min on booting +OnBootSec=1min + +# Execute service every day 00:00 +OnCalendar=*-*-* 00:00:00 +# Random time to postpone the timer 
+RandomizedDelaySec=15min + +# When timer is activated, Service also starts. +Persistent=true + +[Install] +WantedBy=timers.target diff --git a/config/services/containers/infra/postgresql/services/postgresql-data-backup@.service b/config/services/containers/infra/postgresql/services/postgresql-data-backup@.service new file mode 100644 index 0000000..e40c69b --- /dev/null +++ b/config/services/containers/infra/postgresql/services/postgresql-data-backup@.service @@ -0,0 +1,19 @@ +[Unit] +Description=PostgreSQL Data %i Backup Service +After=postgresql.service +BindsTo=postgresql.service + +[Service] +Type=oneshot + +# logging +StandardOutput=journal +StandardError=journal + +ExecStartPre=/usr/bin/podman exec postgresql sh -c "mkdir -p /backups/%i && chown postgres:root /backups/%i && chmod 770 /backups/%i" + +# Run the script +ExecStart=/usr/bin/podman exec -u postgres postgresql sh -c 'printf "\\connect %i_db\n" > /backups/%i/pg_%i_$(date "+%%Y-%%m-%%d").sql' +ExecStart=/usr/bin/podman exec -u postgres postgresql sh -c 'pg_dump -U postgres -d %i_db --data-only >> /backups/%i/pg_%i_$(date "+%%Y-%%m-%%d").sql' +ExecStart=/usr/bin/podman exec -u postgres postgresql sh -c "find /backups/%i -maxdepth 1 -type f -mtime +7 -delete" +ExecStart=/usr/bin/podman exec postgresql sh -c "chown -R postgres:root /backups/%i && chmod 660 /backups/%i/*" diff --git a/config/services/containers/infra/postgresql/services/postgresql-data-backup@.timer b/config/services/containers/infra/postgresql/services/postgresql-data-backup@.timer new file mode 100644 index 0000000..d73af5b --- /dev/null +++ b/config/services/containers/infra/postgresql/services/postgresql-data-backup@.timer @@ -0,0 +1,17 @@ +[Unit] +Description=Run %i Data Backup service every day + +[Timer] +# Execute service after 1 min on booting +OnBootSec=1min + +# Execute service every day 00:00 +OnCalendar=*-*-* 00:00:00 +# Random time to postpone the timer +RandomizedDelaySec=15min + +# When timer is activated, Service also 
starts. +Persistent=true + +[Install] +WantedBy=timers.target diff --git a/config/services/containers/infra/prometheus/etc/prometheus.yaml.j2 b/config/services/containers/infra/prometheus/etc/prometheus.yaml.j2 new file mode 100644 index 0000000..d8ac2bc --- /dev/null +++ b/config/services/containers/infra/prometheus/etc/prometheus.yaml.j2 @@ -0,0 +1,32 @@ +# my global config +global: + scrape_interval: 15s # Set the scrape interval to every 15 seconds. Default is every 1 minute. + evaluation_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute. + # scrape_timeout is set to the global default (10s). + +# Alertmanager configuration +alerting: + alertmanagers: + - static_configs: + - targets: + # - alertmanager:9093 + +# Load rules once and periodically evaluate them according to the global 'evaluation_interval'. +rule_files: + - "/etc/prometheus/rules.yaml" + +# A scrape configuration containing exactly one endpoint to scrape: +# Here it's Prometheus itself. +scrape_configs: + # The job name is added as a label `job=` to any timeseries scraped from this config. + - job_name: "prometheus" + # metrics_path defaults to '/metrics' + scheme: "https" + tls_config: + ca_file: "/etc/ssl/prometheus/ilnmors_root_ca.crt" + server_name: "{{ infra_uri['prometheus']['domain'] }}" + static_configs: + - targets: ["localhost:9090"] + # The label name is added as a label `label_name=` to any timeseries scraped from this config. 
+        labels: +          instance: "{{ node['name'] }}" diff --git a/config/services/containers/infra/prometheus/etc/rules.yaml.j2 b/config/services/containers/infra/prometheus/etc/rules.yaml.j2 new file mode 100644 index 0000000..808c0db --- /dev/null +++ b/config/services/containers/infra/prometheus/etc/rules.yaml.j2 @@ -0,0 +1,38 @@ +groups: +  - name: node_exporters_heartbeat +    rules: +{% for instance in ['vmm', 'fw', 'infra', 'auth', 'app'] %} +      - alert: {{ instance }}_node_exporter_down +        expr: | +          (present_over_time(up{instance="{{ instance }}"}[5m]) or on() vector(0)) == 0 +        for: 30s +        labels: +          severity: critical +        annotations: +          summary: "Exporter heartbeat is down: {{ instance }}" +          description: "{{ instance }} exporter is down for 5 mins" +{% endfor %} +  - name: postgresql_heartbeat +    rules: +      - alert: Postgresql_Down +        expr: | +          (present_over_time(pg_up{instance="infra", job="postgres"}[5m]) or on() vector(0)) == 0 +        for: 30s +        labels: +          severity: critical +        annotations: +          summary: "Postgresql Heartbeat Lost: postgresql" +          description: "postgresql node is down for 5 mins." +  - name: Certificate_expiry_check +    rules: +{% for filename in ['root.crt', 'intermediate.crt', 'crowdsec.crt', 'blocky.crt', 'postgresql.crt', 'ldap.crt', 'prometheus.crt', 'loki.crt', 'dsm.crt'] %} +      - alert: {{ filename | replace('.', '_') }}_is_expired_soon +        expr: | +          max(x509_cert_not_after{filename="{{ filename }}"}) - time() < 2592000 +        for: 1d +        labels: +          severity: critical +        annotations: +          summary: "{{ filename }} expires within 30 days" +          description: "{{ filename }} expires within 30 days." +{% endfor %} diff --git a/config/services/containers/infra/prometheus/etc/web-config.yaml.j2 b/config/services/containers/infra/prometheus/etc/web-config.yaml.j2 new file mode 100644 index 0000000..c4f8248 --- /dev/null +++ b/config/services/containers/infra/prometheus/etc/web-config.yaml.j2 @@ -0,0 +1,9 @@ +# Additionally, a certificate and a key file are needed.
+tls_server_config: + cert_file: "/etc/ssl/prometheus/prometheus.crt" + key_file: "/etc/ssl/prometheus/prometheus.key" + +# Passwords are hashed with bcrypt: https://github.com/prometheus/exporter-toolkit/blob/master/docs/web-configuration.md#about-bcrypt +#basic_auth_users: +# alice: $2y$10$mDwo.lAisC94iLAyP81MCesa29IzH37oigHC/42V2pdJlUprsJPze +# bob: $2y$10$hLqFl9jSjoAAy95Z/zw8Ye8wkdMBM8c5Bn1ptYqP/AXyV0.oy0S8m diff --git a/config/services/containers/infra/prometheus/prometheus.container.j2 b/config/services/containers/infra/prometheus/prometheus.container.j2 new file mode 100644 index 0000000..cc6821b --- /dev/null +++ b/config/services/containers/infra/prometheus/prometheus.container.j2 @@ -0,0 +1,38 @@ +[Quadlet] +DefaultDependencies=false + +[Unit] +Description=Prometheus + +After=network-online.target +Wants=network-online.target + +[Container] +Image=docker.io/prom/prometheus:{{ version['containers']['prometheus'] }} + +ContainerName=prometheus +HostName=prometheus + +PublishPort=9090:9090/tcp + +Volume=%h/containers/prometheus/data:/prometheus:rw +Volume=%h/containers/prometheus/etc:/etc/prometheus:ro +Volume=%h/containers/prometheus/ssl:/etc/ssl/prometheus:ro + +Environment="TZ=Asia/Seoul" + +Exec=--config.file=/etc/prometheus/prometheus.yaml \ + --web.config.file=/etc/prometheus/web-config.yaml \ + --web.enable-remote-write-receiver \ + --storage.tsdb.path=/prometheus \ + --storage.tsdb.retention.time=30d \ + --storage.tsdb.retention.size=15GB \ + --storage.tsdb.wal-compression + +[Service] +Restart=always +RestartSec=10s +TimeoutStopSec=120 + +[Install] +WantedBy=default.target diff --git a/config/services/containers/infra/x509-exporter/x509-exporter.container.j2 b/config/services/containers/infra/x509-exporter/x509-exporter.container.j2 new file mode 100644 index 0000000..38f92c5 --- /dev/null +++ b/config/services/containers/infra/x509-exporter/x509-exporter.container.j2 @@ -0,0 +1,26 @@ +[Quadlet] +DefaultDependencies=false + +[Unit] 
+Description=x509-Exporter +After=network-online.target +Wants=network-online.target + +[Container] +Image=docker.io/enix/x509-certificate-exporter:{{ version['containers']['x509-exporter'] }} +ContainerName=x509-exporter +HostName=X509-exporter + +Volume=%h/containers/x509-exporter/certs:/certs:ro + +PublishPort=9793:9793 + +Exec=--listen-address :9793 --watch-dir=/certs + +[Service] +Restart=always +RestartSec=10s +TimeoutStopSec=120 + +[Install] +WantedBy=default.target diff --git a/config/services/systemd/common/alloy/config.alloy.j2 b/config/services/systemd/common/alloy/config.alloy.j2 new file mode 100644 index 0000000..23db6dd --- /dev/null +++ b/config/services/systemd/common/alloy/config.alloy.j2 @@ -0,0 +1,299 @@ +// The "name" and "job" +// job > prometheus: which exporter / loki: which service +// name > prometheus: which service +// service_name > loki: which service +// Metric +//// Metric ouput +prometheus.remote_write "prometheus" { + endpoint { + url = "https://{{ infra_uri['prometheus']['domain'] }}:{{ infra_uri['prometheus']['ports']['https'] }}/api/v1/write" + } +} + +//// Metric relabel +////// For node metrics +prometheus.relabel "system_relabel" { + forward_to = [prometheus.remote_write.prometheus.receiver] + rule { + target_label = "instance" + replacement = "{{ node['name'] }}" + } + rule { + source_labels = ["job"] + regex = "integrations\\/(.+)" + target_label = "job" + replacement = "$1" + } + rule { + source_labels = ["name"] + regex = "(.+)\\.service" + target_label = "name" + replacement = "$1" + } +} + +////// For service metrics +prometheus.relabel "default_label" { + forward_to = [prometheus.remote_write.prometheus.receiver] + rule { + target_label = "instance" + replacement = "{{ node['name'] }}" + } + rule { + source_labels = ["job"] + regex = "prometheus\\.scrape\\.(.+)" + target_label = "job" + replacement = "$1" + } + rule { + source_labels = ["job"] + regex = "integrations\\/(.+)" + target_label = "job" + replacement = "$1" 
+ } +} + +//// Metric input +////// For node metrics +prometheus.exporter.unix "system" { + enable_collectors = ["systemd", "cgroup", "processes", "cpu", "meminfo", "filesystem", "netdev"] + filesystem { + mount_points_exclude = "^/(sys|proc|dev|run|var/lib/docker/.+|var/lib/kubelet/.+)($|/)" + fs_types_exclude = "^(tmpfs|devtmpfs|devfs|iso9660|overlay|aufs|squashfs)$" + } +} +prometheus.scrape "system" { + targets = prometheus.exporter.unix.system.targets + forward_to = [prometheus.relabel.system_relabel.receiver] +} + +{% if node['name'] == 'fw' %} +////// For Crowdsec metrics +prometheus.scrape "crowdsec" { + targets = [ + { "__address__" = "{{ infra_uri['crowdsec']['domain'] }}:6060", "job" = "crowdsec" }, + { "__address__" = "{{ infra_uri['crowdsec']['domain'] }}:60601", "job" = "crowdsec-bouncer" }, + ] + honor_labels = true + forward_to = [prometheus.relabel.default_label.receiver] +} +{% endif %} + +{% if node['name'] == 'infra' %} +////// For postgresql metrics +prometheus.exporter.postgres "postgresql" { + data_source_names = [ + "postgres://alloy@{{ infra_uri['postgresql']['domain'] }}:{{ infra_uri['postgresql']['ports']['tcp'] }}/postgres?sslmode=verify-full", + ] +} +prometheus.scrape "postgresql" { + targets = prometheus.exporter.postgres.postgresql.targets + forward_to = [prometheus.relabel.default_label.receiver] +} +///// For certificates metrics +prometheus.scrape "x509" { + targets = [ + { "__address__" = "{{ node['name'] }}.ilnmors.internal:9793" }, + ] + forward_to = [prometheus.relabel.default_label.receiver] +} +{% endif %} + +{% if node['name'] in ['infra', 'auth', 'app'] %} +////// For Input Caddy metrics +prometheus.scrape "caddy" { + targets = [ + { "__address__" = "{{ node['name'] }}.ilnmors.internal:443" }, + ] + scheme = "https" + forward_to = [prometheus.relabel.default_label.receiver] +} +{% endif %} + +// Log +//// Logs output +loki.write "loki" { + endpoint { + url = "https://{{ infra_uri['loki']['domain'] }}:{{ 
infra_uri['loki']['ports']['https'] }}/loki/api/v1/push" + tenant_id = "ilnmors.internal" + } +} +//// Logs relabel +///// journal +loki.relabel "journal_relabel" { + forward_to = [] + rule { + target_label = "instance" + replacement = "{{ node['name'] }}" + } + // Default value + rule { + target_label = "job" + replacement = "systemd-journal" + } + // if identifier exists + rule { + source_labels = ["__journal_syslog_identifier"] + regex = "(.+)" + target_label = "job" + replacement = "$1" + } + // if systemd_unit exists + rule { + source_labels = ["__journal__systemd_unit"] + regex = "(.+)\\.service" + target_label = "job" + replacement = "$1" + } + // if systemd_unit is "user@$UID" + rule { + source_labels = ["job"] + regex = "user@\\d+" + target_label = "job" + replacement = "systemd-journal" + } + // if systemd_user_unit exists + rule { + source_labels = ["__journal__systemd_user_unit"] + regex = "(.+)\\.service" + target_label = "job" + replacement = "$1" + } + rule { + source_labels = ["__journal_priority_keyword"] + target_label = "level" + } +} +{% if node['name'] == "fw" %} +loki.relabel "suricata_relabel" { + forward_to = [loki.process.suricata_json.receiver] + rule { + target_label = "instance" + replacement = "{{ node['name'] }}" + } + rule { + target_label = "level" + replacement = "info" + } + rule { + target_label = "job" + replacement = "suricata_eve" + } +} +{% endif %} +{% if node['name'] == "auth" %} +loki.relabel "caddy_relabel" { + forward_to = [loki.process.caddy_json.receiver] + rule { + target_label = "instance" + replacement = "{{ node['name'] }}" + } + rule { + target_label = "level" + replacement = "info" + } + rule { + target_label = "job" + replacement = "caddy_access" + } +} +{% endif %} +//// Log parser +///// journal +loki.process "journal_parser" { + forward_to = [loki.write.loki.receiver] + // Severity parsing + // If content of log includes "level" information, change the level + stage.logfmt { + mapping = { + "content_level" = 
"level", + } + } + stage.labels { + values = { + "level" = "content_level", + } + } + // Add this section as parser for each service + // common + stage.match { + selector = "{job=\"sshd\"}" + stage.regex { + expression = "Accepted \\w+ for (?P\\w+) from (?P[\\d\\.]+)" + } + stage.labels { + values = { "user" = "" } + } + } + // infra + {% if node['name'] == 'infra' %} + // auth + {% elif node['name'] == 'auth' %} + // app + {% elif node['name'] == 'app' %} + {% endif %} +} +{% if node['name'] == "fw" %} +////// suricata +loki.process "suricata_json" { + forward_to = [loki.write.loki.receiver] + stage.json { + expressions = { + event_type = "event_type", + src_ip = "src_ip", + severity = "alert.severity", + } + } + stage.labels { + values = { event_type = "", severity = "" } + } +} +{% endif %} +{% if node['name'] == "auth" %} +////// caddy +loki.process "caddy_json" { + forward_to = [loki.write.loki.receiver] + stage.json { + expressions = { + status = "status", + method = "method", + remote_ip = "remote_ip", + duration = "duration", + } + } + stage.labels { + values = { status = "", method = "" } + } +} +{% endif %} +//// Logs input +////// journald +loki.source.journal "systemd" { + forward_to = [loki.process.journal_parser.receiver] + // Temporary tags like "__journal__systemd_unit" is automatically removed when logs is passing "forward_to" + // To relabel tags with temporary tags, relabel_rules command is necessary. 
+  relabel_rules = loki.relabel.journal_relabel.rules
+}
+
+{% if node['name'] == 'fw' %}
+////// suricata
+local.file_match "suricata_logs" {
+  path_targets = [{ "__path__" = "/var/log/suricata/eve.json", "instance" = "{{ node['name'] }}" }]
+}
+loki.source.file "suricata" {
+  targets = local.file_match.suricata_logs.targets
+  forward_to = [loki.relabel.suricata_relabel.receiver]
+}
+{% endif %}
+
+{% if node['name'] == 'auth' %}
+////// caddy
+local.file_match "caddy_logs" {
+  path_targets = [{ "__path__" = "/var/log/caddy/access.log", "instance" = "{{ node['name'] }}" }]
+}
+
+loki.source.file "caddy" {
+  targets = local.file_match.caddy_logs.targets
+  forward_to = [loki.relabel.caddy_relabel.receiver]
+}
+{% endif %}
diff --git a/config/services/systemd/common/crowdsec/acquis.d/caddy.yaml b/config/services/systemd/common/crowdsec/acquis.d/caddy.yaml
new file mode 100644
index 0000000..bbf2b85
--- /dev/null
+++ b/config/services/systemd/common/crowdsec/acquis.d/caddy.yaml
@@ -0,0 +1,5 @@
+# Caddy logs
+filenames:
+  - /var/log/caddy/access.log
+labels:
+  type: caddy
diff --git a/config/services/systemd/common/crowdsec/acquis.d/suricata.yaml b/config/services/systemd/common/crowdsec/acquis.d/suricata.yaml
new file mode 100644
index 0000000..d743905
--- /dev/null
+++ b/config/services/systemd/common/crowdsec/acquis.d/suricata.yaml
@@ -0,0 +1,5 @@
+# Suricata logs
+filenames:
+  - /var/log/suricata/eve.json
+labels:
+  type: suricata
diff --git a/config/services/systemd/common/crowdsec/bouncers/crowdsec-firewall-bouncer.yaml.j2 b/config/services/systemd/common/crowdsec/bouncers/crowdsec-firewall-bouncer.yaml.j2
new file mode 100644
index 0000000..1d4994c
--- /dev/null
+++ b/config/services/systemd/common/crowdsec/bouncers/crowdsec-firewall-bouncer.yaml.j2
@@ -0,0 +1,56 @@
+mode: nftables
+pid_dir: /var/run/
+update_frequency: 10s
+log_mode: file
+log_dir: /var/log/
+log_level: info
+log_compression: true
+log_max_size: 100
+log_max_backups: 3
+log_max_age: 30
+api_url: "https://{{ infra_uri['crowdsec']['domain'] }}:{{ infra_uri['crowdsec']['ports']['https'] }}"
+api_key: "{{ hostvars['console']['crowdsec']['bouncer']['fw'] }}"
+insecure_skip_verify: false
+disable_ipv6: false
+deny_action: DROP
+deny_log: false
+supported_decisions_types:
+  - ban
+#to change log prefix
+#deny_log_prefix: "crowdsec: "
+#to change the blacklists name
+blacklists_ipv4: crowdsec-blacklists
+blacklists_ipv6: crowdsec6-blacklists
+#type of ipset to use
+ipset_type: nethash
+#if present, insert rule in those chains
+#iptables_chains:
+#  - INPUT
+#  - FORWARD
+#  - OUTPUT
+#  - DOCKER-USER
+
+## nftables > table inet filter's set crowdsec-blacklists_ipv4,6 is needed
+nftables:
+  ipv4:
+    enabled: true
+    set-only: true
+    family: inet
+    table: filter
+    chain: global
+  ipv6:
+    enabled: true
+    set-only: true
+    family: inet
+    table: filter
+    chain: global
+# packet filter
+pf:
+  # an empty string disables the anchor
+  anchor_name: ""
+
+# Crowdsec firewall bouncer cannot use "[::]" yet
+prometheus:
+  enabled: true
+  listen_addr: "::"
+  listen_port: 60601
diff --git a/config/services/systemd/common/crowdsec/bouncers/whitelists.yaml.j2 b/config/services/systemd/common/crowdsec/bouncers/whitelists.yaml.j2
new file mode 100644
index 0000000..8766d77
--- /dev/null
+++ b/config/services/systemd/common/crowdsec/bouncers/whitelists.yaml.j2
@@ -0,0 +1,11 @@
+name: crowdsecurity/whitelists
+description: "Whitelist console/admin hosts only"
+whitelist:
+  reason: "trusted admin hosts"
+  ip:
+    - "127.0.0.1"
+    - "::1"
+    - "{{ hostvars['fw']['network4']['console']['client'] }}"
+    - "{{ hostvars['fw']['network4']['console']['wg'] }}"
+    - "{{ hostvars['fw']['network6']['console']['client'] }}"
+    - "{{ hostvars['fw']['network6']['console']['wg'] }}"
diff --git a/config/services/systemd/common/crowdsec/crowdsec-update.service b/config/services/systemd/common/crowdsec/crowdsec-update.service
new file mode 100644
index 0000000..d55ed0b
--- /dev/null
+++
b/config/services/systemd/common/crowdsec/crowdsec-update.service @@ -0,0 +1,10 @@ +[Unit] +Description=Crowdsec Rule Update Service +After=network-online.target +Wants=network-online.target + +[Service] +Type=oneshot +ExecStart=/usr/bin/cscli hub update +ExecStart=/usr/bin/cscli hub upgrade +ExecStartPost=/bin/systemctl restart crowdsec diff --git a/config/services/systemd/common/crowdsec/crowdsec-update.timer b/config/services/systemd/common/crowdsec/crowdsec-update.timer new file mode 100644 index 0000000..17807d7 --- /dev/null +++ b/config/services/systemd/common/crowdsec/crowdsec-update.timer @@ -0,0 +1,10 @@ +[Unit] +Description=Daily Crowdsec Rule Update Timer + +[Timer] +OnCalendar=*-*-* 05:00:00 +Persistent=true +RandomizedDelaySec=300 + +[Install] +WantedBy=timers.target diff --git a/config/services/systemd/common/crowdsec/etc/config.yaml.j2 b/config/services/systemd/common/crowdsec/etc/config.yaml.j2 new file mode 100644 index 0000000..0563a4b --- /dev/null +++ b/config/services/systemd/common/crowdsec/etc/config.yaml.j2 @@ -0,0 +1,66 @@ +common: + daemonize: true + log_media: file + log_level: info + log_dir: /var/log/ + log_max_size: 20 + compress_logs: true + log_max_files: 10 + working_dir: . 
+config_paths: + config_dir: /etc/crowdsec/ + data_dir: /var/lib/crowdsec/data/ + simulation_path: /etc/crowdsec/simulation.yaml + hub_dir: /var/lib/crowdsec/hub/ + index_path: /var/lib/crowdsec/hub/.index.json + notification_dir: /etc/crowdsec/notifications/ + plugin_dir: /usr/lib/crowdsec/plugins/ +crowdsec_service: + acquisition_path: /etc/crowdsec/acquis.yaml + acquisition_dir: /etc/crowdsec/acquis.d + parser_routines: 1 +cscli: + output: human + color: auto +db_config: + log_level: info + type: sqlite + db_path: /var/lib/crowdsec/data/crowdsec.db + #max_open_conns: 100 + #user: + #password: + #db_name: + #host: + #port: + flush: + max_items: 5000 + max_age: 7d +plugin_config: + user: nobody # plugin process would be ran on behalf of this user + group: nogroup # plugin process would be ran on behalf of this group +api: + client: + insecure_skip_verify: false + credentials_path: /etc/crowdsec/local_api_credentials.yaml +{% if node['name'] == 'fw' %} + server: + log_level: info + listen_uri: "[::]:8080" + profiles_path: /etc/crowdsec/profiles.yaml + console_path: /etc/crowdsec/console.yaml + online_client: # Central API credentials (to push signals and receive bad IPs) + credentials_path: /etc/crowdsec/online_api_credentials.yaml + trusted_ips: # IP ranges, or IPs which can have admin API access + - ::1 + - 127.0.0.1 + - {{ hostvars['fw']['network6']['subnet']['server'] }} + - {{ hostvars['fw']['network4']['subnet']['server'] }} + tls: + cert_file: /etc/crowdsec/ssl/crowdsec.crt + key_file: /etc/crowdsec/ssl/crowdsec.key +prometheus: + enabled: true + level: full + listen_addr: "[::]" + listen_port: 6060 +{% endif %} diff --git a/config/services/systemd/common/crowdsec/etc/local_api_credentials.yaml.j2 b/config/services/systemd/common/crowdsec/etc/local_api_credentials.yaml.j2 new file mode 100644 index 0000000..834dc95 --- /dev/null +++ b/config/services/systemd/common/crowdsec/etc/local_api_credentials.yaml.j2 @@ -0,0 +1,3 @@ +url: https://{{ 
infra_uri['crowdsec']['domain'] }}:{{ infra_uri['crowdsec']['ports']['https'] }} +login: {{ node['name'] }} +password: {{ hostvars['console']['crowdsec']['machine'][node['name']] }} diff --git a/config/services/systemd/common/kopia/kopia-backup.service.j2 b/config/services/systemd/common/kopia/kopia-backup.service.j2 new file mode 100644 index 0000000..70cca85 --- /dev/null +++ b/config/services/systemd/common/kopia/kopia-backup.service.j2 @@ -0,0 +1,49 @@ +[Unit] +Description=Kopia backup service +Wants=network-online.target +After=network-online.target + +[Service] +User=kopia +Group=kopia + +Type=oneshot + +# logging +StandardOutput=journal +StandardError=journal + +CapabilityBoundingSet=CAP_DAC_READ_SEARCH +AmbientCapabilities=CAP_DAC_READ_SEARCH + +ProtectSystem=strict +ProtectHome=tmpfs +InaccessiblePaths=/boot /root + +{% if node['name'] == 'infra' %} +BindReadOnlyPaths=/home/infra/containers/postgresql/backups +{% elif node['name'] == 'app' %} +BindReadOnlyPaths=/home/app/data +{% endif %} +# In root namescope, %u always bring 0 +BindPaths=/etc/kopia +BindPaths=/etc/secrets/{{ kopia_uid }} +BindPaths=/var/cache/kopia +EnvironmentFile=/etc/secrets/{{ kopia_uid }}/kopia.env + +ExecStartPre=/usr/bin/kopia repository connect server \ + --url=https://{{ infra_uri['kopia']['domain'] }}:{{ infra_uri['kopia']['ports']['https'] }} \ + --override-username={{ node['name'] }} \ + --override-hostname={{ node['name'] }}.ilnmors.internal + +{% if node['name'] == 'infra' %} +ExecStart=/usr/bin/kopia snapshot create \ + /home/infra/containers/postgresql/backups +{% elif node['name'] == 'app' %} +ExecStart=/usr/bin/kopia snapshot create \ + /home/app/data +{% endif %} + + +[Install] +WantedBy=multi-user.target diff --git a/config/services/systemd/common/kopia/kopia-backup.timer.j2 b/config/services/systemd/common/kopia/kopia-backup.timer.j2 new file mode 100644 index 0000000..909a5cc --- /dev/null +++ b/config/services/systemd/common/kopia/kopia-backup.timer.j2 @@ -0,0 +1,10 
@@ +[Unit] +Description=Daily Kopia backup timer + +[Timer] +OnCalendar=*-*-* 03:00:00 +Persistent=true +RandomizedDelaySec=300 + +[Install] +WantedBy=timers.target diff --git a/config/services/systemd/common/kopia/kopia.env.j2 b/config/services/systemd/common/kopia/kopia.env.j2 new file mode 100644 index 0000000..ebee7f7 --- /dev/null +++ b/config/services/systemd/common/kopia/kopia.env.j2 @@ -0,0 +1,5 @@ +KOPIA_PASSWORD={{ hostvars['console']['kopia']['user'][node['name']] }} +KOPIA_CONFIG_PATH=/etc/kopia/repository.config +KOPIA_CACHE_DIRECTORY=/var/cache/kopia +KOPIA_LOG_DIR=/var/cache/kopia/logs +KOPIA_CHECK_FOR_UPDATES=false diff --git a/config/services/systemd/fw/bind/etc/named.conf.j2 b/config/services/systemd/fw/bind/etc/named.conf.j2 new file mode 100644 index 0000000..2ac873a --- /dev/null +++ b/config/services/systemd/fw/bind/etc/named.conf.j2 @@ -0,0 +1,68 @@ +include "/etc/bind/acme.key"; + +options { + directory "/var/cache/bind"; + + listen-on port 53 { {{ hostvars['fw']['network4']['bind']['server'] }}; }; + listen-on-v6 port 53 { {{ hostvars['fw']['network6']['bind']['server'] }}; }; + + // Authoritative DNS setting + allow-recursion { none; }; + allow-transfer { none; }; + allow-update { none; }; + + dnssec-validation no; + + check-names master warn; +}; + +zone "ilnmors.internal." { + type primary; + file "/var/lib/bind/db.ilnmors.internal"; + notify yes; + // ACME-01 challenge policy. It allows only TXT record of subdomain update. + update-policy { + grant acme-key subdomain ilnmors.internal. 
TXT; + }; +}; + +zone "1.168.192.in-addr.arpa" { + type primary; + file "/var/lib/bind/db.1.168.192.in-addr.arpa"; + notify yes; +}; + +zone "10.168.192.in-addr.arpa" { + type primary; + file "/var/lib/bind/db.10.168.192.in-addr.arpa"; + notify yes; +}; + +zone "0.0.0.0.0.0.0.0.1.0.0.0.0.0.d.f.ip6.arpa" { + type primary; + file "/var/lib/bind/db.1.00df.ip6.arpa"; + notify yes; +}; + +zone "0.0.0.0.0.0.0.0.0.1.0.0.0.0.d.f.ip6.arpa" { + type primary; + file "/var/lib/bind/db.10.00df.ip6.arpa"; + notify yes; +}; + +zone "ilnmors.com." { + //split horizon dns + type primary; + file "/var/lib/bind/db.ilnmors.com"; + notify yes; +}; + +logging { + channel default_log { + stderr; + severity info; + }; + category default { default_log; }; + category config { default_log; }; + category queries { default_log; }; +}; diff --git a/config/services/systemd/fw/bind/lib/db.1.00df.ip6.arpa b/config/services/systemd/fw/bind/lib/db.1.00df.ip6.arpa new file mode 100644 index 0000000..62c6181 --- /dev/null +++ b/config/services/systemd/fw/bind/lib/db.1.00df.ip6.arpa @@ -0,0 +1,13 @@ +$TTL 86400 + +@ IN SOA bind.ilnmors.internal. mail.ilnmors.internal. ( + 2026021201 ; serial + 3600 ; refresh (1 hour) + 1800 ; retry (30 minutes) + 604800 ; expire (1 week) + 86400 ; minimum (1 day) + ) + IN NS bind.ilnmors.internal. +1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0 IN PTR fw.ilnmors.internal. +1.1.0.0.0.0.0.0.0.0.0.0.0.0.0.0 IN PTR nas.ilnmors.internal. +0.2.0.0.0.0.0.0.0.0.0.0.0.0.0.0 IN PTR console.ilnmors.internal. diff --git a/config/services/systemd/fw/bind/lib/db.1.168.192.in-addr.arpa b/config/services/systemd/fw/bind/lib/db.1.168.192.in-addr.arpa new file mode 100644 index 0000000..e011c72 --- /dev/null +++ b/config/services/systemd/fw/bind/lib/db.1.168.192.in-addr.arpa @@ -0,0 +1,13 @@ +$TTL 86400 + +@ IN SOA bind.ilnmors.internal. mail.ilnmors.internal. 
( + 2026021201 ; serial + 3600 ; refresh (1 hour) + 1800 ; retry (30 minutes) + 604800 ; expire (1 week) + 86400 ; minimum (1 day) + ) + IN NS bind.ilnmors.internal. +1 IN PTR fw.ilnmors.internal. +11 IN PTR nas.ilnmors.internal. +20 IN PTR console.ilnmors.internal. diff --git a/config/services/systemd/fw/bind/lib/db.10.00df.ip6.arpa b/config/services/systemd/fw/bind/lib/db.10.00df.ip6.arpa new file mode 100644 index 0000000..0d9bd6c --- /dev/null +++ b/config/services/systemd/fw/bind/lib/db.10.00df.ip6.arpa @@ -0,0 +1,17 @@ +$TTL 86400 + +@ IN SOA bind.ilnmors.internal. mail.ilnmors.internal. ( + 2026021201 ; serial + 3600 ; refresh (1 hour) + 1800 ; retry (30 minutes) + 604800 ; expire (1 week) + 86400 ; minimum (1 day) + ) + IN NS bind.ilnmors.internal. +1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0 IN PTR fw.ilnmors.internal. +2.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0 IN PTR blocky.ilnmors.internal. +3.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0 IN PTR bind.ilnmors.internal. +0.1.0.0.0.0.0.0.0.0.0.0.0.0.0.0 IN PTR vmm.ilnmors.internal. +1.1.0.0.0.0.0.0.0.0.0.0.0.0.0.0 IN PTR infra.ilnmors.internal. +2.1.0.0.0.0.0.0.0.0.0.0.0.0.0.0 IN PTR auth.ilnmors.internal. +3.1.0.0.0.0.0.0.0.0.0.0.0.0.0.0 IN PTR app.ilnmors.internal. diff --git a/config/services/systemd/fw/bind/lib/db.10.168.192.in-addr.arpa b/config/services/systemd/fw/bind/lib/db.10.168.192.in-addr.arpa new file mode 100644 index 0000000..dea0da8 --- /dev/null +++ b/config/services/systemd/fw/bind/lib/db.10.168.192.in-addr.arpa @@ -0,0 +1,17 @@ +$TTL 86400 + +@ IN SOA bind.ilnmors.internal. mail.ilnmors.internal. ( + 2026021201 ; serial + 3600 ; refresh (1 hour) + 1800 ; retry (30 minutes) + 604800 ; expire (1 week) + 86400 ; minimum (1 day) + ) + IN NS bind.ilnmors.internal. +1 IN PTR fw.ilnmors.internal. +2 IN PTR blocky.ilnmors.internal. +3 IN PTR bind.ilnmors.internal. +10 IN PTR vmm.ilnmors.internal. +11 IN PTR infra.ilnmors.internal. +12 IN PTR auth.ilnmors.internal. +13 IN PTR app.ilnmors.internal. 
diff --git a/config/services/systemd/fw/bind/lib/db.ilnmors.com b/config/services/systemd/fw/bind/lib/db.ilnmors.com new file mode 100644 index 0000000..f9e03d4 --- /dev/null +++ b/config/services/systemd/fw/bind/lib/db.ilnmors.com @@ -0,0 +1,12 @@ +$TTL 86400 + +@ IN SOA bind.ilnmors.internal. mail.ilnmors.internal. ( + 2026021201 ; serial + 3600 ; refresh (1 hour) + 1800 ; retry (30 minutes) + 604800 ; expire (1 week) + 86400 ; minimum (1 day) + ) + IN NS bind.ilnmors.internal. +* IN A 192.168.10.12 +* IN AAAA fd00:10::12 diff --git a/config/services/systemd/fw/bind/lib/db.ilnmors.internal b/config/services/systemd/fw/bind/lib/db.ilnmors.internal new file mode 100644 index 0000000..4c45d46 --- /dev/null +++ b/config/services/systemd/fw/bind/lib/db.ilnmors.internal @@ -0,0 +1,40 @@ +$TTL 86400 + +@ IN SOA bind.ilnmors.internal. mail.ilnmors.internal. ( + 2026021201 ; serial + 3600 ; refresh (1 hour) + 1800 ; retry (30 minutes) + 604800 ; expire (1 week) + 86400 ; minimum (1 day) + ) + IN NS bind.ilnmors.internal. +bind IN A 192.168.10.3 +bind IN AAAA fd00:10::3 +fw IN A 192.168.10.1 +fw IN AAAA fd00:10::1 +blocky IN A 192.168.10.2 +blocky IN AAAA fd00:10::2 +vmm IN A 192.168.10.10 +vmm IN AAAA fd00:10::10 +infra IN A 192.168.10.11 +infra IN AAAA fd00:10::11 +auth IN A 192.168.10.12 +auth IN AAAA fd00:10::12 +app IN A 192.168.10.13 +app IN AAAA fd00:10::13 +switch IN A 192.168.1.2 +nas IN A 192.168.1.11 +nas IN AAAA fd00:1::11 +console IN A 192.168.1.20 +console IN AAAA fd00:1::20 +printer IN A 192.168.1.101 +ntp IN CNAME fw.ilnmors.internal. +crowdsec IN CNAME fw.ilnmors.internal. +ca IN CNAME infra.ilnmors.internal. +postgresql IN CNAME infra.ilnmors.internal. +ldap IN CNAME infra.ilnmors.internal. +prometheus IN CNAME infra.ilnmors.internal. +loki IN CNAME infra.ilnmors.internal. +grafana IN CNAME infra.ilnmors.internal. +authelia IN CNAME auth.ilnmors.internal. +*.app IN CNAME app.ilnmors.internal. 
diff --git a/config/services/systemd/fw/blocky/blocky.service b/config/services/systemd/fw/blocky/blocky.service new file mode 100644 index 0000000..3fb7386 --- /dev/null +++ b/config/services/systemd/fw/blocky/blocky.service @@ -0,0 +1,23 @@ +[Unit] +Description=Blocky DNS Resolver +Wants=network-online.target +After=network-online.target + +[Service] +User=blocky +Group=blocky + +CapabilityBoundingSet=CAP_NET_BIND_SERVICE +AmbientCapabilities=CAP_NET_BIND_SERVICE + +ExecStart=/usr/local/bin/blocky --config /etc/blocky/config.yaml +Restart=always +RestartSec=5s + + +NoNewPrivileges=true +ProtectSystem=full +ProtectHome=true + +[Install] +WantedBy=multi-user.target diff --git a/config/services/systemd/fw/blocky/etc/config.yaml.j2 b/config/services/systemd/fw/blocky/etc/config.yaml.j2 new file mode 100644 index 0000000..e7a7bdf --- /dev/null +++ b/config/services/systemd/fw/blocky/etc/config.yaml.j2 @@ -0,0 +1,67 @@ +certFile: "/etc/blocky/ssl/blocky.crt" +keyFile: "/etc/blocky/ssl/blocky.key" +minTlsServeVersion: 1.2 +connectIPVersion: dual + +ports: + dns: + - "{{ hostvars['fw']['network4']['blocky']['server'] }}:53" + - "[{{ hostvars['fw']['network6']['blocky']['server'] }}]:53" + tls: + - "{{ hostvars['fw']['network4']['blocky']['server'] }}:853" + - "[{{ hostvars['fw']['network6']['blocky']['server'] }}]:853" + https: + - "{{ hostvars['fw']['network4']['blocky']['server'] }}:443" + - "[{{ hostvars['fw']['network6']['blocky']['server'] }}]:443" + +log: + level: info + format: text + timestamp: true + privacy: false + +upstreams: + groups: + default: + - "tcp-tls:1.1.1.1:853" + - "tcp-tls:1.0.0.1:853" + - "tcp-tls:[2606:4700:4700::1111]:853" + - "tcp-tls:[2606:4700:4700::1001]:853" + +conditional: + fallbackUpstream: false + mapping: + ilnmors.internal: "{{ hostvars['fw']['network4']['bind']['server'] }}, {{ hostvars['fw']['network6']['bind']['server'] }}" + ilnmors.com: "{{ hostvars['fw']['network4']['bind']['server'] }}, {{ 
hostvars['fw']['network6']['bind']['server'] }}" + 1.168.192.in-addr.arpa: "{{ hostvars['fw']['network4']['bind']['server'] }}, {{ hostvars['fw']['network6']['bind']['server'] }}" + 10.168.192.in-addr.arpa: "{{ hostvars['fw']['network4']['bind']['server'] }}, {{ hostvars['fw']['network6']['bind']['server'] }}" + 0.0.0.0.0.0.0.0.1.0.0.0.0.0.d.f.ip6.arpa: "{{ hostvars['fw']['network4']['bind']['server'] }}, {{ hostvars['fw']['network6']['bind']['server'] }}" + 0.0.0.0.0.0.0.0.0.1.0.0.0.0.d.f.ip6.arpa: "{{ hostvars['fw']['network4']['bind']['server'] }}, {{ hostvars['fw']['network6']['bind']['server'] }}" + vpn.ilnmors.com: "tcp-tls:1.1.1.1:853, tcp-tls:1.0.0.1:853, tcp-tls:[2606:4700:4700::1111]:853, tcp-tls:[2606:4700:4700::1001]:853" + +blocking: + blockType: nxDomain + denylists: + ads: + # [ General ] + - https://raw.githubusercontent.com/StevenBlack/hosts/master/hosts + - https://big.oisd.nl + - https://o0.pages.dev/Lite/domains.txt + # [ Korean regional ] + - https://raw.githubusercontent.com/yous/YousList/master/hosts.txt + # [ Telemetry ] + - https://raw.githubusercontent.com/crazy-max/WindowsSpyBlocker/master/data/hosts/spy.txt + - https://raw.githubusercontent.com/Perflyst/PiHoleBlocklist/master/SmartTV.txt + clientGroupsBlock: + default: + - ads + +caching: + minTime: 5m + maxTime: 30m + cacheTimeNegative: 0m + prefetching: true + +prometheus: + enable: false + path: /metrics diff --git a/config/services/systemd/fw/chrony/local-acl.conf.j2 b/config/services/systemd/fw/chrony/local-acl.conf.j2 new file mode 100644 index 0000000..0db83b8 --- /dev/null +++ b/config/services/systemd/fw/chrony/local-acl.conf.j2 @@ -0,0 +1,9 @@ +# 1. Access Control (IPv4) +allow {{ hostvars['fw']['network4']['subnet']['client'] }} +allow {{ hostvars['fw']['network4']['subnet']['server'] }} +allow {{ hostvars['fw']['network4']['subnet']['wg'] }} + +# 2. 
Access Control (IPv6) +allow {{ hostvars['fw']['network6']['subnet']['client'] }} +allow {{ hostvars['fw']['network6']['subnet']['server'] }} +allow {{ hostvars['fw']['network6']['subnet']['wg'] }} diff --git a/config/services/systemd/fw/ddns/ddns.service b/config/services/systemd/fw/ddns/ddns.service new file mode 100644 index 0000000..8508db1 --- /dev/null +++ b/config/services/systemd/fw/ddns/ddns.service @@ -0,0 +1,15 @@ +[Unit] +Description=DDNS Update Service +After=network-online.target +Wants=network-online.target + +[Service] +Type=oneshot + +StandardOutput=journal +StandardError=journal + +EnvironmentFile=/etc/secrets/%U/ddns.env + +# Run the script +ExecStart=/usr/local/bin/ddns.sh -d "ilnmors.com" diff --git a/config/services/systemd/fw/ddns/ddns.sh b/config/services/systemd/fw/ddns/ddns.sh new file mode 100644 index 0000000..4d1bcb0 --- /dev/null +++ b/config/services/systemd/fw/ddns/ddns.sh @@ -0,0 +1,299 @@ +#!/bin/bash + +## Change Log format as logfmt (refactoring) +# ddns.sh -d domain [-t ] [-p] [-r] [-c] + +# Default Information +DOMAIN="" +TTL=180 +C_TTL=86400 +PROXIED="false" +DELETE_FLAG="false" +CURRENT_IP="" + +# These will be injected by systemd +# ZONE_ID='.secret' +# API_KEY='.secret' + +# usage() function +usage() { + echo "Usage: $0 -d \"domain\" [-t \"ttl\"] [-p] [-r] [-c]" + echo "-d : Specify the domain to update" + echo "-t : Specify the TTL(Time to live)" + echo "-p: Specify the cloudflare proxy to use" + echo "-r: Delete the DNS record" + exit 1 +} + +# Log function +log() { + local timestamp=$(date -u +"%Y-%m-%dT%H:%M:%SZ") + local level="$1" + local msg="$2" + echo "time=\"$timestamp\" level=\"$level\" msg=\"$msg\" source=\"ddns.sh\"">&2 +} + +# getopts to get arguments +while getopts "d:t:pr" opt; do + case $opt in + d) + DOMAIN="$OPTARG" + ;; + t) + TTL="$OPTARG" + ;; + p) + PROXIED="true" + ;; + r) + DELETE_FLAG="true" + ;; + \?) 
# unknown options + log "error" "Invalid option: -$OPTARG" + usage + ;; + :) # parameter required option + log "error" "Option -$OPTARG requires an argument." + usage + ;; + esac +done + +# Get option and move to parameters - This has no functional thing, because it only use arguments with parameters +shift $((OPTIND - 1)) + +# Check necessary options +if [ -z "$DOMAIN" ]; then + log "error" "-d option is required" + usage +fi + +if ! [[ "$TTL" =~ ^[0-9]+$ ]] || [ "$TTL" -le 0 ]; then + log "error" "-t option (ttl) requires a number above 0." + usage +fi + +# Check necessary environment variables (Injected by systemd or shell) +if [ -z "$ZONE_ID" ]; then + log "error" "ZONE_ID is required via environment variable." + exit 1 +fi + +if [ -z "$API_KEY" ]; then + log "error" "API_KEY is required via environment variable." + exit 1 +fi + +# Check package +if ! command -v curl >/dev/null; then + log "error" "curl is required" + exit 1 +fi +if ! command -v jq >/dev/null; then + log "error" "jq is required" + exit 1 +fi + +# API options +URL="https://api.cloudflare.com/client/v4/zones/$ZONE_ID/dns_records" +CONTENT_TYPE="Content-Type: application/json" +AUTHORIZATION="Authorization: Bearer $API_KEY" + +# Current IP +CURRENT_IP=$( ip address show dev wan | grep 'inet ' | awk '{print $2}' | cut -d'/' -f1 ) +# Get current IP from external server when IP is private IP +if [[ -z "$CURRENT_IP" || "$CURRENT_IP" =~ ^(10\.|172\.(1[6-9]|2[0-9]|3[0-1])\.|192\.168\.|127\.) ]]; then + log "info" "IP from interface is private or empty. Fetching public IP..." 
+    CURRENT_IP=$(curl -sf "https://ifconfig.me") ||\
+    CURRENT_IP=$(curl -sf "https://ifconfig.kr") ||\
+    CURRENT_IP=$(curl -sf "https://api.ipify.org")
+fi
+if [ "$CURRENT_IP" == "" ]; then
+    log "error" "Can't get an IP"
+    exit 1
+fi
+
+# DNS functions
+
+# get_dns_record() function
+get_dns_record()
+{
+    local type="$1"
+    local name="$2"
+
+    local response="$(
+        curl -s "$URL?type=$type&name=$name"\
+        -H "$CONTENT_TYPE"\
+        -H "$AUTHORIZATION")"
+    if [ "$(echo "$response" | jq -r '.success')" == "false" ]; then
+        log "error" "Can't get dns record by $response"
+        exit 1
+    else
+        # return
+        echo "$response"
+    fi
+}
+
+# create_dns_record() function
+create_dns_record()
+{
+    local type="$1"
+    local name="$2"
+    local ttl="$3"
+    local comment="$4"
+    local content="$5"
+    local response="$(
+        curl -s "$URL"\
+        -X POST\
+        -H "$CONTENT_TYPE"\
+        -H "$AUTHORIZATION"\
+        -d "{
+            \"name\": \"$name\",
+            \"ttl\": $ttl,
+            \"type\": \"$type\",
+            \"comment\": \"$comment\",
+            \"content\": \"$content\",
+            \"proxied\": $PROXIED
+        }")"
+    if [ "$(echo "$response" | jq -r '.success')" == "false" ]; then
+        log "error" "Can't create dns record by $response"
+        exit 1
+    else
+        # return
+        echo "$response"
+    fi
+}
+
+# update_dns_record() function
+update_dns_record()
+{
+    local type="$1"
+    local name="$2"
+    local ttl="$3"
+    local comment="$4"
+    local content="$5"
+    local id="$6"
+    local response=$(
+        curl -s "$URL/$id"\
+        -X PUT\
+        -H "$CONTENT_TYPE"\
+        -H "$AUTHORIZATION"\
+        -d "{
+            \"name\": \"$name\",
+            \"ttl\": $ttl,
+            \"type\": \"$type\",
+            \"comment\": \"$comment\",
+            \"content\": \"$content\",
+            \"proxied\": $PROXIED
+        }")
+    if [ "$(echo "$response" | jq -r '.success')" == "false" ]; then
+        log "error" "Can't update dns record by $response"
+        exit 1
+    else
+        #return
+        echo "$response"
+    fi
+}
+
+# delete_dns_record() function
+delete_dns_record()
+{
+    local type="$1"
+    local id="$2"
+
+    local response=$(
+        curl -s "$URL/$id"\
+        -X DELETE\
+        -H "$CONTENT_TYPE"\
+        -H "$AUTHORIZATION"
+    )
+    
if [ "$(echo "$response" | jq -r '.success')" == "false" ]; then + log "error" "Can't delete dns record by $response" + exit 1 + else + # return + echo "$response" + fi +} + +# Get DNS A, and CNAME record +A_DNS_RECORD=$(get_dns_record "A" "$DOMAIN") +S_DNS_RECORD=$(get_dns_record "cname" "*.$DOMAIN") +W_DNS_RECORD=$(get_dns_record "cname" "www.$DOMAIN") + +# Delete DNS record with Delete flag +if [ "$DELETE_FLAG" == "true" ]; then + FLAG="false" + if [ "$(echo $A_DNS_RECORD | jq -r '.result | length')" -eq 1 ]; then + A_DNS_ID="$(echo $A_DNS_RECORD | jq -r '.result[0].id')" + delete_dns_record "A" "$A_DNS_ID" + log "info" "root DNS record is deleted" + FLAG="true" + fi + if [ "$(echo $S_DNS_RECORD | jq -r '.result | length')" -eq 1 ]; then + S_DNS_ID="$(echo $S_DNS_RECORD | jq -r '.result[0].id')" + delete_dns_record "cname" "$S_DNS_ID" + log "info" "sub DNS record is deleted" + FLAG="true" + fi + if [ "$(echo $W_DNS_RECORD | jq -r '.result | length')" -eq 1 ]; then + W_DNS_ID="$(echo $W_DNS_RECORD | jq -r '.result[0].id')" + delete_dns_record "cname" "$W_DNS_ID" + log "info" "www DNS record is deleted" + FLAG="true" + fi + if [ "$FLAG" == "false" ]; then + log "info" "Nothing is Deleted. 
There are no DNS records" + fi + exit +fi + +# Create or update DNS A record +if [ "$(echo $A_DNS_RECORD | jq -r '.result | length')" -eq 1 ]; then # root DNS record exist + A_DNS_ID="$(echo $A_DNS_RECORD | jq -r '.result[0].id')" + A_DNS_CONTENT="$(echo $A_DNS_RECORD | jq -r '.result[0].content')" + A_DNS_TTL="$(echo $A_DNS_RECORD | jq -r '.result[0].ttl')" + A_DNS_PROXIED="$(echo $A_DNS_RECORD | jq -r '.result[0].proxied')" + if [ "$A_DNS_CONTENT" != "$CURRENT_IP" -o "$A_DNS_TTL" != "$TTL" -o "$A_DNS_PROXIED" != "$PROXIED" ]; then + update_dns_record "A" "$DOMAIN" "$TTL" "$(date "+%Y-%m-%d %H:%M:%S"): root domain from ddns.sh" "$CURRENT_IP" "$A_DNS_ID" + log "info" "Root DNS record is successfully changed Domain: $DOMAIN IP: $A_DNS_CONTENT to $CURRENT_IP TTL: $A_DNS_TTL to $TTL proxied: $A_DNS_PROXIED to $PROXIED" + else + log "info" "Root DNS record is not changed Domain: $DOMAIN IP: $CURRENT_IP TTL: $TTL proxied: $PROXIED" + fi +else # root DNS record does not exist + create_dns_record "A" "$DOMAIN" "$TTL" "$(date "+%Y-%m-%d %H:%M:%S"): root domain from ddns.sh" "$CURRENT_IP" + log "info" "Root DNS record is successfully created Domain: $DOMAIN IP: $CURRENT_IP TTL: $TTL proxied: $PROXIED" +fi + +# Create or update DNS CNAME records +if [ "$(echo $S_DNS_RECORD | jq -r '.result | length')" -eq 1 ]; then # sub DNS record exist + S_DNS_ID="$(echo $S_DNS_RECORD | jq -r '.result[0].id')" + S_DNS_CONTENT="$(echo $S_DNS_RECORD | jq -r '.result[0].content')" + S_DNS_TTL="$(echo $S_DNS_RECORD | jq -r '.result[0].ttl')" + S_DNS_PROXIED="$(echo $S_DNS_RECORD | jq -r '.result[0].proxied')" + if [ "$S_DNS_CONTENT" != "$DOMAIN" -o "$S_DNS_TTL" != "$C_TTL" -o "$S_DNS_PROXIED" != "$PROXIED" ]; then + update_dns_record "cname" "*.$DOMAIN" "$C_TTL" "$(date "+%Y-%m-%d %H:%M:%S"): sub domain from ddns.sh" "$DOMAIN" "$S_DNS_ID" + log "info" "Sub DNS record is successfully changed Domain: $S_DNS_CONTENT to *.$DOMAIN cname: $DOMAIN TTL: $S_DNS_TTL to $C_TTL proxied: $S_DNS_PROXIED to
$PROXIED" + else + log "info" "Sub DNS record is not changed Domain: *.$DOMAIN cname: $DOMAIN TTL: $C_TTL proxied: $PROXIED" + fi +else # sub DNS record does not exist + create_dns_record "cname" "*.$DOMAIN" "$C_TTL" "$(date "+%Y-%m-%d %H:%M:%S"): sub domain from ddns.sh" "$DOMAIN" + log "info" "Sub DNS record is successfully created Domain: *.$DOMAIN cname: $DOMAIN TTL: $C_TTL proxied: $PROXIED" +fi + +if [ "$(echo $W_DNS_RECORD | jq -r '.result | length')" -eq 1 ]; then # www DNS record exist + W_DNS_ID="$(echo $W_DNS_RECORD | jq -r '.result[0].id')" + W_DNS_CONTENT="$(echo $W_DNS_RECORD | jq -r '.result[0].content')" + W_DNS_TTL="$(echo $W_DNS_RECORD | jq -r '.result[0].ttl')" + W_DNS_PROXIED="$(echo $W_DNS_RECORD | jq -r '.result[0].proxied')" + if [ "$W_DNS_CONTENT" != "$DOMAIN" -o "$W_DNS_TTL" != "$C_TTL" -o "$W_DNS_PROXIED" != "$PROXIED" ]; then + update_dns_record "cname" "www.$DOMAIN" "$C_TTL" "$(date "+%Y-%m-%d %H:%M:%S"): www domain from ddns.sh" "$DOMAIN" "$W_DNS_ID" + log "info" "www DNS record is successfully changed Domain: $W_DNS_CONTENT to www.$DOMAIN cname: $DOMAIN TTL: $W_DNS_TTL to $C_TTL proxied: $W_DNS_PROXIED to $PROXIED" + else + log "info" "www DNS record is not changed Domain: www.$DOMAIN cname: $DOMAIN TTL: $C_TTL proxied: $PROXIED" + fi +else # www DNS record does not exist + create_dns_record "cname" "www.$DOMAIN" "$C_TTL" "$(date "+%Y-%m-%d %H:%M:%S"): www domain from ddns.sh" "$DOMAIN" + log "info" "www DNS record is successfully created Domain: www.$DOMAIN cname: $DOMAIN TTL: $C_TTL proxied: $PROXIED" +fi diff --git a/config/services/systemd/fw/ddns/ddns.timer b/config/services/systemd/fw/ddns/ddns.timer new file mode 100644 index 0000000..c570426 --- /dev/null +++ b/config/services/systemd/fw/ddns/ddns.timer @@ -0,0 +1,10 @@ +[Unit] +Description=Run DDNS update service every 5 minutes + +[Timer] +OnBootSec=1min +OnUnitActiveSec=5min +Persistent=true + +[Install] +WantedBy=timers.target diff --git 
a/config/services/systemd/fw/kea/kea-dhcp4.conf.j2 b/config/services/systemd/fw/kea/kea-dhcp4.conf.j2 new file mode 100644 index 0000000..ada0f65 --- /dev/null +++ b/config/services/systemd/fw/kea/kea-dhcp4.conf.j2 @@ -0,0 +1,105 @@ +{ + "Dhcp4": { + "subnet4": [ + { + "subnet": "{{ hostvars['fw']['network4']['subnet']['client'] }}", + "pools" : [ + { + "pool": "192.168.1.254-192.168.1.254" + } + ], + "option-data": [ + { + "name": "routers", + "data": "{{ hostvars['fw']['network4']['firewall']['client'] }}" + }, + { + "name": "domain-name-servers", + "data": "{{ hostvars['fw']['network4']['blocky']['server'] }}" + }, + { + "name": "domain-name", + "data": "ilnmors.internal." + } + ], + "reservations": [ + { + "hw-address": "58:04:4f:18:6c:5e", + "ip-address": "{{ hostvars['fw']['network4']['switch']['client'] }}", + "hostname": "switch" + }, + { + "hw-address": "90:09:d0:65:a9:db", + "ip-address": "{{ hostvars['fw']['network4']['nas']['client'] }}", + "hostname": "nas" + }, + { + "hw-address": "d8:e2:df:ff:1b:d5", + "ip-address": "{{ hostvars['fw']['network4']['console']['client'] }}", + "hostname": "surface" + }, + { + "hw-address": "38:ca:84:94:5e:06", + "ip-address": "{{ hostvars['fw']['network4']['printer']['client'] }}", + "hostname": "printer" + } + ], + "id": 1, + "interface": "client" + }, + { + "subnet": "{{ hostvars['fw']['network4']['subnet']['user'] }}", + "pools" : [ + { + "pool": "192.168.20.2-192.168.20.254" + } + ], + "option-data": [ + { + "name": "routers", + "data": "{{ hostvars['fw']['network4']['firewall']['user'] }}" + }, + { + "name": "domain-name-servers", + "data": "{{ hostvars['fw']['network4']['blocky']['server'] }}" + }, + { + "name": "domain-name", + "data": "ilnmors.internal." 
+ } + ], + "id": 2, + "interface": "user" + } + ], + "interfaces-config": { + "interfaces": [ + "client", + "user" + ], + "dhcp-socket-type": "raw", + "service-sockets-max-retries": 5, + "service-sockets-require-all": true + }, + "renew-timer": 1000, + "rebind-timer": 2000, + "valid-lifetime": 4000, + "loggers": [ + { + "name": "kea-dhcp4", + "output_options": [ + { + "output": "stdout" + } + ], + "severity": "INFO" + } + ], + "lease-database": { + "type": "memfile", + "persist": true, + "name": "/var/lib/kea/kea-leases4.csv", + "lfc-interval": 3600 + } + } +} diff --git a/config/services/systemd/fw/suricata/etc/disable.conf b/config/services/systemd/fw/suricata/etc/disable.conf new file mode 100644 index 0000000..b769f6f --- /dev/null +++ b/config/services/systemd/fw/suricata/etc/disable.conf @@ -0,0 +1,7 @@ +# Stream events +2210010 # SURICATA STREAM 3way handshake wrong seq wrong ack / TCP 3-way handshake in local networks +2210021 +2210045 +# Wrong thread warning +2210059 + diff --git a/config/services/systemd/fw/suricata/etc/enable.conf b/config/services/systemd/fw/suricata/etc/enable.conf new file mode 100644 index 0000000..e69de29 diff --git a/config/services/systemd/fw/suricata/etc/local.rules b/config/services/systemd/fw/suricata/etc/local.rules new file mode 100644 index 0000000..e69de29 diff --git a/config/services/systemd/fw/suricata/etc/suricata.yaml.j2 b/config/services/systemd/fw/suricata/etc/suricata.yaml.j2 new file mode 100644 index 0000000..f88f300 --- /dev/null +++ b/config/services/systemd/fw/suricata/etc/suricata.yaml.j2 @@ -0,0 +1,518 @@ +%YAML 1.1 +--- +suricata-version: "7.0" + +vars: + address-groups: + HOME_NET: "{{ hostvars['fw']['suricata']['home_net'] }}" + EXTERNAL_NET: "!$HOME_NET" + HTTP_SERVERS: "$HOME_NET" + SMTP_SERVERS: "$HOME_NET" + SQL_SERVERS: "$HOME_NET" + DNS_SERVERS: "$HOME_NET" + TELNET_SERVERS: "$HOME_NET" + AIM_SERVERS: "$EXTERNAL_NET" + DC_SERVERS: "$HOME_NET" + DNP3_SERVER: "$HOME_NET" + DNP3_CLIENT: "$HOME_NET" + 
MODBUS_CLIENT: "$HOME_NET" + MODBUS_SERVER: "$HOME_NET" + ENIP_CLIENT: "$HOME_NET" + ENIP_SERVER: "$HOME_NET" + + port-groups: + HTTP_PORTS: "80" + SHELLCODE_PORTS: "!80" + ORACLE_PORTS: 1521 + SSH_PORTS: 22 + DNP3_PORTS: 20000 + MODBUS_PORTS: 502 + FILE_DATA_PORTS: "[$HTTP_PORTS,110,143]" + FTP_PORTS: 21 + GENEVE_PORTS: 6081 + VXLAN_PORTS: 4789 + TEREDO_PORTS: 3544 + +default-log-dir: /var/log/suricata/ + +stats: + enabled: yes + interval: 8 + +plugins: + +outputs: + - fast: + enabled: yes + filename: fast.log + append: yes + - eve-log: + enabled: yes + filetype: regular + filename: eve.json + pcap-file: false + community-id: true + community-id-seed: 0 + xff: + enabled: no + mode: extra-data + deployment: reverse + header: X-Forwarded-For + + types: + - alert: + tagged-packets: yes + - frame: + enabled: no + - anomaly: + enabled: yes + types: + - http: + extended: yes + - dns: + - tls: + extended: yes + - files: + force-magic: no + - smtp: + - ftp + - rdp + - nfs + - smb + - tftp + - ike + - dcerpc + - krb5 + - bittorrent-dht + - snmp + - rfb + - sip + - quic: + - dhcp: + enabled: yes + extended: no + - ssh + - mqtt: + - http2 + - pgsql: + enabled: no + - stats: + totals: yes + threads: no + deltas: no + - flow + - http-log: + enabled: no + filename: http.log + append: yes + - tls-log: + enabled: no + filename: tls.log + append: yes + - tls-store: + enabled: no + - pcap-log: + enabled: no + filename: log.pcap + limit: 1000mb + max-files: 2000 + compression: none + mode: normal # normal, multi or sguil. 
+ use-stream-depth: no + honor-pass-rules: no + - alert-debug: + enabled: no + filename: alert-debug.log + append: yes + - stats: + enabled: yes + filename: stats.log + append: yes + totals: yes + threads: no + - syslog: + enabled: no + facility: local5 + - file-store: + version: 2 + enabled: no + xff: + enabled: no + mode: extra-data + deployment: reverse + header: X-Forwarded-For + - tcp-data: + enabled: no + type: file + filename: tcp-data.log + - http-body-data: + enabled: no + type: file + filename: http-data.log + - lua: + enabled: no + scripts: + +logging: + default-log-level: notice + default-output-filter: + outputs: + - console: + enabled: yes + - file: + enabled: yes + level: info + filename: suricata.log + - syslog: + enabled: no + facility: local5 + format: "[%i] <%d> -- " + +af-packet: +{% for iface in hostvars['fw']['suricata']['interfaces'] %} + - interface: {{ iface }} + cluster-id: {{ 99 - loop.index0 }} + cluster-type: cluster_flow + defrag: yes + use-mmap: yes + tpacket-v3: yes + checksum-checks: no +{% endfor %} + +app-layer: + protocols: + telnet: + enabled: yes + rfb: + enabled: yes + detection-ports: + dp: 5900, 5901, 5902, 5903, 5904, 5905, 5906, 5907, 5908, 5909 + mqtt: + enabled: yes + krb5: + enabled: yes + bittorrent-dht: + enabled: yes + snmp: + enabled: yes + ike: + enabled: yes + tls: + enabled: yes + detection-ports: + dp: 443 + pgsql: + enabled: no + stream-depth: 0 + dcerpc: + enabled: yes + ftp: + enabled: yes + rdp: + ssh: + enabled: yes + http2: + enabled: yes + smtp: + enabled: yes + raw-extraction: no + mime: + decode-mime: yes + decode-base64: yes + decode-quoted-printable: yes + header-value-depth: 2000 + extract-urls: yes + inspected-tracker: + content-limit: 100000 + content-inspect-min-size: 32768 + content-inspect-window: 4096 + imap: + enabled: detection-only + smb: + enabled: yes + detection-ports: + dp: 139, 445 + nfs: + enabled: yes + tftp: + enabled: yes + dns: + tcp: + enabled: yes + detection-ports: + dp: 53 + 
udp: + enabled: yes + detection-ports: + dp: 53 + http: + enabled: yes + libhtp: + default-config: + personality: IDS + request-body-limit: 100kb + response-body-limit: 100kb + request-body-minimal-inspect-size: 32kb + request-body-inspect-window: 4kb + response-body-minimal-inspect-size: 40kb + response-body-inspect-window: 16kb + response-body-decompress-layer-limit: 2 + http-body-inline: auto + swf-decompression: + enabled: no + type: both + compress-depth: 100kb + decompress-depth: 100kb + double-decode-path: no + double-decode-query: no + server-config: + modbus: + enabled: no + detection-ports: + dp: 502 + stream-depth: 0 + dnp3: + enabled: no + detection-ports: + dp: 20000 + enip: + enabled: no + detection-ports: + dp: 44818 + sp: 44818 + ntp: + enabled: yes + quic: + enabled: yes + dhcp: + enabled: yes + sip: +asn1-max-frames: 256 + +datasets: + defaults: + limits: + rules: + +security: + limit-noproc: true + landlock: + enabled: no + directories: + read: + - /usr/ + - /etc/ + - /etc/suricata/ + lua: + +coredump: + max-dump: unlimited + +unix-command: + enabled: yes + filename: /var/run/suricata-command.socket + +legacy: + uricontent: enabled + +exception-policy: auto + +engine-analysis: + rules-fast-pattern: yes + rules: yes + +pcre: + match-limit: 3500 + match-limit-recursion: 1500 + +host-os-policy: + windows: [0.0.0.0/0] + bsd: [] + bsd-right: [] + old-linux: [] + linux: [] + old-solaris: [] + solaris: [] + hpux10: [] + hpux11: [] + irix: [] + macos: [] + vista: [] + windows2k3: [] + +defrag: + memcap: 32mb + hash-size: 65536 + trackers: 65535 # number of defragmented flows to follow + max-frags: 65535 # number of fragments to keep (higher than trackers) + prealloc: yes + timeout: 60 + +flow: + memcap: 128mb + hash-size: 65536 + prealloc: 10000 + emergency-recovery: 30 + +vlan: + use-for-tracking: true + +livedev: + use-for-tracking: true + +flow-timeouts: + default: + new: 30 + established: 300 + closed: 0 + bypassed: 100 + emergency-new: 10 + 
emergency-established: 100 + emergency-closed: 0 + emergency-bypassed: 50 + tcp: + new: 60 + established: 600 + closed: 60 + bypassed: 100 + emergency-new: 5 + emergency-established: 100 + emergency-closed: 10 + emergency-bypassed: 50 + udp: + new: 30 + established: 300 + bypassed: 100 + emergency-new: 10 + emergency-established: 100 + emergency-bypassed: 50 + icmp: + new: 30 + established: 300 + bypassed: 100 + emergency-new: 10 + emergency-established: 100 + emergency-bypassed: 50 + +stream: + memcap: 64mb + checksum-validation: yes + inline: auto + reassembly: + memcap: 256mb + depth: 1mb + toserver-chunk-size: 2560 + toclient-chunk-size: 2560 + randomize-chunk-size: yes + +host: + hash-size: 4096 + prealloc: 1000 + memcap: 32mb + +decoder: + teredo: + enabled: true + ports: $TEREDO_PORTS + vxlan: + enabled: true + ports: $VXLAN_PORTS + geneve: + enabled: true + ports: $GENEVE_PORTS + +detect: + profile: medium + custom-values: + toclient-groups: 3 + toserver-groups: 25 + sgh-mpm-context: auto + prefilter: + default: mpm + grouping: + profiling: + grouping: + dump-to-disk: false + include-rules: false + include-mpm-stats: false + +mpm-algo: auto + +threading: + set-cpu-affinity: no + cpu-affinity: + - management-cpu-set: + cpu: [ 0 ] + - receive-cpu-set: + cpu: [ 0 ] + - worker-cpu-set: + cpu: [ "all" ] + mode: "exclusive" + prio: + low: [ 0 ] + medium: [ "1-2" ] + high: [ 3 ] + default: "medium" + detect-thread-ratio: 1.0 + +luajit: + states: 128 + +profiling: + rules: + enabled: yes + filename: rule_perf.log + append: yes + limit: 10 + json: yes + keywords: + enabled: yes + filename: keyword_perf.log + append: yes + prefilter: + enabled: yes + filename: prefilter_perf.log + append: yes + rulegroups: + enabled: yes + filename: rule_group_perf.log + append: yes + packets: + enabled: yes + filename: packet_stats.log + append: yes + csv: + enabled: no + filename: packet_stats.csv + locks: + enabled: no + filename: lock_stats.log + append: yes + pcap-log: + 
enabled: no + filename: pcaplog_stats.log + append: yes + +nfq: + +nflog: + - group: 2 + buffer-size: 18432 + - group: default + qthreshold: 1 + qtimeout: 100 + max-size: 20000 + +capture: + +ipfw: + +napatech: + +default-rule-path: /var/lib/suricata/rules + +rule-files: + - suricata.rules + +classification-file: /etc/suricata/classification.config +reference-config-file: /etc/suricata/reference.config diff --git a/config/services/systemd/fw/suricata/suricata-update.service b/config/services/systemd/fw/suricata/suricata-update.service new file mode 100644 index 0000000..23cd38e --- /dev/null +++ b/config/services/systemd/fw/suricata/suricata-update.service @@ -0,0 +1,9 @@ +[Unit] +Description=Suricata Rule Update Service +After=network-online.target +Wants=network-online.target + +[Service] +Type=oneshot +ExecStart=/usr/bin/suricata-update --disable-conf /etc/suricata/disable.conf --enable-conf /etc/suricata/enable.conf --local /etc/suricata/rules/local.rules +ExecStartPost=/usr/bin/systemctl reload suricata diff --git a/config/services/systemd/fw/suricata/suricata-update.timer b/config/services/systemd/fw/suricata/suricata-update.timer new file mode 100644 index 0000000..178e854 --- /dev/null +++ b/config/services/systemd/fw/suricata/suricata-update.timer @@ -0,0 +1,10 @@ +[Unit] +Description=Daily Suricata Rule Update Timer + +[Timer] +OnCalendar=*-*-* 06:00:00 +Persistent=true +RandomizedDelaySec=300 + +[Install] +WantedBy=timers.target diff --git a/config/services/systemd/vmm/libvirt/seeds/user-data.j2 b/config/services/systemd/vmm/libvirt/seeds/user-data.j2 new file mode 100644 index 0000000..9860ac3 --- /dev/null +++ b/config/services/systemd/vmm/libvirt/seeds/user-data.j2 @@ -0,0 +1,79 @@ +#cloud-config + +bootcmd: + - groupadd -g 2000 svadmins || true + +hostname: {{ hostvars[target_vm]['vm']['name'] }} + +disable_root: true + +users: + - name: {{ target_vm }} + uid: {{ hostvars[target_vm]['node']['uid'] }} + gecos: {{ target_vm }} + primary_group: svadmins 
+ groups: sudo + lock_passwd: false + passwd: {{ hostvars['console']['sudo']['hash'][target_vm] }} + shell: /bin/bash + +write_files: + - path: /etc/ssh/local_ssh_ca.pub + content: | + {{ hostvars['console']['ssh']['ca']['pub'] | trim }} + owner: "root:root" + permissions: "0644" + - path: /etc/ssh/sshd_config.d/ssh_ca.conf + content: | + TrustedUserCAKeys /etc/ssh/local_ssh_ca.pub + owner: "root:root" + permissions: "0644" + - path: /etc/ssh/sshd_config.d/prohibit_root.conf + content: | + PermitRootLogin no + owner: "root:root" + permissions: "0644" + - path: /etc/apt/sources.list.d/debian.sources + content: | + Types: deb deb-src + URIs: https://deb.debian.org/debian + Suites: trixie trixie-updates trixie-backports + Components: main contrib non-free non-free-firmware + Signed-By: /usr/share/keyrings/debian-archive-keyring.gpg + + Types: deb deb-src + URIs: https://deb.debian.org/debian-security + Suites: trixie-security + Components: main contrib non-free non-free-firmware + Signed-By: /usr/share/keyrings/debian-archive-keyring.gpg + owner: "root:root" + permissions: "0644" +{% if target_vm == 'fw' %} + - path: /etc/sysctl.d/ipforward.conf + content: | + net.ipv4.ip_forward = 1 + net.ipv6.conf.all.forwarding = 1 + owner: "root:root" + permissions: "0644" +{% endif %} +{% set net_config_dir = 'fw' if target_vm == 'fw' else 'common' %} +{% for file_path in query('fileglob', hostvars['console']['node']['config_path'] + '/node/' + net_config_dir + '/networkd/' + '/*') | sort %} + - path: /etc/systemd/network/{{ file_path | basename}} + content: | + {{ lookup('template', file_path) | indent(8) | trim }} + owner: "root:root" + permissions: "0644" +{% endfor %} + +runcmd: + - update-initramfs -u + - systemctl disable networking + - systemctl enable systemd-networkd + - systemctl enable getty@ttyS0 + - sync + +power_state: + delay: "now" + mode: reboot + message: "rebooting after cloud-init configuration" + timeout: 30 diff --git 
a/config/services/systemd/vmm/libvirt/services/app.service b/config/services/systemd/vmm/libvirt/services/app.service new file mode 100644 index 0000000..24f814e --- /dev/null +++ b/config/services/systemd/vmm/libvirt/services/app.service @@ -0,0 +1,23 @@ +[Unit] +Description=app vm +After=network-online.target libvirtd.service fw.service infra.service auth.service +Wants=fw.service infra.service auth.service + +[Service] +Type=oneshot +RemainAfterExit=yes +TimeoutStopSec=360 + +ExecStart=/usr/bin/virsh -c qemu:///system start app + +ExecStartPost=/bin/sleep 30 + +ExecStop=/bin/bash -c '\ + /usr/bin/virsh -c qemu:///system shutdown app; \ + while /usr/bin/virsh -c qemu:///system list --state-running --name | grep -q "app"; do \ + echo "Waiting for app to shutdown..."; \ + sleep 2; \ + done' + +[Install] +WantedBy=default.target diff --git a/config/services/systemd/vmm/libvirt/services/auth.service b/config/services/systemd/vmm/libvirt/services/auth.service new file mode 100644 index 0000000..341ddde --- /dev/null +++ b/config/services/systemd/vmm/libvirt/services/auth.service @@ -0,0 +1,23 @@ +[Unit] +Description=auth vm +After=network-online.target libvirtd.service fw.service infra.service +Wants=fw.service infra.service + +[Service] +Type=oneshot +RemainAfterExit=yes +TimeoutStopSec=360 + +ExecStart=/usr/bin/virsh -c qemu:///system start auth + +ExecStartPost=/bin/sleep 30 + +ExecStop=/bin/bash -c '\ + /usr/bin/virsh -c qemu:///system shutdown auth; \ + while /usr/bin/virsh -c qemu:///system list --state-running --name | grep -q "auth"; do \ + echo "Waiting for auth to shutdown..."; \ + sleep 2; \ + done' + +[Install] +WantedBy=default.target diff --git a/config/services/systemd/vmm/libvirt/services/fw.service b/config/services/systemd/vmm/libvirt/services/fw.service new file mode 100644 index 0000000..451887e --- /dev/null +++ b/config/services/systemd/vmm/libvirt/services/fw.service @@ -0,0 +1,23 @@ +[Unit] +Description=fw vm +After=network-online.target 
libvirtd.service +Wants=network-online.target + +[Service] +Type=oneshot +RemainAfterExit=yes +TimeoutStopSec=360 + +ExecStart=/usr/bin/virsh -c qemu:///system start fw + +ExecStartPost=/bin/sleep 30 + +ExecStop=/bin/bash -c '\ + /usr/bin/virsh -c qemu:///system shutdown fw; \ + while /usr/bin/virsh -c qemu:///system list --state-running --name | grep -q "fw"; do \ + echo "Waiting for fw to shutdown..."; \ + sleep 2; \ + done' + +[Install] +WantedBy=default.target diff --git a/config/services/systemd/vmm/libvirt/services/infra.service b/config/services/systemd/vmm/libvirt/services/infra.service new file mode 100644 index 0000000..714e2c9 --- /dev/null +++ b/config/services/systemd/vmm/libvirt/services/infra.service @@ -0,0 +1,23 @@ +[Unit] +Description=infra vm +After=network-online.target libvirtd.service fw.service +Wants=fw.service + +[Service] +Type=oneshot +RemainAfterExit=yes +TimeoutStopSec=360 + +ExecStart=/usr/bin/virsh -c qemu:///system start infra + +ExecStartPost=/bin/sleep 30 + +ExecStop=/bin/bash -c '\ + /usr/bin/virsh -c qemu:///system shutdown infra; \ + while /usr/bin/virsh -c qemu:///system list --state-running --name | grep -q "infra"; do \ + echo "Waiting for infra to shutdown..."; \ + sleep 2; \ + done' + +[Install] +WantedBy=default.target diff --git a/config/services/systemd/vmm/libvirt/xml/networks/lan-net.xml b/config/services/systemd/vmm/libvirt/xml/networks/lan-net.xml new file mode 100644 index 0000000..478c52a --- /dev/null +++ b/config/services/systemd/vmm/libvirt/xml/networks/lan-net.xml @@ -0,0 +1,19 @@ + + + + lan-net + + + + + + + + + + + + + + + diff --git a/config/services/systemd/vmm/libvirt/xml/networks/wan-net.xml b/config/services/systemd/vmm/libvirt/xml/networks/wan-net.xml new file mode 100644 index 0000000..700056c --- /dev/null +++ b/config/services/systemd/vmm/libvirt/xml/networks/wan-net.xml @@ -0,0 +1,7 @@ + + + + wan-net + + + diff --git a/config/services/systemd/vmm/libvirt/xml/storages/images-pool.xml 
b/config/services/systemd/vmm/libvirt/xml/storages/images-pool.xml new file mode 100644 index 0000000..4bd5f25 --- /dev/null +++ b/config/services/systemd/vmm/libvirt/xml/storages/images-pool.xml @@ -0,0 +1,8 @@ + + + + images-pool + + /var/lib/libvirt/images + + diff --git a/config/services/systemd/vmm/libvirt/xml/storages/seeds-pool.xml b/config/services/systemd/vmm/libvirt/xml/storages/seeds-pool.xml new file mode 100644 index 0000000..9511058 --- /dev/null +++ b/config/services/systemd/vmm/libvirt/xml/storages/seeds-pool.xml @@ -0,0 +1,8 @@ + + + + seeds-pool + + /var/lib/libvirt/seeds + + diff --git a/config/services/systemd/vmm/libvirt/xml/vms/vms.xml.j2 b/config/services/systemd/vmm/libvirt/xml/vms/vms.xml.j2 new file mode 100644 index 0000000..6fff61e --- /dev/null +++ b/config/services/systemd/vmm/libvirt/xml/vms/vms.xml.j2 @@ -0,0 +1,78 @@ + + {{ hostvars[target_vm]['vm']['name'] }} + {{ hostvars[target_vm]['vm']['memory'] }} + {{ hostvars[target_vm]['vm']['cpu'] }} + + {{ hostvars[target_vm]['vm']['shares'] }} + + + hvm + + + + + /usr/share/OVMF/OVMF_CODE_4M.ms.fd + /var/lib/libvirt/qemu/nvram/{{ hostvars[target_vm]['vm']['name'] }}_VARS.fd + + + + + + + + destroy + restart + destroy + + + + + + + + + + + + +{% if target_vm == 'fw' %} + + + + + + + + + + +{% else %} + + + + + +{% endif %} + + + + + +
+ +{% if target_vm == 'app' %} +{% for device in hostvars[target_vm]['vm']['pass_through'].values() %} + + + +
+ +
+ +{% endfor %} +{% endif %} + + + + + diff --git a/data/bin/.gitkeep b/data/bin/.gitkeep new file mode 100644 index 0000000..e69de29 diff --git a/data/create_all_structure.sh b/data/create_all_structure.sh new file mode 100644 index 0000000..aa8a6d8 --- /dev/null +++ b/data/create_all_structure.sh @@ -0,0 +1,56 @@ +#!/bin/bash + +FILE_PATH="$HOME/workspace/homelab/docs/archives/textfiles/$(date "+%Y-%m-%d")" + +mkdir -p $FILE_PATH + +echo "--- Ansible contents ---" > "$FILE_PATH/ansible.txt" +find ~/workspace/homelab/ansible -type f \ + -print0 | \ + sort -z | \ + xargs -0 awk 'FNR==1{print "\n\n---------------------\nFILE PATH: " FILENAME "\n---------------------\n"}1' \ + >> "$FILE_PATH/ansible.txt" +echo "---------------------" | sort >> "$FILE_PATH/ansible.txt" + +echo "--- Data contents ---" > "$FILE_PATH/data.txt" +find ~/workspace/homelab/data -type f \ + ! -path "*volumes*" \ + ! -name "*.deb" \ + ! -name "*.gz" \ + ! -name "*.qcow2" \ + ! -name "*.iso" \ + ! -name "*.gpg" \ + ! -name "*.sql" \ + -print0 | \ + sort -z | \ + xargs -0 awk 'FNR==1{print "\n\n---------------------\nFILE PATH: " FILENAME "\n---------------------\n"}1' \ + >> "$FILE_PATH/data.txt" +echo "---------------------" | sort >> "$FILE_PATH/data.txt" + +echo "--- Config contents ---" > "$FILE_PATH/config.txt" +find ~/workspace/homelab/config -type f \ + ! -path "*/grafana/etc/dashboards*" \ + ! -name "*.sql" \ + -print0 | \ + sort -z | \ + xargs -0 awk 'FNR==1{print "\n\n---------------------\nFILE PATH: " FILENAME "\n---------------------\n"}1' \ + >> "$FILE_PATH/config.txt" +echo "---------------------" | sort >> "$FILE_PATH/config.txt" + + +echo "--- Docs contents ---" > "$FILE_PATH/docs.txt" +find ~/workspace/homelab/docs -type f \ + ! -path "*archives/*" \ + ! -name "*.deb" \ + ! -name "*.gz" \ + ! -name "*.qcow2" \ + ! -name "*.iso" \ + ! -name "*.gpg" \ + ! 
-name "*.sql" \ + -print0 | \ + sort -z | \ + xargs -0 awk 'FNR==1{print "\n\n---------------------\nFILE PATH: " FILENAME "\n---------------------\n"}1' \ + >> "$FILE_PATH/docs.txt" +echo "---------------------" | sort >> "$FILE_PATH/docs.txt" + +cp ~/workspace/homelab/README.md $FILE_PATH/README.md \ No newline at end of file diff --git a/data/ilnmors_root_ca.crt b/data/ilnmors_root_ca.crt new file mode 100644 index 0000000..39fe146 --- /dev/null +++ b/data/ilnmors_root_ca.crt @@ -0,0 +1,11 @@ +-----BEGIN CERTIFICATE----- +MIIBijCCATCgAwIBAgIRAKPu8PZdfsvjvryosyWb01owCgYIKoZIzj0EAwIwIzEh +MB8GA1UEAxMYaWxubW9ycy5pbnRlcm5hbCBSb290IENBMB4XDTI1MTIxOTA0MDM0 +N1oXDTM1MTIxNzA0MDM0N1owIzEhMB8GA1UEAxMYaWxubW9ycy5pbnRlcm5hbCBS +b290IENBMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEKDZLQ+47J72K9281V6qG +I8kBScUpJOppbbIaCwi94dp6Tqbe9PfT4eChOSt2lkmb7bG0PmgfYOXv/FUvgFGP +DKNFMEMwDgYDVR0PAQH/BAQDAgEGMBIGA1UdEwEB/wQIMAYBAf8CAQEwHQYDVR0O +BBYEFCZMiE3SKrzUqlHM6BYDJYkYyelRMAoGCCqGSM49BAMCA0gAMEUCIC3+tXyt +6uz75leUXhQsa0gQ/QfPd/dtjRQvuRPURAZ1AiEAjTFJe7cID6rSByF3e0rhgyeL +d6BE/tcQ7ymDBWUTGn0= +-----END CERTIFICATE----- diff --git a/data/images/.gitkeep b/data/images/.gitkeep new file mode 100644 index 0000000..e69de29 diff --git a/data/vmm_init/grub.d/iommu.cfg b/data/vmm_init/grub.d/iommu.cfg new file mode 100644 index 0000000..6eda4e4 --- /dev/null +++ b/data/vmm_init/grub.d/iommu.cfg @@ -0,0 +1 @@ +GRUB_CMDLINE_LINUX_DEFAULT="quiet intel_iommu=on iommu=pt" diff --git a/data/vmm_init/modprobe.d/vfio.conf b/data/vmm_init/modprobe.d/vfio.conf new file mode 100644 index 0000000..d03d405 --- /dev/null +++ b/data/vmm_init/modprobe.d/vfio.conf @@ -0,0 +1,3 @@ +options vfio-pci ids=8086:46d4,1b21:1064 +softdep i915 pre: vfio-pci +softdep ahci pre: vfio-pci \ No newline at end of file diff --git a/data/vmm_init/network/00-vmm-eth0.link b/data/vmm_init/network/00-vmm-eth0.link new file mode 100644 index 0000000..1119415 --- /dev/null +++ b/data/vmm_init/network/00-vmm-eth0.link @@ -0,0 +1,5 @@ 
+[Match] +MACAddress=c8:ff:bf:05:aa:b0 + +[Link] +Name=eth0 diff --git a/data/vmm_init/network/01-vmm-eth1.link b/data/vmm_init/network/01-vmm-eth1.link new file mode 100644 index 0000000..5f72662 --- /dev/null +++ b/data/vmm_init/network/01-vmm-eth1.link @@ -0,0 +1,5 @@ +[Match] +MACAddress=c8:ff:bf:05:aa:b1 + +[Link] +Name=eth1 diff --git a/data/vmm_init/network/10-vmm-br0.netdev b/data/vmm_init/network/10-vmm-br0.netdev new file mode 100644 index 0000000..6ec2b6d --- /dev/null +++ b/data/vmm_init/network/10-vmm-br0.netdev @@ -0,0 +1,3 @@ +[NetDev] +Name=br0 +Kind=bridge diff --git a/data/vmm_init/network/11-vmm-br1.netdev b/data/vmm_init/network/11-vmm-br1.netdev new file mode 100644 index 0000000..3f00292 --- /dev/null +++ b/data/vmm_init/network/11-vmm-br1.netdev @@ -0,0 +1,7 @@ +[NetDev] +Name=br1 +Kind=bridge + +[Bridge] +VLANFiltering=true +DefaultPVID=1 diff --git a/data/vmm_init/network/12-vmm-vlan1.netdev b/data/vmm_init/network/12-vmm-vlan1.netdev new file mode 100644 index 0000000..bbe3596 --- /dev/null +++ b/data/vmm_init/network/12-vmm-vlan1.netdev @@ -0,0 +1,6 @@ +[NetDev] +Name=vlan1 +Kind=vlan + +[VLAN] +Id=1 diff --git a/data/vmm_init/network/13-vmm-vlan10.netdev b/data/vmm_init/network/13-vmm-vlan10.netdev new file mode 100644 index 0000000..70257b8 --- /dev/null +++ b/data/vmm_init/network/13-vmm-vlan10.netdev @@ -0,0 +1,6 @@ +[NetDev] +Name=vlan10 +Kind=vlan + +[VLAN] +Id=10 diff --git a/data/vmm_init/network/14-vmm-vlan20.netdev b/data/vmm_init/network/14-vmm-vlan20.netdev new file mode 100644 index 0000000..01c2af6 --- /dev/null +++ b/data/vmm_init/network/14-vmm-vlan20.netdev @@ -0,0 +1,6 @@ +[NetDev] +Name=vlan20 +Kind=vlan + +[VLAN] +Id=20 diff --git a/data/vmm_init/network/20-vmm-eth0.network b/data/vmm_init/network/20-vmm-eth0.network new file mode 100644 index 0000000..f26bfe6 --- /dev/null +++ b/data/vmm_init/network/20-vmm-eth0.network @@ -0,0 +1,6 @@ +[Match] +Name=eth0 + +[Network] +Bridge=br0 +LinkLocalAddressing=false diff --git 
a/data/vmm_init/network/21-vmm-eth1.network b/data/vmm_init/network/21-vmm-eth1.network new file mode 100644 index 0000000..19339dd --- /dev/null +++ b/data/vmm_init/network/21-vmm-eth1.network @@ -0,0 +1,15 @@ +[Match] +Name=eth1 + +[Network] +Bridge=br1 +LinkLocalAddressing=false + +[BridgeVLAN] +VLAN=1 +PVID=true +EgressUntagged=true + +[BridgeVLAN] +VLAN=10 +VLAN=20 diff --git a/data/vmm_init/network/22-vmm-br0.network b/data/vmm_init/network/22-vmm-br0.network new file mode 100644 index 0000000..1eae45a --- /dev/null +++ b/data/vmm_init/network/22-vmm-br0.network @@ -0,0 +1,5 @@ +[Match] +Name=br0 + +[Network] +LinkLocalAddressing=false diff --git a/data/vmm_init/network/23-vmm-br1.network b/data/vmm_init/network/23-vmm-br1.network new file mode 100644 index 0000000..ac0e65a --- /dev/null +++ b/data/vmm_init/network/23-vmm-br1.network @@ -0,0 +1,17 @@ +[Match] +Name=br1 + +[Network] +VLAN=vlan1 +VLAN=vlan10 +VLAN=vlan20 +LinkLocalAddressing=false + +[BridgeVLAN] +VLAN=1 +PVID=yes +EgressUntagged=true + +[BridgeVLAN] +VLAN=10 +VLAN=20 diff --git a/data/vmm_init/network/24-vmm-vlan1.network b/data/vmm_init/network/24-vmm-vlan1.network new file mode 100644 index 0000000..3ebb2cd --- /dev/null +++ b/data/vmm_init/network/24-vmm-vlan1.network @@ -0,0 +1,28 @@ +[Match] +Name=vlan1 + +[Network] +# IPv4 +Address=192.168.1.10/24 +# IPv6 +Address=fd00:1::10/64 + +[RoutingPolicyRule] +From=192.168.1.10/32 +Table=1 +Priority=100 + +[Route] +Destination=192.168.1.0/24 +Scope=link +Table=1 + +[RoutingPolicyRule] +From=fd00:1::10/128 +Table=61 +Priority=100 + +[Route] +Destination=fd00:1::/64 +Scope=link +Table=61 diff --git a/data/vmm_init/network/25-vmm-vlan10.network b/data/vmm_init/network/25-vmm-vlan10.network new file mode 100644 index 0000000..62cc6e8 --- /dev/null +++ b/data/vmm_init/network/25-vmm-vlan10.network @@ -0,0 +1,32 @@ +[Match] +Name=vlan10 +[Network] +RequiredForOnline=false +# IPv4 +Address=192.168.10.10/24 +Gateway=192.168.10.1 +DNS=192.168.10.2 +# IPv6 
+Address=fd00:10::10/64 +Gateway=fd00:10::1 +DNS=fd00:10::2 + +[RoutingPolicyRule] +From=192.168.10.10/32 +Table=2 +Priority=100 + +[Route] +Destination=0.0.0.0/0 +Gateway=192.168.10.1 +Table=2 + +[RoutingPolicyRule] +From=fd00:10::10/128 +Table=62 +Priority=100 + +[Route] +Destination=::/0 +Gateway=fd00:10::1 +Table=62 diff --git a/data/vmm_init/nftables.conf b/data/vmm_init/nftables.conf new file mode 100644 index 0000000..34d2c86 --- /dev/null +++ b/data/vmm_init/nftables.conf @@ -0,0 +1,25 @@ +#!/usr/sbin/nft -f + +flush ruleset + +define HOSTS4_CONSOLE = { 192.168.1.20, 192.168.99.20 } +define HOSTS6_CONSOLE = { fd00:1::20, fd00:99::20 } +define PORTS_SSH = 22 + +table inet filter { + chain input { + type filter hook input priority 0; policy drop; + ct state invalid drop comment "deny invalid connection" + ct state established, related accept comment "allow all connection already existing" + iifname "lo" accept comment "allow local connection" + meta l4proto { icmp, icmpv6 } accept comment "allow icmp connection: > VMM" + ip saddr $HOSTS4_CONSOLE tcp dport $PORTS_SSH accept comment "allow ipv4 ssh connection: CONSOLE > VMM" + ip6 saddr $HOSTS6_CONSOLE tcp dport $PORTS_SSH accept comment "allow ipv6 ssh connection: CONSOLE > VMM" + } + chain forward { + type filter hook forward priority 0; policy drop; + } + chain output { + type filter hook output priority 0; policy accept; + } +} diff --git a/data/vmm_init/ssh/local_ssh_ca.pub b/data/vmm_init/ssh/local_ssh_ca.pub new file mode 100644 index 0000000..8d2544b --- /dev/null +++ b/data/vmm_init/ssh/local_ssh_ca.pub @@ -0,0 +1 @@ +ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIJtBbAyORSd3qece5jHnEFrJPR7QxIzeIUsTEYoBLMKd LOCAL_SSH_CA diff --git a/data/vmm_init/ssh/sshd_config.d/prohibit_root.conf b/data/vmm_init/ssh/sshd_config.d/prohibit_root.conf new file mode 100644 index 0000000..7ff52c7 --- /dev/null +++ b/data/vmm_init/ssh/sshd_config.d/prohibit_root.conf @@ -0,0 +1 @@ +PermitRootLogin no diff --git 
a/data/vmm_init/ssh/sshd_config.d/ssh_ca.conf b/data/vmm_init/ssh/sshd_config.d/ssh_ca.conf new file mode 100644 index 0000000..10c8405 --- /dev/null +++ b/data/vmm_init/ssh/sshd_config.d/ssh_ca.conf @@ -0,0 +1 @@ +TrustedUserCAKeys /etc/ssh/local_ssh_ca.pub diff --git a/data/vmm_init/sysctl.d/bridge.conf b/data/vmm_init/sysctl.d/bridge.conf new file mode 100644 index 0000000..3c35632 --- /dev/null +++ b/data/vmm_init/sysctl.d/bridge.conf @@ -0,0 +1,3 @@ +net.bridge.bridge-nf-call-ip6tables = 0 +net.bridge.bridge-nf-call-iptables = 0 +net.bridge.bridge-nf-call-arptables = 0 \ No newline at end of file diff --git a/data/volumes/.gitkeep b/data/volumes/.gitkeep new file mode 100644 index 0000000..e69de29 diff --git a/docs/adr/001-architecture.md b/docs/adr/001-architecture.md new file mode 100644 index 0000000..cf0cf9e --- /dev/null +++ b/docs/adr/001-architecture.md @@ -0,0 +1,72 @@ +# ADR 001 - Architecture + +## Date + +- Feb/23/2026 + - First documentation +- Mar/4/2026 + - Refining sentences + +## Status + +- Accepted + +## Context + +- Maintaining multi nodes requires a huge amount of resources, including hardware, electricity, even administrative efforts +- All units which are responsible for a single role should follow the Principle of Least Privilege \(PoLP\). +- All units should be interchangeable on standard to avoid vendor lock-in. + +## Consideration + +### Hypervisor + +- Proxmox Virtual Environment \(PVE\) + - Based on Debian. + - PVE uses `qm` command which is not a standard to implement the virtual environment.
+- VMware ESXi + - Based on UNIX, developed by VMware \(Licence is not free\) +- Hyper-V + - Based on Microsoft Windows \(Licence is not free\) +- Debian Stable + - Based on standard linux \(conservative\) + - Standard virtualization technology 'Libvirt, QEMU, KVM' + +### Container + +- Docker + - Daemon is used to run containers + - Root authority required + - Socket and network problem is complex \(Docker bridge\) + - docker-compose is an orchestration tool +- Rootless Podman + - Daemonless design + - Root authority not required + - Orchestration is integrated into systemd + - PASTA dumps packet via host-gateway +- K8S, K3S + - HA is based on reprovisioning + - Guarantee availability to create and destroy node dynamically + +### IaC + +- Terraform + - Strength for initiating low-level and dynamic multi node environment +- Ansible + - Declarative and easy yaml grammar + - SSH is the way to set + +## Decisions + +- Use Libvirt/KVM/QEMU on pure linux \(Debian stable\). +- Separate all services by VM, and podman rootless containers without K3S. + - Orchestration stack is not needed in single node system + - Services will be defined by Quadlet to integrate into systemd and to manage them declaratively + - IaC will be implemented by Ansible only declaratively +- All VMs and services are isolated logically by VLAN and nftables + +## Consequences + +- All VMs have independent borderline by VLAN and nftables +- All services have independent namespaces by podman subuid without daemon +- Ansible can manage all configurations of services and VMs declaratively diff --git a/docs/adr/002-network.md b/docs/adr/002-network.md new file mode 100644 index 0000000..4b9aca8 --- /dev/null +++ b/docs/adr/002-network.md @@ -0,0 +1,63 @@ +# ADR 002 - Network + +## Date + +- Feb/23/2026 + - First documentation + + +## Status + +- Accepted + +## Context + +- All L3 communications should be controlled by central firewall node. +- Every firewall rule should be managed by code, not clicks.
+- Every edge node takes charge of L2 communication rules. +- IPv4 and IPv6 dual stack should be supported for future network environment. + +## Consideration + +### Firewall + +- OPNSense/pfSense + - vendor lock-in + - GUI environment \(WebGUI\) can contain vulnerability + - It is hard to manage configurations by IaC +- iptables + - Previous standard of Linux + - IPv4 and IPv6 configuration is separated \(no inet\) +- nftables + - New standard of Linux + - English grammar friendly + - IPv4 and IPv6 configuration can be set on the same table \(inet\) + +### Flat network structure +- LAN only + - L2 communication doesn't need to pass through gateway + - They use MAC address with ARP. Unicast communication is hard to manage. + - It is hard to manage and apply the policy centrally + +## Decisions + +- Categorize all nodes in 4 roles 'client', 'server', 'user', 'wg0; vpn connections' +- Implement role separation with VLAN tagging on L2 switch (systemd-networkd bridge) + - VLAN 1: client (vmm, console, nas) + - VLAN 10: server (vmm, infra, auth, app) + - VLAN 20: user (DHCP allocated devices) + - wg0: VPN connections +- Manage the rules based on roles fundamentally, furthermore manage them based on ip and ports when it is needed +- All L3 communication which needs to pass gateway should be on control of firewall \(fw\) +- All nodes including firewall uses nftables \(modern standard\) to manage the packets based on zone concept +- IPv6 has two track strategy + - Client and server, wg nodes has static ULA IP, and use NAT66 for permanency + - User nodes has GUA SLAAC IP from ISP for compatibility + + +## Consequences + +- Firewall takes charge of L3 communications +- Each nodes takes charge of L2 communications and communication from FW +- All nodes can communicate under both IPv4 and IPv6 +- All policies can be managed by Code diff --git a/docs/adr/003-pki.md b/docs/adr/003-pki.md new file mode 100644 index 0000000..e5f9855 --- /dev/null +++ b/docs/adr/003-pki.md @@ -0,0 
+1,57 @@ +# ADR 003 - PKI + +## Date + +- Feb/23/2026 + - First documentation +- Mar/06/2026 + - Add expiry date observation way + +## Status + +- Accepted + +## Context + +- All communications except loop-back, should be encrypted +- ssh, and TLS communications needs key and certificates +- Public CA never issues for private domain, '.internal' +- Automate issuing and renewing certificates +- Revocation is not needed in this single and small environment. + +## Consideration + +### Automate protocol + +- JWK/JWT provisioner + - It is harder to manage pre-shared secret values than ACME \(Especially nsupdate\) +- authorized_keys + - When the nodes are increased, it is hard to manage authorized_key. + - SSH ca.pub allows all the certificates signed by ca key, so it is not needed to manage authorized_keys from each hosts. + +### Revocation + +- CRL/OCSP/OCSP-stapling + - All long-term certificates are managed manually + - All short-term certificates are managed by ACME + - When the certificates are leaked, it is easier to change intermediate CA itself + +## Decisions + +- Operate private CA + - Root CA \(Store on coldstorage\) - 10 years + - Intermediate CA \(Online server as Step-CA\) - 5 years + - SSH CA - No period +- Manage certificates with two track + - ACME with nsupdate \(using private DNS\) for web services via Caddy - 90 days + - Manual issuing and managing leaf certificate for infra services for independence - 2.5 years + - All manual issuing leaf certificate expiry date is observed by x509-exporter on infra vm +- Manage SSH certificates + - *-cert.pub for host \(with -h options\) + - *-cert.pub for client \(without -h options\) + +## Consequences + +- Private PKI is operated +- Private SSH CA is operated +- All external/internal communication is encrypted as TLS re-encryption.
\(E2EE\) diff --git a/docs/adr/004-dns.md b/docs/adr/004-dns.md new file mode 100644 index 0000000..ffbe019 --- /dev/null +++ b/docs/adr/004-dns.md @@ -0,0 +1,52 @@ +# ADR 004 - DNS + +## Date + +- Feb/23/2026 + - First documentation + + +## Status + +- Accepted + +## Context + +- Private authoritative DNS is required to use private reserved root domain \(.internal\) +- Split horizon DNS needs DNS resolver, because authoritative DNS must not send queries to other DNS. +- Automatical issuing certificates needs private authoritative DNS which supports nsupdate \(RFC 2136\) + +## Consideration + +### Resolver DNS +- AdGuard Home + - More powerful query routing than blocky + - Web UI dependency + - Extra function which is not useful \(DHCP, etc ..\) +- Unbound DNS + - Cache and forward zone management is powerful + - more complex than blocky + - cache function is not that needed in this environment + - Internal authoritative DNS only takes charge of internal communication + - All security function is delegated to public DNS like cloudflare \(DNSSEC, etc\) + +## Decisions + +- Operate BIND9 as authoritative DNS + - BIND9 is developed by ISC as de facto standard of authoritative DNS + - It supports nsupdate perfectly + - Use 2 forward zones + - ilnmors.com for split horizon DNS + - ilnmors.internal for internal DNS + - Uses 4 PTR zones + - Client vlan ipv4, v6 PTR zone + - Server vlan ipv4, v6 PTR zone +- Operate Blocky as resolver and cache DNS + - blocky set the configurations with one code file + - It supports query routing based on its domain - Split horizon DNS + +## Consequences + +- Implementation of split horizon DNS +- ACME is available via nsupdate +- malicious DNS query is blocked in DNS level diff --git a/docs/adr/005-ids-ips.md b/docs/adr/005-ids-ips.md new file mode 100644 index 0000000..eecd985 --- /dev/null +++ b/docs/adr/005-ids-ips.md @@ -0,0 +1,54 @@ +# ADR 005 - IDS/IPS + +## Date + +- Feb/23/2026 + - First documentation + + +## Status + +- Accepted + 
+## Context + +- Automated detection and prevention of threats via network + +## Considerations + +### IPS +- Operate suricata as IDS and IPS + - Suricata IPS mode blocks packets by itself, bypassing nftables integration + - Suricata IPS mode overhead is much higher than IDS mode. + - Suricata IPS mode cannot detect and prevent TLS based communication. + - Homelab server resources are not enough to deal with high overhead. + +- fail2ban + - Single node only, no centralized decision sharing + - No community-based threat intelligence (CAPI) + - Regex based log parsing is less structured than CrowdSec's parser/scenario model + +- Crowdsec + - Community based rules and scenarios \(CAPI\) + - Prevention based on local machines and parsers \(LAPI\) + - Bouncers can use nftables to prevent threats + - Parser can detect even L7 attack under TLS + +## Decisions + +- Operate suricata as IDS + - suricata IDS mode mirrors all packets from interfaces + - matches the packets based on its rules and writes log as fast.log, and eve.json + +- Operate Crowdsec as IPS + - CrowdSec uses two API servers, CAPI, LAPI. + - CAPI updates malicious IPs based on community decisions + - LAPI decides malicious attack based on log from its parser and scenario \(Suricata, caddy, etc\) + - When CAPI, and LAPI decide to block some IP based on log parsed by parser and scenarios, bouncer blocks the malicious accesses. + - Crowdsec registers blacklist on nftables or iptables. + + +## Consequences + +- All malicious attack from WAN, even from LAN is controlled by CrowdSec and Suricata +- The firewall maintains high network throughput because blocking is performed efficiently at the OS network level (`nftables` sets) rather than through deep inline packet inspection.
diff --git a/docs/adr/006-secrets.md b/docs/adr/006-secrets.md new file mode 100644 index 0000000..600fbf7 --- /dev/null +++ b/docs/adr/006-secrets.md @@ -0,0 +1,60 @@ +# ADR 006 - Secrets + +## Date + +- Feb/23/2026 + - First documentation + +## Status + +- Accepted + +## Context + +- Secret values must not uploaded anywhere as plain values. +- Manage secret values as Git without its real values. + +## Considerations + +### External KMS + +- HashiCorp Vault or Infisical + - Very powerful, but introduces significant compute/memory overhead. + - Creates a "Secret Zero" problem for a single-node homelab environment because of dependency \(DB, or etc\). + - It is hard to operate hardware separated key servers. + +### Systemd-credential + +- VM environment is hard to apply TPM for systemd-credential + - It is hard to guarantee the idempotency of TPM in virtual environment. + +### Ansible vault only + +- Ansible vault is powerful options but they are not convenient. + - It is necessary to encrypt separately outside of host_vars or group_vars' file. + - It is hard to add or modify secret values in inventory file. + +## Decisions + +- All secret data which has yaml format is encrypted by sops with age-key in `secret.yaml`. +- age-key is encrypted by gpg and ansible vault with master key \(including upper, lower case, number, special letters) above 40 characters. + - All secret data always decrypt by `edit_secret.sh` script or ansible tasks from secrets.yaml using age-key encrypted by ansible-vault. + - decrypted secret data is always processed on ramfs, they are never saved on disk. +- Master key is never saved on disk, but only cold storage \(USB, M-DISC, operators' memory\) +- The secret data will be saved on each servers specific directory or podman secret. 
+ - OS: + - path: /etc/secrets + owner: root:root + mode: 0711 + - path: /etc/secrets/\$UID + owner: \$UID:root + mode: 0500 + - Containers: + - podman secret: + path: /run/secret/\$SECRET_NAME + - These data are never backed up by kopia, or uploaded to git. + +## Consequences + +- Secret values are not located as a plain text in everywhere except where they are needed. +- It is possible to manage encrypted secret data with Git. diff --git a/docs/adr/007-backup.md b/docs/adr/007-backup.md new file mode 100644 index 0000000..50b81ec --- /dev/null +++ b/docs/adr/007-backup.md @@ -0,0 +1,61 @@ +# ADR 007 - backup + +## Date + +- Feb/23/2026 + - First documentation + +- Feb/27/2026 + - Status changed from Deffered to Accepted + +## Status + +- Accepted + +## Context + +- All configuration file is managed by git \(IaC\) +- All data file should be backed up by kopia +- All backup should follow 3-2-1 backup cycle + +## Considerations + +### Backup Tool +- Restic / BorgBackup + - Also excellent deduplicating backup tools. + - However, Kopia provides a highly efficient native server mode (API) and cross-platform compatibility, making it easier to integrate with Synology DSM. + +### Database Backup Method +- Physical Backup (Raw data folder backup / File system snapshots) + - Backing up the `/var/lib/postgresql` directory directly while the DB is running can lead to severe data corruption and inconsistency. + - Logical dumps (`pg_dump`) are much safer, database-agnostic, and easier to restore in a homelab environment. + + +## Decisions + +- All configuration files are managed by Git + - Configuration files are based on text + - It is necessary to version, history management. 
+ - Local git -> private Gitea -> github private project \(mirrored\) + - This fulfills 3-2-1 backup rules +- Data files are managed by Kopia and DSM + - Local storage - kopia -> DSM's Kopia repository server - CloudSync -> Cloud server such as OneDrive or Google Drive + - This fulfills 3-2-1 backup rules +- Data files which needs backup + - DB data files: dump + - DB data files are located on infra:/home/infra/containers/postgresql/backups/\{cluster,$service\}/ + - App data files: Photos, Media, etc .. + - App data files are located on app:/home/app/data/ + - Backed up files: kopia + - DSM:/kopia/{infra,app}/ +- Kopia over DSM configuration is managed by runbook with equivalent CLI commands due to vendor limitation +- Restore will be processed manually + - DB data files + - From kopia server to console:$HOMELAB_PATH/data/volume/infra/postgresql/\{cluster,data\} + - APP data files + - From kopia server to APP vm after initiating before deploy services +- Automative backup does not guarantee integrity of data system, so before reset the system conduct manual backup after making sure all services are shutdown. + +## Consequences + +- All files including configuration and data back ups will fulfill 3-2-1 \(3 Copies, 2 different media, 1 offsite\) back up rules diff --git a/docs/adr/008-passthrough.md b/docs/adr/008-passthrough.md new file mode 100644 index 0000000..449cdad --- /dev/null +++ b/docs/adr/008-passthrough.md @@ -0,0 +1,43 @@ +# ADR 008 - passthrough + +## Date + +- Feb/23/2026 + - First documentation + +## Status + +- Accepted + +## Context + +- App VM needs GPU for heavy workloads like Immich \(hardware transcoding and machine learning\) +- App VM needs huge data storage for its own services + +## Considerations + +### iGPU + +- SR-IOV + - SR-IOV is tech to divide PCIe devices for several VMs. 
+ - Current stable linux kernel doesn't support sr-iov + - It is necessary to use DKMS for sr-iov + - Use DKMS is unstable depending on kernel upgrade, and the most important thing in server is stability. + - When passthrough iGPU itself, hypervisor cannot use graphic function. + - All nodes are managed by SSH session, so it is not a problem. + +### Storage + +- Each HDD + - Aoostar WTR Pro has their own sata controller for HDD. + - It is more effective and advantageous to passthrough SATA controller itself to manage btrfs RAID10, and HDD health check via S.M.A.R.T values. + +## Decisions + +- Passthrough N150's iGPU to APP VM +- Passthrough SATA controller to APP VM + +## Consequences + +- Passthrough iGPU itself to APP vm. +- Passthrough SATA controller to APP vm. diff --git a/docs/adr/009-isolation.md b/docs/adr/009-isolation.md new file mode 100644 index 0000000..54acb7e --- /dev/null +++ b/docs/adr/009-isolation.md @@ -0,0 +1,49 @@ +# ADR 009 - isolation + +## Date + +- Mar/06/2026 + - First documentation + +## Status + +- Accepted + +## Context + +- Distinguish borderline for service unit including hypervisor, vm, container + +## Considerations + +### Hypervisor + +- As a pure hypervisor, it should only operate virtualization for VM. +- Hypervisor just provides resources and dummy hub \(br\) + +### VM + +- VM should be distinguished based on their logical role. + - Firewall is responsible for networking + - Infra is responsible for infrastructure services such as DB, Monitoring, CA server + - Auth is responsible for authentication and authorization for services + - App is responsible for applications + +### Services + +- Services should be distinguished based on their needs \(Privilege\) + - Network stack, backup stack needs special privilege for low level ACL or networks. 
+ - application stack doesn't need low level privilege usually + +## Decisions + +- Hypervisor: Only supply pure virtualization for VM +- VM: isolated by hypervisor from the other vms based on their role +- Services: + - the one which needs privileges: Run as native on vm. Don't make overhead for virtualization. + - the one which doesn't need privileges: Isolate as container from host. + +## Consequences + +- Guarantee security integrity +- Simple operational rules +- Optimize the limited resources \ No newline at end of file diff --git a/docs/adr/010-provisioning.md b/docs/adr/010-provisioning.md new file mode 100644 index 0000000..8d3e41d --- /dev/null +++ b/docs/adr/010-provisioning.md @@ -0,0 +1,35 @@ +# ADR 010 - provisioning + +## Date + +- Mar/06/2026 + - First documentation + +## Status + +- Accepted + +## Context + +- Every sensitive process should be controlled and managed. + +## Considerations + +### Automate destroying process + +- Destroying is not frequent process, there's no reason to make complex logic +- Sensitive process should be double checked by human manually + +## Decisions + +- Make provisioning process as auto +- Make sensitive process as manual + - Removing + - Formatting + - Destroying + - Certificates and CA \([ADR-003](./003-pki.md)\) + - Etc.
what operator decides that is sensitive + +## Consequences + +- All process can be under control of the operator diff --git a/docs/adr/011-tls-communication.md b/docs/adr/011-tls-communication.md new file mode 100644 index 0000000..93cb58a --- /dev/null +++ b/docs/adr/011-tls-communication.md @@ -0,0 +1,33 @@ +# ADR 011 - TLS communication + +## Date + +- Mar/06/2026 + - First documentation + +## Status + +- Accepted + +## Context + +- To make administrative policy simple +- Set the principle of TLS communication boundary + +## Considerations + +### Apply mTLS + +- implementing mTLS needs both client certificate and server certificate +- Managing a number of certificates makes a huge operational burden \(expiry date, revocation, etc ..\) + +## Decisions + +- Set TLS for all communication except 'lo' interface +- When it is possible to activate TLS, apply it even in 'lo' interface + +## Consequences + +- The policy is set simple +- The overhead is increased little +- Exclude the exceptions on operation \(For the administrator\) diff --git a/docs/adr/012-alerting.md b/docs/adr/012-alerting.md new file mode 100644 index 0000000..242c893 --- /dev/null +++ b/docs/adr/012-alerting.md @@ -0,0 +1,45 @@ +# ADR 012 - Alerting + +## Date + +- Mar/08/2026 + - First documentation + +## Status + +- Accepted + +## Context + +- The necessity of observability +- Difficulty of realizing present status of services +- Stable restoring process already exists + +## Considerations + +### Mail based + +- MTA is hard to manage even when operator uses this as relay host +- The mail protocol is complex to implement only for internal mail system for single operator + +### Chat based + +- Using discord, telegram is easy to get announcements automatically +- The dependency of external services + +## Decisions + +- Do not operate alerting system + - Single node system for small group doesn't need HA + - When single node system is down, the alerting system is also down.
+- When the alert system is needed, implement the system on free instance of external IaaS like AWS or Azure + +## Consequences + +- Simple management and stable restoring + - Check service availability + - Check from grafana + - Access to node via vpn with ssh + - Access to node via physical VLAN + - Reprovisioning the node +- The additional possibility of extension with Cloud services. diff --git a/docs/archives/2025-06/on-premise.txt b/docs/archives/2025-06/on-premise.txt new file mode 100644 index 0000000..a72c25f --- /dev/null +++ b/docs/archives/2025-06/on-premise.txt @@ -0,0 +1,12151 @@ +개인 활용 목적의 On-premise 홈 네트워크 환경 구축 프로젝트 +이 프로젝트의 목표는 개인이 활용하기 위한 On-premise 홈 네트워크 환경을 집에서 사용 가능한 장비와 네트워크 구조로 구현하면서 다양한 기술 요소(네트워크, OS, 가상화와 컨테이너, 자원 관리, 데이터 베이스, 상용 서비스 운영, 보안)이 네트워크 상에서 어떻게 하나의 시스템을 이루고 작동하는지에 대하여 깊이 이해하고, 이를 체득하는 것에 있습니다. + +1. 프로젝트 개요 + +1.1. 동기 및 목표 +동기 + + 본 프로젝트의 시작은 개인적인 사진 및 데이터를 저장하기 위한 방법을 찾는 것이었습니다. 처음 고려한 방법은 클라우드 서비스를 사용하는 것이었습니다. 하지만 클라우드를 사용하는 방식은 클라우드 업체에서 소유한 데이터에 언제나 접근할 수 있다는 위험을 가지고 있습니다. 이는 소유한 데이터에 대하여 완전한 통제가 불가능함과 동시에 온전한 권리의 부재를 의미합니다. 따라서 이러한 문제를 해결하기 위해 처음 선택한 방식은 NAS(Network Attached Storage)를 구축하는 것이었습니다. 하지만 NAS를 구축한 이후 이 방식 역시 설정과 사용에 있어 추상화된 부분이 많아 해당 문제에서 자유로울 수 없었습니다. 데이터를 사용하며 자유롭게 설정하고 다루지 못한다면 이는 클라우드 환경과 다를 것 없거나 혹은 더 위험할 수 있다 생각했습니다. + + 이에 단순한 NAS 구성을 벗어나기로 결정하였습니다. 개인 데이터의 모든 권한과 책임을 가지고 사용 가능한 On-premise 환경을 구축한다는 목표를 세웠습니다. 개인이 구현한 On-premise 환경이 상용 서비스 수준의 가용성과 안정성을 담보하기는 힘들 것입니다. 하지만 본 프로젝트를 통하여 직접 On-premise 환경을 구축하면서 현대 IT(Information Technology) 인프라의 필수 구성요소들과 상호작용, 또 이러한 환경 자체에 대한 이해와 경험을 하고자 합니다. 이를 통해 IT 서비스와 네트워크 환경을 이용함에 있어 스스로 가장 약한 고리가 되지 않는 것 역시 목표입니다. + + 과거 군 복무를 하며 분산된 지역망과 삼중화된 물리적 HA(High Availability)를 갖춘 대규모 On-premise 환경의 전장망을 관리하였습니다. 당시의 경험 속 이해하기 힘들었던 복잡한 매뉴얼과 SoC(Separation of Concerns)에 맞춰 설계된 구조들, LTO(Linear Tape Open) 기반의 자동 백업 시스템, 체계적인 복구 시나리오를 직접 운영했던 경험을 본 프로젝트에 적용하려 합니다. 나아가 단순히 명령어를 입력하고 매뉴얼 중심의 운용을 했던 과거의 경험에서 벗어나 아래와 같이 목표를 설정하고 이를 성취하려 합니다. 
+ + 목표 + + 구현 목표 + + 대상 + + + + 2~5명의 소규모 인원 + + + + 요구 기능 + + + + IT 서비스 제공을 위하여 다음과 같은 기능을 가진 서버 및 네트워크 운영 + + + + 방화벽 + + Local CA(Certificate Authority) + + Local DNS(Domain Name System) + + Proxy + + DB(Database) + + + + + + 파일, 미디어, 어플리케이션 서버 등의 개인 클라우드 환경 구축 + + 블로그, wiki 등의 웹 서비스 호스팅 + + IPS/IDS(Intrusion Prevention/Detection System), Kali 등을 통한 정보보안 실습 환경 제공 + + 단, 다음 기능들은 상용 서비스급 가용성 및 기능을 확보할 필요가 없으며, 학습 및 경험을 우선 + + + + 시스템 아키텍쳐 설계 및 기술 통합 + + 제한된 물리적 환경 및 하드웨어/네트워크 제약 사항 속에서 주어진 요구 사항을 최선의 방식으로 구현합니다. 필수 요구 사항을 구현하며 과도한 투자를 지양하고 효율적인 자원 배분을 하는 능력을 기르고자 합니다. 또한, 구현 전 발생 가능한 문제점을 사전에 발견하고, 이러한 문제점을 해결하기 위하여 어떤 기술을 선택하고 조합하여 설계하고 구현할 것인지에 대한 능력을 기르고자 합니다. + + 추상적 개념의 구체화 및 실무 적용 + + OS(Operating System), 네트워크, 정보보안 등 여러 분야 속 추상적이고 개별적인 개념들을 조합하여 실제로 구현합니다. 이 과정 속에서 각 개념들의 연결과 동작 원리를 자세하게 파악합니다. 이를 통해 이론적인 지식을 어떻게 현실에서 구현하고 적용할 수 있는지에 대한 감각을 익히려 합니다. 모든 기술 스택을 전부 직접 구현하는 것이 아닌 인프라의 핵심 요소와 통제하에 위임 가능한 요소를 분리합니다. 필수적인 부분에 집중하여 주도권 있는 위임을 통해 프로젝트 전반의 기술 품질과 유지 관리성을 높이는 분업 환경을 구현하고자 합니다. + + 문제 해결 및 제약 조건의 극복 + + 프로젝트를 진행하면서 사전에 고려하지 못한 다양한 기술적, 환경적 문제들과 마주할 것입니다. 이러한 문제들이 발생하였을 때 당황하지 않고 정확하게 분석하고 기술적인 근거에 기반하여 해결책을 찾아 적용하는 능력을 기르고자 합니다. + + 체계적인 문서화 및 지식 관리 + + 문서화는 모든 프로젝트 진행에 있어 가장 큰 의미를 갖습니다. 프로젝트 진행 중 모든 과정(계획, 설계 및 근거, 구현 절차 및 로그, 설정, 문제 해결 사항에 대한 기록 등)을 BookStack을 활용하여 체계적으로 문서화를 함으로 단순한 구축 경험을 넘어 지식과 경험을 내재화 하고 관리 및 공유 가능한 자산으로 만드는 능력을 기르고자 합니다. + + 결론 + + 본 프로젝트는 처음부터 완벽한 인프라 설계와 오류 및 시행 착오 없는 구축을 목표로 하지 않습니다. 프로젝트를 진행하며 얻고자 하는 궁극적인 가치는 위와 같은 목표를 가지고 단순하게 개인이 사용 가능한 IT 환경을 구축하는 것과 기술을 배우는 것에 그치지 않습니다. 프로젝트 자체와 이를 통해 경험하는 실패와 시행 착오 역시 훌륭한 학습이 될 것입니다. 특히 문제 해결을 위한 사고력과 실행력을 기르고 작은 범위에서라도 안정적인 서비스를 설계 통제 하며 이를 기반으로 안정적인 서비스 운영을 할 수 있는 수준에 도달하는 것이 궁극적인 가치이자 목표입니다. + + + + 2025-05-13 - 초안 작성 + + 2025-05-15 - 태그 수정 및 동기와 결론, 오타 수정 + + 2025-05-16 - 동기와 결론 수정 + + 2025-05-29 - 결론 수정/요구 기능 제약 조건 구체화 + + 2025-06-14 - 부록 1.1. 문서 작성 가이드라인 에 따라 본문 재작성 + + 2025-06-20 - 날짜 표기 변경 + +1.2. 하드웨어 구성 +목표 + + 프로젝트를 진행하기 위하여 구매한 하드웨어를 목록으로 정리합니다. 
+ + 하드웨어 구성 + + 라우터/L2 스위치 + + 메인 라우터(ipTime T5004; T5004) + + + + 가격 42,900 KRW + + 외부 Gateway 역할 수행 + + 메인 DHCP(Dynamic Host Configuration Protocol) 서버 역할 수행 + + EasyMesh controller 역할 수행 + + 1차 방화벽 기능 수행 + + VLAN 기능 미지원 + + OpenWRT 지원 기종으로 추후 프로젝트시 VLAN(Virtual Local Area Network; 802.1Q) 기능 지원 기기로 사용 가능 + + + + 서브 라우터(ipTime AX6000M; AX6000M) + + + + 가격 99,000 KRW + + 무선 AP(Access Point) + + EasyMesh node 역할 수행 + + VLAN 기능 미지원 + + + + 스위치(ipTime H8005G-IGMP; H8005G) + + + + 가격 17,900 KRW + + 5포트 L2 스위치 + + VLAN 기능 미지원 + + 추후 OpenWRT 업데이트 프로젝트 진행 시 T5004로 변경 예정 + + + + 서버 + + 서버 하드웨어(Aoostar WTR Pro N150; WTR Pro) + + + + 가격 254,900 KRW + + Intel N150 4Core/4Thread/TDP 6W + + DDR4 SO-DIMM slot x1 + + M.2 NVMe slot x1 + + SATA 3.5" slot x4 + + 2.5Gbps NIC(Network Interface Card) x2 + + Hypervisor OS 설치를 통해 여러 역할을 수행하는 VM 및 LXC가 논리적으로 격리되어 각 역할 수행 + + + + RAM(Samsung DDR4 SO-DIMM 32GB) + + + + 가격 106,900 KRW + + + + SSD(Samsung M.2 NVMe SSD 1TB 980PRO) + + + + 기존 사용하던 SSD로 프로젝트를 위하여 따로 구매 하지 않음 + + 가격 276,000 KRW(2022/04/22 기준, 현재 최저가 184,000 KWR) + + + + HDD(Seagate/HGST 3.5" HDD 2TB x4) + + + + 가격 200,000 KRW + + SATA 6Gbps + + + + 외부 백업 서버 + + 서버 하드웨어(Synology DS124; DS124) + + + + 가격 233,000KRW + + Realtek RTD1619B 4Core/4Thread + + DDR4 1GB RAM x1 + + SATA 3.5" x1 + + 1Gbps NIC x1 + + 홈 서버가 구축되기 전 까지 홈 서버 역할 수행 + + 홈 서버 구축 이후로는 홈 서버의 외부 백업 서버 역할 수행 + + + + HDD(Toshiba 3.5" HDD 4TB) + + + + 가격 55,000KRW + + 4TB SATA 3.3Gbps + + + + + + 2025-05-13 - 초안 작성, 각 서버의 역할 추가 + + 2025-05-15 - 태그 수정 + + 2025-05-25 - 네트워크 장비 VLAN 미지원 명시 + + 2025-05-29 - SSD 변경(256GB Hynix > 1TB Samsung) + + 2025-05-30 - T5004가 OpenWRT를 지원하는 사실 명시 + + 2025-06-14 - 부록 1.1. 문서 작성 가이드라인 에 따라 본문 재작성 + + 2025-06-20 - 날짜 표기 변경 + +1.3. 목표 아키텍처 +목표 + + + + 현재 집안의 물리적인 구조를 파악합니다. + + 프로젝트를 통해 구현하고자 하는 서비스를 포함한 목표 아키텍처를 정의합니다. 
+ + + + 목표 아키텍처 구조 + + 물리 구조도 + + + + 물리적 네트워크 구조 설명 + + + + WAN(Wide Area Network) - ISP(Internet Service Provider) Modem - T5004로 WAN과 LAN(Local Area Network)이 구별 + + 통신 단자함에는 전원부가 존재하지 않으며 PoE(Power over Ethernet; 802.3af, 802.3at, 802.3bt)로 전원을 공급 + + T5004는 LAN-WAN 통신의 메인 Gateway로 DHCP 서버 역할 수행, DHCP Relay 및 VLAN 지원하지 않음 + + AX6000M은 AP로 허브 모드로 작동하며, 외부 백업 서버(NAS)와 연결, VLAN 지원하지 않음 + + H5008G는 L2 허브로 WTR Pro의 NIC0, NIC1과 연결, VLAN 지원하지 않음 + + + + 집안의 물리적인 제약 사항들로 인하여 통신 단자함에 새로운 기기 추가의 어려움이 있습니다. 또한 WTR Pro 외부 클라이언트에 대하여 자유로운 VLAN 설정이 불가능합니다. + + 아키텍처 구조도 + + + + 아키텍처 구조 설명 + + 사용자 + + 2~5명의 소규모 인원 + + 네트워크 + + WAN + + + + Public DNS를 통하여 들어오는 Well-Known DNS 패킷만을 허용(DDoS; Distributed Denial of Service 방지) + + Public IP(Internet Protocol) adress를 통한 직접 접속 차단(Port Scan 방지) + + https(443) 및 VPN(Virtual Private Network) 포트만 외부 개방 + + WAN에서 T5004로 접속 차단, UPnP(Universal Plug and Play) 등의 취약 기능 전체 차단, 강력한 비밀번호 사용, 최신 펌웨어 유지 + + Cloudflare API를 통한 DDNS(Dynamic Domain Name System) 자동화 + + Cloudflare를 통한 공인 SSL/TLS(Secure Sockets Layer/Transport Layer Security) 인증서 발급 및 자동 갱신 + + T5004는 Untagged VLAN을 위한 DHCP 서버 및 외부 Gateway 역할 수행 + + + + LAN + + + + OPNsense가 LAN(VLAN 포함)의 내부 라우터로 작동 + + + + WTR Pro의 NIC0과 NIC1의 역할을 분리 + + NIC0은 vmbr0, NIC1은 vmbr1 할당 + + + + vmbr0: OPNsense의 vtnet0과(T5004와 연결되어 WAN 통신) + + vmbr1: OPNsense의 vtnet1과 연결(Trunk mode, Untagged VLAN, VLAN2,3,4,10 연결되어 LAN 통신, PVE Web UI 연결) + + + + + + 모든 LAN(VLAN 포함)의 기본 Gateway는 OPNsense(T5004의 DHCP 서버 기본 Gateway 포함) + + OPNsense의 Gateway는 T5004로 고정 할당 + + T5004는 NAT가 허용된 모든 패킷을 OPNsense로 포트 포워딩 + + + + + + FreeIPA를 통한 LDAP(Lightweight Directory Access Protocol)/SSO(Single Sign ON) 및 Kerberos, Local CA, Local DNS(Split Horizon DNS) 구현 + + AdGuard Home LXC를 통하여 광고 차단 필터를 적용한 DNS 서비스 구현 + + + + AdGuard Home > FreeIPA DNS > Cloudflare DNS(DoH; DNS over Https 사용) + + + + + + Zero Trust 원칙에 따라 클라이언트 간 iptables 등 Local 방화벽 설정을 통한 통신 제어 강화 + + PVE(Proxmox Virtual Environment) 내부의 vmbr(Linux Bridge) 기능을 활용하여 
VM(Virtual Machine)/LXC(LinuX Container)/Docker를 OPNsense와 연결 + + 보안 실습을 위한 Kali와 실습 Client는 별도의 VLAN을 통하여 격리 + + 각 서비스 별 VLAN을 나누어 IP 할당 + + + + Untagged VLAN(192.168.0.0/24): Untagged VLAN으로 외부 클라이언트 망(PC, DS124, Mobile Devices) + + + + 주요 Clients(PC, DS124 등) Static IP 할당 + + Mobile Devices 및 IoT 기기는 Dynamic IP 할당 + + + + + + VLAN2(192.168.2.0/24): PVE 내부 서버 및 서비스망 + + VLAN3(192.168.3.0/24): Kali 망 + + VLAN4(192.168.4.0/24): 정보보안 실습 Client 망 + + VLAN10(10.0.0.0/24): VPN Clients 망 + + + + + + VLAN 간 통신 제어 + + + + VLAN 간 통신시 접근 가능 IP 제한 + + 외부 통신시 VPN 이용한 접근만 가능 + + 특정 서비스(Reverse Proxy 등)은 서비스 목적에 따라 필요한 범위 내에서 접근 허용 + + + + + + + + WTR Pro + + 기본 사항 + + + + PVE를 통하여 여러 기능의 서버를 중요도와 특성에 맞게 각각 VM 및 LXC로 논리적 분리 구축 + + 모든 LXC는 기본적으로 root 권한이 포함되지 않은 Unprivileged LXC로 구축 + + LXC 상에서 Host의 Root 권한이 필요한 Docker 서비스를 운영하지 않음(Docker 사용 시 VM 활용) + + NIC0, NIC1은 OPNsense에 vtnet으로 할당 + + + + NIC0: WAN 통신 담당 + + NIC1: LAN(VLAN) 통신 담당 + + + + + + 각 서비스 별 대표 UID/GID Matrix를 작성(FreeIPA LDAP/SSO 전용 UID 대역과 분리) + + OPNsense 및 FreeIPA가 SPOF(Single Point of Failure)가 되므로 자동 복구 절차 및 수동 복구 절차 수립(자동화 모니터링/복구 스크립트 작성 후 활용) + + 각 VM과 LXC에 지정된 리소스는 모니터링 하며 세부 조정 + + + + 로깅 및 모니터링과 서버 설정 + + + + 로깅 및 모니터링 + + + + Prometheus와 Grafana를 통한 모니터링 및 시각화 + + Loki와 Promtail을 활용한 log 중앙 집중 + + Alertmanager를 통한 특이 log 알림 + + + + + + Ansible + Semaphore(Web UI)를 활용한 서버 설정 자동화 + + Gitea를 활용한 설정 파일 아카이빙 + + 추후 n8n과 연동하여 모든 과정 자동화 + + + + 파일 및 백업 + + + + Btrfs(B-Tree File System)의 CoW(Copy on Write) 기능으로 인한 외부 단편화 방지를 위하여 파일 서버 상 3개월 마다 한 번씩 btrfs filesystem defragment 명령어 수행 + +  시스템 + + + + Btrfs 스냅샷을 활용하여 하루 1회, 15일간 스냅샷 보관 + + OS 파티션 및 VM/LXC 저장소 한 달 1회 백업(설정 변경 시 이상 없는 경우 즉시 백업)하여 파일 서버에 저장, 최근 3개 버전 유지 + + + + + +  어플리케이션 + + + + OPNsense/FreeIPA 설정, DB, docker-compose.yml 등의 주요 설정 및 데이터 백업하여 Gitea에 저장 + + 각 서버의 로그 보관 기간 7일, 이후 파일 서버로 이관 후 3 개월 보관 후 파기(일반 삭제) + + + + + + 데이터 + + + + 파일 서버의 데이터(NFS 공유 데이터 및 백업 데이터)를 DS124 및 클라우드에 백업 (Kopia를 통한 백업 기능 활용, E2EE 기능 적용) + + 추후 데이터 용량이 커지면 Amazon Glacier 
서비스 이용 고려 + + + + + + 실제 환경 구축 후 더욱 구체적이고 실질적인 백업 전략 수립 + + + + 필요 서비스(VM/LXC/Docker) + + + + OPNsense VM + + Rocky Linux - FreeIPA VM + + AdGuard Home LXC + + Monitoring Server LXC + + Proxy Server LXC + + File Server VM - Docker + + + + Portainer Agent - 도커 통합 관리 + + Seafile - 파일 관리 및 동기화 + + Kopia - 백업 및 아카이빙 + + + + + + DB Server VM - Docker + + + + Portainer Agent - 도커 통합 관리 + + Maria DB/Postgre SQL - DBMS + + + + + + Web Server/WAS VM - Docker + + + + Portainer Agent - 도커 통합 관리 + + Homepage - 홈페이지 + + Ghost - 블로그 + + Bookstack - wiki, 노트 + + Gitea - Git 버전 관리 + + + + + + Application Server VM - Docker + + + + Portainer Server - 도커 통합 관리 + + Uptime Kuma - 도커 서비스  모니터링 + + Diun - 도커 업데이트 관리 + + n8n - 워크 플로우 자동화 + + Redis - 캐시 DB + + Code-Server - 웹 코드 에디터 + + Immich - 사진 및 동영상 관리 + + PeerTube - 동영상 컨텐츠 관리 + + Navidrome - 음악 관리 + + Vaultwarden - 사용자 암호 관리 + + Infisical - DB, API key 등 관리 + + Matrix-Synapse/element - 채팅/통화/영상 통화 + + Radicale - 캘린더 및 주소록 + + Vikunja - To-Do 리스트 + + Firefly III - 가계부 + + Paperless-ngx - 문서 관리 + + Snipe-IT - ITAM; IT 자산 관리(하드웨어, 소프트웨어 라이센스 등) + + Calibre-web - 전자책 관리 + + Kmoga - 만화책 관리 + + Audiobookshelf - 오디오북 관리 + + + + + + Kali and practice client VM + + + + 가용성 확보를 위한 리소스 및 복구 우선 순위 + + + + 네트워크 및 인증 핵심 기능을 우선 복구하여 서비스 의존성 문제 최소화 + + PVE > OPNsense > FreeIPA > Proxy Server > DB Server > File Server > etc + + + + DS124 + + + + 구축 초기에는 Docker 등의 필요 서비스 임시 호스팅 + + 구축 중반에는 초기 역할을 마이그레이션 하서 백업 서버 역할 수행 + + 구축 이후에는 완전한 백업 서버로 역할 전환 + + + + 결론 + + 해당 목표 아키텍처는 프로젝트가 진행되며 변경될 수 있습니다. 목표 아키텍처를 계속해서 발전시키며 구현해 나갈 것입니다. 
+ + + + 2025-05-13 - 초안 작성 + + 2025-05-15 - 태그 수정 및 최종 목표 아키텍처 수정, 이상적인 아키텍쳐 항목 작성 + + 2025-05-24 - 최종 목표 아키텍쳐 및 이상적인 구조의 아키텍쳐 구조도 수정 + + 2025-05-25 - 최종 목표 아키텍쳐 및 이상적인 구조의 아키텍쳐 구조도 수정/VLAN, vtnet, vmbr 관련 내용 추가 + + 2025-05-28 - DB server LXC를 VM으로 변경/로깅 및 모니터링 전략 추가/ 백업 전략 구체화 + + 2025-05-29 - T5004 보안 대책 추가/Ansible을 통한 설정 자동화 및 조각모음 대책 추가 + + 2025-05-30 - 네트워크 설계 VLAN 반영 수정/OpenWRT 적용 시 VLAN, HA 가능 구조 적용 + + 2025-05-31 - VLAN/HA 추가 구조를 1.4. 추가 프로젝트 계획 으로 옮김 + + 2025-06-02 - vtnet0과 vtnet1의 역할 명확화 + + 2025-06-15 - 부록 1.1. 문서 작성 가이드라인 에 따라 본문 재작성 + + 2025-06-16 - AdGuard Home LXC 내용 추가 + + 2025-06-19 - VPN Clients 망 추가 + + 2025-06-20 - 날짜 표기 변경 + + 2025-06-23 - VLAN 표기 방식 수정 + +1.4. 추가 프로젝트 계획 +목표 + + 프로젝트의 목표 아키텍처 구현한 이후 이어서 진행할 추가 프로젝트의 청사진을 정의합니다. + + OpenWRT 적용 계획 + + 목표 + + 현 프로젝트에서 메인 라우터인 T5004는 VLAN을 지원하지 않아 WTR Pro 외부 클라이언트에 대한 VLAN 구성이 어렵습니다. 이때 T5004는 오픈 소스 라우터 OS인 OpenWRT를 설치할 수 있습니다(비고: OpenWRT 지원 목록 - ipTime T5004 ). 따라서 T5004에 OpenWRT를 설치 시 WTR Pro 외부의 클라이언트 또한 VLAN으로 L3 망 격리가 가능해져 보다 정밀한 네트워크 관리가 가능합니다. + + OpenWRT 적용시 구조 및 아키텍처 + + 현 프로젝트의 목표 아키텍처를 구현한 후 안정적인 운영을 할 수 있을 때 T5004에 OpenWRT를 설치하여 구현하고자 하는 아키텍처입니다. 
+ + 물리 구조도 + + + + 물리적 네트워크 구조 설명 (기존 구조에서 수정된 부분) + + + + WAN - ISP Modem - T5004(OpenWRT) - T5004(OpenWRT) - OPNsense 구조 + + + + 각 방의 랜 포트가 하나 뿐이고, 회선도 하나 뿐인 물리적 한계로 인한 타협사항 + + 두 장비 모두 L2 관리형 스위치로 연결된 포트에 VLAN10(WAN Paththrough 망)을 태깅 + + 이 계획에서는 WAN Paththrough(VLAN을 통한 WAN 트래픽 격리 전달)를 통해, OPNsense는 공인 IP 그대로 전달 받음 + + + + + + WTR Pro가 주 라우터 및 IPS/IDS로 작동 + + AX6000M은 무선 AP로 허브 모드로 작동하며, 외부 백업 서버(NAS)와 연결, VLAN 지원하지 않음 + + H5008G가 T5004(OpenWRT)로 대체 + + + + 아키텍처 구조도 (기존 구조에서 수정된 부분) + + + + + + 아키텍처 구조 설명 (기존 아키텍처에서 수정된 부분) + + 사용자 + + 2~5명 + + 네트워크 + + + + WAN + + + + 외부 DNS를 통한 Well-Known DNS의 패킷만을 허용(DDoS 방지/ OPNsense에서 처리) + + T5004는 WAN Paththrough가 가능한 L2 관리형 스위치로 작동 + + 모든 라우팅 및 IPS/IDS, Gateway는 OPNsense에서 처리 + + + + + + LAN + + + + VLAN을 통한 엄격한 내부 망분리(DMZ는 관제/CERT 운영 없이는 비효율적) + + 각 장비 별(네트워크 장비, 서버, 주요 클라이언트) 등에 대한 고정 IP 설정 + + + + 관리할 고정 IP가 적으므로, MAC기반 DHCP 예약이 아닌 IP 매트릭스를 운영하며 수동으로 정의  + + VLAN100(192.168.10.0/24 ): WAN Paththrough 망 + + VLAN2(192.168.2.0/24): 클라이언트(PC 등) 망 + + VLAN3(192.168.3.0/24): Hypervisor 내부 서버 및 서비스 망 + + VLAN4(192.168.4.0/24): Mobile Devices 및 Guest 망 + + VLAN5(192.168.5.0/24): Kali 망 + + VLAN6(192.168.6.0/24): Kali를 통한 정보보안 실습망 + + VLAN10(10.0.0.0/24): VPN Clients 망 + + + + + + + + + + + + + + HA를 위한 계획 + + 목표 + + 필요 선행 프로젝트: OpenWRT 적용 + + 현 프로젝트의 목표는 상용 서비스 만큼의 가용성을 확보하는 것은 아닙니다. 이는 많은 시간과 자원을 투자하여야 하여야 하며, 현실적인 제약 조건을 고려하여야 합니다. 하지만 HA는 현대 IT 인프라의 중요한 요소로 이를 경험 및 학습을 하는 것은 큰 의미가 있습니다. 따라서 현재 가장 큰 SPOF인 OPNsense와 FreeIPA를 HA 구성하여 실제 작동을 경험하고 테스트 해보고자 합니다.  
+ + HA 적용시 구조 및 아키텍처 + + 물리 구조도 + + + + + + 물리적 네트워크 구조 설명 (OpenWRT 계획 구조에서 수정된 부분) + + + + + + WAN - ISP Modem - T5004(OpenWRT) - T5004(OpenWRT) - OPNsense 구조 + + + + 각 방의 랜 포트가 하나 뿐이고, 회선도 하나 뿐인 물리적 한계로 인한 타협사항 + + 두 장비 모두 L2 관리형 스위치로 연결된 포트에 VLAN100(WAN Paththrough 망)을 태깅 + + 이 계획에서는 WAN Paththrough(VLAN을 통한 WAN 트래픽 격리 전달)를 통해, OPNsense는 공인 IP 그대로 전달 받음 + + + + + + Mini PC가 주 라우터 및 IPS/IDS로 작동 + + AX6000M은 무선 AP로 허브 모드로 작동하며, 외부 백업 서버(NAS)와 연결, VLAN 지원하지 않음 + + H5008G가 T5004(OpenWRT)로 대체 + + + + 아키텍처 구조도 + + + + 아키텍처 구조 설명 (OpenWRT 계획 구조에서 수정된 부분) + + + + 사용자 + + 2~5명 + + 네트워크 + + + + WAN + + + + 외부 DNS를 통한 Well-Known DNS의 패킷만을 허용(DDoS 방지/ OPNsense에서 처리) + + T5004는 WAN Paththrough가 가능한 L2 관리형 스위치로 작동 + + 모든 라우팅 및 IPS/IDS, Gateway는 OPNsense main에서 처리(OPNsense HA도 stanby로 설정) + + + + + + LAN + + + + VLAN을 통한 엄격한 내부 망분리(DMZ는 관제/CERT 운영 없이는 비효율적) + + 각 OPNsense의 vtnet0은 Trunk로 WAN(VLAN100) 및 HA LAN(VLAN200)과 연결 + + 각 OPNsense의 vtnet1은 Trunk로 VLAN(VLAN2, 3, 4, 5, 6, 10) 과 연결 + + 각 장비 별(네트워크 장비, 서버, 주요 클라이언트) 등에 대한 고정 IP 설정 + + + + 관리할 고정 IP가 적으므로, MAC기반 DHCP 예약이 아닌 IP 매트릭스를 운영하며 수동으로 정의 + + + + VLAN100(192.168.10.0/24 ): WAN Paththrough 망 + + VLAN200(192.168.20.0/24): HA 구성 및 관리 망 + + VLAN2(192.168.2.0/24): 클라이언트(PC 등) 망 + + VLAN3(192.168.3.0/24): Hypervisor 내부 서버 및 서비스 망 + + VLAN4(192.168.4.0/24): Mobile Devices 및 Guest 망 + + VLAN5(192.168.5.0/24): Kali 망 + + VLAN6(192.168.6.0/24): Kali를 통한 정보보안 실습망 + + VLAN10(10.0.0.0/24): VPN Clients 망 + + + + + + + + + + + + + + + + WTR Pro, mini PC + + + + 기본 사항 + + + + OPNsense 및 FreeIPA가 SPOF(단일 실패 지점)가 될 수 있으므로 물리적인 HA 구성 + + 각 서버의 리소스 부하를 고려하여 메인 서버를 mini PC, 스탠바이 서버를 WTR Pro에 구성 + + 각 VM 및 LXC에 지정된 리소스는 모니터링 하면서 유동적으로 변경 + + + + + + 필요 서비스(VM/LXC/Docker) + + + + OPNsense VM(HA 구성) + + FreeIPA VM(HA 구성) + + AdGuard Home LXC + + Monitoring Server LXC + + Proxy Server LXC(HA 구성) + + File Server VM + + DB Server VM + + Web Server/WAS VM + + Application Server VM + + Kali and practice client VM + + + + + + 가용성 확보를 위한 복구 우선 순위 + 
+ + + 네트워크 및 인증 핵심 기능을 우선 복구하여 서비스 의존성 문제 최소화 + + PVE > OPNsense > FreeIPA >Proxy Server > DB Server > File Server > etc + + + + + + + + + + On-premise 환경 규범집 작성 및 정보보안 법률 적용 계획 + + 목표 + + 단순한 서버 구현 및 서비스 제공을 넘어서, 이 서비스를 안전하게 운영하기 위한 첫 걸음을 다음 추가 프로젝트를 통해 진행하려 합니다. 개인 On-premise 환경을 위한 규범집을 작성하고, 법률을 서비스 환경에 적용하여 ISMS-P 기준을 간략하게 적용하여 간이 보고서를 작성하려 합니다. 이는 보안이 실 서비스 운영에 있어서 얼마나 중요한지 확인하고, 법과 규정을 실제로 서비스에서 왜 필요한지에 대한 심도 싶은 이해를 하고자 합니다. + + + + IPv4, IPv6 동시 서비스 계획 + + 목표 + + 현대 인터넷 환경은 IPv4 체계의 주소 고갈과 한계로 인하여 IPv6 체계의 도입하고 있는 과도기적 상황입니다. 이러한 상황 속에서 IPv4의 NAT(Network Address Translation), Subnetting, Broadcast와 Unicast, IPv4 환경에서 네트워크 보안 등에 대하여 심도 깊은 이해를 하고, IPv6 환경에서 어떻게 같은 서비스를 지속할 수 있을지에 대한 학습 및 경험을 하고자 합니다. + + + + IaaS(Infrastructure as a Service) 등 외부 서비스와 연동 계획 + + 목표 + + 현대 IT 인프라는 절대 On-premise 환경에서만 작동하지 않습니다. 수많은 서비스와 On-premise 환경이 동시에 공존하며 상호 보완적인 관계를 띄고 있습니다. 이에 On-premise 환경 구축 후 이를 Azure와 같은 외부 IaaS 서비스와 직접 연동을 하는 프로젝트를 통해 현대 IT 인프라가 어떤 형태로 작동하는 지에 대한 더욱 깊은 이해를 추구합니다. + + + + 2025-05-24 - 초안 작성 + + 2025-05-28 - IPv4, IPv6 동시 서비스 계획 추가 작성 + + 2025-05-29 - IaaS 등 외부 서비스와 연동 계획 추가 작성 + + 2025-05-30 - HA 계획에 OpenWRT 내용 추가 + + 2025-06-16 - OpenWRT와 HA 계획 분리 및 부록 1.1. 문서 작성 가이드라인 에 따라 본문 재작성 + + 2025-06-19 - VPN Clients 망 추가 + + 2025-06-20 - 날짜 표기 변경 + +2. 프로젝트 설계 + +2.1. 하드웨어 선정 +목표 + + + + 본 프로젝트의 요구 사항을 만족시킬 수 있는 하드웨어를 선정합니다. + + 선택된 하드웨어 (WTR Pro N150 및 상세 사양)이 성능 한계와 기대치를 확인합니다. + + 각 과정의 고려사항 및 근거를 명확하게 기록합니다. + + + + 요구 사항 + + + + 실사용 및 학습 목적에 성능 요구사항을 충족해야 합니다. + + + + 다중 VM/LXC 환경 + + IPS/IDS 학습 목적 운용(통제된 Inbound 트래픽 및 학습 목적의 최소 운영) + + 소규모 사용자를 위한 Web, Media, Application Service 제공(트랜스코딩 및 동시 부하 가능성 매우 낮음) + + + + + + 24/7 운영을 전제로 하고 있으므로 다음과 같은 조건을 만족해야 합니다. + + + + 저전력 + + 저발열 + + 저소음 + + 안정성 + + + + + + 다음과 같은 필수 인터페이스를 포함해야 합니다. + + + + File Server를 위한 다중 SATA Slot + + OS가 설치되어 작동할 SSD Slot + + 트래픽 분산을 위한 두 개 이상의 NIC + + + + + + 예산이 제한되어 있으므로 가능한 저렴한 제품을 선택합니다. + + + + 검토 사항 + + 서버 하드웨어 + + 직접 구성 + + 24/7 운영을 위한 저전력 프로세서는 주로 일체형 메인보드 형태로 판매합니다. 
따라서, 직접 서버를 구성한다고 하더라도 구성이 PC를 조립하는 것에 비하여 자유롭지 않습니다. 또한, 메인보드 및 프로세스를 제외하더라도 케이스 및 여러 가지 인터페이스를 구매하기 위한 추가 비용이 있습니다. + + 기성 제품 + + 기성 제품은 특화된 기능을 저럼한 가격에 좋은 품질로 제공하는 경우가 많습니다. 특히 저전력 프로세서를 사용한 제품은 구성의 자유도 면에서 기성 제품이 오히려 더 좋은 가격에 더 좋은 품질로 구매할 수 있습니다. + + 결론 + + 기성 제품 중에서 프로젝트에 필요한 모든 인터페이스가 포함되어 있던, Aoostar 의  WTR Pro 모델로 서버 하드웨어를 결정하였습니다. + + 프로세서 + + WTR Pro 모델의 프로세서 선택 + + WTR Pro 모델은 Ryzen R7 5825U 모델과 Intel N150 모델 두 가지가 있습니다. WTR Pro의 프로세서를 선택하기 위하여 다음과 같은 점들을 고려하였습니다. + + 성능 조건 + + + + + + + + + + 프로세서 + + + + R7 5825U + + N150 + + + + + + + + + + Core/Thread + + 8/16 + + 4/4 + + + + + + Architecture + + x86-64 + + x86-64 + + + + + + Clock Speed + + 2.0GHz(4.5GHz as Turbo Speed) + + 0.8GHz(3.6GHz as Turbo Speed) + + + + + + L3 Cache + + 16MB + + 6MB + + + + + + TDP + + 15W(25W MAX) + + 6W + + + + + + Price(WTR Pro) + + 379,400 KRW + + 254,900 KRW + + + + + + + + 절대적인 성능은 R7 5825U 모델이 N150 모델보다 훨씬 좋습니다. Core는 2배, Thread는 4배이며 Clock Speed 역시 압도적인 격차가 있습니다. 하지만 프로젝트의 요구사항을 고려할 때, N150 모델 역시 충분히 이를 구현 가능한 성능을 가지고 있습니다. 하지만 N150 모델은 R7 5825U에 비하여 고부하 상황에서 언제나 쾌적한 환경을 제공하는 것이 힘들 수 있다는 것을 예상하고 실제 서비스 운영 시 자원 배분에 각별한 주의가 필요합니다. + + 24/7 운영을 위한 조건 + + 24/7 운영을 위하여 프로세서는 저전력, 저발열, 저소음, 안정성 조건을 만족하여야 합니다. R7 5825U 모델은 N150 모델에 비하여 매우 좋은 성능을 가지고 있습니다. 하지만 N150 모델에 비하여 전력 소모량 및 발열량이 높았습니다. TDP가 약 2.5~4배 가까이 차이가 났으며, 이는 가정에서 24/7로 운영하며 사용할 때 발열 및 전력 소모가 더 높은 것을 의미합니다. 이는 시스템에 안정성에도 큰 영향을 끼칠 것입니다. + + 가격 조건 + + R7 5825U 모델은 N150 모델에 비하여 약 30% 가량 비싸 약 120,000 KRW 정도의 추가 지출이 필요하였습니다. + + 기타 고려 사항 + + + + R7 5825U 모델은 다음과 같은 특정한 하드웨어 이슈를 가지고 있었습니다. + + + + 특정한 작업(트랜스코딩 등)이 N150을 사용할 때에 비하여 불안정함이 보고됨. + + SATA Controller가 Hypervisor 상에서 Path through 할 때 비정상적인 작동이 보고됨. + + 쿨링이 원활하지 못한 미니 PC 구조와 더불어 4Bay HDD를 지원하여, 발열이 쉽게 해소되지 않는 문제 보고 됨. + + + + + + R7 5825U 모델은 N150 모델에 비하여 RAM Slot과 M.2 Slot이 한 개씩 더 많습니다. 
+ + + + 예상 프로세서 할당량 및 우선 순위(N150) + + + + + + + + + + 서비스 이름 + + + + + + 타입 + + + + + + 할당 vCPU (core) + + + + + + 우선 순위 (cpuunits) + + + + + + 할당 이유 + + + + + + + + + + + + OPNsense + + VM + + 2 + + 매우 높음 + + 트래픽 처리, IDS/IPS 학습 및 내부 방화벽, VPN 핵심 기능: 무중단 필수 + + + + + + + + Rocky Linux -FreeIPA  + + + + VM + + 2 + + 매우 높음 + + ID 관리(LDAP/SSO), 내부 CA, DNS 등 핵심 인증 서비스: 무중단 필수 + + + + + + + + AdGuard Home + + + + LXC + + 1 + + 낮음 + + 광고 차단을 목적으로 하는 DNS 필터링 서비스: 무중단 필수, FreeIPA 연동 + + + + + + + + Monitoring Server + + + + LXC + + 1 + + 낮음 + + 백그라운드 상 지속 운영하나, 부하 자체는 낮을 것으로 보임 + + + + + + Proxy Server + + LXC + + 1 + + 보통 + + SSL/TLS 처리시 순간 부하 발생 가능, 지속 운영 + + + + + + File Server + + VM + + 1 + + 보통 + + I/O 작업 및 파일전송 프로토콜, 접속자에 따라 변동 + + + + + + Web Server/WAS + + VM + + 1 + + 낮음 + + 저트래픽 기술 블로그, Bookstack 운영 (DB 서버 분리) 부하는 낮을 것으로 보임 + + + + + + DB Server + + VM + + 2 + + 높음 + + 중앙 집중식 데이터베이스 서버: 무중단 필수 + + + + + + Application Server + + VM + + 2 + + 높음 + + Immich, PeerTube, Vaultwarden 등 미디어 및 주요 서비스(DB 도커는 별도 운영) + + + + + + Kali Linux + + VM + + + + 2(1) + + + + 높음(낮음) + + + + 실행 시 Docker VM 일시 중지 조건부 운영 (+practice client) + + + + + + + + + + vCPU: VM/LXC에 할당되는 가상 CPU 코어 개별 VM/LXC에 pCPU의 Thread 초과 할당 비권장. 하지만 전체 VM/LXC는 초과 가능 cpuunits: CPU 자원 경합 시 값이 높은 쪽이 우선 순위가 높고 더 많은 시간을 할당 (매우 높음: 기본값 2배, 높음: 기본값 1.5배, 보통: 기본값, 낮음: 기본값 0.5배 등으로 설정) cpulimit: 보조적으로 과도한 CPU점유를 막기 위해 절대적인 CPU 사용 상한 제한 socket/core: vCPU의 CPU와 Core 개념, vCPU는 pCPU의 Thread 위에서 실행되며, 각각 soket(vCPU)과 core(vCore)로 구분 + + 결론 + + R7 5825U 모델은 N150 모델에 비하여 막강한 성능을 가지고 있습니다. 하지만 저부하 IPS/IDS 운용과 소규모 인원을 위한 WEB, Media, Application Service 운용, 매우 낮은 트랜스코딩 사용 가능성과 동시 부하 가능성이 매우 낮다는 프로젝트의 요구 사항을 반영하였을 때,  목적 달성에는 N150 모델로 충분할 것입니다. 이는 가장 중요한 부분으로 요구 사항을 넘어서는 고부하 상황 조건을 고려한다면 매우 고성능 서버 프로세서와 고용량 RAM으로도 성능의 부족을 경험할 수 있을 것입니다. 모든 하드웨어 선택은 항상 성능과 요구 사항 사이의 균형을 찾아야 합니다. 따라서 성능과 요구 사항 사이의 균형, 안정성 및 가격, 기타 고려 사항을 종합하여 최종적으로 N150 모델을 선택하였습니다. + + RAM + + RAM의 용량은 서비스의 가용성을 유지하기 위하여 매우 중요합니다. 
RAM 용량이 부족하면 일차적으로 Swap이 일어나 서비스가 느려집니다. 이후 지속적으로 RAM 용량이 부족하면 시스템은 자동으로 OOM Killer 기능을 이용하여 RAM을 차지하는 서비스를 강제로 중지합니다. 이는 서비스의 가용성의 큰 문제를 만듭니다. 따라서 RAM 용량을 항상 적절하게 할당하고 배분해야 합니다. + + 서비스 별 RAM 사용 예상량 + + + + + + + + 서비스 이름 + + 타입 + + 할당 RAM + + 할당 이유 + + + + + + + + + + OPNsense + + VM + + 4GB + + IDS/IPS 학습 및 내부 방화벽/VPN 핵심 기능 + + + + + + Rocky Linux -FreeIPA  + + VM + + 4GB + + ID 관리(LDAP/SSO), 내부 CA, DNS 등 핵심 인증 서비스 + + + + + + AdGuard Home + + LXC + + 0.5GB + + 광고 차단을 목적으로 하는 DNS 필터링 서비스, FreeIPA와 연동 + + + + + + + + Monitoring Server + + + + LXC + + 2GB + + Prometheus, Grafana, Loki, Promtail 기반 시스템 모니터링 및 로깅과 Ansible을 통한 서버 자동 설정 + + + + + + Proxy Server + + LXC + + 1.5GB + + 리버스 프록시 및 SSO 프록시, SSL 인증서 관리, DDNS 스크립트 작동 + + + + + + File Server + + VM + + 4GB + + NFS, WebDAV 등 데이터 저장 및 공유(SATA 패스스루 RAID 관리) + + + + + + Web Server/WAS  + + VM + + 2GB + + 저트래픽 기술 블로그, Bookstack 운영(DB 서버 분리) + + + + + + DB Server + + VM + + 4GB + + 중앙 집중식 데이터베이스 서버(여러 서비스에서 사용, 2~3개 DBMS 도커로 운용, 각 DBMS 별로 1~1.5GB 램 제한) + + + + + + Application Server + + VM + + 6GB + + Immich, PeerTube, Vaultwarden 등 미디어 및 주요 서비스(DB 서버 분리) + + + + + + Kali Linux  + + VM + + (4GB + 1GB) + + + + 실행 시 Application Server VM 일시 중지 조건부 운영 (+practice client) + + + + + + + + Guest RAM + + - + + 28GB(27GB) + + 일반 환경 시 RAM(Kali 실습 시 RAM) + + + + + + PVE + + OS + + 4GB(5GB) + + Btrfs 스냅샷 기능 및 OS 안정성 확보(kali 실습 시 5GB) + + + + + + + + 결론 + + 계획 초기 현 계획보다 적은 기능을 고려하여 16GB RAM으로 충분히 모든 서비스를 제공할 수 있을 것이라 생각했습니다. 하지만 위와 같이 계획이 구체화 되면서 각 서비스를 제공하기 위한 필요 RAM 용량이 늘어났습니다. 따라서 최종적으로 32GB RAM 을 선택하였습니다. 각 서비스가 동시에 고부하 작업을 할 확률이 매우 낮은 점, 각 서비스 별로 RAM 용량의 여유를 둔 점, PVE 자체의 KSM(Kernel Same-page Merging) 기능을 통한 메모리 절약이 가능한 점을 고려하면 32GB의 RAM 용량은 요구 사항을 충분히 충족할 수 있을 것입니다. 하지만 실제로 서비스를 운영하며 RAM 할당량을 모니터링 및 조율할 필요가 있습니다. + + 저장 장치 + + SSD + + OS가 설치되는 부분 및 DB가 실행되는 부분은 IO 속도가 매우 중요하므로 M.2 NVMe SSD를 선택하여 사용하기로 선택하였습니다. OS 및 DB 데이터를 비롯해 각종 로그 데이터를 저장하기 위해 충분한 용량인 M.2 NVMe SSD 1TB  모델을 선택하였습니다. 
+ + 서비스 별 SSD 사용 예상량 + + + + + + + + 서비스 이름 + + 타입 + + 할당 SSD + + 할당 이유 + + + + + + + + + + OPNsense + + VM + + 32GB + + 단기 로그, IPS/IDS 규칙, 캐시 등을 저장 + + + + + + Rocky Linux -FreeIPA  + + VM + + 64GB + + LDAP 데이터베이스, 인증서, 사용자 정보 등을 저장 + + + + + + AdGuard Home + + LXC + + 16GB + + 단기 쿼리 로그 등 저장 + + + + + + + + Monitoring Server + + + + LXC + + 64GB + + 단기 메트릭, 로그 저장. 추후 File Server로 옮기더라도 충분한 용량 부여. + + + + + + Proxy Server + + LXC + + 16GB + + 웹 캐시 등을 저장 + + + + + + File Server + + VM + + + + 32GB + + + + OS 및 Docker 설치 + + + + + + Web Server/WAS  + + VM + + 32GB + + OS 및 Docker 설치(Git 혹은 Application Data는 File Server NFS 이용) + + + + + + DB Server + + VM + + 128GB + + DB는 I/O 성능이 중요, DB 데이터 저장(백업 파일; Dump는 File Server NFS 이용 저장) + + + + + + Application Server + + VM + + 128GB + + DB 데이터는 DB Server, 대용량 파일은 File Server 이용하나 Cache 및 Metadata, Tumbnail 등 저장 + + + + + + Kali Linux + Practice Client + + VM + + 64GB + 16GB + + Kali와 Clients OS 데이터 저장 + + + + + + PVE + + OS + + 100GB + + PVE 자체 용량(자동 지정) + + + + + + 총합 + + + + 692GB/1TB + + SSD 성능 상 필요 예비 공간 100GB, 추후 필요시 약 150GB~200GB 추가 할당 가능 + + + + + + + + HDD + + 이에 반하여 일반 데이터 파일 및 미디어 파일이 저장되는 부분은 IO 속도보다는 용량과 안정성이 중요합니다. 따라서 저장 용량 대비 가격이 저렴한  2TB 3.5" HDD 4개 를 사용하기로 선택하였습니다. + + 이와 동시에 저장장치의 가용성을 확보하기 위하여 여러 RAID(Redundant Array of Independent Disk) 방식을 고려하였습니다. 이 때, RAID 10, 5, 6이 고려하였고, HDD 갯수가 4개인 점, IO 성능의 향상폭이 가장 높은 점, 데이터 복구시 HDD 별 부하량이 가장 낮은 점과 같은 고려사항을 통해 RAID 10 (총 8TB 중 4TB 사용 가능)을 선택하였습니다. + + 결론 + + 다음과 같은 조건 및 고려사항 하에서 WTR Pro N150 모델은 주요 기능을 운영하고 학습 목표를 실행하는데 있어서 큰 무리가 없다고 판단됩니다. 하지만 N150 프로세서의 절대적인 성능의 한계에 대하여 명확하게 파악하고, 실제로 고부하 작업(특히 미디어 스캔 혹은 트랜스코딩 등)의 상황에서 쾌적하지 못한 상황이 발생할 수 있다는 것을 인지하여야 합니다. 하지만 실제 요구 조건 하에서 그런 상황은 거의 일어나지 않을 것이므로 실제 운영 상 성능 여유(Headroom)가 없다는 점만 인지하고 있으면 충분할 것으로 보입니다. 따라서 현재 가지고 있는 N150 서버에 대한 꾸준한 모니터링 및 성능 할당량 조정을 통하여 제한된 하드웨어 조건 하에서 어떻게 가용성을 지킬 수 있는 가에 대한 부분을 학습 할 수 있을 것입니다. + + 최종적인 하드웨어 선택 및 각 구성은  1.2. 하드웨어 구성 에 정리되어 있습니다. 
+ + + + 2025-05-24 - 초안 작성 + + 2025-05-25 - HDD RAID 부분 수정 + + 2025-05-28 - RAM 용량 조정: SSO proxy와 reverse proxy 분리로 인한 RAM 0.5GB 추가/모니터링 서버 RAM 0.5GB 추가/ Proxmox RAM 1GB 감소/ kali 실습시 Practice client 추가로 +1GB/DB 서버 분리, 도커 환경을 위한 VM 변경 및 램 2GB 추가/OPNsense 2GB 감소 + + 2025-05-29 - 예상 프로세서 할당량 및 우선 순위 추가 / SSD 용량 1TB로 상향(로그 및 DB 데이터) + + 2025-06-16 - AdGuard Home LXC 내용 추가 및 RAM 용량 조정/ Proxy Server 0.5GB 추가/ AdGuardHome LXC 0.5GB 추가/PVE 1GB 감소 + + 2025-06-17 - 부록 1.1. 문서 작성 가이드라인 에 따라 본문 재작성 + + 2025-06-19 - Rocky Linux 내용 추가 + + 2025-06-20 - 날짜 표기 변경 + + 2025-06-24 - 각 VM/LXC 별 SSD 용량표 추가 + +2.2. 네트워크 설계 +목표 + + + + 네트워크 설계 중 물리적인 한계 및 하드웨어적인 한계를 파악하고 이를 극복할 수 있는 기술적인 방안을 선택합니다. + + 최종적으로 설계된 네트워크 구조의 잠재적인 위험성을 파악하고 장애 발생시 어떻게 대처 할지 구상합니다. + + 각 과정의 고려사항 및 근거를 명확하게 기록합니다. + + + + 요구사항 + + + + 집안 네트워크 회선의 물리적인 구조로 인한 제약 사항을 파악 및 회피합니다. + + 주요 네트워크 장비의 VLAN 미지원에 대응하여 L2 통신간 E2E 보안 및 클라이언트 격리 방안을 모색합니다. + + 방화벽 및 IPS/IDS로 Inbound, Outbound 통신을 중앙에서 감시 및 제어할 수 있도록 합니다. + + L2 통신과 L3 통신의 차이를 명확히 파악하여 각 통신별 보안 대책을 수립합니다. + + + + 검토사항 + + 물리적 네트워크 구조 구성도 + + + + 물리적 제약 사항 + + + + Terminal Box 내의 전원의 부재로 인하여 기반 네트워크 장비 PoE 사용(새로운 Node를 추가 불가) + + 각 방과 Terminal Box 사이의 물리적인 회선이 하나 + + 주요 네트워크 장비(T5004, AX6000M, H5008G)의 VLAN 미지원으로 인하여 PVE 밖 Clients에 VLAN 적용 불가 + + T5004의 DHCP Relay 미지원으로 OPNsense의 DHCP 운영 불가 및 서브네팅 활용 불가 + + + + 이중 게이트웨이 구조 + + 제약 사항 + + OPNsense가 Gateway가 되어 DHCP를 운영하기 위해서는 Terminal Box에 위치하거나, DHCP Relay 기능을 활용하여야 합니다. 하지만 Terminal Box에 전원이 부재한 제약 사항으로 인하여 불가능합니다. 또한 T5004는 VLAN을 미지원하고, DHCP Relay 기능 역시 미지원하여 불가능합니다. 이는 OPNsense가 직접 DHCP를 운영하고 Gateway로 작동하는 것이 불가능함을 뜻합니다. + + 회피 방안 및 결론 + + T5004 내에서 DHCP 운영시 DHCP의 기본 Gateway를 변경할 수 있는 점에서 착안하여, OPNsense를 T5004의Untagged VLAN의 DHCP 기본 Gateway로 설정합니다. 이때 OPNsense는 Static IP를 할당하여 자신의 Gateway를 T5004로 설정하는 이중 게이트웨이 구조를 운영할 수 있습니다. 이 경우 다음과 같은 예상되는 위험이 존재합니다. 
+ + OPNsense의 IP가 Static IP가 아닌 경우, 자기 자신을 게이트웨이로 지정하여 Routing loop 현상이 발생 가능 + + 하지만 OPNsense의 IP가 Static IP로 설정되어 T5004로부터 IP를 할당받는 경우가 아니라면 Routing loop의 위험성은 없습니다. 동시에 모든 L3 통신은 OPNsense를 거치게 됩니다. 따라서, 물리적으로 T5004 바로 뒤에 방화벽 OPNsense를 두지 못하는 상황에서 이러한 이중 게이트웨이 구조는 가장 현실적으로 L3 통신을 통제할 수 있는 방안입니다. + + 또한 PVE 내부의 있는 서버 및 서비스들은 vmbr을 통한 VLAN 적용이 가능하므로 OPNsense의 라우팅 기능을 활용하여 라우팅 테이블을 작성하여야 합니다. + + + + Untagged VLAN - Untagged VLAN 통신: ARP를 통한 unicast 통신 + + Untagged VLAN - Other VLAN 통신: OPNsense가 내부망의 게이트웨이이므로, OPNsense 자체 라우팅 + + Untagged VLAN - WAN 통신: OPNsense가 내부망의 게이트웨이이므로, OPNsense에서 T5004로 라우팅 + + Other VLAN - WAN 통신: OPNsense가 내부망의 게이트웨이이므로, OPNsense에서 T5004로 라우팅 + + + + OPNsense의 NIC 구성 방안 + + 제약 사항 + + OPNsense에 할당 가능한 물리 NIC는 2개입니다. 그리고 T5004는 LACP(Link Aggregation Control Protocol)를 지원하지 않습니다. 이러한 상황 속에서 설계 목표 중 하나였던 WAN 통신은 NIC0에서, LAN(VLAN 포함) 통신은 NIC1에서 제어하여 병목을 최소화하고, 모든 L3 통신을 OPNsense 중앙에서 통제하여야 합니다. + + 회피 방안 및 결론 + + + + 위 제약 사항을 해결하기 위하여 다음 다이어그램과 같이 두 가지 방안을 고려하였습니다.  + + + + NIC0과 NIC1를 vmbr0에 통합하여 OPNsense 내 vtnet0과 연결하여 단일 인터페이스로 관리 + + NIC0과 NIC1 각각 vtnet0, vtnet1로 할당하여 따로 관리 + + + + 방안 1은 OPNsense에서 Untagged VLAN에 대하여 단 하나의 인터페이스와 IP만을 관리하므로 설정이 단순하고, LACP를 지원하지 않더라도 Active-backup 설정시 자동으로 NIC 장애 대처가 가능한 점이 있습니다. 하지만 프로젝트의 목표였던 WAN과 LAN 통신의 NIC 분리가 불가능합니다. 이는 OPNsense의 단일 vtnet으로 트래픽이 전달되며 vmbr0은 가상 L2 장비로 NIC를 구별하지 않기 때문입니다. 또 Active-backup 설정시 평상시에는 하나의 주 물리 NIC만 사용되므로 NIC 별 효과적인 트래픽 분산을 하는 것이 불가능합니다. + + 2번 방안은 두 물리 NIC에 각각 vmbr에 연결하여, OPNsense에 할당하고 각각 별도의 인터페이스 및 IP로 관리합니다. 따라서 OPNsense의 L3 인터페이스를 통해 물리적인 NIC 경로를 WAN 트래픽과 LAN 트래픽을 분리하여 사용할 수 있습니다. 이를 통해, vtnet0과 vtnet1에 각각에 방화벽 규칙 등을 적용하여 정교한 통제가 가능해지며, 두 물리 NIC를 전부 적극적으로 활용할 수 있습니다. 하지만 설정이 1번 방안에 비하여 더 복잡해집니다. 각 물리 NIC가 장애 시, 각 NIC가 가진 역할이 중단될 수 있는 SPOF가 됩니다. 또한 명시적으로 라우팅 정책을 설정하지 않을 시, 비대칭 라우팅으로 인한 네트워크 장애가 발생할 수 있습니다. + + 최종적으로는 다음과 같은 이유로 방안 2를 선택하였습니다. 
+ + + + L2/L3 통신의 차이를 명확하게 하고, OPNsense의 라우팅, 방화벽 정책, 인터페이스 관리에 대한 더 깊은 이해 가능 + + WAN 통신은 NIC0에서, LAN(VLAN 포함) 통신은 NIC1에서 처리한다는 초기 설계 목표를 명시적으로 L3상에서 구현 가능 + + 두 개의 별도 물리 NIC를 적극적으로 활용하여, 성능의 이점을 가짐 + + 이중 게이트웨이 구조의 명확화 + + + + Zero Trust 원칙 하 클라이언트 방화벽 정책 제어 + + 제약 사항 + + T5004를 비롯한 홈 네트워크 장비는 VLAN 및 Subnet을 지원하지 않습니다. 이는 PVE 밖의 클라이언트에 대한 유연한 논리적 L2 망 분리가 불가능함을 뜻합니다. LAN 통신은 L2 통신으로, L3 장비인 방화벽(OPNsense)을 거치지 않고 Gateway 또한 통과하지 않습니다. L2 통신에서는 클라이언트가 직접 ARP broadcast를 통해 전송 대상의 MAC 주소를 획득하여 unicast로 통신합니다. 따라서 VLAN과 Subnetting이 불가능하므로 방화벽을 통한 ACL(Access Control List) 관리는 불가능합니다. 이는 내부망을 위한 보안 대책의 필요성을 높입니다. 다만, PVE 내부의 클라이언트에 대하여서는 VLAN을 통한 L2 망 분리가 가능합니다. + + 회피 방안 및 결론 + + 외부자가 쉽게 네트워크를 접근할 수 없도록 WPA3 프로토콜을 이용한 통신 암호화가 필요합니다. 또한 VLAN 기능은 없지만, T5004에 존재하는 Guest network 기능을 활용하여 외부인이 사용 가능한 네트워크를 격리할 수 있습니다. Guest network는 LAN 통신을 막고, WAN 통신만 가능하도록 하는 기능입니다.  각 VLAN(Untagged VLAN 포함) 내부 통신의 ACL 정책은 클라이언트 단에서 처리되어야 합니다. 이 때, Zero Trust 원칙 하 각 서버 및 클라이언트의 iptables, Windows 방화벽 등 자체 방화벽 정책을 철저하게 설정하여 최소 권한 원칙을 통해 꼭 필요한 포트와 IP로 부터의 접근만을 허용해야 합니다. 이 때 홈 네트워크 특성상 관리해야 할 서버 및 클라이언트의 숫자가 크지 않으므로 이러한 조치들로도 과도하게 복잡해지지 않고 충분히 관리가 될 것으로 보입니다. 또한 Ansible 등의 서버 설정 자동화 툴 도입 역시 관리 피로를 낮출 수 있습니다. + + 각 서버 및 클라이언트의 대역대를 VLAN으로 나누어 각 통신을 L3 장비인 OPNsense에서 담당할 수 있도록 한다면 더욱 확실하게 이러한 문제를 해결할 수 있을 것으로 보입니다. 특히 WTR Pro 내부 VLAN2, 3, 4에 접근 가능한 IP를 OPNsense에서 직접 제어할 수 있기 때문에 다음과 같은 정책을 시도할 것입니다. + + + + 내부 접근 가능 IP 제한(특정 Client IP) + + 외부 접근 시 VPN 이용 접근만 가능 + + 특정 서비스(Reverse Proxy 등)만, 서비스 목적에 따라 필요한 범위 내에서 전체 접근 허용 + + + + 확인 결과 T5004에 OpenWRT 적용이 가능한 것을 확인하였습니다. 현재 계획 대로 On-premise 환경을 구축한 이후, T5004에 대한 OpenWRT 적용 및 전체 네트워크 VLAN 적용을 테스트 해볼 수 있을 것으로 보입니다. 다만 VLAN을 통한 망 분리를 넘어, DMZ (Demilitarized Zone) 를 구성하는 경우 홈 네트워크 특성 상 24/7 관제가 불가능하며 CERT(Computer Emergency Response Team) 운용 역시 불가능합니다. 따라서 해당 DMZ를 운영시 오히려 관리 지점 및 보안 취약점을 늘리는 결과를 만들 수 있습니다. 따라서, OpenWRT를 적용한 프로젝트를 진행하더라도 추가로 DMZ를 운영하지는 않을 것입니다. + + 통신 보안 및 암호화 + + 제약 사항 + + WAN 통신 시에는 외부 CA를 통한 TLS 인증서를 발급 받아 통신을 비교적 쉽게 암호화 할 수 있습니다. 
하지만 내부망 통신시에는 외부 TLS 인증서를 통한 암화가 불가능합니다.  또한 내부망을 위한 일부 특정 취약 프로토콜들(NFS, SMB 등)은 TLS를 통한 암호화가 불가능하며 평문 통신을 하므로 이를 암호화 할 방안이 필요합니다. LAN을 위한 특정 프로토콜들은 WAN에서 접근 시 사용이 불가능하기에 이를 위하여 VPN 역시 도입이 필요합니다. + + 회피 방안 및 결론 + + 기본적으로 WAN에서 접근 시 웹 서비스를 위한 https(443)와 VPN을 위한 포트만을 T5004에서 NAT로 개방합니다. 이를 통해 WAN에서 LAN을 위협하는 포트 스캔 등은 막을 수 있습니다. 이는 OPNsense의 부하 역시 줄여줍니다.  또한 WAN에서 IP를 통한 직접 접근을 막고, Domain을 통한 접근만 허용하며, Cloudflare를 통한 DDoS 방어 서비스를 사용한다고 모든 웹 서비스는 Reverse Proxy를 거치게 한다면 더욱 안전한 WAN 통신이 가능할 것으로 보입니다. T5004 내에서도 역시 외부 관리페이지 접속 차단, UPnP등 보안 취약점 기능 전체 차단, 강력한 PW 사용, 최신 펌웨어 유지 등의 조치가 필요할 것으로 보입니다. + + WAN 통신 뿐 아니라 LAN 통신 시 암호화를 적용하기 위하여 FreeIPA를 통한 내부 CA(TLS 암호화) 구축 및 Kerberos(NFS, SMB 등 암호화 및 추후 SSO 구현 토대) 인증 구조를 구현하고자 합니다. 또한 장기적으로 LDAP 및 SSO를 구현하는 데 있어 Kerberos가 사용될 수 있으므로 통합하여 FreeIPA를 구축합니다. FreeIPA 구축 시, 내부 root CA를 통한 내부 도메인용 인증서 직접 서명/발급/갱신/폐기 등을 자동화 할 수 있는 스크립트를 작성하는 것도 중요합니다. + + FreeIPA는 자체 DNS 기능 역시 포함하고 있으므로 Local DNS를 구축하여 외부와 내부에서 같은 Domain을 통한 접근이 가능하도록 Split Horizon DNS를 구현하고자 합니다. 이때 Adguard Home을 통한 광고 및 악성 Domain을 차단하고 FreeIPA와 연동할 수 있습니다. 또한 Local DNS에 없는 Domain을 요청 받은 경우 신뢰할 수 있는 외부 DNS로 DNS 포워딩을 하여 구현 가능합니다. DoH를 통하여 Cloudflare Public DNS를 이용합니다. + + WAN을 통한 내부 서버 및 클라이언트 관리는 지양하고, 필요시 VPN을 통해 관리합니다. 또한 LAN 서비스(AdGuard Home 등)을 이용시 역시 VPN을 통한 LAN 접속 후 이용합니다. + + SFOP(단일 실패 지점) 문제 + + 제약 사항 + + OPNsense와 FreeIPA는 홈 네트워크에서 가장 중요한 부분을 차지하고 있는 서비스입니다. 이 서비스들에 이상이 생긴다면, 다른 하드웨어 장비가 이상이 없더라도 전체 네트워크가 중단될 수 있습니다.  + + + + OPNsense: 게이트웨이 + 방화벽 + VPN 서버 + + FreeIPA: 내부 CA + 내부 DNS + KDC + + + + 물론 L2 통신은 ARP를 통한 MAC 기반 unicast 통신으로, 이들에 문제가 있더라도 가능하지만 Outbound/Inbound 통신이 불가능해 지는 시점에서 홈 네트워크의 가용성에 치명적인 타격을 줄 수 있습니다. + + 회피 방안 및 결론 + + 가장 좋은 해결 방안은 Scale out을 통한 물리적 HA 구성이겠으나 이는 예산의 문제로 현재 불가합니다. 또한 프로젝트 목표 역시 상용 서비스 수준을 따라가는 것이 아닌 학습과 체험 목적 역시 존재하므로 SPOF 문제는 어느 정도 타협이 필요합니다. 따라서 이러한 문제를 최대한 회피하기 위하여 다음과 같은 방안을 모색했습니다. 
+ + + + OPNsense 및 FreeIPA의 설정 백업 (각 서비스 별 기능 및 Gitea, Ansible 활용) + + Proxmox 내 기능 활용하여 VM 스냅샷 백업 + + 모니터링 서버를 이용하여 이상 발생시 알림 발송(단 Outbound/Inbound 통신 불가시 메일 발송 등은 불가함) + + Proxmox 내에서 주기적으로 각 VM의 상태를 체크하여 문제 발생 시 자동으로 재시동, 문제 지속 시 스냅샷으로 VM 복원을 진행 + + + + 다음과 같은 방안들이 자동화 된 스크립트에서 작동한다면 RTO(목표 복구 시간)을 최소화 하고 RPO(목표 복구 지점)의 관리가 가능할 것으로 예상됩니다. + + VM/LXC/Docker/Client 간 통신 + + PVE는 기본적으로 Linux Bridge(vmbr) 및 vNIC(vtnet)을 통해 각 VM/LXC에 할당이 가능하며, Docker 역시 dockerd를 통해 내부 Bridge를 이용하여 통신하는 것이 가능합니다. + + 이를 통하여 서버 망과, 특수 목적 망(Kali 및 실습 목적 LAN)은 물리적 NIC에 연결되지 않은 vtnet과 vmbr을 할당하여 OPNsense 내에서 VLAN을 통해 격리하고자 합니다. 모든 VLAN은 OPNsense를 게이트웨이로 갖습니다. + + Docker의 통신은 특수한 경우에만 macvlan 기능을 사용하여 직접 IP 주소를 부여합니다. 이외에는 host 내부에서 Bridge 네트워크를 통해 통신하며, 외부 통신 필요시 host의 IP 주소와 port를 기반으로 통신합니다. + + 결론 + + 다음과 같은 검토 사항을 통하여 볼 때 현실적인 물리적/하드웨어적 제약 하에서도 원하는 기능 및 구조를 구현하는 것에는 무리가 없다고 여겨집니다. 하지만 제약 조건을 회피할 수 있는 것이지, 한계점이 사라지는 것은 아니기에 철저한 정책 수립과 꾸준한 모니터링 및 로깅, 그리고 조정이 필요한 것은 인지하여야 합니다. 이 과정을 통하여 언제 어떠한 프로토콜이 어떠한 방식으로 어떻게 작동하는지에 대하여와 전반적인 네트워크 통신의 구조에 대하여 깊은 이해를 하고자 합니다. + + 최종적인 네트워크 설계 구조는 1.3. 최종 목표 아키텍처 에 정리되어 있습니다. + + + + 2025-05-25 - 초안 작성 시작 + + 2025-05-28 - 초안 작성 완료 + + 2025-05-30 - VLAN 관련하여, Hypervisor 내부 서버에 대한 VLAN 분리 적용 + + 2025-06-02 - OPNsense의 NIC 구성 방안에 대한 내용 추가 + + 2025-06-17 - 부록 1.1. 문서 작성 가이드라인 에 따라 본문 재작성 + + 2025-06-20 - 날짜 표기 변경 + + 2025-06-23 - VLAN 표기 방식 수정 + +2.3. 시스템 선정 +목표 + + + + 하나의 하드웨어(WTR Pro) 안에서 가상화 기능을 통해 각 사항을 중요도와 특성에 따라 논리적으로 분리 구축합니다 + + 요구 사항을 SoC 원칙 하 나누어 이를 효과적으로 달성할 수 있는 시스템을 식별 및 선정합니다. + + 선정된 OS와 서비스를 논리적으로 분리할 가상화 기술의 특징 및 장단점을 명확하게 하여 적재적소의 기술을 선택합니다. + + 각 과정의 고려사항 및 근거를 명확하게 기록합니다. + + + + 요구사항 + + + + 하나의 하드웨어 안에서 여러 시스템이 격리되어 실행될 수 있는 Hypervisor를 사용합니다. + + 각 시스템 다음과 같은 프로젝트 요구 기능을 구현할 수 있어야 합니다. + + + + 각종 서비스 제공을 위한 DB, local DNS, CA, Proxy, Firewall 등 서버 및 네트워크 운영 + + 파일, 미디어, 사진, 어플리케이션 서버 등의 개인 클라우드 환경 구축 + + 블로그, wiki 등의 웹 서비스 호스팅 + + IPS/IDS, Kali 등을 통한 정보보안 실습 환경 제공 + + + + + + 위와 같은 요구 기능들이 SoC 원칙 하에서 어떻게 분리되어 구축할지 정의해야 합니다. 
+ + VM/LXC/Docker의 장단점 및 논리적 분리 정도 차이를 명확히 파악하여, 각 기술별 보안 대책을 수립합니다. + + + + 검토 사항 + + 시스템 선정에 가장 중요한 핵심 고려 사항 + + + + Open Source로 무료 사용이 가능할 것 + + 무료 버전의 기능상 제약이 없거나 적을 것 + + 실행 사양이 높지 않을 것 + + 사용자가 많아 정보를 찾기 쉬울 것 + + + + Hypervisor + + 프로젝트가 성공적으로 구현되기 위해서 가장 중요한 것은 하나의 하드웨어에서 여러 가지의 시스템이 동시에 논리적으로 격리되어 작동할 수 있어야 한다는 것입니다. 이를 위한 개념이 바로 Hypervisor입니다. Hypervisor는 하나의 하드웨어 자원을 가상으로 논리적 분할하여, 각 시스템들이 마치 별도의 독립된 하드웨어에서 시스템이 동작하는 것 처럼 작동할 수 있도록 지원합니다. 이 중 직접 HW 위에서 동작하는 Hypervisor를 Type 1이라고 부릅니다. + + Type 1 Hypervisor OS는 MS의 Hyper-V, VMWare의 ESXi, Xen, Proxmox VE 등 여러 종류가 있습니다. 이 중 프로젝트에 가장 부합하는 Hypervisor를 찾기 위해 각 시스템을 비교하고, 다음과 같은 사항을 고려했습니다. + + 시스템 비교 + + + + + + + + 시스템 + + Hyper-V + + ESXi + + Xen + + Proxmox + + + + + + + + + + 아키텍처 + + Windows Server + + 베어 메탈 + + 베어 메탈(Dom0 필요) + + KVM(Debian Linux base) + + + + + + 라이센스 + + + + Windows server에 포함 + + (무료 버전 지원 중단) + + + + + + 무료/유료 + + (기능 차이 존재) + + + + Open Source (상용 버전 존재) + + + + Open Source + + (모든 기능 무료) + + + + + + + + UI + + + + GUI, CLI, Web UI + + 모두 지원 + + + + + + CLI, Web UI 지원 + + + + + + CLI 기본 지원 + + GUI는 서드파티 지원 + + + + CLI, Web UI 지원 + + + + + + + + 핵심 기능 + + + + + + Windows/Linux guest 실시간 마이그레이션, HA 지원 등 + + + + + + Windows/Linux/Mac 등 다양한 Guest 실시간 마이그레이션, HA 지원 등 + + + + + + Windows/Linux/BSD 등 다양한 Guest, 실시간 마이그레이션, HA 지원 등 + + + + + + Windows/Linux/BSD/LXC 등 다양한 Guest 실시간 마이그레이션, HA, 통합 백업 및 복원 등 + + + + + + + + + + 최소 사양 + + + + + + 높음 + + + + + + 중간 + + + + + + 중간 + + + + + + 낮음 + + + + + + + + + + 추가 고려 사항 + + + + Linux 기반일 것 (다른 서비스와 호환성 고려) + + + + 결론 + + 각 시스템에 대한 비교와 고려 사항을 검토한 결과 가장 적합한 Hypervisor OS로 Proxmox VE 를 최종적으로 선택했습니다. + + 내부 라우팅 및 Firewall + + 철저한 SoC 원칙에 따른다면 라우팅과 Firewall 역시 분리되어야 하지만, 제한된 조건 하에서 WAN-LAN 통신 간 모든 패킷이 통과하는 내부 게이트웨이에 Firewall이 같이 있는 것이 큰 문제는 아닐 것이라 판단했습니다. 실제로 분리하여 구축한다고 하더라도, Friewall은 주로 게이트웨이 바로 앞에 물리적으로 위치하게 되며, 이들의 분리는 SoC에 따른 것이라기 보다는 SPOF를 막기 위한 것입니다. + + 내부 라우팅과 Firewall, IPS/IDS 기능을 모두 지원하는 OS로는 크게 pfSense와 OPNsense 두 가지입니다. 
pfSense가 2006년 출시되었고, OPNsense는 2014는 pfSense로 부터 포크되어 출시되었습니다. 같은 뿌리를 가지고 있는 만큼 두 OS는 비슷한 점도 많지만, 다른 점도 생겨났습니다. 이 중 프로젝트에 가장 부합하는 OS를 찾기 위하여 다음과 같은 사항을 고려했습니다. + + 시스템 비교 + + + + + + + + 시스템 + + pfSense + + OPNsense + + + + + + + + + + 출시 + + 2006 + + 2014 + + + + + + 아키텍처 + + FreeBSD + + FreeBSD + + + + + + 라이센스 + + CE/Plus 이원화 (기능 차이 존재) + + 완전한 Open Soruce (기술지원 버전 존재하나 기능은 동일) + + + + + + UI + + 전통적이고 안정적, 간결하며 페이지 리로드 적음 + + 현대적이고 사용자 친화적, AJAX를 활용한 실시간 업데이트 + + + + + + 핵심 기능 + + + + Firewall, IPS/IDS, NAT, VPN(OpenVPN/IPsec), 강력한 플러그인 지원 (pfBlockerNG, Suricata 등) + + + + + + Firewall, IPS/IDS, NAT, VPN(OpenVPN/WireGuard), 2FA + + + + + + + + 업데이트 주기 + + + + CE는 주기가 길며 안정성 중심, Plus는 더욱 잦은 업데이트 + + + + 연 2회 주요 릴리즈를 포함하여 잦은 업데이트 + + + + + + 커뮤니티 + + 방대하고 활발한 커뮤니티 + + 빠른 성장 중인 커뮤니티와 개발팀과 원할한 소통 + + + + + + 사양 + + 낮음 + + 낮음 + + + + + + + + 추가 고려 사항 + + + + 사용자 친화적인 UI를 가지고 있을 것 + + + + 결론 + + 본 프로젝트는 Firewall 및 IPS/IDS의 모든 고급 기능(추가적인 고급 플러그인 사용 등)을 사용하는 것을 목표로 하는 것이 아닙니다. 이러한 Firewall 및 IPS/IDS의 기능을 체험하고, 경험하며 익숙해지는 것이 목표입니다. 이런 상황 속에서 두 OS의 차이가 크게 없다면 Open Source이고 기본 기능의 제한이 없는 쪽을 선택하는 것이 맞다고 판단했습니다. 또한 아직 익숙하지 않은 상황에서 사용자 친화적인 UI는 기술 학습 곡선을 조금 더 완만하게 만들 수 있을 것으로 기대합니다. 따라서 기본 기능의 제한이 없고 조금 더 사용자 친화적인 UI를 갖춘 OPNsense 를 선택했습니다. + + LDAP, 인증, 감사 서비스 및 DNS + + 프로젝트를 구체화 하는 과정 속에서 통합 ACL 관리 및 SSO 인증, 내부 CA를 통한 통신 보안을 구현하고 추가적으로 내부 DNS를 운영하기 위한 통합 관리 솔루션이 필요합니다. 물론 각각의 기능을 별도로 분리하여 구축하는 것도 좋지만, 이는 시스템의 관리 포인트를 무척 늘리고 유지 보수를 어렵게 합니다. 앞서 언급했듯 본 프로젝트의 주된 목표는 각각의 기능을 체험하고, 경험하고 익숙해 지는 것입니다. 각각의 기능을 별도로 구축하는 것 보다는 이러한 통합 인증 서버를 구현할 수 있는 시스템을 선택하는 것이 유리하다고 판단했습니다. 이러한 시스템은 크게 FreeIPA, Samba AD DC, Microsoft Active Directory 등이 있었습니다. 
+ + 시스템 비교 + + + + + + + + + + 시스템 + + + + FreeIPA + + Samba AD DC + + Microsoft Active Directory + + + + 일반 서버 OS 하 + + 개별 솔루션 조합 + + + + + + + + + + + + 아키텍처 + + Linux/Unix 중심 + + Active Directory 호환 + + Windows 중심 + + 필요 기능만 조합 + + + + + + 라이센스 + + + + Open Source + + + + Open Source + + 유료 + + 서비스 별로 다름 + + + + + + UI + + + + CLI(ipa 명령어), Web UI 지원 + + + + + + CLI(samba-tool), Windows RSAT 지원 + + + + + + CLI, GUI 지원 + + + + 통합 UI 부재 + + + + + + + + 핵심 기능 + + + + + + LDAP, Kerberos, DNS, CA, 정책 관리, OTP 등 지원 + + (Linux/Unix 환경 중심) + + + + + + LDAP, Kerberos, DNS, 정책 관리, AD 스키마 호환 등 지원 (Winodws/linux/unix + + 호환 중심) + + + + + + LDAP, Kerberos, DNS, CA, 정책 관리, ADFS 등 지원 (Windows 중심) + + + + + +   + + 필요한 프로토콜 및 + + 기능 별도로 구현 + + + + + + + + + + 설정 및 + + 유지보수 난이도 + + + + + + 높음 + + + + + + 높음 + + + + + + 중간 + + + + + + 매우 높음 + + + + + + + + + + 최소 사양 + + + + 중간 + + 중간 + + 높음 + + 가변적 + + + + + + + + 추가 고려 사항 + + + + 현재 목표 구조는 주로 Linux와 Unix를 사용하므로 이에 대한 호환성이 좋을 것 + + + + 결론 + + 현재 목표로 하는 홈 네트워크 환경은 주로 Linux와 Unix를 기반으로 설계되어 있습니다. Samba AD DC나 Microsoft Active Directory 같은 경우 Linux와 Unix 자체에 초점이 맞춰진 것이 아닌, Linux/Unix - Windows 간 호환성 혹은 Windows 자체에 초점이 맞춰져 있었습니다. + + 거기에 개별 솔루션 조합은 아무리 본 프로젝트가 학습과 경험에 조금 더 초점을 맞춘다고 할지라도, 중앙 관리 및 모니터링이 불가능한 점은 너무나 많은 관리 포인트를 만들어 프로젝트 진행을 어렵게 만들 것이라 판단했습니다. + + 따라서 본 프로젝트에 가장 잘 어울리는 시스템으로 Rocky Linux 위의 FreeIPA 를 선택했습니다. 최대한 비영리 Open Source를 사용하고자 OS로 Rocky Linux를 선택하였습니다. 단, FreeIPA를 통한 LDAP를 구현하더라도 관리의 편의를 위하여 별도의 UID/GID 대역 매트릭스를 운영합니다. + + 또한, DNS를 통한 광고 필터링이 가능한데 이를 FreeIPA에 직접 구현하는 것은 관리상 큰 어려움이 있습니다. 따라서 이를 쉽게 구현하기 위하여  AdGuard Home 서비스를 LXC로 별도 구축하여 FreeIPA의 DNS와 연결합니다. + + 중앙 모니터링 및 로깅, 서버 설정 자동화 + + 프로젝트의 핵심 하드웨어인 WTR Pro의 경우 N150 프로세서와 32GB RAM을 사용하고 있어 성능의 여유(headroom)이 매우 한정적인 것이 사실입니다. 이러한 제약조건 속에서 프로젝트를 성공적으로 구현하기 위해서는 성능의 미세한 조정이 선택이 아닌 필수입니다. + + 넉넉한 성능의 하드웨어를 가지고 있다면 이러한 중앙 모니터링의 중요성이 낮아질 수 있으나, 현재 상황으로서는 매우 중요한 위치를 가지고 있습니다. 또한 여러 시스템이 동시에 올라가 작동하는 네트워크 구조 상 디버깅을 위한 로그를 중앙에서 관리하는 것이 중요합니다. 
+ + 거기에 일관된 서버 설정을 위하여 Matrix를 관리하는 것 뿐 아니라 자동화 된 도구가 필요했습니다. 이를 위하여 모니터링 및 로깅을 동시에 할 수 있는 시스템을 다음과 같이 선정하였습니다. + + 모니터링 및 로깅, 알람, 자동화 기능은 특별한 권한이 필요하거나 엄격한 논리적 분리가 필요한 기능이 아니므로 성능의 부하를 줄이기 위하여 LXC로 구축합니다. + + 추가 고려 사항 + + + + 각 서버의 상태를 한 곳에서 모니터링 할 수 있을 것 + + 각 서버의 로그를 한 곳에서 일괄 확인할 수 있을 것 + + 문제 발생 시 자동으로 알림을 보낼 수 있을 것 + + 각 서버의 설정을 자동화된 관리가 가능하게 할 것 + + + + 결론 + + 다음 고려 사항을 확인하였을 때, 이 부분은 실무 환경에서 가장 많이 흔히 사용하는 조합을 가져오기로 하였습니다. 홈 네트워크 구조 상 무시 받기 쉬운 로깅, 및 모니터링 시스템이지만 본 프로젝트의 목적인 경험과 학습, 그리고 미세한 성능 조정을 위하여 다음과 같은 시스템을 하나의 서버에서 운영하기로 결정했습니다. + + + + 모니터링: Prometheus (성능 모니터링 결과 수집)와 Grafana (대시보드 및 시각화) + + 로깅: Loki (수집 로그 관리)와 promtail (로그 수집) + + 알림: Alertmanager + + 서버 설정 자동화: Ansible + Semaphore (경량 Web UI) + + + + Proxy 및 DDNS + + 안정적인 웹 서비스 접근 및 SSO를 위하여, Reverse Proxy 및 SSO Proxy 관련 시스템을 하나로 묶어 구축하려고 합니다. 웹 요청이 있을 경우 Reverse Proxy는 SSO Proxy에게 인증 정보를 요청하고, SSO Proxy는 idP(identity Provider)를 통해(혹은 직접) Kerberos(혹은 다른 프로토콜)과 연동하여 인증 여부를 포함한 정보를 Reverse Proxy로 전달, 마지막으로 Reverse Proxy에서 요청을 웹 서비스로 전달하게 됩니다. DDNS는 WAN과 연결된 공인 IP를 탐지하여, 공인 IP 정보가 바뀌면 외부 DNS에 IP정보를 자동으로 전달하여 갱신 하는 역할을 합니다. + + DDNS에 경우 API(주로 DNS에서 제공)를 통한 간단한 스크립트로 구현이 가능하나, Reverse Proxy와 SSO Proxy(+idP)를 구현하기 위하여 주로 다음과 같은 시스템을 사용합니다.  + + Proxy server 역시 Monitoring server와 마찬가지로 특별한 권한이 필요하거나 엄격한 논리적 분리가 필요한 기능이 아니므로 성능의 부하를 줄이기 위하여 LXC로 구축합니다. 
+ + 시스템 비교 + + + + + + + + 시스템 + + Apache HTTP server + mod_auth_gssapi + + Nginx + oauth2-proxy + keycloak(idP) + + Authentik + + + + + + + + + + 아키텍처 + + + + 단일형(+모듈) + + + + 분리형 + + 통합형 + + + + + + 라이센스 + + 무료 + + Open Source + + 무료(개인 사용자 한정) + + + + + + UI + + 설정 파일 외 관리 UI 부재 + + + + Nginx/oauth2-proxy: 설정 파일 + + Keycloak: Web UI 지원 + + + + Web UI 지원 + + + + + + + + 핵심 기능 + + + + + + Web, Reverse Proxy, Kerberos 직접 인증 및 위임 등 지원 + + + + + + Nginx: Reverse Proxy + + oauth2-proxy: OIDC/OAuth2, idP 등 지원 + + keycloak: Kerberos 인증, OIDC/SAML 토큰 발급, MFA 등 지원 + + + + + + idP(OIDC, SAML, LDAP), Kerberos 인증, 정책 엔진, + + 사용자/그룹 관리 지원, MFA, + + Application Proxy 등 지원 + + + + + + + + 설정 및 유지 보수 난이도 + + 중간 + + 높음 + + 높음 + + + + + + 최소 사양 + + 낮음 + + 중간 + + 중간 + + + + + + + + 추가 고려 사항 + + + + SSO라는 이해하기 힘든 개념에 대해 체계적으로 학습 가능하게 할 것 + + + + 결론 + + 단순한 구현이 목표라면 Apache를 사용하는 것이 가장 간단할 수 있을 것입니다. 하지만 프로젝트의 목표는 구현을 넘어 학습과 개념의 체득에 있습니다. + + 점점 SSO의 중요성이 부각되는 현대 사회에서 단순 Apache 사용은 앞으로 더더욱 입지가 줄어들 것입니다. 또한 Reverse Proxy와 SSO Proxy, 추가로 idP라는 복잡한 개념을 이루는 각각의 기능들을 이해할 수 있다면, 어떤 시스템을 사용한다고 하더라도 쉽게 익숙해지고 적응할 것입니다. + + 또한 FreeIPA의 경우와 다르게, 이러한 기능은 십수가지가 아닌 3개로 나눠집니다. 따라서 SoC에 따른 분리를 하더라도 관리 포인트가 엄청나게 느는 것은 아닐 것으로 생각됩니다. + + 이런 고려사항을 따라 SoC에 따라 분리되어 있는  Nginx + oauth2-proxy + keycloak 을 선택해 적용하여 각 개념을 더욱 정밀하게 배울 수 있을 것으로 기대됩니다. 추후, 이러한 개념이 내재화 된다면, Authentik 같은 통합형 시스템도 쉽게 익숙해 질 수 있을 것을 기대합니다. + + 또한 DNS가 작동하는 FreeIPA가 아닌, Proxy Server에서 DDNS 스크립트가 작동하는 이유는 SoC 때문입니다. FreeIPA는 내부의 보안과 인증을 담당하는 서비스로 이러한 FreeIPA가 외부에 노출되는 것은 보안상 바람직하지 못합니다. 따라서, 외부와 통신하는 것이 주요한 역할인 Proxy에서 DDNS를 두어 SoC 원칙 하에 서비스를 분리하여 구축합니다. + + VPN + + WAN에서 안전하게 LAN으로 접근하여 서비스를 이용하고, 서버 및 클라이언트를 관리하기 위해서는 VPN이 필요합니다. VPN은 암호화된 터널을 구성해, WAN에서도 마치 특정 LAN에 속한 것 처럼 네트워크를 사용할 수 있도록 하는 기술입니다. + + 프로젝트에서 계획 중인 대부분의 서비스는 DDNS를 통한 외부 공개 접속이 가능하나, 특정 서비스(AdGuard Home, SSH, NFS, FTP 등)는 외부에 노출시 보안의 문제가 발생하기 쉽습니다. 따라서 이러한 특정 서비스를 안전하게 외부에서 이용하기 위해 VPN을 구축하여 사용하고자 합니다. 
+ + 특히 OPNsense를 통한 VPN 서버를 구축 예정이므로, 공식적으로 지원하는 OpenVPN, WireGuard, IKEv2/IPSec을 비교하려 합니다. + + 시스템 비교 + + + + + + + + 시스템 + + OpenVPN + + WireGuard + + IKEv2/IPSec + + + + + + + + + + + +  호환성 + + (아키텍처) + + + + + + 다양함 + + (Windows, Linux, 모바일 등) + + + + + + 다양함 + + (Windows, Linux, 모바일 등) + + + + + + 대부분의 모바일 플랫폼 + + (RFC 프로토콜) + + + + + + + + 라이센스 + + Open Source + + Open Source + + + + Microsoft/Cisco + + + + + + + + 속도 + + 보통 + + 매우 빠름 + + 빠름 + + + + + + + +   + + 핵심 기능 + + + + 높은 배터리 소모, 높은 보안성과 안정성, 네트워크 전환 시 느린 재연결, 공식 앱 지원, 서드 파티 분할 터널링 지원 + + 낮은 배터리 소모, 높은 보안성과 안정성, 네트워크 전환 시 즉각적인 재연결, 공식 앱 지원, 공식 앱 분할 터널링 지원 + + 낮은 배터리 소모, 높은 보안성과 안정성, 네트워크 전환 시 즉각적인 재연결, OS 내장형, 분할 터널링 기능 지원하나 설정의 까다로움 + + + + + + + +   + + 설정 및  + + 유지 보수 난이도 + + + + + + 보통 + + (인증서 관리 필수, 수동으로 클라이언트 설정) + +   + + + + + + 매우 쉬움 + + (직관적인 UI, QR 코드를 통한 클라이언트 설정 가능) + + + + + + 어려움 + + (두 단계의 설정 및 일치, + + 벤더간 호환성 문제 발생 쉬움) + + + + + + + + + + 결론 + +  VPN을 사용할 때 있어서 중요한 것은 높은 보안성과 안정성 외에도 편의성도 큰 비중을 차지합니다. 특히 모바일 기기를 통한 VPN을 주로 사용하는 환경에서는 낮은 배터리 소모와 분할 터널링, 쉽고 직관적인 클라이언트 설정이 사용 경험을 크게 올려줄 것입니다. 따라서 세 프로토콜 전부 높은 보안성과 안정성을 보장할 수 있으므로, 배터리 소모량이 가장 적고 직관적이며 설정이 쉬운 WireGuard 를 선택했습니다. + + 파일 및 백업 관리와 클라우드 서비스 + + On-premise 환경을 구축하는 데 있어 파일 및 백업 관리와 클라우드 서비스는 가장 중요한 기능 중 하나입니다. 특히 빠른 I/O 속도가 상대적으로 중요하지 않은 대용량 파일(미디어 파일, 개인 파일, 백업 파일 등)은 SSD가 아닌 비용 효율적인 HDD에 저장하는 것이 가장 이상적입니다. + + 하지만 모든 서버에 HDD를 직접 마운트하는 방식은 복잡하고 비효율적입니다. 따라서, 중앙에 파일 서버를 구축하여 NFS 및 백업 서비스, 클라우드 서비스를 제공하는 것이 더욱 효율적입니다. + + 이때 Hypervisor로 부터 독립적인 파일 시스템을 관리하기 위하여 LXC가 아닌 VM으로 구축합니다. 이러한 중앙 통합적인 파일 서버를 구축하기 위하여 다음과 같은 사항들을 고려했습니다. 
+ + 시스템 비교 + + OS + + + + + + + + 시스템 + + + + NAS OS (TrueNAS, OMV 등) + + + + Ubuntu Server + + Debian (Stable) + + + + + + + + + + 아키텍처 + + + + Unix/Linux 등 + + + + Linux + + Linux + + + + + + 라이센스 + + OS 별로 다름 + + Open Source + + Open Source + + + + + + UI + + 전문 GUI 지원 + + + + CLI 지원 + + + + CLI 지원 + + + + + + + + 핵심 기능 + + + + + + 파일 서버에 필요한 모든 기능들을 통합적으로 지원 + + (SMB, NFS, 인증, 백업, 미디어 서버, DDNS, 자체 클라우드 등) + + + + + + 필요한 프로토콜 및 기능 별도로 구현 + + + + + + 필요한 프로토콜 및 기능 별도로 구현 + + + + + + + + 설정 및 유지 보수 난이도 + + + + 낮음 + + (타 서버/서비스 연동시 높음) + + + + 중간 + + 낮음~중간 + + + + + + 최소 사양 + + 중간 + + 낮음 + + 낮음 + + + + + + + + Cloud service + + + + + + + + 시스템 + + + + NAS OS의 패키지 + + + + NextCloud + + Seafile + + + + + + + + + + 아키텍처 + + SW + + SW + + SW + + + + + + 라이센스 + + OS 별로 다름 + + Open Source + + 무료/유료 버전 + + + + + + UI + + 전문 GUI 지원 + + + + Web GUI 지원 + + + + Web GUI 지원 + + + + + + + + 핵심 기능 + + + + + + OS 별 차이가 있을 수 있으나 기본적으로 모든 기능을 포함한 통합 파일 관리 솔루션 제공 + + + + + + 파일 동기화, PIM 기능(캘린더, 할일, 주소록 등), media/office 기능을 포함한 통합 파일 관리 솔루션, LDAP/SSO 지원 + + + + 파일 동기화 및 관리/공유 기능 특화, LDAP/SSO 지원 + +   + + + + + + + + 설정 및 유지 보수 난이도 + + + + 낮음 + + (타 서버/서비스 연동시 높음) + + + + 중간 + + 중간 + + + + + + 최소 사양 + + 높음 + + 중간 + + 낮음 + + + + + + + + Backup service + + + + + + + + 시스템 + + + + NAS OS의 패키지 + + + + duplicati + + BorgBackup (+Vorta) + + Kopia + + + + + + + + + + 아키텍처 + + + + SW + + + + SW + + SW + + SW + + + + + + 라이센스 + + OS 별로 다름 + + Open Source + + Open Source + + Open Source + + + + + + UI + + 전문 GUI 지원 + + CLI, Web GUI 지원 + + + + CLI(기본), + + GUI(vorta) 지원 + + + + CLI, GUI 지원 + + + + + + + + 핵심 기능 + + + + + + OS 별 차이가 있을 수 있으나 기본적으로 스냅샷,클라우드 동기화/백업, 중복 제거 증분 백업, E2EE 백업, 스케줄 백업 등 제공 + + + + + + 다양한 클라우드 백업 등 백엔드 직접 지원, E2EE 암호화, 자동 스케줄링, 버전 관리 등 제공 + + + + + + 최고 수준 중복제거/압축, 강력한 E2EE 암호화, 스냅샷(아카이브) 기반, 스케줄 백업, 강력한 CLI 등 제공 + + + + 최신 기술(CDC 중복제거), 다양한 클라우드 백업 직접 지원, 정책 기반 관리, E2EE 암호화, 스케줄 백업 등 제공 + + + + + + + + 설정 및 + + 유지 보수 난이도 + + + + + + 낮음 + + (타 서버/서비스 연동시 높음) + + + + + + 
낮음 + + + + + + 중간(CLI 사용 시 높음) + + + + + + 낮음 + + + + + + + + 최소 사양 + + 높음 + + + +  낮음 + + + + + + 낮음~중간 + + + + + +  중간 + + + + + + + + + + 추가 고려 사항 + + + + 타 서비스(OPNsense, FreeIPA, Proxy 등)과 연동 용이할 것 + + 다양한 기능보다는 File Server 자체 기능의 집중할 것 + + + + 결론 + + 프로젝트 진행에 있어 가장 중요한 점은 학습 가능성입니다. 존재하는 NAS OS를 그대로 파일 서버로 사용한다면 직접 설정하는 것보다는 편리할 수 있습니다. 하지만 다른 서비스들과의 통합과 부족한 하드웨어 성능, 그리고 학습 가능성을 고려했을 때 NAS OS 보다는 파일 서버를 직접 구축하는 것이 낫다는 결론을 내렸습니다. + + 따라서 서버를 구축하기 위한 OS로 NAS OS를 제외한 Ubuntu와 Debian(stable)을 비교하였습니다. Ubuntu와 Debian모두 안정적이고 가벼운 OS이나 Hypervisor인 PVE의 기반이 Debian이므로 다른 LXC와 VM 간 일관성을 위하여 Debian 을 선택하였습니다. + + 이때 기본 파일 시스템을 Btrfs로 적용하여 사용하려 하는데, Btrfs는 자체 중복 처리 기능과 CoW 기능, 그리고 스냅샷 기능을 제공합니다. 다만 CoW로 인한 외부 단편화가 발생할 수 있으므로 주기적인 디스크 압축 기능을 사용하여야 합니다.  + + 개인 파일 관리를 위한 Cloud Service를 위한 시스템을 선정하는데 있어서 가장 중요한 점은 필요 성능이 낮아야 한다는 것과 특화된 기능입니다. 파일 관리를 제외한 나머지 기능들, 즉 PIM 및 Multimedia 기능 등은 필요시 별도로 Docker를 통해 Application Server에 구축될 예정이기 때문입니다. 따라서 NextCloud와 Seafile을 비교하였을 때, 다양한 기능을 가진 NextCloud 보다 본연의 파일 관리 기능에 특화되어 있는 Seafile 을 선택하였습니다. + + 마지막으로 File Server의 파일들을 보존하기 위한 Backup Service를 위하여 고려한 점 역시 학습 가능성과 사용 편의성이 었습니다. + + Duplicati는 오랜 기간동안 쓰여온 파일 백업 시스템이지만, 최신 중복 제거 알고리즘 지원등이 다른 서비스보다 부족하였고 타 시스템보다 CLI를 통한 강력한 기능 지원이 부족하였습니다. + + BorgBackup 역시 오랜 기간 동안 쓰여온 강력한 시스템이며, 가장 강력하고 최신인 중복 제거 알고리즘을 사용합니다. 또, CLI를 통한 세밀한 관리 등이 가능하였으나, Vorta라는 별도의 GUI 시스템을 사용하지 않으면 GUI를 지원하지 않고 백엔트 연동 등의 편의성 부분에서 부족하였습니다. + + Kopia는 출시된 지 오래된 시스템은 아니지만 빠르게 발전하는 시스템입니다. 최신 중복 제거 알고리즘을 지원하며, CLI와 GUI 모두 지원하였습니다. 또한, 다양한 백엔드 시스템(타 사 클라우드 서비스 포함) 연동 기능을 자체적으로 보유하고 있습니다. + + 다음과 같은 점들을 모두 고려 하였을 때  Kopia 가 학습 가능성과 사용 편의성을 모두 충족시키는 것이라 생각되어 선택하였습니다. + + 이 경우, Seafile과 Kopia 모두 일관된 환경에서 실행하기 위하여 Docker로 배포합니다. + + 따라서 File Server는 Debian(Stable - netist) 위에서  Seafile + Kopia(on Docker) 을 구축하기로 운영하기로 했습니다.  + + DB, Web, Application 서비스 + + DB, Web, Application 서비스는 VM 위에서 Docker를 통하여 어느 환경에서나 동일한 작동을 보장하도록 구축할 것입니다. Docker의 경우 편리하지만, rootless docker는 사용에 있어 큰 제약(내부 인터페이스 생성 등)이 있기에 root 권한으로 실행되어야 합니다. 
+ + 따라서 Docker를 사용하기 위하여서는 LXC가 아닌 VM을 사용하여야 합니다. 이는 LXC 상에서 Docker가 실행되었을 때 root 권한을 통해 LXC를 넘어 Hypervisor의 커널 영역까지 침범할 수 있는 위험성을 가지고 있기 때문입니다. LXC의 경우 AppArmor/seccomp의 존재로 인하여 충분한 권한을 얻지 못한 Docker 서비스에 오류가 발생할 가능성이 큽니다. unprivileged LXC와 rootless docker의 경우 앞서 언급한 제약으로 인해 문제가 발생할 가능성이 있으므로 역시 권장되는 사용법이 아닙니다. 따라서 Docker 활용시에는 반드시 VM을 통한 완벽한 논리적 분리가 필요합니다. + + 따라서 앞선 File server의 경우처럼 VM을 통해 Proxmox VE와 일관성을 유지하기 위한 Debian(Stable - netinst) 환경에서 Docker 서비스를 배포하는 형식으로 구현하고자 합니다. 구현하고자 하는 서비스의 목록은 다음과 같습니다. + + DB + + + + MariaDB, PostgreSQL 등 - DBMS + + + + Web + + + + Homepage - 홈페이지 및 대쉬보드 + + Ghost - 블로그 서비스 + + Bookstack - 개인 wiki, 노트 + + Gitea - Git 버전 관리 + + + + Application + + + + Portainer - 도커 통합 관리 + + Uptime Kuma - 도커 서비스 모니터링 + + Diun - 도커 업데이트 관리(수동 업데이트) + + n8n - 워크 플로우 자동화 + + Redis - 캐시 DB + + Code-Server - 웹 코드 에디터(Git 연동) + + Immich - 사진 및 개인 동영상 관리 + + PeerTube - 동영상 컨텐츠 관리 및 스트리밍 + + Navidrome - 음악 관리 + + Vaultwarden - 개인 암호 관리 + + Infisical - DB, API key 등 관리 + + Matrix-Synapse/element - 채팅/통화/영상 통화 + + Radicale - 캘린더 및 주소록 + + Vikunja - To-DO 리스트 + + Firefly III - 가계부 + + Paperless-ngx - 문서 관리 + + Snipe-IT - ITAM; IT 자산 관리 + + Calibre-web - 전자책 관리 + + Komga - 만화책 관리 + + Audiobookshelf - 오디오 북 관리 + + + + 보안 실습 환경 + + 보안 실습 환경은 Kali linux와 Practice server로 나누어 운영을 할 것입니다. 두 서버 모두 보안 실습에 활용될 예정이므로 Hypervisor에 영향을 줄 수 없도록 VM으로 격리하여야 합니다. Kali는 보안 실습에 필요한 많은 도구들을 자체적으로 내장하고 있는 OS이며, Practice server는 목적에 맞게 여러 OS를 활용할 계획입니다. Windows 부터 Unix, Linux 등을 필요할 때 설치하고, 실습 완료 후 제거하는 방식으로 운영할 것입니다. + + 결론 + + SoC 원칙 및 VM/LXC/Docker 환경 내에서 주의하여야 할 점들을 명확하게 하여 필요한 서비스들을 정리 및 시스템을 선정하였습니다. 이러한 필요 서비스들을 한 번에 구축하는 것이 아닌, 중요 순서 별로 차례 차례 구축하며 할당 성능을 조절한다면 주어진 물리적 제약 조건 속에서도 목표하는 모든 사항을 구현할 수 있을 것으로 보입니다. + + 최종적인 시스템 선정 결과는  1.3. 최종 목표 아키텍처 에 정리되어 있습니다. + + + + 2025-05-29 - 초안 작성 중 + + 2025-06-04 - 초안 작성 완료 + + 2025-06-16 - AdGuard Home LXC 내용 추가 + + 2025-06-17 - VPN 내용 추가 및 부록 1.1. 
문서 작성 가이드라인 에 따라 본문 재작성 + + 2025-06-19 - Rocky Linux 내용 추가 + + 2025-06-20 - 날짜 표기 변경 + +2.4. 보안 설계 +목표 + + + + 네트워크, 시스템, 인증, 및 정책 모든 부분에 대한 잠재적인 보안 위협을 예상하고, 이를 효율적으로 차단할 수 있는 방안을 구상합니다. + + 각 과정의 고려사항 및 근거를 명확하게 기록합니다. + + + + 요구 사항 + + + + 최소 권한 원칙 및 Zero Trust 원칙 하, 보안 위험을 식별하고 차단합니다. + + T5004/AX6000M/OPNsense/FreeIPA 등의 네트워크 관련 정책을 설계합니다. + + 방화벽, IPS/IDS, Port Fowarding 및 NAT, VLAN, VPN 등이 포함합니다. + + 각 서버 별 공통으로 적용되어야 할 보안 규칙(System Hardening)과 예외 사항을 작성합니다. + + ACL을 위한 UID/GID 관리와 FreeIPA를 통한 SSO 규칙을 작성합니다. + + 데이터 보안 정책을 설계합니다. + + + + 검토 사항 + + 네트워크 보안 정책 설계 + + T5004/AX6000M 보안 정책 + + 관리자 접근 + + + + WAN에서의 관리자 페이지 접속을 원천적으로 차단 + + LAN 혹은 VPN을 통해서만 접근 허용 + + + + Wi-Fi 보안 + + + + 무선 네트워크에 대하여 WPA3 암호화 방식 사용 + + 강력한 비밀번호 설정 + + IoT 기기와 Client의 IP 대역 분리 + + + + OPNsense 상에서 IoT 기기의 IP 대역에 대하여 다음 규칙 적용 + + Allow: 목적지가 AdGuard Home의 DNS 포트인 트래픽과 WAN인 트래픽 + + Dney: 모든 내부망 트래픽 + + Clients는 전부 각각 Static IP 주소 할당하여, 대역이 아닌 IP 주소로 접근 제어 + + IP 위변조시 IP 주소 충돌로 확인 가능 , WTR Pro 외부 VLAN 적용 불가인 현 환경에서 최선 + + + + + + + + 펌웨어 관리 + + + + 주기적으로 최신 펌웨어 확인 및 유지 + + + + 취약 기능 비활성화 + + + + UPnP와 같은 보안에 취약한 기능은 비활성화 원칙 + + + + 방화벽 정책 + + 기본 정책(Default Deny) + + + + 모든 인터페이스 간 통신은 기본적으로 차단 + + + + VLAN 간 통신 정책 + + + + 특정 Client만 SSH등 접속 허용 + + 그 외 모든 통신은 원칙적으로 Proxy Server의 https 포트와, DNS 통신만 허용 + + + + CrowdSec + + + + 각 서버에 별개로 Fail2Ban을 설정하지 않고 중앙 OPNsense에서 CrowdSec 플러그인을 통해 중앙 관제 및 대응 + + 커뮤니티 기반의 악성 IP 차단 목록 활성화하여 알려진 공격자 차단 + + 모든 VM/LXC에 Ansible을 기반으로 CrowdSec 에이전트를 설치하여 OPNsense에서 통합 분석 + + + + IPS/IDS 정책 + + 초기 운영 정책 + + + + 운영 초기 서버 부하를 알기 위하여 IDS 모드로 운영하여 충분히 검토 + + 이후 점진적인 IPS 규칙 적용 + + + + 규칙 셋 + + + + 커뮤니티 기반 ET Open 규칙셋을 적용 + + 최대한 가벼운 체험 목적의 운영 + + 오탐 규칙은 개별적인 비활성화 + + + + Port Fowarding 및 NAT 정책 + + + + INBOUND 접근은 https(tcp/443)과 WireGuard VPN의 포트만 허용 + + T5004는 모든 트래픽을 OPNsense로 전달(Port Fowarding) + + + + DNS 정책 + + + + 기본 DNS를 AdGuard Home으로 설정 + + AdGuard Home은 광고 Domain을 차단 후 FreeIPA DNS로 upstream + + FreeIPA는 Spilt Horizon DNS를 구현, 매치되지 않는 Domain에 대하여 
Public DNS로 Fowarding + + + + VPN 접속 정책 + + VPN 프로토콜 + + + + WireGuard 프로토콜 사용 + + + + VPN Server + + + + OPNsense에서 구동 + + + + 접근 원칙 + + + + 외부에서 내부 관리 기능(SSH, PVE 웹 UI, Reverse Proxy 등) 이용시 반드시 VPN을 통해 접속 + + https 접근이 아닌 모든 프로토콜(RDP, NFS, SMB 등)은 반드시 VPN을 통해 접속 + + + + 시스템 보안 정책 설계 + + root 계정 보안 + + + + 명시적인 ssh root 접속 차단 + + 서버 관리용 계정 생성 + + root 권한 필요시 sudo group을 활용한 sudo 사용 + + sudo 사용 기록 로깅 + + + + 시스템 하드닝(System hardening) 정책 + + 자동화 관리 + + Ansible 플레이북을 통해 신규 VM/LXC 배포시 다음 정책 강제 적용 + + + + 최소 패키지 원칙 + + 정기적 업데이트 + + 로컬 방화벽(iptables) 정책 + + + + 최소 권한 원칙 적용 + + ssh 등 필수 포트 개방 + + 추가 포트 개방 필요시 수동으로 정의 + + + + + + CrowdSec 에이전트 설치하여 침입 탐지 및 공격 차단 + + + + Secrets 관리 정책 + + Infisical Secret + + + + Infisical이 자신의 DB에 접속하기 위한 초기 비밀번호는 Ansible Vault를 통해 암호화 + + Vault 마스터 비밀번호는 개인 암호 관리자(Vaultwarden)에 보관 + + + + Infisical 정책 + + + + Infisical 실행 이후 DB 접속 정보, API 키 등 모든 Secret은 Infisical를 통해 중앙 집중식 관리 + + 주요 API 키 및 DB 비밀번호는 주기적인 변경 + + + + 개인 Secret 관리 + + + + Vaultwarden 서버를 활용하여 저장 + + + + 인증 및 접근 제어 정책 설계 + + FreeIAP를 통한 SSO 정책 + + Application 인증 + + + + BookStack, Gitea 등 SSO를 지원하는 서비스의 사용자 로그인은 FreeIPA를 통한 LDAP/SSO를 통해 적용 + + 직접 SSO가 지원되지 않는 서비스는 idP를 이용하여 SSO 적용 + + + + Server 인증 + + + + 모든 서버의 SSH 접속은 비밀번호가 아닌, FreeIPA를 통한 Kerberos 혹은 SSH 공개키 인증 방식 적용 + + + + 계정 및 권한 정책 + + Local UID/GID 정책 + + + + 로컬 UID/GID 대역: 2000~9999 + + + + 서버 자체의 root를 대신할 관리자 UID:GID + + ACL 관리는 FreeIPA가 LDAP로 담당 + + + + + + 서버별 대표 UID:GID 설정(예시) + + + + 서버 관리 그룹 GID: 2000  + + DB 서버의 대표 UID: 2001 + + 파일 서버의 대표 UID: 2002 + + + + + + FreeIPA에 할당될 LDAP/SSO 대역: 10000~ + + + + 최소 권한 원칙 + + + + 원격 root 접속은 제한 + + 모든 사용자는 일반 권한 보유 + + 관리자 권한 필요 작업시 sudo 사용 후 사용 내역 로깅 + + + + DB 정책 + + 접속 호스트 제한 + + + + 각 user는 자신의 DB에 모든 권한 보유 + + DB 로그인은 'user'@'ip_address' 형태로 해당 계정을 사용하는 서비스의 IP 주소에서만 접근 가능하도록 명시적 제한 + + + + 데이터 보안 정책 + + 파일 시스템 + + + + Btrfs 사용하여 CoW, 스냅샷 등 고급 기능 활성화 + + + + DB의 논리적 백업 + + + + 모든 DB는 pg_dump (PostgreSQL) 혹은 mysqldump (MariaDB) 사용하여 논리적 백업 수행 + + 백업 수행 결과는 File 
Server로 이동하여 저장 + + + + 데이터 백업 + + + + 파일 서버의 데이터는 Kopia를 사용하여 DS124에 1차 백업 + + 주요 데이터는 E2EE를 적용하여 클라우드 백업 + + + + 결론 + + 보안 분야는 무궁무진하므로 새로운 위험과 취약점을 발견 시 언제든 수정될 수 있습니다. 고정된 설계가 아닌 프로젝트가 진행 되며 같이 변경되는 설계로 보안 분야에 대한 학습과 이해, 그리고 정책 수립 등에 익숙해지고자 합니다. + + 최종적인 보안 설계 구조는 1.3. 최종 목표 아키텍처 에 정리되어 있습니다. + + + + 2025-06-08 - 초안 작성 + + 2025-06-19 - 세부 사항 작성 + + 2025-06-20 - 날짜 표기 변경 + +2.5. 구현 계획 +목표 + + + + 실제로 프로젝트를 구현하기 앞서 구체적인 구현 순서를 계획합니다. + + 계획에 따라 순차적으로 서비스를 구현하고 검증합니다. + + + + 구현 계획 + + 네트워크 장비 설정 + + 기보유 네트워크 장비들을 계획에 맞도록 설정합니다. + + IP Matrix 작성 + + 네트워크 장비 설정에 앞서, 각 네트워크 장비와 서버 및 클라이언트의 IP 대역을 미리 계획합니다. 이를 Matrix 형태로 정리하여 추후 관리 및 유지보수가 편리하도록 합니다. + + T5004/AX6000M 설정 + + 현 네트워크 구조의 핵심인 T5004와, AX6000M의 설정을 합니다. 필요 설정은 크게 다음과 같습니다. + + + + DHCP 서버 설정(기본 게이트웨이, DNS, Static/Dynamic IP 범위 등) + + 보안 설정(NAT 설정, UPnP 설정, 라우터 WAN 접근 차단, 관리자 ID/PW 설정, 접속 국가 차단, 무선 LAN 설정 등) + + 펌웨어 업데이트 확인 + + 물리적인 보안 대책 확인 + + + + PVE 설정 + + VM/LXC를 통한 각 서버 구현 전 Hypervisor인 Proxmox VE를 설치하고 설정합니다. + + UID/GID Matrix 작성 + + 앞으로 구현될 각 서버의 UID/GID 대역을 설정합니다. FreeIPA 설정 후 통합될 LDAP/SSO 전용 대역과 로컬 UID/GID를 분리합니다. + + PVE 설치 + + 임시 서비스 Debain(Stable - netist) 설정 + + Bookstack, Gitea 등 프로젝트 진행을 위하여 필수적인 서비스를 임시로 설치합니다. 추후 WTR Pro에 서비스 구현시 마이그레이션합니다. + + 필수 설치 패키지 목록 + + + + curl + + sudo + + dockerd + + + + 임시 서비스 목록 + + + + nginx-proxy-manager[jc21/nginx-proxy-manager] + + MariaDB[mariadb] + + BookStack[linuxserver/bookstack] + + Gitea[gitea/gitea] + + Code-Server + + PostgreSQL[postgres] + + redis[redis] + + Infisical[infisical/infisical] + + immich + + + + OPNsense 설정 + + Monitoring Server 설정 + + AdGuard Home 설정 + + FreeIPA 설정 + + Proxy Server 설정 + + DB Server 설정 + + 기존 서비스의 Local DB 마이그레이션 + + File Server 설정 + + Web Server/WAS 설정 + + Application Server 설정 + + 정보보안 실습환경 설정 + + 결론 + + 이 계획 문서는 프로젝트의 진행에 따라 계속해서 갱신됩니다. 현재 프로젝트 구현의 큰 흐름이 잡힌 상태입니다. 이러한 계획에 따라 프로젝트를 구현합니다. 
+ + + + 2025-06-07 - 초안 작성 + + 2025-06-16 - AdGuard Home 내용 추가 + + 2025-06-17 - DS124 상의 임시 서비스 내용 추가 + + 2025-06-19 - DS124 상의 임시 서비스 내용 삭제 및 Debian (stable - netits) 상의 임시 서비스 내용 추가 + + 2025-06-20 - 날짜 표기 변경 + +3. 네트워크 장비 설정 + +3.1. IP Matrix 작성 +목표 + + + + 추후 구현될 Server들이 속할 VLAN과 IP 주소 대역을 깔끔하고 보기 쉽게 관리합니다. + + On-premise 환경을 구현하기 앞서 VLAN과 IP를 명확하게 Matrix로 작성합니다. + + + + 요구 사항 + + + + VLAN Matirx 작성 + + 각 VLAN 별 IP Matrix 작성 + + + + Matrix + + VLAN + + + + + + + + VLAN ID + + VLAN 이름 + + IP 대역(CIDR) + + Gateway + + DHCP 범위 + + 역할/설명 + + + + + + + + + + Untagged VLAN + + default LAN + + 192.168.0.0/24 + + + + 192.168.0.3 + + + + 192.168.0.50~254 + + WTR Pro 외부 Clients + + + + + + 2 + + Server + + 192.168.2.0/24 + + 192.168.2.1 + + N/A + + + + 서버 및 + + 관리 인프라 + + + + + + + + 3 + + Kali + + 192.168.3.0/24 + + + + 192.168.3.1 + + + + N/A + + 정보보호 실습 + + + + + + 4 + + Practice + + 192.168.4.0/24 + + 192.168.4.1 + + N/A + + 정보보호 실습 + + + + + + 10 + + VPN + + 10.0.0.0/24 + + 10.0.0.1 + + 10.0.0.2~254 + + VPN 할당 대역 + + + + + + + + Static IP Address + + Untagged VLAN + + + + + + + + 장치/서비스 + + 이름 + + IP 주소 + + MAC 주소 + + 주요 서비스 및 포트 + + 접근 정책 + + 비고 + + + + + + + + + + + + T5004 + + router + + + + ISP 할당 WAN 주소 + + 192.168.0.1 + + + + + + 물리 NIC + + [WAN: 58-86-94-96-BF-E9] + + [LAN: 58:86:94:96:BF:E8] + + + + WAN gateway + + + + Inbound/Outbound 트래픽 제어, NAT 수행 + + + + + + + + + + + + OPNsense + + opnsense-vm + + + + 192.168.0.2(vtnet0) + + 192.168.0.3(vtnet1) + + + + + + vmbr0,1 + + [NIC0: C8:FF:BF:05:AA:B0] + + [NIC1: C8:FF:BF:05:AA:B1] + + + + + + Firewall, LAN gateway, DHCP, VPN Server, IPS/IDS + + + + Inbound/Outbound 트래픽 제어, 내부 라우팅, 중앙 방화벽 + + + + + + + + + + AX6000M + + ap + + + + 192.168.0.4 + + + + + +  물리 NIC + + [58:86:94:43:BC:8F] + + + + AP + + AP + + + + + + + + + + Temporary PVE + + pve + + + + 192.168.0.48 + + (OPNsense 구축 전) + + + + + + vmbr1 + + [NIC1: C8:FF:BF:05:AA:B1] + + + + PVE Web(tmp) + + 임시, Console에서만 접근 가능 + + + + + + + + + + NAS + + ds124 + 
+ 192.168.0.5 + + + + 물리 NIC + + [90:09:D0:65:A9:DB] + + + + Backup/File server + + Untagged VLAN, VLAN2 접근 가능 + + + + + + + + + + PC1 + + pc-1 + + 192.168.0.6 + + + + 물리 NIC + + [CC:28:AA:A7:D9:33]  + + + + Client/Console + + 모든 VLAN 접근 가능 + + + + + + + + + + PC2 + + pc-2 + + 192.168.0.7 + + + + 물리 NIC + + [3C:7C:3F:D3:1B:D7] + + + + Client + + DNS, Proxy 접근 가능 + + + + + + + + + + Peinter + + printer + + + + 192.168.0.8 + + + + + + + + 물리 NIC + + [38:CA:84:94:5E:07] + + + + Client + + DNS, Proxy 접근 가능 + + + + + + + + + + TV + + TV + + + + 192.168.0.9 + + + + + + 물리 NIC [7C:0A:3F:3D:12:37] + + + + Client + + DNS, Proxy 접근 가능 + + + + + + + + + + Temporary Server + + temp-app + + + + 192.168.0.49 + + (임시 서비스 서버) + + + + + + vmbr1 + + [가상 eth: BC:24:11:ED:10:79] + + + + docker host + + + + 임시, Console 및 Web 접근 가능, + + SSH는 Console 만 가능 + + + + + + + + + + + + ↳Docker Services + + containers + + + + 172.16.0.x + + + + docker bridge + + N/A + + Reverse Proxy 접근 통해 가능 + + + + + + + + + + + + VLAN2 + + VLAN3 + + VLAN4 + + VLAN10 + + 결론 + + 현재 임시 환경 Matrix 작성 중입니다. 추후 서버 및 정보보호 환경, VPN 환경 구축 시 Matrix 업데이트 예정입니다. + + + + 2025-06-19 - 초안 작성 + + 2025-06-20 - IP Matrix MAC 주소 추가 + + 2025-06-23 - VLAN 표기 방식 수정 + +3.2. T5004/AX6000M 설정 +목표 + + + + 현 집안 환경의 물리적 네트워크 구조 핵심인 T5004와 AX6000M을 설정합니다. + + + + 요구 사항 + + + + T5004의 초기 설정을 진행합니다. + + T5004의 DHCP 서버를 설정합니다. + + T5004와 AX6000M의 보안 설정을 합니다. + + + + 포트 포워딩 설정 + + UPnP 설정 + + 라우터 관리 페이지 WAN 접근 차단 설정 + + 관리자 ID/PW 설정 + + 접속 국가 차단 설정 + + 무선 LAN 설정 + + + + + + 펌웨어 업데이트를 진행합니다. + + 설정을 백업합니다. + + 물리적인 보안 대책을 확인합니다. 
+ + + + 수행 작업 + + 초기 설정 + + 시스템 관리 + + 관리자 설정 + + + + 관리자 계정 설정 + + 로그인 인증 방법 + + + + 세션 방식 + + 10분간 미사용 시 자동 로그아웃 + + Captcha 활성화 + + + + + + + + 펌웨어 업그레이드 + + + + 자동 업그레이드 수행 + + 월 1회 주기적인 재확인 + + + + 기타 설정 + + + + 공유기 관리 포트 + + + + 기본 http 지정 + + + + + + UPnP 설정 끄기 + + + + 특수 기능 + + IPTV 설정 + + + + IPTV 사용 안함 + + + + DHCP 서버 설정 + + 네트워크 관리 + + 인터넷 설정 정보 + + + + WAN 정보(ISP 기본 설정) 확인 + + DNS 주소 확인 + + + + DNS 주소는 AdGuard Home 구축 후 변경 + + + + + + WAN MAC 주소 확인 + + + + 내부 네트워크 설정 + + + + 내부 IP 주소(T5004) 확인 + + LAN Subnetmask 확인 + + + + DHCP 서버 설정 + + + + DHCP 실행 + + DHCP 설정 + + + + 대여 범위(Dynamic IP range): 192.168.0.50 ~ 192.168.0.254 + + Subnetmask: 255.255.255.0 + + Gateway: 192.168.0.1 + + + + OPNsense 구축 후 192.168.0.3으로 변경 + + OPNsense의 Gateway는 192.168.0.1로, vtnet0(192.168.0.2) 통해서 나감  + + + + + +  DNS 주소 + + + + AdGuard Home 구축 후 변경 + + + + + + + + + + 작성한 IP Matric를 바탕으로 Static  IP 할당 + + + + 등록된 주소 관리 + 버튼 클릭 + + IP 주소, MAC 주소, 설명 추가 + + + + + + IP 재할당을 위하여 재시작 + + + + Easy Mesh 관리툴 + + Easy Mesh 기본 설정 + + + + Controller mode 설정 + + + + Controller mode 네트워크 이름 및 암호 지정 + + + + 인증방법 WPA3SAE/WPA2PSK + AES + + 이름 알림 비활성화 + + + + + + + + + + WiFi 설정 + + + + 2.4GHz, 5GHz SSID 개별 설정 + + + + 인증방법 WPA3SAE/WPA2PSK + AES + + 이름 알림 활성화 + + + + + +  6GHz 비활성화 + + 비밀번호 설정 + + 게스트 네트워크 사용 비활성화 + + + + AdGuard Home 사용시 필요 + + + + + + + + + + + + Easy Mesh 고급 설정 + + + + 자동 재시작 + + + + 주 1회 05:00 순차적 재시작 + + + + + + + + Easy Mesh 관리 + + + + Easy Mesh 연결할 T5004와 AX6000M LAN 포트간 연결 후 Easy Mesh 관리툴 접속 + + 연결 가능한 ipTIME 건색 + + AX6000M 확인 후 IP 자동으로 받아오기 및 Agent 이름 지정 + + + + 설정 페이지 비활성화 + + 고급 설정 - 무선 백홀 사용 안함 + + + + + + + + 보안 설정 + + 보안기능 + + 공유기 접속/보안 관리 + + + + 원격 관리 포트 비활성화 + + 외부 접속 보안 비활성화 + + 내부 접속 보안 비활성화 + + + + 추후 Reverse Proxy 구축 시 Proxy 및 클라이언트 IP만 허용 + + + + + + + + 국가별 접속 제한 + + + + 국가 허용 + + + + 한국, 캐나다만 허용 + + + + + + + + NAT/라우터 관리 + + 포트포워드 설정 + + + + 추후 VPN 서버 구축시 VPN 포트 허용 + + https(443) 포트만 DS124로 포트포워딩 + + + + OPNsense 구축 시 OPNsense로 포트포워딩 + + + + + + + + 고급 
NAT 설정 + + + + 인터넷 공유 기능 활성화 + + 포트포워드 UPnP 릴레이 비활성화 + + + + 설정 백업 + + 시스템 관리 + + 기타 설정 + + + + 설정 백업 복구 + + + + 공유기 백업 + + + + + + + + 물리적 보안 대책 + + + + 통신 단자함 내 T5004 위치 + + 거실 내 AX6000M 위치 + + 미사용 LAN 포트 봉인 + + + + 결론 + + + + 네트워크 연결 확인 + + Static IP 할당 확인 + + WiFi 설정 확인 + + 물리적 보안을 위한 미사용 LAN 포트 봉인 + + 추후 프로젝트 진행하며 업데이트 예정 + + + + + + 2025-06-20 - 초안 작성 + +4. PVE 설정 + +4.1. 서비스 UID/GID Matrix 작성 +목표 + + + + 프로젝트 내 모든 서버의 파일 권한을 일관되게 관리하기 위한 체계를 수립합니다. + + 서버 간 상호 작용을 위한 Local 계정의 대역과, 사람이 사용할 LDAP 계정의 대역을 명확하게 구별하고 대역을 분리합니다. + + 분리한 대역을 명확하게 Matrix로 작성합니다. + + + + 요구 사항 + + + + Local UID/GID 대역 정의 + + + + 각 서버 별 대표 UID/GID 정의 + + + + + + Docker의 UID/GID + + FreeIPA LDAP/SSO 전용 UID/GID 대역 정의 + + + + Matrix + + UID/GID 대역 + + + + + + + + 종류 + + Local Server + + FreeIPA LDAP/SSO + + Docker + + + + + + + + + + UID + + 2000~9999 + + 10000~ + + Container가 가진 기본 값 + + + + + + GID + + + + 2000~9999 + + *2000(svadmins), *9000(security) + + + + + + + + + + 대역 설명 + + + + 각 서버 관리 계정은 기본적으로 동일한(같은 숫자의) UID:GID를 갖는다. + + + + ex) 2006(dbsvadmin):2006(dbsv) + + + + + + 모든 서버 관리 계정은 2000 svadmins라는 추가 그룹에 속한다. + + Docker의 경우 Docker Container가 가진 기본 UID/GID를 사용한다. + + 도커 유저가 사용하는 파일 및 디렉토리는 기본적으로 [도커 컨테이너 내부 유저]:[호스트 서버 그룹] 형태이다. + + + + ex) 999(mariadb):2006(dbsv) + + + + + + 둘 이상의 서버가 공유하는 파일 및 디렉토리 만이 2000(svadmins)를 guid로 갖는다. + + + + (백업 디렉토리 등) + + + + + + 특정 서버와만 공유 하는 파일 및 디렉튜리는 해당 서버의 gid를 부여한다. + + + + (로그 파일 등) + + + + + + 모든 서버에 그룹 add를 통해 각 서버의 gid를 모두 정의한다. + + 추후 LDAP를 통한 파일 ACL 관리를 추가한다. 
+ + + + Local UID/GID 정의 + + + + + + + + 서비스 + + UID + + GID + + 대표 GID + + + + + + + + + + PVE + + 2000(pveadmin) + + 2000(svadmins) + + 2000(svadmins, 대표) + + + + + + OPNsense + + 2001 + + 2001 + + + + + + Rocky Linux - FreeIPA + + 2002 + + 2002 + + + + + + AdGuard Home + + 2003 + + 2003 + + + + + + Monitoring Server + + 2004 + + 2004 + + + + + + Proxy Server + + 2005 + + 2005 + + + + + + DB Server + + 2006 + + 2006 + + + + + + File Server + + 2007 + + 2007 + + + + + + Web Server/WAS + + 2008 + + 2008 + + + + + + Application Server + + 2009 + + 2009 + + + + + + 임시 Debian Server + + 2999(temp-app) + + 2000(svadmins) + + + + + + Kali + + 9000 + + 9000(security) + + 9000(security) + + + + + + Pactice Client + + 9001~9999 + + 9001~9999 + + + + + + + + FreeIPA LDAP/SSO UID/GID 정의 + + + + + + + + 서비스 + + UID + + GID + + + + + + + + + + FreeIPA LDAP/SSO + + 10000~ + + 10000~ + + + + + + + + 결론 + + 프로젝트를 진행하며 LDAP/SSO에 부여할 UID와 GID를 세분화 하여 업데이트 예정입니다. + + + + 2025-06-21 - 초안 작성 + + 2026-06-27 - uid/gid 관리 방법 세분화 + +4.2. PVE 설치 +목표 + + + + PVE를 WTR Pro에 설치합니다. + + 설치 절차 및 방법을 순서에 따라 명확하게 문서로 기록합니다. + + + + 요구 사항 + + + + PVE를 WTR Pro에 설치 + + PVE 원격 접속 + + PVE의 초기 설정 + + + + 수행 작업 + + PVE 설치 사전 준비 + + 설치 USB 준비 + + 프로젝트를 위하여 많은 OS 설치 파일이 필요합니다. 이러한 OS 설치 파일을 USB에 하나씩 넣기에는 비효율적입니다. 따라서 Ventoy라는 소프트웨어를 준비하여 하나의 USB로 여러 가지 OS 설치 파일을 한 번에 관리할 수 있도록 합니다. + + 사용 USB는 Samsung FIT Plus 64GB 입니다. + + Ventoy 설치 + + Ventoy는 오픈 소스로 여러 OS 설치 파일을 USB에 복사하여 부팅 가능하게 만들 수 있는 USB 드라이브 생성도구입니다. 
+ + + + 공식 사이트: https://www.ventoy.net/en/download.html + + 파일을 다운 받은 후, Powershell에서  Get-FileHash -Path [file_path] -Algorithm SHA256 명령어를 통해 해시 값을 확인 + + 공식 사이트에 명시된 Hash값과 차이 없을 시 준비된 Ventoy USB에 ISO 파일 복사(무결성 확인) + + USB를 연결 후 Ventoy 파일을 실행하고,  Install 버튼을 눌러 USB에 Ventoy 설치 + + 이후 OS 설치 파일을 마운트 된 Ventoy Directory에 복사 + + + + PVE 설치 파일 준비 + + + + 공식 사이트: https://www.proxmox.com/en/downloads + + 파일을 다운 받은 후, Powershell에서  Get-FileHash -Path [file_path] -Algorithm SHA256 명령어를 통해 해시 값을 확인 + + 공식 사이트에 명시된 Hash값과 차이 없을 시 준비된 Ventoy USB에 ISO 파일 복사(무결성 확인) + + + + PVE 설치 + + WTR Pro 부팅 + + + + WTR Pro에 USB 연결 후 전원 켜기 + + Ventoy 화면에서 PVE 설치 파일을 선택 + + Boot in normal mode 선택 + + + + PVE 설치 + + + + Install Proxmox VE (Graphic) 선택 + + 약관 동의 + + 저장소 선택 (SSD, 기본 Ext4) + + 국가, 타임 존, 키보드 레이아웃 선택 + + + + South Korea + + Asia/Seoul + + U.S. English + + + + + + (root) 비밀번호, E-mail 지정 + + NIC 지정 + + + + Hostname(FQDN): pve.example.com + + IP(CIDR) + + Gateway/DNS: DHCP + + + + + + Summary 확인 후 Install 선택 + + CLI 상 https://[ip_address]:8006 확인 + + 브라우저 상에서 위 주소로 Proxmox Web UI 접속 + + + + 보안 경고시, 고급 -  안전하지 않음으로 이동 선택 + + + + + + + + PVE 초기 설정 + + PVE 로그인 + + + + User name: root + + Password: 설치 시 설정한 Password + + 언어: 한국어 - 국어 + + + + 사용자 이름 저장: 미체크 + + + + + + PVE Enterprise 라이센스 해지 + + Web UI 나 Shell 중에 하나 선택하여 라이센스 해지 + + + + Web UI + + + + 데이터센터 - pve - 업데이트 -  리포지토리 에서 다음 항목  비활성화 + + https://enterprise.proxmox.com/debian/ceph-quincy + + https://enterprise.proxmox.com/debian/pve + + + + + + Shell + + + + + + #!/bin/bash + +vi /etc/apt/sources.list.d/pve-enterprise.list + +#편집(주석 처리) + +#https://enterprise.proxmox.com/debian/ceph-quincy + +vi /etc/apt/sources.list.d/ceph.list + +#편집(주석 처리) + +#https://enterprise.proxmox.com/debian/pve + + + + + + + + + + 유효한 구독 없음 팝업 비활성화 + + + + Shell + + + + + + #!/bin/bash + +cd /usr/share/javascript/proxmox-widget-toolkit + +#파일 백업 + +cp proxmoxlib.js proxmoxlib.js.bak + +vi proxmoxlib.js + +#ESC+'/' 이후 No valid 검색 + 
+#.data.status.toLowerCase() !== 'active') 행을 + +#.data.status.toLowerCase() == 'active') 로 변경 + +systemctl restart pveproxy.service + +#서비스 재시작 + + + + 브라우저 Cache가 남아 있는 경우 계속 팝업이 활성화 될 수 있으므로 강화된 새로고침 필수 + + 추후 PVE 업데이트 시 다시 활성화 될 수 있으므로, 업데이트 후 재실행 + + + + + + + + 관리자 계정 생성 + + 계정 생성 + + + + Shell + + + + + + #!/bin/bash + +apt update + +apt install sudo -y + +apt update + +#sudo 명령어 설치 + +groupadd -g 2000 svadmins + +useradd -u 2000 -g 2000 -m -s /bin/bash -c "Proxmox Admin" -G sudo pveadmin + +#-m: 홈 디렉터리 자동 설정, -s: 셸, -c: 설명, -G sudo: sudo group 포함 + +passwd pveadmin + +#Password 설정 + +id pveadmin + +#결과 예시: uid=2000(pveadmin) gid=2000(pveadmins) groups=2000(pveadmins),27(sudo) + + + + + + + + Web UI + + + + 데이터센터 - 사용자 - 추가 + + + + 계정: pveadmin + + 영역: Linux PAM standard authentication + + email + + + + + + 비밀번호 + + + + 추가 + + + + + + 데이터센터 -  권한 - 추가 - 사용자 권한 + + + + 경로: /   + + 사용자: pveadmin + + 역할: administration + + + + + + + + + + + + root 계정 ssh 로그인 금지 + + + + Shell + + + + + + #!/bin/bash + +sudo cp /etc/ssh/sshd_config /etc/ssh/sshd_config.bak + +sudo vi /etc/ssh/sshd_config + +#PermitRootLogin no로 변경 + +sudo service sshd restart + + + + + + + + Web UI + + + + 데이터센터 - 사용자 - root 선택 - 수정 + + + + 활성화 됨 체크 해제 + + + + + + + + + + + + iptables 설정 + + + + Shell + + + + + + #!/bin/bash + +sudo apt install iptables-persistent #debian iptables 저장 패키지 + +#Save current IPv4 Rulse 선택 + +# 명령어 설명 + +# 1. 모든 규칙 초기화 + +iptables -F + +iptables -X + +iptables -t nat -F + +iptables -t nat -X + +iptables -t mangle -F + +iptables -t mangle -X + +# 2. 기본 정책 설정 (Default-Drop) + +iptables -P INPUT DROP + +iptables -P FORWARD DROP + +iptables -P OUTPUT ACCEPT + +# 3. 필수 규칙 추가 + +# 로컬호스트 허용 + +iptables -A INPUT -i lo -j ACCEPT + +# 이미 연결된 세션 허용 + +iptables -A INPUT -m conntrack --ctstate ESTABLISHED,RELATED -j ACCEPT + +# 4. 
서비스별 규칙 추가 + +# SSH (22/tcp) 허용 + +iptables -A INPUT -p tcp --dport 22 -j ACCEPT + +# 웹서버 (80/tcp, 443/tcp) 허용 + +iptables -A INPUT -p tcp --dport 80 -j ACCEPT + +iptables -A INPUT -p tcp --dport 443 -j ACCEPT + +# Ping (ICMP) 허용 + +iptables -A INPUT -p icmp -j ACCEPT + +# 5. 설정된 규칙 확인 + +iptables -L -v -n + +# 실제 적용 규칙 + +iptables -F + +iptables -X + +iptables -t nat -F + +iptables -t nat -X + +iptables -t mangle -F + +iptables -t mangle -X + +iptables -A INPUT -m conntrack --ctstate ESTABLISHED,RELATED -j ACCEPT # 연결된 세션 및 연관된 패킷 허용 # 연결된 세션 및 연관된 패킷 허용 *가장 먼저 적용* + +iptables -A INPUT -i lo -j ACCEPT #local host 접근 허용 + +iptables -A INPUT -p icmp -j ACCEPT + +iptables -A INPUT -s 192.168.0.6 -p tcp --dport 22 -j ACCEPT#콘솔 PC에서만 ssh 접근 허용 + +iptables -A INPUT -s 192.168.0.6 -p tcp --dport 8006 -j ACCEPT #콘솔 PC에서만 WEB UI 접근 허용 + +iptables -A INPUT -p tcp --dport 80 -j ACCEPT + +iptables -A INPUT -p tcp --dport 443 -j ACCEPT + +#*허용 규칙 이후 차단 규칙 적용* + +iptables -P INPUT DROP + +iptables -P FORWARD DROP + +iptables -P OUTPUT ACCEPT + +iptables -L -v -n + +sudo netfilter-persistent save 현재 설정 저장 + + + + + + + + + + + + PVE NIC 설정 + + vmbr + + + + PVE내에서 가상 L2 스위치 처럼 동작합니다. + + 일반적으로 vmbr에 할당된 IP 주소는 PVE의 host IP 입니다. + + vmbr은 L2 스위치이므로 IP주소를 할당하지 않아도 됩니다. (PVE Web UI에 접근하는 IP 주소 제외) + + VLAN을 지원합니다. + + STP(Spanning Tree Protocol; IEEE 802.1D)가 기본적으로 비활성화 되어 있습니다. + + + + vmbr 설정 + + 추후 OPNsense가 WAN/LAN 통신시 다른 물리 NIC로 통신하기 위하여 vmbr을 두 개 생성합니다. 이때, vmbr0은 이미 PVE의 웹 host용 vmbr로 생성되어 있습니다. 따라서 계획에 따라 vmbr1을 생성하고, vmbr1에 PVE의 Web용 host와 임시 Server를 연결합니다. + + + + 데이터센터 - pve - 시스템 - 네트워크 - vmbr0 - 수정 + + + + IPv4/CIDR 삭제 + + Gateway 삭제 + + + + + + 생성 + + + + 이름: vmbr1 + + IPv4/CIDR: vmbr0의 내용 + + Gateway: vmbr0의 내용 + + 자동시작 체크 + + 브릿지 포트: enp3s0 (NIC1) + + 생성 + + + + + + 설정 적용 + + + + STP + + STP는 Switch - Switch 간 브로드캐스트 발생 시 서로에게 무한하게 Broadcast 패킷을 보내는 루프로 인한 브로드캐스팅 스톰을 방지하기 위한 프로토콜입니다. 따라서, Switch가 STP를 지원하지 않으면 Switch - Switch간 이중화된 연결이 불가능합니다. 
따라서, 한 vmbr 안에 여러 NIC를 할당하려면, STP를 활성화하여야, 네트워크가 정상 작동합니다. (권장되는 방식은 LACP를 사용하여, Bonding을 구성하는 것입니다.) + + #!/bin/bash + +#STP 활성화 방법 + +#shell 접속 + +cat /etc/network/interfaces + +sudo vi /etc/network/interfaces + +iface vmbr0 inet static + + address 192.168.0.48/24 + + gateway 192.168.0.1 + + bridge-ports enp2s0 # NIC 할당 + + bridge-stp on #STP 활성화(이 부분을 off에서 on으로 바꿔야 STP 활성화) + +# bridge-fd 0 #이 부분은 STP 활성화 시, 주석 처리하는 것이 권장 됩니다. + +#:wq! + +sudo systemctl restart networking #network.d 재실행 + + 다만, OPNsense에 두 NIC에 같은 LAN 대역의 다른 IP를 부여하여도, OPNsense 내에서 두 NIC를 각각 다른 End point 취급하므로(Switch 처럼 작동하지 않으므로) 정상 작동합니다. + + 결론 + + + + WTR Pro에 PVE 설치 완료 + + PVE 원격 접속 가능 + + admin 계정 생성 및 root 계정 설정 + + + + root 계정 ssh 접근 비활성화 + + root 계정 Web UI 접근 비활성화 + + pveadmin 계정 생성(2000:2000, sudo) + + Web UI 상 pveadmin 권한 부여 + + + + + +  vmbr 생성 및 설정 + + + + STP 비활성화로 인한 주의점 명시 + + + + + + + + + + 2025-06-21 - 초안 작성 + + 2025-06-23 - vmbr 생성 및 설정, 주의점 추가 + + 2025-06-25 - iptables-persistent 설치 및 규칙 추가 + +4.3. PVE 상 VM/LXC 설치법 +목표 + + + + PVE 상 VM/LXC 설치법을 확인합니다. + + PVE 상 임시 Server (Debian)을 설치합니다. + + 설치 절차 및 방법을 순서에 따라 명확하게 문서로 기록합니다. + + + + 요구 사항 + + + + 임시 Server(Debian)을 PVE에 설치 + + + + 수행 작업 + + VM/LXC 설치 전 사전 준비 + + VM과 LXC간 차이점 + + VM은 PVE와 분리된 별도의 OS입니다. PVE를 통해서 가상화된 하드웨어 위에 커널부터 격리된 OS를 설치합니다. 이와 반대로 LXC는 PVE와 커널을 공유하므로, 별도의 OS 설치 파일 없이 PVE 위에서 바로 생성이 가능합니다. + + 설치 파일 준비 (VM) + + Debian 설치 파일 준비 + + + + 공식 사이트: https://cdimage.debian.org/debian-cd/current/amd64/iso-cd/ + + Debian-netinst ISO 파일과 checksum 파일 다운로드 + + 파일을 다운 받은 후, Powershell에서  Get-FileHash -Path [file_path] -Algorithm SHA256 명령어를 통해 Hash 값을 확인 + + checksum 파일에 명시된 Hash값과 차이 없을 시 ISO 파일 관리용 USB에 Debian-netinst 파일 복사(무결성 확인) + + + + 기타 설치 파일 준비 + + PVE는 Linux 뿐 아니라, Unix, Windows 등 다양한 OS를 지원하므로 VM 추가 필요시 해당 설치 파일을 구하여 설치할 수 있습니다. 
+ + 설치 + + VM 설치 + + ISO 파일 업로드 + + + + 데이터센터 - PVE -  local(pve) - ISO 이미지 - 업로드 + + 파일 선택 + + + + OS 설치 파일 선택 + + 해시 알고리즘 선택 + + 해시 값 붙여 넣기 + + + + + + 업로드 + + + + TASK OK 확인 + + + + + + + + VM 설치 + + + + 데이터센터 - PVE 우클릭 + + VM 생성 클릭 + + 일반 설정 + + + + 노트: pve + + VM ID: 서버별 대표 UID와 동일하게 관리 - 임시 Server: 2999 + + 이름: temp-app + + 부팅시 시작 체크 + + 시작/종료 순서, 시작 지연, 종료 시간 제한: 추후 VM이 늘어날 때 설정 + + + + + + OS 설정 + + + + CD/DVD 디스크 이미지 파일(ISO) 사용 + + + + ISO 이미지 선택 + + + + + + 게스트 OS + + + + 유형: Linux (설치할 OS 종류에 따라) + + 버전: 6.x - 2.6 Kernel (설치할 OS 종류에 따라) + + + + + + + + + + 시스템 설정 + + + + 기본 설정 + + + + + + 디스크 설정 + + + + 스토리지: local-lvm 선택 + + 삭제(Discard): 체크 (파일이 삭제될 때 마다 SSD에 블록이 비었다고 알리는 형식, 실시간 TRIM) + + 디스크 크기: 59.60; 64GB (VM에 할당할 용량을 GiB 단위로 입력) + + + + + + PVE는 기본 용량 단위를 iB 체계를 사용하므로, B체계 사용 시 변환 작업 필요 + + + + 추후 VM - 하드웨어 - 디스크 Action 에서 디스크 작업 크기조정으로 용량 변경 가능, VM OS 내 파티션 변경 작업 별도 + + + + + + 버스/디바이스: VirtlO Block (기본 값, 가장 좋은 성능)/VM 내부 디스크의 순서 + + 디스크 형식: Raw Disk Image (약간의 성능상 우위, 고급기능 X) + + + + + +  CPU 설정 + + + + 소켓: 1 (pCPU 갯수, 1개)  + + Cores: 2 (원하는 vCPU 갯수 만큼) + + vCPUs: 2 (Socket x Cores) + + CPU 제한: (추후 VM 여러 개 생성시 지정) + + + + + + Memory 설정 + + + + Memory: 5722(약 6GB) (계획에 따라. 
MiB 단위로 입력) + + + + + + 네트워크 설정 + + + + 브릿지: vmbr1 (계획에 따라) + + 모델: VirtIO + + VLAN 태그: 없음 (계획에 따라) + + 방화벽 체크 해제 (추후 iptables 등 UFW 사용) + + 추후 VM - 하드웨어 탭에서 네트워크 인터페이스 추가 가능 + + + + + + 확인 + + + + 생성 후 시작 체크 해제 + + + + + + VM - 하드웨어 - 네트워크 디바이스 에서 생성된 MAC 주소 확인하여 DHCP 예약 추가 + + VM - 옵션 - 부팅 순서 변경 + + + + virtio(HDD/SSD) + + ide(CD/DVD) + + net(network)  + + + + 해당 설정 미실행시, 부팅 불가 현상 발생 가능 + + + + + + + + + + LXC 설치 + + LXC 템플릿 다운로드 + + + + 데이터센터 - PVE -  local(pve) - LXC 템플릿 - 템플릿 + + 패키지 선택 + + + + debian 선택 + + + + + + 다운로드 + + + + TASK OK 확인 + + + + + + + + LXC 설치(VM과 다른 부분만) + + + + 데이터센터 - PVE 우클릭 + + CT 생성 클릭 + + 일반 설정 + + + + CT ID: 서버별 대표 UID와 동일하게 관리 + + 권한 없는 컨테이너 체크 (unprivileged LXC) + + 부팅시 시작 체크 + + 시작/종료 순서, 시작 지연, 종료 시간 제한: 추후 VM이 늘어날 때 설정 + + 비밀번호 입력 + + SSH 공개키 입력 (선택 사항) + + + + + + 템플릿 + + + + 템플릿 선택 + + + + + + 디스크 설정 + + + + 마운트 옵션: Discard + + + + + + CPU + + + + Cores만 선택 + + + + + + 네트워크 + + + + 이름: eth0 + + 브릿지: vmbr1 + + IPv4: Static/DHCP 선택 + + + + + + DNS + + 확인 + + + + 생성 후 시작 체크 해제 + + + + + + LXC - 하드웨어 - 네트워크 디바이스 에서 생성된 MAC 주소 확인하여 DHCP 예약 추가 + + + + 결론 + + + + 임시 Server VM 설치 준비 완료 + + + + + + 2025-06-24 - 초안 작성 완료 + +5. 임시 Server 설정 + +5.x. NPM 설정 +목표 + + 임시 Server에 NPM을 설치합니다. + 설치 절차 및 방법을 순서에 따라 명확하게 문서로 기록합니다. 
+ + 요구 사항 + + NPM 설치 + NPM에 내부망 접속 + NAT 설정 + SSL/TLS 이용을 위한 인증서를 적용 + + 수행 작업 + docker-compose-npm.yml 작성 + 도커 설정 + + 이미지 + + jc21/nginx-proxy-manager:latest + + +  포트 + + 81:81 + 80:80 + 443:443 + + + 볼륨 마운트 + + + /home/temp-app/docker/nginx-proxy-manager/data:/data + + /home/temp-app/docker/nginx-proxy-manager/ + + + + 환경 변수 + + 없음 + + + DB 연동시 다음과 같은 환경변수 설정 필요(MYSQL 부분은, POSTGRES 사용 시 POSTGRES로 대체 가능) + + DB_MYSQL_HOST + DB_MYSQL_PORT + DB_MYSQL_USER + DB_MYSQL_PASSWORD + DB_MYSQL_NAME + + + 하지만 현재 임시 서버 환경이므로 간단한 SQLite를 사용(환경 변수 필요 없음) + + docker-compose.yml 작성 + + 도커 컨테이너를 위한 디렉터리 생성 + + mkdir -p /home/temp-app/docker/nginx-proxy-manager/{data,letsencrypt,websites} + + + 디렉토리 권한 변경 + + chmod -R 755 /home/temp-app/docker/nginx-proxy-manager + chmod -R 700 /home/temp-app/docker/nginx-proxy-manager/letsencrypt + + + /home/temp-app/docker/nginx-proxy-manager 위에서 docker-compose-npm.yml 작성 + + docker-compose.yml은 'tab'을 인식하지 않으므로 'space'를 사용  + + services: +app: +image: 'jc21/nginx-proxy-manager:latest' +restart: unless-stopped +ports: +# These ports are in format : +- '80:80' # Public HTTP Port +- '443:443' # Public HTTPS Port +- '81:81' # Admin Web Port +# environment: +# Uncomment this if you want to change the location of +# the SQLite DB file within the container +# Mysql/Maria connection parameters: +#DB_MYSQL_HOST: "db" +#DB_MYSQL_PORT: 3306 +#DB_MYSQL_USER: "npm" +#DB_MYSQL_PASSWORD: "npm" +#DB_MYSQL_NAME: "npm" +# Postgres parameters: +#DB_POSTGRES_HOST: 'db' +#DB_POSTGRES_PORT: '5432' +#DB_POSTGRES_USER: 'npm' +#DB_POSTGRES_PASSWORD: 'npmpass' +#DB_POSTGRES_NAME: 'npm' +volumes: +- ./data:/data +- ./letsencrypt:/etc/letsencrypt +#the directory for static web files which you want to host +- ./websites:/mnt/user/appdata/NginxProxyManager/websites + + + + + 도커 실행 + + docker-compose.yml 파일이 있는 곳에서 docker compose -f docker-compose-npm.yml up -d 명령어 실행 + + iptables 설정 변경 + + sudo iptables -A INPUT -s 192.168.0.6 -p tcp --dport 81 -j ACCEPT # 콘솔 PC에서만 웹 UI 접근 
허용 + sudo iptables -L -v -n # 설정 확인 + sudo netfilter-persistent save # 설정 적용 + + 기존에 iptables 설정에서 이미 -p tcp --dport 443, -p tcp --dport 80은 이미 허용 + + + 웹 UI 접속 + 초기 설정 + + [temp-app_ip]:81로 접속 + 초기 계정: admin@example.com/changeme + Fullname, nickname, password 입력하여 변경 + 비밀번호 변경 + + SSL/TLS 인증서 발급 + + 인증서 발급 전 T5004의 포트 포워딩 규칙 추가 + + 443포트를 temp-app 서버로 포트 포워딩 + + + 메뉴 - SSL certificates - add SSL certificate + + Domain names: example.com + email adress for Let's Encrypt + Use DNS Challenge + + DNS providor: cloudflare + Credentials File Content: API 키 입력(DDNS 사용 시 사용했던 것) + + + + + 메뉴 - proxy hosts - add new proxy host  + + 결과 + +5.4. DB 설치 +목표 + + 임시 Server에 DBMS를 설치합니다. + 설치 절차 및 방법을 순서에 따라 명확하게 문서로 기록합니다. + + 요구 사항 + + MariaDB 설치 + PostgreSQL 설치 + DBMS 계정 설정 + Redis 설치 + + 수행 작업 + 결과 + +5.1. Debian 설치 +목표 + + + + PVE 상 Debian을 설치합니다. + + 설치 절차 및 방법을 순서에 따라 명확하게 문서로 기록합니다. + + + + 요구 사항 + + + + Debian VM을 PVE 위에 설치 + + Debian의 초기 설정 + + + + 수행 작업 + + Debian 설치 + + VM 실행 + + + + 데이터센터 - pve - 2999(temp-app) - 콘솔 + + Start now + + + + Debian 설치 + + + + Debian 설치 화면  Graphic Install 클릭 + + 언어 및 위치 설정 + + + + 언어: korean + + 위치: 대한민국 + + 키보드 설정: 한국어 + + + + + + 네트워크 설정 + + + + 호스트 이름: temp-app + + 도메인 네임: temp-app.example.com + + + + + + 사용자 계정 설정 + + + + root 암호 설정 + + 사용자 이름: temp-app + + 사용자 암호 설정 + + + + + + 디스크 설정 + + + + 디스크 파티션 설정: 자동 - 디스크 전체 사용 + + 가상 디스크 선택 + + 파티션: 모두 한 파티션에 사용 (추후 다른 서버 구축할 때 NFS 적용시, 분리 필요) + + 파티션 나누기를 마치고 바뀐 사항을 디스크에 쓰기 + + 바뀐 점을 디스크에 쓰시겠습니까?: 예 + + + + + + 패키지 관리자 + + + + 추가 설치 미디어를 검사하겠습니까?: 아니요 + + 지역: 대한민국 + + 아카이브 미러 사이트: deb.debian.org + + http 프록시 설정: 없음 + + 패키지 인기 투표: 안함 + + + + + + 소프트웨어 선택 + + + + SSH Server 체크 + + 표준 시스템 유틸리티 체크 + + 나머지 전부 체크 해제 + + + + + + GRUB 부트로더 + + + + 사용 + + 장치: 메인 저장소(/dev/vda 등) + + + + + + ISO 파일 제거 + + + + 데이터센터 - pve -  2999(temp-app)  -  하드웨어 - CD/DVD 드라이브 - 수정 + + 미디어 사용 안함 + + + + + + VM 재시작 + + + + 데이터센터 - pve - 2999(temp-app) - 콘솔 + + 계속 + + + + + + + + Debian 초기 설정 + + Debian 로그인 + + + + 
root/password + + + + 필수 패키지 설치 + + #!/bin/bash + +apt update + +apt install sudo curl iptables-persistent #sudo와 curl, iptables-persistent 기본 설치 안되어 있음 + +apt update + + 계정 정보 확인 및 수정 + + #!/bin/bash + +#계정 정보 확인 + +cat /etc/passwd + +#temp-app 1000:1000 처음 설치시 설정한 계정 temp-app이 1000:1000이므로 수정 + +groupadd -g 2000 svadmins #svadmins 그룹 생성 + +usermod -u 2999 -g 2000 -s /bin/bash -c "Temp-app Admin" -G sudo temp-app + +#-u [UID] -g [GID] -s [shell] -c [discription] -G [additional group] user_name + +#User name을 변경하고 싶을 시 -l [user_name]을 입력한다. + +id temp-app + +#결과 예시: uid=2999(temp-app) gid=2000(pveadmins) groups=2000(pveadmins),27(sudo) + + root 계정 ssh 로그인 금지 + + + + sshd_config 확인 + + + + + + #!/bin/bash + +cp /etc/ssh/sshd_config /etc/ssh/sshd_config.bak + +cat /etc/ssh/sshd_config + +#PermitRootLogin 옵션 확인 후 명시적인 차단 + +vi /etc/ssh/sshd_config + +#PermitRootLogin no + +sudo service sshd restart + + + + + + + + ssh 접속하여 root 로그인 가능 여부 확인 + + + + ssh [id]@[ip] + + 접속 불가 확인(permission denied) + + + + + + ssh 접속하여 일반 계정 로그인 가능 여부 확인 + + + + ssh [id]@[ip] + + 접속 가능 확인 + + + + + + + + iptables 설정 + + #!/bin/bash + +sudo apt install iptables-persistent #debian iptables 저장 패키지 + +#Save current IPv4 Rulse 선택 + +sudo apt update + +# 명령어 설명 + +# 1. 모든 규칙 초기화 + +iptables -F + +iptables -X + +iptables -t nat -F + +iptables -t nat -X + +iptables -t mangle -F + +iptables -t mangle -X + +# 2. 기본 정책 설정 (Default-Drop) + +iptables -P INPUT DROP + +iptables -P FORWARD DROP + +iptables -P OUTPUT ACCEPT + +# 3. 필수 규칙 추가 + +# 로컬호스트 허용 + +iptables -A INPUT -i lo -j ACCEPT + +# 이미 연결된 세션 허용 + +iptables -A INPUT -m conntrack --ctstate ESTABLISHED,RELATED -j ACCEPT + +# 4. 서비스별 규칙 추가 + +# SSH (22/tcp) 허용 + +iptables -A INPUT -p tcp --dport 22 -j ACCEPT + +# 웹서버 (80/tcp, 443/tcp) 허용 + +iptables -A INPUT -p tcp --dport 80 -j ACCEPT + +iptables -A INPUT -p tcp --dport 443 -j ACCEPT + +# Ping (ICMP) 허용 + +iptables -A INPUT -p icmp -j ACCEPT + +# 5. 
설정된 규칙 확인 + +iptables -L -v -n + +# 실제 적용 규칙 + +iptables -F + +iptables -X + +iptables -t nat -F + +iptables -t nat -X + +iptables -t mangle -F + +iptables -t mangle -X + +iptables -A INPUT -m conntrack --ctstate ESTABLISHED,RELATED -j ACCEPT # 연결된 세션 및 연관된 패킷 허용 # 연결된 세션 및 연관된 패킷 허용 *가장 먼저 적용* + +iptables -A INPUT -i lo -j ACCEPT #local host 접근 허용 + +iptables -A INPUT -p icmp -j ACCEPT + +iptables -A INPUT -s 192.168.0.6 -p tcp --dport 22 -j ACCEPT#콘솔 PC에서만 ssh 접근 허용 + +iptables -A INPUT -p tcp --dport 80 -j ACCEPT + +iptables -A INPUT -p tcp --dport 443 -j ACCEPT + +#*허용 규칙 이후 차단 규칙 적용* + +iptables -P INPUT DROP + +iptables -P FORWARD DROP + +iptables -P OUTPUT ACCEPT + +iptables -L -v -n + +sudo netfilter-persistent save 현재 설정 저장 + + 결론 + + + + PVE 상 Debian 설치 완료 + + root 계정 및 local 계정 설정 + + + + ssh root 계정 접근 차단 + + + + + + 필수 패키지(iptables, sudo, curl) 설치 완료 + + iptables 규칙 적용 + + + + + + 2025-06-25 - 초안 작성 + +5.2. DDNS 설정 +목표 + + + + 임시 Server에서 Cloudflare API를 통한 DDNS를 설정합니다. + + 설정 절차 및 방법을 순서에 따라 명확하게 문서로 기록합니다. 
+ + + + 요구 사항 + + + + Cloudflare API를 통한 DDNS 설정 shell script 프로그램 작성 + + + + 도메인 등록, 갱신, 삭제 기능 구현 + + + + + + cron을 통한 shell script 자동 실행 + + + + 수행 작업 + + Cloudflare Domain 구입 + + + + https://www.cloudflare.com 접속 + + 로그인 후 대시보드 로 이동 + + 도메인 등록 - 도메인 등록 으로 이동하여 도메인 구입 + + 도메인 등록 - 도메인 관리 로 이동하여 구매 도메인 활성 확인 + + + + DDNS 스크립트 작성 + + API 키 확인 + + + + https://www.cloudflare.com 접속 + + 내 프로필 - API 토큰 으로 이동 + + 토큰 생성 - 영역 DNS 편집 - 템플릿 사용 + + 권한 + + + + 영역 - DNS - 편집 + + + + + + 영역 리소스 + + + + 포함 - 특정 영역 - Domain + + 포함 - + + + + + + 요약 계속 - 토큰 생성 + + 이 토큰 테스트에 있는 명령어 실행하여 정상 작동하는 지 확인 후 복사 + + + + + + #!/bin/bash + + curl "https://api.cloudflare.com/client/v4/user/tokens/verify" \ + + + +-H "Authorization: Bearer $API_KEY" + + #결과 예시: {"result":{"id":"---","status":"active"},"success":true,"errors":[],"messages":[{"code":10000,"message":"This API Token is valid and active","type":null}]} + + + + + + + + 계정 홈 - 사용할 도메인 클릭 + + + + API - 영역 ID(Zone_ID) 확인 후 복사 + + + + + + + + Cloudflare API 스크립트 확인 + + + + https://developers.cloudflare.com/api 접속 + + Ctrl + k 눌러 DNS Record 검색 후 이동 + + API 스크립트 확인 + + + + + + #!/bin/bash + +#List DNS Records + +curl "https://api.cloudflare.com/client/v4/zones/$ZONE_ID/dns_records?name=$DOMAIN" \ + +-H "Authorization: Bearer $API_KEY" + +#결과가 없을 시 "result": [], success: "true", ... + +#결과 형식 "result": [{..., ...}, {..., ...},...], success: "true", ... 
+ +#Create DNS Record + +curl "https://api.cloudflare.com/client/v4/zones/$ZONE_ID/dns_records" \ + + -H 'Content-Type: application/json' \ + +-H "Authorization: Bearer $API_KEY" \ + + -d "{ + + \"name\": \"$DOMAIN\", + + \"ttl\": 3600, + + \"type\": \"A\", + + \"comment\": \"Domain verification record\", + + \"content\": \"$IP\", + + \"proxied\": true + + }" + +#Overwrite DNS Record + +curl "https://api.cloudflare.com/client/v4/zones/$ZONE_ID/dns_records/$DNS_RECORD_ID" \ + + -X PUT \ + + -H 'Content-Type: application/json' \ + +-H "Authorization: Bearer $API_KEY" \ + + -d "{ + + \"name\": \"$DOMAIN\", + + \"ttl\": 3600, + + \"type\": \"A\", + + \"comment\": \"Domain verification record\", + + \"content\": \"$IP\", + + \"proxied\": true + + }" + +#Delete DNS Record + +curl "https://api.cloudflare.com/client/v4/zones/$ZONE_ID/dns_records/$DNS_RECORD_ID" \ + + -X DELETE \ + + -H 'Content-Type: application/json' \ + +-H "Authorization: Bearer $API_KEY" \ + + }" + + + + + + + + + + Shell script 작성 + + + + Script 파일을 생성합니다. + + + + touch /home/temp-app/ddns/ddns.sh + + touch /home/temp-app/ddns/ddns.conf + + + + + + Script 파일 실행 권한을 부여합니다. + + + + chmod 744 /home/temp-app/ddns/ddns.sh + + + + + + 설정 파일에 소유자 읽기 권한만 부여합니다. + + + + chmod 600 /home/temp-app/ddns/ddns.conf + + + + + + Script 파일을 작성합니다. + + + + getopts 를 활용하여 Domain, proxied, ttl, 삭제 여부 등을 매개변수로 받습니다. + + 함수를 활용하여, Cloudflare DNS API기능을 정의합니다. (조회, 생성, 업데이트, 삭제) + + 최대한 범용성 있는 스크립트를 만듭니다. + + vi /home/temp-app/ddns/ddns.sh + + + + #!/bin/bash + +DOMAIN="" + +TTL=180 # basic value + +C_TTL=86400 + +PROXIED="false" # basic value + +DELETE_FLAG=0 + +CURRENT_IP="" + +FLAG="" + +# usage func + +usage() { + + echo "Usage: $0 -d \"domain\" [-t \"ttl\"] [-p] [-r] [-c]" + + echo " -d : Specify the domain to update." + + echo " -t : Specify the TTL (Time to Live)." + + echo " -p : Specify the cloudflare proxy to use." + + echo " -r : Delete the DNS record." 
+ + exit 1 + +} + +# using getopts to get arguemnts + +while getopts "d:t:pr" opt; do + + case $opt in + + d) + + DOMAIN="$OPTARG" + + ;; + + t) + + TTL="$OPTARG" + + ;; + + p) + + PROXIED="true" + + ;; + + r) + + DELETE_FLAG=1 + + ;; + + \?) # unknown options + + echo "Invalid option: -$OPTARG" >&2 + + usage + + ;; + + :) # paramenter required option + + echo "Option -$OPTARG requires an argument." >&2 + + usage + + ;; + + esac + +done + +# get option and move to parameters + +shift $((OPTIND - 1)) + +# check necessary option + +if [ -z "$DOMAIN" ]; then + + echo "Error: -d option (domain) is required." >&2 + + usage + +fi + +if ! [[ "$TTL" =~ ^[0-9]+$ ]] || [ "$TTL" -le 0 ]; then + + echo "Error: -t option (ttl) requires a number above 0." >&2 + + usage + +fi + +# change directory for cron + +cd "$(dirname "$0")" + +# make config directory and log directory + +if [ ! -d "./config" ]; then + + mkdir ./config + +fi + +if [ ! -d "./log" ]; then + + mkdir ./log + +fi + +LOG_FILE="./log/ddns_$(date "+%Y-%m-%d").log" + +CONF_FILE="./config/ddns.conf" + +# log func + +log() + +{ + + local text="$1" + + echo "---------" >> "$LOG_FILE" + + echo -e "$(date "+%Y-%m-%d %H:%M:%S"): $text" >> $LOG_FILE + +} + +# check and create log file + +if [ ! -f "$LOG_FILE" ]; then + + log "Notice: log file is created" + +fi + +# check and create conf file + +if [ ! -f "$CONF_FILE" ]; then + + log "Error: Set ./config/ddns.conf first" + + echo -e "#!/bin/bash" >> $CONF_FILE + + echo -e "# --- ddns.conf ---" >> $CONF_FILE + + echo -e "ZONE_ID=\"\"" >> $CONF_FILE + + echo -e "API_KEY=\"\"" >> $CONF_FILE + + chmod 600 "$CONF_FILE" + + exit + +fi + +# check environmental value + +source "$CONF_FILE" + +if [ -z "$ZONE_ID" -o -z "$API_KEY" -o -z "$DOMAIN" ]; then + + log "Error: There is no correct option in \"ddns.conf\"\nZONE_ID, API_KEY are needed" + + exit + +fi + +# check package + +if ! command -v curl &> /dev/null; then + + log "Error: curl package is needed" + + exit + +fi + +if ! 
command -v jq &> /dev/null; then + + log "Error: jq package is needed" + + exit + +fi + +# API options + +URL="https://api.cloudflare.com/client/v4/zones/$ZONE_ID/dns_records" + +CONTENT_TYPE="Content-Type: application/json" + +AUTHORIZATION="Authorization: Bearer $API_KEY" + +CURRENT_IP=$(curl -sf "https://ifconfig.me") ||\ + +CURRENT_IP=$(curl -sf "https://ifconfig.kr") ||\ + +CURRENT_IP=$(curl -sf "https://api.ipify.org") + +if [ "$CURRENT_IP" == "" ]; then + + log "Error: Can't get an IP" + + exit + +fi + +get_dns_record() + +{ + + local type="$1" + + local name="$2" + + local response="$( + + curl -s "$URL?type=$type&name=$name"\ + + -H "$CONTENT_TYPE"\ + + -H "$AUTHORIZATION")" + + if [ "$(echo "$response" | jq -r '.success')" == "false" ]; then + + log "Error: Can't get dns record\"Reason: $response" + + exit + + else + + echo "$response" + + fi + +} + +create_dns_record() + +{ + + local type="$1" + + local name="$2" + + local ttl="$3" + + local comment="$4" + + local content="$5" + + local response="$( + + curl -s "$URL"\ + + -X POST\ + + -H "$CONTENT_TYPE"\ + + -H "$AUTHORIZATION"\ + + -d "{ + + \"name\": \"$name\", + + \"ttl\": $ttl, + + \"type\": \"$type\", + + \"comment\": \"$comment\", + + \"content\": \"$content\", + + \"proxied\": $PROXIED + + }")" + + if [ "$(echo "$response" | jq -r '.success')" == "false" ]; then + + log "Error: Can't create dns record\"Reason: $response" + + exit + + else + + echo "$response" + + fi + +} + +update_dns_record() + +{ + + local type="$1" + + local name="$2" + + local ttl="$3" + + local comment="$4" + + local content="$5" + + local id="$6" + + local response=$( + + curl -s "$URL/$id"\ + + -X PUT\ + + -H "$CONTENT_TYPE"\ + + -H "$AUTHORIZATION"\ + + -d "{ + + \"name\": \"$name\", + + \"ttl\": $ttl, + + \"type\": \"$type\", + + \"comment\": \"$comment\", + + \"content\": \"$content\", + + \"proxied\": $PROXIED + + }") + + if [ "$(echo "$response" | jq -r '.success')" == "false" ]; then + + log "Error: Can't update dns 
record\"Reason: $response" + + exit + + else + + echo "$response" + + fi + +} + +delete_dns_record() + +{ + + local type="$1" + + local id="$2" + + local response=$( + + curl -s "$URL/$id"\ + + -X DELETE\ + + -H "$CONTENT_TYPE"\ + + -H "$AUTHORIZATION" + + ) + + if [ "$(echo "$response" | jq -r '.success')" == "false" ]; then + + log "Error: Can't delete dns record\"Reason: $response" + + exit + + else + + echo "$response" + + fi + +} + +A_DNS_RECORD=$(get_dns_record "A" "$DOMAIN") + +S_DNS_RECORD=$(get_dns_record "cname" "*.$DOMAIN") + +W_DNS_RECORD=$(get_dns_record "cname" "www.$DOMAIN") + +if [ "$DELETE_FLAG" -eq 1 ]; then # Delete flag + + FLAG="false" + + if [ "$(echo $A_DNS_RECORD | jq -r '.result | length')" -eq 1 ]; then + + A_DNS_ID="$(echo $A_DNS_RECORD | jq -r '.result[0].id')" + + delete_dns_record "A" "$A_DNS_ID" + + log "Delete: root DNS record is deleted" + + FLAG="true" + + fi + + if [ "$(echo $S_DNS_RECORD | jq -r '.result | length')" -eq 1 ]; then + + S_DNS_ID="$(echo $S_DNS_RECORD | jq -r '.result[0].id')" + + delete_dns_record "cname" "$S_DNS_ID" + + log "Delete: sub DNS record is deleted" + + FLAG="true" + + fi + + if [ "$(echo $W_DNS_RECORD | jq -r '.result | length')" -eq 1 ]; then + + W_DNS_ID="$(echo $W_DNS_RECORD | jq -r '.result[0].id')" + + delete_dns_record "cname" "$W_DNS_ID" + + log "Delete: www DNS record is deleted" + + FLAG="true" + + fi + + if [ "$FLAG" == "false" ]; then + + log "Notice: Nothing is Deleted. 
There are no DNS records" + + fi + + exit + +fi + +if [ "$(echo $A_DNS_RECORD | jq -r '.result | length')" -eq 1 ]; then # root DNS record exist + + A_DNS_ID="$(echo $A_DNS_RECORD | jq -r '.result[0].id')" + + A_DNS_CONTENT="$(echo $A_DNS_RECORD | jq -r '.result[0].content')" + + A_DNS_TTL="$(echo $A_DNS_RECORD | jq -r '.result[0].ttl')" + + A_DNS_PROXIED="$(echo $A_DNS_RECORD | jq -r '.result[0].proxied')" + + if [ "$A_DNS_CONTENT" != $CURRENT_IP -o "$A_DNS_TTL" != "$TTL" -o "$A_DNS_PROXIED" != "$PROXIED" ]; then + + update_dns_record "A" "$DOMAIN" "$TTL" "$(date "+%Y-%m-%d %H:%M:%S"): root domain from ddns.sh" "$CURRENT_IP" "$A_DNS_ID" + + log "Update: Root DNS record is successfully changed\nDomain: $DOMAIN\nIP: $A_DNS_CONTENT to $CURRENT_IP\nTTL: $A_DNS_TTL to $TTL\nproxied: $A_DNS_PROXIED to $PROXIED" + + else + + log "Notice: Root DNS record is not changed\nDomain: $DOMAIN\nIP: $CURRENT_IP\nTTL: $TTL\nproxied: $PROXIED" + + fi + +else # root DNS record does not exist + + create_dns_record "A" "$DOMAIN" "$TTL" "$(date "+%Y-%m-%d %H:%M:%S"): root domain from ddns.sh" "$CURRENT_IP" + + log "Create: Root DNS record is successfully created\nDomain: $DOMAIN\nIP: $CURRENT_IP\nTTL: $TTL\nproxied: $PROXIED" + +fi + +if [ "$(echo $S_DNS_RECORD | jq -r '.result | length')" -eq 1 ]; then # sub DNS record exist + + S_DNS_ID="$(echo $S_DNS_RECORD | jq -r '.result[0].id')" + + S_DNS_CONTENT="$(echo $S_DNS_RECORD | jq -r '.result[0].content')" + + S_DNS_TTL="$(echo $S_DNS_RECORD | jq -r '.result[0].ttl')" + + S_DNS_PROXIED="$(echo $S_DNS_RECORD | jq -r '.result[0].proxied')" + + if [ "$S_DNS_CONTENT" != "$DOMAIN" -o "$S_DNS_TTL" != "$C_TTL" -o "$S_DNS_PROXIED" != "$PROXIED" ]; then + + update_dns_record "cname" "*.$DOMAIN" "$C_TTL" "$(date "+%Y-%m-%d %H:%M:%S"): sub domain from ddns.sh" "$DOMAIN" "$S_DNS_ID" + + log "Update: Sub DNS record is successfully changed\nDomain: $S_DNS_CONTENT to *.$DOMAIN\ncname: $DOMAIN \nTTL: $S_DNS_TTL to $C_TTL\nproxied: $S_DNS_PROXIED to 
$PROXIED" + + else + + log "Notice: Sub DNS record is not changed\nDomain: *.$DOMAIN\ncname: $DOMAIN\nTTL: $C_TTL\nproxied: $PROXIED" + + fi + +else # sub DNS record does not exist + + create_dns_record "cname" "*.$DOMAIN" "$C_TTL" "$(date "+%Y-%m-%d %H:%M:%S"): sub domain from ddns.sh" "$DOMAIN" + + log "Create: Sub DNS record is successfully created\nDomain: *.$DOMAIN\ncname: $DOMAIN\nTTL: $C_TTL\nproxied: $PROXIED" + +fi + +if [ "$(echo $W_DNS_RECORD | jq -r '.result | length')" -eq 1 ]; then # www DNS record exist + + W_DNS_ID="$(echo $W_DNS_RECORD | jq -r '.result[0].id')" + + W_DNS_CONTENT="$(echo $W_DNS_RECORD | jq -r '.result[0].content')" + + W_DNS_TTL="$(echo $W_DNS_RECORD | jq -r '.result[0].ttl')" + + W_DNS_PROXIED="$(echo $W_DNS_RECORD | jq -r '.result[0].proxied')" + + if [ "$W_DNS_CONTENT" != "$DOMAIN" -o "$W_DNS_TTL" != "$C_TTL" -o "$W_DNS_PROXIED" != "$PROXIED" ]; then + + update_dns_record "cname" "www.$DOMAIN" "$C_TTL" "$(date "+%Y-%m-%d %H:%M:%S"): www domain from ddns.sh" "$DOMAIN" "$W_DNS_ID" + + log "Update: www DNS record is successfully changed\nDomain: $W_DNS_CONTENT to www.$DOMAIN\ncname: $DOMAIN\nTTL: $W_DNS_TTL to $C_TTL\nproxied: $W_DNS_PROXIED to $PROXIED" + + else + + log "Notice: www DNS record is not changed\nDomain: www.$DOMAIN\ncname: $DOMAIN\nTTL: $C_TTL\nproxied: $PROXIED" + + fi + +else # www DNS record does not exist + + create_dns_record "cname" "www.$DOMAIN" "$C_TTL" "$(date "+%Y-%m-%d %H:%M:%S"): www domain from ddns.sh" "$DOMAIN" + + log "Create: www DNS record is successfully created\nDomain: www.$DOMAIN\ncname: $DOMAIN\nTTL: $C_TTL\nproxied: $PROXIED" + +fi + + + + vi /home/temp-app/ddns/config/ddns.conf   + + + + #!/bin/bash + +#ddns.conf + +# --- Cloudflare DDNS update script --- + +# --- ddns.conf --- + +#"Your Cloudflare Zone ID" + +ZONE_ID="" + +#"Your Cloudflare API key" + +API_KEY="" + + + + + + + + + + Shell script 실행 환경 만들기 + + 해당 쉘 스크립트는 jq라는 jason 파일 처리 명령어가 필요합니다. 따라서 스크립트를 실행하기 앞서 jq 명령어를 설치합니다. 
+ + #!/bin/bash + +sudo apt update + +sudo apt install jq -y + +sudo apt update + + + + Shell script 실행 및 cron 등록하기 + + + + 스크립트를 실행합니다. + + + + /home/temp-app/ddns/ddns.sh -d example.com + + + + + + ddns.log 파일을 확인합니다. + + + + cat /home/temp-app/ddns/log/[date]_ddns.log + + + + --------- + +2025-06-27 02:49:02: Notice: log file is created + +--------- + +2025-06-27 02:49:04: Create: Root DNS record is successfully created + +Domain: example.com + +IP: 0.0.0.0 + +TTL: 300 + +proxied: true + +--------- + +2025-06-27 02:49:04: Create: Sub DNS record is successfully created + +Domain: *.example.com + +cname: example.com + +TTL: 400 + +proxied: true + +--------- + +2025-06-27 02:49:05: Create: www DNS record is successfully created + +Domain: www.example.com + +cname: example.com + +TTL: 400 + +proxied: true + +--------- + + + + + + + +  등록된 DNS가 작동하는 지 확인합니다. + + + + ping example.com + + + + + + 정상 실행이 확인되면 cron을 등록합니다. + + + + + + #!/bin/bash + +crontab -e # crontab 파일 편집 + +*/5 * * * * /home/temp-app/ddns/ddns.sh # 분 시 일 월 요일 [실행 파일], 매 5분마다 실행 + +#(*,-/) 사용하여 기간 조정 가능 + +#*/[숫자]: 매 [숫자] 분/시/일 ... 마다 반복 + +#[숫자], [숫자]: [숫자], [숫자] 분/시/일 마다 반복 + +#[숫자]-[숫자]: [숫자] 분/시/일 부터 [숫자] 분/시/일까지 매 분/시/일 반복 등등 + +crontab -l # crontab 파일 확인 + +#crontab -r # crontab 삭제 + +#crontab 시작 + +service cron start + +#crontab 중지 + +#service cron stop + +#crontab 작동 확인 + +#service cron status + +#crontab 재시작 + +#service cron restart + + + + + + + + 몇 분 후 ddns.log 파일 확인하여 정상 작동하는 지 확인합니다. + + + + cat /home/temp-app/ddns/log/ddns.log + + sudo service cron status + + + + + + + + 결과 + + + + jq 패키지 설치 + + DDNS 스크립트 작성 + + + + API를 통한 등록, 갱신, 삭제 기능 구현 + + JSON 형식에 대한 이해 + + jq를 통한 JSON 파일 조작에 애한 이해 + + + + + + DNS 및 DDNS 이해 + + A, AAAA, CNAME, TTL, PROXIED 등의 레코드의 의미 이해 + + cron 등록 + + + + + + 2025-06-25 - 초안 작성 + +5.3. Docker 설치 환경 설정 +목표 + + + + 임시 Server에 Docker 설치 환경을 설정합니다. + + 설치 절차 및 방법을 순서에 따라 명확하게 문서로 기록합니다. 
+ + + + 요구 사항 + + + + 임시 서버에 Docker 설치 환경 설정 + + 관리자 계정 Docker 그룹 추가 + + + + 수행 작업 + + Docker 설치 환경 설정 + +  Docker docs 확인 + + + + Docker docs 접속 + + + + Debian 상 engine 설치를 위하여 다음 페이지 접속: https://docs.docker.com/engine/install/debian/ + + + + + + Docker docs의 절차를 따라 Docker engine 설치 + + + + 임시 Server 접속 + + 임시 Server가 구성되었으므로 더 이상 PVE web UI를 통한 접속이 아닌 SSH로 접속합니다. + + + + ssh temp-app@[ip_address] + + pw 입력 + + + + 이전 버전 제거 + + + + 운영 체제에서 자체적으로 지원하는  비공식적 인 도커 이미지를 확인합니다. + + + + docker.io + + docker-compose + + docker-doc + + podman-docker + + (docker engine은 containerd와 runc에 의존합니다. 만약 설치되어 있다면 추가로 제거하면 됩니다.) + + + + + + 다음 명령어를 통해 패키지 제거를 시도합니다. + + + + for pkg in docker.io docker-doc docker-compose podman-docker containerd runc; do sudo apt-get remove $pkg; done + + 설치된 패키지가 없는 경우  패키지가 설치되지 않았습니다. 라는 반환이 나올 수 있습니다. + + 클린 설치를 원할 시 uninstall docker engine 섹션을 다시 확인합니다. + + + + + + + + 설치 과정 + + 도커는 다음과 같은 경로로 설치될 수 있습니다. + + + + Linux Desktop 환경을 위한 bundle + + apt 명령어를 통한 설치 + + manual 설치(업그레이드 포함) + + script를 통한 설치(오직 테스트 및 개발 환경에서만 추천합니다.) + + + + 현재 임시 Server는 CLI 환경이므로 apt 명령어를 통해 설치합니다. + + Docker Engine 설치 + + Docker Engine은 Docker가 구동될 수 있도록 만들어 주는 핵심 부분입니다. Docker의 CLI 명령어를 통해, 컨테이너를 만들고, 실행하고, 관리하는 모든 작업을 수행합니다. + + + + 설치의 앞서 Docker의 apt 저장소를 설정합니다. + + + + + + #!/bin/bash + +# Add Docker's official GPG key: + +sudo apt-get update + +sudo apt-get install ca-certificates curl + +sudo install -m 0755 -d /etc/apt/keyrings + +sudo curl -fsSL https://download.docker.com/linux/debian/gpg -o /etc/apt/keyrings/docker.asc + +sudo chmod a+r /etc/apt/keyrings/docker.asc + +# Add the repository to Apt sources: + +echo \ + + "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.asc] https://download.docker.com/linux/debian \ + + $(. /etc/os-release && echo "$VERSION_CODENAME") stable" | \ + + sudo tee /etc/apt/sources.list.d/docker.list > /dev/null + +sudo apt-get update + + + + 만약 kali같은 특수한 Linux 파생 버전을 사용한다면,  $(. 
/etc/os-release && echo "VERSOION_CODENAME") 부분을  brookworm 등으로 대체해야 할 수 있습니다. + + + + + +  다음 명령어를 통해 Docker의 최신 버전을 설치합니다. + + + + + + #!/bin/bash + +sudo apt-get install docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin + + + + 만약 특정 버전을 설치하고 싶다면 다음 명령어를 통해 설치합니다. + + + + #!/bin/bash + +#List the available versions: + +apt-cache madison docker-ce | awk '{ print $3 }' + +5:28.3.0-1~debian.12~bookworm + +5:28.2.2-1~debian.12~bookworm + +.... + +#Select the version which you want + +VERSION_STRING=5:28.3.0-1~debian.12~bookworm + +#Install + +sudo apt-get install docker-ce=$VERSION_STRING docker-ce-cli=$VERSION_STRING containerd.io docker-buildx-plugin docker-compose-plugin + + + + + + + + hello-world 이미지를 실행하여 성공적으로 설치가 되었는지 확인합니다. + + + + sudo docker run hello-world + + Hello from Docker! 라는 멘트를 확인합니다. + + + + + + hello-world 이미지를 제거합니다. + + + + docker container ls -a + + hello-world 컨테이너 명 확인 + + docker rm [컨테이너 명] + + docker images ls + + hello-world 이미지 id 확인 + + docker rmi [이미지 id] + + + + + + 업데이트 시 해당 절차의 2. 로 돌아가 명령어를 다시 실행합니다. + + + + Docker Compose 설치 + + Docker Compose는 여러 개의 컨테이너로 구성된 애플리케이션을 한번의 정의하고 관리하거나, 복잡한 설정을 파일에서 관리할 수 있도록 하는 도구입니다. docker run의 명령어들을 모아 yaml 파일인 docker-compose.yml을 통해 정의하고 제어할 수 있도록 도와줍니다. + + + + Docker engine이 설치를 완료 한 뒤 이어서 Docker compose를 설치합니다. + + + + + +   + + #!/bin/bash + +#Install docker compose plugin + +sudo apt-get update + +sudo apt-get install docker-compose-plugin + +#Check docker compose version + +sudo docker compose version + + + + + + + + 서버 관리 계정에 docker 그룹을 추가하여, sudo 없이 docker 명령어를 사용할 수 있도록 합니다. + + + + + + #!/bin/bash + +#Add group to Server admin + +sudo usermod -aG docker "$user" + +#Check the group + +id "$user" + + + + + + + + 업데이트 시 해당 절차의 1.로 돌아가 명령어를 실행 합니다. + + + + Dockerfile과 docker-compose.yml + + Dockerfile 이란 + + + + 도커 이미지를 만들기 위한 스크립트로 다음과 같은 형식을 통해 작성됩니다. + + + + + + # 1. FROM: 베이스 이미지 지정 (필수) + +# - 어떤 OS 또는 미리 만들어진 이미지를 기반으로 시작할지 결정합니다. 
+ +# - 예: ubuntu:22.04, node:20, python:3.10-slim, mariadb:latest 등 + +FROM + +# 2. WORKDIR: 작업 디렉토리 설정 + +# - 컨테이너 내부에서 작업할 경로를 지정합니다. 이후의 COPY, RUN, CMD 명령어는 이 디렉토리에서 실행됩니다. + +WORKDIR /app + +# 3. COPY: 파일 복사 + +# - 호스트(내 컴퓨터)의 파일을 컨테이너 내부로 복사합니다. + +# - 예: COPY . . (현재 디렉토리의 모든 파일 복사) + +COPY + +# 4. RUN: 명령어 실행 + +# - 이미지 빌드 중에 실행할 명령어를 지정합니다. + +# - 예: RUN apt-get update && apt-get install -y vim (패키지 설치) + +# - 예: RUN npm install (의존성 설치) + +# - 하나의 RUN 명령어로 묶어서 빌드 레이어를 줄이면 이미지 크기 최적화에 도움 + +# - 만약 패키지를 설치 했다면, 캐시는 삭제하는 게 좋다. && rm -rf /var/lib/apt/lists/* + +RUN + +# 5. EXPOSE: 포트 노출 (문서화 목적) + +# - 컨테이너가 어떤 포트를 사용할지 명시합니다. 실제 포트 연결은 'docker run -p' 또는 'docker-compose.yml'에서 합니다. + +EXPOSE + +# 6. CMD: 컨테이너 실행 시 명령어 + +# - 컨테이너가 시작될 때 실행될 명령어입니다. + +# - Dockerfile에 하나만 존재할 수 있습니다. + +# - 예: CMD ["npm", "start"] + +# - CMD나 ENTRYPOINT는 기본 이미지의 것을 그대로 사용합니다. + +# - CMD:docker run에서 다른 명령 주면 덮어 쓰여짐. (기본 명령) + +# - ENTRYPOINT: 어떤 일이 있어도 항상 먼저 실행되는 실행 파일 + +# - 필요하다면 재정의할 수 있지만, 대부분은 건드리지 않습니다. + +CMD ["executable", "param1", "param2"] + +# 7. ENV: 환경 변수 설정 + +# - 컨테이너 내부에서 사용할 환경 변수를 지정합니다. + +ENV = + + + + + + + + Docker 파일로 실행된 이미지는 상위 이미지의 업데이트 정보를 알 수 없기 때문에, yml 파일에서 label을 추가하여 diun을 통해 관리합니다. + + + + + + labels: + + - "diun.enable=true" + + - "diun.watch_repo=Original_Image" + + - "diun.watch_tag=latest" + + + + diun 프로세스는 실행되면서 해당 도커를 감시합니다.  + + diun은 외부 원격 서버에 있는 도커도 감시할 수 있습니다. 다만 IPTABLES를 통한 명시적인 허가를 해줘야 합니다. [Docker API의 기본 포트: 2375(평문), 2376(ssl/tls)] + + 평문 전송은 공격에 취약할 수 있으므로, SSL/TLS를 적용합니다. + + + + + + + + docker-compose.yml + + + + 도커 이미지를 컨테이너로 만들 때 필요한 설정들의 모음입니다. 다음과 같이 정의합니다. + + + + + + # 1. version: Compose 파일의 버전 + +version: '3.8' + +# 2. services: 컨테이너 목록 + +services: + + # 서비스 이름 (컨테이너 이름과 다를 수 있지만, 네트워크 내부에서 식별자로 사용됨) + + : + + # 3. image: 사용할 이미지 + + # - Docker Hub에서 이미지를 가져옴 + + image: : + + + + # 4. build: Dockerfile로 이미지를 빌드 + + # - 현재 디렉토리의 Dockerfile을 사용 + + build: . 
+ + # - 또는 특정 디렉토리의 Dockerfile을 사용 + + # build: ./my-app + + + + # 5. container_name: 컨테이너에 이름 지정 + + container_name: + + + + # 6. ports: 포트 포워딩 + + # - '호스트포트:컨테이너포트' + + ports: + + - "80:80" + + + + # 7. volumes: 데이터 영속성을 위한 볼륨 마운트 + + # - '호스트경로:컨테이너경로' + + volumes: + + - ./data:/var/lib/mysql + + - ./nginx.conf:/etc/nginx/nginx.conf + + + + # 8. environment: 환경 변수 설정 + + environment: + + - DB_USER=myuser + + - DB_PASSWORD=mypassword + + # 9. depends_on: 서비스 간 종속성 설정 + + # - DB 컨테이너가 먼저 시작되어야 Node.js 앱이 실행되도록 설정 + + depends_on: + + - db + +# 10. networks: 네트워크 설정 (선택 사항) + +networks: + + : + + driver: bridge + + + +# 11. volumes: 볼륨 정의 (선택 사항) + +volumes: + + : + + + + + + + + + + Docker 명령어 + + Docker image 명령어 + + + + docker images [ls] + + + + 로컬 도커 이미지 목록 확인 + + + + + + docker rmi 이미지 id[:리포지토리:태그명] + + + + 도커 이미지 삭제, id를 통한 삭제 권장 + + + + + + docker rmi prune + + + + 사용되지 않는 모든 도커 이미지 삭제 + + + + + + docker image bulid -t 이미지명[:태그명] path + + + + Dockerfile 이미지 생성, 도커 파일은 path의 root에 위치 + + + + + + docker image pull 레파지토리명[:태그명] + + + + docker 이미지를 다운로드 + + + + + + + + Docker container 명령어 + + + + + + docker ps + + + + + + 실행중인 컨테이너 목록 확인 + + + + + + + + + + docker ps -a + + + + + + 전체 컨테이너 목록 확인 + + + + + + + + + + docker container ls -a + + + + + + 전체 컨테이너 목록 확인 + + + + + + + + + + docker start 컨테이너명[:컨테이너 id] + + + + + + 컨테이너 시작 + + + + + + + + + + docker attach 컨테이너명[:컨테이너 id] + + + + + + 컨테이너 접속 + + + + + + + + + + docker stop 컨테이너명[:컨테이너 id] + + + + + + 컨테이너 멈춤 + + + + + + + + + + docker run [옵션] 이미지명  + + + + + + 컨테이너 생성 및 시작 + + + + -d: 백그라운드 실행 + + --name: 컨테이너 이름 지정 + + -p [host_port:container_port]: 호스트와 컨테이너가 연결될 포트 지정 + + -v [host_directory]:[container_directory]: 마운트 될 볼륨 지정 + + --rm: 일회성으로 실행 + + -it 이미지명 [쉘]: 상호작용 모드 활성화 및 가상 터미널 할당 + + + + + + + + docker exec -it 컨테이너명[:컨테이너 id] [쉘] + + + + + + 이미 실행 중인 컨테이너 접속 + + + + + + + + + + docker rm 컨테이너명[:컨테이너 id] + + + + + + 컨테이너 삭제 + + + + + + + + + + exit : 컨테이너 빠져나오기 + + + + + + 결과 + + + + Docker 
Engine 설치 + + Docker Compose 설치 + + 서버 관리자 계정에 Docker 그룹 추가 + + + + + + 2025-06-27 - 초안 작성 완료 + + 2025-06-30 - dockerfile과 docker-compose.yml 내용 추가 + +부록 1. 문서 작성 가이드라인 및 용어집 + +부록 1.1. 문서 작성 가이드라인 +목표 + + 이 가이드라인은 해당 프로젝트의 문서 품질을 향상시키고, 모든 내용을 명확하고 일관되게 작성하기 위한 기준 문서입니다. 개인 프로젝트로 작성자 1인을 위한 가이드라인이며, 추후 작성자 및 예상 독자(기술면접관 등)를 위하여 다음과 같은 목표를 추구합니다. + + + + 정확성: 모든 정보는 사실에 기반하여 이해하기 쉽도록 작성합니다. + + 간결성: 명확하고 간결한 문장을 사용하여 핵심 내용을 전달합니다. + + 최신성: 모든 진행 사항은 문서에 업데이트 되어 항상 최신 상태를 유지합니다. + + + + 문서 구조 및 형식 + + 제목 계층 + + 모든 페이지는 H1, H2, H3 등의 HTML 제목 태그를 일관되게 사용하여 내용의 계층 구조를 명확하게 합니다.  + + 목차 + + BookStack의 기본 기능을 사용하여 관리합니다. + + 날짜 표기 + + 날짜의 표기는 국제 표준 ISO 8061에 따라 YYYY-MM-DD hh:mm:ss로 표기합니다. + + 변경 이력 표시 + + BookStack의 기본 기능과 별개로 페이지 하단 구분선으로 구분된 영역에 문서 변경 이력(날짜 및 주요 변경 내용)을 기록하여 버전 관리를 합니다. 변경 이력은 다음과 같이 기록합니다. + + YYYY-MM-DD - 변경 내용 요약 + + 단락 및 목록 + + 하나의 단락에는 하나의 주제를 담습니다. 정보를 나열할 때는 순서 있는 목록(
    ol) 혹은 순서 없는 목록 (ul
      ) 태그를 적절하게 활용합니다. + + 강조 및 인용 + + BookStack 내부의 callout 기능을 활용하여 인용, 정보, 성공, 경고, 위험 등의 범례를 적절하게 활용합니다. + + 표 + + 글로 설명하기 어려운 복잡한 데이터를 비교할 시 표()를 활용하여 시각적으로 보기 좋게 정리합니다. 표 활용 시 행 머리글 옵션을 활용합니다. 셀 속성에서 내부 텍스트를 가로, 세로 전부 가운데 정렬하여 보기 좋게합니다. + + 이미지 및 다이어그램 + + 시각 자료는 draw.io를 사용하여 만든 뒤, [file_name].drawio.png 형태로 업로드하여 사용합니다. 이미지는 선명하고 가독성이 높아야 하며, 필요한 경우 캡션(설명)을 추가합니다. png 파일을 저장할 때, drawio 정보를 함께 저장하여 관리합니다.  + + 코드 및 명령어 + + BookStack 내부의 소스 코드 기능을 활용하여 코드 및 명령어를 입력합니다. + + 링크 + + BookStack 내부의 내외부 링크 기능을 활용하여, 다른 문서를 빠르게 참조할 수 있도록 합니다. 이 때, 내부 참조 링크는 BookStack의 기능을 활용하여 추가합니다. 이는 추후 문서 수정 시 링크의 유효성을 유지할 수 있도록 합니다. + + 내용 작성 스타일 + + 용어 사용 + + 문서에 사용되는 용어는 문서 전체에 일관되게 사용합니다. 처음 등장하는 용어는 완전한 명칭과 함께 약어를 병기하고, 부록 1 아래의 용어집에 정의합니다. 한 번 사용된 용어는 약어로 표기하되 강조가 필요한 특별한 경우에는 완전한 명칭을 사용합니다. + + 문체 + + 최대한 객관적이고 명료하게 능동 표현으로 작성합니다. 피동 표현은 꼭 필요한 경우에만 사용합니다. 만연체가 아닌 간결체로 작성하며, 계획 분석 결과 위주로 작성합니다. 또한, 시제를 상황에 맞게 사용하여 문서의 일관성을 유지합니다. (기본적으로 현재 시제. 과거/미래 시제는 해당 경우에만) + + 문법 오류 및 오탈자 + + 문서 작성시 용어집을 통한 일관된 용어를 사용합니다. 문서 작성 후 교정 작업을 통해 문법적으로 어색한 부분과 잘못된 용어 사용을 교정합니다. + + 문장 부호 + + 온점(.), 반점(,) 등을 남용하지 않고, 적재적소에 사용하여 문장을 깔끔하게 유지합니다. + + 문서 관리 + + 검토 및 업데이트 + + 프로젝트 진행 도중 계획에 수정 사항이 생길 시, 즉시 관련된 모든 문서를 검토하고 수정합니다. 수정 사항이 없더라도 주기적으로 문서 전체 내용을 확인하며 정합성과 일관성이 어긋나는 부분이 있는 지 확인하여 수정합니다. + + To-Do list 활용 + + 프로젝트 진행 시 정식 진행 사항에 넣기 전 임시로 활용할 수 있는 To-Do list 페이지를 적극적으로 활용합니다. 양식, 용어 등에 구애받지 않고 자유롭게 작성합니다. 추후 정식으로 문서에 포함 시 문서 작성 가이드 라인에 맞추어 재작성 후 해당 내용을 삭제합니다. + + 주기적인 문서 재작성 + + 문서 작성 가이드라인과 용어집 용례 준수를 위하여 매주 1회 문서를 검토하고 수정합니다. + + 결론 + + 프로젝트를 진행하며 해당 가이드라인에 맞추어 명확하고 일관된 가독성있는 문서를 작성합니다. 추후 프로젝트 진행 시 현 문서 역시 수정될 수 있습니다. + + + + 2025-06-05 - 초안 작성 + + 2025-06-20 - ISO 8601 적용 및 주기적인 문서 재작성 항목 추가 + +부록 1.2. 용어집(임시) +목표 + + 해당 프로젝트에 사용되는 용어를 분류하기 전 임시로 작성합니다. 추후 용어의 성격에 맞춰 분류된 별도의 용어집을 작성합니다. + + 용어 + + 1.1. 
동기 및 목표의 용어 + + NAS(Network Attached Storage) - 네트워크 기반의 파일 저장 서버 + + On-premise - 기관, 기업, 개인이 자체적으로 운영하는 서버/소프트웨어 환경 + + IT(Information Technology) - 정보 기술; 컴퓨터 시스템, 소프트웨어, 네트워크, 데이터 처리 기술 등을 총칭 + + HA(High Availability) - 고 가용성; 물리적/논리적 오류에 대응하여 가용성을 높이는 것 + + SoC(Separation of Concerns) - 관심사 분리; 어떠한 하나의 기능을 실행하는 데 있어 물리적/논리적으로 구별하여 독립적인 구현을 하는 것 + + LTO(Linear Tape Open) - 백업 및 데이터 보관에 사용되는 자기 테이프 기술 + + CA(Certificate Authority) - 인증 기관; SSL/TLS 프로토콜을 사용하기 위한 인증서를 발급하는 주체 + + DNS(Domain Name System) - IP(Internet Protocol) 주소를 사람이 이해하기 쉬운 Domain Name으로 사용할 수 있게 하는 시스템; Domain Name에 대응하는 IP 주소를 반환 + + DB(Database) - 여러 사람 혹은 서비스가 공유하여 사용할 목적으로 생성되어 관리되는 데이터의 집합 + + IPS/IDS(Intrusion Prevention/Detection System) - 침입 방지/탐지 시스템; 실시간으로 패킷을 탐지 하거나 차단하여 악의적인 접근에 대처하는 시스템 + + OS(Operating System) - 하드웨어와 응용프로그램 사이에 위치하며, 하드웨어 자원을 분배하는 등의 역할을 수행 + + 1.2. 하드웨어 구성의 용어 + + DHCP(Dynamic Host Configuration Protocol) - Host의 IP 주소, Subnet Mask, Gateway, DNS 주소 등을 자동으로 설정하는 프로토콜 + + VLAN(Virtual Local Area Network) - 패킷에 VLAN 태그를 붙여 가상의 LAN을 설정, 여러 스위치가 포함된 네트워크 환경 속에서 태그 별로 각각 하나의 LAN 처럼 동작(Broadcast 단위의 분리; L2 통신 분리); IEEE 802.1Q에 정의 + + AP(Access Point) - 유선 LAN과 무선 LAN을 연결해주는 장치; WAP(Wireless Access Point) + + NIC(Network Interface Card) - 컴퓨터, 혹은 네트워크 장비가 네트워크에 접근할 수 있도록 하는 하드웨어 장치 + + 1.3. 
목표 아키텍처의 용어 + + WAN(Wide Area Network) - LAN과 LAN을 연결하는 네트워크; 라우터를 거쳐 IP를 이용하여 L3 통신 + + LAN(Local Area Network) - 각 호스트와 서버를 연결하는 네트워크; 스위치를 거쳐 MAC을 이용하여 L2 통신 + + ISP(Internet Service Provider) - 개인 혹은 기업에게 인터넷 접속을 제공하는 주체 혹은 기업 + + PoE(Power over Ethernet) - LAN 통신 캐이블을 이용하여 장비에 전원을 공급하는 기술; IEEE 802.3af, 802.3at, 802.3bt에서 정의 + + DDoS(Distributed Denial of Service) - 기존의 서비스 거부 공격(DoS)에서 발전형 공격 기법; 다수의 감염된 시스템이 조직되어 대상 서비스에 트래픽을 집중하여 서비스 가용성을 손상 + + IP(Internet Protocol) - 인터넷을 통하여 네트워크에서 어떤 정보를 수신하고 송신하는지에 대한 통신 규약 + + Port Scan - 네트워크에 접속된 컴퓨터 혹은 장치의 어떤 포트가 열려 있는지 확인하는 작업 + + VPN(Virtual Private Network) - WAN에서 특정 LAN으로 암호화된 터널을 구성해 접속하여, LAN에 속한 것 처럼 네트워크를 사용할 수 있도록 하는 기술 + + UPnP(Universal Plug and Play) - 네트워크에 연결된 기기들이 별도의 설정 없이 서로를 인식하고 필요 포트를 자동으로 포트를 개방하는 기능 + + DDNS(Dynamic Domain Name System) - 유동적으로 변경되는 Public IP 주소에 대한 DNS를 제공하기 위한 기술; 유동적인 Public IP에 대하여 고정된 Domain Name을 사용 가능 + + SSL/TLS( Secure Sockets Layer / Transport Layer Security) - 클라이언트와 서버 간의 통신을 암호화하여 제 3자가 데이터를 가로채거나 변조하는 것을 방지하는 프로토콜; TLS가 SSL의 최신 버전 + + Gateway - 네트워크 간 통신을 가능하게 하는 컴퓨터 혹은 소프트웨어; 각 LAN과 LAN, LAN과 WAN의 사이에서 서로의 통신을 가능하게 중계하는 역할 + + OPNsense - 오픈 소스 방화벽 OS + + FreeIPA - 오픈 소스 통합 인증 솔루션(LDAP/Kerberos 등 제공) + + LDAP(Lightweight Directory Access Protocol) - 네트워크 상의 조직, 개인, 파일, 장치 등에 대한 정보를 중앙 집중식으로 관리하고 검색하는 프로토콜 + + SSO(Single Sign On) - 사용자가 한 번의 로그인으로 여러 어플리케이션에 접근할 수 있도록 하는 보안 기술 + + Split Horizon DNS - 요청한 위치가 LAN 혹은 WAN으로 다르더라도, 동일 Domain Name으로 부터 다른 IP를 얻어 일관적인 서비스를 제공할 수 있게 하는 기술 + + DoH(DNS over Https) - DNS 요청을 https 프로토콜을 사용하여 암호화; DoT(DNS over TLS) DNS 요청 자체를 https가 아닌 TLS로 자체 암호화 + + Kerberos - 네트워크 상 사용자 인증을 위한 프로토콜; 티켓 기반으로 KDC(Key Distribution Center)을 운영하여 관리 + + Zero Trust - 절대 신뢰하지 말고 항상 검증하라는 원칙을 기반으로 하는 보안 모델; 핵심 요소로 항상 검증, 최소 권한 접근, 침해 가정, 마이크로 세그멘테이션, 모니터링 및 분석, 행위 분석 등이 존재 + + PVE(Proxmox Virtual Environment) - Debian Linux를 기반으로 만들어진 오픈 소스 Hypervisor OS + + VM(Virtual Machine) - 컴퓨티 환경을 소프트웨어로 구현한 것으로, 하드웨어를 추상화 하여 OS를 실행할 수 있는 가상 컴퓨팅 환경 + + 
LXC(LinuX Container) - 단일 Linux 호스트 위에서 호스트의 커널을 공유하는 여러 개의 독립적인 가상 Linux System을 실행 + + Docker - 각 응용 프로그램의 실행 환경에 대한 종속성을 벗어나기 위하여 가상화한 Container; OS 전체를 가상화 하는 VM, 커널을 공유하나 독립적인 Linux System으로 가상화된 LXC와 달리 실행에 필요한 라이브러리만을 패키징하여 가상화 + + SPOF(Single Point of Failure) - 단일 실패 지점; 시스템 구성 요소 중 동작 불능 시 전체 시스템이 중단되는 부분 + + Prometheus - 오픈 소스 시스템 모니터링 및 경고 툴킷 + + Grafana - 수집된 데이터를 시각적으로 표현해 주는 오픈 소스 분석 및 시각화 웹 어플리케이션 + + Loki/Promtail - 로그 데이터를 수집하고 저장하는 로그 집계 시스템 + + Alertmanager - Prometheus가 보낸 경고를 관리, 중복된 경고 그룹화 및 알림 라우팅 + + Ansible - 오픈 소스 IT 자동화 도구, YAML 형식의 플레이북을 사용하여 서버 구성, 애플리케이션 배포, 작업 자동화 수행 + + Kopia - 오픈 소스 백업/아카이빙 스프트웨어 + + NFS(Network File System) - Linux/Unix 상 네트워크를 통해 파일을 로컬 저장소 처럼 사용할 수 있게 하는 프로토콜 + + E2EE(End to End Encryption) - 종단간 암호화 + + Btrfs(B-tree File System) -  CoW, 스냅샷, 데이터 무결성 검증, 통합 볼륨 관리 등의 고급 기능에 중점을 둔 리눅스 용 최신 파일 시스템 + + CoW(Copy on Write) - 쓰기 시 복사; 파일이 수정되면 파일 전체를 덮어 쓰는 것이 아닌 새로운 공간에 수정된 부분의 블록을 기록, 강력한 무결성 지원, 스냅샷 기능, 높은 용량 효율성 등의 성능상 이점이 있으나 단점으로 외부 단편화 발생 + + AdGuard Home - DNS 광고 필터링 기능을 포함한 솔루션; FreeIPA로 upstream + + Monitoring server - 성능 모니터링 및 로깅, 서버 설정 자동화 담당 서버 + + Proxy Server - Reverse Proxy, DDNS, idP 등 담당 서버 + + File Server - 파일 관리 및 백업 담당 서버 + + DB Server - DB 서버 + + Web Server/WAS - Bookstack, Gitea, Ghost 등의 웹 서비스 담당 서버 + + Application Server - 여러 응용 서비스 담당 서버 + + Kali and practice client - 정보 보안 실습 담당 + + 1.4. 
추가 프로젝트 + + OpenWRT - 주로 공유기(가정용 라우터)에 사용되는 임베디드 기기를 위한 Linux 배포판 + + ISMS-P - 정보보호 및 개인정보보호 관리체계 인증; KISA에서 공인하는 정보 보호/개인 정보 보호를 위한 관리체계 인증 제도 + + NAT (Network Address Translation) - 네트워크 주소 변환 기술; 여러 개의 LAN IP 대역과 하나의 WAN IP 주소로 연결하는 기술 + + Subnetting - 기존 Class 단위의 IPv4 체계를 벗어나, CIDR를 통하여 Subnet Mask 를 이용해 네트워크 단위를 분할; 하나의 IP 대역을 여러개로 나누어 사용 가능 + + Broadcast - LAN 전체를 대상으로 한 번에 모두 전송하는 방식(ARP, DHCP Broadcast 등에 사용) + + Unicast - 특정 Client 하나를 대상으로 전송하는 방식(http, https, FTP 등에 사용) + + (Multicast - 특정 그룹을 대상으로 전송하는 방식(IPTV, 실시간 스트리밍 등에 사용)) + + IaaS( Infrastructure as a Service ) - 클라우드 컴퓨팅 서비스 모델 중 하나; IT 인프라를 가상화 형태로 제공하는 것(AWS EC2, Microsoft Azure 등) + + (PaaS(Platform as a Service) - 클라우드 컴퓨팅 서비스 모델 중 하나; OS, DB 등의 플랫폼을 가상화 형태로 제공하는 것(Google App Engine 등) + + SaaS(Software as a Service) - 클라우드 컴퓨팅 서비스 모델 중 하나; 완성된 소프트웨어를 가상화 형태로 제공하는 것(MS Office365, Google Workspace 등)) + + 2.1. 하드웨어 선정 + + 프로세서(CPU; Central Processing Unit) - 중앙 처리 장치; 연산, 제어, 기억, 해석을 담당하는 컴퓨터 부품 + + OOM Killer - Linux 시스템 상에서 RAM 용량이 부족할 시, 서비스를 강제 종료하여 RAM을 회수하는 프로세스 + + KSM(Kernel Same-page Merging) - PVE 내부에서 중복되는 Memory Page를 병합하고, 실제 물리 RAM 사용량을 줄여주는 기술 + + RAID(Redundant Array of Independent Disk) - 여러 개의 저장장치(하드 디스크, SSD 등)에 일부 중복된 데이터를 나누어 저장하는 기술; 단순 병합 및 미러링부터 오류 검출을 위한 패리티를 사용하는 등 여러 방식이 존재 + + 2.2. 네트워크 설계 + + E2E(End to End) - 종단간 + + Terminal Box - 통신 단자함 + + LACP(Link Aggregation Control Protocol) - 두 개 이상의 NIC를 묶어 하나의 NIC 처럼 사용하는 기술; 802.3ad에서 정의 + + vmbr - Linux Bridge; PVE 내부에서 사용되는 가상의 스위치 + + vtnet - PVE 내부에서 사용되는 가상의 NIC + + ARP( Address Resolution Protocol ) - IP 주소를 통해 MAC 주소를 알아내는 시스템; Broadcast를 사용 + + ACL(Access Control List) - 접근 제어 목록 + + DMZ(Demilitarized Zone) -  + + CERT(Computer Emergency Response Team) -  + + 2.3. 시스템 선정 + + Hypervisor - 하나의 물리적 서버에서 여러 개의 VM을 실행할 수 있도록 하는 소프트웨어 또는 펌웨어 계층 + + idP(identity Provider) - 사용자의 디지털 신원을 관리하고 인증 서비스를 제공하는 시스템 + + + + 2025-06-14 - 초안 작성 + + 2025-06-20 - 날짜 표기 변경 + +부록 2. 구현 일지 + +부록 2.1. 
To-Do List +목표 + + 구현 로그로 남기기 전 바로 바로 해야 할 작업들을 임시로 저장하는 곳입니다. 작업 완료 시 구현 로그로 이동 후 해당 내용을 삭제합니다. + + 내용 + + 문서 재정제 계획 + + 임시 Server 호스팅 + + 임서 Server + Gitea, Code-server, hedgedoc, mkdoc, Reverse Proxy(ngnix) 설정 + + 문서 재정제 + + 용어집, 구현로그, 용례 처리, 문체, 일관된 callout 및 형식 등 처리 + + 문서 모듈화 + + '부록 4. 패키지 사용법 및 공통 설정' 챕터 신설 + + + + iptables 등 별도로 사용법을 기록해야 하는 명령어(패키지) 별로 문서 생성 + + 서버 설정시 필수로 필요한 패키지 정리 + + 중복되는 설정들(iptables, root 계정 설정, useradd/usermod/groupadd 등등) 통합하여 페이지 생성 + + + + 서버별 특별히 필요한 설정만 서버 설정 페이지에서 정의 + + Debian 계열 뿐 아니라 RedHat 등 같은 기능 다른 명령어도 병기 가능하면 병기 + + + + + + 각 서비스(Docker 등) 별로 필요한 Port 정리 + + + + bookstack에서 벗어나, wiki.js + Gitea 환경으로 이전 + + bookstack 내부의 엄격한 위계 구조(책장 - 책 - 챕터 -페이지)와 관리 및 유지 보수의 어려움(계속 바뀌는 프로젝트와, 사소한 변경도 전부 버전으로 기록)을 Git 기반의 Gitea로 이전하여 해소. + + 예상 레퍼리토리 + + /my-onpremise-docs (Gitea 저장소)   ├── mkdocs.yml   └── docs/       ├── index.md       ├── flows/       │   ├── 02_design/      │   │   ├── index.md                  # 설계 개요      │   │   ├── 01_considerations.md      # 왜? (고려사항, 비교, 결정)      │   │   └── 02_final_architecture.md  # 무엇을? 
(최종 구조, 다이어그램)      │   └── 03_implementation_plan.md  # 구현 순서에 대한 목차 역할(ex - 문서화 > 네트워크 환경 설정 > PVE 설치 > ...., 로그와 링크)      ├── components/                     # (기존 hardwares and softwares - 간단한 명세 위주로 - 추후 설정 및 스크립트와 링크)       │   ├── 01_hardware_list.md         # 1.1 하드웨어 구성       │   ├── 02_software_stack.md        # 1.2 소프트웨어 스택 (PVE, OPNsense 등)       │   └── 03_service_list.md          # 1.3 서비스 목록 (Docker 컨테이너 등)       ├── logs/ # 실제 구현 과정 중 있던 일들에 대한 간단한 명세  (ex - 네트워크 설정 - T5004를 설정 중 어떤 어떤 작업을 했고, 어떤 문제 발생 했는지만 명세 - 구체적인 스크립트, 설정, 시도 조치 등은 링크)      │   ├── 01_documentalize/       │   │   ├── index.md                        │   │   ├── 01_bookstack.md       │   │   └── 02_code-server_and_gitea.md      │   ├── 02_network_setup/      │   ├── 03_pve_setup/      │   └── 04_ddns_script/       ├── troubleshooting/       │   └── 01_504_gateway_timeout.md       ├── scripts_and_configs/        │   ├── ddns/       │   ├── debian/       │   └── pve/       ├── references/       │   ├── glossary.md       │   └── style_guide.md       └── assets/          └── images/              └── architecture.png + + 계획(작업 계획서이자 마스터 체크 리스트) + + 실행 로그(실제로 수행한 기록이 담기는 작업 일지 간단한 명세 > 계획에서 링크) + + 결과물(코드, 스크립트, 설정 파일, 매트릭스 등 등 > 실행 로그에서 링크) + + Hedgedoc(마크다운 편집기) + code-server(여러 문서 편집기 + git) > gitea > mkdoc  + + + + + + PVE 상에서 PCI Passthrough 시 + + 추후 File Server 구현 시 PVE 상에서 PCI Passthrough를 사용해야 하므로 미리 정리 + + 설정 방법: 이 방식은 VM에 가상 디스크를 추가하는 것이 아니라, SATA 컨트롤러라는 하드웨어 자체를 VM에 직접 넘겨주는 PCI Passthrough 방식을 사용 + + + + 사전 준비: PVE 호스트의 BIOS/UEFI와 부트로더에서 IOMMU (Intel VT-d 또는 AMD-Vi)를 활성화해야 합니다. 이는 고급 설정에 속함 + + VM 생성: 파일 서버용 VM(Debian)을 생성합니다. 이때 OS 디스크는 1번 방식 을 따라 NVMe SSD( local-lvm )에 작게 생성합니다. 데이터용 HDD 디스크는 이 단계에서 추가하지 않습니다. + + PCI 디바이스 추가: + + + + 생성된 VM의 하드웨어 탭으로 이동합니다. + + 추가 -> PCI 디바이스 를 선택합니다. + + 디바이스 목록에서 WTR Pro의 SATA 컨트롤러를 찾아 선택하고 추가합니다. + + + + + + VM 부팅 및 설정: + + + + VM을 부팅하면, Debian OS는 4개의 물리 HDD를 마치 자신의 컴퓨터에 직접 연결된 것처럼 인식합니다. 
+ + 이후 Debian 내에서 mdadm 이나 Btrfs의 내장 기능을 사용하여 4개의 디스크로 RAID 10 어레이를 구성할 수 있습니다. + + + + + + + + 기타 사항 + + Immich 사진 Import 방법 정리 + + 추후 immich 구현시, 사진 import 방법 정리 필요 + + 공식 import 방법 + + + + 모바일 app을 통해서 import + + web을 통해서 import + + immich-cli를 통해서 import + + + + immich-cli를 통한 방법(File Server에서 바로 하는 법) + + + + immich-cli를 공식 도커 이미지 받기 + + docker run 명령어 사용 + + + + + + #!/bin/bash + +docker run --rm -it \ + + -v "/path/on/your/vm/to/photos:/import" \ + + ghcr.io/immich-app/immich-cli \ + + upload --key [API_Key] --server [Server Address] /import + + + + 각 명령어 옵션 설명 + + + + --rm: 명령 실행이 끝나면 컨테이너를 자동으로 삭제하여 시스템을 깨끗하게 유지합니다. + + -it: 터미널을 통해 진행 상황을 실시간으로 보기 위한 옵션입니다. + + -v "/path/on/your/vm/to/photos:/import" : [호스트 사진 경로]:[컨테이너 내부 경로] + + ghcr.io/immich-app/immich-cli: 실행할 공식 immich-cli 도커 이미지의 이름입니다. + + upload --key ... /import: 컨테이너 내부에서 실행될 실제 명령어입니다. 마지막의 경로가 컨테이너 내부 경로인 /import로 지정된 것을 볼 수 있습니다. + + + + + + + + + + + + 대량의 사진을 호스트 환경의 오염 없이 격리된 도커 내부 컨테이너에서 immich docker로 전송 가능하며, 해당 명령어를 shell script 화 하면 더 간단하게 재사용 가능 + + + + Git 정리 + + Git과 Gitea + + + + Git: 로컬에서 관리되는 버전 관리 프로그램 + + Gitea: 이 Git으로 관리되 파일을 원격 저장 및 공유, 웹 인터페이스로 보여주는 서버 + + + + Git의 저장 공간 + + + + Working Directory(작업 공간): 로컬 폴터 + + Staging Area(스테이징 공간): 작업한 파일 중 이번 버전에 포함시킬 변경 사항만 올리는 공간 + + Local Repository(로컬 저장소): 스테이징 공간에서 준비된 병경 사항을 하나의 의미있는 버전(Commit)으로 확정하여 저장하는 일종의 데이터베이스(.git 이라는 숨김 폴더에 모든 이력 기록) + + + + 사용 방식 + + + + Git으로 작업한 결과를 Gitea 서버로 업로드(push) + + + + 작업 공간 파일 수정 + + 스테이징 공간 변경 사항 추가 + + 로컬 저장소에 버전으로 확정(커밋) + + + + + + + + Code-Server에 Git 설치하기 + + + + 임시 설치 + + + + code-server 컨테이너 실행 + + 컨테이너 터미널 들어가기 + + apt-get update && apt-get install -y git 명령어를 통해 설치 + + 추후 컨테이너 재생성(업데이트 등의 이유)시 초기화 + + + + + + Dockerfile 생성 + + + + + + #Dockerfile.tamplate + +FROM codercom/code-server:latest + +USER root + +RUN apt-get update && apt-get install -y git + +USER coder + + + + + + + + + + Git과 Gitea 연동 + + + + Gitea에 프로젝트 생성 + + Gitea 서버 인증(git push 최초 실행 전 설정) + + + + + + 
Code-Server 터미널 접속 + + + + + + #!/bin/bash + +ssh-keygen -t rsa -b 4096 #ssh 키 생성 + +cat ~/.ssh/id_rsa.pub #생성된 ssh 키 출력(공개키) 후 내용 복사 + + + + + + Gitea 웹 UI 설정 > SSH/GPG 키 이동 + + + + + + 키 추가 버튼 누른 뒤, 공개키 붙여 넣고 저장 + + + + + + + + Gitea의 저장소(Repository) 복제 + + + + Gitea 웹 사이트에서 프로젝트 페이지 접속 + + 오른쪽에 있는 HTTPS 또는 SSH 주소를 복사 + + Code-Server 웹 사이트 메뉴의 Terminal > New Terminal 클릭 하여 내장 터미널 접속 + + + + #!/bin/bash + +git clone git@:/[Gitea Address] + + + + Code-Server의 왼쪽 파일 탐색기에 [프로젝트] 폴더 생성 확인 + + + + + + 파일 수정 및 버전 관리 + + + + + + Code-Server 편집기 활용 [프로젝트] 폴더 내 파일 생성 혹은 수정 + + + + + + 파일이 끝나면 터미널에서 git 명령어 차례로 실행 + + + + + + #!/bin/bash + +cd [project directory] #해당 폴더로 이동 + +git add file_name #특정 파일 하나만 올릴 때 git add . #전체 파일 올릴 때 + +git commit -m "messages" #의미있는 메세지와 함께 버전으로 확정 + +git push origin main #Gitea 서버에 업로드(push) + + + + + + + + 협업 흐름(Branch) + + + + + + #!/bin/bash + +#'update-monitoring'이라는 새로운 브랜치를 만들고, 그 브랜치로 이동 + +git checkout -b update-monitoring + +#새로운 브랜치에서 모니터링 관련 설정을 수정하고 커밋 + +git add . 
git commit -m "Update Prometheus configuration" + +#작업이 성공적으로 끝나면, 다시 원래의 main 브랜치로 복귀 + +git checkout main + +#main 브랜치에서 새로운 브랜치의 변경 사항을 합치기 (Merge) + +git merge update-monitoring + + + + + + + + + + + + 추후 자동화 연동 + + Gitea +n8n을 이용한 GitOps 자동화 워크플로우 + + + + 트리거 (Trigger): Gitea Webhook + + + + Gitea의 homelab-config 저장소 설정에서 웹훅(Webhook)을 생성 + + 이 웹훅은 push 이벤트가 발생할 때마다, n8n으로 알림을 보내도록 설정 + + + + + + 수신 (Listen): n8n Webhook Node + + + + n8n 워크플로우에서는 'Webhook' 노드를 사용하여 Gitea가 보내는 알림 대기 + + 이 방식은 5분마다 확인하는 Cron Job과 달리, 변경 사항이 발생하는 즉시 실시간으로 반응 + + + + + + 실행 (Execute): n8n Execute Command Node + + + + cd /path/to/homelab-config (프로젝트 디렉토리로 이동) + + git pull origin main (Gitea로부터 최신 설정 가져오기) + + docker-compose up -d --remove-orphans (변경된 docker-compose.yml을 기반으로 도커 컨테이너 재시작/업데이트) + + + + + + n8n이 호스트의 명령어를 실행하고 도커를 제어하기 위하여 특정 폴더와 도커 소켓을 볼륨으로 마운트 필요 + + + + + + #n8n 서비스의 docker-compose.yml 예시 + +services: + + n8n: + +   image: n8nio/n8n + +   restart: always + +   ports: + +     - "5678:5678" + +   volumes: + +     - n8n_data:/home/node/.n8n + + #n8n 컨테이너가 호스트의 도커를 제어할 수 있게 함 + +     - /var/run/docker.sock:/var/run/docker.sock + + #n8n 컨테이너가 Git 프로젝트 폴더에 접근할 수 있게 함 + +     - /path/to/homelab-config:/data/homelab-config + +   environment: + +     - GENERIC_TIMEZONE=Asia/Seoul + + + + + + + + + + 기존 DB 백업 + + mariadb-dump 백업 + + + +   mariadb (데이터 경로: /var/lib/mysql ) 사용 + + + + 터미널 접속 및 명령어 실행 + + + + mkdir /backups/sqldumps + + mariadb-dump -u [username] -p [password] [options] [database_name] > [export_file_name] 명령어 사용 + + + + #!/bin/bash + +#하나의 데이터베이스만 백업 + +mariadb-dump -u user -p database > /backups/sqldumps/database_backup_$(date "+%Y-%m-%d %H:%M:%S").sql + +#여러 데이터베이스 한번에 백업 + +mariadb-dump -u user[root|admin] -p database1 [database2|...] 
> /backups/sqldumps/databases_backup_$(date "+%Y-%m-%d %H:%M:%S").sql + +#시스템 데이터베이스 제외한 모든 데이터베이스 한번에 백업 + +mariadb-dump -u user[root|admin] --all-databases --exclude-databases=mysql,information_schema,performance_schema,sys > /backups/sqldumps/all_databases_backup_$(date "+%Y-%m-%d %H:%M:%S").sql + + + + + + 데이터 백업 + + + + /var/lib/mysql 내 내용을 그대로 backups로 복사 + + + + #!/bin/bash + +cp -aR /var/lib/mysql backups/mysql + + + + + + DB 서비스 완전 중지 및 볼륨 데이터 삭제 + + + + container manager 를 통해 mariadb 종료 + + File station에서 /docker/mariadb/mysql/* 삭제 + + + + 도커 업데이트 + + mariadb 설치 + + 도커 설정 + + + + 이미지 + + + + mariadb + + + + + + 포트 + + + + 3306:3306 + + + + + + 볼륨 마운트 + + + + + + /docker/mariadb/mysql /var/lib/mysql + + + + + + /docker/mariadb/backups /backups + + + + + + + + 환경 변수 + + + + MARIADB_ROOT_PASSWORD MariaDB1Geonil! + + TZ Asia/Seoul + + restart: unless-stopped + + + + + + + + 컨테이너 설정 + + + + 터미널 접속 및 명령어 실행 + + chown 999:999 /volume1/docker/mariadb/mysql + + chown 999:999 /volume1/docker/mariadb/backups + + File Station 설정 + + + + /docker/mariadb/mysql + + /docker/mariadb/backups + + 권한 설정: 생성 > Owner > 모든 권한 + + + + + + + + 도커 다시 시작 + + mariadb 설정 + + 터미널 접속 및 명령어 실행 + + + + 다음 명령어 입력하여 mariadb 접속 + + + + + + #!/bin/bash + +mariadb -u root -p + + + + + + + + + + 다음 명령어 입력하여 root 계정 비밀번호 생성 및 로컬 접속만 허용 + + + + + + #ALTER USER 'root'@'localhost' IDENTIFIED BY 'MariaDB1Geonil!'; #비밀번호 생성 but 기본 생성 + +DELETE FROM mysql.user WHERE User='root' AND Host='%'; #로컬 접속만 허용 + +FLUSH PRIVILEGES; #변경된 권한 사항 즉시 적용 + + + + + + + + exit로 접속 종료 후, 재 접속해 정상적으로 적용되었는지 확인 + + 다음 명령어를 이용하여 admin 계정 생성 + + + + + + CREATE USER 'mariadb_admin'@'%' IDENTIFIED BY 'MariaAdmin1Geonil!'; #계정 생성 + +GRANT ALL PRIVILEGES ON *.* TO 'mariadb_admin'@'%' WITH GRANT OPTION; #권한 부여 + +FLUSH PRIVILEGES; #변경된 권한 사항 즉시 적용 + + + + + + + + exit로 접속 종료 후, 해당 계정으로 접속해 정상적으로 적용되었는지 확인 + + + + 어플리케이션용 데이터베이스 생성 및 계정 생성 + + + + + + bookstack + + + + + + CREATE DATABASE IF NOT EXISTS bookstack_db; #데이터베이스 생성 
+ +CREATE USER 'bookstack_user'@'%' IDENTIFIED BY 'Bookstack1Geonil!'; #계정 생성 + +GRANT ALL PRIVILEGES ON bookstack_db.* TO 'bookstack_user'@'%'; #권한 부여 + +FLUSH PRIVILEGES; #변경된 권한 사항 즉시 적용 + + + + + + + + + + Gitea + + + + + + CREATE DATABASE IF NOT EXISTS gitea_db; #데이터베이스 생성 + +CREATE USER 'gitea_user'@'%' IDENTIFIED BY 'Gitea1Geonil!'; #계정 생성 + +GRANT ALL PRIVILEGES ON gitea_db.* TO 'gitea_user'@'%'; #권한 부여 + +FLUSH PRIVILEGES; #변경된 권한 사항 즉시 적용 + + + + + + + + + + exit로 접속 종료 + + + + + + mariadb-dump를 통한 복구 + + + + 명령어 입력 + + + + + + #!/bin/bash + +mariadb -u bookstack_user -p bookstack_db < /backups/sqldumps/bookstack_db_time.sql + + + + + + + + mariaDB 접속하여 확인 + + + + + + USE bookstack_db; + +SHOW TABLES; + +SELECT COUNT(*) FROM users; #실제 데이터 들어갔는지 확인 + + + + + + + + + + bookstack 설치 및 DB 연동 + + 도커 설정 + + + + 이미지 + + + + linuxserver/bookstack + + + + + +  포트 + + + + :443 + + 6875:80 + + 내부 CA 구축시 HTTPS 사용 가능 + + + + + + 볼륨 마운트 + + + + + + /docker/bookstack/config /config + + + + + + + + + + 환경 변수 + + + + + + APP_URL https://book.ilfamilynas.synology.me + + + + + + APP_KEY base64:a3FqaHM0a3ZtMXJyZno2bm5zaTg0eHUzdXFkenpkNm4= + + + + + + DB_HOST mariadb(추후 외부로 분리시 IP 혹은 도메인으로 설정) + + + + + + DB_PORT 3306 + + + + + + DB_DATABASE bookstack_db + + + + + + DB_USERNAME bookstack_user + + + + + + DB_PASSWORD Bookstack1Geonil! 
+ + + + + + TZ Asia/Seoul + + + + + + PUID 1000 + + + + + + PGID 1000 + + + + + + restart: unless-stopped + + + + depends_on: mariadb + + + + + + bookstack 에서는 PUID PGID 설정을 했으나, 안해도 무방 + + + + 컨테이너 설정 + + + + 터미널 접속 및 명령어 실행 + + chown 1000:1000 config + + File Station 설정 + + + + /docker/bookstack/config  + + 권한 설정: 생성 > Owner > 모든 권한 + + + + + + + + 도커 다시 시작 + + 로그 확인 + + 리버스 프록시 연동 + + 접속 확인 + + + + 기본 로그인 정보 + + + + ID: admin@admin.com + + Password: password + + + + + + 로그인 후 확인 + + + + Gitea 설치 및 DB 연동 + + 도커 설정 + + + + 이미지 + + + + gitea/gitea + + + + + +  포트 + + + + 2222:22 + + 3000:3000 + + + + + + 볼륨 마운트 + + + + + + /docker/gitea/data /data + + + + + + + + + + 환경 변수 + + + + + + GITEA__server__ROOT_URL https://gitea.ilfamilynas.synology.me + + + + + + GITEA_server__SSH_DOMAIN gitea.ilfamilynas.synology.me + + + + GITEA_server__SSH_PORT 2222 + + GITEA_ssh_LISTEN_PORT 22 + + GITEA__database__DB_TYPE mysql + + GITEA__database__HOST mariaDB:3306 + + GITEA__database__NAME gitea_db + + GITEA__database__USER gitea_user + + GITEA__database__PASSWD Gitea1Geonil! + + + + TZ Asia/Seoul + + + + + + USER_UID 1000 + + + + + + USER_GID 1000 + + + + + + restart: unless-stopped + + + + depends_on: mariadb + + + + + + Gitea에서는 UID GID 설정을 했으나, 안해도 무방 + + + + 컨테이너 설정 + + + + 터미널 접속 및 명령어 실행 + + chown -R 1000:1000 data + + File Station 설정 + + + + /docker/gitea/data + + 권한 설정: 생성 > Owner > 모든 권한 + + + + + + + + 도커 다시 시작 + + 로그 확인 + + 리버스 프록시 연동 + + 접속 확인 및 초기 설정 + + + + 홈페이지 이름 변경: Gitea: Il and Mors + + 사용자 등록 비활성화 체크 + + 페이지를 보기 위해 로그인 하기 체크 + + 관리자 계정 생성 + + Gitea 설치하기 + + + + 사양을 많이 잡아 먹으므로, 충분한 Reverse Proxy의 연결 제한 시간 설정 필요 + + + + + + + + Code-server 설치 및 Gitea와 연동 + + 도커 설정 + + + + 이미지 + + + + codercom/code-server + + + + + + 포트 + + + + 8080:8080 + + + + + + 볼륨 마운트 + + + + + + /docker/code-server/config /home/coder/.config + + + + /docker/code-server/data /home/coder/data + + + + + + + + 환경 변수 + + + + PASSWORD Codeserver1Geonil! 
+ + BIND_ADDR 0.0.0.0:8080 + + TZ Asia/Seoul + + + + restart: unless-stopped + + + + + + + + + + 컨테이너 설정 + + + + 권한 오류로 컨테이너 계속 비정상 종료되므로 도커 터미널이 아닌 NAS 직접 SSH 접속 후 다음 명령어 입력 + + + + #!/bin/bash + +#ssh [username]@[ipaddress] + +cd /volume1/docker/code-server + +sudo chown -R 1000:1000 ./* + +ls -l + + + + 권한 변경 확인 + + File Station 설정 + + + + /docker/code-server/config + + /docker/code-server/data + + 권한 설정: 생성 > Owner > 모든 권한 + + + + + + + + 리버스 프록시 연동 + + 로그 확인 + + 접속 확인 및 초기 Git 설정 + + + + 사양 문제로 인하여 code-server 정상 작동 불가 + + 따라서 VS code로 Git 동기화 하여 사용하다 추후 On-premise 네트워크 환경 구축 후 도커 설치 + + + + postgreSQL 설치 + + 도커 설정 + + + + 이미지 + + + + postgres + + + + + +  포트 + + + + [5432:5432] - 기본값, DSM 상에서 5432 이미 사용중이므로 임의의 값 입력 + + 5433:5432 + + + + + + 볼륨 마운트 + + + + + + /docker/postgres/data /var/lib/postgres/data + + + + + + /docker/postgres/backups /backups + + + + /docker/postgres/conf/pg_hba.conf /var/lib/postgres/data/pg_hba.conf + + + + + + 환경 변수 + + + + POSTGRES_PASSWORD PostgreSQL1Geonil! 
+ + TZ Asia/Seoul + + restart: unless-stopped + + + + + + pg_hba.conf 파일 + + + + 재적용 원할 시 bash에서  pg_ctl reload 명령어 입력, 혹은 psql 내부에서  SELECT pg_reload_conf(); 명령어 입력 + + + + # pg_hba.conf - 클라이언트 인증 설정 + +# ---------------------------------------------------------------------- + +# TYPE DATABASE USER ADDRESS METHOD + +# ---------------------------------------------------------------------- + +# TYPE: 연결 유형 (local, host, hostssl, hostnossl) + +# local: Unix 도메인 소켓 연결 (컨테이너 내부에서만 사용) + +# host: TCP/IP 연결 (SSL 사용 여부 무관) + +# hostssl: TCP/IP 연결 (SSL 필수) + +# hostnossl: TCP/IP 연결 (SSL 필수 아님) + +# DATABASE: 연결을 허용할 데이터베이스 (all, sameuser, samenet, replication, specific_db_name) + +# all: 모든 데이터베이스 + +# sameuser: 사용자 이름과 동일한 데이터베이스 + +# samenet: 사용자 IP 주소와 동일한 서브넷의 데이터베이스 + +# replication: 복제 연결 + +# specific_db_name: 특정 데이터베이스 이름 + +# USER: 연결을 허용할 사용자 (all, specific_user_name, +group_name) + +# all: 모든 사용자 + +# specific_user_name: 특정 사용자 이름 + +# +group_name: 특정 그룹에 속한 사용자 + +# ADDRESS: 연결을 허용할 IP 주소 또는 네트워크 대역 (IP/CIDR, hostname, all, samehost, samenet) + +# IP/CIDR: IP 주소와 CIDR 마스크 (예: 192.168.1.0/24) + +# all: 모든 IP 주소 + +# samehost: 서버 자체의 모든 IP 주소 + +# samenet: 서버의 모든 IP 주소가 속한 모든 서브넷 + +# METHOD: 인증 방식 (trust, reject, md5, scram-sha-256, peer, ident, gssapi, sspi, cert, pam, ldap, radius) + +# trust: 비밀번호 없이 연결 허용 (매우 위험, 테스트 환경에서만) + +# reject: 연결 거부 + +# md5: MD5 해시된 비밀번호 인증 + +# scram-sha-256: SCRAM-SHA-256 해시된 비밀번호 인증 (최신 권장) + +# peer: Unix 도메인 소켓에서 운영체제 사용자명 일치 여부로 인증 + +# ident: 클라이언트 운영체제 사용자명으로 인증 + +# ldap: LDAP 서버를 통한 인증 (FreeIPA 연동 시 사용 가능) + +# ---------------------------------------------------------------------- + +# 1. 로컬 연결 (컨테이너 내부에서 psql 클라이언트 등으로 접속 시) + +# Unix 도메인 소켓을 통한 연결은 기본적으로 'trust' (비밀번호 없이 허용)로 설정되는 경우가 많습니다. + +# 이는 컨테이너 내부에서만 가능하므로 비교적 안전합니다. + +local all all trust + +# 2. 호스트로부터의 연결 (Docker 'ports' 매핑 시) + +# Docker 호스트에서 127.0.0.1 (localhost)로 접속을 시도할 때 + +host all all 127.0.0.1/32 scram-sha-256 + +# 3. 
Docker 내부 네트워크에서의 연결 + +# Docker Compose 네트워크 내의 다른 컨테이너(예: Infisical)에서 PostgreSQL 컨테이너로 접속 시 + +# 'docker network inspect ' 명령어로 정확한 서브넷 확인 + +# 예시: 172.18.0.0/16 또는 172.17.0.0/16 + +host all all 172.17.0.0/16 scram-sha-256 + +# 4. 내부망의 특정 VM/물리 서버에서 접속 허용 (예시) + +# 만약 192.168.1.100 이라는 내부망 IP를 가진 VM에서 접속해야 한다면 + +# host all all 192.168.1.100/32 scram-sha-256 + +# 5. 특정 데이터베이스에 특정 사용자만 접근 허용 (더 세밀한 제어) + +# 예시: infisical_db 데이터베이스에 infisical_user만 Docker 내부 네트워크에서 접근 허용 + +# host infisical_db infisical_user 172.18.0.0/16 scram-sha-256 + +# 예시: 관리자용 계정(postgres_admin)만 특정 IP 대역에서 모든 DB에 접근 허용 + +# host all postgres_admin 192.168.1.0/24 scram-sha-256 + +# 6. 모든 IP 주소에서 접속 허용 (보안상 매우 위험, 외부 노출 시 VPN 필수) + +# 반드시 방화벽으로 접근을 제한하거나 VPN을 통해서만 접근하도록 강제해야 합니다. + +# host all all 0.0.0.0/0 scram-sha-256 + + + + + + + + + + + + 컨테이너 설정 + + + + 터미널 접속 및 명령어 실행 + + chown 999:999 /volume1/docker/postgres/data + + chown 999:999 /volume1/docker/postgres/backups + + File Station 설정 + + + + /docker/mariadb/mysql + + /docker/mariadb/backups + + 권한 설정: 생성 > Owner > 모든 권한 + + + + + + + + postgresql 설정 + + 터미널 접속 및 명령어 실행 + + + + 다음 명령어 입력하여 postgresql 접속 + + + + + + #!/bin/bash + +psql -U postgres #pg_hba.conf의 local trust로 비밀번호 없이 로그인 됨 + +#비밀번호 강제 원할 시 psql -U postgres -W 입력 + +#접근 제어는 pg_hba.conf에서 설정 + + + + + + + + + + 다음 명령어를 이용하여 admin 계정 생성 + + + + + + CREATE USER postgres_admin WITH PASSWORD 'PostgresAdmin1Geonil!'; -- admin 계정 생성 + +ALTER USER postgres_admin WITH SUPERUSER; -- Superuser 권한 부여 + +\du -- 유저 생성 확인 + +CREATE DATABASE postgres_admin; -- 계정 로그인 시 DB가 필수로 필요 + +ALTER DATABASE postgres_admin OWNER TO postgres_admin; -- 권한 부여 + +\l -- DB 확인 및 q로 종료 + + + + + + + + \q로 접속 종료 후, 해당 계정으로 접속해 정상적으로 적용되었는지 확인 + + + + 어플리케이션용 데이터베이스 생성 및 계정 생성 + + + + + + infisical + + + + + + CREATE USER infisical_user WITH PASSWORD 'Infisical1Geonil!'; -- 유저 생성 + +CREATE DATABASE infisical_db; -- 데이터베이스 생성 + +ALTER DATABASE infisical_db OWNER TO infisical_user; -- 권한 부여 + +\du + +\l -- 확인 
+ + + + + + #!/bin/bash + +psql -U infisical_user -W -d infisical_db # -d database 가 있어야 로그인이 된다 + + + + + + + + + + redis 설치 + + 도커 설정 + + + + 이미지 + + + + redis + + + + + +  포트 + + + + 6379:6379 + + + + + + 볼륨 마운트 + + + + + + /docker/redis/data /data + + + + #/docker/redis/data/redis.conf /usr/local/etc/redis/redis.conf + + + + 옵션, 지금은 마운트 안함 + + + + + + + + + + + + 환경 변수 + + + + #REDIS_PASSWORD: "your_redis_password" + + + + 이 환경 변수는 redis.conf에서 참조하거나, Redis 스크립트에서 사용 가능 + + Redis 비밀번호 (필요 시), Redis에 비밀번호를 설정하려면 requirepass 지시어를 redis.conf 파일에 추가 필요 + + + + + + TZ Aisa/Seoul + + + + restart: unless-stopped + + + + + + + + redis.conf 파일 + + + + + + # redis.conf - Redis 설정 파일 예시 + +# 1. 일반 설정 (GENERAL) + +# ======================== + +daemonize no # Redis를 데몬으로 실행할지 여부 (Docker 컨테이너에서는 'no'로 설정) + +protected-mode yes # 보호 모드 활성화 (외부 접속 제한, 비활성화 시 보안 위험) + +port 6379 # Redis 서버가 수신할 포트 + +# bind 127.0.0.1 # Redis가 수신할 IP 주소 (컨테이너에서는 일반적으로 설정 불필요, Docker 네트워크가 처리) + + # '0.0.0.0'으로 설정하면 모든 인터페이스에서 수신 (컨테이너 내부에서 기본) + +# loglevel notice # 로그 레벨 (debug, verbose, notice, warning) + +# logfile "" # 로그 파일 경로 (비워두면 stdout/stderr로 출력, Docker에서는 stdout/stderr이 일반적) + +# 2. 스냅샷 (SNAPSHOTS / RDB Persistence) - 데이터 영속성을 위한 핵심 + +# ======================================= + +# save + +# 지정된 시간(초) 동안 지정된 개수()의 키가 변경되면 RDB 파일(.rdb)을 생성하여 디스크에 스냅샷 저장 + +save 900 1 # 900초(15분) 동안 1개 이상의 키가 변경되면 저장 + +save 300 10 # 300초(5분) 동안 10개 이상의 키가 변경되면 저장 + +save 60 10000 # 60초(1분) 동안 10000개 이상의 키가 변경되면 저장 + +stop-writes-on-bgsave-error yes # 백그라운드 저장(bgsave) 실패 시 쓰기 작업 중지 여부 + +rdbcompression yes # RDB 파일 압축 사용 여부 + +rdbchecksum yes # RDB 파일 체크섬 사용 여부 + +dbfilename dump.rdb # RDB 파일 이름 + +dir /data # RDB 파일이 저장될 디렉토리 (Docker 볼륨 마운트 경로와 일치) + +# 3. 
AOF 영속성 (APPEND ONLY FILE / AOF Persistence) - 데이터 손실 최소화 + +# ================================================== + +appendonly no # AOF 파일 사용 여부 (yes로 설정하면 AOF 활성화) + + # AOF를 사용하면 RDB보다 데이터 손실 위험이 적으나, 파일 크기가 커짐 + +# appendfilename "appendonly.aof" + +# appendfsync everysecond # AOF 파일을 디스크에 동기화하는 빈도 (always, everysecond, no) + + # 'everysecond'가 좋은 균형 + +# no-appendfsync-on-rewrite no # AOF 재작성 중에도 fsync 실행 여부 + +# 4. 복제 (REPLICATION) - 고가용성/확장성 (클러스터 구성 시) + +# ========================================================== + +# replicaof # 이 서버를 다른 Redis 서버의 복제본으로 설정 + +# 5. 보안 (SECURITY) + +# ================== + +# requirepass your_redis_password # Redis에 접속하기 위한 비밀번호 설정 (강력 권장!) + + # 이 비밀번호는 Infisical의 REDIS_URL에도 포함되어야 합니다. + + # 'your_redis_password' 대신 실제 비밀번호로 교체 + +# rename-command CONFIG "" # CONFIG 명령 비활성화 (보안 강화) + +# rename-command KEYS "" # KEYS 명령 비활성화 (큰 프로덕션 환경에서 성능 저하 방지) + +# 6. 클라이언트 제한 (CLIENTS) + +# ============================ + +maxclients 10000 # 최대 동시 클라이언트 연결 수 + +# 7. 
메모리 관리 (MEMORY MANAGEMENT) + +# ================================== + +# maxmemory # Redis가 사용할 최대 메모리 양 (초과 시 데이터 삭제 정책 적용) + +# maxmemory-policy noeviction # 최대 메모리 도달 시 데이터 삭제 정책 (noeviction, allkeys-lru 등) + + + + + + + + + + infisical 설치 및 DB 연동 + + 도커 설정 + + + + 이미지 + + + + infisical/infisical:latest-postgres + + + + postgresql 사용시 태그는 latest-postgres + + 그냥 latest 사용시 mangodb를 불러오며, 에러 발생 + + + + + + + + + +  포트 + + + + 8080:8080 + + + + + + 볼륨 마운트 + + + + + + /docker/infisical/config /app/.infisical + + + + + + + + + + 환경 변수 + + + + + + ROOT_ENCRYPTION_KEY 7dc22797c2d74bc436f836696a888de5 + + + + openssl rand -hex 16 명령어로 생성 + + + + + + AUTH_SECRET bhR7cc44ItjJzz52g1C5tNmZPU2bfDQSIz4TlwdxxUU= + + + + openssl rand -base64 32 명령어로 생성 + + + + + + + + DB_CONNECTION_URI postgresql://infisical_user:Infisical1Geonil!@postgres:5432/infisical_db + + + + postgresql://[db_username]:[db_password]@[hostname:port]/[db_name] + + + + + + REDIS_URL redis://redis:6379 + + SITE_URL https://infisical.ilfamilynas.synology.me + + TZ Asia/Seoul + + + + restart: unless-stopped + + + + depends_on: postgres, redis + + + + + + + + 컨테이너 설정 + + + + 권한 오류로 컨테이너 계속 비정상 종료되므로 도커 터미널이 아닌 NAS 직접 SSH 접속 후 다음 명령어 입력 + + + + #!/bin/bash + +#ssh [username]@[ipaddress] + +cd /volume1/docker/infisical + +sudo chown -R 1000:1000 ./* + +ls -l + + + + 권한 변경 확인 + + File Station 설정 + + + + /docker/infisical/config + + 권한 설정: 생성 > Owner > 모든 권한 + + + + + + + + 도커 다시 시작 + + 로그 확인 + + 리버스 프록시 연동 + + 접속 확인 + + + + 일단은 편하게 let's encrpt를 쓰고 추후 cloudflare DNS 사용할 때 cloudflare 사용 + + + + + + + + 2025-06-05 - 초안 작성 + + 2025-06-20 - 날짜 표기 변경 + +부록 2.2. 프로젝트 구상 및 설계사항 북스택 문서화 +목표 + + 프로젝트 구상과 설계 사항을 북스택 문서화하여 체계적으로 정리합니다. + + 진행 사항 + + [2025-04-20 - 2025-05-12] + + 프로젝트 전반 기획 + + 프로젝트 시작 후 많은 부분이 변경되고 체계적인 문서화가 이뤄지지 않아 간략하게 서술합니다. + + [2025-04-20] + + + + 기존 NAS DS124의 성능의 한계를 느끼고 새로운 서버를 구축하기로 시도 + + + + [2025-04-21] + + + + 프로젝트 구상 시작. 
여러가지 방안 구상 + + + + NAS OS 위 필요 Docker로 서비스 + + 일반 서버 OS(ubuntu 혹은 다른 linux 계열 등) 위에 Docker 및 파일 서버 구축 + + Hypervisor 개념을 접하고, 가상화하여 예전 군에서 다루던 네트워크 환경 구축 + + + + + + + + [2025-04-21 - 2025-04-24] + + + + 단순 포트 개방 및 포트포워드 방식이 불편하던 중 reverse proxy 개념 접하여 sub domain 및 인증서 활용 시작 + + DS124내의 단순한 reverse proxy 기능 사용 후, SSL/TLS 개념에 대하여 처음으로 고찰하기 시작 + + 프로젝트를 진행하기로 마음 먹은 김에 학습과, 경험을 동시에 쌓을 수 있는 hypervisor 안을 선정 + + 안건 선정 후 가격, 운영비(전기세 포함), 필요 인터페이스, 24/7 운영 등을 고려하여 하드웨어 선정 + + + + Aoostar WTR Pro N150/RAM 16GB/SSD 256GB + + + + + + https(443)/http(80) 외에 다른 프로토콜은 reverse proxy가 지원되지 않는 것을 확인 + + + + RDP(3389)를 리버스 프록시를 통해 이용하려 했으나 실패 + + reverse proxy 작동을 위해 Domain이 담긴 패킷이 필요하다는 것을 확인 + + VPN의 개념과 필요성 확인 + + + + + + + + [2025-04-24 - 2025-05-01] + + + + hypervisor 위에 가상화를 고려하며 방화벽 및 IPS/IDS 서비스를 고려 + + + + 실제 패킷의 흐름을 제어하고, 확인하고 싶어 선택 + + T5004의 기능적 한계(DHCP relay 부재, VLAN 부재)와 집안 네트워크 구조의 물리적 한계 확인 + + OPNsense를 통해 어느 정도 회피 계획을 수립 시작 + + + + + + 필요 서비스들을 정리 + + + + LADP/SSO에 대한 개념의 부족, Kerberos는 아직 LADP/SSO를 고려하기 전이라 제외 + + 내부 CA 도입으로 단순한 SSL/TLS를 통한 암호화 및 취약 알고리즘(NFS 등)을 VPN으로 격리 계획  + + 파일 서버/DNS 서버/Proxy 서버/내부 CA/도커를 활용한 VM 서버들/DB 서버/보안 실습 서버 등 선정 + + SPOF와 SoC에 대한 개념 정립 시작, 가지고 있는 한계 속에서 어떻게 최선의 결과를 만들어 낼 수 있을 지 고민 + + 파일 시스템에 대한 고민을 시작, Btrfs/zfs 등의 시스템과 RAID를 통한 가용성에 대해 확인함 + + 부족한 RAM과 4bay HDD를 위해서 btrfs 및 raid 10이 가장 적합하다고 결론 + + + + + + 추후 접근이 편리하도록 Cloudflare를 이용하여 Domain을 구매함 + + + + [2025-05-01 - 2025-05-05] + + + + 필요한 서비스들을 점검하는 중 16GB RAM이 부족함을 확인 후 급하게 32GB RAM으로 교환 + + 필요한 서비스들을 구축할 때 필요한 예상 장애 지점과 문제사항에 대하여 QnA 형식으로 정리를 시작 + + IPS/IDS 서비스가 많은 부하가 있다는 것을 확인 후 체험 및 공부 수준의 필터링/관제로 계획 + + N150 프로세서의 성능 한계 파악 후 RAM과 함께 리소스 분배 계획 + + 데이터 백업과 모니터링에 대하여 계획(구체적 X) + + 추후 ACL의 관리의 용이함을 위하여 LADP/SSO에 대한 필요성을 인지, 하지만 잘 이해가 되지 않아 보류 + + VM/LXC/Docker의 논리적 구분 정도와 보안 위험성을 인지하고 서비스별로 올바른 수준의 격리를 실천 시도 + + + + LXC는 호스트의 커널을 공유 + + Docker는 호스트의 루트 권한을 요구 + + 따라서 LXC에서 Docker의 실행은 비권장 + + + + + + Split Horizon DNS 개념을 확인 후 내외부망에서의 일관된 접속을 구현하려 계획 + + L2 통신이 게이트웨이를 거치지 않는고 
ARP 및 Unicast 통신을 한다는 사실을 확인 + + + + OPNsense 상에서의 내부망 통신 통제가 아닌 각 클라이언트 별 iptable 및 방화벽 설정, Zero Trust 전략을 수립함 + + + + + + DMZ의 필요성과 어떤 경우의 효용성이 있는지 분석 후 도입하지 않기로 결정 + + + + 관제팀 및 CERT 운용 없이는 공격 포인트만 추가 + + + + + + 체계적인 문서화의 필요성을 깨닫고 문서화를 위한 도구들을 확인 + + + + Obsidian, Git 등이 있었으나 Bookstack이 가장 편리해 보여 Bookstack을 선택 + + + + + + + + [2025-05-05 - 2025-05-10] + + + + 서버에 필요한 하드웨어 배송되어 하드웨어 이상 확인 + + 북스택으로 문서를 체계화 하기 전 프로젝트 내용을 최대한 다듬으며, 다이어그램 등으로 시각적 자료 생성 시작 + + 각 서버에 필요한 OS 파일을 다운로드 하며 SHA 값을 체크(위장 OS 파일인지 확인) + + DS124에 임시로 사용할 수 있는 Bookstack Docker를 설치 + + + + + +  Synology DSM 7.1 Container Manager 패키지 상에서 MariaDB 및 Bookstack 도커 실행 불가 상황 + + + + + + + + + + [2025-05-10 - 2025-05-12] + + + + 본격적인 Bookstack을 통한 문서 체계화 시작 + + LADP/SSO 기능 및 Kerberos, DNS, 내부 CA 까지 전담 가능한 FreeIPA의 존재를 확인 계획 전면 수정 + + + + VPN을 통한 NFS 암호화 > FreeIPA를 통한 Kerberos로 NFS 암호화 + + OPNsense 가 전담하던 내부 CA 기능을 SoC에 따라 FreeIPA에서 담당하도록 계획 수정 + + 별도로 두려던 DNS를 FreeIPA와 통합 + + 원래 DNS에서 담당하려 한 DDNS는 역시 SoC에 따라 외부와 통신하는 proxy에서 담당하도록 계획 수정 + + 기존 Docker에서 사용하던 서비스를 그대로 이전할 수 있는 방법 확인(docker-compose.yml 및 .env, DB dump 등) + + 더 나아가 추후 프로젝트를 더욱 확장시킬 수 있는 HA 구성 등을 설립함(이상적인 아키텍쳐 환경) + + + + + + + + + + [2025-05-13] + + 1. 프로젝트 개요 챕터 작성 + + + + 1.1. 동기 및 목표 + + 1.2. 하드웨어 구성   + + + + + + [2025-05-15] + + 1. 프로젝트 개요 챕터 작성 + + + + 1.3. 최종 목표 아키텍처 + + + + + + [2025-05-24] + + 1. 프로젝트 개요 챕터 작성 + + + + 1.4. 추가 프로젝트 계획 + + + + 2. 프로젝트 설계 챕터 작성 + + + + 2.1. 하드웨어 선정 + + + + + + [2025-05-25 - 2025-05-28] + + 세부 내용 수정 + + + + 각 서버 별 예상 RAM 수정 + + DBMS 단일 운영 불가 예상으로 DB server를 vm으로 변경 후 각 DBMS docker로 계획 수정 + + 각 페이지의 내용 및 문체 수정 + + Ansible 도입을 통한 서버 설정 자동화 계획 추가 + + 예상 프로세서 할당량 및 우선 순위 구체화 + + + + 2. 프로젝트 설계 챕터 작성 + + + + 2.2. 
네트워크 설계 + + + + + + [2025-05-29] + + 세부 내용 수정 + + + + SSD를 SKHynix 256GB에서 Samsung 1TB로 변경 + + + + + + [2025-05-30 - 2025-05-31] + + 네트워크 구조 재설계 + + + + 최종 목표 아키텍처에서 VLAN을 최대한 활용하여 untagged VLAN(VLAN0)과 hypervisor 내부의 VLAN1,2,3으로 구별 + + 이상적인 아키텍처 부분 수정 - OpenWRT가 T5004를 지원하므로 물리적인 VLAN 구현이 생각보다 쉽기 때문에, 추후 OpenWRT 추가 프로젝트 진행 + + 물리 구성도와 아키텍처 구성도 재작성 + + 이상적인 아키텍처를 OpenWRT 적용과 HA(고가용성)을 위한 추후 프로젝트 계획으로 분리하여 현재 목표를 더욱 명확히 함 + + + + + + [2025-06-02] + + 네트워크 구조 고려사항 추가 + + + + NIC1과 NIC2를 vmbr 혹은 vtnet을 통해 어떻게 OPNsense와 연결할지에 대하여 명확하게 정의하고 다이어그램을 추가 + + + + + + [2025-06-04] + + 2. 프로젝트 설계 챕터 작성 + + + + 2.3. 시스템 선정 + + + + + + [2025-06-05] + + 부록 1. 문서 작성 가이드라인 및 용어집 챕터 작성 + + + + 부록 1.1 문서 작성 가이드라인 + + + + 부록 2. 구현 일지 챕터 작성 + + + + 챕터 작성 후 기존 구축 과정 및 일지 챕터의 [2025/04/20 - 현재] 프로젝트 구상 및 설계사항 북스택 문서화 (현재 페이지) 이동 + + To-Do List + + + + 부록 3. 문제 해결(Troubleshooting) 챕터 작성 + + + + 챕터 작성 후 기존 구축 과정 및 일지 챕터의 [2025/05/06] Synology DSM 7.1 Container Manager 패키지 상에서 MariaDB 및 Bookstack 도커 실행 불가 상황  이동 + + + + 구축 과정 및 일지 챕터 삭제 + + + + [2025-06-07] + + 2. 프로젝트 설계 챕터 작성 + + + + 2.5. 구현 계획 + + + + + + [2025-06-08] + + 부록 3. 문제 해결(Troubleshooting)  챕터 작성 + + + + Bookstack 내 오류 해결 후 다음 내용 작성 + + + + + + Bookstack 도커 내부 게시글 이미지 갱신 불가 상황 + + + + + + + + + + 2. 프로젝트 설계 챕터 작성 + + + + 2.4. 보안 설계 + + + + + + [2025-06-14] + + 부록 1. 문서 작성 가이드라인 및 용어집 챕터 작성 + + + + 부록 1.2. 용어집(임시) + + + + 1. 프로젝트 개요 챕터 부록 1. 문서 작성 가이드라인 에 따라 재작성  + + + + 1.1. 동기 및 목표 + + 1.2. 하드웨어 구성   + + + + + + [2025-06-15] + + 1. 프로젝트 개요 챕터 부록 1. 문서 작성 가이드라인 에 따라 재작성  + + + + 1.3. 목표 아키텍처 + + + + + + [2025-06-16] + + 세부 내용 수정 + + + + OpenWRT 적용과 HA(고가용성)을 위한 추후 프로젝트 계획을 각각 Open WRT 적용 계획과 HA를 위한 계획으로 분리 + + + + 네트워크 구조 추가 + + + + AdGuard Home LXC를 추가하여 FreeIPA의 Local DNS와 연동, 광고 차단 기능 구현 및 다음 문서 수정 + + + + 1. 프로젝트 개요 챕터 부록 1. 문서 작성 가이드라인 에 따라 재작성  + + + + 1.4. 
추가 프로젝트 계획 + + + + + + [2025-06-17] + + 세부 내용 추가 + + + + VPN 내용 추가 + + PVE의 메모리 할당 부분에 KSM 내용 추가 + + DB/File/Web/WAS/Application Server 내 Docker 목록 작성 + + 구현 계획에 DS124에 임시 서비스 설치 계획 추가 + + + + 2. 프로젝트 설계  챕터 부록 1. 문서 작성 가이드라인 에 따라 재작성 + + + + 2.1. 하드웨어 선정 + + 2.2. 네트워크 설계 + + 2.3. 시스템 선정 + + + + + + [2025-06-19] + + 2. 프로젝트 설계 챕터 작성 + + + + 2.4. 보안 설계 내용 구체화 + + + + + + [2025-06-20] + + 세부 내용 변경 + + + + 날짜 형식 표준에 맞게 변경 + + + + + + [2025-06-23] + + 세부 내용 변경 + + + + VLAN0을 Untagged VLAN로 수정 + + + + + + [2025-06-24] + + 세부 내용 추가 + + + + 2.1 하드웨어 선정 + + + + 각 VM/LXC 별 SSD 용량표 추가 + + + + + + + + + + 2025-05-13 - 초안 작성 + + 2025-06-20 - 날짜 표기 변경 + +부록 2.3. 네트워크 장비 설정 +목표 + + 네트워크 장비 설정의 진행 사항을 체계적으로 정리합니다. + + 진행 사항 + + [2025-06-19] + + 세부 내용 추가 + + + + VLAN10 - VPN Clients 망 추가 + + DS124 임시 서비스 구현 계획 변경 + + + + PVE 내 Debian (stable - netits) VM 위에 임시 서비스 구현 + + 추후 서비스 생성 시 삭제 + + + + + + FreeIPA의 OS로 Rocky Linux 결정 + + + + IP Matrix 작성 + + + + 3.1 IP Matrix 작성 + + 임시 IP Matrix(VLAN0) 작성 완료 + + + + [2025-06-20] + + 세부 내용 추가 및 수정 + + + + 임시 IP Matrix(VLAN0) Client 별 MAC 주소 추가 + + + + T5004/AX6000M 설정 + + + + 3.2. T5004/AX600M 설정 작성 + + + + 초기 설정 + + DHCP 서버 설정 + + Easy Mesh 설정 + + 보안 설정 + + 물리적 보안 확인 + + + + + + + + [2025-06-23] + + 세부 내용 추가 및 수정 + + + + VLAN0을 Untagged VLAN로 수정 + + + + + + 2025-06-19 - 초안 작성 + +부록 2.4. PVE 설정 +목표 + + PVE를 설치하고, 설정의 진행사항을 체계적으로 정리합니다. + + 진행 사항 + + [2025-06-21] + + UID/GID Matrix 작성 + + + + 4.1. UID/GID Matrix 작성 + + + + Local UID/GID 대역 정의 + + 각 서버별 대표 UID/GID 정의 + + Docker의 UID/GID 정의 + + FreeIPA LDAP/SSO 전용 UID/GID 대역 정의 + + + + + + + + PVE 설치 + + + + 4.2. PVE 설치 + + + + WTR Pro에 PVE 설치 완료 + + PVE 원격 접속 가능 + + sudo 기능 설치 + + admin 계정 생성 및 root 계정 설정 + + + + root 계정 ssh 접근 비활성화 + + root 계정 Web UI 접근 비활성화 + + pveadmin 계정 생성(2000:2000, sudo) + + Web UI 상 pveadmin 권한 부여 + + + + + + + + + + + + + + [2025-06-23] + + PVE 설치 + + + + 4.2. 
PVE 설치 + + + +  vmbr 생성 및 설정 + + + + STP로 비활성화로 인한 주의점 명시 + + + + + + + + + + + + + + [2025-06-23] + + PVE 상 VM/LXC 설치법 + + + + 4.3. PVE 상 VM/LXC 설치법 + + + + VM 설치법 정리 + + LXC 설치법 정리 + + + + + + + + + + [2025-06-23] + + PVE 내 iptable 적용 + + + + 4.3. PVE 상 VM/LXC 설치법 + + + + iptables 규칙 적용 + + + + + + + + [2025-06-27] 세부내용 추가 4.1. Local Uid/gid 계획 세분화 + + + + + + 2025-06-21 - 초안 작성 + +부록 2.5. 임시 Server 설정 +목표 + + 임시 Server VM을 PVE 위에 설치하고, 설정의 진행사항을 체계적으로 정리합니다. + + 진행 사항 + + [2025-06-25] + + + + 5.1. Debian 설치 + + + + PVE 상 Debian 설치 완료 + + root 계정 및 local 계정 설정 + + + + ssh root 계정 접근 차단 + + + + + + 필수 패키지(iptables, sudo, curl) 설치 완료 + + iptables 규칙 적용 + + + + + + + + + + [2025-06-26] + + + + 5.2 DDNS 설정 + + + + DDNS 스크립트 작성(Cloudflare API 이용하여 직접 작성) + + 작동 확인 + + + + + + + + + + [2025-06-27] + + + + 5.3. Docker 설치 환경 설정 + + + + docker engine 및 compose plugin 설정 완료 + + + + + + + + + + 2025-06-25 - 초안 작성 + +부록 3. 문제 해결(Troubleshooting) + +[2025-05-06] MariaDB 및 Bookstack 도커 실행 불가 상황 +목표 + + Synology DS124 모델 DSM 7.1 Container Manager 패키지 상에서 MariaDB 및 Bookstack 도커 실행 불가 상황 해결 + + 장애 증상 + + + + MariaDB 도커 DB 생성 실패 및 Bookstack 도커 구동 실패 + + Container Manager 상에서는 정상 작동으로 표시 + + MariaDB 도커의 로그 중 Permission denied 메세지 확인 + + Bookstack 도커의 로그 중 DB connection failed 메세지 확인 + + Bookstack 도커 쉘에서 MariaDB 도커 접속 불가 확인 + + + + 시도 조치 + + + + 최초 MariaDB의 계정 및 DB 구성 문제로 판단 후 도커 쉘로 접속하여 Bookstack이 참조하는 DB 및 계정 수동 생성 + + DSM Container 패키지 상 도커 쉘 접속 방법: DSM Container manger > container > 도커 컨테이너 이름 > 작업 > 터미널 열기 > 생성으로 도커 내부 접속 + + + + 이후 Bookstack에서 MariaDB로 접속 시도했으나 접속 불가 + + TLS 통신 문제인지 확인하기 위하여 TLS 접속 미사용 옵션 확인(have_SSL DISABLE) + + show variables like '%ssl%'; + +--- + +have_SSL DISABLE + +--- + + + + TLS 사용이 비활성화 되어 있으므로, 이로 인한 통신 문제가 아닌 것 확인 후 컨테이너 재배포 하여 구동 로그 확인 + + MariaDB 도커의 로그 중 Permission denied 메세지 확인 + + 도커 내부 디렉토리(마운트 된 디렉토리) 권한 수동으로 설정 + + + + 해결 방안 + + + + File station에서 마운트된 디렉토리의 소유자 확인 + + 디렉토리의 소유자가 MariaDB 도커 컨테이너의 고유 UID인 1000이 맞는지 확인 + + 단, 디렉토리를 미리 만들면 
1000이 아닌 DSM 사용자의 ID가 지정되므로 도커가 자동으로 생성하게 둬야 한다. + + + + 마운트 된 디렉토리의 권한을 소유자+rwx 하고 난 뒤 컨테이너 재시작 + + MariaDB 도커의 로그 중 Permission denied 메세지 없이 정상 구동 되는 것 확인 + + Bookstack 도커에 마운트 된 디렉토리의 권한 역시 소유자+rwx로 설정 + + Bookstack 도커 실행 이후 MariaDB 도커 내에서 DB 정상 생성되는 것 확인 + + Bookstack 서비스 정상 작동 확인 + + + + 원인 분석 + + Synology DSM은 도커 컨테이너가 마운트 하는 디렉토리에 ContainerManager 그룹이 rwx를 할 수 있도록 권한을 부여합니다. 이 때, MariaDB 및 Bookstack은 고유의 UID/GID를 컨테이너에서 할당하는데 이 부분에서 ACL 충돌이 발생한 것으로 보입니다. 따라서 소유자는 UID/GID:1000/1000인데 디렉토리의 소유자에게 권한이 없어서 디렉토리 접근이 불가능였습니다. 이는 정상적인 DB 생성 및 통신이 불가능한 결과를 만들었습니다. 명시적으로 디렉토리의 소유자에게 rwx 권한을 부여한 이후 문제는 더 이상 발생하지 않았습니다. + + 결론 + + Docker는 가상화된 환경을 제공하지만, Dockerd 실행 과정에서 host의 권한 구조를 따릅니다. 이 경우 특수한 목적의 OS(Synology DSM)등에서는 세부적이지 못한 권한 구조와 Docker의 권한 구조가 충돌할 수 있습니다. 그러므로 일반적인 Linux 환경이 아닌 이런 특수한 환경에서 Docker 구동시 반드시 마운트 된 디렉토리의 소유자와 권한, 그리고 컨테이너 내부의 UID/GID가 일치하는지 확인하여야 합니다. + + 또한 공식 문서나, 블로그 상의 안내 방법은 어디까지나 일반적인 상황에서의 방법이므로, 각 명령어에 대한 이해 없이 단순히 복사 붙여넣기를 할 경우 예기치 못한 오류 혹은 충돌이 발생할 수 있으므로, 장애 발생시 언제나 기본적인 ACL등 부터 체크하고 로그를 꼼꼼히 확인하는 습관을 들여야 할 것입니다. + + + + 2025-05-16 - 초안 작성 및 h2 태그 수정 + + 2025-05-25 - 문체 수정 + + 2025-05-31 - 도커 쉘 접속 방법 추가 + + 2025-06-05 - TLS 통신 부분 명확화 + + 2025-06-20 - 날짜 표기 변경 + +[2025-06-08] Bookstack 도커 내부 게시글 이미지 갱신 불가 상황 +목표 + + Synology DS124 모델 DSM 7.1 Container Manager 패키지 상에서 실행되는 Bookstack 도커 내부 게시글의 이미지 갱신 불가 상황 해결 + + 장애 증상 + + + + Bookstack 내부 페이지 작성 시, 이미지 파일을 갱신하여도 이전 이미지 출력 + + + + 시도 조치 + + + + Bookstack 내부, 이미지 파일 업로드 디렉토리 확인 + + 내부 디렉토리 (/www/uploads/images/gallary/YYYY-MM) 내부 파일 변경 확인 + + 정상적인 갱신 확인 후, Cache 문제 확인 위하여 다른 기기의 브라우저로 접속 + + 정상적으로 갱신 이미지 출력 확인 + + 강력 새로 고침 시도(단축키 ctrl+F5 혹은 ctrl+shift+R) 후 기존 브라우저 상 갱신 이미지 정상 출력 확인 + + + + 해결 방안 + + 이미지 등 페이지 내부 콘텐츠가 갱신되지 않을 시 캐시 제거 및 강력 새로 고침 시도 후 갱신 확인 + + 원인 분석 + + Cache는 서버의 지연을 줄이기 위하여 웹 페이지/이미지/멀티미디어 등을 임시로 저장하는 기술입니다. 브라우저는 페이지를 로딩할 때 HTML 파일을 새로 받아오지만, 브라우저 내부 Cache에 유효 기간이 남은 파일들을 다시 불러오지 않고 사용합니다. 이는 데이터 사용량을 줄일 뿐 더러, 속도면에서도 유리하기 때문입니다. 
하지만 이러한 파일들이 수정되었을 경우, Cache 내부의 파일을 참조하고 새로 수정된 파일을 불러오지 않아 수정된 파일이 웹 페이지 상에서 갱신되지 않는 경우가 있습니다. + + 이러한 경우 직접 브라우저 설정에서 Cache를 제거하거나, 강력 새로 고침 기능을 이용하여 Cache 내부에 있는 모든 데이터를 무시하고 다시 불러온다면 해결됩니다. + + 결론 + + 서비스에 발생하는 오류는 서비스 자체의 문제일 수도 있지만 반대로 서비스를 불러오는 클라이언트의 문제일 수도 있습니다. 따라서 어떠한 오류가 발생할 경우 서비스 부분만을 찾아보는 것이 아닌 클라이언트, 네트워크 등 다각변에서 접근할 필요가 있습니다. + + + + 2025-06-08 - 초안 작성 + + 2025-06-20 - 날짜 표기 변경 + +[2025-06-19] - Gitea 도커 초기 설정 페이지에서 Gitea 설치 클릭시 504 Gateway Timeout 발생 상황 +목표 + + Synology DS124 모델 DSM 7.1 Container Manager 패키지 상에서 Gitea 도커 초기 설정 도중, 초기 설정 페이지에서 Gitea 설치하기 버튼 클릭시 504 Gateway Timeout 발생 상황 해결 + + 장애 증상 + + + + Gitea 도커 설치 후 초기 설정 페이지까지 정상 접속 확인 + + 이후 Gitea 설치하기 버튼 클릭시 Synology NAS의 "죄송합니다. 찾고 있는 페이지를 발견하지 못했습니다." (404 Not Found) 오류 발생 + + Gitea container log 상에는 오류 메세지 발생 없음 + + 브라우저 개발자 도구 확인하여 504 Gateway Timeout 오류 발생 + + + + 시도 조치 + + + + 최초 MariaDB의 계정 및 DB 구성 문제로 판단 후 도커 쉘로 접속하여 Gitea가 참조하는 DB 및 계정 설정 확인 + + DSM Container 패키지 상 도커 쉘 접속 방법: DSM Container manger > container > 도커 컨테이너 이름 > 작업 > 터미널 열기 > 생성으로 도커 내부 접속 + + + + DB 및 계정 정상 설정 확인 후 Gitea 도커 쉘로 접속하여 ping 명령어를 통해 연결 상태 확인 + + 도커 container에 마운트 된 /docker/gitea/data 폴더의 권한 확인 + + 모든 문제 확인 후 Synology의 Reverse Proxy 확인 후 프록시 연결 시간 제한 , 프록시 보내기 시간 제한 , 프록시 읽기 시간 제한 60초에서 600초로 변경 + + + + 해결 방안 + + + + 리버스 프록시 상 프록시 연결 시간 제한 , 프록시 보내기 시간 제한 , 프록시 읽기 시간 제한 설정을 충분히 길게 설정 + + 초기 설정 페이지 리다이렉션 후 Gitea 설치 재시도 + + 정상 설치 페이지 작동 확인 + + + + 원인 분석 + + Synology의 Reverse proxy 기능은 대상 서버가 시스템 리소스의 과도한 사용을 막기 위해 다음과 같은 기본 타임 아웃 설정을 사용합니다. + + + + 프록시 연결 시간 제한 = 60초 + + 프록시 보내기 시간 제한 = 60초 + + 프록시 읽기 시간 제한 = 60초 + + + + 이 때 DS124의 제한된 사양(RAM 1GB, 읽기/쓰기 cache 없음, HDD 사용)은 Gitea같은 서비스를 설정하는 시간을 지연시키는 원인이 됩니다. Gitea가 설치에 걸리는 시간이 60초를 넘어가면서 Reverse Proxy는 자동으로 클라이언트와의 연결을 종료하였고, 이에 504 Gateway Timeout 오류가 발생한 것입니다. + + 결론 + + 특정 서비스, 특히 웹 서비스를 사용할 때 서비스나 네트워크 연결 등 자체의 문제가 전혀 없더라도 Reverse Proxy의 설정으로 인하여 504 Gateway Timeout과 같은 오류가 발생할 수 있습니다. 
백엔드 작업 시간이 Reverse Proxy의 타임아웃 설정보다 길기 때문에 발생하는 문제이며, 특히 제한된 하드웨어 성능 하에서는 예상보다 초기화 시간이 길어질 수 있으므로 이를 잘 확인해야 합니다. + + + + 2025-06-19 - 초안 작성 + + 2025-06-20 - 날짜 표기 변경 \ No newline at end of file diff --git a/docs/archives/2025-06/홈 서버 구축 계획.txt b/docs/archives/2025-06/홈 서버 구축 계획.txt new file mode 100644 index 0000000..019958a --- /dev/null +++ b/docs/archives/2025-06/홈 서버 구축 계획.txt @@ -0,0 +1,102 @@ +홈 서버 구축 계획 + +1. 목표 + a) 집에서 실제로 사용할 다목적 홈 서버를 구축하며, 다양한 서비스를 분산형으로(마치 실제 물리서버로 분리된 것같이) 관리하고, 이를 실제로 사용하며 정보보안에 관한 지식을 적용하고자 함. + b) 막연하게 생각되는 기술스택 및 포토폴리오 및 정보보안기사를 구체적으로 준비 하기를 원함 + Git 등의 여러 운영 서비스에 익숙해 지기를 원함. + c) 당장 이 계획을 시작으로 모든 과정을 문서화 하고 정리하여 스택을 쌓기를 원함. + +2. 준비된 H/W 및 설정 + a) WTR PRO N150 (N150, 2.5gb lan 포트 x2, RAM x1, m.2 nvme x1, sata x4) + b) 16GB DDR4 3200 x1 + c) NVME SSD 256GB x1, SATA SSD 250GB(콜드 백업 용) + d) sata 3.5" HDD 500GB x4 + e) Domain Name 구매 예정(Cloudflare를 통한 Domain 발급과, api를 활용한 DDNS 자동화, SSL 인증서 발급 및 자동 갱신 고려, 추가로 DDoS 및 자동화된 포트 스캔등을 방지하기 위하여, 공인 ip가 아닌 DNS를 통해 들어오는 요청만 처리할 수 있도록 고려 중) + f) 추가 라우터(iptime a1004v) + g) 현재 홈 네트워크 구조 + WAN - easy mesh(192.168.0.x/24) + { + Main Gateway(Main router) - PC1(static IP) + - PC2(static IP) + - Sub router(static IP) - DS124(Main NAS server) (static IP) + - printer (static IP) + - wireless mobile devices(dynamic IP) + } + Main Router 기준: 국가 차단(한국 캐나다 ip만 허용), 열린 포트(443:https) + NAS 기준(SMB, SSH, SFTP 등 모든 well-known 포트 차단, 오직 http/https 기반 접근만 허용) + +3. 고려 사항 + a) 실제로 서버 구축시 내/외부망 막론하고 사용이 가능한가? + b) 보안 상으로 안전한가? + c) 준비된 H/W 성능 안에서 구현이 가능한가? + d) 이를 통해 나의 기술 및 지식이 성장할 수 있는가? + +5. 기본안(3 가지) + a) 각각의 서버 HW 준비하여 전부 구축 + b) 하나의 운영체제(리눅스 예정)를 기반으로 각 서비스를 도커의 컨테이너 기술을 기반으로 격리하여 (파일 서버, 웹 서버, WAS, 프록시 서버, 미디어 서버, 사진 서버 및 도커를 통한 여러 기능들) 사용. + c) 하이퍼 바이져를 기반으로 각 서비스를 VM 기반으로 OS별 격리하여 (현재 라우터에서 VLAN 사용이 불가능한 지금, MACVLAN을 이용하여 iptime v1004에 공인 IP를 할당해 주고, 내부에서 다시 LAN 환경을 구축 = 방화벽 등의 기능 실습 가능) 사용. + +6. 각 안 별 장단점 + a) 기본안 1 + 불가능- 금전적인 여유 X + b) 기본안 2 + 1) 장점 + A) 관리의 편리함: 모든 서비스는 하나의 OS에서 구동됨. 
따라서 각각의 계정을 통한 권한 설정 및 파일 관리의 용이함(각 서버별 파일을 공유할 시, 네트워크를 활용할 필요 없이 그냥 심볼릭링크를 활용하여 도커 서비스에서 마운트하는 등) 이는 보안설정에도 이점을 줌. + B) 이미 구성된 홈 네트워크: 이미 하나의 공인 IP를 기반으로 홈 네트워크가 구성되어 있어, 하나의 OS에 IP 설정만 해두면, 내부적으로 따로 네트워크를 사용할 필요가 없이 서비스가 가능함(도커의 bridge 기능) + C) 하드웨어 사양이 절약됨: 하나의 OS 상에서, OS 보다 가벼운 콘테이너 기반으로 서비스를 운영하기 때문에 하드웨어 사양이 더 높아질 필요가 없음. + D) 데이터 백업의 편리함: 모든 데이터는 OS 상의 /home 안에 저장이 가능함. 각 서버의 설정은 /home/docker 내부에 저장이 가능하며 각 유저의 설정은 /home/usr에서 관리가 가능함. 이는 백업할 데이터가 /home 하나로 일원화 될 수 있음을 이야기함. 심지어 도커의 설정도 단순히 docker-compose.yml을 통해 백업 가능. + 2) 단점 + A) 성장 가능성: 이미 모든 도커는 사용이 가능한 상태로 올라와 있으며, 이를 통해 내가 무엇인가를 구축할 필요 없이 그냥 있는 것을 설치해서 사용하면 됨. 따라서 이해하는 것이 아닌, 단순히 글을 보고 복사 붙여넣기를 하는 상황에 빠지기 쉬움. + B) 보안 지식 습득의 불리함: 이미 홈 네트워크를 구성하며 외부로 부터 내부 서버를 공격할 때 어떤 부분을 고려하고 어떤 부분을 생각해야 하는지 어느정도 습득한 상태에서, "내부망"으로 부터의 공격에 대한 정보 지식습득에 불리함. 앞서 언급했듯, 각 서버별 통신에 네트워크를 사용하는 것이 아닌, 도커 내부의 브릿지 네트워크를 사용하므로 굳이 내부망 보안을 신경쓸 필요가 없어짐. + c) 기본안 3 + 1) 장점 + A) 실제 서버를 운영할 때 장비를 분리하는 것 처럼 모든 장비를 논리적으로 분리하여 사용할 수 있음. (방화벽, etc...) + B) MACVLAN 기능을 적용하여 새로운 공인 ip를 적용하여 홈 서버가 제대로 구축되고 안정화 될 때 까지 격리된 네트워크를 형성해 기존의 홈 네트워크와 분리된 새로운 홈 네트워크를 사용할 수 있음 (기존의 사용 중인 home network와 격리된 환경에서 실험 가능) + C) kail linux 등 실제 해킹 실습용 OS를 올려 해킹 공격/방어 실습이 가능 + D) 방화벽에 대한 심도 깊은 이해 가능 + E) 기본안 2의 내용도 동시에 실습 가능: 모든 서버를 전부 각각의 OS로 올리는 것이 아닌, 한 서버에서 여러 가지 역할을 하는 다목적 서버를 구성할 수 있음. (e.g. 미디어 서버 - immich, vaultwarden, jellyfin 등은 하나에 서버에서 Docker를 통해 구축/ 웹 서버 - Docker를 통해 Wordpress 등의 블로그 서비스 혹은 wiki 서비스 등을 구현 가능) + F) 내가 각각 구현하고 올려본 서버들을 도커 컨테이너화 하여 애드온 형식으로 커스텀 서버를 내가 직접 만들어 볼 수도 있음. + 2) 단점 + A) 하드웨어 사양과 관련된 문제: 현재 PC의 스펙으로 이 모든 것들이 가능할지는 잘 모르겠음. 다만 각 서버의 기반이 될 Linux 등은 전부 CLI로 구성할 것이기에 스펙적으로 크게 모자랄지는 모르겠음. + B) 데이터 백업의 불편함: 각 데이터를 백업하기 위해 모든 데이터를 /home만 백업하는 기본안 2와는 다르게 각각의 운영체제 및 데이터를 전부 따로 백업해야 함. - 데이터 서버를 하나 둠으로, 데이터들은 데이터 대로, 나머지는 각각의 OS단만 백업하는 형식으로 최대한 간단하게 만들 수 있을 것이라고 예상됨. + +7. 
최종 선택 및 실행 방향 + a) 최종 선택: 기본안 3 + b) 네트워크 설정: MACVLAN을 통해 새로운 공인 ip를 새로운 공유기에 할당 후, 기존의 네트워크와 별개의 새로운 "홈 서버만을 위한 네트워크 구성"과 고려 사항 확인 + 1) 하이퍼 바이져 자체가 L2 스위치 기능을 적용할 수 있는지...? + 2) 하이퍼 바이져 내부에 있는 격리된 OS들은 각각의 사설 IP를 공유기(라우터)로 부터 할당 받을 수 있는지? + 3) 외부망을 통한 공격/방어 실습을 위한 세팅을 어떻게 할 수 있을지? (e.g. 외부에서 실습을 할 기기의 ip에만 모든 포트 개방 후, 칼리 리눅스로 ip로만 포트 포워딩 등.../ vlan 사용 시 더 편리하겠으나 현재 상황에서 vlan 적용 불가) + 4) VLAN 적용은 불가능하지만, VLAN의 개념과 원리 그리고 적용에 대하여 어떻게 공부할 수 있는지 확인해보기 + 5) 기타 문제 사항들은 직접 경험해보며 트러블 슈팅해보기 + 6) 지금 상황에서 목표 네트워크 구조 + WAN - easy mesh(192.168.0.x/24) + { + Main Gateway1(Main router) - PC1(static IP) + - PC2(static IP) + - Sub router(static IP) - DS124(Main NAS server) (static IP) + - printer (static IP) + - wireless mobile devices(dynamic IP) + } + WAN - (Sub Router/ MACVLAN 이용 공인 IP 할당) - Main Gateway2(192.168.0.x/24) - 하이퍼 바이저 + - 접속 콘솔 + 7) DMZ, TwinIP, MACVLAN 차이: DMZ -> 설정되 기기로 모든 패킷 포트 포워딩/ Twin IP -> 라우터가 가지고 있는 공인 IP를 내부 클라이언트가 사용 가능/ MACVLAN -> 라우터가 가지고 있는 공인 IP가 아닌 MAC 주소를 기반으로 새로운 공인 IP를 클라이언트에 할당 + c) 하이퍼 바이저 OS 선택 및 세팅 + 1) 현재 서버에 사양이 그렇게까지 좋지않고 라이센스 비용을 지불하기 어려우므로, 최대한 가볍고, 오픈 소스 프로그램 및 트러블 슈팅이 유리한 OS를 선택 + A) TrueNAS: ZFS 지원/이미 상당부분 편리한 GUI가 있음 - 무겁고/ 공부 목적에 맞지 않음 + B) Proxmox: 가볍고 오픈 소스 프로그램, 이미 상당부분 커뮤니티가 형성되어 있음. + C) Windows server: GUI 기반, 익숙함 - 무거움, 오픈소스가 아니며, 라이센스가 필요함. + D) 최종 Proxmox 선택 + 2) 스토리지 구성 + A) 각 OS는 용량이 크지 않으므로 그냥 256GB SSD에 용량 할당하여 설치(OS용 SSD) + B) 데이터 서버의 사용 공간을 HDD 4개를 RAID 0+ 혹은 RAID 1+(차이점에 대해 자세히 공부 필요) 혹은 RAID 5로 묶어서 사용해 보며 정리하기(파일 서버에 가용성 중요, 사실 OS 단도 RAID1을 통한 가용성을 확보하고 싶으나 HW상 SSD가 1개만 지원되므로 백업으로 갈음 - 현재는 가용성이 중요하지 않음.) + C) 각 서버들은 필요한 데이터를 데이터 서버를 참조하여 사용(이 경우 webDAV를 통한 마운트나, smb 등의 프로토콜을 통한 읽기/sftp를 통한 업로드를 고려 중 가능한지는 확인해봐야 함) + d) 기타 홈 서버 구축에 필요한 서버: 필요한 기본 서버들(방화벽, 데이터 서버, DB 서버, 웹 서버, 미디어 서버 등등)을 하나씩 순서대로 그리고 중요도 순서대로 선택해서 올려보기 + e) 구축된 서버를 도커를 통한 컨테이너화 해서 Git 배포 경험해보기(ex. 
내가 구축한 방화벽 설정 도커) + f) 백업 + 1) 데이터가 빠진 서버 및 OS(VM) 들 각각의 용량은 그렇게 크지 않으므로, 스냅샷 및 외장 SSD를 통한 핫 백업만 진행 (백업은 실습/구축이 이뤄진 후 실제 데이터가 변경되면 변경될 때마다 증분 백업, HDD 부분은 실사용 전까지 예제만 넣을 것이므로 따로 백업 X) + 2) 홈 서버가 어느정도 완성되고 안정화 되기 전까지 중요 데이터는 기존의 NAS를 사용 + 3) 이후 홈 서버가 안정화 되면, NAS를 백업용으로 사용하고 홈 서버를 기존 홈 네트워크에 가지고 들어와 집안의 서버로 활용하기(이후 MACVLAN을 이용한 다른 네트워크에는 실습용 미니 PC를 하나 더 가지고와 공부) + 4) 백업에 종류에 대해 알아보기(전체 백업, 증분 백업, 콜드 백업, 핫백업 등등) + g) 문서화 + 1) 홈 서버에 wiki 및 웹 서버등이 올라가서 그곳에 백업을 하기 전까지는 notion 등을 활용 + 2) Git 등의 관리/배포 서비스 등에 대하여 아는 것이 없으므로 공부 후 이것 역시 문서화 + 3) 홈 서버에 wiki 및 웹 서버등이 서비스 되기 시작하면 notion등에 올라간 문서를 홈 서버로 옮기기 \ No newline at end of file diff --git a/docs/archives/2025-12/01_plans/01_01_plans.md b/docs/archives/2025-12/01_plans/01_01_plans.md new file mode 100644 index 0000000..c8584d6 --- /dev/null +++ b/docs/archives/2025-12/01_plans/01_01_plans.md @@ -0,0 +1,814 @@ +Tags: #plan, #common + +## Hardware information + +### Server Hardware + +- Server: Aoostar WTR Pro N150 + - N150 Processor (4C4T) + - Samsung DDR4 SO-DIM Memory (31GiB) + - Samsung NVMe SSD (1TB) + - SATA HDD (2TB) x 4 + +### BIOS configuration + +- Access BIOS menu with `del` +- BIOS:Advanced:Hardware Monitor:Smart Fan Function +- CPU Fan / Sys Fan1 / Sys Fan2 + - Fan Start Temperture: 45 + +## VM Plans + +### Local MAC address + +- Private Local MAC address principal +- 0A:49:6E:4D:\[VM\]:\[Ports\] + +### Hypervisor + +- OS: Debian13 +- CPU: pCPU +- Memory: 3GiB + - This value is just margin of hypervisor. The rest of allocation of VMs. 
+ - KSM is activated by ksmtuned +- MAC: C8:FF:BF:05:AA:B0, C8:FF:BF:05:AA:B1 +- Disk: 64GiB (`/`), 700 GiB (`/var/lib/libvirt`) + +### Firewall + +- OS: OPNsense25.7 (FreeBSD14.3) +- CPU: 2vCPU (cputune.shares 2048) +- Memory: 4GiB +- MAC: 0A:49:6E:4D:00:00, 0A:49:6E:4D:00:01 +- Disk: 64GiB - qcow2 +- Services: + - Firewall + - IPS/IDS (CrowdSec LAPI, Suricata) + - Kea DHCP + - Central ACME client (automation) + +> Do not allow web ui access from WAN, and only allow specific console user to access its web ui. Do not open ssh port at all, when you need to access its console use virsh console on the hypervisor. Because this is the center of security in this homelab. + +### Network server + +- OS: Debian13 +- CPU: 1vCPU (cputune.shares 512) +- Memory: 2GiB +- MAC: 0A:49:6E:4D:01:00 +- Disk: 32GiB - qcow2 +- Services: + - DDNS script + - AdGuard Home (Resolver DNS) + - BIND9 (Authoritative DNS) + +### Authorization server + +- OS: Debian13 +- CPU: 2vCPU (cputune.shares 1024) +- Memory: 4GiB +- MAC: 0A:49:6E:4D:02:00 +- Disk: 64GiB - qcow2 +- Services: + - Step-CA (File based) + - Caddy-main (Reverse proxy) + - Infrastructure services won't use caddy. 
+ - OPNsense + - CrowdSec + - AdGuard Home + - Step-CA + - Authellia (idP) + LLDAP (PostgreSQL) +### Development server + +- OS: Debian13 +- CPU: 2vCPU (cputune.shares 1024) +- Memory: 6GiB +- MAC: 0A:49:6E:4D:03:00 +- Disk: 256GiB - qcow2 +- Services: + - Postgresql + - Prometheus + - OS: node_exporter(and telegraf) + - VM: libvirt-exporter + - HDD: btrfs-exporter + - Grafana + - Uptime kuma (SQLite) + - Loki, Promtail + - Code-server (File based) + - Postfix, Dovecot, mbsync + - These services are only uses in local mail service (@ilnmors.intenral) + - Postfix (Split mail transper, @ilnmors.internal - directly process and @gmail.com - relayhost) + - Dovecot (IMAP/POP3 server, Save the mail itself) + - mbsync (Get external mail from external service to Dovecot with IMAP protocol) + - Diun (File Provider mode + github provider mode) +> Volume:~/data/containers/code-server/workspace/homelab:/path/of/diun:ro +> File Provider activate and read `.container` +> `.container` needs to have label `Label=diun.enable=true`, `Label=diun.watch_repo=true`. +> In case of local image, use this way. + +```yaml +# diun.yml +regopts: + - name: "caddy-auth-source" + image: "docker.io/caddy" +``` + +```ini +# container file +Label=diun.enable=true +Label=diun.watch_repo=true +Label=diun.regopt=caddy-auth-source +``` + + +### Application server + +- OS Debian13 +- CPU 4vCPU (cputune.shares 2048) +- Memory: 12GiB +- MAC: 0A:49:6E:4D:04:00 +- Disk: + - 256GiB - qcow2 + - 4TB - RAID10, BTRFS +- Services (OIDC is supported): + - OpenCloud (The fork of OwnCloud; It includes Radical, and LibreOffice. 
Radical will be used as only CardDAV) + - Vikunja (CalDav and To-Do list server; PostgreSQL) + - Gitea (Git service, and wiki; PostgreSQL) + - Outline (Small memo note server; PostgreSQL) + - Wiki.js (Report and book editor; PostgreSQL) + - Immich (Photo album; PostgreSQL) + - PeerTube (Private UCC platform; PostgreSQL) + - Funkwhale (Music server; PostgreSQL) + - Kavita (Web bookshelf; SQLite) + - Audiobookshelf(SQLite) + - Actual budget (Budget program; SQLite) + - Paperless-ngx (Paper based information collection; OCR; PostgreSQL) + - Miniflux (RSS management; PostgreSQL) + - Linkwarden (Archaiving Website; PostgreSQL) + - Ralph (IT products management; PostgreSQL) + - Conduit (Rust matrix server; Local DB) + - SnappyMail (Web mail service frontend with Dovecot) + - Vaultwarden (Password manager; PostgreSQL) + - n8n (Following goal, automation the flow; PostgreSQL) +- Services (Foward_Auth is needed): + - Kopia (backup) + - Hompage + - Define access control with yaml file via Authelia. + ```yaml + - Admin tools: + - group: admin + - OPNsense + - href: "https://opnsnese.iltnmors.internal" + + - Services: + - group: ["admin", "user"] + - Gitea: + - href:"https://gitea.iltnmors.com" + ``` + +- Services(Study): + - Kali (Container) + - Alpine (Container) + +> These containers will be isolated by podman network (which has no host gateway) and podman volume. The study and practice will be conducted only in container with `podman exec -it kali bash` + +### RDBMS and Redis + +#### RDBMS + +Postgresql and mariaDB will be provide database for various services on `auth`, `dev`, `app` servers. Each app can access RDBMS, Postgresql and mariaDB on `dev` server which is the central DB server with TLS. + +#### Redis + +Redis is the cache database, it will operate on each server where Redis is needed `dev`, and `app`, and it supports various app as one container with its own id. 
+ +## Matrix + +### Network matrix + +#### LAN + +- Subnet: 192.168.1.0/24 +- tag: 1 (Native-untagged) +- Static IPs: + - 1: Gateway (opnsense) + - 2-9: Spare IP for APs + - 10: Hypervisor (vmm) + - 11-12: Console + - 20: Backup Server + - 30: Printer +- Dynamic IP pool + - 100-254 + +#### VLAN10 + +- Subnet: 192.168.10.0/24 +- tag: 10 +- Static IPs: + - 1: Gateway (opnsense) + - 10: Hypervisor (vmm) + - 11: Network server (net) + - 12: Authorization server (auth) + - 13: Development server (dev) + - 14: Application server (app) + +#### VPN + +- Subnet: 10.10.10.0/24, 10.10.1.0/24 +- Static IPs: + - 10.10.10.1: Gateway(opnsense) + - 10.10.10.2: console + - 10.10.10.3: phone + - 10.10.10.4: spare + +### UID/GID matrix + +#### Local UID/GID + +- Pool: 2000-2999 +- Static UID: + - 2000: Hypervisor (vmm) + - 2001: Network server (net) + - 2002: Authorization server (auth) + - 2003: Development server (dev) + - 2004: Application server (app) +- Static GID: 2000 (svadmins) + +#### LDAP reservation + +- pool: 3000 - 60000 + +#### Sub id + +- Subuid/Subgid: 100000:65536 + +## File management + +### File name + +- Code files have to use `_` as a separator. (`.sh`, `.py`, etc.) +- Normal files have to use `-` as a separator. + +### Directory structure + +#### Hypervisor + +- ~/data/config/{scripts,server,services,vms} +- ~/data/config/vms/{networks,storages,dumps} +- /var/lib/libvirt/images + +#### VMs + +- ~/data/{config,containers} +- ~/data/config/{containers,scripts,secrets,server,services} +- ~/data/containers/apps/{certs,etc.} +- ~/kopia +- /etc/secrets/$UID + +#### Application server + +##### SSD + +- ~/data/{config,containers} +- ~/data/config/{containers,secrets,scripts,services} +- ~/data/containers/app/{certs,etc.} +- ~/kopia +- /etc/secrets/$UID + +##### HDD + +- btrfs +- ~/hdd/data/containers +- ~/hdd/backups +- The scrub timer systemd is required for its integrity. 
+ +## Certificates management + +- CA: Step-CA (private CA) +- DNS: BIND9 (private authoritative DNS) +### ACME client + +- ACME client: opnsense's `os-acme-client` +- Automation: + - `Upload certificate via SFTP` + - `Run command via SSH` + +### Caddy + +- `caddy-dns/rfc2136` +- `hslatman/caddy-crowdsec-bouncer/crowdsec` +- `hslatman/caddy-crowdsec-bouncer/http` + + +## Secret management + +> It is necessary external KMS or secret management server (like Vault, infisical) not to leave plain data on disk. It is to hard to manage in small homelab environment. Especially, even systemd-cred uses TPM or hardware module, this makes harder to use this on rootless and vm environment. This is why compromise with perfect secret management. + +### Secret file + +- Files: + - ~/data/config/secrets/.secret.yaml + - ~/data/config/secrets/age-key.gpg + - ~/data/config/scripts/edit_secret.sh + - ~/data/config/scripts/extract_secret.sh +- Directories: + - /etc/secrets + - Ownership: root:root + - permission: 511 + - /etc/secrets/$UID/file + - Ownership: $UID:root + - Permission: 500(directory), 400(file) +### Sequence + +- Create `.secret.yaml` +- Cerate `age-key` +- Encrypt `.secret.yaml` with `sops` by `age` key +- Modify `.secret.yaml` with `edit_secret.sh` +- Create `podman secret` or `/etc/secrets/$UID/file` with `extract_secret.sh` + +> Creating podman secret is always manually conducted by `extract_secret.sh`. There is no plain text of secret data in backup target, or git target. + +```yaml +# .secret.yaml +# ~/data/config/secrets/.secret.yaml +# Format of .secret.yaml + +# app1.env: +1SECRET: '1secret' +2SECRET: '2secret' + +app1.file: | + -----TEXT-AREA----- + contents of 3secret + -----END-AREA----- + +# app2.env +3SECRET: '3secret' +4SECRET: '4secret' + +# ... 
+``` + +#### Secret scripts + +- File: + - ~/data/config/scripts/secrets/edit_secret.sh + - ~/data/config/scripts/secrets/extract_secret.sh + +```bash +#!/bin/bash +# edit_secret.sh /path/of/secret + +set -e + +KEY_PATH="$HOME/data/config/secrets" +SECRET_FILE="$1" + +usage() { + echo "Usage: $0 \"/path/of/secret/file\"" + exit 1 +} + + +if [ -z "$SECRET_FILE" -o ! -f "$SECRET_FILE" ]; then + echo "Error: Secret file path is needed" + usage +fi + + +if [ ! -f "$KEY_PATH/age-key.gpg" ]; then + echo "Error: There is no key file" + exit 1 +fi + +# Delete password file after script +cleanup() { + if [ -f "/run/user/$UID/age-key" ]; then + rm -f "/run/user/$UID/age-key" + fi +} + +trap cleanup EXIT + + + +echo -n "Enter GPG passphrase: " +read -s GPG_PASSPHRASE +echo + +echo "$GPG_PASSPHRASE" | gpg --batch --yes --passphrase-fd 0 \ +--output "/run/user/$UID/age-key" \ +--decrypt "$KEY_PATH/age-key.gpg" && \ +chmod 600 "/run/user/$UID/age-key" + +if [ -z "/run/user/$UID/age-key" ]; then + echo "Error: Key file does not exist" + exit 1 +fi + +gpgconf --kill gpg-agent + +SOPS_AGE_KEY="$(cat "/run/user/$UID/age-key")" + +SOPS_AGE_KEY="$SOPS_AGE_KEY" sops "$SECRET_FILE" +``` + +```bash +#!/bin/bash +# extract_secret.sh /path/of/secret (-f|-e ) + +set -e + +KEY_PATH="$HOME/data/config/secrets" +SECRET_FILE=$1 + +# shift the $2 as $1 ($1 < $2) +shift + +# usage() function +usage() { + echo "Usage: $0 \"/path/of/secret/file\" (-f|-e \"yaml section name\")" >&2 + echo "-f : Print secret file" >&2 + echo "-e : Print secret env file" >&2 + exit 1 +} + +while getopts "f:e:" opt; do + case $opt in + f) + VALUE="$OPTARG" + TYPE="FILE" + ;; + e) + VALUE="$OPTARG" + TYPE="ENV" + ;; + \?) # unknown options + echo "Invalid option: -$OPTARG" >&2 + usage + ;; + :) # parameter required option + echo "Option -$OPTARG requires an argument." 
>&2 + usage + ;; + esac +done + +# Get option and move to parameters - This has no functional thing, because it only use arguments with parameters +shift $((OPTIND - 1)) + +# Check necessary options +if [ ! -f "$SECRET_FILE" ]; then + echo "Error: secret file path is required" >&2 + usage +fi + +if [ -z "$TYPE" ]; then + echo "Error: -f or -e option requires" >&2 + usage +fi + + +if [ ! -f "$KEY_PATH/age-key.gpg" ]; then + echo "Error: There is no key file" >&2 + usage +fi + +# Delete password file after script +cleanup() { + if [ -f "/run/user/$UID/age-key" ]; then + rm -f "/run/user/$UID/age-key" + fi +} + +trap cleanup EXIT + +echo -n "Enter GPG passphrase: " >&2 +read -s GPG_PASSPHRASE +echo >&2 + +echo "$GPG_PASSPHRASE" | gpg --batch --yes --passphrase-fd 0 \ +--output "/run/user/$UID/age-key" \ +--decrypt "$KEY_PATH/age-key.gpg" && \ +chmod 600 "/run/user/$UID/age-key" + +if [ ! -f "/run/user/$UID/age-key" ]; then + echo "Error: Key file does not exist" >&2 + exit 1 +fi + +gpgconf --kill gpg-agent + +SOPS_AGE_KEY="$(cat "/run/user/$UID/age-key")" + +if [ "$TYPE" == "FILE" ]; then + if RESULT=$(SOPS_AGE_KEY="$SOPS_AGE_KEY" sops --decrypt --extract "[\"$VALUE\"]" --output-type binary "$SECRET_FILE") ; then + echo -n "$RESULT" + exit 0 + else + echo "Error: SOPS extract error" >&2 + exit 1 + fi +fi + +if [ "$TYPE" == "ENV" ]; then + if RESULT=$(SOPS_AGE_KEY="$SOPS_AGE_KEY" sops --decrypt --extract "[\"$VALUE\"]" --output-type dotenv "$SECRET_FILE") ; then + echo -n "$RESULT" + exit 0 + else + echo "Error: SOPS extract error" >&2 + exit 1 + fi +fi +``` + +##### Secret value management + +- Using `extract_secret.sh` +- Inject secret value to `podman secret` or `/etc/secrets/$UID` + +```bash +# /etc/secrets/$UID +# Before use sudo tee, make sure sudo doesn't need password. +# i.e. sudo ps -ef command execute before this command. 
+# Env file +extract_secret.sh ~/data/config/secrets/.secret.yaml -e "$value" > /run/user/$UID/tmp.env \ +&& sudo mv /run/user/$UID/tmp.env /etc/secrets/$UID/"$FILE_NAME" \ +&& sudo chown $UID:root /etc/secrets/$UID/"$FILE_NAME" \ +&& sudo chmod 400 /etc/secrets/$UID/"$FILE_NAME" +# Normal file +extract_secret.sh ~/data/config/secrets/.secret.yaml -f "$value" > /run/user/$UID/tmp.env \ +&& sudo mv /run/user/$UID/tmp.env /etc/secrets/$UID/"$FILE_NAME" \ +&& sudo chown $UID:root /etc/secrets/$UID/"$FILE_NAME" \ +&& sudo chmod 400 /etc/secrets/$UID/"$FILE_NAME" + +# Podman secret +# Podman doesn't supports .env file parsing, you have to enroll all values +extract_secret.sh ~/data/config/secrets/.secret.yaml -f "$value" | podman secret create "[$FILE_NAME|$ENV_NAME]" - +``` +#### Use podman secret + +```ini +# app.container +[Unit] +Description=app + +[Service] +StartExecPre=/bin/bash -c "wait-for-it.sh ip:port -t 0" + +[Container] +... +Secret=app.env,type=env,target=$ENVRIONMENT_NAME +# or +secret=app_data.file,target=/path/of/secret/file +... +``` + +> podman secret save the `secret data` as `plain text` in disk. However, it is not necessary to have full security in small homelab (practically, it is hard to realize in small environment without external secret server like infisical or vault). When the root permission or user permission compromised then it can be readable. + +#### Change secret + +- Edit `.secret.yaml` +- podman container stop (systemctl) +- `podman secret rm $target` +- Use `extract_secret.sh` +- Restart podman container + +### After code-server building + +Move all secret file on `dev` server's `code-server` container. 
+ +- Files: + - .secret.yaml + - age-key.gpg + - edit_secret.sh + - extract_secret.sh +- Path: $HOME/workspace/homelab/data/common/config/{secrets,scripts} (Mapped volume in container) +- Change the KEY_PATH as `$HOME/workspace/homelab/data/common/config/{secrets,scripts}` on scripts + +#### Apply secrets from code-server + +Use SFTP and SSH (or Ansible playbook), decrypt the secret values and make a file on container's `/run/user/$UID` and upload to target server's `/run/user/$UID`. Then use ssh remote command to add podman secret or mv command at the code-server container. + +It can works on the code-server web terminal. However, if there were problems on caddy, which means not to access web console then just use ssh and podman exec. + +## Update and upgrade policy + +### Hypervisor + +- Never update or upgrade hypervisor before the its stability is verified from other VMs + +### VMs + +- Make a qcow2 snapshot before major update or upgrade, using `virsh snapshot` +- If there were some problems, rollback using snapshot. + +### Containers + +- Check the version from Diun. +- Read caution and changes. +- Apply the update via container file (Prepare the containerfile to make image). `systemctl --user daemon-reload` and `systemctl --user restart container` + +## Backup policy + +### Kopia + +- ~/kopia: The directory of kopia configuration files. +- ~/hdd/backups: The destination directory of each server's Kopia. +- Don't backup the live data such as live DB data. +- Only configuration files are backed up in hypervisor. + +### Configuration file backup + +- Save all configuration files in `code-server` container. +- Path: ~/data/containers/code-server/workspace/homelab +- Use `Gitea` container to track and manage files. 
+- Apply `Ansible` on `code-server` (Following goal) + +### opnsense + +- `os-sftp-backup` sends its configuration towards `code-server` + +### Application data + +#### Common data + +- `Kopia` backs up files to the app server using sftp +- Backup target: ~/data +- Path: ~/hdd/backups + +#### DB data + +Only backup `dump` db data. + +##### Schema backup + +```bash +# Dev server +podman exec postgresql sh -c 'pg_dumpall --schema-only' > ~/data/postgresql/backups/postgresql-cluster-\[date\].dump +``` + +##### DB data backup + +```bash +# VM's application data backup +podman exec application sh -c 'pg_dump -U $DB_USER -p $DB_PW' > ~/data/containers/application/backups/application-\[date\].dump + +# app's application data backup +podman exec application sh -c 'pg_dump -U $DB_USER -p $DB_PW' > ~/hdd/data/containers/application/backups/application-\[date\].dump +``` + +##### Container volume + +```ini +# app.container +#... +[Container] +# ... +Volume=~/data/containers/application/backups:/backups:rw +``` + +##### Example of DB backup scenario + +```ini +# postgres-db-backup.service +[Unit] +Description=PostgreSQL Database Backup +After=postgresql.service +Requires=postgresql.service + +[Service] +Type=oneshot +# %% is needed in systemd, because `%` has special meaning in systemd. 
+ExecStart=/bin/sh -c 'podman exec postgresql sh -c "pg_dumpall --scheme-only" > ~/data/containers/postgresql/backups/postgresql-cluster-$(date +%%Y-%%m-%%d_%%H-%%M-%%S).dump' + +Nice=19 +IOSchedulingClass=idle + +# Management DB dump file +ExecStopPost=/bin/bash -c `find "~/data/containers/postgresql/backups/" -maxdepth 1 -type f -mtime +7 -delete` +``` + +```ini +# postgres-db-backup.timer +[Unit] +Description=Run PostgreSQL backup daily at 2:30 AM + +[Timer] +# everyday 02:30 AM start +OnCalendar=*-*-* 02:30:00 +# Random time to postpone the timer +RandomizedDelaySec=15min +Persistent=true + +[Install] +WantedBy=timers.target +``` + +#### External backup + +- Use `Kopia` in app server to backup files to external data server. + +#### Verify backup + +- Restore random directory from backup on dev server's test directory once a month (or week). +- Check its integrity and availability. +- If there were some problems, check the all backup data and conduct full backup immediately. + +## Systemd + +### `.service` file + +- Path: ~/.config/systemd/user +- Example of `.service` + +```ini +# ~/data/config/services/opnsense.service +# ~/.config/systemd/user/opnsense.service +[Unit] +Description=opnsense Auto Booting +After=network-online.target +Wants=network-online.target +# Requires=x.services + +[Service] +Type=oneshot + +# Maintain status as active +RemainAfterExit=yes + +# Wait for other dependent services +# ExecStartPre=%h/data/config/scripts/wait-for-it.sh -h [ip] -p [port] -t 0 + +# Run the service +ExecStart=/usr/bin/virsh -c qemu:///system start opnsense + +# Stop the service +ExecStop=/usr/bin/virsh -c qemu:///system shutdown opnsense + +[Install] +WantedBy=default.target +``` + +### Hypervisor + +- Adjust booting sequence of VMs via `.service` +- Use `wait-for-it.sh` and `Requires` +- Sequence + - vmm + - opnsense + - net + - auth + - dev + - app + +### Containers + +#### Quadlet + +- Make the `.container` file +- Path: ~/data/config/containers/\[app_name\] +- 
Symbolic link path: ~/.config/systemd/containers +- `systemctl --user daemon-reload` makes `.service` file automatically +- If pod is needed, then set `.pod` file + +```ini +# app.container +[Quadlet] +# Don't make a dependencies +DefaultDependencies=false + +[Unit] +# Pod=app.pod +Description=app +After=network-online.target +Wants=network-online.target +Requires=required.service + +[Service] +StartExecPre=%h/data/config/scripts/wait-for-it.sh dev.ilnmors.internal:8080 --timeout=0 --strict + +[Container] +# pod=app-pod +Image=localhost/app:1.0.0 +Name=app +Port=2080:80/tcp +Port=2443:443/tcp +Volume=%h/data/containers/app/etc:/etc/app:rw +Volume=%h/data/containers/app/data:/app:rw +Secret=app.env,type=env +Secret=app.file,type=file,target=/path/of/secret/file + +[Install] +WantedBy=default.target +``` + +```ini +# app.pod +[Quadlet] +# Don't make a dependencies +DefaultDependencies=false + +[Pod] +Name=app +PublishPort=2080:80/tcp +``` \ No newline at end of file diff --git a/docs/archives/2025-12/01_plans/01_02_milestone.md b/docs/archives/2025-12/01_plans/01_02_milestone.md new file mode 100644 index 0000000..87d57bd --- /dev/null +++ b/docs/archives/2025-12/01_plans/01_02_milestone.md @@ -0,0 +1,167 @@ +Tags: #plan, #milestone, #common + +## Homelab Project + +### Plans + +- [x] Build plans + +### Organize theory + +- [x] Organize DNS +- [x] Organize DHCP +- [x] Organize PKI +- [x] Organize TLS +- [x] Organize SSO +- [x] Organize Email service +### Organize configuration + +- [x] Organize Debian installation +- [x] Organize Debian common configuration +- [x] Organize iptables +- [x] Organize podman +- [x] Organize CrowdSec +- [x] Organize BTRFS + +### Hypervisor + +- [x] Install Debian13 +- [x] Set common configuration +- [x] Set network interfaces +- [x] Set QEMU/KVM and Libvirt environment + +### opnsense vm + +- [x] Generate opnsense template +- [x] Install opnsense +- [x] Set interface configuration +- [x] Set CrowdSec LAPI configuration (without TLS) +- 
[x] Set KEA DHCPv4 configuration + +### net vm + +- [x] Generate net vm template +- [x] Install Debian13 +- [x] Set common configuration +- [x] Set network interfaces +- [x] Set DDNS script +- [x] Set BIND container +- [x] Set AdGuard Home container + - [x] Fix DHCP and Static IP server's `resolv.conf`, and opnsense dns + +### auth vm + +- [x] Generate auth vm template +- [x] Install Debian13 +- [x] Set common configuration +- [x] Set Step-CA container + +### opnsense vm + +- [x] Set ACME client in OPNsense +- [x] Set TLS on OPNsense with ACME client +- [x] Set TLS on CrowdSec LAPI with ACME client +- [x] Set ACME automation + +### net vm + + - [x] Set TLS on AdGuard Home container with ACME client + +### dev vm + +- [x] Generate dev vm template +- [x] Install Debian13 +- [x] Set common configuration + +### app vm + +- [x] Generate app vm template +- [x] Install Debian13 +- [x] Set common configuration +- [x] Set BTRFS on `$HOME/hdd` + +### auth vm + +- [x] Set Caddy - auth container (Main caddy) + - [x] Caddy TLS certificates + - [x] Caddy bouncer + - [x] Caddy log agent +- [x] Set crowdsec bouncer + - [x] Set collection in LAPI (parser + scenario) + - [x] Set collection in auth vm + - [x] Set acquis.d/caddy-auth.yaml +- [x] Set LLDAP container +- [x] Set Authelia container + - [x] Forward_Auth setting + +### dev vm +- [x] Set Postgresql container + - [x] Set TLS on Postgresql with ACME client +- [x] Set Caddy - dev container (sidecar caddy) + - [x] Verify TLS re-encryption + - [x] Veryfiy Forward_Auth from Caddy - auth +- [ ] Set code-server container + - [ ] Generate container file (with Git and Ansible) + - [ ] Apply SSO with Authelia and Forward_Auth + - [ ] SSH setting + - [ ] Upload opnsense backup file via SFTP + - [ ] Get all server's configuration file via from terminal + +### app vm + +- [ ] Gitea container + - [ ] DB setting + - [ ] OIDC apply with Authelia + - [ ] Code and configuration file Git +- [ ] Vaultwarden container (User secret management) + - 
[ ] DB setting + - [ ] OIDC apply with Authelia + - [ ] TOTP setting (recovery code will be saved in .secret.yaml) +### dev container + +- [ ] Set Diun container +- [ ] Set Prometheus and grafana container +- [ ] Set Loki and promtail container +- [ ] Set Postfix +- [ ] Set Dovecot +- [ ] Set Fetchmail +- [ ] Set Mariadb conatiner (when it needs) + - [ ] Set TLS on Mariadb with ACME client + +### app vm + +- [ ] Set Caddy - app container (sidecar caddy) +- [ ] Set app service containers +- [ ] Set all server's Kopia and Gitea (with code-server) + - [ ] Conduct backup verification + - [ ] `Git` all code on Gitea + +--- +## Following goals + +- [ ] Ansible + +> To manage and automate this project, the tool of automation is necessary. In modern architecture, Ansible is one of most powerful tools to automate configuration. After the project will be finished, Ansible will be adopted to manage server's configurations. It supports idempotency powerfully, so from the basic configuration the dev-ops system will be applied on this project. Idempotence is very important. + +- [ ] self inspection or mock audit + +> Every architecture has their own vulnerability. It is because always the administrator itself is the weakest chain in the security. So, it is necessary to inspect the system based on external criteria. There's the list of criteria below. +> +> - ISMS-P - Korean standard +> - ISO/IEC 27001 - International standard/Annex A +> - NIST SP 800-53 - NIST CSF +> - CIS Benchmark - checklist of Debian/OPNsense/RDBMS/etc +> - OWASP Top 10 + +- [ ] documentation deeper + +> The system itself can't prove anything. When the document that everyone can understand what it is supports the system, then the system become the most powerful weapon. 
+> +> - The code and configuration files, and Ansible playbook based on Git (private Gitea) +> - Architecture Report based on bookstack (As-Is) +> - Policy and Norms Report based on bookstack (To-Be) +> - Audit Report based on bookstack (Proof of Compliance) + +- [ ] hacking simulation for public licence + +> Use podman network and podman volume, create `kali` and `alpin` container to train and study about hacking in `dev` server. These containers won't combined with `systemd` via `Quadlet` \ No newline at end of file diff --git a/docs/archives/2025-12/02_theory/02_01_dns.md b/docs/archives/2025-12/02_theory/02_01_dns.md new file mode 100644 index 0000000..faabd4a --- /dev/null +++ b/docs/archives/2025-12/02_theory/02_01_dns.md @@ -0,0 +1,101 @@ +Tags: #theory, #network, #protocol + +## DNS (Domain Name System) + +In the beginning of the internet, there were a few hosts on networks. It was possible to manage all hosts on network via IP address or domain name in `/etc/hosts` file in each servers. However, it is hard for people to match and remember what IP addresses means. When the internet environment became bigger and bigger, the complex of route the target server would be harder. To solve this problem, the DNS emerged as a translator between IP address and domain name. In modern internet environment, DNS has hierarchy structure from root to TLD, TLD to authoritative server for efficiency. + +### Structure of DNS + +#### Communication + +- DNS: 53 tcp/udp + +DNS communication basically uses 53/udp port. However, in the modern internet environment; which means complex environment sometimes the size of packet is above 512 bytes. In this case, DNS uses 53/tcp too. The vulnerability of DNS is that all communication is on plain data. Everyone can conduct sniffing attack towards DNS packet. + +- DoT (DNS over TLS): 853 tcp + +DoT was developed to encrypt DNS query. DoT uses [TLS](./02_04_tls.md) to request query. This protocol uses TLS. 
Moreover, because of TLS, nobody can do sniffing attack towards DoT. However, it uses specific port 853. If ISP block the 853 or analyze 853 port, the pattern of usage will be analyzed or even you cannot use DoT itself. Additionally, there is also DNS over DTLS which uses 853 udp. + +- DoH (DNS over HTTPS): 443 tcp/udp + +DoH is very similar with DoT. It uses TLS, and it was developed to encrypt DNS query. there is just one difference. This uses https(443 tcp/udp) instead of 853 tcp. https is standard of web protocol, so it is hard to analyze someone sends DNS request or common web packets. It means, ISP or government cannot block 443 port itself or analyze the pattern of DNS query. Since 2022, there's the new standard DNS over HTTP/3 which uses 443 udp port. + +- DNSSEC (DNS SECurity extensions) + +Originally, client couldn't verify integrity of the response from DNS server. If malicious attacker could get authority of cache DNS server to change their records, all clients would get affected. (i.e. pharming attack). DNSSEC is a protocol to guarantee integrity of DNS record. DNSSEC protocol adds some records in zone, RRSIG(Resource Record Signature), DNSKEY, DS, NSEC, CDNSKEY, CDS. All resolver DNS verify integrity of their records to authoritative DNS with these records. This process is similar to PKI, the chain of trust. + +- ECH (Encrypted Client Hello) + +Basically, client hello packet has SNI (Server Name Indication). Even though all communication under TLS is encrypted, but to start session the packet has to contain the SNI to identify server. To encrypt this information, SNI the ESNI(Encrypted SNI) was developed in 2018 based on TLS 1.3. However, ESNI just could encrypt SNI information. Now, since 2020, the new standard ECH was developed to supersede ESNI. ECH not only encrypt SNI but also encrypt all client hello process. ECH is latest protocol, and it has a lot of dependency in DNS server, service server and client. 
When all of them supports ECH, then user can use ECH. Because when ECH encrypts client hello data client need the target server's public key (certificate), it has to look up from encrypted DNS (DoH or DoT). + +#### Zone + +DNS server has zones; Forward zone and Reverse zone. + +- Forward zone + +Forward zone has basically information of the pair of domain and IP address. The role of this zone is change domain name to IP address. The domains are managed by IANA, TLD is already reserved. (i.e. `.com`, `.org`, etc...) For private network, `.home.arpa` or `.internal` are reserved. + +- Reverse zone + +Reverse zone also has basically information of the pair of IP address and domain. The role of this zone is change IP address to domain name. To change domain to IP address it uses specific domain name. \[reversed_ip_address\].in_addr.arpa (i.e. 1.168.192.in-addr.arpa) + +#### Records + +Each zone has their record type. If zone were a kind of DB, record would be a data of DB. There is basic records type below. + +- SOA type + +Information of ZONE management. Every zone has this SOA type record. + +- NS type + +Designate authoritative name server of domain zone + +- A type + +Mapping domain to IPv4 address + +- AAAA type + +Mapping domain to IPv6 address + +- PTR type + +Mapping IP address to domain + +- CNAME type + +Mapping domain to domain. CNAME type is kind of alias of domain. It can't have IP address value. The query acts recursively, and it gets IP address at the end. + +#### Key + +There is the keys to control DNS records or zone, even DNS server itself. + +- rndc key + +This key is to control DNS server itself. When rndc key set on DNS server, client can control DNS server with this key like, reboot server, load or unload zone. rndc key is basically generated by `rndc-confgen` command and it is defined on `rndc.conf` and `named.conf`. 
+ +- tsig key + +This key is to guarantee integrity when the server syncronize zone data between other servers (usually master-slave server). It is possible update records via this key depending on the setting. Therefore, tsig key is usually used for DDNS or DNS-01 challenge. The key is generated in the DNS server, and it defined in `named.conf`. + +### DNS Server type + +DNS server basically separated as authoritative DNS and recursive DNS. + +#### Authoritative DNS + +Authoritative DNS has literally authority of domain zone. It doesn't ask recursive queries towards other DNS server in case of the query that is in its authoritative zone. It is necessary to use DNS-01 challenge (ACME protocol). + +#### Recursive DNS + +Recursive DNS oppositely doesn't have authority of the records in its zone. When it gets query request, it ask recursive query towards authoritative DNS. It can store the information of records (cache) and give response towards client with the cache. + +### Split Horizon DNS + +Split Horizon DNS means getting different IP address depending on where the client exists. For instance, if there were the domain `example.com`. This domain has its own private IP address, simultaneously own public IP address (from NAT). When client request the query `example.com` in the private network, private DNS would respond its private IP address. However, when the client request the query in the WAN network, public DNS would respond its public address. Client can access `example.com` in both case, but the IP address which client respond are different. To use this protocol, the network route will be efficient because the packet doesn't have to go out to the WAN area in private network. Basically, it is implemented internal authoritative DNS and recursive DNS. Recursive DNS decides where to send the query based on domain. + +### DDNS (Dynamic DNS) + +Public IP address can be changed by ISP at any time. It is hard (or expensive) to get static public IP address by ISP. 
However, the service (server) always guarantee their availability regardless what is their IP. DDNS is basically the protocol to change A or AAAA (or CNAME) records in DNS as server's current IP. Server keeps checking their current public IP and when it changes the server send the request to change its A or AAAA records to public authoritative DNS server with authentication with API key or tsig key. \ No newline at end of file diff --git a/docs/archives/2025-12/02_theory/02_02_dhcp.md b/docs/archives/2025-12/02_theory/02_02_dhcp.md new file mode 100644 index 0000000..50f5daf --- /dev/null +++ b/docs/archives/2025-12/02_theory/02_02_dhcp.md @@ -0,0 +1,55 @@ +Tags: #theory, #network, #protocol + +## DHCP (Dynamic Host Configuration Protocol) + +Before DHCP emerged, every client had to set their own static IP or using RARP(Reverse Address Resolution Protocol. They have critical problems. + +- Static IP + - Each host has their own IP regardless they run or not. It cause lack of IP address. + - If administrator made configuration mistake, the network itself could stop. For instance, IP conflict, or subnet configuration error, etc. + +- RARP + - RARP works on L2, it makes hard to implement on hardware. + - RARP server had to exist on every subnet(L2), it was inefficient. + +To solve this problem, BOOTP was developed and it evolved as DHCP. DHCP works on L3, and like its name they lease IP dynamically. It allocates IP to hosts with 3 steps. Lease, renewal, and release. + +### Lease + +When DHCP server gets request from host to allocate IP, DHCP server choose one IP address from its subnet pool, and it responds IP/subnet and information to the hosts. It follows the process called DORA. + +- DHCP Discover + +The host which has no IP address broadcast discover packet including its MAC to local network. Only DHCP server responds this packet, the others discard the packets. 
+ +- DHCP Offer + +When the DHCP server gets a Discover packet, it broadcasts an Offer packet including the MAC address of the host which sent the Discover packet. Only that host processes the Offer; the others discard the packet. + +- DHCP Request + +When the host gets an Offer packet, it considers that the network has a DHCP server and broadcasts a Request packet to the local network. More than one DHCP server can exist in one network, therefore the host broadcasts the Request packet. + +- DHCP ACK + +When the DHCP server gets the Request packet, it searches for an IP address which can be allocated from its pool. When the DHCP server finds an available IP address, it sends an ACK packet including the IP address and subnet, and optional information (DNS, gateway, etc...) with the reservation (lease) time. + +### Renewal + +The host allocated an IP from DHCP tries to renew its IP before the lease expires. There are two chances to renew the reservation time. The first renewal attempt happens when half of the lease time remains; T1. At this time, the host uses unicast. If the first try fails, it tries one more time when 87.5% of the lease time has passed; T2. At this time, the host uses broadcast. If all tries fail, the client gives up the leased IP and starts the lease process again, and the DHCP server releases the IP address from the host. + +- DHCP Request + +The host sends a Request packet to the DHCP server as unicast. + +- DHCP ACK + +When the DHCP server gets the request, it sends an ACK packet to the host as unicast. + +### Release + +When the host doesn't use the IP address anymore, the DHCP server marks the IP as available in its pool. In particular, the client can send `DHCPRELEASE` to the DHCP server explicitly when it no longer needs the IP address. + +### DHCP relay + +Commonly, DHCP is located in the router, because the router is the center of the network and takes charge of a number of subnets. However, the DHCP server doesn't have to be located in the router because of the existence of DHCP relay. 
When router gets DHCP packets (DORA), router can relay the packets as unicast between host and DHCP server which are in different subnet. diff --git a/docs/archives/2025-12/02_theory/02_03_pki.md b/docs/archives/2025-12/02_theory/02_03_pki.md new file mode 100644 index 0000000..8eb202b --- /dev/null +++ b/docs/archives/2025-12/02_theory/02_03_pki.md @@ -0,0 +1,103 @@ +Tags: #theory, #network, #security + +## PKI(Public Key Infrastructure) + +PKI is defined on RFC4949 and RFC5280. PKI is neither one of the protocols nor algorithms, it is a huge infrastructure itself. It has various parts which are CA(Certification Authority), RA(Registration Authority), VA(Validation Authority) and Certificates. + +### CA (Certification Authority) + +CA is the most important part in PKI. It is the source of certification for the secure communication. CA has various roles, some can be delegated to the RA or VA. However, CA is essential in PKI. RA or VA are optional to support CA. The purposes of CA are as follows: + +- Managing certification policy +- Issuing certificates +- Verifying certificates validity +- Managing CRL(Certification Revocation List) +- Cross-certification between root CAs + +CAs often operate in a hierarchy. Each CA has their own role, and they are basically PAA(Policy Approving Authority), PCA(Policy Certification Authority), and CA(Certification Authority). Normally, PAA and PCA are usually combined as root CA, and CA is an intermediate CA. + +#### root CA + +Root CA is simply CA of CA. It is the source of trust chain. Only a root CA can authenticate itself alongside other root CAs(cross certification) or intermediate CAs below it. Theoretically, and precisely root CA can be divided into PAA and PCA, but practically they operate as one authority, root CA. + +#### intermediate CA + +Intermediate CA is the authority which issues certificates for end entities we can use (i.e. web servers). 
Their certificate is digitally signed (the hash value is signed by root CA's private key) by root CA, so every user can verify their trust with root CA's certificate (root CA's public key). The reason for using intermediate CA is for security. If CA's private key was violated, all chain of trust will be disrupted. When only root CA exists and its private key was violated, all secure communication will be threatened. To avoid and manage these threats root CA and intermediate CA are divided. + +#### Structure of CA + +The basic structure of CA is very simple. It only contains private key, certificate (which is designated by X.509), and provisioner. When CA gets a request of sign on services' certificates it operates as shown below. + +- Provisioner checks CSR (Certificate Signing Request) is valid on its policy +- When CSR is valid, it checks the content of certificates(x.509) and its policy +- CSR and content of certificates are valid, CA signs on the certificate with its private key (sign on hash value with CA's private key) + +After CA's signing, every client can check its validation with CA's certificates (CA's public key) + +### RA (Registration Authority) + +Due to CAs many roles, all processes above are made to be inefficient when conducted. Therefore, CA delegates its registration role to RA. RA seeks to examine CSR and certificates' content. When the content of certificate and CSR are valid under its policy, RA would send request to CA to sign. + +### VA (Validation Authority) + +Additionally, just as RA, CA can delegate its validation role to VA. Basically, VA manages certificates' validation based on CRL (Certificate Revocation List). However, in modern internet environment, it is inefficient to manage all invalid certificates based on a list. To solve this problem, OCSP (Online Certificate Status Protocol) was developed. However, OCSP also has its limitation, the advanced way is being suggested. 
+ +#### CRL (Certificate Revocation List) + +Once the certificate gets signed from CA, it is always a valid cryptograph before it expires. However, in the real world some certificates have to be revoked in case CA's key is violated or the service no longer operates, etc. So, CA needs to manage these kinds of invalid certificates which doesn't expire, such as letting clients know that their certificates are not valid anymore. This is a concept of CRL. VA releases the CRL to clients, the clients download or update the list to judge the certificates' validation. + +#### OCSP (Online Certificate Status Protocol) + +CRL was very effective to revoke invalid certificates but when internet is growing, a massive of invalid certificates appears it shows its limitation. Because to check certificates validation, clients need to download and update CRL from CA or VA. There are tons of invalid certificates, and CRL's size become bigger and bigger. OCSP's concept is when client request to check certificates validation, OCSP server response it. To use OCSP, clients don't need to download full CRL anymore. It is very effective to solve CRL's problem. However, it has problems too. OCSP is defined on RFC6960. OCSP's problems are below. + +- Clients should expose its identity(IP address) to check certificates validation. +- VA can be SPOF(Single Point of Failure). +- It is very hard to match time syncronization +- The more requests come to VA, the more burden is on VA.(DoS, DDoS problem) + +#### OCSP stapling + +OCSP's problem was that client should request to VA directly. So, when the certificate providers(like web server, db, etc.) request its validation and give the proof to client it, clients doesn't need to check the validation of certificate to VA anymore. It reduces VA's burden and time synchronization problems. + +### Certificate + +The form and content of certificate are standardized by X.509. 
X.509 format contains server's public key, domain(SAN), expired date, and sign of CA, and etc. When CA validate services or people who sent CSR(which contains information to create certificate with x.509 form), CA signs on their certificates (sign on hash value with CA's private key). Then, all clients can validate that certificates with CA's public key(CA's certificate) cryptographically. Ultimately, when clients trust CA, then they can trust services that have certificate signed by CA. However, even if the certificate were valid cryptographically, some certificates would not be valid. It is reason why CRL or OCSP is needed. + +#### The way to issue certificate +##### ACME + +Basically, certificate is formed by X.509. The services which want to get certificate send request(CSR) to CA. When the services were open internet, and it should get the certificate from public CA like let's encrypt on its public domain name. Public CA always sign on public domain which is open internet, and it is the role of public CA. The protocol to automate this process is ACME. Before ACME protocol, getting a certificate was manual. The person in charge should send CSR to CA, and CA checks the CSR and return the certificate ... All process was manual. ACME protocol made this process automatically. Simply, ACME protocol checks services authority on domain with various way, and issues certificate. + +- http-01 challenge + +The server creates certificate file for CA. CA accesses the service server's specific directory via 80/TCP. When CA checked the file, CA issues certificate. + +- DNS-01 challenge + +The server sends request to authoritative DNS server to add specific record with TSIG key(API key). When the specific record successfully added on DNS server and CA can check this, CA issues certificate. This challenge can verify full authority of domain including sub domain, so CA can issue wildcard certificate. + +- TLS_ALPN-01 challenge + +It uses TLS handshake process on 443/TCP. 
ALPN(Application Layer Protocol Negotiation) is the protocol to decide what protocol server and client will use. In this challenge, CA sends some token to service server. Service server creates temporary TLS certificates using `acme-tls/1` protocol with token from CA. When CA access to server, it asks `acme-tls/1` protocol to server. The server presents the temporary certificate, then CA issues certificate. + +##### Based on identity + +ACME is powerful protocol to automate issuing certificate. However, it is necessary to check ownership of domain to use ACME challenge (Usually, using http or https). However, It is possible to use TLS protocol without http or https. Originally, the process was also manual way called PKIX(Public-Key Infrastructure). It is so complex and slow, and it is impossible to use ACME. To automate this process, modern CA uses JWK(JSON Web Key) and JWT(JSON Web Token). The process is below. + +- The administrator registers system's public key as JWK at CA. + + > JWK is a format of specification of key. It shows key and its information as JSON format. + +- The system sends the JWT to get certificate signed by its private key to CA. + + > JWT is a specification of what client can do after connection or of proof this request is valid. In this process, JWT substitute CSR. + +- CA verify JWT by pre-registered JWK. When JWT is valid, it issues X.509 certificate. + +##### Usage of X.509 Certificates + +Regardless the way to issue certificate, either ACME or the way based on its identity, the certificate which is already issued is always valid before it is revoked. You can use X.509 certificate from the way based on its identity for https, oppositely you can use X.509 certificate from ACME for other TLS protocols (like LDAPS, DB TLS communication, etc). + +#### X.500 and LDAP(Lightweight Directory Access Protocol) + +In the beginning of PKI, there was plan to make a server for all certificates. It is X.500. 
However, the protocol to implement X.500, DAP(Directory Access Protocol) was too complex and heavy to use internet environment. To make way easy and light, LDAP(Lightweight Directory Access Protocol) was developed to store certificates. However, there are a lot more efficient way to manage certificates appeared already. LDAP could not realize X.500, but it is utilized as centralized authentication system like [SSO](./02_05_sso.md), or OS account management. \ No newline at end of file diff --git a/docs/archives/2025-12/02_theory/02_04_tls.md b/docs/archives/2025-12/02_theory/02_04_tls.md new file mode 100644 index 0000000..13210a3 --- /dev/null +++ b/docs/archives/2025-12/02_theory/02_04_tls.md @@ -0,0 +1,92 @@ +Tags: #theory, #network, #security, #protocol + +## TLS/SSL + +TLS(Transport Layer Security) is the protocol that encrypts communication with certificates under [PKI](./02_03_pki.md). Originally, the communication encryption protocol was suggested as SSL(Secure Socket Layer) in 1995 by Netscape. Before the emergence of TLS/SSL protocol, all communication in web(or database, some more protocols) used plain text, even it contained sensitive data like password. After 1996, the update of SSL protocol stopped at version 3.0, and it has its own vulnerability. The next version of SSL is TLS. Virtually, a lot of people treat SSL as the same as TLS. + +### Encryption + +Before talking about TLS, it is essential to understand what is encryption. Basically, encryption is to hide plain data using algorithm and key mathematically. Encryption can categorized by various criteria, but in this case focusing on key. + +#### Symmetric key encryption + +This way needs only one key to encrypt and decrypt. Just because using one key to encrypt and decrypt, it doesn't require complex calculation. This algorithm is fast, and easy to handle huge data. However, the key is only one so for security the key should be protected. 
+ +#### Asymmetric key encryption + +This way needs two keys to encrypt and decrypt. It means the key for encryption and the key for decryption are different. It uses a very complex algorithm to separate the keys, which makes it slow and hard to handle huge data. However, since the keys are divided, one key can be publicly shared. + +### Principle of TLS + +PKI is necessary to use TLS communication. TLS uses both kinds of algorithms to encrypt the communication. For communication to be encrypted, a key must exist. For this process (generating the key for encryption), the server needs an X.509 certificate. The certificate contains the server's public key, domain, and extra information. To start communication, the server and the client conduct a specific process called the `TLS-handshake`. A client sends a request to start communication. The server accepts this request and negotiates the protocol they will use in communication. After negotiation, they start the process to generate a symmetric key for the communication. In this part, there are two protocols to generate the symmetric key: RSA and Diffie-Hellman. + +#### RSA (Legacy way) + +In this process the server and client use the server's public key and private key. The client generates metadata (the pre-master secret) used to generate the symmetric key. The metadata is encrypted with the server's public key and sent to the server. The server decrypts the data from the client with its private key. They use this data to generate the symmetric key. When this process ends, they share the same symmetric key to encrypt the content of communication. + +- Cons of RSA + +The security strength of RSA depends on the server's private key. If the server's private key were taken by someone who has a record of past communication, all of that communication could be decrypted, because the session key itself (precisely, the pre-master secret) is sent in the communication encrypted by the server's public key. So, this way can't guarantee Forward Secrecy, which means the security of previous communication.
Additionally, RSA requires significant resources to encrypt and decrypt. Therefore, in the modern internet environment the RSA way is not used frequently. + +#### Diffie-Hellman & ECDHE(Modern standard) + +On the other hand, TLS can use the Diffie-Hellman algorithm. This process doesn't exchange any clue to the session key. In this process, the server and client each generate secret values, and they make specific results from calculations with public parameters. The server's private key signs its results just for proving that they are not altered and are authenticated. When they exchange each other's results, both of them generate the same session key from the results and their own secret value. The whole process is publicly open, but except for the server and client themselves, nobody can mathematically calculate the secret values and session key. After the whole process is done, the client finally combines all the communication values exchanged with the server, makes a hash value of this, and encrypts the hash value with the session key. The client sends it to the server as the finished message, and the server verifies this. The session key is not permanent; it is temporary. Since the key exchange never encrypts the session key with the server's key, even if a hacker could get the server's secret key, he wouldn't know what the session key was. + +- Pros of DHE & ECDHE + +In contrast with the RSA case, the security strength of DHE & ECDHE doesn't depend on some specific key, because when generating the session key, the server and client don't send any sensitive values (like the pre-master secret) in this protocol. What they send to each other are open values. The server's public key is used only for signing. So, no session key can exist after its session ends; this is PFS (Perfect Forward Secrecy). Additionally, the elliptic curve way can provide the same strength with a shorter key compared to RSA. + +The detail of the `TLS-handshake` process is below.
+ +### Detail of TLS handshake + +#### Start + +- Client hello + +The client sends the request to server with information including SNI (Server Name Indication), TLS version, cipher suite, and client random. SNI is the server's domain information to access. cipher suite is the protocol of encryption, and client random will be used to generate session key. Basically, the content of `client hello` isn't encrypted. It has a problem that ISP(Internet Service Provider) or government can conduct sniffing attack to the Client hello packet. To solve this problem, ECH (Encrypted Client Hello) was developed. However, ECH needs the support of DNS server, browser(host), and server itself. Today, a lot of servers and DNS servers don't support ECH, so it is hard to apply ECH in every environment. + +- Server hello + +When the server receives clients request, it designate how to communicate from the list of client hello. The server sends to the client with the information of what protocol, TLS version, cipher suite to use, and certificate, and server random. The way of encryption: RSA, DHE, ECDHE are including in the cipher suite. + +#### RSA + +When the server and client decide to use RSA way, server sends certificate including public key to the client. + +- Client key exchange + +The client verifies `Server hello` with server's certificate using client's trusted CA list. The certificate is valid then the client generates `Pre-master secret` and encrypts this value with server's public key to send to the server. + +- Session key generation + +The server recieves `pre-master secret` encrypted by its public key that only server (who knows the private key of server) can decrypt. When the server decrypts this value successfully, the server and client knows three values: `Client random`, `Server random`, and `Pre-master secret`. The server and client generate `Session key` from these values individually. 
+ +#### DHE or ECDHE + +DHE and ECDHE follow exactly the same principle of the Diffie-Hellman algorithm. However, they differ in their mathematical foundation: DHE uses the discrete logarithm, while ECDHE uses the elliptic curve discrete logarithm. This is a mathematical topic, so it is skipped here. The important thing is that ECDHE is more efficient than RSA or DHE. + +> Key length to guarantee the same level of security: +> +> - RSA/DHE: above 2048 bit +> - ECDHE: above 256 bit + +- Server key exchange + +The server generates a pair of temporary keys based on the negotiated algorithm. The server sends the temporary public key to the client. The temporary public key is signed by the server's private key. + +- Client key exchange + +The client verifies the server's temporary public key with the server's public key. Simultaneously, the client also generates a pair of temporary keys based on the same algorithm. The client sends its temporary public key to the server. + +- Session key generation + +Both of them now have their own pair of temporary keys and the other's temporary public key. They generate the same `Pre-master secret` from their temporary secret key and the other's temporary public key. This process involves no communication; each calculates the same `Pre-master secret` by itself. When they have the `Pre-master secret`, they generate the session key from the pre-master secret, the client random, and the server random. + +#### Finish + +After the whole process is done, the server and client both send a `Finished message` encrypted by the session key. When they can decrypt these messages, the session key has been generated properly. They can start communicating securely with the session key. + +### TLS 1.3 + +The current standard, TLS 1.3, changed the handshake process to 1-RTT (1 Round Trip Time), and the RSA way (legacy) is no longer available.
\ No newline at end of file diff --git a/docs/archives/2025-12/02_theory/02_05_sso.md b/docs/archives/2025-12/02_theory/02_05_sso.md new file mode 100644 index 0000000..1d5124b --- /dev/null +++ b/docs/archives/2025-12/02_theory/02_05_sso.md @@ -0,0 +1,131 @@ +Tags: #theory, #network, #security, #protocol + +## SSO (Single Sign On) + +When someone wants to use some services, usually they have to identify themselves to the services. The service verifies who you are (authentication), and what you can do (authorization). Originally, each service has their own authentication and authorization system. However, in modern internet environment, a lot of services are organically connected to each other. It is common for one provider to operate various services. So, it is very inefficient and complex to operate authentication and authorization system like this way. To solve this problem, the concept of SSO which is to centralize all services' authentication and authorization system at once emerged. + +### LDAP (Lightweight Directory Access Protocol) + +To centralize authentication and authorization system, it is necessary to kind of central database of user list naturally. Fortunately, there was a very good and suitable protocol already; LDAP. LDAP was originally suggested to replace DAP (Directory Access Protocol) for X.500 in [PKI](./02_03_pki.md). Even though X.500 didn't materialized, the potentiality of LDAP was selected to materialize SSO. Because LDAP's structure which was designed for manage certificates itself allows to manage user and its authorization. In modern internet environment, many services don't usually use LDAP itself directly in SSO anymore, but it is still used as database of user and their authorization. + +#### Structrue of LDAP + +LLDAP will be used in this homelab. It is the easiest way to understand LDAP is comparing each part of LDAP with filesystem. Because LDAP itself is fundamentally the database which has the tree structure like filesystem. 
+ +##### DN (Distinguished Name) + +DN is the unique path of some specific entry in tree. It is like an absolute path in filesystem. + +- Example: + - `uid=admin,ou=people,dc=ilnmors,dc=internal` + - `uid=authelia,ou=people,dc=ilnmors,dc=internal` + +##### Base DN (Base Distinguished Name) + +Base DN is the root of the tree of LDAP. it is like root path of filesystem `/`. All the actions such as search or work are started from Base DN. + +- LLDAP setting + - Environment.="LLDAP_LDAP_BASE_DN=dc=ilnmors,dc=internal" + - It means, the domain 'ilnmors.internal' will be the Base DN (the root of tree). + +##### Components of DN + +DN has components what has ordinary order. The order of these components are `cn(or uid)`, `ou`, `dc`. They are special attributes, which makes DN. + +- cn (Common name): The name of object. It is like a file itself in filesystem. +- ou (Organizational Unit): The name of the container which contain entries. It is like a folder in filesystem. +- dc (domain component): The domain components, Usually, it devides the full domain `ilnmors.internal` as `dc=ilnmors`, `dc=internal`. + +##### Object (or Entry) + +Object is the real data item the DN defines, like file or folder itself in filesystem. + +##### ObjectClass + +This is the template or blueprint of object. ObjectClass defines what is this object; Either a person or a group, or an organization unit. + +- What is this object; user (cn or uid) or group (ou). +- What is the attribute object must have, and might have. +- Example: + - `ObjectClass: person`: This must have `sn` and `cn` attributes. + - `ObjectClass: organizationalPerson`: This is inherited the `ObjectClass: person`, and it can have `mail` or `number` attributes. +- LLDAP uses standard ObjectClass: `person`, `groupOfNames`. + +##### Attribute + +Attribute is the value of object, like the content of file or folder in filesystem. It is saved as the pairs of key-value. This values are following the definition of ObjectClass. 
+ +- Example + - `uid=user1` has values following `objectClass: person` and `objectClass: organizationalPerson` + - It must have `sn` and `cn` + - It can have `mail` or `number` + - It can have the special attribute `memberOf: cn=admins,ou=admin_group, ...` (the attribute that shows the group the user belongs to) + +### IdP (Identity Provider) + +Many modern services, SPs (Service Providers), use the SAML (Security Assertion Markup Language) or OIDC (OAuth 2.0/OpenID Connect) protocol to implement SSO. Originally, each service had to send an authentication request to the central server (which could be LDAP or something else) individually to implement SSO. It means each service had to protect the sensitive data itself, and any service with a vulnerability could threaten the whole system. This is why many modern services use and support the SAML or OIDC protocols. An IdP is needed to use these protocols. The IdP acts as the agent of all services, substituting for the authentication process on their behalf. Only the IdP can access the real database, and all services trust the IdP's authentication in this model. + +#### SAML (Security Assertion Markup Language) + +The SAML protocol was developed in 2001 for SSO. This protocol works with the XML (eXtensible Markup Language) format, which can meet complex security requirements in enterprise environments. It has been used for a long time, which makes this protocol very stable. However, XML itself is a complex and heavy format. This fact makes the protocol complex and heavy to use in common, small environments. The process of SAML is below. + +- Start + +The user sends a request to the service. When the SP receives the request, it redirects the request to the IdP. + +- Authentication + +The IdP asks the user for login information and authenticates the user against the database (like LDAP). When the authentication process succeeds, the IdP generates a SAML assertion including the user's identity, based on XML and signed to ensure integrity. The IdP sends this assertion to the SP via the user's browser.
+ +- Finish + +SP receives the assertion from IdP via user's browser, it verify the assertion. When it is valid, they allow login. + +#### OIDC (OpenID Connect) and OAuth 2.0 + +OIDC was developed in 2014, it is newer than SAML. This is an authentication layer on OAuth 2.0 protocol. Basically, OAuth 2.0 is for authorization, and OIDC is for authentication. OAuth 2.0 protocol works on JSON/REST format, especially JWT (JSON Web Token) which is lighter and simpler than XML. OIDC is latest standard of SSO, it supports social login, and friendly to API. These features makes this protocol use on small and personal environment easily. There is the process of OIDC below. + +- Start + +User sends the requset to the service (in OIDC, RP; Relying Party). When RP receives the request, it redirects to IdP (in OIDC, OP; OpenID Provider). + +- Authentication + +IdP (OP) asks login information, and simultaneously asks permission to provide the information to RP. After getting information and permission, IdP generates two tokens which are for different purposes. One is an ID token (JWT) which contains user identity for authentication, the other is an access token for authorization. IdP sends these token to RP. The differences between SAML are what data format does protocol use, and whether IdP gets permission or not. + +- Finish + +RP recieves the tokens from IdP, and it verify the tokens. When it is valid, they allow login. + +#### Reverse proxy + +When application doesn't support SSO, then you can use reverse proxy as the door of SSO using `Forward Authentication`. Usually, every web packet pass through reverse proxy in modern internet environment. Therefore, reverse proxy (i.e. Caddy) can intercepts the packets and force them to SSO from IdP or OP (or LDAP itself) before they reach to the application. + +- Header based + +When Authelia success to authenticate someone, Caddy sends specific header which contains user information like `X-Forwarded-User: A`. 
Application gets this header, it automatically allows login to A. However, it is needed the ID in the application manually. + +> If hacker can access to application without reverse proxy, it can make X header to fake application. In this homelab, all access towards application will be limited by iptables from reverse proxy or sidecar reverse proxy. + +- LDAP based + +LLDAP will be a light and simple LDAP server. Authelia supports OIDC OP(idP) based on external LDAP server. LLDAP server will becom the external LDAP server for Authelia + + +--- +## Example of Authelia flow + +### Flow + +- Define the user and group in LLDAP server + +#### OIDC supported app + +- At the service, choose login way as Authelia (OP) +- Login at the Authelia. +- Authelia access to LLDAP server to authenticate the user based on LDAP. +- Login succeed, Authelia generate token and Service turst OP's token, and allow login (authentication and authorization) + +#### Non-OIDC supported app + +- Foward_Auth function with Authelia. \ No newline at end of file diff --git a/docs/archives/2025-12/02_theory/02_06_email.md b/docs/archives/2025-12/02_theory/02_06_email.md new file mode 100644 index 0000000..911da12 --- /dev/null +++ b/docs/archives/2025-12/02_theory/02_06_email.md @@ -0,0 +1,242 @@ +Tags: #theory, #network, #protocol + +## Email service + +Email is the mail service online via the internet. ARPANET was developed in 1969, since then there has been many attempts to send messages via the internet. The mail which uses `@` character in 1971 and `SMTP(Simple Mail transfer Protocol)` was developed to standardize various ways to email. + +### Component of Email service + +#### Address + +Basically, Email address has format like this. `local-parts@domain`. `local-parts` is identifier, and `domain` is service provider's domain. Following RFC 5321, `domain` doesn't distinguish it upper or lower case. `local-parts` must distinguish them, but practically they doesn't. 
+ +#### MUA (Mail User Agent) + +MUA is the client of Email. The user can write Email, or read the Email which they got recieved. For instance, Outlook, Thunderbird, etc. + +#### MTA (Mail Transfer Agent) + +This is the essential part of Email service. MTA transpers the mail to other MTA or MDA. For instance, Postfix, sendmail, Exim, etc. + +#### MDA (Mail Delivery Agent) + +MDA recieves the mail from MTA, and it store the mail on receivers' mailbox. Sometimes, it is combined MTA or IMAP/POP3 servers. For instance, Dovecot LDA, Procmail, etc. + +#### Flow of Email service + +- User writes the mail on MUA. +- User sends the mail from MUA to MTA using SMTP submission protocol. +- MTA checks receiver's domain, and transfer the mail to other MTA which takes charge of that domain. +- MTA recieves the mail and sends receiver's MDA. +- The receiver's MUA access to the MDA such as IMAP or POP3 server. the receiver can check and read the email on their MUA. + +## Protocols +### SMTP (Simple Mail Transfer Protocol) + +SMTP is standard of email transfer protocol internet defined on RFC 5321. This protocol is used when MUA sends the mail to MTA, and MTA sends the mail other MTAs. This protocol takes charge of all process of transportation of the mails. + +#### Detail of SMTP + +##### Start + +- Connection + +The client and server make the connection via SMTP port (25/tcp). + +##### Greeting + +- `220` code + +The server sends `220` code to the client, they are ready. + +- `HELO` or `EHLO` + +The client sends `HELO` or expand version of `HELO`; `EHLO` command to server to introduce itself. + +##### Designate sender and recipient + +Use the command below, they designate sender and recient. + +- `MAIL FROM:` +- `RCPT TO:` + - If there were various recipients, use this command as much as recipients number. + +##### Transper the mail data + +- `DATA` and `354` + +The client sends `DATA` command to server. 
After the server responds with the `354` code, the client sends the data, including the mail header (From, To, Subject) and the content of the mail. The end of the data is marked by a line containing only `.`. + +##### End + +- `QUIT` + +When the client sends the `QUIT` command, the connection is terminated. + +##### Ports + +- `25/tcp` + +Traditional SMTP's standard port. Content sent over `25/tcp` is not encrypted. Because of security and SPAM problems, a lot of ISPs block the `25/tcp` port for common users. + +- `587/tcp` (Submission) + +The standard port of SMTP for encryption. Generally an MUA sends mail to an MTA with this port. It is necessary to use an encrypted connection via `STARTTLS`. + +- `465/tcp` (SMTPS) + +This port used to be used for TLS/SSL for SMTP. It is not standard, so it is recommended to use the `587/tcp` port for TLS/SSL with SMTP. However, even now it is generally and commonly used. + +##### Security + +SMTP is a very old protocol, and it uses plain data. It is recommended to use `STARTTLS` or `SMTPS` to encrypt data for security. + +- `SMTPS` + +It uses TLS/SSL from the beginning of the connection via `465/tcp`. + +- `STARTTLS` + +It begins the connection via `587/tcp` as plain data, and starts encryption with the `STARTTLS` command. + +##### Authentication + +It is necessary to verify users' identities, such as name and password, to prevent anyone from sending malicious mail using the server. SMTP uses the SASL (Simple Authentication and Security Layer) mechanism to authenticate its users. + +##### Relay + +An MTA has to send mail to other MTAs to guarantee the mail can arrive at the recipient. The MTA uses the `relay` function for this. Make sure to allow this function only for authenticated users or trusted networks to prevent malicious usage. + +### IMAP (Internet Message Access Protocol) + +IMAP is the protocol to read and manage mail from a remote MDA (mail server). The difference from POP3 is that IMAP can manage the mail and its mailbox remotely, even without downloading. It is defined in RFC 3501.
+ +#### Detail of IMAP + +IMAP is the protocol to have a communication with various commands while the connection is stable. The client sends specific `tag` in front of command, and the server responds with `tag` to process the actions. + +##### Authentication + +- `LOGIN` or `AUTHETICATE` + +IMAP authenticate the user with `LOGIN` command with ID and password or `AUTHENTICATE` command with SASL. + +##### Mailbox + +- `LIST` +- `SELECT` +- `CREATE` +- `DELETE` +- `RENAME` + +##### Mail + +- `FETCH` + +IMAP can take the mail list, the mail itself, or content of the mail, even the attachment in the mail. + +##### Statement + +- `STORE` + - `\Seen` + - `\Flagged` + - `\Answered` + - `\Deleted` + +IMAP can set the status flag of mail with command flags. + + +##### Search + +- `SEARCH` + +IMAP can search the mail with various condition of the mail (Sender, title, contents, date, etc) from server. + +##### Ports + +IMAP strongly recommend to use TLS/SSL with `STARTTLS`. Even though the beginning of conversation is not encrypted, TLS/SSL is applied with the `STARTTLS` command. + +- `143/tcp` + +The basic IMAP port. It is mendetory to use `STARTTLS` to use IMAP with this port. + +- `993/tcp` (IMAPS) + +This port uses TLS/SSL in the beginning of communication. It is not a standard but it is generally and commonly use for security. + +##### Synchronization + +IMAP basically server's mail and mail list, so wherever you access the mail you can see the same condition and status of mailbox. When one mail is modified on one device it is applied all devices simultanaeously. + +- `IDLE` + +This command supports to maintain connection between server and client, when the new mail comes or the status is changed the client can get notification immediately. + +### POP3 (Post Office Protocol version 3) + +POP3 protocol is basically designed to download the mail on local client from remote mail server. It is defined on RFC 1939. 
The biggest difference between IMAP and POP3 is that POP3 basically deletes the mail on the server after downloading. + +#### Detail of POP3 + +##### Authorization + +- `USER` and `PASS` + +The client connects to the server and conducts authentication with the `USER` and `PASS` commands. + +##### Transaction + +- `STAT` +- `LIST` +- `RETR` +- `DELE` + +POP3 uses various commands to download or delete mail. It checks the number and size of mails with `STAT`, lists them with `LIST`, downloads a mail (and saves it on the client) with `RETR`, and deletes a mail with `DELE`. + +##### Update + +When the client sends the `QUIT` command, the server deletes the mails marked with `DELE` from the server and terminates the connection. + +##### Ports + +POP3 strongly recommends using TLS/SSL with `STARTTLS`. Even though the beginning of the conversation is not encrypted, TLS/SSL is applied with the `STARTTLS` command. + +- `110/tcp` + +The basic port of POP3. It is mandatory to use `STARTTLS` to use POP3 with this port. + +- `995/tcp` (POP3S) + +This port uses TLS/SSL from the beginning of communication. It is not a standard, but it is generally and commonly used for security. + +##### Simplicity and locality + +Since POP3 basically deletes the mail from the mail server, the mail exists only on the local client. However, because it doesn't require complex features like IMAP, it offers simplicity. + +--- + +## local mail service in homelab + +### SMTP server (MTA) + +#### Postfix + +Postfix will be used as the MTA which takes charge of the `@ilnmors.internal` domain. However, Postfix in this homelab will never be open towards the WAN environment. It works as a local private MTA. The internal services (Gitea, OPNsense, Prometheus, etc.) will send mail via `587/tcp` to Postfix. When it needs to send mail towards the WAN, it will use the `relayhost` function and external email services such as Google or Naver. `relayhost` makes Postfix act as a `client`, not an `MTA`.
It means, administrator never takes care about IP reputation or SPAM problems. WAN area's `MTA` function is delegated to public mail service providers. + +### IMAP/POP3 server (MDA) + +#### Dovecot + +Dovecot will be used as IMAP server of local private MTA; Postfix. The user can use MUA (Thunderbird, Outlook, or mail application, even Roundcube webmail) to access the private mail ` +`@ilnmors.internal` via Dovecot. The user will ues `993/tcp` to access Dovecot, and Postfix store the mails on Dovecot. + +#### mbsync + +mbsync will be used as IMAP client of public MTA; Google or Naver. This will fetch public mail `@external-domain.com` to local Postfix from public mail service provider, and eventually the user can access the mail on Dovecot. However, it is important not to delete the mails from public mail servers with proper configuration. + +### MUA + +#### SnappyMail web mail + +This will be used as MUA server on `app` server to access all mails at the same space. \ No newline at end of file diff --git a/docs/archives/2025-12/03_common/03_01_debian_configuration.md b/docs/archives/2025-12/03_common/03_01_debian_configuration.md new file mode 100644 index 0000000..fea4661 --- /dev/null +++ b/docs/archives/2025-12/03_common/03_01_debian_configuration.md @@ -0,0 +1,657 @@ +Tags: #common, #configuration, #os + +## Installation + +### General setting + +- Language: English - English +- Location: Other > Asia > South Korea +- Locale: United State - en_US.UTF-8 +- Keymap to use: American English + +### Network setting + +#### Auto Configuration + +- Auto Configuration: Using DHCP + +#### Manual configuration + +> `net` server should be configured manually - It has no private DHCP server and DNS server + +- `cancel` auto configuration is needed +- IP address: 192.168.10.11/24 +- Gateway: 192.168.10.1 +- Name server address: 1.1.1.2 + > After AdGuard home and BIND are set, change 192.168.10.11 on `/etc/resolv.conf` + +#### Common setting + +- Hostname: \[vmm, net, auth, 
dev, app\] +- Domain: ilnmors.internal + +### User setting + +- Root Password +- User name: \[vmm, net, auth, dev, app\] +- User account: \[vmm, net, auth, dev, app\] +- User Password + +### Partition setting + +- Partitioning method: Manual + +#### Common + +- 512MiB - EFI system partition (Boot flag: on) +- 1GiB - Ext4 Journaling (Mount: `/boot`) + +#### Hypervisor + +- 780GiB - Physical volume for LVM + - Configure the Logical Volume Manager + - Create volume group - vmm + - Create logical volume - vmm - \[root, swap, data\] + - Finish +- vmm-root: 64GiB - Ext Journaling (Mount: `/`) +- vmm-swap: 16GiB - SWAP area +- vmm-libvirt: 700GiB - Ext4 Journaling (Mount: `/var/lib/libvirt`) + +#### VM servers + +- 32GiB(net), 64GiB(auth), 256GiB(dev,app) - Ext4 Journaling(mount: /) + > Don't set swap partition, in qcow2 it is not effective but causes over head(CoW) + +#### Application server + +- Don't set HDD's partition +- Each HDD (2TB) + > btrfs supports RAID itself. After installing, set RAID10. 
These will be set with `fdisk` + +### Debian package manager setting + +- Scan extra installation media: no +- Mirror country: South Korea +- Archive mirror: deb.debian.org +- Proxy: \[blank\] +- Popularity-contest: no + +### Installing packages setting + +- \[\*\] SSH server +- \[\*\] Standard system utilities + +## Environment configuration + +### Package configuration + +#### Common packages + +```bash +apt update && apt upgrade +apt install sudo iptables-persistent crowdsec acl +``` + +- wait-for-it.sh + - Download it [here](https://github.com/vishnubob/wait-for-it/blob/master/wait-for-it.sh) + - ~/data/config/scripts +#### Hypervisor + +```bash +apt install qemu-system-x86 ksmtuned libvirt-daemon-system virtinst virt-top ifupdown2 openvswitch-switch + +# ksmtuned automatically adjusts ksm following its usage +# `cat /sys/kernel/mm/ksm/pages_sharing` shows the page shared by ksmtuned, result * 4KiB is sharing volume +``` + +#### VMs + +```bash +apt install podman curl jq age + +# app + +apt install btrfs-progs + +# SOPS package +curl -LO https://github.com/getsops/sops/releases/download/v3.11.0/sops-v3.11.0.linux.amd64 + +mv sops-v3.11.0.linux.amd64 /usr/local/bin/sops + +chmod +x /usr/local/bin/sops +# Check update this via Diun +``` + +### User configuration + +```bash +usermod [user_name] -u [uid] + +# Change default group as svadmins +groupmod [group_name] -g 2000 -n svadmins +chown -R [user_name]:svadmins /home/[user_name] +chmod 770 /home/[user_name] +usermod -aG sudo [user_name] + +# Hypervisor +usermod -aG kvm vmm +usermod -aG libvirt vmm + +# After user configuration, proceed all step as user with sudo +``` + +### Make directory structure + +#### Common + +- /etc/secrets (root:root 711) +- /etc/secrets/\$UID (\$UID:root 500, files: $UID:root 400) + +#### Hypervisor + +- (vmm:svadmins 700) +- ~/data/config/{scripts,server,services,vms} +- ~/data/config/vms/{networks,storages,dumps} +- /var/lib/libvirt/images + +#### VMs + +- (\[vms\]:svadmins 700) +- 
~/data/{config,containers} +- ~/data/config/{containers,scripts,secrets,server,services} +- ~/data/containers/apps/{certs,etc.} +- ~/kopia + +#### Application server + +##### SSD + +- (app:svadmins 700) +- ~/data/{config,containers} +- ~/data/config/{containers,scripts,secrets,server,services} +- ~/data/containers/app/{certs,etc.} +- ~/kopia + +##### HDD + +RAID10 HDDs mount on this directory. Following 08_01_app_vm before you set. +- (app:svadmins 700) +- ~/hdd/data/containers +- (app:svadmins 770) +- ~/hdd/backups +- The scrub timer systemd is required for its integrity. + +### SSH configuration + + +- File: + - /etc/ssh/sshd_config + - ~/.ssh/authorized_keys + +```ini +# /etc/ssh/sshd_config +# key generation +ssh-keygen -t ed25519 -f ~/.ssh/key_name -C "comment" +# ... +PermitRootLogin no +# ... +``` + +```bash +# Reload sshd +sudo systemctl restart sshd + +# Deploy publicly key for ssh +mkdir ~/.ssh && chmod 700 ~/.ssh +nano ~/.ssh/authorized_keys && chmod 600 ~/.ssh/authorized_keys +# Paste public key from the client +``` + +> If the ICMP protocol doesn't work towards Windows, check Windows' firewall inbound `파일 및 프린터 공유(에코 요청 - ICMPv4-In)" and "파일 및 프린터 공유(에코 요청 - ICMPv6-In)` activatation + +### Linger configuration + +```bash +sudo loginctl enable-linger [user_name] +loginctl show-user $(whoami) +# Linger=yes +``` + +> Linger is necessary to execute services regardless their login session. If linger option were not activated, all the process would be terminated after logout. + +### Network configuration + +- File: + - /etc/hosts + - /etc/resolv.conf + - /etc/network/interfaces + +#### Hostname + +```ini +# /etc/hosts +[ip_address] [FQDN] [hostname] +``` + +#### DNS + +- Only for `hypervisor` and `net` which are set manually. 
+ +```ini +# /etc/resolv.conf +# Before setting local DNS +nameserver 1.1.1.2 +# After setting local DNS (net server) +# nameserver 192.168.10.11 +``` + +#### Interface + +- Only for `hypervisor` and `net` which are set manually + +##### Hypervisor + +```ini +# /etc/network/interfaces +# ifupdown2 and openvswitch-switch are required + +source /etc/network/interfaces.d/* + +# The loopback network interface +auto lo +iface lo inet loopback + +# The openvswitch virtual bridge interfaces +auto ovsbr0 +iface ovsbr0 inet manual + ovs_type OVSBridge + ovs_ports enp2s0 + +auto ovsbr1 +iface ovsbr1 inet manual + ovs_type OVSBridge + ovs_ports enp3s0 vlan1 vlan10 + +# The primary network interfaces +auto enp2s0 +iface enp2s0 inet manual + ovs_type OVSPort + ovs_bridge ovsbr0 + +auto enp3s0 +iface enp3s0 inet manual + ovs_type OVSPort + ovs_bridge ovsbr1 + ovs_options vlan_mode=native-untagged tag=1 trunks=10 + +# The vlan interfaces +auto vlan1 +iface vlan1 inet static + ovs_type OVSIntPort + ovs_bridge ovsbr1 + ovs_options tag=1 + address 192.168.1.10/24 + + +auto vlan10 +iface vlan10 inet static + ovs_type OVSIntPort + ovs_bridge ovsbr1 + ovs_options tag=10 + address 192.168.10.10/24 + gateway 192.168.10.1 + +# The primary network interfaces for IPv6 +# iface enp2s0 inet6 auto +# iface enp3s0 inet6 auto +# openvswitch vlan options +# ovs_options tag=n : access mode +# ovs_options trunk=n : trunk mode +# ovs_options vlan_mode=native-untagged tag=n trunk=m: native untagged mode +``` + +> Having two IP addresses in a client can cause some problems. One of them is `Asymmetric routing`. When it gets the packet via vlan10 interface from vlan1 client, the server uses vlan1 interface, because client and server are in the same subnet (L2). It can cause problem, like disconnection usually. There is the solution to solve this. +> +> - Use `mangle` table in iptables, you can solve this, but it is complex to use. +> - Use `IP rule` command, it is easier to set. 
+ +##### net + +```ini +# /etc/network/interfaces + +source /etc/network/interfaces.d/* + +# The loopback network interface +auto lo +iface lo inet loopback + +# The primary network interface +auto enp1s0 +iface enp1s0 inet static + address 192.168.10.11/24 + gateway 192.168.10.1 +# Don't set dns options. It will be set on /etc/resolv.conf +``` + +### Hypervisor + +#### VFIO configuration + +##### IOMMU setting + +- File: /etc/default/grub + +```ini +# /etc/default/grub +GRUB_CMDLINE_LINUX_DEFAULT="quiet intel_iommu=on" +# Intel CPU: intel_iommu=on, AMD CPU: amd_iommu=on +``` + +##### Device setting + +- File: /etc/modprobe.d/vfio.conf + +```bash +# Check device +lspci -nn | grep -i sata +# 04:00.0 STAT controller [0106] ... [1b21:1064] +# PCI address: 04:00.00 / Device ID 1b21:1064 + +# IOMMU group checking +readlink /sys/bus/pci/devices/0000\:04\:00.0/iommu_group +# ../../../../kernel/iommu_groups/14 + +# The group of PCI device which is needed to passthrough is 14 +ls /sys/kernel/iommu_groups/devices/14 +# 04:00.00 # To passthrough device, it has to be only device in its IOMMU group +``` + +```ini +# /etc/modprobe.d/vfio.conf +options vfio-pci ids=1b21:1064 +softdep ahci pre: vfio-pci +``` + +##### Apply all configuration + +```bash +sudo update-grub +sudo update-initramfs -u +sudo reboot + +# Check IOMMU enable message +dmesg | grep -e DMAR -e IOMMU +# Check the driver +lspci -nnk -d 1b21:1064 +# Kernel driver in user: vfio-pci +``` + +### VM servers + +#### Serial console setting + +```bash +# It is necessary to connect via ssh to set serial setting +sudo systemctl enable getty@ttyS0 +sudo systemctl start getty@ttyS0 +``` + +#### Secret management + +##### SOPS setting + +- File: + - ~/data/config/secrets/age-key.gpg + - ~/data/config/secrets/.sops.yaml + - ~/data/config/secrets/.secret.yaml + - /etc/secrets/\$UID (\$UID:root 500) + +```bash +# Generate the key for sops +age-keygen -o ~/data/config/secrets/age-key + +# # created: 2025-10-17T13:30:00Z +# # 
public key: age1ql3z7h0cfscg...... +# AGE-SECRET-KEY-1..... + +# Public key is printed when key generated + +gpg --symmetric age-key && rm age-key +> GPG password: password + +nano ~/data/config/secrets/.sops.yaml && chmod 600 ~/data/config/secrets/.sops.yaml +``` + +```yaml +# ~/data/config/secrets/.sops.yaml +creation_rules: + - path_regex: \.secret\.yaml$ + age: [public_key] +``` + + +##### Create Secret file + +```yaml +# ~/data/config/secrets/.secret.yaml +# Format of .secret.yaml +# app1.env: +1SECRET: '1secret' +2SECRET: '2secret' + +app1.file: | + -----TEXT-AREA----- + contents of 3secret + -----END-AREA----- + +# app2.env +3SECRET: '3secret' +4SECRET: '4secret' + +# ... +``` + +```bash +sops --encrypt --in-place ~/data/config/secrets/.secret.yaml +``` + +##### Secret scripts + +- File: + - ~/data/config/scripts/secrets/edit_secret.sh + - ~/data/config/scripts/secrets/extract_secret.sh + +```bash +#!/bin/bash +# edit_secret.sh /path/of/secret + +set -e + +KEY_PATH="$HOME/data/config/secrets" +SECRET_FILE="$1" + +usage() { + echo "Usage: $0 \"/path/of/secret/file\"" + exit 1 +} + + +if [ -z "$SECRET_FILE" -o ! -f "$SECRET_FILE" ]; then + echo "Error: Secret file path is needed" + usage +fi + + +if [ ! 
-f "$KEY_PATH/age-key.gpg" ]; then
+  echo "Error: There is no key file"
+  exit 1
+fi
+
+# Delete password file after script
+cleanup() {
+  if [ -f "/run/user/$UID/age-key" ]; then
+    rm -f "/run/user/$UID/age-key"
+  fi
+}
+
+trap cleanup EXIT
+
+
+
+echo -n "Enter GPG passphrase: "
+read -s GPG_PASSPHRASE
+echo
+
+echo "$GPG_PASSPHRASE" | gpg --batch --yes --passphrase-fd 0 \
+--output "/run/user/$UID/age-key" \
+--decrypt "$KEY_PATH/age-key.gpg" && \
+chmod 600 "/run/user/$UID/age-key"
+
+if [ ! -f "/run/user/$UID/age-key" ]; then
+  echo "Error: Key file does not exist"
+  exit 1
+fi
+
+gpgconf --kill gpg-agent
+
+SOPS_AGE_KEY="$(cat "/run/user/$UID/age-key")"
+
+SOPS_AGE_KEY="$SOPS_AGE_KEY" sops "$SECRET_FILE"
+```
+
+```bash
+#!/bin/bash
+# extract_secret.sh /path/of/secret (-f|-e )
+
+set -e
+
+KEY_PATH="$HOME/data/config/secrets"
+SECRET_FILE=$1
+
+# shift the $2 as $1 ($1 < $2)
+shift
+
+# usage() function
+usage() {
+  echo "Usage: $0 \"/path/of/secret/file\" (-f|-e \"yaml section name\")" >&2
+  echo "-f : Print secret file" >&2
+  echo "-e : Print secret env file" >&2
+  exit 1
+}
+
+while getopts "f:e:" opt; do
+  case $opt in
+    f)
+      VALUE="$OPTARG"
+      TYPE="FILE"
+      ;;
+    e)
+      VALUE="$OPTARG"
+      TYPE="ENV"
+      ;;
+    \?) # unknown options
+      echo "Invalid option: -$OPTARG" >&2
+      usage
+      ;;
+    :) # parameter required option
+      echo "Option -$OPTARG requires an argument." >&2
+      usage
+      ;;
+  esac
+done
+
+# Move past the parsed options to the positional parameters
+shift $((OPTIND - 1))
+
+# Check necessary options
+if [ ! -f "$SECRET_FILE" ]; then
+  echo "Error: secret file path is required" >&2
+  usage
+fi
+
+if [ -z "$TYPE" ]; then
+  echo "Error: -f or -e option is required" >&2
+  usage
+fi
+
+
+if [ ! 
-f "$KEY_PATH/age-key.gpg" ]; then + echo "Error: There is no key file" >&2 + usage +fi + +# Delete password file after script +cleanup() { + if [ -f "/run/user/$UID/age-key" ]; then + rm -f "/run/user/$UID/age-key" + fi +} + +trap cleanup EXIT + +echo -n "Enter GPG passphrase: " >&2 +read -s GPG_PASSPHRASE +echo >&2 + +echo "$GPG_PASSPHRASE" | gpg --batch --yes --passphrase-fd 0 \ +--output "/run/user/$UID/age-key" \ +--decrypt "$KEY_PATH/age-key.gpg" && \ +chmod 600 "/run/user/$UID/age-key" + +if [ ! -f "/run/user/$UID/age-key" ]; then + echo "Error: Key file does not exist" >&2 + exit 1 +fi + +gpgconf --kill gpg-agent + +SOPS_AGE_KEY="$(cat "/run/user/$UID/age-key")" + +if [ "$TYPE" == "FILE" ]; then + if RESULT=$(SOPS_AGE_KEY="$SOPS_AGE_KEY" sops --decrypt --extract "[\"$VALUE\"]" --output-type binary "$SECRET_FILE") ; then + echo -n "$RESULT" + exit 0 + else + echo "Error: SOPS extract error" >&2 + exit 1 + fi +fi + +if [ "$TYPE" == "ENV" ]; then + if RESULT=$(SOPS_AGE_KEY="$SOPS_AGE_KEY" sops --decrypt --extract "[\"$VALUE\"]" --output-type dotenv "$SECRET_FILE") ; then + echo -n "$RESULT" + exit 0 + else + echo "Error: SOPS extract error" >&2 + exit 1 + fi +fi +``` + +##### Secret value management + +- Using `extract_secret.sh` +- Inject secret value to `podman secret` or `/etc/secrets/$UID` + +```bash +# /etc/secrets/$UID +# Before use sudo tee, make sure sudo doesn't need password. +# i.e. sudo ps -ef command execute before this command. 
+
+# Env file
+extract_secret.sh ~/data/config/secrets/.secret.yaml -e "$value" > /run/user/$UID/tmp.env \
+&& sudo mv /run/user/$UID/tmp.env /etc/secrets/$UID/"$FILE_NAME" \
+&& sudo chown $UID:root /etc/secrets/$UID/"$FILE_NAME" \
+&& sudo chmod 400 /etc/secrets/$UID/"$FILE_NAME"
+# Normal file
+extract_secret.sh ~/data/config/secrets/.secret.yaml -f "$value" > /run/user/$UID/tmp.env \
+&& sudo mv /run/user/$UID/tmp.env /etc/secrets/$UID/"$FILE_NAME" \
+&& sudo chown $UID:root /etc/secrets/$UID/"$FILE_NAME" \
+&& sudo chmod 400 /etc/secrets/$UID/"$FILE_NAME"
+
+# Podman secret
+# Podman doesn't support .env file parsing, so you have to enroll all values
+extract_secret.sh ~/data/config/secrets/.secret.yaml -f "$value" | podman secret create "[$FILE_NAME|$ENV_NAME]" -
+```
+
+> The reason why the secret values are applied manually is complexity. A `yaml` file can hold a lot of values in one section, and it is hard to make a script that deals with every exception and scenario. The simple way is the best way.
\ No newline at end of file
diff --git a/docs/archives/2025-12/03_common/03_02_iptables.md b/docs/archives/2025-12/03_common/03_02_iptables.md
new file mode 100644
index 0000000..f116a73
--- /dev/null
+++ b/docs/archives/2025-12/03_common/03_02_iptables.md
@@ -0,0 +1,273 @@
+Tags: #common, #configuration, #network, #security
+
+## iptables
+
+iptables is the firewall program to manage netfilter, which is in the linux kernel. Basically, iptables' settings are temporary (when you reboot the computer they disappear), so you can use the netfilter-persistent program to make the settings permanent. iptables has 3 modes (tables), which are filter, nat, and mangle, and each table has its chains like input, forward or prerouting, etc. Because iptables' settings are reset when the machine reboots, use `netfilter-persistent save` after setting the rules to make them permanent. 
+
+### tables
+
+#### filter
+
+filter is the basic and most important table in iptables. Its role is simply to judge whether to ACCEPT or DROP the packets. When you use the iptables command without the table option (-t), the filter table is the default. There are the chains of the filter table below.
+
+- INPUT: check the packets coming in
+- OUTPUT: check the packets going out
+- FORWARD: check the packets which are passing through
+
+#### nat
+
+nat is the table changing packets' addresses and ports without changing the contents of the packets. There are the chains of the nat table below.
+
+- PREROUTING: change the destination address or port right after packets arrive
+- POSTROUTING: change the source address or port before packets depart
+- OUTPUT: change the destination address or port of packets which are produced by the host itself (This doesn't change the source IP; DNAT)
+
+#### mangle
+
+mangle is a special table to put marks on packets. It works on every chain and is used for special purposes like asymmetric routing.
+
+### grammar
+
+#### Commands
+
+- -A \[--append\]: create the new rules
+- -C \[--check\]: check the packets
+- -D \[--delete\]: delete the rules
+- -F \[--flush\]: delete all rules from the chain
+- -I \[--insert\]: Insert the new rules
+- -L \[--list\]: print the rules
+- -N \[--new\]: create the new chain
+- -P \[--policy\]: change the default policy
+- -R \[--replace\]: change the rules as a new rule
+- -X \[--delete-chain\]: delete chain
+- -Z \[--zero\]: reset the packet and byte counter value of all chain
+
+#### match
+
+- -s \[--source\]: designate source ip address or networks
+- -d \[--destination\]: designate destination ip address or networks
+- -p \[--protocol\]: match protocol(tcp/udp/icmp.. etc.)
+  - --dport: designate the destination port number (when the protocol is already defined - tcp or udp)
+  - --syn: match SYN packets; the rule applies when a new TCP connection is being started. 
+- -i \[--in-interface\]: input interface +- -o \[--out-interface\]: output interface +- --comment: comment(max 256byte) +- -f \[--fragment\] +- -t \[--table\]: designate table set(default: filter) +- -j \[--jump\]: designate targets +- -m \[--match\]: match with specific module + - conntrack --ctstate: current linked connection + +#### target + +- ACCEPT: allow packets +- DROP: deny packets without response (Hide server existence) +- REJECT: deny packets with response (Show server existance) +- LOG: log the packets on syslog +- RETURN: stop current rules, and return to the previous chain + +#### Command + +```bash +iptables -L -v -n # Print all rules in filter table +iptables -L -v -n -t nat # Print all rules in nat table +``` + +## netfilter-persistent + +### Save the rules + +```bash +sudo netfilter-persistent save +``` + +- File: + - /etc/iptables/rules.v4 + - /etc/iptables/rules.v6 + +### Reload the rules (manual) + +```bash +# Edit the file +# Test +sudo bash -c 'iptables-restore --test < /etc/iptables/rules.v4' +# If there were no message, it would have no error +sudo netfilter-persistent start +# or +sudo netfilter-persistent reload +``` + +### Rule files + +- File: /etc/iptables/rules.v4 +#### Hypervisor (vmm) + +```ini +*filter +:INPUT DROP [0:0] +:FORWARD DROP [0:0] +:OUTPUT ACCEPT [0:0] +-A INPUT -m conntrack --ctstate RELATED,ESTABLISHED -m comment --comment "allow established connection" -j ACCEPT +-A INPUT -i lo -m comment --comment "allow local connection" -j ACCEPT +-A INPUT -p icmp -m comment --comment "allow ICMP connection" -j ACCEPT +-A INPUT -s 192.168.1.11/32 -p tcp -m tcp --dport 22 -m comment --comment "allow emergemcy LAN console ssh connection" -j ACCEPT +-A INPUT -s 10.10.10.2/32 -p tcp -m tcp --dport 22 -m comment --comment "allow vpn console ssh connection" -j ACCEPT +-A INPUT -s 10.10.10.3/32 -p tcp -m tcp --dport 22 -m comment --comment "allow vpn console ssh connection" -j ACCEPT +-A INPUT -s 10.10.10.4/32 -p tcp -m tcp --dport 22 
-m comment --comment "allow vpn console ssh connection" -j ACCEPT +-A INPUT -s 192.168.10.13/32 -p tcp -m tcp --dport 22 -m comment --comment "allow code-server ssh connection" -j ACCEPT +COMMIT +``` + +#### net + +```ini +*filter +:INPUT DROP [0:0] +:FORWARD DROP [0:0] +:OUTPUT ACCEPT [0:0] +-A INPUT -m conntrack --ctstate RELATED,ESTABLISHED -m comment --comment "allow established connection" -j ACCEPT +-A INPUT -i lo -m comment --comment "allow local connection" -j ACCEPT +-A INPUT -p icmp -m comment --comment "allow ICMP connection" -j ACCEPT +-A INPUT -s 10.10.10.2/32 -p tcp -m tcp --dport 22 -m comment --comment "allow vpn console ssh connection" -j ACCEPT +-A INPUT -s 10.10.10.3/32 -p tcp -m tcp --dport 22 -m comment --comment "allow vpn console ssh connection" -j ACCEPT +-A INPUT -s 10.10.10.4/32 -p tcp -m tcp --dport 22 -m comment --comment "allow vpn console ssh connection" -j ACCEPT +-A INPUT -s 192.168.10.1/32 -p tcp -m tcp --dport 22 -m comment --comment "allow OPNsense ssh connection for ACME update" -j ACCEPT +-A INPUT -s 192.168.10.10/32 -p tcp -m tcp --dport 22 -m comment --comment "allow hypervisor ssh connection" -j ACCEPT +-A INPUT -s 192.168.10.13/32 -p tcp -m tcp --dport 22 -m comment --comment "allow code-server ssh connection" -j ACCEPT +-A INPUT -p tcp -m tcp --dport 2053 -m conntrack --ctorigdstport 53 -m comment --comment "allow tcp DNS connection which is only prerouted from 53" -j ACCEPT +-A INPUT -p udp -m udp --dport 2053 -m conntrack --ctorigdstport 53 -m comment --comment "allow udp DNS connection which is only prerouted from 53" -j ACCEPT +-A INPUT -p tcp -m tcp --dport 2443 -m conntrack --ctorigdstport 443 -m comment --comment "allow tcp DoH(https) connection which is only prerouted from 443" -j ACCEPT +-A INPUT -p udp -m udp --dport 2443 -m conntrack --ctorigdstport 443 -m comment --comment "allow udp DoH(https) connection which is only prerouted from 443" -j ACCEPT +-A INPUT -s 192.168.10.1/32 -p tcp -m tcp --dport 2253 -m 
comment --comment "allow opnsense tcp nsupdate connection" -j ACCEPT +-A INPUT -s 192.168.10.1/32 -p udp -m udp --dport 2253 -m comment --comment "allow opnsense udp nsupdate connection" -j ACCEPT +-A INPUT -s 192.168.10.12/32 -p tcp -m tcp --dport 2253 -m comment --comment "allow auth tcp nsupdate connection" -j ACCEPT +-A INPUT -s 192.168.10.12/32 -p udp -m udp --dport 2253 -m comment --comment "allow auth udp nsupdate connection" -j ACCEPT +-A INPUT -s 192.168.10.13/32 -p tcp -m tcp --dport 2253 -m comment --comment "allow dev tcp nsupdate connection" -j ACCEPT +-A INPUT -s 192.168.10.13/32 -p udp -m udp --dport 2253 -m comment --comment "allow dev udp nsupdate connection" -j ACCEPT +-A INPUT -s 192.168.10.14/32 -p tcp -m tcp --dport 2253 -m comment --comment "allow app tcp nsupdate connection" -j ACCEPT +-A INPUT -s 192.168.10.14/32 -p udp -m udp --dport 2253 -m comment --comment "allow app udp nsupdate connection" -j ACCEPT +COMMIT +*nat +:PREROUTING ACCEPT [0:0] +:INPUT ACCEPT [0:0] +:OUTPUT ACCEPT [0:0] +:POSTROUTING ACCEPT [0:0] +-A PREROUTING -p tcp -m tcp --dport 53 -m comment --comment "allow and preroute tcp DNS connection 53 to 2053" -j REDIRECT --to-ports 2053 +-A PREROUTING -p udp -m udp --dport 53 -m comment --comment "allow and preroute udp DNS connection 53 to 2053" -j REDIRECT --to-ports 2053 +-A PREROUTING -p tcp -m tcp --dport 443 -m comment --comment "allow and preroute tcp DoH(https) connection 443 to 2443" -j REDIRECT --to-ports 2443 +-A PREROUTING -p udp -m udp --dport 443 -m comment --comment "allow and preroute udp DoH(https) connection 443 to 2443" -j REDIRECT --to-ports 2443 +-A OUTPUT -d 127.0.0.1/32 -p tcp -m tcp --dport 53 -m comment --comment "NAT local tcp DNS connection 53 to 2053" -j REDIRECT --to-ports 2053 +-A OUTPUT -d 127.0.0.1/32 -p udp -m udp --dport 53 -m comment --comment "NAT local udp DNS connection 53 to 2053" -j REDIRECT --to-ports 2053 +-A OUTPUT -d 192.168.10.11/32 -p tcp -m tcp --dport 53 -m comment --comment "NAT 
local tcp DNS connection 53 to 2053" -j REDIRECT --to-ports 2053 +-A OUTPUT -d 192.168.10.11/32 -p udp -m udp --dport 53 -m comment --comment "NAT local udp DNS connection 53 to 2053" -j REDIRECT --to-ports 2053 +-A OUTPUT -d 127.0.0.1/32 -p tcp -m tcp --dport 443 -m comment --comment "NAT local tcp DoH(https) connection 443 to 2443" -j REDIRECT --to-ports 2443 +-A OUTPUT -d 127.0.0.1/32 -p udp -m udp --dport 443 -m comment --comment "NAT local udp DoH(https) connection 443 to 2443" -j REDIRECT --to-ports 2443 +-A OUTPUT -d 192.168.10.11/32 -p tcp -m tcp --dport 443 -m comment --comment "NAT local tcp DoH(https) connection 443 to 2443" -j REDIRECT --to-ports 2443 +-A OUTPUT -d 192.168.10.11/32 -p udp -m udp --dport 443 -m comment --comment "NAT local udp DoH(https) connection 443 to 2443" -j REDIRECT --to-ports 2443 +COMMIT +``` + +#### auth + +```ini +*filter +:INPUT DROP [0:0] +:FORWARD DROP [0:0] +:OUTPUT ACCEPT [204:15800] +-A INPUT -m conntrack --ctstate RELATED,ESTABLISHED -m comment --comment "allow established connection" -j ACCEPT +-A INPUT -i lo -m comment --comment "allow local connection" -j ACCEPT +-A INPUT -p icmp -m comment --comment "allow ICMP connection" -j ACCEPT +-A INPUT -s 10.10.10.2/32 -p tcp -m tcp --dport 22 -m comment --comment "allow vpn console ssh connection" -j ACCEPT +-A INPUT -s 10.10.10.3/32 -p tcp -m tcp --dport 22 -m comment --comment "allow vpn console ssh connection" -j ACCEPT +-A INPUT -s 10.10.10.4/32 -p tcp -m tcp --dport 22 -m comment --comment "allow vpn console ssh connection" -j ACCEPT +-A INPUT -s 192.168.10.1/32 -p tcp -m tcp --dport 22 -m comment --comment "allow OPNsense ssh connection for ACME update" -j ACCEPT +-A INPUT -s 192.168.10.10/32 -p tcp -m tcp --dport 22 -m comment --comment "allow hypervisor ssh connection" -j ACCEPT +-A INPUT -s 192.168.10.13/32 -p tcp -m tcp --dport 22 -m comment --comment "allow code-server ssh connection" -j ACCEPT +-A INPUT -s 192.168.10.1/32 -p tcp -m tcp --dport 9000 -m comment 
--comment "allow opnsense step-ca connection" -j ACCEPT +-A INPUT -s 192.168.10.13/32 -p tcp -m tcp --dport 9000 -m comment --comment "allow dev step-ca connection" -j ACCEPT +-A INPUT -s 192.168.10.14/32 -p tcp -m tcp --dport 9000 -m comment --comment "allow app step-ca connetcion" -j ACCEPT +-A INPUT -p tcp -m tcp --dport 2080 -m conntrack --ctorigdstport 80 -m comment --comment "allow tcp http connection which is only from 80" -j ACCEPT +-A INPUT -p tcp -m tcp --dport 2443 -m conntrack --ctorigdstport 443 -m comment --comment "allow tcp https connection which is only from 443" -j ACCEPT +COMMIT +*nat +:PREROUTING ACCEPT [0:0] +:INPUT ACCEPT [0:0] +:OUTPUT ACCEPT [0:0] +:POSTROUTING ACCEPT [0:0] +-A PREROUTING -p tcp -m tcp --dport 80 -m comment --comment "allow and preroute tcp http connection 80 to 2080" -j REDIRECT --to-ports 2080 +-A PREROUTING -p tcp -m tcp --dport 443 -m comment --comment "allow and preroute tcp https connection 443 to 2443" -j REDIRECT --to-ports 2443 +-A OUTPUT -d 127.0.0.1/32 -p tcp -m tcp --dport 80 -m comment --comment "NAT local tcp http connection 80 to 2080" -j REDIRECT --to-ports 2080 +-A OUTPUT -d 192.168.10.12/32 -p tcp -m tcp --dport 80 -m comment --comment "NAT local tcp http connection 80 to 2080" -j REDIRECT --to-ports 2080 +-A OUTPUT -d 127.0.0.1/32 -p tcp -m tcp --dport 443 -m comment --comment "NAT local tcp https connection 443 to 2443" -j REDIRECT --to-ports 2443 +-A OUTPUT -d 192.168.10.12/32 -p tcp -m tcp --dport 443 -m comment --comment "NAT local tcp https connection 443 to 2443" -j REDIRECT --to-ports 2443 +-A OUTPUT -d 127.0.0.1/32 -p tcp -m tcp --dport 636 -m comment --comment "NAT local tcp ldaps connection 636 to 6360" -j REDIRECT --to-ports 6360 +-A OUTPUT -d 192.168.10.12/32 -p tcp -m tcp --dport 636 -m comment --comment "NAT local tcp ldaps connection 636 to 6360" -j REDIRECT --to-ports 6360 +COMMIT +``` + +#### dev + +```ini +*filter +:INPUT DROP [0:0] +:FORWARD DROP [0:0] +:OUTPUT ACCEPT [0:0] +-A INPUT -m 
conntrack --ctstate RELATED,ESTABLISHED -m comment --comment "allow established connection" -j ACCEPT +-A INPUT -i lo -m comment --comment "allow local connection" -j ACCEPT +-A INPUT -p icmp -m comment --comment "allow ICMP connection" -j ACCEPT +-A INPUT -s 10.10.10.2/32 -p tcp -m tcp --dport 22 -m comment --comment "allow vpn console ssh connection" -j ACCEPT +-A INPUT -s 10.10.10.3/32 -p tcp -m tcp --dport 22 -m comment --comment "allow vpn console ssh connection" -j ACCEPT +-A INPUT -s 10.10.10.4/32 -p tcp -m tcp --dport 22 -m comment --comment "allow vpn console ssh connection" -j ACCEPT +-A INPUT -s 192.168.10.1/32 -p tcp -m tcp --dport 22 -m comment --comment "allow OPNsense ssh connection for ACME update" -j ACCEPT +-A INPUT -s 192.168.10.10/32 -p tcp -m tcp --dport 22 -m comment --comment "allow hypervisor ssh connection" -j ACCEPT +-A INPUT -s 192.168.10.12/32 -p tcp -m tcp --dport 5432 -m comment --comment "allow auth postgresql connection" -j ACCEPT +-A INPUT -s 192.168.10.14/32 -p tcp -m tcp --dport 5432 -m comment --comment "allow app postgresql connection" -j ACCEPT +-A INPUT -s 192.168.10.12/32 -p tcp -m tcp --dport 2080 -m conntrack --ctorigdstport 80 -m comment --comment "allow tcp http connection which is only from 80 and main caddy" -j ACCEPT +-A INPUT -s 192.168.10.12/32 -p tcp -m tcp --dport 2443 -m conntrack --ctorigdstport 443 -m comment --comment "allow tcp https connection which is only from 443 and main caddy" -j ACCEPT +COMMIT +*nat +:PREROUTING ACCEPT [0:0] +:INPUT ACCEPT [0:0] +:OUTPUT ACCEPT [0:0] +:POSTROUTING ACCEPT [0:0] +-A PREROUTING -p tcp -m tcp --dport 80 -m comment --comment "allow and preroute tcp http connection 80 to 2080" -j REDIRECT --to-ports 2080 +-A PREROUTING -p tcp -m tcp --dport 443 -m comment --comment "allow and preroute tcp https connection 443 to 2443" -j REDIRECT --to-ports 2443 +-A OUTPUT -d 127.0.0.1/32 -p tcp -m tcp --dport 80 -m comment --comment "NAT local tcp http connection 80 to 2080" -j REDIRECT 
--to-ports 2080 +-A OUTPUT -d 192.168.10.13/32 -p tcp -m tcp --dport 80 -m comment --comment "NAT local tcp http connection 80 to 2080" -j REDIRECT --to-ports 2080 +-A OUTPUT -d 127.0.0.1/32 -p tcp -m tcp --dport 443 -m comment --comment "NAT local tcp https connection 443 to 2443" -j REDIRECT --to-ports 2443 +-A OUTPUT -d 192.168.10.13/32 -p tcp -m tcp --dport 443 -m comment --comment "NAT local tcp https connection 443 to 2443" -j REDIRECT --to-ports 2443 +COMMIT +``` + +#### app + +```ini +*filter +:INPUT DROP [0:0] +:FORWARD DROP [0:0] +:OUTPUT ACCEPT [0:0] +-A INPUT -m conntrack --ctstate RELATED,ESTABLISHED -m comment --comment "allow established connection" -j ACCEPT +-A INPUT -i lo -m comment --comment "allow local connection" -j ACCEPT +-A INPUT -p icmp -m comment --comment "allow ICMP connection" -j ACCEPT +-A INPUT -s 10.10.10.2/32 -p tcp -m tcp --dport 22 -m comment --comment "allow vpn console ssh connection" -j ACCEPT +-A INPUT -s 10.10.10.3/32 -p tcp -m tcp --dport 22 -m comment --comment "allow vpn console ssh connection" -j ACCEPT +-A INPUT -s 10.10.10.4/32 -p tcp -m tcp --dport 22 -m comment --comment "allow vpn console ssh connection" -j ACCEPT +-A INPUT -s 192.168.10.1/32 -p tcp -m tcp --dport 22 -m comment --comment "allow OPNsense ssh connection for ACME update" -j ACCEPT +-A INPUT -s 192.168.10.10/32 -p tcp -m tcp --dport 22 -m comment --comment "allow hypervisor ssh connection" -j ACCEPT +-A INPUT -s 192.168.10.13/32 -p tcp -m tcp --dport 22 -m comment --comment "allow code-server ssh connection" -j ACCEPT +-A INPUT -p tcp -m tcp --dport 2080 -m conntrack --ctorigdstport 80 -m comment --comment "allow tcp http connection which is only from 80" -j ACCEPT +-A INPUT -p tcp -m tcp --dport 2443 -m conntrack --ctorigdstport 443 -m comment --comment "allow tcp https connection which is only from 443" -j ACCEPT +COMMIT +*nat +:PREROUTING ACCEPT [0:0] +:INPUT ACCEPT [0:0] +:OUTPUT ACCEPT [0:0] +:POSTROUTING ACCEPT [0:0] +-A PREROUTING -p tcp -m tcp 
--dport 80 -m comment --comment "allow and preroute tcp http connection 80 to 2080" -j REDIRECT --to-ports 2080
+-A PREROUTING -p tcp -m tcp --dport 443 -m comment --comment "allow and preroute tcp https connection 443 to 2443" -j REDIRECT --to-ports 2443
+-A OUTPUT -d 127.0.0.1/32 -p tcp -m tcp --dport 80 -m comment --comment "NAT local tcp http connection 80 to 2080" -j REDIRECT --to-ports 2080
+-A OUTPUT -d 192.168.10.14/32 -p tcp -m tcp --dport 80 -m comment --comment "NAT local tcp http connection 80 to 2080" -j REDIRECT --to-ports 2080
+-A OUTPUT -d 127.0.0.1/32 -p tcp -m tcp --dport 443 -m comment --comment "NAT local tcp https connection 443 to 2443" -j REDIRECT --to-ports 2443
+-A OUTPUT -d 192.168.10.14/32 -p tcp -m tcp --dport 443 -m comment --comment "NAT local tcp https connection 443 to 2443" -j REDIRECT --to-ports 2443
+COMMIT
+```
\ No newline at end of file
diff --git a/docs/archives/2025-12/03_common/03_03_podman.md b/docs/archives/2025-12/03_common/03_03_podman.md
new file mode 100644
index 0000000..dc8b27f
--- /dev/null
+++ b/docs/archives/2025-12/03_common/03_03_podman.md
@@ -0,0 +1,306 @@
+Tags: #common, #configuration, #virtualization, #container, #os
+
+## podman
+
+A container is a virtualization technology to run applications independently regardless of what the host system is. Among container runtimes, Docker uses a daemon (dockerd) and a docker socket (root authority), so every container can access the host's root authority. This creates the danger that hackers can take over root authority via docker containers. Additionally, the docker daemon design makes it hard to bind each container to systemd, because all containers are managed by dockerd.
+
+Podman is a newer technology that solves docker's problems. It implements a daemonless and rootless container environment. This reduces the danger of a hacker getting root authority via containers, and it is easy to bind each container to systemd. 
+
+### Configuration
+
+- File: /etc/containers/registries.conf
+
+```bash
+# Check the linger
+loginctl show-user $(whoami)
+# Linger=yes
+```
+
+```ini
+# /etc/containers/registries.conf
+unqualified-search-registries = ["docker.io"]
+```
+
+### Networking
+
+Podman uses a specific IP address and domain names to communicate with its host system using pasta. Pasta is the new default network mode in podman 4.0, which allows communication with the host system directly. Therefore, it doesn't need a bridge or a host network. `169.254.1.2` is a link-local address used to communicate with the host system. Pasta performs SNAT: even though a packet's original source IP is the container's IP, when the packet passes through pasta its source IP is changed to 127.0.0.1 or the host's own IP (localhost).
+
+- `/etc/hosts` in container
+  - 169.254.1.2 host.containers.internal host.docker.internal
+
+When you use the option below, you can add another domain name to that line.
+
+- --host-name mydomain.internal:host-gateway
+
+Additionally, rootless podman containers cannot bind the host's privileged port numbers (below 1024). Therefore, if a container needs to use these ports, you have to use iptables' nat table. An example of iptables' usage is [here](./03_02_iptables.md).
+
+#### Bridge mode
+
+Bridge mode creates a virtual IP network separated from the host's network. This mode supports a simple DNS function between containers belonging to the same network. It basically uses SNAT (Source NAT) in both directions, inbound and outbound, because podman basically runs rootless. So, a container can't distinguish where the packets come from, except for packets from containers belonging to the same podman network. At the same time, the client also can't distinguish where the packets come from, because every packet seems to come from the host server. This makes inspection hard for both clients and containers.
+
+### ID mapping
+
+Podman basically maps the container's root to the host's executing user. 
Aside from root, podman uses the subuid and subgid system. They are set in `/etc/subuid`, `/etc/subgid`.
+
+- host(1000:1000) < container(0:0)
+- host(100999:100999) < container(1000:1000) : subuid, subgid
+
+When podman runs and executes commands with the -u option, the --userns=keep-id option, or the -uid, -gid options, it can adjust the mapping.
+
+- -u uid:gid: execute the container with the host's uid (bring the host's UID/GID towards the container's `/etc/passwd` directly.)
+- --userns=keep-id: map all of the container's file permissions to the host's file permissions.
+- --cap-add=DAC_READ_SEARCH option: without root authority, the container can access every file regardless of permission.
+
+#### Mapping error
+
+Some containers don't execute with root permission. They execute with their own specific uid (i.e. UID:53 - BIND). In this case, running the container with the `-u` option or the `--userns=keep-id` option can cause mapping errors very frequently.
+
+- `-u` option
+
+When the container runs with the `-u` option, the entrypoint can't work properly in many cases because it was already set to run as a specific uid. So, if the `-u` option were set, it would cause a permission error.
+
+- `--userns=keep-id` option
+
+This option makes the container's directory/file UID the same as the host's directory/file UID. So, it turns off the UID/GID mapping itself. When some directory which has root authorization is mapped to the host's file, a UID mapping error occurs.
+
+#### Permission management
+
+Use the ACL package to give additional permissions on a directory. It can give extra permissions to a subuid or the host's uid. 
+
+```bash
+# u:[subuid]:rwx
+sudo getfacl /path/of/podman_directory
+# `-d` option sets the default ACL for files or directories which are created later
+# `-R` option applies the permission recursively to files or directories which already exist
+sudo setfacl -d -m u:[subuid]:rwx /path/of/podman_directory
+sudo setfacl -d -m u:[hostuid]:rwx /path/of/podman_directory
+```
+
+### Usage of podman
+
+#### Containerfile and build
+
+The Containerfile format is compatible with the Dockerfile format. Here is an example below. A Containerfile can be built into a podman image with the `podman build` command.
+
+```containerfile
+FROM caddy:2.10.2-builder-alpine AS builder
+
+RUN xcaddy build \
+--with github.com/caddy-dns/rfc2136 \
+--with github.com/hslatman/caddy-crowdsec-bouncer/crowdsec \
+--with github.com/hslatman/caddy-crowdsec-bouncer/http
+
+FROM caddy:2.10.2
+
+COPY --from=builder /usr/bin/caddy /usr/bin/caddy
+```
+
+```bash
+# Build the container file as a podman image
+podman build -t caddy:2.10.2-main -f /path/of/containerfile-caddy-main . 
&& podman image prune -f +# Delete source images +podman rmi caddy:2.10.2-builder-alpine +podman rmi caddy:2.10.2 +``` + +#### Podman images + +- `podman images`: Print list of all local podman images +- `podman image pull [image_name]`: Download podman images from repository + - `podman pull` is the same command +- `podman image prune`: Remove unused and untagged images +- `podman image rm [images]`: Remove local podman image + - `podman rmi` is the same command + +#### Run and exec + +podman ps \[--all\]: it shows podman container lists +- podman run + - --name: container's name + - --restart: restart mode - unless-stopped + - --add-host: additional host domain name on 169.254.1.2 + - --cap-add: add some specific privileges without root authority + - -p host_ports:container_port + - -v host_path:container_path:permission(rw, ro, and when you use SELinux, you can use Z or z) + - -e environment_value + - -d: run background + - image_name +- podman exec -it \[container_name\] \[command\] + +#### Pod + +Pod makes each container which are in the same pod share some specific resources. The network(IP address), storage volumes. However, each container has their own file system, process, and resource limits. So, this is very useful to use various containers which has close relationship like application and Redis(cache db). + +- podman pod create + - --name: pod's name + - -p: host_ports:pod_ports +- podman run + - ... + - --pod pod's name + > Don't use `-p` option. Pod already has `-p` option. 
+ +#### Container and file management + +#### container + +- check pure container + +```bash +podman run --rm -it --entrypoint sh [image_name] --args +# or +podman run --rm -it [image_name] sh --args +``` + +#### file management + +- Using `podman exec` to manage file +- Use ACL package `setfacl` +- `--cap-add=DAC_READ_SEARCH` option allows to read all file without permission to backup (for kopia) + +### Quadlet and systemd + +#### Register the secret on podman secret + +- Using `edit_secret.sh` and `extract_secret.sh` +- Inject secret value to `podman secret` or `/etc/secrets/$UID` + +```bash +# /etc/secrets/$UID +# Before use sudo tee, make sure sudo doesn't need password. +# i.e. sudo ps -ef command execute before this command. +# Env file +extract_secret.sh ~/data/config/secrets/.secret.yaml -e "$value" > /run/user/$UID/tmp.env \ +&& sudo mv /run/user/$UID/tmp.env /etc/secrets/$UID/"$FILE_NAME" \ +&& sudo chown $UID:root /etc/secrets/$UID/"$FILE_NAME" \ +&& sudo chmod 400 /etc/secrets/$UID/"$FILE_NAME" +# Normal file +extract_secret.sh ~/data/config/secrets/.secret.yaml -f "$value" > /run/user/$UID/tmp.env \ +&& sudo mv /run/user/$UID/tmp.env /etc/secrets/$UID/"$FILE_NAME" \ +&& sudo chown $UID:root /etc/secrets/$UID/"$FILE_NAME" \ +&& sudo chmod 400 /etc/secrets/$UID/"$FILE_NAME" + +# Podman secret +# Podman doesn't supports .env file parsing, you have to enroll all values +extract_secret.sh ~/data/config/secrets/.secret.yaml -f "$value" | podman secret create "[$FILE_NAME|$ENV_NAME]" - +``` + +> ` podman secret inspect --showsecret --format '{{.SecretData}}' $secret_name` shows the content of secret + + +#### Define quadlet file + +- File: + - ~/data/config/containers/app/app.container + - ~/data/config/containers/app/app.pod + +Quadlet is to define of specification as `.quadlet` or `.container`. Quadlet uses these file to make `.service` file to combine container to systemd. Here is the example of `.container` file below. 
+ +```ini +# app.container +[Quadlet] +# Don't make a dependencies +DefaultDependencies=false + +[Unit] +Description=app +After=a.service +Wants=a.service +Requires=a.service + +[Service] +ExecStartPre=%h/data/config/scripts/wait-for-it.sh -h 192.168.10.1 -p 8080 -t 20 + +[Container] +# Pod=app.pod +Image=localhost/app:1.0.0 + +ContainerName=app + +PublishPort=2080:80/tcp +PublishPort=2443:443/tcp + +AddHost=app.service.internal:host-gateway + +Volume=%h/data/containers/app:/home/app:rw + +Environment="ENV1=ENV1" + +Secret=ENV_NAME,type=env +Secret=app.file,target=/path/of/secret/file/name + +# podman run [options] [image] example --config exconfig +Exec=example --config exconfig + +# If you want to change Entrypoint itself, use +Entrypoint=sh -c 'command' + +# For Diun +Label=diun.enable=true +# For Diun to track repository new version +Label=diun.watch_repo=true +# For Diun, and it needs `diun.yml` configuration +Label=diun.regopt=container-source + + +[Install] +WantedBy=default.target +``` + +```ini +# app.pod +[Quadlet] +# Don't make a dependencies +DefaultDependencies=false +[Pod] +Name=app + +PublishPort=2080:80/tcp +``` +#### Create systemd `.service` file + +```bash +# linger has to be activated +mkdir -p ~/.config/containers/systemd + +ln -s ~/data/config/containers/app/app.container ~/.config/containers/systemd/app.container + +# This command makes ~/.config/systemd/user/my-app.service +systemctl --user daemon-reload +``` + +#### Enable and start service + +```bash +systemctl --user enable app.service +systemctl --user start app.service +``` + + +--- + +## Following goal + +### Health check + +```ini +# i.e. 
caddy +# Podman [Container] section + +[Container] +# Health check configuration +# Health check command +HealthcheckCommand=curl -f http://localhost/ || exit 1 +# Health check interval +HealthcheckInterval=30s +# the time to wait for health check +HealthcheckTimeout=5s +# the number to try to health check +HealthcheckRetries=3 +# the time to wait to start first health check +HealthcheckStartPeriod=15s + +# override.conf [Service] section +[Service] +# Restart, if it is not healthy +Restart=on-failure +``` \ No newline at end of file diff --git a/docs/archives/2025-12/03_common/03_04_crowdsec.md b/docs/archives/2025-12/03_common/03_04_crowdsec.md new file mode 100644 index 0000000..a6bef9b --- /dev/null +++ b/docs/archives/2025-12/03_common/03_04_crowdsec.md @@ -0,0 +1,321 @@ +Tags: #common, #configuration, #network, #security + +## CrowdSec + +CrowdSec is a free, open-source IPS (Intrusion Prevention System). It has a distributed architecture. When an agent detects malicious IPs from logs, it reports the information to the LAPI. This information is shared with CrowdSec's central server anonymously and is spread across the world. Additionally, the bouncer receives a blacklist from the LAPI and, when malicious IPs approach the server, the bouncer blocks them. + + +### Collection + +#### Parser + +It defines how to organize the raw logs into the parsed log that a scenario can understand. It works on the agent, and the parsed log is transferred to the LAPI to decide. +#### Scenario + +It defines how to analyze the malicious attack from the parsed log from the Agent. The LAPI decides what to do for a malicious attack, and transfers the result to the Bouncer to block. + +### Agent + +Agent is the detector in each server. They analyze the logs. When they find malicious approaches, or abnormal and harmful actions, they report the information to the LAPI (Local API). It analyzes the log following `Parsers`. + +### LAPI + +LAPI server is a local central collector and reporter of malicious attack information.
It decides what to apply for traffic following `Scenarios`. In this homelab it is located in OPNsense, because the firewall is the central gateway of the home network. When an agent reports threats to the LAPI server, the LAPI decides whether or not to block them, and reports to the central CrowdSec server. This information will be spread to all CrowdSec users in the world. + +### Bouncer + +When the LAPI decides to block some IPs, it creates a blacklist and gives it to the bouncer. The bouncer blocks and bans IPs depending on the LAPI's blacklist. `Caddy-auth (L7)` and `OPNsense (L4)` will act as bouncers. The most important thing is that the LAPI only decides what to ban, while the Bouncer enforces the ban in practice. + +## CrowdSec in OPNsense + +OPNsense supports CrowdSec with a community plugin. This is not a basic function, so when you want to use it in OPNsense you should install the community plugin. + +### Installation + +- System:Firmware:Plugins + - \[\*\] Show community plugins + - `os-crowdsec` + +### General configuration + +Services:CrowdSec:Settings + - \[\*\] Enable Log Processor (IDS) + - \[\*\] Enable LAPI + - \[\*\] Enable Remediation Component (IPS) + - \[ \] Manual LAPI configuration + - LAPI listen address: \[opnsense IP: 192.168.10.1\] + - LAPI listen port: 8080 + - \[\*\] Create blocklist rules + - `Apply` + +> The Enable Remediation Component (IPS) option means the Bouncer will be integrated with OPNsense's firewall rules + +> To set the LAPI configuration manually, `Manual LAPI configuration` is needed. + +### Machines configuration + +#### OPNsense console + +```sh +# 8) shell +cscli machines add [server_name] -a -f - +# --- +# Machine 'server_name' successfully added to the local API.
+# url: http://192.168.10.1:8080 +# login: [server_name] +# password: (API key) +# --- +``` + +#### Each server + +```ini +# /etc/crowdsec/local_api_credentials.yaml +url: http://192.168.10.1:8080 +login: [server_name] +password: (API key) +# /etc/crowdsec/acquis.d/sshd.yaml +--- +source: journalctl +journalctl_filter: + - "_SYSTEMD_UNIT=ssh.service" labels: + type: sshd + # origin value is syslog +--- + +``` + +```bash +sudo systemctl restart crowdsec +``` + +#### OPNsense web UI + +- Services:CrowdSec:Machines +- checking the lists of server +- Main CLI commands of CrowdSec + +```sh +# View active decisions(Ban list) +cscli decisions list + +# View alerts +cscli alerts list + +# Check connected machines(agents) +cscli machines list +``` + +--- +## TLS on crowdsec communication + +TLS can be applied in CrowdSec communication when internal PKI are set (BIND, Step-CA, ACME-Client in OPNsense). CrowdSec communication can contain sensitive information such as API key, it is recommended to set TLS. + +### General configuration + +- Services:CrowdSec:Settings + - \[\*\] Manual LAPI configuration + +### DNS setting + +### Add new domain in BIND + +Following [here](../06_network/06_03_net_bind.md). +- net server +- file: ~/data/containers/bind/lib/db.ilnmors.internal + +```text +... +crowdsec IN CNAME opnsense.ilnmors.internal. +... +``` + +### ACME setting + +#### Use opnsense certificates + +- Services:ACME Client:Certificates - Certificates + - Alt name: crowdsec.ilnmors.internal + +#### Certificate and key file + +- opnsense +- file: + - /var/etc/acme-client/cert-home/\[ramdom_string\]/opnsense.ilnmors.internal/ + - fullchain.cer + - opnsense.ilnmors.internal.key + > There is `opnsense.ilnmors.internal.cer` file. However, when client verify the certificate, it verify the intermediate CA's certificate and root CA's certificate both. 
Therefore, in this case, use fullchain.cer + - /usr/local/etc/ssl/cert.pem + +### Add TLS setting in LAPI configuration + +- opnsense +- file: + - /usr/local/etc/crowdsec/config.yaml + - /usr/local/etc/crowdsec/local_api_credentials.yaml + - /usr/local/etc/crowdsec/bouncer/crowdsec-firewall-bouncer.yaml + +```yaml +# config.yaml +api: + client: + # ...client configurations + server: + enable: true + listen_uri: 192.168.10.1:8080 # actual IP address is required. (Do not use FQDN in here, the service listener is binded on network interface) + # ... server configurations + tls: + cert_file: /var/etc/acme-client/cert-home/[random_string]/opnsense.ilnmors.internal/fullchain.cer + key_file: /var/etc/acme-client/cert-home/[ramdom_string]/opnsense.ilnmors.internal/opnsense.ilnmors.internal.key +# random string is generated by opnsense itself. In real environment, check it first. +``` + +```yaml +# local_api_credentials.yaml +... +url: https://crowdsec.ilnmors.internal:8080/ +``` + +```yaml +# crowdsec-firewall-bouncer.yaml +... +api_url: https://crowdsec.ilnmors.internal:8080/ +... +``` + +```sh +service crowdsec restart +``` + +#### CrowdSec LAPI restart setting + +- opnsense +- file: /usr/local/etc/cron.d/crowdsec + +```text +#minute hour mday month wday who command +0 3 * * * root /usr/local/libexec/crowdsec/upgrade-hub +30 3 * * * root /usr/sbin/service crowdsec reload # Add this line to reload every day. +``` + +### Each server configuration + +#### Server's certificate trust + +- each server +- file: /usr/local/share/ca-certificates/root_ca.crt + +```bash +sudo update-ca-certificates +``` + +#### CrowdSec Agent setting + +```ini +# /etc/crowdsec/local_api_credentials.yaml +url: https://crowdsec.ilnmors.internal:8080 +login: [server_name] +password: (API key) +``` + +```bash +sudo systemctl restart crowdsec +``` + +--- + +## Crowdsec in Caddy-auth + +Caddy supports bouncer. Also, it can be work as agent via auth server where Caddy-auth is located. 
+ +### Bouncer configuration + +> Caddy has to contain local CA's root_ca.crt (Step-CA). - containerfile already includes root_ca.crt in container when it was built + +#### OPNsense console + +```sh +cscli bouncer add caddy-auth + +> API key for 'caddy-auth': + +> Secret_value + +> Please keep this key since you will not be able to retrieve it! +> +> +cscli collections install crowdsecurity/caddy +``` + +#### Caddyfile + +```ini +# ... +# Crowdsec bouncer setting +{ + crowdsec { + # CrowdSec LAPI + api_url https://crowdsec.ilnmors.internal:8080 + api_key "{env.CADDY_CROWDSEC_KEY}" + } +} +# ... +``` + +- `podman exec caddy-auth caddy reload --config /etc/caddy/Caddyfile` +### Agent configuration + +#### auth sv + +- File: + - /etc/crowdsec/acquis.yaml + - ~/data/container/caddy_auth/data/access.log + +```yaml +# /etc/crowdsec/acquis.d/caddy-auth.yaml +filenames: + - /var/log/caddy.log +labels: + type: caddy +``` + +```ini +# Caddyfile +# ... +(crowdsec_log) { + log { + output file /data/access.log { + roll_size 10mb + roll_keep 5 + } + } +} +# ... 
+caddy.ilnmors.com { + import crowdsec_log + route { + crowdsec + root * /usr/share/caddy + file_server + } +} +``` + +```bash +podman exec caddy-auth caddy reload --config /etc/caddy/Caddyfile +sudo mkdir /etc/crowdsec/acquis.d +sudo nano /etc/crowdsec/acquis.d/caddy-auth.yaml +ln -s /home/auth/data/containers/caddy-auth/data/access.log /var/log/caddy.log +# install collection(senario + parser) crowdsecurity/caddy +sudo cscli collections install crowdsecurity/caddy +sudo systemctl restart crowdsec +sudo cscli metrics +``` + +--- +## PLAN + +- distributed bouncer + - [x] Caddy bouncer + +- [ ] dash board \ No newline at end of file diff --git a/docs/archives/2025-12/03_common/03_05_redis.md b/docs/archives/2025-12/03_common/03_05_redis.md new file mode 100644 index 0000000..a12ff83 --- /dev/null +++ b/docs/archives/2025-12/03_common/03_05_redis.md @@ -0,0 +1,107 @@ +Tags: #os, #configuration, #virtualization, #container, #database, #cache + +## Redis + +Redis is the cache database. It doesn't use SQL, it is just key-value data storage. + +### Redis in Pod + +Redis is combined with master services in the pod. It allows master services and Redis can communication in localhost. It doesn't need TLS, password, or even ACL itself. + +There is the list of services which use Redis in Pod. +#### Preperation + +##### Create directory for container + +```bash +mkdir -p ~/data/containers/app/redis +setfacl -m d:g::0 ~/data/containers/app/redis +setfacl -m d:o::0 ~/data/containers/app/redis +setfacl -m u: +$UID:rwx ~/data/containers/app/redis +setfacl -m u:100998:rwx ~/data/containers/app/redis +setfacl -d -m u:$UID:rwx ~/data/containers/app/redis +setfacl -d -m u:100998:rwx ~/data/containers/app/redis +``` + +>Redis container executes as 999:999(redis:redis) permission in container. It is mapped host's 100998. 
Therefore, directories have to have ACL via `setfacl` + +### Podman Image + +```bash +podman pull redis:8.2.2 # Do not use latest version to management +``` + +### Quadlet + +- File: + - ~/data/config/containers/app/app.pod + - ~/data/config/containers/app/app-redis.container + - ~/data/config/containers/app/app.container + +```ini +# ~/data/config/containers/app/app.pod +[Quadlet] +DefaultDependencies=false + +[Pod] +PodName=app + +# web port +PublishPort=9080:9000/tcp +# LDAP port +#PublishPort=[set_port]:3389 +# Prometheus Port +#PublishPort=[set_port]:9300 +``` + +```ini +# ~/data/config/containers/app/app-redis.container + +[Quadlet] +DefaultDependencies=false + +[Unit] +Description=app - redis +Before=app.service + +[Container] +Pod=app.pod + +Image=redis:8.2.2 + +ContainerName=app-redis + +# Port 6379 + +Volume=%h/data/containers/app/redis:/data:rw + +Environment="TZ=Asia/Seoul" + +[Install] +WantedBy=default.target +``` + +```ini +# ~/data/config/containers/app/app.container +[Quadlet] +DefaultDependencies=false + +[Unit] +Description=app + +After=app-redis.service +Requires=app-redis.service + +[Container] +Pod=app.pod + +Image=app:version + +Volume=%h/data/containers/app/app:/data:rw + +Environment="TZ=Asia/Seoul" + +[Install] +WantedBy=default.target +``` \ No newline at end of file diff --git a/docs/archives/2025-12/03_common/03_06_btrfs.md b/docs/archives/2025-12/03_common/03_06_btrfs.md new file mode 100644 index 0000000..313eceb --- /dev/null +++ b/docs/archives/2025-12/03_common/03_06_btrfs.md @@ -0,0 +1,130 @@ +Tags: #common, #configuration, #os, #filesystem + +## BTRFS usage + +### Setting + +```bash +# All hdd needs partition, but it has no filesystem. To make a partition use `fdisk`. 
+sudo fdisk "$DEVICE_PATH" +> n # create the new partition +> 1 # Partition number +> Default # First Sector +> Default # Last Sector +> w # write the new partition + +# check btrfs-progs package +sudo apt list --installed | grep btrfs-progs +# btrfs-progs/stable,now 6.14-1 amd64 [installed] + + +sudo mkfs.btrfs -d raid10 -m raid10 -L hdd /dev/sda1 /dev/sdb1 /dev/sdc1 /dev/sdd1 +# These are partition device files +``` + +### Snapshot + +Usually, the Read-Only snapshot is used. + +```bash +# Create snapshot_subvolume +sudo btrfs subvolume create /home/app/hdd/data +# Create snapshot_directory +mkdir /home/app/hdd/.snapshot +# Create snapshot +sudo btrfs subvolume snapshot -r /home/app/hdd/data /home/app/hdd/.snapshot/data_[date] + +# Rollback (file) +cp /home/app/hdd/.snapshot/data_[date]/file /home/app/hdd/data/ + +# Rollback (volume) +# Move the current subvolume aside +mv /home/app/hdd/data /home/app/hdd/data_fail +sudo btrfs subvolume snapshot /home/app/hdd/.snapshot/data_[date] /home/app/hdd/data +# If the data was successfully recovered +sudo btrfs subvolume delete /home/app/hdd/data_fail +``` + +### Replace HDD to a new system + +btrfs has its own volume management data and filesystem in the hdd, as metadata. It has no dependency on a specific OS or hardware, but it can work on every Linux system which supports btrfs.
+ +```bash +# unmount filesystem +sudo umount /home/app/hdd + +# turn off the system, and remove all hdd +# add all disk to new hardware(server), and turn on + +# scan btrfs +sudo btrfs device scan +sudo btrfs filesystem show + +# mount +sudo nano /etc/fstab +# LABEL=hdd /home/app/hdd btrfs defaults,compress=zstd,autodefrag 0 0 +sudo mount -a +``` + +### add extra HDD + +```bash +# Add HDD and check the device file +lsblk + +# Add hdd to btrfs RAID +sudo btrfs device add /dev/xxx /dev/xxy /home/app/hdd + +# Expand the volume +sudo btrfs balance start /home/app/hdd +``` + +### change HDD + +#### btrfs replace + +When sata slot is enough to connect new HDD and old HDD, you can use this way. + +```bash +# check devid of old HDD +sudo btrfs device stats /home/app/hdd # you can check IO error +sudo btrfs filesystem show /home/app/hdd + +# check the new disk's path +lsblk +# /dev/xxx + +# Replace +sudo btrfs replace start [old HDD\'s devid] /dev/xxx /home/app/hdd + +# check +sudo btrfs replace status /home/app/hdd +``` + +#### btrfs device add and delete + +When sata slot is not enough to connect new HDD and old HDD simultaneously, you can use this way. 
+ +```bash +# Check the HDD which will change +sudo btrfs device stats /home/app/hdd # you can check IO error +sudo btrfs filesystem show /home/app/hdd # check disk's devid + +# turn off the system and change the broken HDD to the new HDD +# Make sure the hardware supports Hot-Swap, if it didn't support, you would have to turn off your system fully when you are changing the HDD + +# if the system couldn't mount automatically, mount it manually +# sudo mount -o degraded /dev/xxx /home/app/hdd # /dev/xxx is one of ordinary HDD + +lsblk +# /dev/xxy # new HDD device file path + +# Add new HDD +sudo btrfs device add /dev/xxy /home/app/hdd + +# Delete broken HDD +sudo btrfs device delete missing /home/app/hdd + +# To major balance +sudo btrfs balance start /home/app/hdd +``` \ No newline at end of file diff --git a/docs/archives/2025-12/04_hypervisor/04_01_hypervisor.md b/docs/archives/2025-12/04_hypervisor/04_01_hypervisor.md new file mode 100644 index 0000000..3a4e2ef --- /dev/null +++ b/docs/archives/2025-12/04_hypervisor/04_01_hypervisor.md @@ -0,0 +1,277 @@ +Tags: #os, #hypervisor, #configuration, #virtualization + +## Hypervisor configuration + +### Debian installation and configuration + +- Following [debian configuration](../03_common/03_01_debian_configuration). +- Following [iptables configuration](../03_common/03_02_iptables.md). +- Following [crowdsec](../03_common/03_04_crowdsec.md) (After OPNsense LAPI configuration). + +### QEMU/KVM + +#### KVM + +KVM is a virtualization engine to share and to allocate physical resource to VM. It works on Linux kernel. + +#### QEMU + +QEMU is an emulator. It works with KVM frequently, and it emulates hardware or other OS on hypervisor. + +### Libvirtd + +Libvirtd is a daemon to use libvirt API and command to manage virtualization engines and emulators including QEMU/KVM. It supports not only QEMU/KVM but also XEN, VMWare or etc. 
+ +#### Libvirtd configuration + +##### LIBVIRT_DEFAULT_URI setting + +Basically, KVM and libvirtd require root permission to access low-level device and configuration to implement virtual machines. However, it is possible to use libvirtd command without root permission; `sudo`. When the common user has its group as `kvm`, and `libvirt`, and export environment variable `export LIBVIRT_DEFAULT_URI='qemu:///system'` on `~/.bashrc`. + +```bash +# ~/.bashrc +# If the file doesn't exist, execute the commands below +# sudo cp /etc/skel/.bashrc /home/vmm +# sudo cp /etc/skel/.profile /home/vmm +# sudo chown vmm:svadmins .bashrc .profile + +# add the line below in .bashrc +echo "export LIBVIRT_DEFAULT_URI='qemu:///system'" >> ~/.bashrc +source ~/.bashrc +``` +##### Directory + +- ~/data/config/{scripts,server,services,vms} +- ~/data/config/vms/{networks,storages} + +#### virsh + +##### VM management + +- virsh list \[--all\]: Print vm list +- virsh start \[vm_name\]: Start vm +- virsh shutdown \[vm_name\]: Send ACPI signal to vm (shutdown) +- virsh destroy \[vm_name\]: Stop vm forcefully +- virsh reboot \[vm_name\]: Reboot vm +- virsh autostart \[--disable\]: Register vm autostart +- qemu-img create -f \[format\] /path \[volume\]: Create the virtual disk file + +##### VM configuration + +- virsh edit \[vm_name\]: Open vm template file (xml) and edit +- virsh dumpxml \[vm_name\] > \[file\].xml: Backup vm template file +- virsh define \[file\].xml: Create vm from backup file +- virsh undefine \[vm_name\] \[--nvram\]: Remove vm +> When vm is set as uefi, `--nvram` option is required. 
+- virsh domrename \[old_vm_name\] \[new_vm_name\]: Rename vm + +##### VM access + +- virsh console \[vm_name\]: Access vm via serial console + +##### Virtual network + +- virsh net-define \[file\].xml: Create virtual network from xml file +- virsh net-start \[network_name\]: Start virtual network +- virsh net-autostart \[network_name\]: Register virtual network auto start +##### Snapshot + +- virsh snapshot-create-as --domain \[vm_name\] --name \[snapshot_name\] --description \[description\] --disk-only --atomic (for qcow2 format) +- virsh snapshot-list \[vm_name\] + +##### VM pool management + +- virsh pool-define ~/data/config/vms/storages/vm-images.xml +- virsh pool-start vm-images +- virsh pool-autostart vm-images +- virsh pool-refresh vm-images - After put the images in the pool +- virsh vol-list vm-images +#### virt-install + +virt-install command helps to define vm template file with various options. + +```bash +virt-install \ +[--import] \ # without booting images, boot with disk. [--cdrom|--location] is not available +--boot uefi \ # activate secure booting +--name vm_name \ +--os-variant [os] \ # set optimized setting for each OS +--vcpu [num] \ +--memory [num] \ # memory size(unit: MiB) +--location [path] \ # or cdrom, add booting images, location is for serial booting +# --disk path=[path],format=[raw|qcow2],discard=unmap \ # discard=unmap: ssd emulation +# --disk vol=vm-images/my-existing-disk.qcow2 \ # To use pool's volume +--disk pool=vms-images,size=[num],format=qcow2,discard=unmap \ # Create qcow2 image with pool configuration +--network network=[network_name],model=virtio,mac=[mac_address] \ +--graphics none \ +--console pty,target_type=serial \ +--extra-args "console=ttyS0,115200" # Define the console configuration +``` + +#### virsh network configuration + +Libvirt's network is defined and saved as a XML. There is setting below. It can be compatible with host's `open vswitch` interfaces. 
+ +- Make xml files + - ~/data/config/vms/networks/ovs-wan-net.xml + - ~/data/config/vms/networks/ovs-lan-net.xml + - `chmod 600 ~/data/config/vms/networks/*` + +```xml + + + + ovs-wan-net + + + + +``` + +```xml + + + + ovs-lan-net + + + + + + + + + + + + + + + +``` + +- Define XML files + +```bash +virsh net-define ~/data/config/vms/networks/ovs-wan-net.xml +virsh net-define ~/data/config/vms/networks/ovs-lan-net.xml +virsh net-start ovs-wan-net +virsh net-start ovs-lan-net +virsh net-autostart ovs-wan-net +virsh net-autostart ovs-lan-net +``` + +#### virsh storage pool configuration + +- Make a storage pool file + - ~/data/config/vms/storages/vm-images.xml + - `chmod 600 ~/data/config/vms/storages/*` + +```xml + + + + vm-images + + /var/lib/libvirt/images + + +``` + +- Define XML files + +```bash +virsh pool-define ~/data/config/vms/storages/vm-images.xml +virsh pool-start vm-images +virsh pool-autostart vm-images +``` + +#### ACL setting + +```bash +# To manage qcow2 file without non-root user +sudo setfacl -R -m u:vmm:rwx /var/lib/libvirt/images +sudo setfacl -d -m u:vmm:rwx /var/lib/libvirt/images +``` + +### Backup configuration + +```bash +cp /etc/network/interfaces /etc/default/grub /etc/modprobe.d/vfio.conf ~/data/config/server + +sudo cp /etc/iptables/rules.v4 ~/data/config/server +``` + +### Systemd + +#### Linger configuration + +```bash +# Check linger configuration +loginctl show-user vmm +# Linger=yes +# It is necessary to use the services after session is done +``` + +- opnsense.service + +```ini +# ~/data/config/services/opnsense.service +# ~/.config/systemd/user/opnsense.service +[Unit] +Description=opnsense Auto Booting +After=network-online.target +Wants=network-online.target + +[Service] +Type=oneshot + +# Maintain status as active +RemainAfterExit=yes + +# Run the service +ExecStart=/usr/bin/virsh -c qemu:///system start opnsense + +# Stop the service +ExecStop=/usr/bin/virsh -c qemu:///system shutdown opnsense + +[Install] 
+WantedBy=default.target +``` + +```bash +mkdir -p ~/.config/systemd/user +chmod -R 700 ~/.config +ln -s ~/data/config/services/opnsense.service ~/.config/systemd/user/opnsense.service + +systemctl --user daemon-reload +``` + +- net.service + +```ini +# ~/data/config/services/net.service +# ~/.config/systemd/user/net.service +[Unit] +Description=net Auto Booting +After=opnsense.service +Requires=opnsense.service + +[Service] +Type=oneshot + +# Maintain status as active +RemainAfterExit=yes + +# 8080 is CrowdSec API +ExecStartPre=%h/data/config/scripts/wait-for-it.sh -h 192.168.10.1 -p 8080 -t 20 + +# Run the service +ExecStart=/usr/bin/virsh -c qemu:///system start net + +# Stop the service +ExecStop=/usr/bin/virsh -c qemu:///system shutdown net + +[Install] +WantedBy=default.target +``` diff --git a/docs/archives/2025-12/05_firewall/05_01_opnsense_vm.md b/docs/archives/2025-12/05_firewall/05_01_opnsense_vm.md new file mode 100644 index 0000000..6d3d0a8 --- /dev/null +++ b/docs/archives/2025-12/05_firewall/05_01_opnsense_vm.md @@ -0,0 +1,130 @@ +Tags: #os, #firewall, #configuration, #network, #virtualization + + +## Preparation + +### Create VM template + +- ~/data/config/scripts/opnsense.sh + +```bash +virt-install \ +--import \ # Start without CD-ROM or location - This is for Serial installation. +--boot uefi \ # Supports secure booting +--name opnsense \ # VM name +--os-variant freebsd14.2 \ # Choose kind of OS +--vcpus 2 \ # Set number of vcpu (It means core of pcpu) +--memory 4096 \ # Set the memory volume (Default: MiB) +--disk path=/var/lib/libvirt/images/OPNs +ense-25.7-serial-amd64.img,format=raw \ # Installation image file, format raw +--disk pool=vm-images,size=72,format=qcow2,discard=unmap \ # The target disk to install opnsense, it is on SSD; discard=unmap option will emulate SSD. 
+--network network=ovs-wan-net,model=virtio,mac=0A:49:6E:4D:00:00 \ # Use pre-designated network and specific MAC address +--network network=ovs-lan-net,portgroup=vlan-trunk,model=virtio,mac=0A:49:6E:4D:00:01 \ +--graphics none \ # Don't use any graphic +--console pty,target_type=serial # Use console serial +# After enter this command, then the console start automatically +# Remove all annotation before you make the sh file. +``` + +### OPNsense installation + +```sh +# Interface setting +Press any key to start the manual interface assignment: [enter] +LAGGs: N +VLANs: N +Enter the WAN interface name or 'a' for auto-detection: vtnet0 +Enter the LAN interface name or 'a' for auto-detection: vtnet1 +Enter the Optional interface 1 name or 'a' for auto-detection: [blank] +Do you want to proceed? : Y + +# Login as an installer account +login: installer +password: opnsense + +# installing +Continue with default keymap +Install (ZFS) +stripe +[*] vtbd1: yes +Complete Install: Halt Now +``` + +### Modify VM template + +- virsh edit + +```bash +virsh edit opnsense +``` + +- Edit template file + +```xml + +... + + + 2048 + + + + +... 
+ + + + + +``` + +- Save template file + +```bash +virsh dumpxml opnsense > ~/data/config/vms/dumps/opnsense.xml +``` + +- opnsense.service + +```ini +# ~/data/config/services/opnsense.service +# ~/.config/systemd/user/opnsense.service +[Unit] +Description=opnsense Auto Booting +After=network-online.target +Wants=network-online.target + +[Service] +Type=oneshot + +# Maintain status as active +RemainAfterExit=yes + +# Run the service +ExecStart=/usr/bin/virsh -c qemu:///system start opnsense + +# Stop the service +ExecStop=/usr/bin/virsh -c qemu:///system shutdown opnsense + +[Install] +WantedBy=default.target +``` + +```bash +mkdir -p ~/.config/systemd/user + +chmod -R 700 ~/.config + +ln -s ~/data/config/services/opnsense.service ~/.config/systemd/user/opnsense.service + +systemctl --user daemon-reload +systemctl --user enable opnsense.service +systemctl --user start opnsense.service +``` \ No newline at end of file diff --git a/docs/archives/2025-12/05_firewall/05_02_opnsense_general.md b/docs/archives/2025-12/05_firewall/05_02_opnsense_general.md new file mode 100644 index 0000000..0a6a3b8 --- /dev/null +++ b/docs/archives/2025-12/05_firewall/05_02_opnsense_general.md @@ -0,0 +1,91 @@ +Tags: #os, #firewall, #configuration, #network + +## Web UI + +### Access + +- Browser: https://192.168.1.1 +- Login with root + +### Wizard setting + +- Hostname: opnsense +- Domain: ilnmors.internal +- Timezone: Asia/Seoul + > - System:Settings:General - Time Zone +- DNS Server: 1.1.1.2 + > Temporary DNS server, when Adguard home and BIND is building, the change it. 
+ > - System:Setting:General - DNS server +- \[ \] Override DNS +- \[ \] Enable Resolver + +- WAN + - type dhcp + - \[\*\]Block RFC1918 Private Networks + - \[\*\]Block bogon networks +- LAN + - \[ \] Configure DHCP server +- Change root password + +### User setting + +- System:Access:Users - \[+\] + - Name: opnsense + - Full name: opnsense management account + - E-mail: opnsense@ilnmors.internal + - Group: admins + +> When console access is needed, root account is necessary. Only root account can access console program. However, when using WebUI it is better to use separated account for security. + +### system update and pkg + +#### Updates + +- System:Firmware:Updates + +#### Plugins + +- System:Firmware:Plugins +- \[\*\] Community plugins + - os-crowdsec + - os-acme-client + - os-qemu-guest-agent + >after install and restart plugin check with CLI `virsh qemu-agent-command opnsense '{"execute":"guest-get-osinfo"}'` on hypervisor + - os-telegraf + - os-sftp-backup + + +## Disable unused basic function + +In this homelab project, Kea DHCP and AdGuard home, BIND will be used as DHCP server and DNS server. Therefore, it is necessary to disable Dnsmasq, ISC DHCP and Unbound which are used as basic function of DHCP and DNS in OPNsense. + +- Services:Dnsmasq DNS&DHCP:General + - \[ \] Enable +- `Apply` +- Services:ISC DHCPv4 + - \[ \] Enable +- `Apply` +- Services:ISC DHCPv6 + - \[ \] Enable +- `Apply` +- Services:Unbound DNS:General + - \[ \] Enable +- `Apply` +## Backup + +### ZFS Snapshot + +Before major updating, it is important to make a ZFS snapshot. + +- System:Snapshots - \[+\] + - Name: \[Date\] + - `Save` + +### Setting backup + +OPNsense is managed with configuration file. You can download the configuration file as XML format. 
+ +- System:Configuration:Backups + - Download: `Download configuration` + - Restore: Select file > `Restore configuration` + - sftp: URL / SSH private key \ No newline at end of file diff --git a/docs/archives/2025-12/05_firewall/05_03_opnsense_interface.md b/docs/archives/2025-12/05_firewall/05_03_opnsense_interface.md new file mode 100644 index 0000000..f6e73c5 --- /dev/null +++ b/docs/archives/2025-12/05_firewall/05_03_opnsense_interface.md @@ -0,0 +1,159 @@ +Tags: #os, #firewall, #configuration, #network + +## Interface configuration + +### VLAN setting + +- Interfaces:Devices:VLAN - \[+\] + +| Device | Parent | Tag | Description | +| :--------: | :----: | :-: | :---------: | +| vlan0.1.10 | vtnet1 | 10 | Server | + +> The device name is reserved in Web UI; Name of VLAN device must start with `vlan0` or `qinq0`. In this project, name of vlan device would be `vlan0.[interface_num].[tag_num]` + +- `Apply` + +### VPN setting + +- VPN:WireGuard + - \[\*\] Enable WireGuard +- VPN:WireGuard:Instance - \[+\] + +| Name | Listen Port | Tunnel Address | +| :-------: | :---------: | :------------: | +| WG_SERVER | 11290 | 10.10.10.1/24 | +| WG_USER | 11291 | 10.10.1.1/24 | + +- VPN:WireGuard:PeerGenerate + +| Instance | End point | Name | Address | Pre-shared Key | Allowed IP | Keepalive interval | DNS | +| :-------: | :---------------: | :-----: | :-----------: | :------------: | :------------: | :----------------: | :-----------: | +| WG_SERVER | ilnmors.com:11290 | console | 10.10.10.2/32 | Generate | 192.168.0.0/16 | 25 | 192.168.10.11 | +| WG_SERVER | ilnmors.com:11290 | phone | 10.10.10.3/32 | Generate | 192.168.0.0/16 | 25 | 192.168.10.11 | +| WG_SERVER | ilnmors.com:11290 | spare | 10.10.10.4/32 | Generate | 192.168.0.0/16 | 25 | 192.168.10.11 | + +> Set `PersistentKeepalive = 25` on peer, to avoid NAT timeout. + +> Press `Store and generate next` button after generate each row. + +> Before building net server (Private DNS), use 1.1.1.2 instead of 192.168.10.11. 
+ + +```ini +# console +[Interface] +PrivateKey = 2ACJCZV7Zg4fxCHTxfbmAggX/x06Nt05CC6gJvcrokI= +Address = 10.10.10.2/32 +DNS = 192.168.10.11 + +[Peer] +PublicKey = jqDJKe8pZSK8GXwBnrjBJiflYvJDB7GfgogLsSSdnA4= +PresharedKey = OCx3mohOp2Uipxda7ZJs+78Mjh3Lbf0UfvGZB4SULk4= +Endpoint = ilnmors.com:11290 +AllowedIPs = 192.168.0.0/16 +PersistentKeepalive = 25 + +# phone +[Interface] +PrivateKey = oIhpQn7yTEU3wH+eOVJuzcgo4t05MBqv+OmD1KC/Z38= +Address = 10.10.10.3/32 +DNS = 192.168.10.11 + +[Peer] +PublicKey = jqDJKe8pZSK8GXwBnrjBJiflYvJDB7GfgogLsSSdnA4= +PresharedKey = pqA5OtJy7lZHD+PzzqVEYa/iMHOOCNagAzCfTLoaDmw= +Endpoint = ilnmors.com:11290 +AllowedIPs = 192.168.0.0/16 +PersistentKeepalive = 25 + +# spare +[Interface] +PrivateKey = EIHZRuI5IDG3h8mC3ez4I1duYuVbe5UwgWnVxbg9uFk= +Address = 10.10.10.4/32 +DNS = 192.168.10.11 + +[Peer] +PublicKey = jqDJKe8pZSK8GXwBnrjBJiflYvJDB7GfgogLsSSdnA4= +PresharedKey = Xuf/G0HJ2S/zO0C4zUjjW2rD1b51yUYNhZaByEgfM6I= +Endpoint = ilnmors.com:11290 +AllowedIPs = 192.168.0.0/16 +PersistentKeepalive = 25 +``` + +- `Apply` + +### Interface assignment + +- Interface:Assignments + - vlan0.1.10 - VLAN10 - `Add` + - wg0 - WG_SERVER - `Add` + - wg1 - WG_USER - `Add` +- `Save` + +### Assigned interface setting + +- Interface:WAN + - \[\*\] Enable + - \[\*\] Block private + - \[\*\] Block bogon + - \[\*\] IPv4 DHCP +- `Save` +- Interface:LAN + - \[\*\] Enable + - IPv4 Static (IPv6 Configuration Type: None) + - 192.168.1.1/24 +- `Save` +- Interface:VLAN10 + - \[\*\] Enable + - IPv4 Static + - 192.168.10.1/24 +- Interface:WG_SERVER + - \[\*\] Enable +- `Save` +- Interface:WG_USER + - \[\*\] Enable +- `Save` +- `Apply change` + +### Web UI itself settings + +- System:Settings:Administration + - Web GUI + - Listen Interfaces: LAN, VLAN10 +- `Save` +- Firewall:Settings:Advanced + - \[\*\] Disable anti-lockout +- `Save` + +## Aliases and group configuration + +### Aliases setting + +- Firewall:Aliases - \[+\] + +| Name | Type | Content | Description | +| 
:------------: | :-----: | :----------------------------------------------------------------------------------------------------: | :-----------------------------: | +| ports_vpn | port(s) | 11290,11291 | ports udp vpn | +| ports_web | port(s) | 80,443 | ports tcp web | +| ports_dhcp4 | port(s) | 67,68 | ports udp dhcp4 | +| ports_dns | port(s) | 53,443 | ports tcp/udp dns including DoH | +| ports_crowdsec | port(s) | 8080 | ports tcp crowdsec | +| hosts_console | host(s) | 192.168.1.11,10.10.10.2,10.10.10.3,10.10.10.4 | hosts console | +| hosts_server | host(s) | 192.168.10.10,192.168.10.11,192.168.10.12,192.168.10.13,192.168.10.14,10.10.10.2,10.10.10.3,10.10.10.4 | hosts servers | +| hosts_net | host(s) | 192.168.10.11 | hosts net | +| hosts_auth | host(s) | 192.168.10.12 | hosts auth | + + +- `Apply` + +### Group setting + +- Firewall:Groups - \[+\] + +| Name | Members | Description | +| :----: | :---------------: | :----------: | +| SERVER | VLAN10, WG_SERVER | Server group | +| USER | LAN, WG_USER | User group | + +- `Apply` \ No newline at end of file diff --git a/docs/archives/2025-12/05_firewall/05_04_opnsense_rules.md b/docs/archives/2025-12/05_firewall/05_04_opnsense_rules.md new file mode 100644 index 0000000..5e06bf8 --- /dev/null +++ b/docs/archives/2025-12/05_firewall/05_04_opnsense_rules.md @@ -0,0 +1,79 @@ +Tags: #os, #firewall, #configuration, #network, #security + +## OPNsense rules + +### NAT + +- Firewall:NAT:Outbound + - Mode: Automatic outbound NAT rule generation (no manual rules can be used) +- Firewall:NAT:Port Forward - \[+\] + +| Interface | TCP/IP version | Proto | Destination | Destination port | Redirect target IP | Redirect target port | Description | +| :-------: | :------------: | :---: | :---------: | :--------------: | :----------------: | :------------------: | :-----------------------------------------------: | +| WAN | IPv4 | TCP | WAN address | ports_web | hosts_auth | ports_web | allow wan clients to access web services from 
WAN | + +> Set after building main reverse proxy, all web packets from WAN are going to the reverse proxy. + +> Filter rule association's `Add associated filter rule` option will automatically generate the `Pass` rule on WAN interface for Port Forwarding rule. + +### Firewall rules + +OPNsense prioritizes rules in order of their sequence. The upper rule takes priority over the one below. Moreover, when the `Quick` option is enabled, OPNsense doesn't check the rules below. It means OPNsense applies only the first rule that matches (First match). Conversely, when the `Quick` option is disabled, OPNsense checks all rules before applying one to the packet (Last match). + +#### WAN interface + +- Firewall:Rules:WAN - \[+\] + +| Action | Quick | Interface | Direction | TCP/IP version | Proto | Source | Destination | Destination port | Description | +| :----: | :---: | :-------: | :-------: | :------------: | :---: | :----: | :---------: | :--------------: | :------------------------------------------------------------------------: | +| Pass | * | WAN | IN | IPv4 | UDP | * | WAN address | ports_vpn | allow vpn clients from WAN | +| Pass | * | WAN | IN | IPv4 | TCP | * | WAN address | ports_web | allow wan clients to access web services from WAN(Automatically generated) | + +- `Apply changes` + +#### SERVER interface + +- Firewall:Rules:SERVER - \[+\] + +| Action | Quick | Interface | Direction | TCP/IP version | Proto | Source | Destination | Destination port | Description | +| :----: | :---: | :-------: | :-------: | :------------: | :---: | :-----------: | :-----------: | :--------------: | :--------------------------------------------------------------------: | +| Pass | * | SERVER | IN | IPv4 | UDP | * | * | ports_dhcp4 | allow server clients to access DHCPv4 server from SERVER net | +| Pass | * | SERVER | IN | IPv4 | ICMP | SERVER net | This Firewall | - | allow server clients to access This Firewall with ICMP from SERVER net | +| Block | * | SERVER | IN | IPv4 | * | !hosts_server | * | * | block 
undesignated server clients to access any from SERVER net | +| Pass | * | SERVER | IN | IPv4 | UDP | SERVER net | This Firewall | NTP | allow server clients to access NTP from SERVER net | +| Pass | * | SERVER | IN | IPv4 | TCP | SERVER net | This Firewall | ports_crowdsec | allow server clients to access CrowdSec LAPI from SERVER net | +| Pass | * | SERVER | IN | IPv4 | TCP | hosts_console | This Firewall | ports_web | allow console to access This Firewall web GUI from SERVER net | +| Pass | * | SERVER | IN | IPv4 | * | hosts_console | USER net | * | allow console to access USER net from SERVER net | +| Block | * | SERVER | IN | IPv4 | TCP | SERVER net | This Firewall | * | block server clients to access This Firewall from SERVER net | +| Block | * | SERVER | IN | IPv4 | * | SERVER net | USER net | * | block server clients to access USER net from SERVER net | +| Pass | - | SERVER | IN | IPv4 | * | SERVER net | * | * | allow server clients to access WAN from SERVER net | + +- `Apply changes` + +> When you add a new server client, you must edit the alias `hosts_server` + +#### USER interface + +- Firewall:Rules:USER - \[+\] + +| Action | Quick | Interface | Direction | TCP/IP version | Proto | Source | Destination | Destination port | Description | +| :----: | :---: | :-------: | :-------: | :------------: | :-----: | :-----------: | :-----------: | :--------------: | :----------------------------------------------------------------: | +| Pass | * | USER | IN | IPv4 | UDP | * | * | ports_dhcp4 | allow user clients to access DHCPv4 server from USER net | +| Pass | * | USER | IN | IPv4 | ICMP | USER net | This Firewall | - | allow user clients to access This Firewall with ICMP from USER net | +| Pass | * | USER | IN | IPv4 | TCP/UDP | USER net | hosts_net | ports_dns | allow user clients to access DNS server from USER net | +| Pass | * | USER | IN | IPv4 | TCP | USER net | hosts_auth | ports_web | allow user clients to access reverse proxy from USER net | +| Pass | * | 
USER | IN | IPv4 | TCP | hosts_console | This Firewall | ports_web | allow console to access This Firewall web GUI from USER net | +| Pass | * | USER | IN | IPv4 | * | hosts_console | SERVER net | * | allow console to access SERVER net from USER net | +| Block | * | USER | IN | IPv4 | * | USER net | This Firewall | * | block user clients to access This Firewall from USER net | +| Block | * | USER | IN | IPv4 | * | USER net | SERVER net | * | block user clients to access SERVER net from USER net | +| Pass | - | USER | IN | IPv4 | * | USER net | * | * | allow user clients to access WAN from USER net | + +- `Apply changes` + +#### LAN interface + +OPNsense automatically generates all pass rules on the LAN interface. The USER group will be used instead of the LAN interface, so all LAN rules will be disabled after the USER interface rules are set. + +- Firewall:Rules:LAN + - Disable all auto generated rules +- `Apply changes` \ No newline at end of file diff --git a/docs/archives/2025-12/05_firewall/05_05_opnsense_suricata.md b/docs/archives/2025-12/05_firewall/05_05_opnsense_suricata.md new file mode 100644 index 0000000..7358d08 --- /dev/null +++ b/docs/archives/2025-12/05_firewall/05_05_opnsense_suricata.md @@ -0,0 +1,69 @@ +Tags: #os, #firewall, #configuration, #network, #security + +## Suricata + +Suricata is an IPS(Intrusion Prevention System)/IDS(Intrusion Detection System) released in 2010 by OISF to supersede Snort for NSM(Network Security Monitoring). This program supports multi-threading and inspects packets, and even the programs (such as exe or shell script) carried in them, based on rules. When Suricata finds a packet that matches one of its rules, it decides whether to `pass (with alert)` or `block` the packet. 
+ +### General setting in OPNsense + +#### Enable Suricata + +- Services:Intrusion Detection:Administration - Settings + - \[\*\] Enabled + - \[\*\] IPS mode + - \[\*\] Promiscuous mode (Scan all traffic even L2 traffics) + - Interfaces: Select ALL + - Pattern matcher: Hyperscan (Intel's opensource regex matching library) + +#### Rule set download and update + +- Services:Intrusion Detection:Administration - Download + +- Select the rule set below + - ET open/botcc: List of bot net C&C server + - ET open/compromised: List of known zombie PC + - ET open/drop: List of certain malicious traffic + - ET open/dshield: List of current activated malicious IP + - ET open/emerging-attack_response: List of response after attack success + - ET open/emerging-coinminer: List of coinminer malicious code + - ET open/emerging-current_event: List of latest attack pattern + - ET open/emerging-dns: List of malicious DNS query + - ET open/emerging-exploit: List of attack towards software vulnerability + - ET open/emerging-exploit_kit: List of automatic hacking tool + - ET open/emerging-ja3: List of malicious programs'(tools, or browsers) finger print + - ET open/emerging-malware: List of malware C&C server + - ET open/emerging-mobile_malware: List of mobile malware C&C server + - ET open/emerging-phishing: List of communication regard phishing + - ET open/emerging-policy: List of non-standard or malicious TLS version or http traffics + - ET open/emerging-scan: List of port scan or network scan traffic + - ET open/emerging-shellcode: List of malicious attack shell script + - ET open/emerging-sql: List of sql injection traffic + - ET open/emerging-user_agent: List of malicious bot user-agent + - ET open/emerging-web_client: List of attack towards web browser + - ET open/emerging-web_server: List of attack towards web server + - ET open/emerging-web_specific_apps: List of attack towards specific web application(like word press) + - ET open/emerging-CS_c2: List of Cobalt Strike hacking 
tool +- `Enable selected` +- `Download & Update Rules` + +> When you want to delete rule set, then select target rule and click `Disable selected` and `Download & Update Rules` + +- Services:Intrusion Detection:Schedule + - \[\*\] Enable update + +#### Policy of rule set + +Suricata rule set in OPNsense has basically `Alert` policy. Therefore, it is necessary to set rules as `Drop` manually. Except `ET open/emerging-ja3` and `ET open/emerging-policy`, it is good for security to drop all rule set. The reason why `ja3` and `policy` remain as `Alert` is they are not extremely dangerous, and they could drop normal packets frequently. + +- Services:Intrusion Detection:Policy - Policies - \[+\] + - Ruleset: All ruleset except `ja3` and `policy` + - Action: Alert, Drop + - New action: Drop + - `Save` +- `Apply` + +### Check the log + +- Services:Intrusion Detection:Administration - Alerts + +If suricata made the wrong action like block the normal packets or pass malicious packet, you can tune the ruleset. At the Alerts section, next to log there's Info column. When you click the pencil icon on info column, then you can change that rule's action in specific environment; source IP. It is a suppress. \ No newline at end of file diff --git a/docs/archives/2025-12/05_firewall/05_06_opnsense_acme.md b/docs/archives/2025-12/05_firewall/05_06_opnsense_acme.md new file mode 100644 index 0000000..ce50dae --- /dev/null +++ b/docs/archives/2025-12/05_firewall/05_06_opnsense_acme.md @@ -0,0 +1,133 @@ +Tags: #os, #firewall, #configuration, #network, #security + +## ACME client in OPNsense + +ACME client needs private CA(Step-CA) and BIND to issue private certificates for TLS. + +### Plugin package + +- os-acme-client + +### DNS records + +- Add new domain in BIND, Following [here](../06_network/06_03_net_bind.md). + - net server + - file: ~/data/containers/bind/lib/db.ilnmors.internal + ```text + ... + opnsense IN A 192.168.10.1 + ... 
+ ``` + +### Trust setting (CA) + +- Add root CA's crt +- System:Settings:Trust:Authorities - \[+\] + - Method: Import an existing Certificate Authority + - Description: step-ca.ilnmors.internal + - Certificate Data: Content of root_ca.crt of Step-CA + - `Save` + +### ACME Client setting + +#### ACME account and server setting + +- Services:ACME Client:Settings + - \[\*\] Enable Plugin + - \[\*\] Auto Renewal +- `Apply` + +- Services:ACME Client:Accounts - \[+\] + - Name: acme.ilnmors.internal + > It is only referred to OPNsense itself. It is not the provisioner name. It doesn't support special characters such as `@` or `!`. + - Description: acme + - Custom CA URL: https://step-ca.ilnmors.internal:9000/acme/acme@ilnmors.internal/directory + + > `[CA_URL]:[port]/acme/[provisioner_name]/directory` + + - `Save` + - `Register account` and check Status column `OK (registered)` + +#### ACME challenge + +There is private authoritative DNS server in this homelab, therefore DNS-01 challenge will be used. + +- Services:ACME Client:Challenge Types - \[+\] + - Name: ilnmors.internal-dns-01-challenge + - Description: step-ca.ilnmors.internal dns-01 challenge + - Challenge Type: DNS-01 + - DNS service: nsupdate (RFC 2136) + - DNS Sleep Time: 10 + > If this option weren't set, ACME client send query towards public DNS. It is necessary to set this option to use private authoritative DNS server. After set value(second) ACME client send the query towards private authoritative DNS. + + - Server (FQDN): bind.ilnmors.internal 2253 + > DNS server domain and port. It uses \[space\] as separator. 
+ + - Secret Key: + ```text + key "acme-key" { + algorithm hmac-sha256; + secret "secret value"; + }; + ``` + > About key, following [here](../06_network/06_03_net_bind.md) + + - Zone: ilnmors.internal + +#### Certificates + +- Services:ACME Client:Certificates - \[+\] + - Common name: opnsense.ilnmors.internal + - Description: opnsense + - Alt Names: crowdsec.ilnmors.internal + - ACME account: acme.ilnmors.internal + - Challenge Type: ilnmors.internal-dns-01-challenge + - \[\*\] Auto Renewal + - `Save` + - Click `Issue or renew certificate` and check Last ACME status `OK` + +#### Automations + +##### Web UI restart automation configuration + +- Services:ACME Client:Automations - \[\+\] + - Name: opnsense-auto-restart-web-ui + - Description: restart opnsense web ui + - Run Command: Restart OPNsense web UI + +> Web UI account `opnsense` doesn't use `sh`, it makes hard ACME client run the command via sh. Just use crontab to reload the crowdsec service everyday in `/usr/local/etc/cron.d/crowdsec`. + +##### Example of remote automations + +OPNsense's ACME client can upload certificates to other server or run specific command when the certificates is issued or renewed. Here is the example to upload the certificate towards other server via sftp. + +- Services:ACME Client:Automations - \[+\] + - Name: test_acme + - Description: certificate upload test + - Run Command: Upload certificate via SFTP + - SFTP Host: net.ilnmors.internal + - Host Key: \[blank\] + > When it is not set, it automatically registers host key in `known_hosts`. 
+ - Username: net + - Identity Type: ed25519 + - Remote Path: /home/net/certificate_test + - `Show Identity` and copy the key value + ```text + ssh-ed25519 [key_value] root@opnsense.ilnmors.internal + ``` + - Add this key on target server's ~/.ssh/authorized_keys + - `Test Connection` + - `Save` + +- Services:ACME Client:Certificates - \[Edit Certificates\] + - Automations: test_acme + + +### OPNsense Web UI certificate setting + +- System:Settings:Administration + - SSL certificate: opnsense.ilnmors.internal (ACME Client) + +### Crowdsec TLS setting + +Following [here](../03_common/03_04_crowdsec.md) \ No newline at end of file diff --git a/docs/archives/2025-12/05_firewall/05_07_opnsense_kea.md b/docs/archives/2025-12/05_firewall/05_07_opnsense_kea.md new file mode 100644 index 0000000..dff6251 --- /dev/null +++ b/docs/archives/2025-12/05_firewall/05_07_opnsense_kea.md @@ -0,0 +1,40 @@ +Tags: #os, #firewall, #configuration, #network + +## Kea DHCP in OPNsense + +Kea DHCP is the next generation DHCP server of ISC(Internet Systems Consortium) to supersede ISC DHCP. Kea has more flexibility in configuration, and higher performance, and modern API than ISC DHCP. OPNsense also select Kea as the future DHCP server than ISC DHCP. Basically, Kea supports container(Docker or podman) but it doesn't use docker.io repository but cloudsmith.io repository. Moreover, Kea DHCP is separated into 3 module kea ddns, kea dhcp 4, kea dhcp 6 as containers. This fact makes network environment (especially when the rootless podman network and DHCP relay are considered.) complex. Therefore, even though Kea in OPNsense doesn't support DDNS module, in this home lab Kea DHCP will be located in OPNsense. 
+ +### Kea DHCP setting + +- Services:Kea DHCP:Kea DHCPv4 - Settings + - \[\*\] Enabled + - Interfaces: LAN, VLAN10 + - \[ \] Firewall rules (Manually created) +- Services:Kea DHCP:Kea DHCPv4 - Subnets - \[+\] + - \[ \] Match client-id (To match based on MAC) + - \[ \] Auto collect option data (To designate optional data manually) + +| Subnet | Description | Pool | Routers(gateway) | DNS | Domain name | +| :-------------: | :---------: | :---------------------------: | :--------------: | :-----------: | :--------------: | +| 192.168.1.0/24 | LAN | 192.168.1.100-192.168.1.254 | 192.168.1.1 | 192.168.10.11 | ilnmors.internal | +| 192.168.10.0/24 | VLAN10 | 192.168.10.254-192.168.10.254 | 192.168.10.1 | 192.168.10.11 | ilnmors.internal | + +> Before building net server (Private DNS), use 1.1.1.2 instead of 192.168.10.11. + +> The reason why VLAN10's pool has only 192.168.10.254 is that VLAN10 allows only reservation IPs. + +- Services:Kea DHCP:Kea DHCPv4 - Reservation + - Delete all rows. + - Import csv file: kea_dhcp_v4_reservation.csv + +```csv +ip_address,hw_address,hostname,description,option_data, +192.168.1.11,D8:E2:DF:FF:1B:D5,surface,console eth, +192.168.1.30,38:CA:84:94:5E:06,printer,printer, +192.168.10.12,0A:49:6E:4D:02:00,auth,auth, +192.168.10.13,0A:49:6E:4D:03:00,dev,dev, +192.168.10.14,0A:49:6E:4D:04:00,app,app, +``` +- `Apply` + +> vmm(Hypervisor), opnsense(firewall), net(Net server which has DNS in it) should use static IPs instead of DHCP reservations, because these servers are fundamental to building the network and DHCP itself. It is much more stable for the DNS server, hypervisor, and firewall themselves to use static IPs. 
\ No newline at end of file diff --git a/docs/archives/2025-12/06_network/06_01_net_vm.md b/docs/archives/2025-12/06_network/06_01_net_vm.md new file mode 100644 index 0000000..4b8379c --- /dev/null +++ b/docs/archives/2025-12/06_network/06_01_net_vm.md @@ -0,0 +1,107 @@ +Tags: #os, #configuration, #network, #virtualization + +## Preparation + +### Create VM template + +- ~/data/config/scripts/net.sh + +```bash +virt-install \ +--boot uefi \ +--name net \ +--os-variant debian13 \ +--vcpus 1 \ +--memory 2048 \ +--location /var/lib/libvirt/images/debian-13.0.0-amd64-netinst.iso \ # For serial installing, use `--location` instead of `--cdrom` +--disk pool=vm-images,size=34,format=qcow2,discard=unmap \ +--network network=ovs-lan-net,portgroup=vlan10-access,model=virtio,mac=0A:49:6E:4D:01:00 \ # Use designated ovs port group +--graphics none \ +--console pty,target_type=serial \ +--extra-args "console=ttyS0,115200" +# After enter this command, then the console starts automatically +# Remove all annotation before you make the sh file. +``` + +### Debian installation + +- Following [here](../03_common/03_01_debian_configuration.md) to install Debian. +- Debian installer supports serial mode regardless getty@ttyS0 service is enabled or not. +- Following [here](../03_common/03_02_iptables.md) to set iptables. +- Following [here](../03_common/03_04_crowdsec.md) to set CrowdSec + +#### Serial console setting + +After installation, use `ctrl + ]` to exit console. Before setting getty@ttyS0, you can't use serial console to access VM. Therefore, use IP address set on installation, and connect net server via ssh first, following the step to enable the getty. + +### Modify VM template settings + +After getty setting, shutdown net vm with `shutdown` in VM or `sudo virsh shutdown net` in hypervisor to turn off vm first. + +```bash +virsh edit net +``` + +```xml + +... 
+ + + 512 + + + + +``` + +```bash +virsh dumpxml net > ~/data/config/vms/dumps/net.xml +# Start net server with console +``` + +### Common setting + +- net.service + +```ini +# ~/data/config/services/net.service +# ~/.config/systemd/user/net.service +[Unit] +Description=net Auto Booting +After=network-online.target +Wants=network-online.target +Requires=opnsense.service + +[Service] +Type=oneshot + +# Maintain status as active +RemainAfterExit=yes + +# CrowdSec should be set +ExecStartPre=%h/data/config/scripts/wait-for-it.sh 192.168.10.1:8080 -t 0 + +ExecStartPre=/bin/bash -c "sleep 15" + +# Run the service +ExecStart=/usr/bin/virsh -c qemu:///system start net + +# Stop the service +ExecStop=/usr/bin/virsh -c qemu:///system shutdown net + +[Install] +WantedBy=default.target +``` + +```bash +ln -s ~/data/config/services/net.service ~/.config/systemd/user/net.service + +systemctl --user daemon-reload +systemctl --user enable net.service +systemctl --user start net.service +``` \ No newline at end of file diff --git a/docs/archives/2025-12/06_network/06_02_net_ddns.md b/docs/archives/2025-12/06_network/06_02_net_ddns.md new file mode 100644 index 0000000..6b1802a --- /dev/null +++ b/docs/archives/2025-12/06_network/06_02_net_ddns.md @@ -0,0 +1,387 @@ +Tags: #os, #configuration, #network + +## DDNS + +### Secret management + +- File: + - ~/data/config/secrets/.secret.yaml + - /etc/secrets/2001/ddns.env + +- Edit `.secret.yaml` with `edit_secret.sh` + +```yaml +# ~/data/config/secrets/.secret.yaml +# DDNS +DDNS: + ZONE_ID: 'encrypted value' + API_KEY: 'encrypted value' +``` + +```bash +extract_secret.sh ~/data/config/secrets/.secret.yaml -e "DDNS" > /run/user/$UID/tmp.env && sudo mv /run/user/$UID/tmp.env /etc/secrets/$UID/ddns.env && sudo chown $UID:root /etc/secrets/$UID/ddns.env && sudo chmod 400 /etc/secrets/$UID/ddns.env +``` + +### ddns.sh + +- File: ~/data/config/scripts/ddns/ddns.sh + +```bash +#!/bin/bash +# ~/data/config/scripts/ddns.sh + +# Designate 
directory +# DIRECTORY="$HOME/data/config/scripts" + +# Information +DOMAIN="" +TTL=180 +C_TTL=86400 +PROXIED="false" +DELETE_FLAG="false" +CURRENT_IP="" + +# These will be injected by systemd +# ZONE_ID='.secret' +# API_KEY='.secret' + +# usage() function +usage() { + echo "Usage: $0 -d \"domain\" [-t \"ttl\"] [-p] [-r] [-c]" + echo "-d : Specify the domain to update" + echo "-t : Specify the TTL(Time to live)" + echo "-p: Specify the cloudflare proxy to use" + echo "-r: Delete the DNS record" + exit 1 +} + +# getopts to get arguments +while getopts "d:t:pr" opt; do + case $opt in + d) + DOMAIN="$OPTARG" + ;; + t) + TTL="$OPTARG" + ;; + p) + PROXIED="true" + ;; + r) + DELETE_FLAG="true" + ;; + \?) # unknown options + echo "Invalid option: -$OPTARG" >&2 + usage + ;; + :) # parameter required option + echo "Option -$OPTARG requires an argument." >&2 + usage + ;; + esac +done + +# Get option and move to parameters - This has no functional thing, because it only use arguments with parameters +shift $((OPTIND - 1)) + +# Check necessary options +if [ -z "$DOMAIN" ]; then + echo "Error: -d option is required" >&2 + usage +fi + +if ! [[ "$TTL" =~ ^[0-9]+$ ]] || [ "$TTL" -le 0 ]; then + echo "Error: -t option (ttl) requires a number above 0." >&2 + usage +fi + +# log() function +log() +{ + local text="$1" + echo -e "$(date "+%Y-%m-%d %H:%M:%S"): [ddns] $text" +} + +# Make log directory +# if [ ! -d "$DIRECTORY/log" ]; then +# mkdir "$DIRECTORY/log" +# fi + +# Check and create log file +# LOG_FILE="$DIRECTORY/log/ddns_$(date "+%Y-%m-%d").log" +# if [ ! -f "$LOG_FILE" ]; then +# log "Notice: log file is created" +# fi + +# Check package +if ! command -v curl &> /dev/null; then + log "Error: curl package is needed" + exit +fi +if ! 
command -v jq &> /dev/null; then + log "Error: jq package is needed" + exit +fi + +# API options +URL="https://api.cloudflare.com/client/v4/zones/$ZONE_ID/dns_records" +CONTENT_TYPE="Content-Type: application/json" +AUTHORIZATION="Authorization: Bearer $API_KEY" + +# Current IP check +CURRENT_IP=$(curl -sf "https://ifconfig.me") ||\ +CURRENT_IP=$(curl -sf "https://ifconfig.kr") ||\ +CURRENT_IP=$(curl -sf "https://api.ipify.org") +if [ "$CURRENT_IP" == "" ]; then + log "Error: Can't get an IP" + exit +fi + +# DNS functions + +# get_dns_record() function +get_dns_record() +{ + local type="$1" + local name="$2" + + local response="$( + curl -s "$URL?type=$type&name=$name"\ + -H "$CONTENT_TYPE"\ + -H "$AUTHORIZATION")" + if [ "$(echo "$response" | jq -r '.success')" == "false" ]; then + log "Error: Can't get dns record\"Reason: $response" + exit + else + echo "$response" + fi +} + +# create_dns_record() function +create_dns_record() +{ + local type="$1" + local name="$2" + local ttl="$3" + local comment="$4" + local content="$5" + local response="$( + curl -s "$URL"\ + -X POST\ + -H "$CONTENT_TYPE"\ + -H "$AUTHORIZATION"\ + -d "{ + \"name\": \"$name\", + \"ttl\": $ttl, + \"type\": \"$type\", + \"comment\": \"$comment\", + \"content\": \"$content\", + \"proxied\": $PROXIED + }")" + if [ "$(echo "$response" | jq -r '.success')" == "false" ]; then + log "Error: Can't create dns record\"Reason: $response" + exit + else + echo "$response" + fi +} + +# update_dns_record() function +update_dns_record() +{ + local type="$1" + local name="$2" + local ttl="$3" + local comment="$4" + local content="$5" + local id="$6" + local response=$( + curl -s "$URL/$id"\ + -X PUT\ + -H "$CONTENT_TYPE"\ + -H "$AUTHORIZATION"\ + -d "{ + \"name\": \"$name\", + \"ttl\": $ttl, + \"type\": \"$type\", + \"comment\": \"$comment\", + \"content\": \"$content\", + \"proxied\": $PROXIED + }") + if [ "$(echo "$response" | jq -r '.success')" == "false" ]; then + log "Error: Can't update dns 
record\"Reason: $response" + exit + else + echo "$response" + fi +} + +# delete_dns_record() function +delete_dns_record() +{ + local type="$1" + local id="$2" + + local response=$( + curl -s "$URL/$id"\ + -X DELETE\ + -H "$CONTENT_TYPE"\ + -H "$AUTHORIZATION" + ) + if [ "$(echo "$response" | jq -r '.success')" == "false" ]; then + log "Error: Can't delete dns record\"Reason: $response" + exit + else + echo "$response" + fi +} + +# Get DNS A, and CNAME record +A_DNS_RECORD=$(get_dns_record "A" "$DOMAIN") +S_DNS_RECORD=$(get_dns_record "cname" "*.$DOMAIN") +W_DNS_RECORD=$(get_dns_record "cname" "www.$DOMAIN") + +# Delete DNS record with Delete flag +if [ "$DELETE_FLAG" == "true" ]; then + FLAG="false" + if [ "$(echo $A_DNS_RECORD | jq -r '.result | length')" -eq 1 ]; then + A_DNS_ID="$(echo $A_DNS_RECORD | jq -r '.result[0].id')" + delete_dns_record "A" "$A_DNS_ID" + log "Delete: root DNS record is deleted" + FLAG="true" + fi + if [ "$(echo $S_DNS_RECORD | jq -r '.result | length')" -eq 1 ]; then + S_DNS_ID="$(echo $S_DNS_RECORD | jq -r '.result[0].id')" + delete_dns_record "cname" "$S_DNS_ID" + log "Delete: sub DNS record is deleted" + FLAG="true" + fi + if [ "$(echo $W_DNS_RECORD | jq -r '.result | length')" -eq 1 ]; then + W_DNS_ID="$(echo $W_DNS_RECORD | jq -r '.result[0].id')" + delete_dns_record "cname" "$W_DNS_ID" + log "Delete: www DNS record is deleted" + FLAG="true" + fi + if [ "$FLAG" == "false" ]; then + log "Notice: Nothing is Deleted. 
There are no DNS records" + fi + exit +fi + +# Create or update DNS A record +if [ "$(echo $A_DNS_RECORD | jq -r '.result | length')" -eq 1 ]; then # root DNS record exist + A_DNS_ID="$(echo $A_DNS_RECORD | jq -r '.result[0].id')" + A_DNS_CONTENT="$(echo $A_DNS_RECORD | jq -r '.result[0].content')" + A_DNS_TTL="$(echo $A_DNS_RECORD | jq -r '.result[0].ttl')" + A_DNS_PROXIED="$(echo $A_DNS_RECORD | jq -r '.result[0].proxied')" + if [ "$A_DNS_CONTENT" != $CURRENT_IP -o "$A_DNS_TTL" != "$TTL" -o "$A_DNS_PROXIED" != "$PROXIED" ]; then + update_dns_record "A" "$DOMAIN" "$TTL" "$(date "+%Y-%m-%d %H:%M:%S"): root domain from ddns.sh" "$CURRENT_IP" "$A_DNS_ID" + log "Update: Root DNS record is successfully changed\nDomain: $DOMAIN\nIP: $A_DNS_CONTENT to $CURRENT_IP\nTTL: $A_DNS_TTL to $TTL\nproxied: $A_DNS_PROXIED to $PROXIED" + else + log "Notice: Root DNS record is not changed\nDomain: $DOMAIN\nIP: $CURRENT_IP\nTTL: $TTL\nproxied: $PROXIED" + fi +else # root DNS record does not exist + create_dns_record "A" "$DOMAIN" "$TTL" "$(date "+%Y-%m-%d %H:%M:%S"): root domain from ddns.sh" "$CURRENT_IP" + log "Create: Root DNS record is successfully created\nDomain: $DOMAIN\nIP: $CURRENT_IP\nTTL: $TTL\nproxied: $PROXIED" +fi + +# Create or update DNS CNAME records +if [ "$(echo $S_DNS_RECORD | jq -r '.result | length')" -eq 1 ]; then # sub DNS record exist + S_DNS_ID="$(echo $S_DNS_RECORD | jq -r '.result[0].id')" + S_DNS_CONTENT="$(echo $S_DNS_RECORD | jq -r '.result[0].content')" + S_DNS_TTL="$(echo $S_DNS_RECORD | jq -r '.result[0].ttl')" + S_DNS_PROXIED="$(echo $S_DNS_RECORD | jq -r '.result[0].proxied')" + if [ "$S_DNS_CONTENT" != "$DOMAIN" -o "$S_DNS_TTL" != "$C_TTL" -o "$S_DNS_PROXIED" != "$PROXIED" ]; then + update_dns_record "cname" "*.$DOMAIN" "$C_TTL" "$(date "+%Y-%m-%d %H:%M:%S"): sub domain from ddns.sh" "$DOMAIN" "$S_DNS_ID" + log "Update: Sub DNS record is successfully changed\nDomain: $S_DNS_CONTENT to *.$DOMAIN\ncname: $DOMAIN \nTTL: $S_DNS_TTL to $C_TTL\nproxied: 
$S_DNS_PROXIED to $PROXIED" + else + log "Notice: Sub DNS record is not changed\nDomain: *.$DOMAIN\ncname: $DOMAIN\nTTL: $C_TTL\nproxied: $PROXIED" + fi +else # sub DNS record does not exist + create_dns_record "cname" "*.$DOMAIN" "$C_TTL" "$(date "+%Y-%m-%d %H:%M:%S"): sub domain from ddns.sh" "$DOMAIN" + log "Create: Sub DNS record is successfully created\nDomain: *.$DOMAIN\ncname: $DOMAIN\nTTL: $C_TTL\nproxied: $PROXIED" +fi + +if [ "$(echo $W_DNS_RECORD | jq -r '.result | length')" -eq 1 ]; then # www DNS record exist + W_DNS_ID="$(echo $W_DNS_RECORD | jq -r '.result[0].id')" + W_DNS_CONTENT="$(echo $W_DNS_RECORD | jq -r '.result[0].content')" + W_DNS_TTL="$(echo $W_DNS_RECORD | jq -r '.result[0].ttl')" + W_DNS_PROXIED="$(echo $W_DNS_RECORD | jq -r '.result[0].proxied')" + if [ "$W_DNS_CONTENT" != "$DOMAIN" -o "$W_DNS_TTL" != "$C_TTL" -o "$W_DNS_PROXIED" != "$PROXIED" ]; then + update_dns_record "cname" "www.$DOMAIN" "$C_TTL" "$(date "+%Y-%m-%d %H:%M:%S"): www domain from ddns.sh" "$DOMAIN" "$W_DNS_ID" + log "Update: www DNS record is successfully changed\nDomain: $W_DNS_CONTENT to www.$DOMAIN\ncname: $DOMAIN\nTTL: $W_DNS_TTL to $C_TTL\nproxied: $W_DNS_PROXIED to $PROXIED" + else + log "Notice: www DNS record is not changed\nDomain: www.$DOMAIN\ncname: $DOMAIN\nTTL: $C_TTL\nproxied: $PROXIED" + fi +else # www DNS record does not exist + create_dns_record "cname" "www.$DOMAIN" "$C_TTL" "$(date "+%Y-%m-%d %H:%M:%S"): www domain from ddns.sh" "$DOMAIN" + log "Create: www DNS record is successfully created\nDomain: www.$DOMAIN\ncname: $DOMAIN\nTTL: $C_TTL\nproxied: $PROXIED" +fi + +# Remove old backup file (7days before) +# find "$DIRECTORY/log" -maxdepth 1 -type f -mtime +7 -delete +``` + +### Systemd + +- File: + - ~/data/config/services/ddns/ddns.service + - ~/data/config/services/ddns/ddns.timer + - /etc/secrets/2001/ddns.env + +```ini +# ~/data/config/services/ddns/ddns.service +# ~/.config/systemd/user/ddns.service +[Unit] +Description=DDNS Update Service 
+After=network-online.target +Wants=network-online.target + +[Service] +Type=oneshot + +# logging +StandardOutput=journal +StandardError=journal + +# EnvironmentFile +EnvironmentFile=/etc/secrets/%U/ddns.env + +# Run the script +ExecStart=/bin/bash -c '%h/data/config/scripts/ddns/ddns.sh -d "ilnmors.com"' + +``` + +```ini +# ~/data/config/services/ddns/ddns.timer +# ~/.config/systemd/user/ddns.timer +[Unit] +Description=Run DDNS update service every 5 minutes + +[Timer] +# Execute service after 1 min on booting +OnBootSec=1min + +# Execute service every 5mins +OnUnitActiveSec=5min + +# When timer is activated, Service also starts. +Persistent=true + +[Install] +WantedBy=timers.target +``` + +```bash +# Register service +mkdir -p ~/.config/systemd/user && chmod -R 700 ~/.config + +ln -s ~/data/config/services/ddns/ddns.service ~/.config/systemd/user/ddns.service + +ln -s ~/data/config/services/ddns/ddns.timer ~/.config/systemd/user/ddns.timer + +systemctl --user daemon-reload + +# Start timer and enable +systemctl --user enable --now ddns.timer +``` \ No newline at end of file diff --git a/docs/archives/2025-12/06_network/06_03_net_bind.md b/docs/archives/2025-12/06_network/06_03_net_bind.md new file mode 100644 index 0000000..73de611 --- /dev/null +++ b/docs/archives/2025-12/06_network/06_03_net_bind.md @@ -0,0 +1,338 @@ +Tags: #os, #configuration, #network, #virtualization, #container + +## BIND9 + +BIND9 is an open source authoritative DNS software of ISC. It can work as a personal authoritative in private network environment. 
+ +### Secret management + +- File: + - ~/data/config/secrets/.secret.yaml + +- Edit `.secret.yaml` with `edit_secret.sh` + +#### Generate TSIG key as ACME key + +```bash +podman run --rm --entrypoint /bin/sh internetsystemsconsortium/bind9:9.20 -c "tsig-keygen -a hmac-sha256 acme-key" +# Paste and add on .secret.yaml +# key "acme-key" { +#     algorithm hmac-sha256; +#     secret "your_base64_tsig_secret_here"; +# };' +``` + +```yaml +# ~/data/config/secrets/.secret.yaml +BIND9_ACME_KEY: | + key "acme-key" { +    algorithm hmac-sha256; +    secret "your_base64_tsig_secret_here"; + }; +``` + +```bash +# Copy the secret value from .secret.yaml + +# Podman secret +extract_secret.sh ~/data/config/secrets/.secret.yaml -f "BIND9_ACME_KEY" | podman secret create "acme-key" - +``` + + +### Preparation + +#### iptables and firewall rules + +- Set iptables first, following [here](../03_common/03_02_iptables.md). +- Set firewall rules first, following [here](Latest/05_firewall/05_04_opnsense_rules.md). + +#### Create directory for container + +```bash +mkdir -p ~/data/containers/bind +chmod 700 ~/data/containers/bind +sudo setfacl -m d:g::0 ~/data/containers/bind +sudo setfacl -m d:o::0 ~/data/containers/bind +sudo setfacl -m u:net:rwx ~/data/containers/bind +sudo setfacl -m u:100052:rwx ~/data/containers/bind +sudo setfacl -d -m u:net:rwx ~/data/containers/bind +sudo setfacl -d -m u:100052:rwx ~/data/containers/bind +mkdir -p ~/data/containers/bind/{cache,etc,lib,log} +nano ~/data/containers/bind/{etc,lib}/configuration_files +``` + +> BIND9 container executes as 53:53(bind:bind) permission in container. It is mapped host's 100052. Therefore, directories have to have ACL via `setfacl` + + +### Podman Image + +```bash +podman pull internetsystemsconsortium/bind9:9.20 # Do not use latest version to management +``` + +### Configuration files + +> If `named.conf` file didn't exist in `/etc/bind` or it had error in it, the container would be terminated without any logs. 
Before starting BIND container, you should makes all configuration file already. + +#### named.conf + +- file: ~/data/containers/bind/etc/named.conf + +```ini +include "/run/secrets/keys/acme-key"; +include "/run/secrets/keys/ddns-key"; + +options { + directory "/var/cache/bind"; + + listen-on { any; }; + listen-on-v6 { ::1; }; + + // Authoritative DNS setting + allow-recursion { none; }; + allow-transfer { none; }; + allow-update { none; }; + + dnssec-validation no; + + check-names master warn; +}; + +zone "ilnmors.internal." { + type primary; + file "/var/lib/bind/db.ilnmors.internal"; + notify yes; + // ACME-01 challenge policy. It allows only subdomain's TXT record update. + update-policy { + grant acme-key subdomain ilnmors.internal. TXT; + grant ddns-key subdomain ilnmors.internal. A AAAA DHCID; + }; +}; + +zone "1.168.192.in-addr.arpa" { + type primary; + file "/var/lib/bind/db.1.168.192.in-addr.arpa"; + notify yes; + update-policy { + grant ddns-key subdomain 1.168.192.in-addr.arpa PTR DHCID; + }; +}; + +zone "10.168.192.in-addr.arpa" { + type primary; + file "/var/lib/bind/db.10.168.192.in-addr.arpa"; + notify yes; + update-policy { + grant ddns-key subdomain 10.168.192.in-addr.arpa PTR DHCID; + }; +}; + +zone "ilnmors.com." { + //split horizon dns + type primary; + file "/var/lib/bind/db.ilnmors.com"; + notify yes; +}; + +logging { + channel default_log { + stderr; + severity info; + }; + category default { default_log; }; + category config { default_log; }; + category queries { default_log; }; +}; +``` + + >If ddns function were required, generate tsig key for ddns and give update policy for each zone like below. +> +> - include "/etc/bind/ddns-key"; +> - update-policy { grant ddns-key subdomain \[zone_domain\] ANY A AAAA TXT; }; + +- Verify the named.conf with the command `named-checkconf` + +#### Zone files + +> When you add the record, you should use `.` at the end of the domain. i.e. mydomain.internal. 
+ +- file: + - ~/data/containers/bind/lib/db.ilnmors.internal + +```ini +$TTL 86400 + +@ IN SOA bind.ilnmors.internal. mail.ilnmors.internal. ( + 2025113001 ; serial + 3600 ; refresh (1 hour) + 1800 ; retry (30 minutes) + 604800 ; expire (1 week) + 86400 ; minimum (1 day) + ) + IN NS bind.ilnmors.internal. +bind IN A 192.168.10.11 +opnsense IN A 192.168.10.1 +vmm IN A 192.168.10.10 +net IN A 192.168.10.11 +crowdsec IN CNAME opnsense.ilnmors.internal. +adguard IN CNAME net.ilnmors.internal. +step-ca IN CNAME auth.ilnmors.internal. +caddy IN CNAME auth.ilnmors.internal. +ldap IN CNAME auth.ilnmors.internal. +authelia IN CNAME auth.ilnmors.internal. +code-server IN CNAME dev.ilnmors.internal. +postgresql IN CNAME dev.ilnmors.internal. +``` + +- ~/data/containers/bind/lib/db.1.168.192.in-addr.arpa + +```ini +$TTL 86400 + +@ IN SOA bind.ilnmors.internal. mail.ilnmors.internal. ( + yyyymmdd01 ; serial + 3600 ; refresh (1 hour) + 1800 ; retry (30 minutes) + 604800 ; expire (1 week) + 86400 ; minimum (1 day) + ) + IN NS bind.ilnmors.internal. +1 IN PTR opnsense.ilnmors.internal. +11 IN PTR console.ilnmors.internal. +30 IN PTR printer.ilnmors.internal. +``` + +- ~/data/containers/bind/lib/db.10.168.192.in-addr.arpa + +```ini +$TTL 86400 + +@ IN SOA bind.ilnmors.internal. mail.ilnmors.internal. ( + yyyymmdd01 ; serial + 3600 ; refresh (1 hour) + 1800 ; retry (30 minutes) + 604800 ; expire (1 week) + 86400 ; minimum (1 day) + ) + IN NS bind.ilnmors.internal. +1 IN PTR opnsense.ilnmors.internal. +10 IN PTR vmm.ilnmors.internal. +11 IN PTR net.ilnmors.internal. +12 IN PTR auth.ilnmors.internal. +13 IN PTR dev.ilnmors.internal. +14 IN PTR app.ilnmors.internal. +``` + +- ~/data/containers/bind/lib/db.ilnmors.com + +```ini +$TTL 86400 + +@ IN SOA bind.ilnmors.internal. mail.ilnmors.internal. ( + yyyymmdd01 ; serial + 3600 ; refresh (1 hour) + 1800 ; retry (30 minutes) + 604800 ; expire (1 week) + 86400 ; minimum (1 day) + ) + IN NS bind.ilnmors.internal. 
+* IN A 192.168.10.12 +``` + +```bash +# Adguard container has Requires=bind.service. When it restarted, then Adguard also restarted. +systemctl --user restart bind +``` +### Quadlet + +- File: + - ~/data/config/containers/bind/bind.container + +```ini +# ~/data/config/containers/bind/bind.container +[Quadlet] +DefaultDependencies=false + +[Unit] +Description=BIND9 DNS + +After=network-online.target +Wants=network-online.target + +[Container] +Image=docker.io/internetsystemsconsortium/bind9:9.20 + +ContainerName=bind + +PublishPort=2253:53/tcp +PublishPort=2253:53/udp + +Volume=%h/data/containers/bind/etc:/etc/bind:rw +Volume=%h/data/containers/bind/lib:/var/lib/bind:rw +Volume=%h/data/containers/bind/cache:/var/cache/bind:rw +Volume=%h/data/containers/bind/log:/var/log:rw + +Environment="TZ=Asia/Seoul" +Secret=acme-key,target=/run/secrets/key/acme-key +Secret=ddns-key,target=/run/secrets/key/ddns-key + +Label=diun.enable=true +Label=diun.watch_repo=true + +[Install] +WantedBy=default.target +``` + +#### Create systemd `.service` file + +```bash +# linger has to be activated +mkdir -p ~/.config/containers/systemd + +ln -s ~/data/config/containers/bind/bind.container ~/.config/containers/systemd/bind.container + +# This command makes bind.service +systemctl --user daemon-reload +``` + +#### Enable and start service + +```bash +systemctl --user start bind.service +``` + + +### nsupdate (RFC 2136) verification + +```bash +# Update query at net server +# Before test, create temp file .bind.acme-key +nsupdate -k /run/user/$UID/.bind.acme-key +> server 127.0.0.1 2253 +> zone ilnmors.internal +> update add _acme-challenge.ilnmors.internal. 60 TXT "validation-test" +> send +> `ctrl+c` + +# Verify +dig @127.0.0.1 -p 2253 _acme-challenge.ilnmors.internal. TXT +# Print +;; QUESTION SECTION: +;_acme-challenge.ilnmors.internal. IN TXT + +;; ANSWER SECTION: +_acme-challenge.ilnmors.internal. 
60 IN TXT "validation-test" + +# Delete query at net server +nsupdate -k /run/user/$UID/.bind.acme-key +> server 127.0.0.1 2253 +> zone ilnmors.internal +> update delete _acme-challenge.ilnmors.internal. 60 TXT "validation-test" +> send +> `ctrl+c` + +# Verify +dig @127.0.0.1 -p 2253 _acme-challenge.ilnmors.internal. TXT +# Print +;; ->>HEADER<<- opcode: QUERY, status: NXDOMAIN, id: 17572 +``` \ No newline at end of file diff --git a/docs/archives/2025-12/06_network/06_04_net_adguard_home.md b/docs/archives/2025-12/06_network/06_04_net_adguard_home.md new file mode 100644 index 0000000..efe15a3 --- /dev/null +++ b/docs/archives/2025-12/06_network/06_04_net_adguard_home.md @@ -0,0 +1,293 @@ +Tags: #os, #configuration, #network, #virtualization, #container + +## AdGuard Home + +AdGuard Home is one of open sourced recursive DNS resolver which can block malicious domain. It supports powerful DNS query filter based on ruleset and split horizon DNS service, plus recursive query on DoH and DoT towards public or internal authoritative DNS server. + +### Secret + +Adgaurd Home container doesn't need any secret value. + +### Preparation + +#### Add new domain in BIND + +Following [here](../06_network/06_03_net_bind.md). + +- net server +- file: ~/data/containers/bind/lib/db.ilnmors.internal + +```ini +# ... +adguard IN CNAME net.ilnmors.internal. +# ... +``` + +```bash +# Adguard container has Requires=bind.service. When it restarted, then Adguard also restarted. +systemctl --user restart bind +``` +#### iptables and firewall rules + +- Set iptables first, following [here](../03_common/03_02_iptables.md). +- Set firewall rules first, following [here](Latest/05_firewall/05_04_opnsense_rules.md). 
+ +#### Create directory for container + +```bash +mkdir -p ~/data/containers/adguard +chmod 700 ~/data/containers/adguard +sudo setfacl -m d:g::0 ~/data/containers/adguard +sudo setfacl -m d:o::0 ~/data/containers/adguard +mkdir -p ~/data/containers/adguard/{work,config,certs} + +``` + +> AdGuard Home container executes as root permission in container. It is not necessary to give `setfacl`. Because the container's root account is mapped as host's uid. + +### Podman Image + +```bash +podman pull adguard/adguardhome:v0.107.68 # Do not use latest version to management +``` + +### Quadlet + +- File: + - ~/data/config/containers/adguard/adguard.container + +```ini +# ~/data/config/containers/adguard/adguard.container +[Quadlet] +DefaultDependencies=false + +[Unit] +Description=AdGuard Home DNS + +After=bind.service +Requires=bind.service + + +[Container] +Image=docker.io/adguard/adguardhome:v0.107.68 + +ContainerName=adguard + +AddHost=bind.ilnmors.internal:host-gateway + +PublishPort=2053:53/tcp +PublishPort=2053:53/udp +PublishPort=2443:443/tcp +PublishPort=2443:443/udp +PublishPort=3000:3000/tcp +# 3000 is temporary port, After TLS setting, delete it. 
+ +Volume=%h/data/containers/adguard/work:/opt/adguardhome/work:rw +Volume=%h/data/containers/adguard/config:/opt/adguardhome/conf:rw +Volume=%h/data/containers/adguard/certs:/etc/ssl/adguard:ro + +Environment="TZ=Asia/Seoul" + +Label=diun.enable=true +Label=diun.watch_repo=true + +[Install] +WantedBy=default.target +``` + +#### Create systemd `.service` file + +```bash +ln -s ~/data/config/containers/adguard/adguard.container ~/.config/containers/systemd/adguard.container + +systemctl --user daemon-reload +``` + +#### Enable and start service + +```bash +systemctl --user start adguard.service +``` + + +### Web UI configuration + +#### Access web UI and initial setting + +- URL: http://192.168.10.11:3000 + > After TLS certificate setting, access to adguard via https `https://192.168.10.11` or `https://adguard.ilnmors.internal` + +#### Initial setting wizard + +- Administrator interface + - Network interface: all + - Interface port: 3000 +- DNS server + - Network interface: all + - Interface port: 53 +- Authentication + - User: adguard + - Password: Password + +### AdGuard Configuration + +#### General setting + +- Filter update interval: 1 hour +- \[\*\] Use AdGuard browsing security web service +- \[\*\] Enable log +- Query logs rotation - 90 days +- \[\*\] Enable statistics +- `Save` + +#### DNS settings + +- Upstream DNS servers + +```text +# Cloudflare DNS +tls://security.cloudflare-dns.com +# Internal DNS bind.ilnmors.internal:2253 +[/ilnmors.internal/]udp://bind.ilnmors.internal:2253 +# Split Horizon DNS +[/*.ilnmors.com/]udp://bind.ilnmors.internal:2253 +``` + +> Internal authoritative DNS will be BIND. BIND will use 2253 port. `bind.ilnmors.internal` is defined on container's `/etc/hosts`, it allows communication with host system. 
+ +- \[\*\] Parallel requests +- Private reverse DNS servers + +```text +udp://bind.ilnmors.internal:2253 +``` + +> Set this option after BIND prepared + +- \[\*\] Use private reverse DNS resolvers +- \[\*\] Enable reverse resolving of client's IP addresses +- `Apply` and `Test upstreams` + +- \[ \] Enable cache + +- `Save` +- Disallowed domains + +```text +... +wpad.ilnmors.internal +_ldap._tcp.dc._msdcs.ilnmors.internal +``` + +- `Save configuration` + +#### Client settings + +- Add Client + +| Client | Name | +| :----------------------------------------------: | :-------: | +| 169.254.1.2, 192.168.10.11 | localhost | +| 10.10.10.2, 10.10.10.3, 10.10.10.4, 192.168.1.11 | console | + +#### Filters - DNS blocklists + +- Add blocklist - Choose from the list + - 1Hosts (Lite) + - AdGuard DNS filter + - AdAway DNS Popup Hosts filter + - HaGeZi's Ultimate Blocklist + - OISD Blocklist Big + - KOR: List-KR DNS + +#### DNS rewrites + +- Filters:DNS rewrites + - bind.ilnmors.internal - 192.168.10.11 + +#### Clients' DNS setting + +##### OPNsense + +- System:Settings:General + - DNS server: 192.168.10.11 +- Services:KEA DHCP:KEA DHCPv4:Subnets + - DNS server: 192.168.10.11 + +##### Hypervisor + +- /etc/resolv.conf + - nameserver 192.168.10.11 + +##### net + +- /etc/resolv.conf + - nameserver 192.168.10.11 + +##### VPN client +```ini +[Interface] +DNS = 192.168.10.11 +``` + + +--- +#### Encryption setting + +> It requires `BIND` and `Step-CA` + +- \[\*\] Enable Encryption (HTTPS, DNS-over_HTTPS, and DNS-over-TLS) +- \[\*\] Enable plain DNS +- \[\*\] Redirect to HTTPS automatically +- HTTPS port: 443 + +##### Certificates + +> AdGuard home doesn't support ACME protocol by itself. It is necessary to use OPNsense's ACME client function to automate adguard home certificates. + +- Add new domain in BIND, Following [here](../06_network/06_03_net_bind.md). + - net server + - file: ~/data/containers/bind/lib/db.ilnmors.internal + ```text + ... 
+ adguard IN CNAME net.ilnmors.internal. + ... + ``` + +- ACME setting (OPNsense) + - Services:ACME Client:Certificates - Certificates - \[+\] + - Common Name: adguard.ilnmors.internal + - Description: adguard + - ACME Account: acme.ilnmors.internal + > Even though provisioner's name includes `@`, it has to use as `.`. + > + > i.e. `acme@ilnmors.internal` > `acme.ilnmors.internal` + - Challenge Type: ilnmors.internal-dns-01-challenge + - \[\*\] Auto Renewal + - Automations: adguard-auto-acme, adguard-auto-restart + +- Automations (OPNsense) + - Services:ACME Client:Automations - Automation - \[+\] + - Name: adguard-auto-acme / adguard-auto-restart + - Description: adguard acme crt issue / restart adguard after crt is issued + - Run Command: Upload certificate via SFTP / Remote command via SSH + - SFTP Host: adguard.ilnmors.internal + - Username: net + - Identity Type: ed25519 + - Remote Path(SFTP): /home/net/data/containers/adguard/certs + - Command(SSH): systemctl --user restart adguard + - `Show Identity` + > Copy Required parameters `ssh-ed25519 ~~~ root@opnsense.ilnmors.internal` + > + > Add parameters in net server's ~/.ssh/authorized_keys + - `Test Connect` and `Save` + +- \[\*\] Set a certificates file path: `/etc/ssl/adguard/adguard.ilnmors.internal/fullchain.pem` +- \[\*\] Set a private key file: `/etc/ssl/adguard/adguard.ilnmors.internal/key.pem` +- \[Save configuration\] + +##### Modify container file + +- Delete `PublishPort=3000:3000/tcp` part +- `systemctl --user daemon-reload` +- `systemctl --user start adguard` \ No newline at end of file diff --git a/docs/archives/2025-12/06_network/06_05_net_kea.md b/docs/archives/2025-12/06_network/06_05_net_kea.md new file mode 100644 index 0000000..f630c0b --- /dev/null +++ b/docs/archives/2025-12/06_network/06_05_net_kea.md @@ -0,0 +1,630 @@ +Tags: #os, #configuration, #network, #virtualization, #container + + +# KEA + +#### Generate TSIG key as DDNS key + +```bash +podman run --rm --entrypoint /bin/sh 
internetsystemsconsortium/bind9:9.20 -c "tsig-keygen -a hmac-sha256 ddns-key"
+# Paste and add on .secret.yaml
+# key "ddns-key" {
+#     algorithm hmac-sha256;
+#     secret "your_base64_tsig_secret_here";
+# };
+```
+
+### DHCPv4
+#### Add user to group
+
+```bash
+sudo apt install kea-dhcp4-server kea-dhcp-ddns-server
+sudo usermod -aG _kea net
+sudo chmod 770 /etc/kea /var/lib/kea
+sudo chmod 660 /etc/kea/kea-dhcp4.conf
+sudo mkdir /etc/secrets/$(id -u _kea)
+sudo chown _kea:root /etc/secrets/$(id -u _kea)
+sudo chmod 500 /etc/secrets/$(id -u _kea)
+sudo touch /etc/secrets/$(id -u _kea)/ddns-key
+sudo nano /etc/secrets/$(id -u _kea)/ddns-key
+sudo chown _kea:root /etc/secrets/$(id -u _kea)/ddns-key
+sudo chmod 400 /etc/secrets/$(id -u _kea)/ddns-key
+```
+
+iptables
+```
+*mangle
+
+*filter
+-A INPUT -p udp -m udp --dport 67 -m comment --comment "allow udp DHCPv4 connection" -j ACCEPT
+
+# check
+sudo bash -c 'iptables-restore --test < /etc/iptables/rules.v4'
+```
+
+ kea-dhcp4.conf
+```json
+{
+    "Dhcp4": {
+        "subnet4": [
+            {
+                "subnet": "192.168.10.0/24",
+                "pools" : [
+                    {
+                        "pool": "192.168.10.254-192.168.10.254"
+                    }
+                ],
+                "option-data": [
+                    {
+                        "name": "routers",
+                        "data": "192.168.10.1"
+                    },
+                    {
+                        "name": "ntp-servers",
+                        "data": "192.168.10.1"
+                    },
+                    {
+                        "name": "domain-name-servers",
+                        "data": "192.168.10.11"
+                    },
+                    {
+                        "name": "domain-name",
+                        "data": "ilnmors.internal"
+                    }
+                ],
+                "reservations": [
+                    {
+                        "hw-address": "0a:49:6e:4d:02:00",
+                        "ip-address": "192.168.10.12",
+                        "hostname": "auth"
+                    },
+                    {
+                        "hw-address": "0a:49:6e:4d:03:00",
+                        "ip-address": "192.168.10.13",
+                        "hostname": "dev"
+                    },
+                    {
+                        "hw-address": "0a:49:6e:4d:04:00",
+                        "ip-address": "192.168.10.14",
+                        "hostname": "app"
+                    }
+                ],
+                "id": 1,
+                "interface": "enp1s0"
+            },
+            {
+                "subnet": "192.168.1.0/24",
+                "relay": {
+                    "ip-addresses": [ "192.168.1.1" ]
+                },
+                "pools": [
+                    {
+                        "pool": "192.168.1.100-192.168.1.254"
+                    }
+                ],
+                "option-data": [
+                    {
+                        "name": "routers",
+                        "data": "192.168.1.1"
+                    },
+                    {
+                        "name": "ntp-servers",
+                        "data": "192.168.1.1"
+                    },
+                    {
+                        "name": "domain-name-servers",
+                        "data": "192.168.10.11"
+                    },
+                    {
+                        "name": "domain-name",
+                        "data": "ilnmors.internal"
+                    }
+                ],
+                "reservations": [
+                    {
+                        "hw-address": "d8:e2:df:ff:1b:d5",
+                        "ip-address": "192.168.1.11",
+                        "hostname": "surface"
+                    },
+                    {
+                        "hw-address": "38:ca:84:94:5e:06",
+                        "ip-address": "192.168.1.30",
+                        "hostname": "printer"
+                    }
+                ],
+                "id": 2,
+                "interface": "enp1s0"
+            }
+        ],
+        "interfaces-config": {
+            "interfaces": [
+                "enp1s0"
+            ],
+            "dhcp-socket-type": "raw",
+            "service-sockets-max-retries": 5,
+            "service-sockets-require-all": true
+        },
+        "renew-timer": 1000,
+        "rebind-timer": 2000,
+        "valid-lifetime": 4000,
+        "loggers": [
+            {
+                "name": "kea-dhcp4",
+                "output_options": [
+                    {
+                        "output": "stdout"
+                    }
+                ],
+                "severity": "INFO"
+            }
+        ],
+        "lease-database": {
+            "type": "memfile",
+            "persist": true,
+            "name": "/var/lib/kea/kea-leases4.csv",
+            "lfc-interval": 3600
+        },
+        "dhcp-ddns": {
+            "enable-updates": false,
+            "server-ip": "127.0.0.1",
+            "server-port": 53001,
+            "ncr-protocol": "UDP",
+            "ncr-format": "JSON"
+        },
+        "ddns-send-updates": true,
+        "ddns-update-on-renew": true,
+        "ddns-qualifying-suffix": "ilnmors.internal",
+        "ddns-override-no-update": true,
+        "ddns-override-client-update": true,
+        "ddns-replace-client-name": "when-present",
+        "ddns-generated-prefix": "host",
+        "hostname-char-set": "[^a-zA-Z0-9.-]",
+        "hostname-char-replacement": "-"
+    }
+}
+
+// There is only one interface on the DHCP host. In this case, the subnet that contains the DHCP server should be `id=1`, because Kea DHCP allocates an IP from the `id=1` subnet when it receives a packet that has no giaddr. When Kea DHCP runs on the router itself, the id does not matter, because the DHCP server allocates IPs based on the receiving interface.
+```
+
+#### OPNsense
+- Services:Kea DHCP:Kea DHCPv4
+  - \[ \] Enabled
+- Services:DHCP Relay:Configuration
+  - Destination - \[+\]
+    - Name: kea-dhcp-v4
+    - Server: 192.168.10.11
+  - Relays - \[+\]
+    - \[\*\] Enabled
+    - Interface: LAN
+    - Destination: kea-dhcp-v4
+    - \[ \] Agent Information
+  - Status `green box` check
+
+### DDNS
+
+```bash
+sudo nano /etc/apparmor.d/local/usr.sbin.kea-dhcp-ddns
+
+# /etc/secrets/** r,
+
+sudo apparmor_parser -r /etc/apparmor.d/usr.sbin.kea-dhcp-ddns
+sudo systemctl restart kea-dhcp-ddns-server.service
+```
+
+/etc/secrets/102/ddns-key
+```json
+[
+  {
+    "name": "ddns-key",
+    "algorithm": "hmac-sha256",
+    "secret": "secret_value"
+  }
+]
+```
+
+/etc/kea/kea-dhcp-ddns.conf
+```json
+{
+  "DhcpDdns":
+  {
+    "ip-address": "127.0.0.1",
+    "port": 53001,
+    "control-socket": {
+        "socket-type": "unix",
+        "socket-name": "/run/kea/kea-ddns-ctrl-socket"
+    },
+    "tsig-keys": <?include "/etc/secrets/102/ddns-key"?>,
+    "forward-ddns" : {
+        "ddns-domains": [
+            {
+                "name": "ilnmors.internal.",
+                "key-name": "ddns-key",
+                "dns-servers": [
+                    {
+                        "ip-address": "127.0.0.1",
+                        "port": 2253
+                    }
+                ]
+            }
+        ]
+    },
+    "reverse-ddns" : {
+        "ddns-domains": [
+            {
+                "name": "10.168.192.in-addr.arpa.",
+                "key-name": "ddns-key",
+                "dns-servers": [
+                    {
+                        "ip-address": "127.0.0.1",
+                        "port": 2253
+                    }
+                ]
+            },
+            {
+                "name": "1.168.192.in-addr.arpa.",
+                "key-name": "ddns-key",
+                "dns-servers": [
+                    {
+                        "ip-address": "127.0.0.1",
+                        "port": 2253
+                    }
+                ]
+            }
+        ]
+    },
+    "loggers": [
+        {
+            "name": "kea-dhcp-ddns",
+            "output-options": [
+                {
+                    "output": "stdout"
+                }
+            ],
+            "severity": "INFO"
+        }
+    ]
+  }
+}
+```
+
+ kea-dhcp4.conf
+ ``` json
+ // ...
+ "dhcp-ddns": {
+    "enable-updates": true,
+    // ...
+}
+ ```
+
+---
+
+## BIND9
+
+BIND9 is an open source authoritative DNS software of ISC. It can work as a personal authoritative DNS server in a private network environment.
+ +### Secret management + +- File: + - ~/data/config/secrets/.secret.yaml + +- Edit `.secret.yaml` with `edit_secret.sh` + +#### Generate TSIG key as ACME key + +```bash +podman run --rm --entrypoint /bin/sh internetsystemsconsortium/bind9:9.20 -c "tsig-keygen -a hmac-sha256 acme-key" +# Paste and add on .secret.yaml +# key "acme-key" { +#     algorithm hmac-sha256; +#     secret "your_base64_tsig_secret_here"; +# };' +``` + +```yaml +# ~/data/config/secrets/.secret.yaml +BIND9_ACME_KEY: | + key "acme-key" { +    algorithm hmac-sha256; +    secret "your_base64_tsig_secret_here"; + }; +``` + +```bash +# Copy the secret value from .secret.yaml + +# Podman secret +extract_secret.sh ~/data/config/secrets/.secret.yaml -f "BIND9_ACME_KEY" | podman secret create "acme-key" - +``` + + +### Preparation + +#### iptables and firewall rules + +- Set iptables first, following [here](../03_common/03_02_iptables.md). +- Set firewall rules first, following [here](Latest/05_firewall/05_04_opnsense_rules.md). + +#### Create directory for container + +```bash +mkdir -p ~/data/containers/bind +chmod 700 ~/data/containers/bind +sudo setfacl -m d:g::0 ~/data/containers/bind +sudo setfacl -m d:o::0 ~/data/containers/bind +sudo setfacl -m u:net:rwx ~/data/containers/bind +sudo setfacl -m u:100052:rwx ~/data/containers/bind +sudo setfacl -d -m u:net:rwx ~/data/containers/bind +sudo setfacl -d -m u:100052:rwx ~/data/containers/bind +mkdir -p ~/data/containers/bind/{cache,etc,lib,log} +nano ~/data/containers/bind/{etc,lib}/configuration_files +``` + +> BIND9 container executes as 53:53(bind:bind) permission in container. It is mapped host's 100052. Therefore, directories have to have ACL via `setfacl` + + +### Podman Image + +```bash +podman pull internetsystemsconsortium/bind9:9.20 # Do not use latest version to management +``` + +### Configuration files + +> If `named.conf` file didn't exist in `/etc/bind` or it had error in it, the container would be terminated without any logs. 
Before starting BIND container, you should makes all configuration file already. + +#### named.conf + +- file: ~/data/containers/bind/etc/named.conf + +```ini +include "/etc/bind/key/acme-key"; + +options { + directory "/var/cache/bind"; + + listen-on { any; }; + listen-on-v6 { ::1; }; + + // Authoritative DNS setting + allow-recursion { none; }; + allow-transfer { none; }; + allow-update { none; }; + + dnssec-validation no; + + check-names master warn; +}; + +zone "ilnmors.internal." { + type primary; + file "/var/lib/bind/db.ilnmors.internal"; + notify yes; + // ACME-01 challenge policy. It allows only subdomain's TXT record update. + update-policy { + grant acme-key subdomain ilnmors.internal. TXT; + }; +}; + +zone "1.168.192.in-addr.arpa" { + type primary; + file "/var/lib/bind/db.1.168.192.in-addr.arpa"; + notify yes; +}; + +zone "10.168.192.in-addr.arpa" { + type primary; + file "/var/lib/bind/db.10.168.192.in-addr.arpa"; + notify yes; +}; + +zone "ilnmors.com." { + //split horizon dns + type primary; + file "/var/lib/bind/db.ilnmors.com"; + notify yes; +}; + +logging { + channel default_log { + stderr; + severity info; + }; + category default { default_log; }; + category config { default_log; }; + category queries { default_log; }; +}; +``` + + >If ddns function were required, generate tsig key for ddns and give update policy for each zone like below. +> +> - include "/etc/bind/ddns-key"; +> - update-policy { grant ddns-key subdomain \[zone_domain\] ANY A AAAA TXT; }; + +- Verify the named.conf with the command `named-checkconf` + +#### Zone files + +> When you add the record, you should use `.` at the end of the domain. i.e. mydomain.internal. + +- file: + - ~/data/containers/bind/lib/db.ilnmors.internal + +```ini +$TTL 86400 + +@ IN SOA bind.ilnmors.internal. mail.ilnmors.internal. ( + yyyymmdd01 ; serial + 3600 ; refresh (1 hour) + 1800 ; retry (30 minutes) + 604800 ; expire (1 week) + 86400 ; minimum (1 day) + ) + IN NS bind.ilnmors.internal. 
+bind IN A 192.168.10.11 +opnsense IN A 192.168.10.1 +vmm IN A 192.168.10.10 +net IN A 192.168.10.11 +auth IN A 192.168.10.12 +dev IN A 192.168.10.13 +app IN A 192.168.10.14 +console IN A 192.168.1.11 +printer IN A 192.168.1.30 +crowdsec IN CNAME opnsense.ilnmors.internal. +adguard IN CNAME net.ilnmors.internal. +step-ca IN CNAME auth.ilnmors.internal. +caddy IN CNAME auth.ilnmors.internal. +ldap IN CNAME auth.ilnmors.internal. +postgresql IN CNAME dev.ilnmors.internal. +``` + +- ~/data/containers/bind/lib/db.1.168.192.in-addr.arpa + +```ini +$TTL 86400 + +@ IN SOA bind.ilnmors.internal. mail.ilnmors.internal. ( + yyyymmdd01 ; serial + 3600 ; refresh (1 hour) + 1800 ; retry (30 minutes) + 604800 ; expire (1 week) + 86400 ; minimum (1 day) + ) + IN NS bind.ilnmors.internal. +1 IN PTR opnsense.ilnmors.internal. +11 IN PTR console.ilnmors.internal. +30 IN PTR printer.ilnmors.internal. +``` + +- ~/data/containers/bind/lib/db.10.168.192.in-addr.arpa + +```ini +$TTL 86400 + +@ IN SOA bind.ilnmors.internal. mail.ilnmors.internal. ( + yyyymmdd01 ; serial + 3600 ; refresh (1 hour) + 1800 ; retry (30 minutes) + 604800 ; expire (1 week) + 86400 ; minimum (1 day) + ) + IN NS bind.ilnmors.internal. +1 IN PTR opnsense.ilnmors.internal. +10 IN PTR vmm.ilnmors.internal. +11 IN PTR net.ilnmors.internal. +12 IN PTR auth.ilnmors.internal. +13 IN PTR dev.ilnmors.internal. +14 IN PTR app.ilnmors.internal. +``` + +- ~/data/containers/bind/lib/db.ilnmors.com + +```ini +$TTL 86400 + +@ IN SOA bind.ilnmors.internal. mail.ilnmors.internal. ( + yyyymmdd01 ; serial + 3600 ; refresh (1 hour) + 1800 ; retry (30 minutes) + 604800 ; expire (1 week) + 86400 ; minimum (1 day) + ) + IN NS bind.ilnmors.internal. +* IN A 192.168.10.12 +``` + +```bash +# Adguard container has Requires=bind.service. When it restarted, then Adguard also restarted. 
+systemctl --user restart bind +``` +### Quadlet + +- File: + - ~/data/config/containers/bind/bind.container + +```ini +# ~/data/config/containers/bind/bind.container +[Quadlet] +DefaultDependencies=false + +[Unit] +Description=BIND9 DNS + +After=network-online.target +Wants=network-online.target + +[Container] +Image=docker.io/internetsystemsconsortium/bind9:9.20 + +ContainerName=bind + +PublishPort=2253:53/tcp +PublishPort=2253:53/udp + +Volume=%h/data/containers/bind/etc:/etc/bind:rw +Volume=%h/data/containers/bind/lib:/var/lib/bind:rw +Volume=%h/data/containers/bind/cache:/var/cache/bind:rw +Volume=%h/data/containers/bind/log:/var/log:rw + +Environment="TZ=Asia/Seoul" +Secret=acme-key,target=/etc/bind/key/acme-key + +Label=diun.enable=true +Label=diun.watch_repo=true + +[Install] +WantedBy=default.target +``` + +#### Create systemd `.service` file + +```bash +# linger has to be activated +mkdir -p ~/.config/containers/systemd + +ln -s ~/data/config/containers/bind/bind.container ~/.config/containers/systemd/bind.container + +# This command makes bind.service +systemctl --user daemon-reload +``` + +#### Enable and start service + +```bash +systemctl --user start bind.service +``` + + +### nsupdate (RFC 2136) verification + +```bash +# Update query at net server +# Before test, create temp file .bind.acme-key +nsupdate -k /run/user/$UID/.bind.acme-key +> server 127.0.0.1 2253 +> zone ilnmors.internal +> update add _acme-challenge.ilnmors.internal. 60 TXT "validation-test" +> send +> `ctrl+c` + +# Verify +dig @127.0.0.1 -p 2253 _acme-challenge.ilnmors.internal. TXT +# Print +;; QUESTION SECTION: +;_acme-challenge.ilnmors.internal. IN TXT + +;; ANSWER SECTION: +_acme-challenge.ilnmors.internal. 60 IN TXT "validation-test" + +# Delete query at net server +nsupdate -k /run/user/$UID/.bind.acme-key +> server 127.0.0.1 2253 +> zone ilnmors.internal +> update delete _acme-challenge.ilnmors.internal. 
60 TXT "validation-test" +> send +> `ctrl+c` + +# Verify +dig @127.0.0.1 -p 2253 _acme-challenge.ilnmors.internal. TXT +# Print +;; ->>HEADER<<- opcode: QUERY, status: NXDOMAIN, id: 17572 +``` \ No newline at end of file diff --git a/docs/archives/2025-12/07_authorization/07_01_auth_vm.md b/docs/archives/2025-12/07_authorization/07_01_auth_vm.md new file mode 100644 index 0000000..92d830f --- /dev/null +++ b/docs/archives/2025-12/07_authorization/07_01_auth_vm.md @@ -0,0 +1,148 @@ +Tags: #os, #configuration, #network, #virtualization, #authorization, #authentication + +## Preparation + +### Set DHCP reservation and DNS record + +#### Set DHCP reservation on KEA DHCP in OPNsense + +Following [here](05_07_opnsense_kea.md) + +- Services:Kea DHCP:Kea DHCPv4:Reservations - \[+\] + - Subnet: 192.168.10.0/24 + - IP address: 192.168.10.12 + - MAC address: 0A:49:6E:4D:02:00 + - Hostname: auth + - Description: auth + - `save` + +#### Set DNS records in BIND + +Following [here](../06_network/06_03_net_bind.md). + +- net server +- file: + - ~/data/containers/bind/lib/db.ilnmors.internal + - ~/data/containers/bind/lib/db.10.168.192.in-addr.arpa + +```ini +# db.ilnmors.internal +# ... +auth IN A 192.168.10.12 +# ... +# db.10.168.192.in-addr.arpa +# ... +12 IN PTR auth.ilnmors.internal. +# ... +``` + +```bash +# Adguard container has Requires=bind.service. When it restarted, then Adguard also restarted. 
+systemctl --user restart bind +``` + +### Create VM template + +- ~/data/config/scripts/auth.sh + +```bash +virt-install \ +--boot uefi \ +--name auth \ +--os-variant debian13 \ +--vcpus 2 \ +--memory 4096 \ +--location /var/lib/libvirt/images/debian-13.0.0-amd64-netinst.iso \ # For serial installing, use `--location` instead of `--cdrom` +--disk pool=vm-images,size=66,format=qcow2,discard=unmap \ +--network network=ovs-lan-net,portgroup=vlan10-access,model=virtio,mac=0A:49:6E:4D:02:00 \ # Use designated ovs port group +--graphics none \ +--console pty,target_type=serial \ +--extra-args "console=ttyS0,115200" +# After entering this command, the console starts automatically +# Remove all inline comments before you create the .sh file. +``` + +### Debian installing + +- Following [here](../03_common/03_01_debian_configuration.md) to install Debian. +- The Debian installer supports serial mode regardless of whether the getty@ttyS0 service is enabled or not. +- Following [here](../03_common/03_02_iptables.md) to set iptables. +- Following [here](../03_common/03_04_crowdsec.md) to set CrowdSec. + +#### Serial console setting + +After installation, use `ctrl + ]` to exit the console. Before setting getty@ttyS0, you can't use the serial console to access the VM. Therefore, use the IP address set during installation, connect to the server via SSH first, and follow the steps to enable the getty. + +### Modify VM template settings + +After the getty setting, shut down the auth VM with `shutdown` inside the VM or `sudo virsh shutdown auth` on the hypervisor to turn off the VM first. + +```bash +virsh edit auth +``` + +```xml + +... 
+ + + 1024 + + + + +``` + +```bash +virsh dumpxml auth > ~/data/config/vms/dumps/auth.xml +virsh start auth && virsh console auth +# Start auth server with console +``` + +### Common setting + +- auth.service + +```ini +# ~/data/config/services/auth.service +# ~/.config/systemd/user/auth.service +[Unit] +Description=auth Auto Booting +After=network-online.target +Wants=network-online.target +Requires=opnsense.service + +[Service] +Type=oneshot + +# Maintain status as active +RemainAfterExit=yes + +# CrowdSec should be set +ExecStartPre=%h/data/config/scripts/wait-for-it.sh 192.168.10.1:8080 -t 0 +ExecStartPre=%h/data/config/scripts/wait-for-it.sh 192.168.10.11:53 -t 0 + +ExecStartPre=/bin/bash -c "sleep 15" + +# Run the service +ExecStart=/usr/bin/virsh -c qemu:///system start auth + +# Stop the service +ExecStop=/usr/bin/virsh -c qemu:///system shutdown auth + +[Install] +WantedBy=default.target +``` + +```bash +ln -s ~/data/config/services/auth.service ~/.config/systemd/user/auth.service + +systemctl --user daemon-reload +systemctl --user enable auth.service +systemctl --user start auth.service +``` \ No newline at end of file diff --git a/docs/archives/2025-12/07_authorization/07_02_auth_step-ca.md b/docs/archives/2025-12/07_authorization/07_02_auth_step-ca.md new file mode 100644 index 0000000..75d4cd0 --- /dev/null +++ b/docs/archives/2025-12/07_authorization/07_02_auth_step-ca.md @@ -0,0 +1,320 @@ +Tags: #os, #configuration, #network, #virtualization, #container, #security + +## Step-CA + +Step-CA is the modern CA server which can operate in private network environment. It can issue CA and certificates, apply the policy with provisioner. It supports ACME, JWK, etc. 
+ +### Secret management + +- File: + - ~/data/config/secrets/.secret.yaml + +- Edit `.secret.yaml` with `edit_secret.sh` + + +```yaml +# ~/data/config/secrets/.secret.yaml +# STEP_CA +STEP_CA_PASSWORD: generated_value +``` + + +### Preparation + +#### iptables and firewall rules + +- Set iptables first, following [here](../03_common/03_02_iptables.md). +- Set firewall rules first, following [here](Latest/05_firewall/05_04_opnsense_rules.md). +#### Create directory for container + +```bash +mkdir -p ~/data/containers/step-ca +chmod 700 ~/data/containers/step-ca +setfacl -m d:g::0 ~/data/containers/step-ca +setfacl -m d:o::0 ~/data/containers/step-ca +setfacl -m u:auth:rwx ~/data/containers/step-ca +setfacl -m u:100999:rwx ~/data/containers/step-ca +setfacl -d -m u:auth:rwx ~/data/containers/step-ca +setfacl -d -m u:100999:rwx ~/data/containers/step-ca +# After generating +sudo find ~/data/containers/step-ca -type f -exec setfacl -m m::rw {} \; +sudo find ~/data/containers/step-ca -type d -exec setfacl -m m::rwx {} \; +``` + +> Step-CA container executes as 1000:1000(step:step) permission in container. It is mapped host's 100999. Therefore, directories have to have ACL via `setfacl` + +#### Add new domain in BIND + +Following [here](../06_network/06_03_net_bind.md). + +- net server +- file: /home/net/data/containers/bind/lib/db.ilnmors.internal + +```ini +# ... +step-ca IN CNAME auth.ilnmors.internal. +# ... +``` + +```bash +# Adguard container has Requires=bind.service. When it restarted, then Adguard also restarted. 
+systemctl --user restart bind +``` + +### Podman Image + +```bash +podman pull smallstep/step-ca:0.28.4 # Do not use latest version to management +``` + +#### CA generation + +```bash +podman run --rm -it \ +-v /home/auth/data/containers/step-ca:/home/step:rw \ +smallstep/step-ca:0.28.4 step ca init \ +--deployment-type standalone \ +--name ilnmors.internal \ +--dns step-ca.ilnmors.internal \ +--address :9000 \ +--provisioner step-admin@ilnmors.internal +# Private mode: standalone + +# Intermediate CA setting options +# --ra stepCAS \ +# --issuer https://step-ca.ilnmors.internal:9000 \ +# --issuer-fingerprint ~~~~ \ +# --issuer-provisioner jwk-ca@dev.ilnmors.internal +# --confidential-file ~~~ + +> [leave empty and we\'ll generate one]: [blank] +# Print +--- +✔ Password: Generated_value # Copy this value and paste in .secret.yaml file as STEP_CA_PASSWORD= +✔ Root fingerprint: fingerprint +--- +``` + +> Password value encrypts root CA's private key + +```bash +# Podman secret +extract_secret.sh ~/data/config/secrets/.secret.yaml -f "STEP_CA_PASSWORD" | podman secret create "STEP_CA_PASSWORD" - +``` + +### Configuration files + +- File: ~/data/containers/step-ca/config/ca.json + +#### Provisioner + +Provisioner is basically the object of issuing certificates as a RA. They verify CSR from client and when it is valid with its policy they will sign the certificates with CA's private key. Step-CA supports various type of provisioner. In this homelab, only ACME will be used. Because it is easy to manage when you use OPNsense ACME client. Step-CA supports one root CA and one intermediate CA in one container, only one intermediate CA will be operated in this project. However, the way to set multi intermediate CA will be explained, and jwk way in this document. + +##### jwk-ca@ilnmors.internal + +This provisioner is to issue intermediate CA. It wouldn't be used in this project. The option for CA in X.509 format is optional and defined in as extension option. 
To define these option in step-ca, the template file is needed. + +- file: ~/data/containers/step-ca/templates/ca.tpl + +```json +{ + "subject": {{ toJson .Subject }}, + "keyUsage": ["certSign", "crlSign"], + "basicConstraints": { + "isCA": true, + "maxPathLen": 0 + } +} +``` + +> keyUsage: Designate to manage certificates and CRL +> isCA: Designate the certificate to use CA +> maxPathLen: Designate allowed below CA's number + + +- Define provisioner + +```bash +podman exec -it step-ca \ +step ca provisioner add jwk-ca@ilnmors.internal \ +--create \ # Generate key pair automatically +--type JWK \ +--ca-config /home/step/config/ca.json \ # Sign on certificate with root CA's private key +--x509-template /home/step/template/ca.tpl \ # Use x509 template +--x509-max-dur 87600h \ # +--x509-default-dur 87600h +``` + +##### jwk@ilnmors.internal + +This provisioner is to issue the certificates like DB communication based on its identity (Using JWK and JWT pre-shared). The certificate is issued based on enrolled key in provisioner. However, in this project all crt will be used central ACME client `Opnsens ACME client` and `Caddy`. + +- Define provisioner + +```bash +podman exec -it step-ca \ +step ca provisioner add jwk-crt@ilnmors.internal \ +--create \ # Generate key pair automatically +--type JWK \ +--x509-default-dur 2160h # To set default expire date as 90 days. +``` + +##### acme@ilnmors.internal + +This provisioner is to issue the certificates for https communication. The certificate is issued based on challenge; the ownership of domain. + +- Define provisioner +```bash +podman exec -it step-ca \ +step ca provisioner add acme@ilnmors.internal \ +--type ACME \ +--x509-default-dur 2160h # To set default expire date as 90 days. +``` + +#### Subject + +Step-CA uses subject as a account. It is used to manage Step-CA remotely. To use this, it is necessary to use `--remote-management` option when the step-CA is initially set or fix `ca.json` authority.enableAdmin:true. 
When subject is enabled, provisioners aren't defined in ca.json but its own DB. + +#### Policy + +Self-hosted Step-CA server doesn't support to give x509 policy for each provisioner. It only allows public policy. Only `ilnmors.internal` and `*.ilnmors.internal` certificates are required, so designate the policy in `ca.json` + +> Policies can be administered using the step CLI application. The commands are part of the step ca policy namespace. In a self-hosted step-ca, policies can be configured on the authority level. Source: [here](https://smallstep.com/docs/step-ca/policies/) + +- file: ~/data/containers/step-ca/config/ca.json + +```json +... +"authority": { + "policy": { + "x509": { + "allow": { + "dns": [ + "ilnmors.internal", + "*.ilnmors.internal" + ] + }, + "allowWildcardNames": true + } + }, + "provisioners": [ ... ] + .... +} +... +``` +### Quadlet + +- File: + - ~/data/config/containers/step-ca/step-ca.container + +```ini +# ~/data/config/containers/step-ca/step-ca.container +[Quadlet] +DefaultDependencies=false + +[Unit] +Description=Step-CA + +After=network-online.target +Wants=network-online.target + +[Container] +Image=docker.io/smallstep/step-ca:0.28.4 + +ContainerName=step-ca + +PublishPort=9000:9000/tcp + +Volume=%h/data/containers/step-ca:/home/step:rw + +Environment="TZ=Asia/Seoul" +Environment="PWDPATH=/run/secrets/STEP_CA_PASSWORD" + +Secret=STEP_CA_PASSWORD,target=/run/secrets/STEP_CA_PASSWORD + +Label=diun.enable=true +Label=diun.watch_repo=true + +[Install] +WantedBy=default.target +``` + +#### Create systemd `.service` file + +```bash +mkdir -p ~/.config/containers/systemd + +ln -s ~/data/config/containers/step-ca/step-ca.container ~/.config/containers/systemd/step-ca.container + +systemctl --user daemon-reload +``` + +#### Enable and start service + +```bash +systemctl --user start step-ca.service +``` + +### Verify server + +#### Server health check + +```bash +curl -k https://step-ca.ilnmors.internal:9000/health +> {"status":"ok"} +``` + 
+#### Server policy check + +```bash +podman exec -it step-ca step ca certificate test.com test.crt test_key --provisioner acme@ilnmors.internal +> error creating new ACME order: The server will not issue certificates for the identifier +``` + +--- +### Set trust Root CRT + +#### Linux + +##### Debian/Ubuntu + +- File: /usr/local/share/ca-certificates/{ca.crt, ca.pem} +- `update-ca-certificates` + +##### CentOS/RHEL/Fedora + +- File: /etc/pki/ca-trust/source/anchors/{ca.crt, ca.pem} +- `update-ca-trust` + +#### Windows + +- `Windows + R` + `certlm.msc` +- `All Tasks` - `Import` + +#### Firefox + +- Setting - Security - certificates - CA - add + +--- +### intermediate CA setting + +It won't be used in this project; however, here is how to set it up. + +#### Example of dev.ilnmors.internal intermediate CA + +- init CA +```bash +podman run --rm -it \ +-v /home/dev/data/containers/step-ca:/home/step:rw \ +smallstep/step-ca:0.28.4 step ca init \ +--deployment-type standalone \ +--name dev.ilnmors.internal \ +--dns step-ca.dev.ilnmors.internal \ +--address :9000 \ +--provisioner admin@dev.ilnmors.internal \ +--ra stepCAS \ +--issuer https://step-ca.ilnmors.internal:9000 \ +--issuer-fingerprint ~~~~ \ # root CA's fingerprint +--issuer-provisioner jwk-ca@dev.ilnmors.internal \ +--confidential-file ~~~ # jwk-ca's password file +``` \ No newline at end of file diff --git a/docs/archives/2025-12/07_authorization/07_03_auth_main_caddy.md b/docs/archives/2025-12/07_authorization/07_03_auth_main_caddy.md new file mode 100644 index 0000000..0d678b9 --- /dev/null +++ b/docs/archives/2025-12/07_authorization/07_03_auth_main_caddy.md @@ -0,0 +1,231 @@ +Tags: #os, #configuration, #network, #virtualization, #container, #security, #authentication, #authorization, #sso + +## Caddy - auth + +Caddy is an open source reverse proxy (web server) which automatically obtains and applies TLS certificates from a CA via the ACME protocol. It supports various modules, including DNS modules. 
However, the most important and fundamental services such as OPNsense, AdGuard Home, Step-CA, Authelia would not use caddy for independency. + +### Secret management + +- File: + - ~/data/config/secrets/.secret.yaml + +- Edit `.secret.yaml` with `edit_secret.sh` + +```yaml +# ~/data/config/secrets/.secret.yaml +# CADDY: +CADDY_ACME_KEY: acme-key_key_value (Only secret value) +CADDY_CROWDSEC_KEY: CADDY_LAPI_KEY +``` + +```bash +# Podman secret +extract_secret.sh .secret.yaml -f CADDY_ACME_KEY | podman secret create CADDY_ACME_KEY - + +extract_secret.sh .secret.yaml -f CADDY_CROWDSEC_KEY | podman secret create CADDY_CROWDSEC_KEY - +``` + +### Preparation + +#### iptables and firewall rules + +- Set iptables first, following [here](../03_common/03_02_iptables.md). +- Set firewall rules first, following [here](Latest/05_firewall/05_04_opnsense_rules.md). +#### Create directory for container + +```bash +mkdir -p ~/data/containers/caddy-auth/{etc,data} +chmod -R 700 ~/data/containers/caddy-auth +``` + +> Caddy container executes as 0:0(root:root) permission in container. It is mapped host's UID. Therefore, directories don't have to have ACL via `setfacl` + +#### Add new domain in BIND + +Following [here](../06_network/06_03_net_bind.md). + +- net server +- file: ~/data/containers/bind/lib/db.ilnmors.internal + +```ini +# ... +login IN CNAME auth.ilnmors.internal. +# ... +``` + +```bash +# Adguard container has Requires=bind.service. When it restarted, then Adguard also restarted. +systemctl --user restart bind +``` + +### Podman Image + +#### Podman containerfile + +Caddy supports various module for it. rfc2136(nsupdate) module, crowdsec will be used in this homelab project. 
+ +- file: + - ~/data/config/containers/caddy-auth/containerfile-caddy-2.10.2-auth + - ~/data/config/containers/caddy-auth/root_ca.crt + +```containerfile +FROM caddy:2.10.2-builder-alpine AS builder + +RUN xcaddy build \ +--with github.com/caddy-dns/rfc2136 \ +--with github.com/hslatman/caddy-crowdsec-bouncer/crowdsec \ +--with github.com/hslatman/caddy-crowdsec-bouncer/http + +FROM caddy:2.10.2 + +COPY --from=builder /usr/bin/caddy /usr/bin/caddy + +COPY ./root_ca.crt /usr/local/share/ca-certificates/root_ca.crt + +RUN update-ca-certificates +``` + + +#### Podman image build + +```bash +podman build -t caddy:2.10.2-auth -f ~/data/config/containers/caddy-auth/containerfile-caddy-2.10.2-auth . && podman image prune -f +# Delete pure caddy and caddy-builder-alpine images after command above manually. +``` + +### Configuration files + +Caddyfile will be updated after Authelia setting + +```bash +# fix inconsistencies +podman exec caddy-auth caddy fmt --overwrite /etc/caddy/Caddyfile +# After Caddyfile setting is changed use this command. 
+podman exec caddy-auth caddy reload --config /etc/caddy/Caddyfile +``` + +- file: + - ~/data/containers/caddy-auth/etc/Caddyfile + - ~/data/containers/caddy-auth/certs/root_ca.crt + +```ini +# Caddyfile +# ~/data/containers/caddy-auth/etc/Caddyfile + +# Global option +{ + # CrowdSec LAPI connection + crowdsec { + api_url https://crowdsec.ilnmors.internal:8080 + api_key "{file./run/secrets/CADDY_CROWDSEC_KEY}" + } +} + +# Snippets +# CrowdSec log for parser +(crowdsec_log) { + log { + output file /data/access.log { + mode 0640 + roll_size 100MiB + roll_keep 1 + } + } +} +# Private TLS ACME with DNS-01-challenge +(private_tls) { + tls { + issuer acme { + dir https://step-ca.ilnmors.internal:9000/acme/acme@ilnmors.internal/directory + dns rfc2136 { + server bind.ilnmors.internal:2253 + key_name acme-key + key_alg hmac-sha256 + key "{file./run/secrets/CADDY_ACME_KEY}" + } + } + } +} + +test.ilnmors.com { + import crowdsec_log + route { + crowdsec + root * /usr/share/caddy + file_server + } +} + +caddy.ilnmors.internal { + import private_tls + import crowdsec_log + route { + crowdsec + root * /usr/share/caddy + file_server + } +} +``` + +### Quadlet + +- File: + - ~/data/config/containers/caddy-auth/caddy-auth.container + +```ini +# ~/data/config/containers/caddy-auth/caddy-auth.container +[Quadlet] +DefaultDependencies=false + +[Unit] +Description=Caddy - auth + +After=step-ca.service +Requires=step-ca.service + +[Container] +Image=localhost/caddy:2.10.2-auth + +ContainerName=caddy-auth + +# To issue certificate from step-ca +AddHost=step-ca.ilnmors.internal:host-gateway + +PublishPort=2080:80/tcp +PublishPort=2443:443/tcp + +Volume=%h/data/containers/caddy-auth/etc:/etc/caddy:rw +Volume=%h/data/containers/caddy-auth/data:/data:rw + +Environment="TZ=Asia/Seoul" + +Secret=CADDY_ACME_KEY,target=/run/secrets/CADDY_ACME_KEY +Secret=CADDY_CROWDSEC_KEY,target=/run/secrets/CADDY_CROWDSEC_KEY + +Label=diun.enable=true +Label=diun.watch_repo=true +# This label need 
configuration on `diun.yml` +Label=diun.regopt=caddy-auth-source + +[Install] +WantedBy=default.target +``` + +#### Create systemd `.service` file + +```bash +# linger has to be activated +ln -s ~/data/config/containers/caddy-auth/caddy-auth.container ~/.config/containers/systemd/caddy-auth.container + +systemctl --user daemon-reload +``` + +#### Enable and start service + +```bash +systemctl --user start caddy-auth.service +``` + +### Crowdsec bouncer and agent + +- Following [here](../03_common/03_04_crowdsec.md). \ No newline at end of file diff --git a/docs/archives/2025-12/07_authorization/07_04_auth_authentik.md b/docs/archives/2025-12/07_authorization/07_04_auth_authentik.md new file mode 100644 index 0000000..e87fd95 --- /dev/null +++ b/docs/archives/2025-12/07_authorization/07_04_auth_authentik.md @@ -0,0 +1,315 @@ +Tags: #os, #configuration, #network, #virtualization, #container, #security, #authentication, #authorization, #sso + +## Authentik + +Authentik is one of famous and strong open source idP solutions which support SSO, 2FA, LDAP, RADIUS, etc. It will be combining with Caddy-security module to apply SSO. + +### Secret management + +File: + - ~/data/config/secrets/.secret.yaml + +- Edit `.secret.yaml` with `edit_secret.sh` + +```yaml +# ~/data/config/secrets/.secret.yaml +# Authentik: +AUTHENTIK_SECRET_KEY: openssl rand -base64 32 value +AUTHENTIK_POSTGRESQL__PASSWORD: openssl rand -base64 32 value +``` + +```bash +# Podman secret +extract_secret.sh .secret.yaml -f AUTHENTIK_SECRET_KEY | podman secret create AUTHENTIK_SECRET_KEY - + +extract_secret.sh .secret.yaml -f AUTHENTIK_POSTGRESQL__PASSWORD | podman secret create AUTHENTIK_POSTGRESQL__PASSWORD - +``` + +### Preparation + +#### iptables and firewall rules + +- Set iptables first, following [here](../03_common/03_02_iptables.md). +- Set firewall rules first, following [here](Latest/05_firewall/05_04_opnsense_rules.md). 
+#### Create directory for container + +```bash +mkdir -p ~/data/containers/authentik +chmod 700 ~/data/containers/authentik +setfacl -m d:g::0 ~/data/containers/authentik +setfacl -m d:o::0 ~/data/containers/authentik +setfacl -m u:auth:rwx ~/data/containers/authentik +setfacl -m u:100999:rwx ~/data/containers/authentik +setfacl -d -m u:auth:rwx ~/data/containers/authentik +setfacl -d -m u:100999:rwx ~/data/containers/authentik +mkdir -p ~/data/containers/authentik/{backups,certs,media,templates} +# After generating +sudo find ~/data/containers/authentik -type f -exec setfacl -m m::rw {} \; +sudo find ~/data/containers/authentik -type d -exec setfacl -m m::rwx {} \; +``` + +> Authentik container executes as 1000:1000(authentik:authentik) permission in container. It is mapped host's 100999. Therefore, directories have to have ACL via `setfacl` + +#### Add new domain in BIND + +Following [here](../06_network/06_03_net_bind.md). + +- net server +- file: ~/data/containers/bind/lib/db.ilnmors.internal + +```ini +# ... +authentik IN CNAME auth.ilnmors.internal. +# ... +``` + +```bash +# Adguard container has Requires=bind.service. When it restarted, then Adguard also restarted. +systemctl --user restart bind +``` + +#### Add new database and user + +- Following [here](../08_development/08_02_dev_postgresql.md). Authentik uses postgresql. +- dev server + +```bash +podman exec -it -u postgres postgresql psql -U postgres +> # Create user and database +> CREATE USER authentik WITH PASSWORD '$AUTHENTIK_POSTGRESQL__PASSWORD_value'; +> CREATE DATABASE authentik_db; +> ALTER DATABASE authentik_db OWNER TO authentik; +> \du +> \l +``` + +#### Add information in caddy-auth + +- Following [here](./07_03_auth_main_caddy.md). 
+- auth server +- File: ~/data/containers/caddy-auth/etc/Caddyfile +```ini +authentik.ilnmors.internal { + import private_tls + import crowdsec_log + reverse_proxy authentik.ilnmors.internal:9080 +} +``` + +```bash +# After Caddyfile setting is changed use this command. +podman exec caddy-auth caddy reload --config /etc/caddy/Caddyfile +# fix inconsistencies +podman exec caddy-auth caddy fmt --overwrite /etc/caddy/Caddyfile +``` +### Podman Image + +```bash +podman pull ghcr.io/goauthentik/server:2025.10.0 # Do not use the latest tag, for easier version management +``` + +### Configuration file + +- file: + - ~/data/containers/authentik/certs/root_ca.crt + +### Quadlet + +- File: + - ~/data/config/containers/authentik/authentik-pod.pod + - ~/data/config/containers/authentik/authentik.container + - ~/data/config/containers/authentik/authentik-worker.container + +```ini +# ~/data/config/containers/authentik/authentik-pod.pod +[Quadlet] +DefaultDependencies=false + +[Pod] +PodName=authentik + +# web port +PublishPort=9080:9000/tcp +# LDAP port +#PublishPort=[set_port]:3389 +# Prometheus Port +#PublishPort=[set_port]:9300 +``` + +```ini +# ~/data/config/containers/authentik/authentik.container +[Quadlet] +DefaultDependencies=false + +[Unit] +Description=Authentik - Server + +After=caddy.service +Wants=caddy.service + +[Service] +ExecStartPre=%h/data/config/scripts/wait-for-it.sh -h postgresql.ilnmors.internal -p 5432 -t 0 +ExecStartPre=sleep 5 + +[Container] +Pod=authentik.pod + +Image=ghcr.io/goauthentik/server:2025.10.0 + +ContainerName=authentik-server + +# Change default http port from 9000 to 9080 - in pod + + +Volume=%h/data/containers/authentik/media:/media:rw +Volume=%h/data/containers/authentik/certs:/certs:ro +Volume=%h/data/containers/authentik/templates:/templates:rw +Volume=%h/data/containers/authentik/backups:/backups:rw + +# Default +Environment="TZ=Asia/Seoul" +# Listen +#AUTHENTIK_LISTEN__HTTP=0.0.0.0:9000 +# LDAP > 0.0.0.0:3389 +# METRICS > 0.0.0.0:9300 +# 
AUTHENTIK_LISTEN__TRUSTED_PROXY_CIDRS > `127.0.0.0/8`, `10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`, `fe80::/10`, `::1/128` + +# DB connection - This can be changed as hot-reloading, however adding or removing needs restart. +Environment="AUTHENTIK_POSTGRESQL__HOST=postgresql.ilnmors.internal" +Environment="AUTHENTIK_POSTGRESQL__PORT=5432" +# Password will be injected as secret +Environment="AUTHENTIK_POSTGRESQL__USER=authentik" +Environment="AUTHENTIK_POSTGRESQL__NAME=authentik_db" + +# SSL DB configuration +Environment="AUTHENTIK_POSTGRESQL__SSLMODE=verify-full" +Environment="AUTHENTIK_POSTGRESQL__SSLROOTCERT=/certs/root_ca.crt" +# This homelab doesn't use mTLS, therefore do not set `AUTHENTIK_POSTGRESQL__SSLCERT` and `AUTHENTIK_POSTGRESQL__SSLKEY` + +# Media configuration - 'file' or 's3' +Environment="AUTHENTIK_STORAGE__MEDIA_BACKEND=file" + +# Email configuration - after generate local Email services +# AUTHENTIK_EMAIL__HOST=ilnmors.internal +# AUTHENTIK_EMAIL__PORT=25 +# AUTHENTIK_EMAIL__USERNAME=authentik +# AUTHENTIK_EMAIL__USE_TLS=true +# AUTHENTIK_EMAIL__FROM=authentik@ilnmors.internal + + +Secret=AUTHENTIK_SECRET_KEY,type=env +Secret=AUTHENTIK_POSTGRESQL__PASSWORD,type=env + +# Start server +Exec=server + +Label=diun.enable=true +Label=diun.watch_repo=true + +[Install] +WantedBy=default.target +``` + +```ini +# ~/data/config/containers/authentik/authentik-worker.container +[Quadlet] +DefaultDependencies=false + +[Unit] +Description=Authentik - worker + +After=authentik-server.service +Requires=authentik-server.service + +[Container] +Pod=authentik.pod + +Image=ghcr.io/goauthentik/server:2025.10.0 + +ContainerName=authentik-worker + +# Change default http port from 9000 to 9080 - in pod + + +Volume=%h/data/containers/authentik/media:/media:rw +Volume=%h/data/containers/authentik/certs:/certs:ro +Volume=%h/data/containers/authentik/templates:/templates:rw +Volume=%h/data/containers/authentik/backups:/backups:rw + +# Default +Environment="TZ=Asia/Seoul" 
+# Listen +#AUTHENTIK_LISTEN__HTTP=0.0.0.0:9000 +# LDAP > 0.0.0.0:3389 +# METRICS > 0.0.0.0:9300 +# AUTHENTIK_LISTEN__TRUSTED_PROXY_CIDRS > `127.0.0.0/8`, `10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`, `fe80::/10`, `::1/128` + +# DB connection - This can be changed as hot-reloading, however adding or removing needs restart. +Environment="AUTHENTIK_POSTGRESQL__HOST=postgresql.ilnmors.internal" +Environment="AUTHENTIK_POSTGRESQL__PORT=5432" +# Password will be injected as secret +Environment="AUTHENTIK_POSTGRESQL__USER=authentik" +Environment="AUTHENTIK_POSTGRESQL__NAME=authentik_db" + +# SSL DB configuration +Environment="AUTHENTIK_POSTGRESQL__SSLMODE=verify-full" +Environment="AUTHENTIK_POSTGRESQL__SSLROOTCERT=/certs/root_ca.crt" +# This homelab doesn't use mTLS, therefore do not set `AUTHENTIK_POSTGRESQL__SSLCERT` and `AUTHENTIK_POSTGRESQL__SSLKEY` + +# Media configuration - 'file' or 's3' +Environment="AUTHENTIK_STORAGE__MEDIA_BACKEND=file" + +# Email configuration - after generate local Email services +# AUTHENTIK_EMAIL__HOST=ilnmors.internal +# AUTHENTIK_EMAIL__PORT=25 +# AUTHENTIK_EMAIL__USERNAME=authentik +# AUTHENTIK_EMAIL__USE_TLS=true +# AUTHENTIK_EMAIL__FROM=authentik@ilnmors.internal + + +Secret=AUTHENTIK_SECRET_KEY,type=env +Secret=AUTHENTIK_POSTGRESQL__PASSWORD,type=env + +# Start worker +Exec=worker + +[Install] +WantedBy=default.target +``` + +> All configurations, except DB connection information will be saved in postgresql. Do not set Environment varibles. It has the first priority, it will override all configuration from web UI. 
+#### Create systemd `.service` file + +```bash +# linger has to be activated +ln -s ~/data/config/containers/authentik/authentik-pod.pod ~/.config/containers/systemd/authentik-pod.pod + +ln -s ~/data/config/containers/authentik/authentik.container ~/.config/containers/systemd/authentik.container + +ln -s ~/data/config/containers/authentik/authentik-worker.container ~/.config/containers/systemd/authentik-worker.container + +systemctl --user daemon-reload +``` + +#### Enable and start service + +```bash +systemctl --user start authentik.service +``` + +### Web UI configuration + +#### Access web UI and initial setting + +- URL: https://authentik.ilnmors.internal/if/flow/initial-setup/ + +> If you can't initialize Authentik on the first attempt, `DROP DATABASE authentik_db;` and recreate the database after stopping the Authentik container. + +#### Initial setting wizard + +- Email: Admin-email + - thiswork21@gmail.com +- Password: password + + + + diff --git a/docs/archives/2025-12/07_authorization/07_04_auth_lldap.md b/docs/archives/2025-12/07_authorization/07_04_auth_lldap.md new file mode 100644 index 0000000..02ec3a6 --- /dev/null +++ b/docs/archives/2025-12/07_authorization/07_04_auth_lldap.md @@ -0,0 +1,386 @@ +Tags: #os, #configuration, #network, #virtualization, #container, #security, #authentication, #authorization, #sso + +## LLDAP (Light LDAP) + +LLDAP provides the LDAP protocol in a very light and modern way. It supports not only the main functions of the LDAP protocol, such as group and user management, but also a web UI and a RESTful API. Additionally, it is very simple to set up and manage, and it consumes a small amount of resources. Following [here](../02_theory/02_05_sso.md) about the structure of LDAP. 
+ +### Secret management + +File: + - ~/data/config/secrets/.secret.yaml + +- Edit `.secret.yaml` with `edit_secret.sh` + +```yaml +# ~/data/config/secrets/.secret.yaml +# LLDAP: +LLDAP_DATABASE_URL: postgres://ldap:$PASSWORD@postgresql.ilnmors.internal/ldap_db?sslmode=verify-full&sslrootcert=/etc/ssl/ldap/root_ca.crt # $PASSWORD=openssl rand -base64 32 value and it should be encoded as URL + +LLDAP_LDAP_USER_PASSWORD: $PASSWORD + +LLDAP_KEY_SEED: "$(LC_ALL=C tr -dc 'A-Za-z0-9!#%&()*+,-./:;<=>?@[\]^_{|}~' ?@[\]^_{|}~' lldap container executes as 1000:1000(lldap:lldap) permission in container. It is mapped host's 100999. Therefore, directories have to have ACL via `setfacl` + +#### Add new domain in BIND + +Following [here](../06_network/06_03_net_bind.md). + +- net server +- file: ~/data/containers/bind/lib/db.ilnmors.internal + +```ini +# ... +ldap IN CNAME auth.ilnmors.internal. +# ... +``` + +```bash +# Adguard container has Requires=bind.service. When it restarted, then Adguard also restarted. +systemctl --user restart bind +``` + +#### Add new database and user + +- Following [here](../08_development/08_02_dev_postgresql.md). LLDAP can use postgresql (Basic is SQLite). +- dev server + +```bash +podman exec -it -u postgres postgresql psql -U postgres +> # Create user and database +> CREATE USER ldap WITH PASSWORD '$POSTGRES_LDAP_PASSWORD'; +> CREATE DATABASE ldap_db; +> ALTER DATABASE ldap_db OWNER TO ldap; +> \du +> \l +``` + +#### Add information in caddy-auth + +- Following [here](./07_03_auth_main_caddy.md). +- auth server +- File: ~/data/containers/caddy-auth/etc/Caddyfile + +```ini +ldap.ilnmors.internal { + import internal_tls + import crowdsec_log + reverse_proxy host.containers.internal:17170 +} +``` + +```bash +# fix inconsistencies +podman exec caddy-auth caddy fmt --overwrite /etc/caddy/Caddyfile +# After Caddyfile setting is changed use this command. 
+podman exec caddy-auth caddy reload --config /etc/caddy/Caddyfile +``` + +#### Certificates configuration + +- File: ~/data/containers/certs/root_ca.crt + +- ACME setting (OPNsense) + - Services:ACME Client:Certificates - Certificates - \[+\] + - Common Name: ldap.ilnmors.internal + - Description: ldap + - ACME Account: ldap.ilnmors.internal + > Even though provisioner's name includes `@`, it has to use as `.`. + > + > i.e. `acme@ilnmors.internal` > `acme.ilnmors.internal` + - Challenge Type: ilnmors.internal-dns-01-challenge + - \[\*\] Auto Renewal + - Automations: ldap-auto-acme, ldap-auto-restart + +- Automations (OPNsense) + - Services:ACME Client:Automations - Automation - \[+\] + - Name: ldap-auto-acme / ldap-auto-reload + - Description: ldap acme crt issue / reload ldap after crt is issued + - Run Command: Upload certificate via SFTP / Remote command via SSH + - SFTP Host: ldap.ilnmors.internal + - Username: auth + - Identity Type: ed25519 + - Remote Path(SFTP): /home/auth/data/containers/ldap/certs + - Command(SSH): setfacl -m m::r /home/auth/data/containers/ldap/certs/ldap.ilnmors.internal/* && systemctl --user restart ldap.service + - `Show Identity` + > Copy Required parameters `ssh-ed25519 ~~~ root@opnsense.ilnmors.internal` + > + > Add parameters in net server's ~/.ssh/authorized_keys + - `Test Connect` and `Save` + +- SSH command will be applied after postgresql start. 
+ +### Podman Image + +```bash +podman pull lldap/lldap:v0.6.2 # Do not use latest version to management +``` + +### Configuration file + +- file: + - ~/data/containers/ldap/certs/root_ca.crt + - ~/data/containers/ldap/data/lldap_config.toml + +### Initiating + +```bash +podman run --rm \ +--secret LLDAP_DATABASE_URL,type=env \ +--secret LLDAP_KEY_SEED,type=env \ +--secret LLDAP_JWT_SECRET,type=env \ +--secret LLDAP_LDAP_USER_PASSWORD,type=env \ +-e TZ="Asia/Seoul" \ +-e LLDAP_LDAP_BASE_DN="dc=ilnmors,dc=internal" \ +-v "$HOME"/data/containers/ldap/data:/data:rw \ +-v "$HOME"/data/containers/ldap/certs:/etc/ssl/ldap:ro \ +lldap/lldap:v0.6.2 +# `Ctrl + C` exit + +podman secret rm LLDAP_LDAP_USER_PASSWORD +``` + +### Quadlet + +- File: + - ~/data/config/containers/ldap/ldap.container + +```ini +# ~/data/config/containers/ldap/ldap.container +[Quadlet] +DefaultDependencies=false + +[Unit] +Description=LDAP + +After=caddy.service +Wants=caddy.service + +[Service] +ExecStartPre=%h/data/config/scripts/wait-for-it.sh -h postgresql.ilnmors.internal -p 5432 -t 0 +ExecStartPre=sleep 5 + +[Container] + +Image=lldap/lldap:v0.6.2 + +ContainerName=ldap + +# For LDAPS - 636 > 6360 iptables +PublishPort=6360:6360/tcp +# Web UI +PublishPort=17170:17170/tcp + + +Volume=%h/data/containers/ldap/data:/data:rw +Volume=%h/data/containers/ldap/certs:/etc/ssl/ldap:ro + +# Default +Environment="TZ=Asia/Seoul" + +# Domain +Environment="LLDAP_LDAP_BASE_DN=dc=ilnmors,dc=internal" + +# LDAPS +Environment="LLDAP_LDAPS_OPTIONS__ENABLED=true" +Environment="LLDAP_LDAPS_OPTIONS__CERT_FILE=/etc/ssl/ldap/ldap.ilnmors.internal/fullchain.pem" +Environment="LLDAP_LDAPS_OPTIONS__KEY_FILE=/etc/ssl/ldap/ldap.ilnmors.internal/key.pem" + +# SMTP options > you can set all of these at the /data/config.toml instead of Environment +# Only `LLDAP_SMTP_OPTIONS__PASSWORD` will be injected by secret +# LLDAP_SMTP_OPTIONS__ENABLE_PASSWORD_RESET=true +# LLDAP_SMTP_OPTIONS__SERVER=smtp.example.com +# 
LLDAP_SMTP_OPTIONS__PORT=465 +# LLDAP_SMTP_OPTIONS__SMTP_ENCRYPTION=TLS +# LLDAP_SMTP_OPTIONS__USER=no-reply@example.com +# LLDAP_SMTP_OPTIONS__PASSWORD=PasswordGoesHere +# LLDAP_SMTP_OPTIONS__FROM=no-reply +# LLDAP_SMTP_OPTIONS__TO=admin + +# Database +Secret=LLDAP_DATABASE_URL,type=env + +# Secrets +Secret=LLDAP_KEY_SEED,type=env +Secret=LLDAP_JWT_SECRET,type=env + +Label=diun.enable=true +Label=diun.watch_repo=true + +[Install] +WantedBy=default.target +``` + +#### Create systemd `.service` file + +```bash +# linger has to be activated +ln -s ~/data/config/containers/ldap/ldap.container ~/.config/containers/systemd/ldap.container + +systemctl --user daemon-reload +``` + +#### Enable and start service + +```bash +systemctl --user start ldap.service +``` + +### DB backup + +- Following [here](../08_development/08_02_dev_postgresql.md). +- dev server + +```bash +systemctl --user enable --now postgresql-data-backup@ldap.timer +``` + +> The data saved in postgresql are all LDAP data including all users, groups and hashed passwords. However, LLDAP_KEY_SEED and LLDAP_JWT_SECRET are not saved in postgresql. It is for container itself and it is used as Environment value in the container. + +### Configuration + +#### Access web UI and Login + +- URL: https://ldap.ilnmors.internal +- ID: admin +- PW: $LLDAP_LDAP_USER_PASSWORD + +#### Create the groups + +- Groups - \[\+\] Create a group + - Group: admins + - Group: users +#### Create the authelia user + +- Users: \[\+\] Create a user + - Username (cn; uid): authelia + - Display name: Authelia + - First Name: Authelia + - Last Name (sn): Service + - Email (mail): authelia@ilnmors.internal + - Password: "$(openssl rand -base64 32)" +- lldap_strict_readonly \[Add to group\] + - This group allow search authority. 
+> Save the password in .secret.yaml
+
+#### Create the normal user
+
+- Users: \[\+\] Create a user
+  - Username (cn; uid): user
+  - First Name: John
+  - Last Name (sn): Doe
+  - Email (mail): john_doe@ilnmors.internal
+  - Password: "$PASSWORD"
+- (admins|users) \[Add to group\]
+
+> Custom schema in `User schema`, `Group schema` doesn't need to be added. This is an advanced function for adding additional values such as `identity number` or `phone number`. The hardcoded schema — that is, the basic schema that lldap provides — is enough to use Authelia.
+
+> After all these steps, you can now integrate Authelia for SSO.
+
+### Usage of LDAP
+
+#### Service Bind
+
+LDAP calls `login` a Bind. When authelia Binds to the LDAP server, it gets the authority to search via the `lldap_strict_readonly` group.
+
+#### Search
+
+Because the authelia account has the authority to search, it can send search queries.
+
+##### Flow of search
+
+- Client (authelia) sends the query
+  - `uid=user in dc=ilnmors,dc=internal`
+- LDAP server searches for the DN of the entry
+  - `uid=user,ou=people,dc=ilnmors,dc=internal`
+- LDAP sends the DN to the Client (authelia)
+
+### Authelia's work flow
+
+#### First login
+
+##### User login query
+
+The user tries to log in on the login page of Authelia.
+
+- id: user
+- password: 1234
+
+##### Service Bind (Bind and search)
+
+authelia binds to the LLDAP server based on the information in configuration.yml.
+
+- dn: authelia
+- password: authelia's password
+
+##### Search
+
+authelia sends the query to LLDAP after bind.
+- `uid=user in dc=ilnmors,dc=internal`
+
+##### Request
+
+LLDAP server searches for the entry and sends the DN information back to authelia.
+
+- `uid=user,ou=people,dc=ilnmors,dc=internal`
+
+#### Verify the user login (Second login)
+
+##### User Bind (Bind only)
+
+authelia tries to bind to the LLDAP server based on the information that the user input.
+ +- dn: requested uid +- password: 1234 + +##### Verification from LLDAP + +LLDAP verify the password from authelia with its hash value saved in LLDAP's database. + +##### Request + +LLDAP server sends the result as `Success` or `Fail`. + +> Search authority is basic authority of user who binds to LDAP server. It is just the way to check success or fail bind is the charge of Authelia. \ No newline at end of file diff --git a/docs/archives/2025-12/07_authorization/07_05_auth_authelia.md b/docs/archives/2025-12/07_authorization/07_05_auth_authelia.md new file mode 100644 index 0000000..3055a95 --- /dev/null +++ b/docs/archives/2025-12/07_authorization/07_05_auth_authelia.md @@ -0,0 +1,651 @@ +Tags: #os, #configuration, #network, #virtualization, #container, #security, #authentication, #authorization, #sso + +## Authelia + +Authelia is one of the open source authentication and authorization server which can management IAM(Identity and Access Management). It supports SSO and even OpenID Connect 1.0 Provider (idP) on OAuth 2.0. Authelia uses its backend database as the file or LDAP server. LLDAP server is this backend database for authelia in this homelab. + +### Secret management + +File: + - ~/data/config/secrets/.secret.yaml + +- Edit `.secret.yaml` with `edit_secret.sh` + +```yaml +# ~/data/config/secrets/.secret.yaml +# Authelia: +AUTHELIA_JWT_SECRET: "$(LC_ALL=C tr -dc 'A-Za-z0-9!#%&()*+,-./:;<=>?@[\]^_{|}~' ?@[\]^_{|}~' ?@[\]^_{|}~' authelia container executes as 0:0(root:root) permission in container. It is mapped host's UID. Therefore, directories don't have to have ACL via `setfacl` + +#### Add new domain in BIND + +Following [here](../06_network/06_03_net_bind.md). + +- net server +- file: ~/data/containers/bind/lib/db.ilnmors.internal + +```ini +# ... +authelia IN CNAME auth.ilnmors.internal. +# ... +``` + +```bash +# Adguard container has Requires=bind.service. When it restarted, then Adguard also restarted. 
+systemctl --user restart bind +``` + +#### Add new database and user + +- Following [here](../08_development/08_02_dev_postgresql.md). LLDAP can use postgresql (Basic is SQLite). +- dev server + +```bash +podman exec -it -u postgres postgresql psql -U postgres +> # Create user and database +> CREATE USER authelia WITH PASSWORD '$POSTGRES_AUTHELIA_PASSWORD'; +> CREATE DATABASE authelia_db; +> ALTER DATABASE authelia_db OWNER TO authelia; +> \du +> \l +``` + +#### Add information in caddy-auth + +- Following [here](./07_03_auth_main_caddy.md). +- auth server +- File: ~/data/containers/caddy-auth/etc/Caddyfile +```ini +authelia.ilnmors.com { + import crowdsec_log + route { + crowdsec + reverse_proxy host.containers.internal:9091 + } +} + +authelia.ilnmors.internal { + import internal_tls + import crowdsec_log + route { + crowdsec + reverse_proxy host.containers.internal:9091 + } +} +``` + +```bash +# fix inconsistencies +podman exec caddy-auth caddy fmt --overwrite /etc/caddy/Caddyfile +# After Caddyfile setting is changed use this command. +podman exec caddy-auth caddy reload --config /etc/caddy/Caddyfile +``` + +### Podman Image + +```bash +podman pull authelia/authelia:4.39.13 # Do not use latest version to management +``` + +### Configuration file + +- file: + - ~/data/containers/authelia/certs/root_ca.crt + - ~/data/containers/authelia/config/configuration.yml + +#### configuration.yml + +```yaml +# authelia configuration.yml +--- +# certificates setting +certificates_directory: '/etc/ssl/authelia/' + +# them setting - light, dark, grey, auto. +theme: 'auto' + +# Server configuration +server: + # TLS will be applied on caddy + address: 'tcp://:9091/' + +# Log configuration +log: + level: 'debug' + #file_path: 'path/of/log/file' - without this option, using stdout + +# TOTP configuration +totp: + # issure option is for 2FA app. It works as identifier. 
"My homelab' or 'ilnmors.internal', 'Authelia - ilnmors' + issuer: 'ilnmors.internal' + +# Identity validation confituration +identity_validation: + reset_password: + jwt_secret: '' # $AUTHELIA_IDENTITY_VALIDATION_RESET_PASSWORD_JWT_SECRET_FILE + +# Authentication backend provider configuration +authentication_backend: + ldap: + # ldaps uses 636 -> NAT automatically change port 636 in output packet -> 6360 which lldap server uses. + address: 'ldaps://ldap.ilnmors.internal' + implementation: 'lldap' + # tls configruation, it uses certificates_directory's /etc/ssl/authelia/root_ca.crt + tls: + server_name: 'ldap.ilnmors.internal' + skip_verify: false + # LLDAP base DN + base_dn: 'dc=ilnmors,dc=internal' + additional_users_dn: 'ou=people' + additional_groups_dn: 'ou=groups' + # LLDAP filters + users_filter: '(&(|({username_attribute}={input})({mail_attribute}={input}))(objectClass=person))' + groups_filter: '(&(member={dn})(objectClass=groupOfNames))' + # LLDAP bind account configuration + user: 'uid=authelia,ou=people,dc=ilnmors,dc=internal' + password: '' # $AUTHELIA_AUTHENTICATION_BACKEND_LDAP_PASSWORD_FILE + # LLDAP schema mapping + attributes: + username: 'uid' + display_name: 'displayName' + mail: 'mail' + group_name: 'cn' + +# Access control configuration +access_control: + default_policy: 'deny' + rules: + # authelia portal + - domain: 'authelia.ilnmors.internal' + policy: 'bypass' + - domain: 'authelia.ilnmors.com' + policy: 'bypass' + +# Session provider configuration +session: + secret: '' # $AUTHELIA_SESSION_SECRET_FILE + expiration: '24 hours' # Session maintains for 24 hours + inactivity: '2 hours' # Session maintains for 2 hours without actions + cookies: + - name: 'authelia_private_session' + domain: 'ilnmors.internal' + authelia_url: 'https://authelia.ilnmors.internal' + same_site: 'lax' + - name: 'authelia_public_session' + domain: 'ilnmors.com' + authelia_url: 'https://authelia.ilnmors.com' + same_site: 'lax' + +# This authelia doesn't use Redis. 
+ +# Storage provider configuration +storage: + encryption_key: '' # $AUTHELIA_STORAGE_ENCRYPTION_KEY_FILE + postgres: + address: 'tcp://postgresql.ilnmors.internal:5432' + database: 'authelia_db' + username: 'authelia' + password: '' # $AUTHELIA_STORAGE_POSTGRES_PASSWORD_FILE + tls: + server_name: 'postgresql.ilnmors.internal' + skip_verify: false + +# Notification provider +notifier: + filesystem: + filename: '/config/notification.txt' +# Following goal, After `Postfix` and `Dovecot` setting. +#smtp: + #address: 'smtp.ilnmors.internal' + +# OIDC preperation +# Identity provisioner configuration +#identity_providers: +# oidc: +# hmac_secret: '' # $AUTHELIA_IDENTITY_PROVIDERS_OIDC_HMAC_SECRET_FILE +# jwks: +# - algorithm: 'RS256' +# use: 'sig' +# key: {{/* {{ secret "/run/secrets/AUTHELIA_JWKS_RS256" | mindent 10 "|" | msquote }} /*}} +# - algorithm: 'ES256' +# use: 'sig' +# key: {{/* {{ secret "/run/secrets/AUTHELIA_JWKS_ES256" | mindent 10 "|" | msquote }} /*}} +# clients: +... +``` + +### Quadlet + +- File: + - ~/data/config/containers/authelia/authelia.container + +```ini +# ~/data/config/containers/authelia/authelia.container +[Quadlet] +DefaultDependencies=false + +[Unit] +Description=Authelia + +After=caddy.service +Wants=caddy.service +After=ldap.service +Requires=ldap.service + +[Service] +ExecStartPre=%h/data/config/scripts/wait-for-it.sh -h localhost -p 6360 -t 0 +ExecStartPre=sleep 5 + +[Container] + +Image=authelia/authelia:4.39.13 + +ContainerName=authelia + +AddHost=ldap.ilnmors.internal:host-gateway + +# Web UI +PublishPort=9091:9091/tcp + + +Volume=%h/data/containers/authelia/config:/config:rw +Volume=%h/data/containers/ldap/certs:/etc/ssl/authelia:ro + +# Default +Environment="TZ=Asia/Seoul" +# Enable Go template engine +# !CAUTION! +# If this environment were enabled, you would have to use {{/* ... /*}} for {{ go_filter }} options. Go engine always processes its own grammar first. 
+Environment="X_AUTHELIA_CONFIG_FILTERS=template" + +# Encryption + +## JWT +Environment="AUTHELIA_IDENTITY_VALIDATION_RESET_PASSWORD_JWT_SECRET_FILE=/run/secrets/AUTHELIA_JWT_SECRET" + +Secret=AUTHELIA_JWT_SECRET,target=/run/secrets/AUTHELIA_JWT_SECRET + +## Session +Environment="AUTHELIA_SESSION_SECRET_FILE=/run/secrets/AUTHELIA_SESSION_SECRET" + +Secret=AUTHELIA_SESSION_SECRET,target=/run/secrets/AUTHELIA_SESSION_SECRET + +## Storage +Environment="AUTHELIA_STORAGE_ENCRYPTION_KEY_FILE=/run/secrets/AUTHELIA_STORAGE_SECRET" + +Secret=AUTHELIA_STORAGE_SECRET,target=/run/secrets/AUTHELIA_STORAGE_SECRET + +# OIDC (HMAC, JWKS) + +# Environment="AUTHELIA_IDENTITY_PROVIDERS_OIDC_HMAC_SECRET_FILE=/run/secrets/AUTHELIA_HMAC_SECRET" + +# Secret=AUTHELIA_HMAC_SECRET,target=/run/secrets/AUTHELIA_HMAC_SECRET + +# Secret=AUTHELIA_JWKS_RS256,target=/run/secrets/AUTHELIA_JWKS_RS256 + +# Secret=AUTHELIA_JWKS_ES256,target=/run/secrets/AUTHELIA_JWKS_ES256 + +# LDAP +Environment="AUTHELIA_AUTHENTICATION_BACKEND_LDAP_PASSWORD_FILE=/run/secrets/AUTHELIA_LDAP_PASSWORD" + +Secret=AUTHELIA_LDAP_PASSWORD,target=/run/secrets/AUTHELIA_LDAP_PASSWORD + +# Database +Environment="AUTHELIA_STORAGE_POSTGRES_PASSWORD_FILE=/run/secrets/POSTGRES_AUTHELIA_PASSWORD" + +Secret=POSTGRES_AUTHELIA_PASSWORD,target=/run/secrets/POSTGRES_AUTHELIA_PASSWORD + +Label=diun.enable=true +Label=diun.watch_repo=true + +[Install] +WantedBy=default.target +``` + +#### Create systemd `.service` file + +```bash +# linger has to be activated +ln -s ~/data/config/containers/authelia/authelia.container ~/.config/containers/systemd/authelia.container + +systemctl --user daemon-reload +``` + +#### Enable and start service + +```bash +systemctl --user start authelia.service +``` + +### DB backup + +- Following [here](../08_development/08_02_dev_postgresql.md). 
+- dev server + +```bash +systemctl --user enable --now postgresql-data-backup@authelia.timer +``` + +### Verification + +- Web UI: + - https://authelia.ilnmors.internal + - https://authelia.ilnmors.com + +- Login with LLDAP's User + - Login: LLDAP User + - Password: LLDAP Password + +- Check the session + +### Apply Forward_Auth + +Basically, OIDC will be used in this homelab for the application which supports OIDC natively. However, some applications don't support OIDC or even login system. Therfore, Forward_Auth function in Caddy is used for authentication of those services. + +#### Configuration files + +- File: + - ~/data/containers/caddy-auth/etc/Caddyfile + - ~/data/containers/authelia/config/configuration.yml + +#### Caddy + +```ini +# Caddyfile + +# ... + +# Foward auth test in local +test-admin.ilnmors.com { + import crowdsec_log + route { + crowdsec + forward_auth host.containers.internal:9091 { + # Authelia Forward Auth endpoint URI + uri /api/authz/forward-auth + copy_headers Remote-User Remote-Groups Remote-Email Remote-Name + } + # back end service + root * /usr/share/caddy + file_server + } +} + +# Forward auth test in local +test-default.ilnmors.com { + import crowdsec_log + route { + crowdsec + forward_auth host.containers.internal:9091 { + # Authelia Forward Auth endpoint URI + uri /api/authz/forward-auth + copy_headers Remote-User Remote-Groups Remote-Email Remote-Name + } + # back end service + root * /usr/share/caddy + file_server + } +} + +# ... +``` + +#### Authelia + +```yaml +# configuration.yml + +# ... 
+# Access control configuration
+access_control:
+  default_policy: 'deny'
+  rules:
+    # authelia portal
+    - domain: 'authelia.ilnmors.internal'
+      policy: 'bypass'
+    - domain: 'authelia.ilnmors.com'
+      policy: 'bypass'
+    - domain: 'test-admin.ilnmors.com'
+      policy: 'one_factor'
+      # Access control for Forward_Auth
+      subject:
+        - 'group:admins'
+    - domain: 'test-default.ilnmors.com'
+      policy: 'one_factor'
+      # Access control for Forward_Auth
+      subject:
+        - 'group:admins'
+        - 'group:users'
+
+session:
+  secret: '' # $AUTHELIA_SESSION_SECRET_FILE
+  cookies:
+    - name: 'authelia_internal_session'
+      domain: 'ilnmors.internal'
+      authelia_url: 'https://authelia.ilnmors.internal'
+      # When login succeeds, redirect
+      # default_redirection_url: 'https://authelia.ilnmors.internal'
+      same_site: 'lax'
+    - name: 'authelia_com_session'
+      domain: 'ilnmors.com'
+      authelia_url: 'https://authelia.ilnmors.com'
+      # default_redirection_url: 'https://authelia.ilnmors.com'
+      same_site: 'lax'
+# ...
+```
+
+#### Verification
+
+- https://test-admin.ilnmors.com
+  - user_test (group: users): 403 Forbidden
+  - admin_test (group: admins): File server
+- https://test-default.ilnmors.com
+  - admin_test (passes straight through): File server
+  - user_test (session is initiated): File server
+
+---
+
+### OIDC
+
+```bash
+openssl rand -base64 32
+# Add this value to .secret.yaml
+# APP_OIDC_KEY: secret value
+# Make the hash value of this secret
+# Whether it is needed depends on the app
+
+extract_secret.sh .secret.yaml -f APP_OIDC_KEY | podman secret create APP_OIDC_KEY -
+
+# Copy this value and paste it next to client_secret: in configuration.yml
+
+# Hash value generate
+extract_secret.sh .secret.yaml -f APP_OIDC_KEY
+> secret value
+
+podman run --rm \
+authelia/authelia:4.39.13 \
+authelia crypto hash generate --password secret_value --no-confirm
+
+# Hash value validate
+podman run --rm authelia/authelia:4.39.13 authelia crypto hash validate --password secret_value -- 'HASH_VALUE'
+> The password matches the digest.
+```
+
+##### Authelia
+
+- Remove the OIDC annotations (comment marks) in the container file
+
+```ini
+# ~/data/config/containers/authelia/authelia.container
+# ...
+# OIDC (HMAC, JWKS)
+
+Environment="AUTHELIA_IDENTITY_PROVIDERS_OIDC_HMAC_SECRET_FILE=/run/secrets/AUTHELIA_HMAC_SECRET"
+
+Secret=AUTHELIA_HMAC_SECRET,target=/run/secrets/AUTHELIA_HMAC_SECRET
+
+Secret=AUTHELIA_JWKS_RS256,target=/run/secrets/AUTHELIA_JWKS_RS256
+
+Secret=AUTHELIA_JWKS_ES256,target=/run/secrets/AUTHELIA_JWKS_ES256
+# Remove annotation
+
+# ...
+```
+
+- Fix the configuration.yml
+
+```yaml
+---
+# certificates setting
+
+# ...
+
+# Identity provisioner configuration
+identity_providers:
+  oidc:
+    hmac_secret: '' # $AUTHELIA_IDENTITY_PROVIDERS_OIDC_HMAC_SECRET_FILE
+    jwks:
+      - algorithm: 'RS256'
+        use: 'sig'
+        key: {{ secret "/run/secrets/AUTHELIA_JWKS_RS256" | mindent 10 "|" | msquote }}
+      - algorithm: 'ES256'
+        use: 'sig'
+        key: {{ secret "/run/secrets/AUTHELIA_JWKS_ES256" | mindent 10 "|" | msquote }}
+    clients:
+      - client_id: 'app'
+        client_name: 'app'
+        # It depends on the application
+        client_secret: 'HASH_VALUE'
+        # If there is no client secret, `public` should be `true`
+        public: \[ true | false \]
+        response_types:
+          - 'code'
+        scopes:
+          - 'openid'
+          - 'profile'
+          - 'email'
+          - 'groups'
+        redirect_uris:
+          - 'https://app.ilnmors.com/oauth2/callback'
+          - 'https://app.ilnmors.com/'
+        token_endpoint_auth_method: 'client_secret_post | client_secret_basic'
+        authorization_policy: 'one_factor'
+...
+```
+
+```bash
+# restart the service
+systemctl --user daemon-reload
+systemctl --user restart authelia
+```
+
+#### Caddy
+
+- Add the reverse proxy.
+
+```ini
+# Caddyfile
+# ~/data/containers/caddy-auth/etc/Caddyfile
+
+# ...
+app.ilnmors.com {
+    import crowdsec_log
+    route {
+        crowdsec
+        # The X-Forwarded-Host domain doesn't need to be applied in DNS
+        header_up X-Forwarded-Host app.app.ilnmors.internal
+        reverse_proxy app.ilnmors.internal
+    }
+}
+# ...
+``` + +```ini +# app server's sidecar caddy + +app.ilnmors.internal +{ + import internal_tls + @notes header X-Forwarded-Host app.app.ilnmors.internal + reverse_proxy @notes host.containers.internal:3000 +} +``` + +- Fix Caddyfile format and restart service + +```bash +# fix inconsistencies +podman exec caddy-auth caddy fmt --overwrite /etc/caddy/Caddyfile +# After Caddyfile setting is changed use this command. +systemctl --user daemon-reload +systemctl --user restart caddy-auth +``` + +#### Verification + +- Following. \ No newline at end of file diff --git a/docs/archives/2025-12/08_development/08_01_dev_vm.md b/docs/archives/2025-12/08_development/08_01_dev_vm.md new file mode 100644 index 0000000..a687040 --- /dev/null +++ b/docs/archives/2025-12/08_development/08_01_dev_vm.md @@ -0,0 +1,149 @@ +Tags: #os, #configuration, #development, #virtualization + +## Preparation + +### Set DHCP reservation and DNS record + +#### Set DHCP reservation on KEA DHCP in OPNsense + +Following [here](05_07_opnsense_kea.md) + +- Services:Kea DHCP:Kea DHCPv4:Reservations - \[+\] + - Subnet: 192.168.10.0/24 + - IP address: 192.168.10.13 + - MAC address: 0A:49:6E:4D:03:00 + - Hostname: dev + - Description: dev + - `save` + +#### Set DNS records in BIND + +Following [here](../06_network/06_03_net_bind.md). + +- net server +- file: + - ~/data/containers/bind/lib/db.ilnmors.internal + - ~/data/containers/bind/lib/db.10.168.192.in-addr.arpa + +```ini +# db.ilnmors.internal +# ... +dev IN A 192.168.10.13 +# ... +# db.10.168.192.in-addr.arpa +# ... +13 IN PTR dev.ilnmors.internal. +# ... +``` + +```bash +# Adguard container has Requires=bind.service. When it restarted, then Adguard also restarted. 
+systemctl --user restart bind +``` + +### Create VM template + +- ~/data/config/scripts/dev.sh + +```bash +virt-install \ +--boot uefi \ +--name dev \ +--os-variant debian13 \ +--vcpus 2 \ +--memory 6144 \ +--location /var/lib/libvirt/images/debian-13.0.0-amd64-netinst.iso \ # For serial installing, use `--location` instead of `--cdrom` +--disk pool=vm-images,size=258,format=qcow2,discard=unmap \ +--network network=ovs-lan-net,portgroup=vlan10-access,model=virtio,mac=0A:49:6E:4D:03:00 \ # Use designated ovs port group +--graphics none \ +--console pty,target_type=serial \ +--extra-args "console=ttyS0,115200" +# After enter this command, then the console start automatically +# Remove all annotation before you make the sh file. +``` + +### Debian installing + +- Following [here](../03_common/03_01_debian_configuration.md) to install Debian. +- Debian installer supports serial mode regardless getty@ttyS0 service is enabled or not. +- Following [here](../03_common/03_02_iptables.md) to set iptables. +- Following [here](../03_common/03_04_crowdsec.md) to set CrowdSec + +#### Serial console setting + +After installation, use `ctrl + ]` to exit console. Before setting getty@ttyS0, you can't use serial console to access VM. Therefore, use IP address set on installation, and connect net server via ssh first, following the step to enable the getty. + +### Modify VM template settings + +After getty setting, shutdown dev vm with `shutdown` in VM or `sudo virsh shutdown dev` in hypervisor to turn off vm first. + +```bash +virsh edit dev +``` + +```xml + +... 
+ + + 1024 + + + + +``` + +```bash +virsh dumpxml dev > ~/data/config/vms/dumps/dev.xml +virsh start dev && virsh console dev +# Start dev server with console +``` + +### Common setting + +- dev.service + +```ini +# ~/data/config/services/dev.service +# ~/.config/systemd/user/dev.service +[Unit] +Description=dev Auto Booting +After=network-online.target +Wants=network-online.target +Requires=opnsense.service + +[Service] +Type=oneshot + +# Maintain status as active +RemainAfterExit=yes + +# CrowdSec should be set +ExecStartPre=%h/data/config/scripts/wait-for-it.sh 192.168.10.1:8080 -t 0 +ExecStartPre=%h/data/config/scripts/wait-for-it.sh 192.168.10.11:53 -t 0 +ExecStartPre=%h/data/config/scripts/wait-for-it.sh 192.168.10.12:9000 -t 0 + +ExecStartPre=/bin/bash -c "sleep 15" + +# Run the service +ExecStart=/usr/bin/virsh -c qemu:///system start dev + +# Stop the service +ExecStop=/usr/bin/virsh -c qemu:///system shutdown dev + +[Install] +WantedBy=default.target +``` + +```bash +ln -s ~/data/config/services/dev.service ~/.config/systemd/user/dev.service + +systemctl --user daemon-reload +systemctl --user enable dev.service +systemctl --user start dev.service +``` \ No newline at end of file diff --git a/docs/archives/2025-12/08_development/08_02_dev_postgresql.md b/docs/archives/2025-12/08_development/08_02_dev_postgresql.md new file mode 100644 index 0000000..314d954 --- /dev/null +++ b/docs/archives/2025-12/08_development/08_02_dev_postgresql.md @@ -0,0 +1,534 @@ +Tags: #os, #configuration, #virtualization, #container, #database + +## postgresql + +Postgresql is one of most famous open source RDBMS (Relational Database Management System). RDBMS saves the data as table which is based on row and coloumn. This uses SQL (Structure Quarey Language) to manage a lot of data and guarantee integrity of data. 
+
+### Secret management
+
+All postgresql transactions that need root permission will be conducted on the local container via podman exec (without another Pod) in this project. However, the environment value `POSTGRES_PASSWORD` should be set when the database is initiated. The root (`postgres`) account's access method is then set to `trust`, which means only local access is allowed. This information will be set in `pg_hba.conf` like below.
+
+- `local all postgres trust`
+- `hostssl all all 192.168.10.x/32 scram-sha-256` (The server that uses postgresql)
+- `host all all 127.0.0.1/32 scram-sha-256`
+
+#### Secret
+
+- File:
+  - ~/data/config/secrets/.secret.yaml
+
+- Edit `.secret.yaml` with `edit_secret.sh`
+
+
+```yaml
+# ~/data/config/secrets/.secret.yaml
+# postgresql
+POSTGRES_PASSWORD: secret
+```
+
+### Preparation
+
+#### iptables and firewall rules
+
+- Set iptables first, following [here](../03_common/03_02_iptables.md).
+  - 5432: auth, app
+- Set firewall rules first, following [here](../05_firewall/05_04_opnsense_rules.md).
+
+#### Create directory for container
+
+```bash
+mkdir -p ~/data/containers/postgresql
+chmod 700 ~/data/containers/postgresql
+setfacl -m d:g::0 ~/data/containers/postgresql
+setfacl -m d:o::0 ~/data/containers/postgresql
+setfacl -m u:dev:rwx ~/data/containers/postgresql
+setfacl -m u:100998:rwx ~/data/containers/postgresql
+setfacl -d -m u:dev:rwx ~/data/containers/postgresql
+setfacl -d -m u:100998:rwx ~/data/containers/postgresql
+mkdir ~/data/containers/postgresql/{backups,config,certs,data,initdb}
+
+# After generating the directories
+sudo find ~/data/containers/postgresql -type d -exec setfacl -m m::rwx {} \;
+```
+
+> The postgresql container executes with 999:999 (postgres:postgres) permission inside the container. It is mapped to the host's UID 100998. Therefore, the directories have to have ACLs via `setfacl`.
+
+#### Add new domain in BIND
+
+Following [here](../06_network/06_03_net_bind.md).
+ +- net server +- file: ~/data/containers/bind/lib/db.ilnmors.internal + +```ini +# ... +postgresql IN CNAME dev.ilnmors.internal. +# ... +``` + +```bash +# Adguard container has Requires=bind.service. When it restarted, then Adguard also restarted. +systemctl --user restart bind +``` + +### Podman Image + +```bash +podman pull postgres:18.0 # Do not use latest version to management +``` + +### Config file + +- File: + - ~/data/containers/postgresql/config/postgresql.conf + - ~/data/containers/postgresql/config/pg_hba.conf + + +```bash +# extracting config file +podman run --rm postgres:18.0 cat /usr/share/postgresql/postgresql.conf.sample > ~/data/containers/postgresql/config/postgresql.conf + +podman run --rm postgres:18.0 cat /usr/share/postgresql/18/pg_hba.conf.sample > ~/data/containers/postgresql/config/pg_hba.conf + +# Generate the password +openssl rand -base64 32 + +# Podman secret +extract_secret.sh ~/data/config/secrets/.secret.yaml -f "POSTGRES_PASSWORD" | podman secret create "POSTGRES_PASSWORD" - +``` + +> If there were the schema backup file, then go to the `Restore` section. 
+ +```ini +# ~/data/containers/postgresql/config/postgresql.conf + +# Add settings for extensions here + +# hba_file directory +hba_file = '/config/pg_hba.conf' + +# Listen_address +listen_addresses = '*' + +# listen_port +port = 5432 + +# SSL +ssl = on +ssl_ca_file = '/etc/ssl/postgresql/root_ca.crt' +ssl_cert_file = '/etc/ssl/postgresql/postgresql.ilnmors.internal/fullchain.pem' +ssl_key_file = '/etc/ssl/postgresql/postgresql.ilnmors.internal/key.pem' +ssl_ciphers = 'HIGH:!aNULL:!MD5' +ssl_prefer_server_ciphers = on + +# log +log_destination = 'stderr' +log_checkpoints = on +log_temp_files = 0 +log_min_duration_statement = 500 + +``` + +```ini +# ~/data/containers/postgresql/config/pg_hba.conf + +# Local host `trust` +local all all trust + +# Local connection (container) needs password (127.0.0.1 - container loopback) +host all all 127.0.0.1/32 scram-sha-256 +# Local connection (dev) needs password (169.254.1.2 - host-gateway) +# Maybe Grafana(SQLite), Uptime kuma(SQLite), Loki(BoltDB), Dovecot(LDAP or /> +#hostssl all all 169.254.1.2/32 scram-sha-256 + +# auth VM (Authentik, 192.168.10.12) +hostssl all all 192.168.10.12/32 scram-sha-256 + +# app VM (Applications, 192.168.10.14) +hostssl all all 192.168.10.14/32 scram-sha-256 + +# explicit deny +host all all 192.168.10.0/24 reject +``` + +#### Initiating (Restoring) + +- File: + - ~/data/config/scripts/postgresql/postgresql_init.sh + - ~/data/containers/postgresql/backups/postgresql-cluster_$(date "+%Y-%m-%d").sql + + +```bash +#!/bin/bash +# ~/data/config/scripts/postgresql/postgresql_init.sh [FILE_PATH] +set -e +DATA_PATH="$HOME/data/containers/postgresql/data" +FILE_PATH="$1" +VERSION=18 +FLAG="" + +# Check the PostgreSQL service + +if [ $(systemctl --user is-active postgresql) == "active" ]; then + echo "PostgreSQL should be terminated" + exit 1 +fi + +# Check the podman secret + +if [ -z "$(podman secret list | grep "POSTGRES_PASSWORD")" ]; then + echo "POSTGRES_PASSWORD has to be in podman secret" + exit 1 
+fi + +# Check the data path + +if [ -n "$(ls -A $DATA_PATH)" ]; then + echo "$DATA_PATH should be empty" + exit 1 +fi + +# Check the sql file + +if [ -z "$FILE_PATH" ]; then + echo "Initiating PostgreSQL" + FLAG="FALSE" +else + if [ ! -f "$FILE_PATH" -o ! -s "$FILE_PATH" -o -z "$(echo $FILE_PATH | grep "\.sql$")" ]; then + echo "Availavble .sql format file is needed" + exit 1 + fi + echo "Restoring PostgreSQL" + FLAG="TRUE" +fi + + + +# Initiating +podman run --rm \ +--secret POSTGRES_PASSWORD,type=env \ +-e TZ="Asia/Seoul" \ +-v "$DATA_PATH":/var/lib/postgresql:rw \ +postgres:18.0 \ +-C "port" || true + +# postgresql start +echo "Start postgresql service" +systemctl --user start postgresql + +# Restoring +if [ "$FLAG" == "TRUE" ]; then + while [ -z "$(systemctl --user status postgresql | grep "database system is ready to accept connections")" ]; do + sleep 1 + done + echo "Start restoring PostgreSQL" + cat "$FILE_PATH" | podman exec -i -u postgres postgresql psql -U postgres + echo "Finish restoring PostgreSQL" +fi + +exit 0 +``` +#### Certificates configuration + +- File: ~/data/containers/postgresql/certs/root_ca.crt + +- ACME setting (OPNsense) + - Services:ACME Client:Certificates - Certificates - \[+\] + - Common Name: postgresql.ilnmors.internal + - Description: postgresql + - ACME Account: acme.ilnmors.internal + > Even though provisioner's name includes `@`, it has to use as `.`. + > + > i.e. 
`acme@ilnmors.internal` > `acme.ilnmors.internal` + - Challenge Type: ilnmors.internal-dns-01-challenge + - \[\*\] Auto Renewal + - Automations: postgresql-auto-acme, postgresql-auto-restart + +- Automations (OPNsense) + - Services:ACME Client:Automations - Automation - \[+\] + - Name: postgresql-auto-acme / postgresql-auto-reload + - Description: postgresql acme crt issue / reload postgresql after crt is issued + - Run Command: Upload certificate via SFTP / Remote command via SSH + - SFTP Host: postgresql.ilnmors.internal + - Username: dev + - Identity Type: ed25519 + - Remote Path(SFTP): /home/dev/data/containers/postgresql/certs + - Command(SSH): setfacl -m m::r /home/dev/data/containers/postgresql/certs/postgresql.ilnmors.internal/* && podman exec -u postgres postgresql pg_ctl reload + - `Show Identity` + > Copy Required parameters `ssh-ed25519 ~~~ root@opnsense.ilnmors.internal` + > + > Add parameters in net server's ~/.ssh/authorized_keys + - `Test Connect` and `Save` + +- SSH command will be applied after postgresql start. 
+ +#### Quadlet + +- File: + - ~/data/config/containers/postgresql/postgresql.container + +```ini +# ~/data/config/containers/postgresql/postgresql.container +[Quadlet] +DefaultDependencies=false + +[Unit] +Description=PostgreSQL + +After=network-online.target +Wants=network-online.target + +[Container] +Image=postgres:18.0 + +ContainerName=postgresql + +PublishPort=5432:5432/tcp + +Volume=%h/data/containers/postgresql/data:/var/lib/postgresql:rw +Volume=%h/data/containers/postgresql/config:/config:ro +Volume=%h/data/containers/postgresql/backups:/backups:rw +Volume=%h/data/containers/postgresql/certs:/etc/ssl/postgresql:ro + +Environment="TZ=Asia/Seoul" + +Exec=postgres -c 'config_file=/config/postgresql.conf' + +Label=diun.enable=true +Label=diun.watch_repo=true + +[Install] +WantedBy=default.target +``` + +#### Create systemd `.service` file + +```bash +mkdir -p ~/.config/containers/systemd +chmod -R 700 ~/.config/containers/systemd + +ln -s ~/data/config/containers/postgresql/postgresql.container ~/.config/containers/systemd/postgresql.container + +systemctl --user daemon-reload +``` + +#### Enable and start service + +```bash +# Before start service, setfacl for certificates (before register automation) +setfacl -m m::r /home/dev/data/containers/postgresql/certs/postgresql.ilnmors.internal/* + +systemctl --user start postgresql.service +``` + +### Create user + +- Login to psql in the container + +```bash +podman exec -it -u postgres postgresql psql -U postgres +> # Create user and database +> CREATE USER $USER WITH PASSWORD 'password'; +> CREATE DATABASE $DB; +> ALTER DATABASE $DB OWNER TO $USER; +> \du +> \l + +# Whenever you modify the schema including user or database structure, conduct postgresql-cluster-backup.service +systemctl --user start postgresql-cluster-backup.service + + +> # If you want to change the password +> ALTER USER $USER WITH PASSWORD 'password'; +> # After this, update the .secret.yaml file and podman secret +``` + +### Backup + +#### 
Backup service + +- File: + - ~/data/config/services/postgresql/postgresql-cluster-backup.service + - ~/data/config/services/postgresql/postgresql-cluster-backup.timer + - ~/data/config/services/postgresql/postgresql-data-backup@.service + - ~/data/config/services/postgresql/postgresql-data-backup@.timer + +#### Cluster + +```ini +# ~/data/config/services/postgresql/postgresql-cluster-backup.service +# ~/.config/systemd/user/postgresql-cluster-backup.service +[Unit] +Description=PostgreSQL Cluster Backup Service +After=postgresql.service +BindsTo=postgresql.service + +[Service] +Type=oneshot + +# logging +StandardOutput=journal +StandardError=journal + +ExecStartPre=podman exec -u postgres postgresql sh -c "mkdir -p /backups/cluster" + +# Run the script +ExecStart=podman exec -u postgres postgresql sh -c "pg_dumpall -U postgres --schema-only | grep -v -E \"ROLE postgres\" > /backups/cluster/postgresql-cluster_$(date "+%%Y-%%m-%%d").sql" + +ExecStop=podman exec -u postgres postgresql sh -c "find /backups/cluster -maxdepth 1 -type f -mtime +7 -delete" +``` + +```ini +# ~/data/config/services/postgresql/postgresql-cluster-backup.timer +# ~/.config/systemd/user/postgresql-cluster-backup.timer +[Unit] +Description=Run PostgreSQL Cluster Backup service every day + +[Timer] +# Execute service after 1 min on booting +OnBootSec=1min + +# Execute service every day 00:00 +OnCalendar=*-*-* 00:00:00 +# Random time to postpone the timer +RandomizedDelaySec=15min +Persistent=true + +# When timer is activated, Service also starts. 
+Persistent=true + +[Install] +WantedBy=timers.target +``` + +##### Restore cluster + +- File: + - ~/data/config/scripts/postgresql/postgresql_init.sh + - ~/data/containers/postgresql/backups/cluster/postgresql-cluster_$(date "+%Y-%m-%d").sql + +- Use `postgresql_init.sh postgresql-cluster_$(date "+%Y-%m-%d").sql` command + +#### Data for each app + +```ini +# ~/data/config/services/postgresql/postgresql-data-backup@.service +# ~/.config/systemd/user/postgresql-data-backup@.service +[Unit] +Description=PostgreSQL Data %i Backup Service +After=postgresql.service +BindsTo=postgresql.service + +[Service] +Type=oneshot + +# logging +StandardOutput=journal +StandardError=journal + +ExecStartPre=podman exec -u postgres postgresql sh -c "mkdir -p /backups/%i" +# Run the script +ExecStart=podman exec -u postgres postgresql sh -c "pg_dump -U postgres -d %i_db --data-only > /backups/%i/postgresql-%i-data_$(date "+%%Y-%%m-%%d").sql" + +ExecStop=podman exec -u postgres postgresql sh -c "find "/backups/%i" -maxdepth 1 -type f -mtime +7 -delete" +``` + +```ini +# ~/data/config/services/postgresql/postgresql-data-backup@.timer +# ~/data/config/services/postgresql/postgresql-data-backup@.timer +[Unit] +Description=Run %i Data Backup service every day + +[Timer] +# Execute service after 1 min on booting +OnBootSec=1min + +# Execute service every day 00:00 +OnCalendar=*-*-* 00:00:00 +# Random time to postpone the timer +RandomizedDelaySec=15min +Persistent=true + +# When timer is activated, Service also starts. +Persistent=true + +[Install] +WantedBy=timers.target +``` + +##### Data restore + +- File: postgresql-app-data_$(date "+%Y-%m-%d").sql + +```bash +# The Schema must be needed. 
(cluster, DB and user) + +# DB owner's session terminate +# server where app is located +systemctl --user stop app.service +# Check session +# dev server +# Print the all session +podman exec -u postgres postgresql psql -U postgres -c "SELECT * from pg_stat_activity;" +> $TARGET_PID +# exit session +podman exec -u postgres postgresql psql -U postgres -c "SELECT pg_terminate_backend($TARGET_PID);" + +# Using psql +cat postgresql-app-data_$(date "+%Y-%m-%d").sql | podman exec -i -u postgres postgresql psql -U app +``` + +#### Register service + +```bash +# Register service +mkdir -p ~/.config/systemd/user && chmod 700 ~/.config/systemd/user + +ln -s ~/data/config/services/postgresql/postgresql-cluster-backup.service ~/.config/systemd/user/postgresql-cluster-backup.service + +ln -s ~/data/config/services/postgresql/postgresql-cluster-backup.timer ~/.config/systemd/user/postgresql-cluster-backup.timer + +ln -s ~/data/config/services/postgresql/postgresql-data-backup\@.service ~/.config/systemd/user/postgresql-data-backup\@.service + +ln -s ~/data/config/services/postgresql/postgresql-data-backup\@.timer ~/.config/systemd/user/postgresql-data-backup\@.timer + +systemctl --user daemon-reload + +# Start timer and enable +systemctl --user enable --now postgresql-cluster-backup.timer +systemctl --user enable --now postgresql-data-backup@app.timer +``` + +### Verification + +```bash +# Init database +postgresql_init.sh +# ... 
Start postgresql service + + # Create user and database +podman exec -it -u postgres postgresql psql -U postgres +> CREATE USER test WITH PASSWORD 'abc'; +> CREATE DATABASE test_db; +> ALTER DATABASE test_db OWNER TO test; +> \du +> \l +> \q + +# Backup service executes +systemctl --user start postgresql-cluster-backup.service + +# Stop and remove all data +systemctl --user stop postgresql +sudo find "/home/dev/data/containers/postgresql/data" -mindepth 1 -delete + +# Restore database +postgresql_init.sh ~/data/containers/backups/filename.sql + +# Check restoring +podman exec -it -u postgres postgresql psql -U postgres +> \du +> \l +``` + diff --git a/docs/archives/2025-12/08_development/08_03_dev_sidecar_caddy.md b/docs/archives/2025-12/08_development/08_03_dev_sidecar_caddy.md new file mode 100644 index 0000000..bacd56c --- /dev/null +++ b/docs/archives/2025-12/08_development/08_03_dev_sidecar_caddy.md @@ -0,0 +1,259 @@ +Tags: #os, #configuration, #network, #virtualization, #container, #security + +## Caddy - dev + +Caddy is an open source reverse proxy (web server) which can automatically obtain and apply TLS certificates from a CA via the ACME protocol. This Caddy will work as a sidecar caddy, so it only uses private TLS. It can only communicate with auth's main caddy. This means it doesn't need any module except the RFC2136 module, because the main caddy handles all WAF functionality. + +### Secret management + +- File: + - ~/data/config/secrets/.secret.yaml + +- Edit `.secret.yaml` with `edit_secret.sh` + +```yaml +# ~/data/config/secrets/.secret.yaml +# CADDY: +CADDY_ACME_KEY: acme-key_key_value (Only secret value) +``` + +```bash +# Podman secret +extract_secret.sh .secret.yaml -f CADDY_ACME_KEY | podman secret create CADDY_ACME_KEY - +``` + +### Preparation + +#### iptables and firewall rules + +- Set iptables first, following [here](../03_common/03_02_iptables.md).
+ - Limit access client as Caddy - auth ( -s 192.168.10.12/32 ) + - 443 > 2443 (iptables setting) +- Set firewall rules first, following [here](Latest/05_firewall/05_04_opnsense_rules.md). +#### Create directory for container + +```bash +mkdir -p ~/data/containers/caddy-dev/{etc,data} +chmod -R 700 ~/data/containers/caddy-dev +``` + +> Caddy container executes as 0:0(root:root) permission in container. It is mapped host's UID. Therefore, directories don't have to have ACL via `setfacl` + +#### Add new domain in BIND + +Following [here](../06_network/06_03_net_bind.md). + +- net server +- file: ~/data/containers/bind/lib/db.ilnmors.internal + +```ini +# ... +dev IN A 192.168.10.13 +# ... +``` + +```bash +# Adguard container has Requires=bind.service. When it restarted, then Adguard also restarted. +systemctl --user restart bind +``` + +### Podman Image + +#### Podman containerfile + +Caddy supports various module for it. rfc2136(nsupdate) module, Sidecar caddy only recieves the request from the main caddy. Therefore it doesn't need anymore. + +- file: + - ~/data/config/containers/caddy-dev/containerfile-caddy-2.10.2-dev + - ~/data/config/containers/caddy-dev/root_ca.crt + +```containerfile +FROM caddy:2.10.2-builder-alpine AS builder + +RUN xcaddy build \ +--with github.com/caddy-dns/rfc2136 + +FROM caddy:2.10.2 + +COPY --from=builder /usr/bin/caddy /usr/bin/caddy + +COPY ./root_ca.crt /usr/local/share/ca-certificates/root_ca.crt + +RUN update-ca-certificates +``` + +#### Podman image build + +```bash +podman build -t caddy:2.10.2-dev -f ~/data/config/containers/caddy-dev/containerfile-caddy-2.10.2-dev . && podman image prune -f +# Delete pure caddy and caddy-builder-alpine images after command above manually. +``` + +### Configuration files + +Caddyfile will be updated after Authelia setting + +```bash +# fix inconsistencies +podman exec caddy-dev caddy fmt --overwrite /etc/caddy/Caddyfile +# After Caddyfile setting is changed use this command. 
+podman exec caddy-dev caddy reload --config /etc/caddy/Caddyfile +``` + +- file: + - ~/data/containers/caddy-auth/etc/Caddyfile + - ~/data/containers/authelia/config/configuration.yml + - ~/data/containers/caddy-dev/etc/Caddyfile + +```ini +# Caddyfile +# ~/data/containers/caddy-auth/etc/Caddyfile + +# Forward Auth for other vms +(apply_forward_auth) { + forward_auth host.containers.internal:9091 { + uri /api/authz/forward-auth + copy_headers Remote-User Remote-Groups Remote-Email R> + } + reverse_proxy {args[0]} { + header_up Host {http.reverse_proxy.upstream.host} + # X-Forwarded-Host header contains original Host value + } +} + + +# ... +dev-test.ilnmors.com { + import crowdsec_log + route { + crowdsec + import apply_forward_auth https://dev.ilnmors.internal + } +} +# ... +``` + +```ini +# Caddyfile +# ~/data/containers/caddy-dev/etc/Caddyfile +{ + server { + trusted_proxies static 192.168.10.12/32 + trusted_proxies_strict + # To find the real client's IP. + # default = false : left to right, when the Caddy met the untrusted IP first, then it would treat as client IP + # true : right to left > It is easy to find currupted client IP + } +} + +(private_tls) { + tls { + issuer acme { + dir https://step-ca.ilnmors.internal:9000/acme/acme@ilnmors.internal/directory + dns rfc2136 { + server bind.ilnmors.internal:2253 + key_name acme-key + key_alg hmac-sha256 + key "{file./run/secrets/CADDY_ACME_KEY}" + } + } + } +} + +dev.ilnmors.internal { + import private_tls + @test header X-Forwarded-Host dev-test.ilnmors.com + route @test { + root * /usr/share/caddy + file_server + } +} +``` + +```yaml +# configuration.yml + +# ... +# Access control configuration +access_control: + default_policy: 'deny' + rules: + # authelia portal + - domain: 'authelia.ilnmors.internal' + policy: 'bypass' + - domain: 'authelia.ilnmors.com' + policy: 'bypass' + - domain: 'dev-test.ilnmors.com' + policy: 'one_factor' + # Access control for Forward_Auth + subject: + - 'group:admins' + +# ... 
+``` + +### Quadlet + +- File: + - ~/data/config/containers/caddy-auth/caddy-dev.container + +```ini +# ~/data/config/containers/caddy-dev/caddy-dev.container +# ~/data/config/containers/caddy-dev/caddy-dev.container +[Quadlet] +DefaultDependencies=false + +[Unit] +Description=Caddy - dev + +After=network-online.target +Wants=network-online.target + +[Service] +# Main Caddy and Step-CA +ExecStartPre=%h/data/config/scripts/wait-for-it.sh -h 192.168.10.12 -p 443 -t 0 +ExecStartPre=%h/data/config/scripts/wait-for-it.sh -h 192.168.10.12 -p 9000 -t 0 +ExecStartPre=sleep 5 + +[Container] +Image=localhost/caddy:2.10.2-dev + +ContainerName=caddy-dev + +PublishPort=2080:80/tcp +PublishPort=2443:443/tcp + +Volume=%h/data/containers/caddy-dev/etc:/etc/caddy:rw +Volume=%h/data/containers/caddy-dev/data:/data:rw + +Environment="TZ=Asia/Seoul" + +Secret=CADDY_ACME_KEY,target=/run/secrets/CADDY_ACME_KEY + +Label=diun.enable=true +Label=diun.watch_repo=true + +[Install] +WantedBy=default.target +``` + +#### Create systemd `.service` file + +```bash +# linger has to be activated +ln -s ~/data/config/containers/caddy-dev/caddy-dev.container ~/.config/containers/systemd/caddy-dev.container + +systemctl --user daemon-reload +``` + +#### Enable and start service + +```bash +systemctl --user start caddy-dev.service +``` + +#### Verification + +- https://dev-test.ilnmors.com + - user_test (gruop: users): 403 Forbidden + - admin_test (group: admins): File server \ No newline at end of file diff --git a/docs/archives/2025-12/08_development/08_04_dev_code-server.md b/docs/archives/2025-12/08_development/08_04_dev_code-server.md new file mode 100644 index 0000000..25c8e9c --- /dev/null +++ b/docs/archives/2025-12/08_development/08_04_dev_code-server.md @@ -0,0 +1,341 @@ +Tags: #os, #configuration, #security , #virtualization, #container, #development + +## Code-Server + +Code-Server is an open source and self-hosted Code editer (or IDE with its plugins) to use on web browser. 
This supports terminal and code editer, git, and ansible. It will be used as a bastion host in the home-lab. Code-Server doesn't support login system. Therefore, authelia and caddy's Forward-Auth function will be used in this homelab system. + +### Secret management + +- File: + - ~/data/config/secrets/.secret.yaml + +- Edit `.secret.yaml` with `edit_secret.sh` + +```yaml +# ~/data/config/secrets/.secret.yaml +# CODE-SERVER: +CODESERVER_SSH_KEY: SSH_KEY_VALUE +``` + +```bash + +ssh-keygen -t ed25519 -f /run/user/$UID/codeserver -C "code-server@ilnmors.internal" +> [enter] # no passphrase + +cat /run/user/$UID/codeserver +> Private key value +# add in .secret.yaml as CODESERVER_SSH_KEY +cat /run/user/$UID/codeserver.pub +> Public key value +# add in .secret.yaml as a annotation + +rm -rf /run/user/$UID/codeserver* + +extract_secret.sh .secret.yaml -f CODESERVER_SSH_KEY | podman secret create CODESERVER_SSH_KEY - +``` + +### Preparation + +#### Create directory for container + +```bash +mkdir -p ~/data/containers/code-server +chmod -R 700 ~/data/containers/code-server +setfacl -m d:g::0 ~/data/containers/code-server +setfacl -m d:o::0 ~/data/containers/code-server +setfacl -m u:dev:rwx ~/data/containers/code-server +setfacl -m u:100999:rwx ~/data/containers/code-server +setfacl -d -m u:dev:rwx ~/data/containers/code-server +setfacl -d -m u:100999:rwx ~/data/containers/code-server +mkdir -p ~/data/containers/code-server/{config,local,ssh,workspace} +echo $PUBLIC_SSH_KEY_VALUE > ~/data/containers/code-server/ssh/id_codeserver.pub + +``` + +> Code-Server container executes as 1000:1000(coder:coder) permission in container. It is mapped host's 100999. Therefore, directories have to have ACL via `setfacl` + +#### Add new domain in BIND + +Following [here](../06_network/06_03_net_bind.md). + +- net server +- file: ~/data/containers/bind/lib/db.ilnmors.internal + +```ini +# ... +code-server IN CNAME auth.ilnmors.internal. +# ... 
+``` + +```bash +# Adguard container has Requires=bind.service. When it restarted, then Adguard also restarted. +systemctl --user restart bind +``` + +#### Add new rules in Caddy and authelia + +##### caddy-auth + +- auth server +- file: ~/data/containers/caddy-auth/etc/Caddyfile + +```ini +# ... +code-server.ilnmors.internal { + import private_tls + import crowdsec_log + route { + crowdsec + import apply_forward_auth https://dev.ilnmors.internal + } +# ... +``` + +##### caddy-dev + +- dev server +- file: ~/data/containers/caddy-dev/etc/Caddyfile + +```ini +# ... +dev.ilnmors.internal { + import private_tls + # ... + @code-server header X-Forwarded-Host code-server.ilnmors.internal + # ... + route @code-server { + reverse_proxy host.containers.internal:8000 { + # Sidecar caddy's Caddyfile should change `Host` header as original `Host` value from `X-Forwarded-Host` to prevent websocket problem. + header_up Host {http.request.header.X-Forwarded-Host} + } + } +} +``` + +##### authelia + +```yaml +# configuration.yml + +# ... +# Access control configuration +access_control: + default_policy: 'deny' + rules: + # authelia portal + - domain: 'authelia.ilnmors.internal' + policy: 'bypass' + - domain: 'authelia.ilnmors.com' + policy: 'bypass' + - domain: 'dev-test.ilnmors.com' + policy: 'one_factor' + # Access control for Forward_Auth + subject: + - 'group:admins' + - domain: 'code-server.ilnmors.internal' + policy: 'one_factor' + subject: + - 'group:admins' + +# ... +``` + +#### SSH configuration + +##### Each server + +- file: ~/.ssh/authorized_keys + +```ini +# ... +Contents of code-server's public key value +# ... 
+``` + +##### Code-Server container + +- file: ~/data/containers/code-server/ssh/config + +```ini +Host vmm + HostName 192.168.1.10 + User vmm + IdentityFile /run/secrets/CODESERVER_SSH_KEY + +Host net + HostName 192.168.10.11 + User net + IdentityFile /run/secrets/CODESERVER_SSH_KEY + +Host auth + HostName 192.168.10.12 + User auth + IdentityFile /run/secrets/CODESERVER_SSH_KEY + +# dev is the server where code-server is located. It needs host.containers.internal +Host dev + HostName host.containers.internal + User dev + IdentityFile /run/secrets/CODESERVER_SSH_KEY + +Host app + HostName 192.168.10.14 + User app + IdentityFile /run/secrets/CODESERVER_SSH_KEY +``` + +```bash +podman unshare chown 1000:1000 ~/data/containers/code-server/ssh/config +``` + +### Podman Image + +#### Podman containerfile + +Code-server supports various module for it. Git and ansible will be used in this project. + +- file: + - ~/data/config/containers/code-server/containerfile-code-server-4.105.1 + - ~/data/config/containers/code-server/root_ca.crt + +```containerfile +FROM codercom/code-server:4.105.1 + +USER root + +RUN export SUDO_FORCE_REMOVE=yes && \ +apt-get update && \ +apt-get install -y --no-install-recommends git ansible curl jq age gnupg && \ +apt-get purge -y --auto-remove sudo && \ +apt-get clean + +RUN curl -LO https://github.com/getsops/sops/releases/download/v3.11.0/sops-v3.11.0.linux.amd64 && \ +mv sops-v3.11.0.linux.amd64 /usr/local/bin/sops && \ +chmod +x /usr/local/bin/sops && \ +rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* + +COPY ./root_ca.crt /usr/local/share/ca-certificates/root_ca.crt + +RUN update-ca-certificates + +USER coder +``` + +#### Podman image build + +```bash +podman build -t code-server:4.105.1 -f ~/data/config/containers/code-server/containerfile-code-server-4.105.1 . 
&& podman image prune -f +# Delete pure code-server +``` + +### Quadlet + +- File: + - ~/data/config/containers/code-server/code-server.container + +```ini +# ~/data/config/containers/code-server/code-server.container +# ~/.config/containers/systemd/code-server.container +[Quadlet] +DefaultDependencies=false + +[Unit] +Description=Code-Server + +After=caddy-dev.service +Wants=caddy-dev.service + +[Container] +Image=localhost/code-server:4.105.1 + +ContainerName=code-server + +HostName=code-server + +# CrowdSec uses 8080 port +PublishPort=8000:8080/tcp + +Volume=%h/data/containers/code-server/workspace:/home/coder/workspace:rw +Volume=%h/data/containers/code-server/config:/home/coder/.config:rw +Volume=%h/data/containers/code-server/local:/home/coder/.local:rw +Volume=%h/data/containers/code-server/ssh:/home/coder/.ssh:rw + +Environment="TZ=Asia/Seoul" +# when you needs root permission, you have to access the container via dev server's command 'poman exec -it code-server' + +Secret=CODESERVER_SSH_KEY,target=/run/secrets/CODESERVER_SSH_KEY + +Label=diun.enable=true +Label=diun.watch_repo=true +# This label need configuration on `diun.yml` +Label=diun.regopt=code-server-source + + +[Install] +WantedBy=default.target +``` + +#### Create systemd `.service` file + +```bash +# linger has to be activated +ln -s ~/data/config/containers/code-server/code-server.container ~/.config/containers/systemd/code-server.container + +systemctl --user daemon-reload +``` + +#### Enable and start service + +```bash +systemctl --user start code-server.service +``` + +#### Disable password + +```bash +nano ~/data/containers/code-server/config/code-server/config.yaml +# bind-addr: 127.0.0.1:8080 +# auth: none <- edit this as `none` from `password` +# password: <- remove this part +# cert: false +``` + + +#### Set default workspace + +- Setting:Profile:Default:Folders&workspaces + - Add Folder > /home/coder/workspace +- Setting:Settings:Workbench:Settings Editor + - Terminal > Integrated: 
Gpu Acceleration: off + - Edit in settings.json + +```json +{ + +    "workbench.settings.applyToAllProfiles": [ + +    ], + +    "workbench.colorCustomizations": { + +      "terminal.background": "#0C0C0C", + +      "terminal.foreground": "#CCCCCC" + +    }, + +    "files.associations": { + +        "*.container": "ini", + +        "*.service": "ini", + +        "*.timer": "ini", + +        "containerfile*": "dockerfile" + +    } + +} +``` +#### Verification diff --git a/docs/archives/2025-12/09_application/09_01_app_vm.md b/docs/archives/2025-12/09_application/09_01_app_vm.md new file mode 100644 index 0000000..c1ce3ff --- /dev/null +++ b/docs/archives/2025-12/09_application/09_01_app_vm.md @@ -0,0 +1,253 @@ +Tags: #os, #configuration, #application, #virtualization + +## Preparation + +### Set DHCP reservation and DNS record + +#### Set DHCP reservation on KEA DHCP in OPNsense + +Following [here](05_07_opnsense_kea.md) + +- Services:Kea DHCP:Kea DHCPv4:Reservations - \[+\] + - Subnet: 192.168.10.0/24 + - IP address: 192.168.10.14 + - MAC address: 0A:49:6E:4D:04:00 + - Hostname: app + - Description: app + - `save` + +#### Set DNS records in BIND + +Following [here](../06_network/06_03_net_bind.md). + +- net server +- file: + - ~/data/containers/bind/lib/db.ilnmors.internal + - ~/data/containers/bind/lib/db.10.168.192.in-addr.arpa + +```ini +# db.ilnmors.internal +# ... +app IN A 192.168.10.14 +# ... +# db.10.168.192.in-addr.arpa +# ... +14 IN PTR app.ilnmors.internal. +# ... 
+``` + +### Create VM template + +```bash +virt-install \ +--boot uefi \ +--name app \ +--os-variant debian13 \ +--vcpus 4 \ +--memory 12288 \ +--location /home/vmm/data/vms/images/debian-13.0.0-amd64-netinst.iso \ # For serial installing, use `--location` instead of `--cdrom` +--disk pool=vm-images,size=258,format=qcow2,discard=unmap \ +--network network=ovs-lan-net,portgroup=vlan10-access,model=virtio,mac=0A:49:6E:4D:04:00 \ # Use designated ovs port group +--graphics none \ +--console pty,target_type=serial \ +--extra-args "console=ttyS0,115200" +# After enter this command, then the console start automatically +# Remove all annotation before you make the sh file. +``` + +### Debian installing + +- Following [here](../03_common/03_01_debian_configuration.md) to install Debian. +- Debian installer supports serial mode regardless getty@ttyS0 service is enabled or not. +- Following [here](../03_common/03_02_iptables.mc) to set iptables. +- Following [here](../03_common/03_04_crowdsec.md) to set CrowdSec + +#### Serial console setting + +After installation, use `ctrl + ]` to exit console. Before setting getty@ttyS0, you can't use serial console to access VM. Therefore, use IP address set on installation, and connect net server via ssh first, following the step to enable the getty. + +#### btrfs RAID setting + +Following [here](03_06_btrfs.md) how to use btrfs + +- directory: /home/app/hdd + +```bash +# Make the directory, RAID partition will be mounted +mkdir /home/app/hdd + +# check btrfs-progs package +sudo apt list --installed | grep btrfs-progs +# btrfs-progs/stable,now 6.14-1 amd64 [installed] + +# Check the disk status +lsblk -o NAME,PTTYPE,FSTYPE,SIZE,MOUNTPOINT +# - /dev/sda: Physical slot 2 +# - /dev/sdb: Physical slot 1 +# - /dev/sdc: Physical slot 4 +# - /dev/sdd: Physical slot 3 +# If you want to manage the partition or disk, then use fsdisk. 
+ +sudo fdisk "$DEVICE_PATH" +> n # create the new partition +> 1 # Partition number +> Default # First Sector +> Default # Last Sector +> w # write the new partition + +lsblk -o NAME,PTTYPE,FSTYPE,SIZE,MOUNTPOINT +# - /dev/sda1: partition of slot 2 +# - /dev/sdb1: partition of slot 1 +# - /dev/sdc1: partition of slot 4 +# - /dev/sdd1: partition of slot 3 + +# btrfs RAID10 volume creation +# -d: how data is stored, -m: how metadata is stored, -L: create label +sudo mkfs.btrfs -d raid10 -m raid10 -L hdd /dev/sda1 /dev/sdb1 /dev/sdc1 /dev/sdd1 + +# check the RAID10 volume, it shows label and uuid. +sudo btrfs filesystem show +lsblk -f + +# Mount RAID10 volume permanently +sudo nano /etc/fstab +# # btrfs RAID10 storage pool; mount option, compress=zstd: realtime compression. autodefrag: conduct auto defragmentation +# LABEL=hdd /home/app/hdd btrfs defaults,compress=zstd,autodefrag 0 0 + +# release the fs +sudo systemctl daemon-reload +sudo mount -a + +# check the mount +df -h +/dev/sda1 3.7T /home/app/hdd + +# Scrubbing btrfs +sudo btrfs scrub start /home/app/hdd +sudo btrfs scrub status /home/app/hdd +``` +> btrfs RAID doesn't use fixed HDD pairs. It uses flexible chunk-based data management. Therefore the user doesn't have to know which disks form a pair; just replace the disk that is broken.
+ +#### btrfs scrub on systemd + +File: + - ~/data/config/services/btrfs-scrub/btrfs-scrub.service + - ~/data/config/services/btrfs-scrub/btrfs-scrub.timer + +```ini +# ~/data/config/services/btrfs-scrub/btrfs-scrub.service +# /etc/systemd/system +[Unit] +Description=BTRFS Scrub for /home/app/hdd +After=home-app-hdd.mount +Wants=home-app-hdd.mount + +[Service] +Type=oneshot +ExecStart=btrfs scrub start /home/app/hdd + +Nice=19 +IOSchedulingClass=idle +# Nice: CPU priority; -20: highest, 0: default, 19: lowest +# IOSchedulingClass: Disk priority; realtime: highest, best-effort: default, idle: lowest +``` + +```ini +# ~/data/config/services/btrfs-scrub/btrfs-scrub.timer +# /etc/systemd/system +[Unit] +Description=Run BTRFS scrub for /home/app/hdd monthly + +[Timer] +OnCalendar=*-*-01 03:00:00 +Persistent=true +# Persistent=true: If the service couldn't run because of some reasons, it execute the service immediately when it is possible + +[Install] +WantedBy=timers.target +``` + +```bash + +sudo ln -s ~/data/config/services/btrfs-scrub/btrfs-scrub.service /etc/systemd/system/btrfs-scrub.service + +sudo ln -s ~/data/config/services/btrfs-scrub/btrfs-scrub.timer /etc/systemd/system/btrfs-scrub.timer + +sudo systemctl daemon-reload + +sudo systemctl enable --now btrfs-scrub.timer +``` +### Modify VM template settings + +After getty setting, shutdown app vm with `shutdown` in VM or `sudo virsh shutdown app` in hypervisor to turn off vm first. + +```bash +virsh edit app +``` + +```xml + +... 
+ + + 2048 + + + + +``` + +```bash +virsh dumpxml app > ~/data/config/vms/dumps/app.xml +virsh start app && virsh console app +# Start app server with console +``` + +### Common setting + +- app.service + +```ini +# ~/data/config/services/app.service +# ~/.config/systemd/user/app.service +[Unit] +Description=app Auto Booting +After=network-online.target +Wants=network-online.target +Requires=opnsense.service + +[Service] +Type=oneshot + +# Maintain status as active +RemainAfterExit=yes + +# CrowdSec should be set +ExecStartPre=%h/data/config/scripts/wait-for-it.sh 192.168.10.1:8080 -t 0 +ExecStartPre=%h/data/config/scripts/wait-for-it.sh 192.168.10.11:53 -t 0 +ExecStartPre=%h/data/config/scripts/wait-for-it.sh 192.168.10.12:9000 -t 0 + +ExecStartPre=/bin/bash -c "sleep 15" + +# Run the service +ExecStart=/usr/bin/virsh -c qemu:///system start app + +# Stop the service +ExecStop=/usr/bin/virsh -c qemu:///system shutdown app + +[Install] +WantedBy=default.target +``` + +```bash +ln -s ~/data/config/services/app.service ~/.config/systemd/user/app.service + +systemctl --user daemon-reload +systemctl --user enable app.service +systemctl --user start app.service +``` \ No newline at end of file diff --git a/docs/archives/2025-12/console.md b/docs/archives/2025-12/console.md new file mode 100644 index 0000000..0b488f3 --- /dev/null +++ b/docs/archives/2025-12/console.md @@ -0,0 +1,360 @@ +# Console client + +Tags: #os, #windows, #virtualization, #wsl, #vscode + +## Preparation + +### WSL + +#### WSL command + +Run the commands in PowerShell or CMD. Installation and uninstallation process needs admin privileges. 
+ +```PowerShell +# --- Install and setup --- +# Activate WSL (First time only) +wsl --install + +# Install specific OS +wsl --install -d Debian + +# Check the list +wsl --list --online + +# Check the version +wsl -l -v + +# --- Run and manage --- +# Run WSL +wsl -d Debian # -u root # run with root + +# Shutdown WSL +# This is needed when the configuration is changed +wsl --shutdown + +# Shutdown specific version +wsl --terminate Debian + +# --- Backup and restore --- + +# Backup WSL +wsl --export Debian C:\backups\wsl.tar +# Import WSL +wsl --import Debian C:\WSL\Debian C:\backups\wsl.tar + +# Open the linux directory on windows explorer +# bash +# explorer.exe . +# Windows explorer +# \\wsl$ on the windows explorer + +# --- Reset or inactivate --- +# Reset the specific version +wsl --unregister Debian + +# Inactive WSL +wsl --uninstall +``` + +#### WSL configuration + +##### Installation + +```PowerShell +# Activate WSL and install Debian +wsl --install -d Debian +# Enter new UNIX username: debian +# Enter new password: debian + +``` + +##### Configuration + +- `Win`:Windows Linux Subsystem Configuration \(GUI\) +- Processor and memory + - Processor: 4 + - Memory: 4096MB + - Swap: 0 +- Filesystem + - Basic VHD: 32768MB +- Networking + - Mode: Mirrored + +#### WSL Start + +```PowerShell +# Start WSL +wsl -d Debian +# User and group configuration +sudo groupadd -g 2000 svadmins +sudo useradd -u 2999 -g svadmins -G sudo -c "Console Client" -m -d /home/console -s /bin/bash console +sudo passwd console +# New password: random string +exit + +# PowerShell +wsl --shutdown +wsl -d Debian -u console + +# Delete default account +sudo userdel -r debian + +# Set default user +sudo nano /etc/wsl.conf +# ... 
+# [user] +# default=console +exit + +# PowerShell +wsl --shutdown +wsl -d Debian +# Check `console` login + +# Create the directory for VS Code +mkdir workspace && chmod 700 workspace +``` + +### VS Code + +#### Installation + +- Site: https://code.visualstudio.com/ + - Download for Windows +- Execute the installation file + +#### Configuration + +- Extensions\(`Ctrl` + `shift` + `x`\):WSL + - Install WSL by Microsoft +- Remote Explorer:Debian:Connect in Current Windows +- `Ctrl` + `k` and `Ctrl` + `t` for theme + - Dark Modern +- `Ctrl` + `k` and `Ctrl` + `o` for `open folder` + - /home/console/workspace/ + - Do you trust the authors of the files in this folder - `Yes, I trust the authors` +- `Ctrl` + `Shift` + `` ` `` for `open terminal` + +## Bastion host + +### Directory structures + +Use `mkdir` to make these directories. +- ~/workspace/homelab/data/ + - utils + - common - wait-for-it.sh, sops, etc...  + - \[server_name\]/\[bin_name\] - ddns, init_db, etc ...  + - servers + - os/\[iso or img files for installation\] + - \[server_name\]/\[service_name; iptables, interface, ssh, vfio, etc..\] - rules.v4, sshd_config, etc...  + - services + - \[server_name\]/\[services_name\] + - *.containers or *.service (systemd files) + - config - services configuration (named.conf, etc; !No live data files like DB file or media file. Only configuration files based on text or binary files.)  + - secrets - secret_scripts, secret.yaml (central secret management) +- ~/workspace/homelab/docs  + - library + - archives  + - before_bastion_host/current_documents_and_directories + - references + - techs + - current_common_documents  + - theories + - current_theory_documents  + - images  +  - media  +  - etc.  
+ - plans  + - plan.md  + - milestone.md  + - infrastructures + - common + - debian_configuration.md (OS, network, uid/gid, packages)  + - deployment.md  + - security_policies.md (iptables, crowdsec)  + - data_polices.md (storage, backup, database) + - \[server_name\] + - \[server_name\].md - virtual hardware, security, services, etc... + - \[services_name\].md + +### Packages + +- External binary packages are located in here + - ~/workspace/homelab/data/bin/common + +```bash +sudo apt update && sudo apt upgrade +# Packages from repository +sudo apt install gnupg acl curl jq age git openssh-client + +# Git config +git config --global user.name "il" +git config --global user.email "il@ilnmors.internal" + +# Sops +## Sops for amd processor (N150) +curl -LO https://github.com/getsops/sops/releases/download/v3.11.0/sops-v3.11.0.linux.amd64 + +## Sops for arm processor (Snapdragon Plus) +curl -LO https://github.com/getsops/sops/releases/download/v3.11.0/sops-v3.11.0.linux.arm64 + +mkdir -p ~/workspace/homelab/data/bin/common && chmod 700 ~/workspace/homelab/data/bin/common + +mv sops-v3.11.0.linux.amd64 sops-v3.11.0.linux.arm64 ~/workspace/homelab/data/bin/common/ + +sudo cp ~/workspace/homelab/data/bin/common/sops-v3.11.0.linux.arm64 /usr/local/bin/sops + +sudo chmod +x /usr/local/bin/sops + +# wait-for-it.sh +curl -LO https://github.com/vishnubob/wait-for-it/blob/master/wait-for-it.sh + +mv wait-for-it.sh ~/workspace/homelab/data/bin/common/ + +# acme.sh +curl -LO https://github.com/acmesh-official/acme.sh/blob/master/acme.sh +mv acme.sh ~/workspace/homelab/data/bin/common/ +``` + +### Secret management + +- Files: + - ~/workspace/homelab/data/secrets/secret.yaml + - ~/workspace/homelab/data/secrets/.sops.yaml + - ~/workspace/homelab/data/secrets/age-key.gpg + - ~/workspace/homelab/data/secrets/edit_secret.sh + - ~/workspace/homelab/data/secrets/extract_secret.sh + +#### Apply the secrets + +- Server: console + +##### Generate and encrypt age key + + +```bash +# 
Generate the key for sops +age-keygen -o ~/workspace/homelab/data/secrets/age-key +# # created: 2025-10-17T13:30:00Z +# # public key: age1ql3z7h0cfscg...... +# AGE-SECRET-KEY-1..... + +# Public key is printed when key generated +gpg --symmetric age-key && rm age-key +> GPG password: password + +nano ~/workspace/homelab/data/secrets/.sops.yaml +``` + +##### Key value setting for sops + +```yaml +# ~/workspace/homelab/data/secrets/.sops.yaml +creation_rules: +  - path_regex: secret\.yaml$ +    age: [public_key value; age~~~] +``` + +##### Mnagement secret + +```bash +# Create secret +cd ~/workspace/homelab/data/secrets +nano secret.yaml + +# Replace the file as secret file +sops --encrypt --in-place secret.yaml + +# edit secret.yaml +./edit_secret.sh secret.yaml + +# Create secret files in each server +./extract_secret.sh secret.yaml [-n] (-e|-f $ENV) > $TMP_PATH/tmp_secret + +# deploy the tmp_secret to server to /run/user/$UID/filename +scp $TMP_PATH/tmp_secret [server]:/run/user/$TARGET_UID/filename + +# `<< 'EOF'` sends string itself +# `<< EOF` sends string after interpreting +ssh [server] << 'EOF' +    sudo mv /run/user/$UID/filename /etc/secrets/$UID/secret_file +    rm -rf /run/user/$UID/filename +    sudo chown $UID:root /etc/secrets/$UID/secret_file +    sudo chmod 400 /etc/secrets/$UID/secret_file +EOF + +rm -rf $TMP_PATH/tmp_secret + +# Podman secret in each server +./extract_secret.sh secret.yaml [-n] -f $ENV | ssh sv "podman secret create $ENV -" +``` + +#### Usage of podman secret + +```container +#... +#... +[Container] +# .. 
+Secret=env,type=env,target=env +Secret=app,target=/run/secrets/app +``` + +### ssh configuration + +#### ssh key gen + +```bash +mkdir -p ~/.ssh && chmod 700 ~/.ssh +ssh-keygen -t ed25519 -f ~/.ssh/id_console -C "il@ilnmors.internal" + +# Add private key value to ~/workspace/homelab/data/secret/secret.yaml with sops +## # console ssh public key: +## # ed25519 ~~~~ il@ilnmors.internal +## # console ssh private key +## CONSOLE_SSH_PRIVATE_KEY: | +## ----BEGIN---- +## ... +## ----END---- + +sudo mkdir -p /etc/secrets/2999 # $UID of `console` +sudo chown root:root /etc/secrets && sudo chmod 711 /etc/secrets +sudo chown console:root /etc/secrets/2999 && sudo chmod 500 /etc/secrets/2999 +sudo mv ~/.ssh/id_console /etc/secrets/2999/ && sudo chown console:root /etc/secrets/2999/id_console && sudo chmod 400 /etc/secrets/2999/id_console +``` + +#### ssh key config + +```ini +# ~/.ssh/config + +Host vmm + HostName [vmm ip from ncpa.cpl's temporary dhcp ip address] + User vmm + IdentityFile /etc/secrets/2999/id_console + +# Host vmm +# HostName 192.168.10.10 +# User vmm +# IdentityFile /etc/secrets/2999/id_console + +# Host net +# HostName 192.168.10.11 +# User net +# IdentityFile /etc/secrets/2999/id_console + +# Host auth +# HostName 192.168.10.12 +# User auth +# IdentityFile /etc/secrets/2999/id_console + +# Host dev +# HostName 192.168.10.13 +# User dev +# IdentityFile /etc/secrets/2999/id_console + +# Host app +# HostName 192.168.10.14 +# User app +# IdentityFile /etc/secrets/2999/id_console +``` \ No newline at end of file diff --git a/docs/archives/2025-12/scripts.md b/docs/archives/2025-12/scripts.md new file mode 100644 index 0000000..6ca7438 --- /dev/null +++ b/docs/archives/2025-12/scripts.md @@ -0,0 +1,452 @@ +```bash +#!/bin/bash + +# edit_secret.sh /path/of/secret + + + +set -e + + + +KEY_PATH="$HOME/workspace/homelab/data/secrets" + +TMP_PATH="/run/user/$UID" + +SECRET_FILE="$1" + + + +# Usage function + +usage() { + +    echo "Usage: $0 
\"/path/of/secret/file\"" >&2 + +    exit 1 + +} + + + +# log function + +log() + +{ + +    local text="$1" + +    echo -e "$(date "+%Y-%m-%d %H:%M:%S"): [edit_script] $text" >&2 + +} + + + +# Secret file check + +if [ -z "$SECRET_FILE" -o ! -f "$SECRET_FILE" ]; then + +    log "Error: Secret file path is needed" + +    usage + +fi + + + +# age-key file check + +if [ ! -f "$KEY_PATH/age-key.gpg" ]; then + +    log "Error: There is no key file" + +    exit 1 + +fi + + + +# Dependency check + +if ! command -v sops >/dev/null; then + +    log "Error: sops package is needed" + +    exit + +fi + + + +if ! command -v gpg >/dev/null; then + +    log "Error: gnupg package is needed" + +    exit + +fi + + + + +# Delete password file after script certainly + +cleanup() { + +    if [ -f "$TMP_PATH/age-key" ]; then + +        log "Notice: age-key was deleted" + +        rm -f "$TMP_PATH/age-key" + +    fi + +} + + + +trap cleanup EXIT + + + + +# Get GPG password from prompt + +echo -n "Enter GPG passphrase: " >&2 + +read -s GPG_PASSPHRASE + +echo "" >&2 + + + +# Decrypt age-key on memory + +echo "$GPG_PASSPHRASE" | gpg --batch --yes --passphrase-fd 0 \ + +--output "$TMP_PATH/age-key" \ + +--decrypt "$KEY_PATH/age-key.gpg" && \ + +chmod 600 "$TMP_PATH/age-key" + + + +unset GPG_PASSPHRASE + + + +# Check the decrypted key on memory + +if [ ! 
-f "$TMP_PATH/age-key" ]; then + +        log "Error: Decrypted key file does not exist" + +        exit 1 + +fi + + + +# kill the gpg session + +gpgconf --kill gpg-agent + + + +# Open sops editor + +SOPS_AGE_KEY_FILE="$TMP_PATH/age-key" sops "$SECRET_FILE" + +rm -f "$TMP_PATH/age-key" >&2 + + + +exit 0 +``` + +```bash +#!/bin/bash + +# extract_secret.sh /path/of/secret [-n] (-f|-e ) + + + +set -e + + + +KEY_PATH="$HOME/workspace/homelab/data/secrets" + +TMP_PATH="/run/user/$UID" + +SECRET_FILE=$1 + +VALUE="" + +TYPE="" + +NEWLINE="true" + + + +# Remove $1 and shift $(n-1) < $n + +shift + + + +# usage() function + +usage() { + +        echo "Usage: $0 \"/path/of/secret/file\" [-n] (-f|-e \"yaml section name\")" >&2 + +        echo "-n: remove the newline" >&2 + +        echo "-f : Print secret file" >&2 + +        echo "-e : Print secret env file" >&2 + +        exit 1 + +} + + + +# log() function + +log() + +{ + +    local text="$1" + +    echo -e "$(date "+%Y-%m-%d %H:%M:%S"): [extract_script] $text" >&2 + +} + + + +while getopts "f:e:n" opt; do + +    case $opt in + +        f) + +            VALUE="$OPTARG" + +            TYPE="FILE" + +            ;; + +        e) + +            VALUE="$OPTARG" + +            TYPE="ENV" + +            ;; + +        n) + +            NEWLINE="false" + +            ;; + +        \?) # unknown options + +            log "Invalid option: -$OPTARG" + +            usage + +            ;; + +        :) # parameter required option + +            log "Option -$OPTARG requires an argument." + +            usage + +            ;; + +    esac + +done + + + +# Get option and move to parameters + +shift $((OPTIND - 1)) + + + +# Check necessary options + +if [ -z "$SECRET_FILE" -o ! -f "$SECRET_FILE" ]; then + +    log "Error: secret file path is required" + +    usage + +fi + + + +if [ -z "$TYPE" ]; then + +        log "Error: -f or -e option requires" + +        usage + +fi + + + +# age-key file check + +if [ ! 
-f "$KEY_PATH/age-key.gpg" ]; then + +    log "Error: There is no key file" + +    exit 1 + +fi + + + +# Dependency check + +if ! command -v sops >/dev/null; then + +    log "Error: sops package is needed" + +    exit + +fi + + + +if ! command -v gpg >/dev/null; then + +    log "Error: gnupg package is needed" + +    exit + +fi + + + + +# Delete password file after script certainly + +cleanup() { + +    if [ -f "$TMP_PATH/age-key" ]; then + +        log "Notice: age-key was deleted" + +        rm -f "$TMP_PATH/age-key" + +    fi + +} + + + +trap cleanup EXIT + + + +echo -n "Enter GPG passphrase: " >&2 + +read -s GPG_PASSPHRASE + +echo "" >&2 + + + +echo "$GPG_PASSPHRASE" | gpg --batch --yes --passphrase-fd 0 \ + +--output "$TMP_PATH/age-key" \ + +--decrypt "$KEY_PATH/age-key.gpg" && \ + +chmod 600 "$TMP_PATH/age-key" + + + +unset GPG_PASSPHRASE + + + +if [ ! -f "$TMP_PATH/age-key" ]; then + +        log "Error: Decrypted key file does not exist" + +        exit 1 + +fi + + + +gpgconf --kill gpg-agent + + + +if [ "$TYPE" == "FILE" ]; then + +        if RESULT=$(SOPS_AGE_KEY_FILE="$TMP_PATH/age-key" sops --decrypt --extract "[\"$VALUE\"]" --output-type binary "$SECRET_FILE") ; then + +                if [ "$NEWLINE" == "true" ]; then + +                    echo "$RESULT" + +                else + +                    echo -n "$RESULT" + +                fi + +                exit 0 + +        else + +                log "Error: SOPS extract error" + +                exit 1 + +        fi + +fi + + + +if [ "$TYPE" == "ENV" ]; then + +        if RESULT=$(SOPS_AGE_KEY_FILE="$TMP_PATH/age-key" sops --decrypt --extract "[\"$VALUE\"]" --output-type dotenv "$SECRET_FILE") ; then + +                if [ "$NEWLINE" == "true" ]; then + +                    echo "$RESULT" + +                else + +                    echo -n "$RESULT" + +                fi + +                exit 0 + +        else + +                log "Error: SOPS extract error" + +                exit 1 + +  
      fi + +fi +``` \ No newline at end of file diff --git a/docs/notes/.gitkeep b/docs/notes/.gitkeep new file mode 100644 index 0000000..e69de29 diff --git a/docs/runbook/00-operate.md b/docs/runbook/00-operate.md new file mode 100644 index 0000000..0cb6ef2 --- /dev/null +++ b/docs/runbook/00-operate.md @@ -0,0 +1,104 @@ +# Operation + +## Migration backup + +When the migration is decided, the manual backup after shutting all services down is necessary. Run the DB dump backup and kopia backup manually before reset the system. + +## Windows + +- Following the process step. + - [01-windows.md](./01-windows.md) + +## Certificates and wireguard + +- Following the process two steps. + - [02-certificates.md](./02-certificates.md) + - [03-wireguard.md](./03-wireguard.md) + +## console + +### Data restore + +Only when kopia repository exists. + +```bash +kopia repository connect --override-username="console" --override-hostname="console.ilnmors.internal" + +kopia snapshot list --all + +# mount volumes +kopia mount $CONSOLE_SNAPSHOT_ID ~/workspace/homelab/volumes/console +kopia mount $INFRA_SNAPSHOT_ID ~/workspace/homelab/volumes/infra +kopia mount $APP_SNAPSHOT_ID ~/workspace/homelab/volumes/app + +# Set initiating db dump files +cp ~/workspace/homelab/volumes/infra/cluster/cluster.sql ~/workspace/homelab/config/services/containers/infra/postgresql/init/cluster.sql +# ... +``` + +### Provisioning + +Ansible playbooks should be declarative. This won't contain complex branch logics \(Declarative over imperative\). Playbooks describes what should be there, not how to. The basic rule is manual destroy and auto reprovisioning. + +#### vmm and fw + +- Following the process step. 
+ - [04-hypervisor.md](./04-hypervisor.md) + +```bash +# Sign on vmm's host ssh keys and set libvirt environment # Ignore fingerprint just once +cd ~/workspace/homelab/ansible + +# Disconnect all internet connection except LAN between vmm +ansible-playbook playbooks/vmm/site.yaml --tags "init" --ssh-common-args='-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null' +# Create fw +ansible-playbook playbooks/vmm/create_vm.yaml --tags "fw" +# Make it sure disable other NIC (Wifi or etc) before run playbooks, and set gateway +ansible-playbook playbooks/fw/site.yaml --tags "init" --ssh-common-args='-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null' # When nftables is changed, crowdsec handlers work. However, when it is in initiating, handler is failed. It is normal work. +ansible-playbook playbooks/fw/site.yaml --tags "site" +# Set vmm after air-gap +ansible-playbook playbooks/vmm/site.yaml --tags "site" +``` + +## External hardware on CLIENT + +- Following the process two steps. 
+  - [05-hardwares.md](./05-hardwares.md)
+  - [06-kopia.md](./06-kopia.md)
+
+## Other vms
+
+```bash
+# Kopia mount
+## Create Kopia password file
+touch /etc/secrets/$CONSOLE_UID/kopia_password
+### Add kopia.user.console value from secrets.yaml
+## Connect repository
+KOPIA_PASSWORD="$(cat /etc/secrets/$CONSOLE_UID/kopia_password)" \
+/usr/bin/kopia repository connect server \
+--url=https://nas.ilnmors.internal:51515 \
+--override-username=console \
+--override-hostname=console.ilnmors.internal
+## mount kopia directories
+kopia mount $INFRA_SNAPSHOT_ID ~/workspace/data/volumes/infra
+kopia mount $APP_SNAPSHOT_ID ~/workspace/data/volumes/app
+## bringing sql dump files to init directory
+cp ~/workspace/data/volumes/infra/cluster/$FILE_NAME ~/workspace/config/services/containers/infra/postgresql/init/pg_cluster.sql
+## repeat for every service
+# Create vms (Possible tags = ["fw", "infra", "auth", "app"])
+ansible-playbook playbooks/vmm/create_vm.yaml --tags "$VM_NAME"
+# Sign on vms' host ssh keys # Ignore fingerprint just once
+ansible-playbook playbooks/$VM_NAME/site.yaml --tags "init" --ssh-common-args='-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null'
+ansible-playbook playbooks/$VM_NAME/site.yaml --tags "site"
+# app
+ansible-playbook playbooks/app/site.yaml --tags "init" --ssh-common-args='-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null'
+## Rsync app data
+rsync -avz --numeric-ids -e ssh --rsync-path="sudo rsync" ~/workspace/homelab/volumes/kopia/app/ app@app.ilnmors.internal:/home/app/data/
+# --numeric-ids: set owner and groups based on uid and gid
+# -e ssh --rsync-path="sudo rsync": run rsync as root permission, sudo password needed
+# Maybe rsync-path runs in the background, so it might not print a prompt for the password. 
Just enter the password +ansible-playbook playbooks/app/site.yaml --tags "site" + +# Create console snapshot +kopia snapshot create ~/workspace +``` diff --git a/docs/runbook/01-windows.md b/docs/runbook/01-windows.md new file mode 100644 index 0000000..f19259c --- /dev/null +++ b/docs/runbook/01-windows.md @@ -0,0 +1,255 @@ +# Windows prerequisite + +## Installation + +- Language: English (US) +- Region: Korea +- Keyboard: US +- Internet connection +- Account setting: DOS prompt (Shift + F10) +```dos +start ms-cxh:localonly +``` + +## Initial configuration + +### Time and language + +- Preferred Languages: Add a Language: Korean + - Don't set additional options +- Typing: Advanced Keyboard setting: Override for default input method: Korean: Microsoft IME + +### Set the password +- Settings: Accounts: Sign-in options +- Password +- PIN +- Windows Hello + +### Disable unused functions + +```PowerShell +# Administrator privileges are required +## Disable Recall +DISM /Online /Disable-Feature /FeatureName:Recall /NoRestart + +## Disable Copilot +reg add "HKCU\Software\Policies\Microsoft\Windows\WindowsCopilot" /v TurnOffWindowsCopilot /t REG_DWORD /d 1 /f +reg add "HKLM\Software\Policies\Microsoft\Windows\WindowsCopilot" /v TurnOffWindowsCopilot /t REG_DWORD /d 1 /f + +## Delete programs +Get-AppxPackage *bingnews* | Remove-AppxPackage +Get-AppxPackage *bingweather* | Remove-AppxPackage +Get-AppxPackage *gethelp* | Remove-AppxPackage +Get-AppxPackage *tips* | Remove-AppxPackage +Get-AppxPackage *feedbackhub* | Remove-AppxPackage + +Get-AppxPackage *solitairecollection* | Remove-AppxPackage +Get-AppxPackage *windowscommunicationsapps* | Remove-AppxPackage +Get-AppxPackage *outlookforwindows* | Remove-AppxPackage +Get-AppxPackage *people* | Remove-AppxPackage +Get-AppxPackage *clipchamp* | Remove-AppxPackage + +Get-AppxPackage *todos* | Remove-AppxPackage +Get-AppxPackage *quickassist* | Remove-AppxPackage + +Get-AppxPackage *windowsmaps* | Remove-AppxPackage + 
+Get-AppxPackage *xbox* | Remove-AppxPackage +Get-AppxPackage *gamingapp* | Remove-AppxPackage + +## Disable telemetry +Stop-Service DiagTrack +Set-Service DiagTrack -StartupType Disable +Stop-Service dmwappushservice +Set-Service dmwappushservice -StartupType Disable + +## Compact OS configuration +compact /compactos:always +``` + +### Delete programs + +- Add or remove programs + - Copilot + - Family + - Microsoft 365 (Other languages; except en-us, ko-kr) + - Microsoft 365 copilot + - Microsoft edge game assist + - Microsoft Onedrive + - Microsoft Teams + - Speech pack + +### Edge configuration +- Settings: System and performance: System + - Disable All options + +### Explorer configuration + +- Settings: Privacy + - Disable All options + +### Login on Microsoft account + +sign in on app only +- surface app +- microsoft office 365 + +### Install Microsoft app + +- Microsoft PC manager + +### Firewall configuration + +- WindowsDefender Firewall:Inbound Rules: + - File and Printer Sharing (Echo Request - ICMPv4-In) - Profile: Private, Public + - General: \[x\] Enable + - Scope: 192.168.1.0/24, 192.168.10.0/24, 192.168.99.0/24 + - File and Printer Sharing (Echo Request - ICMPv6-In) - Profile: Private, Public + - General: \[x\] Enable + - Scope: fd00::/8 + - Apply + +### NIC VLAN configuration + + +- Device Manager:Network Adapters:Surface network adpaters + - Advanced:Priority & VLAN: Priority & VLAN disable + +## Set WSL2 + +### Create wsl config + +- C:\Users\$USERNAME\.wslconfig +```ini +[wsl2] +processors=4 +memory=4294967296 +swap=0 +defaultVhdSize=34359738368 +networkingMode=Mirrored +``` + +### Installation + +```PowerShell +# Run with administrator's authority +# Install wsl +wsl --install -d Debian +# Enter new UNIX username: debian +# Enter new password: debian +wsl --shutdown +# --- Uninstall Debian --- +# wsl --unregister Debian +# wsl --uninstall +# ------ +``` + +### WSL Debian configuration + +```bash +# PowerShell +wsl -d Debian +# bash +## User 
create +sudo groupadd -g 2000 svadmins +sudo useradd -u 2999 -g svadmins -G sudo -c "Console Client" -m -d /home/console -s /bin/bash console +sudo passwd console +# New password: [password] +exit + +# PowerShell +wsl --shutdown +wsl -d Debian -u console + +# bash +## User delete and set wsl.conf +sudo userdel -r debian +echo '[user]' | sudo tee -a /etc/wsl.conf +echo 'default=console' | sudo tee -a /etc/wsl.conf +echo '[network]' | sudo tee -a /etc/wsl.conf +echo 'generateHosts = false' | sudo tee -a /etc/wsl.conf +exit + +wsl --shutdown + +wsl -d Debian + +## package +sudo apt update && sudo apt upgrade +sudo apt install curl jq gnupg git fuse3 +sudo apt install ansible-core ansible-lint --no-install-recommends +ansible-galaxy collection install community.libvirt +ansible-galaxy collection install community.general +ansible-galaxy collection install ansible.posix + +## SOPS (arm64) +## Check for latest release: Current version: 3.12.1 +## x86_64 url: https://github.com/getsops/sops/releases/download/v3.12.1/sops_3.12.1_amd64.deb + +curl -L -o sops_3.12.1.deb https://github.com/getsops/sops/releases/download/v3.12.1/sops_3.12.1_arm64.deb +sudo apt install ./sops_3.12.1.deb +rm -rf ./sops_3.12.1.deb + +mkdir ~/workspace +``` + +### VS Code + +#### VS Code Download +- Download from https://code.visualstudio.com/Download# + - `User Installer Arm64` + +#### VS Code configuration + +- WSL extension\(`Ctrl + shift + x`\) + - Install `WSL` by Microsoft + - Remote Explorer:Debian:Connect in Current Windows +- `Ctrl + k` and `Ctrl + o` + - Open folder: `/home/console/workspace` +- `` Ctrl + shift + ` `` for Terminal +- Extensions\(`Ctrl + shift + x`\) + - Install `Ansible` by RedHat + +### Playbooks + +```bash +# Ansible playbook + +# Copy all files "workspace" directory to Debian via VS Code + +cd ~/workspace/homelab/ansible +ansible-playbook playbooks/console/site.yaml --tags "init" +``` + +#### Terminal configuration + +- Terminal: Setting: Startup + - Default profile + 
- Debian + - Profile: Debian: + - Icon + - "ms-appx:///ProfileIcons/{61c54bbd-c2c6-5271-96e7-009a87ff44bf}.png" + - Starting Directory + - ~ + +## Secret management + +### Flow + +- Secret data + - `secrets.yaml` + - encrypted by sops with age-key +- age-key + - `age-key.gpg` and `ansible/group_vars/all.yaml` + - encrypted by gpg and ansible vault with master key +- Master key + - The key which has above 40 characters containing upper and lower letters, numbers, and special letters + - managed by physical media \(Mind, MDisc, paper\) as file, string, and QR + - This value is never saved in server or console. +- Root CA \(including ssh CA\) must not be deployed. + - The tasks with root CA must be performed manually. The source of Trust is the most important in security. +- Intermediate CA can be deployed. + - Intermediate CA is operated as a live server. + - Intermediate CA can be revoked by the root CA + - Only encrypted intermediate CA's private key is deployed. + - CA server decrypts intermediate CA's private key in memory at runtime. diff --git a/docs/runbook/02-certificates.md b/docs/runbook/02-certificates.md new file mode 100644 index 0000000..a149b28 --- /dev/null +++ b/docs/runbook/02-certificates.md @@ -0,0 +1,169 @@ +# Certificates + +Create and renew certificates are very important, and very barely executed. It is managed manually without ansible. + +#### PKI CA signed offline + +step-cli is installed by ansible playbook for console. 
+ +```bash +# Generate CA key password +openssl rand -base64 32 > /run/user/$UID/root_ca_password +openssl rand -base64 32 > /run/user/$UID/intermediate_ca_password +# Save the values in `secrets.yaml` + +# Create CAs \(Key and cert) +# Root CA +step certificate create \ +"ilnmors.internal Root CA" /run/user/$UID/root_ca.crt /run/user/$UID/root_ca.key \ +--password-file /run/user/$UID/root_ca_password \ +--profile root-ca \ +--not-after 87600h +# Save the key and crt files content in `secrets.yaml` + +# Intermediate CA +step certificate create \ +"ilnmors.internal Intermediate CA" /run/user/$UID/intermediate_ca.crt /run/user/$UID/intermediate_ca.key \ +--password-file /run/user/$UID/intermediate_ca_password \ +--profile intermediate-ca \ +--ca /run/user/$UID/root_ca.crt \ +--ca-key /run/user/$UID/root_ca.key \ +--ca-password-file /run/user/$UID/root_ca_password \ +--not-after 43800h +# Save the key and crt files content in `secrets.yaml` + +# fw + +step certificate create \ +"crowdsec.ilnmors.internal" /run/user/$UID/crowdsec.crt /run/user/$UID/crowdsec.key \ +--profile leaf \ +--san crowdsec.ilnmors.internal \ +--ca /run/user/$UID/intermediate_ca.crt \ +--ca-key /run/user/$UID/intermediate_ca.key \ +--ca-password-file /run/user/$UID/intermediate_ca_password \ +--not-after 21900h \ +--insecure --no-password + +step certificate create \ +"blocky.ilnmors.internal" /run/user/$UID/blocky.crt /run/user/$UID/blocky.key \ +--profile leaf \ +--san blocky.ilnmors.internal \ +--ca /run/user/$UID/intermediate_ca.crt \ +--ca-key /run/user/$UID/intermediate_ca.key \ +--ca-password-file /run/user/$UID/intermediate_ca_password \ +--not-after 21900h \ +--insecure --no-password + +# infra + +step certificate create \ +"postgresql.ilnmors.internal" /run/user/$UID/postgresql.crt /run/user/$UID/postgresql.key \ +--profile leaf \ +--san postgresql.ilnmors.internal \ +--ca /run/user/$UID/intermediate_ca.crt \ +--ca-key /run/user/$UID/intermediate_ca.key \ +--ca-password-file 
/run/user/$UID/intermediate_ca_password \ +--not-after 21900h \ +--insecure --no-password + +step certificate create \ +"ldap.ilnmors.internal" /run/user/$UID/ldap.crt /run/user/$UID/ldap.key \ +--profile leaf \ +--san ldap.ilnmors.internal \ +--ca /run/user/$UID/intermediate_ca.crt \ +--ca-key /run/user/$UID/intermediate_ca.key \ +--ca-password-file /run/user/$UID/intermediate_ca_password \ +--not-after 21900h \ +--insecure --no-password + +step certificate create \ +"prometheus.ilnmors.internal" /run/user/$UID/prometheus.crt /run/user/$UID/prometheus.key \ +--profile leaf \ +--san prometheus.ilnmors.internal \ +--ca /run/user/$UID/intermediate_ca.crt \ +--ca-key /run/user/$UID/intermediate_ca.key \ +--ca-password-file /run/user/$UID/intermediate_ca_password \ +--not-after 21900h \ +--insecure --no-password + +step certificate create \ +"loki.ilnmors.internal" /run/user/$UID/loki.crt /run/user/$UID/loki.key \ +--profile leaf \ +--san loki.ilnmors.internal \ +--ca /run/user/$UID/intermediate_ca.crt \ +--ca-key /run/user/$UID/intermediate_ca.key \ +--ca-password-file /run/user/$UID/intermediate_ca_password \ +--not-after 21900h \ +--insecure --no-password + +# DSM + +step certificate create \ +"nas.ilnmors.internal" /run/user/$UID/nas.crt /run/user/$UID/nas.key \ +--profile leaf \ +--san nas.ilnmors.internal \ +--ca /run/user/$UID/intermediate_ca.crt \ +--ca-key /run/user/$UID/intermediate_ca.key \ +--ca-password-file /run/user/$UID/intermediate_ca_password \ +--not-after 21900h \ +--insecure --no-password + +## Recreate leaf certificates +## update secrets.yaml +step certificate create \ +"crowdsec.ilnmors.internal" /run/user/$UID/crowdsec.crt /run/user/$UID/crowdsec.key \ +--profile leaf \ +--san crowdsec.ilnmors.internal \ +--ca /run/user/$UID/intermediate_ca.crt \ +--ca-key /run/user/$UID/intermediate_ca.key \ +--ca-password-file /run/user/$UID/intermediate_ca_password \ +--not-after 21900h \ +--insecure --no-password -f +# print +cat /run/user/$UID/crowdsec.key 
+cat /run/user/$UID/crowdsec.crt + +# Verify +step certificate verify /run/user/$UID/test.crt --roots /run/user/$UID/root_ca.crt +# Inspect +step certificate inspect /run/user/$UID/test.crt +# validate date +sudo step certificate inspect --format json /run/user/$UID/test.crt | jq '.validity.end' +# margin date +echo "$(( ($(date -d 2028-07-17T03:50:10Z +%s) - $(date +%s)) / 60 / 60 / 24 ))" + +# Delete temporary files +rm /run/user/$UID/root_ca* +rm /run/user/$UID/intermediate_ca* +rm /run/user/$UID/*.key +rm /run/user/$UID/*.crt +``` + +#### SSH CA + +```bash +# Generate SSH CA +ssh-keygen -t ed25519 -f /run/user/$UID/id_local_ssh_ca -C "LOCAL_SSH_CA" -N "" +# Save the key and crt files content in `secrets.yaml` +echo @cert-authority *.ilnmors.internal "$(cat /run/user/$UID/id_local_ssh_ca.pub)" | sudo tee /etc/ssh/ssh_known_hosts >/dev/null && sudo chmod 644 /etc/ssh/ssh_known_hosts + +# Signing HOST SSH crt by SSH CA key +ssh-keygen -s /run/user/$UID/id_local_ssh_ca \ +-h \ +-I "vmm" \ +-n "vmm,vmm_init,vmm.ilnmors.internal,init.vmm.ilnmors.internal" \ +/run/user/$UID/id_vmm_ssh_host.pub +# This process is automated by ansible + +ssh-keygen -L -f /etc/ssh/ssh_host_ed25519_key-cert.pub + +# Create SSH client key +ssh-keygen -t ed25519 -f /etc/secrets/$UID/id_console -C "il@ilnmors.internal" -N "" + +# Signing SSH client crt by SSH CA key +ssh-keygen -s /run/user/$UID/id_local_ssh_ca \ +-I "console" \ +-n "vmm,fw,infra,auth,app" \ +/etc/secrets/$UID/id_console.pub +# This process is automated by ansible +``` diff --git a/docs/runbook/03-wireguard.md b/docs/runbook/03-wireguard.md new file mode 100644 index 0000000..fdfb08d --- /dev/null +++ b/docs/runbook/03-wireguard.md @@ -0,0 +1,23 @@ +# WireGuard key get + +```bash +wg genkey | tee /run/user/$UID/server_private.key | wg pubkey | tee /run/user/$UID/server_pub.key +wg genkey | tee /run/user/$UID/client_private.key | wg pubkey | tee /run/user/$UID/client_pub.key +wg genpsk | tee /run/user/$UID/preshared.key +# 
Save the key files content in `secrets.yaml` + +# Client wireguard setting + +[Interface] +PrivateKey = client_private.key +Address = 192.168.99.20/32, fd00:99::20/128 +DNS = 192.168.10.2, fd00:10::2 + +[Peer] +PublicKey = server_pub.key +PresharedKey = preshared.key +AllowedIPs = 192.168.0.0/16, fd00::/8 +Endpoint = vpn.ilnmors.com:11290 +PersistentKeepalive = 25 + +``` diff --git a/docs/runbook/04-hypervisor.md b/docs/runbook/04-hypervisor.md new file mode 100644 index 0000000..04cae92 --- /dev/null +++ b/docs/runbook/04-hypervisor.md @@ -0,0 +1,162 @@ +# Hypervisor \(vmm\) + +Initiating hypervisor doesn't use ansible. Hypervisor is working on hardware itself, so there is a lot of possible variables like IOMMU id, MAC addresses, etc. + +Hypervisor is initiated manually with the configuration files which are stored in USB or External HDD and, WAN connection is from ISP DHCP. All the files in `~/workspace/homelab/data/vmm_init/` + +## Installing Debian on server + +- BIOS: + - Check BIOS configuration; IOMMU/VT-d +- General: + - Language: English - English + - Location: Other > Asia > South Korea + - Locale: United State - en_US.UTF-8 + - Keymap to use: American English +- Network: + - Auto Configuration: Using DHCP +- Server: + - Hostname: vmm + - Domain: ilnmors.internal +- User: + - Root Password: \[blank\] + - Full name for the new user: vmm + - User Name: bootstrap + - User Password: debian +- Partition setting: manual + - 512MiB - EFI system partition \(Booting flag: on\) + - 1GiB - Ext4 Journaling \(Mount: /boot) + - 800 GiB -LVM + - 64GiB: vmm-root - Ext4 Journaling \(Mount: /\) + - 700GiB: vmm-libvirt - Ext4 \(Mount: /var/lib/libvirt\) +- Debian package manager setting + - Scan extra installation media: no + - Mirror country: South Korea + - Archive mirror: deb.debian.org + - Proxy: \[blank\] + - Popularity-contest: no +- Installing packages setting + - \[\*\] SSH server + - \[\*\] Standard system utilities + +### Initial configuration + +Hypervisor 
operates a pure L2 switch for fw and it can never access the WAN without fw after initial configuration. This means there is an air-gap: the hypervisor cannot access the WAN for a while \(from the end of the initial setting to the beginning of the fw setting\).
+
+Hypervisor operates on hardware. Hardware information is always uncertain, and it is set only once. Managing this process as IaC is over-engineering.
+
+```bash
+# Mount USB on server
+lsblk -l
+# /dev/xxx # USB
+sudo mkdir /mnt/usb
+sudo mount /dev/xxx /mnt/usb
+
+# Setting user and groups
+sudo groupadd svadmins -g 2000
+sudo useradd -u 2000 -g svadmins -G sudo -c "Hypervisor" -m -d /home/vmm -s /bin/bash vmm
+
+# Installing packages
+sudo apt update && sudo apt upgrade
+
+sudo apt install -y \
+acl curl jq crowdsec systemd-resolved \
+qemu-system-x86 ksmtuned libvirt-daemon-system virt-top \
+python3 python3-apt python3-libvirt python3-lxml
+
+# Deploy ssh ca
+sudo cp /mnt/usb/vmm/ssh/local_ssh_ca.pub /etc/ssh/
+sudo chmod 644 /etc/ssh/local_ssh_ca.pub
+sudo cp /mnt/usb/vmm/ssh/sshd_config.d/*.conf /etc/ssh/sshd_config.d/
+sudo chmod 644 /etc/ssh/sshd_config.d/*.conf
+
+# Deploy networkd configuration files
+sudo cp /mnt/usb/vmm/network/* /etc/systemd/network/
+
+sudo chmod 644 /etc/systemd/network/*
+
+sudo cp /mnt/usb/vmm/sysctl.d/bridge.conf /etc/sysctl.d/bridge.conf
+sudo chmod 644 /etc/sysctl.d/bridge.conf
+
+# Check physical MAC address and modify .link file
+ip addr
+sudo nano /etc/systemd/network/eth0.link
+sudo nano /etc/systemd/network/eth1.link
+
+sudo systemctl disable networking.service
+sudo systemctl enable systemd-networkd.service
+
+# Deploy nftables config files
+sudo cp /mnt/usb/vmm/nftables.conf /etc/nftables.conf
+
+sudo chmod 700 /etc/nftables.conf
+
+# Fix grub
+sudo cp /mnt/usb/vmm/grub.d/iommu.cfg /etc/default/grub.d/iommu.cfg
+# GRUB_CMDLINE_LINUX_DEFAULT="quiet intel_iommu=on iommu=pt"
+## Intel CPU: intel_iommu=on, AMD CPU: amd_iommu=on
+sudo chmod 644 /etc/default/grub.d/iommu.cfg
+
+# Check PCIE ID and IOMMU group
+lspci -nn | grep -i -e "SATA" -e "VGA"
+
+# 00:02.0 VGA compatible controller [0300] ... [8086:46d4]
+# PCI address: 00:02.0 / Device ID 8086:46d4
+
+# 04:00.0 SATA controller [0106] ... [1b21:1064]
+# PCI address: 04:00.00 / Device ID 1b21:1064
+
+# Check the iommu group of devices
+## VGA
+readlink /sys/bus/pci/devices/0000\:00\:02.0/iommu_group
+# ../../../../kernel/iommu_groups/12
+ls /sys/kernel/iommu_groups/12/devices/
+# 00:02.00
+## SATA Controller
+readlink /sys/bus/pci/devices/0000\:04\:00.0/iommu_group
+# ../../../../kernel/iommu_groups/14
+ls /sys/kernel/iommu_groups/14/devices/
+# 04:00.00
+
+# To pass a device through, it has to be the only device in its IOMMU group, or all devices in the same group must be passed through simultaneously.
+
+# Add modprobe.d/vfio.conf
+sudo cp /mnt/usb/vmm/modprobe.d/vfio.conf /etc/modprobe.d/vfio.conf
+sudo nano /etc/modprobe.d/vfio.conf
+# options vfio-pci ids=8086:46d4,1b21:1064
+# softdep i915 pre: vfio-pci
+# softdep ahci pre: vfio-pci
+sudo chmod 644 /etc/modprobe.d/vfio.conf
+
+# Apply vfio configurations
+sudo update-grub
+sudo update-initramfs -u
+
+# umount usb
+sudo umount -f -l /mnt/usb
+
+# reboot system
+sudo reboot
+
+# Connect console and vmm via lan cable, Set console's NIC as 192.168.1.11
+# delete default user
+# ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null vmm_init
+# login: vmm
+
+# check vfio-pci
+lspci -nnk -d 8086:46d4
+lspci -nnk -d 1b21:1064
+# Kernel driver in use: vfio-pci
+
+id vmm
+# check sudo group
+su - bootstrap
+sudo passwd vmm
+# enter new password
+exit
+
+# delete default user
+sudo userdel -r bootstrap
+id bootstrap
+# id: 'bootstrap': no such user
+```
diff --git a/docs/runbook/05-hardwares.md b/docs/runbook/05-hardwares.md
new file mode 100644
index 0000000..1ac82d8
--- /dev/null
+++ b/docs/runbook/05-hardwares.md
@@ -0,0 +1,220 @@
+# Hardware
+
+All hardware configuration is set after fw vm. 
The MAC address of each hardware device is reserved in kea-dhcp.
+ - configure: service: certificate: nas.ilnmors.internal + +## Authelia OIDC + +- **!CAUTION!** It can be set after authelia is implemented +- Following [here](../../../config/containers/auth/authelia/config/authelia.yaml.j2) for Authelia configuration +- Control Panel:Domain/LDAP:SSO Client + - Login Settings: \[x\] Select SSO by default on the login page + - Services + - \[x\] Enable OpenID Connect SSO service + - OpenID Connect SSO Settings + - Profile: OIDC + - Account type: Domain/LDAP/local + - Name: Authelia + - Well-Known URL: https://authelia.ilnmors.com/.well-known/openid-configuration + - Application ID: dsm \(what you designated\) + - Application Secret: secret value + - Redirect URI: https://nas.ilnmors.internal:5001 + - Authorization scope: openid profile groups email + - Username claim: preferred_username +- Match the user name \(ID\) in DSM and lldap id. + +### Kopia in DSM + +#### Upload Kopia repository to DSM + +- Directory + - Control Panel:shared folder: docker + - Create: docker/kopia - permission: everyone rwx - inheretence to sub directories + +- Container manager + - Package Center:Conatiner manager:install + +- Upload repository directory from console to DSM + - docker/kopia/repository + +- Add certificate - DSM reverse proxy cannot deal with gRPC + - /docker/kopia/config/ssl/nas.key + - /docker/kopia/config/ssl/nas.crt \(including intermediate crt\) + +- container manager:images:import + - kopia/kopia + - tags: \{\{ version['packages']['kopia'] \}\} +- run + - image: kopia/kopia + - containername: kopia-server + - \[x\] Enable auto restart + - port: 51515:51515 + - volume: /docker/kopia/config:/app/config:rw + - volume: /docker/kopia/cache:/app/cache:rw + - volume: /docker/kopia/logs:/app/logs:rw + - volume: /docker/kopia/repository:/repository:rw + - environment: KOPIA_PASSWORD=$KOPIA.REPOSITORY + - command: server start --no-ui --tls-cert-file=/app/config/ssl/nas.crt --tls-key-file=/app/config/ssl/nas.key --address=0.0.0.0:51515 
--log-level=info +- action:Terminal:Create +```bash +kopia repository connect filesystem \ +--path=/repository \ +--override-username="il" \ +--override-hostname="nas.ilnmors.internal" +``` +- action:restart + +- Set firewall nftables +- Remove kopia_tmp dir from console + +### Connection from client + +#### Structure + +Repository directory - encrypted by server KOPIA_PASSWORD as master key of repository + +Server manage ACL with user password, user's KOPIA_PASSWORD. When server verify user with their password, server works with its repository password. + +Repository - \(Repository key; master key\) - Server - \(User key; access key\) - Client + +- Client knows its access password as KOPIA_PASSWORD to access server. It doesn't know master key, server's KOPIA_PASSWORD. server will control repository by its KOPIA_PASSWORD. their name is the same but it is different. + +#### Access + +```bash +# Console +# you have to use `'` not `"` +KOPIA_PASSWORD='$kopia.user.user_name' \ +/usr/bin/kopia repository connect server \ +--url=https://nas.ilnmors.internal:51515 \ +--override-username=console \ +--override-hostname=console.ilnmors.internal +# This makes repository.config on ~/.config/kopia +# verify with kopia server acl list command + +# infra or app +## /etc/secrets/$KOPIA_UID/kopia.env +KOPIA_PASSWORD={{ hostvars['console']['kopia']['user'][node['name']] }} +KOPIA_CONFIG_PATH=/etc/kopia/repository.config +KOPIA_CACHE_DIRECTORY=/var/cache/kopia +KOPIA_LOG_DIR=/var/cache/kopia/logs +KOPIA_CHECK_FOR_UPDATES=false +## .service file +BindReadOnlyPaths=/path/to/backup +# In root namescope, %u always bring 0 +BindPaths=/etc/kopia +BindPaths=/etc/secrets/{{ kopia_uid }} +BindPaths=/var/cache/kopia +EnvironmentFile=/etc/secrets/{{ kopia_uid }}/kopia.env + +ExecStartPre=/usr/bin/kopia repository connect server \ + --url=https://{{ infra_uri['kopia']['domain'] }}:{{ infra_uri['kopia']['ports']['https'] }} \ + --override-username={{ node['name'] }} \ + --override-hostname={{ 
node['name'] }}.ilnmors.internal + +ExecStart=/usr/bin/kopia snapshot create \ + /path/to/backup +``` + +### Check kopia snapshot + +```bash +# snapshot id check +kopia snapshot list [--all] +# Snapshot ID check +kopia ls -l [-r: for recursive] $SNAPSHOT_ID +kopia show -l $SNAPSHOT_ID/file/path +# or +kopia show -l $FILE_ID +``` + +### Restore + +```bash +mkdir -p /mnt/kopia +kopia mount [$SNAPSHOT_ID|all] kopia & +``` diff --git a/docs/runbook/06-kopia.md b/docs/runbook/06-kopia.md new file mode 100644 index 0000000..d6de6a8 --- /dev/null +++ b/docs/runbook/06-kopia.md @@ -0,0 +1,248 @@ +# kopia + +## Create repository + +```bash +# export KOPIA_PASSWORD="your_repository_password" +# It exists by Docker environment, however when it is used by pure linux server this environemnt is needed. + +# *! CAUTION !* +# THIS PROCESS CONTAINING SECRET VALUES. +# WHEN YOU TYPE THE COMMAND ON SHELL, YOU MUST USE [BLANK] BEFORE COMMAND +# e.g. +# shell@shell$ command (X) +# shell@shell$ [BLANK]command (O) +# BLANK prevent the command to save on .bash_history +# After finish this process, use `history -c` and `clear` for just in case. 
+ + +# Kopia CLI Create repository +mkdir -p /home/console/workspace/kopia_tmp/{repository,config,cache} + +/usr/bin/kopia repository create filesystem \ +--password="kopia.repository" \ +--config-file="/home/console/workspace/kopia_tmp/config/repository.config" \ +--cache-directory="/home/console/workspace/kopia_tmp/cache" \ +--path="/home/console/workspace/kopia_tmp/repository" \ +--description="ilnmors.internal kopia repository" \ +--create-only + +# block hash: BLAKE2B-256-128 +# encryption: AES256-GCM-HMAC-SHA256 +# key derivation: scrypt-65536-8-1 +# splitter: DYNAMIC-4M-BUZHASH + +# Kopia CLI Connect repository +/usr/bin/kopia repository connect filesystem \ +--password="kopia.repository" \ +--config-file="/home/console/workspace/kopia_tmp/config/repository.config" \ +--cache-directory="/home/console/workspace/kopia_tmp/cache" \ +--path="/home/console/workspace/kopia_tmp/repository" \ +--override-username="il" \ +--override-hostname="nas.ilnmors.internal" \ +--description="ilnmors.internal kopia repository" + +# Connected to repository + +# Start kopia server on pure linux server + +# /usr/bin/kopia server start \ +# --password="kopia.repository" \ +# --config-file="/home/console/workspace/kopia_tmp/config/repository.config" \ +# --cache-directory="/home/console/workspace/kopia_tmp/cache" \ +# --no-ui \ +# --address=localhost:51515 \ +# --insecure \ +# --log-level=info & + +# All after these processes reset history. +clear +history -c +``` + +### Set users + +- Container manager: container: kopia-server: Action: Open terminal: Create: bash + +```bash +# export KOPIA_PASSWORD="your_repository_password" +# It exists by Docker environment, however when it is used by pure linux server this environemnt is needed. 
+ +# Create users +## console@console.ilnmors.internal, infra@infra.ilnmors.internal, app@app.ilnmors.internal +## Repository pasword is not required, because it is already connected +/usr/bin/kopia server users add \ +--config-file="/home/console/workspace/kopia_tmp/config/repository.config" \ +--cache-directory="/home/console/workspace/kopia_tmp/cache" \ +--user-password="$kopia.user.console" \ +console@console.ilnmors.internal + +/usr/bin/kopia server users add \ +--config-file="/home/console/workspace/kopia_tmp/config/repository.config" \ +--cache-directory="/home/console/workspace/kopia_tmp/cache" \ +--user-password="$kopia.user.infra" \ +infra@infra.ilnmors.internal + +/usr/bin/kopia server users add \ +--config-file="/home/console/workspace/kopia_tmp/config/repository.config" \ +--cache-directory="/home/console/workspace/kopia_tmp/cache" \ +--user-password="$kopia.user.app" \ +app@app.ilnmors.internal + +## verify +/usr/bin/kopia server users list \ +--config-file="/home/console/workspace/kopia_tmp/config/repository.config" \ +--cache-directory="/home/console/workspace/kopia_tmp/cache" + +# ACL +## Console - Run these commands on local container +## snapshot, policy, user, acl, content +/usr/bin/kopia server acl add \ +--config-file="/home/console/workspace/kopia_tmp/config/repository.config" \ +--cache-directory="/home/console/workspace/kopia_tmp/cache" \ +--user="console@console.ilnmors.internal" \ +--target="type=snapshot" \ +--access=FULL + +/usr/bin/kopia server acl add \ +--config-file="/home/console/workspace/kopia_tmp/config/repository.config" \ +--cache-directory="/home/console/workspace/kopia_tmp/cache" \ +--user="console@console.ilnmors.internal" \ +--target="type=policy" \ +--access=FULL + +/usr/bin/kopia server acl add \ +--config-file="/home/console/workspace/kopia_tmp/config/repository.config" \ +--cache-directory="/home/console/workspace/kopia_tmp/cache" \ +--user="console@console.ilnmors.internal" \ +--target="type=user" \ +--access=FULL + 
+/usr/bin/kopia server acl add \ +--config-file="/home/console/workspace/kopia_tmp/config/repository.config" \ +--cache-directory="/home/console/workspace/kopia_tmp/cache" \ +--user="console@console.ilnmors.internal" \ +--target="type=acl" \ +--access=FULL + +/usr/bin/kopia server acl add \ +--config-file="/home/console/workspace/kopia_tmp/config/repository.config" \ +--cache-directory="/home/console/workspace/kopia_tmp/cache" \ +--user="console@console.ilnmors.internal" \ +--target="type=content" \ +--access=FULL + +## app and infra - Run these command on console +### Global pull to remove duplicates +/usr/bin/kopia server acl add \ +--config-file="/home/console/workspace/kopia_tmp/config/repository.config" \ +--cache-directory="/home/console/workspace/kopia_tmp/cache" \ +--user="infra@infra.ilnmors.internal" \ +--target="type=content" \ +--access=APPEND + +/usr/bin/kopia server acl add \ +--config-file="/home/console/workspace/kopia_tmp/config/repository.config" \ +--cache-directory="/home/console/workspace/kopia_tmp/cache" \ +--user="app@app.ilnmors.internal" \ +--target="type=content" \ +--access=APPEND + +### Snapshot +/usr/bin/kopia server acl add \ +--config-file="/home/console/workspace/kopia_tmp/config/repository.config" \ +--cache-directory="/home/console/workspace/kopia_tmp/cache" \ +--user="infra@infra.ilnmors.internal" \ +--target=type=snapshot,username=infra,hostname=infra.ilnmors.internal \ +--access=FULL + +/usr/bin/kopia server acl add \ +--config-file="/home/console/workspace/kopia_tmp/config/repository.config" \ +--cache-directory="/home/console/workspace/kopia_tmp/cache" \ +--user="app@app.ilnmors.internal" \ +--target=type=snapshot,username=app,hostname=app.ilnmors.internal \ +--access=FULL + +### Policy +kopia server acl add \ +--config-file="/home/console/workspace/kopia_tmp/config/repository.config" \ +--cache-directory="/home/console/workspace/kopia_tmp/cache" \ +--user="infra@infra.ilnmors.internal" \ +--target="type=policy" \ 
+--access=READ + +kopia server acl add \ +--config-file="/home/console/workspace/kopia_tmp/config/repository.config" \ +--cache-directory="/home/console/workspace/kopia_tmp/cache" \ +--user="infra@infra.ilnmors.internal" \ +--target="type=policy,username=infra,hostname=infra.ilnmors.internal" \ +--access=FULL + +kopia server acl add \ +--config-file="/home/console/workspace/kopia_tmp/config/repository.config" \ +--cache-directory="/home/console/workspace/kopia_tmp/cache" \ +--user="app@app.ilnmors.internal" \ +--target="type=policy" \ +--access=READ + +kopia server acl add \ +--config-file="/home/console/workspace/kopia_tmp/config/repository.config" \ +--cache-directory="/home/console/workspace/kopia_tmp/cache" \ +--user="app@app.ilnmors.internal" \ +--target="type=policy,username=app,hostname=app.ilnmors.internal" \ +--access=FULL +``` + +### Set policy + +- Container manager: container: kopia-server: Action: Open terminal: Create: bash + +```bash +# compression +/usr/bin/kopia policy set \ +--config-file="/home/console/workspace/kopia_tmp/config/repository.config" \ +--cache-directory="/home/console/workspace/kopia_tmp/cache" \ +--global \ +--compression=zstd + +# infra for db dump sql +kopia policy set infra@infra.ilnmors.internal \ +--config-file="/home/console/workspace/kopia_tmp/config/repository.config" \ +--cache-directory="/home/console/workspace/kopia_tmp/cache" \ +--compression=zstd \ +--keep-latest=10 \ +--keep-hourly=0 \ +--keep-daily=14 \ +--keep-weekly=4 \ +--keep-monthly=6 \ +--keep-annual=1 + +# app for media, stateful data +kopia policy set app@app.ilnmors.internal \ +--config-file="/home/console/workspace/kopia_tmp/config/repository.config" \ +--cache-directory="/home/console/workspace/kopia_tmp/cache" \ +--compression=none \ +--keep-latest=10 \ +--keep-hourly=0 \ +--keep-daily=14 \ +--keep-weekly=4 \ +--keep-monthly=6 \ +--keep-annual=1 + +# verify +kopia server acl list \ 
+--config-file="/home/console/workspace/kopia_tmp/config/repository.config" \
+--cache-directory="/home/console/workspace/kopia_tmp/cache"
+
+kopia policy list \
+--config-file="/home/console/workspace/kopia_tmp/config/repository.config" \
+--cache-directory="/home/console/workspace/kopia_tmp/cache"
+git checkout $tag # rollback to tag version +git branch $branch_name # Create branch +git switch $branch_name # Switch branch +git branch # list of branch +git switch main # Switch to main branch +git merge $branch_name # run at the main branch, merge. +git stash # temporary save +git stash pop # get temporary save + +# After git switch +git switch service +git rebase --ignore-date main # set date as current time on main branch +``` + +## Add Service with git + +```bash +# Example of establish gitea +git branch caddy-app +git switch caddy-app +git commit -m "0.0.1-caddy-app: Start caddy-app branch" +git tag -a 0.0.1-caddy-app -m "caddy-app: Start caddy-app branch" +## After finishing gitea implement +git switch main +git merge caddy-app +``` diff --git a/docs/services/app/igpu_firmware.md b/docs/services/app/igpu_firmware.md new file mode 100644 index 0000000..7d09dcb --- /dev/null +++ b/docs/services/app/igpu_firmware.md @@ -0,0 +1,12 @@ +# Firmware installation + +Cloud-init has no firmware in it, so i915 driver is needed to install. + +When the app node is initiated by ansible, `firmware-intel-graphics` and `intel-media-va-driver-non-free` are installed. And only when they are installed, `update-initramfs -u` and `reboot` modules run as a handler. + +## Verification +After reboot, check the render device. +```bash +ls -l /dev/dri +# crw-rw---- 1 root video 226, 0 ... card0 +# crw-rw---- 1 root render 226, 128 ... renderD128 \ No newline at end of file diff --git a/docs/services/common/alloy.md b/docs/services/common/alloy.md new file mode 100644 index 0000000..954668e --- /dev/null +++ b/docs/services/common/alloy.md @@ -0,0 +1,35 @@ +# Alloy + +## Communication + +Alloy runs on systemd \(host\), and postgresql runs as container \(rootless podman\). When host system and container communicate, container recognizes host system as host-gateway \(Link local address\). 
+ +## postgresql monitor + +### Monitor exporter + +```sql +postgres=# CREATE USER alloy WITH PASSWORD 'password'; +CREATE ROLE +postgres=# GRANT pg_monitor TO alloy; +GRANT ROLE +postgres=# \drg + List of role grants + Role name | Member of | Options | Grantor +-----------+------------+--------------+---------- + alloy | pg_monitor | INHERIT, SET | postgres +(1 row) +``` +### pg_hba.conf +```conf +hostssl postgres alloy {{ hostvars['fw']['network4']['infra']['server'] }}/32 trust +hostssl postgres alloy {{ hostvars['fw']['network6']['infra']['server'] }}/128 trust +hostssl postgres alloy {{ hostvars['fw']['network4']['subnet']['lla'] }} trust +hostssl postgres alloy {{ hostvars['fw']['network6']['subnet']['lla'] }} trust +``` + +### check + +```bash +curl http://localhost:12345/metrics +``` diff --git a/docs/services/common/caddy.md b/docs/services/common/caddy.md new file mode 100644 index 0000000..b945f22 --- /dev/null +++ b/docs/services/common/caddy.md @@ -0,0 +1,45 @@ +# Caddy + +## TLS re-encryption + +This is not a perfect E2EE communication theorogically, however technically it is. The main caddy decrypt as an edge node of WAN side, and it becomes a client of side caddy with private certificate. + +### .com public domain + +WAN - \(Let's Encrypt certificate\) -> Caddy \(auth\) - \(ilnmors internal certificate\) -> Caddy \(app\) or https services - http -> app's local service + +### .internal private domain +client - \(ilnmors internal certificate\) -> Caddy \(Infra\) - http -> local services + +### DNS record + +*.app.ilnmors.internal - CNAME -> app.ilnmors.internal + +## X-Forwarded-Host + +When caddy in app conducts TLS re-encryption, it is important to change their Host header as X-Forwarded-Host haeder for session maintainance. 
+ +## Example + +```ini +# Auth server +test.ilnmors.com +{ + import crowdsec_log + route { + crowdsec + reverse_proxy https://test.app.ilnmors.internal + } +} +# App server +test.app.ilnmors.internal +{ + import internal_tls + trusted_proxies {{ hostvars['fw']['network4']['auth']['server'] }} {{ hostvars['fw']['network6']['auth']['server'] }} + route { + reverse_proxy host.containers.internal:3000 { + header_up Host {header.X-Forwarded-Host} {Host} + } + } +} +``` diff --git a/docs/services/common/crowdsec.md b/docs/services/common/crowdsec.md new file mode 100644 index 0000000..d4dfcb0 --- /dev/null +++ b/docs/services/common/crowdsec.md @@ -0,0 +1,233 @@ +# Crowdsec + +## LAPI + +### Detecting +Host logs \> CrowdSec Agent\(parser\) > CrowdSec LAPI + +### Decision +CrowdSec LAPI \(Decision + Register\) + +### Block +CrowdSec LAPI \> CrowdSec Bouncer \(Block\) + +## CAPI +CrowdSec CAPI \> crowdsec LAPI \(local\) \> CrowdSec Bouncer \(Block\) + +## Ansible Deployment + +### Set LAPI (fw/roles/tasks/set_crowdsec_lapi.yaml) + +- Deploy fw's config.yaml +- Deploy crowdsec certificates +- Register machines \(Agents\) +- Register bouncers \(Bouncers\) + +### Set Bouncer (fw/roles/tasks/set_crowdsec_bouncer.yaml) + +- Deploy crowdsec-firewall-bouncer.yaml +- Install suricata collection \(parser\) with cscli +- Set acquis.d for suricata +- set-only: bouncer can't get metrics from the chain and rules count result which it doesn't make. - It means, it is impossible to use prometheus metric with set-only true option. +- chain or rules matched count reasults are able to check on nftables. + - use sudo nft list chain inet filter global to check packet blocked. 
\(counter command is required\) + +### Set Machines; agents (common/tasks/set_crowdsec_agent.yaml) + +- Deploy config.yaml except fw \(disable LAPI, online_api_credentials\) +- Deploy local_api_credentials.yaml + +### Set caddy host (auth/tasks/set_caddy.yaml) + +- Set caddy CrowdSec module +- Set caddy log directory +- Install caddy collection \(parser\) with cscli +- Set acquis.d for caddy + +### Set whitelist (/etc/crowdsec/parser/s02-enrich/whitelists.yaml) + +- Set only local console IP address +- This can block local VM to the other subnet, but the communication between vms is possible because they are in the same subnet\(L2\) - packets don't pass the fw. +- Crowdsec bouncer only conducts blocks forward chain which pass Firewall, it is blocked by crowdsec bouncer based on lapi + +## Test + +### Decision test + +> Set test decisions and check it + +fw@fw:/etc/crowdsec/bouncers$ sudo cscli decisions add --ip 5.5.5.5 --duration 10m --reason "Test" +INFO[12-01-2026 01:50:40] Decision successfully added +fw@fw:/etc/crowdsec/bouncers$ sudo tail -f /var/log/crowdsec-firewall-bouncer.log + +time="12-01-2026 01:50:22" level=info msg="backend type : nftables" +time="12-01-2026 01:50:22" level=info msg="nftables initiated" +time="12-01-2026 01:50:22" level=info msg="Using API key auth" +time="12-01-2026 01:50:22" level=info msg="Processing new and deleted decisions . . ." +time="12-01-2026 01:50:22" level=info msg="Serving metrics at 127.0.0.1:60601/metrics" +time="12-01-2026 01:50:22" level=info msg="1320 decisions deleted" +time="12-01-2026 01:50:22" level=info msg="15810 decisions added" +time="12-01-2026 01:50:42" level=info msg="1 decision added" + +fw@fw:/etc/crowdsec/bouncers$ sudo nft list ruleset | grep -i 5.5.5.5 + 5.5.5.5 timeout 9m54s876ms expires 9m22s296ms, + +### Parser test + +> CrowdSec "crowdsecurity/suricata-evelogs" only parses "event_type: alert". 
You can test with cscli explain + +fw@fw:~$ sudo cscli explain --file /tmp/suri_test.log --type suricata-evelogs --verbose +line: {"timestamp":"2026-01-11T14:43:52.153576+0000","flow_id":972844861874490,"in_iface":"wan","event_type":"alert","src_ip":"197.242.151.53","src_port":42976,"dest_ip":"59.5.196.55","dest_port":38694,"proto":"TCP","flow":{"pkts_toserver":1,"pkts_toclient":0,"bytes_toserver":60,"bytes_toclient":0,"start":"2026-01-11T14:42:51.554188+0000","end":"2026-01-11T14:42:51.554188+0000","age":0,"state":"new","reason":"timeout","alerted":false},"community_id":"1:Ovyuzq7R8yA3YfxM8jEExR5BZMI=","tcp":{"tcp_flags":"02","tcp_flags_ts":"02","tcp_flags_tc":"00","syn":true,"state":"syn_sent","ts_max_regions":1,"tc_max_regions":1}} + ├ s00-raw + | ├ 🟢 crowdsecurity/non-syslog (first_parser) + | └ 🔴 crowdsecurity/syslog-logs + ├ s01-parse + | ├ 🔴 crowdsecurity/apache2-logs + | ├ 🔴 crowdsecurity/nginx-logs + | ├ 🔴 crowdsecurity/sshd-logs + | ├ 🟢 crowdsecurity/suricata-evelogs (+9 ~2) + | ├ update evt.Stage : s01-parse -> s02-enrich + | ├ create evt.Parsed.dest_ip : 59.5.196.55 + | ├ create evt.Parsed.dest_port : 38694 + | ├ create evt.Parsed.proto : TCP + | ├ create evt.Parsed.time : 2026-01-11T14:43:52.153576 + | ├ update evt.StrTime : -> 2026-01-11T14:43:52.153576Z + | ├ create evt.Meta.log_type : suricata_alert + | ├ create evt.Meta.service : suricata + | ├ create evt.Meta.source_ip : 197.242.151.53 + | ├ create evt.Meta.sub_log_type : suricata_alert_eve_json + | ├ create evt.Meta.suricata_flow_id : 972844861874490 + | └ 🔴 crowdsecurity/suricata-fastlogs + ├ s02-enrich + | ├ 🟢 crowdsecurity/dateparse-enrich (+2 ~1) + | ├ create evt.Enriched.MarshaledTime : 2026-01-11T14:43:52.153576Z + | ├ update evt.MarshaledTime : -> 2026-01-11T14:43:52.153576Z + | ├ create evt.Meta.timestamp : 2026-01-11T14:43:52.153576Z + | ├ 🟢 crowdsecurity/geoip-enrich (+13) + | ├ create evt.Enriched.IsInEU : false + | ├ create evt.Enriched.IsoCode : ZA + | ├ create evt.Enriched.ASNumber 
: 37611 + | ├ create evt.Enriched.Latitude : -28.998400 + | ├ create evt.Enriched.Longitude : 23.988800 + | ├ create evt.Enriched.SourceRange : 197.242.144.0/20 + | ├ create evt.Enriched.ASNNumber : 37611 + | ├ create evt.Enriched.ASNOrg : Afrihost + | ├ create evt.Meta.ASNNumber : 37611 + | ├ create evt.Meta.IsInEU : false + | ├ create evt.Meta.SourceRange : 197.242.144.0/20 + | ├ create evt.Meta.ASNOrg : Afrihost + | ├ create evt.Meta.IsoCode : ZA + | ├ 🔴 crowdsecurity/http-logs + | └ 🟢 crowdsecurity/whitelists (unchanged) + ├-------- parser success 🟢 + ├ Scenarios + +#### Caddy +auth@auth:~/containers/authelia/config$ sudo cscli explain --file /var/log/caddy/access.log --type caddy +line: {"level":"info","ts":1771601235.7503738,"logger":"http.log.access.log1","msg":"handled request","request":{"remote_ip":"192.168.99.20","remote_port":"59900","client_ip":"192.168.99.20","proto":"HTTP/2.0","method":"GET","host":"authelia.ilnmors.com","uri":"/static/js/components.TimerIcon.CO1b_Yfm.js","headers":{"Accept-Encoding":["gzip, deflate, br, zstd"],"Referer":["https://authelia.ilnmors.com/settings"],"Te":["trailers"],"Accept":["*/*"],"Sec-Fetch-Dest":["script"],"Priority":["u=1"],"Sec-Fetch-Mode":["cors"],"Accept-Language":["en-US,en;q=0.9"],"Cookie":["REDACTED"],"Sec-Fetch-Site":["same-origin"],"User-Agent":["Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:147.0) Gecko/20100101 Firefox/147.0"]},"tls":{"resumed":false,"version":772,"cipher_suite":4865,"proto":"h2","server_name":"authelia.ilnmors.com"}},"bytes_read":0,"user_id":"","duration":0.0077169,"size":10193,"status":200,"resp_headers":{"Via":["1.1 Caddy"],"Alt-Svc":["h3=\":443\"; ma=2592000"],"X-Content-Type-Options":["nosniff"],"Content-Security-Policy":["default-src 'none'"],"Date":["Fri, 20 Feb 2026 15:27:15 GMT"],"Etag":["7850315714d1e01e73f4879aa3cb7465b4e879dc"],"Cache-Control":["public, max-age=0, must-revalidate"],"Content-Length":["10193"],"X-Frame-Options":["DENY"],"Content-Type":["text/javascript; 
charset=utf-8"],"Referrer-Policy":["strict-origin-when-cross-origin"],"Permissions-Policy":["accelerometer=(), autoplay=(), camera=(), display-capture=(), geolocation=(), gyroscope=(), keyboard-map=(), magnetometer=(), microphone=(), midi=(), payment=(), picture-in-picture=(), screen-wake-lock=(), sync-xhr=(), xr-spatial-tracking=(), interest-cohort=()"],"X-Dns-Prefetch-Control":["off"]}} + ├ s00-raw + | ├ 🟢 crowdsecurity/non-syslog (first_parser) + | └ 🔴 crowdsecurity/syslog-logs + ├ s01-parse + | ├ 🔴 crowdsecurity/apache2-logs + | └ 🟢 crowdsecurity/caddy-logs (+19 ~2) + ├ s02-enrich + | ├ 🟢 crowdsecurity/dateparse-enrich (+2 ~1) + | ├ 🟢 crowdsecurity/http-logs (+7) + | └ 🟢 crowdsecurity/whitelists (~2 [whitelisted]) + └-------- parser failure 🔴 + +## BAN logs case + +### LAPI metrics + +fw@fw:~$ sudo cscli metrics + +Acquisition Metrics: +╭─────────────────────────────────────────────────┬────────────┬──────────────┬────────────────┬────────────────────────╮ +│ Source │ Lines read │ Lines parsed │ Lines unparsed │ Lines poured to bucket │ +├─────────────────────────────────────────────────┼────────────┼──────────────┼────────────────┼────────────────────────┤ +│ file:/var/log/suricata/eve.json │ 130.25k │ - │ 130.25k │ - │ +│ journalctl:journalctl-_SYSTEMD_UNIT=ssh.service │ 6 │ - │ 6 │ - │ +╰─────────────────────────────────────────────────┴────────────┴──────────────┴────────────────┴────────────────────────╯ + +Parser Metrics: +╭─────────────────────────────────┬─────────┬─────────┬──────────╮ +│ Parsers │ Hits │ Parsed │ Unparsed │ +├─────────────────────────────────┼─────────┼─────────┼──────────┤ +│ child-crowdsecurity/sshd-logs │ 60 │ - │ 60 │ +│ child-crowdsecurity/syslog-logs │ 6 │ 6 │ - │ +│ crowdsecurity/non-syslog │ 130.25k │ 130.25k │ - │ +│ crowdsecurity/sshd-logs │ 6 │ - │ 6 │ +│ crowdsecurity/syslog-logs │ 6 │ 6 │ - │ +╰─────────────────────────────────┴─────────┴─────────┴──────────╯ + +Local Api Metrics: 
+╭──────────────────────┬────────┬───────╮ +│ Route │ Method │ Hits │ +├──────────────────────┼────────┼───────┤ +│ /v1/alerts │ GET │ 1 │ +│ /v1/alerts │ POST │ 6 │ +│ /v1/decisions/stream │ GET │ 11337 │ +│ /v1/heartbeat │ GET │ 8053 │ +│ /v1/watchers/login │ POST │ 145 │ +╰──────────────────────┴────────┴───────╯ + +Local Api Machines Metrics: +╭─────────┬───────────────┬────────┬──────╮ +│ Machine │ Route │ Method │ Hits │ +├─────────┼───────────────┼────────┼──────┤ +│ app │ /v1/heartbeat │ GET │ 1587 │ +│ auth │ /v1/alerts │ GET │ 1 │ +│ auth │ /v1/alerts │ POST │ 6 │ +│ auth │ /v1/heartbeat │ GET │ 1605 │ +│ fw │ /v1/heartbeat │ GET │ 1621 │ +│ infra │ /v1/heartbeat │ GET │ 1620 │ +│ vmm │ /v1/heartbeat │ GET │ 1620 │ +╰─────────┴───────────────┴────────┴──────╯ + +Local Api Bouncers Metrics: +╭───────────────┬──────────────────────┬────────┬──────╮ +│ Bouncer │ Route │ Method │ Hits │ +├───────────────┼──────────────────────┼────────┼──────┤ +│ caddy-bouncer │ /v1/decisions/stream │ GET │ 1608 │ +│ fw-bouncer │ /v1/decisions/stream │ GET │ 9729 │ +╰───────────────┴──────────────────────┴────────┴──────╯ + +Local Api Decisions: +╭─────────────────┬────────┬────────┬───────╮ +│ Reason │ Origin │ Action │ Count │ +├─────────────────┼────────┼────────┼───────┤ +│ http:exploit │ CAPI │ ban │ 17803 │ +│ http:scan │ CAPI │ ban │ 4583 │ +│ ssh:bruteforce │ CAPI │ ban │ 2509 │ +│ http:bruteforce │ CAPI │ ban │ 1721 │ +│ http:crawl │ CAPI │ ban │ 87 │ +│ http:dos │ CAPI │ ban │ 15 │ +╰─────────────────┴────────┴────────┴───────╯ + +Local Api Alerts: +╭───────────────────────────────────┬───────╮ +│ Reason │ Count │ +├───────────────────────────────────┼───────┤ +│ crowdsecurity/http-bad-user-agent │ 2 │ +│ crowdsecurity/jira_cve-2021-26086 │ 4 │ +╰───────────────────────────────────┴───────╯ + +### WAF parser alerts + +auth@auth:~$ sudo cscli alerts list 
+╭────┬────────────────────┬───────────────────────────────────┬─────────┬────┬───────────┬─────────────────────────────────────────╮ +│ ID │ value │ reason │ country │ as │ decisions │ created_at │ +├────┼────────────────────┼───────────────────────────────────┼─────────┼────┼───────────┼─────────────────────────────────────────┤ +│ 25 │ Ip:206.168.34.127 │ crowdsecurity/http-bad-user-agent │ │ │ ban:1 │ 2026-03-07 02:26:58.074029091 +0000 UTC │ +│ 23 │ Ip:162.142.125.212 │ crowdsecurity/http-bad-user-agent │ │ │ ban:1 │ 2026-03-07 00:19:08.421713824 +0000 UTC │ +│ 12 │ Ip:159.65.144.72 │ crowdsecurity/jira_cve-2021-26086 │ │ │ ban:1 │ 2026-03-06 04:19:04.975124762 +0000 UTC │ +│ 11 │ Ip:206.189.95.232 │ crowdsecurity/jira_cve-2021-26086 │ │ │ ban:1 │ 2026-03-06 04:19:01.215582087 +0000 UTC │ +│ 10 │ Ip:68.183.9.16 │ crowdsecurity/jira_cve-2021-26086 │ │ │ ban:1 │ 2026-03-06 04:18:22.120468981 +0000 UTC │ +│ 9 │ Ip:138.68.144.227 │ crowdsecurity/jira_cve-2021-26086 │ │ │ ban:1 │ 2026-03-06 04:18:18.35776077 +0000 UTC │ +╰────┴────────────────────┴───────────────────────────────────┴─────────┴────┴───────────┴─────────────────────────────────────────╯ + + + + diff --git a/docs/services/common/kopia.md b/docs/services/common/kopia.md new file mode 100644 index 0000000..c69e8ca --- /dev/null +++ b/docs/services/common/kopia.md @@ -0,0 +1,14 @@ +# Kopia + +Kopia is one of modern backup solution to support very strong deduplication. + +## Repository + +Kopia saves all information, even the users and policies on repository. Repository itself is complete. Repository is encrypted by master password. + +## User and policy + +When kopia is run as a kopia server, client can access to server with user and user password. The clients don't have to know master password. Kopia server decrypt the repository with the master password, and the client just access to the kopia server with their user account. 
+ +Repository \<- Master password -\> Kopia server \<- User password -\> Kopia client + diff --git a/docs/services/console/git.md b/docs/services/console/git.md new file mode 100644 index 0000000..e69de29 diff --git a/docs/services/fw/kea.md b/docs/services/fw/kea.md new file mode 100644 index 0000000..fe7fcfe --- /dev/null +++ b/docs/services/fw/kea.md @@ -0,0 +1,29 @@ +# IP + +## IPv4 + +### Subnet management +- Static subnet \(manage without dhcp\) + - client \(for ipv4, set reservation\) + - server +- Dynamic subnet \(manage with dhcp\) + - user + +## IPv6 + +### Subnet management +- Static subnet \(manage without RA - specific definition\) + - client \(Designated ULA with NAT66\) + - server \(Designated ULA with NAT66\) +- Dynamic subnet \(manage with RA and SLAAC\) + - user \(Autogenerated GUA\) + +## Firewall policy for each subnet + +### Static subnet + +Make policies based on each specific designated IP address for nodes. + +### Dynamic subnet + +Make policies based on subnet \(or interface itself\) diff --git a/docs/services/infra/ca.md b/docs/services/infra/ca.md new file mode 100644 index 0000000..d82cb4a --- /dev/null +++ b/docs/services/infra/ca.md @@ -0,0 +1,146 @@ +## Operation +Refer to Ansible playbook + +## Configuration files + +- ca.json +- defaults.json + +### Provisioner + +A provisioner is basically the object that issues certificates as an RA. It verifies the CSR from the client, and when the CSR is valid against its policy, it signs the certificate with the CA's private key. Step-CA supports various types of provisioner. In this homelab, only ACME will be used, because the infrastructure's certificates are issued manually. Although Step-CA supports one root CA and one intermediate CA in one container, only one intermediate CA will be operated in this project. + +#### jwk-ca@ilnmors.internal + +This provisioner is to issue an intermediate CA. It wouldn't be used in this project. The CA-related options in X.509 format are optional and defined as extension options. 
To define these option in step-ca, the template file is needed. + +- file: ~/data/containers/step-ca/templates/ca.tpl + +```json +{ + "subject": {{ toJson .Subject }}, + "keyUsage": ["certSign", "crlSign"], + "basicConstraints": { + "isCA": true, + "maxPathLen": 0 + } +} +``` + +> keyUsage: Designate to manage certificates and CRL +> isCA: Designate the certificate to use CA +> maxPathLen: Designate allowed below CA's number + + +- Define provisioner + +```bash +podman exec -it step-ca \ +step ca provisioner add jwk-ca@ilnmors.internal \ +--create \ # Generate key pair automatically +--type JWK \ +--ca-config /home/step/config/ca.json \ # Sign on certificate with root CA's private key +--x509-template /home/step/template/ca.tpl \ # Use x509 template +--x509-max-dur 87600h \ # +--x509-default-dur 87600h +``` + +#### jwk@ilnmors.internal + +This provisioner is to issue the certificates like DB communication based on its identity (Using JWK and JWT pre-shared). The certificate is issued based on enrolled key in provisioner. However, in this project all crt will be used central ACME client `Caddy`. + +- Define provisioner + +```bash +podman exec -it step-ca \ +step ca provisioner add jwk@ilnmors.internal \ +--create \ # Generate key pair automatically +--type JWK \ +--x509-default-dur 2160h # To set default expire date as 90 days. +``` + +#### acme@ilnmors.internal + +This provisioner is to issue the certificates for https communication. The certificate is issued based on challenge; the ownership of domain. + +- Define provisioner +```bash +podman exec -it step-ca \ +step ca provisioner add acme@ilnmors.internal \ +--type ACME \ +--x509-default-dur 2160h # To set default expire date as 90 days. +``` + +### Subject + +Step-CA uses subject as a account. It is used to manage Step-CA remotely. To use this, it is necessary to use `--remote-management` option when the step-CA is initially set or fix `ca.json` authority.enableAdmin:true. 
When subject is enabled, provisioners aren't defined in ca.json but its own DB. + +### Policy + +Self-hosted Step-CA server doesn't support to give x509 policy for each provisioner. It only allows public policy. Only `ilnmors.internal` and `*.ilnmors.internal` certificates are required, so designate the policy in `ca.json` + +> Policies can be administered using the step CLI application. The commands are part of the step ca policy namespace. In a self-hosted step-ca, policies can be configured on the authority level. Source: [here](https://smallstep.com/docs/step-ca/policies/) + +- file: ~/data/containers/step-ca/config/ca.json + +```json +... +"authority": { + "policy": { + "x509": { + "allow": { + "dns": [ + "ilnmors.internal", + "*.ilnmors.internal" + ] + }, + "allowWildcardNames": true + } + }, + "provisioners": [ ... ] + .... +} +... +``` + +## Verify server + +### Server health check + +```bash +curl -k https://ca.ilnmors.internal:9000/health +> {"status":"ok"} +``` + +### Server policy check + +```bash +podman exec -it ca step ca certificate test.com test.crt test_key --provisioner acme@ilnmors.internal +> error creating new ACME order: The server will not issue certificates for the identifier +``` + +--- +## Set trust Root CRT + +### Linux + +#### Debian/ubuntu + +- File: /usr/local/share/ca-certificates/{ca.crt, ca.pem} +- `update-ca-certificates` + +#### Cent/RHEL/Fedora + +- File: /etc/pki/ca-trust/source/anchors/{ca.crt, ca.pem} +- `update-ca-trust` + +### Windows + +- `Windows + R` + `certlm.msc` +- `All Task` - `Import` + +### Firefox + +- Setting - Security - view certificates - Authority - add + - \[x\] trust this ca to identify website + - \[x\] trust this ca to identify email users diff --git a/docs/services/infra/grafana.md b/docs/services/infra/grafana.md new file mode 100644 index 0000000..ed347fe --- /dev/null +++ b/docs/services/infra/grafana.md @@ -0,0 +1,20 @@ +# Grafana + +## Operation +Refer to Ansible playbook +\(Postgresql user and DB 
is needed\) +\(LDAP strict readonly account is needed\) + +## Verification +- Check Caddyfile \(without caddy, use 3000 ports\) +- https://grafana.ilnmors.internal +- login with LDAP user + - connection:data sources: \[prometheus|loki\]: provisioned + - https://prometheus.ilnmors.internal:9090 + - https://loki.ilnmors.internal:3100 + + - check drill down:metrics + +## Dashboard + +- Dashboard isn't saved on local directory. They are saved on DB \(Postgresql\). \ No newline at end of file diff --git a/docs/services/infra/ldap.md b/docs/services/infra/ldap.md new file mode 100644 index 0000000..6280b40 --- /dev/null +++ b/docs/services/infra/ldap.md @@ -0,0 +1,154 @@ +## Operation +Refer to Ansible playbook +\(Postgresql user and DB is needed\) + +Integrate configuration with various app: https://github.com/lldap/lldap/blob/main/example_configs + +## Configuration +### DB URL + +Jinja2 `urlencode` module doesn't replace `/` as `%2F`. replace('/', '%2F') is necessary. +ex\) {{ var | urlencode | replace('/', '%2F') }} + +### Reset administrator password + +```bash +# infra +sudo nano $LDAP_PATH/data/lldap_config.toml +# Add below on file +ldap_user_pass = "REPLACE_WITH_PASSWORD" +force_ldap_user_pass_reset = true +# Restart lldap +systemctl --user restart ldap.service +# Delete added lines from lldap_config.toml +# ldap_user_pass = "REPLACE_WITH_PASSWORD" +# *YOU MUST DELETE PASSWORD PART* +# force_ldap_user_pass_reset = true +``` + +### Access web UI and Login + +- URL: http://ldap.ilnmors.internal:17170 \(This is temporary access way before Caddy, which is reverse proxy, is set) +- ID: admin +- PW: $LLDAP_LDAP_USER_PASSWORD + +### Create the groups + +- Groups - \[\+\] Create a group + - Group: admins + - Group: users + +It is necessary to manage ACL via authelia based on groups. 
+ +### Create the authelia user for OCID \(OP\) + +- Users: \[\+\] Create a user + - Username (cn; uid): authelia + - Display name: Authelia + - First Name: Authelia + - Last Name (sn): Service + - Email (mail): authelia@ilnmors.internal + - Password: "$(openssl rand -base64 32)" +- Groups:lldap_strict_readonly: \[Add to group\] + - This group allow search authority. +- Users: \[\+\] Create a user + - Username (cn; uid): grafana + - Display name: Grafana + - First Name: Grafana + - Last Name (sn): Service + - Email (mail): grafana@ilnmors.internal + - Password: "$(openssl rand -base64 32)" +- Groups:lldap_strict_readonly: \[Add to group\] + - This group allow search authority. +> Save the password in .secret.yaml + +### Create the normal users + +- Users: \[\+\] Create a user + - Username (cn; uid): il + - First Name: Il + - Last Name (sn): Lee + - Email (mail): il@ilnmors.internal + - Password: "$PASSWORD" +- Groups:lldap_admin&admins&users: \[Add to group\] +- Users: \[\+\] Create a user + - Username (cn; uid): user + - First Name: John + - Last Name (sn): Doe + - Email (mail): john_doe@ilnmors.internal + - Password: "$PASSWORD" +- Groups:(admins|users): \[Add to group\] + +> Custom schema in `User schema`, `Group schema` doesn't need to be added. This is for advanced function to add additional value such as `identity number` or `phone number`. Hardcoded schema, which means basic schema the lldap provides is enough to use Authelia. + +> After all these steps, now you can integrate the Authelia for SSO. + +## Usage of LDAP + +### Service Bind + +LDAP call `login` as Bind. When the authelia Bind to the LDAP server, it can get the authority to search in `lldap_strict_readonly` group. + +### Search + +authelia account has the authority to search, it can search to send the query. 
+ +#### Flow of search + +- Client (authelia) sends the query + - `uid=user in dc=ilnmors,dc=internal` +- LDAP server searches the DN of entry + - `uid=user,ou=people,dc=ilnmors,dc=internal` +- LDAP sends the DN to Client (authelia) + +## Authelia's work flow + +### First login + +#### User login query + +User try to login on login page of Authelia. + +- id: user +- password: 1234 + +#### Service Bind (Bind and search) + +authelia binds to LLDAP server based on the information in configuration.yml. + +- dn: authelia +- password: authelia's password + +#### Search + +authelia sends the query to LLDAP after bind. +- `uid=user in dc=ilnmors,dc=internal` + +#### Request + +LLDAP server searches the entry and send the DN information query to authelia. + +- `uid=user,ou=people,dc=ilnmors,dc=internal` + +### Verify the user login (Second login) + +#### User Bind (Bind only) + +authelia tries to bind LLDAP server based on the information that user input. + +- dn: requested uid +- password: 1234 + +#### Verification from LLDAP + +LLDAP verify the password from authelia with its hash value saved in LLDAP's database. + +#### Request + +LLDAP server sends the result as `Success` or `Fail`. + +> Search authority is basic authority of user who binds to LDAP server. It is just the way to check success or fail bind is the charge of Authelia. 
+ +## verify + +- openssl s_client -connect ldap.ilnmors.internal:636 -tls1_3 diff --git a/docs/services/infra/loki.md b/docs/services/infra/loki.md new file mode 100644 index 0000000..ba4e995 --- /dev/null +++ b/docs/services/infra/loki.md @@ -0,0 +1,12 @@ +# loki + +## Operation +Refer to Ansible playbook +## Verification +- fw@fw:/var/lib/bind$ curl -k https://loki.ilnmors.internal:3100/ready \(Node which is in NET_SERVER except infra itself\) + - ready +- fw@fw:/var/lib/bind$ curl -k https://loki.ilnmors.internal:3100/metrics + - metrics lists +- fw@fw:/var/lib/bind$ curl -k https://loki.ilnmors.internal:3100/loki/api/v1/labels + - no org id + - JSON format labels when alloy is set diff --git a/docs/services/infra/postgresql.md b/docs/services/infra/postgresql.md new file mode 100644 index 0000000..7779967 --- /dev/null +++ b/docs/services/infra/postgresql.md @@ -0,0 +1,64 @@ +# Postgresql + +## Operation +Refer to Ansible playbook + +## File management +```bash +# console +## cluster +scp infra@infra:$POSTGRESQL_BACKUP_PATH/pg_cluster.sql $HOMELAB_PATH/data/backups/infra/postgresql/pg_cluster.sql +## data +scp infra@infra:$POSTGRESQL_BACKUP_PATH/pg_backup.sql $HOMELAB_PATH/data/backups/infra/postgresql/pg_backup.sql + +## The data is managed by kopia. +``` + +## Verification + +```bash +# ... 
Start postgresql service + +# Create user and database +podman exec -it -u postgres postgresql "psql -U postgres" +> CREATE USER service WITH PASSWORD 'abc'; +> CREATE DATABASE service_db; +> ALTER DATABASE service_db OWNER TO service; +> \du +> \l +> \q + +# Reset database +> SELECT pg_terminate_backend(pid) FROM pg_stat_activity WHERE datname = 'service_db'; # connection reset +> DROP DATABASE service_db; +> CREATE DATABASE service_db; +> ALTER DATABASE service_db OWNER TO service; +> \du +> \l +> \q + +# Restore database (manually) +podman exec -u postgres postgresql "psql -U postgres -f $POSTGRESQL_BACKUP_PATH_IN_CONTAINER/script.sql" + +# Backup service executes +systemctl --user start postgresql-cluster-backup.service + +# Stop and remove all data +systemctl --user stop postgresql +sudo find "/home/infra/data/containers/postgresql/data" -mindepth 1 -delete + +# Restore database +# Just locate sql files on data_path, and use playbooks + +# Check restoring +podman exec -it -u postgres postgresql psql -U postgres +> \du +> \l + +# Check extension +postgres=# SHOW shared_preload_libraries; + shared_preload_libraries +-------------------------- + vchord.so +(1 row) +``` diff --git a/docs/services/infra/prometheus.md b/docs/services/infra/prometheus.md new file mode 100644 index 0000000..e869cf3 --- /dev/null +++ b/docs/services/infra/prometheus.md @@ -0,0 +1,12 @@ +# Prometheus + +## Operation +Refer to Ansible playbook +## Verification +- Check Caddyfile \(without caddy, use 9090 ports\) +- https://prometheus.ilnmors.internal +- Status:Target Health + - Check `Endpoint localhost:9090 ` with green circle +- Status:command-line flag + - Check `--web.enable-remote-write-receiver: true` + diff --git a/docs/services/systemd/systemd-networkd.md b/docs/services/systemd/systemd-networkd.md new file mode 100644 index 0000000..e6acd80 --- /dev/null +++ b/docs/services/systemd/systemd-networkd.md @@ -0,0 +1,35 @@ +# systemd-networkd + +- Use `networkctl` and the files in 
`/etc/systemd/network` +- link file + Link file links hardware interface and kernel while booting +- netdev file + netdev file defines virtual interface \(port, bridge\) +- network file + network file defines network option above interfaces + +## commands + +- reload + - networkctl reload + - networkctl reconfigure \[interface name\] + +## references + +- https://manpages.debian.org/testing/systemd/systemd/networkctl.1.en.html +- https://manpages.debian.org/testing/systemd/systemd.link.5.en.html +- https://manpages.debian.org/testing/systemd/systemd.network.5.en.html +- https://manpages.debian.org/testing/systemd/systemd.netdev.5.en.html + +## Plans + +- Hypervisor's linux bridges work as L2 switch + - br0 is completely L2 switch \(LinkLocalAddressing=no\) + - br1 has ip address for hypervisor itself, but basically works as L2 switch which can deal with VLAN tags; id=1,10 +- Firewall's port \(wan\) works as Gateway which can conduct NAT +- Firewall's port \(clients\) works as trunk port which can deal with VLAN tags; id=1,10,20 +- Firewall's port + - client, id = 1 + - server, id = 10 + - user, id = 20 + - wg0 diff --git a/docs/services/systemd/systemd-quadlet.md b/docs/services/systemd/systemd-quadlet.md new file mode 100644 index 0000000..1a66fc0 --- /dev/null +++ b/docs/services/systemd/systemd-quadlet.md @@ -0,0 +1,67 @@ +# systemd-quadlet + +Quadlet is for defining container configuration and lifecycle combining systemd and podman. + +## Rootless container + +Containers should be isolated from the host OS. However, docker runs with root permission on a daemon \(dockerd\). This means that when one docker container has a vulnerability and is taken over, all of the host system's authority is threatened. A rootless container runtime, podman, runs without root permission and without a daemon, so that even if one of the containers is taken over, the damage is confined to the host's normal user authority. + +A rootless container maps UID/GID between the host and its own user namespace as follows. 
Host's user UID/GID is mapped with container's root, and host's subuid/subgid defined on `/etc/subuid`, `/etc/subgid` is mapped with container's user UID/GID by default. + +- Default `/etc/subuid` and `/etc/subgid` + - user:100000:65536 + - host user 1000 > container root 0 + - host subuid 100999 > containers 1000 + +Rootless services originally depends on session. It is necessary to set `linger` to guarantee the service health regardless the session. + +- sudo loginctl enable-linger user + - ls /var/lib/systemd/linger/user + +## Quadlet + +Quadlet defines specification of container in `.container` file and generates `.service` automatically for systemd. systemd can manage the container like its own service with `systemctl` command. + +```ini +# $HOME/.config/containers/systemd/a.container +[Quadlet] +# Don't make a dependencies +DefaultDependencies=false + +[Unit] +Description=app +After=network-online.target +Wants=network-online.target +BindsTo=a.service +Requires=a.service + +[Service] +ExecStartPre=/bin/sh -c 'echo "Waiting for infra-postgresql..."; until nc -z postgresql.ilnmors.internal 5432; do sleep 1; done;' + +[Container] +Image=localhost/app:1.0.0 + +ContainerName=app + +PublishPort=2080:80/tcp +PublishPort=2443:443/tcp + +AddHost=app.service.internal:host-gateway + +Volume=%h/data/containers/app:/home/app:rw + +Environment="ENV1=ENV1" + +Secret=ENV_NAME,type=env +Secret=app.file,target=/path/of/secret/file/name + +# podman run [options] [image] example --config exconfig +Exec=example --config exconfig + +# If you want to change Entrypoint itself, use +Entrypoint=sh -c 'command' + +[Install] +# Guarantee auto start +WantedBy=default.target +``` diff --git a/docs/services/vmm/libvirt/cloud-init.md b/docs/services/vmm/libvirt/cloud-init.md new file mode 100644 index 0000000..8cf33c4 --- /dev/null +++ b/docs/services/vmm/libvirt/cloud-init.md @@ -0,0 +1,125 @@ +# cloud-init and seed.iso + +## reference + +- 
https://cloudinit.readthedocs.io/en/latest/reference/examples.html#yaml-examples + +## packages + +- cloud-image-utils +- genisoimage + +## meta-data + +- meta-data.yaml + +```yaml +instance-id: test-vm-$DATE +local-hostname: test +``` + +## user-data + +- user-data.yaml + +```yaml +#cloud-config + +# Command which is excuted when systemd boots +bootcmd: + - groupadd -g 2000 svadmins || true + +hostname: test + +# auto resize partition and filesystem depends on virtual disk image +growpart: + mode: auto + devices: ['/'] + ignore_growroot_disabled: false +resize_rootfs: true + +# prohibit root login +disable_root: true + +users: + - name: test + gecos: test + primary_group: svadmins + groups: sudo + lock_passwd: false + passwd: $(openssl passwd -6 'password') + shell: /bin/bash + ssh_authorized_keys: + - 'ssh-ed25519 KEY_VALUE' + +write_files: + # ip_forward option + - path: /etc/sysctl.d/ipforward.conf + content: | + net.ipv4.ip_forward=1 + permissions: '0644' + # systemd-networkd files + - path: /etc/systemd/network/00-eth0.link + content: | + [Match] + MACAddress=0a:49:6e:4d:00:00 + [Link] + Name=eth0 + permissions: '0644' + # - path: /etc/systemd/network/files.... 
+ # ssh host files + - path: /etc/ssh/id_test_ssh_host + content: | + -----BEGIN OPENSSH PRIVATE KEY----- + -----END OPENSSH PRIVATE KEY----- + permissions: '0600' + - path: /etc/ssh/id_test_ssh_host.pub + content: | + ssh-ed25519 KEY_VALUE TEST_SSH_HOST + permissions: '0644' + - path: /etc/ssh/id_test_ssh_host-cert.pub + content: | + ssh-ed25519-cert-v01@openssh.com KEY_VALUE TEST_SSH_HOST + permissions: '0644' + # sshd_config + - path: /etc/ssh/sshd_config.d/cert.conf + content: | + HostKey /etc/ssh/id_test_ssh_host + HostCertificate /etc/ssh/id_test_ssh_host-cert.pub + permissions: '0644' + - path: /etc/ssh/sshd_config.d/permit_root_login.conf + content: | + PermitRootLogin no + permissions: '0644' + +runcmd: + # systemd-networkd interface loading + - update-initramfs -u + - systemctl disable networking + - systemctl enable systemd-networkd + - systemctl enable getty@ttyS0 + - sync + +power_state: + delay: "now" + mode: reboot + message: "rebooting after cloud-init configuration" + timeout: 30 +``` + +## network-config + +- network-config.yaml + +```yaml +version: 2 +ethernets: {} +network: + config: disabled +``` + +## Create seed.iso + +```bash +cloud-localds -N network-config test_seed.iso user-data.yaml meta-data +``` diff --git a/docs/services/vmm/libvirt/undefine.md b/docs/services/vmm/libvirt/undefine.md new file mode 100644 index 0000000..588abd0 --- /dev/null +++ b/docs/services/vmm/libvirt/undefine.md @@ -0,0 +1,18 @@ +# Undefine VM + +Undefine VM is critical to whole systme. 
+ +## process + +```bash +# Shutdown VM +systemctl --user stop "$VM_NAME".service +## virsh stop|destroy "$VM_NAME" + +# Undefien VM +virsh undefine "$VM_NAME" --nvram # All vms use uefi, so the option, `--nvram` is needed to remove nvram file + +# Delete VM files +sudo rm -r /var/lib/libvirt/images/"$VM_NAME".qcow2 +sudo rm -r /var/lib/libvirt/seeds/"$VM_NAME"_seed.iso +``` diff --git a/docs/specifications/environments.md b/docs/specifications/environments.md new file mode 100644 index 0000000..2f9d1b1 --- /dev/null +++ b/docs/specifications/environments.md @@ -0,0 +1,154 @@ +# Server and client environments + +## Console + +- OS: WSL2 \(Debian 13\) +- Processor: 4vCPU +- Memory: 4GiB +- Disk: + - 32GiB for `/` \(VHD file\) +- Services: + - [x] Terminal + - [x] Step-CLI + - [x] Ansible + - Git + - Kopia + - [x] cloud-image-utils + +## vmm \(Hypervisor\) + +- OS: Debian13 +- Processor: pCPU \(N150\) +- Memory: 3GiB \(margin\) + - KSM allows more than 3GiB for vmm +- MAC: + - c8:ff:bf:05:aa:b0 + - c8:ff:bf:05:aa:b1 +- Disk: + - SSD: + - 64GiB for `/` \(ext4 in LVM\) + - 700GiB for `/var/lib/libvirt` \(ext4 in LVM\) +- Services: + - [x] QEMU/KVM + - [x] libvirtd + - [x] ksmtuned + +## fw \(Firewall\) + +- OS: Debian13 +- Processor: 2vCPU + - cputune.shares 2048 +- Memory: 4GiB +- MAC: + - 0a:49:6e:4d:00:00 + - 0a:49:6e:4d:00:01 +- Disk: + - SSD: 64GiB for `/` \(ext4 in qcow2 file\) +- Services: + - native packages: + - [x] nftables \(firewall based on ZONE\) + - [x] Suricata \(IDS\) + - [x] CrowdSec LAPI \(IPS\) + - [x] Kea DHCP + - [x] Wireguard-tool + - [x] BIND9 \(Local authoritative DNS\) + - [x] Blocky \(Resolver DNS\) + - Scripts: + - [x] ddns.sh + +## infra \(Infrastructure\) + +- OS: Debian13 +- Processor: 2vCPU + - cputune.shares 1024 +- Memory: 6GiB +- MAC: 0a:49:6e:4d:01:00 +- Disk: + - SSD: 256GiB for `/` \(ext4 in qcow2 file\) +- Services: + - Rootless containers: + - [x] PostgreSQL + - [x] lldap + - [x] Step-CA + - [x] Caddy \(with nsupdate\) + - [x] 
Prometheus \(alloy - push\) + - [x] Loki \(alloy\) + - [x] Grafana + + - Study \(Rootless container\): + - Kali + - Debian + + +## auth \(Authorization\) + +- OS: Debian13 +- Processor: 2vCPU + - cputune.shares 512 +- Memory: 2GiB +- MAC: 0a:49:6e:4d:02:00 +- Disk: + - SSD: 64GiB for `/` \(ext4 in qcow2 file\) +- Services: + - Rootless containers: + - [x] Caddy \(with nsupdate, crowdsec-http, crowdsec-bouncer module\) + - [x] authelia + +## app \(Application\) + +- OS: Debian13 +- Processor: 4vCPU + - cputune.shares 1024 +- Memory: 16GiB +- MAC: 0a:49:6e:4d:03:00 +- Disk: + - SSD: 256GiB for `/` \(ext4 in qcow2 file\) + - HDD: 4TB for `/home/app/data` \(btrfs\) +- VFIO \(Hardware passthrough): + - Graphic: N150 iGPU + - Disk: SATA Controller +- Services: + - OIDC native services: + - OpenCloud \(with Radicale, Collabora Web Office\) + - Vikunja \(with CalDAV\) + - Gitea + - Outline + - Wiki.js + - WriteFreely + - Immich + - MediaCMS + - Funkwhale + - Kavita + - Audiobookshelf + - we-promise/sure - budget + - Paperless-ngx + - Miniflux + - Linkwarden + - Ralph + - Conduit + - SnappyMail + - Vaultwarden + + - Forward_auth + - Homepage + +## External Backup server + +- OS: DSM \(Synology\) +- Processor: pCPU \(Realtek RTD1619B\) +- Memory: 1GiB +- MAC: 90:09:d0:65:a9:db +- Disk: + - HDD: 4TB +- Services: + - SFTP + - Kopia repository server + - CloudSync \(Upload backup files to Cloud\) diff --git a/docs/specifications/hardwares.md b/docs/specifications/hardwares.md new file mode 100644 index 0000000..29c6a7d --- /dev/null +++ b/docs/specifications/hardwares.md @@ -0,0 +1,67 @@ +# Hardware specifications + +## Servers + +### Main server + +- Aoostar WTR Pro N150 + - Processor: Intel N150 \(4C4T\) + - Graphic: Intel UHD Graphics + - 2.5 Gbps NIC x 2 + - M.2 Slot x 2 \(SSD, WiFi\) + - SATA bay x 4 + - 279,900 KRW +- Samsung DDR4 SO-DIMM 3200 32G x 1 + - 106,900 KRW +- Samsung 980 Pro 1TB TLC x 1 + - 276,000 KRW \(Previously owned\) +- 3RAYS glaicer 6 m.2 SSD heatsink x 
1 + - 7,330 KRW +- HGST Ultrastar 7K4000 2TB HDD x 3 + - 99,000 KRW +- HGST Ultrastar 7K2 2TB HDD x 1 + - 43,000 KRW +- Total price: 698,030 KRW \(1,460,030 KRW with previously owned ones\) + +### Backup server +- Synology DS124 + - Processor: Realtek RTD1619B \(4C4T\) + - Memory: DDR4 1GB + - 1 Gbps NIC x 1 + - SATA bay x 1 + - 242,000 KRW +- TOSHIBA DT02 4TB x 1 + - 55,000 KRW +- Total price: 297,000 KRW + +### Console \(Laptop\) +- Microsoft surface laptop 7th ZGJ-00021 + - Processor: Snapdragon X Plus \(ARM64, 10C10T\) + - Memory: LPDDR5x 16GB + - SSD: 256GB SSD + - OS: Windows11 Home + - 1,290,210 KRW +- Microsoft surface USB-C travel hub x 1 + - 157,890 KRW +- Total price: 1,448,100 KRW + +### External HDD +- EFM 3.5 External HDD case ipTIME HDD3135 Plus x 1 + - 29,400 KRW +- Seagate BARRACUDA HDD 2TB x 1 + - 99,000 KRW \(Previously owned\) +- Total price: 128,400 KRW + +## Devices + +### Switch +- TP-link TL-SG108E x 1 + - 1 Gbps NIC x 8 + - IEEE 802.2q + - 39,900 KRW +- Total Price: 39,900 KRW + +### Monitor +- Samsung S6 LS27F610 x 1 + - 277,000 KRW +- Total price: 277,000 KRW diff --git a/docs/specifications/matrix.md b/docs/specifications/matrix.md new file mode 100644 index 0000000..ee2aa11 --- /dev/null +++ b/docs/specifications/matrix.md @@ -0,0 +1,117 @@ +# Matrix + +## UID/GID Matrix + +### Table + +|name|uid|gid|comments| +|:-:|:-:|:-:|:-:| +|svadmins|-|2000|server group| +|vmm|2000|2000|hypervisor| +|fw|2001|2000|firewall| +|infra|2002|2000|infrastructure| +|auth|2003|2000|authentication and authorization| +|app|2004|2000|services| +|console|2999|2000|console node\(surface\)| + +### subuid and subgid + +- user:100000:65536 + +## Switch ports matrix + +### 8 Ports main switch + +|port number|node|subnet|id| +|:-:|:-:|:-:|:-:| +|1|WTR Pro N150|Trunk|-| +|2|AP\(Preparation\)|USER|20| +|3|DS124\(NAS\)|CLIENT|1| +|4|Console|CLIENT|1| +|5|Printer|CLIENT|1| +|6|-|-|-| +|7|-|-|-| +|8|-|-|-| + +## IP matrix + +### Subnet + +|name|IPv4|IPv6|id| 
+|:-:|:-:|:-:|:-:| +|CLIENT|192.168.1.0/24|fd00:1::/64\(ULA\)|1| +|SERVER|192.168.10.0/24|fd00:10::/64\(ULA\)|10| +|USER|192.168.20.0/24|GUA from ISP|20| +|WG0|192.168.99.0/24|fd00:99::/64\(ULA\)|-| + +### Host + +#### console: +- CLIENT + - 192.168.1.20 + - fd00:1::20 +- WG0 + - 192.168.99.20 + - fd00:99::20 + +#### fw +- CLIENT + - 192.168.1.1 + - fd00:1::1 +- SERVER + - 192.168.10.1 + - fd00:10::1 +- USER + - 192.168.20.1 + - GUA SLAAC +- WG0 + - 192.168.99.1 + - fd00:99::1 + +#### blocky \(fw\) +- SERVER + - 192.168.10.2 + - fd00:10::2 + +#### bind \(fw\) +- SERVER + - 192.168.10.3 + - fd00:10::3 + +#### vmm +- CLIENT + - 192.168.1.10 + - fd00:1::10 +- SERVER + - 192.168.10.10 + - fd00:10::10 + +#### infra +- SERVER + - 192.168.10.11 + - fd00:10::11 + +#### auth +- SERVER + - 192.168.10.12 + - fd00:10::12 + +#### app +- SERVER + - 192.168.10.13 + - fd00:10::13 + +#### VLAN switch +- CLIENT + - 192.168.1.2 + - fd00:1::2 + +#### ds124 +- CLIENT + - 192.168.1.11 + - fd00:1::11 + +#### Printer +- CLIENT + - 192.168.1.101 + - fd00:1::101 diff --git a/docs/theories/network/dhcp.md b/docs/theories/network/dhcp.md new file mode 100644 index 0000000..cad0729 --- /dev/null +++ b/docs/theories/network/dhcp.md @@ -0,0 +1,53 @@ +# DHCP (Dynamic Host Configuration Protocol) + +Before DHCP emerged, every client had to set their own static IP or using RARP\(Reverse Address Resolution Protocol\). They have critical problems. + +- Static IP + - Each host has their own IP regardless they run or not. It cause lack of IP address. + - If administrator made configuration mistake, the network itself could stop. For instance, IP conflict, or subnet configuration error, etc. + +- RARP + - RARP works on L2, it makes hard to implement on hardware. + - RARP server had to exist on every subnet(L2), it was inefficient. + +To solve this problem, BOOTP was developed and it evolved as DHCP. DHCP works on L3, and like its name they lease IP dynamically. It allocates IP to hosts with 3 steps. 
Lease, renewal, and release. + +## Lease + +When the DHCP server gets a request from a host to allocate an IP, the DHCP server chooses one IP address from its subnet pool and responds with the IP/subnet and other information to the host. It follows the process called DORA. + +- DHCP Discover + +The host which has no IP address broadcasts a discover packet including its MAC to the local network. Only the DHCP server responds to this packet; the others discard it. + +- DHCP Offer + +When the DHCP server gets a discover packet, the DHCP server broadcasts an offer packet including the MAC of the host which sent the discover packet. Only that host processes the offer; the others discard the packet. + +- DHCP Request + +When the host gets an offer packet, it knows the network has a DHCP server and broadcasts a request packet to the local network. More than one DHCP server can exist in one network, which is why the host broadcasts the request packet. + +- DHCP ACK + +When the DHCP server gets the request packet, it searches for an IP address which can be allocated in its pool. When the DHCP server finds an available IP address, it sends an ACK packet including the IP address and subnet, and optional information \(DNS, gateway, etc...\) with the lease time. + +## Renewal + +The host which was allocated an IP from DHCP tries to renew its IP before it expires. There are two chances to renew the lease time. The first renewal attempt happens when half of the lease time has passed; T1. At this time, the host uses unicast. If the first attempt failed, it tries one more time when 87.5% of the lease time has passed; T2. At this time, the host uses broadcast. If all attempts fail, the client gives up the leased IP and tries the lease process again, and the DHCP server releases the IP address from the host. + +- DHCP Request + +The host sends a request packet to the DHCP server as unicast. + +- DHCP ACK + +When the DHCP server gets the request, it sends an ACK packet to the host as unicast. + +## Release + +When the host doesn't use the IP address anymore, the DHCP server marks the IP as available in its pool. 
Especially, client can send `DHCPRELEASE` to DHCP server explicitly when it doesn't need IP address. + +## DHCP relay + +Commonly, DHCP is located in router. Because router is the center of networks, and it takes charge of number of networks. However, DHCP server doesn't have to be located in router because of existence of DHCP relay. When router gets DHCP packets (DORA), router can relay the packets as unicast between host and DHCP server which are in different subnet. diff --git a/docs/theories/network/dns.md b/docs/theories/network/dns.md new file mode 100644 index 0000000..d0d929c --- /dev/null +++ b/docs/theories/network/dns.md @@ -0,0 +1,99 @@ +# DNS (Domain Name System) + +In the beginning of the internet, there were a few hosts on networks. It was possible to manage all hosts on network via IP address or domain name in `/etc/hosts` file in each servers. However, it is hard for people to match and remember what IP addresses means. When the internet environment became bigger and bigger, the complex of route the target server would be harder. To solve this problem, the DNS emerged as a translator between IP address and domain name. In modern internet environment, DNS has hierarchy structure from root to TLD, TLD to authoritative server for efficiency. + +## Structure of DNS + +### Communication + +- DNS: 53 tcp/udp + +DNS communication basically uses 53/udp port. However, in the modern internet environment; which means complex environment sometimes the size of packet is above 512 bytes. In this case, DNS uses 53/tcp too. The vulnerability of DNS is that all communication is on plain data. Everyone can conduct sniffing attack towards DNS packet. + +- DoT (DNS over TLS): 853 tcp + +DoT was developed to encrypt DNS query. DoT uses [TLS](./tls.md) to request query. This protocol uses TLS. Moreover, because of TLS, nobody can do sniffing attack towards DoT. However, it uses specific port 853. 
If the ISP blocks port 853 or analyzes traffic on that port, the pattern of usage can be profiled, or DoT can even be blocked entirely. Additionally, there is also DNS over DTLS which uses 853 udp. + +- DoH (DNS over HTTPS): 443 tcp/udp + +DoH is very similar to DoT. It uses TLS, and it was developed to encrypt DNS queries. There is just one difference: it uses https(443 tcp/udp) instead of 853 tcp. https is the standard web protocol, so it is hard to tell whether someone is sending a DNS request or ordinary web packets. It means an ISP or government cannot block port 443 itself or analyze the pattern of DNS queries. Since 2022, there is the new standard DNS over HTTP/3 which uses the 443 udp port. + +- DNSSEC (DNS SECurity extensions) + +Originally, a client couldn't verify the integrity of the response from a DNS server. If a malicious attacker could get authority over a caching DNS server and change its records, all clients would be affected (i.e. a pharming attack). DNSSEC is a protocol to guarantee the integrity of DNS records. The DNSSEC protocol adds some records to the zone: RRSIG(Resource Record Signature), DNSKEY, DS, NSEC, CDNSKEY, CDS. Every resolver verifies the integrity of its records against the authoritative DNS with these records. This process is similar to PKI, the chain of trust. + +- ECH (Encrypted Client Hello) + +Basically, the client hello packet has an SNI (Server Name Indication). Even though all communication under TLS is encrypted, to start a session the packet has to contain the SNI to identify the server. To encrypt this information, ESNI(Encrypted SNI) was developed in 2018 based on TLS 1.3. However, ESNI could only encrypt the SNI field. Since 2020, the new standard ECH has been developed to supersede ESNI. ECH not only encrypts the SNI but also encrypts the whole client hello. ECH is the latest protocol, and it has a lot of dependencies on the DNS server, the service server, and the client. Only when all of them support ECH can the user use ECH.
Because when ECH encrypts client hello data client need the target server's public key (certificate), it has to look up from encrypted DNS (DoH or DoT). + +### Zone + +DNS server has zones; Forward zone and Reverse zone. + +- Forward zone + +Forward zone has basically information of the pair of domain and IP address. The role of this zone is change domain name to IP address. The domains are managed by IANA, TLD is already reserved. (i.e. `.com`, `.org`, etc...) For private network, `.home.arpa` or `.internal` are reserved. + +- Reverse zone + +Reverse zone also has basically information of the pair of IP address and domain. The role of this zone is change IP address to domain name. To change domain to IP address it uses specific domain name. \[reversed_ip_address\].in_addr.arpa (i.e. 1.168.192.in-addr.arpa) + +### Records + +Each zone has their record type. If zone were a kind of DB, record would be a data of DB. There is basic records type below. + +- SOA type + +Information of ZONE management. Every zone has this SOA type record. + +- NS type + +Designate authoritative name server of domain zone + +- A type + +Mapping domain to IPv4 address + +- AAAA type + +Mapping domain to IPv6 address + +- PTR type + +Mapping IP address to domain + +- CNAME type + +Mapping domain to domain. CNAME type is kind of alias of domain. It can't have IP address value. The query acts recursively, and it gets IP address at the end. + +### Key + +There is the keys to control DNS records or zone, even DNS server itself. + +- rndc key + +This key is to control DNS server itself. When rndc key set on DNS server, client can control DNS server with this key like, reboot server, load or unload zone. rndc key is basically generated by `rndc-confgen` command and it is defined on `rndc.conf` and `named.conf`. + +- tsig key + +This key is to guarantee integrity when the server syncronize zone data between other servers (usually master-slave server). 
It is possible update records via this key depending on the setting. Therefore, tsig key is usually used for DDNS or DNS-01 challenge. The key is generated in the DNS server, and it defined in `named.conf`. + +## DNS Server type + +DNS server basically separated as authoritative DNS and recursive DNS. + +### Authoritative DNS + +Authoritative DNS has literally authority of domain zone. It doesn't ask recursive queries towards other DNS server in case of the query that is in its authoritative zone. It is necessary to use DNS-01 challenge (ACME protocol). + +### Recursive DNS + +Recursive DNS oppositely doesn't have authority of the records in its zone. When it gets query request, it ask recursive query towards authoritative DNS. It can store the information of records (cache) and give response towards client with the cache. + +## Split Horizon DNS + +Split Horizon DNS means getting different IP address depending on where the client exists. For instance, if there were the domain `example.com`. This domain has its own private IP address, simultaneously own public IP address (from NAT). When client request the query `example.com` in the private network, private DNS would respond its private IP address. However, when the client request the query in the WAN network, public DNS would respond its public address. Client can access `example.com` in both case, but the IP address which client respond are different. To use this protocol, the network route will be efficient because the packet doesn't have to go out to the WAN area in private network. Basically, it is implemented internal authoritative DNS and recursive DNS. Recursive DNS decides where to send the query based on domain. + +## DDNS (Dynamic DNS) + +Public IP address can be changed by ISP at any time. It is hard (or expensive) to get static public IP address by ISP. However, the service (server) always guarantee their availability regardless what is their IP. 
DDNS is basically the protocol to change A or AAAA (or CNAME) records in DNS as server's current IP. Server keeps checking their current public IP and when it changes the server send the request to change its A or AAAA records to public authoritative DNS server with authentication with API key or tsig key. diff --git a/docs/theories/network/email.md b/docs/theories/network/email.md new file mode 100644 index 0000000..7054802 --- /dev/null +++ b/docs/theories/network/email.md @@ -0,0 +1,241 @@ +# Email service + +Email is the mail service online via the internet. ARPANET was developed in 1969, since then there has been many attempts to send messages via the internet. The mail which uses `@` character in 1971 and `SMTP(Simple Mail transfer Protocol)` was developed to standardize various ways to email. + +## Component of Email service + +### Address + +Basically, Email address has format like this. `local-parts@domain`. `local-parts` is identifier, and `domain` is service provider's domain. Following RFC 5321, `domain` doesn't distinguish it upper or lower case. `local-parts` must distinguish them, but practically they doesn't. + +### MUA (Mail User Agent) + +MUA is the client of Email. The user can write Email, or read the Email which they got recieved. For instance, Outlook, Thunderbird, etc. + +### MTA (Mail Transfer Agent) + +This is the essential part of Email service. MTA transpers the mail to other MTA or MDA. For instance, Postfix, sendmail, Exim, etc. + +### MDA (Mail Delivery Agent) + +MDA recieves the mail from MTA, and it store the mail on receivers' mailbox. Sometimes, it is combined MTA or IMAP/POP3 servers. For instance, Dovecot LDA, Procmail, etc. + +### Flow of Email service + +- User writes the mail on MUA. +- User sends the mail from MUA to MTA using SMTP submission protocol. +- MTA checks receiver's domain, and transfer the mail to other MTA which takes charge of that domain. +- MTA recieves the mail and sends receiver's MDA. 
+- The receiver's MUA access to the MDA such as IMAP or POP3 server. the receiver can check and read the email on their MUA. + +## Protocols + +### SMTP (Simple Mail Transfer Protocol) + +SMTP is standard of email transfer protocol internet defined on RFC 5321. This protocol is used when MUA sends the mail to MTA, and MTA sends the mail other MTAs. This protocol takes charge of all process of transportation of the mails. + +#### Detail of SMTP + +##### Start + +- Connection + +The client and server make the connection via SMTP port (25/tcp). + +##### Greeting + +- `220` code + +The server sends `220` code to the client, they are ready. + +- `HELO` or `EHLO` + +The client sends `HELO` or expand version of `HELO`; `EHLO` command to server to introduce itself. + +##### Designate sender and recipient + +Use the command below, they designate sender and recient. + +- `MAIL FROM:` +- `RCPT TO:` + - If there were various recipients, use this command as much as recipients number. + +##### Transper the mail data + +- `DATA` and `354` + +The client sends `DATA` command to server. After the server responds with `354` code, client sends the data including mail header (From, To, Subject), and content of mail. The end of data is `.`. + +##### End + +- `QUIT` + +The client sends `QUIT` command, the connection is terminated. + +##### Ports + +- `25/tcp` + +Traditional SMTP's standard port. All content using `25/tcp` is not encrypted. Because of security and SPAM problems, a lot of ISP block the `25/tcp` port of common user. + +- `587/tcp` (Submission) + +The standard port of SMTP for encryption. Generally MUA sends the mail to MTA with this port. It is needed to use encrypted connection via `STARTTLS` + +- `465/tcp` (SMTPS) + +This port used to be used for TLS/SSL for SMTP. This is not standard, so it is recommended to use `587/tcp` port for TLS/SSL of SMTP. However, even now this is generally and commonly used. 
+ +##### Security + +SMTP is very old protocol, and this protocol use plain data. It is recommended to use `STARTTLS` or `SMTPS` to encrypt data for security. + +- `SMTPS` + +It uses TLS/SSL from the beginning of connection via `465/tcp` + +- `STARTTLS` + +It uses TLS/SSL after beginning of connection via `587/tcp` as plain data, and start encryption with `STARTTLS` command. + +##### Authentication + +It is necessary to use users' identity like name and password to prevent anyone can sends malicious mail using server. SMTP uses SASL (Simple Authentication and Secuirty Layer) machanism to authenticate its users. + +##### Relay + +MTA has to send the mail to the other MTA for guarantee the mail can arrive the recipitent. MTA uses `relay` function for this. Make sure to allow this function for authenticated user or trusted network to prevent malicious usage. + +### IMAP (Internet Message Access Protocol) + +IMAP is the protocol to read and manage the mails from remote MDA (mail server). The difference between POP3 is that IMAP can manage the mail and its mailbox remotely even without download. It is defined on RFC 3501. + +#### Detail of IMAP + +IMAP is the protocol to have a communication with various commands while the connection is stable. The client sends specific `tag` in front of command, and the server responds with `tag` to process the actions. + +##### Authentication + +- `LOGIN` or `AUTHETICATE` + +IMAP authenticate the user with `LOGIN` command with ID and password or `AUTHENTICATE` command with SASL. + +##### Mailbox + +- `LIST` +- `SELECT` +- `CREATE` +- `DELETE` +- `RENAME` + +##### Mail + +- `FETCH` + +IMAP can take the mail list, the mail itself, or content of the mail, even the attachment in the mail. + +##### Statement + +- `STORE` + - `\Seen` + - `\Flagged` + - `\Answered` + - `\Deleted` + +IMAP can set the status flag of mail with command flags. 
+ + +##### Search + +- `SEARCH` + +IMAP can search the mail with various condition of the mail (Sender, title, contents, date, etc) from server. + +##### Ports + +IMAP strongly recommend to use TLS/SSL with `STARTTLS`. Even though the beginning of conversation is not encrypted, TLS/SSL is applied with the `STARTTLS` command. + +- `143/tcp` + +The basic IMAP port. It is mendetory to use `STARTTLS` to use IMAP with this port. + +- `993/tcp` (IMAPS) + +This port uses TLS/SSL in the beginning of communication. It is not a standard but it is generally and commonly use for security. + +##### Synchronization + +IMAP basically server's mail and mail list, so wherever you access the mail you can see the same condition and status of mailbox. When one mail is modified on one device it is applied all devices simultanaeously. + +- `IDLE` + +This command supports to maintain connection between server and client, when the new mail comes or the status is changed the client can get notification immediately. + +### POP3 (Post Office Protocol version 3) + +POP3 protocol is basically designed to download the mail on local client from remote mail server. It is defined on RFC 1939. The biggest difference between IMAP and POP3 is, POP3 basically delete the mail at the server after downloading. + +#### Detail of POP3 + +##### Authorization + +- `USER` and `PASS` + +The client connect to server and it conduct authentication with `USER` and `PASS` command. + +##### Transcation + +- `STAT` +- `LIST` +- `RETR ` +- `DELE ` +- `RETR` + +POP3 uses various commands to download or delete the mail. It checks the number of mail and size with `STAT`, downloads the mail with `RETR`, deletes the mail with `DELE`, and save the mail on client with `RETR`. + +##### Update + +When the client sends `QUIT` command, then server deletes the mails which have `DELE` marks from server and terminate the connection. + +##### Ports + +POP3 strongly recommend to use TLS/SSL with `STARTTLS`. 
Even though the beginning of the conversation is not encrypted, TLS/SSL is applied with the `STARTTLS` command. + +- `110/tcp` + +The basic port of POP3. It is mandatory to use `STARTTLS` to use POP3 with this port. + +- `995/tcp` (POP3S) + +This port uses TLS/SSL from the beginning of communication. It is not a standard, but it is generally and commonly used for security. + +##### Simplicity and locality + +POP3 basically deletes the mail from the mail server, so the mail exists only on the local client. However, since it doesn't require complex features like IMAP, it stays simple. + +--- + +## local mail service in homelab + +### SMTP server (MTA) + +#### Postfix + +Postfix will be used as the MTA which takes charge of the `@ilnmors.internal` domain. However, Postfix in this homelab will never be open towards the WAN environment; it works as a local private MTA. The internal services (Gitea, OPNsense, Prometheus, etc) will send mail via `587/tcp` to Postfix. When it needs to send mail towards the WAN, it will use the `relayhost` function and external Email services such as Google or Naver, etc. `relayhost` makes Postfix act as a `client`, not an `MTA`. It means the administrator never has to take care of IP reputation or SPAM problems. The WAN area's `MTA` function is delegated to public mail service providers. + +### IMAP/POP3 server (MDA) + +#### Dovecot + +Dovecot will be used as the IMAP server of the local private MTA; Postfix. The user can use an MUA (Thunderbird, Outlook, a mail application, or even Roundcube webmail) to access the private mail address + `@ilnmors.internal` via Dovecot. The user will use `993/tcp` to access Dovecot, and Postfix stores the mails on Dovecot. + +#### mbsync + +mbsync will be used as the IMAP client of the public MTAs: Google or Naver. This will fetch public mail `@external-domain.com` from the public mail service provider into local Postfix, and eventually the user can access the mail on Dovecot. However, it is important to configure it properly so that the mails are not deleted from the public mail servers.
+ +### MUA + +#### SnappyMail web mail + +This will be used as the MUA server on the `app` server, so that all mail can be accessed in one place. diff --git a/docs/theories/network/link-local.md b/docs/theories/network/link-local.md new file mode 100644 index 0000000..3702407 --- /dev/null +++ b/docs/theories/network/link-local.md @@ -0,0 +1,30 @@ +# Link-local address + +A link-local address comes from subnets reserved for L2-only communication. + +## IPv4 + +### APIPA + +When a client couldn't get an IP address from DHCP, the OS automatically allocates an address from the 169.254.0.0/16 subnet. This address can never pass an L3 point \(router\). It is usually used for internal communication in cloud environments, or for the PASTA network in containers. + +### RFC1918 + +These are the subnets 10.0.0.0/8, 172.16.0.0/12, and 192.168.0.0/16, which were originally ordinary IP addresses able to communicate beyond an L3 point \(Router\). However, they are now reserved for LANs \(Local Area Networks\) because of the shortage of IPv4 addresses. They can communicate with other subnets, but they cannot be used in the WAN environment, which means an ISP cannot allocate these subnets to its clients. + +## IPv6 + +The link-local address is very important in IPv6, unlike in IPv4. Basically, every edge node must have a GUA in the IPv6 protocol, and a node can have a number of IPv6 addresses on one interface. So nodes communicate with the auto-generated link-local address fe80::/10 in the L2 area, and they communicate with a ULA or GUA in the L3 area.
+ +## Linklocal in PASTA + +### IPv4 +- 127.0.0.1: container itself +- 169.254.0.0/16: container and host communication \(linklocal\) +- RFC1918 for private LAN +- WAN +### IPv6 +- \[::1\]: container itself +- \[fe80::\]: container and host communication \(linklocal\) +- \[fd00::\]: (ULA) for private LAN +- [ Global IPv6 ] diff --git a/docs/theories/network/tls.md b/docs/theories/network/tls.md new file mode 100644 index 0000000..c0dfa20 --- /dev/null +++ b/docs/theories/network/tls.md @@ -0,0 +1,90 @@ +# TLS/SSL + +TLS(Transport Layer Security) is the protocol that encrypts communication with certificates under [PKI](../pki/pki.md). Originally, the communication encryption protocol was suggested as SSL(Secure Socket Layer) in 1995 by Netscape. Before the emergence of TLS/SSL protocol, all communication in web(or database, some more protocols) used plain text, even it contained sensitive data like password. After 1996, the update of SSL protocol stopped at version 3.0, and it has its own vulnerability. The next version of SSL is TLS. Virtually, a lot of people treat SSL as the same as TLS. + +## Encryption + +Before talking about TLS, it is essential to understand what is encryption. Basically, encryption is to hide plain data using algorithm and key mathematically. Encryption can categorized by various criteria, but in this case focusing on key. + +### Symmetric key encryption + +This way needs only one key to encrypt and decrypt. Just because using one key to encrypt and decrypt, it doesn't require complex calculation. This algorithm is fast, and easy to handle huge data. However, the key is only one so for security the key should be protected. + +### Asymmetric key encryption + +This way needs two keys to encrypt and decrypt. It means the key for encryption and the key for decryption are different. This uses very complex algorithm to seperate keys. It makes this slow and hard to handle huge data. 
However, the keys are devided so, one key can be publically shared. + +## Principle of TLS + +The PKI is necessary to use TLS communication. TLS uses both of algorithm to encrypt the communication. When the communication were encrypted, the key should exist. For this process(to generate key for encryption), server needs X.509 certificate. The certificate contains data of server's public key, domain, and extra informations. To start communication, the server and the client conduct specific process, called `TLS-handshake`. A client sends the request to start communication. The server accepts this request, and negotiates the protocol what they will use in communication. After negotiation, they start to process to generate symmetric key for communication. In this part, there are two protocol to generate symmetric key: RSA and Diffie-hellman. + +### RSA (Legacy way) + +In this process server and client uses server's public key and private key. A client generates metadata (Pre-master secret) to generate symmetric key. Metadata is encrypted with server's public key and sent to the server. The server decrypts the data from a client with its private key. They uses this data to generate symmetric key. When this process ends, they share the same symmetric key to encrypt content of communication. + +- Cons of RSA + +Security strength of RSA depends on the server's private key. The server's private key was taken from whom have the record of past communication, all communication can be decrypted. Because session key itself (precisely, pre-master secret) is in the communication encrypted by server's public key. So, this way can't guarantee Forward Secrecy which means security of previous communication. Additionally, RSA needs high resource to encrypt and decrypt. Therefore, modern internet environment RSA way is not used frequently. + +### Diffie-Hellman & ECDHE(Modern standard) + +On the other hand, TLS can use Diffie-Hellman algorithm. 
This process doesn't exchange any clue of session key. In this process, the server and clients generate secret values. And they make specific results from calculation with public parameters. The server's private key signs on its results just for prooving they are not altered and authenticated. When they exchange each other's results, both of them generate the same session key from results and their own secret value. All process is publically open, but except the server and client themselves, nobody can calculate the secret values and session key mathematically. After all process is done, finally the client combines all communication value between server. And it makes hash value of this, and encrypt the hash value with the session key. The client sends it to the server as finished message, and server verify this. This key is not permanent, it is temporary. There is no encryption way, so even if hacker could get the secret key of server, he wouldn't know what was the session key. + +- Pros of DHE & ECDHE + +On the contrary with RSA case, security strength of DHE & ECDHE doesn't depend on some specific key. Because when it generate session key, the server and client don't send any sensitive values (like pre-master secret) in this protocol. What they send to each other is open values. The server's public key is used for only signing. So, every session key can never exist after each session; PFS (Perfect Forward Secrecy). Additionally, Elliptic curve way can provide the same strength with shorter key comparing RSA. + +The detail of `TLS-handshake` process is below. + +## Detail of TLS handshake + +### Start + +- Client hello + +The client sends the request to server with information including SNI (Server Name Indication), TLS version, cipher suite, and client random. SNI is the server's domain information to access. cipher suite is the protocol of encryption, and client random will be used to generate session key. 
Basically, the content of `client hello` isn't encrypted. It has a problem that ISP(Internet Service Provider) or government can conduct sniffing attack to the Client hello packet. To solve this problem, ECH (Encrypted Client Hello) was developed. However, ECH needs the support of DNS server, browser(host), and server itself. Today, a lot of servers and DNS servers don't support ECH, so it is hard to apply ECH in every environment. + +- Server hello + +When the server receives clients request, it designate how to communicate from the list of client hello. The server sends to the client with the information of what protocol, TLS version, cipher suite to use, and certificate, and server random. The way of encryption: RSA, DHE, ECDHE are including in the cipher suite. + +### RSA + +When the server and client decide to use RSA way, server sends certificate including public key to the client. + +- Client key exchange + +The client verifies `Server hello` with server's certificate using client's trusted CA list. The certificate is valid then the client generates `Pre-master secret` and encrypts this value with server's public key to send to the server. + +- Session key generation + +The server recieves `pre-master secret` encrypted by its public key that only server (who knows the private key of server) can decrypt. When the server decrypts this value successfully, the server and client knows three values: `Client random`, `Server random`, and `Pre-master secret`. The server and client generate `Session key` from these values individually. + +### DHE or ECDHE + +DHE and ECDHE follows the exactly same principle of Diffie-hellman algorithm. However, they have a difference of basement in mathematic. DHE is using discrete logarithm, but ECDHE is using elliptic curve discrete logrithm. This is a mathametical topic, so this is skipped. Just important thing is ECDHE is more efficient than RSA or DHE. 
+ +> Key length to guarantee the same level of security: +> +> - RSA/DHE: above 2048 bit +> - ECDHE: above 256 bit + +- Server key exchange + +The server generate the pair of temporary key based on its own principal. The server sends public temporary key to the client. The public temporary key is signed by server's private key. + +- Client key exchange + +The client verify server's temporary public key with server's public key. Simultaneously, client also generate the pair of temporary key based on its own principal. The client sends its public temporary key to the server. + +- Session key generation + +Both of them have their pair of temporary key, and other's temporary public key. They generate the same `Pre-master secret` from their temporary secret key and other's temporary public key. This process has no communication, they calculate the same `Pre-master secret` from themselves. When they have `Pre-master secret`, they generate session key from pre-master secret and client random and server random. + +### Finish + +After all process is done, the server and client both sends the `Finished message` encrypted by session key. When they can decrypt this messages, the session key is generated properly. They can start communication securely with session key. + +## TLS 1.3 + +Current standard TLS 1.3 changed handshake process as 1-RTT(1 Round Trip Time) and there are no longer available to use RSA way (Legacy). diff --git a/docs/theories/pki/pki.md b/docs/theories/pki/pki.md new file mode 100644 index 0000000..7491481 --- /dev/null +++ b/docs/theories/pki/pki.md @@ -0,0 +1,102 @@ +# PKI(Public Key Infrastructure) + +PKI is defined on RFC4949 and RFC5280. PKI is neither one of the protocols nor algorithms, it is a huge infrastructure itself. It has various parts which are CA(Certification Authority), RA(Registration Authority), VA(Validation Authority) and Certificates. + +## CA (Certification Authority) + +CA is the most important part in PKI. 
It is the source of certification for the secure communication. CA has various roles, some can be delegated to the RA or VA. However, CA is essential in PKI. RA or VA are optional to support CA. The purposes of CA are as follows: + +- Managing certification policy +- Issuing certificates +- Verifying certificates validity +- Managing CRL(Certification Revocation List) +- Cross-certification between root CAs + +CAs often operate in a hierarchy. Each CA has their own role, and they are basically PAA(Policy Approving Authority), PCA(Policy Certification Authority), and CA(Certification Authority). Normally, PAA and PCA are usually combined as root CA, and CA is an intermediate CA. + +### root CA + +Root CA is simply CA of CA. It is the source of trust chain. Only a root CA can authenticate itself alongside other root CAs(cross certification) or intermediate CAs below it. Theoretically, and precisely root CA can be divided into PAA and PCA, but practically they operate as one authority, root CA. + +### intermediate CA + +Intermediate CA is the authority which issues certificates for end entities we can use (i.e. web servers). Their certificate is digitally signed (the hash value is signed by root CA's private key) by root CA, so every user can verify their trust with root CA's certificate (root CA's public key). The reason for using intermediate CA is for security. If CA's private key was violated, all chain of trust will be disrupted. When only root CA exists and its private key was violated, all secure communication will be threatened. To avoid and manage these threats root CA and intermediate CA are divided. + +### Structure of CA + +The basic structure of CA is very simple. It only contains private key, certificate (which is designated by X.509), and provisioner. When CA gets a request of sign on services' certificates it operates as shown below. 
+ +- Provisioner checks CSR (Certificate Signing Request) is valid on its policy +- When CSR is valid, it checks the content of certificates(x.509) and its policy +- CSR and content of certificates are valid, CA signs on the certificate with its private key (sign on hash value with CA's private key) + +After CA's signing, every client can check its validation with CA's certificates (CA's public key) + +## RA (Registration Authority) + +Due to CAs many roles, all processes above are made to be inefficient when conducted. Therefore, CA delegates its registration role to RA. RA seeks to examine CSR and certificates' content. When the content of certificate and CSR are valid under its policy, RA would send request to CA to sign. + +## VA (Validation Authority) + +Additionally, just as RA, CA can delegate its validation role to VA. Basically, VA manages certificates' validation based on CRL (Certificate Revocation List). However, in modern internet environment, it is inefficient to manage all invalid certificates based on a list. To solve this problem, OCSP (Online Certificate Status Protocol) was developed. However, OCSP also has its limitation, the advanced way is being suggested. + +### CRL (Certificate Revocation List) + +Once the certificate gets signed from CA, it is always a valid cryptograph before it expires. However, in the real world some certificates have to be revoked in case CA's key is violated or the service no longer operates, etc. So, CA needs to manage these kinds of invalid certificates which doesn't expire, such as letting clients know that their certificates are not valid anymore. This is a concept of CRL. VA releases the CRL to clients, the clients download or update the list to judge the certificates' validation. + +### OCSP (Online Certificate Status Protocol) + +CRL was very effective to revoke invalid certificates but when internet is growing, a massive of invalid certificates appears it shows its limitation. 
Because to check certificates validation, clients need to download and update CRL from CA or VA. There are tons of invalid certificates, and CRL's size become bigger and bigger. OCSP's concept is when client request to check certificates validation, OCSP server response it. To use OCSP, clients don't need to download full CRL anymore. It is very effective to solve CRL's problem. However, it has problems too. OCSP is defined on RFC6960. OCSP's problems are below. + +- Clients should expose its identity(IP address) to check certificates validation. +- VA can be SPOF(Single Point of Failure). +- It is very hard to match time syncronization +- The more requests come to VA, the more burden is on VA.(DoS, DDoS problem) + +### OCSP stapling + +OCSP's problem was that client should request to VA directly. So, when the certificate providers(like web server, db, etc.) request its validation and give the proof to client it, clients doesn't need to check the validation of certificate to VA anymore. It reduces VA's burden and time synchronization problems. + +## Certificate + +The form and content of certificate are standardized by X.509. X.509 format contains server's public key, domain(SAN), expired date, and sign of CA, and etc. When CA validate services or people who sent CSR(which contains information to create certificate with x.509 form), CA signs on their certificates (sign on hash value with CA's private key). Then, all clients can validate that certificates with CA's public key(CA's certificate) cryptographically. Ultimately, when clients trust CA, then they can trust services that have certificate signed by CA. However, even if the certificate were valid cryptographically, some certificates would not be valid. It is reason why CRL or OCSP is needed. + +### The way to issue certificate + +#### ACME + +Basically, certificate is formed by X.509. The services which want to get certificate send request(CSR) to CA. 
When the services were open internet, and it should get the certificate from public CA like let's encrypt on its public domain name. Public CA always sign on public domain which is open internet, and it is the role of public CA. The protocol to automate this process is ACME. Before ACME protocol, getting a certificate was manual. The person in charge should send CSR to CA, and CA checks the CSR and return the certificate ... All process was manual. ACME protocol made this process automatically. Simply, ACME protocol checks services authority on domain with various way, and issues certificate. + +- http-01 challenge + +The server creates certificate file for CA. CA accesses the service server's specific directory via 80/TCP. When CA checked the file, CA issues certificate. + +- DNS-01 challenge + +The server sends request to authoritative DNS server to add specific record with TSIG key(API key). When the specific record successfully added on DNS server and CA can check this, CA issues certificate. This challenge can verify full authority of domain including sub domain, so CA can issue wildcard certificate. + +- TLS_ALPN-01 challenge + +It uses TLS handshake process on 443/TCP. ALPN(Application Layer Protocol Negotiation) is the protocol to decide what protocol server and client will use. In this challenge, CA sends some token to service server. Service server creates temporary TLS certificates using `acme-tls/1` protocol with token from CA. When CA access to server, it asks `acme-tls/1` protocol to server. The server presents the temporary certificate, then CA issues certificate. + +#### Based on identity + +ACME is powerful protocol to automate issuing certificate. However, it is necessary to check ownership of domain to use ACME challenge (Usually, using http or https). However, It is possible to use TLS protocol without http or https. Originally, the process was also manual way called PKIX(Public-Key Infrastructure). 
It is so complex and slow, and it is impossible to use ACME. To automate this process, modern CA uses JWK(JSON Web Key) and JWT(JSON Web Token). The process is below. + +- The administrator registers system's public key as JWK at CA. + + > JWK is a format of specification of key. It shows key and its information as JSON format. + +- The system sends the JWT to get certificate signed by its private key to CA. + + > JWT is a specification of what client can do after connection or of proof this request is valid. In this process, JWT substitute CSR. + +- CA verify JWT by pre-registered JWK. When JWT is valid, it issues X.509 certificate. + +#### Usage of X.509 Certificates + +Regardless the way to issue certificate, either ACME or the way based on its identity, the certificate which is already issued is always valid before it is revoked. You can use X.509 certificate from the way based on its identity for https, oppositely you can use X.509 certificate from ACME for other TLS protocols (like LDAPS, DB TLS communication, etc). + +### X.500 and LDAP(Lightweight Directory Access Protocol) + +In the beginning of PKI, there was plan to make a server for all certificates. It is X.500. However, the protocol to implement X.500, DAP(Directory Access Protocol) was too complex and heavy to use internet environment. To make way easy and light, LDAP(Lightweight Directory Access Protocol) was developed to store certificates. However, there are a lot more efficient way to manage certificates appeared already. LDAP could not realize X.500, but it is utilized as centralized authentication system like [SSO](./sso.md), or OS account management. diff --git a/docs/theories/pki/sso.md b/docs/theories/pki/sso.md new file mode 100644 index 0000000..a6dfa31 --- /dev/null +++ b/docs/theories/pki/sso.md @@ -0,0 +1,129 @@ +# SSO (Single Sign On) + +When someone wants to use some services, usually they have to identify themselves to the services. 
The service verifies who you are (authentication), and what you can do (authorization). Originally, each service had its own authentication and authorization system. However, in the modern internet environment, a lot of services are organically connected to each other. It is common for one provider to operate various services. So, it is very inefficient and complex to operate authentication and authorization systems this way. To solve this problem, the concept of SSO — centralizing all services' authentication and authorization systems in one place — emerged. + +## LDAP (Lightweight Directory Access Protocol) + +To centralize the authentication and authorization system, it is naturally necessary to have a kind of central database of users. Fortunately, there was already a very good and suitable protocol: LDAP. LDAP was originally suggested to replace DAP (Directory Access Protocol) for X.500 in [PKI](./pki.md). Even though X.500 didn't materialize, the potential of LDAP made it suitable for materializing SSO, because LDAP's structure, which was designed for managing certificates, also allows managing users and their authorization. In the modern internet environment, many services don't usually use LDAP itself directly for SSO anymore, but it is still used as the database of users and their authorization. + +### Structure of LDAP + +LLDAP will be used in this homelab. The easiest way to understand LDAP is comparing each part of LDAP with a filesystem, because LDAP itself is fundamentally a database which has a tree structure like a filesystem. + +#### DN (Distinguished Name) + +DN is the unique path of some specific entry in the tree. It is like an absolute path in a filesystem. + +- Example: + - `uid=admin,ou=people,dc=ilnmors,dc=internal` + - `uid=authelia,ou=people,dc=ilnmors,dc=internal` + +#### Base DN (Base Distinguished Name) + +Base DN is the root of the LDAP tree. It is like the root path of a filesystem, `/`. All actions such as searches or operations start from the Base DN. 
+ +- LLDAP setting + - Environment.="LLDAP_LDAP_BASE_DN=dc=ilnmors,dc=internal" + - It means, the domain 'ilnmors.internal' will be the Base DN (the root of tree). + +#### Components of DN + +DN has components what has ordinary order. The order of these components are `cn(or uid)`, `ou`, `dc`. They are special attributes, which makes DN. + +- cn (Common name): The name of object. It is like a file itself in filesystem. +- ou (Organizational Unit): The name of the container which contain entries. It is like a folder in filesystem. +- dc (domain component): The domain components, Usually, it devides the full domain `ilnmors.internal` as `dc=ilnmors`, `dc=internal`. + +#### Object (or Entry) + +Object is the real data item the DN defines, like file or folder itself in filesystem. + +#### ObjectClass + +This is the template or blueprint of object. ObjectClass defines what is this object; Either a person or a group, or an organization unit. + +- What is this object; user (cn or uid) or group (ou). +- What is the attribute object must have, and might have. +- Example: + - `ObjectClass: person`: This must have `sn` and `cn` attributes. + - `ObjectClass: organizationalPerson`: This is inherited the `ObjectClass: person`, and it can have `mail` or `number` attributes. +- LLDAP uses standard ObjectClass: `person`, `groupOfNames`. + +#### Attribute + +Attribute is the value of object, like the content of file or folder in filesystem. It is saved as the pairs of key-value. This values are following the definition of ObjectClass. 
+ +- Example + - `uid=user1` has the value following `objectClass: person`, `objectClass: organizationalPerson` + - It must have `sn`, and `cn` + - It can have `mail`, or `number` + - It can have special attribute `memberOf: cn=admins,ou=admin_group, ...` (The attribute that shows the group the user belongs to) + +## IdP (Identity Provider) + +Many of modern services, SP (Service provider), use SAML (Security Assertion Markup Language) or OIDC (OAuth 2.0/OpenID Connect) protocol to implement SSO. Originally, each service had to send request of authentication to central server (which can be LDAP or else) individually to implement SSO. It means each service should protect the sensitive data itself, and some services which have vulnerability can threaten all system. This is why many of modern services use and support SAML or OIDC protocols. The IdP is needed to use these protocols. The IdP acts as the agent of all services to substitute authentication process on their behalf. Only IdP can access the real database, and all services trust IdP's authentication in this model. + +### SAML (Security Assertion Markup Language) + +SAML protocol was developed in 2001 for SSO. This protocol works on XML (eXtensible Markup Language) format which can meet complex security requirements in enterprise environments. It has been used for a long time, it makes this protocol very stable. However, XML itself is a complex and heavy format. This fact makes the protocol complex and heavy to use in common and small environment. There is the process of SAML below. + +- Start + +User sends the request to the service. When SP recieves the request, it redirect the request to IdP. + +- Authentication + +IdP asks the login information to the user and it authenticate user from database (like LDAP). When authentication process succeed, IdP generates SAML assertion including user's identity based on XML with sign to ensure integrity. IdP sends the this assertion to SP via user's browser. 
+ +- Finish + +The SP receives the assertion from the IdP via the user's browser and verifies the assertion. When it is valid, it allows login. + +### OIDC (OpenID Connect) and OAuth 2.0 + +OIDC was developed in 2014; it is newer than SAML. This is an authentication layer on top of the OAuth 2.0 protocol. Basically, OAuth 2.0 is for authorization, and OIDC is for authentication. The OAuth 2.0 protocol works on a JSON/REST format, especially JWT (JSON Web Token), which is lighter and simpler than XML. OIDC is the latest standard of SSO; it supports social login and is friendly to APIs. These features make this protocol easy to use in small and personal environments. There is the process of OIDC below. + +- Start + +The user sends the request to the service (in OIDC, RP; Relying Party). When the RP receives the request, it redirects to the IdP (in OIDC, OP; OpenID Provider). + +- Authentication + +The IdP (OP) asks for login information, and simultaneously asks permission to provide the information to the RP. After getting the information and permission, the IdP generates two tokens which are for different purposes. One is an ID token (JWT) which contains the user's identity for authentication; the other is an access token for authorization. The IdP sends these tokens to the RP. The differences from SAML are what data format the protocol uses, and whether the IdP gets permission or not. + +- Finish + +The RP receives the tokens from the IdP and verifies the tokens. When they are valid, it allows login. + +### Reverse proxy + +When an application doesn't support SSO, you can use a reverse proxy as the door of SSO using `Forward Authentication`. Usually, every web packet passes through a reverse proxy in the modern internet environment. Therefore, the reverse proxy (e.g. Caddy) can intercept the packets and force them to SSO via the IdP or OP (or LDAP itself) before they reach the application. + +- Header based + +When Authelia succeeds in authenticating someone, Caddy sends a specific header which contains user information like `X-Forwarded-User: A`. 
The application gets this header and automatically allows login as A. However, the ID still needs to be created in the application manually. + +> If a hacker can access the application without the reverse proxy, they can forge the X header to fool the application. In this homelab, all access towards applications will be limited by iptables to the reverse proxy or a sidecar reverse proxy. + +- LDAP based + +LLDAP will be a light and simple LDAP server. Authelia supports an OIDC OP (IdP) based on an external LDAP server. The LLDAP server will become the external LDAP server for Authelia. + + +--- +# Example of Authelia flow + +## Flow + +- Define the user and group in the LLDAP server + +### OIDC supported app + +- At the service, choose the login method as Authelia (OP) +- Log in at Authelia. +- Authelia accesses the LLDAP server to authenticate the user based on LDAP. +- When login succeeds, Authelia generates a token; the service trusts the OP's token and allows login (authentication and authorization) + +### Non-OIDC supported app + +- Forward_Auth function with Authelia. diff --git a/docs/theories/virtualization/passthrough.md b/docs/theories/virtualization/passthrough.md new file mode 100644 index 0000000..4c64daa --- /dev/null +++ b/docs/theories/virtualization/passthrough.md @@ -0,0 +1,15 @@ +# Hardware passthrough + +The concept of hardware passthrough is directly passing hardware devices to virtual machines, bypassing the hypervisor or emulation layers. Virtual machines can access the physical hardware directly. + +## GRUB + +GRUB \(Grand Unified Bootloader\) is a bootloader for the OS. It runs first when the computer boots, loads the kernel into memory, and binds hardware to the kernel with its drivers. The boot configuration that GRUB decides is stored in the initramfs. + +## IOMMU + +IOMMU is an MMU \(Memory Management Unit\) for I/O devices. It converts and isolates the logical addresses of I/O devices to physical addresses in memory, enabling the I/O devices' DMA \(Direct Memory Access\). 
When IOMMU is enabled via GRUB, it can allocate and manage the hardware's addresses in physical memory. This allows a physical address area to be reserved for a device so that the device can be allocated to virtual machines. + +## VFIO + +vfio is the driver for devices that are allocated to virtual machines. The vfio driver prevents access from the hypervisor OS, so that only the virtual machine which owns that device can access the hardware.