1.0.0 Release IaaS

This commit is contained in:
2026-03-15 04:41:02 +09:00
commit a7365da431
292 changed files with 36059 additions and 0 deletions

View File

@@ -0,0 +1,67 @@
[Quadlet]
DefaultDependencies=false
[Unit]
Description=Authelia
After=caddy.service
Wants=caddy.service
[Container]
Image=docker.io/authelia/authelia:{{ version['containers']['authelia'] }}
ContainerName=authelia
HostName=authelia
# Web UI
PublishPort=9091:9091/tcp
Volume=%h/containers/authelia/config:/config:rw
Volume=%h/containers/authelia/certs:/etc/ssl/authelia:ro
# Default
Environment="TZ=Asia/Seoul"
# Enable Go template engine
# !CAUTION!
{% raw %}# If this environment variable is enabled, you must use {{/* ... */}} for {{ go_filter }} options. The Go engine always processes its own grammar first.
{% endraw %}
Environment="X_AUTHELIA_CONFIG_FILTERS=template"
# Encryption
## JWT
Environment="AUTHELIA_IDENTITY_VALIDATION_RESET_PASSWORD_JWT_SECRET_FILE=/run/secrets/AUTHELIA_JWT_SECRET"
Secret=AUTHELIA_JWT_SECRET,target=/run/secrets/AUTHELIA_JWT_SECRET
## Session
Environment="AUTHELIA_SESSION_SECRET_FILE=/run/secrets/AUTHELIA_SESSION_SECRET"
Secret=AUTHELIA_SESSION_SECRET,target=/run/secrets/AUTHELIA_SESSION_SECRET
## Storage
Environment="AUTHELIA_STORAGE_ENCRYPTION_KEY_FILE=/run/secrets/AUTHELIA_STORAGE_SECRET"
Secret=AUTHELIA_STORAGE_SECRET,target=/run/secrets/AUTHELIA_STORAGE_SECRET
# OIDC (HMAC, JWKS), This part needs the clients to integrate with Authelia in order for it to activate.
Environment="AUTHELIA_IDENTITY_PROVIDERS_OIDC_HMAC_SECRET_FILE=/run/secrets/AUTHELIA_HMAC_SECRET"
Secret=AUTHELIA_HMAC_SECRET,target=/run/secrets/AUTHELIA_HMAC_SECRET
Secret=AUTHELIA_JWKS_RS256,target=/run/secrets/AUTHELIA_JWKS_RS256
Secret=AUTHELIA_JWKS_ES256,target=/run/secrets/AUTHELIA_JWKS_ES256
# LDAP
Environment="AUTHELIA_AUTHENTICATION_BACKEND_LDAP_PASSWORD_FILE=/run/secrets/AUTHELIA_LDAP_PASSWORD"
Secret=AUTHELIA_LDAP_PASSWORD,target=/run/secrets/AUTHELIA_LDAP_PASSWORD
# Database
Environment="AUTHELIA_STORAGE_POSTGRES_PASSWORD_FILE=/run/secrets/POSTGRES_AUTHELIA_PASSWORD"
Secret=POSTGRES_AUTHELIA_PASSWORD,target=/run/secrets/POSTGRES_AUTHELIA_PASSWORD
Exec=--config /config/authelia.yaml
[Service]
# Wait for dependency
# They run as rootless podman containers, so their ports are not opened until they are running normally
# Check their ports with nc command
ExecStartPre=/usr/bin/nc -zv {{ infra_uri['postgresql']['domain'] }} {{ infra_uri['postgresql']['ports']['tcp'] }}
ExecStartPre=/usr/bin/nc -zv {{ infra_uri['ldap']['domain'] }} {{ infra_uri['ldap']['ports']['ldaps'] }}
ExecStartPre=sleep 5
Restart=always
RestartSec=10s
TimeoutStopSec=120
[Install]
WantedBy=default.target

View File

@@ -0,0 +1,133 @@
---
# https://github.com/lldap/lldap/blob/main/example_configs/authelia.md
# authelia.yaml
# certificates setting
certificates_directory: '/etc/ssl/authelia/'
# theme setting - light, dark, grey, auto.
theme: 'auto'
# Server configuration
server:
# TLS will be applied on caddy
address: 'tcp://:9091/'
# Log configuration
log:
level: 'info'
# file_path: 'path/of/log/file' - without this option, logs go to stdout
# TOTP configuration
totp:
# The issuer option is for the 2FA app. It works as an identifier, e.g. 'My homelab', 'ilnmors.internal', or 'Authelia - ilnmors'
issuer: 'ilnmors.internal'
# Identity validation configuration
identity_validation:
reset_password:
jwt_secret: '' # $AUTHELIA_IDENTITY_VALIDATION_RESET_PASSWORD_JWT_SECRET_FILE option is designated in container file
# Authentication backend provider configuration
authentication_backend:
ldap:
# ldaps uses 636 -> NAT automatically changes port 636 in outbound packets -> 2636, which the lldap server uses.
address: 'ldaps://ldap.ilnmors.internal'
implementation: 'lldap'
# tls configuration; it uses certificates_directory's /etc/ssl/authelia/ilnmors_root_ca.crt
tls:
server_name: 'ldap.ilnmors.internal'
skip_verify: false
# LLDAP base DN
base_dn: 'dc=ilnmors,dc=internal'
additional_users_dn: 'ou=people'
additional_groups_dn: 'ou=groups'
# LLDAP filters
users_filter: '(&(|({username_attribute}={input})({mail_attribute}={input}))(objectClass=person))'
groups_filter: '(&(member={dn})(objectClass=groupOfNames))'
# LLDAP bind account configuration
user: 'uid=authelia,ou=people,dc=ilnmors,dc=internal'
password: '' # $AUTHELIA_AUTHENTICATION_BACKEND_LDAP_PASSWORD_FILE option is designated in container file
# Access control configuration
access_control:
default_policy: 'deny'
rules:
# authelia portal
- domain: 'authelia.ilnmors.internal'
policy: 'bypass'
- domain: 'authelia.ilnmors.com'
policy: 'bypass'
- domain: 'test.ilnmors.com'
policy: 'one_factor'
subject:
- 'group:admins'
# Session provider configuration
session:
secret: '' # $AUTHELIA_SESSION_SECRET_FILE is designated in container file
expiration: '24 hours' # Session maintains for 24 hours
inactivity: '24 hours' # Session maintains for 24 hours without actions
cookies:
- name: 'authelia_public_session'
domain: 'ilnmors.com'
authelia_url: 'https://authelia.ilnmors.com'
same_site: 'lax'
# This authelia doesn't use Redis.
# Storage provider configuration
storage:
encryption_key: '' # $AUTHELIA_STORAGE_ENCRYPTION_KEY_FILE is designated in container file
postgres:
address: 'tcp://{{ infra_uri['postgresql']['domain'] }}:{{ infra_uri['postgresql']['ports']['tcp'] }}'
database: 'authelia_db'
username: 'authelia'
password: '' # $AUTHELIA_STORAGE_POSTGRES_PASSWORD_FILE is designated in container file
tls:
server_name: '{{ infra_uri['postgresql']['domain'] }}'
skip_verify: false
# Notification provider
notifier:
filesystem:
filename: '/config/notification.txt'
# This part needs the clients to integrate with Authelia in order for it to activate.
identity_providers:
oidc:
hmac_secret: '' # $AUTHELIA_IDENTITY_PROVIDERS_OIDC_HMAC_SECRET_FILE
jwks:{% raw %}
- algorithm: 'RS256'
use: 'sig'
key: {{ secret "/run/secrets/AUTHELIA_JWKS_RS256" | mindent 10 "|" | msquote }}
- algorithm: 'ES256'
use: 'sig'
key: {{ secret "/run/secrets/AUTHELIA_JWKS_ES256" | mindent 10 "|" | msquote }}{% endraw %}
clients:
# https://www.authelia.com/integration/openid-connect/clients/synology-dsm/
- client_id: 'dsm'
client_name: 'dsm'
# It depends on application
# hash value generation:
# podman exec -it authelia sh
# authelia crypto hash generate pbkdf2 --password 'password'
client_secret: '{{ hostvars['console']['dsm']['oidc']['hash'] }}'
# If there were no client secret, public should be `true` [true | false]
public: false
authorization_policy: 'one_factor'
require_pkce: false
pkce_challenge_method: ''
redirect_uris:
- 'https://{{ infra_uri['nas']['domain'] }}:{{ infra_uri['nas']['ports']['https'] }}'
scopes:
- 'openid'
- 'profile'
- 'groups'
- 'email'
response_types:
- 'code'
grant_types:
- 'authorization_code'
access_token_signed_response_alg: 'none'
userinfo_signed_response_alg: 'none'
# [ client_secret_post | client_secret_basic ]
token_endpoint_auth_method: 'client_secret_post'

View File

@@ -0,0 +1,17 @@
FROM docker.io/library/caddy:{{ version['containers']['caddy'] }}-builder-alpine AS builder
RUN xcaddy build \
{% if node['name'] == 'auth' %}
--with github.com/caddy-dns/rfc2136 \
--with github.com/hslatman/caddy-crowdsec-bouncer/crowdsec \
--with github.com/hslatman/caddy-crowdsec-bouncer/http
{% else %}
--with github.com/caddy-dns/rfc2136
{% endif %}
FROM docker.io/library/caddy:{{ version['containers']['caddy'] }}
COPY --from=builder /usr/bin/caddy /usr/bin/caddy
COPY ./ilnmors_root_ca.crt /usr/local/share/ca-certificates/ilnmors_root_ca.crt
RUN update-ca-certificates

View File

@@ -0,0 +1,49 @@
[Quadlet]
DefaultDependencies=false
[Unit]
Description=Caddy
{% if node['name'] == "infra" %}
After=ca.service
Requires=ca.service
{% else %}
After=network-online.target
Wants=network-online.target
{% endif %}
[Container]
Image=ilnmors.internal/{{ node['name'] }}/caddy:{{ version['containers']['caddy'] }}
ContainerName=caddy_{{ node['name'] }}
HostName=caddy_{{ node['name'] }}
{% if node['name'] == 'infra' %}
AddHost={{ infra_uri['ca']['domain'] }}:host-gateway
AddHost={{ infra_uri['prometheus']['domain'] }}:host-gateway
AddHost={{ infra_uri['loki']['domain'] }}:host-gateway
{% endif %}
PublishPort=2080:80/tcp
PublishPort=2443:443/tcp
Volume=%h/containers/caddy/etc:/etc/caddy:ro
Volume=%h/containers/caddy/data:/data:rw
{% if node['name'] == 'auth' %}
Volume=/var/log/caddy:/log:rw
{% endif %}
Environment="TZ=Asia/Seoul"
Secret=CADDY_ACME_KEY,target=/run/secrets/CADDY_ACME_KEY
{% if node['name'] == 'auth' %}
Secret=CADDY_CROWDSEC_KEY,target=/run/secrets/CADDY_CROWDSEC_KEY
{% endif %}
[Service]
Restart=always
RestartSec=10s
TimeoutStopSec=120
[Install]
WantedBy=default.target

View File

@@ -0,0 +1,62 @@
{
# CrowdSec LAPI connection
crowdsec {
api_url https://{{ infra_uri['crowdsec']['domain'] }}:{{ infra_uri['crowdsec']['ports']['https'] }}
api_key "{file./run/secrets/CADDY_CROWDSEC_KEY}"
}
}
# Snippets
# CrowdSec log for parser
(crowdsec_log) {
log {
output file /log/access.log {
mode 0644
roll_size 100MiB
roll_keep 1
}
format json
}
}
# Private TLS ACME with DNS-01-challenge
(private_tls) {
tls {
issuer acme {
dir https://{{ infra_uri['ca']['domain'] }}:{{ infra_uri['ca']['ports']['https'] }}/acme/acme@ilnmors.internal/directory
dns rfc2136 {
server {{ infra_uri['bind']['domain'] }}:{{ infra_uri['bind']['ports']['dns'] }}
key_name acme-key
key_alg hmac-sha256
key "{file./run/secrets/CADDY_ACME_KEY}"
}
}
}
}
# Public domain
authelia.ilnmors.com {
import crowdsec_log
route {
crowdsec
reverse_proxy host.containers.internal:9091
}
}
test.ilnmors.com {
import crowdsec_log
route {
crowdsec
forward_auth host.containers.internal:9091 {
# Authelia Forward Auth endpoint URI
uri /api/authz/forward-auth
copy_headers Remote-User Remote-Groups Remote-Email Remote-Name
}
root * /usr/share/caddy
file_server
}
}
# Internal domain
auth.ilnmors.internal {
import private_tls
metrics
}

View File

@@ -0,0 +1,40 @@
# Private TLS ACME with DNS-01-challenge
(private_tls) {
tls {
issuer acme {
dir https://{{ infra_uri['ca']['domain'] }}:{{ infra_uri['ca']['ports']['https'] }}/acme/acme@ilnmors.internal/directory
dns rfc2136 {
server {{ infra_uri['bind']['domain'] }}:{{ infra_uri['bind']['ports']['dns'] }}
key_name acme-key
key_alg hmac-sha256
key "{file./run/secrets/CADDY_ACME_KEY}"
}
}
}
}
infra.ilnmors.internal {
import private_tls
metrics
}
{{ infra_uri['ldap']['domain'] }} {
import private_tls
route {
reverse_proxy host.containers.internal:{{ infra_uri['ldap']['ports']['http'] }}
}
}
{{ infra_uri['prometheus']['domain'] }} {
import private_tls
route {
reverse_proxy https://{{ infra_uri['prometheus']['domain'] }}:{{ infra_uri['prometheus']['ports']['https'] }}
}
}
grafana.ilnmors.internal {
import private_tls
route {
reverse_proxy host.containers.internal:3000
}
}

View File

@@ -0,0 +1,35 @@
[Quadlet]
DefaultDependencies=false
[Unit]
Description=CA
After=network-online.target
Wants=network-online.target
[Container]
Image=docker.io/smallstep/step-ca:{{ version['containers']['step'] }}
ContainerName=ca
HostName=ca
PublishPort=9000:9000/tcp
Volume=%h/containers/ca/certs:/home/step/certs:ro
Volume=%h/containers/ca/secrets:/home/step/secrets:ro
Volume=%h/containers/ca/config:/home/step/config:rw
Volume=%h/containers/ca/db:/home/step/db:rw
Volume=%h/containers/ca/templates:/home/step/templates:rw
Environment="TZ=Asia/Seoul"
Environment="PWDPATH=/run/secrets/STEP_CA_PASSWORD"
Secret=STEP_CA_PASSWORD,target=/run/secrets/STEP_CA_PASSWORD
[Service]
Restart=always
RestartSec=10s
TimeoutStopSec=120
[Install]
WantedBy=default.target

View File

@@ -0,0 +1,61 @@
{
"root": "/home/step/certs/ilnmors_root_ca.crt",
"federatedRoots": null,
"crt": "/home/step/certs/ilnmors_intermediate_ca.crt",
"key": "/home/step/secrets/ilnmors_intermediate_ca.key",
"address": ":9000",
"insecureAddress": "",
"dnsNames": [
"{{ infra_uri['ca']['domain'] }}"
],
"logger": {
"format": "text"
},
"db": {
"type": "badgerv2",
"dataSource": "/home/step/db",
"badgerFileLoadingMode": ""
},
"authority": {
"policy": {
"x509": {
"allow": {
"dns": [
"ilnmors.internal",
"*.ilnmors.internal"
]
},
"allowWildcardNames": true
}
},
"provisioners": [
{
"type": "ACME",
"name": "acme@ilnmors.internal",
"claims": {
"defaultTLSCertDuration": "2160h0m0s",
"enableSSHCA": true,
"disableRenewal": false,
"allowRenewalAfterExpiry": false,
"disableSmallstepExtensions": false
},
"options": {
"x509": {},
"ssh": {}
}
}
],
"template": {},
"backdate": "1m0s"
},
"tls": {
"cipherSuites": [
"TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256",
"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256"
],
"minVersion": 1.2,
"maxVersion": 1.3,
"renegotiation": false
},
"commonName": "ilnmors Online CA"
}

View File

@@ -0,0 +1,6 @@
{
"ca-url": "https://{{ infra_uri['ca']['domain'] }}:{{ infra_uri['ca']['ports']['https'] }}",
"ca-config": "/home/step/config/ca.json",
"fingerprint": "215c851d2d0d2dbf90fc3507425207c29696ffd587c640c94a68dddb1d84d8e8",
"root": "/home/step/certs/ilnmors_root_ca.crt"
}

View File

@@ -0,0 +1,8 @@
{
"subject": {{ toJson .Subject }},
"keyUsage": ["certSign", "crlSign"],
"basicConstraints": {
"isCA": true,
"maxPathLen": 0
}
}

View File

@@ -0,0 +1,54 @@
# https://github.com/grafana/grafana/blob/main/conf/defaults.ini
[paths]
data = /var/lib/grafana
logs = /var/log/grafana
plugins = /var/lib/grafana/plugins
provisioning = /etc/grafana/provisioning
[server]
protocol = http
http_port = 3000
domain = grafana.ilnmors.internal
root_url = http://grafana.ilnmors.internal/
router_logging = false
[database]
type = postgres
host = {{ infra_uri['postgresql']['domain'] }}:{{ infra_uri['postgresql']['ports']['tcp'] }}
name = grafana_db
user = grafana
password = $__file{/run/secrets/GF_DB_PASSWORD}
ssl_mode = verify-full
ca_cert_path = /etc/ssl/grafana/ilnmors_root_ca.crt
[auth.ldap]
enabled = true
config_file = /etc/grafana/ldap.toml
allow_sign_up = true
[auth]
disable_login_form = false
allow_anonymous_device_id_auth = false
[security]
# local admin
admin_user = local_admin
# local password
admin_password = $__file{/run/secrets/GF_ADMIN_PASSWORD}
cookie_secure = true
cookie_samesite = lax
allow_embedding = false
# [smtp]
# enabled = true
# host = localhost:25
# from_address = alert@ilnmors.internal
# from_name = Grafana-Infra
[analytics]
reporting_enabled = false
check_for_updates = false
[log]
mode = console
level = info

View File

@@ -0,0 +1,47 @@
# https://github.com/lldap/lldap/blob/main/example_configs/grafana_ldap_config.toml
[[servers]]
host = "{{ infra_uri['ldap']['domain'] }}"
port = {{ infra_uri['ldap']['ports']['ldaps'] }}
# Activate STARTTLS or LDAPS
use_ssl = true
# true = STARTTLS, false = LDAPS
start_tls = false
tls_ciphers = []
min_tls_version = ""
ssl_skip_verify = false
root_ca_cert = "/etc/ssl/grafana/ilnmors_root_ca.crt"
# mTLS option, it is not needed
# client_cert = "/path/to/client.crt"
# client_key = "/path/to/client.key"
bind_dn = "uid=grafana,ou=people,dc=ilnmors,dc=internal"
bind_password = "$__file{/run/secrets/LDAP_BIND_PASSWORD}"
search_filter = "(|(uid=%s)(mail=%s))"
search_base_dns = ["dc=ilnmors,dc=internal"]
[servers.attributes]
member_of = "memberOf"
email = "mail"
name = "displayName"
surname = "sn"
username = "uid"
group_search_filter = "(&(objectClass=groupOfUniqueNames)(uniqueMember=%s))"
group_search_base_dns = ["ou=groups,dc=ilnmors,dc=internal"]
group_search_filter_user_attribute = "uid"
[[servers.group_mappings]]
group_dn = "cn=lldap_admin,ou=groups,dc=ilnmors,dc=internal"
org_role = "Admin"
grafana_admin = true
[[servers.group_mappings]]
group_dn = "cn=admins,ou=groups,dc=ilnmors,dc=internal"
org_role = "Editor"
grafana_admin = false
[[servers.group_mappings]]
group_dn = "cn=users,ou=groups,dc=ilnmors,dc=internal"
org_role = "Viewer"
grafana_admin = false

View File

@@ -0,0 +1,29 @@
# https://github.com/grafana/grafana/blob/main/conf/provisioning/datasources/sample.yaml
apiVersion: 1
datasources:
- name: Prometheus
type: prometheus
url: https://prometheus.ilnmors.internal:9090
access: proxy
isDefault: true
jsonData:
tlsAuth: false
tlsAuthWithCACert: true
httpMethod: POST
secureJsonData:
tlsCACert: "$__file{/etc/ssl/grafana/ilnmors_root_ca.crt}"
- name: Loki
type: loki
url: https://loki.ilnmors.internal:3100
access: proxy
jsonData:
tlsAuth: false
tlsAuthWithCACert: true
# Tenant value set to solve the "no org id" error
httpHeaderName1: "X-Scope-OrgID"
maxLines: 1000
secureJsonData:
tlsCACert: "$__file{/etc/ssl/grafana/ilnmors_root_ca.crt}"
httpHeaderValue1: "ilnmors.internal"

View File

@@ -0,0 +1,43 @@
[Quadlet]
DefaultDependencies=false
[Unit]
Description=Grafana
After=postgresql.service ldap.service
Requires=postgresql.service ldap.service
[Container]
Image=docker.io/grafana/grafana:{{ version['containers']['grafana'] }}
ContainerName=grafana
HostName=grafana
AddHost={{ infra_uri['postgresql']['domain'] }}:host-gateway
AddHost={{ infra_uri['ldap']['domain'] }}:host-gateway
AddHost={{ infra_uri['prometheus']['domain'] }}:host-gateway
AddHost={{ infra_uri['loki']['domain'] }}:host-gateway
PublishPort=3000:3000/tcp
Volume=%h/containers/grafana/data:/var/lib/grafana:rw
Volume=%h/containers/grafana/etc:/etc/grafana:ro
Volume=%h/containers/grafana/ssl:/etc/ssl/grafana:ro
Environment="TZ=Asia/Seoul"
Environment="GF_PATHS_CONFIG=/etc/grafana/grafana.ini"
# plugin
# Environment="GF_INSTALL_PLUGINS=grafana-clock-panel,grafana-simple-json-datasource"
Environment="GF_FEATURE_TOGGLES_EXPAND_ENV_VARS=true"
Secret=GF_DB_PASSWORD,target=/run/secrets/GF_DB_PASSWORD
Secret=LDAP_BIND_PASSWORD,target=/run/secrets/LDAP_BIND_PASSWORD
Secret=GF_ADMIN_PASSWORD,target=/run/secrets/GF_ADMIN_PASSWORD
[Service]
Restart=always
RestartSec=10s
TimeoutStopSec=120
[Install]
WantedBy=default.target

View File

@@ -0,0 +1,64 @@
[Quadlet]
DefaultDependencies=false
[Unit]
Description=LDAP
After=postgresql.service
Requires=postgresql.service
[Container]
Image=docker.io/lldap/lldap:{{ version['containers']['ldap'] }}
ContainerName=ldap
HostName=ldap
# They are at the same host (for Pasta, it is needed)
AddHost={{ infra_uri['postgresql']['domain'] }}:host-gateway
# For LDAPS - 636 > 6360 nftables
PublishPort=6360:6360/tcp
# Web UI
PublishPort=17170:17170/tcp
Volume=%h/containers/ldap/data:/data:rw
Volume=%h/containers/ldap/ssl:/etc/ssl/ldap:ro
# Default
Environment="TZ=Asia/Seoul"
# Domain
Environment="LLDAP_LDAP_BASE_DN=dc=ilnmors,dc=internal"
# LDAPS
Environment="LLDAP_LDAPS_OPTIONS__ENABLED=true"
Environment="LLDAP_LDAPS_OPTIONS__CERT_FILE=/etc/ssl/ldap/ldap.crt"
Environment="LLDAP_LDAPS_OPTIONS__KEY_FILE=/etc/ssl/ldap/ldap.key"
# Secret files' Path
Environment="LLDAP_KEY_SEED_FILE=/run/secrets/LLDAP_KEY_SEED"
Environment="LLDAP_JWT_SECRET_FILE=/run/secrets/LLDAP_JWT_SECRET"
# SMTP options > you can set all of these at the /data/config.toml instead of Environment
# Only `LLDAP_SMTP_OPTIONS__PASSWORD` will be injected by secret
# LLDAP_SMTP_OPTIONS__ENABLE_PASSWORD_RESET=true
# LLDAP_SMTP_OPTIONS__SERVER=smtp.example.com
# LLDAP_SMTP_OPTIONS__PORT=465
# LLDAP_SMTP_OPTIONS__SMTP_ENCRYPTION=TLS
# LLDAP_SMTP_OPTIONS__USER=no-reply@example.com
# LLDAP_SMTP_OPTIONS__PASSWORD=PasswordGoesHere
# LLDAP_SMTP_OPTIONS__FROM=no-reply <no-reply@example.com>
# LLDAP_SMTP_OPTIONS__TO=admin <admin@example.com>
# Database
Secret=LLDAP_DATABASE_URL,type=env
# Secrets
Secret=LLDAP_KEY_SEED,target=/run/secrets/LLDAP_KEY_SEED
Secret=LLDAP_JWT_SECRET,target=/run/secrets/LLDAP_JWT_SECRET
[Service]
Restart=always
RestartSec=10s
TimeoutStopSec=120
[Install]
WantedBy=default.target

View File

@@ -0,0 +1,46 @@
---
server:
http_listen_address: "::"
http_listen_port: 3100
http_tls_config:
cert_file: /etc/ssl/loki/loki.crt
key_file: /etc/ssl/loki/loki.key
#memberlist:
# join_members: ["localhost"]
# bind_addr: ['::']
# bind_port: 7946
schema_config:
configs:
- from: "2023-01-01"
store: tsdb
object_store: filesystem
schema: v13
index:
prefix: index_
period: 24h
limits_config:
retention_period: 30d
reject_old_samples: true
reject_old_samples_max_age: 168h
common:
instance_addr: localhost
path_prefix: /loki
replication_factor: 1
storage:
filesystem:
chunks_directory: /loki/chunks
rules_directory: /loki/rules
ring:
kvstore:
store: inmemory
compactor:
working_directory: /loki/compactor
delete_request_store: filesystem
compaction_interval: 10m
retention_enabled: true
retention_delete_delay: 2h

View File

@@ -0,0 +1,32 @@
[Quadlet]
DefaultDependencies=false
[Unit]
Description=Loki
After=network-online.target
Wants=network-online.target
[Container]
Image=docker.io/grafana/loki:{{ version['containers']['loki'] }}
ContainerName=loki
HostName=loki
PublishPort=3100:3100/tcp
Volume=%h/containers/loki/data:/loki:rw
Volume=%h/containers/loki/etc:/etc/loki:ro
Volume=%h/containers/loki/ssl:/etc/ssl/loki:ro
Environment="TZ=Asia/Seoul"
Exec=--config.file=/etc/loki/loki.yaml
[Service]
Restart=always
RestartSec=10s
TimeoutStopSec=120
[Install]
WantedBy=default.target

View File

@@ -0,0 +1,12 @@
ARG PG_VER={{ version['containers']['postgresql'] }}
FROM docker.io/library/postgres:${PG_VER}
ARG VECTORCHORD_VER={{ version['containers']['vectorchord'] }}
RUN apt update && \
apt install -y wget postgresql-${PG_MAJOR}-pgvector && \
wget -nv -O /tmp/vchord.deb https://github.com/tensorchord/VectorChord/releases/download/${VECTORCHORD_VER}/postgresql-${PG_MAJOR}-vchord_${VECTORCHORD_VER}-1_amd64.deb && \
apt install -y /tmp/vchord.deb && \
apt purge -y wget && apt autoremove -y && \
rm -rf /tmp/vchord.deb /var/lib/apt/lists/*

View File

@@ -0,0 +1,28 @@
# @authcomment@
# TYPE DATABASE USER ADDRESS METHOD
# Local host `trust`
local all all trust
# Local monitoring connection (host - infra VM) `trust`
hostssl postgres alloy {{ hostvars['fw']['network4']['infra']['server'] }}/32 trust
hostssl postgres alloy {{ hostvars['fw']['network6']['infra']['server'] }}/128 trust
hostssl postgres alloy {{ hostvars['fw']['network4']['subnet']['lla'] }} trust
hostssl postgres alloy {{ hostvars['fw']['network6']['subnet']['lla'] }} trust
# Local connection (in postgresql container) needs password (127.0.0.1 - container loopback)
host all all 127.0.0.1/32 scram-sha-256
host all all ::1/128 scram-sha-256
# Local connection (host - infra VM) needs password (169.254.1.0/24 - link_local subnet for containers in pasta mode)
hostssl all all {{ hostvars['fw']['network4']['infra']['server'] }}/32 scram-sha-256
hostssl all all {{ hostvars['fw']['network6']['infra']['server'] }}/128 scram-sha-256
hostssl all all {{ hostvars['fw']['network4']['subnet']['lla'] }} scram-sha-256
hostssl all all {{ hostvars['fw']['network6']['subnet']['lla'] }} scram-sha-256
# auth VM
hostssl all all {{ hostvars['fw']['network4']['auth']['server'] }}/32 scram-sha-256
hostssl all all {{ hostvars['fw']['network6']['auth']['server'] }}/128 scram-sha-256
# app VM (Applications, 192.168.10.13)
hostssl all all {{ hostvars['fw']['network4']['app']['server'] }}/32 scram-sha-256
hostssl all all {{ hostvars['fw']['network6']['app']['server'] }}/128 scram-sha-256

View File

@@ -0,0 +1,41 @@
#------------------------------------------------------------------------------
# CUSTOMIZED OPTIONS
#------------------------------------------------------------------------------
# Add settings for extensions here
# Listen_address
listen_addresses = '*'
# Max connections
max_connections = 250
# listen_port
port = 5432
# SSL
ssl = on
ssl_ca_file = '/etc/ssl/postgresql/ilnmors_root_ca.crt'
ssl_cert_file = '/etc/ssl/postgresql/postgresql.crt'
ssl_key_file = '/etc/ssl/postgresql/postgresql.key'
ssl_ciphers = 'HIGH:!aNULL:!MD5'
ssl_prefer_server_ciphers = on
# log
log_destination = 'stderr'
log_checkpoints = on
log_temp_files = 0
log_min_duration_statement = 500
# IO
track_io_timing = on
## immich_config
shared_preload_libraries = 'vchord.so'
search_path = '"$user", public'
max_wal_size = 5GB
shared_buffers = 512MB
wal_compression = on
work_mem = 16MB
autovacuum_vacuum_scale_factor = 0.1
autovacuum_analyze_scale_factor = 0.05
autovacuum_vacuum_cost_limit = 1000
effective_io_concurrency = 200
random_page_cost = 1.2

View File

@@ -0,0 +1,36 @@
[Quadlet]
DefaultDependencies=false
[Unit]
Description=PostgreSQL
After=network-online.target
Wants=network-online.target
[Container]
Image=ilnmors.internal/{{ node['name'] }}/postgres:pg{{ version['containers']['postgresql'] }}-vectorchord{{ version['containers']['vectorchord'] }}
ContainerName=postgresql
HostName=postgresql
PublishPort=5432:5432/tcp
Volume=%h/containers/postgresql/data:/var/lib/postgresql:rw
Volume=%h/containers/postgresql/config:/config:ro
Volume=%h/containers/postgresql/ssl:/etc/ssl/postgresql:ro
Volume=%h/containers/postgresql/init:/docker-entrypoint-initdb.d/:ro
Volume=%h/containers/postgresql/backups:/backups:rw
Environment="TZ=Asia/Seoul"
# This option is only for init process, after init custom config file `pg_hba.conf` will control this option.
Environment="POSTGRES_HOST_AUTH_METHOD=trust"
Exec=postgres -c 'config_file=/config/postgresql.conf' -c 'hba_file=/config/pg_hba.conf'
[Service]
Restart=always
RestartSec=10s
TimeoutStopSec=120
[Install]
WantedBy=default.target

View File

@@ -0,0 +1,18 @@
[Unit]
Description=PostgreSQL Cluster Backup Service
After=postgresql.service
BindsTo=postgresql.service
[Service]
Type=oneshot
# logging
StandardOutput=journal
StandardError=journal
ExecStartPre=/usr/bin/podman exec postgresql sh -c "mkdir -p /backups/cluster && chown postgres:root /backups/cluster && chmod 770 /backups/cluster"
# Run the script
ExecStart=/usr/bin/podman exec -u postgres postgresql sh -c 'pg_dumpall -U postgres --schema-only | grep -v -E "CREATE ROLE postgres" > /backups/cluster/pg_cluster_$(date "+%%Y-%%m-%%d").sql'
ExecStart=/usr/bin/podman exec -u postgres postgresql sh -c "find /backups/cluster -maxdepth 1 -type f -mtime +7 -delete"
ExecStart=/usr/bin/podman exec postgresql sh -c "chown -R postgres:root /backups/cluster && chmod 660 /backups/cluster/*"

View File

@@ -0,0 +1,17 @@
[Unit]
Description=Run PostgreSQL Cluster Backup service every day
[Timer]
# Execute service after 1 min on booting
OnBootSec=1min
# Execute service every day 00:00
OnCalendar=*-*-* 00:00:00
# Random time to postpone the timer
RandomizedDelaySec=15min
# When timer is activated, Service also starts.
Persistent=true
[Install]
WantedBy=timers.target

View File

@@ -0,0 +1,19 @@
[Unit]
Description=PostgreSQL Data %i Backup Service
After=postgresql.service
BindsTo=postgresql.service
[Service]
Type=oneshot
# logging
StandardOutput=journal
StandardError=journal
ExecStartPre=/usr/bin/podman exec postgresql sh -c "mkdir -p /backups/%i && chown postgres:root /backups/%i && chmod 770 /backups/%i"
# Run the script
ExecStart=/usr/bin/podman exec -u postgres postgresql sh -c 'printf "\\connect %i_db\n" > /backups/%i/pg_%i_$(date "+%%Y-%%m-%%d").sql'
ExecStart=/usr/bin/podman exec -u postgres postgresql sh -c 'pg_dump -U postgres -d %i_db --data-only >> /backups/%i/pg_%i_$(date "+%%Y-%%m-%%d").sql'
ExecStart=/usr/bin/podman exec -u postgres postgresql sh -c "find /backups/%i -maxdepth 1 -type f -mtime +7 -delete"
ExecStart=/usr/bin/podman exec postgresql sh -c "chown -R postgres:root /backups/%i && chmod 660 /backups/%i/*"

View File

@@ -0,0 +1,17 @@
[Unit]
Description=Run %i Data Backup service every day
[Timer]
# Execute service after 1 min on booting
OnBootSec=1min
# Execute service every day 00:00
OnCalendar=*-*-* 00:00:00
# Random time to postpone the timer
RandomizedDelaySec=15min
# When timer is activated, Service also starts.
Persistent=true
[Install]
WantedBy=timers.target

View File

@@ -0,0 +1,32 @@
# my global config
global:
scrape_interval: 15s # Set the scrape interval to every 15 seconds. Default is every 1 minute.
evaluation_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute.
# scrape_timeout is set to the global default (10s).
# Alertmanager configuration
alerting:
alertmanagers:
- static_configs:
- targets:
# - alertmanager:9093
# Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
rule_files:
- "/etc/prometheus/rules.yaml"
# A scrape configuration containing exactly one endpoint to scrape:
# Here it's Prometheus itself.
scrape_configs:
# The job name is added as a label `job=<job_name>` to any timeseries scraped from this config.
- job_name: "prometheus"
# metrics_path defaults to '/metrics'
scheme: "https"
tls_config:
ca_file: "/etc/ssl/prometheus/ilnmors_root_ca.crt"
server_name: "{{ infra_uri['prometheus']['domain'] }}"
static_configs:
- targets: ["localhost:9090"]
# The label name is added as a label `label_name=<label_value>` to any timeseries scraped from this config.
labels:
instance: "{{ node['name'] }}"

View File

@@ -0,0 +1,38 @@
groups:
- name: node_exporters_heartbeat
rules:
{% for instance in ['vmm', 'fw', 'infra', 'auth', 'app'] %}
- alert: {{ instance }}_node_exporter_down
expr: |
(present_over_time(up{instance="{{ instance }}"}[5m]) or on() vector(0)) == 0
for: 30s
labels:
severity: critical
annotations:
summary: "Exporter heartbeat is down: {{ instance }}"
description: "{{ instance }} exporter is down for 5 mins"
{% endfor %}
- name: postgresql_heartbeat
rules:
- alert: Postgresql_Down
expr: |
(present_over_time(pg_up{instance="infra", job="postgres"}[5m]) or on() vector(0)) == 0
for: 30s
labels:
severity: critical
annotations:
summary: "Postgresql Heartbeat Lost: postgresql"
description: "postgresql node is down for 5 mins."
- name: Certificate_expiry_check
rules:
{% for filename in ['root.crt', 'intermediate.crt', 'crowdsec.crt', 'blocky.crt', 'postgresql.crt', 'ldap.crt', 'prometheus.crt', 'loki.crt', 'dsm.crt'] %}
- alert: {{ filename | replace('.', '_') }}_is_expired_soon
expr: |
max(x509_cert_not_after{filename="{{ filename }}"}) - time() < 2592000
for: 1d
labels:
severity: critical
annotations:
summary: "{{ filename }} is expired in 30 days"
description: "{{ filename }} is expired in 30 days."
{% endfor %}

View File

@@ -0,0 +1,9 @@
# Additionally, a certificate and a key file are needed.
tls_server_config:
cert_file: "/etc/ssl/prometheus/prometheus.crt"
key_file: "/etc/ssl/prometheus/prometheus.key"
# Passwords are hashed with bcrypt: https://github.com/prometheus/exporter-toolkit/blob/master/docs/web-configuration.md#about-bcrypt
#basic_auth_users:
# alice: $2y$10$mDwo.lAisC94iLAyP81MCesa29IzH37oigHC/42V2pdJlUprsJPze
# bob: $2y$10$hLqFl9jSjoAAy95Z/zw8Ye8wkdMBM8c5Bn1ptYqP/AXyV0.oy0S8m

View File

@@ -0,0 +1,38 @@
[Quadlet]
DefaultDependencies=false
[Unit]
Description=Prometheus
After=network-online.target
Wants=network-online.target
[Container]
Image=docker.io/prom/prometheus:{{ version['containers']['prometheus'] }}
ContainerName=prometheus
HostName=prometheus
PublishPort=9090:9090/tcp
Volume=%h/containers/prometheus/data:/prometheus:rw
Volume=%h/containers/prometheus/etc:/etc/prometheus:ro
Volume=%h/containers/prometheus/ssl:/etc/ssl/prometheus:ro
Environment="TZ=Asia/Seoul"
Exec=--config.file=/etc/prometheus/prometheus.yaml \
--web.config.file=/etc/prometheus/web-config.yaml \
--web.enable-remote-write-receiver \
--storage.tsdb.path=/prometheus \
--storage.tsdb.retention.time=30d \
--storage.tsdb.retention.size=15GB \
--storage.tsdb.wal-compression
[Service]
Restart=always
RestartSec=10s
TimeoutStopSec=120
[Install]
WantedBy=default.target

View File

@@ -0,0 +1,26 @@
[Quadlet]
DefaultDependencies=false
[Unit]
Description=x509-Exporter
After=network-online.target
Wants=network-online.target
[Container]
Image=docker.io/enix/x509-certificate-exporter:{{ version['containers']['x509-exporter'] }}
ContainerName=x509-exporter
HostName=X509-exporter
Volume=%h/containers/x509-exporter/certs:/certs:ro
PublishPort=9793:9793
Exec=--listen-address :9793 --watch-dir=/certs
[Service]
Restart=always
RestartSec=10s
TimeoutStopSec=120
[Install]
WantedBy=default.target

View File

@@ -0,0 +1,299 @@
// The "name" and "job"
// job > prometheus: which exporter / loki: which service
// name > prometheus: which service
// service_name > loki: which service
// Metric
//// Metric output
prometheus.remote_write "prometheus" {
endpoint {
url = "https://{{ infra_uri['prometheus']['domain'] }}:{{ infra_uri['prometheus']['ports']['https'] }}/api/v1/write"
}
}
//// Metric relabel
////// For node metrics
prometheus.relabel "system_relabel" {
forward_to = [prometheus.remote_write.prometheus.receiver]
rule {
target_label = "instance"
replacement = "{{ node['name'] }}"
}
rule {
source_labels = ["job"]
regex = "integrations\\/(.+)"
target_label = "job"
replacement = "$1"
}
rule {
source_labels = ["name"]
regex = "(.+)\\.service"
target_label = "name"
replacement = "$1"
}
}
////// For service metrics
prometheus.relabel "default_label" {
forward_to = [prometheus.remote_write.prometheus.receiver]
rule {
target_label = "instance"
replacement = "{{ node['name'] }}"
}
rule {
source_labels = ["job"]
regex = "prometheus\\.scrape\\.(.+)"
target_label = "job"
replacement = "$1"
}
rule {
source_labels = ["job"]
regex = "integrations\\/(.+)"
target_label = "job"
replacement = "$1"
}
}
//// Metric input
////// For node metrics
prometheus.exporter.unix "system" {
enable_collectors = ["systemd", "cgroup", "processes", "cpu", "meminfo", "filesystem", "netdev"]
filesystem {
mount_points_exclude = "^/(sys|proc|dev|run|var/lib/docker/.+|var/lib/kubelet/.+)($|/)"
fs_types_exclude = "^(tmpfs|devtmpfs|devfs|iso9660|overlay|aufs|squashfs)$"
}
}
prometheus.scrape "system" {
targets = prometheus.exporter.unix.system.targets
forward_to = [prometheus.relabel.system_relabel.receiver]
}
{% if node['name'] == 'fw' %}
////// For Crowdsec metrics
prometheus.scrape "crowdsec" {
targets = [
{ "__address__" = "{{ infra_uri['crowdsec']['domain'] }}:6060", "job" = "crowdsec" },
{ "__address__" = "{{ infra_uri['crowdsec']['domain'] }}:60601", "job" = "crowdsec-bouncer" },
]
honor_labels = true
forward_to = [prometheus.relabel.default_label.receiver]
}
{% endif %}
{% if node['name'] == 'infra' %}
////// For postgresql metrics
prometheus.exporter.postgres "postgresql" {
data_source_names = [
"postgres://alloy@{{ infra_uri['postgresql']['domain'] }}:{{ infra_uri['postgresql']['ports']['tcp'] }}/postgres?sslmode=verify-full",
]
}
prometheus.scrape "postgresql" {
targets = prometheus.exporter.postgres.postgresql.targets
forward_to = [prometheus.relabel.default_label.receiver]
}
////// For certificate metrics
prometheus.scrape "x509" {
targets = [
{ "__address__" = "{{ node['name'] }}.ilnmors.internal:9793" },
]
forward_to = [prometheus.relabel.default_label.receiver]
}
{% endif %}
{% if node['name'] in ['infra', 'auth', 'app'] %}
////// For Input Caddy metrics
prometheus.scrape "caddy" {
targets = [
{ "__address__" = "{{ node['name'] }}.ilnmors.internal:443" },
]
scheme = "https"
forward_to = [prometheus.relabel.default_label.receiver]
}
{% endif %}
// Log
//// Logs output
loki.write "loki" {
endpoint {
url = "https://{{ infra_uri['loki']['domain'] }}:{{ infra_uri['loki']['ports']['https'] }}/loki/api/v1/push"
tenant_id = "ilnmors.internal"
}
}
//// Logs relabel
///// journal
loki.relabel "journal_relabel" {
forward_to = []
rule {
target_label = "instance"
replacement = "{{ node['name'] }}"
}
// Default value
rule {
target_label = "job"
replacement = "systemd-journal"
}
// if identifier exists
rule {
source_labels = ["__journal_syslog_identifier"]
regex = "(.+)"
target_label = "job"
replacement = "$1"
}
// if systemd_unit exists
rule {
source_labels = ["__journal__systemd_unit"]
regex = "(.+)\\.service"
target_label = "job"
replacement = "$1"
}
// if systemd_unit is "user@$UID"
rule {
source_labels = ["job"]
regex = "user@\\d+"
target_label = "job"
replacement = "systemd-journal"
}
// if systemd_user_unit exists
rule {
source_labels = ["__journal__systemd_user_unit"]
regex = "(.+)\\.service"
target_label = "job"
replacement = "$1"
}
rule {
source_labels = ["__journal_priority_keyword"]
target_label = "level"
}
}
{% if node['name'] == "fw" %}
loki.relabel "suricata_relabel" {
forward_to = [loki.process.suricata_json.receiver]
rule {
target_label = "instance"
replacement = "{{ node['name'] }}"
}
rule {
target_label = "level"
replacement = "info"
}
rule {
target_label = "job"
replacement = "suricata_eve"
}
}
{% endif %}
{% if node['name'] == "auth" %}
loki.relabel "caddy_relabel" {
forward_to = [loki.process.caddy_json.receiver]
rule {
target_label = "instance"
replacement = "{{ node['name'] }}"
}
rule {
target_label = "level"
replacement = "info"
}
rule {
target_label = "job"
replacement = "caddy_access"
}
}
{% endif %}
//// Log parser
///// journal
loki.process "journal_parser" {
forward_to = [loki.write.loki.receiver]
// Severity parsing
// If content of log includes "level" information, change the level
stage.logfmt {
mapping = {
"content_level" = "level",
}
}
stage.labels {
values = {
"level" = "content_level",
}
}
// Add this section as parser for each service
// common
stage.match {
selector = "{job=\"sshd\"}"
stage.regex {
expression = "Accepted \\w+ for (?P<user>\\w+) from (?P<ip>[\\d\\.]+)"
}
stage.labels {
values = { "user" = "" }
}
}
// infra
{% if node['name'] == 'infra' %}
// auth
{% elif node['name'] == 'auth' %}
// app
{% elif node['name'] == 'app' %}
{% endif %}
}
{% if node['name'] == "fw" %}
////// suricata
loki.process "suricata_json" {
forward_to = [loki.write.loki.receiver]
stage.json {
expressions = {
event_type = "event_type",
src_ip = "src_ip",
severity = "alert.severity",
}
}
stage.labels {
values = { event_type = "", severity = "" }
}
}
{% endif %}
{% if node['name'] == "auth" %}
////// caddy
loki.process "caddy_json" {
forward_to = [loki.write.loki.receiver]
stage.json {
expressions = {
status = "status",
method = "method",
remote_ip = "remote_ip",
duration = "duration",
}
}
stage.labels {
values = { status = "", method = "" }
}
}
{% endif %}
//// Logs input
////// journald
loki.source.journal "systemd" {
forward_to = [loki.process.journal_parser.receiver]
// Temporary labels such as "__journal__systemd_unit" are automatically removed when logs pass through "forward_to".
// To relabel using these temporary labels, the relabel_rules argument is necessary.
relabel_rules = loki.relabel.journal_relabel.rules
}
{% if node['name'] == 'fw' %}
////// suricata
local.file_match "suricata_logs" {
path_targets = [{ "__path__" = "/var/log/suricata/eve.json", "instance" = "{{ node['name'] }}" }]
}
loki.source.file "suricata" {
targets = local.file_match.suricata_logs.targets
forward_to = [loki.relabel.suricata_relabel.receiver]
}
{% endif %}
{% if node['name'] == 'auth' %}
////// caddy
local.file_match "caddy_logs" {
path_targets = [{ "__path__" = "/var/log/caddy/access.log", "instance" = "{{ node['name'] }}" }]
}
loki.source.file "caddy" {
targets = local.file_match.caddy_logs.targets
forward_to = [loki.relabel.caddy_relabel.receiver]
}
{% endif %}

View File

@@ -0,0 +1,5 @@
# Caddy logs
filenames:
- /var/log/caddy/access.log
labels:
type: caddy

View File

@@ -0,0 +1,5 @@
# Suricata logs
filenames:
- /var/log/suricata/eve.json
labels:
type: suricata

View File

@@ -0,0 +1,56 @@
mode: nftables
pid_dir: /var/run/
update_frequency: 10s
log_mode: file
log_dir: /var/log/
log_level: info
log_compression: true
log_max_size: 100
log_max_backups: 3
log_max_age: 30
api_url: "https://{{ infra_uri['crowdsec']['domain'] }}:{{ infra_uri['crowdsec']['ports']['https'] }}"
api_key: "{{ hostvars['console']['crowdsec']['bouncer']['fw'] }}"
insecure_skip_verify: false
disable_ipv6: false
deny_action: DROP
deny_log: false
supported_decisions_types:
- ban
#to change log prefix
#deny_log_prefix: "crowdsec: "
#to change the blacklists name
blacklists_ipv4: crowdsec-blacklists
blacklists_ipv6: crowdsec6-blacklists
#type of ipset to use
ipset_type: nethash
#if present, insert rule in those chains
#iptables_chains:
# - INPUT
# - FORWARD
# - OUTPUT
# - DOCKER-USER
## nftables > the sets crowdsec-blacklists_ipv4/ipv6 must exist in table inet filter
nftables:
ipv4:
enabled: true
set-only: true
family: inet
table: filter
chain: global
ipv6:
enabled: true
set-only: true
family: inet
table: filter
chain: global
# packet filter
pf:
# an empty string disables the anchor
anchor_name: ""
# Crowdsec firewall bouncer cannot use "[::]" yet
prometheus:
enabled: true
listen_addr: "::"
listen_port: 60601

View File

@@ -0,0 +1,11 @@
name: crowdsecurity/whitelists
description: "Whitelist console/admin hosts only"
whitelist:
reason: "trusted admin hosts"
ip:
- "127.0.0.1"
- "::1"
- "{{ hostvars['fw']['network4']['console']['client'] }}"
- "{{ hostvars['fw']['network4']['console']['wg'] }}"
- "{{ hostvars['fw']['network6']['console']['client'] }}"
- "{{ hostvars['fw']['network6']['console']['wg'] }}"

View File

@@ -0,0 +1,10 @@
[Unit]
Description=Crowdsec Rule Update Service
After=network-online.target
Wants=network-online.target
[Service]
Type=oneshot
ExecStart=/usr/bin/cscli hub update
ExecStart=/usr/bin/cscli hub upgrade
ExecStartPost=/bin/systemctl restart crowdsec

View File

@@ -0,0 +1,10 @@
[Unit]
Description=Daily Crowdsec Rule Update Timer
[Timer]
OnCalendar=*-*-* 05:00:00
Persistent=true
RandomizedDelaySec=300
[Install]
WantedBy=timers.target

View File

@@ -0,0 +1,66 @@
common:
daemonize: true
log_media: file
log_level: info
log_dir: /var/log/
log_max_size: 20
compress_logs: true
log_max_files: 10
working_dir: .
config_paths:
config_dir: /etc/crowdsec/
data_dir: /var/lib/crowdsec/data/
simulation_path: /etc/crowdsec/simulation.yaml
hub_dir: /var/lib/crowdsec/hub/
index_path: /var/lib/crowdsec/hub/.index.json
notification_dir: /etc/crowdsec/notifications/
plugin_dir: /usr/lib/crowdsec/plugins/
crowdsec_service:
acquisition_path: /etc/crowdsec/acquis.yaml
acquisition_dir: /etc/crowdsec/acquis.d
parser_routines: 1
cscli:
output: human
color: auto
db_config:
log_level: info
type: sqlite
db_path: /var/lib/crowdsec/data/crowdsec.db
#max_open_conns: 100
#user:
#password:
#db_name:
#host:
#port:
flush:
max_items: 5000
max_age: 7d
plugin_config:
user: nobody # plugin process would be run on behalf of this user
group: nogroup # plugin process would be run on behalf of this group
api:
client:
insecure_skip_verify: false
credentials_path: /etc/crowdsec/local_api_credentials.yaml
{% if node['name'] == 'fw' %}
server:
log_level: info
listen_uri: "[::]:8080"
profiles_path: /etc/crowdsec/profiles.yaml
console_path: /etc/crowdsec/console.yaml
online_client: # Central API credentials (to push signals and receive bad IPs)
credentials_path: /etc/crowdsec/online_api_credentials.yaml
trusted_ips: # IP ranges, or IPs which can have admin API access
- ::1
- 127.0.0.1
- {{ hostvars['fw']['network6']['subnet']['server'] }}
- {{ hostvars['fw']['network4']['subnet']['server'] }}
tls:
cert_file: /etc/crowdsec/ssl/crowdsec.crt
key_file: /etc/crowdsec/ssl/crowdsec.key
prometheus:
enabled: true
level: full
listen_addr: "[::]"
listen_port: 6060
{% endif %}

View File

@@ -0,0 +1,3 @@
url: https://{{ infra_uri['crowdsec']['domain'] }}:{{ infra_uri['crowdsec']['ports']['https'] }}
login: {{ node['name'] }}
password: {{ hostvars['console']['crowdsec']['machine'][node['name']] }}

View File

@@ -0,0 +1,49 @@
[Unit]
Description=Kopia backup service
Wants=network-online.target
After=network-online.target
[Service]
User=kopia
Group=kopia
Type=oneshot
# logging
StandardOutput=journal
StandardError=journal
CapabilityBoundingSet=CAP_DAC_READ_SEARCH
AmbientCapabilities=CAP_DAC_READ_SEARCH
ProtectSystem=strict
ProtectHome=tmpfs
InaccessiblePaths=/boot /root
{% if node['name'] == 'infra' %}
BindReadOnlyPaths=/home/infra/containers/postgresql/backups
{% elif node['name'] == 'app' %}
BindReadOnlyPaths=/home/app/data
{% endif %}
# In the root namespace, %u always resolves to 0
BindPaths=/etc/kopia
BindPaths=/etc/secrets/{{ kopia_uid }}
BindPaths=/var/cache/kopia
EnvironmentFile=/etc/secrets/{{ kopia_uid }}/kopia.env
ExecStartPre=/usr/bin/kopia repository connect server \
--url=https://{{ infra_uri['kopia']['domain'] }}:{{ infra_uri['kopia']['ports']['https'] }} \
--override-username={{ node['name'] }} \
--override-hostname={{ node['name'] }}.ilnmors.internal
{% if node['name'] == 'infra' %}
ExecStart=/usr/bin/kopia snapshot create \
/home/infra/containers/postgresql/backups
{% elif node['name'] == 'app' %}
ExecStart=/usr/bin/kopia snapshot create \
/home/app/data
{% endif %}
[Install]
WantedBy=multi-user.target

View File

@@ -0,0 +1,10 @@
[Unit]
Description=Daily Kopia backup timer
[Timer]
OnCalendar=*-*-* 03:00:00
Persistent=true
RandomizedDelaySec=300
[Install]
WantedBy=timers.target

View File

@@ -0,0 +1,5 @@
KOPIA_PASSWORD={{ hostvars['console']['kopia']['user'][node['name']] }}
KOPIA_CONFIG_PATH=/etc/kopia/repository.config
KOPIA_CACHE_DIRECTORY=/var/cache/kopia
KOPIA_LOG_DIR=/var/cache/kopia/logs
KOPIA_CHECK_FOR_UPDATES=false

View File

@@ -0,0 +1,68 @@
include "/etc/bind/acme.key";
options {
directory "/var/cache/bind";
listen-on port 53 { {{ hostvars['fw']['network4']['bind']['server'] }}; };
listen-on-v6 port 53 { {{ hostvars['fw']['network6']['bind']['server'] }}; };
// Authoritative DNS setting
allow-recursion { none; };
allow-transfer { none; };
allow-update { none; };
dnssec-validation no;
check-names master warn;
};
zone "ilnmors.internal." {
type primary;
file "/var/lib/bind/db.ilnmors.internal";
notify yes;
// ACME-01 challenge policy. It allows only TXT record of subdomain update.
update-policy {
grant acme-key subdomain ilnmors.internal. TXT;
};
};
zone "1.168.192.in-addr.arpa" {
type primary;
file "/var/lib/bind/db.1.168.192.in-addr.arpa";
notify yes;
};
zone "10.168.192.in-addr.arpa" {
type primary;
file "/var/lib/bind/db.10.168.192.in-addr.arpa";
notify yes;
};
zone "0.0.0.0.0.0.0.0.1.0.0.0.0.0.d.f.ip6.arpa" {
type primary;
file "/var/lib/bind/db.1.00df.ip6.arpa";
notify yes;
};
zone "0.0.0.0.0.0.0.0.0.1.0.0.0.0.d.f.ip6.arpa" {
type primary;
file "/var/lib/bind/db.10.00df.ip6.arpa";
notify yes;
};
zone "ilnmors.com." {
//split horizon dns
type primary;
file "/var/lib/bind/db.ilnmors.com";
notify yes;
};
logging {
channel default_log {
stderr;
severity info;
};
category default { default_log; };
category config { default_log; };
category queries { default_log; };
};

View File

@@ -0,0 +1,13 @@
$TTL 86400
@ IN SOA bind.ilnmors.internal. mail.ilnmors.internal. (
2026021201 ; serial
3600 ; refresh (1 hour)
1800 ; retry (30 minutes)
604800 ; expire (1 week)
86400 ; minimum (1 day)
)
IN NS bind.ilnmors.internal.
1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0 IN PTR fw.ilnmors.internal.
1.1.0.0.0.0.0.0.0.0.0.0.0.0.0.0 IN PTR nas.ilnmors.internal.
0.2.0.0.0.0.0.0.0.0.0.0.0.0.0.0 IN PTR console.ilnmors.internal.

View File

@@ -0,0 +1,13 @@
$TTL 86400
@ IN SOA bind.ilnmors.internal. mail.ilnmors.internal. (
2026021201 ; serial
3600 ; refresh (1 hour)
1800 ; retry (30 minutes)
604800 ; expire (1 week)
86400 ; minimum (1 day)
)
IN NS bind.ilnmors.internal.
1 IN PTR fw.ilnmors.internal.
11 IN PTR nas.ilnmors.internal.
20 IN PTR console.ilnmors.internal.

View File

@@ -0,0 +1,17 @@
$TTL 86400
@ IN SOA bind.ilnmors.internal. mail.ilnmors.internal. (
2026021201 ; serial
3600 ; refresh (1 hour)
1800 ; retry (30 minutes)
604800 ; expire (1 week)
86400 ; minimum (1 day)
)
IN NS bind.ilnmors.internal.
1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0 IN PTR fw.ilnmors.internal.
2.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0 IN PTR blocky.ilnmors.internal.
3.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0 IN PTR bind.ilnmors.internal.
0.1.0.0.0.0.0.0.0.0.0.0.0.0.0.0 IN PTR vmm.ilnmors.internal.
1.1.0.0.0.0.0.0.0.0.0.0.0.0.0.0 IN PTR infra.ilnmors.internal.
2.1.0.0.0.0.0.0.0.0.0.0.0.0.0.0 IN PTR auth.ilnmors.internal.
3.1.0.0.0.0.0.0.0.0.0.0.0.0.0.0 IN PTR app.ilnmors.internal.

View File

@@ -0,0 +1,17 @@
$TTL 86400
@ IN SOA bind.ilnmors.internal. mail.ilnmors.internal. (
2026021201 ; serial
3600 ; refresh (1 hour)
1800 ; retry (30 minutes)
604800 ; expire (1 week)
86400 ; minimum (1 day)
)
IN NS bind.ilnmors.internal.
1 IN PTR fw.ilnmors.internal.
2 IN PTR blocky.ilnmors.internal.
3 IN PTR bind.ilnmors.internal.
10 IN PTR vmm.ilnmors.internal.
11 IN PTR infra.ilnmors.internal.
12 IN PTR auth.ilnmors.internal.
13 IN PTR app.ilnmors.internal.

View File

@@ -0,0 +1,12 @@
$TTL 86400
@ IN SOA bind.ilnmors.internal. mail.ilnmors.internal. (
2026021201 ; serial
3600 ; refresh (1 hour)
1800 ; retry (30 minutes)
604800 ; expire (1 week)
86400 ; minimum (1 day)
)
IN NS bind.ilnmors.internal.
* IN A 192.168.10.12
* IN AAAA fd00:10::12

View File

@@ -0,0 +1,40 @@
$TTL 86400
@ IN SOA bind.ilnmors.internal. mail.ilnmors.internal. (
2026021201 ; serial
3600 ; refresh (1 hour)
1800 ; retry (30 minutes)
604800 ; expire (1 week)
86400 ; minimum (1 day)
)
IN NS bind.ilnmors.internal.
bind IN A 192.168.10.3
bind IN AAAA fd00:10::3
fw IN A 192.168.10.1
fw IN AAAA fd00:10::1
blocky IN A 192.168.10.2
blocky IN AAAA fd00:10::2
vmm IN A 192.168.10.10
vmm IN AAAA fd00:10::10
infra IN A 192.168.10.11
infra IN AAAA fd00:10::11
auth IN A 192.168.10.12
auth IN AAAA fd00:10::12
app IN A 192.168.10.13
app IN AAAA fd00:10::13
switch IN A 192.168.1.2
nas IN A 192.168.1.11
nas IN AAAA fd00:1::11
console IN A 192.168.1.20
console IN AAAA fd00:1::20
printer IN A 192.168.1.101
ntp IN CNAME fw.ilnmors.internal.
crowdsec IN CNAME fw.ilnmors.internal.
ca IN CNAME infra.ilnmors.internal.
postgresql IN CNAME infra.ilnmors.internal.
ldap IN CNAME infra.ilnmors.internal.
prometheus IN CNAME infra.ilnmors.internal.
loki IN CNAME infra.ilnmors.internal.
grafana IN CNAME infra.ilnmors.internal.
authelia IN CNAME auth.ilnmors.internal.
*.app IN CNAME app.ilnmors.internal.

View File

@@ -0,0 +1,23 @@
[Unit]
Description=Blocky DNS Resolver
Wants=network-online.target
After=network-online.target
[Service]
User=blocky
Group=blocky
CapabilityBoundingSet=CAP_NET_BIND_SERVICE
AmbientCapabilities=CAP_NET_BIND_SERVICE
ExecStart=/usr/local/bin/blocky --config /etc/blocky/config.yaml
Restart=always
RestartSec=5s
NoNewPrivileges=true
ProtectSystem=full
ProtectHome=true
[Install]
WantedBy=multi-user.target

View File

@@ -0,0 +1,67 @@
certFile: "/etc/blocky/ssl/blocky.crt"
keyFile: "/etc/blocky/ssl/blocky.key"
minTlsServeVersion: 1.2
connectIPVersion: dual
ports:
dns:
- "{{ hostvars['fw']['network4']['blocky']['server'] }}:53"
- "[{{ hostvars['fw']['network6']['blocky']['server'] }}]:53"
tls:
- "{{ hostvars['fw']['network4']['blocky']['server'] }}:853"
- "[{{ hostvars['fw']['network6']['blocky']['server'] }}]:853"
https:
- "{{ hostvars['fw']['network4']['blocky']['server'] }}:443"
- "[{{ hostvars['fw']['network6']['blocky']['server'] }}]:443"
log:
level: info
format: text
timestamp: true
privacy: false
upstreams:
groups:
default:
- "tcp-tls:1.1.1.1:853"
- "tcp-tls:1.0.0.1:853"
- "tcp-tls:[2606:4700:4700::1111]:853"
- "tcp-tls:[2606:4700:4700::1001]:853"
conditional:
fallbackUpstream: false
mapping:
ilnmors.internal: "{{ hostvars['fw']['network4']['bind']['server'] }}, {{ hostvars['fw']['network6']['bind']['server'] }}"
ilnmors.com: "{{ hostvars['fw']['network4']['bind']['server'] }}, {{ hostvars['fw']['network6']['bind']['server'] }}"
1.168.192.in-addr.arpa: "{{ hostvars['fw']['network4']['bind']['server'] }}, {{ hostvars['fw']['network6']['bind']['server'] }}"
10.168.192.in-addr.arpa: "{{ hostvars['fw']['network4']['bind']['server'] }}, {{ hostvars['fw']['network6']['bind']['server'] }}"
0.0.0.0.0.0.0.0.1.0.0.0.0.0.d.f.ip6.arpa: "{{ hostvars['fw']['network4']['bind']['server'] }}, {{ hostvars['fw']['network6']['bind']['server'] }}"
0.0.0.0.0.0.0.0.0.1.0.0.0.0.d.f.ip6.arpa: "{{ hostvars['fw']['network4']['bind']['server'] }}, {{ hostvars['fw']['network6']['bind']['server'] }}"
vpn.ilnmors.com: "tcp-tls:1.1.1.1:853, tcp-tls:1.0.0.1:853, tcp-tls:[2606:4700:4700::1111]:853, tcp-tls:[2606:4700:4700::1001]:853"
blocking:
blockType: nxDomain
denylists:
ads:
# [ General ]
- https://raw.githubusercontent.com/StevenBlack/hosts/master/hosts
- https://big.oisd.nl
- https://o0.pages.dev/Lite/domains.txt
# [ Korean regional ]
- https://raw.githubusercontent.com/yous/YousList/master/hosts.txt
# [ Telemetry ]
- https://raw.githubusercontent.com/crazy-max/WindowsSpyBlocker/master/data/hosts/spy.txt
- https://raw.githubusercontent.com/Perflyst/PiHoleBlocklist/master/SmartTV.txt
clientGroupsBlock:
default:
- ads
caching:
minTime: 5m
maxTime: 30m
cacheTimeNegative: 0m
prefetching: true
prometheus:
enable: false
path: /metrics

View File

@@ -0,0 +1,9 @@
# 1. Access Control (IPv4)
allow {{ hostvars['fw']['network4']['subnet']['client'] }}
allow {{ hostvars['fw']['network4']['subnet']['server'] }}
allow {{ hostvars['fw']['network4']['subnet']['wg'] }}
# 2. Access Control (IPv6)
allow {{ hostvars['fw']['network6']['subnet']['client'] }}
allow {{ hostvars['fw']['network6']['subnet']['server'] }}
allow {{ hostvars['fw']['network6']['subnet']['wg'] }}

View File

@@ -0,0 +1,15 @@
[Unit]
Description=DDNS Update Service
After=network-online.target
Wants=network-online.target
[Service]
Type=oneshot
StandardOutput=journal
StandardError=journal
EnvironmentFile=/etc/secrets/%U/ddns.env
# Run the script
ExecStart=/usr/local/bin/ddns.sh -d "ilnmors.com"

View File

@@ -0,0 +1,299 @@
#!/bin/bash
## Cloudflare DDNS updater; log output uses logfmt (refactoring)
# Synopsis: ddns.sh -d domain [-t <ttl>] [-p] [-r]
# NOTE(review): an earlier synopsis also listed [-c], but the getopts loop
# below only handles d/t/p/r — confirm whether -c was dropped intentionally.
# Default Information
DOMAIN=""            # zone apex to manage; required, set via -d
TTL=180              # TTL for the root A record, overridable via -t
C_TTL=86400          # fixed TTL used for the *.domain and www CNAME records
PROXIED="false"      # Cloudflare proxy flag, set to "true" by -p
DELETE_FLAG="false"  # when "true" (-r), delete the records instead of syncing
CURRENT_IP=""        # filled in later from the wan interface or a public-IP service
# These will be injected by systemd (EnvironmentFile=/etc/secrets/%U/ddns.env)
# ZONE_ID='.secret'
# API_KEY='.secret'
# usage() function
# Print the CLI synopsis to stdout and terminate with failure.
# Called on any argument-parsing error; never returns.
# Fix: the synopsis previously advertised [-c], which getopts does not
# handle anywhere in this script — removed to stop misleading operators.
usage() {
    echo "Usage: $0 -d \"domain\" [-t \"ttl\"] [-p] [-r]"
    echo "-d <domain>: Specify the domain to update"
    echo "-t <ttl>: Specify the TTL(Time to live)"
    echo "-p: Specify the cloudflare proxy to use"
    echo "-r: Delete the DNS record"
    exit 1
}
# Emit one logfmt-formatted line to stderr.
# $1 = level (e.g. "info", "error"), $2 = message text.
log() {
    local lvl="$1" text="$2"
    local now
    now=$(date -u +"%Y-%m-%dT%H:%M:%SZ")
    printf 'time="%s" level="%s" msg="%s" source="ddns.sh"\n' "$now" "$lvl" "$text" >&2
}
# getopts to get arguments
# Fix: the optstring now starts with ':' (silent mode). Without it, getopts
# prints its own error and always reports '?', so the ':' (missing argument)
# branch below was unreachable dead code.
while getopts ":d:t:pr" opt; do
    case $opt in
        d)
            DOMAIN="$OPTARG"
            ;;
        t)
            TTL="$OPTARG"
            ;;
        p)
            PROXIED="true"
            ;;
        r)
            DELETE_FLAG="true"
            ;;
        \?) # unknown options
            log "error" "Invalid option: -$OPTARG"
            usage
            ;;
        :) # parameter required option
            log "error" "Option -$OPTARG requires an argument."
            usage
            ;;
    esac
done
# Drop the parsed options; this script takes no positional parameters.
shift $((OPTIND - 1))
# Check necessary options
if [ -z "$DOMAIN" ]; then
    log "error" "-d option is required"
    usage
fi
if ! [[ "$TTL" =~ ^[0-9]+$ ]] || [ "$TTL" -le 0 ]; then
    log "error" "-t option (ttl) requires a number above 0."
    usage
fi
# Check necessary environment variables (Injected by systemd or shell)
if [ -z "$ZONE_ID" ]; then
    log "error" "ZONE_ID is required via environment variable."
    exit 1
fi
if [ -z "$API_KEY" ]; then
    log "error" "API_KEY is required via environment variable."
    exit 1
fi
# Check required external tools
if ! command -v curl >/dev/null; then
    log "error" "curl is required"
    exit 1
fi
if ! command -v jq >/dev/null; then
    log "error" "jq is required"
    exit 1
fi
# API options
URL="https://api.cloudflare.com/client/v4/zones/$ZONE_ID/dns_records"
CONTENT_TYPE="Content-Type: application/json"
AUTHORIZATION="Authorization: Bearer $API_KEY"
# Current IP: first IPv4 address on the "wan" interface.
# Fix: 'exit' after the first match prevents a multi-line CURRENT_IP when the
# interface carries more than one inet address (which would corrupt the JSON
# payloads built later).
CURRENT_IP=$(ip address show dev wan | awk '/inet /{print $2; exit}' | cut -d'/' -f1)
# Get current IP from an external service when the interface IP is private or empty
if [[ -z "$CURRENT_IP" || "$CURRENT_IP" =~ ^(10\.|172\.(1[6-9]|2[0-9]|3[0-1])\.|192\.168\.|127\.) ]]; then
    log "info" "IP from interface is private or empty. Fetching public IP..."
    CURRENT_IP=$(curl -sf "https://ifconfig.me") ||\
    CURRENT_IP=$(curl -sf "https://ifconfig.kr") ||\
    CURRENT_IP=$(curl -sf "https://api.ipify.org")
fi
if [ -z "$CURRENT_IP" ]; then
    # Fix: level was logged as "Error"; normalized to lowercase "error" to
    # match every other log call in this script.
    log "error" "Can't get an IP"
    exit 1
fi
# DNS functions
# get_dns_record <type> <name>
# Query the Cloudflare list-records endpoint filtered by type and name, and
# print the raw JSON response. Exits the whole script when the API reports
# success=false.
get_dns_record()
{
    local record_type="$1"
    local record_name="$2"
    local response
    response="$(
        curl -s "$URL?type=$record_type&name=$record_name"\
            -H "$CONTENT_TYPE"\
            -H "$AUTHORIZATION")"
    if [ "$(echo "$response" | jq -r '.success')" == "false" ]; then
        log "error" "Can't get dns record by $response"
        exit 1
    fi
    # Success: hand the JSON back to the caller.
    echo "$response"
}
# create_dns_record <type> <name> <ttl> <comment> <content>
# POST a new DNS record to Cloudflare (proxied flag taken from the global
# $PROXIED) and print the raw JSON response. Exits the whole script when the
# API reports success=false.
create_dns_record()
{
    local record_type="$1"
    local record_name="$2"
    local record_ttl="$3"
    local record_comment="$4"
    local record_content="$5"
    local response
    response="$(
        curl -s "$URL"\
            -X POST\
            -H "$CONTENT_TYPE"\
            -H "$AUTHORIZATION"\
            -d "{
            \"name\": \"$record_name\",
            \"ttl\": $record_ttl,
            \"type\": \"$record_type\",
            \"comment\": \"$record_comment\",
            \"content\": \"$record_content\",
            \"proxied\": $PROXIED
            }")"
    if [ "$(echo "$response" | jq -r '.success')" == "false" ]; then
        log "error" "Can't create dns record by $response"
        exit 1
    fi
    # Success: hand the JSON back to the caller.
    echo "$response"
}
# update_dns_record <type> <name> <ttl> <comment> <content> <record-id>
# PUT (replace) an existing DNS record identified by <record-id> (proxied flag
# taken from the global $PROXIED) and print the raw JSON response. Exits the
# whole script when the API reports success=false.
update_dns_record()
{
    local record_type="$1"
    local record_name="$2"
    local record_ttl="$3"
    local record_comment="$4"
    local record_content="$5"
    local record_id="$6"
    local response
    response="$(
        curl -s "$URL/$record_id"\
            -X PUT\
            -H "$CONTENT_TYPE"\
            -H "$AUTHORIZATION"\
            -d "{
            \"name\": \"$record_name\",
            \"ttl\": $record_ttl,
            \"type\": \"$record_type\",
            \"comment\": \"$record_comment\",
            \"content\": \"$record_content\",
            \"proxied\": $PROXIED
            }")"
    if [ "$(echo "$response" | jq -r '.success')" == "false" ]; then
        log "error" "Can't update dns record by $response"
        exit 1
    fi
    # Success: hand the JSON back to the caller.
    echo "$response"
}
# delete_dns_record <type> <record-id>
# DELETE the record with the given id and print the raw JSON response. Exits
# the whole script when the API reports success=false.
# $1 (record type) is still accepted for call-site symmetry with the other
# helpers, but the DELETE endpoint is addressed by id only, so the previously
# unused `local type="$1"` has been removed.
delete_dns_record()
{
    local id="$2"
    # Declared separately from the assignment so the `local` builtin does not
    # mask curl's exit status (ShellCheck SC2155).
    local response
    response=$(
        curl -s "$URL/$id"\
            -X DELETE\
            -H "$CONTENT_TYPE"\
            -H "$AUTHORIZATION"
    )
    if [ "$(echo "$response" | jq -r '.success')" == "false" ]; then
        log "error" "Can't delete dns record by $response"
        exit 1
    fi
    # Success: hand the JSON back to the caller.
    echo "$response"
}
# Get the current root A record plus the wildcard and www CNAME records.
# NOTE(review): Cloudflare documents record types in uppercase ("CNAME");
# lowercase "cname" is kept here because the script evidently works with it —
# confirm against the API before normalizing.
A_DNS_RECORD=$(get_dns_record "A" "$DOMAIN")
S_DNS_RECORD=$(get_dns_record "cname" "*.$DOMAIN")
W_DNS_RECORD=$(get_dns_record "cname" "www.$DOMAIN")
# Delete DNS records when -r was given, then exit.
# Fix throughout this section: the $*_DNS_RECORD expansions fed to jq are now
# quoted, so the JSON is not subject to word splitting and glob expansion.
if [ "$DELETE_FLAG" == "true" ]; then
    FLAG="false"
    if [ "$(echo "$A_DNS_RECORD" | jq -r '.result | length')" -eq 1 ]; then
        A_DNS_ID="$(echo "$A_DNS_RECORD" | jq -r '.result[0].id')"
        delete_dns_record "A" "$A_DNS_ID"
        log "info" "root DNS record is deleted"
        FLAG="true"
    fi
    if [ "$(echo "$S_DNS_RECORD" | jq -r '.result | length')" -eq 1 ]; then
        S_DNS_ID="$(echo "$S_DNS_RECORD" | jq -r '.result[0].id')"
        delete_dns_record "cname" "$S_DNS_ID"
        log "info" "sub DNS record is deleted"
        FLAG="true"
    fi
    if [ "$(echo "$W_DNS_RECORD" | jq -r '.result | length')" -eq 1 ]; then
        W_DNS_ID="$(echo "$W_DNS_RECORD" | jq -r '.result[0].id')"
        delete_dns_record "cname" "$W_DNS_ID"
        log "info" "www DNS record is deleted"
        FLAG="true"
    fi
    if [ "$FLAG" == "false" ]; then
        log "info" "Nothing is Deleted. There are no DNS records"
    fi
    exit
fi
# Create or update DNS A record
if [ "$(echo "$A_DNS_RECORD" | jq -r '.result | length')" -eq 1 ]; then # root DNS record exist
    A_DNS_ID="$(echo "$A_DNS_RECORD" | jq -r '.result[0].id')"
    A_DNS_CONTENT="$(echo "$A_DNS_RECORD" | jq -r '.result[0].content')"
    A_DNS_TTL="$(echo "$A_DNS_RECORD" | jq -r '.result[0].ttl')"
    A_DNS_PROXIED="$(echo "$A_DNS_RECORD" | jq -r '.result[0].proxied')"
    # Fix: $CURRENT_IP was unquoted inside [ ], and the obsolescent `-o` test
    # operator is replaced by separately quoted tests joined with ||.
    if [ "$A_DNS_CONTENT" != "$CURRENT_IP" ] || [ "$A_DNS_TTL" != "$TTL" ] || [ "$A_DNS_PROXIED" != "$PROXIED" ]; then
        update_dns_record "A" "$DOMAIN" "$TTL" "$(date "+%Y-%m-%d %H:%M:%S"): root domain from ddns.sh" "$CURRENT_IP" "$A_DNS_ID"
        log "info" "Root DNS record is successfully changed Domain: $DOMAIN IP: $A_DNS_CONTENT to $CURRENT_IP TTL: $A_DNS_TTL to $TTL proxied: $A_DNS_PROXIED to $PROXIED"
    else
        log "info" "Root DNS record is not changed Domain: $DOMAIN IP: $CURRENT_IP TTL: $TTL proxied: $PROXIED"
    fi
else # root DNS record does not exist
    create_dns_record "A" "$DOMAIN" "$TTL" "$(date "+%Y-%m-%d %H:%M:%S"): root domain from ddns.sh" "$CURRENT_IP"
    log "info" "Root DNS record is successfully created Domain: $DOMAIN IP: $CURRENT_IP TTL: $TTL proxied: $PROXIED"
fi
# Create or update DNS CNAME records
if [ "$(echo "$S_DNS_RECORD" | jq -r '.result | length')" -eq 1 ]; then # sub DNS record exist
    S_DNS_ID="$(echo "$S_DNS_RECORD" | jq -r '.result[0].id')"
    S_DNS_CONTENT="$(echo "$S_DNS_RECORD" | jq -r '.result[0].content')"
    S_DNS_TTL="$(echo "$S_DNS_RECORD" | jq -r '.result[0].ttl')"
    S_DNS_PROXIED="$(echo "$S_DNS_RECORD" | jq -r '.result[0].proxied')"
    if [ "$S_DNS_CONTENT" != "$DOMAIN" ] || [ "$S_DNS_TTL" != "$C_TTL" ] || [ "$S_DNS_PROXIED" != "$PROXIED" ]; then
        update_dns_record "cname" "*.$DOMAIN" "$C_TTL" "$(date "+%Y-%m-%d %H:%M:%S"): sub domain from ddns.sh" "$DOMAIN" "$S_DNS_ID"
        log "info" "Sub DNS record is successfully changed Domain: $S_DNS_CONTENT to *.$DOMAIN cname: $DOMAIN TTL: $S_DNS_TTL to $C_TTL proxied: $S_DNS_PROXIED to $PROXIED"
    else
        log "info" "Sub DNS record is not changed Domain: *.$DOMAIN cname: $DOMAIN TTL: $C_TTL proxied: $PROXIED"
    fi
else # sub DNS record does not exist
    create_dns_record "cname" "*.$DOMAIN" "$C_TTL" "$(date "+%Y-%m-%d %H:%M:%S"): sub domain from ddns.sh" "$DOMAIN"
    log "info" "Sub DNS record is successfully created Domain: *.$DOMAIN cname: $DOMAIN TTL: $C_TTL proxied: $PROXIED"
fi
if [ "$(echo "$W_DNS_RECORD" | jq -r '.result | length')" -eq 1 ]; then # www DNS record exist
    W_DNS_ID="$(echo "$W_DNS_RECORD" | jq -r '.result[0].id')"
    W_DNS_CONTENT="$(echo "$W_DNS_RECORD" | jq -r '.result[0].content')"
    W_DNS_TTL="$(echo "$W_DNS_RECORD" | jq -r '.result[0].ttl')"
    W_DNS_PROXIED="$(echo "$W_DNS_RECORD" | jq -r '.result[0].proxied')"
    if [ "$W_DNS_CONTENT" != "$DOMAIN" ] || [ "$W_DNS_TTL" != "$C_TTL" ] || [ "$W_DNS_PROXIED" != "$PROXIED" ]; then
        update_dns_record "cname" "www.$DOMAIN" "$C_TTL" "$(date "+%Y-%m-%d %H:%M:%S"): www domain from ddns.sh" "$DOMAIN" "$W_DNS_ID"
        log "info" "www DNS record is successfully changed Domain: $W_DNS_CONTENT to www.$DOMAIN cname: $DOMAIN TTL: $W_DNS_TTL to $C_TTL proxied: $W_DNS_PROXIED to $PROXIED"
    else
        log "info" "www DNS record is not changed Domain: www.$DOMAIN cname: $DOMAIN TTL: $C_TTL proxied: $PROXIED"
    fi
else # www DNS record does not exist
    create_dns_record "cname" "www.$DOMAIN" "$C_TTL" "$(date "+%Y-%m-%d %H:%M:%S"): www domain from ddns.sh" "$DOMAIN"
    log "info" "www DNS record is successfully created Domain: www.$DOMAIN cname: $DOMAIN TTL: $C_TTL proxied: $PROXIED"
fi

View File

@@ -0,0 +1,10 @@
[Unit]
Description=Run DDNS update service every 5 minutes
[Timer]
OnBootSec=1min
OnUnitActiveSec=5min
Persistent=true
[Install]
WantedBy=timers.target

View File

@@ -0,0 +1,105 @@
{
"Dhcp4": {
"subnet4": [
{
"subnet": "{{ hostvars['fw']['network4']['subnet']['client'] }}",
"pools" : [
{
"pool": "192.168.1.254-192.168.1.254"
}
],
"option-data": [
{
"name": "routers",
"data": "{{ hostvars['fw']['network4']['firewall']['client'] }}"
},
{
"name": "domain-name-servers",
"data": "{{ hostvars['fw']['network4']['blocky']['server'] }}"
},
{
"name": "domain-name",
"data": "ilnmors.internal."
}
],
"reservations": [
{
"hw-address": "58:04:4f:18:6c:5e",
"ip-address": "{{ hostvars['fw']['network4']['switch']['client'] }}",
"hostname": "switch"
},
{
"hw-address": "90:09:d0:65:a9:db",
"ip-address": "{{ hostvars['fw']['network4']['nas']['client'] }}",
"hostname": "nas"
},
{
"hw-address": "d8:e2:df:ff:1b:d5",
"ip-address": "{{ hostvars['fw']['network4']['console']['client'] }}",
"hostname": "surface"
},
{
"hw-address": "38:ca:84:94:5e:06",
"ip-address": "{{ hostvars['fw']['network4']['printer']['client'] }}",
"hostname": "printer"
}
],
"id": 1,
"interface": "client"
},
{
"subnet": "{{ hostvars['fw']['network4']['subnet']['user'] }}",
"pools" : [
{
"pool": "192.168.20.2-192.168.20.254"
}
],
"option-data": [
{
"name": "routers",
"data": "{{ hostvars['fw']['network4']['firewall']['user'] }}"
},
{
"name": "domain-name-servers",
"data": "{{ hostvars['fw']['network4']['blocky']['server'] }}"
},
{
"name": "domain-name",
"data": "ilnmors.internal."
}
],
"id": 2,
"interface": "user"
}
],
"interfaces-config": {
"interfaces": [
"client",
"user"
],
"dhcp-socket-type": "raw",
"service-sockets-max-retries": 5,
"service-sockets-require-all": true
},
"renew-timer": 1000,
"rebind-timer": 2000,
"valid-lifetime": 4000,
"loggers": [
{
"name": "kea-dhcp4",
"output_options": [
{
"output": "stdout"
}
],
"severity": "INFO"
}
],
"lease-database": {
"type": "memfile",
"persist": true,
"name": "/var/lib/kea/kea-leases4.csv",
"lfc-interval": 3600
}
}
}

View File

@@ -0,0 +1,7 @@
# Stream events
2210010 # SURICATA STREAM 3way handshake wrong seq wrong ack / TCP 3-way handshake in local networks
2210021
2210045
# Wrong thread warning
2210059

View File

@@ -0,0 +1,518 @@
%YAML 1.1
---
suricata-version: "7.0"
vars:
address-groups:
HOME_NET: "{{ hostvars['fw']['suricata']['home_net'] }}"
EXTERNAL_NET: "!$HOME_NET"
HTTP_SERVERS: "$HOME_NET"
SMTP_SERVERS: "$HOME_NET"
SQL_SERVERS: "$HOME_NET"
DNS_SERVERS: "$HOME_NET"
TELNET_SERVERS: "$HOME_NET"
AIM_SERVERS: "$EXTERNAL_NET"
DC_SERVERS: "$HOME_NET"
DNP3_SERVER: "$HOME_NET"
DNP3_CLIENT: "$HOME_NET"
MODBUS_CLIENT: "$HOME_NET"
MODBUS_SERVER: "$HOME_NET"
ENIP_CLIENT: "$HOME_NET"
ENIP_SERVER: "$HOME_NET"
port-groups:
HTTP_PORTS: "80"
SHELLCODE_PORTS: "!80"
ORACLE_PORTS: 1521
SSH_PORTS: 22
DNP3_PORTS: 20000
MODBUS_PORTS: 502
FILE_DATA_PORTS: "[$HTTP_PORTS,110,143]"
FTP_PORTS: 21
GENEVE_PORTS: 6081
VXLAN_PORTS: 4789
TEREDO_PORTS: 3544
default-log-dir: /var/log/suricata/
stats:
enabled: yes
interval: 8
plugins:
outputs:
- fast:
enabled: yes
filename: fast.log
append: yes
- eve-log:
enabled: yes
filetype: regular
filename: eve.json
pcap-file: false
community-id: true
community-id-seed: 0
xff:
enabled: no
mode: extra-data
deployment: reverse
header: X-Forwarded-For
types:
- alert:
tagged-packets: yes
- frame:
enabled: no
- anomaly:
enabled: yes
types:
- http:
extended: yes
- dns:
- tls:
extended: yes
- files:
force-magic: no
- smtp:
- ftp
- rdp
- nfs
- smb
- tftp
- ike
- dcerpc
- krb5
- bittorrent-dht
- snmp
- rfb
- sip
- quic:
- dhcp:
enabled: yes
extended: no
- ssh
- mqtt:
- http2
- pgsql:
enabled: no
- stats:
totals: yes
threads: no
deltas: no
- flow
- http-log:
enabled: no
filename: http.log
append: yes
- tls-log:
enabled: no
filename: tls.log
append: yes
- tls-store:
enabled: no
- pcap-log:
enabled: no
filename: log.pcap
limit: 1000mb
max-files: 2000
compression: none
mode: normal # normal, multi or sguil.
use-stream-depth: no
honor-pass-rules: no
- alert-debug:
enabled: no
filename: alert-debug.log
append: yes
- stats:
enabled: yes
filename: stats.log
append: yes
totals: yes
threads: no
- syslog:
enabled: no
facility: local5
- file-store:
version: 2
enabled: no
xff:
enabled: no
mode: extra-data
deployment: reverse
header: X-Forwarded-For
- tcp-data:
enabled: no
type: file
filename: tcp-data.log
- http-body-data:
enabled: no
type: file
filename: http-data.log
- lua:
enabled: no
scripts:
logging:
default-log-level: notice
default-output-filter:
outputs:
- console:
enabled: yes
- file:
enabled: yes
level: info
filename: suricata.log
- syslog:
enabled: no
facility: local5
format: "[%i] <%d> -- "
af-packet:
{% for iface in hostvars['fw']['suricata']['interfaces'] %}
- interface: {{ iface }}
cluster-id: {{ 99 - loop.index0 }}
cluster-type: cluster_flow
defrag: yes
use-mmap: yes
tpacket-v3: yes
checksum-checks: no
{% endfor %}
app-layer:
protocols:
telnet:
enabled: yes
rfb:
enabled: yes
detection-ports:
dp: 5900, 5901, 5902, 5903, 5904, 5905, 5906, 5907, 5908, 5909
mqtt:
enabled: yes
krb5:
enabled: yes
bittorrent-dht:
enabled: yes
snmp:
enabled: yes
ike:
enabled: yes
tls:
enabled: yes
detection-ports:
dp: 443
pgsql:
enabled: no
stream-depth: 0
dcerpc:
enabled: yes
ftp:
enabled: yes
rdp:
ssh:
enabled: yes
http2:
enabled: yes
smtp:
enabled: yes
raw-extraction: no
mime:
decode-mime: yes
decode-base64: yes
decode-quoted-printable: yes
header-value-depth: 2000
extract-urls: yes
inspected-tracker:
content-limit: 100000
content-inspect-min-size: 32768
content-inspect-window: 4096
imap:
enabled: detection-only
smb:
enabled: yes
detection-ports:
dp: 139, 445
nfs:
enabled: yes
tftp:
enabled: yes
dns:
tcp:
enabled: yes
detection-ports:
dp: 53
udp:
enabled: yes
detection-ports:
dp: 53
http:
enabled: yes
libhtp:
default-config:
personality: IDS
request-body-limit: 100kb
response-body-limit: 100kb
request-body-minimal-inspect-size: 32kb
request-body-inspect-window: 4kb
response-body-minimal-inspect-size: 40kb
response-body-inspect-window: 16kb
response-body-decompress-layer-limit: 2
http-body-inline: auto
swf-decompression:
enabled: no
type: both
compress-depth: 100kb
decompress-depth: 100kb
double-decode-path: no
double-decode-query: no
server-config:
modbus:
enabled: no
detection-ports:
dp: 502
stream-depth: 0
dnp3:
enabled: no
detection-ports:
dp: 20000
enip:
enabled: no
detection-ports:
dp: 44818
sp: 44818
ntp:
enabled: yes
quic:
enabled: yes
dhcp:
enabled: yes
sip:
asn1-max-frames: 256
datasets:
defaults:
limits:
rules:
security:
limit-noproc: true
landlock:
enabled: no
directories:
read:
- /usr/
- /etc/
- /etc/suricata/
lua:
coredump:
max-dump: unlimited
unix-command:
enabled: yes
filename: /var/run/suricata-command.socket
legacy:
uricontent: enabled
exception-policy: auto
engine-analysis:
rules-fast-pattern: yes
rules: yes
pcre:
match-limit: 3500
match-limit-recursion: 1500
host-os-policy:
windows: [0.0.0.0/0]
bsd: []
bsd-right: []
old-linux: []
linux: []
old-solaris: []
solaris: []
hpux10: []
hpux11: []
irix: []
macos: []
vista: []
windows2k3: []
defrag:
memcap: 32mb
hash-size: 65536
trackers: 65535 # number of defragmented flows to follow
max-frags: 65535 # number of fragments to keep (higher than trackers)
prealloc: yes
timeout: 60
flow:
memcap: 128mb
hash-size: 65536
prealloc: 10000
emergency-recovery: 30
vlan:
use-for-tracking: true
livedev:
use-for-tracking: true
flow-timeouts:
default:
new: 30
established: 300
closed: 0
bypassed: 100
emergency-new: 10
emergency-established: 100
emergency-closed: 0
emergency-bypassed: 50
tcp:
new: 60
established: 600
closed: 60
bypassed: 100
emergency-new: 5
emergency-established: 100
emergency-closed: 10
emergency-bypassed: 50
udp:
new: 30
established: 300
bypassed: 100
emergency-new: 10
emergency-established: 100
emergency-bypassed: 50
icmp:
new: 30
established: 300
bypassed: 100
emergency-new: 10
emergency-established: 100
emergency-bypassed: 50
stream:
memcap: 64mb
checksum-validation: yes
inline: auto
reassembly:
memcap: 256mb
depth: 1mb
toserver-chunk-size: 2560
toclient-chunk-size: 2560
randomize-chunk-size: yes
host:
hash-size: 4096
prealloc: 1000
memcap: 32mb
decoder:
teredo:
enabled: true
ports: $TEREDO_PORTS
vxlan:
enabled: true
ports: $VXLAN_PORTS
geneve:
enabled: true
ports: $GENEVE_PORTS
detect:
profile: medium
custom-values:
toclient-groups: 3
toserver-groups: 25
sgh-mpm-context: auto
prefilter:
default: mpm
grouping:
profiling:
grouping:
dump-to-disk: false
include-rules: false
include-mpm-stats: false
mpm-algo: auto
threading:
set-cpu-affinity: no
cpu-affinity:
- management-cpu-set:
cpu: [ 0 ]
- receive-cpu-set:
cpu: [ 0 ]
- worker-cpu-set:
cpu: [ "all" ]
mode: "exclusive"
prio:
low: [ 0 ]
medium: [ "1-2" ]
high: [ 3 ]
default: "medium"
detect-thread-ratio: 1.0
luajit:
states: 128
profiling:
rules:
enabled: yes
filename: rule_perf.log
append: yes
limit: 10
json: yes
keywords:
enabled: yes
filename: keyword_perf.log
append: yes
prefilter:
enabled: yes
filename: prefilter_perf.log
append: yes
rulegroups:
enabled: yes
filename: rule_group_perf.log
append: yes
packets:
enabled: yes
filename: packet_stats.log
append: yes
csv:
enabled: no
filename: packet_stats.csv
locks:
enabled: no
filename: lock_stats.log
append: yes
pcap-log:
enabled: no
filename: pcaplog_stats.log
append: yes
nfq:
nflog:
- group: 2
buffer-size: 18432
- group: default
qthreshold: 1
qtimeout: 100
max-size: 20000
capture:
ipfw:
napatech:
default-rule-path: /var/lib/suricata/rules
rule-files:
- suricata.rules
classification-file: /etc/suricata/classification.config
reference-config-file: /etc/suricata/reference.config

View File

@@ -0,0 +1,9 @@
[Unit]
Description=Suricata Rule Update Service
After=network-online.target
Wants=network-online.target
[Service]
Type=oneshot
ExecStart=/usr/bin/suricata-update --disable-conf /etc/suricata/disable.conf --enable-conf /etc/suricata/enable.conf --local /etc/suricata/rules/local.rules
ExecStartPost=/usr/bin/systemctl reload suricata

View File

@@ -0,0 +1,10 @@
[Unit]
Description=Daily Suricata Rule Update Timer
[Timer]
OnCalendar=*-*-* 06:00:00
Persistent=true
RandomizedDelaySec=300
[Install]
WantedBy=timers.target

View File

@@ -0,0 +1,79 @@
#cloud-config
bootcmd:
- groupadd -g 2000 svadmins || true
hostname: {{ hostvars[target_vm]['vm']['name'] }}
disable_root: true
users:
- name: {{ target_vm }}
uid: {{ hostvars[target_vm]['node']['uid'] }}
gecos: {{ target_vm }}
primary_group: svadmins
groups: sudo
lock_passwd: false
passwd: {{ hostvars['console']['sudo']['hash'][target_vm] }}
shell: /bin/bash
write_files:
- path: /etc/ssh/local_ssh_ca.pub
content: |
{{ hostvars['console']['ssh']['ca']['pub'] | trim }}
owner: "root:root"
permissions: "0644"
- path: /etc/ssh/sshd_config.d/ssh_ca.conf
content: |
TrustedUserCAKeys /etc/ssh/local_ssh_ca.pub
owner: "root:root"
permissions: "0644"
- path: /etc/ssh/sshd_config.d/prohibit_root.conf
content: |
PermitRootLogin no
owner: "root:root"
permissions: "0644"
- path: /etc/apt/sources.list.d/debian.sources
content: |
Types: deb deb-src
URIs: https://deb.debian.org/debian
Suites: trixie trixie-updates trixie-backports
Components: main contrib non-free non-free-firmware
Signed-By: /usr/share/keyrings/debian-archive-keyring.gpg
Types: deb deb-src
URIs: https://deb.debian.org/debian-security
Suites: trixie-security
Components: main contrib non-free non-free-firmware
Signed-By: /usr/share/keyrings/debian-archive-keyring.gpg
owner: "root:root"
permissions: "0644"
{% if target_vm == 'fw' %}
- path: /etc/sysctl.d/ipforward.conf
content: |
net.ipv4.ip_forward = 1
net.ipv6.conf.all.forwarding = 1
owner: "root:root"
permissions: "0644"
{% endif %}
{% set net_config_dir = 'fw' if target_vm == 'fw' else 'common' %}
{% for file_path in query('fileglob', hostvars['console']['node']['config_path'] + '/node/' + net_config_dir + '/networkd/*') | sort %}
  - path: /etc/systemd/network/{{ file_path | basename }}
content: |
{{ lookup('template', file_path) | indent(8) | trim }}
owner: "root:root"
permissions: "0644"
{% endfor %}
runcmd:
- update-initramfs -u
- systemctl disable networking
- systemctl enable systemd-networkd
- systemctl enable getty@ttyS0
- sync
power_state:
delay: "now"
mode: reboot
message: "rebooting after cloud-init configuration"
timeout: 30

View File

@@ -0,0 +1,23 @@
[Unit]
Description=app vm
After=network-online.target libvirtd.service fw.service infra.service auth.service
Wants=fw.service infra.service auth.service
[Service]
Type=oneshot
RemainAfterExit=yes
TimeoutStopSec=360
ExecStart=/usr/bin/virsh -c qemu:///system start app
ExecStartPost=/bin/sleep 30
ExecStop=/bin/bash -c '\
/usr/bin/virsh -c qemu:///system shutdown app; \
    while /usr/bin/virsh -c qemu:///system list --state-running --name | grep -qx "app"; do \
echo "Waiting for app to shutdown..."; \
sleep 2; \
done'
[Install]
WantedBy=default.target

View File

@@ -0,0 +1,23 @@
[Unit]
Description=auth vm
After=network-online.target libvirtd.service fw.service infra.service
Wants=fw.service infra.service
[Service]
Type=oneshot
RemainAfterExit=yes
TimeoutStopSec=360
ExecStart=/usr/bin/virsh -c qemu:///system start auth
ExecStartPost=/bin/sleep 30
ExecStop=/bin/bash -c '\
/usr/bin/virsh -c qemu:///system shutdown auth; \
    while /usr/bin/virsh -c qemu:///system list --state-running --name | grep -qx "auth"; do \
echo "Waiting for auth to shutdown..."; \
sleep 2; \
done'
[Install]
WantedBy=default.target

View File

@@ -0,0 +1,23 @@
[Unit]
Description=fw vm
After=network-online.target libvirtd.service
Wants=network-online.target
[Service]
Type=oneshot
RemainAfterExit=yes
TimeoutStopSec=360
ExecStart=/usr/bin/virsh -c qemu:///system start fw
ExecStartPost=/bin/sleep 30
ExecStop=/bin/bash -c '\
/usr/bin/virsh -c qemu:///system shutdown fw; \
    while /usr/bin/virsh -c qemu:///system list --state-running --name | grep -qx "fw"; do \
echo "Waiting for fw to shutdown..."; \
sleep 2; \
done'
[Install]
WantedBy=default.target

View File

@@ -0,0 +1,23 @@
[Unit]
Description=infra vm
After=network-online.target libvirtd.service fw.service
Wants=fw.service
[Service]
Type=oneshot
RemainAfterExit=yes
TimeoutStopSec=360
ExecStart=/usr/bin/virsh -c qemu:///system start infra
ExecStartPost=/bin/sleep 30
ExecStop=/bin/bash -c '\
/usr/bin/virsh -c qemu:///system shutdown infra; \
    while /usr/bin/virsh -c qemu:///system list --state-running --name | grep -qx "infra"; do \
echo "Waiting for infra to shutdown..."; \
sleep 2; \
done'
[Install]
WantedBy=default.target

View File

@@ -0,0 +1,19 @@
<!-- LAN network for Hypervisor -->
<!-- lan-net.xml -->
<network>
<name>lan-net</name>
<forward mode='bridge'/>
<bridge name='br1'/>
<portgroup name='vlan-trunk'>
<vlan trunk='yes'>
<tag id='1' nativeMode='untagged'/>
<tag id='10'/>
<tag id='20'/>
</vlan>
</portgroup>
<portgroup name='vlan10-access'>
<vlan>
<tag id='10'/>
</vlan>
</portgroup>
</network>

View File

@@ -0,0 +1,7 @@
<!-- WAN network for Hypervisor -->
<!-- wan-net.xml -->
<network>
<name>wan-net</name>
<forward mode='bridge'/>
<bridge name='br0'/>
</network>

View File

@@ -0,0 +1,8 @@
<!-- Storage pool define -->
<!-- images.xml -->
<pool type='dir'>
<name>images-pool</name>
<target>
<path>/var/lib/libvirt/images</path>
</target>
</pool>

View File

@@ -0,0 +1,8 @@
<!-- Storage pool define -->
<!-- seeds-pool.xml -->
<pool type='dir'>
<name>seeds-pool</name>
<target>
<path>/var/lib/libvirt/seeds</path>
</target>
</pool>

View File

@@ -0,0 +1,78 @@
<domain type='kvm'>
<name>{{ hostvars[target_vm]['vm']['name'] }}</name>
<memory unit='GiB'>{{ hostvars[target_vm]['vm']['memory'] }}</memory>
<vcpu placement='static'>{{ hostvars[target_vm]['vm']['cpu'] }}</vcpu>
<cputune>
<shares>{{ hostvars[target_vm]['vm']['shares'] }}</shares>
</cputune>
<os firmware='efi'>
<type arch='x86_64' machine='pc-q35-10.0'>hvm</type>
<firmware>
<feature enabled='yes' name='enrolled-keys'/>
<feature enabled='yes' name='secure-boot'/>
</firmware>
<loader readonly='yes' secure='yes' type='pflash' format='raw'>/usr/share/OVMF/OVMF_CODE_4M.ms.fd</loader>
<nvram template='/usr/share/OVMF/OVMF_VARS_4M.ms.fd' templateFormat='raw' format='raw'>/var/lib/libvirt/qemu/nvram/{{ hostvars[target_vm]['vm']['name'] }}_VARS.fd</nvram>
<boot dev='hd'/>
</os>
<features>
<acpi/>
<apic/>
</features>
<cpu mode='host-passthrough' check='none' migratable='on'/>
<on_poweroff>destroy</on_poweroff>
<on_reboot>restart</on_reboot>
<on_crash>destroy</on_crash>
<devices>
<disk type='file' device='disk'>
<driver name='qemu' type='qcow2' discard='unmap' />
<source file='/var/lib/libvirt/images/{{ hostvars[target_vm]['vm']['name'] }}.qcow2' />
<target dev='vda' bus='virtio' />
</disk>
<disk type='file' device='cdrom'>
<driver name='qemu' type='raw' cache='none' />
<source file='/var/lib/libvirt/seeds/{{ hostvars[target_vm]['vm']['name'] }}_seed.iso' />
<target dev='sdb' bus='sata' />
<readonly/>
</disk>
{% if target_vm == 'fw' %}
<interface type='network'>
<mac address='{{ hostvars[target_vm]['vm']['wan_mac'] }}' />
<source network='{{ hostvars[target_vm]['vm']['wan_net'] }}' />
<model type='virtio' />
</interface>
<interface type='network'>
<mac address='{{ hostvars[target_vm]['vm']['lan_mac'] }}' />
<source network='{{ hostvars[target_vm]['vm']['lan_net'] }}' portgroup='{{ hostvars[target_vm]['vm']['lan_group'] }}' />
<model type='virtio' />
</interface>
{% else %}
<interface type='network'>
<mac address='{{ hostvars[target_vm]['vm']['lan_mac'] }}' />
<source network='{{ hostvars[target_vm]['vm']['lan_net'] }}' portgroup='{{ hostvars[target_vm]['vm']['lan_group'] }}' />
<model type='virtio' />
</interface>
{% endif %}
<console type='pty'>
<target type='serial' port='0' />
</console>
<channel type='unix'>
<target type='virtio' name='org.qemu.guest_agent.0' />
<address type='virtio-serial' controller='0' bus='0' port='1' />
</channel>
{% if target_vm == 'app' %}
{% for device in hostvars[target_vm]['vm']['pass_through'].values() %}
<hostdev mode='subsystem' type='pci' managed='yes'>
<driver name='vfio'/>
<source>
<address type='pci' domain='{{ device['domain'] }}' bus='{{ device['bus'] }}' slot='{{ device['slot'] }}' function='{{ device['function'] }}'/>
</source>
<address type='pci' domain='{{ device['domain'] }}' bus='{{ device['bus'] }}' slot='{{ device['slot'] }}' function='{{ device['function'] }}'/>
</hostdev>
{% endfor %}
{% endif %}
<tpm model='tpm-crb'>
<backend type='emulator' version='2.0'/>
</tpm>
</devices>
</domain>