1.0.0 Release IaaS

This commit is contained in:
2026-03-15 04:41:02 +09:00
commit a7365da431
292 changed files with 36059 additions and 0 deletions

View File

@@ -0,0 +1,299 @@
// The "name" and "job"
// job > prometheus: which exporter / loki: which service
// name > prometheus: which service
// service_name > loki: which service
// Metric
//// Metric output
prometheus.remote_write "prometheus" {
endpoint {
url = "https://{{ infra_uri['prometheus']['domain'] }}:{{ infra_uri['prometheus']['ports']['https'] }}/api/v1/write"
}
}
//// Metric relabel
////// For node metrics
prometheus.relabel "system_relabel" {
forward_to = [prometheus.remote_write.prometheus.receiver]
rule {
target_label = "instance"
replacement = "{{ node['name'] }}"
}
rule {
source_labels = ["job"]
regex = "integrations\\/(.+)"
target_label = "job"
replacement = "$1"
}
rule {
source_labels = ["name"]
regex = "(.+)\\.service"
target_label = "name"
replacement = "$1"
}
}
////// For service metrics
prometheus.relabel "default_label" {
forward_to = [prometheus.remote_write.prometheus.receiver]
rule {
target_label = "instance"
replacement = "{{ node['name'] }}"
}
rule {
source_labels = ["job"]
regex = "prometheus\\.scrape\\.(.+)"
target_label = "job"
replacement = "$1"
}
rule {
source_labels = ["job"]
regex = "integrations\\/(.+)"
target_label = "job"
replacement = "$1"
}
}
//// Metric input
////// For node metrics
prometheus.exporter.unix "system" {
enable_collectors = ["systemd", "cgroup", "processes", "cpu", "meminfo", "filesystem", "netdev"]
filesystem {
mount_points_exclude = "^/(sys|proc|dev|run|var/lib/docker/.+|var/lib/kubelet/.+)($|/)"
fs_types_exclude = "^(tmpfs|devtmpfs|devfs|iso9660|overlay|aufs|squashfs)$"
}
}
prometheus.scrape "system" {
targets = prometheus.exporter.unix.system.targets
forward_to = [prometheus.relabel.system_relabel.receiver]
}
{% if node['name'] == 'fw' %}
////// For Crowdsec metrics
prometheus.scrape "crowdsec" {
targets = [
{ "__address__" = "{{ infra_uri['crowdsec']['domain'] }}:6060", "job" = "crowdsec" },
{ "__address__" = "{{ infra_uri['crowdsec']['domain'] }}:60601", "job" = "crowdsec-bouncer" },
]
honor_labels = true
forward_to = [prometheus.relabel.default_label.receiver]
}
{% endif %}
{% if node['name'] == 'infra' %}
////// For postgresql metrics
prometheus.exporter.postgres "postgresql" {
data_source_names = [
"postgres://alloy@{{ infra_uri['postgresql']['domain'] }}:{{ infra_uri['postgresql']['ports']['tcp'] }}/postgres?sslmode=verify-full",
]
}
prometheus.scrape "postgresql" {
targets = prometheus.exporter.postgres.postgresql.targets
forward_to = [prometheus.relabel.default_label.receiver]
}
////// For certificate metrics
prometheus.scrape "x509" {
targets = [
{ "__address__" = "{{ node['name'] }}.ilnmors.internal:9793" },
]
forward_to = [prometheus.relabel.default_label.receiver]
}
{% endif %}
{% if node['name'] in ['infra', 'auth', 'app'] %}
////// For Caddy metrics
prometheus.scrape "caddy" {
targets = [
{ "__address__" = "{{ node['name'] }}.ilnmors.internal:443" },
]
scheme = "https"
forward_to = [prometheus.relabel.default_label.receiver]
}
{% endif %}
// Log
//// Logs output
loki.write "loki" {
endpoint {
url = "https://{{ infra_uri['loki']['domain'] }}:{{ infra_uri['loki']['ports']['https'] }}/loki/api/v1/push"
tenant_id = "ilnmors.internal"
}
}
//// Logs relabel
///// journal
loki.relabel "journal_relabel" {
forward_to = []
rule {
target_label = "instance"
replacement = "{{ node['name'] }}"
}
// Default value
rule {
target_label = "job"
replacement = "systemd-journal"
}
// if identifier exists
rule {
source_labels = ["__journal_syslog_identifier"]
regex = "(.+)"
target_label = "job"
replacement = "$1"
}
// if systemd_unit exists
rule {
source_labels = ["__journal__systemd_unit"]
regex = "(.+)\\.service"
target_label = "job"
replacement = "$1"
}
// if systemd_unit is "user@$UID"
rule {
source_labels = ["job"]
regex = "user@\\d+"
target_label = "job"
replacement = "systemd-journal"
}
// if systemd_user_unit exists
rule {
source_labels = ["__journal__systemd_user_unit"]
regex = "(.+)\\.service"
target_label = "job"
replacement = "$1"
}
rule {
source_labels = ["__journal_priority_keyword"]
target_label = "level"
}
}
{% if node['name'] == "fw" %}
loki.relabel "suricata_relabel" {
forward_to = [loki.process.suricata_json.receiver]
rule {
target_label = "instance"
replacement = "{{ node['name'] }}"
}
rule {
target_label = "level"
replacement = "info"
}
rule {
target_label = "job"
replacement = "suricata_eve"
}
}
{% endif %}
{% if node['name'] == "auth" %}
loki.relabel "caddy_relabel" {
forward_to = [loki.process.caddy_json.receiver]
rule {
target_label = "instance"
replacement = "{{ node['name'] }}"
}
rule {
target_label = "level"
replacement = "info"
}
rule {
target_label = "job"
replacement = "caddy_access"
}
}
{% endif %}
//// Log parser
///// journal
loki.process "journal_parser" {
forward_to = [loki.write.loki.receiver]
// Severity parsing
// If content of log includes "level" information, change the level
stage.logfmt {
mapping = {
"content_level" = "level",
}
}
stage.labels {
values = {
"level" = "content_level",
}
}
// Add this section as parser for each service
// common
stage.match {
selector = "{job=\"sshd\"}"
stage.regex {
expression = "Accepted \\w+ for (?P<user>\\w+) from (?P<ip>[\\d\\.]+)"
}
stage.labels {
values = { "user" = "" }
}
}
// infra
{% if node['name'] == 'infra' %}
// auth
{% elif node['name'] == 'auth' %}
// app
{% elif node['name'] == 'app' %}
{% endif %}
}
{% if node['name'] == "fw" %}
////// suricata
loki.process "suricata_json" {
forward_to = [loki.write.loki.receiver]
stage.json {
expressions = {
event_type = "event_type",
src_ip = "src_ip",
severity = "alert.severity",
}
}
stage.labels {
values = { event_type = "", severity = "" }
}
}
{% endif %}
{% if node['name'] == "auth" %}
////// caddy
loki.process "caddy_json" {
forward_to = [loki.write.loki.receiver]
stage.json {
expressions = {
status = "status",
method = "method",
remote_ip = "remote_ip",
duration = "duration",
}
}
stage.labels {
values = { status = "", method = "" }
}
}
{% endif %}
//// Logs input
////// journald
loki.source.journal "systemd" {
forward_to = [loki.process.journal_parser.receiver]
// Temporary labels such as "__journal__systemd_unit" are automatically removed when logs pass through "forward_to".
// To relabel using those temporary labels, the relabel_rules argument is necessary.
relabel_rules = loki.relabel.journal_relabel.rules
}
{% if node['name'] == 'fw' %}
////// suricata
local.file_match "suricata_logs" {
path_targets = [{ "__path__" = "/var/log/suricata/eve.json", "instance" = "{{ node['name'] }}" }]
}
loki.source.file "suricata" {
targets = local.file_match.suricata_logs.targets
forward_to = [loki.relabel.suricata_relabel.receiver]
}
{% endif %}
{% if node['name'] == 'auth' %}
////// caddy
local.file_match "caddy_logs" {
path_targets = [{ "__path__" = "/var/log/caddy/access.log", "instance" = "{{ node['name'] }}" }]
}
loki.source.file "caddy" {
targets = local.file_match.caddy_logs.targets
forward_to = [loki.relabel.caddy_relabel.receiver]
}
{% endif %}

View File

@@ -0,0 +1,5 @@
# Caddy logs
filenames:
- /var/log/caddy/access.log
labels:
type: caddy

View File

@@ -0,0 +1,5 @@
# Suricata logs
filenames:
- /var/log/suricata/eve.json
labels:
type: suricata

View File

@@ -0,0 +1,56 @@
mode: nftables
pid_dir: /var/run/
update_frequency: 10s
log_mode: file
log_dir: /var/log/
log_level: info
log_compression: true
log_max_size: 100
log_max_backups: 3
log_max_age: 30
api_url: "https://{{ infra_uri['crowdsec']['domain'] }}:{{ infra_uri['crowdsec']['ports']['https'] }}"
api_key: "{{ hostvars['console']['crowdsec']['bouncer']['fw'] }}"
insecure_skip_verify: false
disable_ipv6: false
deny_action: DROP
deny_log: false
supported_decisions_types:
- ban
#to change log prefix
#deny_log_prefix: "crowdsec: "
#to change the blacklists name
blacklists_ipv4: crowdsec-blacklists
blacklists_ipv6: crowdsec6-blacklists
#type of ipset to use
ipset_type: nethash
#if present, insert rule in those chains
#iptables_chains:
# - INPUT
# - FORWARD
# - OUTPUT
# - DOCKER-USER
## nftables > the sets crowdsec-blacklists_ipv4/6 in table inet filter are needed
nftables:
ipv4:
enabled: true
set-only: true
family: inet
table: filter
chain: global
ipv6:
enabled: true
set-only: true
family: inet
table: filter
chain: global
# packet filter
pf:
# an empty string disables the anchor
anchor_name: ""
# Crowdsec firewall bouncer cannot use "[::]" yet
prometheus:
enabled: true
listen_addr: "::"
listen_port: 60601

View File

@@ -0,0 +1,11 @@
name: crowdsecurity/whitelists
description: "Whitelist console/admin hosts only"
whitelist:
reason: "trusted admin hosts"
ip:
- "127.0.0.1"
- "::1"
- "{{ hostvars['fw']['network4']['console']['client'] }}"
- "{{ hostvars['fw']['network4']['console']['wg'] }}"
- "{{ hostvars['fw']['network6']['console']['client'] }}"
- "{{ hostvars['fw']['network6']['console']['wg'] }}"

View File

@@ -0,0 +1,10 @@
[Unit]
Description=Crowdsec Rule Update Service
After=network-online.target
Wants=network-online.target
[Service]
Type=oneshot
ExecStart=/usr/bin/cscli hub update
ExecStart=/usr/bin/cscli hub upgrade
ExecStartPost=/bin/systemctl restart crowdsec

View File

@@ -0,0 +1,10 @@
[Unit]
Description=Daily Crowdsec Rule Update Timer
[Timer]
OnCalendar=*-*-* 05:00:00
Persistent=true
RandomizedDelaySec=300
[Install]
WantedBy=timers.target

View File

@@ -0,0 +1,66 @@
common:
daemonize: true
log_media: file
log_level: info
log_dir: /var/log/
log_max_size: 20
compress_logs: true
log_max_files: 10
working_dir: .
config_paths:
config_dir: /etc/crowdsec/
data_dir: /var/lib/crowdsec/data/
simulation_path: /etc/crowdsec/simulation.yaml
hub_dir: /var/lib/crowdsec/hub/
index_path: /var/lib/crowdsec/hub/.index.json
notification_dir: /etc/crowdsec/notifications/
plugin_dir: /usr/lib/crowdsec/plugins/
crowdsec_service:
acquisition_path: /etc/crowdsec/acquis.yaml
acquisition_dir: /etc/crowdsec/acquis.d
parser_routines: 1
cscli:
output: human
color: auto
db_config:
log_level: info
type: sqlite
db_path: /var/lib/crowdsec/data/crowdsec.db
#max_open_conns: 100
#user:
#password:
#db_name:
#host:
#port:
flush:
max_items: 5000
max_age: 7d
plugin_config:
  user: nobody # plugin process would be run on behalf of this user
  group: nogroup # plugin process would be run on behalf of this group
api:
client:
insecure_skip_verify: false
credentials_path: /etc/crowdsec/local_api_credentials.yaml
{% if node['name'] == 'fw' %}
server:
log_level: info
listen_uri: "[::]:8080"
profiles_path: /etc/crowdsec/profiles.yaml
console_path: /etc/crowdsec/console.yaml
online_client: # Central API credentials (to push signals and receive bad IPs)
credentials_path: /etc/crowdsec/online_api_credentials.yaml
trusted_ips: # IP ranges, or IPs which can have admin API access
- ::1
- 127.0.0.1
- {{ hostvars['fw']['network6']['subnet']['server'] }}
- {{ hostvars['fw']['network4']['subnet']['server'] }}
tls:
cert_file: /etc/crowdsec/ssl/crowdsec.crt
key_file: /etc/crowdsec/ssl/crowdsec.key
prometheus:
enabled: true
level: full
listen_addr: "[::]"
listen_port: 6060
{% endif %}

View File

@@ -0,0 +1,3 @@
url: https://{{ infra_uri['crowdsec']['domain'] }}:{{ infra_uri['crowdsec']['ports']['https'] }}
login: {{ node['name'] }}
password: {{ hostvars['console']['crowdsec']['machine'][node['name']] }}

View File

@@ -0,0 +1,49 @@
[Unit]
Description=Kopia backup service
Wants=network-online.target
After=network-online.target
[Service]
User=kopia
Group=kopia
Type=oneshot
# logging
StandardOutput=journal
StandardError=journal
CapabilityBoundingSet=CAP_DAC_READ_SEARCH
AmbientCapabilities=CAP_DAC_READ_SEARCH
ProtectSystem=strict
ProtectHome=tmpfs
InaccessiblePaths=/boot /root
{% if node['name'] == 'infra' %}
BindReadOnlyPaths=/home/infra/containers/postgresql/backups
{% elif node['name'] == 'app' %}
BindReadOnlyPaths=/home/app/data
{% endif %}
# In the root scope, the %u specifier always resolves to 0, so the UID is templated explicitly
BindPaths=/etc/kopia
BindPaths=/etc/secrets/{{ kopia_uid }}
BindPaths=/var/cache/kopia
EnvironmentFile=/etc/secrets/{{ kopia_uid }}/kopia.env
ExecStartPre=/usr/bin/kopia repository connect server \
--url=https://{{ infra_uri['kopia']['domain'] }}:{{ infra_uri['kopia']['ports']['https'] }} \
--override-username={{ node['name'] }} \
--override-hostname={{ node['name'] }}.ilnmors.internal
{% if node['name'] == 'infra' %}
ExecStart=/usr/bin/kopia snapshot create \
/home/infra/containers/postgresql/backups
{% elif node['name'] == 'app' %}
ExecStart=/usr/bin/kopia snapshot create \
/home/app/data
{% endif %}
[Install]
WantedBy=multi-user.target

View File

@@ -0,0 +1,10 @@
[Unit]
Description=Daily Kopia backup timer
[Timer]
OnCalendar=*-*-* 03:00:00
Persistent=true
RandomizedDelaySec=300
[Install]
WantedBy=timers.target

View File

@@ -0,0 +1,5 @@
KOPIA_PASSWORD={{ hostvars['console']['kopia']['user'][node['name']] }}
KOPIA_CONFIG_PATH=/etc/kopia/repository.config
KOPIA_CACHE_DIRECTORY=/var/cache/kopia
KOPIA_LOG_DIR=/var/cache/kopia/logs
KOPIA_CHECK_FOR_UPDATES=false

View File

@@ -0,0 +1,68 @@
include "/etc/bind/acme.key";
options {
directory "/var/cache/bind";
listen-on port 53 { {{ hostvars['fw']['network4']['bind']['server'] }}; };
listen-on-v6 port 53 { {{ hostvars['fw']['network6']['bind']['server'] }}; };
// Authoritative DNS setting
allow-recursion { none; };
allow-transfer { none; };
allow-update { none; };
dnssec-validation no;
check-names master warn;
};
zone "ilnmors.internal." {
type primary;
file "/var/lib/bind/db.ilnmors.internal";
notify yes;
// ACME-01 challenge policy. It allows only TXT record of subdomain update.
update-policy {
grant acme-key subdomain ilnmors.internal. TXT;
};
};
zone "1.168.192.in-addr.arpa" {
type primary;
file "/var/lib/bind/db.1.168.192.in-addr.arpa";
notify yes;
};
zone "10.168.192.in-addr.arpa" {
type primary;
file "/var/lib/bind/db.10.168.192.in-addr.arpa";
notify yes;
};
zone "0.0.0.0.0.0.0.0.1.0.0.0.0.0.d.f.ip6.arpa" {
type primary;
file "/var/lib/bind/db.1.00df.ip6.arpa";
notify yes;
};
zone "0.0.0.0.0.0.0.0.0.1.0.0.0.0.d.f.ip6.arpa" {
type primary;
file "/var/lib/bind/db.10.00df.ip6.arpa";
notify yes;
};
zone "ilnmors.com." {
//split horizon dns
type primary;
file "/var/lib/bind/db.ilnmors.com";
notify yes;
};
logging {
channel default_log {
stderr;
severity info;
};
category default { default_log; };
category config { default_log; };
category queries { default_log; };
};

View File

@@ -0,0 +1,13 @@
$TTL 86400
@ IN SOA bind.ilnmors.internal. mail.ilnmors.internal. (
2026021201 ; serial
3600 ; refresh (1 hour)
1800 ; retry (30 minutes)
604800 ; expire (1 week)
86400 ; minimum (1 day)
)
IN NS bind.ilnmors.internal.
1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0 IN PTR fw.ilnmors.internal.
1.1.0.0.0.0.0.0.0.0.0.0.0.0.0.0 IN PTR nas.ilnmors.internal.
0.2.0.0.0.0.0.0.0.0.0.0.0.0.0.0 IN PTR console.ilnmors.internal.

View File

@@ -0,0 +1,13 @@
$TTL 86400
@ IN SOA bind.ilnmors.internal. mail.ilnmors.internal. (
2026021201 ; serial
3600 ; refresh (1 hour)
1800 ; retry (30 minutes)
604800 ; expire (1 week)
86400 ; minimum (1 day)
)
IN NS bind.ilnmors.internal.
1 IN PTR fw.ilnmors.internal.
11 IN PTR nas.ilnmors.internal.
20 IN PTR console.ilnmors.internal.

View File

@@ -0,0 +1,17 @@
$TTL 86400
@ IN SOA bind.ilnmors.internal. mail.ilnmors.internal. (
2026021201 ; serial
3600 ; refresh (1 hour)
1800 ; retry (30 minutes)
604800 ; expire (1 week)
86400 ; minimum (1 day)
)
IN NS bind.ilnmors.internal.
1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0 IN PTR fw.ilnmors.internal.
2.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0 IN PTR blocky.ilnmors.internal.
3.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0 IN PTR bind.ilnmors.internal.
0.1.0.0.0.0.0.0.0.0.0.0.0.0.0.0 IN PTR vmm.ilnmors.internal.
1.1.0.0.0.0.0.0.0.0.0.0.0.0.0.0 IN PTR infra.ilnmors.internal.
2.1.0.0.0.0.0.0.0.0.0.0.0.0.0.0 IN PTR auth.ilnmors.internal.
3.1.0.0.0.0.0.0.0.0.0.0.0.0.0.0 IN PTR app.ilnmors.internal.

View File

@@ -0,0 +1,17 @@
$TTL 86400
@ IN SOA bind.ilnmors.internal. mail.ilnmors.internal. (
2026021201 ; serial
3600 ; refresh (1 hour)
1800 ; retry (30 minutes)
604800 ; expire (1 week)
86400 ; minimum (1 day)
)
IN NS bind.ilnmors.internal.
1 IN PTR fw.ilnmors.internal.
2 IN PTR blocky.ilnmors.internal.
3 IN PTR bind.ilnmors.internal.
10 IN PTR vmm.ilnmors.internal.
11 IN PTR infra.ilnmors.internal.
12 IN PTR auth.ilnmors.internal.
13 IN PTR app.ilnmors.internal.

View File

@@ -0,0 +1,12 @@
$TTL 86400
@ IN SOA bind.ilnmors.internal. mail.ilnmors.internal. (
2026021201 ; serial
3600 ; refresh (1 hour)
1800 ; retry (30 minutes)
604800 ; expire (1 week)
86400 ; minimum (1 day)
)
IN NS bind.ilnmors.internal.
* IN A 192.168.10.12
* IN AAAA fd00:10::12

View File

@@ -0,0 +1,40 @@
$TTL 86400
@ IN SOA bind.ilnmors.internal. mail.ilnmors.internal. (
2026021201 ; serial
3600 ; refresh (1 hour)
1800 ; retry (30 minutes)
604800 ; expire (1 week)
86400 ; minimum (1 day)
)
IN NS bind.ilnmors.internal.
bind IN A 192.168.10.3
bind IN AAAA fd00:10::3
fw IN A 192.168.10.1
fw IN AAAA fd00:10::1
blocky IN A 192.168.10.2
blocky IN AAAA fd00:10::2
vmm IN A 192.168.10.10
vmm IN AAAA fd00:10::10
infra IN A 192.168.10.11
infra IN AAAA fd00:10::11
auth IN A 192.168.10.12
auth IN AAAA fd00:10::12
app IN A 192.168.10.13
app IN AAAA fd00:10::13
switch IN A 192.168.1.2
nas IN A 192.168.1.11
nas IN AAAA fd00:1::11
console IN A 192.168.1.20
console IN AAAA fd00:1::20
printer IN A 192.168.1.101
ntp IN CNAME fw.ilnmors.internal.
crowdsec IN CNAME fw.ilnmors.internal.
ca IN CNAME infra.ilnmors.internal.
postgresql IN CNAME infra.ilnmors.internal.
ldap IN CNAME infra.ilnmors.internal.
prometheus IN CNAME infra.ilnmors.internal.
loki IN CNAME infra.ilnmors.internal.
grafana IN CNAME infra.ilnmors.internal.
authelia IN CNAME auth.ilnmors.internal.
*.app IN CNAME app.ilnmors.internal.

View File

@@ -0,0 +1,23 @@
[Unit]
Description=Blocky DNS Resolver
Wants=network-online.target
After=network-online.target
[Service]
User=blocky
Group=blocky
CapabilityBoundingSet=CAP_NET_BIND_SERVICE
AmbientCapabilities=CAP_NET_BIND_SERVICE
ExecStart=/usr/local/bin/blocky --config /etc/blocky/config.yaml
Restart=always
RestartSec=5s
NoNewPrivileges=true
ProtectSystem=full
ProtectHome=true
[Install]
WantedBy=multi-user.target

View File

@@ -0,0 +1,67 @@
certFile: "/etc/blocky/ssl/blocky.crt"
keyFile: "/etc/blocky/ssl/blocky.key"
minTlsServeVersion: 1.2
connectIPVersion: dual
ports:
dns:
- "{{ hostvars['fw']['network4']['blocky']['server'] }}:53"
- "[{{ hostvars['fw']['network6']['blocky']['server'] }}]:53"
tls:
- "{{ hostvars['fw']['network4']['blocky']['server'] }}:853"
- "[{{ hostvars['fw']['network6']['blocky']['server'] }}]:853"
https:
- "{{ hostvars['fw']['network4']['blocky']['server'] }}:443"
- "[{{ hostvars['fw']['network6']['blocky']['server'] }}]:443"
log:
level: info
format: text
timestamp: true
privacy: false
upstreams:
groups:
default:
- "tcp-tls:1.1.1.1:853"
- "tcp-tls:1.0.0.1:853"
- "tcp-tls:[2606:4700:4700::1111]:853"
- "tcp-tls:[2606:4700:4700::1001]:853"
conditional:
fallbackUpstream: false
mapping:
ilnmors.internal: "{{ hostvars['fw']['network4']['bind']['server'] }}, {{ hostvars['fw']['network6']['bind']['server'] }}"
ilnmors.com: "{{ hostvars['fw']['network4']['bind']['server'] }}, {{ hostvars['fw']['network6']['bind']['server'] }}"
1.168.192.in-addr.arpa: "{{ hostvars['fw']['network4']['bind']['server'] }}, {{ hostvars['fw']['network6']['bind']['server'] }}"
10.168.192.in-addr.arpa: "{{ hostvars['fw']['network4']['bind']['server'] }}, {{ hostvars['fw']['network6']['bind']['server'] }}"
0.0.0.0.0.0.0.0.1.0.0.0.0.0.d.f.ip6.arpa: "{{ hostvars['fw']['network4']['bind']['server'] }}, {{ hostvars['fw']['network6']['bind']['server'] }}"
0.0.0.0.0.0.0.0.0.1.0.0.0.0.d.f.ip6.arpa: "{{ hostvars['fw']['network4']['bind']['server'] }}, {{ hostvars['fw']['network6']['bind']['server'] }}"
vpn.ilnmors.com: "tcp-tls:1.1.1.1:853, tcp-tls:1.0.0.1:853, tcp-tls:[2606:4700:4700::1111]:853, tcp-tls:[2606:4700:4700::1001]:853"
blocking:
blockType: nxDomain
denylists:
ads:
# [ General ]
- https://raw.githubusercontent.com/StevenBlack/hosts/master/hosts
- https://big.oisd.nl
- https://o0.pages.dev/Lite/domains.txt
# [ Korean regional ]
- https://raw.githubusercontent.com/yous/YousList/master/hosts.txt
# [ Telemetry ]
- https://raw.githubusercontent.com/crazy-max/WindowsSpyBlocker/master/data/hosts/spy.txt
- https://raw.githubusercontent.com/Perflyst/PiHoleBlocklist/master/SmartTV.txt
clientGroupsBlock:
default:
- ads
caching:
minTime: 5m
maxTime: 30m
cacheTimeNegative: 0m
prefetching: true
prometheus:
enable: false
path: /metrics

View File

@@ -0,0 +1,9 @@
# 1. Access Control (IPv4)
allow {{ hostvars['fw']['network4']['subnet']['client'] }}
allow {{ hostvars['fw']['network4']['subnet']['server'] }}
allow {{ hostvars['fw']['network4']['subnet']['wg'] }}
# 2. Access Control (IPv6)
allow {{ hostvars['fw']['network6']['subnet']['client'] }}
allow {{ hostvars['fw']['network6']['subnet']['server'] }}
allow {{ hostvars['fw']['network6']['subnet']['wg'] }}

View File

@@ -0,0 +1,15 @@
[Unit]
Description=DDNS Update Service
After=network-online.target
Wants=network-online.target
[Service]
Type=oneshot
StandardOutput=journal
StandardError=journal
EnvironmentFile=/etc/secrets/%U/ddns.env
# Run the script
ExecStart=/usr/local/bin/ddns.sh -d "ilnmors.com"

View File

@@ -0,0 +1,299 @@
#!/bin/bash
# ddns.sh — keep Cloudflare DNS records (root A record plus *.domain and
# www.domain CNAMEs) pointed at this host's current public IP.
# All log output is logfmt on stderr (see log() below).
# Usage: ddns.sh -d <domain> [-t <ttl>] [-p] [-r]
# NOTE(review): an earlier revision also documented a "-c" flag, but getopts
# below does not parse one.
# Default Information
DOMAIN=""            # zone apex to manage; required, set via -d
TTL=180              # TTL for the root A record (overridable via -t)
C_TTL=86400          # fixed TTL for the CNAME records (*.domain, www.domain)
PROXIED="false"      # Cloudflare proxy flag; switched on with -p
DELETE_FLAG="false"  # -r: delete the managed records instead of updating them
CURRENT_IP=""
# These will be injected by systemd (EnvironmentFile=/etc/secrets/%U/ddns.env)
# ZONE_ID='.secret'
# API_KEY='.secret'
# usage() function
# Print command-line usage and abort the script with status 1.
# Fix: the previous text advertised a "-c" option that getopts never parses.
usage() {
	echo "Usage: $0 -d \"domain\" [-t \"ttl\"] [-p] [-r]"
	echo "-d <domain>: Specify the domain to update"
	echo "-t <ttl>: Specify the TTL(Time to live)"
	echo "-p: Specify the cloudflare proxy to use"
	echo "-r: Delete the DNS record"
	exit 1
}
# log <level> <message>
# Emit one logfmt line (time, level, msg, source) on stderr; timestamps are UTC.
log() {
	local ts lvl text
	ts="$(date -u +"%Y-%m-%dT%H:%M:%SZ")"
	lvl="$1"
	text="$2"
	printf 'time="%s" level="%s" msg="%s" source="ddns.sh"\n' "$ts" "$lvl" "$text" >&2
}
# getopts to get arguments
# Fix: the leading ":" enables getopts silent mode; without it the ":"
# (missing-argument) branch below is unreachable and getopts prints its own
# stderr message instead of our logfmt one.
while getopts ":d:t:pr" opt; do
	case $opt in
		d)
			DOMAIN="$OPTARG"
			;;
		t)
			TTL="$OPTARG"
			;;
		p)
			PROXIED="true"
			;;
		r)
			DELETE_FLAG="true"
			;;
		\?) # unknown options
			log "error" "Invalid option: -$OPTARG"
			usage
			;;
		:) # option that is missing its required argument
			log "error" "Option -$OPTARG requires an argument."
			usage
			;;
	esac
done
# Drop the parsed options; this script takes no positional arguments.
shift $((OPTIND - 1))
# Check necessary options
if [ -z "$DOMAIN" ]; then
	log "error" "-d option is required"
	usage
fi
if ! [[ "$TTL" =~ ^[0-9]+$ ]] || [ "$TTL" -le 0 ]; then
	log "error" "-t option (ttl) requires a number above 0."
	usage
fi
# Check necessary environment variables (injected by systemd or the shell)
if [ -z "$ZONE_ID" ]; then
	log "error" "ZONE_ID is required via environment variable."
	exit 1
fi
if [ -z "$API_KEY" ]; then
	log "error" "API_KEY is required via environment variable."
	exit 1
fi
# Check required external tools
if ! command -v curl >/dev/null; then
	log "error" "curl is required"
	exit 1
fi
if ! command -v jq >/dev/null; then
	log "error" "jq is required"
	exit 1
fi
# Cloudflare API endpoint and shared request headers
URL="https://api.cloudflare.com/client/v4/zones/$ZONE_ID/dns_records"
CONTENT_TYPE="Content-Type: application/json"
AUTHORIZATION="Authorization: Bearer $API_KEY"
# Current IP: prefer the address configured on the wan interface
CURRENT_IP=$( ip address show dev wan | grep 'inet ' | awk '{print $2}' | cut -d'/' -f1 )
# Fall back to external services when the interface IP is private or empty
if [[ -z "$CURRENT_IP" || "$CURRENT_IP" =~ ^(10\.|172\.(1[6-9]|2[0-9]|3[0-1])\.|192\.168\.|127\.) ]]; then
	log "info" "IP from interface is private or empty. Fetching public IP..."
	CURRENT_IP=$(curl -sf "https://ifconfig.me") ||\
	CURRENT_IP=$(curl -sf "https://ifconfig.kr") ||\
	CURRENT_IP=$(curl -sf "https://api.ipify.org")
fi
if [ -z "$CURRENT_IP" ]; then
	# Fix: use lowercase "error" for consistency with every other log call
	log "error" "Can't get an IP"
	exit 1
fi
# DNS functions
# get_dns_record <type> <name>
# Query Cloudflare for records matching type and name; prints the raw JSON
# response on success, aborts the whole script when the API reports failure.
get_dns_record()
{
	local record_type="$1"
	local record_name="$2"
	local response
	response="$(
		curl -s "$URL?type=$record_type&name=$record_name"\
		-H "$CONTENT_TYPE"\
		-H "$AUTHORIZATION")"
	if [ "$(echo "$response" | jq -r '.success')" != "false" ]; then
		# return the JSON body to the caller via stdout
		echo "$response"
	else
		log "error" "Can't get dns record by $response"
		exit 1
	fi
}
# create_dns_record <type> <name> <ttl> <comment> <content>
# POST a new DNS record (proxied per the global $PROXIED flag); prints the
# API response on success, aborts the script when the API reports failure.
create_dns_record()
{
	local record_type="$1"
	local record_name="$2"
	local record_ttl="$3"
	local record_comment="$4"
	local record_content="$5"
	local payload response
	payload="{
		\"name\": \"$record_name\",
		\"ttl\": $record_ttl,
		\"type\": \"$record_type\",
		\"comment\": \"$record_comment\",
		\"content\": \"$record_content\",
		\"proxied\": $PROXIED
	}"
	response="$(
		curl -s "$URL"\
		-X POST\
		-H "$CONTENT_TYPE"\
		-H "$AUTHORIZATION"\
		-d "$payload")"
	if [ "$(echo "$response" | jq -r '.success')" == "false" ]; then
		log "error" "Can't create dns record by $response"
		exit 1
	fi
	# return the JSON body to the caller via stdout
	echo "$response"
}
# update_dns_record <type> <name> <ttl> <comment> <content> <id>
# PUT (replace) the DNS record identified by <id>; prints the API response
# on success, aborts the script when the API reports failure.
update_dns_record()
{
	local record_type="$1"
	local record_name="$2"
	local record_ttl="$3"
	local record_comment="$4"
	local record_content="$5"
	local record_id="$6"
	local payload response
	payload="{
		\"name\": \"$record_name\",
		\"ttl\": $record_ttl,
		\"type\": \"$record_type\",
		\"comment\": \"$record_comment\",
		\"content\": \"$record_content\",
		\"proxied\": $PROXIED
	}"
	response=$(
		curl -s "$URL/$record_id"\
		-X PUT\
		-H "$CONTENT_TYPE"\
		-H "$AUTHORIZATION"\
		-d "$payload")
	if [ "$(echo "$response" | jq -r '.success')" == "false" ]; then
		log "error" "Can't update dns record by $response"
		exit 1
	fi
	# return the JSON body to the caller via stdout
	echo "$response"
}
# delete_dns_record <type> <id>
# DELETE the record with the given id; prints the API response on success,
# aborts the script when the API reports failure.
delete_dns_record()
{
	# The <type> argument is never used by the DELETE call; it is accepted
	# only so callers can pass (type, id) symmetrically with the other helpers.
	local record_type="$1"
	local record_id="$2"
	local response
	response=$(
		curl -s "$URL/$record_id"\
		-X DELETE\
		-H "$CONTENT_TYPE"\
		-H "$AUTHORIZATION"
	)
	if [ "$(echo "$response" | jq -r '.success')" == "false" ]; then
		log "error" "Can't delete dns record by $response"
		exit 1
	fi
	# return the JSON body to the caller via stdout
	echo "$response"
}
# Get the DNS A and CNAME records currently present at Cloudflare.
# NOTE(review): the Cloudflare API documents record types in uppercase
# ("CNAME"); confirm the lowercase "cname" filter matches as intended.
A_DNS_RECORD=$(get_dns_record "A" "$DOMAIN")
S_DNS_RECORD=$(get_dns_record "cname" "*.$DOMAIN")
W_DNS_RECORD=$(get_dns_record "cname" "www.$DOMAIN")
# Delete the managed DNS records when -r was given, then stop.
# Fix: all $*_DNS_RECORD expansions are now quoted so the JSON payloads are
# not word-split/glob-expanded by the shell.
if [ "$DELETE_FLAG" == "true" ]; then
	FLAG="false"
	if [ "$(echo "$A_DNS_RECORD" | jq -r '.result | length')" -eq 1 ]; then
		A_DNS_ID="$(echo "$A_DNS_RECORD" | jq -r '.result[0].id')"
		delete_dns_record "A" "$A_DNS_ID"
		log "info" "root DNS record is deleted"
		FLAG="true"
	fi
	if [ "$(echo "$S_DNS_RECORD" | jq -r '.result | length')" -eq 1 ]; then
		S_DNS_ID="$(echo "$S_DNS_RECORD" | jq -r '.result[0].id')"
		delete_dns_record "cname" "$S_DNS_ID"
		log "info" "sub DNS record is deleted"
		FLAG="true"
	fi
	if [ "$(echo "$W_DNS_RECORD" | jq -r '.result | length')" -eq 1 ]; then
		W_DNS_ID="$(echo "$W_DNS_RECORD" | jq -r '.result[0].id')"
		delete_dns_record "cname" "$W_DNS_ID"
		log "info" "www DNS record is deleted"
		FLAG="true"
	fi
	if [ "$FLAG" == "false" ]; then
		log "info" "Nothing is Deleted. There are no DNS records"
	fi
	exit
fi
# Create or update the root A record.
# Fix: $CURRENT_IP was unquoted in the comparison (word-splitting hazard),
# and the obsolescent `-o` test operator is replaced with `||`.
if [ "$(echo "$A_DNS_RECORD" | jq -r '.result | length')" -eq 1 ]; then # root DNS record exists
	A_DNS_ID="$(echo "$A_DNS_RECORD" | jq -r '.result[0].id')"
	A_DNS_CONTENT="$(echo "$A_DNS_RECORD" | jq -r '.result[0].content')"
	A_DNS_TTL="$(echo "$A_DNS_RECORD" | jq -r '.result[0].ttl')"
	A_DNS_PROXIED="$(echo "$A_DNS_RECORD" | jq -r '.result[0].proxied')"
	if [ "$A_DNS_CONTENT" != "$CURRENT_IP" ] || [ "$A_DNS_TTL" != "$TTL" ] || [ "$A_DNS_PROXIED" != "$PROXIED" ]; then
		update_dns_record "A" "$DOMAIN" "$TTL" "$(date "+%Y-%m-%d %H:%M:%S"): root domain from ddns.sh" "$CURRENT_IP" "$A_DNS_ID"
		log "info" "Root DNS record is successfully changed Domain: $DOMAIN IP: $A_DNS_CONTENT to $CURRENT_IP TTL: $A_DNS_TTL to $TTL proxied: $A_DNS_PROXIED to $PROXIED"
	else
		log "info" "Root DNS record is not changed Domain: $DOMAIN IP: $CURRENT_IP TTL: $TTL proxied: $PROXIED"
	fi
else # root DNS record does not exist
	create_dns_record "A" "$DOMAIN" "$TTL" "$(date "+%Y-%m-%d %H:%M:%S"): root domain from ddns.sh" "$CURRENT_IP"
	log "info" "Root DNS record is successfully created Domain: $DOMAIN IP: $CURRENT_IP TTL: $TTL proxied: $PROXIED"
fi
# Create or update the wildcard (*.domain) CNAME record.
if [ "$(echo "$S_DNS_RECORD" | jq -r '.result | length')" -eq 1 ]; then # sub DNS record exists
	S_DNS_ID="$(echo "$S_DNS_RECORD" | jq -r '.result[0].id')"
	S_DNS_CONTENT="$(echo "$S_DNS_RECORD" | jq -r '.result[0].content')"
	S_DNS_TTL="$(echo "$S_DNS_RECORD" | jq -r '.result[0].ttl')"
	S_DNS_PROXIED="$(echo "$S_DNS_RECORD" | jq -r '.result[0].proxied')"
	if [ "$S_DNS_CONTENT" != "$DOMAIN" ] || [ "$S_DNS_TTL" != "$C_TTL" ] || [ "$S_DNS_PROXIED" != "$PROXIED" ]; then
		update_dns_record "cname" "*.$DOMAIN" "$C_TTL" "$(date "+%Y-%m-%d %H:%M:%S"): sub domain from ddns.sh" "$DOMAIN" "$S_DNS_ID"
		log "info" "Sub DNS record is successfully changed Domain: $S_DNS_CONTENT to *.$DOMAIN cname: $DOMAIN TTL: $S_DNS_TTL to $C_TTL proxied: $S_DNS_PROXIED to $PROXIED"
	else
		log "info" "Sub DNS record is not changed Domain: *.$DOMAIN cname: $DOMAIN TTL: $C_TTL proxied: $PROXIED"
	fi
else # sub DNS record does not exist
	create_dns_record "cname" "*.$DOMAIN" "$C_TTL" "$(date "+%Y-%m-%d %H:%M:%S"): sub domain from ddns.sh" "$DOMAIN"
	log "info" "Sub DNS record is successfully created Domain: *.$DOMAIN cname: $DOMAIN TTL: $C_TTL proxied: $PROXIED"
fi
# Create or update the www CNAME record.
if [ "$(echo "$W_DNS_RECORD" | jq -r '.result | length')" -eq 1 ]; then # www DNS record exists
	W_DNS_ID="$(echo "$W_DNS_RECORD" | jq -r '.result[0].id')"
	W_DNS_CONTENT="$(echo "$W_DNS_RECORD" | jq -r '.result[0].content')"
	W_DNS_TTL="$(echo "$W_DNS_RECORD" | jq -r '.result[0].ttl')"
	W_DNS_PROXIED="$(echo "$W_DNS_RECORD" | jq -r '.result[0].proxied')"
	if [ "$W_DNS_CONTENT" != "$DOMAIN" ] || [ "$W_DNS_TTL" != "$C_TTL" ] || [ "$W_DNS_PROXIED" != "$PROXIED" ]; then
		update_dns_record "cname" "www.$DOMAIN" "$C_TTL" "$(date "+%Y-%m-%d %H:%M:%S"): www domain from ddns.sh" "$DOMAIN" "$W_DNS_ID"
		log "info" "www DNS record is successfully changed Domain: $W_DNS_CONTENT to www.$DOMAIN cname: $DOMAIN TTL: $W_DNS_TTL to $C_TTL proxied: $W_DNS_PROXIED to $PROXIED"
	else
		log "info" "www DNS record is not changed Domain: www.$DOMAIN cname: $DOMAIN TTL: $C_TTL proxied: $PROXIED"
	fi
else # www DNS record does not exist
	create_dns_record "cname" "www.$DOMAIN" "$C_TTL" "$(date "+%Y-%m-%d %H:%M:%S"): www domain from ddns.sh" "$DOMAIN"
	log "info" "www DNS record is successfully created Domain: www.$DOMAIN cname: $DOMAIN TTL: $C_TTL proxied: $PROXIED"
fi

View File

@@ -0,0 +1,10 @@
[Unit]
Description=Run DDNS update service every 5 minutes
[Timer]
OnBootSec=1min
OnUnitActiveSec=5min
Persistent=true
[Install]
WantedBy=timers.target

View File

@@ -0,0 +1,105 @@
{
"Dhcp4": {
"subnet4": [
{
"subnet": "{{ hostvars['fw']['network4']['subnet']['client'] }}",
"pools" : [
{
"pool": "192.168.1.254-192.168.1.254"
}
],
"option-data": [
{
"name": "routers",
"data": "{{ hostvars['fw']['network4']['firewall']['client'] }}"
},
{
"name": "domain-name-servers",
"data": "{{ hostvars['fw']['network4']['blocky']['server'] }}"
},
{
"name": "domain-name",
"data": "ilnmors.internal."
}
],
"reservations": [
{
"hw-address": "58:04:4f:18:6c:5e",
"ip-address": "{{ hostvars['fw']['network4']['switch']['client'] }}",
"hostname": "switch"
},
{
"hw-address": "90:09:d0:65:a9:db",
"ip-address": "{{ hostvars['fw']['network4']['nas']['client'] }}",
"hostname": "nas"
},
{
"hw-address": "d8:e2:df:ff:1b:d5",
"ip-address": "{{ hostvars['fw']['network4']['console']['client'] }}",
"hostname": "surface"
},
{
"hw-address": "38:ca:84:94:5e:06",
"ip-address": "{{ hostvars['fw']['network4']['printer']['client'] }}",
"hostname": "printer"
}
],
"id": 1,
"interface": "client"
},
{
"subnet": "{{ hostvars['fw']['network4']['subnet']['user'] }}",
"pools" : [
{
"pool": "192.168.20.2-192.168.20.254"
}
],
"option-data": [
{
"name": "routers",
"data": "{{ hostvars['fw']['network4']['firewall']['user'] }}"
},
{
"name": "domain-name-servers",
"data": "{{ hostvars['fw']['network4']['blocky']['server'] }}"
},
{
"name": "domain-name",
"data": "ilnmors.internal."
}
],
"id": 2,
"interface": "user"
}
],
"interfaces-config": {
"interfaces": [
"client",
"user"
],
"dhcp-socket-type": "raw",
"service-sockets-max-retries": 5,
"service-sockets-require-all": true
},
"renew-timer": 1000,
"rebind-timer": 2000,
"valid-lifetime": 4000,
"loggers": [
{
"name": "kea-dhcp4",
"output_options": [
{
"output": "stdout"
}
],
"severity": "INFO"
}
],
"lease-database": {
"type": "memfile",
"persist": true,
"name": "/var/lib/kea/kea-leases4.csv",
"lfc-interval": 3600
}
}
}

View File

@@ -0,0 +1,7 @@
# Stream events
2210010 # SURICATA STREAM 3way handshake wrong seq wrong ack / TCP 3-way handshake in local networks
2210021
2210045
# Wrong thread warning
2210059

View File

@@ -0,0 +1,518 @@
%YAML 1.1
---
suricata-version: "7.0"
vars:
address-groups:
HOME_NET: "{{ hostvars['fw']['suricata']['home_net'] }}"
EXTERNAL_NET: "!$HOME_NET"
HTTP_SERVERS: "$HOME_NET"
SMTP_SERVERS: "$HOME_NET"
SQL_SERVERS: "$HOME_NET"
DNS_SERVERS: "$HOME_NET"
TELNET_SERVERS: "$HOME_NET"
AIM_SERVERS: "$EXTERNAL_NET"
DC_SERVERS: "$HOME_NET"
DNP3_SERVER: "$HOME_NET"
DNP3_CLIENT: "$HOME_NET"
MODBUS_CLIENT: "$HOME_NET"
MODBUS_SERVER: "$HOME_NET"
ENIP_CLIENT: "$HOME_NET"
ENIP_SERVER: "$HOME_NET"
port-groups:
HTTP_PORTS: "80"
SHELLCODE_PORTS: "!80"
ORACLE_PORTS: 1521
SSH_PORTS: 22
DNP3_PORTS: 20000
MODBUS_PORTS: 502
FILE_DATA_PORTS: "[$HTTP_PORTS,110,143]"
FTP_PORTS: 21
GENEVE_PORTS: 6081
VXLAN_PORTS: 4789
TEREDO_PORTS: 3544
default-log-dir: /var/log/suricata/
stats:
enabled: yes
interval: 8
plugins:
outputs:
- fast:
enabled: yes
filename: fast.log
append: yes
- eve-log:
enabled: yes
filetype: regular
filename: eve.json
pcap-file: false
community-id: true
community-id-seed: 0
xff:
enabled: no
mode: extra-data
deployment: reverse
header: X-Forwarded-For
types:
- alert:
tagged-packets: yes
- frame:
enabled: no
- anomaly:
enabled: yes
types:
- http:
extended: yes
- dns:
- tls:
extended: yes
- files:
force-magic: no
- smtp:
- ftp
- rdp
- nfs
- smb
- tftp
- ike
- dcerpc
- krb5
- bittorrent-dht
- snmp
- rfb
- sip
- quic:
- dhcp:
enabled: yes
extended: no
- ssh
- mqtt:
- http2
- pgsql:
enabled: no
- stats:
totals: yes
threads: no
deltas: no
- flow
- http-log:
enabled: no
filename: http.log
append: yes
- tls-log:
enabled: no
filename: tls.log
append: yes
- tls-store:
enabled: no
- pcap-log:
enabled: no
filename: log.pcap
limit: 1000mb
max-files: 2000
compression: none
mode: normal # normal, multi or sguil.
use-stream-depth: no
honor-pass-rules: no
- alert-debug:
enabled: no
filename: alert-debug.log
append: yes
- stats:
enabled: yes
filename: stats.log
append: yes
totals: yes
threads: no
- syslog:
enabled: no
facility: local5
- file-store:
version: 2
enabled: no
xff:
enabled: no
mode: extra-data
deployment: reverse
header: X-Forwarded-For
- tcp-data:
enabled: no
type: file
filename: tcp-data.log
- http-body-data:
enabled: no
type: file
filename: http-data.log
- lua:
enabled: no
scripts:
logging:
default-log-level: notice
default-output-filter:
outputs:
- console:
enabled: yes
- file:
enabled: yes
level: info
filename: suricata.log
- syslog:
enabled: no
facility: local5
format: "[%i] <%d> -- "
af-packet:
{% for iface in hostvars['fw']['suricata']['interfaces'] %}
- interface: {{ iface }}
cluster-id: {{ 99 - loop.index0 }}
cluster-type: cluster_flow
defrag: yes
use-mmap: yes
tpacket-v3: yes
checksum-checks: no
{% endfor %}
app-layer:
protocols:
telnet:
enabled: yes
rfb:
enabled: yes
detection-ports:
dp: 5900, 5901, 5902, 5903, 5904, 5905, 5906, 5907, 5908, 5909
mqtt:
enabled: yes
krb5:
enabled: yes
bittorrent-dht:
enabled: yes
snmp:
enabled: yes
ike:
enabled: yes
tls:
enabled: yes
detection-ports:
dp: 443
pgsql:
enabled: no
stream-depth: 0
dcerpc:
enabled: yes
ftp:
enabled: yes
rdp:
ssh:
enabled: yes
http2:
enabled: yes
smtp:
enabled: yes
raw-extraction: no
mime:
decode-mime: yes
decode-base64: yes
decode-quoted-printable: yes
header-value-depth: 2000
extract-urls: yes
inspected-tracker:
content-limit: 100000
content-inspect-min-size: 32768
content-inspect-window: 4096
imap:
enabled: detection-only
smb:
enabled: yes
detection-ports:
dp: 139, 445
nfs:
enabled: yes
tftp:
enabled: yes
dns:
tcp:
enabled: yes
detection-ports:
dp: 53
udp:
enabled: yes
detection-ports:
dp: 53
http:
enabled: yes
libhtp:
default-config:
personality: IDS
request-body-limit: 100kb
response-body-limit: 100kb
request-body-minimal-inspect-size: 32kb
request-body-inspect-window: 4kb
response-body-minimal-inspect-size: 40kb
response-body-inspect-window: 16kb
response-body-decompress-layer-limit: 2
http-body-inline: auto
swf-decompression:
enabled: no
type: both
compress-depth: 100kb
decompress-depth: 100kb
double-decode-path: no
double-decode-query: no
server-config:
modbus:
enabled: no
detection-ports:
dp: 502
stream-depth: 0
dnp3:
enabled: no
detection-ports:
dp: 20000
enip:
enabled: no
detection-ports:
dp: 44818
sp: 44818
ntp:
enabled: yes
quic:
enabled: yes
dhcp:
enabled: yes
sip:
asn1-max-frames: 256
datasets:
defaults:
limits:
rules:
security:
limit-noproc: true
landlock:
enabled: no
directories:
read:
- /usr/
- /etc/
- /etc/suricata/
lua:
coredump:
max-dump: unlimited
unix-command:
enabled: yes
filename: /var/run/suricata-command.socket
legacy:
uricontent: enabled
exception-policy: auto
engine-analysis:
rules-fast-pattern: yes
rules: yes
pcre:
match-limit: 3500
match-limit-recursion: 1500
host-os-policy:
windows: [0.0.0.0/0]
bsd: []
bsd-right: []
old-linux: []
linux: []
old-solaris: []
solaris: []
hpux10: []
hpux11: []
irix: []
macos: []
vista: []
windows2k3: []
defrag:
memcap: 32mb
hash-size: 65536
trackers: 65535 # number of defragmented flows to follow
max-frags: 65535 # number of fragments to keep (higher than trackers)
prealloc: yes
timeout: 60
flow:
memcap: 128mb
hash-size: 65536
prealloc: 10000
emergency-recovery: 30
vlan:
use-for-tracking: true
livedev:
use-for-tracking: true
flow-timeouts:
default:
new: 30
established: 300
closed: 0
bypassed: 100
emergency-new: 10
emergency-established: 100
emergency-closed: 0
emergency-bypassed: 50
tcp:
new: 60
established: 600
closed: 60
bypassed: 100
emergency-new: 5
emergency-established: 100
emergency-closed: 10
emergency-bypassed: 50
udp:
new: 30
established: 300
bypassed: 100
emergency-new: 10
emergency-established: 100
emergency-bypassed: 50
icmp:
new: 30
established: 300
bypassed: 100
emergency-new: 10
emergency-established: 100
emergency-bypassed: 50
stream:
memcap: 64mb
checksum-validation: yes
inline: auto
reassembly:
memcap: 256mb
depth: 1mb
toserver-chunk-size: 2560
toclient-chunk-size: 2560
randomize-chunk-size: yes
host:
hash-size: 4096
prealloc: 1000
memcap: 32mb
decoder:
teredo:
enabled: true
ports: $TEREDO_PORTS
vxlan:
enabled: true
ports: $VXLAN_PORTS
geneve:
enabled: true
ports: $GENEVE_PORTS
detect:
profile: medium
custom-values:
toclient-groups: 3
toserver-groups: 25
sgh-mpm-context: auto
prefilter:
default: mpm
grouping:
profiling:
grouping:
dump-to-disk: false
include-rules: false
include-mpm-stats: false
mpm-algo: auto
threading:
set-cpu-affinity: no
cpu-affinity:
- management-cpu-set:
cpu: [ 0 ]
- receive-cpu-set:
cpu: [ 0 ]
- worker-cpu-set:
cpu: [ "all" ]
mode: "exclusive"
prio:
low: [ 0 ]
medium: [ "1-2" ]
high: [ 3 ]
default: "medium"
detect-thread-ratio: 1.0
luajit:
states: 128
profiling:
rules:
enabled: yes
filename: rule_perf.log
append: yes
limit: 10
json: yes
keywords:
enabled: yes
filename: keyword_perf.log
append: yes
prefilter:
enabled: yes
filename: prefilter_perf.log
append: yes
rulegroups:
enabled: yes
filename: rule_group_perf.log
append: yes
packets:
enabled: yes
filename: packet_stats.log
append: yes
csv:
enabled: no
filename: packet_stats.csv
locks:
enabled: no
filename: lock_stats.log
append: yes
pcap-log:
enabled: no
filename: pcaplog_stats.log
append: yes
nfq:
nflog:
- group: 2
buffer-size: 18432
- group: default
qthreshold: 1
qtimeout: 100
max-size: 20000
capture:
ipfw:
napatech:
default-rule-path: /var/lib/suricata/rules
rule-files:
- suricata.rules
classification-file: /etc/suricata/classification.config
reference-config-file: /etc/suricata/reference.config

View File

@@ -0,0 +1,9 @@
[Unit]
Description=Suricata Rule Update Service
After=network-online.target
Wants=network-online.target
[Service]
Type=oneshot
ExecStart=/usr/bin/suricata-update --disable-conf /etc/suricata/disable.conf --enable-conf /etc/suricata/enable.conf --local /etc/suricata/rules/local.rules
ExecStartPost=/usr/bin/systemctl try-reload-or-restart suricata

View File

@@ -0,0 +1,10 @@
[Unit]
Description=Daily Suricata Rule Update Timer
[Timer]
OnCalendar=*-*-* 06:00:00
Persistent=true
RandomizedDelaySec=300
[Install]
WantedBy=timers.target

View File

@@ -0,0 +1,79 @@
#cloud-config
bootcmd:
- groupadd -g 2000 svadmins || true
hostname: {{ hostvars[target_vm]['vm']['name'] }}
disable_root: true
users:
- name: {{ target_vm }}
uid: {{ hostvars[target_vm]['node']['uid'] }}
gecos: {{ target_vm }}
primary_group: svadmins
groups: sudo
lock_passwd: false
passwd: {{ hostvars['console']['sudo']['hash'][target_vm] }}
shell: /bin/bash
write_files:
- path: /etc/ssh/local_ssh_ca.pub
content: |
{{ hostvars['console']['ssh']['ca']['pub'] | trim }}
owner: "root:root"
permissions: "0644"
- path: /etc/ssh/sshd_config.d/ssh_ca.conf
content: |
TrustedUserCAKeys /etc/ssh/local_ssh_ca.pub
owner: "root:root"
permissions: "0644"
- path: /etc/ssh/sshd_config.d/prohibit_root.conf
content: |
PermitRootLogin no
owner: "root:root"
permissions: "0644"
- path: /etc/apt/sources.list.d/debian.sources
content: |
Types: deb deb-src
URIs: https://deb.debian.org/debian
Suites: trixie trixie-updates trixie-backports
Components: main contrib non-free non-free-firmware
Signed-By: /usr/share/keyrings/debian-archive-keyring.gpg
Types: deb deb-src
URIs: https://deb.debian.org/debian-security
Suites: trixie-security
Components: main contrib non-free non-free-firmware
Signed-By: /usr/share/keyrings/debian-archive-keyring.gpg
owner: "root:root"
permissions: "0644"
{% if target_vm == 'fw' %}
- path: /etc/sysctl.d/ipforward.conf
content: |
net.ipv4.ip_forward = 1
net.ipv6.conf.all.forwarding = 1
owner: "root:root"
permissions: "0644"
{% endif %}
{% set net_config_dir = 'fw' if target_vm == 'fw' else 'common' %}
{% for file_path in query('fileglob', hostvars['console']['node']['config_path'] + '/node/' + net_config_dir + '/networkd' + '/*') | sort %}
- path: /etc/systemd/network/{{ file_path | basename}}
content: |
{{ lookup('template', file_path) | indent(8) | trim }}
owner: "root:root"
permissions: "0644"
{% endfor %}
runcmd:
- update-initramfs -u
- systemctl disable networking
- systemctl enable systemd-networkd
- systemctl enable serial-getty@ttyS0
- sync
power_state:
delay: "now"
mode: reboot
message: "rebooting after cloud-init configuration"
timeout: 30

View File

@@ -0,0 +1,23 @@
[Unit]
Description=app vm
After=network-online.target libvirtd.service fw.service infra.service auth.service
Wants=fw.service infra.service auth.service
[Service]
Type=oneshot
RemainAfterExit=yes
TimeoutStopSec=360
ExecStart=/usr/bin/virsh -c qemu:///system start app
ExecStartPost=/bin/sleep 30
ExecStop=/bin/bash -c '\
/usr/bin/virsh -c qemu:///system shutdown app; \
while /usr/bin/virsh -c qemu:///system list --state-running --name | grep -qx "app"; do \
echo "Waiting for app to shutdown..."; \
sleep 2; \
done'
[Install]
WantedBy=default.target

View File

@@ -0,0 +1,23 @@
[Unit]
Description=auth vm
After=network-online.target libvirtd.service fw.service infra.service
Wants=fw.service infra.service
[Service]
Type=oneshot
RemainAfterExit=yes
TimeoutStopSec=360
ExecStart=/usr/bin/virsh -c qemu:///system start auth
ExecStartPost=/bin/sleep 30
ExecStop=/bin/bash -c '\
/usr/bin/virsh -c qemu:///system shutdown auth; \
while /usr/bin/virsh -c qemu:///system list --state-running --name | grep -qx "auth"; do \
echo "Waiting for auth to shutdown..."; \
sleep 2; \
done'
[Install]
WantedBy=default.target

View File

@@ -0,0 +1,23 @@
[Unit]
Description=fw vm
After=network-online.target libvirtd.service
Wants=network-online.target
[Service]
Type=oneshot
RemainAfterExit=yes
TimeoutStopSec=360
ExecStart=/usr/bin/virsh -c qemu:///system start fw
ExecStartPost=/bin/sleep 30
ExecStop=/bin/bash -c '\
/usr/bin/virsh -c qemu:///system shutdown fw; \
while /usr/bin/virsh -c qemu:///system list --state-running --name | grep -qx "fw"; do \
echo "Waiting for fw to shutdown..."; \
sleep 2; \
done'
[Install]
WantedBy=default.target

View File

@@ -0,0 +1,23 @@
[Unit]
Description=infra vm
After=network-online.target libvirtd.service fw.service
Wants=fw.service
[Service]
Type=oneshot
RemainAfterExit=yes
TimeoutStopSec=360
ExecStart=/usr/bin/virsh -c qemu:///system start infra
ExecStartPost=/bin/sleep 30
ExecStop=/bin/bash -c '\
/usr/bin/virsh -c qemu:///system shutdown infra; \
while /usr/bin/virsh -c qemu:///system list --state-running --name | grep -qx "infra"; do \
echo "Waiting for infra to shutdown..."; \
sleep 2; \
done'
[Install]
WantedBy=default.target

View File

@@ -0,0 +1,19 @@
<!-- LAN network for Hypervisor -->
<!-- lan-net.xml -->
<network>
<name>lan-net</name>
<forward mode='bridge'/>
<bridge name='br1'/>
<portgroup name='vlan-trunk'>
<vlan trunk='yes'>
<tag id='1' nativeMode='untagged'/>
<tag id='10'/>
<tag id='20'/>
</vlan>
</portgroup>
<portgroup name='vlan10-access'>
<vlan>
<tag id='10'/>
</vlan>
</portgroup>
</network>

View File

@@ -0,0 +1,7 @@
<!-- WAN network for Hypervisor -->
<!-- wan-net.xml -->
<network>
<name>wan-net</name>
<forward mode='bridge'/>
<bridge name='br0'/>
</network>

View File

@@ -0,0 +1,8 @@
<!-- Storage pool define -->
<!-- images.xml -->
<pool type='dir'>
<name>images-pool</name>
<target>
<path>/var/lib/libvirt/images</path>
</target>
</pool>

View File

@@ -0,0 +1,8 @@
<!-- Storage pool define -->
<!-- seeds-pool.xml -->
<pool type='dir'>
<name>seeds-pool</name>
<target>
<path>/var/lib/libvirt/seeds</path>
</target>
</pool>

View File

@@ -0,0 +1,78 @@
<domain type='kvm'>
<name>{{ hostvars[target_vm]['vm']['name'] }}</name>
<memory unit='GiB'>{{ hostvars[target_vm]['vm']['memory'] }}</memory>
<vcpu placement='static'>{{ hostvars[target_vm]['vm']['cpu'] }}</vcpu>
<cputune>
<shares>{{ hostvars[target_vm]['vm']['shares'] }}</shares>
</cputune>
<os firmware='efi'>
<type arch='x86_64' machine='pc-q35-10.0'>hvm</type>
<firmware>
<feature enabled='yes' name='enrolled-keys'/>
<feature enabled='yes' name='secure-boot'/>
</firmware>
<loader readonly='yes' secure='yes' type='pflash' format='raw'>/usr/share/OVMF/OVMF_CODE_4M.ms.fd</loader>
<nvram template='/usr/share/OVMF/OVMF_VARS_4M.ms.fd' templateFormat='raw' format='raw'>/var/lib/libvirt/qemu/nvram/{{ hostvars[target_vm]['vm']['name'] }}_VARS.fd</nvram>
<boot dev='hd'/>
</os>
<features>
<acpi/>
<apic/>
</features>
<cpu mode='host-passthrough' check='none' migratable='on'/>
<on_poweroff>destroy</on_poweroff>
<on_reboot>restart</on_reboot>
<on_crash>destroy</on_crash>
<devices>
<disk type='file' device='disk'>
<driver name='qemu' type='qcow2' discard='unmap' />
<source file='/var/lib/libvirt/images/{{ hostvars[target_vm]['vm']['name'] }}.qcow2' />
<target dev='vda' bus='virtio' />
</disk>
<disk type='file' device='cdrom'>
<driver name='qemu' type='raw' cache='none' />
<source file='/var/lib/libvirt/seeds/{{ hostvars[target_vm]['vm']['name'] }}_seed.iso' />
<target dev='sdb' bus='sata' />
<readonly/>
</disk>
{% if target_vm == 'fw' %}
<interface type='network'>
<mac address='{{ hostvars[target_vm]['vm']['wan_mac'] }}' />
<source network='{{ hostvars[target_vm]['vm']['wan_net'] }}' />
<model type='virtio' />
</interface>
<interface type='network'>
<mac address='{{ hostvars[target_vm]['vm']['lan_mac'] }}' />
<source network='{{ hostvars[target_vm]['vm']['lan_net'] }}' portgroup='{{ hostvars[target_vm]['vm']['lan_group'] }}' />
<model type='virtio' />
</interface>
{% else %}
<interface type='network'>
<mac address='{{ hostvars[target_vm]['vm']['lan_mac'] }}' />
<source network='{{ hostvars[target_vm]['vm']['lan_net'] }}' portgroup='{{ hostvars[target_vm]['vm']['lan_group'] }}' />
<model type='virtio' />
</interface>
{% endif %}
<console type='pty'>
<target type='serial' port='0' />
</console>
<channel type='unix'>
<target type='virtio' name='org.qemu.guest_agent.0' />
<address type='virtio-serial' controller='0' bus='0' port='1' />
</channel>
{% if target_vm == 'app' %}
{% for device in hostvars[target_vm]['vm']['pass_through'].values() %}
<hostdev mode='subsystem' type='pci' managed='yes'>
<driver name='vfio'/>
<source>
<address type='pci' domain='{{ device['domain'] }}' bus='{{ device['bus'] }}' slot='{{ device['slot'] }}' function='{{ device['function'] }}'/>
</source>
<!-- NOTE(review): this guest-side <address> duplicates the host PCI address from <source>;
     omitting it lets libvirt auto-assign a conflict-free guest slot on the q35 topology - confirm -->
<address type='pci' domain='{{ device['domain'] }}' bus='{{ device['bus'] }}' slot='{{ device['slot'] }}' function='{{ device['function'] }}'/>
</hostdev>
{% endfor %}
{% endif %}
<tpm model='tpm-crb'>
<backend type='emulator' version='2.0'/>
</tpm>
</devices>
</domain>