1.0.0 Release IaaS

This commit is contained in:
2026-03-15 04:41:02 +09:00
commit a7365da431
292 changed files with 36059 additions and 0 deletions

View File

@@ -0,0 +1,299 @@
// Label conventions for "name", "job" and "service_name":
//   job          > prometheus: which exporter / loki: which service
//   name         > prometheus: which service
//   service_name > loki: which service
// Metric
//// Metric output
// Metric output sink: every relabel component below forwards here.
prometheus.remote_write "prometheus" {
endpoint {
url = "https://{{ infra_uri['prometheus']['domain'] }}:{{ infra_uri['prometheus']['ports']['https'] }}/api/v1/write"
}
}
//// Metric relabel
////// For node metrics
// Normalizes labels on series coming from the unix exporter.
// Rules run in order; later matches overwrite earlier values.
prometheus.relabel "system_relabel" {
forward_to = [prometheus.remote_write.prometheus.receiver]
// Stamp every series with this node's name as the "instance" label.
rule {
target_label = "instance"
replacement = "{{ node['name'] }}"
}
// Strip the "integrations/" prefix from exporter job names,
// e.g. "integrations/node_exporter" -> "node_exporter".
rule {
source_labels = ["job"]
regex = "integrations\\/(.+)"
target_label = "job"
replacement = "$1"
}
// Drop the ".service" suffix from systemd unit names in "name".
rule {
source_labels = ["name"]
regex = "(.+)\\.service"
target_label = "name"
replacement = "$1"
}
}
////// For service metrics
// Normalizes labels on series scraped from individual services.
prometheus.relabel "default_label" {
forward_to = [prometheus.remote_write.prometheus.receiver]
// Stamp every series with this node's name as the "instance" label.
rule {
target_label = "instance"
replacement = "{{ node['name'] }}"
}
// Reduce auto-generated job names such as
// "prometheus.scrape.caddy" to just the component name ("caddy").
rule {
source_labels = ["job"]
regex = "prometheus\\.scrape\\.(.+)"
target_label = "job"
replacement = "$1"
}
// Strip the "integrations/" prefix from exporter job names.
rule {
source_labels = ["job"]
regex = "integrations\\/(.+)"
target_label = "job"
replacement = "$1"
}
}
//// Metric input
////// For node metrics
// Host-level metrics via the embedded node_exporter, limited to the
// collectors listed below.
prometheus.exporter.unix "system" {
enable_collectors = ["systemd", "cgroup", "processes", "cpu", "meminfo", "filesystem", "netdev"]
filesystem {
// Skip pseudo/virtual filesystems and container-managed mounts.
mount_points_exclude = "^/(sys|proc|dev|run|var/lib/docker/.+|var/lib/kubelet/.+)($|/)"
fs_types_exclude = "^(tmpfs|devtmpfs|devfs|iso9660|overlay|aufs|squashfs)$"
}
}
prometheus.scrape "system" {
targets = prometheus.exporter.unix.system.targets
forward_to = [prometheus.relabel.system_relabel.receiver]
}
{% if node['name'] == 'fw' %}
////// For Crowdsec metrics
// Scrape the Crowdsec LAPI (6060) and the firewall bouncer (60601).
// honor_labels keeps the hand-written "job" labels below from being
// overwritten by the scraper.
prometheus.scrape "crowdsec" {
targets = [
{ "__address__" = "{{ infra_uri['crowdsec']['domain'] }}:6060", "job" = "crowdsec" },
{ "__address__" = "{{ infra_uri['crowdsec']['domain'] }}:60601", "job" = "crowdsec-bouncer" },
]
honor_labels = true
forward_to = [prometheus.relabel.default_label.receiver]
}
{% endif %}
{% if node['name'] == 'infra' %}
////// For postgresql metrics
// NOTE(review): the DSN has no password, so the "alloy" role presumably
// authenticates via client certificate (sslmode=verify-full) — confirm.
prometheus.exporter.postgres "postgresql" {
data_source_names = [
"postgres://alloy@{{ infra_uri['postgresql']['domain'] }}:{{ infra_uri['postgresql']['ports']['tcp'] }}/postgres?sslmode=verify-full",
]
}
prometheus.scrape "postgresql" {
targets = prometheus.exporter.postgres.postgresql.targets
forward_to = [prometheus.relabel.default_label.receiver]
}
////// For certificates metrics
prometheus.scrape "x509" {
targets = [
{ "__address__" = "{{ node['name'] }}.ilnmors.internal:9793" },
]
forward_to = [prometheus.relabel.default_label.receiver]
}
{% endif %}
{% if node['name'] in ['infra', 'auth', 'app'] %}
////// For Caddy metrics
prometheus.scrape "caddy" {
targets = [
{ "__address__" = "{{ node['name'] }}.ilnmors.internal:443" },
]
scheme = "https"
forward_to = [prometheus.relabel.default_label.receiver]
}
{% endif %}
// Log
//// Logs output
// Log output sink: push entries to the central Loki over HTTPS.
loki.write "loki" {
endpoint {
url = "https://{{ infra_uri['loki']['domain'] }}:{{ infra_uri['loki']['ports']['https'] }}/loki/api/v1/push"
tenant_id = "ilnmors.internal"
}
}
//// Logs relabel
///// journal
// Maps journald metadata onto Loki labels. Rules run in order, so a
// later matching rule overwrites the "job" set by an earlier one.
// forward_to is empty: these rules are consumed through
// loki.source.journal's relabel_rules argument, not by forwarding.
loki.relabel "journal_relabel" {
forward_to = []
rule {
target_label = "instance"
replacement = "{{ node['name'] }}"
}
// Default value for "job" when no later rule matches
rule {
target_label = "job"
replacement = "systemd-journal"
}
// if a syslog identifier exists, use it as "job"
rule {
source_labels = ["__journal_syslog_identifier"]
regex = "(.+)"
target_label = "job"
replacement = "$1"
}
// if a systemd unit exists, use its name (without ".service") as "job"
rule {
source_labels = ["__journal__systemd_unit"]
regex = "(.+)\\.service"
target_label = "job"
replacement = "$1"
}
// if the resulting job is a "user@$UID" session unit, fall back to the default
rule {
source_labels = ["job"]
regex = "user@\\d+"
target_label = "job"
replacement = "systemd-journal"
}
// if a systemd user unit exists, prefer it as "job"
rule {
source_labels = ["__journal__systemd_user_unit"]
regex = "(.+)\\.service"
target_label = "job"
replacement = "$1"
}
// Expose journald's priority keyword (err, warning, ...) as "level".
rule {
source_labels = ["__journal_priority_keyword"]
target_label = "level"
}
}
{% if node['name'] == "fw" %}
// Attach static labels to Suricata EVE entries, then hand them to the
// JSON parser below.
loki.relabel "suricata_relabel" {
forward_to = [loki.process.suricata_json.receiver]
rule {
target_label = "instance"
replacement = "{{ node['name'] }}"
}
rule {
target_label = "level"
replacement = "info"
}
rule {
target_label = "job"
replacement = "suricata_eve"
}
}
{% endif %}
{% if node['name'] == "auth" %}
// Attach static labels to Caddy access-log entries, then hand them to
// the JSON parser below.
loki.relabel "caddy_relabel" {
forward_to = [loki.process.caddy_json.receiver]
rule {
target_label = "instance"
replacement = "{{ node['name'] }}"
}
rule {
target_label = "level"
replacement = "info"
}
rule {
target_label = "job"
replacement = "caddy_access"
}
}
{% endif %}
//// Log parser
///// journal
loki.process "journal_parser" {
forward_to = [loki.write.loki.receiver]
// Severity parsing
// If the log line itself carries logfmt "level=..." information,
// extract it and let it override the journald-derived level label.
stage.logfmt {
mapping = {
"content_level" = "level",
}
}
stage.labels {
values = {
"level" = "content_level",
}
}
// Add this section as parser for each service
// common
// sshd: extract the accepted user (and source IP) from login lines;
// only "user" is promoted to a label.
stage.match {
selector = "{job=\"sshd\"}"
stage.regex {
expression = "Accepted \\w+ for (?P<user>\\w+) from (?P<ip>[\\d\\.]+)"
}
stage.labels {
values = { "user" = "" }
}
}
// infra
{% if node['name'] == 'infra' %}
// auth
{% elif node['name'] == 'auth' %}
// app
{% elif node['name'] == 'app' %}
{% endif %}
}
{% if node['name'] == "fw" %}
////// suricata
// Parse Suricata EVE JSON and promote selected fields to labels.
loki.process "suricata_json" {
forward_to = [loki.write.loki.receiver]
stage.json {
expressions = {
event_type = "event_type",
src_ip = "src_ip",
severity = "alert.severity",
}
}
// NOTE(review): src_ip is extracted but not promoted to a label —
// presumably to avoid high-cardinality labels in Loki; confirm.
stage.labels {
values = { event_type = "", severity = "" }
}
}
{% endif %}
{% if node['name'] == "auth" %}
////// caddy
// Parse Caddy JSON access logs; only status and method become labels.
loki.process "caddy_json" {
forward_to = [loki.write.loki.receiver]
stage.json {
expressions = {
status = "status",
method = "method",
remote_ip = "remote_ip",
duration = "duration",
}
}
stage.labels {
values = { status = "", method = "" }
}
}
{% endif %}
//// Logs input
////// journald
loki.source.journal "systemd" {
forward_to = [loki.process.journal_parser.receiver]
// Temporary labels like "__journal__systemd_unit" are automatically
// dropped when entries pass through "forward_to"; the relabel_rules
// argument is required so journal_relabel can read them first.
relabel_rules = loki.relabel.journal_relabel.rules
}
{% if node['name'] == 'fw' %}
////// suricata
// Tail Suricata's EVE JSON log and route it through suricata_relabel.
local.file_match "suricata_logs" {
path_targets = [{ "__path__" = "/var/log/suricata/eve.json", "instance" = "{{ node['name'] }}" }]
}
loki.source.file "suricata" {
targets = local.file_match.suricata_logs.targets
forward_to = [loki.relabel.suricata_relabel.receiver]
}
{% endif %}
{% if node['name'] == 'auth' %}
////// caddy
// Tail Caddy's access log and route it through caddy_relabel.
local.file_match "caddy_logs" {
path_targets = [{ "__path__" = "/var/log/caddy/access.log", "instance" = "{{ node['name'] }}" }]
}
loki.source.file "caddy" {
targets = local.file_match.caddy_logs.targets
forward_to = [loki.relabel.caddy_relabel.receiver]
}
{% endif %}

View File

@@ -0,0 +1,5 @@
# Caddy access logs
filenames:
- /var/log/caddy/access.log
labels:
type: caddy

View File

@@ -0,0 +1,5 @@
# Suricata logs
filenames:
- /var/log/suricata/eve.json
labels:
type: suricata

View File

@@ -0,0 +1,56 @@
mode: nftables
pid_dir: /var/run/
update_frequency: 10s
log_mode: file
log_dir: /var/log/
log_level: info
log_compression: true
log_max_size: 100
log_max_backups: 3
log_max_age: 30
api_url: "https://{{ infra_uri['crowdsec']['domain'] }}:{{ infra_uri['crowdsec']['ports']['https'] }}"
api_key: "{{ hostvars['console']['crowdsec']['bouncer']['fw'] }}"
insecure_skip_verify: false
disable_ipv6: false
deny_action: DROP
deny_log: false
supported_decisions_types:
- ban
#to change log prefix
#deny_log_prefix: "crowdsec: "
#to change the blacklists name
blacklists_ipv4: crowdsec-blacklists
blacklists_ipv6: crowdsec6-blacklists
#type of ipset to use
ipset_type: nethash
#if present, insert rule in those chains
#iptables_chains:
# - INPUT
# - FORWARD
# - OUTPUT
# - DOCKER-USER
## nftables > the sets crowdsec-blacklists_ipv4/6 must already exist in table inet filter
nftables:
ipv4:
enabled: true
set-only: true
family: inet
table: filter
chain: global
ipv6:
enabled: true
set-only: true
family: inet
table: filter
chain: global
# packet filter
pf:
# an empty string disables the anchor
anchor_name: ""
# Crowdsec firewall bouncer cannot use "[::]" yet
prometheus:
enabled: true
listen_addr: "::"
listen_port: 60601

View File

@@ -0,0 +1,11 @@
name: crowdsecurity/whitelists
description: "Whitelist console/admin hosts only"
whitelist:
reason: "trusted admin hosts"
ip:
- "127.0.0.1"
- "::1"
- "{{ hostvars['fw']['network4']['console']['client'] }}"
- "{{ hostvars['fw']['network4']['console']['wg'] }}"
- "{{ hostvars['fw']['network6']['console']['client'] }}"
- "{{ hostvars['fw']['network6']['console']['wg'] }}"

View File

@@ -0,0 +1,10 @@
[Unit]
Description=Crowdsec Rule Update Service
After=network-online.target
Wants=network-online.target
[Service]
Type=oneshot
ExecStart=/usr/bin/cscli hub update
ExecStart=/usr/bin/cscli hub upgrade
ExecStartPost=/bin/systemctl restart crowdsec

View File

@@ -0,0 +1,10 @@
[Unit]
Description=Daily Crowdsec Rule Update Timer
[Timer]
OnCalendar=*-*-* 05:00:00
Persistent=true
RandomizedDelaySec=300
[Install]
WantedBy=timers.target

View File

@@ -0,0 +1,66 @@
common:
daemonize: true
log_media: file
log_level: info
log_dir: /var/log/
log_max_size: 20
compress_logs: true
log_max_files: 10
working_dir: .
config_paths:
config_dir: /etc/crowdsec/
data_dir: /var/lib/crowdsec/data/
simulation_path: /etc/crowdsec/simulation.yaml
hub_dir: /var/lib/crowdsec/hub/
index_path: /var/lib/crowdsec/hub/.index.json
notification_dir: /etc/crowdsec/notifications/
plugin_dir: /usr/lib/crowdsec/plugins/
crowdsec_service:
acquisition_path: /etc/crowdsec/acquis.yaml
acquisition_dir: /etc/crowdsec/acquis.d
parser_routines: 1
cscli:
output: human
color: auto
db_config:
log_level: info
type: sqlite
db_path: /var/lib/crowdsec/data/crowdsec.db
#max_open_conns: 100
#user:
#password:
#db_name:
#host:
#port:
flush:
max_items: 5000
max_age: 7d
plugin_config:
  user: nobody # plugin processes run on behalf of this user
  group: nogroup # plugin processes run on behalf of this group
api:
client:
insecure_skip_verify: false
credentials_path: /etc/crowdsec/local_api_credentials.yaml
{% if node['name'] == 'fw' %}
server:
log_level: info
listen_uri: "[::]:8080"
profiles_path: /etc/crowdsec/profiles.yaml
console_path: /etc/crowdsec/console.yaml
online_client: # Central API credentials (to push signals and receive bad IPs)
credentials_path: /etc/crowdsec/online_api_credentials.yaml
trusted_ips: # IP ranges, or IPs which can have admin API access
- ::1
- 127.0.0.1
- {{ hostvars['fw']['network6']['subnet']['server'] }}
- {{ hostvars['fw']['network4']['subnet']['server'] }}
tls:
cert_file: /etc/crowdsec/ssl/crowdsec.crt
key_file: /etc/crowdsec/ssl/crowdsec.key
prometheus:
enabled: true
level: full
listen_addr: "[::]"
listen_port: 6060
{% endif %}

View File

@@ -0,0 +1,3 @@
url: https://{{ infra_uri['crowdsec']['domain'] }}:{{ infra_uri['crowdsec']['ports']['https'] }}
login: {{ node['name'] }}
password: {{ hostvars['console']['crowdsec']['machine'][node['name']] }}

View File

@@ -0,0 +1,49 @@
[Unit]
Description=Kopia backup service
Wants=network-online.target
After=network-online.target
[Service]
User=kopia
Group=kopia
Type=oneshot
# logging
StandardOutput=journal
StandardError=journal
CapabilityBoundingSet=CAP_DAC_READ_SEARCH
AmbientCapabilities=CAP_DAC_READ_SEARCH
ProtectSystem=strict
ProtectHome=tmpfs
InaccessiblePaths=/boot /root
{% if node['name'] == 'infra' %}
BindReadOnlyPaths=/home/infra/containers/postgresql/backups
{% elif node['name'] == 'app' %}
BindReadOnlyPaths=/home/app/data
{% endif %}
# In the system manager's (root) scope, %u always resolves to root, so the kopia UID is templated in explicitly
BindPaths=/etc/kopia
BindPaths=/etc/secrets/{{ kopia_uid }}
BindPaths=/var/cache/kopia
EnvironmentFile=/etc/secrets/{{ kopia_uid }}/kopia.env
ExecStartPre=/usr/bin/kopia repository connect server \
--url=https://{{ infra_uri['kopia']['domain'] }}:{{ infra_uri['kopia']['ports']['https'] }} \
--override-username={{ node['name'] }} \
--override-hostname={{ node['name'] }}.ilnmors.internal
{% if node['name'] == 'infra' %}
ExecStart=/usr/bin/kopia snapshot create \
/home/infra/containers/postgresql/backups
{% elif node['name'] == 'app' %}
ExecStart=/usr/bin/kopia snapshot create \
/home/app/data
{% endif %}
[Install]
WantedBy=multi-user.target

View File

@@ -0,0 +1,10 @@
[Unit]
Description=Daily Kopia backup timer
[Timer]
OnCalendar=*-*-* 03:00:00
Persistent=true
RandomizedDelaySec=300
[Install]
WantedBy=timers.target

View File

@@ -0,0 +1,5 @@
KOPIA_PASSWORD={{ hostvars['console']['kopia']['user'][node['name']] }}
KOPIA_CONFIG_PATH=/etc/kopia/repository.config
KOPIA_CACHE_DIRECTORY=/var/cache/kopia
KOPIA_LOG_DIR=/var/cache/kopia/logs
KOPIA_CHECK_FOR_UPDATES=false