1.0.0 Release IaaS
70 ansible/roles/app/tasks/node/set_raid.yaml Normal file
@@ -0,0 +1,70 @@
---
- name: Check btrfs installation
  ansible.builtin.shell: |
    command -v btrfs
  become: true # btrfs is located in /usr/sbin, which means root permission is needed.
  changed_when: false
  failed_when: false
  register: "is_btrfs_installed"
  ignore_errors: true

- name: Install btrfs
  ansible.builtin.apt:
    name: "btrfs-progs"
    state: "present"
  become: true
  when: is_btrfs_installed.rc != 0

- name: Set hard disk path
  ansible.builtin.shell: |
    set -o pipefail
    ls -1 /dev/disk/by-path/*{{ vm['pass_through']['sata_controller']['address'] }}* | \
      grep -v '\.0$' | \
      sort
  changed_when: false
  register: "hdd_path_list"

- name: Check app_hdd filesystem already exists
  ansible.builtin.command: |
    blkid -L {{ storage['btrfs']['label'] }}
  register: is_app_data
  changed_when: false
  failed_when: false
  become: true

- name: Check disk number
  ansible.builtin.fail:
    msg: "Fewer than 4 disks available for RAID10, found {{ hdd_path_list.stdout_lines | length }}"
  when: (hdd_path_list.stdout_lines | length) < 4

- name: Set btrfs raid10 volume
  ansible.builtin.shell: |
    mkfs.btrfs -f \
      -L {{ storage['btrfs']['label'] }} \
      -d {{ storage['btrfs']['level'] }} \
      -m {{ storage['btrfs']['level'] }} \
      {{ hdd_path_list.stdout_lines | join(' ') }}
  become: true
  when:
    - is_app_data.rc != 0
    - (hdd_path_list.stdout_lines | length) >= 4
  changed_when: is_mkfs.rc == 0
  register: "is_mkfs"

- name: Mount btrfs raid10 volume
  ansible.posix.mount:
    path: "{{ storage['btrfs']['mount_point'] }}"
    src: "LABEL={{ storage['btrfs']['label'] }}"
    state: "mounted"
    fstype: "btrfs"
    opts: "defaults,noatime,compress=zstd:3,autodefrag,degraded,nofail"
  become: true

- name: Set hard disk path permissions
  ansible.builtin.file:
    path: "{{ storage['btrfs']['mount_point'] }}"
    state: "directory"
    owner: "{{ ansible_user }}"
    group: "svadmins"
    mode: "0770"
  become: true
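A quick manual sanity check of the resulting volume (illustrative commands, not part of this commit; LABEL and MOUNT_POINT stand in for whatever storage['btrfs']['label'] and storage['btrfs']['mount_point'] expand to):

    sudo btrfs filesystem show LABEL      # should list 4+ devices for the array
    sudo btrfs filesystem df MOUNT_POINT  # Data and Metadata should both report RAID10
    findmnt MOUNT_POINT                   # confirms the mount written by ansible.posix.mount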
11 ansible/roles/auth/handlers/main.yaml Normal file
@@ -0,0 +1,11 @@
---
- name: Restart authelia
  ansible.builtin.systemd:
    name: "authelia.service"
    state: "restarted"
    enabled: true
    scope: "user"
    daemon_reload: true
  changed_when: false
  listen: "notification_restart_authelia"
  ignore_errors: true # noqa: ignore-errors
78 ansible/roles/auth/tasks/services/set_authelia.yaml Normal file
@@ -0,0 +1,78 @@
---
- name: Create authelia directory
  ansible.builtin.file:
    path: "{{ node['home_path'] }}/containers/{{ item }}"
    state: "directory"
    owner: "{{ ansible_user }}"
    group: "svadmins"
    mode: "0770"
  loop:
    - "authelia"
    - "authelia/config"
    - "authelia/certs"
  become: true

- name: Deploy authelia configuration file
  ansible.builtin.template:
    src: "{{ hostvars['console']['node']['config_path'] }}/services/containers/auth/authelia/config/authelia.yaml.j2"
    dest: "{{ node['home_path'] }}/containers/authelia/config/authelia.yaml"
    owner: "{{ ansible_user }}"
    group: "svadmins"
    mode: "0600"
  become: true
  notify: "notification_restart_authelia"
  no_log: true

- name: Deploy certificates
  ansible.builtin.copy:
    content: |
      {{ hostvars['console']['ca']['root']['crt'] }}
    dest: "{{ node['home_path'] }}/containers/authelia/certs/ilnmors_root_ca.crt"
    owner: "{{ ansible_user }}"
    group: "svadmins"
    mode: "0440"
  become: true
  no_log: true

- name: Register secret value to podman secret
  containers.podman.podman_secret:
    name: "{{ item.name }}"
    data: "{{ item.value }}"
    state: "present"
    force: true
  loop:
    - name: "AUTHELIA_JWT_SECRET"
      value: "{{ hostvars['console']['authelia']['jwt_secret'] }}"
    - name: "AUTHELIA_SESSION_SECRET"
      value: "{{ hostvars['console']['authelia']['session_secret'] }}"
    - name: "AUTHELIA_STORAGE_SECRET"
      value: "{{ hostvars['console']['authelia']['storage_secret'] }}"
    - name: "AUTHELIA_HMAC_SECRET"
      value: "{{ hostvars['console']['authelia']['hmac_secret'] }}"
    - name: "AUTHELIA_JWKS_RS256"
      value: "{{ hostvars['console']['authelia']['jwk_rs256'] }}"
    - name: "AUTHELIA_JWKS_ES256"
      value: "{{ hostvars['console']['authelia']['jwk_es256'] }}"
    - name: "AUTHELIA_LDAP_PASSWORD"
      value: "{{ hostvars['console']['ldap']['password']['authelia'] }}"
    - name: "POSTGRES_AUTHELIA_PASSWORD"
      value: "{{ hostvars['console']['postgresql']['password']['authelia'] }}"
  notify: "notification_restart_authelia"
  no_log: true

- name: Deploy container file
  ansible.builtin.template:
    src: "{{ hostvars['console']['node']['config_path'] }}/services/containers/auth/authelia/authelia.container.j2"
    dest: "{{ node['home_path'] }}/.config/containers/systemd/authelia.container"
    owner: "{{ ansible_user }}"
    group: "svadmins"
    mode: "0644"
  notify: "notification_restart_authelia"

- name: Enable authelia.service
  ansible.builtin.systemd:
    name: "authelia.service"
    state: "started"
    enabled: true
    daemon_reload: true
    scope: "user"
101 ansible/roles/common/handlers/main.yaml Normal file
@@ -0,0 +1,101 @@
---
- name: Restart ca certificate
  ansible.builtin.command: |
    update-ca-certificates
  become: true
  changed_when: false
  listen: "notification_update_ca"
  ignore_errors: true # noqa: ignore-errors

- name: Restart sshd
  ansible.builtin.systemd:
    name: "sshd.service"
    state: "restarted"
    enabled: true
  become: true
  changed_when: false
  listen: "notification_restart_sshd"
  ignore_errors: true # noqa: ignore-errors

- name: Reload systemd-networkd
  ansible.builtin.systemd:
    name: "systemd-networkd.service"
    state: "reloaded"
    enabled: true
  become: true
  changed_when: false
  listen: "notification_reload_networkctl"
  ignore_errors: true # noqa: ignore-errors

- name: Reload systemd-resolved.service
  ansible.builtin.systemd:
    name: "systemd-resolved.service"
    state: "reloaded"
    enabled: true
  become: true
  changed_when: false
  listen: "notification_reload_resolved"
  ignore_errors: true # noqa: ignore-errors

- name: Restart systemd-timesyncd
  ansible.builtin.systemd:
    name: "systemd-timesyncd.service"
    state: "restarted"
    enabled: true
  become: true
  changed_when: false
  listen: "notification_restart_timesyncd"
  ignore_errors: true # noqa: ignore-errors

- name: Update nftables
  ansible.builtin.command: |
    nft -f /etc/nftables.conf
  become: true
  changed_when: false
  listen: "notification_update_nftables"
  ignore_errors: true # noqa: ignore-errors

- name: Restart crowdsec
  ansible.builtin.systemd:
    name: "crowdsec.service"
    state: "restarted"
    enabled: true
    daemon_reload: true
  become: true
  changed_when: false
  listen: "notification_restart_crowdsec"
  ignore_errors: true # noqa: ignore-errors

- name: Restart crowdsec bouncer
  ansible.builtin.systemd:
    name: "crowdsec-firewall-bouncer.service"
    state: "restarted"
    enabled: true
    daemon_reload: true
  become: true
  when: node['name'] == 'fw'
  changed_when: false
  listen: "notification_restart_crowdsec_bouncer"
  ignore_errors: true # noqa: ignore-errors

- name: Restart caddy
  ansible.builtin.systemd:
    name: "caddy.service"
    state: "restarted"
    enabled: true
    scope: "user"
    daemon_reload: true
  changed_when: false
  listen: "notification_restart_caddy"
  ignore_errors: true # noqa: ignore-errors

- name: Restart alloy
  ansible.builtin.systemd:
    name: "alloy.service"
    state: "restarted"
    enabled: true
    daemon_reload: true
  become: true
  changed_when: false
  listen: "notification_restart_alloy"
  ignore_errors: true # noqa: ignore-errors
34 ansible/roles/common/tasks/node/create_default_dir.yaml Normal file
@@ -0,0 +1,34 @@
---
- name: Create common secret directory
  ansible.builtin.file:
    path: "/etc/secrets"
    state: "directory"
    owner: "root"
    group: "root"
    mode: "0711"
  become: true

- name: Create user secret directory
  ansible.builtin.file:
    path: "/etc/secrets/{{ node['uid'] }}"
    state: "directory"
    owner: "{{ ansible_user }}"
    group: "root"
    mode: "0500"
  become: true

- name: Create user systemd directory
  ansible.builtin.file:
    path: "{{ node['home_path'] }}/.config/systemd/user"
    state: "directory"
    owner: "{{ ansible_user }}"
    group: "svadmins"
    mode: "0700"

- name: Create quadlet directory
  ansible.builtin.file:
    path: "{{ node['home_path'] }}/.config/containers/systemd"
    state: "directory"
    owner: "{{ ansible_user }}"
    group: "svadmins"
    mode: "0700"
9 ansible/roles/common/tasks/node/deploy_hosts.yaml Normal file
@@ -0,0 +1,9 @@
---
- name: Deploy /etc/hosts
  ansible.builtin.template:
    src: "{{ hostvars['console']['node']['config_path'] }}/node/common/hosts.j2"
    dest: "/etc/hosts"
    owner: "root"
    group: "root"
    mode: "0644"
  become: true
10 ansible/roles/common/tasks/node/deploy_root_ca.yaml Normal file
@@ -0,0 +1,10 @@
---
- name: Deploy root_ca.crt
  ansible.builtin.copy:
    content: "{{ hostvars['console']['ca']['root']['crt'] }}"
    dest: "/usr/local/share/ca-certificates/ilnmors_root_ca.crt"
    owner: "root"
    group: "root"
    mode: "0644"
  become: true
  notify: "notification_update_ca"
20 ansible/roles/common/tasks/node/set_linger.yaml Normal file
@@ -0,0 +1,20 @@
---
- name: Checking linger
  ansible.builtin.stat:
    path: "/var/lib/systemd/linger/{{ ansible_user }}"
  register: "is_linger_file"

- name: Activate linger
  when: not is_linger_file.stat.exists
  block:
    - name: Enable linger
      ansible.builtin.command: |
        loginctl enable-linger {{ ansible_user }}
      become: true
      changed_when: true

    - name: Reboot system to ensure DBUS socket activation
      ansible.builtin.reboot:
        reboot_timeout: 300
        post_reboot_delay: 3
      become: true
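To confirm lingering actually took effect after the reboot (illustrative check, not part of this commit; substitute the real ansible_user):

    loginctl show-user ANSIBLE_USER --property=Linger   # should print Linger=yes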
23 ansible/roles/common/tasks/node/set_networkd.yaml Normal file
@@ -0,0 +1,23 @@
---
- name: Set network files directory
  ansible.builtin.set_fact:
    directory_name: "{{ node['name'] }}"
  when: node['name'] in ["vmm", "fw"]

- name: Set target vm
  ansible.builtin.set_fact:
    target_vm: "{{ node['name'] }}"

- name: Deploy networkd files
  ansible.builtin.template:
    src: "{{ item }}"
    dest: "/etc/systemd/network/{{ item | basename }}"
    owner: "root"
    group: "systemd-network"
    mode: "0640"
  loop: "{{ query('fileglob', hostvars['console']['node']['config_path'] + '/node/' + (directory_name | default('common')) + '/networkd/*') | sort }}"
  become: true
  notify:
    - "notification_reload_networkctl"
    - "notification_restart_crowdsec"
  no_log: true
36 ansible/roles/common/tasks/node/set_nftables.yaml Normal file
@@ -0,0 +1,36 @@
---
- name: Check nftables installation
  ansible.builtin.shell: |
    command -v nft
  become: true # nftables is located in /usr/sbin, which means root permission is needed.
  changed_when: false
  failed_when: false
  register: "is_nftables_installed"
  ignore_errors: true

- name: Install nftables
  ansible.builtin.apt:
    name: "nftables"
    state: "present"
  become: true
  when: is_nftables_installed.rc != 0

- name: Enable nftables.service
  ansible.builtin.systemd:
    name: "nftables.service"
    state: "started"
    enabled: true
  become: true

- name: Deploy nftables.conf
  ansible.builtin.template:
    src: "{{ hostvars['console']['node']['config_path'] }}/node/{{ node['name'] }}/nftables.conf.j2"
    dest: "/etc/nftables.conf"
    owner: "root"
    group: "root"
    mode: "0700"
    validate: "/usr/sbin/nft -c -f %s"
  become: true
  notify:
    - "notification_update_nftables"
    - "notification_restart_crowdsec_bouncer"
39 ansible/roles/common/tasks/node/set_resolved.yaml Normal file
@@ -0,0 +1,39 @@
---
- name: Enable systemd-resolved.service
  ansible.builtin.systemd:
    name: "systemd-resolved.service"
    state: "started"
    enabled: true
  become: true

- name: Check global.conf
  ansible.builtin.stat:
    path: "/etc/systemd/resolved.conf.d/global.conf"
  register: "is_global_conf"

- name: Create resolved directory
  ansible.builtin.file:
    path: "/etc/systemd/resolved.conf.d"
    state: "directory"
    owner: "root"
    group: "root"
    mode: "0755"
  become: true

- name: Deploy global conf file
  ansible.builtin.template:
    src: "{{ hostvars['console']['node']['config_path'] }}/node/common/resolved/global.conf.j2"
    dest: "/etc/systemd/resolved.conf.d/global.conf"
    owner: "root"
    group: "systemd-resolve"
    mode: "0640"
  become: true
  notify: "notification_reload_resolved"

- name: Restart systemd-resolved.service when it is first initialized
  ansible.builtin.systemd:
    name: "systemd-resolved.service"
    state: "restarted"
    enabled: true
  become: true
  when: not is_global_conf.stat.exists
119 ansible/roles/common/tasks/node/set_ssh_host.yaml Normal file
@@ -0,0 +1,119 @@
---
- name: Deploy /etc/ssh/local_ssh_ca.pub
  ansible.builtin.copy:
    content: |
      {{ hostvars['console']['ssh']['ca']['pub'] }}
    dest: "/etc/ssh/local_ssh_ca.pub"
    owner: "root"
    group: "root"
    mode: "0644"
  become: true
  no_log: true

- name: Check ssh_host_key-cert.pub
  ansible.builtin.stat:
    path: "/etc/ssh/ssh_host_ed25519_key-cert.pub"
  register: "is_signed_ca_key"

- name: Get current ssh_host_key-cert.pub Key ID
  ansible.builtin.shell: |
    set -o pipefail
    ssh-keygen -L -f /etc/ssh/ssh_host_ed25519_key-cert.pub | \
      grep "Key ID" | \
      sed -E 's/.*Key ID: "(.*)"/\1/'
  when: is_signed_ca_key.stat.exists
  changed_when: false
  register: "current_key_id"
  no_log: true

- name: Get current ssh_host_key-cert.pub san
  ansible.builtin.shell: |
    set -o pipefail
    ssh-keygen -L -f /etc/ssh/ssh_host_ed25519_key-cert.pub | \
      sed -n '/Principals:/,/Critical Options:/p' | \
      sed '1d;$d' | \
      sed 's/^[[:space:]]*//'
  when: is_signed_ca_key.stat.exists
  changed_when: false
  register: "current_san_id"
  no_log: true

- name: Set current key information
  ansible.builtin.set_fact:
    current_id_key: "{{ current_key_id.stdout }}"
    current_san_list: "{{ current_san_id.stdout_lines }}"
  when: is_signed_ca_key.stat.exists
  no_log: true

- name: Compare key values between current information and defined information
  ansible.builtin.set_fact:
    is_certificate_info_different: true
  when: (current_id_key | default("")) != node['name'] or (current_san_list | default([])) != (node['ssh_san'].split(',') | map('trim') | list)

- name: Get SSH CA and signing
  when: not is_signed_ca_key.stat.exists or (is_certificate_info_different | default(false))
  block:
    - name: Get ssh_host_key.pub from remote server
      ansible.builtin.fetch:
        src: "/etc/ssh/ssh_host_ed25519_key.pub"
        dest: "/run/user/{{ hostvars['console']['node']['uid'] }}/{{ node['name'] }}_ssh_host_ed25519_key.pub"
        flat: true
      become: true

    - name: Get SSH CA
      delegate_to: "console"
      ansible.builtin.copy:
        content: |
          {{ hostvars['console']['ssh']['ca']['key'] }}
        dest: "/run/user/{{ hostvars['console']['node']['uid'] }}/local_ssh_ca_private_key"
        owner: "console"
        group: "svadmins"
        mode: "0400"
      no_log: true

    - name: Sign on ssh host keys (pub file)
      delegate_to: "console"
      ansible.builtin.command: |
        ssh-keygen -s /run/user/{{ hostvars['console']['node']['uid'] }}/local_ssh_ca_private_key \
          -h \
          -I "{{ node['name'] }}" \
          -n "{{ node['ssh_san'] }}" \
          /run/user/{{ hostvars['console']['node']['uid'] }}/{{ node['name'] }}_ssh_host_ed25519_key.pub
      changed_when: not is_signed_ca_key.stat.exists or (is_certificate_info_different | default(false))
      no_log: true

    - name: Deploy signed pub file
      ansible.builtin.copy:
        src: "/run/user/{{ hostvars['console']['node']['uid'] }}/{{ node['name'] }}_ssh_host_ed25519_key-cert.pub"
        dest: "/etc/ssh/ssh_host_ed25519_key-cert.pub"
        owner: "root"
        group: "root"
        mode: "0644"
      become: true
      notify: "notification_restart_sshd"

  always:
    - name: Clean temporary files
      delegate_to: "console"
      ansible.builtin.file:
        path: "/run/user/{{ hostvars['console']['node']['uid'] }}/{{ item }}"
        state: "absent"
      loop:
        - "{{ node['name'] }}_ssh_host_ed25519_key.pub"
        - "{{ node['name'] }}_ssh_host_ed25519_key-cert.pub"
        - "local_ssh_ca_private_key"
      no_log: true

- name: Set sshd_config.d files
  ansible.builtin.copy:
    src: "{{ hostvars['console']['node']['config_path'] }}/node/common/ssh/{{ item }}"
    dest: "/etc/ssh/sshd_config.d/{{ item }}"
    owner: "root"
    group: "root"
    mode: "0644"
  loop:
    - "prohibit_root.conf"
    - "ssh_ca.conf"
    - "host_certificate.conf"
  become: true
  notify: "notification_restart_sshd"
20 ansible/roles/common/tasks/node/set_timesyncd.yaml Normal file
@@ -0,0 +1,20 @@
---
- name: Create timesyncd.conf.d
  ansible.builtin.file:
    path: "/etc/systemd/timesyncd.conf.d"
    state: "directory"
    owner: "root"
    group: "root"
    mode: "0755"
  become: true

- name: Deploy timesyncd.conf.d/local-ntp.conf
  ansible.builtin.copy:
    src: "{{ hostvars['console']['node']['config_path'] }}/node/common/timesyncd/local-ntp.conf"
    dest: "/etc/systemd/timesyncd.conf.d/local-ntp.conf"
    owner: "root"
    group: "systemd-timesync"
    mode: "0640"
  become: true
  notify: "notification_restart_timesyncd"
  no_log: true
15 ansible/roles/common/tasks/node/set_wireguard.yaml Normal file
@@ -0,0 +1,15 @@
---
- name: Create wg0 files
  ansible.builtin.template:
    src: "{{ hostvars['console']['node']['config_path'] }}/node/fw/wireguard/{{ item }}"
    dest: "/etc/systemd/network/{{ item }}"
    owner: "root"
    group: "systemd-network"
    mode: "0640"
  loop:
    - "30-fw-wg0.netdev"
    - "31-fw-wg0.network"
  become: true
  when: node['name'] == 'fw'
  notify: "notification_reload_networkctl"
  no_log: true
73 ansible/roles/common/tasks/services/set_alloy.yaml Normal file
@@ -0,0 +1,73 @@
---
- name: Gather system facts (hardware)
  ansible.builtin.setup:
    gather_subset:
      - hardware
  become: true

- name: Deploy alloy deb file (x86_64)
  ansible.builtin.copy:
    src: "{{ hostvars['console']['node']['data_path'] }}/bin/alloy-{{ version['packages']['alloy'] }}-amd64.deb"
    dest: "/var/cache/apt/archives/alloy-{{ version['packages']['alloy'] }}.deb"
    owner: "root"
    group: "root"
    mode: "0644"
  become: true
  when: ansible_facts['architecture'] == "x86_64"

- name: Deploy alloy deb file (aarch64)
  ansible.builtin.copy:
    src: "{{ hostvars['console']['node']['data_path'] }}/bin/alloy-{{ version['packages']['alloy'] }}-arm64.deb"
    dest: "/var/cache/apt/archives/alloy-{{ version['packages']['alloy'] }}.deb"
    owner: "root"
    group: "root"
    mode: "0644"
  become: true
  when: ansible_facts['architecture'] == "aarch64"

- name: Install alloy
  ansible.builtin.apt:
    deb: "/var/cache/apt/archives/alloy-{{ version['packages']['alloy'] }}.deb"
    state: "present"
  become: true

- name: Deploy alloy config
  ansible.builtin.template:
    src: "{{ hostvars['console']['node']['config_path'] }}/services/systemd/common/alloy/config.alloy.j2"
    dest: "/etc/alloy/config.alloy"
    owner: "root"
    group: "root"
    mode: "0644"
  become: true
  notify: "notification_restart_alloy"
  no_log: true

- name: Create alloy.service.d
  ansible.builtin.file:
    path: "/etc/systemd/system/alloy.service.d"
    state: "directory"
    owner: "root"
    group: "root"
    mode: "0755"
  become: true

- name: Set alloy.service.d/override.conf
  ansible.builtin.copy:
    dest: "/etc/systemd/system/alloy.service.d/override.conf"
    content: |
      [Service]
      Restart=always
      RestartSec=60
    owner: "root"
    group: "root"
    mode: "0644"
  become: true
  notify: "notification_restart_alloy"

- name: Enable alloy service
  ansible.builtin.systemd:
    name: "alloy.service"
    state: "started"
    enabled: true
    daemon_reload: true
  become: true
99 ansible/roles/common/tasks/services/set_caddy.yaml Normal file
@@ -0,0 +1,99 @@
---
# infra, auth, app (vmm and fw have no podman)
- name: Create caddy directory
  ansible.builtin.file:
    path: "{{ node['home_path'] }}/containers/{{ item }}"
    owner: "{{ ansible_user }}"
    group: "svadmins"
    state: "directory"
    mode: "0770"
  loop:
    - "caddy"
    - "caddy/etc"
    - "caddy/data"
    - "caddy/build"
  become: true

- name: Create caddy log directory for auth
  ansible.builtin.file:
    path: /var/log/caddy
    owner: "{{ ansible_user }}"
    group: "svadmins"
    state: "directory"
    mode: "0755"
  become: true
  when: node['name'] == "auth"

- name: Register acme key to podman secret
  containers.podman.podman_secret:
    name: "CADDY_ACME_KEY"
    data: "{{ hostvars['console']['ca']['acme_key'] }}"
    state: "present"
    force: true
  notify: "notification_restart_caddy"
  no_log: true

- name: Register crowdsec bouncer key to podman secret
  containers.podman.podman_secret:
    name: "CADDY_CROWDSEC_KEY"
    data: "{{ hostvars['console']['crowdsec']['bouncer']['caddy'] }}"
    state: "present"
    force: true
  when: node['name'] == "auth"
  notify: "notification_restart_caddy"
  no_log: true

- name: Deploy containerfile for build
  ansible.builtin.template:
    src: "{{ hostvars['console']['node']['config_path'] }}/services/containers/common/caddy/build/caddy.containerfile.j2"
    dest: "{{ node['home_path'] }}/containers/caddy/build/Containerfile"
    owner: "{{ ansible_user }}"
    group: "svadmins"
    mode: "0640"

- name: Deploy root crt for build
  ansible.builtin.copy:
    content: "{{ hostvars['console']['ca']['root']['crt'] }}"
    dest: "{{ node['home_path'] }}/containers/caddy/build/ilnmors_root_ca.crt"
    owner: "{{ ansible_user }}"
    group: "svadmins"
    mode: "0640"
  no_log: true

- name: Build caddy container image
  containers.podman.podman_image:
    name: "ilnmors.internal/{{ node['name'] }}/caddy"
    # check tags from container file
    tag: "{{ version['containers']['caddy'] }}"
    state: "build"
    path: "{{ node['home_path'] }}/containers/caddy/build"

- name: Prune caddy dangling images
  containers.podman.podman_prune:
    image: true

- name: Deploy caddyfile
  ansible.builtin.template:
    src: "{{ hostvars['console']['node']['config_path'] }}/services/containers/common/caddy/etc/{{ node['name'] }}/Caddyfile.j2"
    dest: "{{ node['home_path'] }}/containers/caddy/etc/Caddyfile"
    owner: "{{ ansible_user }}"
    group: "svadmins"
    mode: "0600"
  notify: "notification_restart_caddy"

- name: Deploy container file
  ansible.builtin.template:
    src: "{{ hostvars['console']['node']['config_path'] }}/services/containers/common/caddy/caddy.container.j2"
    dest: "{{ node['home_path'] }}/.config/containers/systemd/caddy.container"
    owner: "{{ ansible_user }}"
    group: "svadmins"
    mode: "0644"
  notify: "notification_restart_caddy"

- name: Enable caddy
  ansible.builtin.systemd:
    name: "caddy.service"
    state: "started"
    enabled: true
    daemon_reload: true
    scope: "user"
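Because the caddy quadlet runs in the user scope, a manual status check would run under the service account rather than root (illustrative, not part of this commit):

    systemctl --user status caddy.service
    journalctl --user -u caddy.service -n 50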
304 ansible/roles/common/tasks/services/set_crowdsec.yaml Normal file
@@ -0,0 +1,304 @@
---
- name: Check crowdsec installed
  ansible.builtin.shell: |
    command -v crowdsec
  changed_when: false
  failed_when: false
  register: "is_crowdsec_installed"
  ignore_errors: true

- name: Check crowdsec bouncer installed
  ansible.builtin.shell: |
    command -v crowdsec-firewall-bouncer
  when: node['name'] == "fw"
  changed_when: false
  failed_when: false
  register: "is_crowdsec_bouncer_installed"
  ignore_errors: true

- name: Install crowdsec
  ansible.builtin.apt:
    name: "crowdsec"
    state: "present"
  become: true
  when: is_crowdsec_installed.rc != 0

- name: Install crowdsec bouncers
  ansible.builtin.apt:
    name: "crowdsec-firewall-bouncer"
    state: "present"
  become: true
  when:
    - node['name'] == "fw"
    - is_crowdsec_bouncer_installed.rc != 0

- name: Set acquis.d list for bouncer
  ansible.builtin.set_fact:
    acquisd_list:
      fw:
        collection: "crowdsecurity/suricata"
        config: "suricata.yaml"
      auth:
        collection: "crowdsecurity/caddy"
        config: "caddy.yaml"

- name: Deploy crowdsec-update service files
  ansible.builtin.copy:
    src: "{{ hostvars['console']['node']['config_path'] }}/services/systemd/common/crowdsec/{{ item }}"
    dest: "/etc/systemd/system/{{ item }}"
    owner: "root"
    group: "root"
    mode: "0644"
    validate: "/usr/bin/systemd-analyze verify %s"
  loop:
    - "crowdsec-update.service"
    - "crowdsec-update.timer"
  become: true

- name: Deploy crowdsec config.yaml
  ansible.builtin.template:
    src: "{{ hostvars['console']['node']['config_path'] }}/services/systemd/common/crowdsec/etc/config.yaml.j2"
    dest: "/etc/crowdsec/config.yaml"
    owner: "root"
    group: "root"
    mode: "0644"
  become: true
  notify: "notification_restart_crowdsec"
  no_log: true

- name: Deploy crowdsec local_api_credentials.yaml
  ansible.builtin.template:
    src: "{{ hostvars['console']['node']['config_path'] }}/services/systemd/common/crowdsec/etc/local_api_credentials.yaml.j2"
    dest: "/etc/crowdsec/local_api_credentials.yaml"
    owner: "root"
    group: "root"
    mode: "0600"
  become: true
  notify: "notification_restart_crowdsec"
  no_log: true

- name: Set Crowdsec LAPI configuration
  when: node['name'] == "fw"
  block:
    - name: Create crowdsec ssl directory
      ansible.builtin.file:
        path: "/etc/crowdsec/ssl"
        state: "directory"
        owner: "root"
        group: "root"
        mode: "0700"
      become: true

    - name: Deploy crowdsec lapi ssl certificate
      ansible.builtin.copy:
        content: |
          {{ hostvars['console']['crowdsec']['crt'] | trim }}
          {{ hostvars['console']['ca']['intermediate']['crt'] }}
        dest: "/etc/crowdsec/ssl/crowdsec.crt"
        owner: "root"
        group: "root"
        mode: "0644"
      become: true
      notify: "notification_restart_crowdsec"
      no_log: true

    - name: Deploy crowdsec lapi ssl key
      ansible.builtin.copy:
        content: |
          {{ hostvars['console']['crowdsec']['key'] }}
        dest: "/etc/crowdsec/ssl/crowdsec.key"
        owner: "root"
        group: "root"
        mode: "0400"
      become: true
      notify: "notification_restart_crowdsec"
      no_log: true

    - name: Get existing machines list
      ansible.builtin.command:
        cmd: "cscli machines list -o json"
      become: true
      changed_when: false
      register: "existing_crowdsec_machines_list"

    - name: Set existing machines' name
      ansible.builtin.set_fact:
        existing_machines_name: "{{ existing_crowdsec_machines_list.stdout | from_json | map(attribute='machineId') | list }}"

    - name: Set goal machines' name
      ansible.builtin.set_fact:
        machines_name: ["fw", "vmm", "infra", "auth", "app"]
      no_log: true

    - name: Prune unknown (random) machines
      ansible.builtin.command:
        cmd: "cscli machines delete {{ item }}"
      loop: "{{ existing_machines_name | difference(machines_name) }}"
      become: true
      changed_when: true

    - name: Register crowdsec machines to LAPI server
      ansible.builtin.command:
        cmd: "cscli machines add {{ item }} --password {{ hostvars['console']['crowdsec']['machine'][item] }} --force -f /dev/null"
      loop: "{{ machines_name }}"
      become: true
      changed_when: false
      no_log: true

    - name: Get existing bouncers list
      ansible.builtin.command:
        cmd: "cscli bouncers list -o json"
      become: true
      register: "existing_crowdsec_bouncers_list"
      changed_when: false

    - name: Set existing bouncers' name
      ansible.builtin.set_fact:
        existing_bouncers_name: "{{ existing_crowdsec_bouncers_list.stdout | from_json | map(attribute='name') | list }}"

    - name: Flush bouncers
      ansible.builtin.command:
        cmd: "cscli bouncers delete {{ item }}"
      loop: "{{ existing_bouncers_name }}"
      become: true
      changed_when: true

    - name: Set bouncers' name
      ansible.builtin.set_fact:
        bouncers_name: ["fw", "caddy"]

    - name: Register Firewall Bouncer to LAPI
      ansible.builtin.command:
        cmd: "cscli bouncers add {{ item }}-bouncer -k {{ hostvars['console']['crowdsec']['bouncer'][item] }}"
      loop: "{{ bouncers_name }}"
      become: true
      changed_when: true
      notify: "notification_restart_crowdsec_bouncer"
      no_log: true

- name: Set crowdsec bouncer
  when: node['name'] in acquisd_list
  block:
    - name: Install crowdsec collection
      ansible.builtin.command:
        cmd: "cscli collections install {{ acquisd_list[node['name']]['collection'] }}"
      become: true
      changed_when: "'overwrite' not in is_collection_installed.stderr"
      failed_when:
        - is_collection_installed.rc != 0
        - "'already installed' not in is_collection_installed.stderr"
      register: "is_collection_installed"

    - name: Create crowdsec acquis.d directory
      ansible.builtin.file:
        path: "/etc/crowdsec/acquis.d"
        state: "directory"
        owner: "root"
        group: "root"
        mode: "0755"
      become: true

    - name: Create whitelists.yaml
      ansible.builtin.template:
        src: "{{ hostvars['console']['node']['config_path'] }}/services/systemd/common/crowdsec/bouncers/whitelists.yaml.j2"
        dest: "/etc/crowdsec/parsers/s02-enrich/whitelists.yaml"
        owner: "root"
        group: "root"
        mode: "0644"
      become: true
      notify:
        - "notification_restart_crowdsec"
        - "notification_restart_crowdsec_bouncer"
      no_log: true

    - name: Deploy acquis.d file
      ansible.builtin.copy:
        src: "{{ hostvars['console']['node']['config_path'] }}/services/systemd/common/crowdsec/acquis.d/{{ acquisd_list[node['name']]['config'] }}"
        dest: "/etc/crowdsec/acquis.d/{{ acquisd_list[node['name']]['config'] }}"
        owner: "root"
        group: "root"
        mode: "0644"
      become: true
      notify: "notification_restart_crowdsec"

- name: Set Crowdsec-Firewall-Bouncer
  when: node['name'] == "fw"
  block:
    - name: Deploy crowdsec-firewall-bouncer.yaml
      ansible.builtin.template:
        src: "{{ hostvars['console']['node']['config_path'] }}/services/systemd/common/crowdsec/bouncers/crowdsec-firewall-bouncer.yaml.j2"
        dest: "/etc/crowdsec/bouncers/crowdsec-firewall-bouncer.yaml"
        owner: "root"
        group: "root"
        mode: "0600"
      become: true
      notify: "notification_restart_crowdsec_bouncer"

    - name: Delete crowdsec-firewall-bouncer.yaml subfiles (.id, .local)
      ansible.builtin.file:
        path: "/etc/crowdsec/bouncers/crowdsec-firewall-bouncer.yaml.{{ item }}"
        state: "absent"
      loop:
        - "local"
        - "id"
      become: true
      notify: "notification_restart_crowdsec_bouncer"

    - name: Create crowdsec-firewall-bouncer.service.d
      ansible.builtin.file:
        path: "/etc/systemd/system/crowdsec-firewall-bouncer.service.d"
        state: "directory"
        owner: "root"
        group: "root"
        mode: "0755"
      become: true

    - name: Set crowdsec-firewall-bouncer.service.d/override.conf
      ansible.builtin.copy:
        dest: "/etc/systemd/system/crowdsec-firewall-bouncer.service.d/override.conf"
        content: |
          [Service]
          Type=simple
          TimeoutStartSec=600
          Restart=always
          RestartSec=60
        owner: "root"
        group: "root"
        mode: "0644"
      become: true
      notify: "notification_restart_crowdsec_bouncer"

- name: Create crowdsec.service.d
  ansible.builtin.file:
    path: "/etc/systemd/system/crowdsec.service.d"
    state: "directory"
    owner: "root"
    group: "root"
    mode: "0755"
  become: true

- name: Set crowdsec.service.d/override.conf
  ansible.builtin.copy:
    dest: "/etc/systemd/system/crowdsec.service.d/override.conf"
    content: |
      [Service]
      Restart=always
      RestartSec=60
    owner: "root"
    group: "root"
    mode: "0644"
  become: true
  notify: "notification_restart_crowdsec"

- name: Enable auto crowdsec rules update
  ansible.builtin.systemd:
    name: "crowdsec-update.timer"
    state: "started"
    enabled: true
    daemon_reload: true
  become: true

# cscli bouncers list
# cscli machines list
# cscli metrics
137 ansible/roles/common/tasks/services/set_kopia.yaml Normal file
@@ -0,0 +1,137 @@
---
- name: Gather system facts (hardware)
  ansible.builtin.setup:
    gather_subset:
      - hardware
  become: true

- name: Check kopia installation
  ansible.builtin.shell: |
    command -v kopia
  changed_when: false
  failed_when: false
  register: "is_kopia_installed"
  ignore_errors: true

- name: Set console kopia
  when: node['name'] == 'console'
  block:
    - name: Apply cli tools (x86_64)
      ansible.builtin.apt:
        deb: "{{ node['data_path'] }}/bin/kopia-{{ version['packages']['kopia'] }}-amd64.deb"
        state: "present"
      become: true
      when:
        - ansible_facts['architecture'] == "x86_64"
        - is_kopia_installed.rc != 0
    - name: Apply cli tools (aarch64)
      ansible.builtin.apt:
        deb: "{{ node['data_path'] }}/bin/kopia-{{ version['packages']['kopia'] }}-arm64.deb"
        state: "present"
      become: true
      when:
        - ansible_facts['architecture'] == "aarch64"
        - is_kopia_installed.rc != 0
    - name: Connect kopia server
      environment:
        KOPIA_PASSWORD: "{{ hostvars['console']['kopia']['user']['console'] }}"
      ansible.builtin.shell: |
        /usr/bin/kopia repository connect server \
          --url=https://{{ infra_uri['kopia']['domain'] }}:{{ infra_uri['kopia']['ports']['https'] }} \
          --override-username=console \
          --override-hostname=console.ilnmors.internal
      changed_when: false
      failed_when: is_kopia_connected.rc != 0
      register: "is_kopia_connected"
      no_log: true

- name: Set infra/app kopia
  when: node['name'] in ['infra', 'app']
  block:
    - name: Set kopia uid
      ansible.builtin.set_fact:
        kopia_uid: 951
    - name: Deploy kopia deb file (x86_64)
      ansible.builtin.copy:
        src: "{{ hostvars['console']['node']['data_path'] }}/bin/kopia-{{ version['packages']['kopia'] }}-amd64.deb"
        dest: "/var/cache/apt/archives/kopia-{{ version['packages']['kopia'] }}.deb"
        owner: "root"
        group: "root"
        mode: "0644"
      become: true
      when: ansible_facts['architecture'] == "x86_64"
    - name: Deploy kopia deb file (aarch64)
      ansible.builtin.copy:
        src: "{{ hostvars['console']['node']['data_path'] }}/bin/kopia-{{ version['packages']['kopia'] }}-arm64.deb"
        dest: "/var/cache/apt/archives/kopia-{{ version['packages']['kopia'] }}.deb"
        owner: "root"
        group: "root"
        mode: "0644"
      become: true
      when: ansible_facts['architecture'] == "aarch64"
    - name: Create kopia group
      ansible.builtin.group:
        name: "kopia"
        gid: "{{ kopia_uid }}"
        state: "present"
      become: true
    - name: Create kopia user
      ansible.builtin.user:
        name: "kopia"
        uid: "{{ kopia_uid }}"
        group: "kopia"
        shell: "/usr/sbin/nologin"
        password_lock: true
        comment: "Kopia backup User"
        state: "present"
      become: true
    - name: Create kopia directory
      ansible.builtin.file:
        path: "{{ item.name }}"
        state: "directory"
        owner: "kopia"
        group: "root"
        mode: "{{ item.mode }}"
      loop:
        - name: "/etc/kopia"
          mode: "0700"
        - name: "/etc/secrets/951"
          mode: "0500"
        - name: "/var/cache/kopia"
          mode: "0700"
      become: true
      no_log: true
    - name: Install kopia
      ansible.builtin.apt:
        deb: "/var/cache/apt/archives/kopia-{{ version['packages']['kopia'] }}.deb"
        state: "present"
      become: true
      when: is_kopia_installed.rc != 0
    - name: Deploy kopia env
      ansible.builtin.template:
        src: "{{ hostvars['console']['node']['config_path'] }}/services/systemd/common/kopia/kopia.env.j2"
        dest: "/etc/secrets/{{ kopia_uid }}/kopia.env"
        owner: "{{ kopia_uid }}"
        group: "root"
        mode: "0400"
      become: true
      no_log: true
    - name: Deploy kopia service files
      ansible.builtin.template:
        src: "{{ hostvars['console']['node']['config_path'] }}/services/systemd/common/kopia/{{ item }}.j2"
        dest: "/etc/systemd/system/{{ item }}"
        owner: "root"
        group: "root"
        mode: "0644"
        validate: "/usr/bin/systemd-analyze verify %s"
      loop:
        - "kopia-backup.service"
        - "kopia-backup.timer"
      become: true
    - name: Enable kopia backup timer
      ansible.builtin.systemd:
        name: "kopia-backup.timer"
        state: "started"
        enabled: true
        daemon_reload: true
      become: true
46 ansible/roles/common/tasks/services/set_podman.yaml Normal file
@@ -0,0 +1,46 @@
---
- name: Check podman installation
  ansible.builtin.shell: |
    command -v podman
  changed_when: false
  failed_when: false
  register: "is_podman_installed"
  ignore_errors: true

- name: Create container directory
  ansible.builtin.file:
    path: "{{ node['home_path'] }}/containers"
    owner: "{{ ansible_user }}"
    group: "svadmins"
    state: "directory"
    mode: "0700"

- name: Create container data directory for app
  ansible.builtin.file:
    path: "{{ node['home_path'] }}/data/containers"
    owner: "{{ ansible_user }}"
    group: "svadmins"
    state: "directory"
    mode: "0770"
  when: node['name'] == "app"

- name: Install podman and reset ssh connection for initiating
  when: is_podman_installed.rc != 0
  become: true
  block:
    - name: Set subid scope (Overwrite)
      ansible.builtin.copy:
        content: |
          {{ ansible_user }}:100000:65536
        dest: "/etc/sub{{ item }}"
        owner: "root"
        group: "root"
        mode: "0644"
      loop:
        - "uid"
        - "gid"
    - name: Install podman
      ansible.builtin.apt:
        name:
          - "podman"
        state: "present"
8 ansible/roles/console/handlers/main.yaml Normal file
@@ -0,0 +1,8 @@
---
- name: Register font
  ansible.builtin.shell: |
    fc-cache -f -v
  become: true
  changed_when: false
  listen: "notification_update_font"
  ignore_errors: true # noqa: ignore-errors
29 ansible/roles/console/tasks/node/load_secret_vars.yaml Normal file
@@ -0,0 +1,29 @@
---
- name: Check sops installation (Prerequisite)
  ansible.builtin.shell: |
    command -v sops
  changed_when: false
  failed_when: false
  register: "is_sops_installed"
  ignore_errors: true

- name: Fail when sops is missing
  ansible.builtin.fail:
    msg: "sops is not installed. Please install sops manually as described in README.md before running this playbook"
  when: is_sops_installed.rc != 0

- name: Decrypt secret values in console
  environment:
    SOPS_AGE_KEY: "{{ hostvars['console']['age_key'] }}"
  ansible.builtin.command: |
    sops -d --output-type yaml {{ hostvars['console']['node']['config_path'] }}/secrets/secrets.yaml
  changed_when: false
  register: "decrypted_secrets"
  run_once: true
  no_log: true

- name: Load decrypted secret values in console
  ansible.builtin.set_fact:
    "{{ item.key }}": "{{ item.value }}"
  loop: "{{ decrypted_secrets.stdout | from_yaml | dict2items }}"
  no_log: true
109 ansible/roles/console/tasks/node/set_ssh_client.yaml Normal file
@@ -0,0 +1,109 @@
---
- name: Create ssh id_console
  ansible.builtin.copy:
    content: "{{ hostvars['console']['ssh']['console']['key'] }}"
    dest: "/etc/secrets/{{ node['uid'] }}/id_console"
    owner: "{{ ansible_user }}"
    group: "root"
    mode: "0400"
  become: true
  no_log: true

- name: Create ssh id_console.pub
  ansible.builtin.copy:
    content: "{{ hostvars['console']['ssh']['console']['pub'] }}"
    dest: "/etc/secrets/{{ node['uid'] }}/id_console.pub"
    owner: "{{ ansible_user }}"
    group: "root"
    mode: "0400"
  become: true
  no_log: true

- name: Create ssh_known_hosts
  become: true
  ansible.builtin.copy:
    content: |
      @cert-authority *.ilnmors.internal {{ hostvars['console']['ssh']['ca']['pub'] }}
    dest: "/etc/ssh/ssh_known_hosts"
    owner: "root"
    group: "root"
    mode: "0644"
  no_log: true

- name: Check id_console-cert.pub
  ansible.builtin.stat:
    path: "/etc/secrets/{{ node['uid'] }}/id_console-cert.pub"
  register: "is_signed_console_key"

- name: Get current id_console-cert.pub allow users
  ansible.builtin.shell: |
    set -o pipefail
    ssh-keygen -L -f /etc/secrets/{{ node['uid'] }}/id_console-cert.pub | \
      sed -n '/Principals:/,/Critical Options:/p' | \
      sed '1d;$d' | \
      sed 's/^[[:space:]]*//'
  when: is_signed_console_key.stat.exists
  changed_when: false
  register: "current_allow_users"
  no_log: true

- name: Set key information
  ansible.builtin.set_fact:
    current_user_list: "{{ current_allow_users.stdout_lines }}"
  when: is_signed_console_key.stat.exists
  no_log: true

- name: Compare key values between current information and defined information
  ansible.builtin.set_fact:
    is_certificate_info_different: true
  when: (current_user_list | default([])) != (node['ssh_users'].split(',') | map('trim') | list)

- name: Get SSH CA and signing
  when: not is_signed_console_key.stat.exists or (is_certificate_info_different | default(false))
  block:
    - name: Get SSH CA
      ansible.builtin.copy:
        content: |
          {{ hostvars['console']['ssh']['ca']['key'] }}
        dest: "/run/user/{{ node['uid'] }}/local_ssh_ca_private_key"
        owner: "console"
        group: "svadmins"
        mode: "0400"
      no_log: true
    - name: Sign on ssh console key (pub file)
      ansible.builtin.command: |
        ssh-keygen -s /run/user/{{ node['uid'] }}/local_ssh_ca_private_key \
          -I "{{ node['name'] }}" \
          -n "{{ node['ssh_users'] }}" \
          /etc/secrets/{{ node['uid'] }}/id_console.pub
      become: true
      changed_when: not is_signed_console_key.stat.exists or (is_certificate_info_different | default(false))
      no_log: true
  always:
    - name: Clean temporary files
      ansible.builtin.file:
        path: "/run/user/{{ node['uid'] }}/local_ssh_ca_private_key"
        state: "absent"
      no_log: true

- name: Create .ssh directory
  ansible.builtin.file:
    path: "{{ node['home_path'] }}/.ssh"
    state: "directory"
    owner: "{{ ansible_user }}"
    group: "svadmins"
    mode: "0700"

- name: Create ssh config file
  ansible.builtin.copy:
    content: |
      {% for host in groups['all'] if host != 'console' %}
      Host {{ host }}
        HostName {{ hostvars[host]['ansible_host'] }}
        User {{ hostvars[host]['ansible_user'] }}
        IdentityFile /etc/secrets/{{ node['uid'] }}/id_console
      {% endfor %}
    dest: "{{ node['home_path'] }}/.ssh/config"
    owner: "{{ ansible_user }}"
    group: "svadmins"
    mode: "0600"
31 ansible/roles/console/tasks/services/set_chromium.yaml Normal file
@@ -0,0 +1,31 @@
---
- name: Check chromium installation
  ansible.builtin.shell: |
    command -v chromium
  changed_when: false
  failed_when: false
  register: "is_chromium_installed"
  ignore_errors: true

- name: Check Korean font installation
  ansible.builtin.shell: |
    fc-list | grep -i "nanum"
  changed_when: false
  failed_when: false
  register: "is_font_installed"
  ignore_errors: true

- name: Install chromium
  ansible.builtin.apt:
    name: "chromium"
    state: "present"
  become: true
  when: is_chromium_installed.rc != 0

- name: Install font
  ansible.builtin.apt:
    name: "fonts-nanum"
    state: "present"
  become: true
  when: is_font_installed.rc != 0
  notify: "notification_update_font"
108 ansible/roles/console/tasks/services/set_cli_tools.yaml Normal file
@@ -0,0 +1,108 @@
---
- name: Gather system facts (hardware)
  ansible.builtin.setup:
    gather_subset:
      - hardware
  become: true

- name: Check ansible installation
  ansible.builtin.shell: |
    command -v ansible
  changed_when: false
  failed_when: false
  register: "is_ansible_installed"
  ignore_errors: true

- name: Upgrade ansible module
  community.general.ansible_galaxy_install:
    type: "collection"
    name: "{{ item }}"
    state: "latest"
  loop:
    - "ansible.posix"
    - "community.libvirt"
    - "community.general"
    - "containers.podman"
  when: is_ansible_installed.rc == 0

- name: Download sops
  ansible.builtin.get_url:
    url: "https://github.com/getsops/sops/releases/download/v{{ version['packages']['sops'] }}/\
      sops_{{ version['packages']['sops'] }}_{{ item }}.deb"
    dest: "{{ node['data_path'] }}/bin/sops-{{ version['packages']['sops'] }}-{{ item }}.deb"
    owner: "{{ ansible_user }}"
    group: "svadmins"
    mode: "0600"
  loop:
    - "amd64"
    - "arm64"

- name: Download step-cli
  ansible.builtin.get_url:
    url: "https://dl.smallstep.com/gh-release/cli/gh-release-header/v{{ version['packages']['step'] }}/\
      step-cli_{{ version['packages']['step'] }}-1_{{ item }}.deb"
    dest: "{{ node['data_path'] }}/bin/step-{{ version['packages']['step'] }}-{{ item }}.deb"
    owner: "{{ ansible_user }}"
    group: "svadmins"
    mode: "0600"
  loop:
    - "amd64"
    - "arm64"

- name: Download kopia
  ansible.builtin.get_url:
    url: "https://github.com/kopia/kopia/releases/download/v{{ version['packages']['kopia'] }}/\
      kopia_{{ version['packages']['kopia'] }}_linux_{{ item }}.deb"
    dest: "{{ node['data_path'] }}/bin/kopia-{{ version['packages']['kopia'] }}-{{ item }}.deb"
    owner: "{{ ansible_user }}"
    group: "svadmins"
    mode: "0600"
  loop:
    - "amd64"
    - "arm64"

- name: Download blocky
  ansible.builtin.get_url:
    url: "https://github.com/0xERR0R/blocky/releases/download/v{{ version['packages']['blocky'] }}/\
      blocky_v{{ version['packages']['blocky'] }}_Linux_{{ item }}.tar.gz"
    dest: "{{ node['data_path'] }}/bin/blocky-{{ version['packages']['blocky'] }}-{{ item }}.tar.gz"
    owner: "{{ ansible_user }}"
    group: "svadmins"
    mode: "0600" # noqa: line-length
  loop:
    - "x86_64"
    - "arm64"

- name: Download alloy
  ansible.builtin.get_url:
    url: "https://github.com/grafana/alloy/releases/download/v{{ version['packages']['alloy'] }}/\
      alloy-{{ version['packages']['alloy'] }}-1.{{ item }}.deb"
    dest: "{{ node['data_path'] }}/bin/alloy-{{ version['packages']['alloy'] }}-{{ item }}.deb"
    owner: "{{ ansible_user }}"
    group: "svadmins"
    mode: "0600"
  loop:
    - "amd64"
    - "arm64"

- name: Apply cli tools (x86_64)
  ansible.builtin.apt:
    deb: "{{ node['data_path'] }}/bin/{{ item }}"
    state: "present"
  loop:
    - "sops-{{ version['packages']['sops'] }}-amd64.deb"
    - "step-{{ version['packages']['step'] }}-amd64.deb"
    - "kopia-{{ version['packages']['kopia'] }}-amd64.deb"
  become: true
  when: ansible_facts['architecture'] == "x86_64"

- name: Apply cli tools (aarch64)
  ansible.builtin.apt:
    deb: "{{ node['data_path'] }}/bin/{{ item }}"
    state: "present"
  loop:
    - "sops-{{ version['packages']['sops'] }}-arm64.deb"
    - "step-{{ version['packages']['step'] }}-arm64.deb"
    - "kopia-{{ version['packages']['kopia'] }}-arm64.deb"
  become: true
  when: ansible_facts['architecture'] == "aarch64"
63
ansible/roles/fw/handlers/main.yaml
Normal file
63
ansible/roles/fw/handlers/main.yaml
Normal file
@@ -0,0 +1,63 @@
---
- name: Restart chrony
  ansible.builtin.systemd:
    name: "chrony.service"
    state: "restarted"
    enabled: true
    daemon_reload: true
  become: true
  changed_when: false
  listen: "notification_restart_chrony"
  ignore_errors: true # noqa: ignore-errors

- name: Update suricata rules
  ansible.builtin.command:
    suricata-update --disable-conf /etc/suricata/disable.conf --enable-conf /etc/suricata/enable.conf --local /etc/suricata/rules/local.rules
  become: true
  changed_when: false
  listen: "notification_update_suricata_rules"
  ignore_errors: true # noqa: ignore-errors

- name: Restart suricata
  ansible.builtin.systemd:
    name: "suricata.service"
    state: "restarted"
    enabled: true
    daemon_reload: true
  become: true
  changed_when: false
  listen: "notification_restart_suricata"
  ignore_errors: true # noqa: ignore-errors

- name: Restart bind9
  ansible.builtin.systemd:
    name: "named.service"
    state: "restarted"
    enabled: true
    daemon_reload: true
  become: true
  changed_when: false
  listen: "notification_restart_bind"
  ignore_errors: true # noqa: ignore-errors

- name: Restart blocky
  ansible.builtin.systemd:
    name: "blocky.service"
    state: "restarted"
    enabled: true
    daemon_reload: true
  become: true
  changed_when: false
  listen: "notification_restart_blocky"
  ignore_errors: true # noqa: ignore-errors

- name: Restart kea-dhcp4
  ansible.builtin.systemd:
    name: "kea-dhcp4-server.service"
    state: "restarted"
    enabled: true
    daemon_reload: true
  become: true
  changed_when: false
  listen: "notification_restart_kea4"
  ignore_errors: true # noqa: ignore-errors
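# These handlers fire once at the end of the play, keyed on their `listen` topics.
# If a change ever needs to take effect mid-play, a task like this sketch (not used
# in this role) would flush the queued handlers early:
# - name: Flush pending handlers
#   ansible.builtin.meta: flush_handlers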
103
ansible/roles/fw/tasks/services/set_bind.yaml
Normal file
103
ansible/roles/fw/tasks/services/set_bind.yaml
Normal file
@@ -0,0 +1,103 @@
---
- name: Check bind9 installation
  ansible.builtin.shell: |
    command -v named
  become: true # named is located in /usr/sbin, which means root permission is needed.
  changed_when: false
  failed_when: false
  register: "is_bind_installed"
  ignore_errors: true

- name: Set bind9 zone files
  ansible.builtin.set_fact:
    bind_zone_files:
      - "db.ilnmors.internal"
      - "db.ilnmors.com"
      - "db.1.168.192.in-addr.arpa"
      - "db.10.168.192.in-addr.arpa"
      - "db.1.00df.ip6.arpa"
      - "db.10.00df.ip6.arpa"

- name: Install bind9
  ansible.builtin.apt:
    name: "bind9"
    state: "present"
  become: true
  when: is_bind_installed.rc != 0

- name: Deploy acme.key
  ansible.builtin.copy:
    content: "{{ hostvars['console']['bind']['acme_key'] }}"
    dest: "/etc/bind/acme.key"
    owner: "bind"
    group: "bind"
    mode: "0640"
  become: true
  notify: "notification_restart_bind"
  no_log: true

- name: Deploy db files
  ansible.builtin.copy:
    src: "{{ hostvars['console']['node']['config_path'] }}/services/systemd/fw/bind/lib/{{ item }}"
    dest: "/var/lib/bind/{{ item }}"
    owner: "bind"
    group: "bind"
    mode: "0640"
  loop: "{{ bind_zone_files }}"
  become: true
  notify: "notification_restart_bind"
  no_log: true

- name: Clean BIND journal files
  ansible.builtin.file:
    path: "/var/lib/bind/{{ item }}.jnl"
    state: absent
  loop: "{{ bind_zone_files }}"
  become: true
  notify: "notification_restart_bind"
  no_log: true

- name: Deploy named.conf
  ansible.builtin.template:
    src: "{{ hostvars['console']['node']['config_path'] }}/services/systemd/fw/bind/etc/named.conf.j2"
    dest: "/etc/bind/named.conf"
    owner: "root"
    group: "bind"
    mode: "0640"
    validate: "/usr/bin/named-checkconf -z %s"
  become: true
  notify: "notification_restart_bind"
  no_log: true

- name: Create named.service.d
  ansible.builtin.file:
    path: "/etc/systemd/system/named.service.d"
    state: "directory"
    owner: "root"
    group: "root"
    mode: "0755"
  become: true

- name: Set named.service.d/override.conf
  ansible.builtin.copy:
    dest: "/etc/systemd/system/named.service.d/override.conf"
    content: |
      [Service]
      Restart=always
      RestartSec=60
    owner: "root"
    group: "root"
    mode: "0644"
  become: true
  notify: "notification_restart_bind"

- name: Enable bind9 service
  ansible.builtin.systemd:
    name: "named.service"
    state: "started"
    enabled: true
  become: true

# Verify working
# dig A fw.ilnmors.internal @fd00:10::3
# dig AAAA fw.ilnmors.internal @fd00:10::3
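# Additional spot checks (a sketch; pick any record that exists in the zone files above,
# e.g. an address inside the 192.168.10.0/24 reverse zone):
# dig SOA ilnmors.internal @fd00:10::3 +short
# dig -x 192.168.10.1 @fd00:10::3 +short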
117
ansible/roles/fw/tasks/services/set_blocky.yaml
Normal file
117
ansible/roles/fw/tasks/services/set_blocky.yaml
Normal file
@@ -0,0 +1,117 @@
---
- name: Gather system facts (hardware)
  ansible.builtin.setup:
    gather_subset:
      - hardware
  become: true

- name: Create blocky group
  ansible.builtin.group:
    name: "blocky"
    gid: 953
    state: "present"
  become: true

- name: Create blocky user
  ansible.builtin.user:
    name: "blocky"
    uid: 953
    group: "blocky"
    shell: "/usr/sbin/nologin"
    password_lock: true
    comment: "Blocky DNS User"
    state: "present"
  become: true

- name: Create blocky etc directory
  ansible.builtin.file:
    path: "{{ item }}"
    owner: "blocky"
    group: "blocky"
    mode: "0750"
    state: "directory"
  loop:
    - "/etc/blocky"
    - "/etc/blocky/ssl"
  become: true

- name: Deploy blocky binary file (x86_64)
  ansible.builtin.unarchive:
    src: "{{ hostvars['console']['node']['data_path'] }}/bin/blocky-{{ version['packages']['blocky'] }}-x86_64.tar.gz"
    dest: "/usr/local/bin/"
    owner: "root"
    group: "root"
    mode: "0755"
    extra_opts:
      - "--strip-components=0"
      - "--wildcards"
      - "blocky"
  become: true
  when: ansible_facts['architecture'] == "x86_64"
  notify: "notification_restart_blocky"

- name: Deploy blocky binary file (aarch64)
  ansible.builtin.unarchive:
    src: "{{ hostvars['console']['node']['data_path'] }}/bin/blocky-{{ version['packages']['blocky'] }}-arm64.tar.gz"
    dest: "/usr/local/bin/"
    owner: "root"
    group: "root"
    mode: "0755"
    extra_opts:
      - "--strip-components=0"
      - "--wildcards"
      - "blocky"
  become: true
  when: ansible_facts['architecture'] == "aarch64"
  notify: "notification_restart_blocky"

- name: Deploy blocky config
  ansible.builtin.template:
    src: "{{ hostvars['console']['node']['config_path'] }}/services/systemd/fw/blocky/etc/config.yaml.j2"
    dest: "/etc/blocky/config.yaml"
    owner: "blocky"
    group: "blocky"
    mode: "0640"
  become: true
  notify: "notification_restart_blocky"
  no_log: true

- name: Deploy blocky certificate and key
  ansible.builtin.copy:
    content: |
      {{ item.value }}
    dest: "/etc/blocky/ssl/{{ item.name }}"
    owner: "blocky"
    group: "blocky"
    mode: "{{ item.mode }}"
  loop:
    - name: "blocky.crt"
      value: |
        {{ hostvars['console']['blocky']['crt'] | trim }}
        {{ hostvars['console']['ca']['intermediate']['crt'] }}
      mode: "0440"
    - name: "blocky.key"
      value: "{{ hostvars['console']['blocky']['key'] }}"
      mode: "0400"
  become: true
  notify: "notification_restart_blocky"
  no_log: true

- name: Deploy blocky service
  ansible.builtin.copy:
    src: "{{ hostvars['console']['node']['config_path'] }}/services/systemd/fw/blocky/blocky.service"
    dest: "/etc/systemd/system/blocky.service"
    owner: "root"
    group: "root"
    mode: "0644"
    validate: "/usr/bin/systemd-analyze verify %s"
  become: true
  notify: "notification_restart_blocky"

- name: Enable blocky service
  ansible.builtin.systemd:
    name: "blocky.service"
    state: "started"
    enabled: true
    daemon_reload: true
  become: true
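# Verify working (a sketch; listen addresses and the DoT port come from config.yaml.j2,
# 853 is only the usual DNS-over-TLS port):
# dig A fw.ilnmors.internal @127.0.0.1
# blocky healthcheck
# openssl s_client -connect 127.0.0.1:853 -brief </dev/null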
55
ansible/roles/fw/tasks/services/set_chrony.yaml
Normal file
55
ansible/roles/fw/tasks/services/set_chrony.yaml
Normal file
@@ -0,0 +1,55 @@
---
- name: Check chrony installation
  ansible.builtin.shell: |
    command -v chronyc
  changed_when: false
  failed_when: false
  register: "is_chrony_installed"
  ignore_errors: true

- name: Install chrony
  ansible.builtin.apt:
    name: "chrony"
    state: "present"
  become: true
  when: is_chrony_installed.rc != 0

- name: Deploy local acl file
  ansible.builtin.template:
    src: "{{ hostvars['console']['node']['config_path'] }}/services/systemd/fw/chrony/local-acl.conf.j2"
    dest: "/etc/chrony/conf.d/local-acl.conf"
    owner: "root"
    group: "root"
    mode: "0644"
  become: true
  notify: "notification_restart_chrony"

- name: Create chrony.service.d
  ansible.builtin.file:
    path: "/etc/systemd/system/chrony.service.d"
    state: "directory"
    owner: "root"
    group: "root"
    mode: "0755"
  become: true

- name: Set chrony.service.d/override.conf
  ansible.builtin.copy:
    dest: "/etc/systemd/system/chrony.service.d/override.conf"
    content: |
      [Service]
      Restart=always
      RestartSec=60
    owner: "root"
    group: "root"
    mode: "0644"
  become: true
  notify: "notification_restart_chrony"

- name: Enable chrony service
  ansible.builtin.systemd:
    name: "chrony.service"
    state: "started"
    enabled: true
    daemon_reload: true
  become: true
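# Verify working
# chronyc tracking
# chronyc sources -v
# sudo chronyc clients   # should list hosts permitted by local-acl.conf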
41
ansible/roles/fw/tasks/services/set_ddns.yaml
Normal file
41
ansible/roles/fw/tasks/services/set_ddns.yaml
Normal file
@@ -0,0 +1,41 @@
---
- name: Create ddns secret env file
  ansible.builtin.copy:
    content: |
      ZONE_ID={{ hostvars['console']['ddns']['zone_id'] }}
      API_KEY={{ hostvars['console']['ddns']['api_key'] }}
    dest: "/etc/secrets/{{ node['uid'] }}/ddns.env"
    owner: "{{ ansible_user }}"
    group: "svadmins"
    mode: "0400"
  become: true
  no_log: true

- name: Deploy ddns script
  ansible.builtin.copy:
    src: "{{ hostvars['console']['node']['config_path'] }}/services/systemd/fw/ddns/ddns.sh"
    dest: "/usr/local/bin"
    owner: "{{ ansible_user }}"
    group: "svadmins"
    mode: "0711"
  become: true

- name: Deploy ddns service files
  ansible.builtin.copy:
    src: "{{ hostvars['console']['node']['config_path'] }}/services/systemd/fw/ddns/{{ item }}"
    dest: "{{ node['home_path'] }}/.config/systemd/user/{{ item }}"
    owner: "{{ ansible_user }}"
    group: "svadmins"
    mode: "0600"
    validate: "/usr/bin/systemd-analyze verify %s"
  loop:
    - "ddns.service"
    - "ddns.timer"

- name: Register ddns timer
  ansible.builtin.systemd:
    name: "ddns.timer"
    state: "started"
    enabled: true
    daemon_reload: true
    scope: "user"
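# Verify working (a sketch; user units need lingering or an active session for the deploy user):
# systemctl --user list-timers ddns.timer
# journalctl --user -u ddns.service -n 20 --no-pager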
57
ansible/roles/fw/tasks/services/set_kea.yaml
Normal file
57
ansible/roles/fw/tasks/services/set_kea.yaml
Normal file
@@ -0,0 +1,57 @@
---
- name: Check Kea dhcp4 installation
  ansible.builtin.shell: |
    command -v kea-dhcp4
  become: true # kea-dhcp4 is located in /usr/sbin, which means root permission is needed.
  changed_when: false
  failed_when: false
  register: "is_kea4_installed"
  ignore_errors: true

- name: Install kea-dhcp4
  ansible.builtin.apt:
    name:
      - "kea-dhcp4-server"
    state: "present"
  become: true
  when: is_kea4_installed.rc != 0

- name: Deploy kea dhcp4 conf
  ansible.builtin.template:
    src: "{{ hostvars['console']['node']['config_path'] }}/services/systemd/fw/kea/kea-dhcp4.conf.j2"
    dest: "/etc/kea/kea-dhcp4.conf"
    owner: "_kea"
    group: "_kea"
    mode: "0600"
  become: true
  notify: "notification_restart_kea4"

- name: Create kea-dhcp4-server.service.d
  ansible.builtin.file:
    path: "/etc/systemd/system/kea-dhcp4-server.service.d"
    state: "directory"
    owner: "root"
    group: "root"
    mode: "0755"
  become: true

- name: Set kea-dhcp4-server.service.d/override.conf
  ansible.builtin.copy:
    dest: "/etc/systemd/system/kea-dhcp4-server.service.d/override.conf"
    content: |
      [Service]
      Restart=always
      RestartSec=60
    owner: "root"
    group: "root"
    mode: "0644"
  become: true
  notify:
    - "notification_restart_kea4"

- name: Enable kea service
  ansible.builtin.systemd:
    name: "kea-dhcp4-server.service"
    state: "started"
    enabled: true
  become: true
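# Verify working (a sketch; the lease file path assumes Kea's default memfile backend):
# sudo kea-dhcp4 -t /etc/kea/kea-dhcp4.conf
# sudo tail /var/lib/kea/kea-leases4.csv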
141
ansible/roles/fw/tasks/services/set_suricata.yaml
Normal file
141
ansible/roles/fw/tasks/services/set_suricata.yaml
Normal file
@@ -0,0 +1,141 @@
---
- name: Check suricata installation
  ansible.builtin.shell: |
    command -v suricata
  changed_when: false
  failed_when: false
  register: "is_suricata_installed"
  ignore_errors: true

- name: Install suricata
  ansible.builtin.apt:
    name:
      - "suricata"
      - "suricata-update"
    state: "present"
  become: true
  when: is_suricata_installed.rc != 0

- name: Deploy suricata-update service files
  ansible.builtin.copy:
    src: "{{ hostvars['console']['node']['config_path'] }}/services/systemd/fw/suricata/{{ item }}"
    dest: "/etc/systemd/system/{{ item }}"
    owner: "root"
    group: "root"
    mode: "0644"
    validate: "/usr/bin/systemd-analyze verify %s"
  loop:
    - "suricata-update.service"
    - "suricata-update.timer"
  become: true

- name: Deploy suricata custom configurations
  ansible.builtin.copy:
    src: "{{ hostvars['console']['node']['config_path'] }}/services/systemd/fw/suricata/etc/{{ item }}"
    dest: "/etc/suricata/{{ item }}"
    owner: "root"
    group: "root"
    mode: "0644"
  loop:
    - "disable.conf"
    - "enable.conf"
  become: true
  notify:
    - "notification_update_suricata_rules"
    - "notification_restart_suricata"

- name: Deploy suricata custom rules
  ansible.builtin.copy:
    src: "{{ hostvars['console']['node']['config_path'] }}/services/systemd/fw/suricata/etc/local.rules"
    dest: "/etc/suricata/rules/local.rules"
    owner: "root"
    group: "root"
    mode: "0644"
  become: true
  notify:
    - "notification_update_suricata_rules"
    - "notification_restart_suricata"

- name: Check suricata rules
  ansible.builtin.stat:
    path: "/var/lib/suricata/rules/suricata.rules"
  register: "is_suricata_rules_file"

- name: Update suricata rules
  ansible.builtin.command:
    suricata-update
  become: true
  when: not is_suricata_rules_file.stat.exists
  changed_when: true

- name: Enable auto suricata rules update
  ansible.builtin.systemd:
    name: "suricata-update.timer"
    state: "started"
    enabled: true
    daemon_reload: true
  become: true

- name: Deploy suricata.yaml
  ansible.builtin.template:
    src: "{{ hostvars['console']['node']['config_path'] }}/services/systemd/fw/suricata/etc/suricata.yaml.j2"
    dest: "/etc/suricata/suricata.yaml"
    owner: "root"
    group: "root"
    mode: "0644"
    validate: "/usr/bin/suricata -T -c %s"
  become: true
  notify: "notification_restart_suricata"

- name: Create suricata.service.d
  ansible.builtin.file:
    path: "/etc/systemd/system/suricata.service.d"
    state: "directory"
    owner: "root"
    group: "root"
    mode: "0755"
  become: true

- name: Set suricata.service.d/override.conf
  ansible.builtin.copy:
    dest: "/etc/systemd/system/suricata.service.d/override.conf"
    content: |
      [Service]
      Restart=always
      RestartSec=60
    owner: "root"
    group: "root"
    mode: "0644"
  become: true
  notify:
    - "notification_restart_suricata"

- name: Enable suricata service
  ansible.builtin.systemd:
    name: "suricata.service"
    state: "started"
    enabled: true
    daemon_reload: true
  become: true

- name: Set suricata logs logrotate
  ansible.builtin.copy:
    content: |
      /var/log/suricata/*.log /var/log/suricata/*.json {
          weekly
          missingok
          rotate 4
          compress
          delaycompress
          notifempty
          maxsize 500M
          sharedscripts
          postrotate
              /usr/bin/systemctl reload suricata > /dev/null 2>/dev/null || true
          endscript
      }
    dest: "/etc/logrotate.d/suricata"
    owner: "root"
    group: "root"
    mode: "0644"
  become: true
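# Verify working (a sketch; suricatasc talks to the unix socket enabled in suricata.yaml):
# sudo suricata -T -c /etc/suricata/suricata.yaml -v
# sudo suricatasc -c uptime
# sudo tail -f /var/log/suricata/fast.log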
85
ansible/roles/infra/handlers/main.yaml
Normal file
85
ansible/roles/infra/handlers/main.yaml
Normal file
@@ -0,0 +1,85 @@
- name: Restart ca
  ansible.builtin.systemd:
    name: "ca.service"
    state: "restarted"
    enabled: true
    daemon_reload: true
    scope: "user"
  changed_when: false
  listen: "notification_restart_ca"
  ignore_errors: true # noqa: ignore-errors

- name: Reload postgresql
  ansible.builtin.command:
    /usr/bin/podman exec -u postgres postgresql sh -c "pg_ctl reload"
  when: not (is_postgresql_init_run | default(false))
  changed_when: false
  listen: "notification_reload_postgresql"
  ignore_errors: true # noqa: ignore-errors

- name: Restart postgresql
  ansible.builtin.systemd:
    name: "postgresql.service"
    state: "restarted"
    enabled: true
    daemon_reload: true
    scope: "user"
  when: not (is_postgresql_init_run | default(false))
  changed_when: false
  listen: "notification_restart_postgresql"
  ignore_errors: true # noqa: ignore-errors

- name: Restart ldap
  ansible.builtin.systemd:
    name: "ldap.service"
    state: "restarted"
    enabled: true
    daemon_reload: true
    scope: "user"
  changed_when: false
  listen: "notification_restart_ldap"
  ignore_errors: true # noqa: ignore-errors

- name: Restart prometheus
  ansible.builtin.systemd:
    name: "prometheus.service"
    state: "restarted"
    enabled: true
    daemon_reload: true
    scope: "user"
  changed_when: false
  listen: "notification_restart_prometheus"
  ignore_errors: true # noqa: ignore-errors

- name: Restart loki
  ansible.builtin.systemd:
    name: "loki.service"
    state: "restarted"
    enabled: true
    daemon_reload: true
    scope: "user"
  changed_when: false
  listen: "notification_restart_loki"
  ignore_errors: true # noqa: ignore-errors

- name: Restart grafana
  ansible.builtin.systemd:
    name: "grafana.service"
    state: "restarted"
    enabled: true
    daemon_reload: true
    scope: "user"
  changed_when: false
  listen: "notification_restart_grafana"
  ignore_errors: true # noqa: ignore-errors

- name: Enable x509-exporter.service
  ansible.builtin.systemd:
    name: "x509-exporter.service"
    state: "started"
    enabled: true
    daemon_reload: true
    scope: "user"
  changed_when: false
  listen: "notification_restart_x509-exporter"
  ignore_errors: true # noqa: ignore-errors
84
ansible/roles/infra/tasks/services/set_ca_server.yaml
Normal file
84
ansible/roles/infra/tasks/services/set_ca_server.yaml
Normal file
@@ -0,0 +1,84 @@
---
- name: Set ca container subuid
  ansible.builtin.set_fact:
    ca_subuid: "100999"

- name: Create ca directory
  ansible.builtin.file:
    path: "{{ node['home_path'] }}/containers/{{ item }}"
    owner: "{{ ca_subuid }}"
    group: "svadmins"
    state: "directory"
    mode: "0770"
  loop:
    - "ca"
    - "ca/certs"
    - "ca/secrets"
    - "ca/config"
    - "ca/db"
    - "ca/templates"
  become: true

- name: Register secret value to podman secret
  containers.podman.podman_secret:
    name: "STEP_CA_PASSWORD"
    data: "{{ hostvars['console']['ca']['intermediate']['password'] }}"
    state: "present"
    force: true
  notify: "notification_restart_ca"
  no_log: true

- name: Deploy ca config files
  ansible.builtin.template:
    src: "{{ hostvars['console']['node']['config_path'] }}/services/containers/infra/ca/config/{{ item }}.j2"
    dest: "{{ node['home_path'] }}/containers/ca/config/{{ item }}"
    owner: "{{ ca_subuid }}"
    group: "svadmins"
    mode: "0400"
  loop:
    - "ca.json"
    - "defaults.json"
  become: true
  notify: "notification_restart_ca"

- name: Deploy ca certificate and key
  ansible.builtin.copy:
    content: |
      {{ item.value }}
    dest: "{{ item.path }}/{{ item.name }}"
    owner: "{{ ca_subuid }}"
    group: "svadmins"
    mode: "{{ item.mode }}"
  loop:
    - name: "ilnmors_root_ca.crt"
      value: "{{ hostvars['console']['ca']['root']['crt'] }}"
      path: "{{ node['home_path'] }}/containers/ca/certs"
      mode: "0440"
    - name: "ilnmors_intermediate_ca.crt"
      value: "{{ hostvars['console']['ca']['intermediate']['crt'] }}"
      path: "{{ node['home_path'] }}/containers/ca/certs"
      mode: "0440"
    - name: "ilnmors_intermediate_ca.key"
      value: "{{ hostvars['console']['ca']['intermediate']['key'] }}"
      path: "{{ node['home_path'] }}/containers/ca/secrets"
      mode: "0400"
  become: true
  notify: "notification_restart_ca"
  no_log: true

- name: Deploy container file
  ansible.builtin.template:
    src: "{{ hostvars['console']['node']['config_path'] }}/services/containers/infra/ca/ca.container.j2"
    dest: "{{ node['home_path'] }}/.config/containers/systemd/ca.container"
    owner: "{{ ansible_user }}"
    group: "svadmins"
    mode: "0644"
  notify: "notification_restart_ca"

- name: Enable ca
  ansible.builtin.systemd:
    name: "ca.service"
    state: "started"
    enabled: true
    daemon_reload: true
    scope: "user"
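# Verify working (a sketch; substitute the CA's real DNS name and port from ca.json — 9000
# is only the step-ca default):
# step ca health --ca-url https://<ca-host>:9000 --root <path-to>/ilnmors_root_ca.crt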
89
ansible/roles/infra/tasks/services/set_grafana.yaml
Normal file
89
ansible/roles/infra/tasks/services/set_grafana.yaml
Normal file
@@ -0,0 +1,89 @@
---
- name: Set grafana container subuid
  ansible.builtin.set_fact:
    grafana_subuid: "100471"

- name: Create grafana directory
  ansible.builtin.file:
    path: "{{ node['home_path'] }}/containers/{{ item }}"
    owner: "{{ grafana_subuid }}"
    group: "svadmins"
    state: "directory"
    mode: "0770"
  loop:
    - "grafana"
    - "grafana/data"
    - "grafana/etc"
    - "grafana/etc/provisioning"
    - "grafana/etc/dashboards"
    - "grafana/ssl"
  become: true

- name: Deploy root certificate
  ansible.builtin.copy:
    content: |
      {{ hostvars['console']['ca']['root']['crt'] }}
    dest: "{{ node['home_path'] }}/containers/grafana/ssl/ilnmors_root_ca.crt"
    owner: "{{ grafana_subuid }}"
    group: "svadmins"
    mode: "0400"
  become: true
  notify: "notification_restart_grafana"
  no_log: true

- name: Register secret value to podman secret
  containers.podman.podman_secret:
    name: "{{ item.name }}"
    data: "{{ item.value }}"
    state: "present"
    force: true
  loop:
    - name: "GF_DB_PASSWORD"
      value: "{{ hostvars['console']['postgresql']['password']['grafana'] }}"
    - name: "LDAP_BIND_PASSWORD"
      value: "{{ hostvars['console']['ldap']['password']['grafana'] }}"
    - name: "GF_ADMIN_PASSWORD"
      value: "{{ hostvars['console']['grafana']['user']['password'] }}"
  notify: "notification_restart_grafana"
  no_log: true

- name: Deploy configuration files
  ansible.builtin.template:
    src: "{{ hostvars['console']['node']['config_path'] }}/services/containers/infra/grafana/etc/{{ item }}.j2"
    dest: "{{ node['home_path'] }}/containers/grafana/etc/{{ item }}"
    owner: "{{ grafana_subuid }}"
    group: "svadmins"
    mode: "0400"
  loop:
    - "grafana.ini"
    - "ldap.toml"
  become: true
  notify: "notification_restart_grafana"
  no_log: true

- name: Deploy provisioning and dashboard files
  ansible.builtin.copy:
    src: "{{ hostvars['console']['node']['config_path'] }}/services/containers/infra/grafana/etc/provisioning/"
    dest: "{{ node['home_path'] }}/containers/grafana/etc/provisioning/"
    owner: "{{ grafana_subuid }}"
    group: "svadmins"
    mode: "0400"
  become: true
  notify: "notification_restart_grafana"

- name: Deploy container file
  ansible.builtin.template:
    src: "{{ hostvars['console']['node']['config_path'] }}/services/containers/infra/grafana/grafana.container.j2"
    dest: "{{ node['home_path'] }}/.config/containers/systemd/grafana.container"
    owner: "{{ ansible_user }}"
    group: "svadmins"
    mode: "0644"
  notify: "notification_restart_grafana"

- name: Enable grafana
  ansible.builtin.systemd:
    name: "grafana.service"
    state: "started"
    enabled: true
    daemon_reload: true
    scope: "user"
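# Verify working (a sketch; scheme, host and port depend on grafana.ini and the quadlet's
# published ports):
# podman ps --filter name=grafana
# curl -fsS https://<grafana-host>/api/health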
112
ansible/roles/infra/tasks/services/set_ldap.yaml
Normal file
112
ansible/roles/infra/tasks/services/set_ldap.yaml
Normal file
@@ -0,0 +1,112 @@
---
- name: Set ldap container subuid
  ansible.builtin.set_fact:
    ldap_subuid: "100999"

- name: Create ldap directory
  ansible.builtin.file:
    path: "{{ node['home_path'] }}/containers/{{ item }}"
    owner: "{{ ldap_subuid }}"
    group: "svadmins"
    state: "directory"
    mode: "0770"
  loop:
    - "ldap"
    - "ldap/data"
    - "ldap/ssl"
  become: true

- name: Deploy ldap certificate and key
  ansible.builtin.copy:
    content: |
      {{ item.value }}
    dest: "{{ node['home_path'] }}/containers/ldap/ssl/{{ item.name }}"
    owner: "{{ ldap_subuid }}"
    group: "svadmins"
    mode: "{{ item.mode }}"
  loop:
    - name: "ilnmors_root_ca.crt"
      value: "{{ hostvars['console']['ca']['root']['crt'] }}"
      mode: "0440"
    - name: "ldap.crt"
      value: |
        {{ hostvars['console']['ldap']['crt'] | trim }}
        {{ hostvars['console']['ca']['intermediate']['crt'] }}
      mode: "0440"
    - name: "ldap.key"
      value: "{{ hostvars['console']['ldap']['key'] }}"
      mode: "0400"
  become: true
  notify: "notification_restart_ldap"
  no_log: true

- name: Register secret value to podman secret
  containers.podman.podman_secret:
    name: "{{ item.name }}"
    data: "{{ item.value }}"
    state: "present"
    force: true
  loop:
    # Jinja's urlencode leaves `/` unescaped, so it has to be replaced with `%2F` manually.
    - name: "LLDAP_DATABASE_URL"
      value: "postgres://ldap:{{ hostvars['console']['postgresql']['password']['ldap'] | urlencode | replace('/', '%2F') }}\
        @{{ infra_uri['postgresql']['domain'] }}/ldap_db?sslmode=verify-full&sslrootcert=/etc/ssl/ldap/ilnmors_root_ca.crt"
    - name: "LLDAP_KEY_SEED"
      value: "{{ hostvars['console']['ldap']['seed_key'] }}"
    - name: "LLDAP_JWT_SECRET"
      value: "{{ hostvars['console']['ldap']['jwt_secret'] }}"
  notify: "notification_restart_ldap"
  no_log: true

- name: Initiate ldap (kept disabled via when=false; enable this block only when no ldap data exists in postgresql yet)
  when: false
  become: true
  block:
    - name: Register extra secret value to podman secret
      containers.podman.podman_secret:
        name: "LLDAP_LDAP_USER_PASSWORD"
        data: "{{ hostvars['console']['ldap']['password']['user'] }}"
        state: "present"
        force: true

    # Check the image version first; it must match the container file at data/config/containers/infra/ldap/ldap.container.
    - name: Initiate ldap
      containers.podman.podman_container:
        name: "init_LLDAP"
        image: "docker.io/lldap/lldap:{{ version['containers']['ldap'] }}"
        rm: true
        detach: false
        env:
          TZ: "Asia/Seoul"
          LLDAP_LDAP_BASE_DN: "dc=ilnmors,dc=internal"
        secrets:
          - "LLDAP_DATABASE_URL,type=env"
          - "LLDAP_KEY_SEED,type=env"
          - "LLDAP_JWT_SECRET,type=env"
          - "LLDAP_LDAP_USER_PASSWORD,type=env"
        volumes:
          - "{{ node['home_path'] }}/containers/ldap/data:/data:rw"
          - "{{ node['home_path'] }}/containers/ldap/ssl:/etc/ssl/ldap:ro"

  always:
    - name: Clean extra secret value from podman secret
      containers.podman.podman_secret:
        name: "LLDAP_LDAP_USER_PASSWORD"
        state: "absent"

- name: Deploy container file
  ansible.builtin.template:
    src: "{{ hostvars['console']['node']['config_path'] }}/services/containers/infra/ldap/ldap.container.j2"
    dest: "{{ node['home_path'] }}/.config/containers/systemd/ldap.container"
    owner: "{{ ansible_user }}"
    group: "svadmins"
    mode: "0644"
  notify: "notification_restart_ldap"

- name: Enable ldap
  ansible.builtin.systemd:
    name: "ldap.service"
    state: "started"
    enabled: true
    daemon_reload: true
    scope: "user"
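# Verify working (a sketch; 3890/17170 are lldap's default LDAP/web ports and may be remapped
# in ldap.container, and the admin DN follows lldap's usual layout):
# podman ps --filter name=ldap
# ldapsearch -x -H ldap://<ldap-host>:3890 -b "dc=ilnmors,dc=internal" \
#   -D "uid=admin,ou=people,dc=ilnmors,dc=internal" -W "(objectClass=person)"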
70
ansible/roles/infra/tasks/services/set_loki.yaml
Normal file
70
ansible/roles/infra/tasks/services/set_loki.yaml
Normal file
@@ -0,0 +1,70 @@
---
- name: Set loki container subuid
  ansible.builtin.set_fact:
    loki_subuid: "110000" # 10001

- name: Create loki directory
  ansible.builtin.file:
    path: "{{ node['home_path'] }}/containers/{{ item }}"
    state: "directory"
    owner: "{{ loki_subuid }}"
    group: "svadmins"
    mode: "0770"
  loop:
    - "loki"
    - "loki/etc"
    - "loki/data"
    - "loki/ssl"
  become: true

- name: Deploy loki configuration file
  ansible.builtin.copy:
    src: "{{ hostvars['console']['node']['config_path'] }}/services/containers/infra/loki/etc/loki.yaml"
    dest: "{{ node['home_path'] }}/containers/loki/etc/loki.yaml"
    owner: "{{ loki_subuid }}"
    group: "svadmins"
    mode: "0600"
  become: true
  notify: "notification_restart_loki"
  no_log: true

- name: Deploy loki certificate and key
  ansible.builtin.copy:
    content: |
      {{ item.value }}
    dest: "{{ node['home_path'] }}/containers/loki/ssl/{{ item.name }}"
    owner: "{{ loki_subuid }}"
    group: "svadmins"
    mode: "{{ item.mode }}"
  loop:
    - name: "ilnmors_root_ca.crt"
      value: "{{ hostvars['console']['ca']['root']['crt'] }}"
      mode: "0440"
    - name: "loki.crt"
      value: |
        {{ hostvars['console']['loki']['crt'] | trim }}
        {{ hostvars['console']['ca']['intermediate']['crt'] }}
      mode: "0440"
    - name: "loki.key"
      value: "{{ hostvars['console']['loki']['key'] }}"
      mode: "0400"
  become: true
  notify: "notification_restart_loki"
  no_log: true

- name: Deploy container file
  ansible.builtin.template:
    src: "{{ hostvars['console']['node']['config_path'] }}/services/containers/infra/loki/loki.container.j2"
    dest: "{{ node['home_path'] }}/.config/containers/systemd/loki.container"
    owner: "{{ ansible_user }}"
    group: "svadmins"
    mode: "0644"
  notify: "notification_restart_loki"

- name: Enable loki
  ansible.builtin.systemd:
    name: "loki.service"
    state: "started"
    enabled: true
    daemon_reload: true
    scope: "user"
169
ansible/roles/infra/tasks/services/set_postgresql.yaml
Normal file
169
ansible/roles/infra/tasks/services/set_postgresql.yaml
Normal file
@@ -0,0 +1,169 @@
---
- name: Set postgresql container subuid
  ansible.builtin.set_fact:
    postgresql_subuid: "100998"

- name: Set connected services list
  ansible.builtin.set_fact:
    # telegraf has no database
    connected_services:
      - "ldap"
      - "authelia"
      - "grafana"

- name: Create postgresql directory
  ansible.builtin.file:
    path: "{{ node['home_path'] }}/containers/{{ item }}"
    state: "directory"
    owner: "{{ postgresql_subuid }}"
    group: "svadmins"
    mode: "0770"
  loop:
    - "postgresql"
    - "postgresql/data"
    - "postgresql/config"
    - "postgresql/ssl"
    - "postgresql/init"
    - "postgresql/backups"
    - "postgresql/build"
  become: true

- name: Deploy containerfile for build
  ansible.builtin.template:
    src: "{{ hostvars['console']['node']['config_path'] }}/services/containers/infra/postgresql/build/postgresql.containerfile.j2"
    dest: "{{ node['home_path'] }}/containers/postgresql/build/Containerfile"
    owner: "{{ ansible_user }}"
    group: "svadmins"
    mode: "0640"

- name: Build postgresql container image
  containers.podman.podman_image:
    name: "ilnmors.internal/{{ node['name'] }}/postgres"
    # check tags from container file
    tag: "pg{{ version['containers']['postgresql'] }}-vectorchord{{ version['containers']['vectorchord'] }}"
    state: "build"
    path: "{{ node['home_path'] }}/containers/postgresql/build"

- name: Prune postgresql dangling images
  containers.podman.podman_prune:
    image: true

- name: Deploy postgresql configuration files
  ansible.builtin.template:
    src: "{{ hostvars['console']['node']['config_path'] }}/services/containers/infra/postgresql/config/{{ item }}.j2"
    dest: "{{ node['home_path'] }}/containers/postgresql/config/{{ item }}"
    owner: "{{ postgresql_subuid }}"
    group: "svadmins"
    mode: "0600"
  loop:
    - "postgresql.conf"
    - "pg_hba.conf"
  become: true
  notify: "notification_reload_postgresql"
  no_log: true

- name: Deploy postgresql certificate and key
  ansible.builtin.copy:
    content: |
      {{ item.value }}
    dest: "{{ node['home_path'] }}/containers/postgresql/ssl/{{ item.name }}"
    owner: "{{ postgresql_subuid }}"
    group: "svadmins"
    mode: "{{ item.mode }}"
  loop:
    - name: "ilnmors_root_ca.crt"
      value: "{{ hostvars['console']['ca']['root']['crt'] }}"
      mode: "0440"
    - name: "postgresql.crt"
      value: |
        {{ hostvars['console']['postgresql']['crt'] | trim }}
        {{ hostvars['console']['ca']['intermediate']['crt'] }}
      mode: "0440"
    - name: "postgresql.key"
      value: "{{ hostvars['console']['postgresql']['key'] }}"
      mode: "0400"
  become: true
  notify: "notification_reload_postgresql"
  no_log: true

- name: Check whether data directory is empty
  ansible.builtin.find:
    paths: "{{ node['home_path'] }}/containers/postgresql/data/"
    hidden: true
    file_type: "any"
  become: true
  register: "is_data_dir_empty"

- name: Prepare initiating DB
  when: is_data_dir_empty.matched == 0
  become: true
  block:
    # `init/pg_cluster.sql` should be fetched from postgresql's backup directory before running the init
    - name: Deploy init cluster sql file
      ansible.builtin.copy:
        src: "{{ hostvars['console']['node']['config_path'] }}/services/containers/infra/postgresql/init/pg_cluster.sql"
        dest: "{{ node['home_path'] }}/containers/postgresql/init/0_pg_cluster.sql"
        owner: "{{ postgresql_subuid }}"
        group: "svadmins"
        mode: "0600"

    - name: Deploy restoring data sql files
      ansible.builtin.copy:
        src: "{{ hostvars['console']['node']['config_path'] }}/services/containers/infra/postgresql/init/pg_{{ item }}.sql"
        dest: "{{ node['home_path'] }}/containers/postgresql/init/{{ index_num + 1 }}_pg_{{ item }}.sql"
        owner: "{{ postgresql_subuid }}"
        group: "svadmins"
        mode: "0600"
      loop: "{{ connected_services }}"
      loop_control:
        index_var: index_num

    - name: Set is_postgresql_init_run
      ansible.builtin.set_fact:
        is_postgresql_init_run: true

- name: Deploy container file
  ansible.builtin.template:
    src: "{{ hostvars['console']['node']['config_path'] }}/services/containers/infra/postgresql/postgresql.container.j2"
    dest: "{{ node['home_path'] }}/.config/containers/systemd/postgresql.container"
    owner: "{{ ansible_user }}"
    group: "svadmins"
    mode: "0644"
  notify: "notification_restart_postgresql"

- name: Deploy backup service files
  ansible.builtin.copy:
    src: "{{ hostvars['console']['node']['config_path'] }}/services/containers/infra/postgresql/services/{{ item }}"
    dest: "{{ node['home_path'] }}/.config/systemd/user/{{ item }}"
    owner: "{{ ansible_user }}"
    group: "svadmins"
    mode: "0644"
  loop:
    - "postgresql-cluster-backup.service"
    - "postgresql-cluster-backup.timer"
    - "postgresql-data-backup@.service"
    - "postgresql-data-backup@.timer"

- name: Enable postgresql
  ansible.builtin.systemd:
    name: "postgresql.service"
    state: "started"
    enabled: true
    daemon_reload: true
    scope: "user"

- name: Enable cluster backup timer
  ansible.builtin.systemd:
    name: "postgresql-cluster-backup.timer"
    state: "started"
    enabled: true
    daemon_reload: true
    scope: "user"

- name: Enable data backup timer
  ansible.builtin.systemd:
    name: "postgresql-data-backup@{{ item }}.timer"
    state: "started"
    enabled: true
    daemon_reload: true
    scope: "user"
  loop: "{{ connected_services }}"
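# Verify working (a sketch; the container name matches the "Reload postgresql" handler):
# podman exec -u postgres postgresql pg_isready
# systemctl --user list-timers 'postgresql-*'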
74
ansible/roles/infra/tasks/services/set_prometheus.yaml
Normal file
74
ansible/roles/infra/tasks/services/set_prometheus.yaml
Normal file
@@ -0,0 +1,74 @@
---
- name: Set prometheus container subuid
  ansible.builtin.set_fact:
    prometheus_subuid: "165533" # nobody - 65534

- name: Create prometheus directory
  ansible.builtin.file:
    path: "{{ node['home_path'] }}/containers/{{ item }}"
    state: "directory"
    owner: "{{ prometheus_subuid }}"
    group: "svadmins"
    mode: "0770"
  loop:
    - "prometheus"
    - "prometheus/etc"
    - "prometheus/data"
    - "prometheus/ssl"
  become: true

- name: Deploy prometheus configuration files
  ansible.builtin.template:
    src: "{{ hostvars['console']['node']['config_path'] }}/services/containers/infra/prometheus/etc/{{ item }}.j2"
    dest: "{{ node['home_path'] }}/containers/prometheus/etc/{{ item }}"
    owner: "{{ prometheus_subuid }}"
    group: "svadmins"
    mode: "0600"
  loop:
    - "prometheus.yaml"
    - "rules.yaml"
    - "web-config.yaml"
  become: true
  notify: "notification_restart_prometheus"
  no_log: true

- name: Deploy prometheus certificate and key
  ansible.builtin.copy:
    content: |
      {{ item.value }}
    dest: "{{ node['home_path'] }}/containers/prometheus/ssl/{{ item.name }}"
    owner: "{{ prometheus_subuid }}"
    group: "svadmins"
    mode: "{{ item.mode }}"
  loop:
    - name: "ilnmors_root_ca.crt"
      value: "{{ hostvars['console']['ca']['root']['crt'] }}"
      mode: "0440"
    - name: "prometheus.crt"
      value: |
        {{ hostvars['console']['prometheus']['crt'] | trim }}
        {{ hostvars['console']['ca']['intermediate']['crt'] }}
      mode: "0440"
    - name: "prometheus.key"
      value: "{{ hostvars['console']['prometheus']['key'] }}"
      mode: "0400"
  become: true
  notify: "notification_restart_prometheus"
  no_log: true

- name: Deploy container file
  ansible.builtin.template:
    src: "{{ hostvars['console']['node']['config_path'] }}/services/containers/infra/prometheus/prometheus.container.j2"
    dest: "{{ node['home_path'] }}/.config/containers/systemd/prometheus.container"
    owner: "{{ ansible_user }}"
    group: "svadmins"
    mode: "0644"
  notify: "notification_restart_prometheus"

- name: Enable prometheus
  ansible.builtin.systemd:
    name: "prometheus.service"
    state: "started"
    enabled: true
    daemon_reload: true
    scope: "user"
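# Verify working (a sketch; scheme and port depend on web-config.yaml and the quadlet —
# 9090 is only the Prometheus default):
# podman ps --filter name=prometheus
# curl -fsSk https://<prometheus-host>:9090/-/healthy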
63
ansible/roles/infra/tasks/services/set_x509-exporter.yaml
Normal file
63
ansible/roles/infra/tasks/services/set_x509-exporter.yaml
Normal file
@@ -0,0 +1,63 @@
---
- name: Set x509-exporter container subuid
  ansible.builtin.set_fact:
    x509_exporter_subuid: "165533" # nobody - 65534

- name: Create x509-exporter directory
  ansible.builtin.file:
    path: "{{ node['home_path'] }}/containers/{{ item }}"
    state: "directory"
    owner: "{{ x509_exporter_subuid }}"
    group: "svadmins"
    mode: "0770"
  loop:
    - "x509-exporter"
    - "x509-exporter/certs"
  become: true

- name: Deploy certificates
  ansible.builtin.copy:
    content: |
      {{ item.value }}
    dest: "{{ node['home_path'] }}/containers/x509-exporter/certs/{{ item.name }}"
    owner: "{{ x509_exporter_subuid }}"
    group: "svadmins"
    mode: "0440"
  loop:
    - name: "root.crt"
      value: "{{ hostvars['console']['ca']['root']['crt'] }}"
    - name: "intermediate.crt"
      value: "{{ hostvars['console']['ca']['intermediate']['crt'] }}"
    - name: "crowdsec.crt"
      value: "{{ hostvars['console']['crowdsec']['crt'] }}"
    - name: "blocky.crt"
      value: "{{ hostvars['console']['blocky']['crt'] }}"
    - name: "postgresql.crt"
      value: "{{ hostvars['console']['postgresql']['crt'] }}"
    - name: "ldap.crt"
      value: "{{ hostvars['console']['ldap']['crt'] }}"
    - name: "prometheus.crt"
      value: "{{ hostvars['console']['prometheus']['crt'] }}"
    - name: "loki.crt"
      value: "{{ hostvars['console']['loki']['crt'] }}"
    - name: "dsm.crt"
      value: "{{ hostvars['console']['dsm']['crt'] }}"
  become: true
  no_log: true

- name: Deploy container file
  ansible.builtin.template:
    src: "{{ hostvars['console']['node']['config_path'] }}/services/containers/infra/x509-exporter/x509-exporter.container.j2"
    dest: "{{ node['home_path'] }}/.config/containers/systemd/x509-exporter.container"
    owner: "{{ ansible_user }}"
    group: "svadmins"
    mode: "0644"
  notify: "notification_restart_x509-exporter"

- name: Enable x509-exporter.service
  ansible.builtin.systemd:
    name: "x509-exporter.service"
    state: "started"
    enabled: true
    daemon_reload: true
    scope: "user"
92
ansible/roles/vmm/tasks/node/set_libvirt.yaml
Normal file
92
ansible/roles/vmm/tasks/node/set_libvirt.yaml
Normal file
@@ -0,0 +1,92 @@
---
- name: Add user in libvirt group
  ansible.builtin.user:
    name: "{{ ansible_user }}"
    state: "present"
    groups: "libvirt,kvm,libvirt-qemu"
    append: true
  become: true

- name: Check libvirt directory
  ansible.builtin.stat:
    path: "/var/lib/libvirt/{{ item }}"
  loop:
    - "images"
    - "seeds"
  register: "is_libvirt_dir"

- name: Create libvirt directory
  ansible.builtin.file:
    path: "/var/lib/libvirt/{{ item.item }}"
    state: "directory"
    owner: "root"
    group: "root"
    mode: "0711"
  loop: "{{ is_libvirt_dir.results }}"
  when: not item.stat.exists
  become: true
  no_log: true

- name: Set LIBVIRT_DEFAULT_URI
  ansible.builtin.lineinfile:
    path: "{{ node['home_path'] }}/.bashrc"
    state: "present"
    line: "export LIBVIRT_DEFAULT_URI='qemu:///system'"
    regexp: '^export LIBVIRT_DEFAULT_URI='

- name: Define virtual networks
  community.libvirt.virt_net:
    name: "{{ item }}"
    xml: "{{ lookup('file', hostvars['console']['node']['config_path'] + '/services/systemd/vmm/libvirt/xml/networks/' + item + '.xml') }}"
    uri: "qemu:///system"
    command: "define"
  loop:
    - "wan-net"
    - "lan-net"

- name: Start virtual networks
  community.libvirt.virt_net:
    name: "{{ item }}"
    state: "active"
    uri: "qemu:///system"
    autostart: true
  loop:
    - "wan-net"
    - "lan-net"

- name: Autostart virtual networks
  community.libvirt.virt_net:
    name: "{{ item }}"
    uri: "qemu:///system"
    autostart: true
  loop:
    - "wan-net"
    - "lan-net"

- name: Define virtual storage pool
  community.libvirt.virt_pool:
    name: "{{ item }}"
    xml: "{{ lookup('file', hostvars['console']['node']['config_path'] + '/services/systemd/vmm/libvirt/xml/storages/' + item + '.xml') }}"
    uri: "qemu:///system"
    command: "define"
  loop:
    - "images-pool"
    - "seeds-pool"

- name: Start virtual storage pool
  community.libvirt.virt_pool:
    name: "{{ item }}"
    state: "active"
    uri: "qemu:///system"
  loop:
    - "images-pool"
    - "seeds-pool"

- name: Autostart virtual storage pool
  community.libvirt.virt_pool:
    name: "{{ item }}"
    uri: "qemu:///system"
    autostart: true
  loop:
    - "images-pool"
    - "seeds-pool"
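# Verify working
# virsh -c qemu:///system net-list --all
# virsh -c qemu:///system pool-list --all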
59
ansible/roles/vmm/tasks/vm/create_seed.yaml
Normal file
59
ansible/roles/vmm/tasks/vm/create_seed.yaml
Normal file
@@ -0,0 +1,59 @@
---
# This task file lives in the vmm role because of its attributes,
# but every step must run on "console".
# In the playbook, the `delegate_to: "console"` option is applied via `apply:`.
- name: Create images directory
  ansible.builtin.file:
    path: "{{ hostvars['console']['node']['data_path'] }}/images/seeds/{{ target_vm }}"
    state: "directory"
    owner: "console"
    group: "svadmins"
    mode: "0700"

- name: Create temp meta-data
  ansible.builtin.copy:
    content: |
      instance-id: vm-{{ target_vm }}
      local-hostname: {{ target_vm }}
    dest: "{{ hostvars['console']['node']['data_path'] }}/images/seeds/{{ target_vm }}/meta-data"
    owner: "console"
    group: "svadmins"
    mode: "0600"
  register: "vm_meta_data"
  no_log: true

- name: Create temp user-data
  ansible.builtin.template:
    src: "{{ hostvars['console']['node']['config_path'] }}/services/systemd/vmm/libvirt/seeds/user-data.j2"
    dest: "{{ hostvars['console']['node']['data_path'] }}/images/seeds/{{ target_vm }}/user-data"
    owner: "console"
    group: "svadmins"
    mode: "0600"
  register: "vm_user_data"
  no_log: true

- name: Create temp network-config
  ansible.builtin.copy:
    content: |
      network: {config: disabled}
    dest: "{{ hostvars['console']['node']['data_path'] }}/images/seeds/{{ target_vm }}/network-config"
    owner: "console"
    group: "svadmins"
    mode: "0600"
  register: "vm_network_config"
  no_log: true

- name: Check seed.iso
  ansible.builtin.stat:
    path: "{{ hostvars['console']['node']['data_path'] }}/images/seeds/{{ target_vm }}/seed.iso"
  register: "is_seediso"

- name: Create seed.iso
  ansible.builtin.shell:
    cmd: |
      cloud-localds -N {{ hostvars['console']['node']['data_path'] }}/images/seeds/{{ target_vm }}/network-config \
        {{ hostvars['console']['node']['data_path'] }}/images/seeds/{{ target_vm }}/seed.iso \
        {{ hostvars['console']['node']['data_path'] }}/images/seeds/{{ target_vm }}/user-data \
        {{ hostvars['console']['node']['data_path'] }}/images/seeds/{{ target_vm }}/meta-data
  when: vm_meta_data.changed or vm_user_data.changed or vm_network_config.changed or not is_seediso.stat.exists
  changed_when: true
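# Verify working (a sketch; cloud-localds labels the generated image "cidata", which
# cloud-init's NoCloud datasource expects):
# blkid <data_path>/images/seeds/<target_vm>/seed.iso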
55
ansible/roles/vmm/tasks/vm/deploy_vm_init.yaml
Normal file
55
ansible/roles/vmm/tasks/vm/deploy_vm_init.yaml
Normal file
@@ -0,0 +1,55 @@
---
- name: Check vm cloud-init
  ansible.builtin.stat:
    path: "/var/lib/libvirt/images/debian-13.qcow2"
  become: true
  register: is_cloud_init_file

- name: Deploy vm cloud-init
  ansible.builtin.copy:
    src: "{{ hostvars['console']['node']['data_path'] }}/images/debian-13-generic-amd64.qcow2"
    dest: "/var/lib/libvirt/images/debian-13.qcow2"
    owner: "{{ ansible_user }}"
    group: "svadmins"
    mode: "0644"
    force: false
  become: true
  when: not is_cloud_init_file.stat.exists

- name: Remote copy vm cloud-init file
  ansible.builtin.copy:
    src: "/var/lib/libvirt/images/debian-13.qcow2"
    dest: "/var/lib/libvirt/images/{{ target_vm }}.qcow2"
    owner: "{{ ansible_user }}"
    group: "svadmins"
    mode: "0644"
    remote_src: true
    force: false
  become: true

- name: Check deployed cloud-init file info
  ansible.builtin.command:
    cmd: "qemu-img info /var/lib/libvirt/images/{{ target_vm }}.qcow2 --output json"
  changed_when: false
  failed_when:
    - deployed_cloudfile_info.rc != 0
    - ("lock") not in deployed_cloudfile_info.stderr
  register: "deployed_cloudfile_info"

- name: Resize deployed cloud-init file
  ansible.builtin.command:
    cmd: "qemu-img resize /var/lib/libvirt/images/{{ target_vm }}.qcow2 {{ hostvars[target_vm]['vm']['storage'] }}G"
  when:
    - deployed_cloudfile_info.rc == 0
    - (deployed_cloudfile_info.stdout | from_json)['virtual-size'] < (hostvars[target_vm]['vm']['storage'] | int * 1024 * 1024 * 1024)
  changed_when: true

- name: Deploy vm seed.iso
  ansible.builtin.copy:
    src: "{{ hostvars['console']['node']['data_path'] }}/images/seeds/{{ target_vm }}/seed.iso"
    dest: "/var/lib/libvirt/seeds/{{ target_vm }}_seed.iso"
    owner: "{{ ansible_user }}"
    group: "svadmins"
    mode: "0644"
  become: true
  when: deployed_cloudfile_info.rc == 0
24
ansible/roles/vmm/tasks/vm/register_vm.yaml
Normal file
24
ansible/roles/vmm/tasks/vm/register_vm.yaml
Normal file
@@ -0,0 +1,24 @@
---
- name: Register VM xml file
  community.libvirt.virt:
    name: "{{ target_vm }}"
    xml: |
      {{ lookup('template', hostvars['console']['node']['config_path'] + '/services/systemd/vmm/libvirt/xml/vms/vms.xml.j2') }}
    uri: "qemu:///system"
    command: define

- name: Deploy VM systemd file
  ansible.builtin.copy:
    src: "{{ hostvars['console']['node']['config_path'] }}/services/systemd/vmm/libvirt/services/{{ target_vm }}.service"
    dest: "{{ node['home_path'] }}/.config/systemd/user/{{ target_vm }}.service"
    owner: "{{ ansible_user }}"
    group: "svadmins"
    mode: "0400"

- name: Register VM service
  ansible.builtin.systemd:
    name: "{{ target_vm }}.service"
    state: "started"
    enabled: true
    daemon_reload: true
    scope: "user"
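# Verify working
# virsh -c qemu:///system list --all
# systemctl --user status <target_vm>.service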