Compare commits

...

52 Commits

SHA1 Message Date
07ae702362 Removing Immich from backups 2025-12-24 14:49:54 -05:00
8168246198 Adding some date logging 2025-12-13 12:59:11 -05:00
eec8b0ae6b Fixing a minor issue with backup directory mounting on docker nodes and removing monitor from the process 2025-12-12 22:41:44 -05:00
fb7c3ddbad Adding the rclone step, still missing db backups 2025-11-18 18:52:43 -05:00
e912ee0133 Merge branch 'main' of https://git.coldlightalchemist.com/Bradley/Random_Ansible_Stuff 2025-11-18 18:13:01 -05:00
c23b925736 Removing publicworks from the backup process 2025-11-18 18:12:52 -05:00
c2e77c780d Adding some stuff for church 2025-09-27 12:17:41 -04:00
59cd39f5d6 Adding monitor to the docker nodes group 2025-09-11 20:48:28 -04:00
6f39b7f6e3 Adding monitor VM 2025-09-11 20:45:17 -04:00
2ed0d51167 Configs for other services that need to be backed up 2025-08-31 22:01:35 -04:00
f1fb6562a5 More fixing files kept stuff 2025-08-31 11:26:59 -04:00
bbe8debeb6 Fixing a mapping issue for some of the backup cleanups 2025-08-31 11:16:09 -04:00
01e4a973e0 Fixes for env file archiving 2025-08-25 20:50:34 -04:00
ddf60af4c2 A little debugging 2025-08-25 20:48:04 -04:00
104cc97f96 ENV backup fixes 2025-08-25 20:40:31 -04:00
5bd16afc90 Fix for finding hidden files 2025-08-25 20:19:43 -04:00
07d9637e9b Loop var fixes 2025-08-25 19:58:44 -04:00
120f3f8a26 Fixing some delegation and removing a become directive 2025-08-25 19:30:24 -04:00
75ad0e053b Only root can write to the backup directory, which is probably fine...probably 2025-08-25 18:58:45 -04:00
4d213eff75 Fixing a forgotten delegation 2025-08-25 18:57:06 -04:00
274a736639 Removing some junk and fixing a bad variable name 2025-08-25 18:55:13 -04:00
bb27a5d3ce PFSense is weird about SSL and HSTS. Removing the S in HTTPS so it can redirect on its own 2025-08-25 18:52:59 -04:00
c078925621 Fixing inventory backup URLs for PFSense nodes 2025-08-25 18:42:00 -04:00
d53b9b59bd Fixing share mounting in backup protocol 2025-08-25 18:39:03 -04:00
38414b5066 Removing fact gathering from the backup protocol playbook 2025-08-25 18:37:53 -04:00
922a4634b7 Fixing import vs include role in the backup plays 2025-08-25 18:34:46 -04:00
3fad0e1124 Minor changes 2025-08-25 15:51:53 -04:00
846496d618 More backup stuff 2025-08-20 17:45:57 -04:00
16e9c32e41 More backup solution engineering 2025-08-20 17:32:18 -04:00
752353cc41 Missed a file in the last update 2025-08-17 23:18:47 -04:00
686543a1b9 The beginnings of a backup solution 2025-08-17 21:01:27 -04:00
d9262852c8 Revaulting a smaller salt to satisfy passlib 2025-01-14 19:11:17 -05:00
93cc091cef Trying something different for passlib 2025-01-14 19:06:46 -05:00
c6d7edac85 Minor fix for the password updater playbook 2025-01-14 19:03:24 -05:00
6d1318e954 Adding update_user_password 2025-01-14 18:57:18 -05:00
b71861555f Adding some additional hosts 2024-12-05 18:41:27 -05:00
4a4c5d9aad Adding all the stuff that was needed to get gitea working again... 2024-12-03 13:12:59 -05:00
5d517edc4e Changing the port number of HTTPd test 2024-11-27 15:43:46 -05:00
aa62c09790 Minor fix for IaC_httptest 2024-11-27 15:34:07 -05:00
e06cc98179 Adding IaC_httptest 2024-11-27 14:54:09 -05:00
1ee28bd0bb Trying to resolve a mysql root password issue 2024-11-24 12:06:42 -05:00
f158328629 Changing container versions for MySQL and Postgres 2024-11-23 17:22:33 -05:00
df0b896ec4 Securing IaC_database configuration 2024-11-22 12:02:56 -05:00
66ca351e2e More experimentation with make_lvm_mount 2024-11-19 17:37:15 -05:00
08ac08561d More mount tweaks in make_lvm_mount 2024-11-19 17:32:47 -05:00
8929965866 Fixing an issue with make_lvm_mount that causes mounts to not have the right permissions after setting up the mount 2024-11-19 17:27:24 -05:00
d841524c59 More tweaking for IaC_database.yml 2024-11-19 17:17:14 -05:00
eb4e59ed34 I think I've found the hiccup with the DB server IaC configuration 2024-11-19 17:09:33 -05:00
3009794bd2 Questioning my sanity but may have a fix here 2024-11-13 20:58:21 -05:00
331e0d1efb More fixes for IaC_database! 2024-11-13 17:59:49 -05:00
d573bd65db Adding some more fixes for IaC_database playbook 2024-11-13 17:44:15 -05:00
0ef43910dd Further attempts to fix issues with IaC_database 2024-11-12 21:05:10 -05:00
22 changed files with 1412 additions and 277 deletions

View File

@@ -1,3 +1,3 @@
{
"ansible.python.interpreterPath": "c:\\Program Files\\Python312\\python.exe"
}

View File

@@ -1,2 +1,2 @@
[defaults]
host_key_checking = false

inventories/church.yml (new file, 12 lines)
View File

@@ -0,0 +1,12 @@
---
all:
hosts:
vestrytv:
ansible_host: "192.168.1.200"
connection: "local"
sanctuarytvs:
ansible_host: "192.168.1.201"
children:
docker_nodes:
hosts:
vestrytv:

View File

@@ -1,13 +0,0 @@
[ansible_nodes]
blacktide ansible_host=192.168.3.2 connection=local
[podman_nodes]
arcade ansible_host=10.20.24.3
beachpolice ansible_host=10.42.0.3
bulletinboard ansible_host=10.26.48.3
lifeguard ansible_host=172.16.132.4
beachsidelibrary ansible_host=10.12.34.3
[pfsense_nodes]
openocean ansible_host=172.16.132.2
boardwalk ansible_host=10.77.7.2

View File

@@ -0,0 +1,151 @@
---
all:
hosts:
blacktide:
ansible_host: "192.168.3.2"
connection: "local"
web:
ansible_host: "10.26.48.3"
docker_backup:
- container_name: http-test
directories_to_backup:
- /var/www/html
backup_dir: /backup/httptest
backup_name_prefix: http-test
max_backups_kept: 3
- container_name: gitea
directories_to_backup:
- /gitea
backup_dir: /backup/gitea
backup_name_prefix: gitea
max_backups_kept: 3
- container_name: infrastructure-compose-web_recipes-1
directories_to_backup:
- /tandoor
backup_dir: /backup/tandoor
backup_name_prefix: tandoor
max_backups_kept: 3
database:
ansible_host: "10.12.34.3"
publicworks:
ansible_host: "10.77.7.3"
dmz:
ansible_host: "172.16.132.4"
docker_backup:
- container_name: infrastructure-compose-velocity-1
directories_to_backup:
- /velocity
backup_dir: /backup/velocity
backup_name_prefix: velocity
max_backups_kept: 3
- container_name: infrastructure-compose-bedrockconnect-1
directories_to_backup:
- /root/bedrockconnect/custom_servers.json
backup_dir: /backup/bedrockconnect
backup_name_prefix: bedrockconnect
max_backups_kept: 3
games:
ansible_host: "10.20.24.3"
docker_backup:
- container_name: infrastructure-compose-team2648mc-1
directories_to_backup:
- /minecraft_team2648mc/superflat
- /minecraft_team2648mc/world
- /minecraft_team2648mc/server.properties
- /minecraft_team2648mc/config/paper-global.yml
- /minecraft_team2648mc/whitelist.json
- /minecraft_team2648mc/plugins/CatchBall
- /minecraft_team2648mc/plugins/ChestShop
- /minecraft_team2648mc/plugins/CoreProtect/config.yml
- /minecraft_team2648mc/plugins/dynmap/configuration.txt
- /minecraft_team2648mc/plugins/EconomyShopGUI
- /minecraft_team2648mc/plugins/GriefPreventionData
- /minecraft_team2648mc/plugins/HuskHomes
- /minecraft_team2648mc/plugins/LuckPerms
- /minecraft_team2648mc/plugins/mcMMO/config.yml
- /minecraft_team2648mc/plugins/Multiverse-Core
- /minecraft_team2648mc/plugins/Multiverse-Portals
- /minecraft_team2648mc/plugins/PlayerPoints
- /minecraft_team2648mc/plugins/SkinsRestorer
- /minecraft_team2648mc/plugins/Vault
backup_dir: /backup/minecraft
backup_name_prefix: team2648mc
max_backups_kept: 3
- container_name: infrastructure-compose-caleysmc-1
directories_to_backup:
- /minecraft_caleysmc/world
- /minecraft_caleysmc/server.properties
- /minecraft_caleysmc/config/paper-global.yml
- /minecraft_caleysmc/whitelist.json
- /minecraft_caleysmc/plugins/CatchBall
- /minecraft_caleysmc/plugins/ChestShop
- /minecraft_caleysmc/plugins/CoreProtect/config.yml
- /minecraft_caleysmc/plugins/dynmap/configuration.txt
- /minecraft_caleysmc/plugins/EconomyShopGUI
- /minecraft_caleysmc/plugins/GriefPreventionData
- /minecraft_caleysmc/plugins/HuskHomes
- /minecraft_caleysmc/plugins/LuckPerms
- /minecraft_caleysmc/plugins/mcMMO/config.yml
- /minecraft_caleysmc/plugins/Multiverse-Core
- /minecraft_caleysmc/plugins/Multiverse-Portals
- /minecraft_caleysmc/plugins/PlayerPoints
- /minecraft_caleysmc/plugins/SkinsRestorer
- /minecraft_caleysmc/plugins/Vault
backup_dir: /backup/minecraft
backup_name_prefix: caleysmc
max_backups_kept: 3
- container_name: infrastructure-compose-thespacebetween-1
directories_to_backup:
- /minecraft_caleysmc/world
- /minecraft_caleysmc/server.properties
- /minecraft_caleysmc/config/paper-global.yml
- /minecraft_caleysmc/whitelist.json
- /minecraft_caleysmc/plugins/SkinsRestorer
backup_dir: /backup/minecraft
backup_name_prefix: thespacebetween
max_backups_kept: 3
nfsserver:
ansible_host: "10.42.0.3"
openocean:
ansible_host: "172.16.132.2"
backup_url: "172.16.132.2:1234"
backup_user: "backup"
backup_user_password: !vault |
$ANSIBLE_VAULT;1.1;AES256
31336636333061393333326263353835636431313739613462356531623532663137626466613433
3438616239333536643835643933333461323666343864640a386361306163623261656630643837
65396139333264636333656337383766393931393934373335646231336330656561303039326665
3331636339356236330a333330373264306163393162386330393763613332376261373433303035
65633034366439343232356639346665326234666630633063616437376131396463
backup_location: "/backup"
backup_number_to_keep: 10
boardwalk:
ansible_host: "10.77.7.2"
backup_url: "10.77.7.2:1234"
backup_user: "backup"
backup_user_password: !vault |
$ANSIBLE_VAULT;1.1;AES256
34313763623733323133393734326533333661343239393037666462323732393839386131393165
6233376533623431333238373039353330616265363566320a393730343938306430313864666534
36363736303436643163313636303931323032366136616634366363383036303737356336343638
6566663062336566320a323536303233393431363263313933643839303435356266656136343438
35306638356564333962656433323735656136386130373233393765616265306636
backup_location: "/backup"
backup_number_to_keep: 10
monitor:
ansible_host: "10.42.0.4"
children:
docker_nodes:
hosts:
web:
database:
publicworks:
dmz:
games:
pfsense_nodes:
hosts:
openocean:
boardwalk:
database_nodes:
database:

View File

@@ -1,9 +0,0 @@
[masters]
kubemaster ansible_host=10.20.24.4 master=true
[workers]
kubeworker1 ansible_host=10.20.24.5 worker=true
kubeworker2 ansible_host=10.20.24.6 worker=true
[ansible_nodes]
ansible ansible_host=10.20.24.3 connection=local

View File

@@ -39,6 +39,23 @@
comment: "Podman user for Postgresql Database" comment: "Podman user for Postgresql Database"
uid: 2000 uid: 2000
- name: Make .bashrc.d directory for psql user
ansible.builtin.file:
path: /home/psql/.bashrc.d
owner: psql
group: psql
mode: "0750"
state: directory
- name: Set XDG_RUNTIME_DIR var for psql user
ansible.builtin.lineinfile:
path: /home/psql/.bashrc.d/systemd
owner: psql
group: psql
mode: "0750"
line: "export XDG_RUNTIME_DIR=/run/user/2000"
create: true
- name: Allow psql user to linger - name: Allow psql user to linger
ansible.builtin.shell: ansible.builtin.shell:
cmd: "loginctl enable-linger 2000" cmd: "loginctl enable-linger 2000"
@@ -66,6 +83,41 @@
comment: "Podman user for MySQL Database" comment: "Podman user for MySQL Database"
uid: 2001 uid: 2001
- name: Make .bashrc.d directory for mysql user
ansible.builtin.file:
path: /home/mysql/.bashrc.d
owner: mysql
group: mysql
mode: "0750"
state: directory
- name: Set XDG_RUNTIME_DIR var for mysql user
ansible.builtin.lineinfile:
path: /home/mysql/.bashrc.d/systemd
owner: mysql
group: mysql
mode: "0750"
line: "export XDG_RUNTIME_DIR=/run/user/2001"
create: true
- name: Place container environment file for psql user
ansible.builtin.lineinfile:
path: /home/psql/.containerenv
owner: psql
group: psql
mode: "0750"
line: "POSTGRES_PASSWORD={{ postgres_db_password }}"
create: true
- name: Place container environment file for mysql user
ansible.builtin.lineinfile:
path: /home/mysql/.containerenv
owner: mysql
group: mysql
mode: "0750"
line: "MYSQL_ROOT_PASSWORD={{ mysql_db_password }}"
create: true
- name: Allow mysql user to linger - name: Allow mysql user to linger
ansible.builtin.shell: ansible.builtin.shell:
cmd: "loginctl enable-linger 2001" cmd: "loginctl enable-linger 2001"
@@ -85,6 +137,10 @@
group: mysql group: mysql
mode: "0755" mode: "0755"
lv: "{{ mysql_lv_name }}" lv: "{{ mysql_lv_name }}"
- name: Run systemctl daemon-reload
ansible.builtin.systemd_service:
daemon_reload: true
- name: Write subuid user entry for psql - name: Write subuid user entry for psql
ansible.builtin.lineinfile: ansible.builtin.lineinfile:
@@ -132,40 +188,44 @@
permanent: true permanent: true
immediate: true immediate: true
# TODO Unit spec should get *.mount After directive from variables
- name: Build postgres quadlet - name: Build postgres quadlet
containers.podman.podman_container: containers.podman.podman_container:
name: postgres name: postgres
image: "postgres:latest" image: "docker.io/library/postgres:17"
state: quadlet state: quadlet
quadlet_filename: "postgres-quadlet" quadlet_filename: "postgres-quadlet"
quadlet_file_mode: "0640" quadlet_file_mode: "0640"
user: "psql" rm: false
ports: ports:
- "5432:5432" - "5432:5432"
volumes: volumes:
- "{{ postgres_data_directory }}:/var/lib/postgresql/data" - "{{ postgres_data_directory }}:/var/lib/postgresql/data:Z"
quadlet_options: quadlet_options:
- "AutoUpdate=registry" - "AutoUpdate=registry"
- "Pull=newer" - "Pull=newer"
- | - |
[Install] [Install]
WantedBy=default.target WantedBy=default.target
env: - |
POSTGRES_PASSWORD: "{{ postgres_db_password }}" [Unit]
Description=Postgres Quadlet
After=pgdata.mount
env_file: "/home/psql/.containerenv"
become_user: "psql" become_user: "psql"
- name: Build mysql quadlet - name: Build mysql quadlet
containers.podman.podman_container: containers.podman.podman_container:
name: mysql name: mysql
image: "mysql:latest" image: "docker.io/library/mysql:8"
state: quadlet state: quadlet
quadlet_filename: "mysql-quadlet" quadlet_filename: "mysql-quadlet"
quadlet_file_mode: "0640" quadlet_file_mode: "0640"
user: "mysql" rm: false
ports: ports:
- "3306:3306" - "3306:3306"
volumes: volumes:
- "{{ mysql_data_directory }}:/var/lib/mysql" - "{{ mysql_data_directory }}:/var/lib/mysql:Z"
quadlet_options: quadlet_options:
- "AutoUpdate=registry" - "AutoUpdate=registry"
- "Pull=newer" - "Pull=newer"
@@ -173,16 +233,31 @@
- | - |
[Install] [Install]
WantedBy=default.target WantedBy=default.target
env: - |
MYSQL_ROOT_PASSWORD: "{{ mysql_db_password }}" [Unit]
Description=MySQL Quadlet
After=mysql_data.mount
env_file: "/home/mysql/.containerenv"
become_user: "mysql" become_user: "mysql"
- name: Run systemctl --user daemon-reload - name: Run systemctl --user daemon-reload
ansible.builtin.shell: ansible.builtin.systemd_service:
cmd: "systemctl --user daemon-reload" daemon_reload: true
scope: user
become_user: "{{ item }}" become_user: "{{ item }}"
become_method: community.general.machinectl
loop: loop:
- psql - psql
- mysql - mysql
- name: Run systemctl --user start for each quadlet
ansible.builtin.systemd_service:
name: "{{ item.service }}"
scope: user
state: started
become_user: "{{ item.user }}"
loop:
- service: postgres-quadlet.service
user: psql
- service: mysql-quadlet.service
user: mysql

playbooks/IaC_gitea.yml (new file, 159 lines)
View File

@@ -0,0 +1,159 @@
---
- hosts: bulletinboard
become: true
become_method: sudo
become_user: root
vars:
gitea_device: "/dev/vdc"
gitea_vg_name: "vg_gitea"
gitea_lv_name: "lv_gitea"
gitea_data_directory: "/gitea"
vars_prompt:
- name: gitea_password
prompt: "Enter gitea Password: "
private: true
encrypt: sha512_crypt
confirm: true
salt_size: 7
- name: gitea_db_password
prompt: "Enter Gitea DB Password: "
private: true
tasks:
- name: Create gitea user
ansible.builtin.user:
name: gitea
password: "{{ gitea_password }}"
comment: "Podman user for Gitea application"
uid: 2001
- name: Make .bashrc.d directory for gitea user
ansible.builtin.file:
path: /home/gitea/.bashrc.d
owner: gitea
group: gitea
mode: "0750"
state: directory
- name: Set XDG_RUNTIME_DIR var for gitea user
ansible.builtin.lineinfile:
path: /home/gitea/.bashrc.d/systemd
owner: gitea
group: gitea
mode: "0750"
line: "export XDG_RUNTIME_DIR=/run/user/2001"
create: true
- name: Allow gitea user to linger
ansible.builtin.shell:
cmd: "loginctl enable-linger 2001"
- name: Place container environment file for gitea user
ansible.builtin.lineinfile:
path: /home/gitea/.containerenv
owner: gitea
group: gitea
mode: "0750"
line: "{{ item }}"
insertafter: EOF
create: true
no_log: true
loop:
- "GITEA__database_DB_TYPE=mysql"
- "GITEA__database__HOST=10.12.34.3:3306"
- "GITEA__database__NAME=gitea"
- "GITEA__database__USER=gitea"
- "GITEA__database__PASSWD={{ gitea_db_password }}"
- name: Run systemctl daemon-reload
ansible.builtin.systemd_service:
daemon_reload: true
- name: Write subuid user entry for gitea
ansible.builtin.lineinfile:
path: /etc/subuid
line: "gitea:102000:2000"
insertafter: EOF
create: true
state: present
- name: Write subgid user entry for gitea
ansible.builtin.lineinfile:
path: /etc/subgid
line: "gitea:102000:2000"
insertafter: EOF
create: true
state: present
- name: Configure firewalld for gitea
ansible.posix.firewalld:
port: "8081/tcp"
state: enabled
permanent: true
immediate: true
- name: Configure quadlet volumes
containers.podman.podman_volume:
state: quadlet
name: "gitea-{{ item }}"
quadlet_filename: "gitea-quadlet-volumes-{{ item }}"
quadlet_file_mode: "0640"
quadlet_options:
- |
[Install]
WantedBy=default.target
- |
[Unit]
Description=Gitea {{ item }} Volume
loop:
- "data"
- "config"
become_user: gitea
# TODO Unit spec should get *.mount After directive from variables
- name: Build gitea quadlet
containers.podman.podman_container:
name: gitea
image: "docker.io/gitea/gitea:latest-rootless"
state: quadlet
quadlet_filename: "gitea-quadlet"
quadlet_file_mode: "0640"
rm: false
ports:
- "8081:3000"
volumes:
- "gitea-data:/var/lib/gitea:Z"
- "gitea-config:/etc/gitea:Z"
- "/etc/localtime:/etc/timezone:ro"
- "/etc/localtime:/etc/localtime:ro"
quadlet_options:
- "AutoUpdate=registry"
- "Pull=newer"
- |
[Install]
WantedBy=default.target
- |
[Unit]
Description=Gitea Quadlet
- |
[Service]
ExecStartPre=/home/gitea/service_up.sh 3306
env_file: "/home/gitea/.containerenv"
become_user: "gitea"
- name: Run systemctl --user daemon-reload
ansible.builtin.systemd_service:
daemon_reload: true
scope: user
become_user: "gitea"
- name: Run systemctl --user start for all services
ansible.builtin.systemd_service:
state: started
scope: user
name: "{{ item }}"
loop:
- "gitea-quadlet-volumes-data-volume.service"
- "gitea-quadlet-volumes-config-volume.service"
- "gitea-quadlet.service"
become_user: "gitea"

playbooks/IaC_gitea.yml.bak (new file, 160 lines)
View File

@@ -0,0 +1,160 @@
---
- hosts: bulletinboard
become: true
become_method: sudo
become_user: root
vars:
gitea_device: "/dev/vdc"
gitea_vg_name: "vg_gitea"
gitea_lv_name: "lv_gitea"
gitea_data_directory: "/gitea"
vars_prompt:
- name: gitea_password
prompt: "Enter gitea Password: "
private: true
encrypt: sha512_crypt
confirm: true
salt_size: 7
- name: gitea_db_password
prompt: "Enter Gitea DB Password: "
private: true
tasks:
- name: Create gitea user
ansible.builtin.user:
name: gitea
password: "{{ gitea_password }}"
comment: "Podman user for Gitea application"
uid: 2001
- name: Make .bashrc.d directory for gitea user
ansible.builtin.file:
path: /home/gitea/.bashrc.d
owner: gitea
group: gitea
mode: "0750"
state: directory
- name: Set XDG_RUNTIME_DIR var for gitea user
ansible.builtin.lineinfile:
path: /home/gitea/.bashrc.d/systemd
owner: gitea
group: gitea
mode: "0750"
line: "export XDG_RUNTIME_DIR=/run/user/2001"
create: true
- name: Allow gitea user to linger
ansible.builtin.shell:
cmd: "loginctl enable-linger 2001"
- name: Build /gitea mount
ansible.builtin.import_role:
name: make_lvm_mount
vars:
device_name: "{{ gitea_device }}"
vg_name: "{{ gitea_vg_name }}"
lvs:
- lv_name: "{{ gitea_lv_name }}"
lv_size: "100%FREE"
directories:
- name: "{{ gitea_data_directory }}"
owner: gitea
group: gitea
mode: "0755"
lv: "{{ gitea_lv_name }}"
- name: Make /gitea subdirectories
ansible.builtin.file:
path: "{{ gitea_data_directory }}/{{ item }}"
owner: gitea
group: gitea
mode: "0750"
state: directory
loop:
- "data"
- "config"
- name: Place container environment file for gitea user
ansible.builtin.lineinfile:
path: /home/gitea/.containerenv
owner: gitea
group: gitea
mode: "0750"
line: "{{ item }}"
insertafter: EOF
create: true
no_log: true
loop:
- "USER_UID=2001"
- "USER_GID=2001"
- "GITEA__database_DB_TYPE=mysql"
- "GITEA__database__HOST=10.12.34.3:3306"
- "GITEA__database__NAME=gitea"
- "GITEA__database__USER=gitea"
- "GITEA__database__PASSWD={{ gitea_db_password }}"
- name: Run systemctl daemon-reload
ansible.builtin.systemd_service:
daemon_reload: true
- name: Write subuid user entry for gitea
ansible.builtin.lineinfile:
path: /etc/subuid
line: "gitea:102000:2000"
insertafter: EOF
create: true
state: present
- name: Write subgid user entry for gitea
ansible.builtin.lineinfile:
path: /etc/subgid
line: "gitea:102000:2000"
insertafter: EOF
create: true
state: present
- name: Configure firewalld for gitea
ansible.posix.firewalld:
port: "8081/tcp"
state: enabled
permanent: true
immediate: true
# TODO Unit spec should get *.mount After directive from variables
- name: Build gitea quadlet
containers.podman.podman_container:
name: gitea
image: "docker.io/gitea/gitea:latest-rootless"
state: quadlet
quadlet_filename: "gitea-quadlet"
quadlet_file_mode: "0640"
rm: false
ports:
- "8081:3000"
volumes:
- "{{ gitea_data_directory }}/data:/var/lib/gitea:Z"
- "{{ gitea_data_directory }}/config:/etc/gitea:Z"
- "/etc/localtime:/etc/timezone:ro"
- "/etc/localtime:/etc/localtime:ro"
quadlet_options:
- "AutoUpdate=registry"
- "Pull=newer"
- |
[Install]
WantedBy=default.target
- |
[Unit]
Description=Gitea Quadlet
After=gitea.mount
- |
[Service]
ExecStartPre=/home/gitea/service_up.sh 3306
env_file: "/home/gitea/.containerenv"
become_user: "gitea"
- name: Run systemctl --user daemon-reload
ansible.builtin.systemd_service:
daemon_reload: true
scope: user
become_user: "gitea"

playbooks/IaC_httptest.yml (new file, 118 lines)
View File

@@ -0,0 +1,118 @@
---
- hosts: bulletinboard
become: true
become_method: sudo
become_user: root
vars:
httptest_user_www: "/home/httptest/www"
vars_prompt:
- name: httptest_password
prompt: "Enter httptest Password: "
private: true
encrypt: sha512_crypt
confirm: true
salt_size: 7
tasks:
- name: Create httptest user
ansible.builtin.user:
name: httptest
password: "{{ httptest_password }}"
comment: "Podman user for httpd test host"
uid: 2000
- name: Make .bashrc.d directory for httptest user
ansible.builtin.file:
path: /home/httptest/.bashrc.d
owner: httptest
group: httptest
mode: "0750"
state: directory
- name: Set XDG_RUNTIME_DIR var for httptest user
ansible.builtin.lineinfile:
path: /home/httptest/.bashrc.d/systemd
owner: httptest
group: httptest
mode: "0750"
line: "export XDG_RUNTIME_DIR=/run/user/2000"
create: true
- name: Allow httptest user to linger
ansible.builtin.shell:
cmd: "loginctl enable-linger 2000"
- name: Make www directory for httptest user
ansible.builtin.file:
path: "{{ httptest_user_www }}"
owner: httptest
group: httptest
mode: "0755"
state: directory
- name: Make index.html file
ansible.builtin.lineinfile:
path: "{{ httptest_user_www }}/index.html"
owner: httptest
group: httptest
mode: "0644"
line: "<!DOCTYPE html><html><body><h1>TEST</h1></body></html>"
create: true
- name: Write subuid user entry for httptest
ansible.builtin.lineinfile:
path: /etc/subuid
line: "httptest:100000:2000"
insertafter: EOF
create: true
state: present
- name: Write subgid user entry for httptest
ansible.builtin.lineinfile:
path: /etc/subgid
line: "httptest:100000:2000"
insertafter: EOF
create: true
state: present
- name: Configure firewalld for httpd
ansible.posix.firewalld:
port: "8080/tcp"
state: enabled
permanent: true
immediate: true
- name: Build httpd quadlet
containers.podman.podman_container:
name: httptest
image: "docker.io/library/httpd:latest"
state: quadlet
quadlet_filename: "httptest-quadlet"
quadlet_file_mode: "0640"
rm: false
ports: 8080:80
volumes:
- "{{ httptest_user_www }}:/usr/local/apache2/htdocs:Z"
quadlet_options:
- "AutoUpdate=registry"
- "Pull=newer"
- |
[Install]
WantedBy=default.target
- |
[Unit]
Description=httpd Test Quadlet
After=home.mount
become_user: "httptest"
- name: Run systemctl --user daemon-reload
ansible.builtin.systemd_service:
daemon_reload: true
scope: user
become_user: "httptest"
- name: Run systemctl --user start for httptest-quadlet
ansible.builtin.systemd_service:
name: "httptest-quadlet.service"
scope: user
state: started
become_user: "httptest"

View File

@@ -0,0 +1,220 @@
---
# Bits and pieces of this play are stolen from https://github.com/gavinwill/ansible-role-pfsense-backup/tree/main
- name: PFSense Backups
hosts: pfsense_nodes
gather_facts: false
collections:
- ansible.posix
tasks:
- name: "Log first stage start time"
ansible.builtin.debug:
msg: "{{ now() }}"
run_once: true
delegate_to: blacktide
- name: Mount backup share
ansible.posix.mount:
path: /backup
src: "10.42.0.3:/backups/pfsense"
fstype: nfs
opts: "hard,intr,nodev,nosuid,noexec"
state: ephemeral
delegate_to: blacktide
run_once: true
become: true
become_method: sudo
- name: Get Cookies and CSRF Token
ansible.builtin.uri:
url: "http://{{ backup_url }}/diag_backup.php"
validate_certs: false
method: GET
return_content: true
register: pfsense_cookie_token
no_log: "{{ pfsense_backup_disable_logging | default(false) }}"
delegate_to: blacktide
- name: Set CSRF Token and Cookie Fact
ansible.builtin.set_fact:
pfsense_backup_csrf: "{{ pfsense_cookie_token.content | regex_search('var\\s+csrfMagicToken\\s+=\\s+\\\"([a-f0-9sidp:;,]+)\\\"', '\\1') }}"
pfsense_backup_cookie: "{{ pfsense_cookie_token.set_cookie }}"
no_log: "{{ pfsense_backup_disable_logging | default(false) }}"
- name: Authenticate with backup page and register backup CSRF
ansible.builtin.uri:
url: "http://{{ backup_url }}/diag_backup.php"
validate_certs: false
follow_redirects: false
method: POST
return_content: true
body_format: form-urlencoded
status_code: 302
body:
login: Login
usernamefld: "{{ backup_user }}"
passwordfld: "{{ backup_user_password }}"
__csrf_magic: "{{ pfsense_backup_csrf }}"
headers:
Cookie: "{{ pfsense_backup_cookie }}"
register: pfsense_cookie_token_2
delegate_to: blacktide
no_log: "{{ pfsense_backup_disable_logging | default(false) }}"
- name: Set Cookie Fact from Backup page
ansible.builtin.set_fact:
pfsense_backup_cookie_1: "{{ pfsense_cookie_token_2.set_cookie }}"
no_log: "{{ pfsense_backup_disable_logging | default(false) }}"
- name: Fetch Target page for new CSRF token
ansible.builtin.uri:
url: "http://{{ backup_url }}/diag_backup.php"
validate_certs: false
follow_redirects: false
method: GET
return_content: true
headers:
Cookie: "{{ pfsense_backup_cookie_1 }}"
register: pfsense_cookie_token_3
delegate_to: blacktide
no_log: "{{ pfsense_backup_disable_logging | default(false) }}"
- name: Set fact for CSRF Token and Cookie
ansible.builtin.set_fact:
pfsense_backup_csrf_1: "{{ pfsense_cookie_token_3.content | regex_search('var\\s+csrfMagicToken\\s+=\\s+\\\"([a-f0-9sidp:;,]+)\\\"', '\\1') }}"
pfsense_backup_cookie_2: "{{ pfsense_cookie_token_3.set_cookie }}"
no_log: "{{ pfsense_backup_disable_logging | default(false) }}"
- name: Download Backup Configuration
ansible.builtin.uri:
url: "http://{{ backup_url }}/diag_backup.php"
validate_certs: false
follow_redirects: false
method: "POST"
return_content: true
body_format: form-urlencoded
body:
download: download
backupssh: "yes"
backupdata: "yes"
donotbackuprrd: "yes"
__csrf_magic: "{{ pfsense_backup_csrf_1 }}"
headers:
Cookie: "{{ pfsense_backup_cookie_2 }}"
dest: "{{ backup_location }}/{{ inventory_hostname }}_{{ now().strftime('%Y%m%d%H%M%S') }}.xml"
changed_when: false
delegate_to: blacktide
no_log: "{{ pfsense_backup_disable_logging | default(false) }}"
- name: Find all PFSense backups for the current host
ansible.builtin.find:
paths: "{{ backup_location }}"
patterns: "{{ inventory_hostname }}*"
register: all_pfsense_backups
delegate_to: blacktide
- name: If too many backups kept
when: all_pfsense_backups.files | length > backup_number_to_keep
block:
- name: Get the oldest file paths
ansible.builtin.set_fact:
oldest_file_paths: >-
{{ (all_pfsense_backups.files | sort(attribute='mtime'))[:all_pfsense_backups.files | length - backup_number_to_keep] |
map(attribute='path') | list }}
- name: Remove the files
ansible.builtin.file:
path: "{{ item }}"
state: absent
loop: "{{ oldest_file_paths }}"
delegate_to: blacktide
- name: Docker Infrastructure Compose Backups
hosts: docker_nodes,!publicworks
become: true
become_method: sudo
gather_facts: false
collections:
- community.docker
- community.general
vars:
env_backups_to_keep: 10
tasks:
- name: "Log first stage start time"
ansible.builtin.debug:
msg: "{{ now() }}"
run_once: true
delegate_to: blacktide
- name: Run mount to ensure the /backup directory is present
ansible.builtin.shell:
cmd: "mount -a"
changed_when: false
- name: Run container mounts backup
ansible.builtin.include_role:
name: docker_backup
vars:
backup_rules: "{{ item }}"
when: docker_backup is defined and docker_backup | length != 0
loop: "{{ docker_backup }}"
- name: Stat the /root/infrastructure-compose folder
ansible.builtin.stat:
path: "/root/infrastructure-compose"
register: infra_compose_stat
- name: Find all .env files
ansible.builtin.find:
paths: "/root/infrastructure-compose"
hidden: true
patterns: ".*.env"
when: infra_compose_stat.stat.exists
register: all_env_files
- name: .env Backup block
when: infra_compose_stat.stat.exists and all_env_files.files is defined and all_env_files.files | length != 0
block:
- name: Show file list
ansible.builtin.debug:
msg: "{{ all_env_files.files | map(attribute='path') | list }}"
- name: Archive .env files
community.general.archive:
path: "{{ all_env_files.files | map(attribute='path') | list }}"
dest: >-
/backup/{{ inventory_hostname }}_{{ now().strftime("%Y%m%d%H%M%S") }}.tar.gz
format: gz
force_archive: true
- name: Find all .env backup files for the current host
ansible.builtin.find:
paths: "/backup"
patterns: "{{ inventory_hostname }}*"
register: backup_env_files
- name: If too many backups kept
when: backup_env_files.files | length > env_backups_to_keep
block:
- name: Get the oldest file paths
ansible.builtin.set_fact:
oldest_file_paths: >-
{{ (backup_env_files.files | sort(attribute='mtime'))[:backup_env_files.files | length - env_backups_to_keep] |
map(attribute='path') | list }}
- name: Remove the files
ansible.builtin.file:
path: "{{ item }}"
state: absent
loop: "{{ oldest_file_paths }}"
- name: Trigger rclone backup
hosts: nfsserver
become: true
become_method: sudo
gather_facts: false
tasks:
- name: Run rclone
ansible.builtin.shell:
cmd: "rclone sync /backups dropbox:Backup/infrastructure --progress --dropbox-chunk-size 128M --dropbox-batch-mode sync --dropbox-batch-size 100 --tpslimit 12 --tpslimit-burst 0"
changed_when: false
async: 3600
poll: 60
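
The pfSense play above is essentially a scripted login to diag_backup.php: fetch the page for a CSRF token and cookie, authenticate, fetch a fresh token, then POST the download form. For reference only, the same flow can be sketched in Python with the requests library; the host, credentials, and output filename below are placeholders rather than values from this repo, and error handling is omitted.

import re
import requests

BASE = "http://172.16.132.2:1234"      # placeholder, shaped like backup_url in the inventory
USER, PASSWORD = "backup", "changeme"  # placeholder credentials

session = requests.Session()

def csrf_token(html: str) -> str:
    # The token is embedded in the page as: var csrfMagicToken = "sid:...";
    return re.search(r'var\s+csrfMagicToken\s+=\s+"([^"]+)"', html).group(1)

# 1. Fetch the backup page to pick up an initial cookie and CSRF token.
page = session.get(f"{BASE}/diag_backup.php")

# 2. Log in with that token; pfSense answers with a 302 and a fresh session cookie.
session.post(
    f"{BASE}/diag_backup.php",
    data={"login": "Login", "usernamefld": USER, "passwordfld": PASSWORD,
          "__csrf_magic": csrf_token(page.text)},
    allow_redirects=False,
)

# 3. Re-fetch the page for a post-login token, then POST the download form.
page = session.get(f"{BASE}/diag_backup.php")
backup = session.post(
    f"{BASE}/diag_backup.php",
    data={"download": "download", "backupssh": "yes", "backupdata": "yes",
          "donotbackuprrd": "yes", "__csrf_magic": csrf_token(page.text)},
)

with open("pfsense-config.xml", "w", encoding="utf-8") as fh:
    fh.write(backup.text)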

View File

@@ -1,76 +1,76 @@
---
- hosts: masters,workers
become: true
become_method: sudo
become_user: root
tasks:
- name: Update grub config to remove zram generation
ansible.builtin.shell:
cmd: grubby --update-kernel ALL --args='systemd.zram=0'
- name: Update grub config
ansible.builtin.shell:
cmd: grub2-mkconfig -o /boot/grub2/grub.cfg
- name: Reboot the system to get rid of the zram swap that's already been set up
ansible.builtin.reboot:
reboot_timeout: 900
- name: Set SELinux to Permissive
ansible.posix.selinux:
state: disabled
- name: Disable firewalld
ansible.builtin.service:
name: firewalld
enabled: false
state: stopped
- name: Install iptables components
ansible.builtin.yum:
name:
- iptables
- iproute-tc
state: present
- name: Add overlay modprobe module
community.general.modprobe:
name: overlay
persistent: present
state: present
- name: Add br_netfilter module
community.general.modprobe:
name: br_netfilter
persistent: present
state: present
- name: Create network settings configuration file
ansible.builtin.blockinfile:
path: "/etc/sysctl.d/99-kubernetes-cri.conf"
block: |
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-ip6tables = 1
create: true
- name: Apply new sysctl settings
ansible.builtin.shell:
cmd: sysctl --system
changed_when: false
- name: Install cri-o and kubernetes
ansible.builtin.yum:
name:
- cri-o
- containernetworking-plugins
- kubernetes
- kubernetes-kubeadm
- kubernetes-client
state: present
- name: Enable and start cri-o
ansible.builtin.service:
name: crio
enabled: true
state: started

View File

@@ -0,0 +1,47 @@
---
- hosts: docker_nodes
become: true
become_method: sudo
become_user: root
vars:
docker_device: "/dev/vdb"
docker_vg_name: "vg_docker"
docker_lv_name: "lv_docker"
docker_data_directory: "/var/lib/docker"
tasks:
- name: Build /var/lib/docker mount
ansible.builtin.import_role:
name: make_lvm_mount
vars:
device_name: "{{ docker_device }}"
vg_name: "{{ docker_vg_name }}"
lvs:
- lv_name: "{{ docker_lv_name }}"
lv_size: "100%FREE"
directories:
- name: "{{ docker_data_directory }}"
owner: root
group: root
mode: "0750"
lv: "{{ docker_lv_name }}"
tags:
- "make_mount"
- name: Add repo with config-manager
ansible.builtin.shell:
cmd: "dnf config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo"
- name: Install docker
ansible.builtin.yum:
name:
- docker-ce
- docker-ce-cli
- containerd.io
- docker-compose-plugin
- name: Start docker service
ansible.builtin.systemd_service:
name: docker
enabled: true
state: started

View File

@@ -1,31 +1,31 @@
---
- hosts: all
become: true
become_method: su
become_user: root
tasks:
- name: Create the ansible user
ansible.builtin.user:
name: ansible
append: true
state: present
createhome: true
shell: /bin/bash
- name: Make sure the sudoers dropin directory exists
ansible.builtin.file:
path: "/etc/sudoers.d"
state: directory
- name: Create a sudoers file for the ansible user
ansible.builtin.lineinfile:
path: "/etc/sudoers.d/50-ansible"
line: "ansible ALL=(ALL) NOPASSWD: ALL"
create: true
validate: "visudo -cf %s"
- name: Add authorized key for ansible user
ansible.builtin.authorized_key:
user: ansible
key: "{{ lookup('ansible.builtin.file', '/home/ansible/.ssh/id_rsa.pub') }}"

View File

@@ -1,16 +1,16 @@
---
- hosts: all
become: true
become_method: sudo
become_user: root
tasks:
- name: Update all packages
ansible.builtin.yum:
name: "*"
state: latest
async: 3600
poll: 60
- name: Reboot Node
ansible.builtin.reboot:
reboot_timeout: 1800

View File

@@ -1,116 +1,116 @@
---
- hosts: masters,workers
become: yes
become_method: sudo
become_user: root
tasks:
- name: Add overlay modprobe module
community.general.modprobe:
name: overlay
persistent: present
state: present
- name: Add br_netfilter module
community.general.modprobe:
name: br_netfilter
persistent: present
state: present
- name: Set SELinux to Permissive
ansible.posix.selinux:
state: permissive
- name: Set firewalld configuration | Master Nodes
ansible.posix.firewalld:
port: "{{ item }}"
permanent: true
state: enabled
loop:
- "6443/tcp"
- "2379-2380/tcp"
- "10250/tcp"
- "10251/tcp"
- "10259/tcp"
- "10257/tcp"
- "179/tcp"
- "4789/udp"
when: master | default(false)
- name: Set firewalld configuration | Worker Nodes
ansible.posix.firewalld:
port: "{{ item }}"
permanent: true
state: enabled
loop:
- "179/tcp"
- "10250/tcp"
- "30000-32767/tcp"
- "4789/udp"
when: worker | default(false)
- name: Create network settings configuration file
ansible.builtin.blockinfile:
path: "/etc/sysctl.d/99-kubernetes-cri.conf"
block: |
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-ip6tables = 1
create: true
- name: Apply new sysctl settings
ansible.builtin.shell:
cmd: sysctl --system
changed_when: false
- name: Add docker repo
ansible.builtin.shell:
cmd: dnf config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
changed_when: false
- name: Install containerd
ansible.builtin.yum:
name: containerd.io
state: present
- name: Build default containerd config
ansible.builtin.shell:
cmd: set -o pipefail && mkdir -p /etc/containerd && containerd config default | tee /etc/containerd/config.toml
changed_when: false
- name: Restart containerd
ansible.builtin.service:
name: containerd
state: restarted
enabled: true
- name: Create Kubernetes repo
ansible.builtin.blockinfile:
path: "/etc/yum.repos.d/kubernetes.repo"
create: true
block: |
[kubernetes]
name=Kubernetes
baseurl=https://pkgs.k8s.io/core:/stable:/v1.31/rpm/
enabled=1
gpgcheck=1
gpgkey=https://pkgs.k8s.io/core:/stable:/v1.31/rpm/repodata/repomd.xml.key
exclude=kubelet kubeadm kubectl cri-tools kubernetes-cni
- name: Install Kubernetes components
ansible.builtin.yum:
name:
- kubelet
- kubeadm
- kubectl
state: present
disable_excludes: all
- name: Disable running swap
ansible.builtin.shell:
cmd: swapoff -a
changed_when: false
- name: Disable swap in fstab
ansible.builtin.shell:
cmd: sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab
changed_when: false

View File

@@ -0,0 +1,38 @@
Role Name
=========
A brief description of the role goes here.
Requirements
------------
Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required.
Role Variables
--------------
A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well.
Dependencies
------------
A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles.
Example Playbook
----------------
Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too:
- hosts: servers
roles:
- { role: username.rolename, x: 42 }
License
-------
BSD
Author Information
------------------
An optional section for the role authors to include contact information, or a website (HTML is not allowed).

View File

@@ -0,0 +1,2 @@
---
backup_rules: ""

View File

@@ -0,0 +1,52 @@
galaxy_info:
author: your name
description: your role description
company: your company (optional)
# If the issue tracker for your role is not on github, uncomment the
# next line and provide a value
# issue_tracker_url: http://example.com/issue/tracker
# Choose a valid license ID from https://spdx.org - some suggested licenses:
# - BSD-3-Clause (default)
# - MIT
# - GPL-2.0-or-later
# - GPL-3.0-only
# - Apache-2.0
# - CC-BY-4.0
license: license (GPL-2.0-or-later, MIT, etc)
min_ansible_version: 2.1
# If this a Container Enabled role, provide the minimum Ansible Container version.
# min_ansible_container_version:
#
# Provide a list of supported platforms, and for each platform a list of versions.
# If you don't wish to enumerate all versions for a particular platform, use 'all'.
# To view available platforms and versions (or releases), visit:
# https://galaxy.ansible.com/api/v1/platforms/
#
# platforms:
# - name: Fedora
# versions:
# - all
# - 25
# - name: SomePlatform
# versions:
# - all
# - 1.0
# - 7
# - 99.99
galaxy_tags: []
# List tags for your role here, one per line. A tag is a keyword that describes
# and categorizes the role. Users find roles by searching for tags. Be sure to
# remove the '[]' above, if you add tags to this list.
#
# NOTE: A tag is limited to a single word comprised of alphanumeric characters.
# Maximum 20 tags per role.
dependencies: []
# List your role dependencies here, one per line. Be sure to remove the '[]' above,
# if you add dependencies to this list.

View File

@@ -0,0 +1,63 @@
---
- name: Ensure backup rules contains the necessary fields
ansible.builtin.fail:
msg: "You must define {{ backup_rule_checker }} in the backup ruleset"
when: not backup_rules[backup_rule_checker] is defined
loop:
- container_name
- directories_to_backup
- backup_dir
- backup_name_prefix
- max_backups_kept
loop_control:
loop_var: backup_rule_checker
- name: Make sure backup dir exists
ansible.builtin.file:
path: "{{ backup_rules['backup_dir'] }}"
state: directory
recurse: true
- name: Stop the running container
community.docker.docker_container:
name: "{{ backup_rules.container_name }}"
state: stopped
- name: Archive necessary directories
community.general.archive:
path: "{{ backup_rules.directories_to_backup }}"
dest: >-
{{ backup_rules.backup_dir }}/{{ backup_rules.backup_name_prefix }}_{{ now().strftime("%Y%m%d%H%M%S") }}.tar.gz
format: gz
async: 3600
poll: 60
- name: Start the stopped container
community.docker.docker_container:
name: "{{ backup_rules.container_name }}"
state: started
- name: Find all files that start with the backup_name_prefix
ansible.builtin.find:
paths: "{{ backup_rules.backup_dir }}"
patterns: "{{ backup_rules.backup_name_prefix }}*"
register: all_backup_files
- name: If too many backups kept
when: all_backup_files.files | length > backup_rules.max_backups_kept
block:
- name: Get the oldest file paths
ansible.builtin.set_fact:
oldest_file_paths: >-
{{ (all_backup_files.files | sort(attribute='mtime'))[:all_backup_files.files | length - backup_rules.max_backups_kept] |
map(attribute='path') | list }}
- name: Remove the files
ansible.builtin.file:
path: "{{ old_file_to_delete }}"
state: absent
loop: "{{ oldest_file_paths }}"
loop_control:
loop_var: old_file_to_delete
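
The rotation block above (like the matching blocks in the backup protocol playbook) reduces to: sort the matching archives by mtime and delete everything older than the newest max_backups_kept. A minimal Python sketch of the same idea, with hypothetical arguments:

from pathlib import Path

def prune_backups(backup_dir: str, prefix: str, max_backups_kept: int) -> None:
    # Oldest first, mirroring sort(attribute='mtime') in the task above.
    files = sorted(Path(backup_dir).glob(f"{prefix}*"), key=lambda p: p.stat().st_mtime)
    for old in files[: max(0, len(files) - max_backups_kept)]:
        old.unlink()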

View File

@@ -34,3 +34,11 @@
state: mounted
loop: "{{ directories }}"
- name: Ensure data directories have the right permissions
ansible.builtin.file:
path: "{{ item.name }}"
state: directory
owner: "{{ item.owner }}"
group: "{{ item.group }}"
mode: "{{ item.mode }}"
loop: "{{ directories }}"

View File

@@ -0,0 +1,52 @@
---
- hosts: all
vars:
password_salt: !vault |
$ANSIBLE_VAULT;1.1;AES256
38386463386336393336643934393736633235623939306263663737303130316438343037353135
6535633737343438393239636230666664666331346564380a613161376237323262613164316564
65643733373739666165313065383030353664656161393261623762623733353938333964346536
3064316661323964390a326564613734316162613432396432363737376438323664656666613035
30386662653266373766613837373534616639383866383732646336373037653430
tasks:
- name: "Fail if what_user is not set"
ansible.builtin.fail:
msg: "You have to specify the what_user variable"
when: not what_user is defined
run_once: true
delegate_to: 127.0.0.1
- name: "Fail if what_password is not set"
ansible.builtin.fail:
msg: "You have to specify the what_password variable"
when: not what_password is defined
run_once: true
delegate_to: 127.0.0.1
- name: Fail if user specified does not exist
ansible.builtin.getent:
database: passwd
key: "{{ what_user }}"
- name: Ensure passlib is installed locally
ansible.builtin.pip:
name: passlib
extra_args: "--user"
run_once: true
delegate_to: 127.0.0.1
- name: Generate password outside root context
ansible.builtin.set_fact:
hashed_pass: "{{ what_password | password_hash('sha512', password_salt) }}"
run_once: true
delegate_to: 127.0.0.1
- name: Update user password
ansible.builtin.user:
name: "{{ what_user }}"
password: "{{ what_password | password_hash('sha512', password_salt) }}"
become: true
become_method: sudo
become_user: root
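
The vaulted password_salt above ties back to the 2025-01-14 commits ("Trying something different for passlib", "Revaulting a smaller salt to satisfy passlib"): when passlib is installed (which this play ensures via pip), Ansible's password_hash filter hands the salt to passlib, and passlib's sha512_crypt only accepts salts of at most 16 characters drawn from ./0-9A-Za-z. A quick illustration with an example salt and password, not values from this repo:

from passlib.hash import sha512_crypt

# A salt longer than 16 chars, or containing other characters, raises ValueError here.
hashed = sha512_crypt.using(salt="abc123", rounds=5000).hash("example-password")
print(hashed)  # e.g. $6$abc123$...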