base-infra/config.yml
Felix Nehrke 1f69c1578c Add longhorn distributed storage to the k3s-cluster
This change adds Longhorn, a Kubernetes add-on that provides distributed
block storage across all nodes of the cluster.

Note that I already tried this in December, but rolled _everything_ back
because of very high load on the machines. It turned out, however, that
the high load was not caused by Longhorn but by a misconfiguration of
the server, as described in the see-also commit.

Reference: https://longhorn.io/
Reference: https://longhorn.io/docs/1.10.1/deploy/install/install-with-helm/
See-also: 4b8a3d12c4 Use etcd instead of sqlite for k3s-server
2026-01-23 00:45:00 +01:00
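
As a rough illustration of how the new longhorn_state variable in config.yml
could be consumed, here is a minimal sketch of an install task following the
linked install-with-helm guide. It assumes the role uses the
kubernetes.core.helm module; the task name and release name are hypothetical,
and the actual role in this repository is not shown here.

# Hypothetical task sketch; chart name, repo URL and namespace are taken
# from the Longhorn install-with-helm documentation linked above.
- name: Manage Longhorn Helm release
  kubernetes.core.helm:
    name: longhorn
    chart_ref: longhorn
    chart_repo_url: https://charts.longhorn.io
    release_namespace: longhorn-system
    create_namespace: true
    # "present" installs/upgrades the release, "absent" removes it again
    state: "{{ longhorn_state }}"
    wait: true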

all:
  vars:
    api_endpoint: "{{ hostvars[groups['server'][0]]['ansible_host'] | default(groups['server'][0]) }}"
    longhorn_state: present
    cert_manager_state: present
    cert_manager_version: v1.18.2
    letsencrypt_clusterissuers:
      staging:
        server: https://acme-staging-v02.api.letsencrypt.org/directory
        email: "{{ vault_letsencrypt_issuer_email }}"
      prod:
        server: https://acme-v02.api.letsencrypt.org/directory
        email: "{{ vault_letsencrypt_issuer_email }}"
    gitea_chart_version: 12.4.0
    gitea_state: present
    gitea_host: gitea.nehrke.info
    gitea_certificate_issuer: letsencrypt-prod
    gitea_image_pull_policy: Always
    gitea_image_registry_secret: "{{ vault_gitea_pull_registry_secret | b64encode }}"
    gitea_admin_user: "{{ vault_gitea_admin_user }}"
    gitea_admin_password: "{{ vault_gitea_admin_password }}"
    gitea_admin_email: "{{ vault_gitea_admin_email }}"
    concourse_state: present
    concourse_chart_version: 19.0.2
    concourse_certificate_issuer: letsencrypt-prod
    concourse_host: ci.nehrke.info
    concourse_local_users: "{{ vault_concourse_local_users }}"
    concourse_worker_replicas: 2
    snappass_state: present
    snappass_host: snappass.nehrke.info
    snappass_certificate_issuer: letsencrypt-prod
k3s_cluster:
  vars:
    ansible_user: root
    # note the space between the IPs!
    dns_servers: 8.8.8.8 8.8.4.4
    extra_server_args: '--cluster-init'
agent:
  vars:
    ansible_ssh_common_args: -o StrictHostKeyChecking=accept-new -o ProxyCommand="ssh -p {{ hostvars[groups['server'][0]]['ansible_port'] }} -W %h:%p -q root@{{ api_endpoint }}"
    k3s_version: v1.31.6+k3s1
server:
  vars:
    ansible_ssh_common_args: '-o StrictHostKeyChecking=accept-new'
    k3s_version: v1.31.6+k3s1
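
For context, a hypothetical Jinja2 template sketch showing how the
letsencrypt_clusterissuers map above could be rendered into cert-manager
ClusterIssuer resources. The actual template used by the cert_manager role is
not shown here; the letsencrypt-<name> naming merely mirrors the
gitea_certificate_issuer and concourse_certificate_issuer values above, and
the traefik ingress class is an assumption based on the k3s default ingress
controller.

# Hypothetical template (e.g. clusterissuer.yml.j2); loops over the
# letsencrypt_clusterissuers map and emits one ClusterIssuer per entry.
{% for name, issuer in letsencrypt_clusterissuers.items() %}
---
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
  # yields letsencrypt-staging and letsencrypt-prod
  name: letsencrypt-{{ name }}
spec:
  acme:
    server: {{ issuer.server }}
    email: {{ issuer.email }}
    privateKeySecretRef:
      name: letsencrypt-{{ name }}-account-key
    solvers:
      - http01:
          ingress:
            ingressClassName: traefik
{% endfor %}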