Compare commits

...

13 commits

30 changed files with 256 additions and 68 deletions


@@ -63,11 +63,8 @@ vault_ca_cert_payload: |
nomad_version: 1.1.3
nomad_podman_driver_version: 0.3.0
# podman
podman_version: 3.0.1+dfsg1-3+b2
# lnd
lnd_version: 0.13.1-beta
lnd_version: 0.14.2-beta
# lego
lego_version: 4.4.0


@@ -3,3 +3,5 @@ hashi_arch: arm
consul_arch: arm64
nomad_arch: arm64
docker_arch: arm64
k3s_version: v1.23.4+k3s1
k3s_role: 'client'


@@ -1,5 +0,0 @@
---
nomad_meta_values:
  - { name: "storage_optimized", value: "false" }
  - { name: "ram_optimized", value: "false" }
...


@@ -0,0 +1,3 @@
---
k3s_role: server
...


@@ -1,10 +1,10 @@
---
haproxy_domains:
  - { name: "gitea", url: "git.minhas.io" }
  - { name: "freshrss", url: "rss.minhas.io" }
  - { name: "radicale", url: "dav.minhas.io" }
  - { name: "wallabag", url: "wallabag.minhas.io" }
  - { name: "gitea", url: "git.minhas.io" }
  - { name: "kanban", url: "kanban.minhas.io" }
  - { name: "sudoscientist-go-backend", url: "api.sudoscientist.com" }
  - { name: "nextcloud", url: "nextcloud.minhas.io" }
  - { name: "radicale", url: "dav.minhas.io" }
  - { name: "sudoscientist-go-backend", url: "api.sudoscientist.com" }
  - { name: "wallabag", url: "wallabag.minhas.io" }
...


@@ -17,6 +17,9 @@ ivyking.minhas.io
[hardtack]
hardtack[1:7].minhas.io
[k3s]
hardtack[1:7].minhas.io
[lnd]
redwingcherokee.minhas.io
@@ -24,7 +27,6 @@ redwingcherokee.minhas.io
ivyking.minhas.io
[nomad_client]
hardtack[1:7].minhas.io
ivyking.minhas.io
sedan.minhas.io


@@ -0,0 +1,5 @@
---
- hosts: k3s
  roles:
    - role: k3s
...


@@ -2,6 +2,5 @@
- hosts: lnd
  roles:
    - role: tor
    - role: bitcoind
    - role: lnd
...


@@ -5,6 +5,7 @@
- import_playbook: consul-client.yml
- import_playbook: docker.yml
- import_playbook: nomad.yml
- import_playbook: k3s.yml
- import_playbook: nexus.yml
- import_playbook: lnd.yml
- import_playbook: wekan.yml


@@ -21,8 +21,6 @@
    state: present
    mode: 0644
- name: update apt cache
  apt:
- name: install docker-ce
  apt:
    state: present


@@ -32,6 +32,11 @@ defaults
    errorfile 503 /etc/haproxy/errors/503.http
    errorfile 504 /etc/haproxy/errors/504.http

frontend fe_tcp
    mode tcp
    bind :8000
    default_backend be_airsonic

frontend fe_default
    mode http
    bind :443 ssl crt /etc/haproxy/certs/ alpn h2,http/1.1
@@ -51,6 +56,11 @@ backend be_{{ domain.name }}
    server-template {{ domain.name }} 1 _{{ domain.name }}._tcp.service.masked.name resolvers consul resolve-opts allow-dup-ip resolve-prefer ipv4 check
{% endfor %}

backend be_airsonic
    balance leastconn
    server airsonic 192.168.0.12:8000

resolvers consul
    nameserver consul 127.0.0.1:8600
    accepted_payload_size 8192
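
Note: the be_{{ domain.name }} backends rely on Consul's DNS interface for server-template discovery. A quick sanity check against the local agent (assuming Consul's default DNS port 8600; the service name here is illustrative):

    # SRV records are what server-template expands into backend servers
    dig @127.0.0.1 -p 8600 _gitea._tcp.service.masked.name SRV +short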


@@ -0,0 +1,16 @@
---
- name: template k3s server systemd
  template:
    src: templates/k3s.service.j2
    dest: /etc/systemd/system/k3s.service
    owner: root
    group: root
    mode: 0644

- name: enable and start k3s
  systemd:
    daemon_reload: yes
    enabled: yes
    name: k3s
    state: started
...


@@ -0,0 +1,25 @@
---
- name: check k3s version
  shell:
    cmd: "k3s --version | grep k3s | cut -d' ' -f3"
  args:
    executable: /bin/bash
  changed_when: False
  register: installed_k3s_version
  check_mode: False

- name: get k3s
  get_url:
    url: "https://github.com/k3s-io/k3s/releases/download/{{ k3s_version }}/k3s-arm64"
    dest: /usr/local/bin/k3s
    mode: 0755
    owner: root
    group: root
  when: installed_k3s_version.stdout != k3s_version

- name: link k3s
  file:
    src: /usr/local/bin/k3s
    dest: /usr/local/bin/kubernetes
    state: link
...
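
Note: the version check compares the third space-separated field of k3s --version against k3s_version, so both must carry the leading "v". Expected shape of the output (commit hash illustrative):

    $ k3s --version
    k3s version v1.23.4+k3s1 (abc12345)
    $ k3s --version | grep k3s | cut -d' ' -f3
    v1.23.4+k3s1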


@@ -0,0 +1,7 @@
---
- include: get_k3s.yml
- include: server.yml
  when: k3s_role == "server"
- include: clients.yml
  when: k3s_role == "client"
...


@@ -0,0 +1,25 @@
---
- name: template k3s server systemd
  template:
    src: templates/k3s.service.j2
    dest: /etc/systemd/system/k3s.service
    owner: root
    group: root
    mode: 0644

- name: enable and start k3s
  systemd:
    daemon_reload: yes
    enabled: yes
    name: k3s
    state: started

- name: get k3s token
  slurp:
    src: /var/lib/rancher/k3s/server/node-token
  register: registered_k3s_node_token

- name: set k3s token var
  set_fact:
    k3s_node_token: "{{ registered_k3s_node_token.content | b64decode | trim }}"
...
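
Note: node-token only exists after the server has started once, which is why the slurp comes after the systemd task. Roughly what gets slurped and b64-decoded (token shape per k3s convention; value illustrative):

    $ sudo cat /var/lib/rancher/k3s/server/node-token
    K10<cluster-ca-hash>::server:<password>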


@@ -0,0 +1,23 @@
[Unit]
Description=k3s
Wants=network-online.target
After=network-online.target

[Service]
ExecReload=/bin/kill -HUP $MAINPID
{% if k3s_role == 'server' %}
ExecStart=/usr/local/bin/k3s server --write-kubeconfig-mode 644 --disable servicelb --disable traefik
{% else %}
ExecStart=/usr/local/bin/k3s agent --server https://hardtack1.minhas.io:6443 --token {{ hostvars['hardtack1.minhas.io'].k3s_node_token }}
{% endif %}
KillMode=process
KillSignal=SIGINT
LimitNOFILE=infinity
LimitNPROC=infinity
Restart=on-failure
RestartSec=2
StartLimitBurst=3
TasksMax=infinity

[Install]
WantedBy=multi-user.target
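
Note: once an agent unit starts with the templated token, the join can be verified from the server; k3s bundles kubectl, and --write-kubeconfig-mode 644 makes the kubeconfig readable without sudo (output below is illustrative):

    $ k3s kubectl get nodes
    NAME                  STATUS   ROLES                  AGE   VERSION
    hardtack1.minhas.io   Ready    control-plane,master   1d    v1.23.4+k3s1
    hardtack2.minhas.io   Ready    <none>                 1d    v1.23.4+k3s1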


@@ -100,12 +100,26 @@
    mode: 0700
  loop: "{{ lego_certs }}"

- name: set cron env to bash
  cron:
    name: SHELL
    env: True
    job: /bin/bash
    user: lego

- name: set cron env to bash
  cron:
    name: SHELL
    env: True
    job: /bin/bash
    user: root

- name: create renewal crontabs
  cron:
    name: "{{ item.name }} renewal"
    hour: "4"
    user: lego
    job: 'source /etc/default/lego && /usr/local/bin/lego --pem --path {{ lego_path }} --email {{ lego_email_address }} --dns {{ item.dns }} --domains "{{ item.domain }}" renew --days 45'
    job: 'source /etc/default/lego && /usr/local/bin/lego --pem --path {{ lego_path }} --email {{ lego_email_address }} --dns {{ item.dns }} --domains "{{ item.domain }}" renew --days 30'
  loop: "{{ lego_certs }}"

- name: create haproxy reload crontab
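
Note: with env: True the cron module writes a variable assignment rather than a job line, so the lego crontab should come out shaped like this (domain is illustrative; time fields left unset by the task default to *):

    $ sudo crontab -l -u lego
    SHELL=/bin/bash
    #Ansible: example.com renewal
    * 4 * * * source /etc/default/lego && /usr/local/bin/lego ... renew --days 30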


@@ -119,7 +119,7 @@
- name: run nexus3
  docker_container:
    name: nexus
    image: sonatype/nexus3
    image: sonatype/nexus3:latest
    env:
      REGISTRY_HTTP_TLS_CERTIFICATE: /certs/nexus.crt
      REGISTRY_HTTP_TLS_KEY: /certs/nexus.key


@@ -0,0 +1,29 @@
[containers]
default_capabilities = [
  "CHOWN",
  "DAC_OVERRIDE",
  "FOWNER",
  "FSETID",
  "KILL",
  "NET_BIND_SERVICE",
  "SETFCAP",
  "SETGID",
  "SETPCAP",
  "SETUID",
  "SYS_CHROOT"
]

default_sysctls = [
  "net.ipv4.ping_group_range=0 1",
]

[engine]
runtime = "crun"
cgroup_manager = "cgroupfs"
events_logger = "journald"

#[storage]
#driver = "overlay"
#
#[storage.options]
#mount_program = "/usr/bin/fuse-overlayfs"
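
Note: whether the [engine] block is actually in effect can be spot-checked once podman is installed, via podman's Go-template inspection:

    # should print "crun" if /etc/containers/containers.conf is being read
    podman info --format '{{ .Host.OCIRuntime.Name }}'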


@@ -1,11 +0,0 @@
[Unit]
Description=Podman API Socket
Documentation=man:podman-system-service(1)

[Socket]
ListenStream=/run/podman/io.podman
SocketMode=0660
SocketGroup=podman

[Install]
WantedBy=sockets.target


@@ -1,5 +1,5 @@
---
- import_tasks: podman_prep.yml
- import_tasks: podman.yml
- import_tasks: nomad.yml
- import_tasks: client_setup.yml
...


@@ -59,6 +59,11 @@
    group: root
  notify: daemon_reload

- name: get podman from passwd
  getent:
    database: passwd
    key: podman

- name: template nomad config
  template:
    src: templates/nomad.hcl.j2


@@ -0,0 +1,72 @@
---
- name: ensure podman group
  group:
    name: podman
    state: present
    system: True

- name: ensure podman user
  user:
    name: podman
    state: present
    group: podman
    system: True

- name: ensure podman is installed
  apt:
    name:
      - catatonit
      - fuse-overlayfs
      - podman
      - slirp4netns
      - uidmap
    state: present

- name: ensure containers.conf is configured
  copy:
    src: containers.conf
    dest: /etc/containers/containers.conf
    owner: root
    group: root
    mode: 0644

- name: Check if podman lingers
  stat: path=/var/lib/systemd/linger/podman
  register: linger

- name: enable lingering for podman
  command: loginctl enable-linger podman
  when: not linger.stat.exists

- name: enable podman
  systemd:
    name: podman
    state: started
    enabled: True
    scope: user
  changed_when: False
  become: True
  become_user: podman

- name: check if subuid is configured
  shell: grep podman /etc/subuid
  register: subuid
  changed_when: False
  check_mode: False
  failed_when: False

- name: check if subgid is configured
  shell: grep podman /etc/subgid
  register: subgid
  changed_when: False
  check_mode: False
  failed_when: False

- name: configure subuid
  shell: usermod --add-subuids 200000-201000 podman
  when: subuid.rc != 0

- name: configure subgid
  shell: usermod --add-subgids 200000-201000 podman
  when: subgid.rc != 0
...
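
Note: usermod's --add-subuids/--add-subgids range is inclusive, so the idempotency greps should find a 1001-ID allocation afterwards:

    $ grep podman /etc/subuid /etc/subgid
    /etc/subuid:podman:200000:1001
    /etc/subgid:podman:200000:1001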


@@ -1,30 +0,0 @@
---
- name: ensure podman group
  group:
    name: podman
    state: present
    system: True

- name: ensure podman user
  user:
    name: podman
    state: present
    group: podman
    system: True

- name: ensure podman is installed
  apt:
    name:
      - fuse-overlayfs
      - "podman={{ podman_version }}"
      - uidmap
    state: present

- name: enable podman
  systemd:
    name: podman
    state: started
    enabled: True
    daemon_reload: True
  changed_when: False
...


@@ -39,6 +39,6 @@ plugin_dir = "/opt/nomad_plugins"
plugin "nomad-driver-podman" {
  enabled = true
  config {
    socket_path = "unix:///run/user/1000/podman/podman.sock"
    socket_path = "unix:///run/user/{{ getent_passwd.podman[1] }}/podman/podman.sock"
  }
}
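
Note: the getent task registers a getent_passwd fact keyed by user, with the username column stripped, so index 1 is the UID field; a sketch of what the template sees (UID value illustrative):

    # getent_passwd.podman -> ['x', '999', '999', '', '/home/podman', '/usr/sbin/nologin']
    # index 1 = 999, so the rendered line becomes:
    # socket_path = "unix:///run/user/999/podman/podman.sock"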


@@ -1,4 +1,4 @@
FROM nextcloud:21.0.0-apache
FROM nextcloud:23.0-apache
# Copy masked.name root cert
COPY files/MaskedName_Root_CA.crt /usr/local/share/ca-certificates/MaskedName_Root_CA.crt


@@ -1,4 +1,4 @@
FROM golang:1.17-alpine3.14
FROM golang:alpine
RUN apk add --no-cache ca-certificates git && \
    go install -tags 'postgres' github.com/golang-migrate/migrate/v4/cmd/migrate@latest && \
@@ -6,7 +6,7 @@ RUN apk add --no-cache ca-certificates git && \
    cd ${GOPATH}/src/git.minhas.io/asara && \
    git clone https://git.minhas.io/asara/sudoscientist-go-backend && \
    cd ${GOPATH}/src/git.minhas.io/asara/sudoscientist-go-backend && \
    go mod init && go get && go build main.go && \
    go mod init && go get && go build -o /go/bin/sudoscientist-go-backend main.go && \
    mv /go/bin/* /usr/local/bin/ && \
    rm -rf /go/src && \
    apk del git
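
Note: the -o flag is the substance of this change; a bare go build main.go drops a binary named main in the working directory, leaving /go/bin empty for the following mv. A minimal illustration:

    $ go build main.go && ls
    main  main.go
    $ go build -o /go/bin/sudoscientist-go-backend main.go && ls /go/bin
    sudoscientist-go-backend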


@@ -23,7 +23,7 @@ job "gitea" {
      }
      driver = "docker"
      config {
        image = "docker.service.masked.name:8082/gitea"
        image = "docker.service.masked.name:8082/gitea:latest"
        ports = ["http"]
        volumes = [
          "/mnt/raid/gitea:/data"


@@ -23,7 +23,7 @@ job "nextcloud" {
      }
      driver = "docker"
      config {
        image = "docker.service.masked.name:8082/nextcloud:21.0.0-apache"
        image = "docker.service.masked.name:8082/nextcloud:latest"
        ports = ["nextcloud"]
        volumes = [
          "/mnt/raid/nextcloud/:/var/www/html"


@@ -5,9 +5,10 @@ job "sudoscientist-go-backend" {
  constraint {
    attribute = "${attr.cpu.arch}"
    value = "arm64"
    value = "amd64"
  }
  update {
    stagger = "30s"
    max_parallel = 1