Uptime Kuma for Kubernetes

This commit is contained in:
Tomás Limpinho
2026-04-30 14:55:25 +01:00
parent bdd082160f
commit 643959ea2f
26 changed files with 691 additions and 1 deletion

View File

@ -0,0 +1,9 @@
apiVersion: v1
kind: Secret
metadata:
name: regcred
namespace: mariadb
data:
.dockerconfigjson: >-
eyJhdXRocyI6eyJodHRwczovL2luZGV4LmRvY2tlci5pby92MS8iOnsidXNlcm5hbWUiOiJ1c2VyIiwicGFzc3dvcmQiOiJwYXNzIiwiYXV0aCI6ImRmamlla2ZlcldFS1dFa29mY2RrbzM0MzUzZmQ9In19fQ==
type: kubernetes.io/dockerconfigjson
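
The .dockerconfigjson value above decodes to a Docker config JSON whose auths entry holds placeholder credentials. A minimal sketch of how such a value can be regenerated (the user/password strings here are placeholders, not real registry credentials):

import base64, json

# Hypothetical placeholder credentials; the real ones come from Bitwarden at deploy time.
user, password = "user", "pass"
auth = base64.b64encode(f"{user}:{password}".encode()).decode()
docker_config = {"auths": {"https://index.docker.io/v1/": {
    "username": user, "password": password, "auth": auth}}}
# Base64-encode the whole JSON document; the result is the .dockerconfigjson value.
print(base64.b64encode(json.dumps(docker_config).encode()).decode())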

View File

@ -0,0 +1,10 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: mariadb-config
namespace: mariadb
data:
my.cnf: |
[mysqld]
innodb_use_native_aio=0
innodb_flush_method=fsync

View File

@ -0,0 +1,4 @@
apiVersion: v1
kind: Namespace
metadata:
name: mariadb

View File

@ -0,0 +1,11 @@
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: mariadb-nfs-csi
namespace: mariadb
provisioner: nfs.csi.k8s.io
parameters:
server: 192.168.1.22
share: /mnt/fenix-main-nas-pool-0/data/k8s-Volumes/k8s-cluster-iac-deployed/mariadb
allowVolumeExpansion: true
reclaimPolicy: Retain

View File

@ -0,0 +1,31 @@
apiVersion: v1
kind: PersistentVolume
metadata:
name: mariadb-pv-0
namespace: mariadb
spec:
capacity:
storage: 50Gi
storageClassName: mariadb-nfs-csi
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Retain
nfs:
server: 192.168.1.22
path: /mnt/fenix-main-nas-pool-0/data/k8s-Volumes/k8s-cluster-iac-deployed/mariadb
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: mariadb-data-mariadb-statefulset-0
namespace: mariadb
spec:
storageClassName: mariadb-nfs-csi
accessModes:
- ReadWriteOnce
volumeName: mariadb-pv-0
resources:
requests:
storage: 50Gi
---

View File

@ -0,0 +1,11 @@
apiVersion: v1
kind: Secret
metadata:
name: mariadb-secret
namespace: mariadb
type: Opaque
data:
MARIADB_ROOT_PASSWORD: TUFSSUFEQl9ST09UX1BBU1NXT1JE
MARIADB_DATABASE: TUFSSUFEQl9EQVRBQkFTRQ==
MARIADB_USER: TUFSSUFEQl9VU0VS
MARIADB_PASSWORD: TUFSSUFEQl9QQVNTV09SRA==
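
The committed values are placeholders (each decodes to its own key name); the real secret is fetched from Bitwarden by the playbook below. A small sketch for producing the base64 values from real credentials (the plaintext values shown are hypothetical):

import base64

# Hypothetical plaintext values; substitute the real credentials before applying.
values = {
    "MARIADB_ROOT_PASSWORD": "change-me-root",
    "MARIADB_DATABASE": "uptimekuma",
    "MARIADB_USER": "uptimekuma",
    "MARIADB_PASSWORD": "change-me",
}
for key, plaintext in values.items():
    print(f"{key}: {base64.b64encode(plaintext.encode()).decode()}")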

View File

@ -0,0 +1,13 @@
apiVersion: v1
kind: Service
metadata:
name: mariadb-service
namespace: mariadb
spec:
ports:
- port: 3306
targetPort: 3306
selector:
app: mariadb-statefulset
type: LoadBalancer
loadBalancerIP: 10.240.0.102

View File

@ -0,0 +1,50 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: mariadb-statefulset
namespace: mariadb
spec:
serviceName: "mariadb-statefulset"
replicas: 1
selector:
matchLabels:
app: mariadb-statefulset
template:
metadata:
labels:
app: mariadb-statefulset
spec:
imagePullSecrets:
- name: regcred
containers:
- name: mariadb-statefulset
image: mariadb:11
ports:
- containerPort: 3306
envFrom:
- secretRef:
name: mariadb-secret
volumeMounts:
- mountPath: /var/lib/mysql
name: mariadb-data
- mountPath: /etc/mysql/conf.d/my.cnf
name: mariadb-config
subPath: my.cnf
volumes:
- name: mariadb-config
configMap:
name: mariadb-config
volumeClaimTemplates:
- kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: mariadb-data
namespace: mariadb
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 50Gi
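
Note that the pre-created claim mariadb-data-mariadb-statefulset-0 in the storage manifest above matches the name this StatefulSet derives for its replica-0 claim; a quick illustration of the convention:

# StatefulSet PVCs are named <claim-template>-<statefulset-name>-<ordinal>.
claim_template, statefulset_name, ordinal = "mariadb-data", "mariadb-statefulset", 0
print(f"{claim_template}-{statefulset_name}-{ordinal}")  # -> mariadb-data-mariadb-statefulset-0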

View File

@ -0,0 +1,52 @@
- name: Remove the /tmp/mariadb/kubernetes-files directory
ansible.builtin.file:
path: /tmp/mariadb/kubernetes-files
state: absent
- name: Create the temporary directory on the remote host
file:
path: /tmp/mariadb/kubernetes-files
state: directory
mode: '0755'
- name: Copy the Kubernetes manifest files with owner and permissions
ansible.builtin.copy:
src: ../files
dest: /tmp/mariadb/kubernetes-files
owner: fenix
group: root
mode: '0644'
- name: List the contents of the remote directory
shell: ls -l /tmp/mariadb/kubernetes-files/files
register: resultado_ls
- name: Fetch multiple notes from Bitwarden
shell: |
echo "unlock"
BW_SESSION=$(bw unlock {{ bw_password }} --raw)
echo "get item"
bw get item "{{ item.id }}" --session $BW_SESSION | jq -r '.notes' > {{ item.dest }}
loop:
- { id: "iac.ansible.dockersecrets", dest: "/tmp/mariadb/kubernetes-files/files/docker-secrets.yaml" }
- { id: "iac.ansible.mariadb.secret", dest: "/tmp/mariadb/kubernetes-files/files/mariadb-secret.yaml" }
args:
executable: /bin/bash
environment:
BW_PASSWORD: "{{ BW_PASSWORD }}"
- name: Show the ls output
debug:
var: resultado_ls.stdout_lines
- name: Apply the mariadb manifests
become: yes
become_user: fenix
shell: |
kubectl apply -f /tmp/mariadb/kubernetes-files/files/mariadb-namespace.yaml
kubectl apply -f /tmp/mariadb/kubernetes-files/files/
environment:
KUBECONFIG: /home/fenix/.kube/config
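
Since the Bitwarden notes are written straight into the directory that kubectl apply consumes, a pre-apply sanity check can catch a malformed note early. A sketch, assuming PyYAML is available on the host running the check:

import pathlib
import sys

import yaml  # assumption: PyYAML is installed

manifest_dir = pathlib.Path("/tmp/mariadb/kubernetes-files/files")
for path in sorted(manifest_dir.glob("*.yaml")):
    try:
        list(yaml.safe_load_all(path.read_text()))  # parse every document in the file
    except yaml.YAMLError as exc:
        sys.exit(f"{path}: invalid YAML: {exc}")
print("all manifests parsed cleanly")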

View File

@ -0,0 +1,4 @@
bw_password: "{{ lookup('env', 'BW_PASSWORD') }}"
VAULTWARDEN_LINK: "{{ lookup('env', 'VAULTWARDEN_LINK') }}"
BW_CLIENTID: "{{ lookup('env', 'BW_CLIENTID') }}"
BW_CLIENTSECRET: "{{ lookup('env', 'BW_CLIENTSECRET') }}"

Binary file not shown.

Binary file not shown.

View File

@ -0,0 +1,9 @@
apiVersion: v1
kind: Secret
metadata:
name: regcred
namespace: monitoring
data:
.dockerconfigjson: >-
eyJhdXRocyI6eyJodHRwczovL2luZGV4LmRvY2tlci5pby92MS8iOnsidXNlcm5hbWUiOiJ1c2VyIiwicGFzc3dvcmQiOiJwYXNzIiwiYXV0aCI6ImRmamlla2ZlcldFS1dFa29mY2RrbzM0MzUzZmQ9In19fQ==
type: kubernetes.io/dockerconfigjson

View File

@ -0,0 +1,230 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: uptime-kuma-sync-script
namespace: monitoring
data:
sync.py: |
import subprocess
import sys
import time
import inspect
import types
subprocess.run([sys.executable, "-m", "pip", "install", "uptime-kuma-api-v2", "--quiet"], check=True)
subprocess.run([
"bash", "-c",
"curl -LO https://dl.k8s.io/release/$(curl -Ls https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl && "
"chmod +x kubectl && mv kubectl /usr/local/bin/kubectl"
], check=True)
from uptime_kuma_api import UptimeKumaApi, MonitorType
import os
# ============================================================
# CONFIGURATION
# ============================================================
NOTIFICATION_IDS = [1]
FIXED_TAGS = ["k8s", "IAC"]
STATUS_PAGE_SLUG = "fenix"
STATUS_PAGE_TITLE = "Fenix IAC"
# ============================================================
UPTIME_KUMA_URL = os.environ["UPTIME_KUMA_URL"]
USERNAME = os.environ["USERNAME"]
PASSWORD = os.environ["PASSWORD"]
print("==> Authenticating with Uptime Kuma...")
api = UptimeKumaApi(UPTIME_KUMA_URL)
api.login(USERNAME, PASSWORD)
print("==> Authenticated successfully")
# ── Monkey-patch _build_status_page_data ─────────────────────
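# Why the patch (assumption): the helper's payload can include googleAnalyticsId,
# which the target Uptime Kuma release appears to reject, so it is stripped below
# before save_status_page sends the data.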
original_build = api._build_status_page_data.__func__
def patched_build(self, **kwargs):
result = original_build(self, **kwargs)
print(f" [DEBUG] type(result): {type(result)}")
print(f" [DEBUG] result: {result}")
slug, data, icon, public_group_list = result
data.pop("googleAnalyticsId", None)
return (slug, data, icon, public_group_list)
api._build_status_page_data = types.MethodType(patched_build, api)
print("==> Patch applied to _build_status_page_data")
# ── Tags ─────────────────────────────────────────────────────
print("==> Syncing tags...")
existing_tags = {t["name"]: t["id"] for t in api.get_tags()}
def ensure_tag(name, color="#0099ff"):
if name not in existing_tags:
print(f" [TAG] Creating tag '{name}'...")
result = api.add_tag(name=name, color=color)
existing_tags[name] = result["id"]
return existing_tags[name]
ensure_tag("k8s", color="#326CE5")
ensure_tag("IAC", color="#7B42BC")
# ── Existing monitors ─────────────────────────────────────────
print("==> Fetching existing monitors...")
existing_monitors = api.get_monitors()
existing_names = {m["name"] for m in existing_monitors}
print(f" {len(existing_names)} existing monitors")
# ── Ensure the fenix group ────────────────────────────────────
print("==> Checking for the 'fenix' group...")
fenix_group_id = None
for m in existing_monitors:
if m["name"] == "fenix" and m["type"] == "group":
fenix_group_id = m["id"]
print(f" [OK] Group 'fenix' already exists (ID: {fenix_group_id})")
break
if fenix_group_id is None:
print(" [CREATE] Creating group 'fenix'...")
group = api.add_monitor(type=MonitorType.GROUP, name="fenix")
fenix_group_id = group["monitorID"]
print(f" [OK] Group 'fenix' created (ID: {fenix_group_id})")
# ── Cluster Services ──────────────────────────────────────────
print("==> Listing the cluster Services...")
result = subprocess.run(
[
"kubectl", "get", "svc", "-A", "--no-headers",
"-o", "custom-columns=NAMESPACE:.metadata.namespace,NAME:.metadata.name,PORT:.spec.ports[0].port,TYPE:.spec.type"
],
capture_output=True, text=True
)
services = []
for line in result.stdout.strip().split("\n"):
parts = line.split()
if len(parts) < 3:
continue
namespace, name, port = parts[0], parts[1], parts[2]
if name == "kubernetes" or port == "<none>":
continue
services.append((namespace, name, port))
print(f" {len(services)} services found")
# ── Create monitors ───────────────────────────────────────────
created = 0
skipped = 0
for namespace, name, port in services:
monitor_name = f"{namespace}/{name}"
hostname = f"{name}.{namespace}.svc.cluster.local"
if monitor_name in existing_names:
print(f" [SKIP] {monitor_name}")
skipped += 1
continue
print(f" [CREATE] {monitor_name} ({hostname}:{port})")
try:
ensure_tag(namespace, color="#10B981")
monitor = api.add_monitor(
type=MonitorType.PORT,
name=monitor_name,
hostname=hostname,
port=int(port),
interval=60,
retryInterval=60,
maxretries=3,
parent=fenix_group_id,
notificationIDList={str(nid): True for nid in NOTIFICATION_IDS},
)
monitor_id = monitor["monitorID"]
api.add_monitor_tag(tag_id=existing_tags["k8s"], monitor_id=monitor_id)
api.add_monitor_tag(tag_id=existing_tags["IAC"], monitor_id=monitor_id)
api.add_monitor_tag(tag_id=existing_tags[namespace], monitor_id=monitor_id)
print(f" [OK] {monitor_name} created with tags and notifications")
created += 1
except Exception as e:
print(f" [ERROR] {monitor_name}: {e}")
# ── Refresh the monitor list after creation ───────────────────
existing_monitors = api.get_monitors()
# ── Status Page ───────────────────────────────────────────────
print("==> Updating the status page...")
try:
existing_pages = api.get_status_pages()
page_exists = any(p["slug"] == STATUS_PAGE_SLUG for p in existing_pages)
if not page_exists:
print(f" [CREATE] Creating status page '{STATUS_PAGE_SLUG}'...")
api.add_status_page(STATUS_PAGE_SLUG, STATUS_PAGE_TITLE)
time.sleep(5)
print(f" [OK] Status page created")
current = api.get_status_page(STATUS_PAGE_SLUG)
all_fenix_monitor_ids = [m["id"] for m in existing_monitors if m.get("parent") == fenix_group_id]
existing_in_page = []
for group in current.get("publicGroupList", []):
for mon in group.get("monitorList", []):
existing_in_page.append(mon["id"])
missing_ids = [mid for mid in all_fenix_monitor_ids if mid not in existing_in_page]
print(f" [DEBUG] all_fenix_monitor_ids: {all_fenix_monitor_ids}")
print(f" [DEBUG] missing_ids: {missing_ids}")
if not missing_ids:
print(f" [SKIP] All monitors are already on the status page")
else:
public_group_list = current.get("publicGroupList", [])
if public_group_list:
for mid in missing_ids:
public_group_list[0]["monitorList"].append({"id": mid})
else:
public_group_list = [
{
"name": "Fenix IAC K8s",
"weight": 1,
"monitorList": [{"id": mid} for mid in all_fenix_monitor_ids],
}
]
print(f" [DEBUG] publicGroupList: {public_group_list}")
api.save_status_page(
slug=STATUS_PAGE_SLUG,
id=current["id"],
title=current.get("title", STATUS_PAGE_TITLE),
description=current.get("description"),
theme=current.get("theme", "auto"),
published=current.get("published", True),
showTags=current.get("showTags", True),
domainNameList=current.get("domainNameList", []),
customCSS=current.get("customCSS") or "",
footerText=current.get("footerText"),
showPoweredBy=current.get("showPoweredBy", True),
showCertificateExpiry=current.get("showCertificateExpiry", False),
icon=current.get("icon", "/icon.svg"),
publicGroupList=public_group_list,
)
print(f" [OK] Status page updated: {len(missing_ids)} monitors added")
print(f" URL: {UPTIME_KUMA_URL}/status/{STATUS_PAGE_SLUG}")
except Exception as e:
print(f" [ERROR] Status page: {e}")
import traceback
traceback.print_exc()
print(f"==> Sync complete: {created} created, {skipped} skipped")
api.disconnect()

View File

@ -0,0 +1,41 @@
apiVersion: batch/v1
kind: CronJob
metadata:
name: uptime-kuma-sync
namespace: monitoring
spec:
schedule: "0 * * * *" # hourly
jobTemplate:
spec:
template:
spec:
serviceAccountName: uptime-kuma-sync
restartPolicy: OnFailure
containers:
- name: sync
image: python:3.12-slim
command: ["bash", "-c", "apt-get update -q && apt-get install -y -q curl && curl -LO https://dl.k8s.io/release/$(curl -Ls https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl && chmod +x kubectl && mv kubectl /usr/local/bin/ && python /scripts/sync.py"]
env:
- name: USERNAME
valueFrom:
secretKeyRef:
name: uptime-kuma-api-secret
key: USERNAME
- name: PASSWORD
valueFrom:
secretKeyRef:
name: uptime-kuma-api-secret
key: PASSWORD
- name: UPTIME_KUMA_URL
valueFrom:
secretKeyRef:
name: uptime-kuma-api-secret
key: UPTIME_KUMA_URL
volumeMounts:
- name: script
mountPath: /scripts
volumes:
- name: script
configMap:
name: uptime-kuma-sync-script
defaultMode: 0755
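
For a one-off test outside the hourly schedule, the CronJob can be instantiated as a regular Job; a sketch (the job name uptime-kuma-sync-manual is arbitrary):

import subprocess

# Create a one-shot Job from the CronJob template and let it run immediately.
subprocess.run(
    ["kubectl", "create", "job", "--from=cronjob/uptime-kuma-sync",
     "uptime-kuma-sync-manual", "-n", "monitoring"],
    check=True,
)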

View File

@ -0,0 +1,46 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: uptime-kuma
namespace: monitoring
spec:
replicas: 1
selector:
matchLabels:
app: uptime-kuma
strategy:
type: Recreate # required; SQLite does not support multiple replicas
template:
metadata:
labels:
app: uptime-kuma
spec:
imagePullSecrets:
- name: regcred
containers:
- name: uptime-kuma
image: louislam/uptime-kuma:2.2.1
ports:
- containerPort: 3001
name: http
env:
- name: UPTIME_KUMA_DB_TYPE
value: mariadb
- name: UPTIME_KUMA_DB_HOSTNAME
value: "mariadb-service.mariadb.svc.cluster.local"
- name: UPTIME_KUMA_DB_PORT
value: "3306"
envFrom:
- secretRef:
name: uptime-kuma-mariadb-secret
volumeMounts:
- name: data
mountPath: /app/data
volumes:
- name: data
persistentVolumeClaim:
claimName: uptime-kuma-data-pvc

View File

@ -0,0 +1,10 @@
apiVersion: v1
kind: Secret
metadata:
name: uptime-kuma-api-secret
namespace: monitoring
type: Opaque
data:
USERNAME: VVNFUk5BTUU=
PASSWORD: UEFTU1dPUkQ=
UPTIME_KUMA_URL: VVBUSU1FX0tVTUFfVVJM

View File

@ -0,0 +1,4 @@
apiVersion: v1
kind: Namespace
metadata:
name: monitoring

View File

@ -0,0 +1,12 @@
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: uptime-kuma-nfs-csi
namespace: monitoring
provisioner: nfs.csi.k8s.io
parameters:
mountOptions: "nolock,soft,intr"
server: 192.168.1.22
share: /mnt/fenix-main-nas-pool-0/data/k8s-Volumes/k8s-cluster-iac-deployed/uptime-kuma/data
allowVolumeExpansion: true
reclaimPolicy: Retain

View File

@ -0,0 +1,32 @@
apiVersion: v1
kind: PersistentVolume
metadata:
name: uptime-kuma-data-pv
namespace: monitoring
spec:
capacity:
storage: 60Gi
storageClassName: uptime-kuma-nfs-csi
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Retain
mountOptions:
- nolock
- nfsvers=3
nfs:
server: 192.168.1.22
path: /mnt/fenix-main-nas-pool-0/data/k8s-Volumes/k8s-cluster-iac-deployed/uptime-kuma/data
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: uptime-kuma-data-pvc
namespace: monitoring
spec:
storageClassName: uptime-kuma-nfs-csi
accessModes:
- ReadWriteOnce
volumeName: uptime-kuma-data-pv
resources:
requests:
storage: 60Gi

View File

@ -0,0 +1,10 @@
apiVersion: v1
kind: Secret
metadata:
name: uptime-kuma-mariadb-secret
namespace: monitoring
type: Opaque
data:
UPTIME_KUMA_DB_NAME: TUFSSUFEQl9EQVRBQkFTRQ==
UPTIME_KUMA_DB_USERNAME: TUFSSUFEQl9VU0VS
UPTIME_KUMA_DB_PASSWORD: TUFSSUFEQl9QQVNTV09SRA==

View File

@ -0,0 +1,13 @@
apiVersion: v1
kind: Service
metadata:
name: uptime-kuma
namespace: monitoring
spec:
selector:
app: uptime-kuma
ports:
- port: 3001
targetPort: http
name: http
type: ClusterIP

View File

@ -0,0 +1,27 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: uptime-kuma-sync
namespace: monitoring
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: uptime-kuma-sync
rules:
- apiGroups: [""]
resources: ["services"]
verbs: ["get", "list"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: uptime-kuma-sync
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: uptime-kuma-sync
subjects:
- kind: ServiceAccount
name: uptime-kuma-sync
namespace: monitoring
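
The sync script only needs to get/list Services, which is all this ClusterRole grants; a quick way to confirm the binding took effect (sketch):

import subprocess

# Prints "yes" and exits 0 if the ServiceAccount may list Services cluster-wide,
# otherwise prints "no" and exits non-zero.
subprocess.run(
    ["kubectl", "auth", "can-i", "list", "services",
     "--as=system:serviceaccount:monitoring:uptime-kuma-sync", "--all-namespaces"],
    check=True,
)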

View File

@ -0,0 +1,53 @@
- name: Remove the /tmp/monitoring/uptime-kuma/kubernetes-files directory
ansible.builtin.file:
path: /tmp/monitoring/uptime-kuma/kubernetes-files
state: absent
- name: Create the temporary directory on the remote host
file:
path: /tmp/monitoring/uptime-kuma/kubernetes-files
state: directory
mode: '0755'
- name: Copy the Kubernetes manifest files with owner and permissions
ansible.builtin.copy:
src: ../files
dest: /tmp/monitoring/uptime-kuma/kubernetes-files
owner: fenix
group: root
mode: '0644'
- name: List the contents of the remote directory
shell: ls -l /tmp/monitoring/uptime-kuma/kubernetes-files/files
register: resultado_ls
- name: Fetch multiple notes from Bitwarden
shell: |
echo "unlock"
BW_SESSION=$(bw unlock {{ bw_password }} --raw)
echo "get item"
bw get item "{{ item.id }}" --session $BW_SESSION | jq -r '.notes' > {{ item.dest }}
loop:
- { id: "iac.ansible.dockersecrets", dest: "/tmp/monitoring/uptime-kuma/kubernetes-files/files/docker-secrets.yaml" }
- { id: "iac.ansible.uptimekuma.mariadbsecret", dest: "/tmp/monitoring/uptime-kuma/kubernetes-files/files/uptime-kuma-secret.yaml" }
- { id: "iac.ansible.uptimekuma.monitorssecret", dest: "/tmp/monitoring/uptime-kuma/kubernetes-files/files/uptime-kuma-monitors-secret.yaml" }
args:
executable: /bin/bash
environment:
BW_PASSWORD: "{{ BW_PASSWORD }}"
- name: Show the ls output
debug:
var: resultado_ls.stdout_lines
- name: Apply the uptime-kuma manifests
become: yes
become_user: fenix
shell: |
kubectl apply -f /tmp/monitoring/uptime-kuma/kubernetes-files/files/uptime-kuma-namespace.yaml
kubectl apply -f /tmp/monitoring/uptime-kuma/kubernetes-files/files/
environment:
KUBECONFIG: /home/fenix/.kube/config

View File

@ -0,0 +1,4 @@
bw_password: "{{ lookup('env', 'BW_PASSWORD') }}"
VAULTWARDEN_LINK: "{{ lookup('env', 'VAULTWARDEN_LINK') }}"
BW_CLIENTID: "{{ lookup('env', 'BW_CLIENTID') }}"
BW_CLIENTSECRET: "{{ lookup('env', 'BW_CLIENTSECRET') }}"