Compare commits

...

11 Commits

55 changed files with 1827 additions and 39 deletions

View File

@ -22,6 +22,7 @@
hosts: master hosts: master
become: yes become: yes
roles: roles:
- metrics
- nvidia - nvidia
- stolon - stolon
- cloudflared - cloudflared
@ -43,3 +44,8 @@
- bookshelf - bookshelf
- shelfarr - shelfarr
- minecraft - minecraft
- lidarr
- soulseek
- soularr
- mariadb
- uptime-kuma

View File

@ -17,7 +17,7 @@ spec:
- name: regcred - name: regcred
containers: containers:
- name: bazarr - name: bazarr
image: lscr.io/linuxserver/bazarr:1.5.3 image: lscr.io/linuxserver/bazarr:1.5.6
securityContext: securityContext:
capabilities: capabilities:
add: add:
@ -26,6 +26,8 @@ spec:
- containerPort: 6767 - containerPort: 6767
name: webui name: webui
env: env:
- name: BAZARR_DEBUG
value: "true"
- name: PUID - name: PUID
value: "1013" value: "1013"
- name: PGID - name: PGID
@ -58,6 +60,10 @@ spec:
mountPath: /config mountPath: /config
- name: media - name: media
mountPath: /media mountPath: /media
- name: lingarrpy
mountPath: /app/bazarr/bin/bazarr/subtitles/tools/translate/services/lingarr_translator.py
subPath: lingarr_translator.py
readOnly: true
volumes: volumes:
- name: config - name: config
persistentVolumeClaim: persistentVolumeClaim:
@ -65,6 +71,9 @@ spec:
- name: media - name: media
persistentVolumeClaim: persistentVolumeClaim:
claimName: bazarr-media-pvc claimName: bazarr-media-pvc
- name: lingarrpy
configMap:
name: bazarr-lingarrpy-configmap

View File

@ -0,0 +1,207 @@
apiVersion: v1
kind: ConfigMap
metadata:
  name: bazarr-lingarrpy-configmap
  namespace: stack-arr
data:
  # Custom translator module mounted into the Bazarr container (subPath mount
  # at .../subtitles/tools/translate/services/lingarr_translator.py); it
  # delegates subtitle translation to an external Lingarr API.
  lingarr_translator.py: |
    # coding=utf-8
    import logging

    import pysubs2
    import requests
    from retry.api import retry
    from deep_translator.exceptions import TooManyRequests, RequestError

    from app.config import settings
    from app.database import TableShows, TableEpisodes, TableMovies, database, select
    from app.jobs_queue import jobs_queue
    from languages.custom_lang import CustomLanguage
    from languages.get_languages import alpha3_from_alpha2, language_from_alpha2, language_from_alpha3
    from radarr.history import history_log_movie
    from sonarr.history import history_log
    from subtitles.processing import ProcessSubtitlesResult
    from utilities.path_mappings import path_mappings
    from ..core.translator_utils import add_translator_info, create_process_result, get_title

    logger = logging.getLogger(__name__)


    class LingarrTranslatorService:
        """Translate a subtitle file by sending its lines to a Lingarr server."""

        def __init__(self, source_srt_file, dest_srt_file, lang_obj, to_lang, from_lang, media_type,
                     video_path, orig_to_lang, forced, hi, sonarr_series_id, sonarr_episode_id,
                     radarr_id):
            self.source_srt_file = source_srt_file
            self.dest_srt_file = dest_srt_file
            self.lang_obj = lang_obj
            self.to_lang = to_lang
            self.from_lang = from_lang
            self.media_type = media_type
            self.video_path = video_path
            self.orig_to_lang = orig_to_lang
            self.forced = forced
            self.hi = hi
            self.sonarr_series_id = sonarr_series_id
            self.sonarr_episode_id = sonarr_episode_id
            self.radarr_id = radarr_id
            # Alpha-2 codes that Lingarr expects in regional form.
            self.language_code_convert_dict = {
                'zh': 'zh-CN',
                'zt': 'zh-TW',
                'pb': 'pt-BR',
            }

        def translate(self, job_id=None):
            """Run the translation job.

            Returns the destination subtitle path on success (also when the
            source file has no lines to translate), or False on failure.
            """
            try:
                jobs_queue.update_job_progress(job_id=job_id, progress_max=1, progress_message=self.source_srt_file)
                subs = pysubs2.load(self.source_srt_file, encoding='utf-8')
                lines_list = [x.plaintext for x in subs]
                lines_list_len = len(lines_list)
                if lines_list_len == 0:
                    logger.debug('No lines to translate in subtitle file')
                    return self.dest_srt_file
                logger.debug(f'Starting translation for {self.source_srt_file}')
                translated_lines = self._translate_content(lines_list, job_id=job_id)
                if translated_lines is None:
                    logger.error(f'Translation failed for {self.source_srt_file}')
                    jobs_queue.update_job_progress(job_id=job_id,
                                                   progress_message=f'Translation failed for {self.source_srt_file}')
                    return False
                logger.debug(f'BAZARR saving Lingarr translated subtitles to {self.dest_srt_file}')
                # Map the returned positions back onto the original subtitle
                # events; positions missing from the response keep their text.
                translation_map = {}
                for item in translated_lines:
                    if isinstance(item, dict) and 'position' in item and 'line' in item:
                        translation_map[item['position']] = item['line']
                for i, line in enumerate(subs):
                    if i in translation_map and translation_map[i]:
                        line.text = translation_map[i]
                try:
                    subs.save(self.dest_srt_file)
                    # Constant string: the previous f-string prefix had no
                    # placeholders and has been dropped (same value).
                    add_translator_info(self.dest_srt_file, "# Subtitles translated with Lingarr # ")
                except OSError:
                    logger.error(f'BAZARR is unable to save translated subtitles to {self.dest_srt_file}')
                    jobs_queue.update_job_progress(job_id=job_id,
                                                   progress_message=f'Translation failed: Unable to save translated '
                                                                    f'subtitles to {self.dest_srt_file}')
                    # Re-raise the caught OSError; the previous `raise OSError`
                    # raised a fresh, message-less instance and lost the cause.
                    raise
                message = (f"{language_from_alpha2(self.from_lang)} subtitles translated to "
                           f"{language_from_alpha3(self.to_lang)} using Lingarr.")
                result = create_process_result(message, self.video_path, self.orig_to_lang, self.forced, self.hi,
                                               self.dest_srt_file, self.media_type)
                if self.media_type == 'series':
                    history_log(action=6,
                                sonarr_series_id=self.sonarr_series_id,
                                sonarr_episode_id=self.sonarr_episode_id,
                                result=result)
                else:
                    history_log_movie(action=6,
                                      radarr_id=self.radarr_id,
                                      result=result)
                jobs_queue.update_job_progress(job_id=job_id, progress_value='max')
                return self.dest_srt_file
            except Exception as e:
                logger.error(f'BAZARR encountered an error during Lingarr translation: {str(e)}')
                jobs_queue.update_job_progress(job_id=job_id, progress_message=f'Lingarr translation failed: {str(e)}')
                return False

        @retry(exceptions=(TooManyRequests, RequestError, requests.exceptions.RequestException), tries=3, delay=1,
               backoff=2, jitter=(0, 1))
        def _translate_content(self, lines_list, job_id):
            """POST the subtitle lines to Lingarr and return its line list.

            Retried via @retry on rate limiting / request / transport errors.
            Returns None on unrecoverable API errors; the final broad handler
            also falls through to an implicit None (handled by translate()).
            """
            try:
                source_lang = self.language_code_convert_dict.get(self.from_lang, self.from_lang)
                target_lang = self.language_code_convert_dict.get(self.orig_to_lang, self.orig_to_lang)
                lines_payload = []
                for i, line in enumerate(lines_list):
                    lines_payload.append({
                        "position": i,
                        # Blank lines are sent as a placeholder character so
                        # positions stay aligned in the response.
                        "line": line if line and line.strip() else 'a'
                    })
                title = get_title(
                    media_type=self.media_type,
                    radarr_id=self.radarr_id,
                    sonarr_series_id=self.sonarr_series_id,
                    sonarr_episode_id=self.sonarr_episode_id
                )
                if self.media_type == 'series':
                    api_media_type = "Episode"
                    arr_media_id = self.sonarr_series_id or 0
                else:
                    api_media_type = "Movie"
                    arr_media_id = self.radarr_id or 0
                payload = {
                    "arrMediaId": arr_media_id,
                    "title": title,
                    "sourceLanguage": source_lang,
                    "targetLanguage": target_lang,
                    "mediaType": api_media_type,
                    "lines": lines_payload
                }
                logger.debug(f'BAZARR is sending {len(lines_payload)} lines to Lingarr with full media context')
                headers = {"Content-Type": "application/json"}
                if settings.translator.lingarr_token:
                    headers["X-Api-Key"] = settings.translator.lingarr_token
                response = requests.post(
                    f"{settings.translator.lingarr_url}/api/translate/content",
                    json=payload,
                    headers=headers,
                    timeout=1800
                )
                if response.status_code == 200:
                    translated_batch = response.json()
                    # Validate response shape: a list of {"position", "line"} dicts.
                    if isinstance(translated_batch, list):
                        for item in translated_batch:
                            if not isinstance(item, dict) or 'position' not in item or 'line' not in item:
                                logger.error(f'Invalid response format from Lingarr API: {item}')
                                return None
                        return translated_batch
                    else:
                        logger.error(f'Unexpected response format from Lingarr API: {translated_batch}')
                        return None
                elif response.status_code == 401:
                    raise RequestError("Authentication failed: Invalid or missing API key")
                elif response.status_code == 429:
                    raise TooManyRequests("Rate limit exceeded")
                elif response.status_code >= 500:
                    raise RequestError(f"Server error: {response.status_code}")
                else:
                    logger.debug(f'Lingarr API error: {response.status_code} - {response.text}')
                    return None
            except requests.exceptions.Timeout:
                logger.debug('Lingarr API request timed out')
                raise RequestError("Request timed out")
            except requests.exceptions.ConnectionError:
                logger.debug('Lingarr API connection error')
                raise RequestError("Connection error")
            except requests.exceptions.RequestException as e:
                logger.debug(f'Lingarr API request failed: {str(e)}')
                raise
            except (TooManyRequests, RequestError) as e:
                logger.error(f'Lingarr API error after retries: {str(e)}')
                jobs_queue.update_job_progress(job_id=job_id, progress_message=f'Lingarr API error: {str(e)}')
                raise
            except Exception as e:
                logger.error(f'Unexpected error in Lingarr translation: {str(e)}')
                jobs_queue.update_job_progress(job_id=job_id, progress_message=f'Translation error: {str(e)}')

Binary file not shown.

View File

@ -9,6 +9,93 @@ spec:
--- ---
apiVersion: apps/v1 apiVersion: apps/v1
kind: Deployment kind: Deployment
metadata:
name: ai-openwebui-deployment
namespace: fenix-ai
spec:
replicas: 1
selector:
matchLabels:
app: ai-openwebui
template:
metadata:
labels:
app: ai-openwebui
spec:
containers:
- name: ai-openwebui
image: ghcr.io/open-webui/open-webui:cuda
ports:
- containerPort: 8080
env:
- name: WEBUI_HOST
value: "0.0.0.0"
- name: OLLAMA_BASE_URL
value: "http://ollama-api-svc.fenix-ai.svc.cluster.local:11434"
volumeMounts:
- name: ai-openwebui-data
mountPath: /app/backend/data
volumes:
- name: ai-openwebui-data
persistentVolumeClaim:
claimName: ai-openwebui-data-pvc
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: ai-openwebui-data-pv
namespace: fenix-ai
spec:
capacity:
storage: 40Gi
storageClassName: ai-openwebui-nfs-csi
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Retain
nfs:
server: 192.168.1.22
path: /mnt/fenix-main-nas-pool-0/data/k8s-Volumes/k8s-cluster-iac-deployed/ai-openwebui
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: ai-openwebui-data-pvc
namespace: fenix-ai
spec:
storageClassName: ai-openwebui-nfs-csi
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 40Gi
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: ai-openwebui-nfs-csi
namespace: fenix-ai
provisioner: nfs.csi.k8s.io
parameters:
server: 192.168.1.22
share: /mnt/fenix-main-nas-pool-0/data/k8s-Volumes/k8s-cluster-iac-deployed/ai-openwebui
allowVolumeExpansion: true
reclaimPolicy: Retain
---
apiVersion: v1
kind: Service
metadata:
name: ai-openwebui-svc
namespace: fenix-ai
spec:
selector:
app: ai-openwebui
ports:
- port: 8080
targetPort: 8080
type: ClusterIP # ou LoadBalancer se tiveres suporte
---
apiVersion: apps/v1
kind: Deployment
metadata: metadata:
name: ollama-deployment name: ollama-deployment
namespace: fenix-ai namespace: fenix-ai
@ -30,9 +117,16 @@ spec:
resources: resources:
limits: limits:
nvidia.com/gpu: 1 # garante uso da tua RTX 4060 Ti nvidia.com/gpu: 1 # garante uso da tua RTX 4060 Ti
memory: 17Gi
env: env:
- name: OLLAMA_HOST - name: OLLAMA_HOST
value: "0.0.0.0" value: "0.0.0.0"
- name: NVIDIA_VISIBLE_DEVICES
value: "all"
- name: NVIDIA_DRIVER_CAPABILITIES
value: "compute,utility"
- name: CUDA_VISIBLE_DEVICES
value: "0"
volumeMounts: volumeMounts:
- name: ollama-data - name: ollama-data
mountPath: /root/.ollama mountPath: /root/.ollama
@ -62,7 +156,7 @@ metadata:
namespace: fenix-ai namespace: fenix-ai
spec: spec:
capacity: capacity:
storage: 20Gi storage: 40Gi
storageClassName: ollama-ai-nfs-csi storageClassName: ollama-ai-nfs-csi
accessModes: accessModes:
- ReadWriteOnce - ReadWriteOnce
@ -82,7 +176,7 @@ spec:
- ReadWriteOnce - ReadWriteOnce
resources: resources:
requests: requests:
storage: 20Gi storage: 40Gi
--- ---
apiVersion: storage.k8s.io/v1 apiVersion: storage.k8s.io/v1
kind: StorageClass kind: StorageClass

View File

@ -0,0 +1,79 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: lidarr
  namespace: stack-arr
spec:
  replicas: 1
  selector:
    matchLabels:
      app: lidarr
  template:
    metadata:
      labels:
        app: lidarr
    spec:
      imagePullSecrets:
        - name: regcred
      containers:
        - name: lidarr
          image: ghcr.io/hotio/lidarr:release-563b232
          # NOTE(review): NET_ADMIN is an unusual capability for Lidarr —
          # confirm it is actually needed here.
          securityContext:
            capabilities:
              add:
                - NET_ADMIN
          ports:
            - containerPort: 8686
              name: webui
          env:
            - name: PUID
              value: "1013"
            - name: PGID
              value: "1013"
            - name: TZ
              value: "Etc/UTC"
            # PostgreSQL backend; credentials come from lidarr-secret.
            - name: LIDARR__POSTGRES__HOST
              value: "stolon-proxy-service.postgresql.svc.cluster.local"
            - name: LIDARR__POSTGRES__PORT
              value: "5432"
            - name: LIDARR__POSTGRES__USER
              valueFrom:
                secretKeyRef:
                  name: lidarr-secret
                  key: username
            - name: LIDARR__POSTGRES__PASSWORD
              valueFrom:
                secretKeyRef:
                  name: lidarr-secret
                  key: password
            - name: LIDARR__POSTGRES__MAINDB
              valueFrom:
                secretKeyRef:
                  name: lidarr-secret
                  key: maindb
            - name: LIDARR__POSTGRES__LOGDB
              valueFrom:
                secretKeyRef:
                  name: lidarr-secret
                  key: logsdb
          volumeMounts:
            - name: config
              mountPath: /config
            - name: downloads
              mountPath: /downloads
            # Music library mounted at /data — assumed to match the Lidarr
            # root-folder configuration; confirm.
            - name: music
              mountPath: /data
      volumes:
        - name: config
          persistentVolumeClaim:
            claimName: lidarr-config-pvc
        - name: downloads
          persistentVolumeClaim:
            claimName: qbittorrent-downloads-pvc
        - name: music
          persistentVolumeClaim:
            claimName: lidarr-music-pvc

View File

@ -0,0 +1,23 @@
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  # StorageClass is cluster-scoped: the original "namespace: stack-arr" was
  # ignored by the API server and has been removed.
  name: lidarr-nfs-csi
provisioner: nfs.csi.k8s.io
parameters:
  server: 192.168.1.22
  share: /mnt/fenix-main-nas-pool-0/data/k8s-Volumes/k8s-cluster-iac-deployed/stack-arr/lidarr
# These are top-level StorageClass fields, not provisioner parameters.
allowVolumeExpansion: true
reclaimPolicy: Retain
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: lidarr-music-nfs-csi
provisioner: nfs.csi.k8s.io
parameters:
  server: 192.168.1.22
  share: /mnt/fenix-main-nas-pool-0/data/Filmes_e_Series/Musicas
allowVolumeExpansion: true
reclaimPolicy: Retain

View File

@ -0,0 +1,60 @@
apiVersion: v1
kind: PersistentVolume
metadata:
  # PersistentVolume is cluster-scoped: the original "namespace: stack-arr"
  # was ignored by the API server and has been removed.
  name: lidarr-config-pv
spec:
  capacity:
    storage: 2Gi
  storageClassName: lidarr-nfs-csi
  accessModes:
    - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  nfs:
    server: 192.168.1.22
    path: /mnt/fenix-main-nas-pool-0/data/k8s-Volumes/k8s-cluster-iac-deployed/stack-arr/lidarr/config
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: lidarr-config-pvc
  namespace: stack-arr
spec:
  storageClassName: lidarr-nfs-csi
  accessModes:
    - ReadWriteOnce
  # Bind statically to the pre-created PV above.
  volumeName: lidarr-config-pv
  resources:
    requests:
      storage: 2Gi
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: lidarr-music-pv
spec:
  capacity:
    storage: 50Gi
  storageClassName: lidarr-music-nfs-csi
  accessModes:
    - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  nfs:
    server: 192.168.1.22
    path: /mnt/fenix-main-nas-pool-0/data/Filmes_e_Series/Musicas
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: lidarr-music-pvc
  namespace: stack-arr
spec:
  storageClassName: lidarr-music-nfs-csi
  accessModes:
    - ReadWriteOnce
  volumeName: lidarr-music-pv
  resources:
    requests:
      storage: 50Gi

View File

@ -0,0 +1,11 @@
apiVersion: v1
kind: Secret
metadata:
  name: lidarr-secret
  namespace: stack-arr
type: Opaque
# NOTE(review): placeholder values — the deploy task overwrites this file from
# Bitwarden ("iac.ansible.stackarr.lidarr.secret"). The committed maindb/logsdb
# decode to "maindb-prowlarr"/"logdb-prowlarr", which look copied from the
# prowlarr secret; confirm the Lidarr database names.
data:
  username: dXNlcm5hbWU=
  password: cGFzc3dvcmQ=
  maindb: bWFpbmRiLXByb3dsYXJy
  logsdb: bG9nZGItcHJvd2xhcnI=

View File

@ -0,0 +1,12 @@
apiVersion: v1
kind: Service
metadata:
  name: lidarr-service
  namespace: stack-arr
spec:
  type: ClusterIP
  selector:
    app: lidarr
  ports:
    - port: 8686
      targetPort: 8686

View File

@ -0,0 +1,51 @@
- name: Remover o diretório /tmp/stack-arr/lidarr/kubernetes-files
  ansible.builtin.file:
    path: /tmp/stack-arr/lidarr/kubernetes-files
    state: absent

- name: Criar diretório temporário no remoto
  ansible.builtin.file:
    path: /tmp/stack-arr/lidarr/kubernetes-files
    state: directory
    mode: '0755'

- name: Copy file with owner and permissions
  ansible.builtin.copy:
    src: ../files
    dest: /tmp/stack-arr/lidarr/kubernetes-files
    owner: fenix
    group: root
    mode: '0644'

# --passwordenv keeps the master password out of the process argument list
# (previously interpolated into the command, visible in `ps`); no_log keeps
# it out of task output.
- name: Obter várias notas do Bitwarden
  ansible.builtin.shell: |
    echo "unlock"
    BW_SESSION=$(bw unlock --passwordenv BW_PASSWORD --raw)
    echo "get item"
    bw get item "{{ item.id }}" --session $BW_SESSION | jq -r '.notes' > {{ item.dest }}
  loop:
    - { id: "iac.ansible.stackarr.lidarr.secret", dest: "/tmp/stack-arr/lidarr/kubernetes-files/files/lidarr-secret.yaml" }
  args:
    executable: /bin/bash
  environment:
    BW_PASSWORD: "{{ BW_PASSWORD }}"
  no_log: true

- name: Listar conteúdo do diretório remoto
  ansible.builtin.shell: ls -l /tmp/stack-arr/lidarr/kubernetes-files/files
  register: resultado_ls

- name: Mostrar resultado do ls
  ansible.builtin.debug:
    var: resultado_ls.stdout_lines

# Renamed from "Aplicar o stolon" — copy-paste from the stolon role.
- name: Aplicar o lidarr
  become: yes
  become_user: fenix
  ansible.builtin.shell: |
    kubectl apply -f /tmp/stack-arr/lidarr/kubernetes-files/files/
  environment:
    KUBECONFIG: /home/fenix/.kube/config

View File

@ -0,0 +1,4 @@
# Credentials are read from the runner's environment; never commit real values.
bw_password: "{{ lookup('env', 'BW_PASSWORD') }}"
VAULTWARDEN_LINK: "{{ lookup('env', 'VAULTWARDEN_LINK') }}"
BW_CLIENTID: "{{ lookup('env', 'BW_CLIENTID') }}"
# Fixed: the original had a space before the colon ("BW_CLIENTSECRET :").
BW_CLIENTSECRET: "{{ lookup('env', 'BW_CLIENTSECRET') }}"

View File

@ -0,0 +1,24 @@
apiVersion: v1
kind: ConfigMap
metadata:
  name: lingarr-configmap
  namespace: stack-arr
data:
  # .NET runtime configuration overlaid onto the Lingarr image (mounted via
  # subPath); Npgsql legacy timestamp behavior is enabled — presumably for
  # the PostgreSQL backend, confirm.
  Lingarr.Server.runtimeconfig.json: |
    {
      "runtimeOptions": {
        "tfm": "net9.0",
        "frameworks": [
          { "name": "Microsoft.NETCore.App", "version": "9.0.0" },
          { "name": "Microsoft.AspNetCore.App", "version": "9.0.0" }
        ],
        "configProperties": {
          "System.GC.Server": true,
          "System.Globalization.Invariant": false,
          "System.Reflection.Metadata.MetadataUpdater.IsSupported": false,
          "System.Reflection.NullabilityInfoContext.IsSupported": true,
          "System.Runtime.Serialization.EnableUnsafeBinaryFormatterSerialization": false,
          "Npgsql.EnableLegacyTimestampBehavior": true
        }
      }
    }

View File

@ -15,10 +15,12 @@ spec:
spec: spec:
containers: containers:
- name: lingarr - name: lingarr
image: lingarr/lingarr:latest image: lingarr/lingarr:main
ports: ports:
- containerPort: 9876 - containerPort: 9876
env: env:
- name: TZ
value: "UTC"
- name: ASPNETCORE_URLS - name: ASPNETCORE_URLS
value: "http://+:9876" value: "http://+:9876"
- name: WHISPER_BASE_URL - name: WHISPER_BASE_URL
@ -27,10 +29,48 @@ spec:
value: "auto" value: "auto"
- name: TARGET_LANGUAGE - name: TARGET_LANGUAGE
value: "pt" value: "pt"
- name: DB_CONNECTION
value: postgresql
- name: DB_HOST
value: 'stolon-proxy-service.postgresql.svc.cluster.local'
- name: DB_PORT
value: '5432'
- name: DB_USERNAME
valueFrom:
secretKeyRef:
name: lingarr-secret
key: username
- name: DB_PASSWORD
valueFrom:
secretKeyRef:
name: lingarr-secret
key: password
- name: DB_DATABASE
valueFrom:
secretKeyRef:
name: lingarr-secret
key: maindb
volumeMounts: volumeMounts:
- name: config - name: config
mountPath: /app/config mountPath: /app/config
- name: runtimeconfig
mountPath: /app/Lingarr.Server.runtimeconfig.json
subPath: Lingarr.Server.runtimeconfig.json
readOnly: true
- name: tv
mountPath: /tv
- name: anime
mountPath: /anime
volumes: volumes:
- name: config - name: config
persistentVolumeClaim: persistentVolumeClaim:
claimName: lingarr-config-pvc claimName: lingarr-config-pvc
- name: runtimeconfig
configMap:
name: lingarr-configmap
- name: tv
persistentVolumeClaim:
claimName: sonarr-tv-pvc
- name: anime
persistentVolumeClaim:
claimName: sonarr-anime-pvc

View File

@ -0,0 +1,10 @@
apiVersion: v1
kind: Secret
metadata:
  name: lingarr-secret
  namespace: stack-arr
type: Opaque
# NOTE(review): placeholder values — overwritten at deploy time from Bitwarden
# ("iac.ansible.stackarr.lingarr.secret"). The committed maindb decodes to
# "maindb-prowlarr", which looks copied from the prowlarr secret; confirm.
data:
  username: dXNlcm5hbWU=
  password: cGFzc3dvcmQ=
  maindb: bWFpbmRiLXByb3dsYXJy

View File

@ -18,18 +18,18 @@
mode: '0644' mode: '0644'
#- name: Obter várias notas do Bitwarden - name: Obter várias notas do Bitwarden
# shell: | shell: |
# echo "unlock" echo "unlock"
# BW_SESSION=$(bw unlock {{ bw_password }} --raw) BW_SESSION=$(bw unlock {{ bw_password }} --raw)
# echo "get item" echo "get item"
# bw get item "{{ item.id }}" --session $BW_SESSION | jq -r '.notes' > {{ item.dest }} bw get item "{{ item.id }}" --session $BW_SESSION | jq -r '.notes' > {{ item.dest }}
# loop: loop:
# - { id: "iac.ansible.stackarr.radarr.secret", dest: "/tmp/stack-arr/radarr/kubernetes-files/files/radarr-secret.yaml" } - { id: "iac.ansible.stackarr.lingarr.secret", dest: "/tmp/stack-arr/lingarr/kubernetes-files/files/lingarr-secret.yaml" }
# args: args:
# executable: /bin/bash executable: /bin/bash
# environment: environment:
# BW_PASSWORD: "{{ BW_PASSWORD }}" BW_PASSWORD: "{{ BW_PASSWORD }}"
- name: Listar conteúdo do diretório remoto - name: Listar conteúdo do diretório remoto

View File

@ -0,0 +1,9 @@
apiVersion: v1
kind: Secret
metadata:
  name: regcred
  namespace: mariadb
type: kubernetes.io/dockerconfigjson
data:
  # Dummy dockerconfigjson ("user"/"pass"); the real value is fetched from
  # Bitwarden at deploy time ("iac.ansible.dockersecrets") — presumably; confirm.
  .dockerconfigjson: >-
    eyJhdXRocyI6eyJodHRwczovL2luZGV4LmRvY2tlci5pby92MS8iOnsidXNlcm5hbWUiOiJ1c2VyIiwicGFzc3dvcmQiOiJwYXNzIiwiYXV0aCI6ImRmamlla2ZlcldFS1dFa29mY2RrbzM0MzUzZmQ9In19fQ==

View File

@ -0,0 +1,10 @@
apiVersion: v1
kind: ConfigMap
metadata:
  name: mariadb-config
  namespace: mariadb
data:
  my.cnf: |
    [mysqld]
    # Native AIO off + fsync flushing — typically needed when the data dir is
    # NFS-backed (the mariadb PV here is NFS); confirm.
    innodb_use_native_aio=0
    innodb_flush_method=fsync

View File

@ -0,0 +1,4 @@
apiVersion: v1
kind: Namespace
metadata:
  name: mariadb

View File

@ -0,0 +1,11 @@
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  # StorageClass is cluster-scoped: the original "namespace: mariadb" was
  # ignored by the API server and has been removed.
  name: mariadb-nfs-csi
provisioner: nfs.csi.k8s.io
parameters:
  server: 192.168.1.22
  share: /mnt/fenix-main-nas-pool-0/data/k8s-Volumes/k8s-cluster-iac-deployed/mariadb
# Top-level StorageClass fields, not provisioner parameters.
allowVolumeExpansion: true
reclaimPolicy: Retain

View File

@ -0,0 +1,31 @@
apiVersion: v1
kind: PersistentVolume
metadata:
  # PersistentVolume is cluster-scoped: the original "namespace: mariadb" was
  # ignored by the API server and has been removed.
  name: mariadb-pv-0
spec:
  capacity:
    storage: 50Gi
  storageClassName: mariadb-nfs-csi
  accessModes:
    - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  nfs:
    server: 192.168.1.22
    path: /mnt/fenix-main-nas-pool-0/data/k8s-Volumes/k8s-cluster-iac-deployed/mariadb
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  # Name follows the StatefulSet volumeClaimTemplate convention
  # (<template>-<statefulset>-<ordinal>) so ordinal 0 binds this PV.
  name: mariadb-data-mariadb-statefulset-0
  namespace: mariadb
spec:
  storageClassName: mariadb-nfs-csi
  accessModes:
    - ReadWriteOnce
  volumeName: mariadb-pv-0
  resources:
    requests:
      storage: 50Gi

View File

@ -0,0 +1,11 @@
apiVersion: v1
kind: Secret
metadata:
  name: mariadb-secret
  namespace: mariadb
type: Opaque
# NOTE(review): each value decodes to its own key name (placeholders only);
# the real file is fetched from Bitwarden ("iac.ansible.mariadb.secret").
data:
  MARIADB_ROOT_PASSWORD: TUFSSUFEQl9ST09UX1BBU1NXT1JE
  MARIADB_DATABASE: TUFSSUFEQl9EQVRBQkFTRQ==
  MARIADB_USER: TUFSSUFEQl9VU0VS
  MARIADB_PASSWORD: TUFSSUFEQl9QQVNTV09SRA==

View File

@ -0,0 +1,13 @@
apiVersion: v1
kind: Service
metadata:
  name: mariadb-service
  namespace: mariadb
spec:
  type: LoadBalancer
  # NOTE(review): spec.loadBalancerIP is deprecated in recent Kubernetes —
  # confirm the load-balancer implementation still honors it.
  loadBalancerIP: 10.240.0.102
  selector:
    app: mariadb-statefulset
  ports:
    - port: 3306
      targetPort: 3306

View File

@ -0,0 +1,50 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: mariadb-statefulset
  namespace: mariadb
spec:
  serviceName: "mariadb-statefulset"
  replicas: 1
  selector:
    matchLabels:
      app: mariadb-statefulset
  template:
    metadata:
      labels:
        app: mariadb-statefulset
    spec:
      imagePullSecrets:
        - name: regcred
      containers:
        - name: mariadb-statefulset
          image: mariadb:11
          ports:
            - containerPort: 3306
          # MARIADB_* variables come straight from the secret.
          envFrom:
            - secretRef:
                name: mariadb-secret
          volumeMounts:
            - mountPath: /var/lib/mysql
              name: mariadb-data
            - mountPath: /etc/mysql/conf.d/my.cnf
              name: mariadb-config
              subPath: my.cnf
      volumes:
        - name: mariadb-config
          configMap:
            name: mariadb-config
  # Dropped the redundant kind/apiVersion and the ignored namespace from the
  # template metadata (PVCs are always created in the StatefulSet's namespace).
  volumeClaimTemplates:
    - metadata:
        name: mariadb-data
      spec:
        # Pin the storage class so the template matches the pre-created
        # mariadb-data-mariadb-statefulset-0 PVC instead of the cluster default.
        storageClassName: mariadb-nfs-csi
        accessModes:
          - ReadWriteOnce
        resources:
          requests:
            storage: 50Gi

View File

@ -0,0 +1,52 @@
- name: Remover o diretório /tmp/mariadb/kubernetes-files
  ansible.builtin.file:
    path: /tmp/mariadb/kubernetes-files
    state: absent

- name: Criar diretório temporário no remoto
  ansible.builtin.file:
    path: /tmp/mariadb/kubernetes-files
    state: directory
    mode: '0755'

- name: Copy file with owner and permissions
  ansible.builtin.copy:
    src: ../files
    dest: /tmp/mariadb/kubernetes-files
    owner: fenix
    group: root
    mode: '0644'

- name: Listar conteúdo do diretório remoto
  ansible.builtin.shell: ls -l /tmp/mariadb/kubernetes-files/files
  register: resultado_ls

# --passwordenv keeps the master password out of the process argument list
# (previously interpolated into the command, visible in `ps`); no_log keeps
# it out of task output.
- name: Obter várias notas do Bitwarden
  ansible.builtin.shell: |
    echo "unlock"
    BW_SESSION=$(bw unlock --passwordenv BW_PASSWORD --raw)
    echo "get item"
    bw get item "{{ item.id }}" --session $BW_SESSION | jq -r '.notes' > {{ item.dest }}
  loop:
    - { id: "iac.ansible.dockersecrets", dest: "/tmp/mariadb/kubernetes-files/files/docker-secrets.yaml" }
    - { id: "iac.ansible.mariadb.secret", dest: "/tmp/mariadb/kubernetes-files/files/mariadb-secret.yaml" }
  args:
    executable: /bin/bash
  environment:
    BW_PASSWORD: "{{ BW_PASSWORD }}"
  no_log: true

- name: Mostrar resultado do ls
  ansible.builtin.debug:
    var: resultado_ls.stdout_lines

- name: Aplicar o mariadb
  become: yes
  become_user: fenix
  ansible.builtin.shell: |
    kubectl apply -f /tmp/mariadb/kubernetes-files/files/mariadb-namespace.yaml
    kubectl apply -f /tmp/mariadb/kubernetes-files/files/
  environment:
    KUBECONFIG: /home/fenix/.kube/config

View File

@ -0,0 +1,4 @@
# Credentials are read from the runner's environment; never commit real values.
bw_password: "{{ lookup('env', 'BW_PASSWORD') }}"
VAULTWARDEN_LINK: "{{ lookup('env', 'VAULTWARDEN_LINK') }}"
BW_CLIENTID: "{{ lookup('env', 'BW_CLIENTID') }}"
# Fixed: the original had a space before the colon ("BW_CLIENTSECRET :").
BW_CLIENTSECRET: "{{ lookup('env', 'BW_CLIENTSECRET') }}"

View File

@ -4,17 +4,31 @@ metadata:
name: minecraft-configmap name: minecraft-configmap
namespace: minecraft namespace: minecraft
data: data:
SERVER_NAME: "Example Minecraft Server" EULA: "TRUE"
EULA: "true" TYPE: "NEOFORGE"
MAX_PLAYERS: "10" VERSION: "1.21.1"
NEOFORGE_VERSION: "21.1.219"
MEMORY: "9336M"
MOTD: "Bemvindos ao mundo fenix"
ONLINE_MODE: "false"
DIFFICULTY: "2"
ENABLE_COMMAND_BLOCK: "true"
SIMULATION_DISTANCE: "8"
VIEW_DISTANCE: "8"
SEED: "fenix"
LEVEL: "world-fenix"
LEVEL_TYPE: "large_biomes"
ENABLE_WHITELIST: "true" ENABLE_WHITELIST: "true"
OPS: | CURSEFORGE_FILES: |-
tomas 1356598
Shy_Doge CF_API_KEY: "apikeyhere"
aleroqu MODRINTH_PROJECTS: |-
kiuma bluemap:5.7-neoforge
WHITELIST: | MODRINTH_DOWNLOAD_DEPENDENCIES: "required"
tomas ALLOW_FLIGHT: "true"
Shy_Doge ANNOUNCE_PLAYER_ACHIEVEMENTS: "true"
aleroqu SERVER_NAME: "FenixMine"
kiuma UID: "1013"
GID: "1013"
LOG_IPS: "false"
USE_AIKAR_FLAGS: "false"

View File

@ -11,3 +11,30 @@ spec:
app: minecraft app: minecraft
type: LoadBalancer type: LoadBalancer
loadBalancerIP: 192.168.1.153 loadBalancerIP: 192.168.1.153
---
apiVersion: v1
kind: Service
metadata:
name: minecraft-bluemap-service
namespace: minecraft
spec:
ports:
- port: 8100
protocol: TCP
selector:
app: minecraft
type: ClusterIP
---
apiVersion: v1
kind: Service
metadata:
name: minecraft-rcon-service
namespace: minecraft
spec:
ports:
- port: 25575
protocol: TCP
selector:
app: minecraft
type: ClusterIP

View File

@ -37,12 +37,19 @@ spec:
requests: requests:
cpu: 2 cpu: 2
memory: 5Gi memory: 5Gi
limits:
cpu: 2
memory: 11Gi
envFrom: envFrom:
- configMapRef: - configMapRef:
name: minecraft-configmap name: minecraft-configmap
ports: ports:
- containerPort: 25565 - containerPort: 25565
name: serverport name: serverport
- containerPort: 8100
name: bluemap
- containerPort: 25575
name: rcon
volumeMounts: volumeMounts:
- name: data - name: data
mountPath: /data mountPath: /data

View File

@ -1,18 +1,18 @@
- name: Remover o diretório /tmp/stack-arr/minecraft/kubernetes-files - name: Remover o diretório /tmp/minecraft/minecraft/kubernetes-files
ansible.builtin.file: ansible.builtin.file:
path: /tmp/stack-arr/minecraft/kubernetes-files path: /tmp/minecraft/minecraft/kubernetes-files
state: absent state: absent
- name: Criar diretório temporário no remoto - name: Criar diretório temporário no remoto
file: file:
path: /tmp/stack-arr/minecraft/kubernetes-files path: /tmp/minecraft/minecraft/kubernetes-files
state: directory state: directory
mode: '0755' mode: '0755'
- name: Copy file with owner and permissions - name: Copy file with owner and permissions
ansible.builtin.copy: ansible.builtin.copy:
src: ../files src: ../files
dest: /tmp/stack-arr/minecraft/kubernetes-files dest: /tmp/minecraft/minecraft/kubernetes-files
owner: fenix owner: fenix
group: root group: root
mode: '0644' mode: '0644'
@ -26,6 +26,7 @@
bw get item "{{ item.id }}" --session $BW_SESSION | jq -r '.notes' > {{ item.dest }} bw get item "{{ item.id }}" --session $BW_SESSION | jq -r '.notes' > {{ item.dest }}
loop: loop:
- { id: "iac.ansible.minecraft.minecraft.secret", dest: "/tmp/minecraft/minecraft/kubernetes-files/files/minecraft-secret.yaml" } - { id: "iac.ansible.minecraft.minecraft.secret", dest: "/tmp/minecraft/minecraft/kubernetes-files/files/minecraft-secret.yaml" }
- { id: "iac.ansible.minecraft.minecraft.configmap", dest: "/tmp/minecraft/minecraft/kubernetes-files/files/minecraft-configmap.yaml" }
args: args:
executable: /bin/bash executable: /bin/bash
environment: environment:
@ -33,7 +34,7 @@
- name: Listar conteúdo do diretório remoto - name: Listar conteúdo do diretório remoto
shell: ls -l /tmp/stack-arr/minecraft/kubernetes-files/files shell: ls -l /tmp/minecraft/minecraft/kubernetes-files/files
register: resultado_ls register: resultado_ls
@ -46,7 +47,7 @@
become: yes become: yes
become_user: fenix become_user: fenix
shell: | shell: |
kubectl apply -f /tmp/stack-arr/minecraft/kubernetes-files/files/minecraft-namespace.yaml kubectl apply -f /tmp/minecraft/minecraft/kubernetes-files/files/minecraft-namespace.yaml
kubectl apply -f /tmp/stack-arr/minecraft/kubernetes-files/files/ kubectl apply -f /tmp/minecraft/minecraft/kubernetes-files/files/
environment: environment:
KUBECONFIG: /home/fenix/.kube/config KUBECONFIG: /home/fenix/.kube/config

View File

@ -0,0 +1,49 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: soularr
  namespace: stack-arr
spec:
  replicas: 1
  selector:
    matchLabels:
      app: soularr
  template:
    metadata:
      labels:
        app: soularr
    spec:
      imagePullSecrets:
        - name: regcred
      containers:
        - name: soularr
          image: mrusse08/soularr:latest
          # NOTE(review): NET_ADMIN is an unusual capability for Soularr —
          # confirm it is actually needed here.
          securityContext:
            capabilities:
              add:
                - NET_ADMIN
          env:
            - name: PUID
              value: "1013"
            - name: PGID
              value: "1013"
            - name: TZ
              value: "Etc/UTC"
            # Seconds between Soularr runs.
            - name: SCRIPT_INTERVAL
              value: "300"
          volumeMounts:
            # Shares the qBittorrent downloads volume.
            - name: downloads
              mountPath: /downloads
            # Config mounted at /data — assumed location of config.ini; confirm.
            - name: config
              mountPath: /data
      volumes:
        - name: downloads
          persistentVolumeClaim:
            claimName: qbittorrent-downloads-pvc
        - name: config
          persistentVolumeClaim:
            claimName: soularr-config-pvc

View File

@ -0,0 +1,12 @@
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  # StorageClass is cluster-scoped: the original "namespace: stack-arr" was
  # ignored by the API server and has been removed.
  name: soularr-nfs-csi
provisioner: nfs.csi.k8s.io
parameters:
  server: 192.168.1.22
  share: /mnt/fenix-main-nas-pool-0/data/k8s-Volumes/k8s-cluster-iac-deployed/stack-arr/soularr
# Top-level StorageClass fields, not provisioner parameters.
allowVolumeExpansion: true
reclaimPolicy: Retain

View File

@ -0,0 +1,30 @@
apiVersion: v1
kind: PersistentVolume
metadata:
  # PersistentVolume is cluster-scoped: the original "namespace: stack-arr"
  # was ignored by the API server and has been removed.
  name: soularr-config-pv
spec:
  capacity:
    storage: 10Gi
  storageClassName: soularr-nfs-csi
  accessModes:
    - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  nfs:
    server: 192.168.1.22
    path: /mnt/fenix-main-nas-pool-0/data/k8s-Volumes/k8s-cluster-iac-deployed/stack-arr/soularr/config
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: soularr-config-pvc
  namespace: stack-arr
spec:
  storageClassName: soularr-nfs-csi
  accessModes:
    - ReadWriteOnce
  # Bind statically to the pre-created PV above.
  volumeName: soularr-config-pv
  resources:
    requests:
      storage: 10Gi

View File

@ -0,0 +1,51 @@
- name: Remover o diretório /tmp/stack-arr/soularr/kubernetes-files
  ansible.builtin.file:
    path: /tmp/stack-arr/soularr/kubernetes-files
    state: absent

- name: Criar diretório temporário no remoto
  ansible.builtin.file:
    path: /tmp/stack-arr/soularr/kubernetes-files
    state: directory
    mode: '0755'

- name: Copy file with owner and permissions
  ansible.builtin.copy:
    src: ../files
    dest: /tmp/stack-arr/soularr/kubernetes-files
    owner: fenix
    group: root
    mode: '0644'

# --passwordenv keeps the master password out of the process argument list
# (previously interpolated into the command, visible in `ps`); no_log keeps
# it out of task output.
- name: Obter várias notas do Bitwarden
  ansible.builtin.shell: |
    echo "unlock"
    BW_SESSION=$(bw unlock --passwordenv BW_PASSWORD --raw)
    echo "get item"
    bw get item "{{ item.id }}" --session $BW_SESSION | jq -r '.notes' > {{ item.dest }}
  loop:
    - { id: "iac.ansible.stackarr.soularr.secret", dest: "/tmp/stack-arr/soularr/kubernetes-files/files/soularr-secret.yaml" }
  args:
    executable: /bin/bash
  environment:
    BW_PASSWORD: "{{ BW_PASSWORD }}"
  no_log: true

- name: Listar conteúdo do diretório remoto
  ansible.builtin.shell: ls -l /tmp/stack-arr/soularr/kubernetes-files/files
  register: resultado_ls

- name: Mostrar resultado do ls
  ansible.builtin.debug:
    var: resultado_ls.stdout_lines

# Renamed from "Aplicar o stolon" — copy-paste from the stolon role.
- name: Aplicar o soularr
  become: yes
  become_user: fenix
  ansible.builtin.shell: |
    kubectl apply -f /tmp/stack-arr/soularr/kubernetes-files/files/
  environment:
    KUBECONFIG: /home/fenix/.kube/config

View File

@ -0,0 +1,4 @@
# Credentials are read from the runner's environment; never commit real values.
bw_password: "{{ lookup('env', 'BW_PASSWORD') }}"
VAULTWARDEN_LINK: "{{ lookup('env', 'VAULTWARDEN_LINK') }}"
BW_CLIENTID: "{{ lookup('env', 'BW_CLIENTID') }}"
# Fixed: the original had a space before the colon ("BW_CLIENTSECRET :").
BW_CLIENTSECRET: "{{ lookup('env', 'BW_CLIENTSECRET') }}"

View File

@ -0,0 +1,93 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: soulseek
  namespace: stack-arr
spec:
  replicas: 1
  selector:
    matchLabels:
      app: soulseek
  template:
    metadata:
      labels:
        app: soulseek
    spec:
      imagePullSecrets:
        - name: regcred
      # Ensures the host /dev/net/tun character device exists before the VPN
      # sidecar starts.
      initContainers:
        - name: init-tun
          image: busybox
          command:
            - sh
            - -c
            - |
              mkdir -p /dev/net
              [ -c /dev/net/tun ] || mknod /dev/net/tun c 10 200
              chmod 0666 /dev/net/tun
          securityContext:
            privileged: true
      containers:
        - name: vpn
          image: ghcr.io/wfg/openvpn-client
          securityContext:
            capabilities:
              add:
                - NET_ADMIN
          env:
            - name: VPN_CONFIG_FILE
              value: "vpn.conf"
            # NOTE(review): kill switch disabled — if the tunnel drops, traffic
            # can leave outside the VPN. Confirm this is intended.
            - name: KILL_SWITCH
              value: "off"
          volumeMounts:
            - name: vpn-config
              mountPath: /data/vpn
            # Host tun device mounted into the sidecar.
            - name: tun-device
              mountPath: /dev/net/tun
        - name: slskd
          image: slskd/slskd
          # NOTE(review): NET_ADMIN on the slskd container looks unnecessary
          # (the VPN sidecar already has it) — confirm.
          securityContext:
            capabilities:
              add:
                - NET_ADMIN
          ports:
            - containerPort: 5030
              name: webui
            - containerPort: 5031
              name: https
            - containerPort: 50300
              name: communication
          env:
            - name: SLSKD_FLAGS_NO_SQLITE_POOLING
              value: "true"
            - name: SLSKD_FLAGS_VOLATILE_AGENT_TOKEN
              value: "true"
            - name: TZ
              value: "Etc/UTC"
            - name: UID
              value: "1013"
            - name: GID
              value: "1013"
          volumeMounts:
            - name: app
              mountPath: /app
            - name: media
              mountPath: /downloads
      volumes:
        - name: app
          persistentVolumeClaim:
            claimName: soulseek-app-pvc
        - name: media
          persistentVolumeClaim:
            claimName: soulseek-media-pvc
        - name: vpn-config
          persistentVolumeClaim:
            claimName: soulseek-vpn-config-pvc
        - name: tun-device
          hostPath:
            path: /dev/net/tun
            type: CharDevice

View File

@ -0,0 +1,24 @@
# StorageClasses are cluster-scoped: the original metadata.namespace fields
# were meaningless and have been removed. NFS mount options belong in the
# StorageClass's top-level mountOptions list — csi-driver-nfs does not read a
# "mountOptions" key from parameters, so the original options were ignored.
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: soulseek-nfs-csi
provisioner: nfs.csi.k8s.io
parameters:
  server: 192.168.1.22
  share: /mnt/fenix-main-nas-pool-0/data/k8s-Volumes/k8s-cluster-iac-deployed/stack-arr/soulseek
mountOptions:
  - nolock
  - soft
  - intr  # accepted but a no-op on modern kernels; kept for parity
allowVolumeExpansion: true
reclaimPolicy: Retain
---
# Media share class (no extra mount options in the original).
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: soulseek-media-nfs-csi
provisioner: nfs.csi.k8s.io
parameters:
  server: 192.168.1.22
  share: /mnt/fenix-main-nas-pool-0/data/Filmes_e_Series/Downloads
allowVolumeExpansion: true
reclaimPolicy: Retain

View File

@ -0,0 +1,94 @@
# Static NFS PV/PVC pairs for soulseek. PersistentVolumes are cluster-scoped:
# the original metadata.namespace fields on the PVs were meaningless and have
# been removed (namespaces on the PVCs are kept — PVCs are namespaced).
apiVersion: v1
kind: PersistentVolume
metadata:
  name: soulseek-app-pv
spec:
  capacity:
    storage: 6Gi
  storageClassName: soulseek-nfs-csi
  accessModes:
    - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  mountOptions:
    - nolock
    - nfsvers=3
  nfs:
    server: 192.168.1.22
    path: /mnt/fenix-main-nas-pool-0/data/k8s-Volumes/k8s-cluster-iac-deployed/stack-arr/soulseek/config
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: soulseek-app-pvc
  namespace: stack-arr
spec:
  storageClassName: soulseek-nfs-csi
  accessModes:
    - ReadWriteOnce
  volumeName: soulseek-app-pv
  resources:
    requests:
      storage: 6Gi
---
# NOTE(review): unlike the app PV, this media PV sets no nolock/nfsvers mount
# options — confirm whether that asymmetry is intentional.
apiVersion: v1
kind: PersistentVolume
metadata:
  name: soulseek-media-pv
spec:
  capacity:
    storage: 500Gi
  storageClassName: soulseek-media-nfs-csi
  accessModes:
    - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  nfs:
    server: 192.168.1.22
    path: /mnt/fenix-main-nas-pool-0/data/Filmes_e_Series/Downloads
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: soulseek-media-pvc
  namespace: stack-arr
spec:
  storageClassName: soulseek-media-nfs-csi
  accessModes:
    - ReadWriteOnce
  volumeName: soulseek-media-pv
  resources:
    requests:
      storage: 500Gi
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: soulseek-vpn-config-pv
spec:
  capacity:
    storage: 10Gi
  storageClassName: soulseek-nfs-csi
  accessModes:
    - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  nfs:
    server: 192.168.1.22
    path: /mnt/fenix-main-nas-pool-0/data/k8s-Volumes/k8s-cluster-iac-deployed/stack-arr/soulseek/vpn/config
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: soulseek-vpn-config-pvc
  namespace: stack-arr
spec:
  storageClassName: soulseek-nfs-csi
  accessModes:
    - ReadWriteOnce
  volumeName: soulseek-vpn-config-pv
  resources:
    requests:
      storage: 10Gi
---

View File

@ -0,0 +1,25 @@
# ClusterIP service for the slskd web UI (port 5030).
apiVersion: v1
kind: Service
metadata:
  name: soulseek-service
  namespace: stack-arr
spec:
  ports:
    - port: 5030
      targetPort: 5030
  selector:
    app: soulseek
  type: ClusterIP
---
# ClusterIP service for the Soulseek peer communication port (50300).
apiVersion: v1
kind: Service
metadata:
  name: soulseek-communication-service
  namespace: stack-arr
spec:
  ports:
    - port: 50300
      targetPort: 50300
  selector:
    app: soulseek
  type: ClusterIP

View File

@ -0,0 +1,37 @@
# Tasks: stage the Soulseek Kubernetes manifests on the remote host and apply
# them with kubectl.
- name: Remover o diretório /tmp/stack-arr/soulseek/kubernetes-files
  ansible.builtin.file:
    path: /tmp/stack-arr/soulseek/kubernetes-files
    state: absent

- name: Criar diretório temporário no remoto
  ansible.builtin.file:
    path: /tmp/stack-arr/soulseek/kubernetes-files
    state: directory
    mode: '0755'

- name: Copy file with owner and permissions
  ansible.builtin.copy:
    src: ../files
    dest: /tmp/stack-arr/soulseek/kubernetes-files
    owner: fenix
    group: root
    mode: '0644'

- name: Listar conteúdo do diretório remoto
  ansible.builtin.shell: ls -l /tmp/stack-arr/soulseek/kubernetes-files/files
  register: resultado_ls
  changed_when: false  # read-only inspection

- name: Mostrar resultado do ls
  ansible.builtin.debug:
    var: resultado_ls.stdout_lines

# Original task name said "stolon" — copy/paste leftover; this applies soulseek.
- name: Aplicar o soulseek
  become: yes
  become_user: fenix
  ansible.builtin.shell: |
    kubectl apply -f /tmp/stack-arr/soulseek/kubernetes-files/files/
  environment:
    KUBECONFIG: /home/fenix/.kube/config

View File

@ -0,0 +1,4 @@
---
# Bitwarden CLI credentials, injected from the controller's environment.
bw_password: "{{ lookup('env', 'BW_PASSWORD') }}"
VAULTWARDEN_LINK: "{{ lookup('env', 'VAULTWARDEN_LINK') }}"
BW_CLIENTID: "{{ lookup('env', 'BW_CLIENTID') }}"
# Fixed: the original had a space before the colon ("BW_CLIENTSECRET :"),
# which yamllint rejects and which is fragile across parsers.
BW_CLIENTSECRET: "{{ lookup('env', 'BW_CLIENTSECRET') }}"

View File

@ -0,0 +1,9 @@
# Docker registry pull secret for the monitoring namespace.
# NOTE(review): the base64 payload embeds registry credentials directly in
# this manifest — they look like placeholder values, but confirm the real
# credentials are injected at deploy time (e.g. via the Bitwarden tasks) and
# never committed to VCS.
apiVersion: v1
kind: Secret
metadata:
  name: regcred
  namespace: monitoring
data:
  .dockerconfigjson: >-
    eyJhdXRocyI6eyJodHRwczovL2luZGV4LmRvY2tlci5pby92MS8iOnsidXNlcm5hbWUiOiJ1c2VyIiwicGFzc3dvcmQiOiJwYXNzIiwiYXV0aCI6ImRmamlla2ZlcldFS1dFa29mY2RrbzM0MzUzZmQ9In19fQ==
type: kubernetes.io/dockerconfigjson

View File

@ -0,0 +1,230 @@
# ConfigMap carrying the Python script run by the uptime-kuma-sync CronJob:
# it discovers cluster Services via kubectl and mirrors them as TCP-port
# monitors on an Uptime Kuma instance, then maintains a status page.
apiVersion: v1
kind: ConfigMap
metadata:
  name: uptime-kuma-sync-script
  namespace: monitoring
data:
  sync.py: |
    # Sync script: one Uptime Kuma PORT monitor per cluster Service, grouped
    # under a "fenix" group monitor, tagged, and published on a status page.
    # NOTE(review): `inspect` is imported but never used.
    import subprocess
    import sys
    import time
    import inspect
    import types
    # Runtime dependencies: the API client via pip, and kubectl via curl.
    # NOTE(review): the CronJob wrapper command already installs kubectl
    # before invoking this script, so this second install is redundant.
    subprocess.run([sys.executable, "-m", "pip", "install", "uptime-kuma-api-v2", "--quiet"], check=True)
    subprocess.run([
        "bash", "-c",
        "curl -LO https://dl.k8s.io/release/$(curl -Ls https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl && "
        "chmod +x kubectl && mv kubectl /usr/local/bin/kubectl"
    ], check=True)
    # Import deferred until after the pip install above.
    from uptime_kuma_api import UptimeKumaApi, MonitorType
    import os
    # ============================================================
    # CONFIGURAÇÃO
    # ============================================================
    NOTIFICATION_IDS = [1]
    FIXED_TAGS = ["k8s", "IAC"]
    STATUS_PAGE_SLUG = "fenix"
    STATUS_PAGE_TITLE = "Fenix IAC"
    # ============================================================
    # Credentials come from the uptime-kuma-api-secret via the CronJob env.
    UPTIME_KUMA_URL = os.environ["UPTIME_KUMA_URL"]
    USERNAME = os.environ["USERNAME"]
    PASSWORD = os.environ["PASSWORD"]
    print("==> A autenticar no Uptime Kuma...")
    api = UptimeKumaApi(UPTIME_KUMA_URL)
    api.login(USERNAME, PASSWORD)
    print("==> Autenticado com sucesso")
    # ── Monkey-patch _build_status_page_data ─────────────────────
    # Strips "googleAnalyticsId" from the payload before it is sent;
    # presumably the server rejects that field — TODO confirm against the
    # installed uptime-kuma-api-v2 / Uptime Kuma versions.
    original_build = api._build_status_page_data.__func__
    def patched_build(self, **kwargs):
        result = original_build(self, **kwargs)
        print(f" [DEBUG] type(result): {type(result)}")
        print(f" [DEBUG] result: {result}")
        slug, data, icon, public_group_list = result
        data.pop("googleAnalyticsId", None)
        return (slug, data, icon, public_group_list)
    api._build_status_page_data = types.MethodType(patched_build, api)
    print("==> Patch aplicado ao _build_status_page_data")
    # ── Tags ─────────────────────────────────────────────────────
    print("==> A sincronizar tags...")
    existing_tags = {t["name"]: t["id"] for t in api.get_tags()}
    def ensure_tag(name, color="#0099ff"):
        # Create the tag if missing; returns its id either way.
        if name not in existing_tags:
            print(f" [TAG] A criar tag '{name}'...")
            result = api.add_tag(name=name, color=color)
            existing_tags[name] = result["id"]
        return existing_tags[name]
    ensure_tag("k8s", color="#326CE5")
    ensure_tag("IAC", color="#7B42BC")
    # ── Monitores existentes ──────────────────────────────────────
    print("==> A obter monitores existentes...")
    existing_monitors = api.get_monitors()
    existing_names = {m["name"] for m in existing_monitors}
    print(f" {len(existing_names)} monitores existentes")
    # ── Garantir grupo fenix ──────────────────────────────────────
    print("==> A verificar grupo 'fenix'...")
    fenix_group_id = None
    for m in existing_monitors:
        if m["name"] == "fenix" and m["type"] == "group":
            fenix_group_id = m["id"]
            print(f" [OK] Grupo 'fenix' já existe (ID: {fenix_group_id})")
            break
    if fenix_group_id is None:
        print(" [CRIAR] A criar grupo 'fenix'...")
        group = api.add_monitor(type=MonitorType.GROUP, name="fenix")
        fenix_group_id = group["monitorID"]
        print(f" [OK] Grupo 'fenix' criado (ID: {fenix_group_id})")
    # ── Services do cluster ───────────────────────────────────────
    # Uses the pod ServiceAccount (get/list services cluster-wide).
    # NOTE(review): no check=True here — a kubectl failure yields empty
    # stdout and the script silently syncs nothing.
    print("==> A listar Services do cluster...")
    result = subprocess.run(
        [
            "kubectl", "get", "svc", "-A", "--no-headers",
            "-o", "custom-columns=NAMESPACE:.metadata.namespace,NAME:.metadata.name,PORT:.spec.ports[0].port,TYPE:.spec.type"
        ],
        capture_output=True, text=True
    )
    services = []
    for line in result.stdout.strip().split("\n"):
        parts = line.split()
        if len(parts) < 3:
            continue
        namespace, name, port = parts[0], parts[1], parts[2]
        # Skip the API server service and headless/portless services.
        if name == "kubernetes" or port == "<none>":
            continue
        services.append((namespace, name, port))
    print(f" {len(services)} services encontrados")
    # ── Criar monitores ───────────────────────────────────────────
    created = 0
    skipped = 0
    for namespace, name, port in services:
        monitor_name = f"{namespace}/{name}"
        hostname = f"{name}.{namespace}.svc.cluster.local"
        if monitor_name in existing_names:
            print(f" [SKIP] {monitor_name}")
            skipped += 1
            continue
        print(f" [CRIAR] {monitor_name} ({hostname}:{port})")
        try:
            ensure_tag(namespace, color="#10B981")
            monitor = api.add_monitor(
                type=MonitorType.PORT,
                name=monitor_name,
                hostname=hostname,
                port=int(port),
                interval=60,
                retryInterval=60,
                maxretries=3,
                parent=fenix_group_id,
                notificationIDList={str(nid): True for nid in NOTIFICATION_IDS},
            )
            monitor_id = monitor["monitorID"]
            api.add_monitor_tag(tag_id=existing_tags["k8s"], monitor_id=monitor_id)
            api.add_monitor_tag(tag_id=existing_tags["IAC"], monitor_id=monitor_id)
            api.add_monitor_tag(tag_id=existing_tags[namespace], monitor_id=monitor_id)
            print(f" [OK] {monitor_name} criado com tags e notificações")
            created += 1
        except Exception as e:
            # Best-effort per monitor: log and keep going.
            print(f" [ERRO] {monitor_name}: {e}")
    # ── Refrescar lista de monitores após criação ─────────────────
    existing_monitors = api.get_monitors()
    # ── Status Page ───────────────────────────────────────────────
    print("==> A atualizar status page...")
    try:
        existing_pages = api.get_status_pages()
        page_exists = any(p["slug"] == STATUS_PAGE_SLUG for p in existing_pages)
        if not page_exists:
            print(f" [CRIAR] A criar status page '{STATUS_PAGE_SLUG}'...")
            api.add_status_page(STATUS_PAGE_SLUG, STATUS_PAGE_TITLE)
            # Give the server a moment before reading the page back.
            time.sleep(5)
            print(f" [OK] Status page criada")
        current = api.get_status_page(STATUS_PAGE_SLUG)
        # Monitors belonging to the fenix group that are not yet on the page.
        all_fenix_monitor_ids = [m["id"] for m in existing_monitors if m.get("parent") == fenix_group_id]
        existing_in_page = []
        for group in current.get("publicGroupList", []):
            for mon in group.get("monitorList", []):
                existing_in_page.append(mon["id"])
        missing_ids = [mid for mid in all_fenix_monitor_ids if mid not in existing_in_page]
        print(f" [DEBUG] all_fenix_monitor_ids: {all_fenix_monitor_ids}")
        print(f" [DEBUG] missing_ids: {missing_ids}")
        if not missing_ids:
            print(f" [SKIP] Todos os monitores já estão na status page")
        else:
            # Append to the first existing group, or create one from scratch.
            public_group_list = current.get("publicGroupList", [])
            if public_group_list:
                for mid in missing_ids:
                    public_group_list[0]["monitorList"].append({"id": mid})
            else:
                public_group_list = [
                    {
                        "name": "Fenix IAC K8s",
                        "weight": 1,
                        "monitorList": [{"id": mid} for mid in all_fenix_monitor_ids],
                    }
                ]
            print(f" [DEBUG] publicGroupList: {public_group_list}")
            api.save_status_page(
                slug=STATUS_PAGE_SLUG,
                id=current["id"],
                title=current.get("title", STATUS_PAGE_TITLE),
                description=current.get("description"),
                theme=current.get("theme", "auto"),
                published=current.get("published", True),
                showTags=current.get("showTags", True),
                domainNameList=current.get("domainNameList", []),
                customCSS=current.get("customCSS") or "",
                footerText=current.get("footerText"),
                showPoweredBy=current.get("showPoweredBy", True),
                showCertificateExpiry=current.get("showCertificateExpiry", False),
                icon=current.get("icon", "/icon.svg"),
                publicGroupList=public_group_list,
            )
            print(f" [OK] Status page atualizada — {len(missing_ids)} monitores adicionados")
            print(f" URL: {UPTIME_KUMA_URL}/status/{STATUS_PAGE_SLUG}")
    except Exception as e:
        print(f" [ERRO] Status page: {e}")
        import traceback
        traceback.print_exc()
    print(f"==> Sync concluído — {created} criados, {skipped} ignorados")
    # NOTE(review): disconnect is not in a finally block; on an early crash
    # the socket is left to the pod teardown.
    api.disconnect()

View File

@ -0,0 +1,41 @@
# Hourly CronJob that runs the sync script from the uptime-kuma-sync-script
# ConfigMap under the read-only uptime-kuma-sync ServiceAccount.
apiVersion: batch/v1
kind: CronJob
metadata:
  name: uptime-kuma-sync
  namespace: monitoring
spec:
  schedule: "0 * * * *"  # cada hora
  # Added: never start a new sync while the previous one is still running —
  # overlapping runs would race on monitor/tag/status-page creation.
  concurrencyPolicy: Forbid
  successfulJobsHistoryLimit: 3
  failedJobsHistoryLimit: 3
  jobTemplate:
    spec:
      template:
        spec:
          serviceAccountName: uptime-kuma-sync
          restartPolicy: OnFailure
          containers:
            - name: sync
              image: python:3.12-slim
              # Installs curl + kubectl at runtime, then runs the mounted
              # sync script.
              command: ["bash", "-c", "apt-get update -q && apt-get install -y -q curl && curl -LO https://dl.k8s.io/release/$(curl -Ls https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl && chmod +x kubectl && mv kubectl /usr/local/bin/ && python /scripts/sync.py"]
              env:
                - name: USERNAME
                  valueFrom:
                    secretKeyRef:
                      name: uptime-kuma-api-secret
                      key: USERNAME
                - name: PASSWORD
                  valueFrom:
                    secretKeyRef:
                      name: uptime-kuma-api-secret
                      key: PASSWORD
                - name: UPTIME_KUMA_URL
                  valueFrom:
                    secretKeyRef:
                      name: uptime-kuma-api-secret
                      key: UPTIME_KUMA_URL
              volumeMounts:
                - name: script
                  mountPath: /scripts
          volumes:
            - name: script
              configMap:
                name: uptime-kuma-sync-script
                defaultMode: 0755

View File

@ -0,0 +1,46 @@
# Uptime Kuma server, backed by an external MariaDB (env below) with its
# data directory on an NFS-backed PVC.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: uptime-kuma
  namespace: monitoring
spec:
  replicas: 1
  selector:
    matchLabels:
      app: uptime-kuma
  strategy:
    # Recreate: the data PVC is ReadWriteOnce, so old and new pods must not
    # overlap. (The original note cited SQLite, but this instance is
    # configured for MariaDB below.)
    type: Recreate
  template:
    metadata:
      labels:
        app: uptime-kuma
    spec:
      imagePullSecrets:
        - name: regcred
      containers:
        - name: uptime-kuma
          image: louislam/uptime-kuma:2.2.1
          ports:
            - containerPort: 3001
              name: http
          env:
            - name: UPTIME_KUMA_DB_TYPE
              value: mariadb
            - name: UPTIME_KUMA_DB_HOSTNAME
              value: "mariadb-service.mariadb.svc.cluster.local"
            - name: UPTIME_KUMA_DB_PORT
              value: "3306"
          # DB name/user/password come from the secret as whole-env injection.
          envFrom:
            - secretRef:
                name: uptime-kuma-mariadb-secret
          volumeMounts:
            - name: data
              mountPath: /app/data
      volumes:
        - name: data
          persistentVolumeClaim:
            claimName: uptime-kuma-data-pvc

View File

@ -0,0 +1,10 @@
# Credentials the sync CronJob uses to log in to the Uptime Kuma API.
# NOTE(review): the values below are base64 of the literal strings
# "USERNAME"/"PASSWORD"/"UPTIME_KUMA_URL" — placeholders, presumably
# overwritten at deploy time by the Bitwarden-fetched manifest; verify.
apiVersion: v1
kind: Secret
metadata:
  name: uptime-kuma-api-secret
  namespace: monitoring
type: Opaque
data:
  USERNAME: VVNFUk5BTUU=
  PASSWORD: UEFTU1dPUkQ=
  UPTIME_KUMA_URL: VVBUSU1FX0tVTUFfVVJM

View File

@ -0,0 +1,4 @@
# Namespace for the monitoring stack (uptime-kuma and its sync CronJob).
apiVersion: v1
kind: Namespace
metadata:
  name: monitoring

View File

@ -0,0 +1,12 @@
# StorageClasses are cluster-scoped: the original metadata.namespace field was
# meaningless and has been removed. NFS mount options belong in the top-level
# mountOptions list — csi-driver-nfs does not read a "mountOptions" key from
# parameters, so the original options were ignored.
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: uptime-kuma-nfs-csi
provisioner: nfs.csi.k8s.io
parameters:
  server: 192.168.1.22
  share: /mnt/fenix-main-nas-pool-0/data/k8s-Volumes/k8s-cluster-iac-deployed/uptime-kuma/data
mountOptions:
  - nolock
  - soft
  - intr  # accepted but a no-op on modern kernels; kept for parity
allowVolumeExpansion: true
reclaimPolicy: Retain

View File

@ -0,0 +1,32 @@
# Static NFS PV for uptime-kuma data. PersistentVolumes are cluster-scoped:
# the original metadata.namespace field was meaningless and has been removed.
apiVersion: v1
kind: PersistentVolume
metadata:
  name: uptime-kuma-data-pv
spec:
  capacity:
    storage: 60Gi
  storageClassName: uptime-kuma-nfs-csi
  accessModes:
    - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  mountOptions:
    - nolock
    - nfsvers=3
  nfs:
    server: 192.168.1.22
    path: /mnt/fenix-main-nas-pool-0/data/k8s-Volumes/k8s-cluster-iac-deployed/uptime-kuma/data
---
# Claim binds statically to the PV above via volumeName.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: uptime-kuma-data-pvc
  namespace: monitoring
spec:
  storageClassName: uptime-kuma-nfs-csi
  accessModes:
    - ReadWriteOnce
  volumeName: uptime-kuma-data-pv
  resources:
    requests:
      storage: 60Gi

View File

@ -0,0 +1,10 @@
# MariaDB connection settings injected into the uptime-kuma pod via envFrom.
# NOTE(review): the values below are base64 of the literal strings
# "MARIADB_DATABASE"/"MARIADB_USER"/"MARIADB_PASSWORD" — placeholders,
# presumably overwritten at deploy time by the Bitwarden-fetched manifest;
# verify.
apiVersion: v1
kind: Secret
metadata:
  name: uptime-kuma-mariadb-secret
  namespace: monitoring
type: Opaque
data:
  UPTIME_KUMA_DB_NAME: TUFSSUFEQl9EQVRBQkFTRQ==
  UPTIME_KUMA_DB_USERNAME: TUFSSUFEQl9VU0VS
  UPTIME_KUMA_DB_PASSWORD: TUFSSUFEQl9QQVNTV09SRA==

View File

@ -0,0 +1,13 @@
# ClusterIP service exposing the Uptime Kuma web UI (container port "http",
# i.e. 3001).
apiVersion: v1
kind: Service
metadata:
  name: uptime-kuma
  namespace: monitoring
spec:
  selector:
    app: uptime-kuma
  ports:
    - port: 3001
      targetPort: http
      name: http
  type: ClusterIP

View File

@ -0,0 +1,27 @@
# ServiceAccount used by the uptime-kuma-sync CronJob pod.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: uptime-kuma-sync
  namespace: monitoring
---
# Read-only, cluster-wide access to Services (the sync script lists them
# with kubectl).
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: uptime-kuma-sync
rules:
  - apiGroups: [""]
    resources: ["services"]
    verbs: ["get", "list"]
---
# Grants the ClusterRole above to the ServiceAccount.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: uptime-kuma-sync
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: uptime-kuma-sync
subjects:
  - kind: ServiceAccount
    name: uptime-kuma-sync
    namespace: monitoring

View File

@ -0,0 +1,53 @@
# Tasks: stage the uptime-kuma manifests, fetch the secret manifests from
# Bitwarden, then apply the namespace first and everything else afterwards.
- name: Remover o diretório /tmp/monitoring/uptime-kuma/kubernetes-files
  ansible.builtin.file:
    path: /tmp/monitoring/uptime-kuma/kubernetes-files
    state: absent

- name: Criar diretório temporário no remoto
  ansible.builtin.file:
    path: /tmp/monitoring/uptime-kuma/kubernetes-files
    state: directory
    mode: '0755'

- name: Copy file with owner and permissions
  ansible.builtin.copy:
    src: ../files
    dest: /tmp/monitoring/uptime-kuma/kubernetes-files
    owner: fenix
    group: root
    mode: '0644'

# BUG FIX: the secret manifests were written to
# /tmp/monitoring/uptime-kuma/files/, which is outside the directory that
# "kubectl apply" reads below, so they were never applied. They are now
# written next to the other manifests. Also: --passwordenv keeps the vault
# password off the command line (and out of logs), and the environment maps
# the role var `bw_password` — the original referenced an undefined
# `BW_PASSWORD` variable.
- name: Obter várias notas do Bitwarden
  ansible.builtin.shell: |
    echo "unlock"
    BW_SESSION=$(bw unlock --passwordenv BW_PASSWORD --raw)
    echo "get item"
    bw get item "{{ item.id }}" --session $BW_SESSION | jq -r '.notes' > {{ item.dest }}
  loop:
    - { id: "iac.ansible.dockersecrets", dest: "/tmp/monitoring/uptime-kuma/kubernetes-files/files/docker-secrets.yaml" }
    - { id: "iac.ansible.uptimekuma.mariadbsecret", dest: "/tmp/monitoring/uptime-kuma/kubernetes-files/files/uptime-kuma-secret.yaml" }
    - { id: "iac.ansible.uptimekuma.monitorssecret", dest: "/tmp/monitoring/uptime-kuma/kubernetes-files/files/uptime-kuma-monitors-secret.yaml" }
  args:
    executable: /bin/bash
  environment:
    BW_PASSWORD: "{{ bw_password }}"
  no_log: true  # never log vault session tokens or secret contents

# Listing moved after the Bitwarden fetch so the debug output reflects the
# final set of manifests that will be applied.
- name: Listar conteúdo do diretório remoto
  ansible.builtin.shell: ls -l /tmp/monitoring/uptime-kuma/kubernetes-files/files
  register: resultado_ls
  changed_when: false

- name: Mostrar resultado do ls
  ansible.builtin.debug:
    var: resultado_ls.stdout_lines

# Original task name said "stolon" — copy/paste leftover. The namespace is
# applied first so the namespaced resources have somewhere to land.
- name: Aplicar o uptime-kuma
  become: yes
  become_user: fenix
  ansible.builtin.shell: |
    kubectl apply -f /tmp/monitoring/uptime-kuma/kubernetes-files/files/uptime-kuma-namespace.yaml
    kubectl apply -f /tmp/monitoring/uptime-kuma/kubernetes-files/files/
  environment:
    KUBECONFIG: /home/fenix/.kube/config

View File

@ -0,0 +1,4 @@
---
# Bitwarden CLI credentials, injected from the controller's environment.
bw_password: "{{ lookup('env', 'BW_PASSWORD') }}"
VAULTWARDEN_LINK: "{{ lookup('env', 'VAULTWARDEN_LINK') }}"
BW_CLIENTID: "{{ lookup('env', 'BW_CLIENTID') }}"
# Fixed: the original had a space before the colon ("BW_CLIENTSECRET :"),
# which yamllint rejects and which is fragile across parsers.
BW_CLIENTSECRET: "{{ lookup('env', 'BW_CLIENTSECRET') }}"