mirror of
https://gitea.fenix-dev.com/fenix-gitea-admin/iac-ansible-private.git
synced 2026-05-14 00:15:20 +00:00
Compare commits
3 Commits
bdd082160f
...
e12c4a9943
| Author | SHA1 | Date | |
|---|---|---|---|
| e12c4a9943 | |||
| 643959ea2f | |||
| e200933337 |
@ -43,5 +43,9 @@
|
||||
- mangareader
|
||||
- bookshelf
|
||||
- shelfarr
|
||||
- minecraft
|
||||
- lidarr
|
||||
- minecraft
|
||||
- soulseek
|
||||
- soularr
|
||||
- mariadb
|
||||
- uptime-kuma
|
||||
@ -17,7 +17,7 @@ spec:
|
||||
- name: regcred
|
||||
containers:
|
||||
- name: bazarr
|
||||
image: lscr.io/linuxserver/bazarr:1.5.3
|
||||
image: lscr.io/linuxserver/bazarr:1.5.6
|
||||
securityContext:
|
||||
capabilities:
|
||||
add:
|
||||
@ -26,6 +26,8 @@ spec:
|
||||
- containerPort: 6767
|
||||
name: webui
|
||||
env:
|
||||
- name: BAZARR_DEBUG
|
||||
value: "true"
|
||||
- name: PUID
|
||||
value: "1013"
|
||||
- name: PGID
|
||||
@ -58,6 +60,10 @@ spec:
|
||||
mountPath: /config
|
||||
- name: media
|
||||
mountPath: /media
|
||||
- name: lingarrpy
|
||||
mountPath: /app/bazarr/bin/bazarr/subtitles/tools/translate/services/lingarr_translator.py
|
||||
subPath: lingarr_translator.py
|
||||
readOnly: true
|
||||
volumes:
|
||||
- name: config
|
||||
persistentVolumeClaim:
|
||||
@ -65,6 +71,9 @@ spec:
|
||||
- name: media
|
||||
persistentVolumeClaim:
|
||||
claimName: bazarr-media-pvc
|
||||
- name: lingarrpy
|
||||
configMap:
|
||||
name: bazarr-lingarrpy-configmap
|
||||
|
||||
|
||||
|
||||
|
||||
207
roles/bazarr/files/lingarr-configmap.yaml
Normal file
207
roles/bazarr/files/lingarr-configmap.yaml
Normal file
@ -0,0 +1,207 @@
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: bazarr-lingarrpy-configmap
|
||||
namespace: stack-arr
|
||||
data:
|
||||
lingarr_translator.py: |
|
||||
# coding=utf-8
|
||||
|
||||
import logging
|
||||
import pysubs2
|
||||
import requests
|
||||
|
||||
from retry.api import retry
|
||||
from deep_translator.exceptions import TooManyRequests, RequestError
|
||||
|
||||
from app.config import settings
|
||||
from app.database import TableShows, TableEpisodes, TableMovies, database, select
|
||||
from app.jobs_queue import jobs_queue
|
||||
from languages.custom_lang import CustomLanguage
|
||||
from languages.get_languages import alpha3_from_alpha2, language_from_alpha2, language_from_alpha3
|
||||
from radarr.history import history_log_movie
|
||||
from sonarr.history import history_log
|
||||
from subtitles.processing import ProcessSubtitlesResult
|
||||
from utilities.path_mappings import path_mappings
|
||||
|
||||
from ..core.translator_utils import add_translator_info, create_process_result, get_title
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class LingarrTranslatorService:
|
||||
def __init__(self, source_srt_file, dest_srt_file, lang_obj, to_lang, from_lang, media_type,
|
||||
video_path, orig_to_lang, forced, hi, sonarr_series_id, sonarr_episode_id,
|
||||
radarr_id):
|
||||
self.source_srt_file = source_srt_file
|
||||
self.dest_srt_file = dest_srt_file
|
||||
self.lang_obj = lang_obj
|
||||
self.to_lang = to_lang
|
||||
self.from_lang = from_lang
|
||||
self.media_type = media_type
|
||||
self.video_path = video_path
|
||||
self.orig_to_lang = orig_to_lang
|
||||
self.forced = forced
|
||||
self.hi = hi
|
||||
self.sonarr_series_id = sonarr_series_id
|
||||
self.sonarr_episode_id = sonarr_episode_id
|
||||
self.radarr_id = radarr_id
|
||||
self.language_code_convert_dict = {
|
||||
'zh': 'zh-CN',
|
||||
'zt': 'zh-TW',
|
||||
'pb': 'pt-BR',
|
||||
}
|
||||
|
||||
def translate(self, job_id=None):
|
||||
try:
|
||||
jobs_queue.update_job_progress(job_id=job_id, progress_max=1, progress_message=self.source_srt_file)
|
||||
|
||||
subs = pysubs2.load(self.source_srt_file, encoding='utf-8')
|
||||
lines_list = [x.plaintext for x in subs]
|
||||
lines_list_len = len(lines_list)
|
||||
|
||||
if lines_list_len == 0:
|
||||
logger.debug('No lines to translate in subtitle file')
|
||||
return self.dest_srt_file
|
||||
|
||||
logger.debug(f'Starting translation for {self.source_srt_file}')
|
||||
translated_lines = self._translate_content(lines_list, job_id=job_id)
|
||||
|
||||
if translated_lines is None:
|
||||
logger.error(f'Translation failed for {self.source_srt_file}')
|
||||
jobs_queue.update_job_progress(job_id=job_id,
|
||||
progress_message=f'Translation failed for {self.source_srt_file}')
|
||||
return False
|
||||
|
||||
logger.debug(f'BAZARR saving Lingarr translated subtitles to {self.dest_srt_file}')
|
||||
translation_map = {}
|
||||
for item in translated_lines:
|
||||
if isinstance(item, dict) and 'position' in item and 'line' in item:
|
||||
translation_map[item['position']] = item['line']
|
||||
|
||||
for i, line in enumerate(subs):
|
||||
if i in translation_map and translation_map[i]:
|
||||
line.text = translation_map[i]
|
||||
|
||||
try:
|
||||
subs.save(self.dest_srt_file)
|
||||
add_translator_info(self.dest_srt_file, f"# Subtitles translated with Lingarr # ")
|
||||
except OSError:
|
||||
logger.error(f'BAZARR is unable to save translated subtitles to {self.dest_srt_file}')
|
||||
jobs_queue.update_job_progress(job_id=job_id,
|
||||
progress_message=f'Translation failed: Unable to save translated '
|
||||
f'subtitles to {self.dest_srt_file}')
|
||||
raise OSError
|
||||
|
||||
message = (f"{language_from_alpha2(self.from_lang)} subtitles translated to "
|
||||
f"{language_from_alpha3(self.to_lang)} using Lingarr.")
|
||||
result = create_process_result(message, self.video_path, self.orig_to_lang, self.forced, self.hi,
|
||||
self.dest_srt_file, self.media_type)
|
||||
|
||||
if self.media_type == 'series':
|
||||
history_log(action=6,
|
||||
sonarr_series_id=self.sonarr_series_id,
|
||||
sonarr_episode_id=self.sonarr_episode_id,
|
||||
result=result)
|
||||
else:
|
||||
history_log_movie(action=6,
|
||||
radarr_id=self.radarr_id,
|
||||
result=result)
|
||||
|
||||
jobs_queue.update_job_progress(job_id=job_id, progress_value='max')
|
||||
|
||||
return self.dest_srt_file
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f'BAZARR encountered an error during Lingarr translation: {str(e)}')
|
||||
jobs_queue.update_job_progress(job_id=job_id, progress_message=f'Lingarr translation failed: {str(e)}')
|
||||
return False
|
||||
|
||||
@retry(exceptions=(TooManyRequests, RequestError, requests.exceptions.RequestException), tries=3, delay=1,
|
||||
backoff=2, jitter=(0, 1))
|
||||
def _translate_content(self, lines_list, job_id):
|
||||
try:
|
||||
source_lang = self.language_code_convert_dict.get(self.from_lang, self.from_lang)
|
||||
target_lang = self.language_code_convert_dict.get(self.orig_to_lang, self.orig_to_lang)
|
||||
|
||||
lines_payload = []
|
||||
for i, line in enumerate(lines_list):
|
||||
lines_payload.append({
|
||||
"position": i,
|
||||
"line": line if line and line.strip() else 'a'
|
||||
})
|
||||
|
||||
title = get_title(
|
||||
media_type=self.media_type,
|
||||
radarr_id=self.radarr_id,
|
||||
sonarr_series_id=self.sonarr_series_id,
|
||||
sonarr_episode_id=self.sonarr_episode_id
|
||||
)
|
||||
|
||||
if self.media_type == 'series':
|
||||
api_media_type = "Episode"
|
||||
arr_media_id = self.sonarr_series_id or 0
|
||||
else:
|
||||
api_media_type = "Movie"
|
||||
arr_media_id = self.radarr_id or 0
|
||||
|
||||
payload = {
|
||||
"arrMediaId": arr_media_id,
|
||||
"title": title,
|
||||
"sourceLanguage": source_lang,
|
||||
"targetLanguage": target_lang,
|
||||
"mediaType": api_media_type,
|
||||
"lines": lines_payload
|
||||
}
|
||||
|
||||
logger.debug(f'BAZARR is sending {len(lines_payload)} lines to Lingarr with full media context')
|
||||
|
||||
headers = {"Content-Type": "application/json"}
|
||||
if settings.translator.lingarr_token:
|
||||
headers["X-Api-Key"] = settings.translator.lingarr_token
|
||||
|
||||
response = requests.post(
|
||||
f"{settings.translator.lingarr_url}/api/translate/content",
|
||||
json=payload,
|
||||
headers=headers,
|
||||
timeout=1800
|
||||
)
|
||||
|
||||
if response.status_code == 200:
|
||||
translated_batch = response.json()
|
||||
# Validate response
|
||||
if isinstance(translated_batch, list):
|
||||
for item in translated_batch:
|
||||
if not isinstance(item, dict) or 'position' not in item or 'line' not in item:
|
||||
logger.error(f'Invalid response format from Lingarr API: {item}')
|
||||
return None
|
||||
return translated_batch
|
||||
else:
|
||||
logger.error(f'Unexpected response format from Lingarr API: {translated_batch}')
|
||||
return None
|
||||
elif response.status_code == 401:
|
||||
raise RequestError("Authentication failed: Invalid or missing API key")
|
||||
elif response.status_code == 429:
|
||||
raise TooManyRequests("Rate limit exceeded")
|
||||
elif response.status_code >= 500:
|
||||
raise RequestError(f"Server error: {response.status_code}")
|
||||
else:
|
||||
logger.debug(f'Lingarr API error: {response.status_code} - {response.text}')
|
||||
return None
|
||||
|
||||
except requests.exceptions.Timeout:
|
||||
logger.debug('Lingarr API request timed out')
|
||||
raise RequestError("Request timed out")
|
||||
except requests.exceptions.ConnectionError:
|
||||
logger.debug('Lingarr API connection error')
|
||||
raise RequestError("Connection error")
|
||||
except requests.exceptions.RequestException as e:
|
||||
logger.debug(f'Lingarr API request failed: {str(e)}')
|
||||
raise
|
||||
except (TooManyRequests, RequestError) as e:
|
||||
logger.error(f'Lingarr API error after retries: {str(e)}')
|
||||
jobs_queue.update_job_progress(job_id=job_id, progress_message=f'Lingarr API error: {str(e)}')
|
||||
raise
|
||||
except Exception as e:
|
||||
logger.error(f'Unexpected error in Lingarr translation: {str(e)}')
|
||||
jobs_queue.update_job_progress(job_id=job_id, progress_message=f'Translation error: {str(e)}')
|
||||
BIN
roles/bazarr/files/teste.yml
Normal file
BIN
roles/bazarr/files/teste.yml
Normal file
Binary file not shown.
24
roles/lingarr/files/lingarr-configmap.yaml
Normal file
24
roles/lingarr/files/lingarr-configmap.yaml
Normal file
@ -0,0 +1,24 @@
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: lingarr-configmap
|
||||
namespace: stack-arr
|
||||
data:
|
||||
Lingarr.Server.runtimeconfig.json: |
|
||||
{
|
||||
"runtimeOptions": {
|
||||
"tfm": "net9.0",
|
||||
"frameworks": [
|
||||
{ "name": "Microsoft.NETCore.App", "version": "9.0.0" },
|
||||
{ "name": "Microsoft.AspNetCore.App", "version": "9.0.0" }
|
||||
],
|
||||
"configProperties": {
|
||||
"System.GC.Server": true,
|
||||
"System.Globalization.Invariant": false,
|
||||
"System.Reflection.Metadata.MetadataUpdater.IsSupported": false,
|
||||
"System.Reflection.NullabilityInfoContext.IsSupported": true,
|
||||
"System.Runtime.Serialization.EnableUnsafeBinaryFormatterSerialization": false,
|
||||
"Npgsql.EnableLegacyTimestampBehavior": true
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -15,10 +15,12 @@ spec:
|
||||
spec:
|
||||
containers:
|
||||
- name: lingarr
|
||||
image: lingarr/lingarr:latest
|
||||
image: lingarr/lingarr:main
|
||||
ports:
|
||||
- containerPort: 9876
|
||||
env:
|
||||
- name: TZ
|
||||
value: "UTC"
|
||||
- name: ASPNETCORE_URLS
|
||||
value: "http://+:9876"
|
||||
- name: WHISPER_BASE_URL
|
||||
@ -27,10 +29,48 @@ spec:
|
||||
value: "auto"
|
||||
- name: TARGET_LANGUAGE
|
||||
value: "pt"
|
||||
- name: DB_CONNECTION
|
||||
value: postgresql
|
||||
- name: DB_HOST
|
||||
value: 'stolon-proxy-service.postgresql.svc.cluster.local'
|
||||
- name: DB_PORT
|
||||
value: '5432'
|
||||
- name: DB_USERNAME
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
name: lingarr-secret
|
||||
key: username
|
||||
- name: DB_PASSWORD
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
name: lingarr-secret
|
||||
key: password
|
||||
- name: DB_DATABASE
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
name: lingarr-secret
|
||||
key: maindb
|
||||
volumeMounts:
|
||||
- name: config
|
||||
mountPath: /app/config
|
||||
- name: runtimeconfig
|
||||
mountPath: /app/Lingarr.Server.runtimeconfig.json
|
||||
subPath: Lingarr.Server.runtimeconfig.json
|
||||
readOnly: true
|
||||
- name: tv
|
||||
mountPath: /tv
|
||||
- name: anime
|
||||
mountPath: /anime
|
||||
volumes:
|
||||
- name: config
|
||||
persistentVolumeClaim:
|
||||
claimName: lingarr-config-pvc
|
||||
- name: runtimeconfig
|
||||
configMap:
|
||||
name: lingarr-configmap
|
||||
- name: tv
|
||||
persistentVolumeClaim:
|
||||
claimName: sonarr-tv-pvc
|
||||
- name: anime
|
||||
persistentVolumeClaim:
|
||||
claimName: sonarr-anime-pvc
|
||||
|
||||
10
roles/lingarr/files/lingarr-secret.yaml
Normal file
10
roles/lingarr/files/lingarr-secret.yaml
Normal file
@ -0,0 +1,10 @@
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: lingarr-secret
|
||||
namespace: stack-arr
|
||||
type: Opaque
|
||||
data:
|
||||
username: dXNlcm5hbWU=
|
||||
password: cGFzc3dvcmQ=
|
||||
maindb: bWFpbmRiLXByb3dsYXJy
|
||||
@ -18,18 +18,18 @@
|
||||
mode: '0644'
|
||||
|
||||
|
||||
#- name: Obter várias notas do Bitwarden
|
||||
# shell: |
|
||||
# echo "unlock"
|
||||
# BW_SESSION=$(bw unlock {{ bw_password }} --raw)
|
||||
# echo "get item"
|
||||
# bw get item "{{ item.id }}" --session $BW_SESSION | jq -r '.notes' > {{ item.dest }}
|
||||
# loop:
|
||||
# - { id: "iac.ansible.stackarr.radarr.secret", dest: "/tmp/stack-arr/radarr/kubernetes-files/files/radarr-secret.yaml" }
|
||||
# args:
|
||||
# executable: /bin/bash
|
||||
# environment:
|
||||
# BW_PASSWORD: "{{ BW_PASSWORD }}"
|
||||
- name: Obter várias notas do Bitwarden
|
||||
shell: |
|
||||
echo "unlock"
|
||||
BW_SESSION=$(bw unlock {{ bw_password }} --raw)
|
||||
echo "get item"
|
||||
bw get item "{{ item.id }}" --session $BW_SESSION | jq -r '.notes' > {{ item.dest }}
|
||||
loop:
|
||||
- { id: "iac.ansible.stackarr.lingarr.secret", dest: "/tmp/stack-arr/lingarr/kubernetes-files/files/lingarr-secret.yaml" }
|
||||
args:
|
||||
executable: /bin/bash
|
||||
environment:
|
||||
BW_PASSWORD: "{{ BW_PASSWORD }}"
|
||||
|
||||
|
||||
- name: Listar conteúdo do diretório remoto
|
||||
|
||||
9
roles/mariadb/files/docker-secrets.yaml
Normal file
9
roles/mariadb/files/docker-secrets.yaml
Normal file
@ -0,0 +1,9 @@
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: regcred
|
||||
namespace: mariadb
|
||||
data:
|
||||
.dockerconfigjson: >-
|
||||
eyJhdXRocyI6eyJodHRwczovL2luZGV4LmRvY2tlci5pby92MS8iOnsidXNlcm5hbWUiOiJ1c2VyIiwicGFzc3dvcmQiOiJwYXNzIiwiYXV0aCI6ImRmamlla2ZlcldFS1dFa29mY2RrbzM0MzUzZmQ9In19fQ==
|
||||
type: kubernetes.io/dockerconfigjson
|
||||
10
roles/mariadb/files/mariadb-configmap.yaml
Normal file
10
roles/mariadb/files/mariadb-configmap.yaml
Normal file
@ -0,0 +1,10 @@
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: mariadb-config
|
||||
namespace: mariadb
|
||||
data:
|
||||
my.cnf: |
|
||||
[mysqld]
|
||||
innodb_use_native_aio=0
|
||||
innodb_flush_method=fsync
|
||||
4
roles/mariadb/files/mariadb-namespace.yaml
Normal file
4
roles/mariadb/files/mariadb-namespace.yaml
Normal file
@ -0,0 +1,4 @@
|
||||
apiVersion: v1
|
||||
kind: Namespace
|
||||
metadata:
|
||||
name: mariadb
|
||||
11
roles/mariadb/files/mariadb-nfs-csi.yaml
Normal file
11
roles/mariadb/files/mariadb-nfs-csi.yaml
Normal file
@ -0,0 +1,11 @@
|
||||
apiVersion: storage.k8s.io/v1
|
||||
kind: StorageClass
|
||||
metadata:
|
||||
name: mariadb-nfs-csi
|
||||
namespace: mariadb
|
||||
provisioner: nfs.csi.k8s.io
|
||||
parameters:
|
||||
server: 192.168.1.22
|
||||
share: /mnt/fenix-main-nas-pool-0/data/k8s-Volumes/k8s-cluster-iac-deployed/mariadb
|
||||
allowVolumeExpansion: true
|
||||
reclaimPolicy: Retain
|
||||
31
roles/mariadb/files/mariadb-pvcs.yaml
Normal file
31
roles/mariadb/files/mariadb-pvcs.yaml
Normal file
@ -0,0 +1,31 @@
|
||||
apiVersion: v1
|
||||
kind: PersistentVolume
|
||||
metadata:
|
||||
name: mariadb-pv-0
|
||||
namespace: mariadb
|
||||
spec:
|
||||
capacity:
|
||||
storage: 50Gi
|
||||
storageClassName: mariadb-nfs-csi
|
||||
accessModes:
|
||||
- ReadWriteOnce
|
||||
persistentVolumeReclaimPolicy: Retain
|
||||
nfs:
|
||||
server: 192.168.1.22
|
||||
path: /mnt/fenix-main-nas-pool-0/data/k8s-Volumes/k8s-cluster-iac-deployed/mariadb
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: PersistentVolumeClaim
|
||||
metadata:
|
||||
name: mariadb-data-mariadb-statefulset-0
|
||||
namespace: mariadb
|
||||
spec:
|
||||
storageClassName: mariadb-nfs-csi
|
||||
accessModes:
|
||||
- ReadWriteOnce
|
||||
volumeName: mariadb-pv-0
|
||||
resources:
|
||||
requests:
|
||||
storage: 50Gi
|
||||
---
|
||||
|
||||
11
roles/mariadb/files/mariadb-secret.yaml
Normal file
11
roles/mariadb/files/mariadb-secret.yaml
Normal file
@ -0,0 +1,11 @@
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: mariadb-secret
|
||||
namespace: mariadb
|
||||
type: Opaque
|
||||
data:
|
||||
MARIADB_ROOT_PASSWORD: TUFSSUFEQl9ST09UX1BBU1NXT1JE
|
||||
MARIADB_DATABASE: TUFSSUFEQl9EQVRBQkFTRQ==
|
||||
MARIADB_USER: TUFSSUFEQl9VU0VS
|
||||
MARIADB_PASSWORD: TUFSSUFEQl9QQVNTV09SRA==
|
||||
13
roles/mariadb/files/mariadb-service.yaml
Normal file
13
roles/mariadb/files/mariadb-service.yaml
Normal file
@ -0,0 +1,13 @@
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: mariadb-service
|
||||
namespace: mariadb
|
||||
spec:
|
||||
ports:
|
||||
- port: 3306
|
||||
targetPort: 3306
|
||||
selector:
|
||||
app: mariadb-statefulset
|
||||
type: LoadBalancer
|
||||
loadBalancerIP: 10.240.0.102
|
||||
50
roles/mariadb/files/mariadb-statefulset.yaml
Normal file
50
roles/mariadb/files/mariadb-statefulset.yaml
Normal file
@ -0,0 +1,50 @@
|
||||
apiVersion: apps/v1
|
||||
kind: StatefulSet
|
||||
metadata:
|
||||
name: mariadb-statefulset
|
||||
namespace: mariadb
|
||||
spec:
|
||||
serviceName: "mariadb-statefulset"
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
app: mariadb-statefulset
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: mariadb-statefulset
|
||||
spec:
|
||||
imagePullSecrets:
|
||||
- name: regcred
|
||||
containers:
|
||||
- name: mariadb-statefulset
|
||||
image: mariadb:11
|
||||
ports:
|
||||
- containerPort: 3306
|
||||
envFrom:
|
||||
- secretRef:
|
||||
name: mariadb-secret
|
||||
volumeMounts:
|
||||
- mountPath: /var/lib/mysql
|
||||
name: mariadb-data
|
||||
- mountPath: /etc/mysql/conf.d/my.cnf
|
||||
name: mariadb-config
|
||||
subPath: my.cnf
|
||||
volumes:
|
||||
- name: mariadb-config
|
||||
configMap:
|
||||
name: mariadb-config
|
||||
|
||||
volumeClaimTemplates:
|
||||
- kind: PersistentVolumeClaim
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: mariadb-data
|
||||
namespace: mariadb
|
||||
spec:
|
||||
accessModes:
|
||||
- ReadWriteOnce
|
||||
resources:
|
||||
requests:
|
||||
storage: 50Gi
|
||||
|
||||
52
roles/mariadb/tasks/main.yml
Normal file
52
roles/mariadb/tasks/main.yml
Normal file
@ -0,0 +1,52 @@
|
||||
- name: Remover o diretório /tmp/mariadb/kubernetes-files
|
||||
ansible.builtin.file:
|
||||
path: /tmp/mariadb/kubernetes-files
|
||||
state: absent
|
||||
|
||||
- name: Criar diretório temporário no remoto
|
||||
file:
|
||||
path: /tmp/mariadb/kubernetes-files
|
||||
state: directory
|
||||
mode: '0755'
|
||||
|
||||
- name: Copy file with owner and permissions
|
||||
ansible.builtin.copy:
|
||||
src: ../files
|
||||
dest: /tmp/mariadb/kubernetes-files
|
||||
owner: fenix
|
||||
group: root
|
||||
mode: '0644'
|
||||
|
||||
- name: Listar conteúdo do diretório remoto
|
||||
shell: ls -l /tmp/mariadb/kubernetes-files/files
|
||||
register: resultado_ls
|
||||
|
||||
|
||||
- name: Obter várias notas do Bitwarden
|
||||
shell: |
|
||||
echo "unlock"
|
||||
BW_SESSION=$(bw unlock {{ bw_password }} --raw)
|
||||
echo "get item"
|
||||
bw get item "{{ item.id }}" --session $BW_SESSION | jq -r '.notes' > {{ item.dest }}
|
||||
loop:
|
||||
- { id: "iac.ansible.dockersecrets", dest: "/tmp/mariadb/kubernetes-files/files/docker-secrets.yaml" }
|
||||
- { id: "iac.ansible.mariadb.secret", dest: "/tmp/mariadb/kubernetes-files/files/mariadb-secret.yaml" }
|
||||
args:
|
||||
executable: /bin/bash
|
||||
environment:
|
||||
BW_PASSWORD: "{{ BW_PASSWORD }}"
|
||||
|
||||
|
||||
- name: Mostrar resultado do ls
|
||||
debug:
|
||||
var: resultado_ls.stdout_lines
|
||||
|
||||
- name: Aplicar o mariadb
|
||||
become: yes
|
||||
become_user: fenix
|
||||
shell: |
|
||||
kubectl apply -f /tmp/mariadb/kubernetes-files/files/mariadb-namespace.yaml
|
||||
kubectl apply -f /tmp/mariadb/kubernetes-files/files/
|
||||
environment:
|
||||
KUBECONFIG: /home/fenix/.kube/config
|
||||
|
||||
4
roles/mariadb/vars/main.yml
Normal file
4
roles/mariadb/vars/main.yml
Normal file
@ -0,0 +1,4 @@
|
||||
bw_password: "{{ lookup('env', 'BW_PASSWORD') }}"
|
||||
VAULTWARDEN_LINK: "{{ lookup('env', 'VAULTWARDEN_LINK') }}"
|
||||
BW_CLIENTID: "{{ lookup('env', 'BW_CLIENTID') }}"
|
||||
BW_CLIENTSECRET : "{{ lookup('env', 'BW_CLIENTSECRET') }}"
|
||||
@ -1,18 +1,18 @@
|
||||
- name: Remover o diretório /tmp/stack-arr/lidarr/kubernetes-files
|
||||
- name: Remover o diretório /tmp/stack-arr/soularr/kubernetes-files
|
||||
ansible.builtin.file:
|
||||
path: /tmp/stack-arr/lidarr/kubernetes-files
|
||||
path: /tmp/stack-arr/soularr/kubernetes-files
|
||||
state: absent
|
||||
|
||||
- name: Criar diretório temporário no remoto
|
||||
file:
|
||||
path: /tmp/stack-arr/lidarr/kubernetes-files
|
||||
path: /tmp/stack-arr/soularr/kubernetes-files
|
||||
state: directory
|
||||
mode: '0755'
|
||||
|
||||
- name: Copy file with owner and permissions
|
||||
ansible.builtin.copy:
|
||||
src: ../files
|
||||
dest: /tmp/stack-arr/lidarr/kubernetes-files
|
||||
dest: /tmp/stack-arr/soularr/kubernetes-files
|
||||
owner: fenix
|
||||
group: root
|
||||
mode: '0644'
|
||||
@ -25,7 +25,7 @@
|
||||
echo "get item"
|
||||
bw get item "{{ item.id }}" --session $BW_SESSION | jq -r '.notes' > {{ item.dest }}
|
||||
loop:
|
||||
- { id: "iac.ansible.stackarr.lidarr.secret", dest: "/tmp/stack-arr/lidarr/kubernetes-files/files/lidarr-secret.yaml" }
|
||||
- { id: "iac.ansible.stackarr.soularr.secret", dest: "/tmp/stack-arr/soularr/kubernetes-files/files/soularr-secret.yaml" }
|
||||
args:
|
||||
executable: /bin/bash
|
||||
environment:
|
||||
@ -33,7 +33,7 @@
|
||||
|
||||
|
||||
- name: Listar conteúdo do diretório remoto
|
||||
shell: ls -l /tmp/stack-arr/lidarr/kubernetes-files/files
|
||||
shell: ls -l /tmp/stack-arr/soularr/kubernetes-files/files
|
||||
register: resultado_ls
|
||||
|
||||
|
||||
@ -46,6 +46,6 @@
|
||||
become: yes
|
||||
become_user: fenix
|
||||
shell: |
|
||||
kubectl apply -f /tmp/stack-arr/lidarr/kubernetes-files/files/
|
||||
kubectl apply -f /tmp/stack-arr/soularr/kubernetes-files/files/
|
||||
environment:
|
||||
KUBECONFIG: /home/fenix/.kube/config
|
||||
@ -60,6 +60,8 @@ spec:
|
||||
env:
|
||||
- name: SLSKD_FLAGS_NO_SQLITE_POOLING
|
||||
value: "true"
|
||||
- name: SLSKD_FLAGS_VOLATILE_AGENT_TOKEN
|
||||
value: "true"
|
||||
- name: TZ
|
||||
value: 'Etc/UTC'
|
||||
- name: UID
|
||||
@ -70,7 +72,7 @@ spec:
|
||||
- name: app
|
||||
mountPath: /app
|
||||
- name: media
|
||||
mountPath: /data
|
||||
mountPath: /downloads
|
||||
volumes:
|
||||
- name: app
|
||||
persistentVolumeClaim:
|
||||
|
||||
Binary file not shown.
Binary file not shown.
9
roles/uptime-kuma/files/docker-secrets.yaml
Normal file
9
roles/uptime-kuma/files/docker-secrets.yaml
Normal file
@ -0,0 +1,9 @@
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: regcred
|
||||
namespace: monitoring
|
||||
data:
|
||||
.dockerconfigjson: >-
|
||||
eyJhdXRocyI6eyJodHRwczovL2luZGV4LmRvY2tlci5pby92MS8iOnsidXNlcm5hbWUiOiJ1c2VyIiwicGFzc3dvcmQiOiJwYXNzIiwiYXV0aCI6ImRmamlla2ZlcldFS1dFa29mY2RrbzM0MzUzZmQ9In19fQ==
|
||||
type: kubernetes.io/dockerconfigjson
|
||||
@ -0,0 +1,230 @@
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: uptime-kuma-sync-script
|
||||
namespace: monitoring
|
||||
data:
|
||||
sync.py: |
|
||||
import subprocess
|
||||
import sys
|
||||
import time
|
||||
import inspect
|
||||
import types
|
||||
|
||||
subprocess.run([sys.executable, "-m", "pip", "install", "uptime-kuma-api-v2", "--quiet"], check=True)
|
||||
|
||||
subprocess.run([
|
||||
"bash", "-c",
|
||||
"curl -LO https://dl.k8s.io/release/$(curl -Ls https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl && "
|
||||
"chmod +x kubectl && mv kubectl /usr/local/bin/kubectl"
|
||||
], check=True)
|
||||
|
||||
from uptime_kuma_api import UptimeKumaApi, MonitorType
|
||||
import os
|
||||
|
||||
# ============================================================
|
||||
# CONFIGURAÇÃO
|
||||
# ============================================================
|
||||
NOTIFICATION_IDS = [1]
|
||||
FIXED_TAGS = ["k8s", "IAC"]
|
||||
STATUS_PAGE_SLUG = "fenix"
|
||||
STATUS_PAGE_TITLE = "Fenix IAC"
|
||||
# ============================================================
|
||||
|
||||
UPTIME_KUMA_URL = os.environ["UPTIME_KUMA_URL"]
|
||||
USERNAME = os.environ["USERNAME"]
|
||||
PASSWORD = os.environ["PASSWORD"]
|
||||
|
||||
print("==> A autenticar no Uptime Kuma...")
|
||||
api = UptimeKumaApi(UPTIME_KUMA_URL)
|
||||
api.login(USERNAME, PASSWORD)
|
||||
print("==> Autenticado com sucesso")
|
||||
|
||||
# ── Monkey-patch _build_status_page_data ─────────────────────
|
||||
original_build = api._build_status_page_data.__func__
|
||||
|
||||
def patched_build(self, **kwargs):
|
||||
result = original_build(self, **kwargs)
|
||||
print(f" [DEBUG] type(result): {type(result)}")
|
||||
print(f" [DEBUG] result: {result}")
|
||||
slug, data, icon, public_group_list = result
|
||||
data.pop("googleAnalyticsId", None)
|
||||
return (slug, data, icon, public_group_list)
|
||||
|
||||
api._build_status_page_data = types.MethodType(patched_build, api)
|
||||
print("==> Patch aplicado ao _build_status_page_data")
|
||||
|
||||
# ── Tags ─────────────────────────────────────────────────────
|
||||
print("==> A sincronizar tags...")
|
||||
existing_tags = {t["name"]: t["id"] for t in api.get_tags()}
|
||||
|
||||
def ensure_tag(name, color="#0099ff"):
|
||||
if name not in existing_tags:
|
||||
print(f" [TAG] A criar tag '{name}'...")
|
||||
result = api.add_tag(name=name, color=color)
|
||||
existing_tags[name] = result["id"]
|
||||
return existing_tags[name]
|
||||
|
||||
ensure_tag("k8s", color="#326CE5")
|
||||
ensure_tag("IAC", color="#7B42BC")
|
||||
|
||||
# ── Monitores existentes ──────────────────────────────────────
|
||||
print("==> A obter monitores existentes...")
|
||||
existing_monitors = api.get_monitors()
|
||||
existing_names = {m["name"] for m in existing_monitors}
|
||||
print(f" {len(existing_names)} monitores existentes")
|
||||
|
||||
# ── Garantir grupo fenix ──────────────────────────────────────
|
||||
print("==> A verificar grupo 'fenix'...")
|
||||
fenix_group_id = None
|
||||
for m in existing_monitors:
|
||||
if m["name"] == "fenix" and m["type"] == "group":
|
||||
fenix_group_id = m["id"]
|
||||
print(f" [OK] Grupo 'fenix' já existe (ID: {fenix_group_id})")
|
||||
break
|
||||
|
||||
if fenix_group_id is None:
|
||||
print(" [CRIAR] A criar grupo 'fenix'...")
|
||||
group = api.add_monitor(type=MonitorType.GROUP, name="fenix")
|
||||
fenix_group_id = group["monitorID"]
|
||||
print(f" [OK] Grupo 'fenix' criado (ID: {fenix_group_id})")
|
||||
|
||||
# ── Services do cluster ───────────────────────────────────────
|
||||
print("==> A listar Services do cluster...")
|
||||
result = subprocess.run(
|
||||
[
|
||||
"kubectl", "get", "svc", "-A", "--no-headers",
|
||||
"-o", "custom-columns=NAMESPACE:.metadata.namespace,NAME:.metadata.name,PORT:.spec.ports[0].port,TYPE:.spec.type"
|
||||
],
|
||||
capture_output=True, text=True
|
||||
)
|
||||
|
||||
services = []
|
||||
for line in result.stdout.strip().split("\n"):
|
||||
parts = line.split()
|
||||
if len(parts) < 3:
|
||||
continue
|
||||
namespace, name, port = parts[0], parts[1], parts[2]
|
||||
if name == "kubernetes" or port == "<none>":
|
||||
continue
|
||||
services.append((namespace, name, port))
|
||||
|
||||
print(f" {len(services)} services encontrados")
|
||||
|
||||
# ── Criar monitores ───────────────────────────────────────────
|
||||
created = 0
|
||||
skipped = 0
|
||||
|
||||
for namespace, name, port in services:
|
||||
monitor_name = f"{namespace}/{name}"
|
||||
hostname = f"{name}.{namespace}.svc.cluster.local"
|
||||
|
||||
if monitor_name in existing_names:
|
||||
print(f" [SKIP] {monitor_name}")
|
||||
skipped += 1
|
||||
continue
|
||||
|
||||
print(f" [CRIAR] {monitor_name} ({hostname}:{port})")
|
||||
try:
|
||||
ensure_tag(namespace, color="#10B981")
|
||||
|
||||
monitor = api.add_monitor(
|
||||
type=MonitorType.PORT,
|
||||
name=monitor_name,
|
||||
hostname=hostname,
|
||||
port=int(port),
|
||||
interval=60,
|
||||
retryInterval=60,
|
||||
maxretries=3,
|
||||
parent=fenix_group_id,
|
||||
notificationIDList={str(nid): True for nid in NOTIFICATION_IDS},
|
||||
)
|
||||
|
||||
monitor_id = monitor["monitorID"]
|
||||
|
||||
api.add_monitor_tag(tag_id=existing_tags["k8s"], monitor_id=monitor_id)
|
||||
api.add_monitor_tag(tag_id=existing_tags["IAC"], monitor_id=monitor_id)
|
||||
api.add_monitor_tag(tag_id=existing_tags[namespace], monitor_id=monitor_id)
|
||||
|
||||
print(f" [OK] {monitor_name} criado com tags e notificações")
|
||||
created += 1
|
||||
|
||||
except Exception as e:
|
||||
print(f" [ERRO] {monitor_name}: {e}")
|
||||
|
||||
# ── Refrescar lista de monitores após criação ─────────────────
|
||||
existing_monitors = api.get_monitors()
|
||||
|
||||
# ── Status Page ───────────────────────────────────────────────
|
||||
print("==> A atualizar status page...")
|
||||
|
||||
try:
|
||||
existing_pages = api.get_status_pages()
|
||||
page_exists = any(p["slug"] == STATUS_PAGE_SLUG for p in existing_pages)
|
||||
|
||||
if not page_exists:
|
||||
print(f" [CRIAR] A criar status page '{STATUS_PAGE_SLUG}'...")
|
||||
api.add_status_page(STATUS_PAGE_SLUG, STATUS_PAGE_TITLE)
|
||||
time.sleep(5)
|
||||
print(f" [OK] Status page criada")
|
||||
|
||||
current = api.get_status_page(STATUS_PAGE_SLUG)
|
||||
|
||||
all_fenix_monitor_ids = [m["id"] for m in existing_monitors if m.get("parent") == fenix_group_id]
|
||||
|
||||
existing_in_page = []
|
||||
for group in current.get("publicGroupList", []):
|
||||
for mon in group.get("monitorList", []):
|
||||
existing_in_page.append(mon["id"])
|
||||
|
||||
missing_ids = [mid for mid in all_fenix_monitor_ids if mid not in existing_in_page]
|
||||
|
||||
print(f" [DEBUG] all_fenix_monitor_ids: {all_fenix_monitor_ids}")
|
||||
print(f" [DEBUG] missing_ids: {missing_ids}")
|
||||
|
||||
if not missing_ids:
|
||||
print(f" [SKIP] Todos os monitores já estão na status page")
|
||||
else:
|
||||
public_group_list = current.get("publicGroupList", [])
|
||||
|
||||
if public_group_list:
|
||||
for mid in missing_ids:
|
||||
public_group_list[0]["monitorList"].append({"id": mid})
|
||||
else:
|
||||
public_group_list = [
|
||||
{
|
||||
"name": "Fenix IAC K8s",
|
||||
"weight": 1,
|
||||
"monitorList": [{"id": mid} for mid in all_fenix_monitor_ids],
|
||||
}
|
||||
]
|
||||
|
||||
print(f" [DEBUG] publicGroupList: {public_group_list}")
|
||||
|
||||
api.save_status_page(
|
||||
slug=STATUS_PAGE_SLUG,
|
||||
id=current["id"],
|
||||
title=current.get("title", STATUS_PAGE_TITLE),
|
||||
description=current.get("description"),
|
||||
theme=current.get("theme", "auto"),
|
||||
published=current.get("published", True),
|
||||
showTags=current.get("showTags", True),
|
||||
domainNameList=current.get("domainNameList", []),
|
||||
customCSS=current.get("customCSS") or "",
|
||||
footerText=current.get("footerText"),
|
||||
showPoweredBy=current.get("showPoweredBy", True),
|
||||
showCertificateExpiry=current.get("showCertificateExpiry", False),
|
||||
icon=current.get("icon", "/icon.svg"),
|
||||
publicGroupList=public_group_list,
|
||||
)
|
||||
|
||||
print(f" [OK] Status page atualizada — {len(missing_ids)} monitores adicionados")
|
||||
print(f" URL: {UPTIME_KUMA_URL}/status/{STATUS_PAGE_SLUG}")
|
||||
|
||||
except Exception as e:
|
||||
print(f" [ERRO] Status page: {e}")
|
||||
import traceback
|
||||
traceback.print_exc()
|
||||
|
||||
print(f"==> Sync concluído — {created} criados, {skipped} ignorados")
|
||||
api.disconnect()
|
||||
41
roles/uptime-kuma/files/uptime-kuma-cronjob-monitors.yaml
Normal file
41
roles/uptime-kuma/files/uptime-kuma-cronjob-monitors.yaml
Normal file
@ -0,0 +1,41 @@
|
||||
# CronJob: hourly sync of Uptime Kuma monitors/status page from cluster state.
# Runs the ConfigMap-mounted /scripts/sync.py against the Uptime Kuma API.
apiVersion: batch/v1
kind: CronJob
metadata:
  name: uptime-kuma-sync
  namespace: monitoring
spec:
  schedule: "0 * * * *"  # every hour, on the hour
  jobTemplate:
    spec:
      template:
        spec:
          serviceAccountName: uptime-kuma-sync  # RBAC: get/list Services
          restartPolicy: OnFailure
          containers:
            - name: sync
              image: python:3.12-slim
              # The base image ships only the Python stdlib, so the job must
              # install its own tooling on every run:
              #   - curl + kubectl (the sync script lists cluster Services)
              #   - uptime-kuma-api (the script drives api.save_status_page etc.;
              #     without this pip install the job fails on import)
              command:
                - bash
                - -c
                - >-
                  apt-get update -q &&
                  apt-get install -y -q curl &&
                  curl -LO "https://dl.k8s.io/release/$(curl -Ls https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl" &&
                  chmod +x kubectl &&
                  mv kubectl /usr/local/bin/ &&
                  pip install -q uptime-kuma-api &&
                  python /scripts/sync.py
              env:
                # Credentials and base URL for the Uptime Kuma API.
                - name: USERNAME
                  valueFrom:
                    secretKeyRef:
                      name: uptime-kuma-api-secret
                      key: USERNAME
                - name: PASSWORD
                  valueFrom:
                    secretKeyRef:
                      name: uptime-kuma-api-secret
                      key: PASSWORD
                - name: UPTIME_KUMA_URL
                  valueFrom:
                    secretKeyRef:
                      name: uptime-kuma-api-secret
                      key: UPTIME_KUMA_URL
              volumeMounts:
                - name: script
                  mountPath: /scripts
          volumes:
            - name: script
              configMap:
                name: uptime-kuma-sync-script
                defaultMode: 0755
|
||||
46
roles/uptime-kuma/files/uptime-kuma-deployment.yaml
Normal file
46
roles/uptime-kuma/files/uptime-kuma-deployment.yaml
Normal file
@ -0,0 +1,46 @@
|
||||
# Deployment for Uptime Kuma, backed by MariaDB (see env/envFrom below).
apiVersion: apps/v1
kind: Deployment
metadata:
  name: uptime-kuma
  namespace: monitoring
spec:
  replicas: 1
  selector:
    matchLabels:
      app: uptime-kuma
  strategy:
    # Recreate (not RollingUpdate): the data PVC is ReadWriteOnce, so two pods
    # must never overlap during a rollout. (Previous comment blamed SQLite,
    # but this deployment is configured for MariaDB — the RWO volume is the
    # actual reason to keep a single pod at a time.)
    type: Recreate
  template:
    metadata:
      labels:
        app: uptime-kuma
    spec:
      imagePullSecrets:
        - name: regcred
      containers:
        - name: uptime-kuma
          image: louislam/uptime-kuma:2.2.1
          ports:
            - containerPort: 3001
              name: http
          env:
            # External MariaDB instead of the embedded SQLite database.
            - name: UPTIME_KUMA_DB_TYPE
              value: mariadb
            - name: UPTIME_KUMA_DB_HOSTNAME
              value: "mariadb-service.mariadb.svc.cluster.local"
            - name: UPTIME_KUMA_DB_PORT
              value: "3306"
          envFrom:
            # Supplies UPTIME_KUMA_DB_NAME / _USERNAME / _PASSWORD.
            - secretRef:
                name: uptime-kuma-mariadb-secret
          volumeMounts:
            - name: data
              mountPath: /app/data
      volumes:
        - name: data
          persistentVolumeClaim:
            claimName: uptime-kuma-data-pvc
|
||||
|
||||
|
||||
|
||||
|
||||
10
roles/uptime-kuma/files/uptime-kuma-monitors-secret.yaml
Normal file
10
roles/uptime-kuma/files/uptime-kuma-monitors-secret.yaml
Normal file
@ -0,0 +1,10 @@
|
||||
# Credentials the sync CronJob uses against the Uptime Kuma API.
#
# NOTE: the base64 values below are self-describing placeholders
# ("USERNAME", "PASSWORD", "UPTIME_KUMA_URL"); the Ansible role overwrites
# this file with the real content fetched from Bitwarden before applying it.
apiVersion: v1
kind: Secret
metadata:
  name: uptime-kuma-api-secret
  namespace: monitoring
type: Opaque
data:
  USERNAME: VVNFUk5BTUU=
  PASSWORD: UEFTU1dPUkQ=
  UPTIME_KUMA_URL: VVBUSU1FX0tVTUFfVVJM
|
||||
4
roles/uptime-kuma/files/uptime-kuma-namespace.yaml
Normal file
4
roles/uptime-kuma/files/uptime-kuma-namespace.yaml
Normal file
@ -0,0 +1,4 @@
|
||||
# Namespace holding Uptime Kuma and its sync CronJob.
apiVersion: v1
kind: Namespace
metadata:
  name: monitoring
|
||||
12
roles/uptime-kuma/files/uptime-kuma-nfs-csi.yaml
Normal file
12
roles/uptime-kuma/files/uptime-kuma-nfs-csi.yaml
Normal file
@ -0,0 +1,12 @@
|
||||
# StorageClass for Uptime Kuma data on the NAS via csi-driver-nfs.
# StorageClass is cluster-scoped, so the previous `namespace:` field was
# invalid/ignored and has been dropped.
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: uptime-kuma-nfs-csi
provisioner: nfs.csi.k8s.io
parameters:
  # csi-driver-nfs parameters are limited to the export location; mount
  # options are NOT a parameter — they belong in the top-level mountOptions.
  server: 192.168.1.22
  share: /mnt/fenix-main-nas-pool-0/data/k8s-Volumes/k8s-cluster-iac-deployed/uptime-kuma/data
mountOptions:
  - nolock
  - soft
  # "intr" was removed: it has been a no-op since Linux 2.6.25.
allowVolumeExpansion: true
reclaimPolicy: Retain
|
||||
32
roles/uptime-kuma/files/uptime-kuma-pvcs.yaml
Normal file
32
roles/uptime-kuma/files/uptime-kuma-pvcs.yaml
Normal file
@ -0,0 +1,32 @@
|
||||
# Statically provisioned NFS PersistentVolume + claim for /app/data.
# PersistentVolume is cluster-scoped, so the previous `namespace:` field on
# the PV was invalid/ignored and has been dropped (the PVC keeps its namespace).
apiVersion: v1
kind: PersistentVolume
metadata:
  name: uptime-kuma-data-pv
spec:
  capacity:
    storage: 60Gi
  storageClassName: uptime-kuma-nfs-csi
  accessModes:
    - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  mountOptions:
    - nolock
    - nfsvers=3
  nfs:
    server: 192.168.1.22
    path: /mnt/fenix-main-nas-pool-0/data/k8s-Volumes/k8s-cluster-iac-deployed/uptime-kuma/data
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: uptime-kuma-data-pvc
  namespace: monitoring
spec:
  storageClassName: uptime-kuma-nfs-csi
  accessModes:
    - ReadWriteOnce
  # Bind explicitly to the static PV above (no dynamic provisioning).
  volumeName: uptime-kuma-data-pv
  resources:
    requests:
      storage: 60Gi
|
||||
10
roles/uptime-kuma/files/uptime-kuma-secret.yaml
Normal file
10
roles/uptime-kuma/files/uptime-kuma-secret.yaml
Normal file
@ -0,0 +1,10 @@
|
||||
# MariaDB connection credentials consumed via envFrom by the Deployment.
#
# NOTE: the base64 values below are self-describing placeholders
# ("MARIADB_DATABASE", "MARIADB_USER", "MARIADB_PASSWORD"); the Ansible role
# overwrites this file with the real content fetched from Bitwarden before
# applying it.
apiVersion: v1
kind: Secret
metadata:
  name: uptime-kuma-mariadb-secret
  namespace: monitoring
type: Opaque
data:
  UPTIME_KUMA_DB_NAME: TUFSSUFEQl9EQVRBQkFTRQ==
  UPTIME_KUMA_DB_USERNAME: TUFSSUFEQl9VU0VS
  UPTIME_KUMA_DB_PASSWORD: TUFSSUFEQl9QQVNTV09SRA==
|
||||
13
roles/uptime-kuma/files/uptime-kuma-service.yaml
Normal file
13
roles/uptime-kuma/files/uptime-kuma-service.yaml
Normal file
@ -0,0 +1,13 @@
|
||||
# ClusterIP Service exposing the Uptime Kuma web UI inside the cluster.
apiVersion: v1
kind: Service
metadata:
  name: uptime-kuma
  namespace: monitoring
spec:
  type: ClusterIP
  selector:
    app: uptime-kuma
  ports:
    - name: http
      port: 3001
      targetPort: http  # resolves to the pod's named containerPort
|
||||
@ -0,0 +1,27 @@
|
||||
# RBAC for the sync CronJob: read-only access to Services cluster-wide
# (the sync script discovers what to monitor from Service objects).
apiVersion: v1
kind: ServiceAccount
metadata:
  name: uptime-kuma-sync
  namespace: monitoring
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: uptime-kuma-sync
rules:
  - apiGroups: [""]
    resources: ["services"]
    verbs: ["get", "list"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: uptime-kuma-sync
subjects:
  - kind: ServiceAccount
    name: uptime-kuma-sync
    namespace: monitoring
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: uptime-kuma-sync
|
||||
53
roles/uptime-kuma/tasks/main.yml
Normal file
53
roles/uptime-kuma/tasks/main.yml
Normal file
@ -0,0 +1,53 @@
|
||||
# Role tasks: stage the manifests, inject real secrets from Bitwarden, apply.
- name: Remover o diretório /tmp/monitoring/uptime-kuma/kubernetes-files
  ansible.builtin.file:
    path: /tmp/monitoring/uptime-kuma/kubernetes-files
    state: absent

- name: Criar diretório temporário no remoto
  ansible.builtin.file:
    path: /tmp/monitoring/uptime-kuma/kubernetes-files
    state: directory
    mode: '0755'

- name: Copiar os manifests para o remoto
  ansible.builtin.copy:
    src: ../files
    dest: /tmp/monitoring/uptime-kuma/kubernetes-files
    owner: fenix
    group: root
    mode: '0644'

- name: Listar conteúdo do diretório remoto
  ansible.builtin.shell: ls -l /tmp/monitoring/uptime-kuma/kubernetes-files/files
  register: resultado_ls

# Overwrite the placeholder Secret manifests with the real content stored in
# Bitwarden. The destination MUST be the same directory that the final
# `kubectl apply` consumes (kubernetes-files/files); the previous version
# wrote to /tmp/monitoring/uptime-kuma/files/, so the placeholder secrets
# were the ones actually deployed.
- name: Obter várias notas do Bitwarden
  ansible.builtin.shell: |
    BW_SESSION=$(bw unlock --passwordenv BW_PASSWORD --raw)
    bw get item "{{ item.id }}" --session "$BW_SESSION" | jq -r '.notes' > {{ item.dest }}
  loop:
    - { id: "iac.ansible.dockersecrets", dest: "/tmp/monitoring/uptime-kuma/kubernetes-files/files/docker-secrets.yaml" }
    - { id: "iac.ansible.uptimekuma.mariadbsecret", dest: "/tmp/monitoring/uptime-kuma/kubernetes-files/files/uptime-kuma-secret.yaml" }
    - { id: "iac.ansible.uptimekuma.monitorssecret", dest: "/tmp/monitoring/uptime-kuma/kubernetes-files/files/uptime-kuma-monitors-secret.yaml" }
  args:
    executable: /bin/bash
  environment:
    # vars/main.yml defines `bw_password` (the old reference to an undefined
    # BW_PASSWORD variable failed). `--passwordenv` keeps the master password
    # out of the process command line.
    BW_PASSWORD: "{{ bw_password }}"
  no_log: true  # never echo secret material into the Ansible log

- name: Mostrar resultado do ls
  ansible.builtin.debug:
    var: resultado_ls.stdout_lines

# Apply the namespace first so every namespaced manifest has a target,
# then apply the rest of the directory.
- name: Aplicar o uptime-kuma  # task name fixed — was "stolon", a copy-paste leftover
  become: yes
  become_user: fenix
  ansible.builtin.shell: |
    kubectl apply -f /tmp/monitoring/uptime-kuma/kubernetes-files/files/uptime-kuma-namespace.yaml
    kubectl apply -f /tmp/monitoring/uptime-kuma/kubernetes-files/files/
  environment:
    KUBECONFIG: /home/fenix/.kube/config
|
||||
4
roles/uptime-kuma/vars/main.yml
Normal file
4
roles/uptime-kuma/vars/main.yml
Normal file
@ -0,0 +1,4 @@
|
||||
# Role variables — all sourced from the controller's environment at runtime.
bw_password: "{{ lookup('env', 'BW_PASSWORD') }}"
VAULTWARDEN_LINK: "{{ lookup('env', 'VAULTWARDEN_LINK') }}"
BW_CLIENTID: "{{ lookup('env', 'BW_CLIENTID') }}"
# Stray space before the colon removed (`BW_CLIENTSECRET :`) so the key is
# unambiguous across YAML parsers.
BW_CLIENTSECRET: "{{ lookup('env', 'BW_CLIENTSECRET') }}"
|
||||
Reference in New Issue
Block a user