From f693c857b12e454aa7f1d7228b308d417d3a5468 Mon Sep 17 00:00:00 2001
From: varun-r-mallya
Date: Fri, 20 Jun 2025 19:55:13 +0000
Subject: [PATCH] configuration 1

Signed-off-by: varun-r-mallya
---
 control/Caddyfile                             | 35 +++++++++++
 control/brightness.py                         | 58 +++++++++++++++++
 control/containers-up.service                 | 13 ++++
 control/services.sh                           | 24 +++++++
 management/monitoring/docker-compose.yml      | 58 +++++++++++++++++
 management/monitoring/prometheus.yml          | 13 ++++
 management/nextcloud/cron                     |  1 +
 management/nextcloud/nextcloud-snap-backup.sh | 34 ++++++++++
 .../nextcloud/nextcloud-snap-restore.sh       | 46 ++++++++++++++
 management/seanime/config/config.toml         | 30 +++++++++
 management/seanime/docker-compose.yml         | 62 +++++++++++++++++++
 11 files changed, 374 insertions(+)
 create mode 100644 control/Caddyfile
 create mode 100755 control/brightness.py
 create mode 100644 control/containers-up.service
 create mode 100755 control/services.sh
 create mode 100644 management/monitoring/docker-compose.yml
 create mode 100644 management/monitoring/prometheus.yml
 create mode 100644 management/nextcloud/cron
 create mode 100755 management/nextcloud/nextcloud-snap-backup.sh
 create mode 100755 management/nextcloud/nextcloud-snap-restore.sh
 create mode 100644 management/seanime/config/config.toml
 create mode 100644 management/seanime/docker-compose.yml

diff --git a/control/Caddyfile b/control/Caddyfile
new file mode 100644
index 0000000..7c3305e
--- /dev/null
+++ b/control/Caddyfile
@@ -0,0 +1,35 @@
+# The Caddyfile is an easy way to configure your Caddy web server.
+#
+# Unless the file starts with a global options block, the first
+# uncommented line is always the address of your site.
+#
+# To use your own domain name (with automatic HTTPS), first make
+# sure your domain's A/AAAA DNS records are properly pointed to
+# this machine's public IP, then replace ":80" below with your
+# domain name.
+
+# :80 {
+	# Set this path to your site's directory.
+# root * /usr/share/caddy
+
+	# Enable the static file server.
+# file_server
+
+	# Another common task is to set up a reverse proxy:
+	# reverse_proxy localhost:8080
+
+	# Or serve a PHP site through php-fpm:
+	# php_fastcgi localhost:9000
+#}
+
+:43211 {
+	basicauth /* {
+		# caddy hash-password --plaintext yourpassword
+		varun $2a$14$ZY.l3V.ecHUWEO0phmNr9.ieQN15n5uIyrlLx4S4rLRKgm6YoGxGe
+	}
+	reverse_proxy localhost:43210
+
+}
+
+# Refer to the Caddy docs for more information:
+# https://caddyserver.com/docs/caddyfile
diff --git a/control/brightness.py b/control/brightness.py
new file mode 100755
index 0000000..6a1796b
--- /dev/null
+++ b/control/brightness.py
@@ -0,0 +1,58 @@
+#!/usr/bin/env python3
+import os
+import sys
+
+# Detect the backlight device
+def find_backlight_device():
+    base_path = '/sys/class/backlight'
+    try:
+        devices = os.listdir(base_path)
+        if not devices:
+            print("No backlight device found.")
+            sys.exit(1)
+        return os.path.join(base_path, devices[0])
+    except FileNotFoundError:
+        print("Backlight control not supported on this system.")
+        sys.exit(1)
+
+def get_brightness_info(dev_path):
+    with open(os.path.join(dev_path, 'max_brightness')) as f:
+        max_brightness = int(f.read().strip())
+    with open(os.path.join(dev_path, 'brightness')) as f:
+        current_brightness = int(f.read().strip())
+    return current_brightness, max_brightness
+
+def set_brightness(dev_path, value):
+    brightness_path = os.path.join(dev_path, 'brightness')
+    try:
+        with open(brightness_path, 'w') as f:
+            f.write(str(value))
+    except PermissionError:
+        print("Permission denied. Try running as root (sudo).")
+        sys.exit(1)
+
+def main():
+    if len(sys.argv) != 2:
+        print("Usage: brightness.py <value|+delta|-delta>")
+        sys.exit(1)
+
+    dev_path = find_backlight_device()
+    current, max_brightness = get_brightness_info(dev_path)
+    arg = sys.argv[1]
+
+    try:
+        if arg.startswith('+') or arg.startswith('-'):
+            new_brightness = current + int(arg)
+        else:
+            new_brightness = int(arg)
+
+        new_brightness = max(0, min(new_brightness, max_brightness))
+        set_brightness(dev_path, new_brightness)
+        print(f"Brightness set to {new_brightness}/{max_brightness}")
+    except ValueError:
+        print("Invalid brightness value.")
+        sys.exit(1)
+
+if __name__ == '__main__':
+    main()
+
diff --git a/control/containers-up.service b/control/containers-up.service
new file mode 100644
index 0000000..8ea8eaf
--- /dev/null
+++ b/control/containers-up.service
@@ -0,0 +1,13 @@
+[Unit]
+Description=Run services.sh to run Docker compose on boot
+After=network-online.target docker.service
+Wants=network-online.target
+
+[Service]
+Type=oneshot
+ExecStart=/home/xeon/control/services.sh
+RemainAfterExit=true
+
+[Install]
+WantedBy=multi-user.target
+
diff --git a/control/services.sh b/control/services.sh
new file mode 100755
index 0000000..6c96095
--- /dev/null
+++ b/control/services.sh
@@ -0,0 +1,24 @@
+#!/bin/bash
+
+# List your service directories here (full paths or relative paths)
+services=(
+    "/home/xeon/management/monitoring"
+    "/home/xeon/management/seanime"
+)
+
+#echo "Reducing brightness"
+#/home/xeon/control/brightness.py 0
+
+echo "🚀 Starting selected Docker Compose stacks..."
+
+for dir in "${services[@]}"; do
+    if [ -d "$dir" ] && [ -f "$dir/docker-compose.yml" ]; then
+        echo "➡️ Starting stack in $dir"
+        (cd "$dir" && docker compose up -d)
+    else
+        echo "⚠️ Skipping $dir — not a valid Docker Compose directory"
+    fi
+done
+
+echo "✅ All listed stacks processed."
+
diff --git a/management/monitoring/docker-compose.yml b/management/monitoring/docker-compose.yml
new file mode 100644
index 0000000..05be0ea
--- /dev/null
+++ b/management/monitoring/docker-compose.yml
@@ -0,0 +1,58 @@
+networks:
+  monitoring:
+    driver: bridge
+
+volumes:
+  prometheus_data: {}
+  grafana_data: {}
+
+services:
+  node-exporter:
+    image: prom/node-exporter:latest
+    container_name: node-exporter
+    restart: unless-stopped
+    volumes:
+      - /proc:/host/proc:ro
+      - /sys:/host/sys:ro
+      - /:/rootfs:ro
+    command:
+      - '--path.procfs=/host/proc'
+      - '--path.rootfs=/rootfs'
+      - '--path.sysfs=/host/sys'
+      - '--collector.filesystem.mount-points-exclude=^/(sys|proc|dev|host|etc)($$|/)'
+    expose:
+      - 9100
+    networks:
+      - monitoring
+
+  prometheus:
+    image: prom/prometheus:latest
+    container_name: prometheus
+    restart: unless-stopped
+    volumes:
+      - ./prometheus.yml:/etc/prometheus/prometheus.yml
+      - prometheus_data:/prometheus
+    command:
+      - '--config.file=/etc/prometheus/prometheus.yml'
+      - '--storage.tsdb.path=/prometheus'
+      - '--web.console.libraries=/etc/prometheus/console_libraries'
+      - '--web.console.templates=/etc/prometheus/consoles'
+      - '--web.enable-lifecycle'
+    expose:
+      - 9090
+    networks:
+      - monitoring
+
+  grafana:
+    image: grafana/grafana:latest
+    container_name: grafana
+    restart: unless-stopped
+    volumes:
+      - grafana_data:/var/lib/grafana
+    ports:
+      - 30691:3000
+    networks:
+      - monitoring
+    environment:
+      - GF_SECURITY_ADMIN_USER=admin
+      - GF_SECURITY_ADMIN_PASSWORD=admin
diff --git a/management/monitoring/prometheus.yml b/management/monitoring/prometheus.yml
new file mode 100644
index 0000000..701388a
--- /dev/null
+++ b/management/monitoring/prometheus.yml
@@ -0,0 +1,13 @@
+global:
+  scrape_interval: 1m
+
+scrape_configs:
+  - job_name: 'prometheus'
+    scrape_interval: 1m
+    static_configs:
+      - targets: ['localhost:9090']
+
+  - job_name: 'node'
+    static_configs:
+      - targets: ['node-exporter:9100']
+
diff --git a/management/nextcloud/cron b/management/nextcloud/cron
new file mode 100644
index 0000000..29fe633
--- /dev/null
+++ b/management/nextcloud/cron
@@ -0,0 +1 @@
+0 3 */2 * * /home/xeon/management/nextcloud/nextcloud-snap-backup.sh >> /var/log/nextcloud-borg.log 2>&1
diff --git a/management/nextcloud/nextcloud-snap-backup.sh b/management/nextcloud/nextcloud-snap-backup.sh
new file mode 100755
index 0000000..01144f1
--- /dev/null
+++ b/management/nextcloud/nextcloud-snap-backup.sh
@@ -0,0 +1,34 @@
+#!/bin/bash
+set -e
+
+BORG_REPO="/mnt/data/bbackup"
+SNAP_BACKUP_DIR="/var/snap/nextcloud/common/backups"
+TAR_TMP="/tmp"
+
+# === Step 1: Run nextcloud.export ===
+echo "📦 Exporting Nextcloud..."
+sudo nextcloud.export -abcd
+
+# === Step 2: Find most recent export folder ===
+EXPORT_DIR=$(ls -td "$SNAP_BACKUP_DIR"/20* | head -n 1)
+FOLDER_NAME=$(basename "$EXPORT_DIR")
+TAR_FILE="$TAR_TMP/nextcloud-backup-$FOLDER_NAME.tar"
+
+echo "✅ Found export directory: $EXPORT_DIR"
+
+# === Step 3: Tar the export directory ===
+sudo tar -cvf "$TAR_FILE" -C "$SNAP_BACKUP_DIR" "$FOLDER_NAME"
+
+# === Step 4: Create Borg backup ===
+borg create --verbose --stats --compression zstd \
+    "$BORG_REPO::nextcloud-export-$FOLDER_NAME" \
+    "$TAR_FILE"
+
+# === Step 5: Prune old backups ===
+borg prune -v --list "$BORG_REPO" \
+    --keep-daily=7 --keep-weekly=4 --keep-monthly=6
+
+# === Step 6: Clean up ===
+sudo rm -f "$TAR_FILE"
+sudo rm -rf "$EXPORT_DIR"
+
diff --git a/management/nextcloud/nextcloud-snap-restore.sh b/management/nextcloud/nextcloud-snap-restore.sh
new file mode 100755
index 0000000..00a2bde
--- /dev/null
+++ b/management/nextcloud/nextcloud-snap-restore.sh
@@ -0,0 +1,46 @@
+#!/bin/bash
+set -e
+
+# === Config ===
+BORG_REPO="/mnt/data/bbackup"
+RESTORE_BASE="/var/snap/nextcloud/common/restore"
+TMP_DIR="/tmp/nextcloud_restore_tmp"
+
+# === Step 1: List available Borg archives
+echo "🔍 Available Nextcloud Backups:"
+borg list "$BORG_REPO" | grep nextcloud-export- | nl
+
+# === Step 2: Ask user to choose one
+read -p "Enter the number of the backup to restore: " CHOICE
+
+ARCHIVE_NAME=$(borg list "$BORG_REPO" | grep nextcloud-export- | sed -n "${CHOICE}p" | awk '{print $1}')
+
+if [ -z "$ARCHIVE_NAME" ]; then
+    echo "❌ Invalid selection"
+    exit 1
+fi
+
+echo "✅ Selected archive: $ARCHIVE_NAME"
+
+# === Step 3: Extract Borg archive
+rm -rf "$TMP_DIR"
+mkdir -p "$TMP_DIR"
+cd "$TMP_DIR"
+
+echo "📦 Extracting archive..."
+borg extract "$BORG_REPO::$ARCHIVE_NAME"
+
+cd ./tmp
+# === Step 4: Extract tar into Snap-visible restore directory
+TAR_FILE=$(ls nextcloud-backup-*.tar)
+FOLDER_NAME="${TAR_FILE%.tar}"
+
+sudo mkdir -p "$RESTORE_BASE/$FOLDER_NAME"
+sudo tar -xvf "$TAR_FILE" -C "$RESTORE_BASE/$FOLDER_NAME"
+
+# === Step 5: Run nextcloud.import
+echo "🚀 Restoring Nextcloud from backup..."
+sudo nextcloud.import "$RESTORE_BASE/$FOLDER_NAME"
+
+echo "✅ Done restoring from $ARCHIVE_NAME"
+
diff --git a/management/seanime/config/config.toml b/management/seanime/config/config.toml
new file mode 100644
index 0000000..8043a33
--- /dev/null
+++ b/management/seanime/config/config.toml
@@ -0,0 +1,30 @@
+version = '2.8.4'
+
+[cache]
+dir = '$SEANIME_DATA_DIR/cache'
+transcodedir = '$SEANIME_DATA_DIR/cache/transcode'
+
+[database]
+name = 'seanime'
+
+[extensions]
+dir = '$SEANIME_DATA_DIR/extensions'
+
+[logs]
+dir = '$SEANIME_DATA_DIR/logs'
+
+[manga]
+downloaddir = '$SEANIME_DATA_DIR/manga'
+
+[offline]
+assetdir = '$SEANIME_DATA_DIR/offline/assets'
+dir = '$SEANIME_DATA_DIR/offline'
+
+[server]
+host = '0.0.0.0'
+offline = false
+port = 43211
+usebinarypath = true
+
+[web]
+assetdir = '$SEANIME_DATA_DIR/assets'
diff --git a/management/seanime/docker-compose.yml b/management/seanime/docker-compose.yml
new file mode 100644
index 0000000..cfe4b77
--- /dev/null
+++ b/management/seanime/docker-compose.yml
@@ -0,0 +1,62 @@
+services:
+  seanime:
+    image: valgul/seanime:latest
+    container_name: seanime-container
+    depends_on:
+      - torrent
+    volumes:
+      - ./downloads:/downloads
+      - ./config:/root/.config/Seanime
+      #- /usr/lib/x86_64-linux-gnu/dri/:/usr/lib/x86_64-linux-gnu/dri/ # uncomment for intel support
+    #devices: # uncomment for intel support
+    #  - /dev/dri/card0:/dev/dri/card0 # uncomment for intel support
+    #  - /dev/dri/renderD128:/dev/dri/renderD128 # uncomment for intel support
+    # group_add: # uncomment for intel support
+    #  - "video" # uncomment for intel support
+    #  - 105 # uncomment for intel support
+    ports:
+      - 43210:43211
+    #network_mode: "service:gluetun" # uncomment if using gluetun also comment all ports section
+    #deploy: # nvidia and you need NVIDIA Container Toolkit
+    #  resources:
+    #    reservations:
+    #      devices:
+    #        - driver: nvidia
+    #          count: 1
+    #          capabilities: [gpu]
+  torrent:
+    image: lscr.io/linuxserver/qbittorrent:latest
+    container_name: torrent
+    environment:
+      - TZ=Etc/UTC
+      - WEBUI_PORT=8080
+      - TORRENTING_PORT=6881
+    volumes:
+      - ./appdata:/config
+      - ./downloads:/downloads # same as seanime downloads folder
+    ports:
+      - 43212:8080
+      - 43213:6881
+      - 43213:6881/udp
+    restart: unless-stopped
+    #network_mode: "service:gluetun" # uncomment if using gluetun also comment all ports section
+
+  #gluetun: #so your torrent use a vpn (would be bad if it was your ip)
+  #image: qmcgaw/gluetun
+  #container_name: gluetun
+  #cap_add:
+  #- NET_ADMIN
+  #devices:
+  #- /dev/net/tun:/dev/net/tun
+  #ports:
+  #- 8888:8888/tcp # HTTP proxy
+  #- 8388:8388/tcp # Shadowsocks
+  #- 8388:8388/udp # Shadowsocks
+
+  #- 8080:8080 #torrent
+  #- 6881:6881 #torrent
+  #- 6881:6881/udp #torrent
+  #environment:
+  #- FIREWALL_INPUT_PORTS=8080,6881,43211
+  # See https://github.com/qdm12/gluetun-wiki/tree/main/setup#setup
+  #- VPN_SERVICE_PROVIDER=