Mirror of https://github.com/varun-r-mallya/server-scripts.git (synced 2025-12-31 20:16:25 +00:00)

control/Caddyfile (new file, 35 lines)

# The Caddyfile is an easy way to configure your Caddy web server.
#
# Unless the file starts with a global options block, the first
# uncommented line is always the address of your site.
#
# To use your own domain name (with automatic HTTPS), first make
# sure your domain's A/AAAA DNS records are properly pointed to
# this machine's public IP, then replace ":80" below with your
# domain name.

# :80 {
#     # Set this path to your site's directory.
#     root * /usr/share/caddy

#     # Enable the static file server.
#     file_server

#     # Another common task is to set up a reverse proxy:
#     # reverse_proxy localhost:8080

#     # Or serve a PHP site through php-fpm:
#     # php_fastcgi localhost:9000
#}

:43211 {
    basicauth /* {
        # caddy hash-password --plaintext yourpassword
        varun $2a$14$ZY.l3V.ecHUWEO0phmNr9.ieQN15n5uIyrlLx4S4rLRKgm6YoGxGe
    }
    reverse_proxy localhost:43210

}

# Refer to the Caddy docs for more information:
# https://caddyserver.com/docs/caddyfile

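Note: the bcrypt hash on the `varun` line is produced by the command shown in the inline comment. A minimal sketch of rotating the password, assuming the caddy binary is on PATH and Caddy runs under systemd ('yourpassword' is a placeholder):

    caddy hash-password --plaintext 'yourpassword'   # prints a bcrypt hash
    # paste the hash after the username in the basicauth block, then:
    sudo systemctl reload caddy
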
control/brightness.py (new executable file, 58 lines)

#!/usr/bin/env python3
import os
import sys

# Detect the backlight device
def find_backlight_device():
    base_path = '/sys/class/backlight'
    try:
        devices = os.listdir(base_path)
        if not devices:
            print("No backlight device found.")
            sys.exit(1)
        return os.path.join(base_path, devices[0])
    except FileNotFoundError:
        print("Backlight control not supported on this system.")
        sys.exit(1)

def get_brightness_info(dev_path):
    with open(os.path.join(dev_path, 'max_brightness')) as f:
        max_brightness = int(f.read().strip())
    with open(os.path.join(dev_path, 'brightness')) as f:
        current_brightness = int(f.read().strip())
    return current_brightness, max_brightness

def set_brightness(dev_path, value):
    brightness_path = os.path.join(dev_path, 'brightness')
    try:
        with open(brightness_path, 'w') as f:
            f.write(str(value))
    except PermissionError:
        print("Permission denied. Try running as root (sudo).")
        sys.exit(1)

def main():
    if len(sys.argv) != 2:
        print("Usage: brightness.py <value|+10|-10>")
        sys.exit(1)

    dev_path = find_backlight_device()
    current, max_brightness = get_brightness_info(dev_path)
    arg = sys.argv[1]

    try:
        if arg.startswith('+') or arg.startswith('-'):
            new_brightness = current + int(arg)
        else:
            new_brightness = int(arg)

        new_brightness = max(0, min(new_brightness, max_brightness))
        set_brightness(dev_path, new_brightness)
        print(f"Brightness set to {new_brightness}/{max_brightness}")
    except ValueError:
        print("Invalid brightness value.")
        sys.exit(1)

if __name__ == '__main__':
    main()

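Usage, per the script's own help string (run as root, since writing to /sys/class/backlight/*/brightness normally requires it):

    sudo ./brightness.py 0      # minimum backlight, as referenced in services.sh below
    sudo ./brightness.py +10    # relative increase, clamped to max_brightness
    sudo ./brightness.py -10    # relative decrease, clamped to 0
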
control/containers-up.service (new file, 13 lines)

[Unit]
Description=Run services.sh to run Docker compose on boot
After=network-online.target docker.service
Wants=network-online.target

[Service]
Type=oneshot
ExecStart=/home/xeon/control/services.sh
RemainAfterExit=true

[Install]
WantedBy=multi-user.target

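A minimal sketch of installing the unit, assuming it is copied into /etc/systemd/system/ and the ExecStart path exists on the machine:

    sudo cp control/containers-up.service /etc/systemd/system/
    sudo systemctl daemon-reload
    sudo systemctl enable --now containers-up.service
    systemctl status containers-up.service   # shows "active (exited)" because of Type=oneshot + RemainAfterExit
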
control/services.sh (new executable file, 24 lines)

#!/bin/bash

# List your service directories here (full paths or relative paths)
services=(
    "/home/xeon/management/monitoring"
    "/home/xeon/management/seanime"
)

#echo "Reducing brightness"
#/home/xeon/control/brightness.py 0

echo "🚀 Starting selected Docker Compose stacks..."

for dir in "${services[@]}"; do
    if [ -d "$dir" ] && [ -f "$dir/docker-compose.yml" ]; then
        echo "➡️ Starting stack in $dir"
        (cd "$dir" && docker compose up -d)
    else
        echo "⚠️ Skipping $dir — not a valid Docker Compose directory"
    fi
done

echo "✅ All listed stacks processed."

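The script calls the Docker Compose v2 plugin (`docker compose`, not the legacy `docker-compose` binary), so that plugin must be installed for the unit above to succeed. A quick manual test from the repo root, assuming the current user can talk to the Docker daemon:

    ./control/services.sh
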
management/monitoring/docker-compose.yml (new file, 58 lines)

networks:
  monitoring:
    driver: bridge

volumes:
  prometheus_data: {}
  grafana_data: {}

services:
  node-exporter:
    image: prom/node-exporter:latest
    container_name: node-exporter
    restart: unless-stopped
    volumes:
      - /proc:/host/proc:ro
      - /sys:/host/sys:ro
      - /:/rootfs:ro
    command:
      - '--path.procfs=/host/proc'
      - '--path.rootfs=/rootfs'
      - '--path.sysfs=/host/sys'
      - '--collector.filesystem.mount-points-exclude=^/(sys|proc|dev|host|etc)($$|/)'
    expose:
      - 9100
    networks:
      - monitoring

  prometheus:
    image: prom/prometheus:latest
    container_name: prometheus
    restart: unless-stopped
    volumes:
      - ./prometheus.yml:/etc/prometheus/prometheus.yml
      - prometheus_data:/prometheus
    command:
      - '--config.file=/etc/prometheus/prometheus.yml'
      - '--storage.tsdb.path=/prometheus'
      - '--web.console.libraries=/etc/prometheus/console_libraries'
      - '--web.console.templates=/etc/prometheus/consoles'
      - '--web.enable-lifecycle'
    expose:
      - 9090
    networks:
      - monitoring

  grafana:
    image: grafana/grafana:latest
    container_name: grafana
    restart: unless-stopped
    volumes:
      - grafana_data:/var/lib/grafana
    ports:
      - 30691:3000
    networks:
      - monitoring
    environment:
      - GF_SECURITY_ADMIN_USER=admin
      - GF_SECURITY_ADMIN_PASSWORD=admin

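Once this stack is up (services.sh runs `docker compose up -d` in this directory), only Grafana is published to the host, on port 30691 with the admin credentials from the environment block; Prometheus and node-exporter stay on the internal `monitoring` bridge network. A quick check, with <host> as a placeholder for the machine's address:

    docker compose ps    # all three containers should be running
    # then browse to http://<host>:30691 and log in as admin/admin
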
management/monitoring/prometheus.yml (new file, 13 lines)

global:
  scrape_interval: 1m

scrape_configs:
  - job_name: 'prometheus'
    scrape_interval: 1m
    static_configs:
      - targets: ['localhost:9090']

  - job_name: 'node'
    static_configs:
      - targets: ['node-exporter:9100']

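The compose file bind-mounts this config and passes `--web.enable-lifecycle`, so Prometheus also accepts a POST to /-/reload after edits; since port 9090 is only exposed on the internal network, the simplest option from the host is to restart the container instead (a sketch, run from management/monitoring):

    docker compose restart prometheus
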
management/nextcloud/cron (new file, 1 line)

0 3 */2 * * /home/xeon/management/nextcloud/nextcloud-snap-backup.sh >> /var/log/nextcloud-borg.log 2>&1

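This entry runs the backup script at 03:00 on every second day of the month and appends its output to /var/log/nextcloud-borg.log. Since the script uses sudo and the log lives under /var/log, it is presumably meant for root's crontab; a sketch of installing it:

    sudo crontab -e    # then paste the line above
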
management/nextcloud/nextcloud-snap-backup.sh (new executable file, 34 lines)

#!/bin/bash
set -e

BORG_REPO="/mnt/data/bbackup"
SNAP_BACKUP_DIR="/var/snap/nextcloud/common/backups"
TAR_TMP="/tmp"

# === Step 1: Run nextcloud.export ===
echo "📦 Exporting Nextcloud..."
sudo nextcloud.export -abcd

# === Step 2: Find most recent export folder ===
EXPORT_DIR=$(ls -td "$SNAP_BACKUP_DIR"/20* | head -n 1)
FOLDER_NAME=$(basename "$EXPORT_DIR")
TAR_FILE="$TAR_TMP/nextcloud-backup-$FOLDER_NAME.tar"

echo "✅ Found export directory: $EXPORT_DIR"

# === Step 3: Tar the export directory ===
sudo tar -cvf "$TAR_FILE" -C "$SNAP_BACKUP_DIR" "$FOLDER_NAME"

# === Step 4: Create Borg backup ===
borg create --verbose --stats --compression zstd \
    "$BORG_REPO::nextcloud-export-$FOLDER_NAME" \
    "$TAR_FILE"

# === Step 5: Prune old backups ===
borg prune -v --list "$BORG_REPO" \
    --keep-daily=7 --keep-weekly=4 --keep-monthly=6

# === Step 6: Clean up ===
sudo rm -f "$TAR_FILE"
sudo rm -rf "$EXPORT_DIR"

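The script assumes the Borg repository at /mnt/data/bbackup already exists; since it sets no BORG_PASSPHRASE, an unencrypted repository is the setup that matches it. A sketch of creating one, run as whichever user will run the backups (root, if using the cron entry above), with the encryption mode left as a deliberate choice:

    borg init --encryption=none /mnt/data/bbackup
    borg list /mnt/data/bbackup    # verify the repo and, later, the archives
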
management/nextcloud/nextcloud-snap-restore.sh (new executable file, 46 lines)

#!/bin/bash
set -e

# === Config ===
BORG_REPO="/mnt/data/bbackup"
RESTORE_BASE="/var/snap/nextcloud/common/restore"
TMP_DIR="/tmp/nextcloud_restore_tmp"

# === Step 1: List available Borg archives
echo "🔍 Available Nextcloud Backups:"
borg list "$BORG_REPO" | grep nextcloud-export- | nl

# === Step 2: Ask user to choose one
read -p "Enter the number of the backup to restore: " CHOICE

ARCHIVE_NAME=$(borg list "$BORG_REPO" | grep nextcloud-export- | sed -n "${CHOICE}p" | awk '{print $1}')

if [ -z "$ARCHIVE_NAME" ]; then
    echo "❌ Invalid selection"
    exit 1
fi

echo "✅ Selected archive: $ARCHIVE_NAME"

# === Step 3: Extract Borg archive
rm -rf "$TMP_DIR"
mkdir -p "$TMP_DIR"
cd "$TMP_DIR"

echo "📦 Extracting archive..."
borg extract "$BORG_REPO::$ARCHIVE_NAME"

cd ./tmp
# === Step 4: Extract tar into Snap-visible restore directory
TAR_FILE=$(ls nextcloud-backup-*.tar)
FOLDER_NAME="${TAR_FILE%.tar}"

sudo mkdir -p "$RESTORE_BASE/$FOLDER_NAME"
sudo tar -xvf "$TAR_FILE" -C "$RESTORE_BASE/$FOLDER_NAME"

# === Step 5: Run nextcloud.import
echo "🚀 Restoring Nextcloud from backup..."
sudo nextcloud.import "$RESTORE_BASE/$FOLDER_NAME"

echo "✅ Done restoring from $ARCHIVE_NAME"

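One non-obvious step: `borg extract` recreates the archived paths relative to the current directory, and the backup script archived the tarball from /tmp, so after extraction it sits under ./tmp; that is why the script changes into ./tmp before looking for nextcloud-backup-*.tar. The layout can be confirmed without extracting (ARCHIVE_NAME as selected in the script):

    borg list "/mnt/data/bbackup::$ARCHIVE_NAME"    # entries appear under tmp/
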
management/seanime/config/config.toml (new file, 30 lines)

version = '2.8.4'

[cache]
dir = '$SEANIME_DATA_DIR/cache'
transcodedir = '$SEANIME_DATA_DIR/cache/transcode'

[database]
name = 'seanime'

[extensions]
dir = '$SEANIME_DATA_DIR/extensions'

[logs]
dir = '$SEANIME_DATA_DIR/logs'

[manga]
downloaddir = '$SEANIME_DATA_DIR/manga'

[offline]
assetdir = '$SEANIME_DATA_DIR/offline/assets'
dir = '$SEANIME_DATA_DIR/offline'

[server]
host = '0.0.0.0'
offline = false
port = 43211
usebinarypath = true

[web]
assetdir = '$SEANIME_DATA_DIR/assets'

management/seanime/docker-compose.yml (new file, 62 lines)

services:
  seanime:
    image: valgul/seanime:latest
    container_name: seanime-container
    depends_on:
      - torrent
    volumes:
      - ./downloads:/downloads
      - ./config:/root/.config/Seanime
      #- /usr/lib/x86_64-linux-gnu/dri/:/usr/lib/x86_64-linux-gnu/dri/ # uncomment for intel support
    #devices: # uncomment for intel support
      #- /dev/dri/card0:/dev/dri/card0 # uncomment for intel support
      #- /dev/dri/renderD128:/dev/dri/renderD128 # uncomment for intel support
    #group_add: # uncomment for intel support
      #- "video" # uncomment for intel support
      #- 105 # uncomment for intel support
    ports:
      - 43210:43211
    #network_mode: "service:gluetun" # uncomment if using gluetun; also comment out the ports section
    #deploy: # nvidia; requires the NVIDIA Container Toolkit
      #resources:
        #reservations:
          #devices:
            #- driver: nvidia
              #count: 1
              #capabilities: [gpu]
  torrent:
    image: lscr.io/linuxserver/qbittorrent:latest
    container_name: torrent
    environment:
      - TZ=Etc/UTC
      - WEBUI_PORT=8080
      - TORRENTING_PORT=6881
    volumes:
      - ./appdata:/config
      - ./downloads:/downloads # same as seanime downloads folder
    ports:
      - 43212:8080
      - 43213:6881
      - 43213:6881/udp
    restart: unless-stopped
    #network_mode: "service:gluetun" # uncomment if using gluetun; also comment out the ports section

  #gluetun: # so your torrent traffic uses a VPN (would be bad if it was your IP)
    #image: qmcgaw/gluetun
    #container_name: gluetun
    #cap_add:
      #- NET_ADMIN
    #devices:
      #- /dev/net/tun:/dev/net/tun
    #ports:
      #- 8888:8888/tcp # HTTP proxy
      #- 8388:8388/tcp # Shadowsocks
      #- 8388:8388/udp # Shadowsocks

      #- 8080:8080 # torrent
      #- 6881:6881 # torrent
      #- 6881:6881/udp # torrent
    #environment:
      #- FIREWALL_INPUT_PORTS=8080,6881,43211
      # See https://github.com/qdm12/gluetun-wiki/tree/main/setup#setup
      #- VPN_SERVICE_PROVIDER=

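Tying the pieces together: inside the container Seanime listens on 43211 (per config.toml), this compose file publishes it on host port 43210, and the Caddyfile front-ends that port with basic auth on :43211; the qBittorrent web UI is published directly on 43212. A sketch of checking the chain from the host once Caddy and this stack are running (<password> is whatever plaintext produced the bcrypt hash in the Caddyfile):

    curl -I http://localhost:43210/                       # Seanime directly, no auth
    curl -I -u varun:<password> http://localhost:43211/   # through Caddy basic auth
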