Initial commit (I feel unsafe)

This commit is contained in:
2026-01-16 00:11:58 -08:00
commit d69daa37bf
121 changed files with 4153 additions and 0 deletions

25
.editorconfig Normal file
View File

@@ -0,0 +1,25 @@
# EditorConfig helps developers define and maintain consistent
# coding styles between different editors and IDEs
# https://editorconfig.org
# top-most EditorConfig file
root = true
# Defaults for every file in the repo.
[*]
end_of_line = lf
insert_final_newline = true
charset = utf-8
indent_style = space
indent_size = 2
trim_trailing_whitespace = true
max_line_length = 80
# Plain-text files use wider tab indentation.
[*.txt]
indent_style = tab
indent_size = 4
# Trailing whitespace is significant in diffs and Markdown (hard line breaks).
[*.{diff,md}]
trim_trailing_whitespace = false

31
.gitattributes vendored Normal file
View File

@@ -0,0 +1,31 @@
##########################################################################
# The .gitattributes file tells git how to handle line endings,
# how to recognize and diff different file types, specifies merge
# strategies, content filters(?) to run on commit/checkout, etc.
# See more:
# https://git-scm.com/book/en/v2/Customizing-Git-Git-Attributes
##########################################################################
# Set the default behavior, in case people don't have core.autocrlf set.
* text=auto eol=lf
# Scripts
*.sh text eol=lf
# PowerShell scripts run on the Windows host — keep CRLF there.
*.ps1 text eol=crlf
# Docs
*.md text diff=markdown
*.txt text
# Config
.editorconfig text
*.env text
.gitattributes text
*.yml text
*.yaml text
# Ignore files (.gitignore, .dockerignore, ...)
*.*ignore text

23
.gitignore vendored Normal file
View File

@@ -0,0 +1,23 @@
# local env-var files (instance-env, *_server-env, ...) hold secrets
*env
# rclone remote credentials
rclone.conf
# SSH key pair
*.key
*.pub
# authelia secrets are transferred manually, never committed
authelia/secrets/*
!authelia/secrets/README.md
# per-client wireguard configs
wg/connections/*.conf
# NOTE(review): unlike authelia/secrets above, this README *is* ignored
# (no '!') — confirm that is intentional
wg/connections/README.md
wg/all_proxied/*.ps1
!wg/all_proxied/*copy.ps1
# ghost credentials/config contain secrets; '* copy.*' variants are tracked
ghost_server-credentials.exp
ghost_server-config.production.json
# lel
file_transfers.ps1
freshStart.ps1
windows.md

1
.shellcheckrc Normal file
View File

@@ -0,0 +1 @@
# Let shellcheck follow the 'shellcheck source=' directives in the scripts,
# which point at env files that live outside the script being checked.
external-sources=true

176
README.md Normal file
View File

@@ -0,0 +1,176 @@
# The Playground
---
## Blueprint usage
**_take a deep breath, clear your mind, and open Spotify_**
---
### Pre-requisite #0: A fresh start
#### Fill `*-env` for all (fml)
```bash
########################
# instance-env
export BACKUP_BUCKET='b2:the-bucket-name-goes-here' # Backblaze
export NOTIF_URL='https://ntfy.sh/the-topic-name-goes-here' # ntfy
export BASE_DOMAIN='knravish.me'
########################
# Some common stuff
export BUCKET_PATH="${BACKUP_BUCKET}/path/to/backup/in/object/storage"
export VOLUME_PATH="${HOME}/${USER}-data" # or wherever your service's data is
export PORT=<whatever> # your service's webserver
# PUID and PGID for running containers as non-root
PUID=$(id -u "$USER")
export PUID
PGID=$(id -g "$USER")
export PGID
########################
# Application specific stuff
# Just look it up bruh I can't be arsed
########################
# ok but WireGuard is a PITA
export UDP_PORT=<whatever>
export GUI_PORT=<whatever>
# shellcheck disable=SC2016
export PASSWORD_HASH=<whatever>
export WG_HOST="vpn.${BASE_DOMAIN}"
# hacky?
WG_DEVICE=$(ip route get 8.8.8.8 2>&1 | awk '{ printf "%s",$5;}')
export WG_DEVICE
```
### Then
- run `file_transfers.ps1` (assuming Windows host for now)
- update DNS records as needed
### Pre-requisite #1: Ports in VPC/VCN firewall rules
- allow all ICMP traffic for pinging (already open?)
- list of active listeners
- `*` - equivalent to `0.0.0.0, [::]`
- `%lo` - loopback
- `enp0s6` - name of the Internet-facing gateway interface of the host
- `Forwarded` - whether the port is open in the host's firewall (for a VPS, whether it is open in the virtual network's security rules)
| Address | Port | Protocol | Desc. | Forwarded? |
| --------------- | ----- | -------- | ------------------------------------------ | ---------- |
| | | ICMP | All ICMP traffic | O |
| \* | 22 | TCP | SSH | O |
| 127.0.0.53%lo | 53 | TCP, UDP | systemd-resolved (stub? vestigial?) | X |
| 10.0.0.3%enp0s6 | 68 | UDP | DHCP | X |
| \* | 80 | TCP | Nginx (HTTP) | O |
| \* | 443 | TCP | Nginx (HTTPS) | O |
| 127.0.0.1 | 2368 | TCP | Ghost blog | X |
| 127.0.0.1 | 3456 | TCP | Vikunja | X |
| 127.0.0.1 | 5006 | TCP | Actual Budget | X |
| 127.0.0.1 | 5100 | TCP | Password Pusher (pwpush) | X |
| 127.0.0.1 | 8080 | TCP | Shlink | X |
| 127.0.0.1 | 9001 | TCP | Spotify Manager (that's us!) | X |
| 127.0.0.1 | 8081 | TCP | Stirling-PDF | X |
| 127.0.0.1 | 9091 | TCP | Authelia | X |
| 127.0.0.1 | 8384 | TCP | Syncthing (web GUI) | X |
| \* | 21027 | UDP | Syncthing (discovery broadcasting) | O |
| \* | 22000 | TCP, UDP | Syncthing (sync protocol; UDP is for QUIC) | O |
| \* | 25565 | TCP | Minecraft server - Java edition, 1.20.4 | O |
| \* | 51820 | UDP | WireGuard (VPN tunnel) | O |
| 127.0.0.1 | 51821 | TCP | WireGuard (web GUI) | X |
| 127.0.0.1 | 5230 | TCP | Usememos | X |
| 127.0.0.1 | 3000 | TCP | Homepage | X |
| \* | 30000 | TCP | Foundry VTT | X |
| 127.0.0.1 | 3001 | TCP | Gitea | X |
### Pre-requisite #2: Config the master script
- start with `instance-setup`
- reevaluate sudo perms... too many permissions have been granted to too many things :\)
- ensure ufw is disabled
- (Oracle VPS only) open iptables to all (`-I INPUT -j ACCEPT` or something)
- maybe use new pro token
- check email address too
---
## Applications
### Authelia
### Actual
- PWA on mobile!
### Ghost
- requires checks for ghost, cli, and node version updates
### Minecraft
- version-locked 1.20.4
- backup of everything, including JAR file
### Password Pusher
### Shlink
- managed on [shlink.io webapp](https://app.shlink.io)
- API key for GUI management, else run command in container
### Spotify Manager
- yippee!
- be conservative with dep. updates
### Stirling-PDF
- guest creds are `'guest':'temppass3'`
### Wireguard/wg-easy
- access VPS services on its `10.0.0.3/24` address
## Tooling and config
### bash
- place new aliases in `/etc/skel` file as well
- do not place non-sensitive stuff in `/etc/environment`
- `cp -pr` for recursive copying and without changing mode or access time
### nginx
- current practice - place configs in `conf.d`, change extension to not end in `.conf` for disabled sites
- old practice - `sites-enabled` soft links to `sites-available` files as needed
- serving some temporary files to share from /var/www/tmpfiles
- the build with added modules is fked up, ignore
### rclone
- config is for Backblaze B2, 10GB total
- always log!!! and notify!!!
### systemd
- `WantedBy` should be
- `default.target` for user services
- `multi-user.target` for system services
### cron
- cron doesn't get the same env as a normal login/shell, so give it a minimal set of vars
- set `USER` at the start of every user crontab
- set `XDG_RUNTIME_DIR` and `DBUS_SESSION_BUS_ADDRESS` for users that run systemd user services
- stagger cronjobs to avoid resource contention

40
actual_server-backup Normal file
View File

@@ -0,0 +1,40 @@
#!/bin/bash
# Back up the Actual Budget data volume to object storage and notify via ntfy.
# Vars (VOLUME_PATH, BUCKET_PATH, NOTIF_URL) come from the sourced env file.
# Runs from cron; all output is appended to a monthly log file.
# shellcheck source=actual_server-env
. "${HOME}"/"${USER}"-env
mkdir -p "${HOME}"/backup_logs
logFile=${HOME}/backup_logs/$(date +%y_%m).log
{
  echo -e "\n[+] actual backup\n"
  mkdir -p /tmp/"${USER}"-backup/{user,server}-files
  # Stop the container so the copy sees a consistent on-disk state.
  sudo docker compose -f "${HOME}"/"${USER}"-compose.yaml stop
  cp -pr "${VOLUME_PATH}"/* /tmp/"${USER}"-backup
  sudo docker compose -f "${HOME}"/"${USER}"-compose.yaml start
  # Test the upload directly instead of inspecting $? afterwards (SC2181);
  # matches the pattern used by the other *-backup scripts.
  if ! rclone copy /tmp/"${USER}"-backup "${BUCKET_PATH}" -v; then
    curl -Ss \
      -H "Title: Actual Server" \
      -H "Priority: 3" \
      -H "Tags: warning,backup" \
      -d "Backup not completed" \
      "${NOTIF_URL}"
    rm -r /tmp/"${USER}"-backup
    exit 1
  fi
  curl -Ss \
    -H "Title: Actual Server" \
    -H "Priority: 2" \
    -H "Tags: heavy_check_mark,backup" \
    -d "Backup completed" \
    "${NOTIF_URL}"
  rm -r /tmp/"${USER}"-backup
} &>>"$logFile"

View File

@@ -0,0 +1,26 @@
---
services:
  actual:
    image: ghcr.io/actualbudget/actual
    container_name: actual
    pull_policy: always
    restart: unless-stopped
    ports:
      # Bind to loopback only; nginx terminates TLS and proxies in.
      - 127.0.0.1:${PORT}:5006
    deploy:
      resources:
        limits:
          memory: 2048M
    volumes:
      - type: bind
        source: ${VOLUME_PATH}
        target: /data
        bind:
          create_host_path: true
    # Run as the unprivileged host user (PUID/PGID exported by the env file).
    user: ${PUID}:${PGID}
    healthcheck:
      test: ['CMD-SHELL', 'node src/scripts/health-check.js']
      interval: 60s
      timeout: 10s
      retries: 3
      start_period: 20s

2
actual_server-cronjob Normal file
View File

@@ -0,0 +1,2 @@
# m h dom mon dow — daily backup at 10:00, image update every Tuesday at 11:00.
# Other *_server crontabs are staggered by one minute to avoid contention.
0 10 * * * /home/actual_server/actual_server-backup
0 11 * * 2 /home/actual_server/actual_server-update

25
actual_server-setup Normal file
View File

@@ -0,0 +1,25 @@
#!/bin/bash
# One-time setup for the Actual Budget container: render the compose file,
# start it once so DB migrations run, then replace the freshly created data
# dir with the latest backup from object storage.
# shellcheck source=actual_server-env
. "${HOME}"/"${USER}"-env
echo -e "\n[+] setting up actual\n\n-------\n"
mkdir -p "${VOLUME_PATH}"
# Render ${VARS} from the env file into the compose template.
envsubst <"${HOME}"/"${USER}"-compose_template.yaml >"${HOME}"/"${USER}"-compose.yaml
sudo docker compose -f "${HOME}"/"${USER}"-compose.yaml up -d
echo "[/] waiting for migrations to run..."
sleep 10 # wait for migrations to run
echo "[+] restoring backup data"
sudo docker compose -f "${HOME}"/"${USER}"-compose.yaml stop
# :? aborts if VOLUME_PATH is unset/empty so this can never expand to 'rm -r /*'.
rm -r "${VOLUME_PATH:?}"/*
rclone copy "${BUCKET_PATH}" "${VOLUME_PATH}" -v
sudo docker compose -f "${HOME}"/"${USER}"-compose.yaml start

14
actual_server-teardown Normal file
View File

@@ -0,0 +1,14 @@
#!/bin/bash
# Tear down the actual_server user and every trace of the application.
# DESTRUCTIVE: removes the compose stack (with volumes), the account, its
# crontab, and every file it owns. Quoting added per SC2086 — unquoted
# expansions in destructive sudo commands are not acceptable.
username=actual_server
# application
sudo docker compose -f /home/"${username}"/"${username}"-compose.yaml down -v
# Capture the numeric uid before the account is deleted; the find below
# needs it once the name no longer resolves.
uid_num=$(id -u "$username")
sudo killall -9 -v -g -u "$username"
sudo crontab -r -u "$username"
sudo deluser --remove-all-files "$username"
# clean-up: anything left that is still owned by the (now removed) uid
sudo find / -user "$uid_num" -delete

11
actual_server-update Normal file
View File

@@ -0,0 +1,11 @@
#!/bin/bash
# Pull the newest Actual image, recreate the stack, then prune old images.
# Runs from cron; everything is appended to a monthly log file.
mkdir -p "${HOME}"/update_logs
logFile=${HOME}/update_logs/$(date +%y_%m).log
{
  echo -e "\n[+] updating actual\n"
  sudo docker compose -f "${HOME}"/"${USER}"-compose.yaml pull &&
    sudo docker compose -f "${HOME}"/"${USER}"-compose.yaml up -d --always-recreate-deps --remove-orphans &&
    sudo docker image prune -af # -f already skips the prompt; 'yes |' was redundant
} &>>"$logFile"

View File

@@ -0,0 +1,17 @@
server {
    server_name api.spotify-manager.knravish.me;
    location / {
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header Host $http_host;
        # Spotify Manager backend (see README port table).
        proxy_pass http://127.0.0.1:9001;
        proxy_redirect off;
        # CORS must be set on the *response* to the browser; the previous
        # 'proxy_set_header Access-Control-Allow-Origin *' only added a
        # request header sent upstream and never reached the client.
        add_header Access-Control-Allow-Origin *;
        proxy_read_timeout 600s;
        proxy_send_timeout 600s;
    }
    listen 80;
}

21
auth.knravish.me.conf Normal file
View File

@@ -0,0 +1,21 @@
server {
    # The Authelia portal itself; everything proxies to the container on 9091.
    server_name auth.knravish.me;
    index index.html index.htm;
    set $upstream http://127.0.0.1:9091;
    location / {
        include /etc/nginx/snippets/proxy.conf;
        proxy_pass $upstream;
    }
    # Verification endpoint used by auth_request subrequests.
    location = /api/verify {
        proxy_pass $upstream;
    }
    # Authz endpoints used by the 'AuthRequest' implementation.
    location /api/authz/ {
        proxy_pass $upstream;
    }
    listen 80;
}

View File

@@ -0,0 +1,32 @@
## Authelia auth_request snippet — include inside each protected 'location'.
## Send a subrequest to Authelia to verify if the user is authenticated and has permission to access the resource.
auth_request /internal/authelia/authz;
## Save the upstream metadata response headers from Authelia to variables.
auth_request_set $user $upstream_http_remote_user;
auth_request_set $groups $upstream_http_remote_groups;
auth_request_set $name $upstream_http_remote_name;
auth_request_set $email $upstream_http_remote_email;
## Inject the metadata response headers from the variables into the request made to the backend.
proxy_set_header Remote-User $user;
proxy_set_header Remote-Groups $groups;
proxy_set_header Remote-Email $email;
proxy_set_header Remote-Name $name;
## Configure the redirection when the authz failure occurs. Lines starting with 'Modern Method' and 'Legacy Method'
## should be commented / uncommented as pairs. The modern method uses the session cookies configuration's authelia_url
## value to determine the redirection URL here. It's much simpler and easily compatible with the multi-cookie domain setup.
## Modern Method: Set the $redirection_url to the Location header of the response to the Authz endpoint.
auth_request_set $redirection_url $upstream_http_location;
## Modern Method: When there is a 401 response code from the authz endpoint redirect to the $redirection_url.
error_page 401 =302 $redirection_url;
## Legacy Method: Set $target_url to the original requested URL.
## This requires http_set_misc module, replace 'set_escape_uri' with 'set' if you don't have this module.
# set_escape_uri $target_url $scheme://$http_host$request_uri;
## Legacy Method: When there is a 401 response code from the authz endpoint redirect to the portal with the 'rd'
## URL parameter set to $target_url. This requires users update 'auth.knravish.me/' with their external authelia URL.
# error_page 401 =302 https://auth.knravish.me/?rd=$target_url;

View File

@@ -0,0 +1,32 @@
## Authelia authz endpoint — include once per protected 'server' block.
set $upstream_authelia http://127.0.0.1:9091/api/authz/auth-request;
## Virtual endpoint created by nginx to forward auth requests.
location /internal/authelia/authz {
    ## Essential Proxy Configuration
    internal;
    proxy_pass $upstream_authelia;
    ## Headers
    ## The headers starting with X-* are required.
    proxy_set_header X-Original-Method $request_method;
    proxy_set_header X-Original-URL $scheme://$http_host$request_uri;
    proxy_set_header X-Forwarded-For $remote_addr;
    ## Strip the body: only the request metadata matters for the authz check.
    proxy_set_header Content-Length "";
    proxy_set_header Connection "";
    ## Basic Proxy Configuration
    proxy_pass_request_body off;
    proxy_next_upstream error timeout invalid_header http_500 http_502 http_503; # Timeout if the real server is dead
    proxy_redirect http:// $scheme://;
    proxy_http_version 1.1;
    ## Never serve a cached authz decision for a logged-in session.
    proxy_cache_bypass $cookie_session;
    proxy_no_cache $cookie_session;
    proxy_buffers 4 32k;
    client_body_buffer_size 128k;
    ## Advanced Proxy Configuration
    send_timeout 5m;
    proxy_read_timeout 240;
    proxy_send_timeout 240;
    proxy_connect_timeout 240;
}

View File

@@ -0,0 +1,37 @@
## Common reverse-proxy settings — include inside each proxied 'location'.
## The only custom header to be added for our uses. CORS must be set on the
## *response* with add_header; the previous 'proxy_set_header
## Access-Control-Allow-Origin *' only added a request header sent upstream
## and never reached the browser.
add_header Access-Control-Allow-Origin *;
## Headers
proxy_set_header Host $host;
proxy_set_header X-Original-URL $scheme://$http_host$request_uri;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header X-Forwarded-Host $http_host;
proxy_set_header X-Forwarded-URI $request_uri;
proxy_set_header X-Forwarded-Ssl on;
proxy_set_header X-Forwarded-For $remote_addr;
proxy_set_header X-Real-IP $remote_addr;
## Basic Proxy Configuration
client_body_buffer_size 128k;
proxy_next_upstream error timeout invalid_header http_500 http_502 http_503; ## Timeout if the real server is dead.
proxy_redirect http:// $scheme://;
proxy_http_version 1.1;
proxy_cache_bypass $cookie_session;
proxy_no_cache $cookie_session;
proxy_buffers 64 256k;
## Trusted Proxies Configuration
## Please read the following documentation before configuring this:
## https://www.authelia.com/integration/proxies/nginx/#trusted-proxies
# set_real_ip_from 10.0.0.0/8;
# set_real_ip_from 172.16.0.0/12;
# set_real_ip_from 192.168.0.0/16;
# set_real_ip_from fc00::/7;
real_ip_header X-Forwarded-For;
real_ip_recursive on;
## Advanced Proxy Configuration
send_timeout 5m;
proxy_read_timeout 360;
proxy_send_timeout 360;
proxy_connect_timeout 360;

View File

@@ -0,0 +1,3 @@
## WebSocket Example
## Include inside a location that must proxy WebSocket upgrades: forwards the
## client's Upgrade handshake and forces "Connection: upgrade" (HTTP/1.1).
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";

View File

@@ -0,0 +1,6 @@
# Secrets
- enc_key
- jwt_sec
- ses_sec
- smtp_pass

39
authelia_server-backup Normal file
View File

@@ -0,0 +1,39 @@
#!/bin/bash
# Back up the Authelia config volume to object storage and notify via ntfy.
# Vars (VOLUME_PATH, BUCKET_PATH, NOTIF_URL) come from the sourced env file.
# shellcheck source=authelia_server-env
. "${HOME}"/"${USER}"-env
mkdir -p "${HOME}"/backup_logs
# One log per month, appended to on every run.
logFile=${HOME}/backup_logs/$(date +%y_%m).log
{
echo -e "\n[+] authelia backup\n"
mkdir -p /tmp/"${USER}"-backup
# Stop the stack so the sqlite db and config are copied in a consistent state.
sudo docker compose -f "${HOME}"/"${USER}"-compose.yaml stop
cp -pr "${VOLUME_PATH}"/* /tmp/"${USER}"-backup
sudo docker compose -f "${HOME}"/"${USER}"-compose.yaml start
# Upload failure: warn (priority 3) and exit non-zero for cron visibility.
if ! rclone copy /tmp/"${USER}"-backup "${BUCKET_PATH}" -v; then
curl -Ss \
-H "Title: Authelia" \
-H "Priority: 3" \
-H "Tags: warning,backup" \
-d "Backup not completed" \
"${NOTIF_URL}"
rm -r /tmp/"${USER}"-backup
exit 1
fi
curl -Ss \
-H "Title: Authelia" \
-H "Priority: 2" \
-H "Tags: heavy_check_mark,backup" \
-d "Backup completed" \
"${NOTIF_URL}"
rm -r /tmp/"${USER}"-backup
} &>>"$logFile"

View File

@@ -0,0 +1,53 @@
---
# Secret files are mounted into the container at /run/secrets/<name>.
secrets:
  JWT_SECRET:
    file: '${SECRETS_PATH}/jwt_sec'
  SESSION_SECRET:
    file: '${SECRETS_PATH}/ses_sec'
  STORAGE_ENCRYPTION_KEY:
    file: '${SECRETS_PATH}/enc_key'
  SMTP_PASSWORD:
    file: '${SECRETS_PATH}/smtp_pass'
services:
  redis:
    container_name: 'authelia-redis'
    image: redis:alpine
    # Snapshot every 60s if at least one key changed (session durability).
    command: redis-server --save 60 1 --loglevel warning
    pull_policy: always
    restart: unless-stopped
    networks:
      authelia_server_network:
        aliases: []
    volumes:
      - ${REDIS_PATH}:/data
    user: ${PUID}:${PGID}
  authelia:
    container_name: 'authelia'
    image: authelia/authelia
    pull_policy: always
    restart: unless-stopped
    networks:
      authelia_server_network:
        aliases: []
    ports:
      # Loopback only; nginx fronts it at auth.knravish.me.
      - '127.0.0.1:9091:9091'
    secrets:
      - 'JWT_SECRET'
      - 'SESSION_SECRET'
      - 'STORAGE_ENCRYPTION_KEY'
      - 'SMTP_PASSWORD'
    environment:
      # Authelia reads each secret from its *_FILE path, not from the env value.
      AUTHELIA_IDENTITY_VALIDATION_RESET_PASSWORD_JWT_SECRET_FILE: '/run/secrets/JWT_SECRET'
      AUTHELIA_SESSION_SECRET_FILE: '/run/secrets/SESSION_SECRET'
      AUTHELIA_STORAGE_ENCRYPTION_KEY_FILE: '/run/secrets/STORAGE_ENCRYPTION_KEY'
      AUTHELIA_NOTIFIER_SMTP_PASSWORD_FILE: '/run/secrets/SMTP_PASSWORD'
      PUID: ${PUID}
      PGID: ${PGID}
    volumes:
      - ${VOLUME_PATH}:/config
# Created by the setup script via 'docker network create', hence external.
networks:
  authelia_server_network:
    external: true
    name: 'authelia_server_network'

View File

@@ -0,0 +1,164 @@
# Authelia main configuration; copied to /config/configuration.yml by the
# setup script. Secrets are NOT stored here — they are injected through the
# AUTHELIA_*_FILE env vars in the compose file; the commented 'SECRET'
# stanzas below only mark where they would otherwise live.
authentication_backend:
  file:
    # users.yaml lives in the mounted /config volume
    path: /config/users.yaml
    watch: true
access_control:
  default_policy: deny
  networks:
    - name: 'internal'
      networks:
        - '10.0.0.0/8'
        - '172.16.0.0/12'
        - '192.168.0.0/18'
  rules:
    # go from most to least specific
    ###### bypasses ######
    # CORS preflight
    - domain: '*.knravish.me'
      methods: 'OPTIONS'
      policy: 'bypass'
    ### status endpoints ###
    # https://auth.knravish.me/api/health - status
    - domain: 'auth.knravish.me'
      resources: '^\/api\/health$'
      policy: 'bypass'
    # https://budget.knravish.me/info - info
    - domain: 'budget.knravish.me'
      resources: '^\/info$'
      policy: 'bypass'
    # https://blog.knravish.me/ghost/api/admin/site - info
    - domain: 'blog.knravish.me'
      resources: '^\/ghost\/api\/admin\/site$'
      policy: 'bypass'
    # # https://git.knravish.me/api/healthz - health
    # - domain: 'git.knravish.me'
    #   resources: '^\/api\/healthz$'
    #   policy: 'bypass'
    # https://notes.knravish.me/api/v1/workspace/profile - info
    - domain: 'notes.knravish.me'
      resources: '^\/api\/v1\/workspace\/profile$'
      policy: 'bypass'
    # https://pdf.knravish.me/api/v1/info/status - status
    - domain: 'pdf.knravish.me'
      resources: '^\/api\/v1\/info\/status$'
      policy: 'bypass'
    # https://planning.knravish.me/manifest.webmanifest - PWA
    # for the homepage widget
    # https://planning.knravish.me/api/v1/projects
    # https://planning.knravish.me/api/v1/tasks/all?filter=done%3Dfalse&sort_by=due_date
    - domain: 'planning.knravish.me'
      resources:
        - '^\/manifest.webmanifest$'
        - '^\/api\/v1\/projects$'
        - '^\/api\/v1\/tasks\/all\?filter=done%3Dfalse&sort_by=due_date$'
      policy: 'bypass'
    # https://recipes.knravish.me/api/app/about - status
    - domain: 'recipes.knravish.me'
      resources:
        - '^\/api\/households\/statistics$' # homepage widget
        - '^\/api\/app\/about$'
      policy: 'bypass'
    # https://syncthing.knravish.me/rest/noauth/health
    - domain: 'syncthing.knravish.me'
      resources: '^\/rest\/noauth\/health$'
      policy: 'bypass'
    # https://vpn.knravish.me/api/release - status
    - domain: 'vpn.knravish.me'
      resources:
        - '^\/api\/wireguard\/client$' # homepage widget
        - '^\/api\/release$'
      policy: 'bypass'
    # https://vtt.knravish.me/api/status
    - domain: 'vtt.knravish.me'
      resources: '^\/api\/status$'
      policy: 'bypass'
    ###### 1FA ######
    # sensitive data - only self
    - domain:
        - 'budget.knravish.me'
      subject:
        - 'user:self'
      policy: 'one_factor'
    # sensitive admin - only self
    - domain:
        - 'vpn.knravish.me'
        - 'syncthing.knravish.me'
      subject:
        - 'user:self'
      policy: 'one_factor'
    # ghost blog admin
    - domain: 'blog.knravish.me'
      resources: '^\/ghost([\/?].*)?$'
      subject:
        - 'group:admin'
      policy: 'one_factor'
    # foundry VTT
    - domain: 'vtt.knravish.me'
      subject:
        - 'group:admin'
        - 'group:foundry'
      policy: 'one_factor'
    # mealie recipes
    - domain: 'recipes.knravish.me'
      subject:
        - 'group:admin'
        - 'group:mealie'
      policy: 'one_factor'
    ###### 2FA ######
    # master bypass - super_admin (currently only self)
    - domain: '*.knravish.me'
      subject:
        - 'group:super_admin'
      policy: 'two_factor'
password_policy:
  zxcvbn:
    enabled: true
# SECRET
# identity_validation:
#   reset_password:
#     jwt_secret: ''
session:
  # SECRET
  # secret: ''
  redis:
    # container name on the shared authelia_server_network
    host: 'authelia-redis'
  inactivity: '1w'
  expiration: '2w'
  remember_me: '3M'
  cookies:
    - domain: 'knravish.me'
      authelia_url: 'https://auth.knravish.me'
storage:
  # SECRET
  # encryption_key: ''
  local:
    path: '/config/db.sqlite3'
notifier:
  smtp:
    address: 'smtp://smtp.purelymail.com:587'
    timeout: '15s'
    username: 'noreply@knravish.me'
    # SECRET
    # password: ''
    sender: 'Authelia <noreply@knravish.me>'
    identifier: 'knravish.me'
    subject: '[Authelia] {title}'
theme: 'auto'
server:
  endpoints:
    authz:
      auth-request:
        # nginx auth_request-compatible endpoint; see the nginx snippets
        implementation: 'AuthRequest'
        authn_strategies:
          - name: 'HeaderAuthorization'
            schemes:
              - 'Basic'
          - name: 'CookieSession'

2
authelia_server-cronjob Normal file
View File

@@ -0,0 +1,2 @@
# Staggered one minute after actual_server's jobs to avoid resource contention.
1 10 * * * /home/authelia_server/authelia_server-backup
1 11 * * 2 /home/authelia_server/authelia_server-update

35
authelia_server-setup Normal file
View File

@@ -0,0 +1,35 @@
#!/bin/bash
# One-time setup for Authelia: secrets dir, compose render, docker network,
# first start, then restore config/db from the latest backup.
# shellcheck source=authelia_server-env
. "${HOME}"/"${USER}"-env
echo -e "\n[+] setting up authelia\n\n-------\n"
echo -e "\n[!] not really automated cuz of the nginx and secrets stuff\n"
mkdir -p "${REDIS_PATH}"
mkdir -p "${VOLUME_PATH}"
mkdir -p "${SECRETS_PATH}"
chown -R "${USER}":"${USER}" "${SECRETS_PATH}"
# u=rwX keeps directories traversable (700) while files stay 600;
# the previous blanket 'chmod -R 600' stripped the execute bit from the
# directory itself, making the secret files unreachable.
chmod -R u=rwX,go= "${SECRETS_PATH}"
echo -e "\n[Q] STOP! scp the secrets if you haven't\n"
sleep 5
echo -e "\n[!] hope you've copied the secrets MANUALLY\n"
cp "${HOME}"/"${USER}"-configuration.yaml "${VOLUME_PATH}"/configuration.yml
envsubst <"${HOME}"/"${USER}"-compose_template.yaml >"${HOME}"/"${USER}"-compose.yaml
sudo docker network create -d bridge "${USER}"_network
sudo docker compose -f "${HOME}"/"${USER}"-compose.yaml up -d
echo "[+] restoring from backup..."
sudo docker compose -f "${HOME}"/"${USER}"-compose.yaml stop
# :? aborts if VOLUME_PATH is unset/empty (guards the rm, matching the other
# *-setup scripts); rclone recreates the directory on restore.
rm -rf "${VOLUME_PATH:?}"
rclone copy "${BUCKET_PATH}" "${VOLUME_PATH}" -v
sudo docker compose -f "${HOME}"/"${USER}"-compose.yaml start

1
authelia_server-teardown Normal file
View File

@@ -0,0 +1 @@
Teardown is not automated for this service: the nginx site configs that include the Authelia snippets must be edited by hand before removing the user and containers.

11
authelia_server-update Normal file
View File

@@ -0,0 +1,11 @@
#!/bin/bash
# Pull the newest Authelia/redis images, recreate the stack, prune old images.
mkdir -p "${HOME}"/update_logs
logFile=${HOME}/update_logs/$(date +%y_%m).log
{
  echo -e "\n[+] updating authelia\n"
  sudo docker compose -f "${HOME}"/"${USER}"-compose.yaml pull &&
    sudo docker compose -f "${HOME}"/"${USER}"-compose.yaml up -d --always-recreate-deps --remove-orphans &&
    sudo docker image prune -af # -f already skips the prompt; 'yes |' was redundant
} &>>"$logFile"

16
budget.knravish.me.conf Normal file
View File

@@ -0,0 +1,16 @@
server {
    server_name budget.knravish.me;
    index index.html index.htm;
    # Defines the internal /internal/authelia/authz endpoint for auth_request.
    include /etc/nginx/snippets/authelia-location.conf;
    set $upstream http://127.0.0.1:5006;
    location / {
        include /etc/nginx/snippets/proxy.conf;
        # Every request is verified against Authelia before being proxied.
        include /etc/nginx/snippets/authelia-authrequest.conf;
        proxy_pass $upstream;
    }
    listen 80;
}

18
dash.knravish.me.conf Normal file
View File

@@ -0,0 +1,18 @@
server {
    server_name dash.knravish.me;
    index index.html index.htm;
    location / {
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header Host $http_host;
        # Homepage dashboard (see README port table).
        proxy_pass http://127.0.0.1:3000;
        proxy_redirect off;
        # CORS belongs on the response (add_header); the previous
        # proxy_set_header variant only modified the request sent upstream
        # and never reached the browser.
        add_header Access-Control-Allow-Origin *;
        proxy_read_timeout 600s;
        proxy_send_timeout 600s;
    }
    listen 80;
}

150
file_transfers copy.ps1 Normal file
View File

@@ -0,0 +1,150 @@
# Copies deployment artifacts from this repo to the remote host over scp.
# Run from the Windows host; fill in $remote_home_folder and $key first.
$dirname = $PSScriptRoot
$remote_home_folder = "your_default_user@1.2.3.4"
$key = "your.private.key"
# Copy a single repo file to the remote host.
#   FileName - path relative to this script's directory
#   DestPath - remote path relative to the user's home ("" = home itself)
function TransferFile {
    param (
        [Parameter(Mandatory)]
        [string]$FileName,
        [ValidateNotNullOrEmpty()]
        [string]$DestPath = ""
    )
    scp -i "${dirname}\${key}" "${dirname}\${FileName}" "${remote_home_folder}:${DestPath}"
    # Surface per-file failures instead of letting them scroll past silently.
    if ($LASTEXITCODE -ne 0) {
        Write-Warning "transfer failed: ${FileName}"
    }
}
# Manifest of every artifact pushed to the host. Commented-out TransferFile
# lines are files that are intentionally not transferred (not written yet,
# or managed another way).
# backups
TransferFile "actual_server-backup"
TransferFile "authelia_server-backup"
TransferFile "foundry_server-backup"
TransferFile "ghost_server-credentials.exp"
TransferFile "ghost_server-backup"
TransferFile "homepage_server-backup"
TransferFile "mealie_server-backup"
TransferFile "memos_server-backup"
TransferFile "minecraft_server-backup"
# TransferFile "stirling_server-backup"
TransferFile "syncthing_server-backup"
TransferFile "wg_server-backup"
# updates
TransferFile "actual_server-update"
TransferFile "authelia_server-update"
TransferFile "foundry_server-update"
# TransferFile "ghost_server-update"
TransferFile "homepage_server-update"
TransferFile "mealie_server-update"
TransferFile "memos_server-update"
# TransferFile "minecraft_server-update"
TransferFile "stirling_server-update"
# TransferFile "syncthing_server-update"
TransferFile "wg_server-update"
# cronjobs
TransferFile "ubuntu-cronjob"
TransferFile "actual_server-cronjob"
TransferFile "authelia_server-cronjob"
TransferFile "foundry_server-cronjob"
TransferFile "ghost_server-cronjob"
TransferFile "homepage_server-cronjob"
TransferFile "mealie_server-cronjob"
TransferFile "memos_server-cronjob"
TransferFile "minecraft_server-cronjob"
TransferFile "syncthing_server-cronjob"
TransferFile "wg_server-cronjob"
# env vars
TransferFile "instance-env"
TransferFile "actual_server-env"
TransferFile "authelia_server-env"
TransferFile "foundry_server-env"
TransferFile "ghost_server-env"
TransferFile "homepage_server-env"
TransferFile "mealie_server-env"
TransferFile "memos_server-env"
TransferFile "minecraft_server-env"
TransferFile "pwpush_server-env"
# TransferFile "shlink_server-env"
TransferFile "stirling_server-env"
TransferFile "syncthing_server-env"
TransferFile "wg_server-env"
# config files
## misc.
TransferFile "rclone.conf" ".config/rclone"
TransferFile "authelia_server-configuration.yaml"
TransferFile "ghost_server-config.production.json"
TransferFile "pwpush_server-settings.yaml"
### systemd
TransferFile "minecraft_server-start.service"
TransferFile "minecraft_server-start.socket"
## nginx configs
### authelia nginx snippets
TransferFile "authelia\nginx_snippets\authelia-authrequest.conf"
TransferFile "authelia\nginx_snippets\authelia-location.conf"
TransferFile "authelia\nginx_snippets\proxy.conf"
TransferFile "authelia\nginx_snippets\websocket.conf"
### sites
TransferFile "auth.knravish.me.conf"
TransferFile "budget.knravish.me.conf"
TransferFile "dash.knravish.me.conf"
TransferFile "lnk.knravish.me.conf"
TransferFile "notes.knravish.me.conf"
TransferFile "paste.knravish.me.conf"
TransferFile "pdf.knravish.me.conf"
TransferFile "recipes.knravish.me.conf"
TransferFile "syncthing.knravish.me.conf"
TransferFile "vpn.knravish.me.conf"
TransferFile "vtt.knravish.me.conf"
# docker-compose files
TransferFile "actual_server-compose_template.yaml"
TransferFile "authelia_server-compose_template.yaml"
TransferFile "homepage_server-compose_template.yaml"
TransferFile "mealie_server-compose_template.yaml"
TransferFile "memos_server-compose_template.yaml"
TransferFile "pwpush_server-compose_template.yaml"
TransferFile "shlink_server-compose.yaml" # TransferFile "shlink_server-compose_template.yaml"
TransferFile "stirling_server-compose_template.yaml"
TransferFile "wg_server-compose_template.yaml"
# setup scripts
TransferFile "instance-setup" # run as ubuntu
TransferFile "actual_server-setup"
TransferFile "authelia_server-setup"
TransferFile "foundry_server-setup"
TransferFile "ghost_server-setup"
TransferFile "homepage_server-setup"
TransferFile "mealie_server-setup"
TransferFile "memos_server-setup"
TransferFile "minecraft_server-setup"
TransferFile "pwpush_server-setup"
TransferFile "shlink_server-setup"
TransferFile "stirling_server-setup"
TransferFile "syncthing_server-setup"
TransferFile "wg_server-setup"
# teardown scripts - run as ubuntu
TransferFile "actual_server-teardown"
TransferFile "authelia_server-teardown"
TransferFile "foundry_server-teardown"
# TransferFile "ghost_server-teardown"
TransferFile "homepage_server-teardown"
TransferFile "mealie_server-teardown"
TransferFile "memos_server-teardown"
# TransferFile "minecraft_server-teardown"
TransferFile "pwpush_server-teardown"
TransferFile "shlink_server-teardown"
TransferFile "stirling_server-teardown"
# TransferFile "syncthing_server-teardown"
TransferFile "wg_server-teardown"
# secrets
TransferFile "authelia\secrets\enc_key" "authelia_secrets"
TransferFile "authelia\secrets\jwt_sec" "authelia_secrets"
TransferFile "authelia\secrets\ses_sec" "authelia_secrets"
TransferFile "authelia\secrets\smtp_pass" "authelia_secrets"
# miscellaneous
TransferFile "ubuntu_auto_apt_upgrade"

40
foundry_server-backup Normal file
View File

@@ -0,0 +1,40 @@
#!/bin/bash
# Back up the Foundry VTT data dir to object storage and notify via ntfy.
# Vars (FOUNDRY_DATA_PATH, BUCKET_PATH, NOTIF_URL) come from the env file.
# shellcheck source=foundry_server-env
. "${HOME}"/"${USER}"-env
mkdir -p "${HOME}"/backup_logs
logFile=${HOME}/backup_logs/$(date +%y_%m).log
{
  echo -e "\n[+] foundry backup\n"
  mkdir -p /tmp/"${USER}"-backup
  # Stop the user service so world data is copied in a consistent state.
  systemctl --user stop "${USER}"-start.service
  cp -pr "${FOUNDRY_DATA_PATH}"/* /tmp/"${USER}"-backup
  systemctl --user restart "${USER}"-start.service
  # Test the upload directly instead of checking $? afterwards (SC2181);
  # matches the pattern used by the other *-backup scripts.
  if ! rclone copy /tmp/"${USER}"-backup "${BUCKET_PATH}" -v; then
    curl -Ss \
      -H "Title: Foundry VTT" \
      -H "Priority: 3" \
      -H "Tags: warning,backup" \
      -d "Backup not completed" \
      "${NOTIF_URL}"
    rm -r /tmp/"${USER}"-backup
    exit 1
  fi
  curl -Ss \
    -H "Title: Foundry VTT" \
    -H "Priority: 2" \
    -H "Tags: heavy_check_mark,backup" \
    -d "Backup completed" \
    "${NOTIF_URL}"
  rm -r /tmp/"${USER}"-backup
} &>>"$logFile"

1
foundry_server-cronjob Normal file
View File

@@ -0,0 +1 @@
# Twice-daily backup (10:02 and 22:02), staggered after the other *_server jobs.
2 10,22 * * * /home/foundry_server/foundry_server-backup

37
foundry_server-setup Normal file
View File

@@ -0,0 +1,37 @@
#!/bin/bash
# One-time setup for Foundry VTT: install node via nvm, download the Foundry
# zip from the (time-limited) URL in the env file, restore data from backup,
# then install and start the systemd user service.
# shellcheck source=foundry_server-env
. "${HOME}"/"${USER}"-env
echo -e "\n[+] setting up foundry\n\n-------\n"
echo "[+] nvm and node"
curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/v0.40.1/install.sh | bash
export NVM_DIR="$HOME/.nvm"
[ -s "$NVM_DIR/nvm.sh" ] && \. "$NVM_DIR/nvm.sh" # This loads nvm
[ -s "$NVM_DIR/bash_completion" ] && \. "$NVM_DIR/bash_completion" # This loads nvm bash_completion
nvm install --lts
nvm alias default node
echo "[+] foundry"
mkdir -p "${HOME}"/foundry
mkdir -p "${FOUNDRY_DATA_PATH}"
# Use the absolute path: the previous bare 'cd foundry' only worked when the
# script happened to be invoked from ${HOME}.
cd "${HOME}/foundry" || exit
wget -O foundryvtt.zip "${FOUNDRY_TIMED_URL}"
unzip foundryvtt.zip
rm foundryvtt.zip
echo "[+] restoring backup data"
rclone copy "${BUCKET_PATH}" "${FOUNDRY_DATA_PATH}" -v
echo "[+] setting up systemctl and starting"
mkdir -p "${HOME}"/.config/systemd/user/
cp "${HOME}"/"${USER}"-start.service "${HOME}"/.config/systemd/user/
systemctl --user daemon-reload
systemctl --user enable --now "${USER}"-start.service

View File

@@ -0,0 +1,13 @@
[Unit]
Description=Foundry VTT
After=network.target
[Service]
Type=simple
Restart=on-failure
RestartSec=1
# %h = the service user's home directory
WorkingDirectory=%h/foundry
# Sources nvm so the nvm-managed node is on PATH before launching Foundry.
# NOTE(review): ${HOME} relies on HOME being present in the user manager's
# environment — consider %h here for consistency with WorkingDirectory.
ExecStart=/bin/bash -c ". ${HOME}/.nvm/nvm.sh ; node resources/app/main.js"
[Install]
# user service: started by the per-user systemd instance at login/linger
WantedBy=default.target

15
foundry_server-teardown Normal file
View File

@@ -0,0 +1,15 @@
#!/bin/bash
# Tear down the foundry_server user: stop its user service, remove nvm's
# node, then delete the account and every file it owns. DESTRUCTIVE.
# Quoting added per SC2086.
username=foundry_server
# application — machinectl gives us a real login session for systemctl --user
sudo machinectl shell "${username}"@ /bin/bash -c "systemctl --user disable --now ${username}-start.service ; systemctl --user daemon-reload"
sudo machinectl shell "${username}"@ /bin/bash -c ". ~/.nvm/nvm.sh ; nvm deactivate ; nvm uninstall --lts"
# Capture the numeric uid before deluser removes the account.
uid_num=$(id -u "$username")
sudo killall -9 -v -g -u "$username"
sudo crontab -r -u "$username"
sudo deluser --remove-all-files "$username"
# clean-up: anything left that is still owned by the (now removed) uid
sudo find / -user "$uid_num" -delete

0
foundry_server-update Normal file
View File

1
freshStart copy.ps1 Normal file
View File

@@ -0,0 +1 @@
# Convenience wrapper: SSH into the VPS using the key stored next to this
# script. Fill in the key filename and user@host before use.
ssh -i $PSScriptRoot/your.private.key your_default_user@1.2.3.4

45
ghost_server-backup Normal file
View File

@@ -0,0 +1,45 @@
#!/bin/bash
# Nightly ghost backup: run `ghost backup` via expect (it prompts for admin
# credentials), push the resulting zip to B2, and notify through ntfy.
# All output is appended to a monthly log file.
# shellcheck source=ghost_server-env
. "${HOME}"/"${USER}"-env
mkdir -p "${HOME}"/backup_logs
logFile=${HOME}/backup_logs/$(date +%y_%m).log

# notify PRIORITY TAGS MESSAGE — push a ntfy notification (dedupes 3 copies of the same curl)
notify() {
  curl -Ss \
    -H "Title: Ghost Blog" \
    -H "Priority: ${1}" \
    -H "Tags: ${2}" \
    -d "${3}" \
    "${NOTIF_URL}"
}

# remove the local backup archive(s) ghost produced
cleanup() {
  rm -r "${BLOG_PATH}"/backup*
}

{
  echo -e "\n[+] ghost backup\n"
  cd "${BLOG_PATH}" || exit
  if ! /usr/bin/expect "${HOME}"/"${USER}"-credentials.exp; then
    notify 3 "warning,backup" "Backup not completed - ghost backup failure"
    cleanup
    exit 1
  fi
  echo "[+] local backup taken"
  if ! rclone copyto "${BLOG_PATH}"/backup*.zip "${BUCKET_PATH}" -v; then
    notify 3 "warning,backup" "Backup not completed - rclone failure"
    cleanup
    exit 1
  fi
  notify 2 "heavy_check_mark,backup" "Backup completed"
  cleanup
} &>>"$logFile"

View File

@@ -0,0 +1,38 @@
{
"url": "https://blog.knravish.me",
"server": {
"port": 2368,
"host": "127.0.0.1"
},
"database": {
"client": "mysql",
"connection": {
"host": "postgres_hostname",
"user": "postgres_username",
"password": "postgres_password",
"database": "defaultdb",
"port": <postgres_port>,
"ssl": {
"ca": "<postgres_ssl_ca_cert_rsa_contents>",
"rejectUnauthorized": true
}
}
},
"mail": {
"transport": "Direct"
},
"logging": {
"transports": [
"file",
"stdout"
]
},
"process": "systemd",
"paths": {
"contentPath": "/var/www/blog.knravish.me/content"
},
"bootstrap-socket": {
"port": 8000,
"host": "localhost"
}
}

View File

@@ -0,0 +1,14 @@
#!/usr/bin/expect
# Non-interactive driver for `ghost backup`, which prompts for the admin
# email and password. Placeholders are filled in per-deployment; this file
# holds a plaintext password, so keep it mode 600 and out of git.
set email "<email_address>"
set pw "<password>"
spawn ghost backup
expect "Ghost administrator email address"
send "$email\r"
expect "Ghost administrator password"
send "$pw\r"
# wait for the backup to finish before exiting
expect eof

1
ghost_server-cronjob Normal file
View File

@@ -0,0 +1 @@
# m h dom mon dow command — daily at 10:03
3 10 * * * /home/ghost_server/ghost_server-backup

52
ghost_server-setup Normal file
View File

@@ -0,0 +1,52 @@
#!/bin/bash
# One-time ghost blog setup: install node from NodeSource, install ghost-cli,
# run the ghost installer, then restore site content from the B2 backup.
# shellcheck source=ghost_server-env
. "${HOME}"/"${USER}"-env
email_address=hello@knravish.me
echo -e "\n[+] setting up ghost\n\n-------\n"
echo "[+] node and companions"
# ghost doesn't play well with nvm for some reason, probably because of installation location and sudo access
# Download and import the Nodesource GPG key
sudo mkdir -p /etc/apt/keyrings
curl -fsSL https://deb.nodesource.com/gpgkey/nodesource-repo.gpg.key | sudo gpg --dearmor -o /etc/apt/keyrings/nodesource.gpg
NODE_MAJOR=20 # Use a supported version
echo "deb [signed-by=/etc/apt/keyrings/nodesource.gpg] https://deb.nodesource.com/node_$NODE_MAJOR.x nodistro main" | sudo tee /etc/apt/sources.list.d/nodesource.list
sudo apt-get update
sudo apt-get install nodejs -y
echo "[+] getting ready..."
ghost_cli_ver="1.26.0"
# expansions quoted throughout (SC2086)
sudo npm i -g "ghost-cli@${ghost_cli_ver}"
sudo mkdir -p "${BLOG_PATH}"
sudo chown "${USER}":"${USER}" "${BLOG_PATH}"
sudo chmod 775 "${BLOG_PATH}"
# ghost really needs to update to newer nginx versions and conventions...
sudo mkdir -p /etc/nginx/sites-available/ /etc/nginx/sites-enabled/ /etc/nginx/snippets/
echo "[+] ooh, interactive stuff"
# currently track manually, maybe automate
ghost_ver="5.105.0"
cd "${BLOG_PATH}" && ghost install "${ghost_ver}" --no-setup
# the config template was copied in under a user-prefixed name by instance-setup
sudo cp "${HOME}"/"${USER}"-config.production.json "${BLOG_PATH}"/
sudo chown "${USER}":"${USER}" "${BLOG_PATH}"/"${USER}"-config.production.json
mv "${BLOG_PATH}"/"${USER}"-config.production.json "${BLOG_PATH}"/config.production.json
cd "${BLOG_PATH}" && ghost setup --auto --sslemail "${email_address}"
echo "[+] restoring backup data"
sudo rm -r "${BLOG_PATH}"/content/*
rclone copyto "${BUCKET_PATH}" "${BLOG_PATH}"/ghostBackup.zip
sudo unzip "${BLOG_PATH}"/ghostBackup.zip -d "${BLOG_PATH}"/content/
sudo chown -R ghost:ghost "${BLOG_PATH}"/content/
echo -e "\n-----\nIMPORTANT\n-----\n[X] modify the nginx default config file to include the sites-enabled directory\n"

18
git.knravish.me.conf Normal file
View File

@@ -0,0 +1,18 @@
# Reverse proxy for the service on localhost:3001 (gitea, per instance-setup).
server {
    server_name git.knravish.me;
    index index.html index.htm;
    location / {
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header Host $http_host;
        proxy_pass http://127.0.0.1:3001;
        proxy_redirect off;
        # add_header emits a *response* header to clients; the previous
        # proxy_set_header only sent a request header upstream, which never
        # enabled CORS for browsers.
        add_header Access-Control-Allow-Origin *;
        proxy_read_timeout 600s;
        proxy_send_timeout 600s;
    }
    # plain HTTP; certbot --nginx rewrites this block for TLS during instance-setup
    listen 80;
}

44
gitea_server-backup Normal file
View File

@@ -0,0 +1,44 @@
#!/bin/bash
# Nightly gitea backup: snapshot the config/data volumes and a pg_dumpall
# while the app container is stopped, push to B2, and notify via ntfy.
# shellcheck source=gitea_server-env
. "${HOME}"/"${USER}"-env
mkdir -p "${HOME}"/backup_logs
logFile=${HOME}/backup_logs/$(date +%y_%m).log
{
  echo -e "\n[+] gitea backup\n"
  mkdir -p /tmp/"${USER}"-backup
  # stop only the app container; postgres stays up for pg_dumpall
  sudo docker compose -f "${HOME}"/"${USER}"-compose.yaml stop gitea
  cp -pr "${VOLUME_PATH}"/config /tmp/"${USER}"-backup
  cp -pr "${VOLUME_PATH}"/data /tmp/"${USER}"-backup
  # no -t: this runs from cron with no TTY, where `-t` makes docker exec fail
  # ("the input device is not a TTY"); a TTY would also mangle the dump with
  # CR characters. -i is unnecessary since nothing is piped to stdin.
  # shellcheck disable=SC2024
  sudo docker exec -u "${PUID}:${PGID}" gitea-postgres sh -c \
    'pg_dumpall -c --if-exists -U gitea' >/tmp/"${USER}"-backup/db.out
  sudo docker compose -f "${HOME}"/"${USER}"-compose.yaml start gitea
  if ! rclone copy /tmp/"${USER}"-backup "${BUCKET_PATH}" -v; then
    curl -Ss \
      -H "Title: Gitea" \
      -H "Priority: 3" \
      -H "Tags: warning,backup" \
      -d "Backup not completed" \
      "${NOTIF_URL}"
    rm -rf /tmp/"${USER}"-backup
    exit 1
  fi
  curl -Ss \
    -H "Title: Gitea" \
    -H "Priority: 2" \
    -H "Tags: heavy_check_mark,backup" \
    -d "Backup completed" \
    "${NOTIF_URL}"
  rm -rf /tmp/"${USER}"-backup
} &>>"$logFile"

View File

@@ -0,0 +1,40 @@
---
# Gitea (rootless) + postgres. Rendered by envsubst from the *_server-env file:
# VOLUME_PATH, PORT, PUID, PGID.
services:
  gitea:
    image: docker.gitea.com/gitea:1-rootless
    container_name: gitea
    pull_policy: always
    restart: unless-stopped
    volumes:
      - ${VOLUME_PATH}/data:/var/lib/gitea
      - ${VOLUME_PATH}/config:/etc/gitea
      - /etc/timezone:/etc/timezone:ro
      - /etc/localtime:/etc/localtime:ro
    ports:
      # bound to loopback only; nginx terminates public traffic
      - 127.0.0.1:${PORT}:3000
      # - 2222:2222 # for internal SSH. unnecessary?
    environment:
      - GITEA__database__DB_TYPE=postgres
      - GITEA__database__HOST=db:5432
      - GITEA__database__NAME=gitea
      - GITEA__database__USER=gitea
      # NOTE(review): credentials are hardcoded and must stay in sync with the
      # db service below; the db is not exposed outside the compose network
      - GITEA__database__PASSWD=gitea
      - USER=git
      - USER_UID=${PUID}
      - USER_GID=${PGID}
    depends_on:
      - db
    user: ${PUID}:${PGID}
  db:
    image: postgres:16
    container_name: gitea-postgres
    pull_policy: always
    restart: unless-stopped
    environment:
      - POSTGRES_USER=gitea
      - POSTGRES_PASSWORD=gitea
      - POSTGRES_DB=gitea
    volumes:
      - ${VOLUME_PATH}/postgres:/var/lib/postgresql/data
    user: ${PUID}:${PGID}

1
gitea_server-cronjob Normal file
View File

@@ -0,0 +1 @@
# m h dom mon dow command — daily at 10:04
4 10 * * * /home/gitea_server/gitea_server-backup

33
gitea_server-setup Normal file
View File

@@ -0,0 +1,33 @@
#!/bin/bash
# Gitea setup: render the compose file, bring the stack up, then restore the
# volume contents and the database dump from the backup bucket.
echo -e "\n[+] setting up gitea\n\n-------\n"
# shellcheck source=gitea_server-env
. "${HOME}"/"${USER}"-env
mkdir -p "${VOLUME_PATH}"/{data,config,postgres}
envsubst <"${HOME}"/"${USER}"-compose_template.yaml >"${HOME}"/"${USER}"-compose.yaml
sudo docker compose -f "${HOME}"/"${USER}"-compose.yaml up -d
echo "[+] preparing to restore from backup..."
sleep 5 # give postgres a moment to come up before restoring into it
sudo docker compose -f "${HOME}"/"${USER}"-compose.yaml stop gitea
mkdir -p "${VOLUME_PATH}"/restore_files
rclone copy "${BUCKET_PATH}" "${VOLUME_PATH}"/restore_files
echo "[+] restoring from backup..."
cp -fr "${VOLUME_PATH}"/restore_files/config "${VOLUME_PATH}"
cp -fr "${VOLUME_PATH}"/restore_files/data "${VOLUME_PATH}"
# needs sudo: this script runs as gitea_server, and chown to another uid/gid
# by a non-root user always fails (every other chown here already uses sudo)
sudo chown -R "${PUID}":"${PGID}" "${VOLUME_PATH}"/config "${VOLUME_PATH}"/data
# feed the dump straight from the file instead of a useless `cat |`
sudo docker exec -i gitea-postgres psql -qXU gitea <"${VOLUME_PATH}"/restore_files/db.out
echo "[+] restarting..."
sudo docker compose -f "${HOME}"/"${USER}"-compose.yaml restart
# cleanup
rm -rf "${VOLUME_PATH}"/restore_files

0
gitea_server-teardown Normal file
View File

0
gitea_server-update Normal file
View File

39
homepage_server-backup Normal file
View File

@@ -0,0 +1,39 @@
#!/bin/bash
# Nightly homepage backup: stop the containers, copy the volume tree to /tmp,
# restart, push to B2 via rclone, and notify through ntfy.
# All output is appended to a monthly log file.
# shellcheck source=homepage_server-env
. "${HOME}"/"${USER}"-env
mkdir -p "${HOME}"/backup_logs
logFile=${HOME}/backup_logs/$(date +%y_%m).log
{
echo -e "\n[+] homepage backup\n"
mkdir -p /tmp/"${USER}"-backup
# stop first so files are copied in a consistent state
sudo docker compose -f "${HOME}"/"${USER}"-compose.yaml stop
cp -pr "${VOLUME_PATH}"/* /tmp/"${USER}"-backup
sudo docker compose -f "${HOME}"/"${USER}"-compose.yaml start
if ! rclone copy /tmp/"${USER}"-backup "${BUCKET_PATH}" -v; then
curl -Ss \
-H "Title: Homepage" \
-H "Priority: 3" \
-H "Tags: warning,backup" \
-d "Backup not completed" \
"${NOTIF_URL}"
rm -r /tmp/"${USER}"-backup
exit 1
fi
curl -Ss \
-H "Title: Homepage" \
-H "Priority: 2" \
-H "Tags: heavy_check_mark,backup" \
-d "Backup completed" \
"${NOTIF_URL}"
rm -r /tmp/"${USER}"-backup
} &>>"$logFile"

View File

@@ -0,0 +1,32 @@
---
services:
dockerproxy:
image: ghcr.io/tecnativa/docker-socket-proxy:latest
container_name: dockerproxy
environment:
- CONTAINERS=1 # Allow access to viewing containers
- SERVICES=0 # Allow access to viewing services (necessary when using Docker Swarm)
- TASKS=0 # Allow access to viewing tasks (necessary when using Docker Swarm)
- POST=0 # Disallow any POST operations (effectively read-only)
ports:
- 127.0.0.1:${DOCKER_PORT}:${DOCKER_PORT}
volumes:
- /var/run/docker.sock:/var/run/docker.sock:ro # Mounted as read-only
restart: unless-stopped
homepage:
image: ghcr.io/gethomepage/homepage:latest
container_name: homepage
pull_policy: always
restart: unless-stopped
ports:
- 127.0.0.1:${PORT}:${PORT}
init: true
volumes:
- ${VOLUME_PATH}/config:/app/config # Make sure your local config directory exists
- ${VOLUME_PATH}/icons:/app/public/icons # icons
- ${VOLUME_PATH}/images:/app/public/images # images
environment:
PUID: ${PUID}
PGID: ${PGID}
HOMEPAGE_ALLOWED_HOSTS: dash.knravish.me

2
homepage_server-cronjob Normal file
View File

@@ -0,0 +1,2 @@
# daily backup at 10:05; weekly image update on Tuesdays at 11:05
5 10 * * * /home/homepage_server/homepage_server-backup
5 11 * * 2 /home/homepage_server/homepage_server-update

22
homepage_server-geticon Normal file
View File

@@ -0,0 +1,22 @@
#!/bin/bash
# Fetch a dashboard icon by name ($1) into ${VOLUME_PATH}/icons,
# preferring SVG over PNG. Prints the format found, or "Not Found".
# shellcheck source=homepage_server-env
. "${HOME}"/"${USER}"-env
base_url=https://cdn.jsdelivr.net/gh/walkxcode/dashboard-icons
svg_url=${base_url}/svg/${1}.svg
png_url=${base_url}/png/${1}.png
# -f makes curl return non-zero on HTTP errors, so the exit status replaces
# the old "grep the status line for 404" check (which also let 5xx responses
# through) and -sS silences the progress meter the bare -I printed.
if curl -fsSI "${svg_url}" >/dev/null; then
  curl -Ss -O --output-dir "${VOLUME_PATH}"/icons "${svg_url}"
  echo "svg"
  exit 0
elif curl -fsSI "${png_url}" >/dev/null; then
  curl -Ss -O --output-dir "${VOLUME_PATH}"/icons "${png_url}"
  echo "png"
  exit 0
else
  echo "Not Found"
  exit 1
fi

18
homepage_server-getimage Normal file
View File

@@ -0,0 +1,18 @@
#!/bin/bash
# Download an arbitrary image from URL ($1) and store it as $2.<ext> under
# ${VOLUME_PATH}/images, deriving <ext> from the Content-Type header.
# shellcheck source=homepage_server-env
. "${HOME}"/"${USER}"-env
headers=$(curl -SsIXGET "$1")
status_code=$(echo "$headers" | grep -E "HTTP/.* [0-9]{3}" | awk '{print $2}')
if [[ $status_code == "200" ]]; then
  # Split on '/', ';' and '+' so "image/svg+xml" yields "svg" and parameters
  # like "; charset=..." are dropped; -i matches HTTP/1 "Content-Type:" too
  # (HTTP/2 header names arrive lowercased).
  ext=$(echo "$headers" | grep -i "content-type:" | awk -F'[/;+]' '{print $2}' | tr -d " \t\n\r")
  curl -Ss -o "${VOLUME_PATH}"/images/"${2}"."${ext}" "${1}"
  echo "found"
  exit 0
else
  echo "Not Found"
  exit 1
fi

20
homepage_server-setup Normal file
View File

@@ -0,0 +1,20 @@
#!/bin/bash
# Homepage setup: render the compose file, start once to create state, then
# replace the volume with the latest rclone backup and restart.
echo -e "\n[+] setting up homepage\n\n-------\n"
# shellcheck source=homepage_server-env
. "${HOME}"/"${USER}"-env
mkdir -p "${VOLUME_PATH}"
envsubst <"${HOME}"/"${USER}"-compose_template.yaml >"${HOME}"/"${USER}"-compose.yaml
sudo docker compose -f "${HOME}"/"${USER}"-compose.yaml up -d
echo "[+] restoring from backup..."
sudo docker compose -f "${HOME}"/"${USER}"-compose.yaml stop
# ${VOLUME_PATH:?} aborts if the variable is unset/empty instead of letting
# rm -rf run on an empty path (same guard mealie_server-setup already uses)
rm -rf "${VOLUME_PATH:?}"
rclone copy "${BUCKET_PATH}" "${VOLUME_PATH}" -v
sudo docker compose -f "${HOME}"/"${USER}"-compose.yaml start

14
homepage_server-teardown Normal file
View File

@@ -0,0 +1,14 @@
#!/bin/bash
# Tear down the homepage_server account: remove containers/volumes, kill
# remaining processes, delete the user and any files it still owns.
username=homepage_server
# application
sudo docker compose -f /home/"${username}"/"${username}"-compose.yaml down -v
# capture the uid before deluser removes the account; quoted per SC2086
uid_num=$(id -u "${username}")
sudo killall -9 -v -g -u "${username}"
sudo crontab -r -u "${username}"
sudo deluser --remove-all-files "${username}"
# clean-up: files outside the home directory still owned by the old uid
sudo find / -user "${uid_num}" -delete

11
homepage_server-update Normal file
View File

@@ -0,0 +1,11 @@
#!/bin/bash
# Weekly update: pull newer images, recreate the stack, prune old images.
# Output goes to a monthly log file. USER is provided by the installed crontab.
mkdir -p "${HOME}"/update_logs
logFile=${HOME}/update_logs/$(date +%y_%m).log
{
  echo -e "\n[+] updating homepage\n"
  sudo docker compose -f "${HOME}"/"${USER}"-compose.yaml pull &&
    sudo docker compose -f "${HOME}"/"${USER}"-compose.yaml up -d --always-recreate-deps --remove-orphans &&
    sudo docker image prune -af # -f already skips the prompt; `yes |` was redundant
} &>>"$logFile"

6
instance-bash_aliases Normal file
View File

@@ -0,0 +1,6 @@
#!/bin/bash
# Shared interactive aliases; appended to ~/.bash_aliases for every account
# (and /etc/skel) by instance-setup.
alias less="less -r" # render raw control chars (ANSI colors) in the pager
alias dsizes="sudo du --max-depth=1 -h" # per-directory disk usage summary
alias workas="sudo machinectl shell" # open a login shell as another user
alias psdeets="ps -o pid,vsz=MEMORY -o user,group=GROUP -o comm,args=ARGS -p" # detailed view of the given PIDs

View File

@@ -0,0 +1,3 @@
#!/bin/bash
# Tab-complete usernames for the `workas` alias (current user list at load time).
complete -W "$(compgen -u)" workas

274
instance-setup Normal file
View File

@@ -0,0 +1,274 @@
#!/bin/bash
# Instance bootstrap: verify required env vars, persist shared settings to
# /etc/environment, install shell niceties, update, and (on OCI) open the
# provider's default-deny firewall.
echo -e "\n[+] Let's begin!\n\n-------\n"
# define these first
[[ -z "$BASE_DOMAIN" ]] && echo "base domain missing" && exit 1
[[ -z "$CF_EMAIL_ALIAS" ]] && echo "domain email missing" && exit 1
[[ -z "$UBUNTU_PRO_TOKEN" ]] && echo "ubuntu pro token missing" && exit 1
[[ -z "$B2_COLON_BUCKET_NAME" ]] && echo "b2 bucket name missing" && exit 1
[[ -z "$NTFY_URL" ]] && echo "ntfy endpoint missing" && exit 1
domain=$BASE_DOMAIN
email_address=${CF_EMAIL_ALIAS}
echo "BASE_DOMAIN=${BASE_DOMAIN}" | sudo tee -a /etc/environment
echo "BACKUP_BUCKET=${B2_COLON_BUCKET_NAME}" | sudo tee -a /etc/environment # current: the startingOut one
echo "NOTIF_URL=${NTFY_URL}" | sudo tee -a /etc/environment # current: endpoint on ntfy.sh
# some useful aliases (input redirection instead of a useless `cat |`)
tee -a ~/.bash_aliases <instance-bash_aliases
sudo tee -a /etc/skel/.bash_aliases <instance-bash_aliases
# some useful autocompletions
chmod 774 instance-bash_autocompletions
# NOTE(review): executing this registers the completion only in a child shell;
# it likely needs to be sourced from ~/.bashrc to take effect — confirm intent
./instance-bash_autocompletions
cd ~ || exit
sudo apt-get update
sudo apt-get upgrade -y
sudo pro attach "$UBUNTU_PRO_TOKEN"
if [[ $(cloud-init query platform) == 'oracle' ]]; then
  # https://www.reddit.com/r/oraclecloud/comments/r8lkf7/a_quick_tips_to_people_who_are_having_issue/
  echo "[+] disabling ufw and netfilter rules (OCI default)"
  sudo ufw disable
  sudo iptables -I INPUT -j ACCEPT
  sudo iptables-save | sudo dd of=/etc/iptables/rules.v4
fi
echo "[+] packages"
# JDK 17 or higher needed for MC
sudo apt-get install build-essential curl gnupg2 ca-certificates lsb-release ubuntu-keyring apt-transport-https expect -y
sudo apt-get install openjdk-21-jdk-headless systemd-container fail2ban -y
sudo systemctl enable --now fail2ban.service
echo "[+] docker"
sudo install -m 0775 -d /etc/apt/keyrings
sudo curl -fsSL https://download.docker.com/linux/ubuntu/gpg -o /etc/apt/keyrings/docker.asc
sudo chmod a+r /etc/apt/keyrings/docker.asc
echo \
  "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.asc] \
https://download.docker.com/linux/ubuntu $(lsb_release -cs 2>/dev/null) stable" |
  sudo tee /etc/apt/sources.list.d/docker.list >/dev/null
echo "[+] nginx"
# http://nginx.org/en/linux_packages.html#Ubuntu
curl -L https://nginx.org/keys/nginx_signing.key | gpg --dearmor |
  sudo tee /usr/share/keyrings/nginx-archive-keyring.gpg >/dev/null
expected_nginx_fingerprint='573BFD6B3D8FBC641079A6ABABF5BD827BD9BF62'
# grep -q: only the exit status matters, and -c printed a stray count;
# the fingerprint is also quoted now (SC2086)
if ! gpg --dry-run --quiet --no-keyring --import --import-options \
  import-show /usr/share/keyrings/nginx-archive-keyring.gpg |
  grep -q "$expected_nginx_fingerprint"; then
  echo -e "\n[!] Nginx GPG key fingerprint does not match, aborting...\n"
  sudo rm /usr/share/keyrings/nginx-archive-keyring.gpg
  exit 1
fi
echo "deb [signed-by=/usr/share/keyrings/nginx-archive-keyring.gpg] \
http://nginx.org/packages/ubuntu $(lsb_release -cs 2>/dev/null) nginx" |
  sudo tee /etc/apt/sources.list.d/nginx.list
echo -e "Package: *\nPin: origin nginx.org\nPin: release o=nginx\nPin-Priority: 900\n" |
  sudo tee /etc/apt/preferences.d/99nginx
echo "[+] syncthing"
sudo curl -L -o /etc/apt/keyrings/syncthing-archive-keyring.gpg https://syncthing.net/release-key.gpg
# note the space before the line-continuation: inside double quotes "\<newline>"
# is removed, so the old "gpg]\" glued "]" to the URL and broke the sources entry
echo "deb [signed-by=/etc/apt/keyrings/syncthing-archive-keyring.gpg] \
https://apt.syncthing.net/ syncthing stable-v2" |
  sudo tee /etc/apt/sources.list.d/syncthing.list
echo -e "Package: *\nPin: origin apt.syncthing.net\nPin-Priority: 990\n" |
  sudo tee /etc/apt/preferences.d/syncthing.pref
echo "[+] putting it all together"
sudo apt-get update
sudo apt-get install docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin nginx syncthing -y
# --rm avoids leaving a stopped hello-world container; grep -q for status only
if ! sudo docker run --rm hello-world | grep -q 'installation appears to be working correctly'; then
  echo -e "\n[!] Docker installation failed, aborting...\n"
  exit 1
fi
echo "[+] rclone"
# upstream installer handles arch detection and places the binary in /usr/bin
curl https://rclone.org/install.sh | sudo bash
echo "[+] certbot from snap ugh"
sudo snap install core
sudo snap refresh core
# remove any apt-installed certbot so only the snap build is on PATH
sudo apt-get remove certbot
sudo snap install --classic certbot
sudo ln -s /snap/bin/certbot /usr/bin/certbot
echo "[+] add users for applications"
# format - tool name underscore 'server'
users=(
"actual_server"
"authelia_server"
"foundry_server"
"ghost_server"
"gitea_server"
"homepage_server"
"mealie_server"
"memos_server"
"minecraft_server"
"pwpush_server"
"shlink_server"
"spotmgr_server"
"stirling_server"
"syncthing_server"
"vikunja_server"
"wg_server"
)
# one unprivileged account per application; each gets its setup script and env file
for username in "${users[@]}"; do
sudo useradd -m -U -s /bin/bash "${username}"
# setup script
sudo cp ~/"${username}"-setup /home/"${username}"/
sudo chmod 774 /home/"${username}"/"${username}"-setup
sudo chown "${username}":"${username}" /home/"${username}"/"${username}"-setup
sudo cp ~/"${username}"-env /home/"${username}"/
# env files carry secrets — owner-only access
sudo chmod 600 /home/"${username}"/"${username}"-env
sudo chown "${username}":"${username}" /home/"${username}"/"${username}"-env
# user services won't linger by default
sudo loginctl enable-linger "${username}"
done
# admin privileges, needed for anyone running docker
# NOTE(review): passwordless `ALL` sudo per service account is very broad;
# consider restricting each sudoers entry to the docker binary
admin_users=(
"actual_server"
"authelia_server"
"ghost_server"
"gitea_server"
"homepage_server"
"mealie_server"
"memos_server"
"pwpush_server"
"shlink_server"
"spotmgr_server"
"stirling_server"
"vikunja_server"
"wg_server"
)
for username in "${admin_users[@]}"; do
sudo usermod -aG sudo "${username}"
echo "${username} ALL=(ALL:ALL) NOPASSWD: ALL" | sudo tee /etc/sudoers.d/"${username}"
# compose files
sudo cp ~/"${username}"-compose_template.yaml /home/"${username}"/
sudo chmod 664 /home/"${username}"/"${username}"-compose_template.yaml
sudo chown "${username}":"${username}" /home/"${username}"/"${username}"-compose_template.yaml
sudo cp ~/"${username}"-compose.yaml /home/"${username}"/
sudo chmod 600 /home/"${username}"/"${username}"-compose.yaml
sudo chown "${username}":"${username}" /home/"${username}"/"${username}"-compose.yaml
done
echo "[+] distribute and apply respective config files"
echo -e "\t[-] rclone"
# every account gets a copy of the shared rclone config (bucket credentials)
for username in "${users[@]}"; do
sudo mkdir -p /home/"${username}"/.config/rclone/
sudo cp ~/.config/rclone/rclone.conf /home/"${username}"/.config/rclone/
sudo chmod -R 600 /home/"${username}"/.config/rclone/rclone.conf
sudo chown -R "${username}":"${username}" /home/"${username}"/
done
# consider switching to acme.sh instead of certbot to avoid snap
echo -e "\t[-] nginx and certbot"
cert_subdomains=(
"api.spotify-manager"
"auth"
"budget"
"dash"
"git"
"lnk"
"notes"
"paste"
"planning"
"pdf"
"recipes"
"syncthing"
"vpn"
"vtt"
)
# ghost handles SSL by itself, might be worth looking into it to either shift to certbot
# install each vhost, validate the config, then obtain its certificate
for subdomain in "${cert_subdomains[@]}"; do
# revoke existing certs if any
sudo certbot revoke -n --delete-after-revoke --cert-name "${subdomain}"."${domain}"
sudo cp ~/"${subdomain}"."${domain}".conf /etc/nginx/conf.d/
sudo chmod 664 /etc/nginx/conf.d/"${subdomain}"."${domain}".conf
sudo chown root:root /etc/nginx/conf.d/"${subdomain}"."${domain}".conf
if ! sudo nginx -t; then
echo -e "\n\t[!] Bad Nginx config for ${subdomain}.${domain}, aborting...\n"
exit 1
fi
sudo nginx -s reload
# ----------------------------------------------------------------------
# STOP!
# Check DNS records before proceeding
# ----------------------------------------------------------------------
# https://letsencrypt.org/docs/duplicate-certificate-limit/#description
# certbot has 5 per week duplicate cert limit. use --test-cert flag for testing
if ! sudo certbot -n --nginx --agree-tos -m "${email_address}" -d "${subdomain}"."${domain}"; then
echo -e "\n\t[!] Certbot failed to get cert for ${subdomain}.${domain}, aborting...\n"
exit 1
fi
sudo nginx -s reload
done
echo -e "\t[-] user-specific files"
# bash variable expansion ftw - https://stackoverflow.com/a/63821858/7630441
user_files=(
"authelia_server-configuration.yaml"
"foundry_server-start.service"
"ghost_server-config.production.json"
"ghost_server-credentials.exp"
"minecraft_server-start.service"
"minecraft_server-start.socket"
"pwpush_server-settings.yaml"
)
for f in "${user_files[@]}"; do
username=${f%%-*} # keeps the part before the first '-', i.e. the username
sudo cp ~/"${f}" /home/"${username}"/
sudo chmod 664 /home/"${username}"/"${f}"
sudo chown "${username}":"${username}" /home/"${username}"/"${f}"
done
echo -e "[+] cronjobs: backups, updates"
# install backup/update scripts and build each user's crontab
for username in "${users[@]}"; do
sudo cp ~/"${username}"-backup /home/"${username}"/
sudo chmod 774 /home/"${username}"/"${username}"-backup
sudo chown "${username}":"${username}" /home/"${username}"/"${username}"-backup
sudo cp ~/"${username}"-update /home/"${username}"/
sudo chmod 774 /home/"${username}"/"${username}"-update
sudo chown "${username}":"${username}" /home/"${username}"/"${username}"-update
{
# first add some useful env vars that aren't in cron's exec env
echo "USER=$username"
echo "XDG_RUNTIME_DIR=/run/user/$(id -u "$username")"
echo "DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/$(id -u "$username")/bus"
# then the defined cronjob
cat ~/"${username}"-cronjob
} >~/"${username}".cronjobs
# install to crontab
sudo crontab -u "${username}" ~/"${username}".cronjobs
rm ~/"${username}".cronjobs
done
# append the ubuntu user's extra jobs to its existing crontab
# shellcheck disable=SC2024
sudo crontab -l -u ubuntu >~/ubuntu.cronjobs
cat ~/ubuntu-cronjob >>~/ubuntu.cronjobs
sudo crontab -u ubuntu ~/ubuntu.cronjobs
rm ~/ubuntu.cronjobs
# make the per-user teardown scripts runnable from here
for username in "${users[@]}"; do
chmod ug+x "${username}"-teardown
done

14
lnk.knravish.me.conf Normal file
View File

@@ -0,0 +1,14 @@
# Reverse proxy for the service on localhost:8080
# (presumably shlink, per instance-setup's user list — confirm).
server {
    server_name lnk.knravish.me;
    charset utf-8;
    location / {
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header Host $http_host;
        proxy_pass http://127.0.0.1:8080;
    }
    # plain HTTP; certbot --nginx rewrites this block for TLS during instance-setup
    listen 80;
}

39
mealie_server-backup Normal file
View File

@@ -0,0 +1,39 @@
#!/bin/bash
# Nightly mealie backup: stop the containers, copy the volume tree to /tmp,
# restart, push to B2 via rclone, and notify through ntfy.
# All output is appended to a monthly log file.
# shellcheck source=mealie_server-env
. "${HOME}"/"${USER}"-env
mkdir -p "${HOME}"/backup_logs
logFile=${HOME}/backup_logs/$(date +%y_%m).log
{
echo -e "\n[+] mealie backup\n"
mkdir -p /tmp/"${USER}"-backup
# stop first so files are copied in a consistent state
sudo docker compose -f "${HOME}"/"${USER}"-compose.yaml stop
cp -pr "${VOLUME_PATH}"/* /tmp/"${USER}"-backup
sudo docker compose -f "${HOME}"/"${USER}"-compose.yaml start
if ! rclone copy /tmp/"${USER}"-backup "${BUCKET_PATH}" -v; then
curl -Ss \
-H "Title: Mealie" \
-H "Priority: 3" \
-H "Tags: warning,backup" \
-d "Backup not completed" \
"${NOTIF_URL}"
rm -r /tmp/"${USER}"-backup
exit 1
fi
curl -Ss \
-H "Title: Mealie" \
-H "Priority: 2" \
-H "Tags: heavy_check_mark,backup" \
-d "Backup completed" \
"${NOTIF_URL}"
rm -r /tmp/"${USER}"-backup
} &>>"$logFile"

View File

@@ -0,0 +1,27 @@
---
# Mealie recipe manager. Rendered by envsubst: PORT, VOLUME_PATH, PUID, PGID, BASE_URL.
services:
  mealie:
    image: ghcr.io/mealie-recipes/mealie
    container_name: mealie
    pull_policy: always
    restart: unless-stopped
    ports:
      # loopback only; nginx (recipes.*) fronts it — app serves on 9000 in-container
      - 127.0.0.1:${PORT}:9000
    deploy:
      resources:
        limits:
          memory: 2048M
    volumes:
      - type: bind
        source: ${VOLUME_PATH}
        target: /app/data
        bind:
          create_host_path: true
    environment:
      # registration closed; accounts are created by the admin
      ALLOW_SIGNUP: false
      PUID: ${PUID}
      PGID: ${PGID}
      TZ: America/Phoenix
      # single worker keeps memory use low on a small instance
      MAX_WORKERS: 1
      WEB_CONCURRENCY: 1
      BASE_URL: ${BASE_URL}

2
mealie_server-cronjob Normal file
View File

@@ -0,0 +1,2 @@
# daily backup at 10:06; weekly image update on Tuesdays at 11:06
6 10 * * * /home/mealie_server/mealie_server-backup
6 11 * * 2 /home/mealie_server/mealie_server-update

20
mealie_server-setup Normal file
View File

@@ -0,0 +1,20 @@
#!/bin/bash
# Mealie setup: render the compose file, start once to create state, then
# replace the volume contents with the latest rclone backup and restart.
# shellcheck source=mealie_server-env
. "${HOME}"/"${USER}"-env
echo -e "\n[+] setting up mealie\n\n-------\n"
envsubst < "${HOME}"/"${USER}"-compose_template.yaml > "${HOME}"/"${USER}"-compose.yaml
sudo docker compose -f "${HOME}"/"${USER}"-compose.yaml up -d
echo "[+] restoring backup data"
sudo docker compose -f "${HOME}"/"${USER}"-compose.yaml stop
# :? aborts if VOLUME_PATH is unset/empty, guarding the rm against "/*"
rm -r "${VOLUME_PATH:?}"/*
rclone copy "${BUCKET_PATH}" "${VOLUME_PATH}" -v
sudo docker compose -f "${HOME}"/"${USER}"-compose.yaml start

14
mealie_server-teardown Normal file
View File

@@ -0,0 +1,14 @@
#!/bin/bash
# Tear down the mealie_server account: remove containers/volumes, kill
# remaining processes, delete the user and any files it still owns.
username=mealie_server
# application
sudo docker compose -f /home/"${username}"/"${username}"-compose.yaml down -v
# capture the uid before deluser removes the account; quoted per SC2086
uid_num=$(id -u "${username}")
sudo killall -9 -v -g -u "${username}"
sudo crontab -r -u "${username}"
sudo deluser --remove-all-files "${username}"
# clean-up: files outside the home directory still owned by the old uid
sudo find / -user "${uid_num}" -delete

11
mealie_server-update Normal file
View File

@@ -0,0 +1,11 @@
#!/bin/bash
# Weekly update: pull newer images, recreate the stack, prune old images.
# Output goes to a monthly log file. USER is provided by the installed crontab.
mkdir -p "${HOME}"/update_logs
logFile=${HOME}/update_logs/$(date +%y_%m).log
{
  echo -e "\n[+] updating mealie\n"
  sudo docker compose -f "${HOME}"/"${USER}"-compose.yaml pull &&
    sudo docker compose -f "${HOME}"/"${USER}"-compose.yaml up -d --always-recreate-deps --remove-orphans &&
    sudo docker image prune -af # -f already skips the prompt; `yes |` was redundant
} &>>"$logFile"

39
memos_server-backup Normal file
View File

@@ -0,0 +1,39 @@
#!/bin/bash
# Nightly memos backup: stop the containers, copy the volume tree to /tmp,
# restart, push to B2 via rclone, and notify through ntfy.
# All output is appended to a monthly log file.
# shellcheck source=memos_server-env
. "${HOME}"/"${USER}"-env
mkdir -p "${HOME}"/backup_logs
logFile=${HOME}/backup_logs/$(date +%y_%m).log
{
echo -e "\n[+] memos backup\n"
mkdir -p /tmp/"${USER}"-backup
# stop first so files are copied in a consistent state
sudo docker compose -f "${HOME}"/"${USER}"-compose.yaml stop
cp -pr "${VOLUME_PATH}"/* /tmp/"${USER}"-backup
sudo docker compose -f "${HOME}"/"${USER}"-compose.yaml start
if ! rclone copy /tmp/"${USER}"-backup "${BUCKET_PATH}" -v; then
curl -Ss \
-H "Title: Memos" \
-H "Priority: 3" \
-H "Tags: warning,backup" \
-d "Backup not completed" \
"${NOTIF_URL}"
rm -r /tmp/"${USER}"-backup
exit 1
fi
curl -Ss \
-H "Title: Memos" \
-H "Priority: 2" \
-H "Tags: heavy_check_mark,backup" \
-d "Backup completed" \
"${NOTIF_URL}"
rm -r /tmp/"${USER}"-backup
} &>>"$logFile"

View File

@@ -0,0 +1,15 @@
---
# usememos/memos note-taking service. Rendered by envsubst: PORT, VOLUME_PATH.
services:
  memos:
    image: neosmemo/memos:stable
    container_name: memos
    pull_policy: always
    restart: unless-stopped
    ports:
      # loopback only; nginx (notes.knravish.me proxies to 5230) fronts it.
      # NOTE(review): mapping ${PORT}:${PORT} assumes PORT matches the port
      # memos listens on inside the container — confirm in the env file
      - 127.0.0.1:${PORT}:${PORT}
    volumes:
      - type: bind
        source: ${VOLUME_PATH}
        target: /var/opt/memos
        bind:
          create_host_path: true

2
memos_server-cronjob Normal file
View File

@@ -0,0 +1,2 @@
# daily backup at 10:07; weekly image update on Tuesdays at 11:07
7 10 * * * /home/memos_server/memos_server-backup
7 11 * * 2 /home/memos_server/memos_server-update

26
memos_server-setup Normal file
View File

@@ -0,0 +1,26 @@
#!/bin/bash
# Memos setup: render the compose file, start once to create state, then
# replace the data directory with the latest rclone backup and restart.
echo -e "\n[+] setting up usememos\n\n-------\n"
# shellcheck source=memos_server-env
. "${HOME}"/"${USER}"-env
envsubst <"${HOME}"/"${USER}"-compose_template.yaml >"${HOME}"/"${USER}"-compose.yaml
sudo docker compose -f "${HOME}"/"${USER}"-compose.yaml up -d
echo -e "\n[+] restoring from backup..."
sudo docker compose -f "${HOME}"/"${USER}"-compose.yaml stop
# NOTE(review): the compose template mounts ${VOLUME_PATH}; this assumes
# VOLUME_PATH=${HOME}/.memos — confirm against the env file
sudo rm -rf "${HOME}"/.memos/*
# absolute staging path (+ mkdir -p) so the script also works when the cwd
# is not $HOME; -r on cp so restored subdirectories are copied too
restore_dir="${HOME}/memos_data"
mkdir -p "${restore_dir}"
rclone copy "${BUCKET_PATH}" "${restore_dir}" -v
sudo cp -r "${restore_dir}"/* "${HOME}"/.memos
rm -rf "${restore_dir}"
echo "[+] restarting..."
sudo docker compose -f "${HOME}"/"${USER}"-compose.yaml start

14
memos_server-teardown Normal file
View File

@@ -0,0 +1,14 @@
#!/bin/bash
# Tear down the memos_server account: remove containers/volumes, kill
# remaining processes, delete the user and any files it still owns.
username=memos_server
# application
sudo docker compose -f /home/"${username}"/"${username}"-compose.yaml down -v
# capture the uid before deluser removes the account; quoted per SC2086
uid_num=$(id -u "${username}")
sudo killall -9 -v -g -u "${username}"
sudo crontab -r -u "${username}"
sudo deluser --remove-all-files "${username}"
# clean-up: files outside the home directory still owned by the old uid
sudo find / -user "${uid_num}" -delete

11
memos_server-update Normal file
View File

@@ -0,0 +1,11 @@
#!/bin/bash
# Weekly update: pull newer images, recreate the stack, prune old images.
# Output goes to a monthly log file. USER is provided by the installed crontab.
mkdir -p "${HOME}"/update_logs
logFile=${HOME}/update_logs/$(date +%y_%m).log
{
  echo -e "\n[+] updating memos\n"
  sudo docker compose -f "${HOME}"/"${USER}"-compose.yaml pull &&
    sudo docker compose -f "${HOME}"/"${USER}"-compose.yaml up -d --always-recreate-deps --remove-orphans &&
    sudo docker image prune -af # -f already skips the prompt; `yes |` was redundant
} &>>"$logFile"

45
minecraft_server-backup Normal file
View File

@@ -0,0 +1,45 @@
#!/bin/bash
# Nightly minecraft backup: disable/flush world saves via the console FIFO,
# stop the server, copy the world, restart, re-enable saves, push to B2.
# shellcheck source=minecraft_server-env
. "${HOME}"/"${USER}"-env
mkdir -p "${HOME}"/backup_logs
logFile=${HOME}/backup_logs/$(date +%y_%m).log
{
echo -e "\n[+] minecraft backup\n"
mkdir -p /tmp/"${USER}"-backup
# FIFO created by the *-start.socket unit; writes land on the server console
insock=${HOME}/${USER}.stdin
# https://www.reddit.com/r/admincraft/comments/vgdbi/minecraft_backups_saveoff_and_saveall/
# NOTE(review): save-all is asynchronous and nothing waits for it before the
# stop below; also confirm the console accepts a leading "/" on this server version
echo "/save-off" >"${insock}"
echo "/save-all" >"${insock}"
systemctl --user stop "${USER}"-start.{socket,service}
cp -pr "${DATA_PATH}"/* /tmp/"${USER}"-backup
systemctl --user restart "${USER}"-start.{socket,service}
echo "/save-on" >"${insock}"
if ! rclone copy /tmp/"${USER}"-backup "${BUCKET_PATH}" -v; then
curl -Ss \
-H "Title: Minecraft Server" \
-H "Priority: 3" \
-H "Tags: warning,backup" \
-d "Backup not completed" \
"${NOTIF_URL}"
rm -r /tmp/"${USER}"-backup
exit 1
fi
curl -Ss \
-H "Title: Minecraft Server" \
-H "Priority: 2" \
-H "Tags: heavy_check_mark,backup" \
-d "Backup completed" \
"${NOTIF_URL}"
rm -r /tmp/"${USER}"-backup
} &>>"$logFile"

1
minecraft_server-cronjob Normal file
View File

@@ -0,0 +1 @@
# m h dom mon dow command — daily at 10:08
8 10 * * * /home/minecraft_server/minecraft_server-backup

21
minecraft_server-setup Normal file
View File

@@ -0,0 +1,21 @@
#!/bin/bash
echo -e "\n[+] setting up the minecraft server\n\n-------\n"
# shellcheck source=minecraft_server-env
. "${HOME}"/"${USER}"-env
mkdir -p "${DATA_PATH}"
echo "[+] restoring backup data"
rclone copy "${BUCKET_PATH}" "${DATA_PATH}"/ -v
echo "[+] setting up systemctl and starting"
mkdir -p "${HOME}"/.config/systemd/user/
# install both the console-FIFO socket unit and the service unit
cp "${HOME}"/"${USER}"-start.{service,socket} "${HOME}"/.config/systemd/user/
systemctl --user daemon-reload
# socket first: the service uses StandardInput=socket, so the FIFO must exist
systemctl --user restart "${USER}"-start.socket
systemctl --user enable --now "${USER}"-start.service

View File

@@ -0,0 +1,17 @@
[Unit]
Description=Minecraft server
After=network.target
[Service]
Type=simple
Restart=on-failure
RestartSec=1
# %h = user home, %u = owning user name; world data lives in ~/minecraft_server
WorkingDirectory=%h/%u
ExecStart=/usr/bin/java -Xms1024M -jar %h/%u/server.jar nogui
# console stdin is fed through the FIFO from the paired socket unit,
# so scripts (e.g. the backup job) can send server commands
Sockets=%u-start.socket
StandardInput=socket
# NOTE(review): the truncate: output mode requires systemd >= 248 — confirm target distro
StandardOutput=truncate:%h/%u.run.log
StandardError=append:%h/%u.err.log
[Install]
WantedBy=default.target

View File

@@ -0,0 +1,3 @@
# Creates a FIFO at ~/<user>.stdin; anything written to it is delivered to the
# minecraft server console via the service's StandardInput=socket.
[Socket]
ListenFIFO=%h/%u.stdin
Service=%u-start.service

18
notes.knravish.me.conf Normal file
View File

@@ -0,0 +1,18 @@
# Reverse proxy for the service on localhost:5230 (memos, per instance-setup).
server {
    server_name notes.knravish.me;
    index index.html index.htm;
    location / {
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header Host $http_host;
        proxy_pass http://127.0.0.1:5230;
        proxy_redirect off;
        # add_header emits a *response* header to clients; the previous
        # proxy_set_header only sent a request header upstream, which never
        # enabled CORS for browsers.
        add_header Access-Control-Allow-Origin *;
        proxy_read_timeout 600s;
        proxy_send_timeout 600s;
    }
    # plain HTTP; certbot --nginx rewrites this block for TLS during instance-setup
    listen 80;
}

18
paste.knravish.me.conf Normal file
View File

@@ -0,0 +1,18 @@
# Reverse proxy for the service on localhost:5100 (pwpush, per instance-setup).
server {
    server_name paste.knravish.me;
    index index.html index.htm;
    location / {
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header Host $http_host;
        proxy_pass http://127.0.0.1:5100;
        proxy_redirect off;
        # add_header emits a *response* header to clients; the previous
        # proxy_set_header only sent a request header upstream, which never
        # enabled CORS for browsers.
        add_header Access-Control-Allow-Origin *;
        proxy_read_timeout 600s;
        proxy_send_timeout 600s;
    }
    # plain HTTP; certbot --nginx rewrites this block for TLS during instance-setup
    listen 80;
}

18
pdf.knravish.me.conf Normal file
View File

@@ -0,0 +1,18 @@
# Reverse proxy for the service on localhost:8081 (stirling-pdf, per instance-setup).
server {
    server_name pdf.knravish.me;
    index index.html index.htm;
    location / {
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header Host $http_host;
        proxy_pass http://127.0.0.1:8081;
        proxy_redirect off;
        # add_header emits a *response* header to clients; the previous
        # proxy_set_header only sent a request header upstream, which never
        # enabled CORS for browsers.
        add_header Access-Control-Allow-Origin *;
        proxy_read_timeout 600s;
        proxy_send_timeout 600s;
    }
    # plain HTTP; certbot --nginx rewrites this block for TLS during instance-setup
    listen 80;
}

16
planning.knravish.me.conf Normal file
View File

@@ -0,0 +1,16 @@
# Reverse proxy for the service on localhost:3456 (vikunja, per instance-setup),
# gated behind Authelia single sign-on.
server {
    server_name planning.knravish.me;
    index index.html index.htm;
    # defines the internal Authelia verification endpoint used by the
    # auth-request snippet below
    include /etc/nginx/snippets/authelia-location.conf;
    # upstream kept in a variable; referenced by the shared snippets
    set $upstream http://127.0.0.1:3456;
    location / {
        include /etc/nginx/snippets/proxy.conf;
        include /etc/nginx/snippets/authelia-authrequest.conf;
        proxy_pass $upstream;
    }
    # plain HTTP; certbot --nginx rewrites this block for TLS during instance-setup
    listen 80;
}

View File

@@ -0,0 +1,35 @@
---
# pwpush (Password Pusher) + its dedicated postgres, rendered via envsubst
# from this template. Required env: PORT, SMTP_PASSWORD, PG_PASS,
# VOLUME_PATH, USER, PUID, PGID.
services:
  pwpush:
    image: pglombardo/pwpush
    container_name: pwpush
    pull_policy: always
    restart: unless-stopped
    ports:
      # loopback-only bind; nginx fronts the public side
      - "127.0.0.1:${PORT}:5100"
    environment:
      # SMTP secret stays in the env file instead of settings.yml
      PWP__MAIL__SMTP_PASSWORD: ${SMTP_PASSWORD}
      DATABASE_URL: 'postgres://postgres:${PG_PASS}@postgres:5432/postgres'
    volumes:
      - ${VOLUME_PATH}/config/${USER}-settings.yaml:/opt/PasswordPusher/config/settings.yml
      - type: bind
        source: ${VOLUME_PATH}/files
        target: /opt/PasswordPusher/storage
        bind:
          create_host_path: true
    depends_on:
      - postgres
  postgres:
    image: docker.io/postgres:16
    container_name: 'pwpush-postgres'
    pull_policy: always
    restart: unless-stopped
    volumes:
      - type: bind
        source: ${VOLUME_PATH}/database
        target: /var/lib/postgresql/data
        bind:
          create_host_path: true
    environment:
      POSTGRES_PASSWORD: ${PG_PASS}
    # run postgres as the host service account so bind-mounted data files
    # stay owned by it
    user: ${PUID}:${PGID}

981
pwpush_server-settings.yaml Normal file
View File

@@ -0,0 +1,981 @@
# Global Application Configuration
#
# This file uses YAML syntax. Indentation must be 2 spaces (not tabs).
#
# See also https://docs.pwpush.com/docs/config-strategies/
# for a further explanation of the larger settings available here.
### Application Defaults
#
### URL Pushes
#
# Enable or disable URL based pushes. These allow you to share URLs securely.
# Like regular pushes, they expire after a set time or amount of views.
#
# Note that `enable_logins` is required for URL based pushes to work. It is a
# feature for logged in users only.
#
# Environment variable override:
# PWP__ENABLE_URL_PUSHES='false'
#
enable_url_pushes: true
### File Uploads
#
# File uploads are disabled by default since they require a place to store
# those files.
#
# If enabling file uploads, make sure to fill out the 'files' section below.
#
# Note that `enable_logins` is required for file uploads to work. It is a
# feature for logged in users only.
#
# Environment variable override:
# PWP__ENABLE_FILE_PUSHES='false'
#
enable_file_pushes: true
### Logins (User accounts)
#
# Logins are disabled by default since they require an MTA (email) server
# available to send emails through.
#
# If enabling logins, make sure to fill out the 'mail' section below.
#
# For instructions on how to enable logins, see this page:
# https://github.com/pglombardo/PasswordPusher/discussions/276
#
# Environment variable override:
# PWP__ENABLE_LOGINS='false'
#
enable_logins: true
## Disable Signups
#
# Disallow new user accounts to be created in the application.
#
# Set this after you have your desired user accounts created. It will
# not allow any further user account creation.
#
# Environment variable override:
# PWP__DISABLE_SIGNUPS='false'
#
disable_signups: false
## Limit Signups to Specific Email Domains
#
# By default, anyone can sign up for an account. The following default regular
# expression just validates if it is a valid email address.
#
# signup_email_regexp: '\A[^@\s]+@[^@\s]+\z'
#
# If you would like to limit signups to specific email domains, you can extend
# the regular expression below to include the domains you want to allow.
#
# For example, to only allow signups from the domain 'hey.com', you would
# change the following to:
#
# signup_email_regexp: '\A[^@\s]+@(hey\.com)\z'
#
# or for multiple domains:
#
# signup_email_regexp: '\A[^@\s]+@(hey\.com|gmail\.com)\z'
#
# Tip: use https://rubular.com to test out your regular expressions. It includes
# a guide to what each component means in regexp.
#
# Environment variable override:
# PWP__SIGNUP_EMAIL_REGEXP='\A[^@\s]+@[^@\s]+\z'
#
signup_email_regexp: '\A[^@\s]+@[^@\s]+\z'
### Allow Anonymous
#
# By default, Password Pusher can be used by anonymous users to push
# new passwords and generate secret URLs. If you want to limit functionality
# to logged in users only, set the following value to true.
#
# This does not affect password secret URLs themselves as anonymous is always
# allowed there.
#
# Environment variable override:
# PWP__ALLOW_ANONYMOUS='true'
#
allow_anonymous: true
### Host Domain
#
# The domain (without protocol) where this instance is hosted
# Used in generating fully qualified URLs.
#
# Make sure to set this for email links to work correctly.
#
# Environment variable override:
# PWP__HOST_DOMAIN='pwpush.com'
#
host_domain: 'paste.knravish.me'
### Host Protocol
#
# The protocol to reach the domain above
# Used in generating fully qualified URLs.
#
# Make sure to set this for email links to work correctly.
#
# Environment variable override:
# PWP__HOST_PROTOCOL='https'
#
host_protocol: 'https'
### Base URL Override
#
# Set the following value to force the base URL of generated links.
#
# Environment variable override:
# PWP__OVERRIDE_BASE_URL='https://pwpush.mydomain.com'
#
# You could even add a port if needed:
# PWP__OVERRIDE_BASE_URL='https://pwpush.mydomain.com:5100'
#
# Set this value without a trailing slash ('/').
#
# override_base_url: 'https://pwpush.mydomain.com'
### Show version on the footer
#
# Enable/disable PasswordPusher version on the footer.
#
# Environment variable override:
# PWP__SHOW_VERSION=true
#
# Default: true
show_version: true
### Show the GDPR cookie consent banner
#
# Enable/disable the GDPR cookie consent banner.
#
# Environment variable override:
# PWP__SHOW_GDPR_CONSENT_BANNER=true
#
# Default: true
show_gdpr_consent_banner: true
### Timezone
#
# Set the timezone for the application. A full list of timezone strings
# can be found here:
# https://en.wikipedia.org/wiki/List_of_tz_database_time_zones
#
# Environment variable override:
# PWP__TIMEZONE='America/New_York'
#
# Default: 'America/New_York'
timezone: 'America/Phoenix'
### Allowed Hosts
#
# This is a list of allowed hosts for the application. This is used to
# prevent host header attacks.
#
# When set, the application will only respond to requests with a host header
# that matches one of the values in this list.
#
# This feature is generally only used when the application is behind a proxy.
#
# It's generally not required to use this unless you are getting the related error
# in the application. localhost and the IP that the application is running on
# are always allowed.
#
# Note: If you need more than one value to the environment variable, separate
# entries by a single space.
#
# Environment variable override:
# PWP__ALLOWED_HOSTS='pwpush.com pwpush.mydomain.com pwpush.myotherdomain'
#
# allowed_hosts:
# - 'pwpush.com'
# - 'pwpush.mydomain.com'
# - 'pwpush.myotherdomain.com'
## Expiration Settings for Password Pushes
#
pw:
# Expire Password Pushes After XX Days
#
# Controls the "Expire After Days" for Password Pushes
#
# Environment variable overrides:
# PWP__PW__EXPIRE_AFTER_DAYS_DEFAULT=7
# PWP__PW__EXPIRE_AFTER_DAYS_MIN=1
# PWP__PW__EXPIRE_AFTER_DAYS_MAX=90
#
expire_after_days_default: 7
expire_after_days_min: 1
expire_after_days_max: 90
# Expire Password Pushes After XX Views
#
# Controls the "Expire After Views" form settings in Password#new
#
# Environment variable overrides:
# PWP__PW__EXPIRE_AFTER_VIEWS_DEFAULT=5
# PWP__PW__EXPIRE_AFTER_VIEWS_MIN=1
# PWP__PW__EXPIRE_AFTER_VIEWS_MAX=100
#
expire_after_views_default: 5
expire_after_views_min: 1
expire_after_views_max: 100
# Retrieval Step for Password Pushes
#
# This enables or disables the "1-click retrieval step" feature entirely. For the default value
# when it is enabled here, see the next setting.
#
# Environment variable override:
# PWP__PW__ENABLE_RETRIEVAL_STEP='false'
#
enable_retrieval_step: true
# Default Form Value for the Retrieval Step
#
# When the retrieval step is enabled (above), what is the default value on the form?
#
# When true, secret URLs will be generated as /p/xxxxxxxx/r which will show a page
# requiring a click to view the page /p/xxxxxxxx
#
# Environment variable override:
# PWP__PW__RETRIEVAL_STEP_DEFAULT='true'
#
retrieval_step_default: false
# Deletable Password Pushes
#
# default: true
#
# This enables or disables the "Allow Immediate Deletion" feature entirely. For the default value
# when it is enabled here, see the next setting.
#
# Environment variable override:
# PWP__PW__ENABLE_DELETABLE_PUSHES='false'
#
enable_deletable_pushes: true
# Deletable Pushes Default Value
#
# default: true
#
# When this is set to true, this option does two things:
# 1. Sets the default check state for the "Allow viewers to
# optionally delete password before expiration" checkbox
# 2. JSON API: Sets the default value for newly pushed passwords if
# unspecified
#
# Environment variable override:
# PWP__PW__DELETABLE_PUSHES_DEFAULT='false'
#
deletable_pushes_default: true
# Blur Payloads
#
# default: true
#
# This option does not affect the JSON API - web UI only.
# When this is set to true, this option will display the pushed text payload as
# blurred out text. This is useful for recipients in public places who don't
# want to reveal the sensitive information until when they choose.
#
# The blur is disabled with a single mouse click.
#
# Setting this option to false will disable the blur feature entirely for password pushes.
#
# Note: This is a global on/off switch currently. This may be made configurable per push
# in the future by adding a new checkbox and a `blur_default` setting.
#
# Environment variable override:
# PWP__PW__ENABLE_BLUR='false'
#
enable_blur: true
## Expiration Settings for URL Pushes
#
url:
# Expire URL Pushes After XX Days
#
# Controls the "Expire After Days" for URL Pushes
#
# Environment variable overrides:
# PWP__URL__EXPIRE_AFTER_DAYS_DEFAULT=7
# PWP__URL__EXPIRE_AFTER_DAYS_MIN=1
# PWP__URL__EXPIRE_AFTER_DAYS_MAX=90
#
expire_after_days_default: 7
expire_after_days_min: 1
expire_after_days_max: 90
# Expire URL Pushes After XX Views
#
# Controls the "Expire After Views" form settings in Password#new
#
# Environment variable overrides:
# PWP__URL__EXPIRE_AFTER_VIEWS_DEFAULT=5
# PWP__URL__EXPIRE_AFTER_VIEWS_MIN=1
# PWP__URL__EXPIRE_AFTER_VIEWS_MAX=100
#
expire_after_views_default: 5
expire_after_views_min: 1
expire_after_views_max: 100
# Retrieval Step for URL Pushes
#
# This enables or disables the "1-click retrieval step" feature entirely. For the default value
# when it is enabled here, see the next setting.
#
# Environment variable override:
# PWP__URL__ENABLE_RETRIEVAL_STEP='false'
#
enable_retrieval_step: true
# Default Form Value for the Retrieval Step
#
# When the retrieval step is enabled (above), what is the default value on the form?
#
# When true, secret URLs will be generated as /r/xxxxxxxx/r which will show a page
# requiring a click to view the page /r/xxxxxxxx
#
# Environment variable override:
# PWP__URL__RETRIEVAL_STEP_DEFAULT='true'
#
retrieval_step_default: false
### File Upload: Expiration & Storage Settings
#
files:
# Expire File Pushes After XX Days
#
# Controls the "Expire After Days" for File Pushes
#
# Environment variable overrides:
# PWP__FILES__EXPIRE_AFTER_DAYS_DEFAULT=7
# PWP__FILES__EXPIRE_AFTER_DAYS_MIN=1
# PWP__FILES__EXPIRE_AFTER_DAYS_MAX=90
#
expire_after_days_default: 7
expire_after_days_min: 1
expire_after_days_max: 90
# Expire File Pushes After XX Views
#
# Controls the "Expire After Views" form settings for File Pushes
#
# Environment variable overrides:
# PWP__FILES__EXPIRE_AFTER_VIEWS_DEFAULT=5
# PWP__FILES__EXPIRE_AFTER_VIEWS_MIN=1
# PWP__FILES__EXPIRE_AFTER_VIEWS_MAX=100
#
expire_after_views_default: 5
expire_after_views_min: 1
expire_after_views_max: 100
# Retrieval Step for File Pushes
#
# This enables or disables the "1-click retrieval step" feature entirely. For the default value
# when it is enabled here, see the next setting.
#
# Environment variable override:
# PWP__FILES__ENABLE_RETRIEVAL_STEP='false'
#
enable_retrieval_step: true
# Default Form Value for the Retrieval Step
#
# When the retrieval step is enabled (above), what is the default value on the form?
#
# When true, secret URLs will be generated as /f/xxxxxxxx/r which will show a page
# requiring a click to view the page /f/xxxxxxxx
#
# Environment variable override:
# PWP__FILES__RETRIEVAL_STEP_DEFAULT='true'
#
retrieval_step_default: true
# Deletable File Pushes
#
# default: true
#
# This enables or disables the "Allow Immediate Deletion" feature entirely. For the default value
# when it is enabled here, see the next setting.
#
# Environment variable override:
# PWP__FILES__ENABLE_DELETABLE_PUSHES='false'
#
enable_deletable_pushes: true
# Deletable File Pushes Default Value
#
# default: true
#
# When this is set to true, this option does two things:
# 1. Sets the default check state for the "Allow viewers to
# optionally delete password before expiration" checkbox
# 2. JSON API: Sets the default value for newly pushed passwords if
# unspecified
#
# Environment variable override:
# PWP__FILES__DELETABLE_PUSHES_DEFAULT='false'
#
deletable_pushes_default: true
# Blur Payloads
#
# default: true
#
# This option does not affect the JSON API - web UI only.
# When this is set to true, this option will display the pushed text payload as
# blurred out text. This is useful for recipients in public places who don't
# want to reveal the sensitive information until when they choose.
#
# The blur is disabled with a single mouse click.
#
# Setting this option to false will disable the blur feature entirely for file pushes.
#
# Note: This is a global on/off switch currently. This may be made configurable per push
# in the future by adding a new checkbox and a `blur_default` setting.
#
# Environment variable override:
# PWP__FILES__BLUR='false'
#
enable_blur: true
# Maximum File Upload Count
#
# default: 10
#
# This option controls the maximum number of files that can be uploaded
# in a single push.
#
# Environment variable override:
# PWP__FILES__MAX_FILE_UPLOADS=10
#
max_file_uploads: 10
# File Storage
#
# Password Pusher can store uploaded files into Amazon S3, Google Cloud Services
# or Microsoft Azure.
#
# Choose your file storage preference by setting the following option to
# one of the following values:
# * local - use local disk (likely won't work in container environments)
# * amazon - use Amazon S3 (and provide 's3' credentials below)
# * google - use Google Cloud Storage (and provide 'gcs' credentials below)
# * microsoft - use Microsoft Azure Storage (and provide 'as' credentials below)
#
# Environment variable override:
# PWP__FILES__STORAGE='local'
#
storage: 'local'
# Amazon S3 Storage Credentials
s3:
# Environment Variable Override: PWP__FILES__S3__ENDPOINT=''
endpoint: ''
# Environment Variable Override: PWP__FILES__S3__ACCESS_KEY_ID=''
access_key_id: '_'
# Environment Variable Override: PWP__FILES__S3__SECRET_ACCESS_KEY=''
secret_access_key: ''
# Environment Variable Override: PWP__FILES__S3__REGION=''
region: 'us-east-1'
# Environment Variable Override: PWP__FILES__S3__BUCKET=''
bucket: 'pwpush-files'
# Google Cloud Storage Credentials
gcs:
# Environment Variable Override: PWP__FILES__GCS__PROJECT=''
project: ''
# Environment Variable Override: PWP__FILES__GCS__CREDENTIALS=''
credentials: ''
# Environment Variable Override: PWP__FILES__GCS__BUCKET=''
bucket: ''
#
# Optionally use IAM instead of the credentials when signing URLs.
# This is useful if you are authenticating your GKE applications with Workload Identity,
# See here: https://edgeguides.rubyonrails.org/active_storage_overview.html#google-cloud-storage-service
#
# Environment Variable Override: PWP__FILES__GCS__IAM=true
iam: false
# Environment Variable Override: PWP__FILES__GCS__GSA_EMAIL='email@domain.com'
gsa_email: null
# Microsoft Azure Storage Credentials
as:
# Environment Variable Override: PWP__FILES__AS__STORAGE_ACCOUNT_NAME=''
storage_account_name: ''
# Environment Variable Override: PWP__FILES__AS__STORAGE_ACCESS_KEY=''
storage_access_key: ''
# Environment Variable Override: PWP__FILES__AS__CONTAINER=''
container: ''
### Password Generator Defaults
#
# Set the defaults of the front page password generator.
#
gen:
# Whether generated passwords have numbers
#
# Environment variable override:
# PWP__GEN__HAS_NUMBERS='true'
#
has_numbers: true
# Whether generated passwords will be title cased
#
# Environment variable override:
# PWP__GEN__TITLE_CASED='true'
#
title_cased: true
# Whether generated passwords will use separators between syllables
#
# Environment variable override:
# PWP__GEN__USE_SEPARATORS='true'
#
use_separators: true
# List of consonants to generate from
#
# Environment variable override:
# PWP__GEN__CONSONANTS='bcdfghklmnprstvz'
#
consonants: 'bcdfghklmnprstvz'
# List of vowels to generate from
#
# Environment variable override:
# PWP__GEN__VOWELS='aeiouy'
#
vowels: 'aeiouy'
# If `use_separators` is enabled above, the list of separators to use (randomly)
#
# Environment variable override:
# PWP__GEN__SEPARATORS='-_='
#
separators: '-_='
# The maximum length of each syllable that a generated password can have
#
# Environment variable override:
# PWP__GEN__MAX_SYLLABLE_LENGTH=3
#
max_syllable_length: 3
# The minimum length of each syllable that a generated password can have
#
# Environment variable override:
# PWP__GEN__MIN_SYLLABLE_LENGTH=1
#
min_syllable_length: 1
# The exact number of syllables that a generated password will have
#
# Environment variable override:
# PWP__GEN__SYLLABLES_COUNT=3
#
syllables_count: 3
brand:
### Site Title
#
# Environment variable override: PWP__BRAND__TITLE='Acme Corp.'
#
title: 'Password Pusher'
### Site Tagline
#
# Environment variable override: PWP__BRAND__TAGLINE='Security First'
#
tagline: 'Go Ahead. Email Another Password.'
### Site Disclaimer
#
# Environment variable override: PWP__BRAND__DISCLAIMER='Use at your own risk.'
# disclaimer: 'This is a dummy disclaimer and should not be considered legally binding or taken seriously in any way. The content provided here is for entertainment and illustrative purposes only. Any resemblance to actual disclaimers is purely coincidental. We do not endorse or encourage the use of this disclaimer for any real-world applications, and we strongly advise consulting a legal professional for creating legitimate and appropriate disclaimers for your specific needs. By reading this disclaimer, you agree not to hold us responsible for any confusion, amusement, or bewilderment it may cause. This disclaimer has no legal validity, and any attempt to rely on it for legal, financial, or any other serious matters is ill-advised. Please use disclaimers responsibly and in accordance with applicable laws and regulations.'
### Show Footer Menu Toggle
#
# Environment variable override: PWP__BRAND__SHOW_FOOTER_MENU='true'
#
show_footer_menu: true
### Site logo
#
# ..for both a light and dark theme
#
# You can also replace these relative paths with fully qualified HTTP links to
# external resources such as Amazon S3 etc.
# e.g. PWP__BRAND__DARK_LOGO='https://mys3bucket.amazonaws.com/a/some-image.png'
#
# Environment variable override: PWP__BRAND__LIGHT_LOGO='https://mys3bucket.amazonaws.com/a/lea+giuliana.png'
# Environment variable override: PWP__BRAND__DARK_LOGO='https://mys3bucket.amazonaws.com/a/lea+giuliana.png'
#
# light_logo: 'logo-transparent-sm-bare.png'
# dark_logo: 'logo-transparent-sm-dark-bare.png'
### Favicon & icon images for mobile. When people on mobile (phones/tablets), bookmark
# the site or it is shown in history, these icons are used.
#
# You can also replace these relative paths with fully qualified HTTP links to
# external resources such as Amazon S3 etc.
# e.g. PWP__BRAND__ICON_57x57='https://mys3bucket.amazonaws.com/a/some-image.png'
#
# Although you should set all of the following values, at a bare minimum, make sure
# to set at least icon_57x57 and icon_96x96. Without these two, things are guaranteed
# to not work.
#
# You can use an icon generator such as:
# https://www.favicongenerator.com
# https://www.favicon-generator.org
#
# Environment variable override: PWP__BRAND__ICON_57x57='/path/to/image'
# icon_57x57: 'apple-icon-57x57.png'
# Environment variable override: PWP__BRAND__ICON_60x60='/path/to/image'
# icon_60x60: 'apple-icon-60x60.png'
# Environment variable override: PWP__BRAND__ICON_72x72='/path/to/image'
# icon_72x72: 'apple-icon-72x72.png'
# Environment variable override: PWP__BRAND__ICON_76x76='/path/to/image'
# icon_76x76: 'apple-icon-76x76.png'
# Environment variable override: PWP__BRAND__ICON_114x114='/path/to/image'
# icon_114x114: 'apple-icon-114x114.png'
# Environment variable override: PWP__BRAND__ICON_120x120='/path/to/image'
# icon_120x120: 'apple-icon-120x120.png'
# Environment variable override: PWP__BRAND__ICON_144x144='/path/to/image'
# icon_144x144: 'apple-icon-144x144.png'
# Environment variable override: PWP__BRAND__ICON_152x152='/path/to/image'
# icon_152x152: 'apple-icon-152x152.png'
# Environment variable override: PWP__BRAND__ICON_180x180='/path/to/image'
# icon_180x180: 'apple-icon-180x180.png'
# Environment variable override: PWP__BRAND__ICON_192x192='/path/to/image'
# icon_192x192: 'android-icon-192x192.png'
# Environment variable override: PWP__BRAND__ICON_32x32='/path/to/image'
# icon_32x32: 'favicon-32x32.png'
# Environment variable override: PWP__BRAND__ICON_96x96='/path/to/image'
# icon_96x96: 'favicon-96x96.png'
# Environment variable override: PWP__BRAND__ICON_16x16='/path/to/image'
# icon_16x16: 'favicon-16x16.png'
# Environment variable override: PWP__BRAND__ICON_144x144='/path/to/image'
# ms_icon_144x144: 'ms-icon-144x144.png'
### Throttling
#
# Configure the application throttling limits.
#
# Throttling enforces a minimum time interval
# between subsequent HTTP requests from a particular client, as
# well as by defining a maximum number of allowed HTTP requests
# per a given time period (per second or minute)
#
# See https://github.com/rack/rack-attack
#
throttling:
# ..maximum number of allowed HTTP requests per minute
#
# Default: 120
#
# Environment Variable Override: PWP__THROTTLING__MINUTE='60'
minute: 120
# ..maximum number of allowed HTTP requests per second
#
# Default: 60
#
# Environment Variable Override: PWP__THROTTLING__SECOND='20'
second: 60
### Trusted Proxies
#
# By default, Password Pusher will only proxy related headers from proxies on
# the local network. If you are using a proxy that is not on the local network,
# you will need to add the IP address of the proxy to the list below.
#
# This is useful if you are using a remote reverse proxy such as Cloudflare to
# serve the application. If local, you can leave this setting as is.
#
# Multiple IP addresses can be added by separating them with a comma.
#
# Environment Variable Override:
# PWP__TRUSTED_PROXIES='<ipaddress>'
# PWP__TRUSTED_PROXIES='<ipaddress1>,<ipaddress2>'
#
# trusted_proxies:
# - '1.2.3.4'
# - '2.3.4.5'
## Cloudflare Proxy
#
# If you are using Cloudflare as a proxy, you will need to set the following
# value to true. This will cause the application to fetch the list of Cloudflare
# proxy IP addresses and add them to the list of trusted proxies.
#
# Note that on application boot, this will trigger two HTTPs requests to fetch
# the list of Cloudflare IP addresses. The requests each have a timeout of 15 seconds
# and may delay on container boot.
#
# Environment Variable Override:
# PWP__CLOUDFLARE_PROXY='false'
#
cloudflare_proxy: false
### Mail Server Configuration
#
# When logins are enabled, an SMTP server is required to send emails to users
# for things such as forgot password, unlock account, confirm account etc.
# If `enable_logins` is set to true above, the following _are required_ to be
# filled out with valid values.
#
# These values are passed through to ActionMailer configuration. The documentation
# for ActionMailer is at:
# https://guides.rubyonrails.org/action_mailer_basics.html#action-mailer-configuration
#
# IMPORTANT: Also set host_domain and host_protocol above for email links to work correctly!
#
mail:
# Email delivery errors will be shown in the application
# Environment Variable Override: PWP__MAIL__RAISE_DELIVERY_ERRORS='false'
raise_delivery_errors: true
# Allows you to use a remote mail server. Just change it from its default "localhost" setting.
# Environment Variable Override: PWP__MAIL__SMTP_ADDRESS='smtp.example.com'
smtp_address: mail.smtp2go.com
# If you need to specify a HELO domain, you can do it here.
# Environment Variable Override: PWP__MAIL__SMTP_DOMAIN='xyz.dev'
# smtp_domain: ''
# Port of the SMTP server
# Environment Variable Override: PWP__MAIL__SMTP_PORT='587'
smtp_port: 2525
# If your mail server requires authentication, you need to specify the
# authentication type here. This is a string and one of :plain (will send
# the password in the clear), :login (will send password Base64 encoded)
# or :cram_md5 (combines a Challenge/Response mechanism to exchange
# information and a cryptographic Message Digest 5 algorithm to hash
# important information)
#
# Important: Comment this out if your server doesn't require authentication.
#
# Environment Variable Override: PWP__MAIL__SMTP_AUTHENTICATION='plain'
# smtp_authentication: 'plain'
# If your mail server requires authentication, set the username in this setting.
# Environment Variable Override: PWP__MAIL__SMTP_USER_NAME='apikey'
smtp_user_name: 'freshStart-OCI'
# If your mail server requires authentication, set the password in this setting.
# Environment Variable Override: PWP__MAIL__SMTP_PASSWORD='something@&#$'
# smtp_password: ''
# Use STARTTLS when connecting to your SMTP server and fail if unsupported.
# Environment Variable Override: PWP__MAIL__SMTP_STARTTLS='true'
# smtp_starttls: false
# Detects if STARTTLS is enabled in your SMTP server and starts to use it. Defaults to true.
# Environment Variable Override: PWP__MAIL__SMTP_ENABLE_STARTTLS_AUTO='false'
smtp_enable_starttls_auto: true
# Number of seconds to wait while attempting to open a connection.
# Environment Variable Override: PWP__MAIL__SMTP_OPEN_TIMEOUT='10'
smtp_open_timeout: 10
# Number of seconds to wait until timing-out a read(2) call.
# Environment Variable Override: PWP__MAIL__SMTP_READ_TIMEOUT='10'
smtp_read_timeout: 10
# When using TLS, you can set how OpenSSL checks the certificate. This is
# useful if you need to validate a self-signed and/or a wildcard certificate.
# This can be one of the OpenSSL verify constants, :none or :peer
# Environment Variable Override: PWP__MAIL__SMTP_OPENSSL_VERIFY_MODE='none'
# smtp_openssl_verify_mode: 'peer'
# Configure the e-mail address which will be shown as 'From' in emails
# See config/initializers/devise.rb where this is used
# Environment Variable Override: PWP__MAIL__MAILER_SENDER='"Password Pusher" <pglombardo@pwpush.com>'
mailer_sender: '"Password Pusher" <no-reply@mail.knravish.me>'
### Docker Pre-compilation
#
# This is useful if you modified the assets (e.g. CSS, JS, images) to customize
# the theme etc... Assets are precompiled before serving.
#
# If you set a custom theme, you will need to precompile the assets on container boot.
#
# Pre-compilation isn't supported in this yaml file. It is only supported
# through the environment variable PWP_PRECOMPILE='true'.
#
# Environment Variable: PWP_PRECOMPILE='false'
### Themes
#
# Password Pusher uses Bootswatch themes. See https://bootswatch.com/
#
# The following are the available themes. The default theme is 'default'.
#
# 'cerulean', 'cosmo', 'cyborg', 'darkly', 'flatly', 'journal', 'litera', 'lumen',
# 'lux', 'materia', 'minty', 'morph', 'pulse', 'quartz', 'sandstone', 'simplex',
# 'sketchy', 'slate', 'solar', 'spacelab', 'superhero', 'united', 'vapor', 'yeti', 'zephyr'
#
# To change the theme, set the `theme` value to the name of the theme you
# want to use.
#
# Environment Variable Override: PWP__THEME='default'
#
# Important Note: This setting can only be controlled by environment variable. (PWP__THEME)
# It cannot be set in this config file.
theme: 'default'
### Site Default Locale
#
# The default language for the application. This must be one of the
# valid/supported language codes from the list above.
#
# Note: This locale _must_ be in the list of enabled_language_codes.
#
# Example: default_locale: :es
#
# Environment Variable Override: PWP__DEFAULT_LOCALE='es'
default_locale: en
### Language & Internationalization
#
# List of enabled languages for the application.
#
# To remove the availability of languages from the application entirely,
# comment out (or remove) the language code(s) from the `enabled_language_codes`
# list below.
#
enabled_language_codes:
- ca # 'Català'
- cs # 'Čeština'
- da # 'Dansk'
- de # 'Deutsch'
- en # 'English'
- en-GB # 'English (UK)'
- es # 'Español'
- eu # 'Euskara'
- fi # 'Suomi'
- fr # 'Français'
- hi # 'हिन्दी'
- hu # 'Magyar'
- id # 'Indonesian'
- is # 'Íslenska'
- it # 'Italiano'
- ja # '日本語'
- ko # '한국어'
- lv # 'Latviski'
- nl # 'Nederlands'
- 'no' # 'Norsk' # _no_ keyword in Ruby evaluates to false :-(
- pl # 'Polski'
- pt-BR # 'Português'
- pt-PT # 'Português'
- ro # 'Română'
- ru # 'Русский'
- sr # 'Српски'
- sk # 'Slovenský'
- sv # 'Svenska'
- th # 'ไทย'
- uk # 'Українська'
- ur # 'اردو'
- zh-CN # '中文'
### Language & Internationalization
#
# Map of language codes to language name.
# Used internally for the language selector model.
#
# <language code>: '<language name>'
language_codes:
ca: 'Català'
cs: 'Čeština'
da: 'Dansk'
de: 'Deutsch'
en: 'English'
es: 'Español'
eu: 'Euskara'
en-GB: 'English (UK)'
fi: 'Suomi'
fr: 'Français'
hi: 'हिन्दी'
hu: 'Magyar'
id: 'Indonesian'
is: 'Íslenska'
it: 'Italiano'
ja: '日本語'
ko: '한국어'
lv: 'Latviski'
nl: 'Nederlands'
'no': 'Norsk' # _no_ keyword in Ruby evaluates to false :-(
pl: 'Polski'
pt-BR: 'Português'
pt-PT: 'Português'
ro: 'Română'
ru: 'Русский'
sk: 'Slovenský'
sr: 'Српски'
sv: 'Svenska'
th: 'ไทย'
uk: 'Українська'
ur: 'اردو'
zh-CN: '中文'
# Used internally for the language selector model.
# This provides a conversion of language codes to
# country codes show the correct flag icons.
# See:
# https://github.com/lipis/flag-icons
# https://flagicons.lipis.dev
country_codes:
ca: :es-ct
cs: :cz
da: :dk
de: :de
en: :us
en-GB: :gb
es: :es
eu: :es-pv
fi: :fi
fr: :fr
hi: :in
hu: :hu
id: :id
is: :is
it: :it
ja: :jp
ko: :kr
lv: :lv
nl: :nl
'no': :no # _no_ keyword in Ruby evaluates to false :-(
pl: :pl
pt-BR: :br
pt-PT: :pt
ro: :ro
ru: :ru
sk: :sk
sr: :rs
sv: :se
th: :th
uk: :ua
ur: :pk
zh-CN: :cn
# Configure the logging verbosity of the application.
#
# Valid values are: :debug, :info, :warn, :error, :fatal
log_level: :warn
# In containers, it is usually desired to log to stdout
# instead of using log files (e.g. log/production.log).
#
log_to_stdout: true

14
pwpush_server-setup Normal file
View File

@@ -0,0 +1,14 @@
#!/bin/bash
# Deploy pwpush: stage config, render the compose file from its template,
# and bring the stack up. Runs as the service account; ~/<user>-env supplies
# PORT, SMTP_PASSWORD, PG_PASS, VOLUME_PATH, PUID, PGID for envsubst.
set -euo pipefail
# shellcheck source=pwpush_server-env
. "${HOME}"/"${USER}"-env
echo -e "\n[+] setting up pwpush\n\n-------\n"
mkdir -p "${VOLUME_PATH}"/config
mkdir -p "${VOLUME_PATH}"/database
# Anchor to $HOME like every other path here; the original relative
# `cp <user>-settings.yaml` silently depended on the caller's cwd.
cp "${HOME}"/"${USER}"-settings.yaml "${VOLUME_PATH}"/config
envsubst <"${HOME}"/"${USER}"-compose_template.yaml >"${HOME}"/"${USER}"-compose.yaml
sudo docker compose -f "${HOME}"/"${USER}"-compose.yaml up -d

14
pwpush_server-teardown Normal file
View File

@@ -0,0 +1,14 @@
#!/bin/sh
# Tear down the pwpush deployment: stop containers, kill remaining processes,
# delete the service account and every file it owns. Destructive by design.
username=pwpush_server
# Resolve the uid up front and bail if the user is already gone — after
# deluser the name no longer maps to a uid for the final find sweep.
uid_num=$(id -u "$username") || exit 1
# application
sudo docker compose -f /home/"$username"/"$username"-compose.yaml down -v
sudo killall -9 -v -g -u "$username"
sudo crontab -r -u "$username"
sudo deluser --remove-all-files "$username"
# clean-up: anything outside $HOME (e.g. /tmp) still owned by the old uid
sudo find / -user "$uid_num" -delete

18
recipes.knravish.me.conf Normal file
View File

@@ -0,0 +1,18 @@
server {
    server_name recipes.knravish.me;
    index index.html index.htm;
    location / {
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header Host $http_host;
        proxy_pass http://127.0.0.1:9925;
        proxy_redirect off;
        # CORS is a *response* header. proxy_set_header only changes the
        # request sent upstream, so the original line had no CORS effect;
        # add_header attaches it to the response returned to the client.
        add_header Access-Control-Allow-Origin *;
        # long timeouts for slow imports/scrapes
        proxy_read_timeout 600s;
        proxy_send_timeout 600s;
    }
    listen 80;
}

View File

@@ -0,0 +1,13 @@
---
# shlink URL shortener; nginx proxies the public hostname to the loopback bind.
services:
  shlink:
    image: ghcr.io/shlinkio/shlink:stable
    container_name: my_shlink
    pull_policy: always
    restart: unless-stopped
    ports:
      - "127.0.0.1:8080:8080"
    environment:
      DEFAULT_DOMAIN: lnk.knravish.me
      # The Compose spec requires environment values to be strings; bare YAML
      # booleans (true/false) here are rejected by `docker compose config`.
      IS_HTTPS_ENABLED: 'true'
      DISABLE_TRACKING: 'true'

20
shlink_server-setup Normal file
View File

@@ -0,0 +1,20 @@
#!/bin/bash
# Start shlink and seed it with the personal short codes defined below.
set -euo pipefail
echo -e "\n[+] setting up shlink\n\n-------\n"
sudo docker compose -f "${HOME}"/"${USER}"-compose.yaml up -d
# shortcode -> destination URL
declare -A codes=(
  ["in"]="https://linkedin.com/in/kaushik-ravishankar"
  ["github"]="https://github.com/20kaushik02"
  ["folio"]="https://knravish.me"
  ["k23"]="https://k23.kurukshetraceg.org.in"
)
# give it some time to start
sleep 1
for shortcode in "${!codes[@]}"; do
  echo "$shortcode - ${codes[$shortcode]}"
  # No '-it': the command needs neither stdin nor a TTY, and '-t' makes
  # 'docker exec' fail when the script runs without a terminal (e.g. CI/cron).
  sudo docker exec my_shlink shlink short-url:create -c "$shortcode" -rnf "${codes[$shortcode]}"
done

14
shlink_server-teardown Normal file
View File

@@ -0,0 +1,14 @@
#!/bin/bash
# Tear down the shlink service: stop containers (dropping volumes), then
# remove the service user's processes, crontab, account, and leftover files.
username=shlink_server
# application
sudo docker compose -f "/home/${username}/${username}-compose.yaml" down -v
# Capture the UID before deluser removes the account.
uid_num=$(id -u "$username")
sudo killall -9 -v -g -u "$username"
sudo crontab -r -u "$username"
sudo deluser --remove-all-files "$username"
# clean-up: sweep any remaining files owned by the now-deleted UID
sudo find / -user "$uid_num" -delete

42
spotmgr_server-backup Normal file
View File

@@ -0,0 +1,42 @@
#!/bin/bash
# Back up the spotify-manager Postgres database to object storage via rclone,
# sending an HTTP push notification on success/failure.
# All output is appended to a monthly log file under ~/backup_logs.
# shellcheck source=spotmgr_server-env
. "${HOME}"/"${USER}"-env
mkdir -p "${HOME}"/backup_logs
logFile=${HOME}/backup_logs/$(date +%y_%m).log
{
  echo -e "\n[+] spotify-manager backup\n"
  mkdir -p /tmp/"${USER}"-backup
  # Quiesce the app but keep postgres up so the dump is consistent.
  sudo docker compose -f "${HOME}"/"${USER}"-compose.yaml stop
  sudo docker compose -f "${HOME}"/"${USER}"-compose.yaml start postgres
  # No '-it': a pseudo-TTY ('-t') rewrites LF to CRLF on the redirected
  # stream and corrupts the SQL dump; '-i' is unneeded (nothing piped in).
  # shellcheck disable=SC2024
  sudo docker exec -u "${PUID}:${PGID}" spotify-manager-postgres sh -c \
    'pg_dumpall -c --if-exists -U postgres' >/tmp/"${USER}"-backup/db.out
  sudo docker compose -f "${HOME}"/"${USER}"-compose.yaml start
  if ! rclone copy /tmp/"${USER}"-backup "${BUCKET_PATH}" -v; then
    curl -Ss \
      -H "Title: Spotify Manager" \
      -H "Priority: 3" \
      -H "Tags: warning,backup" \
      -d "Backup not completed" \
      "${NOTIF_URL}"
    rm -rf /tmp/"${USER}"-backup
    exit 1
  fi
  curl -Ss \
    -H "Title: Spotify Manager" \
    -H "Priority: 2" \
    -H "Tags: heavy_check_mark,backup" \
    -d "Backup completed" \
    "${NOTIF_URL}"
  rm -rf /tmp/"${USER}"-backup
} &>>"$logFile"

View File

@@ -0,0 +1,56 @@
# spotify-manager stack: Postgres + Redis + the API container.
# Rendered from a template via envsubst; ${...} values come from the env file.
---
services:
postgres:
container_name: spotify-manager-postgres
image: postgres
restart: on-failure
environment:
POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}
POSTGRES_DB: spotify-manager
volumes:
- ${VOLUME_PATH}/pgdata:/var/lib/postgresql/data
# NOTE(review): presumably mounted so the arbitrary PUID has a passwd
# entry inside the container — confirm.
- /etc/passwd:/etc/passwd:ro
# Run as the unprivileged host user rather than the image default.
user: ${PUID}:${PGID}
healthcheck:
# Healthy once the target database accepts queries.
test: ['CMD-SHELL', 'psql -U postgres -d spotify-manager -c "select version();"']
interval: 1s
retries: 5
timeout: 5s
redis:
container_name: spotify-manager-redis
image: redis
restart: on-failure
volumes:
- ${VOLUME_PATH}/redisdata:/data
user: ${PUID}:${PGID}
healthcheck:
test: ['CMD-SHELL', 'redis-cli ping | grep PONG']
interval: 1s
retries: 5
timeout: 3s
api:
container_name: spotify-manager-api
image: kaushikr2/spotify-manager-api
# init: reap zombies / forward signals to the node process.
init: true
restart: on-failure
ports:
# Loopback only; the host reverse proxy fronts the API.
- 127.0.0.1:9001:9001
# Start (and restart) the API only after both backends report healthy.
depends_on:
postgres:
condition: service_healthy
restart: true
redis:
condition: service_healthy
restart: true
environment:
NODE_ENV: production
SPOTMGR_PORT: 9001
SPOTMGR_CLIENT_ID: ${SPOTMGR_CLIENT_ID}
SPOTMGR_CLIENT_SECRET: ${SPOTMGR_CLIENT_SECRET}
SPOTMGR_SESSION_SECRET: ${SPOTMGR_SESSION_SECRET}
SPOTMGR_TRUST_PROXY: 1
SPOTMGR_BASE_DOMAIN: 'spotify-manager.knravish.me'
SPOTMGR_REDIRECT_URI: 'https://api.spotify-manager.knravish.me/api/auth/callback'
SPOTMGR_APP_URI: 'https://spotify-manager.knravish.me/'
# Connection strings reference the sibling services by compose DNS name.
SPOTMGR_DB_URI: 'postgres://postgres:${POSTGRES_PASSWORD}@postgres:5432/spotify-manager'
SPOTMGR_REDIS_URI: redis://redis:6379

1
spotmgr_server-cronjob Normal file
View File

@@ -0,0 +1 @@
# Daily at 10:09 — run the spotify-manager database backup script.
9 10 * * * /home/spotmgr_server/spotmgr_server-backup

22
spotmgr_server-setup Normal file
View File

@@ -0,0 +1,22 @@
#!/bin/bash
# Set up spotify-manager: render the compose file, start the stack, then
# restore the Postgres database from the latest rclone backup.
set -euo pipefail
echo -e "\n[+] setting up spotify-manager\n\n-------\n"
# shellcheck source=spotmgr_server-env
. "${HOME}"/"${USER}"-env
mkdir -p "${VOLUME_PATH}"/{pg,redis}data
envsubst <"${HOME}"/"${USER}"-compose_template.yaml >"${HOME}"/"${USER}"-compose.yaml
sudo docker compose -f "${HOME}"/"${USER}"-compose.yaml up -d
echo "[+] restoring database from backup..."
sudo docker compose -f "${HOME}"/"${USER}"-compose.yaml stop
sudo docker compose -f "${HOME}"/"${USER}"-compose.yaml start postgres
# NOTE(review): nothing waits for postgres readiness here — the restore may
# race a slow DB start; confirm the healthcheck has passed before relying on it.
rclone copy "${BUCKET_PATH}" "${HOME}" -v
# Feed the dump from its absolute path via stdin redirection. The original
# 'cat db.out | docker exec …' was both a useless use of cat and CWD-relative
# (broke unless invoked from ${HOME}, where rclone placed db.out).
sudo docker exec -i spotify-manager-postgres psql -U postgres -X <"${HOME}/db.out"
echo "[+] restarting..."
sudo docker compose -f "${HOME}"/"${USER}"-compose.yaml start

View File

@@ -0,0 +1,36 @@
# Stirling-PDF — single container bound to loopback, with bind mounts for
# OCR training data, extra configs, and logs (auto-created on the host).
---
services:
stirling:
image: frooodle/s-pdf:latest
container_name: stirling-pdf
pull_policy: always
restart: unless-stopped
ports:
# Loopback only; host port comes from the env file.
- 127.0.0.1:${PORT}:8080
volumes:
# Tesseract OCR language/training data.
- type: bind
source: ${VOLUME_PATH}/trainingData
target: /usr/share/tessdata
bind:
create_host_path: true
- type: bind
source: ${VOLUME_PATH}/extraConfigs
target: /configs
bind:
create_host_path: true
- type: bind
source: ${VOLUME_PATH}/logs
target: /logs
bind:
create_host_path: true
environment:
PUID: ${PUID}
PGID: ${PGID}
# Enable the built-in auth layer with a seeded initial admin login.
DOCKER_ENABLE_SECURITY: true
SECURITY_ENABLE_LOGIN: true
SECURITY_INITIALLOGIN_USERNAME: ${INITIAL_USERNAME}
SECURITY_INITIALLOGIN_PASSWORD: ${INITIAL_PASSWORD}
SECURITY_CSRFDISABLED: false
SYSTEM_SHOWUPDATEONLYADMIN: true
INSTALL_BOOK_AND_ADVANCED_HTML_OPS: false
LANGS: en_US

1
stirling_server-cronjob Normal file
View File

@@ -0,0 +1 @@
# Tuesdays at 11:10 — pull and redeploy the latest stirling-pdf image.
10 11 * * 2 /home/stirling_server/stirling_server-update

11
stirling_server-setup Normal file
View File

@@ -0,0 +1,11 @@
#!/bin/bash
# Bring up stirling-pdf: announce, load the per-service env, ensure the data
# volume exists, render the compose file from its template, and start it.
echo -e "\n[+] setting up stirling-pdf\n\n-------\n"
# shellcheck source=stirling_server-env
. "${HOME}/${USER}-env"
mkdir -p "${VOLUME_PATH}"
envsubst <"${HOME}/${USER}-compose_template.yaml" >"${HOME}/${USER}-compose.yaml"
sudo docker compose -f "${HOME}/${USER}-compose.yaml" up -d

14
stirling_server-teardown Normal file
View File

@@ -0,0 +1,14 @@
#!/bin/bash
# Tear down the stirling-pdf service: stop containers (dropping volumes), then
# remove the service user's processes, crontab, account, and leftover files.
username=stirling_server
# application
sudo docker compose -f "/home/${username}/${username}-compose.yaml" down -v
# Capture the UID before deluser removes the account.
uid_num=$(id -u "$username")
sudo killall -9 -v -g -u "$username"
sudo crontab -r -u "$username"
sudo deluser --remove-all-files "$username"
# clean-up: sweep any remaining files owned by the now-deleted UID
sudo find / -user "$uid_num" -delete

11
stirling_server-update Normal file
View File

@@ -0,0 +1,11 @@
#!/bin/bash
# Pull the latest stirling-pdf image, recreate the container, and prune
# unused images. Output is appended to a monthly log under ~/update_logs.
mkdir -p "${HOME}"/update_logs
logFile=${HOME}/update_logs/$(date +%y_%m).log
{
  echo -e "\n[+] updating stirling-pdf\n"
  sudo docker compose -f "${HOME}"/"${USER}"-compose.yaml pull &&
    sudo docker compose -f "${HOME}"/"${USER}"-compose.yaml up -d --always-recreate-deps --remove-orphans &&
    # '-f' already suppresses the confirmation prompt, so the original
    # 'yes |' pipe was redundant (prune never reads stdin when forced).
    sudo docker image prune -af
} &>>"$logFile"

View File

@@ -0,0 +1,16 @@
# Reverse proxy: syncthing.knravish.me -> local Syncthing GUI (8384),
# gated behind Authelia forward-auth via the included snippets.
# NOTE(review): plain HTTP on port 80 — presumably TLS is added later; confirm.
server {
server_name syncthing.knravish.me;
index index.html index.htm;
# Exposes the internal /authelia auth endpoint for auth_request.
include /etc/nginx/snippets/authelia-location.conf;
# Variable upstream so nginx resolves/starts even if the backend is down.
set $upstream http://127.0.0.1:8384;
location / {
include /etc/nginx/snippets/proxy.conf;
# Require an authenticated Authelia session before proxying.
include /etc/nginx/snippets/authelia-authrequest.conf;
proxy_pass $upstream;
}
listen 80;
}

39
syncthing_server-backup Normal file
View File

@@ -0,0 +1,39 @@
#!/bin/bash
# Back up the Syncthing configuration directory to object storage via rclone,
# sending an HTTP push notification on success/failure.
# All output is appended to a monthly log file under ~/backup_logs.
# shellcheck source=syncthing_server-env
. "${HOME}"/"${USER}"-env
mkdir -p "${HOME}"/backup_logs
logFile=${HOME}/backup_logs/$(date +%y_%m).log
{
  echo -e "\n[+] syncthing backup\n"
  mkdir -p /tmp/"${USER}"-backup
  # Shut syncthing down so the config/db files are copied quiescent.
  syncthing cli operations shutdown
  cp -pr "${CONFIG_PATH}"/* /tmp/"${USER}"-backup
  systemctl --user restart syncthing.service
  # Quote the exclude pattern: left unquoted, the shell could glob-expand
  # './*.db/**' against the CWD before rclone ever sees it.
  if ! rclone copy /tmp/"${USER}"-backup "${BUCKET_PATH}" --exclude "./*.db/**" -v; then
    curl -Ss \
      -H "Title: Syncthing" \
      -H "Priority: 3" \
      -H "Tags: warning,backup" \
      -d "Backup not completed" \
      "${NOTIF_URL}"
    rm -r /tmp/"${USER}"-backup
    exit 1
  fi
  curl -Ss \
    -H "Title: Syncthing" \
    -H "Priority: 2" \
    -H "Tags: heavy_check_mark,backup" \
    -d "Backup completed" \
    "${NOTIF_URL}"
  rm -r /tmp/"${USER}"-backup
} &>>"$logFile"

1
syncthing_server-cronjob Normal file
View File

@@ -0,0 +1 @@
# Daily at 10:11 — run the Syncthing configuration backup script.
11 10 * * * /home/syncthing_server/syncthing_server-backup

Some files were not shown because too many files have changed in this diff Show More