commit d69daa37bf2a249ed8d1fa1a0a768f6200e689fe Author: Kaushik Narayan R Date: Fri Jan 16 00:11:58 2026 -0800 Initial commit (I feel unsafe) diff --git a/.editorconfig b/.editorconfig new file mode 100644 index 0000000..f158368 --- /dev/null +++ b/.editorconfig @@ -0,0 +1,25 @@ +# EditorConfig helps developers define and maintain consistent +# coding styles between different editors and IDEs +# https://editorconfig.org + +# top-most EditorConfig file +root = true + +[*] +end_of_line = lf +insert_final_newline = true + +charset = utf-8 + +indent_style = space +indent_size = 2 + +trim_trailing_whitespace = true +max_line_length = 80 + +[*.txt] +indent_style = tab +indent_size = 4 + +[*.{diff,md}] +trim_trailing_whitespace = false diff --git a/.gitattributes b/.gitattributes new file mode 100644 index 0000000..71afda6 --- /dev/null +++ b/.gitattributes @@ -0,0 +1,31 @@ +########################################################################## + +# The .gitattributes file tells git how to handle line endings, +# how to recognize and diff different file types, specifies merge +# strategies, content filters(?) to run on commit/checkout, etc. + +# See more: +# https://git-scm.com/book/en/v2/Customizing-Git-Git-Attributes + +########################################################################## + +# Set the default behavior, in case people don't have core.autocrlf set. +* text=auto eol=lf + +# Scripts +*.sh text eol=lf +*.ps1 text eol=crlf + +# Docs +*.md text diff=markdown +*.txt text + +# Config +.editorconfig text +*.env text +.gitattributes text +*.yml text +*.yaml text + +# Ignore files +*.*ignore text diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..ad4348e --- /dev/null +++ b/.gitignore @@ -0,0 +1,23 @@ +*env +rclone.conf + +# SSH key pair +*.key +*.pub + +authelia/secrets/* +!authelia/secrets/README.md + +wg/connections/*.conf +wg/connections/README.md + +wg/all_proxied/*.ps1 +!wg/all_proxied/*copy.ps1 + +ghost_server-credentials.exp +ghost_server-config.production.json + +# lel +file_transfers.ps1 +freshStart.ps1 +windows.md diff --git a/.shellcheckrc b/.shellcheckrc new file mode 100644 index 0000000..256d0e6 --- /dev/null +++ b/.shellcheckrc @@ -0,0 +1 @@ +external-sources=true \ No newline at end of file diff --git a/README.md b/README.md new file mode 100644 index 0000000..519ca9c --- /dev/null +++ b/README.md @@ -0,0 +1,176 @@ +# The Playground + +--- + +## Blueprint usage + +**_take a deep breath, clear your mind, and open Spotify_** + +--- + +### Pre-requisite #0: A fresh start + +#### Fill `*-env` for all (fml) + +```bash +######################## +# instance-env + +export BACKUP_BUCKET='b2:the-bucket-name-goes-here' # Backblaze +export NOTIF_URL='https://ntfy.sh/the-topic-name-goes-here' # ntfy +export BASE_DOMAIN='knravish.me' + +######################## +# Some common stuff + +export BUCKET_PATH="${BACKUP_BUCKET}/path/to/backup/in/object/storage" + +export VOLUME_PATH="${HOME}/${USER}-data" # or wherever your service's data is + +export PORT= # your service's webserver + +# PUID and PGID for running containers as non-root +PUID=$(id -u "$USER") +export PUID +PGID=$(id -g "$USER") +export PGID + +######################## +# Application specific stuff +# Just look it up bruh I can't be arsed + +######################## +# ok but WireGuard is a PITA +export UDP_PORT= +export GUI_PORT= + +# shellcheck disable=SC2016 +export PASSWORD_HASH= +export WG_HOST="vpn.${BASE_DOMAIN}" + +# hacky? 
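# `ip route get 8.8.8.8` asks the kernel which route it would use to reach a
# public address; when that route goes through a gateway the output looks like
# "8.8.8.8 via <gw> dev <iface> src <ip>", so awk field 5 is the outbound
# interface name (e.g. enp0s6). On a directly-connected route there is no
# "via" and field 5 shifts to the source IP - hence the "hacky?" note.
# wg-easy forwards/NATs tunnel traffic out of the device named in WG_DEVICE.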
+WG_DEVICE=$(ip route get 8.8.8.8 2>&1 | awk '{ printf "%s",$5;}') +export WG_DEVICE +``` + +### Then + +- run `file_transfers.ps1` (assuming Windows host for now) +- update DNS records as needed + +### Pre-requisite #1: Ports in VPC/VCN firewall rules + +- allow all ICMP traffic for pinging (already open?) +- list of active listeners + - `*` - equivalent to `0.0.0.0, [::]` + - `%lo` - loopback + - `enp0s6` - name of the Internet-facing gateway interface of the host + - `Forwarded` - if port is open in the host's firewall (for VPS? if open in virtual network security rules) + +| Address | Port | Protocol | Desc. | Forwarded? | +| --------------- | ----- | -------- | ------------------------------------------ | ---------- | +| | | ICMP | All ICMP traffic | O | +| \* | 22 | TCP | SSH | O | +| 127.0.0.53%lo | 53 | TCP, UDP | systemd-resolved (stub? vestigial?) | X | +| 10.0.0.3%enp0s6 | 68 | UDP | DHCP | X | +| \* | 80 | TCP | Nginx (HTTP) | O | +| \* | 443 | TCP | Nginx (HTTPS) | O | +| 127.0.0.1 | 2368 | TCP | Ghost blog | X | +| 127.0.0.1 | 3456 | TCP | Vikunja | X | +| 127.0.0.1 | 5006 | TCP | Actual Budget | X | +| 127.0.0.1 | 5100 | TCP | Password Pusher (pwpush) | X | +| 127.0.0.1 | 8080 | TCP | Shlink | X | +| 127.0.0.1 | 9001 | TCP | Spotify Manager (that's us!) | X | +| 127.0.0.1 | 8081 | TCP | Stirling-PDF | X | +| 127.0.0.1 | 9091 | TCP | Authelia | X | +| 127.0.0.1 | 8384 | TCP | Syncthing (web GUI) | X | +| \* | 21027 | UDP | Syncthing (discovery broadcasting) | O | +| \* | 22000 | TCP, UDP | Syncthing (sync protocol; UDP is for QUIC) | O | +| \* | 25565 | TCP | Minecraft server - Java edition, 1.20.4 | O | +| \* | 51820 | UDP | WireGuard (VPN tunnel) | O | +| 127.0.0.1 | 51821 | TCP | WireGuard (web GUI) | X | +| 127.0.0.1 | 5230 | TCP | Usememos | X | +| 127.0.0.1 | 3000 | TCP | Homepage | X | +| \* | 30000 | TCP | Foundry VTT | X | +| 127.0.0.1 | 3001 | TCP | Gitea | X | + +### Pre-requisite #2: Config the master script + +- start with `instance-setup` + - reevaluate sudo perms... have given too much stuff too much permissions :\) + - ensure ufw is disabled + - (Oracle VPS only) open iptables to all (`-I INPUT -j ACCEPT` or something) + - maybe use new pro token + - check email address too + +--- + +## Applications + +### Authelia + +### Actual + +- PWA on mobile! + +### Ghost + +- requires checks for ghost, cli, and node version updates + +### Minecraft + +- version-locked 1.20.4 +- backup of everything, including JAR file + +### Password Pusher + +### Shlink + +- managed on [shlink.io webapp](https://app.shlink.io) +- API key for GUI management, else run command in container + +### Spotify Manager + +- yippee! +- be conservative with dep. updates + +### Stirling-PDF + +- guest creds are `'guest':'temppass3'` + +### Wireguard/wg-easy + +- access VPS services on its `10.0.0.3/24` address + +## Tooling and config + +### bash + +- place new aliases in `/etc/skel` file as well +- do not place non-sensitive stuff in `/etc/environment` +- `cp -pr` for recursive copying and without changing mode or access time + +### nginx + +- current practice - place configs in `conf.d`, change extension to not end in `.conf` for disabled sites + - old practice - `sites-enabled` soft links to `sites-available` files as needed +- serving some temporary files to share from /var/www/tmpfiles +- the build with added modules is fked up, ignore + +### rclone + +- config is for Backblaze B2, 10GB total +- always log!!! and notify!!! 
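"Log and notify" here means the shape every `*-backup` script in this repo follows: append to a monthly log file, then push to ntfy on both failure and success. Condensed sketch only — the title/tags vary per service, and it assumes the `BUCKET_PATH` and `NOTIF_URL` values from the `*-env` file above are exported:

```bash
mkdir -p "${HOME}"/backup_logs
logFile=${HOME}/backup_logs/$(date +%y_%m).log # one log file per month

{
  if ! rclone copy /tmp/"${USER}"-backup "${BUCKET_PATH}" -v; then
    # failure: priority-3 warning so a silent cron failure still gets noticed
    curl -Ss -H "Title: Some Service" -H "Priority: 3" -H "Tags: warning,backup" \
      -d "Backup not completed" "${NOTIF_URL}"
    exit 1
  fi
  # success: low-priority confirmation
  curl -Ss -H "Title: Some Service" -H "Priority: 2" -H "Tags: heavy_check_mark,backup" \
    -d "Backup completed" "${NOTIF_URL}"
} &>>"$logFile"
```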
+ +### systemd + +- `WantedBy` should be + - `default.target` for user services + - `multi-user.target` for system services + +### cron + +- cron doesn't get the same env as a normal login/shell, so give it a minimal set of vars +- set `USER` at the start of every user crontab +- set `XDG_RUNTIME_DIR` and `DBUS_SESSION_BUS_ADDRESS` for users that run systemd user services +- stagger cronjobs to avoid resource contention diff --git a/actual_server-backup b/actual_server-backup new file mode 100644 index 0000000..d6b1d93 --- /dev/null +++ b/actual_server-backup @@ -0,0 +1,40 @@ +#!/bin/bash + +# shellcheck source=actual_server-env +. "${HOME}"/"${USER}"-env + +mkdir -p "${HOME}"/backup_logs +logFile=${HOME}/backup_logs/$(date +%y_%m).log + +{ + echo -e "\n[+] actual backup\n" + + mkdir -p /tmp/"${USER}"-backup/{user,server}-files + + sudo docker compose -f "${HOME}"/"${USER}"-compose.yaml stop + + cp -pr "${VOLUME_PATH}"/* /tmp/"${USER}"-backup + + sudo docker compose -f "${HOME}"/"${USER}"-compose.yaml start + + rclone copy /tmp/"${USER}"-backup "${BUCKET_PATH}" -v + if [ $? -ne 0 ]; then + curl -Ss \ + -H "Title: Actual Server" \ + -H "Priority: 3" \ + -H "Tags: warning,backup" \ + -d "Backup not completed" \ + "${NOTIF_URL}" + rm -r /tmp/"${USER}"-backup + exit 1 + fi + + curl -Ss \ + -H "Title: Actual Server" \ + -H "Priority: 2" \ + -H "Tags: heavy_check_mark,backup" \ + -d "Backup completed" \ + "${NOTIF_URL}" + rm -r /tmp/"${USER}"-backup + +} &>>"$logFile" diff --git a/actual_server-compose_template.yaml b/actual_server-compose_template.yaml new file mode 100644 index 0000000..c05d363 --- /dev/null +++ b/actual_server-compose_template.yaml @@ -0,0 +1,26 @@ +--- +services: + actual: + image: ghcr.io/actualbudget/actual + container_name: actual + pull_policy: always + restart: unless-stopped + ports: + - 127.0.0.1:${PORT}:5006 + deploy: + resources: + limits: + memory: 2048M + volumes: + - type: bind + source: ${VOLUME_PATH} + target: /data + bind: + create_host_path: true + user: ${PUID}:${PGID} + healthcheck: + test: ['CMD-SHELL', 'node src/scripts/health-check.js'] + interval: 60s + timeout: 10s + retries: 3 + start_period: 20s diff --git a/actual_server-cronjob b/actual_server-cronjob new file mode 100644 index 0000000..58bd1a6 --- /dev/null +++ b/actual_server-cronjob @@ -0,0 +1,2 @@ +0 10 * * * /home/actual_server/actual_server-backup +0 11 * * 2 /home/actual_server/actual_server-update diff --git a/actual_server-setup b/actual_server-setup new file mode 100644 index 0000000..e4e8cae --- /dev/null +++ b/actual_server-setup @@ -0,0 +1,25 @@ +#!/bin/bash + +# shellcheck source=actual_server-env +. "${HOME}"/"${USER}"-env + +echo -e "\n[+] setting up actual\n\n-------\n" + +mkdir -p "${VOLUME_PATH}" + +envsubst <"${HOME}"/"${USER}"-compose_template.yaml >"${HOME}"/"${USER}"-compose.yaml + +sudo docker compose -f "${HOME}"/"${USER}"-compose.yaml up -d + +echo "[/] waiting for migrations to run..." 
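# the first `up -d` lets the container lay out /data and run its DB migrations
# before we stop it and drop the backup in over the top; the fixed 10s pause
# below is a rough guess rather than a real readiness check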
+sleep 10 # wait for migrations to run + +echo "[+] restoring backup data" + +sudo docker compose -f "${HOME}"/"${USER}"-compose.yaml stop + +rm -r "${VOLUME_PATH:?}"/* + +rclone copy "${BUCKET_PATH}" "${VOLUME_PATH}" -v + +sudo docker compose -f "${HOME}"/"${USER}"-compose.yaml start diff --git a/actual_server-teardown b/actual_server-teardown new file mode 100644 index 0000000..e8728b0 --- /dev/null +++ b/actual_server-teardown @@ -0,0 +1,14 @@ +#!/bin/bash + +username=actual_server + +# application +sudo docker compose -f /home/${username}/${username}-compose.yaml down -v + +uid_num=$(id -u $username) +sudo killall -9 -v -g -u $username +sudo crontab -r -u $username +sudo deluser --remove-all-files $username + +# clean-up +sudo find / -user "$uid_num" -delete diff --git a/actual_server-update b/actual_server-update new file mode 100644 index 0000000..7a669db --- /dev/null +++ b/actual_server-update @@ -0,0 +1,11 @@ +#!/bin/bash + +mkdir -p "${HOME}"/update_logs +logFile=${HOME}/update_logs/$(date +%y_%m).log +{ + echo -e "\n[+] updating actual\n" + + sudo docker compose -f "${HOME}"/"${USER}"-compose.yaml pull && + sudo docker compose -f "${HOME}"/"${USER}"-compose.yaml up -d --always-recreate-deps --remove-orphans && + yes | sudo docker image prune -af +} &>>"$logFile" diff --git a/api.spotify-manager.knravish.me.conf b/api.spotify-manager.knravish.me.conf new file mode 100644 index 0000000..bb2e162 --- /dev/null +++ b/api.spotify-manager.knravish.me.conf @@ -0,0 +1,17 @@ +server { + server_name api.spotify-manager.knravish.me; + + location / { + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header Host $http_host; + proxy_pass http://127.0.0.1:9001; + proxy_redirect off; + proxy_set_header Access-Control-Allow-Origin *; + proxy_read_timeout 600s; + proxy_send_timeout 600s; + } + + listen 80; +} diff --git a/auth.knravish.me.conf b/auth.knravish.me.conf new file mode 100644 index 0000000..34d47b3 --- /dev/null +++ b/auth.knravish.me.conf @@ -0,0 +1,21 @@ +server { + server_name auth.knravish.me; + index index.html index.htm; + + set $upstream http://127.0.0.1:9091; + + location / { + include /etc/nginx/snippets/proxy.conf; + proxy_pass $upstream; + } + + location = /api/verify { + proxy_pass $upstream; + } + + location /api/authz/ { + proxy_pass $upstream; + } + + listen 80; +} diff --git a/authelia/nginx_snippets/authelia-authrequest.conf b/authelia/nginx_snippets/authelia-authrequest.conf new file mode 100644 index 0000000..8f76f76 --- /dev/null +++ b/authelia/nginx_snippets/authelia-authrequest.conf @@ -0,0 +1,32 @@ +## Send a subrequest to Authelia to verify if the user is authenticated and has permission to access the resource. +auth_request /internal/authelia/authz; + +## Save the upstream metadata response headers from Authelia to variables. +auth_request_set $user $upstream_http_remote_user; +auth_request_set $groups $upstream_http_remote_groups; +auth_request_set $name $upstream_http_remote_name; +auth_request_set $email $upstream_http_remote_email; + +## Inject the metadata response headers from the variables into the request made to the backend. +proxy_set_header Remote-User $user; +proxy_set_header Remote-Groups $groups; +proxy_set_header Remote-Email $email; +proxy_set_header Remote-Name $name; + +## Configure the redirection when the authz failure occurs. Lines starting with 'Modern Method' and 'Legacy Method' +## should be commented / uncommented as pairs. 
The modern method uses the session cookies configuration's authelia_url +## value to determine the redirection URL here. It's much simpler and compatible with the mutli-cookie domain easily. + +## Modern Method: Set the $redirection_url to the Location header of the response to the Authz endpoint. +auth_request_set $redirection_url $upstream_http_location; + +## Modern Method: When there is a 401 response code from the authz endpoint redirect to the $redirection_url. +error_page 401 =302 $redirection_url; + +## Legacy Method: Set $target_url to the original requested URL. +## This requires http_set_misc module, replace 'set_escape_uri' with 'set' if you don't have this module. +# set_escape_uri $target_url $scheme://$http_host$request_uri; + +## Legacy Method: When there is a 401 response code from the authz endpoint redirect to the portal with the 'rd' +## URL parameter set to $target_url. This requires users update 'auth.knravish.me/' with their external authelia URL. +# error_page 401 =302 https://auth.knravish.me/?rd=$target_url; \ No newline at end of file diff --git a/authelia/nginx_snippets/authelia-location.conf b/authelia/nginx_snippets/authelia-location.conf new file mode 100644 index 0000000..b38faf3 --- /dev/null +++ b/authelia/nginx_snippets/authelia-location.conf @@ -0,0 +1,32 @@ +set $upstream_authelia http://127.0.0.1:9091/api/authz/auth-request; + +## Virtual endpoint created by nginx to forward auth requests. +location /internal/authelia/authz { + ## Essential Proxy Configuration + internal; + proxy_pass $upstream_authelia; + + ## Headers + ## The headers starting with X-* are required. + proxy_set_header X-Original-Method $request_method; + proxy_set_header X-Original-URL $scheme://$http_host$request_uri; + proxy_set_header X-Forwarded-For $remote_addr; + proxy_set_header Content-Length ""; + proxy_set_header Connection ""; + + ## Basic Proxy Configuration + proxy_pass_request_body off; + proxy_next_upstream error timeout invalid_header http_500 http_502 http_503; # Timeout if the real server is dead + proxy_redirect http:// $scheme://; + proxy_http_version 1.1; + proxy_cache_bypass $cookie_session; + proxy_no_cache $cookie_session; + proxy_buffers 4 32k; + client_body_buffer_size 128k; + + ## Advanced Proxy Configuration + send_timeout 5m; + proxy_read_timeout 240; + proxy_send_timeout 240; + proxy_connect_timeout 240; +} \ No newline at end of file diff --git a/authelia/nginx_snippets/proxy.conf b/authelia/nginx_snippets/proxy.conf new file mode 100644 index 0000000..a2cd50d --- /dev/null +++ b/authelia/nginx_snippets/proxy.conf @@ -0,0 +1,37 @@ +## The only custom header to be added for our uses +proxy_set_header Access-Control-Allow-Origin *; + +## Headers +proxy_set_header Host $host; +proxy_set_header X-Original-URL $scheme://$http_host$request_uri; +proxy_set_header X-Forwarded-Proto $scheme; +proxy_set_header X-Forwarded-Host $http_host; +proxy_set_header X-Forwarded-URI $request_uri; +proxy_set_header X-Forwarded-Ssl on; +proxy_set_header X-Forwarded-For $remote_addr; +proxy_set_header X-Real-IP $remote_addr; + +## Basic Proxy Configuration +client_body_buffer_size 128k; +proxy_next_upstream error timeout invalid_header http_500 http_502 http_503; ## Timeout if the real server is dead. 
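## Rewrite absolute http:// Location/Refresh headers from the backend to the
## original request scheme, so redirects stay on https behind TLS termination.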
+proxy_redirect http:// $scheme://; +proxy_http_version 1.1; +proxy_cache_bypass $cookie_session; +proxy_no_cache $cookie_session; +proxy_buffers 64 256k; + +## Trusted Proxies Configuration +## Please read the following documentation before configuring this: +## https://www.authelia.com/integration/proxies/nginx/#trusted-proxies +# set_real_ip_from 10.0.0.0/8; +# set_real_ip_from 172.16.0.0/12; +# set_real_ip_from 192.168.0.0/16; +# set_real_ip_from fc00::/7; +real_ip_header X-Forwarded-For; +real_ip_recursive on; + +## Advanced Proxy Configuration +send_timeout 5m; +proxy_read_timeout 360; +proxy_send_timeout 360; +proxy_connect_timeout 360; \ No newline at end of file diff --git a/authelia/nginx_snippets/websocket.conf b/authelia/nginx_snippets/websocket.conf new file mode 100644 index 0000000..656426f --- /dev/null +++ b/authelia/nginx_snippets/websocket.conf @@ -0,0 +1,3 @@ +## WebSocket Example +proxy_set_header Upgrade $http_upgrade; +proxy_set_header Connection "upgrade"; \ No newline at end of file diff --git a/authelia/secrets/README.md b/authelia/secrets/README.md new file mode 100644 index 0000000..12358e1 --- /dev/null +++ b/authelia/secrets/README.md @@ -0,0 +1,6 @@ +# Secrets + +- enc_key +- jwt_sec +- ses_sec +- smtp_pass diff --git a/authelia_server-backup b/authelia_server-backup new file mode 100644 index 0000000..46b0dae --- /dev/null +++ b/authelia_server-backup @@ -0,0 +1,39 @@ +#!/bin/bash + +# shellcheck source=authelia_server-env +. "${HOME}"/"${USER}"-env + +mkdir -p "${HOME}"/backup_logs +logFile=${HOME}/backup_logs/$(date +%y_%m).log + +{ + echo -e "\n[+] authelia backup\n" + + mkdir -p /tmp/"${USER}"-backup + + sudo docker compose -f "${HOME}"/"${USER}"-compose.yaml stop + + cp -pr "${VOLUME_PATH}"/* /tmp/"${USER}"-backup + + sudo docker compose -f "${HOME}"/"${USER}"-compose.yaml start + + if ! 
rclone copy /tmp/"${USER}"-backup "${BUCKET_PATH}" -v; then + curl -Ss \ + -H "Title: Authelia" \ + -H "Priority: 3" \ + -H "Tags: warning,backup" \ + -d "Backup not completed" \ + "${NOTIF_URL}" + rm -r /tmp/"${USER}"-backup + exit 1 + fi + + curl -Ss \ + -H "Title: Authelia" \ + -H "Priority: 2" \ + -H "Tags: heavy_check_mark,backup" \ + -d "Backup completed" \ + "${NOTIF_URL}" + rm -r /tmp/"${USER}"-backup + +} &>>"$logFile" diff --git a/authelia_server-compose_template.yaml b/authelia_server-compose_template.yaml new file mode 100644 index 0000000..ad27773 --- /dev/null +++ b/authelia_server-compose_template.yaml @@ -0,0 +1,53 @@ +--- +secrets: + JWT_SECRET: + file: '${SECRETS_PATH}/jwt_sec' + SESSION_SECRET: + file: '${SECRETS_PATH}/ses_sec' + STORAGE_ENCRYPTION_KEY: + file: '${SECRETS_PATH}/enc_key' + SMTP_PASSWORD: + file: '${SECRETS_PATH}/smtp_pass' + +services: + redis: + container_name: 'authelia-redis' + image: redis:alpine + command: redis-server --save 60 1 --loglevel warning + pull_policy: always + restart: unless-stopped + networks: + authelia_server_network: + aliases: [] + volumes: + - ${REDIS_PATH}:/data + user: ${PUID}:${PGID} + authelia: + container_name: 'authelia' + image: authelia/authelia + pull_policy: always + restart: unless-stopped + networks: + authelia_server_network: + aliases: [] + ports: + - '127.0.0.1:9091:9091' + secrets: + - 'JWT_SECRET' + - 'SESSION_SECRET' + - 'STORAGE_ENCRYPTION_KEY' + - 'SMTP_PASSWORD' + environment: + AUTHELIA_IDENTITY_VALIDATION_RESET_PASSWORD_JWT_SECRET_FILE: '/run/secrets/JWT_SECRET' + AUTHELIA_SESSION_SECRET_FILE: '/run/secrets/SESSION_SECRET' + AUTHELIA_STORAGE_ENCRYPTION_KEY_FILE: '/run/secrets/STORAGE_ENCRYPTION_KEY' + AUTHELIA_NOTIFIER_SMTP_PASSWORD_FILE: '/run/secrets/SMTP_PASSWORD' + PUID: ${PUID} + PGID: ${PGID} + volumes: + - ${VOLUME_PATH}:/config + +networks: + authelia_server_network: + external: true + name: 'authelia_server_network' diff --git a/authelia_server-configuration.yaml b/authelia_server-configuration.yaml new file mode 100644 index 0000000..ae1ee1f --- /dev/null +++ b/authelia_server-configuration.yaml @@ -0,0 +1,164 @@ +authentication_backend: + file: + path: /config/users.yaml + watch: true + +access_control: + default_policy: deny + networks: + - name: 'internal' + networks: + - '10.0.0.0/8' + - '172.16.0.0/12' + - '192.168.0.0/18' + rules: + # go from most to least specific + ###### bypasses ###### + # CORS preflight + - domain: '*.knravish.me' + methods: 'OPTIONS' + policy: 'bypass' + ### status endpoints ### + # https://auth.knravish.me/api/health - status + - domain: 'auth.knravish.me' + resources: '^\/api\/health$' + policy: 'bypass' + # https://budget.knravish.me/info - info + - domain: 'budget.knravish.me' + resources: '^\/info$' + policy: 'bypass' + # https://blog.knravish.me/ghost/api/admin/site - info + - domain: 'blog.knravish.me' + resources: '^\/ghost\/api\/admin\/site$' + policy: 'bypass' + # # https://git.knravish.me/api/healthz - health + # - domain: 'git.knravish.me' + # resources: '^\/api\/healthz$' + # policy: 'bypass' + # https://notes.knravish.me/api/v1/workspace/profile - info + - domain: 'notes.knravish.me' + resources: '^\/api\/v1\/workspace\/profile$' + policy: 'bypass' + # https://pdf.knravish.me/api/v1/info/status - status + - domain: 'pdf.knravish.me' + resources: '^\/api\/v1\/info\/status$' + policy: 'bypass' + # https://planning.knravish.me/manifest.webmanifest - PWA + # for the homepage widget + # https://planning.knravish.me/api/v1/projects + # 
https://planning.knravish.me/api/v1/tasks/all?filter=done%3Dfalse&sort_by=due_date + - domain: 'planning.knravish.me' + resources: + - '^\/manifest.webmanifest$' + - '^\/api\/v1\/projects$' + - '^\/api\/v1\/tasks\/all\?filter=done%3Dfalse&sort_by=due_date$' + policy: 'bypass' + # https://recipes.knravish.me/api/app/about - status + - domain: 'recipes.knravish.me' + resources: + - '^\/api\/households\/statistics$' # homepage widget + - '^\/api\/app\/about$' + policy: 'bypass' + # https://syncthing.knravish.me/rest/noauth/health + - domain: 'syncthing.knravish.me' + resources: '^\/rest\/noauth\/health$' + policy: 'bypass' + # https://vpn.knravish.me/api/release - status + - domain: 'vpn.knravish.me' + resources: + - '^\/api\/wireguard\/client$' # homepage widget + - '^\/api\/release$' + policy: 'bypass' + # https://vtt.knravish.me/api/status + - domain: 'vtt.knravish.me' + resources: '^\/api\/status$' + policy: 'bypass' + ###### 1FA ###### + # sensitive data - only self + - domain: + - 'budget.knravish.me' + subject: + - 'user:self' + policy: 'one_factor' + # sensitive admin - only self + - domain: + - 'vpn.knravish.me' + - 'syncthing.knravish.me' + subject: + - 'user:self' + policy: 'one_factor' + # ghost blog admin + - domain: 'blog.knravish.me' + resources: '^\/ghost([\/?].*)?$' + subject: + - 'group:admin' + policy: 'one_factor' + # foundry VTT + - domain: 'vtt.knravish.me' + subject: + - 'group:admin' + - 'group:foundry' + policy: 'one_factor' + # mealie recipes + - domain: 'recipes.knravish.me' + subject: + - 'group:admin' + - 'group:mealie' + policy: 'one_factor' + ###### 2FA ###### + # master bypass - super_admin (currently only self) + - domain: '*.knravish.me' + subject: + - 'group:super_admin' + policy: 'two_factor' + +password_policy: + zxcvbn: + enabled: true + +# SECRET +# identity_validation: +# reset_password: +# jwt_secret: '' + +session: + # SECRET + # secret: '' + redis: + host: 'authelia-redis' + inactivity: '1w' + expiration: '2w' + remember_me: '3M' + cookies: + - domain: 'knravish.me' + authelia_url: 'https://auth.knravish.me' + +storage: + # SECRET + # encryption_key: '' + local: + path: '/config/db.sqlite3' + +notifier: + smtp: + address: 'smtp://smtp.purelymail.com:587' + timeout: '15s' + username: 'noreply@knravish.me' + # SECRET + # password: '' + sender: 'Authelia ' + identifier: 'knravish.me' + subject: '[Authelia] {title}' + +theme: 'auto' + +server: + endpoints: + authz: + auth-request: + implementation: 'AuthRequest' + authn_strategies: + - name: 'HeaderAuthorization' + schemes: + - 'Basic' + - name: 'CookieSession' diff --git a/authelia_server-cronjob b/authelia_server-cronjob new file mode 100644 index 0000000..9328bf5 --- /dev/null +++ b/authelia_server-cronjob @@ -0,0 +1,2 @@ +1 10 * * * /home/authelia_server/authelia_server-backup +1 11 * * 2 /home/authelia_server/authelia_server-update diff --git a/authelia_server-setup b/authelia_server-setup new file mode 100644 index 0000000..6ebaf01 --- /dev/null +++ b/authelia_server-setup @@ -0,0 +1,35 @@ +#!/bin/bash + +# shellcheck source=authelia_server-env +. "${HOME}"/"${USER}"-env + +echo -e "\n[+] setting up authelia\n\n-------\n" + +echo -e "\n[!] not really automated cuz of the nginx and secrets stuff\n" + +mkdir -p "${REDIS_PATH}" +mkdir -p "${VOLUME_PATH}" +mkdir -p "${SECRETS_PATH}" + +chown -R "${USER}":"${USER}" "${SECRETS_PATH}" +chmod -R 600 "${SECRETS_PATH}" + +echo -e "\n[Q] STOP! scp the secrets if you haven't\n" +sleep 5 +echo -e "\n[!] 
hope you've copied the secrets MANUALLY\n" + +cp "${HOME}"/"${USER}"-configuration.yaml "${VOLUME_PATH}"/configuration.yml + +envsubst <"${HOME}"/"${USER}"-compose_template.yaml >"${HOME}"/"${USER}"-compose.yaml + +sudo docker network create -d bridge "${USER}"_network +sudo docker compose -f "${HOME}"/"${USER}"-compose.yaml up -d + +echo "[+] restoring from backup..." + +sudo docker compose -f "${HOME}"/"${USER}"-compose.yaml stop + +rm -rf "${VOLUME_PATH}" +rclone copy "${BUCKET_PATH}" "${VOLUME_PATH}" -v + +sudo docker compose -f "${HOME}"/"${USER}"-compose.yaml start diff --git a/authelia_server-teardown b/authelia_server-teardown new file mode 100644 index 0000000..fea2473 --- /dev/null +++ b/authelia_server-teardown @@ -0,0 +1 @@ +um not so simple, need to edit the nginx configs \ No newline at end of file diff --git a/authelia_server-update b/authelia_server-update new file mode 100644 index 0000000..437a3fd --- /dev/null +++ b/authelia_server-update @@ -0,0 +1,11 @@ +#!/bin/bash + +mkdir -p "${HOME}"/update_logs +logFile=${HOME}/update_logs/$(date +%y_%m).log +{ + echo -e "\n[+] updating authelia\n" + + sudo docker compose -f "${HOME}"/"${USER}"-compose.yaml pull && + sudo docker compose -f "${HOME}"/"${USER}"-compose.yaml up -d --always-recreate-deps --remove-orphans && + yes | sudo docker image prune -af +} &>>"$logFile" diff --git a/budget.knravish.me.conf b/budget.knravish.me.conf new file mode 100644 index 0000000..62384b7 --- /dev/null +++ b/budget.knravish.me.conf @@ -0,0 +1,16 @@ +server { + server_name budget.knravish.me; + index index.html index.htm; + + include /etc/nginx/snippets/authelia-location.conf; + + set $upstream http://127.0.0.1:5006; + + location / { + include /etc/nginx/snippets/proxy.conf; + include /etc/nginx/snippets/authelia-authrequest.conf; + proxy_pass $upstream; + } + + listen 80; +} diff --git a/dash.knravish.me.conf b/dash.knravish.me.conf new file mode 100644 index 0000000..52e289a --- /dev/null +++ b/dash.knravish.me.conf @@ -0,0 +1,18 @@ +server { + server_name dash.knravish.me; + index index.html index.htm; + + location / { + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header Host $http_host; + proxy_pass http://127.0.0.1:3000; + proxy_redirect off; + proxy_set_header Access-Control-Allow-Origin *; + proxy_read_timeout 600s; + proxy_send_timeout 600s; + } + + listen 80; +} diff --git a/file_transfers copy.ps1 b/file_transfers copy.ps1 new file mode 100644 index 0000000..47b4de1 --- /dev/null +++ b/file_transfers copy.ps1 @@ -0,0 +1,150 @@ +$dirname = $PSScriptRoot +$remote_home_folder = "your_default_user@1.2.3.4" +$key = "your.private.key" + +function TransferFile { + param ( + [Parameter(Mandatory)] + [string]$FileName, + [ValidateNotNullOrEmpty()] + [string]$DestPath = "" + ) + + scp -i "${dirname}\${key}" "${dirname}\${FileName}" "${remote_home_folder}:${DestPath}" +} + +# backups +TransferFile "actual_server-backup" +TransferFile "authelia_server-backup" +TransferFile "foundry_server-backup" +TransferFile "ghost_server-credentials.exp" +TransferFile "ghost_server-backup" +TransferFile "homepage_server-backup" +TransferFile "mealie_server-backup" +TransferFile "memos_server-backup" +TransferFile "minecraft_server-backup" +# TransferFile "stirling_server-backup" +TransferFile "syncthing_server-backup" +TransferFile "wg_server-backup" + +# updates +TransferFile "actual_server-update" +TransferFile "authelia_server-update" 
+TransferFile "foundry_server-update" +# TransferFile "ghost_server-update" +TransferFile "homepage_server-update" +TransferFile "mealie_server-update" +TransferFile "memos_server-update" +# TransferFile "minecraft_server-update" +TransferFile "stirling_server-update" +# TransferFile "syncthing_server-update" +TransferFile "wg_server-update" + +# cronjobs +TransferFile "ubuntu-cronjob" +TransferFile "actual_server-cronjob" +TransferFile "authelia_server-cronjob" +TransferFile "foundry_server-cronjob" +TransferFile "ghost_server-cronjob" +TransferFile "homepage_server-cronjob" +TransferFile "mealie_server-cronjob" +TransferFile "memos_server-cronjob" +TransferFile "minecraft_server-cronjob" +TransferFile "syncthing_server-cronjob" +TransferFile "wg_server-cronjob" + +# env vars +TransferFile "instance-env" +TransferFile "actual_server-env" +TransferFile "authelia_server-env" +TransferFile "foundry_server-env" +TransferFile "ghost_server-env" +TransferFile "homepage_server-env" +TransferFile "mealie_server-env" +TransferFile "memos_server-env" +TransferFile "minecraft_server-env" +TransferFile "pwpush_server-env" +# TransferFile "shlink_server-env" +TransferFile "stirling_server-env" +TransferFile "syncthing_server-env" +TransferFile "wg_server-env" + +# config files +## misc. +TransferFile "rclone.conf" ".config/rclone" +TransferFile "authelia_server-configuration.yaml" +TransferFile "ghost_server-config.production.json" +TransferFile "pwpush_server-settings.yaml" +### systemd +TransferFile "minecraft_server-start.service" +TransferFile "minecraft_server-start.socket" +## nginx configs +### authelia nginx snippets +TransferFile "authelia\nginx_snippets\authelia-authrequest.conf" +TransferFile "authelia\nginx_snippets\authelia-location.conf" +TransferFile "authelia\nginx_snippets\proxy.conf" +TransferFile "authelia\nginx_snippets\websocket.conf" +### sites +TransferFile "auth.knravish.me.conf" +TransferFile "budget.knravish.me.conf" +TransferFile "dash.knravish.me.conf" +TransferFile "lnk.knravish.me.conf" +TransferFile "notes.knravish.me.conf" +TransferFile "paste.knravish.me.conf" +TransferFile "pdf.knravish.me.conf" +TransferFile "recipes.knravish.me.conf" +TransferFile "syncthing.knravish.me.conf" +TransferFile "vpn.knravish.me.conf" +TransferFile "vtt.knravish.me.conf" + +# docker-compose files + +TransferFile "actual_server-compose_template.yaml" +TransferFile "authelia_server-compose_template.yaml" +TransferFile "homepage_server-compose_template.yaml" +TransferFile "mealie_server-compose_template.yaml" +TransferFile "memos_server-compose_template.yaml" +TransferFile "pwpush_server-compose_template.yaml" +TransferFile "shlink_server-compose.yaml" # TransferFile "shlink_server-compose_template.yaml" +TransferFile "stirling_server-compose_template.yaml" +TransferFile "wg_server-compose_template.yaml" + +# setup scripts +TransferFile "instance-setup" # run as ubuntu +TransferFile "actual_server-setup" +TransferFile "authelia_server-setup" +TransferFile "foundry_server-setup" +TransferFile "ghost_server-setup" +TransferFile "homepage_server-setup" +TransferFile "mealie_server-setup" +TransferFile "memos_server-setup" +TransferFile "minecraft_server-setup" +TransferFile "pwpush_server-setup" +TransferFile "shlink_server-setup" +TransferFile "stirling_server-setup" +TransferFile "syncthing_server-setup" +TransferFile "wg_server-setup" + +# teardown scripts - run as ubuntu +TransferFile "actual_server-teardown" +TransferFile "authelia_server-teardown" +TransferFile "foundry_server-teardown" 
+# TransferFile "ghost_server-teardown" +TransferFile "homepage_server-teardown" +TransferFile "mealie_server-teardown" +TransferFile "memos_server-teardown" +# TransferFile "minecraft_server-teardown" +TransferFile "pwpush_server-teardown" +TransferFile "shlink_server-teardown" +TransferFile "stirling_server-teardown" +# TransferFile "syncthing_server-teardown" +TransferFile "wg_server-teardown" + +# secrets +TransferFile "authelia\secrets\enc_key" "authelia_secrets" +TransferFile "authelia\secrets\jwt_sec" "authelia_secrets" +TransferFile "authelia\secrets\ses_sec" "authelia_secrets" +TransferFile "authelia\secrets\smtp_pass" "authelia_secrets" + +# miscellaneous +TransferFile "ubuntu_auto_apt_upgrade" diff --git a/foundry_server-backup b/foundry_server-backup new file mode 100644 index 0000000..6f342fa --- /dev/null +++ b/foundry_server-backup @@ -0,0 +1,40 @@ +#!/bin/bash + +# shellcheck source=foundry_server-env +. "${HOME}"/"${USER}"-env + +mkdir -p "${HOME}"/backup_logs +logFile=${HOME}/backup_logs/$(date +%y_%m).log + +{ + echo -e "\n[+] foundry backup\n" + + mkdir -p /tmp/"${USER}"-backup + + systemctl --user stop "${USER}"-start.service + + cp -pr "${FOUNDRY_DATA_PATH}"/* /tmp/"${USER}"-backup + + systemctl --user restart "${USER}"-start.service + + rclone copy /tmp/"${USER}"-backup "${BUCKET_PATH}" -v + if [ $? -ne 0 ]; then + curl -Ss \ + -H "Title: Foundry VTT" \ + -H "Priority: 3" \ + -H "Tags: warning,backup" \ + -d "Backup not completed" \ + "${NOTIF_URL}" + rm -r /tmp/"${USER}"-backup + exit 1 + fi + + curl -Ss \ + -H "Title: Foundry VTT" \ + -H "Priority: 2" \ + -H "Tags: heavy_check_mark,backup" \ + -d "Backup completed" \ + "${NOTIF_URL}" + rm -r /tmp/"${USER}"-backup + +} &>>"$logFile" diff --git a/foundry_server-cronjob b/foundry_server-cronjob new file mode 100644 index 0000000..32ed55f --- /dev/null +++ b/foundry_server-cronjob @@ -0,0 +1 @@ +2 10,22 * * * /home/foundry_server/foundry_server-backup diff --git a/foundry_server-setup b/foundry_server-setup new file mode 100644 index 0000000..9d7fb5c --- /dev/null +++ b/foundry_server-setup @@ -0,0 +1,37 @@ +#!/bin/bash + +# shellcheck source=foundry_server-env +. "${HOME}"/"${USER}"-env + +echo -e "\n[+] setting up foundry\n\n-------\n" + +echo "[+] nvm and node" + +curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/v0.40.1/install.sh | bash +export NVM_DIR="$HOME/.nvm" +[ -s "$NVM_DIR/nvm.sh" ] && \. "$NVM_DIR/nvm.sh" # This loads nvm +[ -s "$NVM_DIR/bash_completion" ] && \. "$NVM_DIR/bash_completion" # This loads nvm bash_completion + +nvm install --lts +nvm alias default node + +echo "[+] foundry" + +mkdir -p "${HOME}"/foundry +mkdir -p "${FOUNDRY_DATA_PATH}" +cd foundry || exit +wget -O foundryvtt.zip "${FOUNDRY_TIMED_URL}" +unzip foundryvtt.zip +rm foundryvtt.zip + +echo "[+] restoring backup data" + +rclone copy "${BUCKET_PATH}" "${FOUNDRY_DATA_PATH}" -v + +echo "[+] setting up systemctl and starting" + +mkdir -p "${HOME}"/.config/systemd/user/ +cp "${HOME}"/"${USER}"-start.service "${HOME}"/.config/systemd/user/ + +systemctl --user daemon-reload +systemctl --user enable --now "${USER}"-start.service diff --git a/foundry_server-start.service b/foundry_server-start.service new file mode 100644 index 0000000..4398dda --- /dev/null +++ b/foundry_server-start.service @@ -0,0 +1,13 @@ +[Unit] +Description=Foundry VTT +After=network.target + +[Service] +Type=simple +Restart=on-failure +RestartSec=1 +WorkingDirectory=%h/foundry +ExecStart=/bin/bash -c ". 
${HOME}/.nvm/nvm.sh ; node resources/app/main.js" + +[Install] +WantedBy=default.target diff --git a/foundry_server-teardown b/foundry_server-teardown new file mode 100644 index 0000000..9fb2020 --- /dev/null +++ b/foundry_server-teardown @@ -0,0 +1,15 @@ +#!/bin/bash + +username=foundry_server + +# application +sudo machinectl shell ${username}@ /bin/bash -c "systemctl --user disable --now ${username}-start.service ; systemctl --user daemon-reload" +sudo machinectl shell ${username}@ /bin/bash -c ". ~/.nvm/nvm.sh ; nvm deactivate ; nvm uninstall --lts" + +uid_num=$(id -u $username) +sudo killall -9 -v -g -u $username +sudo crontab -r -u $username +sudo deluser --remove-all-files $username + +# clean-up +sudo find / -user "$uid_num" -delete diff --git a/foundry_server-update b/foundry_server-update new file mode 100644 index 0000000..e69de29 diff --git a/freshStart copy.ps1 b/freshStart copy.ps1 new file mode 100644 index 0000000..c83d1ed --- /dev/null +++ b/freshStart copy.ps1 @@ -0,0 +1 @@ +ssh -i $PSScriptRoot/your.private.key your_default_user@1.2.3.4 diff --git a/ghost_server-backup b/ghost_server-backup new file mode 100644 index 0000000..f0c308c --- /dev/null +++ b/ghost_server-backup @@ -0,0 +1,45 @@ +#!/bin/bash + +# shellcheck source=ghost_server-env +. "${HOME}"/"${USER}"-env + +mkdir -p "${HOME}"/backup_logs +logFile=${HOME}/backup_logs/$(date +%y_%m).log +{ + echo -e "\n[+] ghost backup\n" + + cd "${BLOG_PATH}" || exit + + if ! /usr/bin/expect "${HOME}"/"${USER}"-credentials.exp; then + curl -Ss \ + -H "Title: Ghost Blog" \ + -H "Priority: 3" \ + -H "Tags: warning,backup" \ + -d "Backup not completed - ghost backup failure" \ + "${NOTIF_URL}" + rm -r "${BLOG_PATH}"/backup* + exit 1 + fi + + echo "[+] local backup taken" + + if ! rclone copyto "${BLOG_PATH}"/backup*.zip "${BUCKET_PATH}" -v; then + curl -Ss \ + -H "Title: Ghost Blog" \ + -H "Priority: 3" \ + -H "Tags: warning,backup" \ + -d "Backup not completed - rclone failure" \ + "${NOTIF_URL}" + rm -r "${BLOG_PATH}"/backup* + exit 1 + fi + + curl -Ss \ + -H "Title: Ghost Blog" \ + -H "Priority: 2" \ + -H "Tags: heavy_check_mark,backup" \ + -d "Backup completed" \ + "${NOTIF_URL}" + rm -r "${BLOG_PATH}"/backup* + +} &>>"$logFile" diff --git a/ghost_server-config.production copy.json b/ghost_server-config.production copy.json new file mode 100644 index 0000000..c87ae06 --- /dev/null +++ b/ghost_server-config.production copy.json @@ -0,0 +1,38 @@ +{ + "url": "https://blog.knravish.me", + "server": { + "port": 2368, + "host": "127.0.0.1" + }, + "database": { + "client": "mysql", + "connection": { + "host": "postgres_hostname", + "user": "postgres_username", + "password": "postgres_password", + "database": "defaultdb", + "port": , + "ssl": { + "ca": "", + "rejectUnauthorized": true + } + } + }, + "mail": { + "transport": "Direct" + }, + "logging": { + "transports": [ + "file", + "stdout" + ] + }, + "process": "systemd", + "paths": { + "contentPath": "/var/www/blog.knravish.me/content" + }, + "bootstrap-socket": { + "port": 8000, + "host": "localhost" + } +} diff --git a/ghost_server-credentials copy.exp b/ghost_server-credentials copy.exp new file mode 100644 index 0000000..4fcf71c --- /dev/null +++ b/ghost_server-credentials copy.exp @@ -0,0 +1,14 @@ +#!/usr/bin/expect + +set email "" +set pw "" + +spawn ghost backup + +expect "Ghost administrator email address" +send "$email\r" + +expect "Ghost administrator password" +send "$pw\r" + +expect eof diff --git a/ghost_server-cronjob b/ghost_server-cronjob new file mode 100644 
index 0000000..5fb9c40 --- /dev/null +++ b/ghost_server-cronjob @@ -0,0 +1 @@ +3 10 * * * /home/ghost_server/ghost_server-backup diff --git a/ghost_server-setup b/ghost_server-setup new file mode 100644 index 0000000..dd2767c --- /dev/null +++ b/ghost_server-setup @@ -0,0 +1,52 @@ +#!/bin/bash + +# shellcheck source=ghost_server-env +. "${HOME}"/"${USER}"-env + +email_address=hello@knravish.me +echo -e "\n[+] setting up ghost\n\n-------\n" + +echo "[+] node and companions" +# ghost doesn't play well with nvm for some reason, probably because of installation location and sudo access +# Download and import the Nodesource GPG key +sudo mkdir -p /etc/apt/keyrings +curl -fsSL https://deb.nodesource.com/gpgkey/nodesource-repo.gpg.key | sudo gpg --dearmor -o /etc/apt/keyrings/nodesource.gpg +NODE_MAJOR=20 # Use a supported version +echo "deb [signed-by=/etc/apt/keyrings/nodesource.gpg] https://deb.nodesource.com/node_$NODE_MAJOR.x nodistro main" | sudo tee /etc/apt/sources.list.d/nodesource.list +sudo apt-get update +sudo apt-get install nodejs -y + +echo "[+] getting ready..." + +ghost_cli_ver="1.26.0" +sudo npm i -g ghost-cli@${ghost_cli_ver} + +sudo mkdir -p "${BLOG_PATH}" +sudo chown "${USER}":"${USER}" "${BLOG_PATH}" +sudo chmod 775 "${BLOG_PATH}" + +# ghost really needs to update to newer nginx versions and conventions... +sudo mkdir -p /etc/nginx/sites-available/ /etc/nginx/sites-enabled/ /etc/nginx/snippets/ + +echo "[+] ooh, interactive stuff" + +# currently track manually, maybe automate +ghost_ver="5.105.0" + +cd "${BLOG_PATH}" && ghost install ${ghost_ver} --no-setup + +sudo cp "${HOME}"/"${USER}"-config.production.json "${BLOG_PATH}"/ +sudo chown "${USER}":"${USER}" "${BLOG_PATH}"/"${USER}"-config.production.json +mv "${BLOG_PATH}"/"${USER}"-config.production.json "${BLOG_PATH}"/config.production.json + +cd "${BLOG_PATH}" && ghost setup --auto --sslemail ${email_address} + +echo "[+] restoring backup data" + +sudo rm -r "${BLOG_PATH}"/content/* + +rclone copyto "${BUCKET_PATH}" "${BLOG_PATH}"/ghostBackup.zip +sudo unzip "${BLOG_PATH}"/ghostBackup.zip -d "${BLOG_PATH}"/content/ +sudo chown -R ghost:ghost "${BLOG_PATH}"/content/ + +echo -e "\n-----\nIMPORTANT\n-----\n[X] modify the nginx default config file to include the sites-enabled directory\n" diff --git a/git.knravish.me.conf b/git.knravish.me.conf new file mode 100644 index 0000000..2f7c350 --- /dev/null +++ b/git.knravish.me.conf @@ -0,0 +1,18 @@ +server { + server_name git.knravish.me; + index index.html index.htm; + + location / { + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header Host $http_host; + proxy_pass http://127.0.0.1:3001; + proxy_redirect off; + proxy_set_header Access-Control-Allow-Origin *; + proxy_read_timeout 600s; + proxy_send_timeout 600s; + } + + listen 80; +} diff --git a/gitea_server-backup b/gitea_server-backup new file mode 100644 index 0000000..121ae45 --- /dev/null +++ b/gitea_server-backup @@ -0,0 +1,44 @@ +#!/bin/bash + +# shellcheck source=gitea_server-env +. 
"${HOME}"/"${USER}"-env + +mkdir -p "${HOME}"/backup_logs +logFile=${HOME}/backup_logs/$(date +%y_%m).log + +{ + echo -e "\n[+] gitea backup\n" + + mkdir -p /tmp/"${USER}"-backup + + sudo docker compose -f "${HOME}"/"${USER}"-compose.yaml stop gitea + + cp -pr "${VOLUME_PATH}"/config /tmp/"${USER}"-backup + cp -pr "${VOLUME_PATH}"/data /tmp/"${USER}"-backup + + # shellcheck disable=SC2024 + sudo docker exec -u "${PUID}:${PGID}" -it gitea-postgres sh -c \ + 'pg_dumpall -c --if-exists -U gitea' >/tmp/"${USER}"-backup/db.out + + sudo docker compose -f "${HOME}"/"${USER}"-compose.yaml start gitea + + if ! rclone copy /tmp/"${USER}"-backup "${BUCKET_PATH}" -v; then + curl -Ss \ + -H "Title: Gitea" \ + -H "Priority: 3" \ + -H "Tags: warning,backup" \ + -d "Backup not completed" \ + "${NOTIF_URL}" + rm -rf /tmp/"${USER}"-backup + exit 1 + fi + + curl -Ss \ + -H "Title: Gitea" \ + -H "Priority: 2" \ + -H "Tags: heavy_check_mark,backup" \ + -d "Backup completed" \ + "${NOTIF_URL}" + rm -rf /tmp/"${USER}"-backup + +} &>>"$logFile" diff --git a/gitea_server-compose_template.yaml b/gitea_server-compose_template.yaml new file mode 100644 index 0000000..185ebaa --- /dev/null +++ b/gitea_server-compose_template.yaml @@ -0,0 +1,40 @@ +--- +services: + gitea: + image: docker.gitea.com/gitea:1-rootless + container_name: gitea + pull_policy: always + restart: unless-stopped + volumes: + - ${VOLUME_PATH}/data:/var/lib/gitea + - ${VOLUME_PATH}/config:/etc/gitea + - /etc/timezone:/etc/timezone:ro + - /etc/localtime:/etc/localtime:ro + ports: + - 127.0.0.1:${PORT}:3000 + # - 2222:2222 # for internal SSH. unnecessary? + environment: + - GITEA__database__DB_TYPE=postgres + - GITEA__database__HOST=db:5432 + - GITEA__database__NAME=gitea + - GITEA__database__USER=gitea + - GITEA__database__PASSWD=gitea + - USER=git + - USER_UID=${PUID} + - USER_GID=${PGID} + depends_on: + - db + user: ${PUID}:${PGID} + + db: + image: postgres:16 + container_name: gitea-postgres + pull_policy: always + restart: unless-stopped + environment: + - POSTGRES_USER=gitea + - POSTGRES_PASSWORD=gitea + - POSTGRES_DB=gitea + volumes: + - ${VOLUME_PATH}/postgres:/var/lib/postgresql/data + user: ${PUID}:${PGID} diff --git a/gitea_server-cronjob b/gitea_server-cronjob new file mode 100644 index 0000000..911e99b --- /dev/null +++ b/gitea_server-cronjob @@ -0,0 +1 @@ +4 10 * * * /home/gitea_server/gitea_server-backup diff --git a/gitea_server-setup b/gitea_server-setup new file mode 100644 index 0000000..e8270b7 --- /dev/null +++ b/gitea_server-setup @@ -0,0 +1,33 @@ +#!/bin/bash + +echo -e "\n[+] setting up gitea\n\n-------\n" + +# shellcheck source=gitea_server-env +. "${HOME}"/"${USER}"-env + +mkdir -p "${VOLUME_PATH}"/{data,config,postgres} +envsubst <"${HOME}"/"${USER}"-compose_template.yaml >"${HOME}"/"${USER}"-compose.yaml + +sudo docker compose -f "${HOME}"/"${USER}"-compose.yaml up -d + +echo "[+] preparing to restore from backup..." + +sleep 5 +sudo docker compose -f "${HOME}"/"${USER}"-compose.yaml stop gitea + +mkdir -p "${VOLUME_PATH}"/restore_files +rclone copy "${BUCKET_PATH}" "${VOLUME_PATH}"/restore_files + +echo "[+] restoring from backup..." + +cp -fr "${VOLUME_PATH}"/restore_files/config "${VOLUME_PATH}" +cp -fr "${VOLUME_PATH}"/restore_files/data "${VOLUME_PATH}" +chown -R "${PUID}":"${PGID}" "${VOLUME_PATH}"/config "${VOLUME_PATH}"/data + +cat "${VOLUME_PATH}"/restore_files/db.out | sudo docker exec -i gitea-postgres psql -qXU gitea + +echo "[+] restarting..." 
+sudo docker compose -f "${HOME}"/"${USER}"-compose.yaml restart + +# cleanup +rm -rf "${VOLUME_PATH}"/restore_files diff --git a/gitea_server-teardown b/gitea_server-teardown new file mode 100644 index 0000000..e69de29 diff --git a/gitea_server-update b/gitea_server-update new file mode 100644 index 0000000..e69de29 diff --git a/homepage_server-backup b/homepage_server-backup new file mode 100644 index 0000000..bfc5fdf --- /dev/null +++ b/homepage_server-backup @@ -0,0 +1,39 @@ +#!/bin/bash + +# shellcheck source=homepage_server-env +. "${HOME}"/"${USER}"-env + +mkdir -p "${HOME}"/backup_logs +logFile=${HOME}/backup_logs/$(date +%y_%m).log + +{ + echo -e "\n[+] homepage backup\n" + + mkdir -p /tmp/"${USER}"-backup + + sudo docker compose -f "${HOME}"/"${USER}"-compose.yaml stop + + cp -pr "${VOLUME_PATH}"/* /tmp/"${USER}"-backup + + sudo docker compose -f "${HOME}"/"${USER}"-compose.yaml start + + if ! rclone copy /tmp/"${USER}"-backup "${BUCKET_PATH}" -v; then + curl -Ss \ + -H "Title: Homepage" \ + -H "Priority: 3" \ + -H "Tags: warning,backup" \ + -d "Backup not completed" \ + "${NOTIF_URL}" + rm -r /tmp/"${USER}"-backup + exit 1 + fi + + curl -Ss \ + -H "Title: Homepage" \ + -H "Priority: 2" \ + -H "Tags: heavy_check_mark,backup" \ + -d "Backup completed" \ + "${NOTIF_URL}" + rm -r /tmp/"${USER}"-backup + +} &>>"$logFile" diff --git a/homepage_server-compose_template.yaml b/homepage_server-compose_template.yaml new file mode 100644 index 0000000..bf231b9 --- /dev/null +++ b/homepage_server-compose_template.yaml @@ -0,0 +1,32 @@ +--- +services: + dockerproxy: + image: ghcr.io/tecnativa/docker-socket-proxy:latest + container_name: dockerproxy + environment: + - CONTAINERS=1 # Allow access to viewing containers + - SERVICES=0 # Allow access to viewing services (necessary when using Docker Swarm) + - TASKS=0 # Allow access to viewing tasks (necessary when using Docker Swarm) + - POST=0 # Disallow any POST operations (effectively read-only) + ports: + - 127.0.0.1:${DOCKER_PORT}:${DOCKER_PORT} + volumes: + - /var/run/docker.sock:/var/run/docker.sock:ro # Mounted as read-only + restart: unless-stopped + + homepage: + image: ghcr.io/gethomepage/homepage:latest + container_name: homepage + pull_policy: always + restart: unless-stopped + ports: + - 127.0.0.1:${PORT}:${PORT} + init: true + volumes: + - ${VOLUME_PATH}/config:/app/config # Make sure your local config directory exists + - ${VOLUME_PATH}/icons:/app/public/icons # icons + - ${VOLUME_PATH}/images:/app/public/images # images + environment: + PUID: ${PUID} + PGID: ${PGID} + HOMEPAGE_ALLOWED_HOSTS: dash.knravish.me diff --git a/homepage_server-cronjob b/homepage_server-cronjob new file mode 100644 index 0000000..0e105ff --- /dev/null +++ b/homepage_server-cronjob @@ -0,0 +1,2 @@ +5 10 * * * /home/homepage_server/homepage_server-backup +5 11 * * 2 /home/homepage_server/homepage_server-update diff --git a/homepage_server-geticon b/homepage_server-geticon new file mode 100644 index 0000000..8ced154 --- /dev/null +++ b/homepage_server-geticon @@ -0,0 +1,22 @@ +#!/bin/bash + +# shellcheck source=homepage_server-env +. "${HOME}"/"${USER}"-env + +base_url=https://cdn.jsdelivr.net/gh/walkxcode/dashboard-icons + +svg_url=${base_url}/svg/${1}.svg +png_url=${base_url}/png/${1}.png + +if ! curl -I "${svg_url}" | grep -E "HTTP/.* 404" >/dev/null; then + curl -Ss -O --output-dir "${VOLUME_PATH}"/icons "${svg_url}" + echo "svg" + exit 0 +elif ! 
curl -I "${png_url}" | grep -E "HTTP/.* 404" >/dev/null; then + curl -Ss -O --output-dir "${VOLUME_PATH}"/icons "${png_url}" + echo "png" + exit 0 +else + echo "Not Found" + exit 1 +fi diff --git a/homepage_server-getimage b/homepage_server-getimage new file mode 100644 index 0000000..bff1d26 --- /dev/null +++ b/homepage_server-getimage @@ -0,0 +1,18 @@ +#!/bin/bash + +# shellcheck source=homepage_server-env +. "${HOME}"/"${USER}"-env + +headers=$(curl -SsIXGET "$1") + +status_code=$(echo "$headers" | grep -E "HTTP/.* [0-9]{3}" | awk '{print $2}') + +if [[ $status_code == "200" ]]; then + ext=$(echo "$headers" | grep "content-type:" | awk -F/ '{print $2}' | tr -d " \t\n\r") + curl -Ss -o "${VOLUME_PATH}"/images/"${2}"."${ext}" "${1}" + echo "found" + exit 0 +else + echo "Not Found" + exit 1 +fi diff --git a/homepage_server-setup b/homepage_server-setup new file mode 100644 index 0000000..7f2b3bb --- /dev/null +++ b/homepage_server-setup @@ -0,0 +1,20 @@ +#!/bin/bash + +echo -e "\n[+] setting up homepage\n\n-------\n" + +# shellcheck source=homepage_server-env +. "${HOME}"/"${USER}"-env + +mkdir -p "${VOLUME_PATH}" +envsubst <"${HOME}"/"${USER}"-compose_template.yaml >"${HOME}"/"${USER}"-compose.yaml + +sudo docker compose -f "${HOME}"/"${USER}"-compose.yaml up -d + +echo "[+] restoring from backup..." + +sudo docker compose -f "${HOME}"/"${USER}"-compose.yaml stop + +rm -rf "${VOLUME_PATH}" +rclone copy "${BUCKET_PATH}" "${VOLUME_PATH}" -v + +sudo docker compose -f "${HOME}"/"${USER}"-compose.yaml start diff --git a/homepage_server-teardown b/homepage_server-teardown new file mode 100644 index 0000000..4b0eed0 --- /dev/null +++ b/homepage_server-teardown @@ -0,0 +1,14 @@ +#!/bin/bash + +username=homepage_server + +# application +sudo docker compose -f /home/${username}/${username}-compose.yaml down -v + +uid_num=$(id -u $username) +sudo killall -9 -v -g -u $username +sudo crontab -r -u $username +sudo deluser --remove-all-files $username + +# clean-up +sudo find / -user "$uid_num" -delete diff --git a/homepage_server-update b/homepage_server-update new file mode 100644 index 0000000..2fea949 --- /dev/null +++ b/homepage_server-update @@ -0,0 +1,11 @@ +#!/bin/bash + +mkdir -p "${HOME}"/update_logs +logFile=${HOME}/update_logs/$(date +%y_%m).log +{ + echo -e "\n[+] updating homepage\n" + + sudo docker compose -f "${HOME}"/"${USER}"-compose.yaml pull && + sudo docker compose -f "${HOME}"/"${USER}"-compose.yaml up -d --always-recreate-deps --remove-orphans && + yes | sudo docker image prune -af +} &>>"$logFile" diff --git a/instance-bash_aliases b/instance-bash_aliases new file mode 100644 index 0000000..6181da9 --- /dev/null +++ b/instance-bash_aliases @@ -0,0 +1,6 @@ +#!/bin/bash + +alias less="less -r" +alias dsizes="sudo du --max-depth=1 -h" +alias workas="sudo machinectl shell" +alias psdeets="ps -o pid,vsz=MEMORY -o user,group=GROUP -o comm,args=ARGS -p" diff --git a/instance-bash_autocompletions b/instance-bash_autocompletions new file mode 100644 index 0000000..4b1f41e --- /dev/null +++ b/instance-bash_autocompletions @@ -0,0 +1,3 @@ +#!/bin/bash + +complete -W "$(compgen -u)" workas diff --git a/instance-setup b/instance-setup new file mode 100644 index 0000000..0ab7551 --- /dev/null +++ b/instance-setup @@ -0,0 +1,274 @@ +#!/bin/bash + +echo -e "\n[+] Let's begin!\n\n-------\n" + +# define these first +[[ -z "$BASE_DOMAIN" ]] && echo "base domain missing" && exit 1 +[[ -z "$CF_EMAIL_ALIAS" ]] && echo "domain email missing" && exit 1 +[[ -z "$UBUNTU_PRO_TOKEN" ]] && echo "ubuntu pro 
token missing" && exit 1 +[[ -z "$B2_COLON_BUCKET_NAME" ]] && echo "b2 bucket name missing" && exit 1 +[[ -z "$NTFY_URL" ]] && echo "ntfy endpoint missing" && exit 1 + +domain=$BASE_DOMAIN +email_address=${CF_EMAIL_ALIAS} + +echo "BASE_DOMAIN=${BASE_DOMAIN}" | sudo tee -a /etc/environment +echo "BACKUP_BUCKET=${B2_COLON_BUCKET_NAME}" | sudo tee -a /etc/environment # current: the startingOut one +echo "NOTIF_URL=${NTFY_URL}" | sudo tee -a /etc/environment # current: endpoint on ntfy.sh + +# some useful aliases +cat instance-bash_aliases | tee -a ~/.bash_aliases +cat instance-bash_aliases | sudo tee -a /etc/skel/.bash_aliases + +# some useful autocompletions +chmod 774 instance-bash_autocompletions +./instance-bash_autocompletions + +cd ~ || exit +sudo apt-get update +sudo apt-get upgrade -y +sudo pro attach "$UBUNTU_PRO_TOKEN" + +if [[ $(cloud-init query platform) == 'oracle' ]]; then + # https://www.reddit.com/r/oraclecloud/comments/r8lkf7/a_quick_tips_to_people_who_are_having_issue/ + echo "[+] disabling ufw and netfilter rules (OCI default)" + sudo ufw disable + sudo iptables -I INPUT -j ACCEPT + sudo iptables-save | sudo dd of=/etc/iptables/rules.v4 +fi + +echo "[+] packages" +# JDK 17 or higher needed for MC +sudo apt-get install build-essential curl gnupg2 ca-certificates lsb-release ubuntu-keyring apt-transport-https expect -y +sudo apt-get install openjdk-21-jdk-headless systemd-container fail2ban -y +sudo systemctl enable --now fail2ban.service + +echo "[+] docker" +sudo install -m 0775 -d /etc/apt/keyrings +sudo curl -fsSL https://download.docker.com/linux/ubuntu/gpg -o /etc/apt/keyrings/docker.asc +sudo chmod a+r /etc/apt/keyrings/docker.asc +echo \ + "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.asc] \ + https://download.docker.com/linux/ubuntu $(lsb_release -cs 2>/dev/null) stable" | + sudo tee /etc/apt/sources.list.d/docker.list >/dev/null + +echo "[+] nginx" +# http://nginx.org/en/linux_packages.html#Ubuntu +curl -L https://nginx.org/keys/nginx_signing.key | gpg --dearmor | + sudo tee /usr/share/keyrings/nginx-archive-keyring.gpg >/dev/null +expected_nginx_fingerprint='573BFD6B3D8FBC641079A6ABABF5BD827BD9BF62' +if ! gpg --dry-run --quiet --no-keyring --import --import-options \ + import-show /usr/share/keyrings/nginx-archive-keyring.gpg | + grep -c $expected_nginx_fingerprint; then + echo -e "\n[!] Nginx GPG key fingerprint does not match, aborting...\n" + sudo rm /usr/share/keyrings/nginx-archive-keyring.gpg + exit 1 +fi +echo "deb [signed-by=/usr/share/keyrings/nginx-archive-keyring.gpg] \ + http://nginx.org/packages/ubuntu $(lsb_release -cs 2>/dev/null) nginx" | + sudo tee /etc/apt/sources.list.d/nginx.list +echo -e "Package: *\nPin: origin nginx.org\nPin: release o=nginx\nPin-Priority: 900\n" | + sudo tee /etc/apt/preferences.d/99nginx + +echo "[+] syncthing" +sudo curl -L -o /etc/apt/keyrings/syncthing-archive-keyring.gpg https://syncthing.net/release-key.gpg +echo "deb [signed-by=/etc/apt/keyrings/syncthing-archive-keyring.gpg]\ + https://apt.syncthing.net/ syncthing stable-v2" | + sudo tee /etc/apt/sources.list.d/syncthing.list +echo -e "Package: *\nPin: origin apt.syncthing.net\nPin-Priority: 990\n" | + sudo tee /etc/apt/preferences.d/syncthing.pref + +echo "[+] putting it all together" +sudo apt-get update +sudo apt-get install docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin nginx syncthing -y +if ! sudo docker run hello-world | grep -c 'installation appears to be working correctly'; then + echo -e "\n[!] 
Docker installation failed, aborting...\n" + exit 1 +fi + +echo "[+] rclone" + +curl https://rclone.org/install.sh | sudo bash + +echo "[+] certbot from snap ugh" + +sudo snap install core +sudo snap refresh core +sudo apt-get remove certbot +sudo snap install --classic certbot +sudo ln -s /snap/bin/certbot /usr/bin/certbot + +echo "[+] add users for applications" +# format - tool name underscore 'server' +users=( + "actual_server" + "authelia_server" + "foundry_server" + "ghost_server" + "gitea_server" + "homepage_server" + "mealie_server" + "memos_server" + "minecraft_server" + "pwpush_server" + "shlink_server" + "spotmgr_server" + "stirling_server" + "syncthing_server" + "vikunja_server" + "wg_server" +) +for username in "${users[@]}"; do + sudo useradd -m -U -s /bin/bash "${username}" + + # setup script + sudo cp ~/"${username}"-setup /home/"${username}"/ + sudo chmod 774 /home/"${username}"/"${username}"-setup + sudo chown "${username}":"${username}" /home/"${username}"/"${username}"-setup + sudo cp ~/"${username}"-env /home/"${username}"/ + sudo chmod 600 /home/"${username}"/"${username}"-env + sudo chown "${username}":"${username}" /home/"${username}"/"${username}"-env + + # user services won't linger by default + sudo loginctl enable-linger "${username}" +done + +# admin privileges, needed for anyone running docker +admin_users=( + "actual_server" + "authelia_server" + "ghost_server" + "gitea_server" + "homepage_server" + "mealie_server" + "memos_server" + "pwpush_server" + "shlink_server" + "spotmgr_server" + "stirling_server" + "vikunja_server" + "wg_server" +) +for username in "${admin_users[@]}"; do + sudo usermod -aG sudo "${username}" + echo "${username} ALL=(ALL:ALL) NOPASSWD: ALL" | sudo tee /etc/sudoers.d/"${username}" + + # compose files + sudo cp ~/"${username}"-compose_template.yaml /home/"${username}"/ + sudo chmod 664 /home/"${username}"/"${username}"-compose_template.yaml + sudo chown "${username}":"${username}" /home/"${username}"/"${username}"-compose_template.yaml + sudo cp ~/"${username}"-compose.yaml /home/"${username}"/ + sudo chmod 600 /home/"${username}"/"${username}"-compose.yaml + sudo chown "${username}":"${username}" /home/"${username}"/"${username}"-compose.yaml +done + +echo "[+] distribute and apply respective config files" + +echo -e "\t[-] rclone" + +for username in "${users[@]}"; do + sudo mkdir -p /home/"${username}"/.config/rclone/ + sudo cp ~/.config/rclone/rclone.conf /home/"${username}"/.config/rclone/ + sudo chmod -R 600 /home/"${username}"/.config/rclone/rclone.conf + sudo chown -R "${username}":"${username}" /home/"${username}"/ +done + +# consider switching to acme.sh instead of certbot to avoid snap +echo -e "\t[-] nginx and certbot" + +cert_subdomains=( + "api.spotify-manager" + "auth" + "budget" + "dash" + "git" + "lnk" + "notes" + "paste" + "planning" + "pdf" + "recipes" + "syncthing" + "vpn" + "vtt" +) +# ghost handles SSL by itself, might be worth looking into it to either shift to certbot +for subdomain in "${cert_subdomains[@]}"; do + # revoke existing certs if any + sudo certbot revoke -n --delete-after-revoke --cert-name "${subdomain}"."${domain}" + sudo cp ~/"${subdomain}"."${domain}".conf /etc/nginx/conf.d/ + sudo chmod 664 /etc/nginx/conf.d/"${subdomain}"."${domain}".conf + sudo chown root:root /etc/nginx/conf.d/"${subdomain}"."${domain}".conf + if ! sudo nginx -t; then + echo -e "\n\t[!] 
Bad Nginx config for ${subdomain}.${domain}, aborting...\n" + exit 1 + fi + sudo nginx -s reload + + # ---------------------------------------------------------------------- + # STOP! + # Check DNS records before proceeding + # ---------------------------------------------------------------------- + + # https://letsencrypt.org/docs/duplicate-certificate-limit/#description + # certbot has 5 per week duplicate cert limit. use --test-cert flag for testing + if ! sudo certbot -n --nginx --agree-tos -m "${email_address}" -d "${subdomain}"."${domain}"; then + echo -e "\n\t[!] Certbot failed to get cert for ${subdomain}.${domain}, aborting...\n" + exit 1 + fi + sudo nginx -s reload +done + +echo -e "\t[-] user-specific files" + +# bash variable expansion ftw - https://stackoverflow.com/a/63821858/7630441 +user_files=( + "authelia_server-configuration.yaml" + "foundry_server-start.service" + "ghost_server-config.production.json" + "ghost_server-credentials.exp" + "minecraft_server-start.service" + "minecraft_server-start.socket" + "pwpush_server-settings.yaml" +) + +for f in "${user_files[@]}"; do + username=${f%%-*} # strips the part from before the hyphen + sudo cp ~/"${f}" /home/"${username}"/ + sudo chmod 664 /home/"${username}"/"${f}" + sudo chown "${username}":"${username}" /home/"${username}"/"${f}" +done + +echo -e "[+] cronjobs: backups, updates" + +for username in "${users[@]}"; do + sudo cp ~/"${username}"-backup /home/"${username}"/ + sudo chmod 774 /home/"${username}"/"${username}"-backup + sudo chown "${username}":"${username}" /home/"${username}"/"${username}"-backup + sudo cp ~/"${username}"-update /home/"${username}"/ + sudo chmod 774 /home/"${username}"/"${username}"-update + sudo chown "${username}":"${username}" /home/"${username}"/"${username}"-update + + { + # first add some useful env vars that aren't in cron's exec env + echo "USER=$username" + echo "XDG_RUNTIME_DIR=/run/user/$(id -u "$username")" + echo "DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/$(id -u "$username")/bus" + # then the defined cronjob + cat ~/"${username}"-cronjob + } >~/"${username}".cronjobs + + # install to crontab + sudo crontab -u "${username}" ~/"${username}".cronjobs + rm ~/"${username}".cronjobs +done + +# shellcheck disable=SC2024 +sudo crontab -l -u ubuntu >~/ubuntu.cronjobs +cat ~/ubuntu-cronjob >>~/ubuntu.cronjobs +sudo crontab -u ubuntu ~/ubuntu.cronjobs +rm ~/ubuntu.cronjobs + +for username in "${users[@]}"; do + chmod ug+x "${username}"-teardown +done diff --git a/lnk.knravish.me.conf b/lnk.knravish.me.conf new file mode 100644 index 0000000..8c1229f --- /dev/null +++ b/lnk.knravish.me.conf @@ -0,0 +1,14 @@ +server { + server_name lnk.knravish.me; + charset utf-8; + + location / { + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header Host $http_host; + proxy_pass http://127.0.0.1:8080; + } + + listen 80; +} diff --git a/mealie_server-backup b/mealie_server-backup new file mode 100644 index 0000000..7c01eaf --- /dev/null +++ b/mealie_server-backup @@ -0,0 +1,39 @@ +#!/bin/bash + +# shellcheck source=mealie_server-env +. 
"${HOME}"/"${USER}"-env + +mkdir -p "${HOME}"/backup_logs +logFile=${HOME}/backup_logs/$(date +%y_%m).log + +{ + echo -e "\n[+] mealie backup\n" + + mkdir -p /tmp/"${USER}"-backup + + sudo docker compose -f "${HOME}"/"${USER}"-compose.yaml stop + + cp -pr "${VOLUME_PATH}"/* /tmp/"${USER}"-backup + + sudo docker compose -f "${HOME}"/"${USER}"-compose.yaml start + + if ! rclone copy /tmp/"${USER}"-backup "${BUCKET_PATH}" -v; then + curl -Ss \ + -H "Title: Mealie" \ + -H "Priority: 3" \ + -H "Tags: warning,backup" \ + -d "Backup not completed" \ + "${NOTIF_URL}" + rm -r /tmp/"${USER}"-backup + exit 1 + fi + + curl -Ss \ + -H "Title: Mealie" \ + -H "Priority: 2" \ + -H "Tags: heavy_check_mark,backup" \ + -d "Backup completed" \ + "${NOTIF_URL}" + rm -r /tmp/"${USER}"-backup + +} &>>"$logFile" diff --git a/mealie_server-compose_template.yaml b/mealie_server-compose_template.yaml new file mode 100644 index 0000000..820562e --- /dev/null +++ b/mealie_server-compose_template.yaml @@ -0,0 +1,27 @@ +--- +services: + mealie: + image: ghcr.io/mealie-recipes/mealie + container_name: mealie + pull_policy: always + restart: unless-stopped + ports: + - 127.0.0.1:${PORT}:9000 + deploy: + resources: + limits: + memory: 2048M + volumes: + - type: bind + source: ${VOLUME_PATH} + target: /app/data + bind: + create_host_path: true + environment: + ALLOW_SIGNUP: false + PUID: ${PUID} + PGID: ${PGID} + TZ: America/Phoenix + MAX_WORKERS: 1 + WEB_CONCURRENCY: 1 + BASE_URL: ${BASE_URL} diff --git a/mealie_server-cronjob b/mealie_server-cronjob new file mode 100644 index 0000000..b932d08 --- /dev/null +++ b/mealie_server-cronjob @@ -0,0 +1,2 @@ +6 10 * * * /home/mealie_server/mealie_server-backup +6 11 * * 2 /home/mealie_server/mealie_server-update diff --git a/mealie_server-setup b/mealie_server-setup new file mode 100644 index 0000000..2f3635b --- /dev/null +++ b/mealie_server-setup @@ -0,0 +1,20 @@ +#!/bin/bash + +# shellcheck source=mealie_server-env +. 
"${HOME}"/"${USER}"-env + +echo -e "\n[+] setting up mealie\n\n-------\n" + +envsubst < "${HOME}"/"${USER}"-compose_template.yaml > "${HOME}"/"${USER}"-compose.yaml + +sudo docker compose -f "${HOME}"/"${USER}"-compose.yaml up -d + +echo "[+] restoring backup data" + +sudo docker compose -f "${HOME}"/"${USER}"-compose.yaml stop + +rm -r "${VOLUME_PATH:?}"/* + +rclone copy "${BUCKET_PATH}" "${VOLUME_PATH}" -v + +sudo docker compose -f "${HOME}"/"${USER}"-compose.yaml start diff --git a/mealie_server-teardown b/mealie_server-teardown new file mode 100644 index 0000000..f8a3311 --- /dev/null +++ b/mealie_server-teardown @@ -0,0 +1,14 @@ +#!/bin/bash + +username=mealie_server + +# application +sudo docker compose -f /home/${username}/${username}-compose.yaml down -v + +uid_num=$(id -u $username) +sudo killall -9 -v -g -u $username +sudo crontab -r -u $username +sudo deluser --remove-all-files $username + +# clean-up +sudo find / -user "$uid_num" -delete diff --git a/mealie_server-update b/mealie_server-update new file mode 100644 index 0000000..ae6f081 --- /dev/null +++ b/mealie_server-update @@ -0,0 +1,11 @@ +#!/bin/bash + +mkdir -p "${HOME}"/update_logs +logFile=${HOME}/update_logs/$(date +%y_%m).log +{ + echo -e "\n[+] updating mealie\n" + + sudo docker compose -f "${HOME}"/"${USER}"-compose.yaml pull && + sudo docker compose -f "${HOME}"/"${USER}"-compose.yaml up -d --always-recreate-deps --remove-orphans && + yes | sudo docker image prune -af +} &>>"$logFile" diff --git a/memos_server-backup b/memos_server-backup new file mode 100644 index 0000000..8a381e3 --- /dev/null +++ b/memos_server-backup @@ -0,0 +1,39 @@ +#!/bin/bash + +# shellcheck source=memos_server-env +. "${HOME}"/"${USER}"-env + +mkdir -p "${HOME}"/backup_logs +logFile=${HOME}/backup_logs/$(date +%y_%m).log + +{ + echo -e "\n[+] memos backup\n" + + mkdir -p /tmp/"${USER}"-backup + + sudo docker compose -f "${HOME}"/"${USER}"-compose.yaml stop + + cp -pr "${VOLUME_PATH}"/* /tmp/"${USER}"-backup + + sudo docker compose -f "${HOME}"/"${USER}"-compose.yaml start + + if ! rclone copy /tmp/"${USER}"-backup "${BUCKET_PATH}" -v; then + curl -Ss \ + -H "Title: Memos" \ + -H "Priority: 3" \ + -H "Tags: warning,backup" \ + -d "Backup not completed" \ + "${NOTIF_URL}" + rm -r /tmp/"${USER}"-backup + exit 1 + fi + + curl -Ss \ + -H "Title: Memos" \ + -H "Priority: 2" \ + -H "Tags: heavy_check_mark,backup" \ + -d "Backup completed" \ + "${NOTIF_URL}" + rm -r /tmp/"${USER}"-backup + +} &>>"$logFile" diff --git a/memos_server-compose_template.yaml b/memos_server-compose_template.yaml new file mode 100644 index 0000000..1456dad --- /dev/null +++ b/memos_server-compose_template.yaml @@ -0,0 +1,15 @@ +--- +services: + memos: + image: neosmemo/memos:stable + container_name: memos + pull_policy: always + restart: unless-stopped + ports: + - 127.0.0.1:${PORT}:${PORT} + volumes: + - type: bind + source: ${VOLUME_PATH} + target: /var/opt/memos + bind: + create_host_path: true diff --git a/memos_server-cronjob b/memos_server-cronjob new file mode 100644 index 0000000..5599c2a --- /dev/null +++ b/memos_server-cronjob @@ -0,0 +1,2 @@ +7 10 * * * /home/memos_server/memos_server-backup +7 11 * * 2 /home/memos_server/memos_server-update diff --git a/memos_server-setup b/memos_server-setup new file mode 100644 index 0000000..6a45cda --- /dev/null +++ b/memos_server-setup @@ -0,0 +1,26 @@ +#!/bin/bash + +echo -e "\n[+] setting up usememos\n\n-------\n" + +# shellcheck source=memos_server-env +. 
"${HOME}"/"${USER}"-env + +envsubst <"${HOME}"/"${USER}"-compose_template.yaml >"${HOME}"/"${USER}"-compose.yaml + +sudo docker compose -f "${HOME}"/"${USER}"-compose.yaml up -d + +echo -e "\n[+] restoring from backup..." + +sudo docker compose -f "${HOME}"/"${USER}"-compose.yaml stop + +sudo rm -rf "${HOME}"/.memos/* + +mkdir memos_data +rclone copy "${BUCKET_PATH}" "${HOME}"/memos_data -v + +sudo cp memos_data/* "${HOME}"/.memos +rm -rf memos_data + +echo "[+] restarting..." + +sudo docker compose -f "${HOME}"/"${USER}"-compose.yaml start diff --git a/memos_server-teardown b/memos_server-teardown new file mode 100644 index 0000000..42d4f5a --- /dev/null +++ b/memos_server-teardown @@ -0,0 +1,14 @@ +#!/bin/bash + +username=memos_server + +# application +sudo docker compose -f /home/${username}/${username}-compose.yaml down -v + +uid_num=$(id -u $username) +sudo killall -9 -v -g -u $username +sudo crontab -r -u $username +sudo deluser --remove-all-files $username + +# clean-up +sudo find / -user "$uid_num" -delete diff --git a/memos_server-update b/memos_server-update new file mode 100644 index 0000000..0641102 --- /dev/null +++ b/memos_server-update @@ -0,0 +1,11 @@ +#!/bin/bash + +mkdir -p "${HOME}"/update_logs +logFile=${HOME}/update_logs/$(date +%y_%m).log +{ + echo -e "\n[+] updating memos\n" + + sudo docker compose -f "${HOME}"/"${USER}"-compose.yaml pull && + sudo docker compose -f "${HOME}"/"${USER}"-compose.yaml up -d --always-recreate-deps --remove-orphans && + yes | sudo docker image prune -af +} &>>"$logFile" diff --git a/minecraft_server-backup b/minecraft_server-backup new file mode 100644 index 0000000..db1a90c --- /dev/null +++ b/minecraft_server-backup @@ -0,0 +1,45 @@ +#!/bin/bash + +# shellcheck source=minecraft_server-env +. "${HOME}"/"${USER}"-env + +mkdir -p "${HOME}"/backup_logs +logFile=${HOME}/backup_logs/$(date +%y_%m).log + +{ + echo -e "\n[+] minecraft backup\n" + + mkdir -p /tmp/"${USER}"-backup + + insock=${HOME}/${USER}.stdin + + # https://www.reddit.com/r/admincraft/comments/vgdbi/minecraft_backups_saveoff_and_saveall/ + echo "/save-off" >"${insock}" + echo "/save-all" >"${insock}" + systemctl --user stop "${USER}"-start.{socket,service} + + cp -pr "${DATA_PATH}"/* /tmp/"${USER}"-backup + + systemctl --user restart "${USER}"-start.{socket,service} + echo "/save-on" >"${insock}" + + if ! rclone copy /tmp/"${USER}"-backup "${BUCKET_PATH}" -v; then + curl -Ss \ + -H "Title: Minecraft Server" \ + -H "Priority: 3" \ + -H "Tags: warning,backup" \ + -d "Backup not completed" \ + "${NOTIF_URL}" + rm -r /tmp/"${USER}"-backup + exit 1 + fi + + curl -Ss \ + -H "Title: Minecraft Server" \ + -H "Priority: 2" \ + -H "Tags: heavy_check_mark,backup" \ + -d "Backup completed" \ + "${NOTIF_URL}" + rm -r /tmp/"${USER}"-backup + +} &>>"$logFile" diff --git a/minecraft_server-cronjob b/minecraft_server-cronjob new file mode 100644 index 0000000..b387e37 --- /dev/null +++ b/minecraft_server-cronjob @@ -0,0 +1 @@ +8 10 * * * /home/minecraft_server/minecraft_server-backup diff --git a/minecraft_server-setup b/minecraft_server-setup new file mode 100644 index 0000000..bc29727 --- /dev/null +++ b/minecraft_server-setup @@ -0,0 +1,21 @@ +#!/bin/bash + +echo -e "\n[+] setting up the minecraft server\n\n-------\n" + +# shellcheck source=minecraft_server-env +. 
"${HOME}"/"${USER}"-env + +mkdir -p "${DATA_PATH}" + +echo "[+] restoring backup data" + +rclone copy "${BUCKET_PATH}" "${DATA_PATH}"/ -v + +echo "[+] setting up systemctl and starting" + +mkdir -p "${HOME}"/.config/systemd/user/ +cp "${HOME}"/"${USER}"-start.{service,socket} "${HOME}"/.config/systemd/user/ + +systemctl --user daemon-reload +systemctl --user restart "${USER}"-start.socket +systemctl --user enable --now "${USER}"-start.service diff --git a/minecraft_server-start.service b/minecraft_server-start.service new file mode 100644 index 0000000..f1bf0a2 --- /dev/null +++ b/minecraft_server-start.service @@ -0,0 +1,17 @@ +[Unit] +Description=Minecraft server +After=network.target + +[Service] +Type=simple +Restart=on-failure +RestartSec=1 +WorkingDirectory=%h/%u +ExecStart=/usr/bin/java -Xms1024M -jar %h/%u/server.jar nogui +Sockets=%u-start.socket +StandardInput=socket +StandardOutput=truncate:%h/%u.run.log +StandardError=append:%h/%u.err.log + +[Install] +WantedBy=default.target diff --git a/minecraft_server-start.socket b/minecraft_server-start.socket new file mode 100644 index 0000000..fdf831b --- /dev/null +++ b/minecraft_server-start.socket @@ -0,0 +1,3 @@ +[Socket] +ListenFIFO=%h/%u.stdin +Service=%u-start.service diff --git a/notes.knravish.me.conf b/notes.knravish.me.conf new file mode 100644 index 0000000..6315c13 --- /dev/null +++ b/notes.knravish.me.conf @@ -0,0 +1,18 @@ +server { + server_name notes.knravish.me; + index index.html index.htm; + + location / { + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header Host $http_host; + proxy_pass http://127.0.0.1:5230; + proxy_redirect off; + proxy_set_header Access-Control-Allow-Origin *; + proxy_read_timeout 600s; + proxy_send_timeout 600s; + } + + listen 80; +} diff --git a/paste.knravish.me.conf b/paste.knravish.me.conf new file mode 100644 index 0000000..e897152 --- /dev/null +++ b/paste.knravish.me.conf @@ -0,0 +1,18 @@ +server { + server_name paste.knravish.me; + index index.html index.htm; + + location / { + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header Host $http_host; + proxy_pass http://127.0.0.1:5100; + proxy_redirect off; + proxy_set_header Access-Control-Allow-Origin *; + proxy_read_timeout 600s; + proxy_send_timeout 600s; + } + + listen 80; +} diff --git a/pdf.knravish.me.conf b/pdf.knravish.me.conf new file mode 100644 index 0000000..4b6f53b --- /dev/null +++ b/pdf.knravish.me.conf @@ -0,0 +1,18 @@ +server { + server_name pdf.knravish.me; + index index.html index.htm; + + location / { + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header Host $http_host; + proxy_pass http://127.0.0.1:8081; + proxy_redirect off; + proxy_set_header Access-Control-Allow-Origin *; + proxy_read_timeout 600s; + proxy_send_timeout 600s; + } + + listen 80; +} diff --git a/planning.knravish.me.conf b/planning.knravish.me.conf new file mode 100644 index 0000000..94ad0d5 --- /dev/null +++ b/planning.knravish.me.conf @@ -0,0 +1,16 @@ +server { + server_name planning.knravish.me; + index index.html index.htm; + + include /etc/nginx/snippets/authelia-location.conf; + + set $upstream http://127.0.0.1:3456; + + location / { + include /etc/nginx/snippets/proxy.conf; + include 
/etc/nginx/snippets/authelia-authrequest.conf; + proxy_pass $upstream; + } + + listen 80; +} \ No newline at end of file diff --git a/pwpush_server-compose_template.yaml b/pwpush_server-compose_template.yaml new file mode 100644 index 0000000..6f0bb65 --- /dev/null +++ b/pwpush_server-compose_template.yaml @@ -0,0 +1,35 @@ +--- +services: + pwpush: + image: pglombardo/pwpush + container_name: pwpush + pull_policy: always + restart: unless-stopped + ports: + - 127.0.0.1:${PORT}:5100 + environment: + PWP__MAIL__SMTP_PASSWORD: ${SMTP_PASSWORD} + DATABASE_URL: 'postgres://postgres:${PG_PASS}@postgres:5432/postgres' + volumes: + - ${VOLUME_PATH}/config/${USER}-settings.yaml:/opt/PasswordPusher/config/settings.yml + - type: bind + source: ${VOLUME_PATH}/files + target: /opt/PasswordPusher/storage + bind: + create_host_path: true + depends_on: + - postgres + postgres: + image: docker.io/postgres:16 + container_name: 'pwpush-postgres' + pull_policy: always + restart: unless-stopped + volumes: + - type: bind + source: ${VOLUME_PATH}/database + target: /var/lib/postgresql/data + bind: + create_host_path: true + environment: + POSTGRES_PASSWORD: ${PG_PASS} + user: ${PUID}:${PGID} diff --git a/pwpush_server-settings.yaml b/pwpush_server-settings.yaml new file mode 100644 index 0000000..4c88ee3 --- /dev/null +++ b/pwpush_server-settings.yaml @@ -0,0 +1,981 @@ +# Global Application Configuration +# +# This file uses YAML syntax. Indentation must be 2 spaces (not tabs). +# +# See also https://docs.pwpush.com/docs/config-strategies/ +# for a further explanation of the larger settings available here. + +### Application Defaults +# + +### URL Pushes +# +# Enable or disable URL based pushes. These allow you to share URLs securely. +# Like regular pushes, they expire after a set time or amount of views. +# +# Note that `enable_logins` is required for URL based pushes to work. It is a +# feature for logged in users only. +# +# Environment variable override: +# PWP__ENABLE_URL_PUSHES='false' +# +enable_url_pushes: true + +### File Uploads +# +# File uploads are disabled by default since they require a place to store +# those files. +# +# If enabling file uploads, make sure to fill out the 'files' section below. +# +# Note that `enable_logins` is required for file uploads to work. It is a +# feature for logged in users only. +# +# Environment variable override: +# PWP__ENABLE_FILE_PUSHES='false' +# +enable_file_pushes: true + +### Logins (User accounts) +# +# Logins are disabled by default since they require an MTA (email) server +# available to send emails through. +# +# If enabling logins, make sure to fill out the 'mail' section below. +# +# For instructions on how to enable logins, see this page: +# https://github.com/pglombardo/PasswordPusher/discussions/276 +# +# Environment variable override: +# PWP__ENABLE_LOGINS='false' +# +enable_logins: true + +## Disable Signups +# +# Disallow new user accounts to be created in the application. +# +# Set this after you have your desired user accounts created. It will +# not allow any further user account creation. +# +# Environment variable override: +# PWP__DISABLE_SIGNUPS='false' +# +disable_signups: false + +## Limit Signups to Specific Email Domains +# +# By default, anyone can sign up for an account. The following default regular +# expression just validates if it is a valid email address. 
+# +# signup_email_regexp: '\A[^@\s]+@[^@\s]+\z' +# +# If you would like to limit signups to specific email domains, you can extend +# the regular expression below to include the domains you want to allow. +# +# For example, to only allow signups from the domain 'hey.com', you would +# change the following to: +# +# signup_email_regexp: '\A[^@\s]+@(hey\.com)\z' +# +# or for multiple domains: +# +# signup_email_regexp: '\A[^@\s]+@(hey\.com|gmail\.com)\z' +# +# Tip: use https://rubular.com to test out your regular expressions. It includes +# a guide to what each component means in regexp. +# +# Environment variable override: +# PWP__SIGNUP_EMAIL_REGEXP='\A[^@\s]+@[^@\s]+\z' +# +signup_email_regexp: '\A[^@\s]+@[^@\s]+\z' + +### Allow Anonymous +# +# By default, Password Pusher can be used by anonymous users to push +# new passwords and generate secret URLs. If you want to limit functionality +# to logged in users only, set the following value to true. +# +# This does not affect password secret URLs themselves as anonymous is always +# allowed there. +# +# Environment variable override: +# PWP__ALLOW_ANONYMOUS='true' +# +allow_anonymous: true + +### Host Domain +# +# The domain (without protocol) where this instance is hosted +# Used in generating fully qualified URLs. +# +# Make sure to set this for email links to work correctly. +# +# Environment variable override: +# PWP__HOST_DOMAIN='pwpush.com' +# +host_domain: 'paste.knravish.me' + +### Host Protocol +# +# The protocol to reach the domain above +# Used in generating fully qualified URLs. +# +# Make sure to set this for email links to work correctly. +# +# Environment variable override: +# PWP__HOST_PROTOCOL='https' +# +host_protocol: 'https' + +### Base URL Override +# +# Set the following value to force the base URL of generated links. +# +# Environment variable override: +# PWP__OVERRIDE_BASE_URL='https://pwpush.mydomain.com' +# +# You could even add a port if needed: +# PWP__OVERRIDE_BASE_URL='https://pwpush.mydomain.com:5100' +# +# Set this value without a trailing slash ('/'). +# +# override_base_url: 'https://pwpush.mydomain.com' + +### Show version on the footer +# +# Enable/disable PasswordPusher version on the footer. +# +# Environment variable override: +# PWP__SHOW_VERSION=true +# +# Default: true +show_version: true + +### Show the GDPR cookie consent banner +# +# Enable/disable the GDPR cookie consent banner. +# +# Environment variable override: +# PWP__SHOW_GDPR_CONSENT_BANNER=true +# +# Default: true +show_gdpr_consent_banner: true + +### Timezone +# +# Set the timezone for the application. A full list of timezone strings +# can be found here: +# https://en.wikipedia.org/wiki/List_of_tz_database_time_zones +# +# Environment variable override: +# PWP__TIMEZONE='America/New_York' +# +# Default: 'America/New_York' +timezone: 'America/Phoenix' + +### Allowed Hosts +# +# This is a list of allowed hosts for the application. This is used to +# prevent host header attacks. +# +# When set, the application will only respond to requests with a host header +# that matches one of the values in this list. +# +# This feature is generally only used when the application is behind a proxy. +# +# It's generally not required to use this unless you are getting the related error +# in the application. localhost and the IP that the application is running on +# are always allowed. +# +# Note: If you need more than one value to the environment variable, separate +# entries by a single space. 
+# +# Environment variable override: +# PWP__ALLOWED_HOSTS='pwpush.com pwpush.mydomain.com pwpush.myotherdomain' +# +# allowed_hosts: +# - 'pwpush.com' +# - 'pwpush.mydomain.com' +# - 'pwpush.myotherdomain.com' + +## Expiration Settings for Password Pushes +# +pw: + # Expire Password Pushes After XX Days + # + # Controls the "Expire After Days" for Password Pushes + # + # Environment variable overrides: + # PWP__PW__EXPIRE_AFTER_DAYS_DEFAULT=7 + # PWP__PW__EXPIRE_AFTER_DAYS_MIN=1 + # PWP__PW__EXPIRE_AFTER_DAYS_MAX=90 + # + expire_after_days_default: 7 + expire_after_days_min: 1 + expire_after_days_max: 90 + + # Expire Password Pushes After XX Views + # + # Controls the "Expire After Views" form settings in Password#new + # + # Environment variable overrides: + # PWP__PW__EXPIRE_AFTER_VIEWS_DEFAULT=5 + # PWP__PW__EXPIRE_AFTER_VIEWS_MIN=1 + # PWP__PW__EXPIRE_AFTER_VIEWS_MAX=100 + # + expire_after_views_default: 5 + expire_after_views_min: 1 + expire_after_views_max: 100 + + # Retrieval Step for Password Pushes + # + # This enables or disables the "1-click retrieval step" feature entirely. For the default value + # when it is enabled here, see the next setting. + # + # Environment variable override: + # PWP__PW__ENABLE_RETRIEVAL_STEP='false' + # + enable_retrieval_step: true + + # Default Form Value for the Retrieval Step + # + # When the retrieval step is enabled (above), what is the default value on the form? + # + # When true, secret URLs will be generated as /p/xxxxxxxx/r which will show a page + # requiring a click to view the page /p/xxxxxxxx + # + # Environment variable override: + # PWP__PW__RETRIEVAL_STEP_DEFAULT='true' + # + retrieval_step_default: false + + # Deletable Password Pushes + # + # default: true + # + # This enables or disables the "Allow Immediate Deletion" feature entirely. For the default value + # when it is enabled here, see the next setting. + # + # Environment variable override: + # PWP__PW__ENABLE_DELETABLE_PUSHES='false' + # + enable_deletable_pushes: true + + # Deletable Pushes Default Value + # + # default: true + # + # When this is set to true, this option does two things: + # 1. Sets the default check state for the "Allow viewers to + # optionally delete password before expiration" checkbox + # 2. JSON API: Sets the default value for newly pushed passwords if + # unspecified + # + # Environment variable override: + # PWP__PW__DELETABLE_PUSHES_DEFAULT='false' + # + deletable_pushes_default: true + + # Blur Payloads + # + # default: true + # + # This option does not affect the JSON API - web UI only. + # When this is set to true, this option will display the pushed text payload as + # blurred out text. This is useful for recipients in public places who don't + # want to reveal the sensitive information until when they choose. + # + # The blur is disabled with a single mouse click. + # + # Setting this option to false will disable the blur feature entirely for password pushes. + # + # Note: This is a global on/off switch currently. This may be made configurable per push + # in the future by adding a new checkbox and a `blur_default` setting. 
+ # + # Environment variable override: + # PWP__PW__ENABLE_BLUR='false' + # + enable_blur: true + +## Expiration Settings for URL Pushes +# +url: + # Expire URL Pushes After XX Days + # + # Controls the "Expire After Days" for URL Pushes + # + # Environment variable overrides: + # PWP__URL__EXPIRE_AFTER_DAYS_DEFAULT=7 + # PWP__URL__EXPIRE_AFTER_DAYS_MIN=1 + # PWP__URL__EXPIRE_AFTER_DAYS_MAX=90 + # + expire_after_days_default: 7 + expire_after_days_min: 1 + expire_after_days_max: 90 + + # Expire URL Pushes After XX Views + # + # Controls the "Expire After Views" form settings in Password#new + # + # Environment variable overrides: + # PWP__URL__EXPIRE_AFTER_VIEWS_DEFAULT=5 + # PWP__URL__EXPIRE_AFTER_VIEWS_MIN=1 + # PWP__URL__EXPIRE_AFTER_VIEWS_MAX=100 + # + expire_after_views_default: 5 + expire_after_views_min: 1 + expire_after_views_max: 100 + + # Retrieval Step for URL Pushes + # + # This enables or disables the "1-click retrieval step" feature entirely. For the default value + # when it is enabled here, see the next setting. + # + # Environment variable override: + # PWP__URL__ENABLE_RETRIEVAL_STEP='false' + # + enable_retrieval_step: true + + # Default Form Value for the Retrieval Step + # + # When the retrieval step is enabled (above), what is the default value on the form? + # + # When true, secret URLs will be generated as /r/xxxxxxxx/r which will show a page + # requiring a click to view the page /r/xxxxxxxx + # + # Environment variable override: + # PWP__URL__RETRIEVAL_STEP_DEFAULT='true' + # + retrieval_step_default: false + +### File Upload: Expiration & Storage Settings +# +files: + # Expire File Pushes After XX Days + # + # Controls the "Expire After Days" for File Pushes + # + # Environment variable overrides: + # PWP__FILES__EXPIRE_AFTER_DAYS_DEFAULT=7 + # PWP__FILES__EXPIRE_AFTER_DAYS_MIN=1 + # PWP__FILES__EXPIRE_AFTER_DAYS_MAX=90 + # + expire_after_days_default: 7 + expire_after_days_min: 1 + expire_after_days_max: 90 + + # Expire File Pushes After XX Views + # + # Controls the "Expire After Views" form settings for File Pushes + # + # Environment variable overrides: + # PWP__FILES__EXPIRE_AFTER_VIEWS_DEFAULT=5 + # PWP__FILES__EXPIRE_AFTER_VIEWS_MIN=1 + # PWP__FILES__EXPIRE_AFTER_VIEWS_MAX=100 + # + expire_after_views_default: 5 + expire_after_views_min: 1 + expire_after_views_max: 100 + + # Retrieval Step for File Pushes + # + # This enables or disables the "1-click retrieval step" feature entirely. For the default value + # when it is enabled here, see the next setting. + # + # Environment variable override: + # PWP__FILES__ENABLE_RETRIEVAL_STEP='false' + # + enable_retrieval_step: true + + # Default Form Value for the Retrieval Step + # + # When the retrieval step is enabled (above), what is the default value on the form? + # + # When true, secret URLs will be generated as /f/xxxxxxxx/r which will show a page + # requiring a click to view the page /f/xxxxxxxx + # + # Environment variable override: + # PWP__FILES__RETRIEVAL_STEP_DEFAULT='true' + # + retrieval_step_default: true + + # Deletable File Pushes + # + # default: true + # + # This enables or disables the "Allow Immediate Deletion" feature entirely. For the default value + # when it is enabled here, see the next setting. + # + # Environment variable override: + # PWP__FILES__ENABLE_DELETABLE_PUSHES='false' + # + enable_deletable_pushes: true + + # Deletable File Pushes Default Value + # + # default: true + # + # When this is set to true, this option does two things: + # 1. 
Sets the default check state for the "Allow viewers to + # optionally delete password before expiration" checkbox + # 2. JSON API: Sets the default value for newly pushed passwords if + # unspecified + # + # Environment variable override: + # PWP__FILES__DELETABLE_PUSHES_DEFAULT='false' + # + deletable_pushes_default: true + + # Blur Payloads + # + # default: true + # + # This option does not affect the JSON API - web UI only. + # When this is set to true, this option will display the pushed text payload as + # blurred out text. This is useful for recipients in public places who don't + # want to reveal the sensitive information until when they choose. + # + # The blur is disabled with a single mouse click. + # + # Setting this option to false will disable the blur feature entirely for file pushes. + # + # Note: This is a global on/off switch currently. This may be made configurable per push + # in the future by adding a new checkbox and a `blur_default` setting. + # + # Environment variable override: + # PWP__FILES__BLUR='false' + # + enable_blur: true + + # Maximum File Upload Count + # + # default: 10 + # + # This option controls the maximum number of files that can be uploaded + # in a single push. + # + # Environment variable override: + # PWP__FILES__MAX_FILE_UPLOADS=10 + # + max_file_uploads: 10 + + # File Storage + # + # Password Pusher can store uploaded files into Amazon S3, Google Cloud Services + # or Microsoft Azure. + # + # Choose your file storage preference by setting the following option to + # one of the following values: + # * local - use local disk (likely won't work in container environments) + # * amazon - use Amazon S3 (and provide 's3' credentials below) + # * google - use Google Cloud Storage (and provide 'gcs' credentials below) + # * microsoft - use Microsoft Azure Storage (and provide 'as' credentials below) + # + # Environment variable override: + # PWP__FILES__STORAGE='local' + # + storage: 'local' + + # Amazon S3 Storage Credentials + s3: + # Environment Variable Override: PWP__FILES__S3__ENDPOINT='' + endpoint: '' + # Environment Variable Override: PWP__FILES__S3__ACCESS_KEY_ID='' + access_key_id: '_' + # Environment Variable Override: PWP__FILES__S3__SECRET_ACCESS_KEY='' + secret_access_key: '' + # Environment Variable Override: PWP__FILES__S3__REGION='' + region: 'us-east-1' + # Environment Variable Override: PWP__FILES__S3__BUCKET='' + bucket: 'pwpush-files' + + # Google Cloud Storage Credentials + gcs: + # Environment Variable Override: PWP__FILES__GCS__PROJECT='' + project: '' + # Environment Variable Override: PWP__FILES__GCS__CREDENTIALS='' + credentials: '' + # Environment Variable Override: PWP__FILES__GCS__BUCKET='' + bucket: '' + # + # Optionally use IAM instead of the credentials when signing URLs. 
+ # This is useful if you are authenticating your GKE applications with Workload Identity, + # See here: https://edgeguides.rubyonrails.org/active_storage_overview.html#google-cloud-storage-service + # + # Environment Variable Override: PWP__FILES__GCS__IAM=true + iam: false + # Environment Variable Override: PWP__FILES__GCS__GSA_EMAIL='email@domain.com' + gsa_email: null + + # Microsoft Azure Storage Credentials + as: + # Environment Variable Override: PWP__FILES__AS__STORAGE_ACCOUNT_NAME='' + storage_account_name: '' + # Environment Variable Override: PWP__FILES__AS__STORAGE_ACCESS_KEY='' + storage_access_key: '' + # Environment Variable Override: PWP__FILES__AS__CONTAINER='' + container: '' + +### Password Generator Defaults +# +# Set the defaults of the front page password generator. +# +gen: + # Whether generated passwords have numbers + # + # Environment variable override: + # PWP__GEN__HAS_NUMBERS='true' + # + has_numbers: true + + # Whether generated passwords will be title cased + # + # Environment variable override: + # PWP__GEN__TITLE_CASED='true' + # + title_cased: true + + # Whether generated passwords will use separators between syllables + # + # Environment variable override: + # PWP__GEN__USE_SEPARATORS='true' + # + use_separators: true + + # List of consonants to generate from + # + # Environment variable override: + # PWP__GEN__CONSONANTS='bcdfghklmnprstvz' + # + consonants: 'bcdfghklmnprstvz' + + # List of vowels to generate from + # + # Environment variable override: + # PWP__GEN__VOWELS='aeiouy' + # + vowels: 'aeiouy' + + # If `use_separators` is enabled above, the list of separators to use (randomly) + # + # Environment variable override: + # PWP__GEN__SEPARATORS='-_=' + # + separators: '-_=' + + # The maximum length of each syllable that a generated password can have + # + # Environment variable override: + # PWP__GEN__MAX_SYLLABLE_LENGTH=3 + # + max_syllable_length: 3 + + # The minimum length of each syllable that a generated password can have + # + # Environment variable override: + # PWP__GEN__MIN_SYLLABLE_LENGTH=1 + # + min_syllable_length: 1 + + # The exact number of syllables that a generated password will have + # + # Environment variable override: + # PWP__GEN__SYLLABLES_COUNT=3 + # + syllables_count: 3 + +brand: + ### Site Title + # + # Environment variable override: PWP__BRAND__TITLE='Acme Corp.' + # + title: 'Password Pusher' + + ### Site Tagline + # + # Environment variable override: PWP__BRAND__TAGLINE='Security First' + # + tagline: 'Go Ahead. Email Another Password.' + + ### Site Disclaimer + # + # Environment variable override: PWP__BRAND__DISCLAIMER='Use at own use risk.' + # disclaimer: 'This is a dummy disclaimer and should not be considered legally binding or taken seriously in any way. The content provided here is for entertainment and illustrative purposes only. Any resemblance to actual disclaimers is purely coincidental. We do not endorse or encourage the use of this disclaimer for any real-world applications, and we strongly advise consulting a legal professional for creating legitimate and appropriate disclaimers for your specific needs. By reading this disclaimer, you agree not to hold us responsible for any confusion, amusement, or bewilderment it may cause. This disclaimer has no legal validity, and any attempt to rely on it for legal, financial, or any other serious matters is ill-advised. Please use disclaimers responsibly and in accordance with applicable laws and regulations.' 
+ + ### Show Footer Menu Toggle + # + # Environment variable override: PWP__BRAND__SHOW_FOOTER_MENU='true' + # + show_footer_menu: true + + ### Site logo + # + # ..for both a light and dark theme + # + # You can also replace these relative paths with fully qualified HTTP links to + # external resources such as Amazon S3 etc. + # e.g. PWP__BRAND__DARK_LOGO='https://mys3bucket.amazonaws.com/a/some-image.png' + # + # Environment variable override: PWP__BRAND__LIGHT_LOGO='https://mys3bucket.amazonaws.com/a/lea+giuliana.png' + # Environment variable override: PWP__BRAND__DARK_LOGO='https://mys3bucket.amazonaws.com/a/lea+giuliana.png' + # + # light_logo: 'logo-transparent-sm-bare.png' + # dark_logo: 'logo-transparent-sm-dark-bare.png' + + ### Favicon & icon images for mobile. When people on mobile (phones/tablets), bookmark + # the site or it is shown in history, these icons are used. + # + # You can also replace these relative paths with fully qualified HTTP links to + # external resources such as Amazon S3 etc. + # e.g. PWP__BRAND__ICON_57x57='https://mys3bucket.amazonaws.com/a/some-image.png' + # + # Although you should set all of the following values, at a bare minimum, make sure + # to set at least icon_57x57 and icon_96x96. Without these two, things are guaranteed + # to not work. + # + # You can use an icon generator such as: + # https://www.favicongenerator.com + # https://www.favicon-generator.org + + # + # Environment variable override: PWP__BRAND__ICON_57x57='/path/to/image' + # icon_57x57: 'apple-icon-57x57.png' + # Environment variable override: PWP__BRAND__ICON_60x60='/path/to/image' + # icon_60x60: 'apple-icon-60x60.png' + # Environment variable override: PWP__BRAND__ICON_72x72='/path/to/image' + # icon_72x72: 'apple-icon-72x72.png' + # Environment variable override: PWP__BRAND__ICON_76x76='/path/to/image' + # icon_76x76: 'apple-icon-76x76.png' + # Environment variable override: PWP__BRAND__ICON_114x114='/path/to/image' + # icon_114x114: 'apple-icon-114x114.png' + # Environment variable override: PWP__BRAND__ICON_120x120='/path/to/image' + # icon_120x120: 'apple-icon-120x120.png' + # Environment variable override: PWP__BRAND__ICON_144x144='/path/to/image' + # icon_144x144: 'apple-icon-144x144.png' + # Environment variable override: PWP__BRAND__ICON_152x152='/path/to/image' + # icon_152x152: 'apple-icon-152x152.png' + # Environment variable override: PWP__BRAND__ICON_180x180='/path/to/image' + # icon_180x180: 'apple-icon-180x180.png' + # Environment variable override: PWP__BRAND__ICON_192x192='/path/to/image' + # icon_192x192: 'android-icon-192x192.png' + # Environment variable override: PWP__BRAND__ICON_32x32='/path/to/image' + # icon_32x32: 'favicon-32x32.png' + # Environment variable override: PWP__BRAND__ICON_96x96='/path/to/image' + # icon_96x96: 'favicon-96x96.png' + # Environment variable override: PWP__BRAND__ICON_16x16='/path/to/image' + # icon_16x16: 'favicon-16x16.png' + # Environment variable override: PWP__BRAND__ICON_144x144='/path/to/image' + # ms_icon_144x144: 'ms-icon-144x144.png' + +### Throttling +# +# Configure the application throttling limits. 
+# +# Throttling enforces a minimum time interval +# between subsequent HTTP requests from a particular client, as +# well as by defining a maximum number of allowed HTTP requests +# per a given time period (per second or minute) +# +# See https://github.com/rack/rack-attack +# +throttling: + # ..maximum number of allowed HTTP requests per minute + # + # Default: 120 + # + # Environment Variable Override: PWP__THROTTLING__MINUTE='60' + minute: 120 + + # ..maximum number of allowed HTTP requests per second + # + # Default: 60 + # + # Environment Variable Override: PWP__THROTTLING__SECOND='20' + second: 60 + +### Trusted Proxies +# +# By default, Password Pusher will only proxy related headers from proxies on +# the local network. If you are using a proxy that is not on the local network, +# you will need to add the IP address of the proxy to the list below. +# +# This is useful if you are using a remote reverse proxy such as Cloudflare to +# serve the application. If local, you can leave this setting as is. +# +# Multiple IP addresses can be added by separating them with a comma. +# +# Environment Variable Override: +# PWP__TRUSTED_PROXIES='' +# PWP__TRUSTED_PROXIES=',' +# +# trusted_proxies: +# - '1.2.3.4' +# - '2.3.4.5' + +## Cloudflare Proxy +# +# If you are using Cloudflare as a proxy, you will need to set the following +# value to true. This will cause the application to fetch the list of Cloudflare +# proxy IP addresses and add them to the list of trusted proxies. +# +# Note that on application boot, this will trigger two HTTPs requests to fetch +# the list of Cloudflare IP addresses. The requests each have a timeout of 15 seconds +# and may delay on container boot. +# +# Environment Variable Override: +# PWP__CLOUDFLARE_PROXY='false' +# +cloudflare_proxy: false + +### Mail Server Configuration +# +# When logins are enabled, an SMTP server is required to send emails to users +# for things such as forgot password, unlock account, confirm account etc. +# If `enable_logins` is set to true above, the following _are required_ to be +# filled out with valid values. +# +# These values are passed through to ActionMailer configuration. The documentation +# for ActionMailer is at: +# https://guides.rubyonrails.org/action_mailer_basics.html#action-mailer-configuration +# +# IMPORTANT: Also set host_domain and host_protocol above for email links to work correctly! +# +mail: + # Email delivery errors will be shown in the application + # Environment Variable Override: PWP__MAIL__RAISE_DELIVERY_ERRORS='false' + raise_delivery_errors: true + + # Allows you to use a remote mail server. Just change it from its default "localhost" setting. + # Environment Variable Override: PWP__MAIL__SMTP_ADDRESS='smtp.example.com' + smtp_address: mail.smtp2go.com + + # If you need to specify a HELO domain, you can do it here. + # Environment Variable Override: PWP__MAIL__SMTP_DOMAIN='xyz.dev' + # smtp_domain: '' + + # Port of the SMTP server + # Environment Variable Override: PWP__MAIL__SMTP_PORT='587' + smtp_port: 2525 + + # If your mail server requires authentication, you need to specify the + # authentication type here. This is a string and one of :plain (will send + # the password in the clear), :login (will send password Base64 encoded) + # or :cram_md5 (combines a Challenge/Response mechanism to exchange + # information and a cryptographic Message Digest 5 algorithm to hash + # important information) + # + # Important: Comment this out if your server doesn't require authentication. 
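+  #
+  # Deployment note (assumption, not verified against the pwpush docs): this
+  # instance relays through smtp2go (smtp_user_name below), and the password is
+  # injected via PWP__MAIL__SMTP_PASSWORD in pwpush_server-compose_template.yaml
+  # rather than stored in this file; if the relay requires SMTP AUTH, the
+  # 'plain' type here (or the PWP__MAIL__SMTP_AUTHENTICATION override) would
+  # likely need to be enabled.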
+ # + # Environment Variable Override: PWP__MAIL__SMTP_AUTHENTICATION='plain' + # smtp_authentication: 'plain' + + # If your mail server requires authentication, set the username in this setting. + # Environment Variable Override: PWP__MAIL__SMTP_USER_NAME='apikey' + smtp_user_name: 'freshStart-OCI' + + # If your mail server requires authentication, set the password in this setting. + # Environment Variable Override: PWP__MAIL__SMTP_PASSWORD='something@&#$' + # smtp_password: '' + + # Use STARTTLS when connecting to your SMTP server and fail if unsupported. + # Environment Variable Override: PWP__MAIL__SMTP_STARTTLS='true' + # smtp_starttls: false + + # Detects if STARTTLS is enabled in your SMTP server and starts to use it. Defaults to true. + # Environment Variable Override: PWP__MAIL__SMTP_ENABLE_STARTTLS_AUTO='false' + smtp_enable_starttls_auto: true + + # Number of seconds to wait while attempting to open a connection. + # Environment Variable Override: PWP__MAIL__SMTP_OPEN_TIMEOUT='10' + smtp_open_timeout: 10 + + # Number of seconds to wait until timing-out a read(2) call. + # Environment Variable Override: PWP__MAIL__SMTP_READ_TIMEOUT='10' + smtp_read_timeout: 10 + + # When using TLS, you can set how OpenSSL checks the certificate. This is + # useful if you need to validate a self-signed and/or a wildcard certificate. + # This can be one of the OpenSSL verify constants, :none or :peer + # Environment Variable Override: PWP__MAIL__SMTP_OPENSSL_VERIFY_MODE='none' + # smtp_openssl_verify_mode: 'peer' + + # Configure the e-mail address which will be shown as 'From' in emails + # See config/initializers/devise.rb where this is used + # Environment Variable Override: PWP__MAIL__MAILER_SENDER='"Password Pusher" ' + mailer_sender: '"Password Pusher" ' + +### Docker Pre-compilation +# +# This is useful if you modified the assets (e.g. CSS, JS, images) to customize +# the theme etc... Assets are precompiled before serving. +# +# If you set a custom theme, you will need to precompile the assets on container boot. +# +# Pre-compilation isn't supported in this yaml file. It is only supported +# through the environment variable PWP_PRECOMPILE='true'. +# +# Environment Variable: PWP_PRECOMPILE='false' + +### Themes +# +# Password Pusher uses Bootswatch themes. See https://bootswatch.com/ +# +# The following are the available themes. The default theme is 'default'. +# +# 'cerulean', 'cosmo', 'cyborg', 'darkly', 'flatly', 'journal', 'litera', 'lumen', +# 'lux', 'materia', 'minty', 'morph', 'pulse', 'quartz', 'sandstone', 'simplex', +# 'sketchy', 'slate', 'solar', 'spacelab', 'superhero', 'united', 'vapor', 'yeti', 'zephyr' +# +# To change the theme, set the `theme` value to the name of the theme you +# want to use. +# +# Environment Variable Override: PWP__THEME='default' +# +# Important Note: This setting can only be controlled by environment variable. (PWP__THEME) +# It cannot be set in this config file. +theme: 'default' + +### Site Default Locale +# +# The default language for the application. This must be one of the +# valid/supported language codes from the list above. +# +# Note: This locale _must_ be in the list of enabled_language_codes. +# +# Example: default_locale: :es +# +# Environment Variable Override: PWP__DEFAULT_LOCALE='es' +default_locale: en + +### Language & Internationalization +# +# List of enabled languages for the application. 
+# +# To remove the availability of languages from the application entirely, +# comment out (or remove) the language code(s) from the `enabled_language_codes` +# list below. +# +enabled_language_codes: + - ca # 'Català' + - cs # 'Čeština' + - da # 'Dansk' + - de # 'Deutsch' + - en # 'English' + - en-GB # 'English (UK)' + - es # 'Español' + - eu # 'Euskara' + - fi # 'Suomi' + - fr # 'Français' + - hi # 'हिन्दी' + - hu # 'Magyar' + - id # 'Indonesian' + - is # 'Íslenska' + - it # 'Italiano' + - ja # '日本語' + - ko # '한국어' + - lv # 'Latviski' + - nl # 'Nederlands' + - 'no' # 'Norsk' # _no_ keyword in Ruby evaluates to false #-( + - pl # 'Polski' + - pt-BR # 'Português' + - pt-PT # 'Português' + - ro # 'Română' + - ru # 'Русский' + - sr # 'Српски' + - sk # 'Slovenský' + - sv # 'Svenska' + - th # 'ไทย' + - uk # 'Українська' + - ur # 'اردو' + - zh-CN # '中文' + +### Language & Internationalization +# +# Map of language codes to language name. +# Used internally for the language selector model. +# +# : '' +language_codes: + ca: 'Català' + cs: 'Čeština' + da: 'Dansk' + de: 'Deutsch' + en: 'English' + es: 'Español' + eu: 'Euskara' + en-GB: 'English (UK)' + fi: 'Suomi' + fr: 'Français' + hi: 'हिन्दी' + hu: 'Magyar' + id: 'Indonesian' + is: 'Íslenska' + it: 'Italiano' + ja: '日本語' + ko: '한국어' + lv: 'Latviski' + nl: 'Nederlands' + 'no': 'Norsk' # _no_ keyword in Ruby evaluates to false :-( + pl: 'Polski' + pt-BR: 'Português' + pt-PT: 'Português' + ro: 'Română' + ru: 'Русский' + sk: 'Slovenský' + sr: 'Српски' + sv: 'Svenska' + th: 'ไทย' + uk: 'Українська' + ur: 'اردو' + zh-CN: '中文' + +# Used internally for the language selector model. +# This provides a conversion of language codes to +# country codes show the correct flag icons. +# See: +# https://github.com/lipis/flag-icons +# https://flagicons.lipis.dev +country_codes: + ca: :es-ct + cs: :cz + da: :dk + de: :de + en: :us + en-GB: :gb + es: :es + eu: :es-pv + fi: :fi + fr: :fr + hi: :in + hu: :hu + id: :id + is: :is + it: :it + ja: :jp + ko: :kr + lv: :lv + nl: :nl + 'no': :no # _no_ keyword in Ruby evaluates to false :-( + pl: :pl + pt-BR: :br + pt-PT: :pt + ro: :ro + ru: :ru + sk: :sk + sr: :rs + sv: :se + th: :th + uk: :ua + ur: :pk + zh-CN: :cn + +# Configure the logging verbosity of the application. +# +# Valid values are: :debug, :info, :warn, :error, :fatal +log_level: :warn + +# In containers, it is usually desired to log to stdout +# instead of using log files (e.g. log/production.log). +# +log_to_stdout: true diff --git a/pwpush_server-setup b/pwpush_server-setup new file mode 100644 index 0000000..f56d6c2 --- /dev/null +++ b/pwpush_server-setup @@ -0,0 +1,14 @@ +#!/bin/bash + +# shellcheck source=pwpush_server-env +. 
"${HOME}"/"${USER}"-env + +echo -e "\n[+] setting up pwpush\n\n-------\n" + +mkdir -p "${VOLUME_PATH}"/config +mkdir -p "${VOLUME_PATH}"/database +cp "${USER}"-settings.yaml "${VOLUME_PATH}"/config + +envsubst <"${HOME}"/"${USER}"-compose_template.yaml >"${HOME}"/"${USER}"-compose.yaml + +sudo docker compose -f "${HOME}"/"${USER}"-compose.yaml up -d diff --git a/pwpush_server-teardown b/pwpush_server-teardown new file mode 100644 index 0000000..950d40d --- /dev/null +++ b/pwpush_server-teardown @@ -0,0 +1,14 @@ +#!/bin/sh + +username=pwpush_server + +# application +sudo docker compose -f /home/${username}/${username}-compose.yaml down -v + +uid_num=$(id -u $username) +sudo killall -9 -v -g -u $username +sudo crontab -r -u $username +sudo deluser --remove-all-files $username + +# clean-up +sudo find / -user "$uid_num" -delete diff --git a/recipes.knravish.me.conf b/recipes.knravish.me.conf new file mode 100644 index 0000000..a31b8d5 --- /dev/null +++ b/recipes.knravish.me.conf @@ -0,0 +1,18 @@ +server { + server_name recipes.knravish.me; + index index.html index.htm; + + location / { + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header Host $http_host; + proxy_pass http://127.0.0.1:9925; + proxy_redirect off; + proxy_set_header Access-Control-Allow-Origin *; + proxy_read_timeout 600s; + proxy_send_timeout 600s; + } + + listen 80; +} diff --git a/shlink_server-compose.yaml b/shlink_server-compose.yaml new file mode 100644 index 0000000..a620354 --- /dev/null +++ b/shlink_server-compose.yaml @@ -0,0 +1,13 @@ +--- +services: + shlink: + image: ghcr.io/shlinkio/shlink:stable + container_name: my_shlink + pull_policy: always + restart: unless-stopped + ports: + - 127.0.0.1:8080:8080 + environment: + DEFAULT_DOMAIN: lnk.knravish.me + IS_HTTPS_ENABLED: true + DISABLE_TRACKING: true diff --git a/shlink_server-setup b/shlink_server-setup new file mode 100644 index 0000000..274df17 --- /dev/null +++ b/shlink_server-setup @@ -0,0 +1,20 @@ +#!/bin/bash + +echo -e "\n[+] setting up shlink\n\n-------\n" + +sudo docker compose -f "${HOME}"/"${USER}"-compose.yaml up -d + +declare -A codes=( + ["in"]="https://linkedin.com/in/kaushik-ravishankar" + ["github"]="https://github.com/20kaushik02" + ["folio"]="https://knravish.me" + ["k23"]="https://k23.kurukshetraceg.org.in" +) + +# give it some time to start +sleep 1 + +for shortcode in "${!codes[@]}"; do + echo "$shortcode - ${codes[$shortcode]}" + sudo docker exec -it my_shlink shlink short-url:create -c "$shortcode" -rnf "${codes[$shortcode]}" +done diff --git a/shlink_server-teardown b/shlink_server-teardown new file mode 100644 index 0000000..36de4b4 --- /dev/null +++ b/shlink_server-teardown @@ -0,0 +1,14 @@ +#!/bin/bash + +username=shlink_server + +# application +sudo docker compose -f /home/${username}/${username}-compose.yaml down -v + +uid_num=$(id -u $username) +sudo killall -9 -v -g -u $username +sudo crontab -r -u $username +sudo deluser --remove-all-files $username + +# clean-up +sudo find / -user "$uid_num" -delete diff --git a/spotmgr_server-backup b/spotmgr_server-backup new file mode 100644 index 0000000..836a596 --- /dev/null +++ b/spotmgr_server-backup @@ -0,0 +1,42 @@ +#!/bin/bash + +# shellcheck source=spotmgr_server-env +. 
"${HOME}"/"${USER}"-env + +mkdir -p "${HOME}"/backup_logs +logFile=${HOME}/backup_logs/$(date +%y_%m).log + +{ + echo -e "\n[+] spotify-manager backup\n" + + mkdir -p /tmp/"${USER}"-backup + + sudo docker compose -f "${HOME}"/"${USER}"-compose.yaml stop + + sudo docker compose -f "${HOME}"/"${USER}"-compose.yaml start postgres + # shellcheck disable=SC2024 + sudo docker exec -u "${PUID}:${PGID}" -it spotify-manager-postgres sh -c \ + 'pg_dumpall -c --if-exists -U postgres' >/tmp/"${USER}"-backup/db.out + + sudo docker compose -f "${HOME}"/"${USER}"-compose.yaml start + + if ! rclone copy /tmp/"${USER}"-backup "${BUCKET_PATH}" -v; then + curl -Ss \ + -H "Title: Spotify Manager" \ + -H "Priority: 3" \ + -H "Tags: warning,backup" \ + -d "Backup not completed" \ + "${NOTIF_URL}" + rm -rf /tmp/"${USER}"-backup + exit 1 + fi + + curl -Ss \ + -H "Title: Spotify Manager" \ + -H "Priority: 2" \ + -H "Tags: heavy_check_mark,backup" \ + -d "Backup completed" \ + "${NOTIF_URL}" + rm -rf /tmp/"${USER}"-backup + +} &>>"$logFile" diff --git a/spotmgr_server-compose_template.yaml b/spotmgr_server-compose_template.yaml new file mode 100644 index 0000000..6b8a24c --- /dev/null +++ b/spotmgr_server-compose_template.yaml @@ -0,0 +1,56 @@ +--- +services: + postgres: + container_name: spotify-manager-postgres + image: postgres + restart: on-failure + environment: + POSTGRES_PASSWORD: ${POSTGRES_PASSWORD} + POSTGRES_DB: spotify-manager + volumes: + - ${VOLUME_PATH}/pgdata:/var/lib/postgresql/data + - /etc/passwd:/etc/passwd:ro + user: ${PUID}:${PGID} + healthcheck: + test: ['CMD-SHELL', 'psql -U postgres -d spotify-manager -c "select version();"'] + interval: 1s + retries: 5 + timeout: 5s + redis: + container_name: spotify-manager-redis + image: redis + restart: on-failure + volumes: + - ${VOLUME_PATH}/redisdata:/data + user: ${PUID}:${PGID} + healthcheck: + test: ['CMD-SHELL', 'redis-cli ping | grep PONG'] + interval: 1s + retries: 5 + timeout: 3s + api: + container_name: spotify-manager-api + image: kaushikr2/spotify-manager-api + init: true + restart: on-failure + ports: + - 127.0.0.1:9001:9001 + depends_on: + postgres: + condition: service_healthy + restart: true + redis: + condition: service_healthy + restart: true + environment: + NODE_ENV: production + SPOTMGR_PORT: 9001 + SPOTMGR_CLIENT_ID: ${SPOTMGR_CLIENT_ID} + SPOTMGR_CLIENT_SECRET: ${SPOTMGR_CLIENT_SECRET} + SPOTMGR_SESSION_SECRET: ${SPOTMGR_SESSION_SECRET} + SPOTMGR_TRUST_PROXY: 1 + SPOTMGR_BASE_DOMAIN: 'spotify-manager.knravish.me' + SPOTMGR_REDIRECT_URI: 'https://api.spotify-manager.knravish.me/api/auth/callback' + SPOTMGR_APP_URI: 'https://spotify-manager.knravish.me/' + SPOTMGR_DB_URI: 'postgres://postgres:${POSTGRES_PASSWORD}@postgres:5432/spotify-manager' + SPOTMGR_REDIS_URI: redis://redis:6379 diff --git a/spotmgr_server-cronjob b/spotmgr_server-cronjob new file mode 100644 index 0000000..bb1d96b --- /dev/null +++ b/spotmgr_server-cronjob @@ -0,0 +1 @@ +9 10 * * * /home/spotmgr_server/spotmgr_server-backup diff --git a/spotmgr_server-setup b/spotmgr_server-setup new file mode 100644 index 0000000..8860e53 --- /dev/null +++ b/spotmgr_server-setup @@ -0,0 +1,22 @@ +#!/bin/bash + +echo -e "\n[+] setting up spotify-manager\n\n-------\n" + +# shellcheck source=spotmgr_server-env +. "${HOME}"/"${USER}"-env + +mkdir -p "${VOLUME_PATH}"/{pg,redis}data +envsubst <"${HOME}"/"${USER}"-compose_template.yaml >"${HOME}"/"${USER}"-compose.yaml + +sudo docker compose -f "${HOME}"/"${USER}"-compose.yaml up -d + +echo "[+] restoring database from backup..." 
+ +sudo docker compose -f "${HOME}"/"${USER}"-compose.yaml stop +sudo docker compose -f "${HOME}"/"${USER}"-compose.yaml start postgres + +rclone copy "${BUCKET_PATH}" "${HOME}" -v +cat db.out | sudo docker exec -i spotify-manager-postgres psql -U postgres -X + +echo "[+] restarting..." +sudo docker compose -f "${HOME}"/"${USER}"-compose.yaml start diff --git a/stirling_server-compose_template.yaml b/stirling_server-compose_template.yaml new file mode 100644 index 0000000..9c4e569 --- /dev/null +++ b/stirling_server-compose_template.yaml @@ -0,0 +1,36 @@ +--- +services: + stirling: + image: frooodle/s-pdf:latest + container_name: stirling-pdf + pull_policy: always + restart: unless-stopped + ports: + - 127.0.0.1:${PORT}:8080 + volumes: + - type: bind + source: ${VOLUME_PATH}/trainingData + target: /usr/share/tessdata + bind: + create_host_path: true + - type: bind + source: ${VOLUME_PATH}/extraConfigs + target: /configs + bind: + create_host_path: true + - type: bind + source: ${VOLUME_PATH}/logs + target: /logs + bind: + create_host_path: true + environment: + PUID: ${PUID} + PGID: ${PGID} + DOCKER_ENABLE_SECURITY: true + SECURITY_ENABLE_LOGIN: true + SECURITY_INITIALLOGIN_USERNAME: ${INITIAL_USERNAME} + SECURITY_INITIALLOGIN_PASSWORD: ${INITIAL_PASSWORD} + SECURITY_CSRFDISABLED: false + SYSTEM_SHOWUPDATEONLYADMIN: true + INSTALL_BOOK_AND_ADVANCED_HTML_OPS: false + LANGS: en_US diff --git a/stirling_server-cronjob b/stirling_server-cronjob new file mode 100644 index 0000000..b560763 --- /dev/null +++ b/stirling_server-cronjob @@ -0,0 +1 @@ +10 11 * * 2 /home/stirling_server/stirling_server-update diff --git a/stirling_server-setup b/stirling_server-setup new file mode 100644 index 0000000..69d7bb8 --- /dev/null +++ b/stirling_server-setup @@ -0,0 +1,11 @@ +#!/bin/bash + +echo -e "\n[+] setting up stirling-pdf\n\n-------\n" + +# shellcheck source=stirling_server-env +. 
"${HOME}"/"${USER}"-env + +mkdir -p "${VOLUME_PATH}" +envsubst <"${HOME}"/"${USER}"-compose_template.yaml >"${HOME}"/"${USER}"-compose.yaml + +sudo docker compose -f "${HOME}"/"${USER}"-compose.yaml up -d diff --git a/stirling_server-teardown b/stirling_server-teardown new file mode 100644 index 0000000..6d4d56e --- /dev/null +++ b/stirling_server-teardown @@ -0,0 +1,14 @@ +#!/bin/bash + +username=stirling_server + +# application +sudo docker compose -f /home/${username}/${username}-compose.yaml down -v + +uid_num=$(id -u $username) +sudo killall -9 -v -g -u $username +sudo crontab -r -u $username +sudo deluser --remove-all-files $username + +# clean-up +sudo find / -user "$uid_num" -delete diff --git a/stirling_server-update b/stirling_server-update new file mode 100644 index 0000000..220a290 --- /dev/null +++ b/stirling_server-update @@ -0,0 +1,11 @@ +#!/bin/bash + +mkdir -p "${HOME}"/update_logs +logFile=${HOME}/update_logs/$(date +%y_%m).log +{ + echo -e "\n[+] updating stirling-pdf\n" + + sudo docker compose -f "${HOME}"/"${USER}"-compose.yaml pull && + sudo docker compose -f "${HOME}"/"${USER}"-compose.yaml up -d --always-recreate-deps --remove-orphans && + yes | sudo docker image prune -af +} &>>"$logFile" diff --git a/syncthing.knravish.me.conf b/syncthing.knravish.me.conf new file mode 100644 index 0000000..91f12dd --- /dev/null +++ b/syncthing.knravish.me.conf @@ -0,0 +1,16 @@ +server { + server_name syncthing.knravish.me; + index index.html index.htm; + + include /etc/nginx/snippets/authelia-location.conf; + + set $upstream http://127.0.0.1:8384; + + location / { + include /etc/nginx/snippets/proxy.conf; + include /etc/nginx/snippets/authelia-authrequest.conf; + proxy_pass $upstream; + } + + listen 80; +} diff --git a/syncthing_server-backup b/syncthing_server-backup new file mode 100644 index 0000000..e1ca41e --- /dev/null +++ b/syncthing_server-backup @@ -0,0 +1,39 @@ +#!/bin/bash + +# shellcheck source=syncthing_server-env +. "${HOME}"/"${USER}"-env + +mkdir -p "${HOME}"/backup_logs +logFile=${HOME}/backup_logs/$(date +%y_%m).log + +{ + echo -e "\n[+] syncthing backup\n" + + mkdir -p /tmp/"${USER}"-backup + + syncthing cli operations shutdown + + cp -pr "${CONFIG_PATH}"/* /tmp/"${USER}"-backup + + systemctl --user restart syncthing.service + + if ! rclone copy /tmp/"${USER}"-backup "${BUCKET_PATH}" --exclude ./*.db/** -v; then + curl -Ss \ + -H "Title: Syncthing" \ + -H "Priority: 3" \ + -H "Tags: warning,backup" \ + -d "Backup not completed" \ + "${NOTIF_URL}" + rm -r /tmp/"${USER}"-backup + exit 1 + fi + + curl -Ss \ + -H "Title: Syncthing" \ + -H "Priority: 2" \ + -H "Tags: heavy_check_mark,backup" \ + -d "Backup completed" \ + "${NOTIF_URL}" + rm -r /tmp/"${USER}"-backup + +} &>>"$logFile" diff --git a/syncthing_server-cronjob b/syncthing_server-cronjob new file mode 100644 index 0000000..4f71fac --- /dev/null +++ b/syncthing_server-cronjob @@ -0,0 +1 @@ +11 10 * * * /home/syncthing_server/syncthing_server-backup diff --git a/syncthing_server-setup b/syncthing_server-setup new file mode 100644 index 0000000..ebe3877 --- /dev/null +++ b/syncthing_server-setup @@ -0,0 +1,18 @@ +#!/bin/bash + +# Syncthing starts running and installs user service upon installation (in instance-setup) +echo -e "\n[+] setting up syncthing\n-------\n" + +# shellcheck source=syncthing_server-env +. "${HOME}/${USER}-env" + +echo -e "[+] restoring config from backup..." 
+ +syncthing cli operations shutdown + +rm -rf "${CONFIG_PATH}"/*.db # regenerate db to avoid data loss/errors +rclone copy "${BUCKET_PATH}" "${CONFIG_PATH}" -v + +echo "[+] restarting..." + +systemctl --user restart syncthing.service diff --git a/ubuntu-cronjob b/ubuntu-cronjob new file mode 100644 index 0000000..81d104a --- /dev/null +++ b/ubuntu-cronjob @@ -0,0 +1,2 @@ +USER=ubuntu +0 7 * * 2 /home/ubuntu/ubuntu_auto_apt_upgrade diff --git a/ubuntu_auto_apt_upgrade b/ubuntu_auto_apt_upgrade new file mode 100644 index 0000000..7dba9ea --- /dev/null +++ b/ubuntu_auto_apt_upgrade @@ -0,0 +1,26 @@ +#!/bin/bash + +mkdir -p "${HOME}"/upgrade_logs +logFile=${HOME}/upgrade_logs/$(date +%y_%m).log +rebootDelayInMinutes=10 + +{ + echo "[+] $(date -I'seconds')" + echo "[+] Auto apt upgrade starting..." + sudo apt-get update + + sudo apt-get upgrade -y + + if [[ -s /var/run/reboot-required ]]; then + curl -Ss \ + -H "Title: System Reboot scheduled" \ + -H "Priority: 3" \ + -H "Tags: loudspeaker,reboot" \ + -d "Rebooting in $rebootDelayInMinutes minutes. Reason: package updates" \ + "${NOTIF_URL}" + echo "[!] Rebooting in $rebootDelayInMinutes minutes..." + echo 'sudo reboot' | at now + $rebootDelayInMinutes minutes + else + echo "[+] Upgrade complete, no reboot required." + fi +} &>>"$logFile" diff --git a/vikunja_server-backup b/vikunja_server-backup new file mode 100644 index 0000000..5becb93 --- /dev/null +++ b/vikunja_server-backup @@ -0,0 +1,39 @@ +#!/bin/bash + +# shellcheck source=vikunja_server-env +. "${HOME}"/"${USER}"-env + +mkdir -p "${HOME}"/backup_logs +logFile=${HOME}/backup_logs/$(date +%y_%m).log + +{ + echo -e "\n[+] vikunja backup\n" + + mkdir -p /tmp/"${USER}"-backup + + sudo docker compose -f "${HOME}"/"${USER}"-compose.yaml stop + + cp -pr "${VOLUME_PATH}"/* /tmp/"${USER}"-backup + + sudo docker compose -f "${HOME}"/"${USER}"-compose.yaml start + + if ! 
rclone copy /tmp/"${USER}"-backup "${BUCKET_PATH}" -v; then + curl -Ss \ + -H "Title: Vikunja" \ + -H "Priority: 3" \ + -H "Tags: warning,backup" \ + -d "Backup not completed" \ + "${NOTIF_URL}" + rm -r /tmp/"${USER}"-backup + exit 1 + fi + + curl -Ss \ + -H "Title: Vikunja" \ + -H "Priority: 2" \ + -H "Tags: heavy_check_mark,backup" \ + -d "Backup completed" \ + "${NOTIF_URL}" + rm -r /tmp/"${USER}"-backup + +} &>>"$logFile" diff --git a/vikunja_server-compose_template.yaml b/vikunja_server-compose_template.yaml new file mode 100644 index 0000000..13c4394 --- /dev/null +++ b/vikunja_server-compose_template.yaml @@ -0,0 +1,24 @@ +services: + vikunja: + image: vikunja/vikunja + container_name: vikunja + pull_policy: always + restart: unless-stopped + ports: + - 127.0.0.1:${PORT}:3456 + user: ${PUID}:${PGID} + volumes: + - type: bind + source: ${VOLUME_PATH}/files + target: /app/vikunja/files + bind: + create_host_path: true + - type: bind + source: ${VOLUME_PATH}/db + target: /db + bind: + create_host_path: true + environment: + VIKUNJA_SERVICE_JWTSECRET: ${JWT_SECRET} + VIKUNJA_SERVICE_PUBLICURL: ${BASE_URL} + VIKUNJA_DATABASE_PATH: /db/vikunja.db diff --git a/vikunja_server-cronjob b/vikunja_server-cronjob new file mode 100644 index 0000000..94ca47f --- /dev/null +++ b/vikunja_server-cronjob @@ -0,0 +1,2 @@ +12 10 * * * /home/vikunja_server/vikunja_server-backup +12 11 * * 2 /home/vikunja_server/vikunja_server-update diff --git a/vikunja_server-setup b/vikunja_server-setup new file mode 100644 index 0000000..2f5ccae --- /dev/null +++ b/vikunja_server-setup @@ -0,0 +1,22 @@ +#!/bin/bash + +echo -e "\n[+] setting up vikunja\n\n-------\n" + +# shellcheck source=vikunja_server-env +. "${HOME}"/"${USER}"-env + +mkdir -p "${VOLUME_PATH}"/{files,db} + +envsubst <"${HOME}"/"${USER}"-compose_template.yaml >"${HOME}"/"${USER}"-compose.yaml + +sudo docker compose -f "${HOME}"/"${USER}"-compose.yaml up -d + +echo "[+] restoring from backup..." 
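+# restore flow: stop the stack, clear the data volume, pull the files and
+# the sqlite database back from the bucket, then start again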
+ +sudo docker compose -f "${HOME}"/"${USER}"-compose.yaml stop + +rm -r "${VOLUME_PATH:?}"/* + +rclone copy "${BUCKET_PATH}" "${VOLUME_PATH}" -v + +sudo docker compose -f "${HOME}"/"${USER}"-compose.yaml start diff --git a/vikunja_server-teardown b/vikunja_server-teardown new file mode 100644 index 0000000..6ff72ab --- /dev/null +++ b/vikunja_server-teardown @@ -0,0 +1,14 @@ +#!/bin/bash + +username=vikunja_server + +# application +sudo docker compose -f /home/${username}/${username}-compose.yaml down -v + +uid_num=$(id -u $username) +sudo killall -9 -v -g -u $username +sudo crontab -r -u $username +sudo deluser --remove-all-files $username + +# clean-up +sudo find / -user "$uid_num" -delete diff --git a/vikunja_server-update b/vikunja_server-update new file mode 100644 index 0000000..be09450 --- /dev/null +++ b/vikunja_server-update @@ -0,0 +1,11 @@ +#!/bin/bash + +mkdir -p "${HOME}"/update_logs +logFile=${HOME}/update_logs/$(date +%y_%m).log +{ + echo -e "\n[+] updating vikunja\n" + + sudo docker compose -f "${HOME}"/"${USER}"-compose.yaml pull && + sudo docker compose -f "${HOME}"/"${USER}"-compose.yaml up -d --always-recreate-deps --remove-orphans && + yes | sudo docker image prune -af +} &>>"$logFile" diff --git a/vpn.knravish.me.conf b/vpn.knravish.me.conf new file mode 100644 index 0000000..6aabfaf --- /dev/null +++ b/vpn.knravish.me.conf @@ -0,0 +1,16 @@ +server { + server_name vpn.knravish.me; + index index.html index.htm; + + include /etc/nginx/snippets/authelia-location.conf; + + set $upstream http://127.0.0.1:51821; + + location / { + include /etc/nginx/snippets/proxy.conf; + include /etc/nginx/snippets/authelia-authrequest.conf; + proxy_pass $upstream; + } + + listen 80; +} diff --git a/vtt.knravish.me.conf b/vtt.knravish.me.conf new file mode 100644 index 0000000..8e0b04a --- /dev/null +++ b/vtt.knravish.me.conf @@ -0,0 +1,16 @@ +server { + server_name vtt.knravish.me; + + include /etc/nginx/snippets/authelia-location.conf; + + set $upstream http://127.0.0.1:30000; + + location / { + include /etc/nginx/snippets/proxy.conf; + include /etc/nginx/snippets/websocket.conf; + include /etc/nginx/snippets/authelia-authrequest.conf; + proxy_pass $upstream; + } + + listen 80; +} diff --git a/wg/Split Tunneling WireGuard.pdf b/wg/Split Tunneling WireGuard.pdf new file mode 100644 index 0000000..a73dba0 Binary files /dev/null and b/wg/Split Tunneling WireGuard.pdf differ diff --git a/wg/all_proxied/PostUp copy.ps1 b/wg/all_proxied/PostUp copy.ps1 new file mode 100644 index 0000000..0211346 --- /dev/null +++ b/wg/all_proxied/PostUp copy.ps1 @@ -0,0 +1,26 @@ +# Wireguard tunnel interface details +$wgInterface = Get-NetAdapter -InterfaceAlias $env:WIREGUARD_TUNNEL_NAME +$wgAddress = (Get-NetIPAddress -InterfaceAlias $env:WIREGUARD_TUNNEL_NAME -AddressFamily IPv4 ).IPAddress + +# add default 0.0.0.0/0 route with low priority +route add 0.0.0.0 mask 0.0.0.0 0.0.0.0 IF $wgInterface.ifIndex metric 999 + +# Set the interface metric for the WireGuard tunnel +Set-NetIPInterface -InterfaceIndex $wgInterface.ifIndex -InterfaceMetric 999 + +# Navigate to the 3proxy directory +Set-Location "\\3proxy-0.9.4-x64\bin64\" +$cfg_file = "3proxy-wireguard.cfg" + +# Create 3proxy configuration file +'auth none' | Out-File -FilePath $cfg_file -Encoding ASCII +'internal 127.0.0.1' | Out-File -FilePath $cfg_file -Append -Encoding ASCII +"external ${wgAddress}" | Out-File -FilePath $cfg_file -Append -Encoding ASCII + +# rest of the proxy configuration +'socks' | Out-File -FilePath $cfg_file -Append -Encoding 
ASCII +'log "%USERPROFILE%\.logs\3proxy\%Y%m%d.log" D' | Out-File -FilePath $cfg_file -Append -Encoding ASCII +'rotate 30' | Out-File -FilePath $cfg_file -Append -Encoding ASCII + +# Start 3proxy in the background +Start-Process -FilePath '.\3proxy.exe' -ArgumentList $cfg_file -NoNewWindow diff --git a/wg/connections/README.md b/wg/connections/README.md new file mode 100644 index 0000000..134cd41 --- /dev/null +++ b/wg/connections/README.md @@ -0,0 +1 @@ +# Remember to keep your wg confs safe diff --git a/wg_server-backup b/wg_server-backup new file mode 100644 index 0000000..67e1b9c --- /dev/null +++ b/wg_server-backup @@ -0,0 +1,40 @@ +#!/bin/bash + +# shellcheck source=wg_server-env +. "${HOME}"/"${USER}"-env + +mkdir -p "${HOME}"/backup_logs +logFile=${HOME}/backup_logs/$(date +%y_%m).log + +{ + echo -e "\n[+] wg-easy backup\n" + + mkdir -p /tmp/"${USER}"-backup + + sudo docker compose -f "${HOME}"/"${USER}"-compose.yaml stop + + sudo cp -pr "${VOLUME_PATH}"/wg0.json /tmp/"${USER}"-backup + + sudo docker compose -f "${HOME}"/"${USER}"-compose.yaml start + + sudo chown "${USER}":"${USER}" /tmp/"${USER}"-backup/* + if ! rclone copy /tmp/"${USER}"-backup "${BUCKET_PATH}" -v; then + curl -Ss \ + -H "Title: WG-Easy" \ + -H "Priority: 3" \ + -H "Tags: warning,backup" \ + -d "Backup not completed" \ + "${NOTIF_URL}" + rm -rf /tmp/"${USER}"-backup + exit 1 + fi + + curl -Ss \ + -H "Title: WG-Easy" \ + -H "Priority: 2" \ + -H "Tags: heavy_check_mark,backup" \ + -d "Backup completed" \ + "${NOTIF_URL}" + rm -rf /tmp/"${USER}"-backup + +} &>>"$logFile" diff --git a/wg_server-compose_template.yaml b/wg_server-compose_template.yaml new file mode 100644 index 0000000..4589de1 --- /dev/null +++ b/wg_server-compose_template.yaml @@ -0,0 +1,29 @@ +--- +services: + wg-easy: + image: ghcr.io/wg-easy/wg-easy:14 # breaking changes... + container_name: wg-easy + volumes: + - type: bind + source: ${VOLUME_PATH} + target: /etc/wireguard + bind: + create_host_path: true + ports: + - '${UDP_PORT}:51820/udp' + - '127.0.0.1:${GUI_PORT}:51821/tcp' + pull_policy: always + restart: unless-stopped + cap_add: + - NET_ADMIN + - SYS_MODULE + sysctls: + net.ipv4.ip_forward: 1 + net.ipv4.conf.all.src_valid_mark: 1 + environment: + PASSWORD_HASH: ${PASSWORD_HASH} + WG_HOST: ${WG_HOST} + WG_DEVICE: ${WG_DEVICE} # WAN interface + WG_PERSISTENT_KEEPALIVE: 25 + WG_POST_UP: 'iptables -I FORWARD -i wg0 -d 10.0.0.0/8 -j REJECT; iptables -I FORWARD -i wg0 -s 10.8.0.0/24 -d 10.0.0.0/8 -j ACCEPT; iptables -t nat -A POSTROUTING -o eth0 -j MASQUERADE' + WG_POST_DOWN: 'iptables -I FORWARD -D wg0 -d 10.0.0.0/8 -j REJECT; iptables -I FORWARD -D wg0 -s 10.8.0.0/24 -d 10.0.0.0/8 -j ACCEPT; iptables -t nat -D POSTROUTING -o eth0 -j MASQUERADE' diff --git a/wg_server-cronjob b/wg_server-cronjob new file mode 100644 index 0000000..bca8236 --- /dev/null +++ b/wg_server-cronjob @@ -0,0 +1,2 @@ +13 10 * * * /home/wg_server/wg_server-backup +13 11 * * 2 /home/wg_server/wg_server-update diff --git a/wg_server-setup b/wg_server-setup new file mode 100644 index 0000000..d5ee4b6 --- /dev/null +++ b/wg_server-setup @@ -0,0 +1,22 @@ +#!/bin/bash + +echo -e "\n[+] setting up wg-easy\n\n-------\n" + +# shellcheck source=wg_server-env +. "${HOME}"/"${USER}"-env + +envsubst <"${HOME}"/"${USER}"-compose_template.yaml >"${HOME}"/"${USER}"-compose.yaml + +sudo docker compose -f "${HOME}"/"${USER}"-compose.yaml up -d + +echo "[+] restoring configs from backup..." 
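+# restore flow: stop wg-easy, clear the volume, pull the backup into $HOME,
+# copy wg0.json back into place, then restart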
+ +sudo docker compose -f "${HOME}"/"${USER}"-compose.yaml stop +sudo rm "${VOLUME_PATH}"/* + +rclone copy "${BUCKET_PATH}" "${HOME}" -v +sudo cp wg0.json "${VOLUME_PATH}"/ + +echo "[+] restarting..." + +sudo docker compose -f "${HOME}"/"${USER}"-compose.yaml start diff --git a/wg_server-teardown b/wg_server-teardown new file mode 100644 index 0000000..2e5d32b --- /dev/null +++ b/wg_server-teardown @@ -0,0 +1,14 @@ +#!/bin/bash + +username=wg_server + +# application +sudo docker compose -f /home/${username}/${username}-compose.yaml down -v + +uid_num=$(id -u $username) +sudo killall -9 -v -g -u $username +sudo crontab -r -u $username +sudo deluser --remove-all-files $username + +# clean-up +sudo find / -user "$uid_num" -delete diff --git a/wg_server-update b/wg_server-update new file mode 100644 index 0000000..9602685 --- /dev/null +++ b/wg_server-update @@ -0,0 +1,11 @@ +#!/bin/bash + +mkdir -p "${HOME}"/update_logs +logFile=${HOME}/update_logs/$(date +%y_%m).log +{ + echo -e "\n[+] updating wg-easy\n" + + sudo docker compose -f "${HOME}"/"${USER}"-compose.yaml pull && + sudo docker compose -f "${HOME}"/"${USER}"-compose.yaml up -d --always-recreate-deps --remove-orphans && + yes | sudo docker image prune -af +} &>>"$logFile" diff --git a/windows copy.md b/windows copy.md new file mode 100644 index 0000000..58da7c1 --- /dev/null +++ b/windows copy.md @@ -0,0 +1,223 @@ +# Windows machine stuff + +## Windows SSH server setup + +- make sure openssh server optional feature is enabled + +```powershell +powershell.exe "Get-WindowsCapability -Online | ? Name -like 'OpenSSH.Server*'" +``` + +- configuration, firewall rule, ssh-agent + +```powershell +# Set the sshd service to be started automatically +Get-Service -Name sshd | Set-Service -StartupType Automatic + +# Now start the sshd service +Start-Service sshd + +# Configure port if needed +New-NetFirewallRule -Name sshd -DisplayName 'OpenSSH Server (sshd)' -Enabled True -Direction Inbound -Protocol TCP -Action Allow -LocalPort 22 + +# Generate SSH keypair +ssh-keygen.exe -t ed25519 + +# Not sure if this ssh-agent stuff is needed but ok + +# By default the ssh-agent service is disabled. Configure it to start automatically. +# Make sure you're running as an Administrator. 
+Get-Service ssh-agent | Set-Service -StartupType Automatic + +# Start the service +Start-Service ssh-agent + +# This should return a status of Running +Get-Service ssh-agent + +# Now load your key files into ssh-agent +ssh-add $env:USERPROFILE\.ssh\id_ed25519 + +# Main part + +# Get the public key file generated previously on your client +$authorizedKey = Get-Content -Path $env:USERPROFILE\.ssh\id_ed25519.pub + +# Generate the PowerShell to be run remote that will copy the public key file generated previously on your client to the authorized_keys file on your server +$remotePowershell = "powershell New-Item -Force -ItemType Directory -Path $env:USERPROFILE\.ssh; Add-Content -Force -Path $env:USERPROFILE\.ssh\authorized_keys -Value '$authorizedKey'" + +# Connect to your server and run the PowerShell using the $remotePowerShell variable +ssh "$(whoami)@localhost" $remotePowershell +``` + +- edit `%PROGRAMDATA%/ssh/sshd_config` as administrator + +```ssh-config +PermitRootLogin no +MaxAuthTries 1 + +PubkeyAuthentication yes + +PasswordAuthentication no + +#Match Group administrators +# AuthorizedKeysFile __PROGRAMDATA__/ssh/administrators_authorized_keys +``` + +## WSL + +### prevent shutdown + +- [see here](https://github.com/microsoft/WSL/issues/8854#issuecomment-1490454734) +- the service file + +```systemd +[Unit] +Description=Keep Distro Alive + +[Service] +# cleanup if already waiting +# get waitfor path by `which waitfor` +ExecStartPre=/mnt/c/Windows/system32/waitfor.exe /si MakeDistroAlive +ExecStart=/mnt/c/Windows/system32/waitfor.exe MakeDistroAlive + +[Install] +WantedBy=multi-user.target +``` + +### networking mode - mirrored + +- [see here](https://superuser.com/a/1671057) +- create/add to `%USERPROFILE%/.wslconfig`: + +```ini +[wsl2] +networkingMode=mirrored +``` + +### get Wireguard interface + +- interface should exist, else it'll be blank + +```bash +wg_if=$(ip -4 -brief addr show | grep $wg_if_addr | awk '{printf "%s",$1;}') +curl -v --interface $wg_if +``` + +## 3proxy (native port for windows) + +- download `.zip` from [github releases (last checked ver - 0.9.4)](https://github.com/3proxy/3proxy/releases) and extract +- create a `.cfg` file: + +```ini +system "echo '3proxy up!'" + +config "\\3proxy-0.9.4-x64\bin64\3proxy.cfg" +monitor "\\3proxy-0.9.4-x64\bin64\3proxy.cfg" + +log "%USERPROFILE%\.logs\3proxy\%Y%m%d.log" D # the D at the end is important + +rotate 30 + +external 10.8.0.2 +internal 127.0.0.1 + +service + +auth none +socks +``` + +## The Wireguard split-tunnel problem + +- i wish to route certain _applications_: not IP address ranges, but programs, over a wireguard tunnel +- in particular, i wish to do this for traffic originating from my wireguard peer running on my local Windows machine, and the tunnel in question connects to a cloud VPS running Wireguard +- i believe this is called 'application-based split tunneling' + +- my understanding is that this is something that wireguard's Windows client does not support out of the box +- however, if my assumption is correct, i have an alternative approach in mind: + - first, whenever the wireguard interface gets created on my machine, it would also create the corresponding routes for the tunnel, and these routes have a low metric value + - here, i would set the metric value of the new route(s) to a value higher than the default route that routes most/all the traffic, thereby deprioritizing the tunnel + - automate this part to update the metric on interface creation/teardown + - this would effectively leave the 
tunnel active but unused + - then, i would create a SOCKS proxy on my local machine, to localhost itself + - finally, i would bind any application that would use the wireguard tunnel to this proxy, through the application's settings itself if it provides such functionality, or through third-party applications, such as Proxifier + +### Disable automatic route creation + +- in the [Interface] section of your tunnel config, add `Table = off` + - this informs WireGuard not to create a default route automatically +- note that this also disables blocking of untunneled traffic (kill-switch functionality), which is what we want in order to achieve split-tunneling +- this is a must. direct route manipulation through scripting is not permitted, if the kill-switch functionality is active. it will simply drop traffic (IIRC) + +### Enable Wireguard scripts + +- [scripts are not enabled in Wireguard Windows by default](https://github.com/WireGuard/wireguard-windows/blob/master/docs/adminregistry.md) + - go to `Computer\HKEY_LOCAL_MACHINE\SOFTWARE` + - right-click `SOFTWARE` in the navigation pane, click `New -> Key`, name it `WireGuard` + - create a new `DWORD (32-bit) Value` in the new created key named `DangerousScriptExecution` + - set its value to `1` +- now you can add `PreUp`, `PostUp`, `PreDown`, `PostDown` scripts + +#### PostUp script + +- since we disabled automatic default route(s) addition to table, we have to add the necessary routes + - modify routes according to allowedIPs + - given case is 0.0.0.0/0 + - for other cases, see what routes WireGuard generates on its own normally, and add the missing route(s) from those + - other routes get added automatically + +```powershell +'postup start' | Out-File -FilePath "${PSScriptRoot}\PostUp.log" + +# Wireguard tunnel interface details +$wgInterface = Get-NetAdapter -InterfaceAlias $env:WIREGUARD_TUNNEL_NAME +$wgAddress = (Get-NetIPAddress -InterfaceAlias $env:WIREGUARD_TUNNEL_NAME -AddressFamily IPv4 ).IPAddress + +# add default 0.0.0.0/0 route with low priority +route add 0.0.0.0 mask 0.0.0.0 0.0.0.0 IF $wgInterface.ifIndex metric 999 + +# Set the interface metric for the WireGuard tunnel +Set-NetIPInterface -InterfaceIndex $wgInterface.ifIndex -InterfaceMetric 999 + +# Navigate to the 3proxy directory +Set-Location "\\3proxy-0.9.4-x64\bin64\" +$cfg_file = "3proxy-wireguard.cfg" + +# Create 3proxy configuration file +'auth none' | Out-File -FilePath $cfg_file -Encoding ASCII +'internal 127.0.0.1' | Out-File -FilePath $cfg_file -Append -Encoding ASCII +"external ${wgAddress}" | Out-File -FilePath $cfg_file -Append -Encoding ASCII + +# rest of the proxy configuration +'socks' | Out-File -FilePath $cfg_file -Append -Encoding ASCII +'log "%USERPROFILE%\.logs\3proxy\%Y%m%d.log" D' | Out-File -FilePath $cfg_file -Append -Encoding ASCII +'rotate 30' | Out-File -FilePath $cfg_file -Append -Encoding ASCII + +# Start 3proxy in the background +Start-Process -FilePath '.\3proxy.exe' -ArgumentList $cfg_file -NoNewWindow + +'postup end' | Out-File -FilePath "${PSScriptRoot}\PostUp.log" -Append + +``` + +#### PreDown script + +- make sure to specify all routes created in the `PostUp` script + +```powershell +'predown start' | Out-File -FilePath "${PSScriptRoot}\PreDown.log" + +# WireGuard tunnel details +$wgInterface = Get-NetAdapter -Name $env:WIREGUARD_TUNNEL_NAME + +# Delete the default 0.0.0.0/0 route using the interface index +route delete 0.0.0.0 mask 0.0.0.0 0.0.0.0 if $wgInterface.ifIndex + +# Terminate any running instances of 3proxy.exe +Set-Location 
"\\3proxy-0.9.4-x64\bin64\" +Stop-Process -Name "3proxy.exe" -Force + +'predown end' | Out-File -FilePath "${PSScriptRoot}\PreDown.log" -Append + +```