Added folders as well...

NetShade 2024-07-04 13:15:34 +02:00
commit f72d79958d
62 changed files with 3248 additions and 0 deletions

1
README.md Normal file

@ -0,0 +1 @@
*README.md*


@ -0,0 +1,30 @@
version: '3'
services:
actual_server:
image: docker.io/actualbudget/actual-server:latest
container_name: actual
network_mode: bridge
ports:
# This line makes Actual available at port 5006 of the device you run the server on,
# i.e. http://localhost:5006. You can change the first number to change the port, if you want.
- '5006:5006'
environment:
# Uncomment any of the lines below to set configuration options.
#- ACTUAL_HTTPS_KEY=/data/selfhost.key
#- ACTUAL_HTTPS_CERT=/data/selfhost.crt
- ACTUAL_UPLOAD_FILE_SYNC_SIZE_LIMIT_MB=20
- ACTUAL_UPLOAD_SYNC_ENCRYPTED_FILE_SYNC_SIZE_LIMIT_MB=50
- ACTUAL_UPLOAD_FILE_SIZE_LIMIT_MB=20
# See all options and more details at https://actualbudget.github.io/docs/Installing/Configuration
# !! If you are not using any of these options, remove the 'environment:' tag entirely.
volumes:
# Change './actual-data' below to the path to the folder you want Actual to store its data in on your server.
# '/data' is the path Actual will look for its files in by default, so leave that as-is.
- actual-data:/data
#- /etc/letsencrypt/live/niefelheim.com/fullchain.pem:/data/selfhost.crt:ro
#- /etc/letsencrypt/live/niefelheim.com/privkey.pem:/data/selfhost.key:ro
restart: unless-stopped
volumes:
actual-data:
external: true
name: actual-data
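
Note: the volume above is declared external, so Compose will not create it on its own. A minimal first-run sketch, assuming the Compose v2 plugin and that the commands run in the directory holding this file:
# the external volume must exist before the first start
docker volume create actual-data
docker compose up -d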


@ -0,0 +1,139 @@
version: "3.7"
services:
bazarr:
container_name: bazarr
image: ghcr.io/hotio/bazarr:latest
network_mode: bridge
restart: unless-stopped
logging:
driver: json-file
ports:
- "6767:6767"
environment:
- PUID=992
- PGID=979
- UMASK=002
- TZ=Europe/Stockholm
volumes:
- "bazarr:/config"
- "/etc/localtime:/etc/localtime:ro"
- "/data/media/movies:/media/movies"
- "/data/media/tv:/media/tv"
- "/data/media/trash/tv:/media/trash"
- "/data/backup/arr/bazarr:/config/backup"
- "/var/cloud/transmission/downloads:/data/torrents"
lidarr:
container_name: lidarr
image: ghcr.io/hotio/lidarr:latest
network_mode: bridge
restart: unless-stopped
logging:
driver: json-file
ports:
- "8686:8686"
environment:
- PUID=993
- PGID=979
- UMASK=002
- TZ=Europe/Stockholm
volumes:
- "lidarr:/config"
- "/etc/localtime:/etc/localtime:ro"
- "/data/media/music:/media/music"
- "/data/media/trash/music:/media/trash"
- "/data/backup/arr/lidarr:/config/backup"
- "/var/cloud/transmission/downloads:/data/torrents"
prowlarr:
container_name: prowlarr
image: ghcr.io/hotio/prowlarr:latest
network_mode: bridge
restart: unless-stopped
logging:
driver: json-file
ports:
- "9696:9696"
environment:
- PUID=988
- PGID=979
- UMASK=002
- TZ=Europe/Stockholm
volumes:
- "prowlarr:/config"
- "/etc/localtime:/etc/localtime:ro"
- "/data/backup/arr/prowlarr:/config/backup"
radarr:
container_name: radarr
image: ghcr.io/hotio/radarr:latest
network_mode: bridge
restart: unless-stopped
logging:
driver: json-file
ports:
- "7878:7878"
environment:
- PUID=990
- PGID=979
- UMASK=002
- TZ=Europe/Stockholm
volumes:
- "radarr:/config"
- "/etc/localtime:/etc/localtime:ro"
- "/var/cloud/transmission/downloads:/data/torrents"
- "/data/media/movies:/media/movies"
- "/data/backup/arr/radarr:/config/backup"
- "/data/media/trash/movies:/media/trash"
readarr:
container_name: readarr
image: ghcr.io/hotio/readarr:latest
network_mode: bridge
restart: unless-stopped
logging:
driver: json-file
ports:
- "8787:8787"
environment:
- PUID=991
- PGID=979
- UMASK=002
- TZ=Europe/Stockholm
volumes:
- "readarr:/config"
- "/etc/localtime:/etc/localtime:ro"
- "/var/cloud/transmission/downloads:/data/torrents"
- "/data/media/books:/media/books"
- "/data/media/trash/books:/media/trash"
- "/data/backup/arr/readarr:/config/backup"
sonarr:
container_name: sonarr
image: ghcr.io/hotio/sonarr:latest
network_mode: bridge
restart: unless-stopped
logging:
driver: json-file
ports:
- "8989:8989"
environment:
- PUID=989
- PGID=979
- UMASK=002
- TZ=Europe/Stockholm
volumes:
- "sonarr:/config"
- "/etc/localtime:/etc/localtime:ro"
- "/data/media/tv:/media/tv"
- "/var/cloud/transmission/downloads:/data/torrents"
- "/data/media/trash/tv:/media/trash"
- "/data/backup/arr/sonarr:/config/backup"
volumes:
bazarr:
driver: local
lidarr:
driver: local
prowlarr:
driver: local
radarr:
driver: local
readarr:
driver: local
sonarr:
driver: local
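
Note: every service above runs with a fixed PUID/PGID, so the bind-mounted media paths must be readable and writable by those IDs. A quick sanity check on the host, assuming the paths exist as written:
# compare the numeric owner of a media path with the account behind e.g. PUID=992
ls -ln /data/media/movies
getent passwd 992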

1
compose/bazarr-up.sh Executable file

@ -0,0 +1 @@
COMPOSE_HTTP_TIMEOUT=120 docker-compose --verbose --log-level info up -d bazarr
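
Note: --verbose and --log-level are docker-compose v1 options; with the newer Compose v2 plugin the rough equivalent of these helper scripts is simply:
docker compose up -d bazarr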


@ -0,0 +1,26 @@
version: "3.7"
services:
calibre:
image: lscr.io/linuxserver/calibre:latest
container_name: calibre
network_mode: bridge
ports:
- 8080:8080
- 8181:8181
- 8081:8081
security_opt:
- seccomp:unconfined #optional
environment:
- PUID=987
- PGID=979
- TZ=Europe/Stockholm
- CUSTOM_USER=netshade
- LC_ALL=sv_SE.UTF-8
- PASSWORD=Januari20 #optional
- CLI_ARGS= #optional
volumes:
- "calibre:/config"
- "/var/cloud/Media/Books:/media/books"
volumes:
calibre:
driver: local


@ -0,0 +1,50 @@
version: "3.7"
services:
bazarr:
container_name: bazarr
image: cr.hotio.dev/hotio/bazarr
network_mode: "host"
environment:
- PUID=117
- PGID=979
- UMASK=002
- TZ=Etc/UTC
volumes:
- /opt/docker/arr/bazarr/config:/config
- /var/cloud/Media:/var/cloud/Media
lidarr:
container_name: lidarr
image: cr.hotio.dev/hotio/lidarr
network_mode: "host"
environment:
- PUID=117
- PGID=979
- UMASK=002
- TZ=Etc/UTC
volumes:
- /opt/docker/arr/lidarr/config:/config
- /var/cloud/Media/Mp3:/var/cloud/Media/Mp3
- /var/cloud/transmission/downloads:/var/cloud/transmission/downloads
prowlarr:
container_name: prowlarr
image: cr.hotio.dev/hotio/prowlarr:testing
network_mode: "host"
environment:
- PUID=117
- PGID=979
- UMASK=002
- TZ=Etc/UTC
volumes:
- /opt/docker/arr/prowlarr/config:/config


@ -0,0 +1,93 @@
version: "3.7"
services:
bazarr:
container_name: bazarr
image: cr.hotio.dev/hotio/bazarr
ports:
- "6767:6767"
environment:
- PUID=117
- PGID=979
- UMASK=002
- TZ=Etc/UTC
volumes:
- /opt/bazarr/config:/config
- /var/cloud/Media:/var/cloud/Media
lidarr:
container_name: lidarr
image: cr.hotio.dev/hotio/lidarr
ports:
- "8686:8686"
environment:
- PUID=117
- PGID=979
- UMASK=002
- TZ=Etc/UTC
volumes:
- /opt/lidarr/config:/config
- /var/cloud/Media/Mp3:/var/cloud/Media/Mp3
prowlarr:
container_name: prowlarr
image: cr.hotio.dev/hotio/prowlarr:testing
ports:
- "9696:9696"
environment:
- PUID=117
- PGID=979
- UMASK=002
- TZ=Etc/UTC
volumes:
- /opt/prowlarr/config:/config
readarr:
container_name: readarr
image: cr.hotio.dev/hotio/readarr:nightly
ports:
- "8787:8787"
environment:
- PUID=117
- PGID=979
- UMASK=002
- TZ=Etc/UTC
volumes:
- /opt/readarr/config:/config
- /var/cloud/Media:/media
radarr:
container_name: radarr
image: cr.hotio.dev/hotio/radarr
ports:
- "7878:7878"
environment:
- PUID=117
- PGID=979
- UMASK=002
- TZ=Etc/UTC
volumes:
- /opt/radarr/config:/config
- /var/cloud/Media:/media
sonarr:
container_name: sonarr
image: cr.hotio.dev/hotio/sonarr
ports:
- "8989:8989"
environment:
- PUID=117
- PGID=979
- UMASK=002
- TZ=Etc/UTC
volumes:
- /opt/sonarr/config:/config
- /var/cloud/Media/Serier:/var/cloud/Media/Serier
jellyfin:
container_name: jellyfin
image: cr.hotio.dev/hotio/jellyfin
ports:
- "8096:8096"
environment:
- PUID=117
- PGID=979
- UMASK=002
- TZ=Etc/UTC
volumes:
- /opt/jellyfin/config:/config
- /var/cloud/Media:/media


@ -0,0 +1,38 @@
version: "3.7"
services:
ferdi-server:
image: getferdi/ferdi-server
container_name: ferdi-server
network_mode: "bridge"
environment:
- NODE_ENV=development
- APP_URL=localhost
- DB_CONNECTION=mysql
- DB_HOST=127.0.0.1
- DB_PORT=3306
- DB_USER=ferdi
- DB_PASSWORD=WUfkwibeLCBikho7
- DB_DATABASE=ferdi
- DB_SSL=false
- MAIL_CONNECTION=smtp
- SMTP_HOST=127.0.0.1
- SMTP_PORT=2525
- MAIL_SSL=false
- MAIL_USERNAME=ferdi
- MAIL_PASSWORD=yncMdnlA4nopNkzkqXO62fa0ry0=
- MAIL_SENDER=ferdi@niefelheim.com
- IS_CREATION_ENABLED=true
- IS_DASHBOARD_ENABLED=true
- IS_REGISTRATION_ENABLED=true
- CONNECT_WITH_FRANZ=true
- DATA_DIR=/data
- TZ=Etc/UTC
- PUID=117
- PGID=979
volumes:
- /opt/ferdi/data:/data
- /opt/ferdi/recipes:/app/recipes
ports:
- 3333:3333
restart: unless-stopped


@ -0,0 +1,128 @@
version: "3.7"
services:
bazarr:
container_name: bazarr
image: cr.hotio.dev/hotio/bazarr
ports:
- "6767:6767"
environment:
- PUID=117
- PGID=979
- UMASK=002
- TZ=Etc/UTC
volumes:
- /opt/bazarr/config:/config
- /var/cloud/Media:/var/cloud/Media
lidarr:
container_name: lidarr
image: cr.hotio.dev/hotio/lidarr
ports:
- "8686:8686"
environment:
- PUID=117
- PGID=979
- UMASK=002
- TZ=Etc/UTC
volumes:
- /opt/lidarr/config:/config
- /var/cloud/Media/Mp3:/var/cloud/Media/Mp3
prowlarr:
container_name: prowlarr
image: cr.hotio.dev/hotio/prowlarr:testing
ports:
- "9696:9696"
environment:
- PUID=117
- PGID=979
- UMASK=002
- TZ=Etc/UTC
volumes:
- /opt/prowlarr/config:/config
readarr:
container_name: readarr
image: cr.hotio.dev/hotio/readarr:nightly
ports:
- "8787:8787"
environment:
- PUID=117
- PGID=979
- UMASK=002
- TZ=Etc/UTC
volumes:
- /opt/readarr/config:/config
- /var/cloud/Media:/media
radarr:
container_name: radarr
image: cr.hotio.dev/hotio/radarr
ports:
- "7878:7878"
environment:
- PUID=117
- PGID=979
- UMASK=002
- TZ=Etc/UTC
volumes:
- /opt/radarr/config:/config
- /var/cloud/Media:/media
sonarr:
container_name: sonarr
image: cr.hotio.dev/hotio/sonarr
ports:
- "8989:8989"
environment:
- PUID=117
- PGID=979
- UMASK=002
- TZ=Etc/UTC
volumes:
- /opt/sonarr/config:/config
- /var/cloud/Media/Serier:/var/cloud/Media/Serier
jellyfin:
container_name: jellyfin
image: cr.hotio.dev/hotio/jellyfin
ports:
- "8096:8096"
environment:
- PUID=117
- PGID=979
- UMASK=002
- TZ=Etc/UTC
volumes:
- /opt/jellyfin/config:/config
- /var/cloud/Media:/media
ferdi-server:
image: getferdi/ferdi-server
container_name: ferdi-server
network_mode: "host"
environment:
- NODE_ENV=development
- APP_URL=localhost
- DB_CONNECTION=mysql
- DB_HOST=127.0.0.1
- DB_PORT=3306
- DB_USER=ferdi
- DB_PASSWORD=WUfkwibeLCBikho7
- DB_DATABASE=ferdi
- DB_SSL=false
- MAIL_CONNECTION=smtp
- SMTP_HOST=127.0.0.1
- SMTP_PORT=2525
- MAIL_SSL=false
- MAIL_USERNAME=ferdi
- MAIL_PASSWORD=yncMdnlA4nopNkzkqXO62fa0ry0=
- MAIL_SENDER=ferdi@niefelheim.com
- IS_CREATION_ENABLED=true
- IS_DASHBOARD_ENABLED=true
- IS_REGISTRATION_ENABLED=true
- CONNECT_WITH_FRANZ=true
- DATA_DIR=/data
- TZ=Etc/UTC
- PUID=117
- PGID=979
volumes:
- /opt/ferdi/data:/data
- /opt/ferdi/recipes:/app/recipes
#ports:
# - 3333:3333
restart: unless-stopped


@ -0,0 +1,124 @@
version: "3.7"
services:
bazarr:
container_name: bazarr
image: cr.hotio.dev/hotio/bazarr
network_mode: "host"
environment:
- PUID=117
- PGID=979
- UMASK=002
- TZ=Etc/UTC
volumes:
- /opt/bazarr/config:/config
- /var/cloud/Media:/var/cloud/Media
lidarr:
container_name: lidarr
image: cr.hotio.dev/hotio/lidarr
network_mode: "host"
environment:
- PUID=117
- PGID=979
- UMASK=002
- TZ=Etc/UTC
volumes:
- /opt/lidarr/config:/config
- /var/cloud/Media/Mp3:/var/cloud/Media/Mp3
- /var/cloud/transmission/downloads:/var/cloud/transmission/downloads
prowlarr:
container_name: prowlarr
image: cr.hotio.dev/hotio/prowlarr:testing
network_mode: "host"
environment:
- PUID=117
- PGID=979
- UMASK=002
- TZ=Etc/UTC
volumes:
- /opt/prowlarr/config:/config
readarr:
container_name: readarr
image: cr.hotio.dev/hotio/readarr:nightly
network_mode: "host"
environment:
- PUID=117
- PGID=979
- UMASK=002
- TZ=Etc/UTC
volumes:
- /opt/readarr/config:/config
- /var/cloud/Media:/media
radarr:
container_name: radarr
image: cr.hotio.dev/hotio/radarr
network_mode: "host"
environment:
- PUID=117
- PGID=979
- UMASK=002
- TZ=Etc/UTC
volumes:
- /opt/radarr/config:/config
- /var/cloud/Media:/media
- /var/cloud/transmission/downloads:/var/cloud/transmission/downloads
sonarr:
container_name: sonarr
image: cr.hotio.dev/hotio/sonarr
network_mode: "host"
environment:
- PUID=117
- PGID=979
- UMASK=002
- TZ=Etc/UTC
volumes:
- /opt/sonarr/config:/config
- /var/cloud/Media/Serier:/var/cloud/Media/Serier
jellyfin:
container_name: jellyfin
image: cr.hotio.dev/hotio/jellyfin
ports:
- "8096:8096"
environment:
- PUID=117
- PGID=979
- UMASK=002
- TZ=Etc/UTC
volumes:
- /opt/jellyfin/config:/config
- /var/cloud/Media:/media
ferdi-server:
image: getferdi/ferdi-server
container_name: ferdi-server
network_mode: "host"
environment:
- NODE_ENV=development
- APP_URL=localhost
- DB_CONNECTION=mysql
- DB_HOST=127.0.0.1
- DB_PORT=3306
- DB_USER=ferdi
- DB_PASSWORD=WUfkwibeLCBikho7
- DB_DATABASE=ferdi
- DB_SSL=false
- MAIL_CONNECTION=smtp
- SMTP_HOST=127.0.0.1
- SMTP_PORT=2525
- MAIL_SSL=false
- MAIL_USERNAME=ferdi
- MAIL_PASSWORD=yncMdnlA4nopNkzkqXO62fa0ry0=
- MAIL_SENDER=ferdi@niefelheim.com
- IS_CREATION_ENABLED=true
- IS_DASHBOARD_ENABLED=true
- IS_REGISTRATION_ENABLED=true
- CONNECT_WITH_FRANZ=true
- DATA_DIR=/data
- TZ=Etc/UTC
- PUID=117
- PGID=979
volumes:
- /opt/ferdi/data:/data
- /opt/ferdi/recipes:/app/recipes
#ports:
# - 3333:3333
restart: unless-stopped

1
compose/ferdi-up.sh Executable file

@ -0,0 +1 @@
COMPOSE_HTTP_TIMEOUT=120 docker-compose --verbose --log-level info up -d ferdi-server

4
compose/firefly/.db.env Normal file

@ -0,0 +1,4 @@
MYSQL_RANDOM_ROOT_PASSWORD=yes
MYSQL_USER=firefly
MYSQL_PASSWORD=secret_firefly_password
MYSQL_DATABASE=firefly

336
compose/firefly/.env Normal file

@ -0,0 +1,336 @@
# You can leave this on "local". If you change it to production most console commands will ask for extra confirmation.
# Never set it to "testing".
APP_ENV=production
# Set to true if you want to see debug information in error screens.
APP_DEBUG=false
# This should be your email address.
# If you use Docker or similar, you can set this variable from a file by using SITE_OWNER_FILE
# The variable is used in some errors shown to users who aren't admin.
SITE_OWNER=netshade@niefelheim.com
# The encryption key for your sessions. Keep this very secure.
# Change it to a string of exactly 32 chars or use something like `php artisan key:generate` to generate it.
# If you use Docker or similar, you can set this variable from a file by using APP_KEY_FILE
#
# Avoid the "#" character in your APP_KEY, it may break things.
#
APP_KEY="fgTgcY*H!@sz6w*upr3gL9ZrZ3j^K%T2"
# Firefly III will launch using this language (for new users and unauthenticated visitors)
# For a list of available languages: https://github.com/firefly-iii/firefly-iii/tree/main/resources/lang
#
# If text is still in English, remember that not everything may have been translated.
DEFAULT_LANGUAGE=sv_SE
# The locale defines how numbers are formatted.
# by default this value is the same as whatever the language is.
DEFAULT_LOCALE=equal
# Change this value to your preferred time zone.
# Example: Europe/Amsterdam
# For a list of supported time zones, see https://en.wikipedia.org/wiki/List_of_tz_database_time_zones
TZ=Europe/Stockholm
# TRUSTED_PROXIES is a useful variable when using Docker and/or a reverse proxy.
# Set it to ** and reverse proxies work just fine.
TRUSTED_PROXIES=**
# The log channel defines where your log entries go to.
# Several other options exist. You can use 'single' for one big fat error log (not recommended).
# Also available are 'syslog', 'errorlog' and 'stdout' which will log to the system itself.
# A rotating log option is 'daily', which creates 5 files that (surprise) rotate.
# A cool option is 'papertrail' for cloud logging
# Default setting 'stack' will log to 'daily' and to 'stdout' at the same time.
LOG_CHANNEL=stack
# Log level. You can set this from least severe to most severe:
# debug, info, notice, warning, error, critical, alert, emergency
# If you set it to debug your logs will grow large, and fast. If you set it to emergency probably
# nothing will get logged, ever.
APP_LOG_LEVEL=notice
# Audit log level.
# The audit log is used to log notable Firefly III events on a separate channel.
# These log entries may contain sensitive financial information.
# The audit log is disabled by default.
#
# To enable it, set AUDIT_LOG_LEVEL to "info"
# To disable it, set AUDIT_LOG_LEVEL to "emergency"
AUDIT_LOG_LEVEL=emergency
#
# If you want, you can redirect the audit logs to another channel.
# Set 'audit_stdout', 'audit_syslog', 'audit_errorlog' to log to the system itself.
# Use audit_daily to log to a rotating file.
# Use audit_papertrail to log to papertrail.
#
# If you do this, the audit logs may be mixed with normal logs because the settings for these channels
# are often the same as the settings for the normal logs.
AUDIT_LOG_CHANNEL=
#
# Used when logging to papertrail:
# Also used when audit logs log to papertrail:
#
PAPERTRAIL_HOST=
PAPERTRAIL_PORT=
# Database credentials. Make sure the database exists. I recommend a dedicated user for Firefly III
# For other database types, please see the FAQ: https://docs.firefly-iii.org/references/faq/install/#i-want-to-use-sqlite
# If you use Docker or similar, you can set these variables from a file by appending them with _FILE
# Use "pgsql" for PostgreSQL
# Use "mysql" for MySQL and MariaDB.
# Use "sqlite" for SQLite.
DB_CONNECTION=mysql
DB_HOST=192.168.1.125
DB_PORT=3306
DB_DATABASE=firefly
DB_USERNAME=firefly
DB_PASSWORD="^Y7ieT@fNYP5rAahTSEtR2T&w2zbzc5A"
# leave empty or omit when not using a socket connection
DB_SOCKET=/var/run/mysqld/mysqld.sock
# MySQL supports SSL. You can configure it here.
# If you use Docker or similar, you can set these variables from a file by appending them with _FILE
MYSQL_USE_SSL=false
MYSQL_SSL_VERIFY_SERVER_CERT=true
# You need to set at least one of these options
MYSQL_SSL_CAPATH=/etc/ssl/certs/
MYSQL_SSL_CA=
MYSQL_SSL_CERT=
MYSQL_SSL_KEY=
MYSQL_SSL_CIPHER=
# PostgreSQL supports SSL. You can configure it here.
# If you use Docker or similar, you can set these variables from a file by appending them with _FILE
PGSQL_SSL_MODE=prefer
PGSQL_SSL_ROOT_CERT=null
PGSQL_SSL_CERT=null
PGSQL_SSL_KEY=null
PGSQL_SSL_CRL_FILE=null
# more PostgreSQL settings
PGSQL_SCHEMA=public
# If you're looking for performance improvements, you could install memcached or redis
CACHE_DRIVER=file
SESSION_DRIVER=file
# If you set either of the options above to 'redis', you might want to update these settings too
# If you use Docker or similar, you can set REDIS_HOST_FILE, REDIS_PASSWORD_FILE or
# REDIS_PORT_FILE to set the value from a file instead of from an environment variable
# can be tcp or unix. http is not supported
REDIS_SCHEME=tcp
# use only when using 'unix' for REDIS_SCHEME. Leave empty otherwise.
REDIS_PATH=
# use only when using 'tcp' or 'http' for REDIS_SCHEME. Leave empty otherwise.
REDIS_HOST=127.0.0.1
REDIS_PORT=6379
# Use only with Redis 6+ with proper ACL set. Leave empty otherwise.
REDIS_USERNAME=
REDIS_PASSWORD=
# always use quotes and make sure redis db "0" and "1" exists. Otherwise change accordingly.
REDIS_DB="0"
REDIS_CACHE_DB="1"
# Cookie settings. Should not be necessary to change these.
# If you use Docker or similar, you can set COOKIE_DOMAIN_FILE to set
# the value from a file instead of from an environment variable
# Setting samesite to "strict" may give you trouble logging in.
COOKIE_PATH="/"
COOKIE_DOMAIN=
COOKIE_SECURE=false
COOKIE_SAMESITE=lax
# If you want Firefly III to email you, update these settings
# For instructions, see: https://docs.firefly-iii.org/how-to/firefly-iii/advanced/notifications/#email
# If you use Docker or similar, you can set these variables from a file by appending them with _FILE
MAIL_MAILER=sendmail
MAIL_HOST=niefelheim.com
MAIL_PORT=25
MAIL_FROM=firefly@niefelheim.com
MAIL_USERNAME=null
MAIL_PASSWORD=null
MAIL_ENCRYPTION=null
MAIL_SENDMAIL_COMMAND=mailx
# Other mail drivers:
# If you use Docker or similar, you can set these variables from a file by appending them with _FILE
MAILGUN_DOMAIN=
MAILGUN_SECRET=
# If you are on EU region in mailgun, use api.eu.mailgun.net, otherwise use api.mailgun.net
# If you use Docker or similar, you can set this variable from a file by appending it with _FILE
MAILGUN_ENDPOINT=api.mailgun.net
# If you use Docker or similar, you can set these variables from a file by appending them with _FILE
MANDRILL_SECRET=
SPARKPOST_SECRET=
# Firefly III can send you the following messages.
SEND_ERROR_MESSAGE=true
# These messages contain (sensitive) transaction information:
SEND_REPORT_JOURNALS=true
# Set this value to true if you want to set the location of certain things, like transactions.
# Since this involves an external service, it's optional and disabled by default.
ENABLE_EXTERNAL_MAP=false
# Set this value to true if you want Firefly III to download currency exchange rates
# from the internet. These rates are hosted by the creator of Firefly III inside
# an Azure Storage Container.
# Not all currencies may be available. Rates may be wrong.
ENABLE_EXTERNAL_RATES=false
# The map will default to this location:
MAP_DEFAULT_LAT=51.983333
MAP_DEFAULT_LONG=5.916667
MAP_DEFAULT_ZOOM=6
#
# Some objects have room for an URL, like transactions and webhooks.
# By default, the following protocols are allowed:
# http, https, ftp, ftps, mailto
#
# To change this, set your preferred comma separated set below.
# Be sure to include http, https and other default ones if you need to.
#
VALID_URL_PROTOCOLS=
#
# Firefly III authentication settings
#
#
# Firefly III supports a few authentication methods:
# - 'web' (default, uses built in DB)
# - 'remote_user_guard' for Authelia etc
# Read more about these settings in the documentation.
# https://docs.firefly-iii.org/how-to/firefly-iii/advanced/authentication/
#
# LDAP is no longer supported :(
#
AUTHENTICATION_GUARD=web
#
# Remote user guard settings
#
AUTHENTICATION_GUARD_HEADER=REMOTE_USER
AUTHENTICATION_GUARD_EMAIL=
#
# Firefly III generates a basic keypair for your OAuth tokens.
# If you want, you can overrule the key with your own (secure) value.
# It's also possible to set PASSPORT_PUBLIC_KEY_FILE or PASSPORT_PRIVATE_KEY_FILE
# if you're using Docker secrets or similar solutions for secret management
#
PASSPORT_PRIVATE_KEY=
PASSPORT_PUBLIC_KEY=
#
# Extra authentication settings
#
CUSTOM_LOGOUT_URL=
# You can disable the X-Frame-Options header if it interferes with tools like
# Organizr. This is at your own risk. Applications running in frames run the risk
# of leaking information to their parent frame.
DISABLE_FRAME_HEADER=false
# You can disable the Content Security Policy header when you're using an ancient browser
# or any version of Microsoft Edge / Internet Explorer (which amounts to the same thing really)
# This leaves you with the risk of not being able to stop XSS bugs should they ever surface.
# This is at your own risk.
DISABLE_CSP_HEADER=false
# If you wish to track your own behavior over Firefly III, set valid analytics tracker information here.
# Nobody uses this except for me on the demo site. But hey, feel free to use this if you want to.
# Do not prepend the TRACKER_URL with http:// or https://
# The only tracker supported is Matomo.
# You can set the following variables from a file by appending them with _FILE:
TRACKER_SITE_ID=
TRACKER_URL=
#
# Firefly III supports webhooks. These are security sensitive and must be enabled manually first.
#
ALLOW_WEBHOOKS=false
#
# The static cron job token can be useful when you use Docker and wish to manage cron jobs.
# 1. Set this token to any 32-character value (this is important!).
# 2. Use this token in the cron URL instead of a user's command line token that you can find in /profile
#
# For more info: https://docs.firefly-iii.org/how-to/firefly-iii/advanced/cron/
#
# You can set this variable from a file by appending it with _FILE
#
STATIC_CRON_TOKEN=entokenlikenannantokenmenutantec
# You can fine tune the start-up of a Docker container by editing these environment variables.
# Use this at your own risk. Disabling certain checks and features may result in lots of inconsistent data.
# However if you know what you're doing you can significantly speed up container start times.
# Set each value to true to enable, or false to disable.
# Set this to true to build all locales supported by Firefly III.
# This may take quite some time (several minutes) and is generally not recommended.
# If you wish to change or alter the list of locales, start your Docker container with
# `docker run -v locale.gen:/etc/locale.gen -e DKR_BUILD_LOCALE=true`
# and make sure your preferred locales are in your own locale.gen.
DKR_BUILD_LOCALE=false
# Check if the SQLite database exists. Can be skipped if you're not using SQLite.
# Won't significantly speed up things.
DKR_CHECK_SQLITE=true
# Run database creation and migration commands. Disable this only if you're 100% sure the DB exists
# and is up to date.
DKR_RUN_MIGRATION=true
# Run database upgrade commands. Disable this only when you're 100% sure your DB is up-to-date
# with the latest fixes (outside of migrations!)
DKR_RUN_UPGRADE=true
# Verify database integrity. Includes all data checks and verifications.
# Disabling this makes Firefly III assume your DB is intact.
DKR_RUN_VERIFY=true
# Run database reporting commands. When disabled, Firefly III won't go over your data to report current state.
# Disabling this should have no impact on data integrity or safety but it won't warn you of possible issues.
DKR_RUN_REPORT=true
# Generate OAuth2 keys.
# When disabled, Firefly III won't attempt to generate OAuth2 Passport keys. This won't be an issue, IFF (if and only if)
# you had previously generated keys already and they're stored in your database for restoration.
DKR_RUN_PASSPORT_INSTALL=true
# Leave the following configuration vars as is.
# Unless you like to tinker and know what you're doing.
APP_NAME=FireflyIII
BROADCAST_DRIVER=log
QUEUE_DRIVER=sync
CACHE_PREFIX=firefly
PUSHER_KEY=
IPINFO_TOKEN=
PUSHER_SECRET=
PUSHER_ID=
DEMO_USERNAME=
DEMO_PASSWORD=
#
# The v2 layout is very experimental. If it breaks you get to keep both parts.
# Be wary of data loss.
#
FIREFLY_III_LAYOUT=v1
#
# Please make sure this URL matches the external URL of your Firefly III installation.
# It is used to validate specific requests and to generate URLs in emails.
#
APP_URL=http://ff.niefelheim.com/
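
Note: the comments above require APP_KEY to be exactly 32 characters and STATIC_CRON_TOKEN to be a 32-character value. A hedged sketch for generating such values with standard tools:
# 32 hex characters (16 random bytes), e.g. for STATIC_CRON_TOKEN
openssl rand -hex 16
# 32 random alphanumeric characters, e.g. for APP_KEY (avoids '#', as advised above)
LC_ALL=C tr -dc 'A-Za-z0-9' < /dev/urandom | head -c 32; echo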


@ -0,0 +1,245 @@
# Firefly Data Importer (FIDI) configuration file
# Where is Firefly III?
#
# 1) Make sure you ADD http:// or https://
# 2) Make sure you REMOVE any trailing slash from the end of the URL.
# 3) In case of Docker, refer to the internal IP of your Firefly III installation.
#
# Setting this value is not mandatory. But it is very useful.
#
# This variable can be set from a file if you append it with _FILE
#
FIREFLY_III_URL=http://app:8080
#
# Imagine Firefly III can be reached at "http://172.16.0.2:8082" (internal Docker network or something).
# But you have a fancy URL: "https://personal-finances.bill.microsoft.com/"
#
# In those cases, you can overrule the URL so when the data importer links back to Firefly III, it uses the correct URL.
#
# 1) Make sure you ADD http:// or https://
# 2) Make sure you REMOVE any trailing slash from the end of the URL.
#
# IF YOU SET THIS VALUE, YOU MUST ALSO SET THE FIREFLY_III_URL
#
# This variable can be set from a file if you append it with _FILE
#
VANITY_URL=https://ff.niefelheim.com
#
# Set your Firefly III Personal Access Token (OAuth)
# You can create a Personal Access Token on the /profile page:
# go to the OAuth tab, then Personal Access Token and "Create token".
#
# - Do not use the "command line token". That's the WRONG one.
# - Do not use "APP_KEY" value from your Firefly III installation. That's the WRONG one.
#
# Setting this value is not mandatory. Instructions will follow if you omit this field.
#
# This variable can be set from a file if you append it with _FILE
#
FIREFLY_III_ACCESS_TOKEN=eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiJ9.eyJhdWQiOiIxNCIsImp0aSI6ImY1MDExOTIxNWQ1OTQ1MGIxMzhhYzJlYTFmYzM1MWJkNGU2ODlmYTM0NDhjZTkyY2VmMDRiNDJmMDkxM2E0MTg2NzQ0NjA5NTdiNWVjNDZlIiwiaWF0IjoxNzEwOTI2OTQzLjQwOTA0MiwibmJmIjoxNzEwOTI2OTQzLjQwOTA0NiwiZXhwIjoxNzQyNDYyOTQyLjEyMTQwOCwic3ViIjoiMSIsInNjb3BlcyI6W119.iaaTxKMkGqh3BtxnxvoUT3Wt37eqQc9-3xyClmEuDbAsCN3-hLNHtA09zpYoVVZZDj9UyMuc7Y_-HNYxoDg-Pc9OYKPWE-AX94W_n34rAaujCjRI1WObwB1FkAFNEKbHYiSBIs8fZPp9W7sBLWT1erurMHQFySdKyqtDuzMVYsKhvlVBR3RCR_UhCOD1m8uxIN5XPVNj60BElrc3skU_NRvNIKwN3o6i-WTr081iT9OdlKyvM6bNxr86cl2ebJlu1OJIJT1_WpnGmLImgq8iwoBkyDgDbV7FJ_hKnihuoAR4fenU5E3bDcTfUad2-hAvtR2C5JXOs1iv93JMuN2zO1ANodW6WOBQuOXFZF1eKWKpslCG0V0D-ZTxpIKD21MY35e7qrjXVGjeB_JWFe3UdclljD4Mk_zdqXynzpyKouUtbT9J8_M9j8izMUwbHSo8wbffzBxOlrV_gYxeCu0tZ_6a7FNfRtDP0VdwbDsCf1dn1dYRyzXMH3fUSwgMUq_2-3J1Un_m6Gm34uCUWU3b2vqNYF7DqaXdlSjyrrd-gnptvx9WXq5Ewn3Kfq87wjSWTqnyjOJbPplHUVlcxMB3Br7TR_mg4qYtQOt_82Rld1kPf_HNQlgONGIuxDODTMcnHVLEMDaeXuDDzF86GNLdt-dXKYqJps9R4a9SHtMUpW4
#
# You can also use a public client ID. This is available in Firefly III 5.4.0-alpha.3 and higher.
# This is a number (1, 2, 3). If you use the client ID, you can leave the access token empty and vice versa.
#
# This value is not mandatory to set. Instructions will follow if you omit this field.
#
# This variable can be set from a file if you append it with _FILE
#
FIREFLY_III_CLIENT_ID=
#
# Nordigen information.
# The key and ID can be set from a file if you append it with _FILE
#
NORDIGEN_ID=a63b17d9-b9ca-43ec-bc3a-509af3a44167
NORDIGEN_KEY=25c475dadb6c2dd8c83fa6ea46795210fdab266b3764162f083484eda4b5076d4ded4ddb9cc63a07acd50d49b42477a01574d94d6550f7a4ba3236ddd94255f0
NORDIGEN_SANDBOX=false
#
# Spectre information
#
# The ID and secret can be set from a file if you append it with _FILE
SPECTRE_APP_ID=
SPECTRE_SECRET=
#
# Use cache. No need to do this.
#
USE_CACHE=false
#
# If set to true, the data import will not complain about running into duplicates.
# This will give you cleaner import mails if you run regular imports.
#
# This means that the data importer will not import duplicates, but it will not complain about them either.
#
# This setting has no influence on the settings in your configuration(.json).
#
# Of course, if something goes wrong *because* the transaction is a duplicate you will
# NEVER know unless you start digging in your log files. So be careful with this.
#
IGNORE_DUPLICATE_ERRORS=false
#
# Auto import settings. Due to security constraints, you MUST enable each feature individually.
# You must also set a secret. The secret is used for the web routes.
#
# The auto-import secret must be a string of at least 16 characters.
# Visit this page for inspiration: https://www.random.org/passwords/?num=1&len=16&format=html&rnd=new
#
# Submit it using ?secret=X
#
# This variable can be set from a file if you append it with _FILE
#
AUTO_IMPORT_SECRET=9hTCgH6CJmGUJKcF
#
# Is the /autoimport endpoint enabled?
# By default it's disabled, and the secret alone will not enable it.
#
CAN_POST_AUTOIMPORT=true
#
# Is the /autoupload endpoint enabled?
# By default it's disabled, and the secret alone will not enable it.
#
CAN_POST_FILES=true
#
# Import directory white list. You need to set this before the auto importer will accept a directory to import from.
#
# This variable can be set from a file if you append it with _FILE
#
IMPORT_DIR_ALLOWLIST=/import
#
# When you're running Firefly III under a (self-signed) certificate,
# the data importer may have trouble verifying the TLS connection.
#
# You have a few options to make sure the data importer can connect
# to Firefly III:
# - 'true': will verify all certificates. The most secure option and the default.
# - 'file.pem': refer to a file (you must provide it) to your custom root or intermediate certificates.
# - 'false': will verify NO certificates. Not very secure.
VERIFY_TLS_SECURITY=true
#
# If you want, you can set a directory here where the data importer will look for import configurations.
# This is a separate setting from the /import directory that the auto-import uses.
# Setting this variable isn't necessary. The default value is "storage/configurations".
#
# This variable can be set from a file if you append it with _FILE
#
JSON_CONFIGURATION_DIR=
#
# Time out when connecting with Firefly III.
# π*10 seconds is usually fine.
#
CONNECTION_TIMEOUT=31.41
# The following variables can be useful when debugging the application
APP_ENV=local
APP_DEBUG=false
LOG_CHANNEL=stack
#
# If you turn this on, expect massive logs with lots of privacy sensitive data
#
LOG_RETURN_JSON=false
# Log level. You can set this from least severe to most severe:
# debug, info, notice, warning, error, critical, alert, emergency
# If you set it to debug your logs will grow large, and fast. If you set it to emergency probably
# nothing will get logged, ever.
LOG_LEVEL=debug
# TRUSTED_PROXIES is a useful variable when using Docker and/or a reverse proxy.
# Set it to ** and reverse proxies work just fine.
TRUSTED_PROXIES=
#
# Time zone
#
TZ=Europe/Stockholm
#
# Use ASSET_URL when you're running the data importer in a sub-directory.
#
ASSET_URL=
#
# Email settings.
# The data importer can send you a message with all errors, warnings and messages
# after a successful import. This is disabled by default
#
ENABLE_MAIL_REPORT=true
#
# Force Firefly III URL to be secure?
#
#
EXPECT_SECURE_URL=false
# If enabled, define which mailer you want to use.
# Options include: smtp, mailgun, postmark, sendmail, log, array
# Amazon SES is not supported.
# log = drop mails in the logs instead of sending them
# array = debug mailer that does nothing.
MAIL_MAILER=sendmail
# where to send the report?
MAIL_DESTINATION=netshade@niefelheim.com
# other mail settings
# These variables can be set from a file if you append it with _FILE
MAIL_FROM_ADDRESS=noreply@niefelheim.com
MAIL_HOST=localhost
MAIL_PORT=25
MAIL_USERNAME=username
MAIL_PASSWORD=password
MAIL_ENCRYPTION=null
# Extra settings depending on your mail configuration above.
# These variables can be set from a file if you append it with _FILE
MAILGUN_DOMAIN=
MAILGUN_SECRET=
MAILGUN_ENDPOINT=
POSTMARK_TOKEN=
#
# You probably won't need to change these settings.
#
BROADCAST_DRIVER=log
CACHE_DRIVER=file
QUEUE_CONNECTION=sync
SESSION_DRIVER=file
SESSION_LIFETIME=120
IS_EXTERNAL=false
REDIS_HOST=127.0.0.1
REDIS_PASSWORD=null
REDIS_PORT=6379
# always use quotes
REDIS_DB="0"
REDIS_CACHE_DB="1"
# The only tracker supported is Matomo.
# This is used on the public instance over at https://data-importer.firefly-iii.org
TRACKER_SITE_ID=
TRACKER_URL=
APP_NAME=DataImporter
#
# The APP_URL environment variable is NOT used anywhere.
# Don't bother setting it to fix your reverse proxy problems. It won't help.
# Don't open issues telling me it doesn't help because it's not supposed to.
# Laravel uses this to generate links on the command line, which is a feature the data importer does not use.
#
APP_URL=http://localhost


@ -0,0 +1,46 @@
version: '3.3'
services:
app:
image: fireflyiii/core:latest
hostname: app
container_name: firefly_iii_core
restart: always
volumes:
- firefly_iii_upload:/var/www/html/storage/upload
env_file: .env
networks:
- firefly_iii
ports:
- 80:8080
depends_on:
- db
db:
image: mariadb:lts
hostname: db
container_name: firefly_iii_db
restart: always
env_file: .db.env
networks:
- firefly_iii
volumes:
- firefly_iii_db:/var/lib/mysql
cron:
#
# To make this work, set STATIC_CRON_TOKEN in your .env file or as an environment variable and replace REPLACEME below
# The STATIC_CRON_TOKEN must be *exactly* 32 characters long
#
image: alpine
restart: always
container_name: firefly_iii_cron
command: sh -c "echo \"0 3 * * * wget -qO- http://app:8080/api/v1/cron/REPLACEME\" | crontab - && crond -f -L /dev/stdout"
networks:
- firefly_iii
volumes:
firefly_iii_upload:
firefly_iii_db:
networks:
firefly_iii:
driver: bridge
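
Note: REPLACEME above stands for the STATIC_CRON_TOKEN from .env and is deliberately left as a placeholder. Once the token is filled in and the stack is up, the cron endpoint can be exercised by hand, e.g.:
# trigger the Firefly III cron route once, from inside the compose network
docker compose exec cron wget -qO- http://app:8080/api/v1/cron/REPLACEME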


@ -0,0 +1,46 @@
version: '3.3'
services:
app:
image: fireflyiii/core:latest
hostname: app
container_name: firefly_iii_core
restart: always
volumes:
- /data/share/firefly:/var/www/html/storage/upload
- /run/mysqld/mysqld.sock:/var/run/mysqld/mysqld.sock
env_file: .env
networks:
- firefly_iii
ports:
- 8088:8080
importer:
image: fireflyiii/data-importer:latest
hostname: importer
restart: unless-stopped
container_name: firefly_iii_importer
networks:
- firefly_iii
ports:
- 8188:8080
depends_on:
- app
env_file: .importer.env
volumes:
- /data/share/ff_import:/import
cron:
#
# To make this work, set STATIC_CRON_TOKEN in your .env file or as an environment variable and replace REPLACEME below
# The STATIC_CRON_TOKEN must be *exactly* 32 characters long
#
image: alpine
restart: always
container_name: firefly_iii_cron
command: sh -c "echo \"0 3 * * * wget -qO- http://app:8080/api/v1/cron/entokenlikenannantokenmenutantec\" | crontab - && crond -f -L /dev/stdout"
networks:
- firefly_iii
networks:
firefly_iii:
driver: bridge

1
compose/grocy-up.sh Executable file

@ -0,0 +1 @@
COMPOSE_HTTP_TIMEOUT=120 docker-compose --verbose --log-level info up -d grocy


@ -0,0 +1,18 @@
version: "3.9"
services:
grocy:
image: lscr.io/linuxserver/grocy:latest
container_name: grocy
network_mode: bridge
ports:
- 9283:80
environment:
- PUID=82
- PGID=82
- TZ=Europe/Stockholm
volumes:
- grocy:/config
volumes:
grocy:
external: true
name: grocy
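
Note: as with the Actual Budget stack, the config volume here is external and has to be created before the first start:
docker volume create grocy
docker volume inspect grocy   # shows where /config lands on the host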

1
compose/jellyfin-up.sh Executable file

@ -0,0 +1 @@
COMPOSE_HTTP_TIMEOUT=120 docker-compose --verbose --log-level info up -d jellyfin

1
compose/lidarr-up.sh Executable file

@ -0,0 +1 @@
COMPOSE_HTTP_TIMEOUT=120 docker-compose --verbose --log-level info up -d lidarr

1
compose/matrix/client Normal file

@ -0,0 +1 @@
{"m.homeserver":{"base_url":"https://matrix.niefelheim.com/"},"m.identity_server":{"base_url":"https://matrix.org"},"m.server":"https://matrix.niefelheim.com","org.matrix.msc3575.proxy":"https://matrix.niefelheim.com"}
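
Note: clients only pick this up if it is served at /.well-known/matrix/client on the base domain with a JSON content type (and, per the spec's recommendation, permissive CORS). A quick check, assuming it is published on niefelheim.com:
curl -si https://niefelheim.com/.well-known/matrix/client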


@ -0,0 +1,150 @@
version: "3.9"
services:
synapse:
image: matrixdotorg/synapse:latest
container_name: synapse
restart: unless-stopped
networks:
- Matrix
hostname: synapse
ports:
- "8008:8008"
environment:
- UID=998
- GID=991
- TZ=Europe/Stockholm
volumes:
- synapse_data:/data
- /etc/letsencrypt/live/matrix.niefelheim.com:/data/tls:ro
- /usr/local/share/docker/shared_secret_authenticator.py:/usr/local/lib/python3.11/site-packages/shared_secret_authenticator.py
sliding-sync:
image: ghcr.io/matrix-org/sliding-sync:latest
container_name: sliding-sync
restart: unless-stopped
networks:
- Matrix
hostname: syncv3
ports:
- "8009:8009"
environment:
- "SYNCV3_SERVER=https://syncv3.niefelheim.com"
- "SYNCV3_SECRET=14b82584030154fc47f23fc8177fabf9c795ea3b4a6e11d32f452b353809cbe8"
- "SYNCV3_BINDADDR=:8009"
- "SYNCV3_DB=user=syncv3 dbname=syncv3 sslmode=disable host=192.168.1.161 password='SyncoptIcon'"
- SYNCV3_PROM=:2112
- UID=998
- GID=991
- TZ=Europe/Stockholm
maubot:
image: dock.mau.dev/maubot/maubot:latest
container_name: maubot
restart: unless-stopped
networks:
- Matrix
hostname: maubot
ports:
- 29316:29316
environment:
- UID=998
- GID=991
- TZ=Europe/Stockholm
volumes:
- maubot_data:/data
mautrix_discord:
image: dock.mau.dev/mautrix/discord:latest
container_name: mautrix_discord
hostname: mdiscord
restart: unless-stopped
networks:
- Matrix
environment:
- UID=998
- GID=991
- TZ=Europe/Stockholm
volumes:
- mautrix_discord_data:/data
mautrix_instagram:
image: dock.mau.dev/mautrix/instagram:latest
container_name: mautrix_instagram
hostname: minstagram
restart: unless-stopped
networks:
- Matrix
environment:
- UID=998
- GID=991
- TZ=Europe/Stockholm
volumes:
- mautrix_instagram_data:/data
mautrix_meta:
image: dock.mau.dev/mautrix/meta:latest
hostname: mmeta
container_name: mautrix_meta
restart: unless-stopped
networks:
- Matrix
environment:
- UID=998
- GID=991
- TZ=Europe/Stockholm
volumes:
- mautrix_meta_data:/data
mautrix_telegram:
image: dock.mau.dev/mautrix/telegram:latest
hostname: mtelegram
container_name: mautrix_telegram
restart: unless-stopped
networks:
- Matrix
environment:
- UID=998
- GID=991
- TZ=Europe/Stockholm
volumes:
- mautrix_telegram_data:/data
mautrix_twitter:
image: dock.mau.dev/mautrix/twitter:latest
hostname: mtwitter
container_name: mautrix_twitter
restart: unless-stopped
networks:
- Matrix
environment:
- UID=998
- GID=991
- TZ=Europe/Stockholm
volumes:
- mautrix_twitter_data:/data
mautrix_whatsapp:
image: dock.mau.dev/mautrix/whatsapp:latest
hostname: mwhatsapp
container_name: mautrix_whatsapp
restart: unless-stopped
networks:
- Matrix
environment:
- UID=998
- GID=991
- TZ=Europe/Stockholm
volumes:
- mautrix_whatsapp_data:/data
volumes:
synapse_data:
external: true
maubot_data:
external: true
mautrix_discord_data:
external: true
mautrix_instagram_data:
external: true
mautrix_meta_data:
external: true
mautrix_telegram_data:
external: true
mautrix_twitter_data:
external: true
mautrix_whatsapp_data:
external: true
networks:
Matrix:
external: true
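
Note: the Matrix network and every data volume above are external, so nothing is created automatically. A minimal bootstrap sketch; the SYNAPSE_SERVER_NAME below is an assumption based on the TLS mount earlier in the file:
docker network create Matrix
for v in synapse_data maubot_data mautrix_discord_data mautrix_instagram_data \
         mautrix_meta_data mautrix_telegram_data mautrix_twitter_data mautrix_whatsapp_data; do
  docker volume create "$v"
done
# generate an initial homeserver.yaml into the synapse_data volume (documented Synapse image workflow)
docker run -it --rm -v synapse_data:/data \
  -e SYNAPSE_SERVER_NAME=matrix.niefelheim.com -e SYNAPSE_REPORT_STATS=no \
  matrixdotorg/synapse:latest generate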


@ -0,0 +1 @@
14b82584030154fc47f23fc8177fabf9c795ea3b4a6e11d32f452b353809cbe8


@ -0,0 +1,48 @@
version: "3.7"
services:
jellyfin:
container_name: jellyfin
image: ghcr.io/hotio/jellyfin:latest
network_mode: bridge
ports: ["8096:8096"]
restart: unless-stopped
environment:
- PUID=117
- PGID=979
- UMASK=002
- TZ=Europe/Stockholm
volumes:
- jellyfin:/config
- /data/media:/media
airsonic-advanced:
image: lscr.io/linuxserver/airsonic-advanced:latest
container_name: airsonic-advanced
network_mode: bridge
ports:
- 4040:4040
- 8070:8070
logging:
driver: json-file
environment:
- PUID=117
- PGID=979
- TZ=Europe/Stockholm
- CONTEXT_PATH=/airsonic #optional
- JAVA_OPTS= #optional
volumes:
- airsonic-config:/config
- /data/media/music:/music
- /data/media/playlists:/playlists
- /data/media/podcasts:/podcasts
- /data/media/misc:/media
- /data/media/trash:/trash
devices:
- /dev/snd:/dev/snd #optional
restart: unless-stopped
volumes:
jellyfin:
external: true
airsonic-config:
external: true
name: airsonic-config


@ -0,0 +1,49 @@
version: "3.9"
services:
monica:
image: lscr.io/linuxserver/monica:latest
container_name: monica
network_mode: bridge
ports:
- 8456:80
environment:
- PUID=82
- PGID=82
- TZ=Europe/Stockholm
- DB_HOST=niefelheim.com
- DB_PORT=3306
- DB_USERNAME=netshade
- DB_PASSWORD=mrmd87
- DB_DATABASE=monica
- DB_UNIX_SOCKET=/var/run/mysqld/mysqld.sock
- DB_USE_UTF8MB4=true
- MAIL_FROM_NAME="Ratatosk, Niefelheims messenger."
- MAIL_FROM_ADDRESS=ratatosk@niefelheim.com
- MAIL_HOST=niefelheim.com
- MAIL_MAILER=smtp
- MAIL_PASSWORD=4VeN6UPmdNWrXY
- MAIL_PORT=25
- MAIL_USERNAME=ratatosk@niefelheim.com
- MFA_ENABLED=true
- GPG_KEYS=528995BFEDFBA7191D46839EF9BA0ADA31CBD89E 39B641343D8C104B2B146DC3F9C39DC0B9698544 F1F692238FBC1666E5A5CCD4199F9DFEF6FFBAFD
- DAV_ENABLED=true
- APP_KEY=base64:FAh3gDuGgAtMNarC1+xPpmlz8scUOsO5UfxTZy7RYts=
- ENABLE_GEOLOCATION=true
- LOCATION_IQ_API_KEY=pk.d622d5801094eb0bfd1333b4a812f19b
- ENABLE_WEATHER=true
- APP_URL=http://localhost
- TRUSTED_PROXIES=172.17.0.1,192.168.1.125
- APP_ENV=production
- APP_DISABLE_SIGNUP=true
- REDIS_HOST=192.168.1.125
- REDIS_PASSWORD=XF5DiPZD
- CACHE_DRIVER=redis
- SESSION_DRIVER=redis
- QUEUE_CONNECTION=sync
volumes:
- /run/mysqld/mysqld.sock:/var/run/mysqld/mysqld.sock
- monica:/var/www/html/storage
volumes:
monica:
external: true
name: monica


@ -0,0 +1,10 @@
version: "2"
services:
fail2ban_exporter:
image: registry.gitlab.com/hectorjsmith/fail2ban-prometheus-exporter:latest
container_name: fail2ban_exporter
network_mode: bridge
volumes:
- /var/run/fail2ban/:/var/run/fail2ban:ro
ports: ["9191:9191"]
restart: unless-stopped


@ -0,0 +1,86 @@
version: "3.9"
services:
app:
build: ./app
image: monica-app
container_name: monica
env_file: .env
volumes:
- data:/var/www/html/storage
restart: unless-stopped
cron:
build: ./app
image: monica-app
command: cron.sh
env_file: .env
restart: unless-stopped
volumes:
- data:/var/www/html/storage
queue:
build: ./app
image: monica-app
command: queue.sh
env_file: .env
restart: unless-stopped
volumes:
- data:/var/www/html/storage
web:
build: ./web
image: monica-web
restart: unless-stopped
environment:
- VIRTUAL_HOST=monica.niefelheim.com
- LETSENCRYPT_HOST=monica.niefelheim.com
- LETSENCRYPT_EMAIL=mattias.granlund@gmail.com
volumes:
- data:/var/www/html/storage:ro
networks:
- proxy-tier
- default
volumes:
data:
mysql:
certs:
vhost.d:
html:
networks:
proxy-tier:
name: bridge
external: true
network_mode: "bridge"
environment:
- NODE_ENV=development
- APP_URL=localhost
- DB_CONNECTION=mysql
- DB_HOST=127.0.0.1
- DB_PORT=3306
- DB_USER=ferdi
- DB_PASSWORD=WUfkwibeLCBikho7
- DB_DATABASE=ferdi
- DB_SSL=false
- MAIL_CONNECTION=smtp
- SMTP_HOST=127.0.0.1
- SMTP_PORT=2525
- MAIL_SSL=false
- MAIL_USERNAME=ferdi
- MAIL_PASSWORD=yncMdnlA4nopNkzkqXO62fa0ry0=
- MAIL_SENDER=ferdi@niefelheim.com
- IS_CREATION_ENABLED=true
- IS_DASHBOARD_ENABLED=true
- IS_REGISTRATION_ENABLED=true
- CONNECT_WITH_FRANZ=true
- DATA_DIR=/data
- TZ=Etc/UTC
- PUID=117
- PGID=979
volumes:
- /opt/docker/monica:/var/www/html/storage
- /run/mysqld/mysqld.sock:/var/run/mysqld.sock
ports:
- 80:8456
restart: unless-stopped


@ -0,0 +1,49 @@
version: "3.9"
services:
monica:
image: lscr.io/linuxserver/monica:latest
container_name: monica
network_mode: bridge
ports:
- 8456:80
environment:
- PUID=82
- PGID=82
- TZ=Europe/Stockholm
- DB_HOST=niefelheim.com
- DB_PORT=3306
- DB_USERNAME=netshade
- DB_PASSWORD=mrmd87
- DB_DATABASE=monica
- DB_UNIX_SOCKET=/var/run/mysqld/mysqld.sock
- DB_USE_UTF8MB4=true
- MAIL_FROM_NAME="Ratatosk, Niefelheims messenger."
- MAIL_FROM_ADDRESS=ratatosk@niefelheim.com
- MAIL_HOST=niefelheim.com
- MAIL_MAILER=smtp
- MAIL_PASSWORD=4VeN6UPmdNWrXY
- MAIL_PORT=25
- MAIL_USERNAME=ratatosk@niefelheim.com
- MFA_ENABLED=true
- GPG_KEYS=528995BFEDFBA7191D46839EF9BA0ADA31CBD89E 39B641343D8C104B2B146DC3F9C39DC0B9698544 F1F692238FBC1666E5A5CCD4199F9DFEF6FFBAFD
- DAV_ENABLED=true
- APP_KEY=base64:FAh3gDuGgAtMNarC1+xPpmlz8scUOsO5UfxTZy7RYts=
- ENABLE_GEOLOCATION=true
- LOCATION_IQ_API_KEY=pk.d622d5801094eb0bfd1333b4a812f19b
- ENABLE_WEATHER=true
- APP_URL=http://localhost
- TRUSTED_PROXIES=172.17.0.1,192.168.1.125
- APP_ENV=production
- APP_DISABLE_SIGNUP=true
- REDIS_HOST=192.168.1.125
- REDIS_PASSWORD=XF5DiPZD
- CACHE_DRIVER=redis
- SESSION_DRIVER=redis
- QUEUE_CONNECTION=sync
volumes:
- /run/mysqld/mysqld.sock:/var/run/mysqld/mysqld.sock
- monica:/var/www/html/storage
volumes:
monica:
external: true
name: monica


@ -0,0 +1,39 @@
version: "3.9"
services:
app:
image: monica:fpm
container_name: monica_fpm
network_mode: bridge
environment:
- APP_ENV=production
- APP_KEY=base64:FAh3gDuGgAtMNarC1+xPpmlz8scUOsO5UfxTZy7RYts=
- APP_TRUSTED_PROXIES=172.17.0.1,192.168.1.125
- DB_HOST=192.168.1.125
- DB_DATABASE=monica
- DB_USERNAME=netshade
- DB_PASSWORD=mrmd87
- LOG_CHANNEL=stderr
- CACHE_DRIVER=database
- SESSION_DRIVER=database
- QUEUE_DRIVER=sync
volumes:
- monica:/var/www/html/storage
restart: unless-stopped
web:
build: ./web
container_name: monica_web
network_mode: bridge
ports:
- 8456:80
depends_on:
- app
volumes:
- monica:/var/www/html/storage:ro
restart: unless-stopped
volumes:
monica:
name: monica
driver: local


@ -0,0 +1,9 @@
FROM monica:fpm AS monica
FROM nginx:alpine
COPY nginx.conf /etc/nginx/nginx.conf
# Copy content of monica image
COPY --from=monica /var/www/html /var/www/html
RUN ln -sf /var/www/html/storage/app/public /var/www/html/public/storage
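
Note: this is the image that the `build: ./web` entries in the monica compose files resolve to; nginx only gets the static files copied out of the monica:fpm image, while PHP keeps running in the separate fpm container. Built on its own (assuming it sits next to nginx.conf in compose/pim/web):
docker compose build web          # from the directory holding the compose file
docker build -t monica-web compose/pim/web   # or directly from the repo root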

161
compose/pim/web/nginx.conf Normal file

@ -0,0 +1,161 @@
worker_processes 1;
error_log /var/log/nginx/error.log warn;
pid /var/run/nginx.pid;
events {
worker_connections 1024;
}
http {
include /etc/nginx/mime.types;
default_type application/octet-stream;
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for"';
access_log /var/log/nginx/access.log main;
sendfile on;
#tcp_nopush on;
keepalive_timeout 65;
set_real_ip_from 10.0.0.0/8;
set_real_ip_from 172.16.0.0/12;
set_real_ip_from 192.168.0.0/16;
real_ip_header X-Real-IP;
# Connect to app service
upstream php-handler {
server app:9000;
}
server {
listen 80;
server_name monica;
## HSTS ##
# Add the 'Strict-Transport-Security' headers to enable HSTS protocol.
# WARNING: Only add the preload option once you read about the consequences: https://hstspreload.org/.
# This form will add the domain to a hardcoded list that is shipped in all major browsers and getting
# removed from this list could take several months.
#
#add_header Strict-Transport-Security "max-age=31536000; includeSubDomains; preload;" always;
add_header Referrer-Policy "no-referrer" always;
add_header X-Content-Type-Options "nosniff" always;
add_header X-Download-Options "noopen" always;
add_header X-Frame-Options "SAMEORIGIN" always;
add_header X-Permitted-Cross-Domain-Policies "none" always;
add_header X-Robots-Tag "none" always;
add_header X-XSS-Protection "1; mode=block" always;
# Remove X-Powered-By, which is an information leak
fastcgi_hide_header X-Powered-By;
root /var/www/html/public;
index index.html index.htm index.php;
charset utf-8;
location / {
try_files $uri $uri/ /index.php?$query_string;
}
location ~ ^/(?:robots.txt|security.txt) {
allow all;
log_not_found off;
access_log off;
}
error_page 404 500 502 503 504 /index.php;
location ~ /\.well-known/(?:carddav|caldav) {
return 301 $scheme://$host/dav;
}
location = /.well-known/security.txt {
return 301 $scheme://$host/security.txt;
}
location ~ /\.(?!well-known).* {
deny all;
}
# set max upload size
client_max_body_size 10G;
fastcgi_buffers 64 4K;
# Enable gzip but do not remove ETag headers
gzip on;
gzip_vary on;
gzip_comp_level 4;
gzip_min_length 256;
gzip_proxied expired no-cache no-store private no_last_modified no_etag auth;
gzip_types application/atom+xml application/javascript application/json application/ld+json application/manifest+json application/rss+xml application/vnd.geo+json application/vnd.ms-fontobject application/x-font-ttf application/x-web-app-manifest+json application/xhtml+xml application/xml font/opentype image/bmp image/svg+xml image/x-icon text/cache-manifest text/css text/plain text/vcard text/vnd.rim.location.xloc text/vtt text/x-component text/x-cross-domain-policy;
# Uncomment if your server is built with the ngx_pagespeed module
# This module is currently not supported.
#pagespeed off;
location ~ \.php(/|$) {
# regex to split $uri to $fastcgi_script_name and $fastcgi_path
fastcgi_split_path_info ^(.+?\.php)(/.*)$;
# Check that the PHP script exists before passing it
try_files $fastcgi_script_name =404;
fastcgi_pass php-handler;
fastcgi_index index.php;
include fastcgi_params;
fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name;
# Bypass the fact that try_files resets $fastcgi_path_info
# see: http://trac.nginx.org/nginx/ticket/321
set $path_info $fastcgi_path_info;
fastcgi_param PATH_INFO $path_info;
}
# Adding the cache control header for js and css files
# Make sure it is BELOW the PHP block
location ~ \.(?:css|js|woff2?|svg|gif|json)$ {
try_files $uri /index.php$request_uri;
add_header Cache-Control "public, max-age=15778463";
## HSTS ##
# Add the 'Strict-Transport-Security' headers to enable HSTS protocol.
# Note it is intended to have those duplicated to the ones above.
# WARNING: Only add the preload option once you read about the consequences: https://hstspreload.org/.
# This form will add the domain to a hardcoded list that is shipped in all major browsers and getting
# removed from this list could take several months.
#
#add_header Strict-Transport-Security "max-age=31536000; includeSubDomains; preload;" always;
add_header Referrer-Policy "no-referrer" always;
add_header X-Content-Type-Options "nosniff" always;
add_header X-Download-Options "noopen" always;
add_header X-Frame-Options "SAMEORIGIN" always;
add_header X-Permitted-Cross-Domain-Policies "none" always;
add_header X-Robots-Tag "none" always;
add_header X-XSS-Protection "1; mode=block" always;
# Optional: Don't log access to assets
access_log off;
}
location ~ \.(?:png|html|ttf|ico|jpg|jpeg)$ {
try_files $uri /index.php$request_uri;
# Optional: Don't log access to assets
access_log off;
}
# deny access to .htaccess files
location ~ /\.ht {
deny all;
}
}
}
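
Note: the upstream block hands PHP requests to app:9000, which assumes the fpm container is reachable as `app` on the same Docker network as the web container. The file can be syntax-checked in place before a reload, e.g.:
docker exec monica_web nginx -t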

1
compose/prowlarr-up.sh Executable file

@ -0,0 +1 @@
COMPOSE_HTTP_TIMEOUT=120 docker-compose --verbose --log-level info up -d prowlarr

1
compose/radarr-up.sh Executable file

@ -0,0 +1 @@
COMPOSE_HTTP_TIMEOUT=120 docker-compose --verbose --log-level info up -d radarr

1
compose/readarr-up.sh Executable file

@ -0,0 +1 @@
COMPOSE_HTTP_TIMEOUT=120 docker-compose --verbose --log-level info up -d readarr


@ -0,0 +1,38 @@
version: "3.7"
services:
ferdi-server:
image: getferdi/ferdi-server
container_name: ferdi-server
network_mode: "bridge"
environment:
- NODE_ENV=development
- APP_URL=localhost
- DB_CONNECTION=mysql
- DB_HOST=127.0.0.1
- DB_PORT=3306
- DB_USER=ferdi
- DB_PASSWORD=WUfkwibeLCBikho7
- DB_DATABASE=ferdi
- DB_SSL=false
- MAIL_CONNECTION=smtp
- SMTP_HOST=127.0.0.1
- SMTP_PORT=2525
- MAIL_SSL=false
- MAIL_USERNAME=ferdi
- MAIL_PASSWORD=yncMdnlA4nopNkzkqXO62fa0ry0=
- MAIL_SENDER=ferdi@niefelheim.com
- IS_CREATION_ENABLED=true
- IS_DASHBOARD_ENABLED=true
- IS_REGISTRATION_ENABLED=true
- CONNECT_WITH_FRANZ=true
- DATA_DIR=/data
- TZ=Etc/UTC
- PUID=117
- PGID=979
volumes:
- /opt/ferdi/data:/data
- /opt/ferdi/recipes:/app/recipes
ports:
- 3333:3333
restart: unless-stopped

1
compose/sonarr-up.sh Executable file

@ -0,0 +1 @@
COMPOSE_HTTP_TIMEOUT=120 docker-compose --verbose --log-level info up -d sonarr


@ -0,0 +1,49 @@
version: "3"
services:
dns-server:
container_name: dns-server
hostname: dns-server
image: technitium/dns-server:latest
# For DHCP deployments, use "host" network mode and remove all the port mappings, including the ports array by commenting them
# network_mode: "host"
ports:
- "5380:5380/tcp" #DNS web console (HTTP)
- "53443:53443/tcp" #DNS web console (HTTPS)
- "53:53/udp" #DNS service
- "53:53/tcp" #DNS service
- "853:853/udp" #DNS-over-QUIC service
- "853:853/tcp" #DNS-over-TLS service
# - "443:443/udp" #DNS-over-HTTPS service (HTTP/3)
# - "443:443/tcp" #DNS-over-HTTPS service (HTTP/1.1, HTTP/2)
# - "80:80/tcp" #DNS-over-HTTP service (use with reverse proxy or certbot certificate renewal)
# - "8053:8053/tcp" #DNS-over-HTTP service (use with reverse proxy)
# - "67:67/udp" #DHCP service
environment:
- DNS_SERVER_DOMAIN=niefelheim.com #The primary domain name used by this DNS Server to identify itself.
# - DNS_SERVER_ADMIN_PASSWORD=password #DNS web console admin user password.
# - DNS_SERVER_ADMIN_PASSWORD_FILE=password.txt #The path to a file that contains a plain text password for the DNS web console admin user.
# - DNS_SERVER_PREFER_IPV6=false #DNS Server will use IPv6 for querying whenever possible with this option enabled.
# - DNS_SERVER_WEB_SERVICE_HTTP_PORT=5380 #The TCP port number for the DNS web console over HTTP protocol.
# - DNS_SERVER_WEB_SERVICE_HTTPS_PORT=53443 #The TCP port number for the DNS web console over HTTPS protocol.
# - DNS_SERVER_WEB_SERVICE_ENABLE_HTTPS=false #Enables HTTPS for the DNS web console.
# - DNS_SERVER_WEB_SERVICE_USE_SELF_SIGNED_CERT=false #Enables self signed TLS certificate for the DNS web console.
# - DNS_SERVER_OPTIONAL_PROTOCOL_DNS_OVER_HTTP=false #Enables DNS server optional protocol DNS-over-HTTP on TCP port 8053 to be used with a TLS terminating reverse proxy like nginx.
# - DNS_SERVER_RECURSION=AllowOnlyForPrivateNetworks #Recursion options: Allow, Deny, AllowOnlyForPrivateNetworks, UseSpecifiedNetworks.
# - DNS_SERVER_RECURSION_DENIED_NETWORKS=1.1.1.0/24 #Comma separated list of IP addresses or network addresses to deny recursion. Valid only for `UseSpecifiedNetworks` recursion option.
# - DNS_SERVER_RECURSION_ALLOWED_NETWORKS=127.0.0.1, 192.168.1.0/24 #Comma separated list of IP addresses or network addresses to allow recursion. Valid only for `UseSpecifiedNetworks` recursion option.
# - DNS_SERVER_ENABLE_BLOCKING=false #Sets the DNS server to block domain names using Blocked Zone and Block List Zone.
# - DNS_SERVER_ALLOW_TXT_BLOCKING_REPORT=false #Specifies if the DNS Server should respond with TXT records containing a blocked domain report for TXT type requests.
# - DNS_SERVER_BLOCK_LIST_URLS= #A comma separated list of block list URLs.
# - DNS_SERVER_FORWARDERS=1.1.1.1, 8.8.8.8 #Comma separated list of forwarder addresses.
# - DNS_SERVER_FORWARDER_PROTOCOL=Tcp #Forwarder protocol options: Udp, Tcp, Tls, Https, HttpsJson.
# - DNS_SERVER_LOG_USING_LOCAL_TIME=true #Enable this option to use local time instead of UTC for logging.
volumes:
- config:/etc/dns
- /data/logs/dns:/logs
- /etc/ssl/technitium:/etc/ssl/dns
restart: unless-stopped
sysctls:
- net.ipv4.ip_local_port_range=1024 65000
volumes:
config:
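
Note: with the mappings above, plain DNS is exposed on 53/udp+tcp and the web console on 5380. A quick functional check from the host, assuming dig and curl are installed:
dig @127.0.0.1 niefelheim.com
curl -I http://127.0.0.1:5380/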


@ -0,0 +1,14 @@
version: '3'
services:
watchtower:
image: containrrr/watchtower
container_name: watchtower
network_mode: bridge
environment:
- TZ="Europe/Stockholm"
- WATCHTOWER_CLEANUP=true
- WATCHTOWER_DISABLE_CONTAINERS=portainer-ee mc_bedrock
volumes:
- /var/run/docker.sock:/var/run/docker.sock
- /etc/localtime:/etc/localtime:ro
restart: unless-stopped
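A hedged way to verify the exclusions take effect is to tail Watchtower's own log and check which containers it scans (the excluded ones should not appear):
docker logs -f watchtower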

View File

@ -0,0 +1,134 @@
version: "3.7"
services:
bazarr:
container_name: bazarr
image: ghcr.io/hotio/bazarr:latest
network_mode: bridge
restart: unless-stopped
logging:
driver: json-file
ports:
- "6767:6767"
environment:
- PUID=992
- PGID=979
- UMASK=002
- TZ=Europe/Stockholm
volumes:
- "bazarr:/config"
- "/etc/localtime:/etc/localtime:ro"
- "/var/cloud/Media/Film:/media/movies"
- "/var/cloud/Media/Serier:/media/tv"
- "/var/cloud/Media/Trashcan:/media/trash"
- "/var/cloud/transmission/downloads:/data/torrents"
lidarr:
container_name: lidarr
image: ghcr.io/hotio/lidarr:latest
network_mode: bridge
restart: unless-stopped
logging:
driver: json-file
ports:
- "8686:8686"
environment:
- PUID=993
- PGID=979
- UMASK=002
- TZ=Europe/Stockholm
volumes:
- "lidarr:/config"
- "/etc/localtime:/etc/localtime:ro"
- "/var/cloud/Media/Mp3:/media/music"
- "/var/cloud/Media/Trashcan:/media/trash"
- "/var/cloud/transmission/downloads:/data/torrents"
prowlarr:
container_name: prowlarr
image: ghcr.io/hotio/prowlarr:latest
network_mode: bridge
restart: unless-stopped
logging:
driver: json-file
ports:
- "9696:9696"
environment:
- PUID=988
- PGID=979
- UMASK=002
- TZ=Europe/Stockholm
volumes:
- "prowlarr:/config"
- "/etc/localtime:/etc/localtime:ro"
radarr:
container_name: radarr
image: ghcr.io/hotio/radarr:latest
network_mode: bridge
restart: unless-stopped
logging:
driver: json-file
ports:
- "7878:7878"
environment:
- PUID=990
- PGID=979
- UMASK=002
- TZ=Europe/Stockholm
volumes:
- "radarr:/config"
- "/etc/localtime:/etc/localtime:ro"
- "/var/cloud/transmission/downloads:/data/torrents"
- "/var/cloud/Media/Film:/media/movies"
- "/var/cloud/Media/Kids:/media/movies2"
- "/var/cloud/Media/Trashcan/Movies:/media/trash"
readarr:
container_name: readarr
image: ghcr.io/hotio/readarr:latest
network_mode: bridge
restart: unless-stopped
logging:
driver: json-file
ports:
- "8787:8787"
environment:
- PUID=991
- PGID=979
- UMASK=002
- TZ=Europe/Stockholm
volumes:
- "readarr:/config"
- "/etc/localtime:/etc/localtime:ro"
- "/var/cloud/transmission/downloads:/data/torrents"
- "/var/cloud/Media/Books:/media/books"
- "/var/cloud/Media/Trashcan/Books:/media/trash"
sonarr:
container_name: sonarr
image: ghcr.io/hotio/sonarr:latest
network_mode: bridge
restart: unless-stopped
logging:
driver: json-file
ports:
- "8989:8989"
environment:
- PUID=989
- PGID=979
- UMASK=002
- TZ=Europe/Stockholm
volumes:
- "sonarr:/config"
- "/etc/localtime:/etc/localtime:ro"
- "/var/cloud/Media/Serier:/media/tv"
- "/var/cloud/transmission/downloads:/data/torrents"
- "/var/cloud/Media/Trashcan/Series:/media/trash"
volumes:
bazarr:
driver: local
lidarr:
driver: local
prowlarr:
driver: local
radarr:
driver: local
readarr:
driver: local
sonarr:
driver: local
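A minimal start-up sketch for this suite, reusing the longer HTTP timeout from compose/sonarr-up.sh; the compose file name in this folder is an assumption:
COMPOSE_HTTP_TIMEOUT=120 docker-compose up -d
docker ps --format 'table {{.Names}}\t{{.Status}}\t{{.Ports}}'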

View File

@ -0,0 +1,327 @@
version: "3.6"
services:
# image used to index torrent links from the internet
prowlarr:
image: linuxserver/prowlarr:latest
container_name: arr-suite-prowlarr
deploy:
placement:
constraints: [node.labels.media_disk == true ]
environment:
- PUID=988
- PGID=979
- UMASK=002
- TZ=Europe/Stockholm
volumes:
- "prowlarr:/config"
- "/etc/localtime:/etc/localtime:ro"
#- ${DOCKER_VOLUME_STORAGE:-/mnt/docker-volumes}/arr-suite/configs/prowlarr:/config # database and Prowlarr configs
expose:
- 9696/tcp # web ui
ports:
- 9696:9696/tcp # web ui
restart: unless-stopped
networks:
- traefik-public
labels:
- traefik.enable=true
- traefik.docker.network=traefik-public
- traefik.constraint-label=traefik-public
- "traefik.http.routers.arr-prowlarr.rule=(Host(`niefelheim.com`) && Path(`/prowlarr`))"
- traefik.http.services.arr-prowlarr.loadbalancer.server.port=9696
#- traefik.http.routers.prowlarr.entrypoints=web
#- traefik.http.routers.prowlarr_https.entrypoints=web-secure
- 'traefik.http.routers.arr-prowlarr_https.rule=(Host(`niefelheim.com`) && Path(`/prowlarr`))'
- traefik.http.routers.arr-prowlarr_https.tls=true
- traefik.http.routers.arr-prowlarr_https.tls.certresolver=le
# # Optional part for traefik middlewares
# - traefik.http.routers.prowlarr.middlewares=local-ipwhitelist@file
# image used to scan for tv shows
sonarr:
image: linuxserver/sonarr:latest
container_name: arr-suite-sonarr
deploy:
placement:
constraints: [node.labels.media_disk == true ]
environment:
- PUID=989
- PGID=979
- TZ=Europe/Stockholm
- UMASK=002
volumes:
- "sonarr:/config"
- "/etc/localtime:/etc/localtime:ro"
- "/var/cloud/Media/Serier:/media/tv"
- "/var/cloud/transmission/downloads:/data/torrents"
- "/var/cloud/Media/Trashcan/Series:/media/trash"
#- ${DOCKER_VOLUME_STORAGE:-/mnt/docker-volumes}/arr-suite/configs/sonarr:/config # database and Radarr configs
#- ${DOCKER_VOLUME_STORAGE:-/mnt/docker-volumes}/arr-suite/media:/media # location of media and qbittorrent download folder
expose:
- 8989/tcp # web ui
ports:
- 8989:8989/tcp # web ui
restart: unless-stopped
networks:
- traefik-public
labels:
- traefik.enable=true
- traefik.docker.network=traefik-public
- traefik.constraint-label=traefik-public
- "traefik.http.routers.sonarr.rule=(Host(`niefelheim.com`) && Path(`/sonarr`))"
- traefik.http.services.sonarr.loadbalancer.server.port=8989
# # Optional part for traefik middlewares
# - traefik.http.routers.sonarr.middlewares=local-ipwhitelist@file
# image used to scan for movies
radarr:
image: linuxserver/radarr:latest
container_name: arr-suite-radarr
deploy:
placement:
constraints: [node.labels.media_disk == true ]
environment:
- PUID=990
- PGID=979
- UMASK=002
- TZ=Europe/Stockholm
volumes:
- "radarr:/config"
- "/etc/localtime:/etc/localtime:ro"
- "/var/cloud/transmission/downloads:/data/torrents"
- "/var/cloud/Media/Film:/media/movies"
- "/var/cloud/Media/Kids:/media/movies2"
- "/var/cloud/Media/Trashcan/Movies:/media/trash"
#- ${DOCKER_VOLUME_STORAGE:-/mnt/docker-volumes}/arr-suite/configs/radarr:/config
#- ${DOCKER_VOLUME_STORAGE:-/mnt/docker-volumes}/arr-suite/media:/media # location of media and qbittorrent download folder
expose:
- 7878/tcp # web ui
ports:
- 7878:7878/tcp # web ui
restart: unless-stopped
networks:
- traefik-public
labels:
- traefik.enable=true
- traefik.docker.network=traefik-public
- "traefik.http.routers.radarr.rule=(Host(`niefelheim.com`) && Path(`/radarr`))"
- traefik.constraint-label=traefik-public
- traefik.http.services.radarr.loadbalancer.server.port=7878
# # Optional part for traefik middlewares
# - traefik.http.routers.radarr.middlewares=local-ipwhitelist@file
# image used to scan for music
lidarr:
image: linuxserver/lidarr:latest
container_name: arr-suite-lidarr
deploy:
placement:
constraints: [node.labels.media_disk == true ]
environment:
- PUID=993
- PGID=979
- TZ=Europe/Stockholm
volumes:
- "lidarr:/config"
- "/etc/localtime:/etc/localtime:ro"
- "/var/cloud/Media/Mp3:/media/music"
- "/var/cloud/Media/Trashcan:/media/trash"
- "/var/cloud/transmission/downloads:/data/torrents"
#- ${DOCKER_VOLUME_STORAGE:-/mnt/docker-volumes}/arr-suite/configs/lidarr:/config
#- ${DOCKER_VOLUME_STORAGE:-/mnt/docker-volumes}/arr-suite/media:/media # location of media and qbittorrent download folder
expose:
- 8686/tcp # web ui
ports:
- 8686:8686/tcp # web ui
restart: unless-stopped
networks:
- traefik-public
labels:
- traefik.enable=true
- traefik.docker.network=traefik-public
- "traefik.http.routers.lidarr.rule=(Host(`niefelheim.com`) && Path(`/lidarr`))"
- traefik.constraint-label=traefik-public
- traefik.http.services.lidarr.loadbalancer.server.port=8686
# # Optional part for traefik middlewares
# - traefik.http.routers.lidarr.middlewares=local-ipwhitelist@file
# image used to scan for books
readarr:
image: linuxserver/readarr:develop
container_name: arr-suite-readarr
deploy:
placement:
constraints: [node.labels.media_disk == true ]
environment:
- PUID=991
- PGID=979
- TZ=Europe/Stockholm
volumes:
- "readarr:/config"
- "/etc/localtime:/etc/localtime:ro"
- "/var/cloud/transmission/downloads:/data/torrents"
- "/var/cloud/Media/Books:/media/books"
- "/var/cloud/Media/Trashcan/Books:/media/trash"
#- ${DOCKER_VOLUME_STORAGE:-/mnt/docker-volumes}/arr-suite/configs/readarr:/config
#- ${DOCKER_VOLUME_STORAGE:-/mnt/docker-volumes}/arr-suite/media:/media # location of media and qbittorrent download folder
expose:
- 8787/tcp # web ui
ports:
- 8787:8787/tcp # web ui
restart: unless-stopped
networks:
- traefik-public
labels:
- traefik.enable=true
- traefik.docker.network=traefik-public
- traefik.constraint-label=traefik-public
- "traefik.http.routers.readarr.rule=(Host(`niefelheim.com`) && Path(`/readarr`))"
- traefik.http.services.readarr.loadbalancer.server.port=8787
# # Optional part for traefik middlewares
# - traefik.http.routers.readarr.middlewares=local-ipwhitelist@file
# image used to bypass cloudflare for prowlarr
#flaresolverr:
#image: flaresolverr/flaresolverr:latest
# container_name: arr-suite-flaresolverr
# environment:
# - LOG_LEVEL=info
# - LOG_HTML=false
# - CAPTCHA_SOLVER=none
# - TZ=Europe/Stockholm
# expose:
# - 8191/tcp # listening port for selenium
# restart: unless-stopped
#networks:
# - traefik-public
# image used for vpn killswitch network
# gluetun:
# image: qmcgaw/gluetun:latest
# container_name: arr-suite-gluetun
# cap_add:
# - NET_ADMIN
# ports:
# - 8080:8080 # qbittorrent http web ui
# environment:
# see https://github.com/qdm12/gluetun-wiki for more details
# example envs based on https://github.com/qdm12/gluetun-wiki/blob/main/setup/providers/mullvad.md
# - VPN_SERVICE_PROVIDER=mullvad # define the vpn provider
#- VPN_TYPE=wireguard # define the vpn protocol to use
#- WIREGUARD_PRIVATE_KEY=wOEI9rqqbDwnN8/Bpp22sVz48T71vJ4fYmFWujulwUU= # define your wireguard private key here
#- WIREGUARD_ADDRESSES=10.64.222.21/32 # define the ipv4 vpn network subnet here
#volumes:
#- ${DOCKER_VOLUME_STORAGE:-/mnt/docker-volumes}/arr-suite/configs/gluetun:/gluetun
#restart: unless-stopped
#networks:
# - traefik-public
#labels:
# - traefik.enable=true
# - traefik.docker.network=traefik-public
# - traefik.http.routers.qbittorrent.rule=Host(`qbittorrent.example.com`)
# - traefik.http.services.qbittorrent.loadbalancer.server.port=8080
# # Optional part for traefik middlewares
# - traefik.http.routers.qbittorrent.middlewares=local-ipwhitelist@file
# image used to download stuff; run over gluetun network (vpn killswitch)
# qbittorrent:
# image: linuxserver/qbittorrent:latest
# container_name: arr-suite-qbittorrent
# environment:
# - PUID=1000
# - PGID=1000
# - TZ=Europe/Stockholm
# - WEBUI_PORT=8080
# volumes:
# - ${DOCKER_VOLUME_STORAGE:-/mnt/docker-volumes}/arr-suite/configs/qbittorrent:/config
# - ${DOCKER_VOLUME_STORAGE:-/mnt/docker-volumes}/arr-suite/media/downloads:/media/downloads
# depends_on:
# - gluetun
# network_mode: container:arr-suite-gluetun # use the gluetun container network (vpn killswitch)
# restart: unless-stopped
# image used to manage media and stream it
#emby:
# image: linuxserver/emby:latest
# container_name: arr-suite-emby
# environment:
# - PUID=1000
# - PGID=1000
# - TZ=Europe/Stockholm
# volumes:
# - ${DOCKER_VOLUME_STORAGE:-/mnt/docker-volumes}/arr-suite/configs/emby:/config # emby data storage location; can grow very large
# - ${DOCKER_VOLUME_STORAGE:-/mnt/docker-volumes}/arr-suite/media:/data # media goes here
# #- ${DOCKER_VOLUME_STORAGE:-/mnt/docker-volumes}/arr-suite/configs/emby/lib:/opt/vc/lib # optional; path for Raspberry Pi OpenMAX libs
# expose:
# - 8096/tcp # http web ui
# - 8920/tcp # https web ui
# ports:
# - 8096:8096/tcp # http web ui
#devices:
# - /dev/dri:/dev/dri #optional
# - /dev/vchiq:/dev/vchiq #optional
# - /dev/video10:/dev/video10 #optional
# - /dev/video11:/dev/video11 #optional
# - /dev/video12:/dev/video12 #optional
# restart: unless-stopped
#networks:
# - traefik-public
#labels:
# - traefik.enable=true
# - traefik.docker.network=traefik-public
# - traefik.http.routers.emby.rule=Host(`emby.example.com`)
# - traefik.http.services.emby.loadbalancer.server.port=8096
# # Optional part for traefik middlewares
# - traefik.http.routers.emby.middlewares=local-ipwhitelist@file
# image used to manage media and stream it
#jellyfin:
# image: linuxserver/jellyfin:latest
# container_name: arr-suite-jellyfin
# environment:
# - PUID=1000
# - PGID=1000
# - TZ=Europe/Stockholm
# volumes:
# - ${DOCKER_VOLUME_STORAGE:-/mnt/docker-volumes}/arr-suite/configs/jellyfin:/config # emby data storage location; can grow very large
# - ${DOCKER_VOLUME_STORAGE:-/mnt/docker-volumes}/arr-suite/media:/data # media goes here
# #- ${DOCKER_VOLUME_STORAGE:-/mnt/docker-volumes}/arr-suite/configs/jellyfin/lib:/opt/vc/lib # optional; path for Raspberry Pi OpenMAX libs
# expose:
# - 8096/tcp # http web ui
# ports:
# - 8096:8096/tcp # http web ui
# #devices:
# # - /dev/dri:/dev/dri #optional
# # - /dev/vchiq:/dev/vchiq #optional
# # - /dev/video10:/dev/video10 #optional
# # - /dev/video11:/dev/video11 #optional
# # - /dev/video12:/dev/video12 #optional
# restart: unless-stopped
# #networks:
# # - traefik-public
# #labels:
# # - traefik.enable=true
# # - traefik.docker.network=traefik-public
# # - traefik.http.routers.jellyfin.rule=Host(`jellyfin.example.com`)
# # - traefik.http.services.jellyfin.loadbalancer.server.port=8096
# # # Optional part for traefik middlewares
# # - traefik.http.routers.jellyfin.middlewares=local-ipwhitelist@file
networks:
traefik-public:
external: true
volumes:
#bazarr:
# driver: local
lidarr:
driver: local
prowlarr:
driver: local
readarr:
driver: local
radarr:
driver: local
sonarr:
driver: local
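Because this variant uses deploy.placement and the external traefik-public network, it is meant for Swarm rather than plain docker-compose. A hedged deployment sketch, where the node name, stack name and file name are assumptions:
docker network create --driver overlay traefik-public         # once, if it does not exist yet
docker node update --label-add media_disk=true <media-node>   # satisfies the placement constraint
docker stack deploy -c docker-compose.yml arr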

8
swarm/bedrock/deploy.sh Executable file
View File

@ -0,0 +1,8 @@
#!/bin/sh
SCRIPT_DIR=$(dirname -- "$0")
mkdir -p /data/bedrock/server
mkdir -p /data/backup/bedrock
docker stack deploy -c "$SCRIPT_DIR/docker-compose.yml" mc

View File

@ -0,0 +1,140 @@
version: '3.8'
services:
bedrock:
image: itzg/minecraft-bedrock-server
environment:
# docker image properties
# - UID=485
# - GID=485
- TZ=Europe/Stockholm
- EULA=TRUE
- VERSION=PREVIEW
# Minecraft server.properties parameters
# https://minecraft.fandom.com/wiki/Server.properties#Bedrock_Edition_3
- GAMEMODE=survival
- DIFFICULTY=normal
- "SERVER_NAME=Niefelheim Bedrock server"
- SERVER_PORT=19132
- SERVER_PORT_V6=19131
- ALLOW_CHEATS=true
- ONLINE_MODE=true
- LEVEL_NAME=Dungen
#- OPS=2533274968313137
- OPS=2533274968313137,2535421315133378,2535433533727392
ports:
- target: 19132
published: 19132
protocol: udp
mode: host
networks:
- host
volumes:
- /etc/localtime:/etc/localtime:ro
- server-data:/data
stdin_open: true
tty: true
deploy:
placement:
constraints: [node.labels.Bedrock == true ]
backup:
image: alpinelinux/docker-cli
entrypoint: "sh -c"
command: |
'
DATE_FMT="%Y-%m-%d_%H-%M-%S"
echo "Stopping bedrock service..."
docker service scale $$SERVICE_NAME=0 > /dev/null
for world_dir in /opt/server/worlds/*; do
world_name=$$(basename "$$world_dir")
now=$$(date +$$DATE_FMT)
target="/opt/backups/$$world_name-$$now.tar.gz"
echo "Backing up '$$world_name' to $$target..."
tar -cpzf "$$target" -C "$$world_dir" .
echo "Cleaning up old versions of '$$world_name'..."
find /opt/backups -name "$$world_name-*.tar.gz" -type f -mtime +14 -print -delete
done
echo "Starting bedrock service..."
docker service scale $$SERVICE_NAME=1 > /dev/null
'
volumes:
- /var/run/docker.sock:/var/run/docker.sock
- server-data:/opt/server
- backups-data:/opt/backups
environment:
# assumes this is deployed to a stack named "mc"
- SERVICE_NAME=mc_bedrock
deploy:
placement:
constraints:
- node.role == manager
restart_policy:
condition: none
replicas: 0 # none by default; created when cron hits
labels:
# backup and restart daily at 6am
- swarm.cronjob.enable=true
- swarm.cronjob.schedule=0 6 * * *
update:
image: alpinelinux/docker-cli
entrypoint: "sh -c"
command: |
'
# force service update, which causes restart
docker service update $$SERVICE_NAME --force
'
volumes:
- /var/run/docker.sock:/var/run/docker.sock
environment:
# assumes this is deployed to a stack named "mc"
- SERVICE_NAME=mc_bedrock
deploy:
placement:
constraints:
- node.role == manager
restart_policy:
condition: none
replicas: 0 # none by default; created when cron hits
labels:
# restart daily at 6am to get latest MC version
- swarm.cronjob.enable=false
- swarm.cronjob.schedule=0 6 * * *
cron:
image: crazymax/swarm-cronjob:1.13.0
environment:
- TZ=Europe/Stockholm
- LOG_LEVEL=info
- LOG_JSON=false
volumes:
- /var/run/docker.sock:/var/run/docker.sock
deploy:
placement:
constraints:
- node.role == manager
replicas: 0 # disabled; already running in lab stack
networks:
host:
external: true
volumes:
server-data:
driver: local
driver_opts:
type: 'none'
o: 'bind'
device: '/data/bedrock/server'
backups-data:
driver: local
driver_opts:
type: 'none'
o: 'bind'
device: '/data/backup/bedrock'
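The backup and update jobs idle at 0 replicas until swarm-cronjob scales them. Assuming the stack was deployed as "mc" (as in swarm/bedrock/deploy.sh), a backup can be triggered by hand like this:
docker service scale mc_backup=1
docker service logs -f mc_backup
docker service scale mc_backup=0   # return the job to its idle state afterwards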

3
swarm/bedrock/mcrcon.sh Executable file
View File

@ -0,0 +1,3 @@
#!/bin/bash
docker exec "$(docker ps --filter 'name=mc_bedrock' --quiet)" /usr/local/bin/send-command "$1"
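Example usage while the mc stack is running on this node; the in-game command is passed as a single quoted argument:
./mcrcon.sh "list"
./mcrcon.sh "say Backup starting in 5 minutes"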

View File

@ -0,0 +1,6 @@
{
"$schema": "https://docs.renovatebot.com/renovate-schema.json";
"extends": [
"config:base"
]
}

View File

@ -0,0 +1,7 @@
#!/bin/sh
SCRIPT_DIR=$(dirname -- "$0")
docker stack rm mc
docker volume rm mc_server-data
docker volume rm mc_backups-data

@ -0,0 +1 @@
Subproject commit cda5d531f49b532c52711eff358436d9c85ea225

@ -0,0 +1 @@
Subproject commit 3784ba79a72e919228c4c0f77935d587c92bd7c2

View File

@ -0,0 +1,18 @@
version: "3"
services:
shepherd:
build: .
image: containrrr/shepherd
environment:
TZ: 'Europe/Stockholm'
SLEEP_TIME: '20m'
FILTER_SERVICES: ''
VERBOSE: 'true'
# UPDATE_OPTIONS: '--update-delay=30s'
# ROLLBACK_OPTIONS: '--rollback-delay=0s'
volumes:
- /var/run/docker.sock:/var/run/docker.sock
deploy:
placement:
constraints:
- node.role == manager
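Shepherd needs a manager node (see the constraint above). A minimal sketch, with the stack and file names assumed:
docker stack deploy -c docker-compose.yml shepherd
docker service logs -f shepherd_shepherd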

View File

@ -0,0 +1,9 @@
version: "3.3"
services:
agile-redis:
image: 127.0.0.1:5000/agile-redis
build:
context: ./
dockerfile: ./agile-redis-Dockerfile.yml
ports:
- "6379:6379"

View File

@ -0,0 +1,9 @@
FROM redis:5
COPY agile-redis-entrypoint.sh /usr/local/bin/
RUN chmod +x /usr/local/bin/agile-redis-entrypoint.sh
VOLUME /etc/redis
ENTRYPOINT ["agile-redis-entrypoint.sh"]

View File

@ -0,0 +1,16 @@
#!/bin/bash
CONF_FILE=/etc/redis/redis.conf
if [ ! -f $CONF_FILE ]
then
echo "appendonly yes" > $CONF_FILE
if [ -n "$REDIS_MASTER_HOST" ]
then
echo "slaveof $REDIS_MASTER_HOST ${REDIS_MASTER_PORT:-6379}" >> $CONF_FILE
fi
chown redis:redis $CONF_FILE
fi
exec docker-entrypoint.sh redis-server /etc/redis/redis.conf

View File

@ -0,0 +1,9 @@
version: "3.3"
services:
agile-redis:
image: 127.0.0.1:5000/agile-redis-sentinel
ports:
- '26379:26379'
build:
context: ./
dockerfile: ./agile-redis-sentinel-Dockerfile.yml

View File

@ -0,0 +1,23 @@
FROM redis:5
EXPOSE 26379
ADD agile-redis-sentinel.conf /etc/redis/sentinel.conf
RUN chown redis:redis /etc/redis/sentinel.conf
ENV REDIS_MASTER_NAME=mymaster \
REDIS_MASTER_HOST=redis-master \
REDIS_MASTER_PORT=6379 \
SENTINEL_QUORUM=2 \
SENTINEL_DOWN_AFTER=30000\
SENTINEL_PARALLEL_SYNC=1 \
SENTINEL_FAILOVER_TIMEOUT=180000
COPY agile-redis-sentinel-entrypoint.sh /usr/local/bin/
RUN chmod +x /usr/local/bin/agile-redis-sentinel-entrypoint.sh
VOLUME /etc/redis
ENTRYPOINT ["agile-redis-sentinel-entrypoint.sh"]

View File

@ -0,0 +1,9 @@
#!/bin/sh
sed -i "s/\$REDIS_MASTER_NAME/$REDIS_MASTER_NAME/g" /etc/redis/sentinel.conf
sed -i "s/\$REDIS_MASTER_HOST/$REDIS_MASTER_HOST/g" /etc/redis/sentinel.conf
sed -i "s/\$REDIS_MASTER_PORT/$REDIS_MASTER_PORT/g" /etc/redis/sentinel.conf
sed -i "s/\$SENTINEL_QUORUM/$SENTINEL_QUORUM/g" /etc/redis/sentinel.conf
sed -i "s/\$SENTINEL_DOWN_AFTER/$SENTINEL_DOWN_AFTER/g" /etc/redis/sentinel.conf
sed -i "s/\$SENTINEL_PARALLEL_SYNC/$SENTINEL_PARALLEL_SYNC/g" /etc/redis/sentinel.conf
sed -i "s/\$SENTINEL_FAILOVER_TIMEOUT/$SENTINEL_FAILOVER_TIMEOUT/g" /etc/redis/sentinel.conf
exec docker-entrypoint.sh redis-server /etc/redis/sentinel.conf --sentinel

View File

@ -0,0 +1,195 @@
# Example sentinel.conf (from http://download.redis.io/redis-stable/sentinel.conf)
# *** IMPORTANT ***
#
# By default Sentinel will not be reachable from interfaces different than
# localhost, either use the 'bind' directive to bind to a list of network
# interfaces, or disable protected mode with "protected-mode no" by
# adding it to this configuration file.
#
# Before doing that MAKE SURE the instance is protected from the outside
# world via firewalling or other means.
#
# For example you may use one of the following:
#
# bind 127.0.0.1 192.168.1.1
#
# protected-mode no
# port <sentinel-port>
# The port that this sentinel instance will run on
port 26379
# sentinel announce-ip <ip>
# sentinel announce-port <port>
#
# The above two configuration directives are useful in environments where,
# because of NAT, Sentinel is reachable from outside via a non-local address.
#
# When announce-ip is provided, the Sentinel will claim the specified IP address
# in HELLO messages used to gossip its presence, instead of auto-detecting the
# local address as it usually does.
#
# Similarly when announce-port is provided and is valid and non-zero, Sentinel
# will announce the specified TCP port.
#
# The two options don't need to be used together, if only announce-ip is
# provided, the Sentinel will announce the specified IP and the server port
# as specified by the "port" option. If only announce-port is provided, the
# Sentinel will announce the auto-detected local IP and the specified port.
#
# Example:
#
# sentinel announce-ip $SENTINEL_IP
# dir <working-directory>
# Every long running process should have a well-defined working directory.
# For Redis Sentinel to chdir to /tmp at startup is the simplest thing
# for the process to don't interfere with administrative tasks such as
# unmounting filesystems.
dir /tmp
# sentinel monitor <master-name> <ip> <redis-port> <quorum>
#
# Tells Sentinel to monitor this master, and to consider it in O_DOWN
# (Objectively Down) state only if at least <quorum> sentinels agree.
#
# Note that whatever is the ODOWN quorum, a Sentinel will require to
# be elected by the majority of the known Sentinels in order to
# start a failover, so no failover can be performed in minority.
#
# Slaves are auto-discovered, so you don't need to specify slaves in
# any way. Sentinel itself will rewrite this configuration file adding
# the slaves using additional configuration options.
# Also note that the configuration file is rewritten when a
# slave is promoted to master.
#
# Note: master name should not include special characters or spaces.
# The valid charset is A-z 0-9 and the three characters ".-_".
sentinel monitor $REDIS_MASTER_NAME $REDIS_MASTER_HOST $REDIS_MASTER_PORT $SENTINEL_QUORUM
# sentinel auth-pass <master-name> <password>
#
# Set the password to use to authenticate with the master and slaves.
# Useful if there is a password set in the Redis instances to monitor.
#
# Note that the master password is also used for slaves, so it is not
# possible to set a different password in masters and slaves instances
# if you want to be able to monitor these instances with Sentinel.
#
# However you can have Redis instances without the authentication enabled
# mixed with Redis instances requiring the authentication (as long as the
# password set is the same for all the instances requiring the password) as
# the AUTH command will have no effect in Redis instances with authentication
# switched off.
#
# Example:
#
# sentinel auth-pass $REDIS_MASTER_NAME MySUPER--secret-0123passw0rd
# sentinel down-after-milliseconds <master-name> <milliseconds>
#
# Number of milliseconds the master (or any attached slave or sentinel) should
# be unreachable (as in, not acceptable reply to PING, continuously, for the
# specified period) in order to consider it in S_DOWN state (Subjectively
# Down).
#
# Default is 30 seconds.
sentinel down-after-milliseconds $REDIS_MASTER_NAME $SENTINEL_DOWN_AFTER
# sentinel parallel-syncs <master-name> <numslaves>
#
# How many slaves we can reconfigure to point to the new slave simultaneously
# during the failover. Use a low number if you use the slaves to serve query
# to avoid that all the slaves will be unreachable at about the same
# time while performing the synchronization with the master.
sentinel parallel-syncs $REDIS_MASTER_NAME $SENTINEL_PARALLEL_SYNC
# sentinel failover-timeout <master-name> <milliseconds>
#
# Specifies the failover timeout in milliseconds. It is used in many ways:
#
# - The time needed to re-start a failover after a previous failover was
# already tried against the same master by a given Sentinel, is two
# times the failover timeout.
#
# - The time needed for a slave replicating to a wrong master according
# to a Sentinel current configuration, to be forced to replicate
# with the right master, is exactly the failover timeout (counting since
# the moment a Sentinel detected the misconfiguration).
#
# - The time needed to cancel a failover that is already in progress but
# did not produced any configuration change (SLAVEOF NO ONE yet not
# acknowledged by the promoted slave).
#
# - The maximum time a failover in progress waits for all the slaves to be
# reconfigured as slaves of the new master. However even after this time
# the slaves will be reconfigured by the Sentinels anyway, but not with
# the exact parallel-syncs progression as specified.
#
# Default is 3 minutes.
sentinel failover-timeout $REDIS_MASTER_NAME $SENTINEL_FAILOVER_TIMEOUT
# SCRIPTS EXECUTION
#
# sentinel notification-script and sentinel reconfig-script are used in order
# to configure scripts that are called to notify the system administrator
# or to reconfigure clients after a failover. The scripts are executed
# with the following rules for error handling:
#
# If script exits with "1" the execution is retried later (up to a maximum
# number of times currently set to 10).
#
# If script exits with "2" (or an higher value) the script execution is
# not retried.
#
# If script terminates because it receives a signal the behavior is the same
# as exit code 1.
#
# A script has a maximum running time of 60 seconds. After this limit is
# reached the script is terminated with a SIGKILL and the execution retried.
# NOTIFICATION SCRIPT
#
# sentinel notification-script <master-name> <script-path>
#
# Call the specified notification script for any sentinel event that is
# generated in the WARNING level (for instance -sdown, -odown, and so forth).
# This script should notify the system administrator via email, SMS, or any
# other messaging system, that there is something wrong with the monitored
# Redis systems.
#
# The script is called with just two arguments: the first is the event type
# and the second the event description.
#
# The script must exist and be executable in order for sentinel to start if
# this option is provided.
#
# Example:
#
# sentinel notification-script $REDIS_MASTER_NAME /var/redis/notify.sh
# CLIENTS RECONFIGURATION SCRIPT
#
# sentinel client-reconfig-script <master-name> <script-path>
#
# When the master changed because of a failover a script can be called in
# order to perform application-specific tasks to notify the clients that the
# configuration has changed and the master is at a different address.
#
# The following arguments are passed to the script:
#
# <master-name> <role> <state> <from-ip> <from-port> <to-ip> <to-port>
#
# <state> is currently always "failover"
# <role> is either "leader" or "observer"
#
# The arguments from-ip, from-port, to-ip, to-port are used to communicate
# the old address of the master and the new address of the elected slave
# (now a master).
#
# This script should be resistant to multiple invocations.
#
# Example:
#
# sentinel client-reconfig-script $REDIS_MASTER_NAME /var/redis/reconfig.sh

View File

@ -0,0 +1,76 @@
version: "3.3"
services:
redis-sentinel:
image: 127.0.0.1:5000/agile-redis-sentinel
volumes:
- sentinel-conf:/etc/redis
deploy:
mode: global
environment:
- REDIS_MASTER_HOST=${REDIS_MASTER_IP}
- SENTINEL_DOWN_AFTER=5000
- SENTINEL_FAILOVER_TIMEOUT=15000
networks:
- host
redis-master:
image: 127.0.0.1:5000/agile-redis
volumes:
- redis-data:/data
- redis-conf:/etc/redis
deploy:
mode: global
placement:
constraints:
- node.hostname == ${REDIS_MASTER_HOSTNAME}
networks:
- host
redis-slave-node1:
image: 127.0.0.1:5000/agile-redis
volumes:
- redis-data:/data
- redis-conf:/etc/redis
environment:
- REDIS_MASTER_HOST=${REDIS_MASTER_IP}
deploy:
mode: global
placement:
constraints:
- node.hostname == ${REDIS_SLAVE_NODE1_HOSTNAME}
networks:
- host
redis-slave-node2:
image: 127.0.0.1:5000/agile-redis
volumes:
- redis-data:/data
- redis-conf:/etc/redis
environment:
- REDIS_MASTER_HOST=${REDIS_MASTER_IP}
deploy:
mode: global
placement:
constraints:
- node.hostname == ${REDIS_SLAVE_NODE2_HOSTNAME}
networks:
- host
agile-python-app:
image: 127.0.0.1:5000/agile-python-app
ports:
- "38000:611"
deploy:
mode: replicated
replicas: 3
environment:
- SENTINEL_HOST=${SENTINEL_IP}
volumes:
redis-data:
redis-conf:
sentinel-conf:
networks:
host:
external: true

55
swarm/stackredis/deploy.sh Executable file
View File

@ -0,0 +1,55 @@
#!/bin/bash
echo "--------------------------------------------------------------------------------------------------------------"
echo " REDIS STACK DEPLOYMENT "
echo "--------------------------------------------------------------------------------------------------------------"
export SENTINEL_HOSTNAME=$1 #serveur17
export REDIS_MASTER_HOSTNAME=$2 #serveur17
export REDIS_SLAVE_NODE1_HOSTNAME=$3 #serveur18
export REDIS_SLAVE_NODE2_HOSTNAME=$4 #serveur19
if [ -z "$SENTINEL_HOSTNAME" ] || [ -z "$REDIS_MASTER_HOSTNAME" ] || [ -z "$REDIS_SLAVE_NODE1_HOSTNAME" ] || [ -z "$REDIS_SLAVE_NODE2_HOSTNAME" ] ; then
echo "Status: Arguments missing. Cannot continue to build the stack. Missing SENTINEL_HOSTNAME, REDIS_MASTER_HOSTNAME, REDIS_SLAVE_NODE1_HOSTNAME or REDIS_SLAVE_NODE2_HOSTNAME" >&2
exit 1;
fi
echo "1- Start to push on registry the redis docker image which can be used as master or slave in the stack..."
docker compose -f agile-redis-master-slave/agile-redis-Dockercompose.yml build
docker compose -f agile-redis-master-slave/agile-redis-Dockercompose.yml push
echo "(1)End to build and push redis image to registry."
echo "-------------------------------------------------------\n"
echo "2- Start to push on registry the redis docker image which will be used to build sentinel..."
docker compose -f agile-redis-sentinel/agile-redis-sentinel-Dockercompose.yml build
docker compose -f agile-redis-sentinel/agile-redis-sentinel-Dockercompose.yml push
echo "(2)End to build and push redis sentinel image to registry."
echo "-------------------------------------------------------\n"
echo "3- Start to push our python app example on the registry..."
docker compose -f python-app-example/compose-app.yml build
docker compose -f python-app-example/compose-app.yml push
echo "(3)End to push our python app example on the registry."
echo "-------------------------------------------------------\n"
echo "4- Start to deploy the stack..."
export SENTINEL_IP=$(docker node inspect --format '{{ .Status.Addr }}' "$SENTINEL_HOSTNAME")
export REDIS_MASTER_IP=$(docker node inspect --format '{{ .Status.Addr }}' "$REDIS_MASTER_HOSTNAME")
echo "Sentinel hostname and IP: $SENTINEL_HOSTNAME - $SENTINEL_IP"
echo "Redis Master hostname and IP: $REDIS_MASTER_HOSTNAME - $REDIS_MASTER_IP"
echo "Redis slave 1 hostname: $REDIS_SLAVE_NODE1_HOSTNAME"
echo "Redis slave 2 hostname: $REDIS_SLAVE_NODE2_HOSTNAME"
docker stack deploy -c agile-redis-stack.yml stackredis
printf "(4)End to deploy the stack... Please wait until the services started\n\n\n"
sleep 3s
printf "Status: The stack deployment has been completed.\n\n"
docker service ls
printf "If all services replicas are not already deployed, please run << docker service ls >> to see if it now completed.\n"

View File

@ -0,0 +1,24 @@
FROM pypy:3-6
WORKDIR /usr/src/app
# Bundle app source
COPY app.py /usr/src/app
COPY requirements.txt /usr/src/app
RUN apt-get update
RUN apt-get install -y vim
# install requirements
RUN pip install --upgrade pip
RUN pip install -r requirements.txt
RUN mkdir -p /usr/src/logs
EXPOSE 611
VOLUME ["/usr/src/app"]
ENTRYPOINT ["pypy3", "app.py"]

View File

@ -0,0 +1,30 @@
import os
from flask import Flask
from redis.sentinel import Sentinel
app = Flask(__name__)
sentinelHost = os.environ.get("SENTINEL_HOST", None)
sentinelPort = int(os.environ.get("SENTINEL_PORT", 26379))
redisMasterName = os.environ.get("REDIS_MASTER_NAME", 'mymaster')
@app.route('/')
def hello():
if sentinelHost is not None and sentinelPort is not None:
try:
sentinel = Sentinel([(sentinelHost, sentinelPort)], socket_timeout=0.1)
redis_master = sentinel.master_for(redisMasterName, socket_timeout=0.1)
redis_slave = sentinel.slave_for(redisMasterName, socket_timeout=0.1)
incr_and_return_count = redis_master.incr('hits')
count_from_slave = redis_slave.get('hits')
return 'Hello World! I have been seen {} times. Yes Yeah\n'.format(count_from_slave)
except Exception as e:
return 'Sentinel {}:{} - exception while talking to Redis: {}\n'.format(sentinelHost, sentinelPort, e)
else:
return 'Environment variable SENTINEL_HOST is not set or empty.\n'
if __name__ == "__main__":
app.run(host="0.0.0.0", port=611, debug=True)
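With the stack deployed, each request below should increment the counter on the Redis master and read it back through a replica; port 38000 comes from agile-redis-stack.yml, and localhost assumes you curl from a Swarm node:
curl http://localhost:38000/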

View File

@ -0,0 +1,7 @@
version: "3.3"
services:
agile-python-app:
image: 127.0.0.1:5000/agile-python-app
build:
context: ./
dockerfile: ./Dockerfile.yml

View File

@ -0,0 +1,2 @@
flask
redis

View File

@ -0,0 +1,102 @@
version: '3.3'
services:
traefik:
# Use the latest v3.0 Traefik image available
image: traefik:v3.0
ports:
# Listen on port 80, default for HTTP, necessary to redirect to HTTPS
- 80:80
# Listen on port 443, default for HTTPS
- 443:443
# Listen on port 8080, traefiks web-ui
- 8080:8080
deploy:
placement:
constraints:
# Make the traefik service run only on the node with this label
# as the node with it has the volume for certificates
- node.labels.traefik-public.traefik-public-certificates == true
labels:
# Enable Traefik for this service, to make it available in the public network
- traefik.enable=true
# Use the traefik-public network (declared below)
- traefik.docker.network=traefik-public
# Use the custom label "traefik.constraint-label=traefik-public"
# This public Traefik will only use services with this label
# That way you can add other internal Traefik instances per stack if needed
- traefik.constraint-label=traefik-public
# admin-auth middleware with HTTP Basic auth
# Using the environment variables USERNAME and HASHED_PASSWORD
- traefik.http.middlewares.admin-auth.basicauth.users=${USERNAME?Variable not set}:${HASHED_PASSWORD?Variable not set}
# https-redirect middleware to redirect HTTP to HTTPS
# It can be re-used by other stacks in other Docker Compose files
- traefik.http.middlewares.https-redirect.redirectscheme.scheme=https
- traefik.http.middlewares.https-redirect.redirectscheme.permanent=true
# traefik-http set up only to use the middleware to redirect to https
# Uses the environment variable DOMAIN
- traefik.http.routers.traefik-public-http.rule=Host(`${DOMAIN?Variable not set}`)
- traefik.http.routers.traefik-public-http.entrypoints=http
- traefik.http.routers.traefik-public-http.middlewares=https-redirect
# traefik-https the actual router using HTTPS
# Uses the environment variable DOMAIN
- traefik.http.routers.traefik-public-https.rule=Host(`${DOMAIN?Variable not set}`)
- traefik.http.routers.traefik-public-https.entrypoints=https
- traefik.http.routers.traefik-public-https.tls=true
# Use the special Traefik service api@internal with the web UI/Dashboard
- traefik.http.routers.traefik-public-https.service=api@internal
# Use the "le" (Let's Encrypt) resolver created below
- traefik.http.routers.traefik-public-https.tls.certresolver=le
# Enable HTTP Basic auth, using the middleware created above
- traefik.http.routers.traefik-public-https.middlewares=admin-auth
# Define the port inside of the Docker service to use
- traefik.http.services.traefik-public.loadbalancer.server.port=8080
volumes:
# Add Docker as a mounted volume, so that Traefik can read the labels of other services
- /var/run/docker.sock:/var/run/docker.sock:ro
# Mount the volume to store the certificates
- traefik-public-certificates:/certificates
command:
# Enable Docker in Traefik, so that it reads labels from Docker services
- --providers.docker
# Add a constraint to only use services with the label "traefik.constraint-label=traefik-public"
- --providers.docker.constraints=Label(`traefik.constraint-label`, `traefik-public`)
# Do not expose all Docker services, only the ones explicitly exposed
- --providers.docker.exposedbydefault=false
# Enable Docker Swarm mode
- --providers.swarm.endpoint=unix:///var/run/docker.sock
# Create an entrypoint "http" listening on port 80
- --entrypoints.http.address=:80
# Create an entrypoint "https" listening on port 443
- --entrypoints.https.address=:443
# Create the certificate resolver "le" for Let's Encrypt, uses the environment variable EMAIL
- --certificatesresolvers.le.acme.email=${EMAIL?Variable not set}
# Store the Let's Encrypt certificates in the mounted volume
- --certificatesresolvers.le.acme.storage=/certificates/acme.json
# Use the TLS Challenge for Let's Encrypt
- --certificatesresolvers.le.acme.tlschallenge=true
# Enable the access log, with HTTP requests
- --accesslog
# Enable the Traefik log, for configurations and errors
- --log
# Enable the Dashboard and API
- --api=true
- --api.dashboard=true
- --api.insecure=true
networks:
# Use the public network created to be shared between Traefik and
# any other service that needs to be publicly available with HTTPS
- traefik-public
volumes:
# Create a volume to store the certificates, there is a constraint to make sure
# Traefik is always deployed to the same Docker node with the same volume containing
# the HTTPS certificates
traefik-public-certificates:
networks:
# Use the previously created public network "traefik-public", shared with other
# services that need to be publicly available via this Traefik
traefik-public:
external: true
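A hedged end-to-end sketch for bringing this up; every value, the manager node name and the file/stack names are placeholders:
export DOMAIN=traefik.example.com
export EMAIL=admin@example.com
export USERNAME=admin
export HASHED_PASSWORD=$(openssl passwd -apr1)   # prompts for the dashboard password
docker node update --label-add traefik-public.traefik-public-certificates=true <manager-node>
docker network create --driver overlay traefik-public
docker stack deploy -c traefik.yml traefik-public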