Compare commits

..

162 Commits

Author SHA1 Message Date
fac196aa60 test 2026-04-20 19:51:00 +02:00
93cc0f7bd7 test 2026-04-20 19:49:29 +02:00
d4bf0f044a test 2026-04-20 19:30:10 +02:00
2c00f40150 test 2026-04-20 19:25:57 +02:00
026fd46004 test 2026-04-20 19:24:38 +02:00
f107c1ba5e test 2026-04-20 19:24:26 +02:00
32fccb8683 test 2026-04-20 19:22:44 +02:00
6a2e2eb7dd monitoring 2026-04-20 19:16:59 +02:00
df22fcc717 monitoring 2026-04-20 19:11:34 +02:00
a839a51074 monitoring 2026-04-20 19:07:01 +02:00
8ab6ecfac1 monitoring 2026-04-20 18:00:06 +02:00
72a03d0101 monitoring 2026-04-20 16:50:38 +02:00
86a9de8e6b monitoring 2026-04-20 16:24:53 +02:00
f229a25d5e monitoring 2026-04-20 16:17:19 +02:00
e91097ebc2 monitoring 2026-04-20 16:06:59 +02:00
64320b3677 monitoring 2026-04-20 16:06:14 +02:00
f9efbd8784 logging 2026-04-19 16:48:19 +02:00
e38018afaf logging 2026-04-19 16:35:57 +02:00
47c481d1b4 logging 2026-04-19 16:30:13 +02:00
a376705962 logging 2026-04-19 11:40:10 +02:00
f3a53fd823 logging 2026-04-19 11:28:16 +02:00
a2a80f7c0f logging 2026-04-19 11:24:22 +02:00
14fec7bbe6 logging 2026-04-17 23:06:45 +02:00
c828653341 logging 2026-04-17 10:57:29 +02:00
15d3d4570c logging 2026-04-17 10:52:12 +02:00
9605daed02 loggging 2026-04-17 10:46:06 +02:00
18080290a9 logging 2026-04-17 10:45:00 +02:00
b8576d11d0 logging 2026-04-16 23:06:48 +02:00
0a95868b36 logging 2026-04-16 23:02:12 +02:00
538aad2dd1 loging 2026-04-16 22:57:38 +02:00
ab701a11bd logging 2026-04-16 22:48:26 +02:00
d4d5b2a6b3 logging 2026-04-15 22:34:04 +02:00
844b83a3f2 logging 2026-04-15 22:33:15 +02:00
8dcc755222 logging 2026-04-15 22:30:21 +02:00
65eed74dfb logging 2026-04-15 22:26:07 +02:00
e266a78843 logging 2026-04-15 22:22:38 +02:00
9c6aa40453 logging 2026-04-15 22:05:15 +02:00
ca33fc6d1f logging 2026-04-15 22:01:43 +02:00
ff9f3d3749 homepage 2026-04-13 19:41:31 +02:00
fcd88276c7 homepage 2026-04-13 13:02:10 +02:00
c76e8ce3f1 homepage 2026-04-13 12:58:27 +02:00
635baf2362 feat(uptime-Kuma): docker-compose 2026-04-05 18:23:44 +02:00
a0f63ab43f caddy 2026-04-02 19:41:24 +02:00
5d2a44b419 homepage 2026-04-01 22:31:57 +02:00
0315ae7043 feat(homepage):docker-compose 2026-04-01 22:25:20 +02:00
159373354f feat(logging):docker-compose 2026-03-31 11:49:44 +02:00
221b262562 immich 2026-03-30 20:36:18 +02:00
067e616c70 immich 2026-03-30 19:45:53 +02:00
af5b6eb840 immich 2026-03-30 19:36:58 +02:00
97aa13c6b3 immich 2026-03-30 19:32:31 +02:00
fe292f9a0e immich 2026-03-30 19:28:37 +02:00
fb00e72650 immich 2026-03-30 19:25:48 +02:00
6831ad1f03 immich 2026-03-30 19:16:40 +02:00
d12926dda8 immich 2026-03-30 18:43:59 +02:00
b24fa2e13d immich 2026-03-30 18:40:07 +02:00
9ac1707617 immich 2026-03-29 10:45:24 +02:00
e442576391 immich 2026-03-28 23:23:54 +01:00
38cf7ddd0a immich 2026-03-28 23:14:16 +01:00
564f4938fa immich 2026-03-28 22:58:15 +01:00
db9b42d8b3 immich 2026-03-28 22:50:24 +01:00
084bf8bba8 immich 2026-03-28 22:48:06 +01:00
1f73b62605 immich 2026-03-28 22:44:38 +01:00
2280ef9fef immich 2026-03-28 22:41:21 +01:00
bc99ef25b5 test 2026-03-27 23:22:43 +01:00
1353a8ff29 feat(immich):docker-compose 2026-03-27 23:16:41 +01:00
67bbec9f83 docs(repo): update 2026-03-26 15:31:46 +01:00
721c3e23e7 test 2026-03-23 22:41:55 +01:00
98a029dc37 test 2026-03-23 22:40:28 +01:00
6f070216b7 webhook 2026-03-23 22:36:48 +01:00
438b6d950e docs(pihole):neu 2026-03-23 22:28:07 +01:00
e8b70e7d48 feat(mount): no fstab 2026-03-19 19:42:17 +01:00
a8c81cef12 update 2026-03-12 22:14:14 +01:00
65ca5f4a82 repo 2026-03-12 19:54:13 +01:00
84ee914bea doku(diversese):update 2026-03-12 19:17:41 +01:00
ee8a96f0cf test 2026-03-12 00:04:26 +01:00
221932f90e nc 2026-03-11 23:29:26 +01:00
d7a1e900d9 nc 2026-03-11 23:27:50 +01:00
88ea22caa0 nc 2026-03-11 23:27:11 +01:00
8df7afa511 test 2026-03-11 22:44:51 +01:00
cda41d6055 test 2026-03-11 22:05:57 +01:00
52e8cd2da6 test 2026-03-11 22:01:22 +01:00
e323e4b3b4 test 2026-03-11 21:57:23 +01:00
b8ddf52633 webhook 2026-03-11 21:52:48 +01:00
e7eae03a9d test 2026-03-11 21:43:08 +01:00
5adee23135 tets 2026-03-11 21:39:15 +01:00
9cd773dd63 tets 2026-03-11 21:01:03 +01:00
d78da41a47 feat(vaulwarden):L port 2026-03-11 20:54:19 +01:00
3b862362fe tvh 2026-03-11 16:07:32 +01:00
ee71fccc58 test 2026-03-10 23:05:22 +01:00
22a3c3145f docs(workflows): 2026-03-10 23:00:38 +01:00
3699ec494d test 2026-03-10 22:24:11 +01:00
0572da6853 Make deploy script executable 2026-03-10 22:21:35 +01:00
38de32a680 test 2026-03-10 22:17:40 +01:00
b7a91a8ea8 test 2026-03-10 22:15:54 +01:00
165cc12450 test 2026-03-10 22:15:21 +01:00
99c12fe33a feat(webhook): redeploy 2026-03-10 22:14:21 +01:00
723e2a571a feat(webhook): redeploy 2026-03-10 21:56:24 +01:00
49c86b1c85 test 2026-03-09 16:22:09 +01:00
02e2504ad2 test 2026-03-09 16:14:05 +01:00
9eab24730d test 2026-03-09 16:10:09 +01:00
f4ea66ea31 test 2026-03-09 16:08:12 +01:00
7b7c6af453 update 2026-03-09 16:07:25 +01:00
888ada1263 update 2026-03-09 16:05:50 +01:00
e8b95cacb4 infra(DNS): Pihole, Adgaurd 2026-03-08 18:23:39 +01:00
50b35361a2 test 2026-03-05 22:18:50 +01:00
f2239e668a test 2026-03-05 22:17:10 +01:00
fa06ef0754 webhook 2026-03-05 22:15:44 +01:00
ca5fcaad07 test 2026-03-05 22:12:08 +01:00
18020c3ce9 test 2026-03-05 19:19:51 +01:00
81d8f89552 feat(webhook): scripts 2026-03-05 19:18:26 +01:00
be3601b39d test 2026-03-05 18:40:53 +01:00
18267c3f71 test 2026-03-05 18:39:59 +01:00
4c11ae66e0 infra(comose): ordern umbenannt 2026-03-05 18:39:06 +01:00
6465cd9ddc dockes(netz): DHCP 2026-03-05 17:53:24 +01:00
c8a9ad0438 docks(homeassistent): neu 2026-03-05 17:47:03 +01:00
dd7585f2a0 dockes(netz): DHCP 2026-03-05 17:45:28 +01:00
a1f8a33737 docs(adguard): down 2026-03-05 17:23:24 +01:00
7f308e4ee4 heimdall 2026-03-05 17:10:58 +01:00
e057dc84fc infra(pihole): neu 2026-03-04 23:04:59 +01:00
eb55ef32e6 infra(adguard): neu 2026-03-04 22:58:18 +01:00
8d8ea2a6fb test 2026-03-04 22:41:26 +01:00
bf36d12396 test 2026-03-04 22:23:53 +01:00
1da2665fa8 test 2026-03-04 22:18:27 +01:00
cec0fb7a1a feat(nc):realtive Pfade 2026-03-04 22:17:24 +01:00
0c1da36fbd test 2026-03-04 22:14:50 +01:00
affbeb666b test 2026-03-04 22:06:37 +01:00
c0edb4295f test 2026-03-04 22:05:06 +01:00
3519493688 infra(nextcloud): Umzug 2026-03-04 22:03:26 +01:00
f315c8a371 infra(nextcloud): Umzug 2026-03-04 21:46:11 +01:00
0cc8814188 feat(newt): docker-compose.yml 2026-03-04 19:06:23 +01:00
121955d4b9 test 2026-03-02 21:39:39 +01:00
c22e813d0a test 2026-03-02 21:26:15 +01:00
d48b65f039 test 2026-03-02 21:24:43 +01:00
75409bffec tvheadend 2026-03-02 21:23:29 +01:00
62fc532e14 infra(tvheadend): docker-compose 2026-03-02 20:57:36 +01:00
e2101b6059 infra(tvheadend): docker-compose 2026-03-02 20:56:24 +01:00
6f31d28cc2 test 2026-03-02 19:37:44 +01:00
ceeed62eaa test 2026-03-02 19:33:59 +01:00
33cd40694c test 2026-03-02 19:33:34 +01:00
7accc13053 test 2026-03-02 19:30:31 +01:00
c8b21cb41c test 2026-03-02 19:28:28 +01:00
bebfd4acdb test 2026-03-02 19:27:26 +01:00
1e3f814137 test 2026-03-02 19:25:48 +01:00
b095939837 test 2026-03-02 19:23:33 +01:00
6a2e525441 test 2026-03-02 19:22:17 +01:00
c891697abc test 2026-03-02 19:21:09 +01:00
15a346e47b test 2026-03-02 19:20:08 +01:00
d8bffaacfc test 2026-03-02 19:18:50 +01:00
55f18c66b9 test 2026-03-02 19:17:44 +01:00
7be10bda0c docs(proxmox Backup): update 2026-03-02 18:00:53 +01:00
cd908faf09 test 2026-03-02 17:57:47 +01:00
f5fc8c3146 test 2026-03-02 17:56:30 +01:00
ca91eaf3bc test 2026-03-02 17:53:44 +01:00
a680791adf test 2026-03-02 17:46:38 +01:00
56cc6199ed test 2026-03-02 17:44:37 +01:00
73c45e8923 docs(mkdocs): update 2026-03-02 17:43:19 +01:00
cfaef4207e docs(paperless): Doku + backup 2026-03-02 17:40:17 +01:00
963a57fca2 docs(Proxmox): Backupstrategie 2026-03-01 23:17:44 +01:00
d0d6640131 test 2026-03-01 22:21:02 +01:00
2db2d5e068 test 2026-03-01 19:16:55 +01:00
fb96362ea7 test 2026-03-01 19:15:48 +01:00
34f8f4f8c1 test 2026-03-01 19:13:56 +01:00
63 changed files with 1303 additions and 230 deletions

View File

@@ -1,13 +1,13 @@
services: services:
adguardhome: adguardhome:
image: adguard/adguardhome:v0.107.69 image: adguard/adguardhome:v0.107.69
container_name: adguardhome container_name: adguardhome
restart: unless-stopped restart: unless-stopped
network_mode: host network_mode: host
ports:
- 3003:3000
volumes: volumes:
- /docker/Daten/adguardhome/work:/opt/adguardhome/work - /srv/docker/daten/adguardhome/work:/opt/adguardhome/work
- /docker/Daten/adguardhome/conf:/opt/adguardhome/conf - /srv/docker/daten/adguardhome/conf:/opt/adguardhome/conf
environment: environment:
TZ: Europe/Berlin TZ: Europe/Berlin

View File

@@ -0,0 +1,15 @@
######### CURRENTLY DOWN ##########
# Caddy reverse proxy (currently not deployed).
# NOTE: the obsolete top-level `version` key was removed — Compose v2
# ignores it and only prints a warning.
services:
  caddy:
    image: caddy:latest # TODO(review): pin a version for reproducible deploys
    container_name: caddy
    restart: unless-stopped
    ports:
      - "80:80"
      - "443:443"
    volumes:
      - /srv/docker/daten/caddy/Caddyfile:/etc/caddy/Caddyfile
      - /srv/docker/daten/caddy/data:/data
      - /srv/docker/daten/caddy/config:/config

View File

@@ -0,0 +1,76 @@
#
# WARNING: To install Immich, follow our guide: https://docs.immich.app/install/docker-compose
#
# Make sure to use the docker-compose.yml of the current release:
#
# https://github.com/immich-app/immich/releases/latest/download/docker-compose.yml
#
# The compose file on main may not be compatible with the latest release.
name: immich
services:
immich-server:
container_name: immich_server
image: ghcr.io/immich-app/immich-server:${IMMICH_VERSION:-release}
# extends:
# file: hwaccel.transcoding.yml
# service: cpu # set to one of [nvenc, quicksync, rkmpp, vaapi, vaapi-wsl] for accelerated transcoding
volumes:
# Do not edit the next line. If you want to change the media storage location on your system, edit the value of UPLOAD_LOCATION in the .env file
- ${UPLOAD_LOCATION}:/data
- /etc/localtime:/etc/localtime:ro
env_file:
- .env
ports:
- '2283:2283'
depends_on:
- redis
- database
restart: always
healthcheck:
disable: false
immich-machine-learning:
container_name: immich_machine_learning
# For hardware acceleration, add one of -[armnn, cuda, rocm, openvino, rknn] to the image tag.
# Example tag: ${IMMICH_VERSION:-release}-cuda
image: ghcr.io/immich-app/immich-machine-learning:${IMMICH_VERSION:-release}
# extends: # uncomment this section for hardware acceleration - see https://docs.immich.app/features/ml-hardware-acceleration
# file: hwaccel.ml.yml
# service: cpu # set to one of [armnn, cuda, rocm, openvino, openvino-wsl, rknn] for accelerated inference - use the `-wsl` version for WSL2 where applicable
volumes:
- model-cache:/cache
env_file:
- .env
restart: always
healthcheck:
disable: false
redis:
container_name: immich_redis
image: docker.io/valkey/valkey:9@sha256:3eeb09785cd61ec8e3be35f8804c8892080f3ca21934d628abc24ee4ed1698f6
healthcheck:
test: redis-cli ping || exit 1
restart: always
database:
container_name: immich_postgres
image: ghcr.io/immich-app/postgres:14-vectorchord0.4.3-pgvectors0.2.0@sha256:bcf63357191b76a916ae5eb93464d65c07511da41e3bf7a8416db519b40b1c23
environment:
POSTGRES_PASSWORD: ${DB_PASSWORD}
POSTGRES_USER: ${DB_USERNAME}
POSTGRES_DB: ${DB_DATABASE_NAME}
POSTGRES_INITDB_ARGS: '--data-checksums'
# Uncomment the DB_STORAGE_TYPE: 'HDD' var if your database isn't stored on SSDs
# DB_STORAGE_TYPE: 'HDD'
volumes:
# Do not edit the next line. If you want to change the database storage location on your system, edit the value of DB_DATA_LOCATION in the .env file
- ${DB_DATA_LOCATION}:/var/lib/postgresql/data
shm_size: 128mb
restart: always
healthcheck:
disable: false
volumes:
model-cache:

View File

@@ -3,14 +3,14 @@ services:
image: gitea/gitea:1.24 image: gitea/gitea:1.24
container_name: gitea container_name: gitea
environment: environment:
- USER_UID=1000 - USER_UID=1001
- USER_GID=1000 - USER_GID=1001
restart: unless-stopped restart: unless-stopped
networks: networks:
- gitea - gitea
- bruchtal-net # 👈 neu für bruchtal-webhook - bruchtal-net # 👈 neu für bruchtal-webhook
volumes: volumes:
- /docker/Daten/gitea/data:/data - /srv/docker/daten/gitea/data:/data
- /etc/timezone:/etc/timezone:ro - /etc/timezone:/etc/timezone:ro
- /etc/localtime:/etc/localtime:ro - /etc/localtime:/etc/localtime:ro
ports: ports:

View File

@@ -1,15 +1,18 @@
## test
services: services:
heimdall: heimdall:
image: lscr.io/linuxserver/heimdall:latest image: lscr.io/linuxserver/heimdall:latest
container_name: heimdall2 container_name: heimdall2
environment: environment:
- PUID=1000 - PUID=1001
- PGID=1000 - PGID=1001
- TZ=Europe/Berlin - TZ=Europe/Berlin
volumes: volumes:
- /home/christian/docker/heimdall/data/config:/config - /srv/docker/daten/heimdall/config:/config
ports: ports:
- 1280:80 - 1280:80
- 12443:443 - 12443:443
restart: unless-stopped restart: unless-stopped

View File

@@ -0,0 +1,34 @@
# Homepage dashboard + read-only docker socket proxy.
# NOTE: the obsolete top-level `version` key was removed — Compose v2
# ignores it and only prints a warning.
services:
  dockerproxy:
    image: ghcr.io/tecnativa/docker-socket-proxy:latest
    container_name: dockerproxy
    environment:
      - CONTAINERS=1 # Allow access to viewing containers
      - SERVICES=1 # Allow access to viewing services (necessary when using Docker Swarm)
      - TASKS=1 # Allow access to viewing tasks (necessary when using Docker Swarm)
      - POST=0 # Disallow any POST operations (effectively read-only)
    ports:
      - 127.0.0.1:2375:2375
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock:ro # Mounted as read-only
    restart: unless-stopped

  homepage:
    image: ghcr.io/gethomepage/homepage:latest
    container_name: homepage
    # network_mode: host
    ports:
      - "3004:3000"
    volumes:
      - /srv/docker/daten/homepage:/app/config
      - /srv/docker/daten/homepage/icons:/app/public/icons
      # NOTE(review): mounting the socket directly defeats the purpose of the
      # dockerproxy service above — consider pointing homepage at
      # tcp://dockerproxy:2375 instead and dropping this mount.
      - /var/run/docker.sock:/var/run/docker.sock:ro
    environment:
      # - PORT=8080
      # SECURITY: disables TLS certificate verification for ALL outbound
      # requests from homepage — keep only while targets use self-signed certs.
      - NODE_TLS_REJECT_UNAUTHORIZED=0
      # - HOMEPAGE_ALLOWED_HOSTS=192.168.178.204:8080,192.168.178.204,localhost:8080,localhost,127.0.0.1
      - HOMEPAGE_ALLOWED_HOSTS=192.168.178.204,192.168.178.204:3004,localhost,127.0.0.1
    restart: unless-stopped

View File

@@ -0,0 +1,84 @@
#
# WARNING: To install Immich, follow our guide: https://docs.immich.app/install/docker-compose
#
# Make sure to use the docker-compose.yml of the current release:
#
# https://github.com/immich-app/immich/releases/latest/download/docker-compose.yml
#
# The compose file on main may not be compatible with the latest release.

name: immich

services:
  immich-server:
    container_name: immich_server
    image: ghcr.io/immich-app/immich-server:${IMMICH_VERSION:-release}
    # extends:
    #   file: hwaccel.transcoding.yml
    #   service: cpu # set to one of [nvenc, quicksync, rkmpp, vaapi, vaapi-wsl] for accelerated transcoding
    #user: "1001:1001"
    volumes:
      # Do not edit the next line. If you want to change the media storage location on your system, edit the value of UPLOAD_LOCATION in the .env file
      - ${UPLOAD_LOCATION}:/data
      - /etc/localtime:/etc/localtime:ro
    env_file:
      - .env
    ports:
      - '2283:2283'
    depends_on:
      - redis
      - database
    restart: always
    healthcheck:
      disable: false

  immich-machine-learning:
    container_name: immich_machine_learning
    # For hardware acceleration, add one of -[armnn, cuda, rocm, openvino, rknn] to the image tag.
    # Example tag: ${IMMICH_VERSION:-release}-cuda
    image: ghcr.io/immich-app/immich-machine-learning:${IMMICH_VERSION:-release}
    # extends: # uncomment this section for hardware acceleration - see https://docs.immich.app/features/ml-hardware-acceleration
    #   file: hwaccel.ml.yml
    #   service: cpu # set to one of [armnn, cuda, rocm, openvino, openvino-wsl, rknn] for accelerated inference - use the `-wsl` version for WSL2 where applicable
    #user: "1001:1001"
    environment:
      - MPLCONFIGDIR=/cache/.matplotlib
      - IMMICH_TEMP_DIR=/cache/temp
    volumes:
      # Local change: bind mount instead of the upstream `model-cache` named volume.
      - /srv/docker/daten/immich/model-cache:/cache
      - ${UPLOAD_LOCATION}:/data # local addition
    env_file:
      - .env
    restart: always
    healthcheck:
      disable: false

  redis:
    container_name: immich_redis
    image: docker.io/valkey/valkey:9@sha256:3eeb09785cd61ec8e3be35f8804c8892080f3ca21934d628abc24ee4ed1698f6
    #user: "1001:1001"
    healthcheck:
      test: redis-cli ping || exit 1
    restart: always

  database:
    container_name: immich_postgres
    image: ghcr.io/immich-app/postgres:14-vectorchord0.4.3-pgvectors0.2.0@sha256:bcf63357191b76a916ae5eb93464d65c07511da41e3bf7a8416db519b40b1c23
    #user: "1001:1001"
    environment:
      POSTGRES_PASSWORD: ${DB_PASSWORD}
      POSTGRES_USER: ${DB_USERNAME}
      POSTGRES_DB: ${DB_DATABASE_NAME}
      POSTGRES_INITDB_ARGS: '--data-checksums'
      # Uncomment the DB_STORAGE_TYPE: 'HDD' var if your database isn't stored on SSDs
      # DB_STORAGE_TYPE: 'HDD'
    volumes:
      # Do not edit the next line. If you want to change the database storage location on your system, edit the value of DB_DATA_LOCATION in the .env file
      - ${DB_DATA_LOCATION}:/var/lib/postgresql/data
    shm_size: 128mb
    restart: always
    healthcheck:
      disable: false

# NOTE(review): `model-cache` is declared but no longer referenced — the ML
# cache was switched to a bind mount above. Kept to avoid deleting anything
# still in use; safe to remove once confirmed unused.
volumes:
  model-cache:

View File

@@ -1,5 +1,4 @@
version: "3.8" ######### AKTUELL DOWN ##########
services: services:
kea-dhcp4: kea-dhcp4:
image: serhiymakarenko/isc-kea-dhcp4-server:latest image: serhiymakarenko/isc-kea-dhcp4-server:latest

View File

@@ -0,0 +1,62 @@
# Monitoring stack: Prometheus + Grafana + node/container/Proxmox exporters.
services:
  prometheus:
    image: prom/prometheus:latest
    container_name: prometheus
    restart: unless-stopped
    user: "root" # avoids permission problems reading the config; TODO(review): prefer fixing host dir ownership
    volumes:
      - /srv/docker/daten/prometheus/prometheus.yml:/etc/prometheus/prometheus.yml
      - /srv/docker/daten/prometheus:/prometheus
    command:
      - '--config.file=/etc/prometheus/prometheus.yml'
      - '--storage.tsdb.path=/prometheus'
    ports:
      - "9090:9090"

  grafana:
    image: grafana/grafana:latest
    container_name: grafana
    restart: unless-stopped
    environment:
      # SECURITY: do not commit real admin passwords. Default stays "admin"
      # for backward compatibility; override via GRAFANA_ADMIN_PASSWORD in .env.
      - GF_SECURITY_ADMIN_PASSWORD=${GRAFANA_ADMIN_PASSWORD:-admin}
    volumes:
      - /srv/docker/daten/grafana:/var/lib/grafana
    ports:
      - "3000:3000"

  node-exporter:
    image: prom/node-exporter:latest
    container_name: node-exporter
    restart: unless-stopped
    volumes:
      - /proc:/host/proc:ro
      - /sys:/host/sys:ro
      - /:/rootfs:ro
    command:
      - '--path.procfs=/host/proc'
      - '--path.rootfs=/rootfs'
      - '--path.sysfs=/host/sys'

  cadvisor:
    image: gcr.io/cadvisor/cadvisor:v0.49.1
    container_name: cadvisor
    restart: unless-stopped
    privileged: true
    devices:
      - /dev/kmsg
    volumes:
      - /:/rootfs:ro
      - /var/run:/var/run:ro
      - /sys:/sys:ro
      - /var/lib/docker/:/var/lib/docker:ro
      - /dev/disk/:/dev/disk:ro

  proxmox-exporter:
    image: ghcr.io/prometheus-pve/prometheus-pve-exporter:latest
    container_name: proxmox-exporter
    restart: unless-stopped
    volumes:
      # We mount the DIRECTORY; it contains the pve.yml
      - /srv/docker/daten/proxmox-exporter:/etc/prometheus:ro
    ports:
      - "9221:9221"

View File

@@ -0,0 +1,37 @@
# Newt tunnel client for Pangolin.
# NOTE(review): the original file was clearly exported from a running
# container (Portainer / `docker inspect`): it contained runtime-generated
# com.docker.compose.* labels and a hostname pinned to an old container id.
# Compose manages that metadata itself, so both were removed.
services:
  newt:
    image: fosrl/newt # TODO(review): pin a version tag
    container_name: newt
    entrypoint:
      - /entrypoint.sh
    command:
      - newt
    environment:
      - PANGOLIN_ENDPOINT=https://tunnel.seanluc.de
      - NEWT_ID=q8ddcxxoutrrhnc
      # SECURITY: the previous secret was committed in plain text. Rotate it
      # on the Pangolin side and supply the new value via .env.
      - NEWT_SECRET=${NEWT_SECRET:?set NEWT_SECRET in .env and rotate the previously committed value}
      - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
    ipc: private
    logging:
      driver: json-file
      options: {}
    networks:
      - pangolintunnel_default
    restart: unless-stopped
    working_dir: /

networks:
  pangolintunnel_default:
    external: true

View File

@@ -0,0 +1,58 @@
# Nextcloud with MariaDB and Redis.
services:
  db:
    image: mariadb:latest # TODO(review): pin a major version for safe upgrades
    container_name: nextcloud-db
    restart: always
    volumes:
      - /srv/docker/daten/nextcloud/db:/var/lib/mysql
    networks:
      - default
    environment:
      # BUG FIX: tz database names are case-sensitive — "europe/berlin" is invalid.
      TZ: Europe/Berlin
      MYSQL_ROOT_PASSWORD: ${MYSQL_ROOT_PASSWORD}
      MYSQL_DATABASE: ${MYSQL_DATABASE}
      MYSQL_USER: ${MYSQL_USER}
      MYSQL_PASSWORD: ${MYSQL_PASSWORD}

  redis:
    image: redis:latest
    container_name: nextcloud-redis
    restart: always
    volumes:
      - /srv/docker/daten/nextcloud/redis:/data
    networks:
      - proxy
      - default

  nextcloud:
    image: nextcloud
    container_name: nextcloud
    depends_on:
      - redis
      - db
    volumes:
      - /srv/docker/daten/nextcloud/www:/var/www/html
    networks:
      - proxy
      - default
    ports:
      - 1180:80
      - 1444:443
    restart: always
    environment:
      REDIS_HOST: redis
      MYSQL_HOST: db:3306
      MYSQL_DATABASE: ${MYSQL_DATABASE}
      MYSQL_USER: ${MYSQL_USER}
      MYSQL_PASSWORD: ${MYSQL_PASSWORD}
      # NOTE(review): PUID/PGID is a linuxserver.io convention — presumably
      # the official nextcloud image ignores these; confirm they are needed.
      PUID: "1001"
      PGID: "1001"

networks:
  proxy:

# NOTE(review): both named volumes below appear unused — all data is on bind
# mounts above. Kept to avoid removing anything still referenced elsewhere.
volumes:
  nextcloud-db-data:
    name: nextcloud-db-data
  redis:
    name: nextcloud-redis

View File

@@ -0,0 +1,40 @@
# More info at https://github.com/pi-hole/docker-pi-hole/ and https://docs.pi-hole.net/
services:
  pihole:
    container_name: pihole
    image: pihole/pihole:latest
    ports:
      # DNS Ports
      - "53:53/tcp"
      - "53:53/udp"
      # Default HTTP Port
      - "3080:80/tcp"
      # Default HTTPs Port. FTL will generate a self-signed certificate
      - "8443:443/tcp"
      # Uncomment the below if using Pi-hole as your DHCP Server
      #- "67:67/udp"
      # Uncomment the line below if you are using Pi-hole as your NTP server
      #- "123:123/udp"
    environment:
      # Set the appropriate timezone for your location from
      # https://en.wikipedia.org/wiki/List_of_tz_database_time_zones, e.g:
      TZ: 'Europe/Berlin'
      # SECURITY: the previous web password was committed in plain text.
      # Rotate it and supply the new value via .env.
      FTLCONF_webserver_api_password: ${PIHOLE_WEBPASSWORD:?set PIHOLE_WEBPASSWORD in .env and rotate the previously committed value}
      # If using Docker's default `bridge` network setting the dns listening mode should be set to 'ALL'
      FTLCONF_dns_listeningMode: 'ALL'
    # Volumes store your data between container upgrades
    volumes:
      # For persisting Pi-hole's databases and common configuration file
      # TODO(review): other services were migrated to /srv/docker/daten —
      # this path still uses the old /docker/Daten location; confirm intent.
      - '/docker/Daten/pihole:/etc/pihole'
      # Uncomment the below if you have custom dnsmasq config files that you want to persist. Not needed for most starting fresh with Pi-hole v6. If you're upgrading from v5 you and have used this directory before, you should keep it enabled for the first v6 container start to allow for a complete migration. It can be removed afterwards. Needs environment variable FTLCONF_misc_etc_dnsmasq_d: 'true'
      #- './etc-dnsmasq.d:/etc/dnsmasq.d'
    cap_add:
      # See https://github.com/pi-hole/docker-pi-hole#note-on-capabilities
      # Required if you are using Pi-hole as your DHCP server, else not needed
      - NET_ADMIN
      # Required if you are using Pi-hole as your NTP client to be able to set the host's system time
      - SYS_TIME
      # Optional, if Pi-hole should get some more processing time
      - SYS_NICE
    restart: unless-stopped

View File

@@ -7,7 +7,6 @@ services:
- "9000:9000" # Webinterface - "9000:9000" # Webinterface
- "9443:9443" - "9443:9443"
volumes: volumes:
- /docker/Daten/portainer-data:/data:rw # Portainer-Daten (DB + Key) - /srv/docker/daten/portainer-data:/data:rw # Portainer-Daten (DB + Key)
- /etc/localtime:/etc/localtime:ro - /etc/localtime:/etc/localtime:ro
- /var/run/docker.sock:/var/run/docker.sock:ro - /var/run/docker.sock:/var/run/docker.sock:ro

View File

@@ -0,0 +1,16 @@
# TVHeadend TV backend (linuxserver.io image).
services:
  tvheadend:
    image: lscr.io/linuxserver/tvheadend:latest
    container_name: tvheadend
    network_mode: host
    environment:
      - PUID=1001
      - PGID=1001
      - TZ=Europe/Berlin
    volumes:
      - /srv/docker/daten/tvheadend/config:/config
      - /srv/docker/daten/tvheadend/recordings:/recordings
    restart: unless-stopped

View File

@@ -0,0 +1,11 @@
# Uptime Kuma monitoring (web UI on host port 3006).
# NOTE: the obsolete top-level `version` key was removed — Compose v2
# ignores it and only prints a warning.
services:
  uptime-kuma:
    image: louislam/uptime-kuma:latest
    container_name: uptime-kuma
    ports:
      - "3006:3001"
    volumes:
      - /srv/docker/daten/uptime-kuma:/app/data
    restart: unless-stopped

View File

@@ -1,16 +1,17 @@
version: '3.7'
services: services:
vaultwarden: vaultwarden:
image: vaultwarden/server:latest image: vaultwarden/server:latest
container_name: vaultwarden4 container_name: vaultwarden4
restart: always restart: always
volumes: volumes:
- /docker/Daten/vaultwarden/bw-data:/data - /srv/docker/daten/vaultwarden/bw-data:/data
environment: environment:
- ADMIN_TOKEN=${VAULTWARDEN_ADMIN_TOKEN} - ADMIN_TOKEN=${VAULTWARDEN_ADMIN_TOKEN}
- WEBSOCKET_ENABLED=true - WEBSOCKET_ENABLED=true
ports: ports:
- 1380:80 - 1380:80
- 1443:443

View File

@@ -1,4 +1,3 @@
version: "3.8"
services: services:
vaultwarden_backup: vaultwarden_backup:
image: bruceforce/vaultwarden-backup image: bruceforce/vaultwarden-backup
@@ -6,7 +5,7 @@ services:
restart: unless-stopped restart: unless-stopped
init: true init: true
volumes: volumes:
- /docker/Daten/vaultwarden/bw-data:/data/ - /srv/docker/daten/vaultwarden/bw-data:/data/
- /mnt/vaultwardenBackupOnQnap:/backups/ - /mnt/vaultwardenBackupOnQnap:/backups/
- /mnt/vaultwardenBackupOnQnap/logs:/logs/ - /mnt/vaultwardenBackupOnQnap/logs:/logs/
environment: environment:

View File

@@ -0,0 +1,31 @@
# Git webhook receiver that redeploys services from /srv/docker/repo.
services:
  bruchtal-webhook:
    build: ./deploy
    container_name: webhook
    restart: unless-stopped
    ports:
      - "9001:9001"
    volumes:
      - /srv/docker/repo:/workspace
      - /srv/docker/scripts/webhook-deploy/hooks.json:/hooks/hooks.json:ro
      # SECURITY: a read-write docker socket plus root's SSH keys give this
      # container effective root on the host — keep hooks.json strict and
      # the port firewalled from untrusted networks.
      - /var/run/docker.sock:/var/run/docker.sock
      - /root/.ssh:/root/.ssh:ro
    # environment:
    #   - WEBHOOK_ALLOWED_HOST_LIST=192.168.178.204,0.0.0.0,localhost
    #   - WEBHOOK_VERBOSE=true
    #   - WEBHOOK_PORT=9001
    #   - WEBHOOK_IP=0.0.0.0
    command: [
      "-hooks", "/hooks/hooks.json",
      "-verbose", "-port", "9001",
      "-ip", "0.0.0.0",
      # "-allowed-host-list", "0.0.0.0,192.168.178.204,localhost"
    ]
    networks:
      - bruchtal-net

networks:
  bruchtal-net:
    external: true

View File

@@ -1,3 +1,5 @@
######### AKTUELL DOWN ##########
services: services:
wikijs: wikijs:
image: linuxserver/wikijs:2.5.312 image: linuxserver/wikijs:2.5.312

View File

@@ -8,10 +8,10 @@ services:
- "8005:8000" - "8005:8000"
volumes: volumes:
- /docker/Bruchtal:/docs - /srv/docker/repo:/docs
command: serve --dev-addr=0.0.0.0:8000 command: serve --dev-addr=0.0.0.0:8000
environment: environment:
- TZ=Europe/Berlin - TZ=Europe/Berlin
- WATCHDOG_FORCE_POLLING=true # - WATCHDOG_FORCE_POLLING=true

View File

@@ -1,19 +0,0 @@
# Content of the REMOVED webhook compose file (superseded by the
# /srv/docker variant); indentation restored for readability.
services:
  bruchtal-webhook:
    build: ./deploy
    container_name: bruchtal-webhook
    restart: unless-stopped
    ports:
      - "9001:9001"
    volumes:
      - /docker/Bruchtal:/workspace
      - /docker/Bruchtal/deploy/hooks.json:/hooks/hooks.json:ro
      - /var/run/docker.sock:/var/run/docker.sock
      - /root/.ssh:/root/.ssh:ro
    command: ["-hooks", "/hooks/hooks.json", "-verbose", "-port", "9001", "-ip", "0.0.0.0"]
    networks:
      - bruchtal-net

networks:
  bruchtal-net:
    external: true

View File

@@ -1,11 +0,0 @@
# Content of the REMOVED caddy compose file (superseded by the
# /srv/docker/daten variant); indentation restored for readability.
services:
  caddy:
    image: caddy:latest
    container_name: caddy
    restart: unless-stopped
    network_mode: host
    volumes:
      - /docker/caddy/config/Caddyfile:/etc/caddy/Caddyfile:ro
      - /docker/caddy/data:/data
      - /docker/caddy/ca/rootCA.crt:/etc/ssl/certs/rootCA.crt
      - /docker/caddy/ca/rootCA.key:/etc/ssl/private/rootCA.key

View File

@@ -0,0 +1,4 @@
# Docker Backup
Docker läuft als VM unter Proxmox und wird dort täglich vollständig auf der Qnap gesichert
[=> Proxmox_Backup:](../../backup_restore/proxmox/proxmox_backup.md)

View File

@@ -0,0 +1,5 @@
# Server Seanluc1
- Location: Hetzner
- Schedule: täglich
- Skript: Hetzner Voreinstellung

View File

@@ -1,73 +0,0 @@
#!/bin/bash
# Backs up Proxmox (PVE) host configuration files to the storage box and
# prunes backup directories older than 14 days.
#
# Output goes to $BACKUP_DIR/<timestamp>/; warnings are appended to $LOGFILE.

# -u: fail on unset variables. -e is deliberately NOT set: a single
# unreadable path must not abort the whole backup run.
set -u

BACKUP_DIR="/mnt/storagebox-nocrypt/pve-configs-backup"
# FIX: was $(date +%T_%F) — %T puts colons in the directory name, which
# breaks scp/SMB and sorts badly. Use a colon-free, sortable timestamp.
TODAY="$(date +%F_%H-%M-%S)-pve-configs-backup"
# FIX: was a relative path, so the log landed in whatever CWD cron used.
LOGFILE="$BACKUP_DIR/pve-backup.log"

mkdir -p "$BACKUP_DIR/$TODAY"

# Paths to back up. rsync --relative recreates the full path under the
# target directory, so directories and single files can be mixed freely.
FILES=(
  "/root/"
  "/etc/pve/"
  "/etc/network/interfaces"
  "/etc/hosts"
  "/etc/resolv.conf"
  "/etc/hostname"
  "/etc/timezone"
  "/etc/passwd"
  "/etc/group"
  "/etc/shadow"
  "/root/.ssh/"
  "/etc/vzdump.conf"
  "/etc/ssh/sshd_config"
  "/var/lib/pve-cluster/"
  "/etc/ssh/"
  "/etc/cron.d/"
  "/etc/cron.daily/"
  "/etc/cron.hourly/"
  "/etc/cron.weekly/"
  "/etc/cron.monthly/"
  "/var/spool/cron/"
  "/etc/fstab"
  "/etc/default/"
  "/etc/apt/sources.list"
  "/etc/apt/sources.list.d/"
  "/var/log/"
  "/etc/systemd/"
  "/etc/sysctl.conf"
  "/etc/security/"
  "/var/backups/"
  "/etc/fail2ban/"
  "/root/backup-pve-configs.sh"
)

# Mounts and guest disk storage that must never be copied into the backup
# (the backup target itself is in this list to avoid recursion).
EXCLUDE_DIRS=(
  "/mnt/storagebox-crypt"
  "/mnt/storagebox-nocrypt"
  "/var/lib/vz/images/"
  "/var/lib/lxc/"
  "/var/lib/vz/private/"
  "/var/lib/lxcfs/"
)

# Build the rsync exclude parameter list.
EXCLUDE_PARAMS=()
for EXCLUDE in "${EXCLUDE_DIRS[@]}"; do
  EXCLUDE_PARAMS+=(--exclude="$EXCLUDE")
done

for FILE in "${FILES[@]}"; do
  if [ -e "$FILE" ]; then
    echo "Kopiere $FILE..."
    # Best effort: per-file rsync errors (vanished files, permissions) are
    # intentionally suppressed so one failure does not stop the run.
    rsync -aL --relative --ignore-missing-args --safe-links \
      "${EXCLUDE_PARAMS[@]}" "$FILE" "$BACKUP_DIR/$TODAY/" 2>/dev/null
  else
    echo "Warnung: $FILE existiert nicht und wird uebersprungen." >> "$LOGFILE"
  fi
done

# Prune backup directories older than 14 days.
find "$BACKUP_DIR" -mindepth 1 -maxdepth 1 -type d -mtime +14 -exec rm -rf {} \;

echo "Backup fuer $TODAY abgeschlossen." >> "$LOGFILE"

View File

@@ -0,0 +1,18 @@
# Home Assistant
## läuft als VM unter Proxmox und wird dort täglich vollständig auf der Qnap gesichert
[=> Proxmox_Backup:](../../backup_restore/proxmox/proxmox_backup.md)
## aus HA heraus
=> Einstellungen => System => Speicher
- Netzwerkspeicher
- Verwendung: Backup
- Server: 192.168.178.254 (Qnap)
- Protokoll: NFS
- Remote Freigabepfad: Backups_homeassistant
=> Einstellungen => System => Backups
- Täglich und 7 Backups aufbewahren
- zu sichernde Daten: HA Einstellungen, Verlauf
- Speicherorte: Backups_homeassistant

View File

@@ -0,0 +1,177 @@
# Paperless Backup
- tägliches backup der VM unter Proxmox auf der QNAP [=> Proxmox_Backup](/docs/backup_restore/proxmox/proxmox_backup.md)
- Sicherung der Nutzdaten: erfolgt per Skript und cron auf hetzner.storage
<details>
<summary>Location /paperless/backup_storage:</summary>
```
#!/usr/bin/env bash
# Borg backup of the paperless export directory to a Hetzner Storage Box.
# Usage: backup_storage.sh [INIT|NEW_INIT|?|<suffix>]
#   (no argument) regular backup, archive named <timestamp>
#   INIT          create the remote repo only if it does not exist yet
#   NEW_INIT      DELETE the remote repo and recreate it
#   anything else is appended to the archive name as "_<arg>"
#####!/bin/bash
########### Initialisation ##############
#### https://docs.hetzner.com/de/robot/storage-box/backup-space-ssh-keys/
## One-time setup: run the following commands on the client:
# ssh-keygen
# cat ~/.ssh/id_rsa.pub | ssh -p23 u338XXX@u338XXX.your-storagebox.de install-ssh-key
###### Insert your data here #########
# NOTE(review): hardcoded secret in a documented script — move the passphrase
# to a root-only file (e.g. read from /root/.borg_passphrase) and rotate it,
# since this page is checked into the docs repo.
export BORG_PASSPHRASE="%ci5pKqWvXj!iBm9khAR@Z2ohJ2inMMht8ZNsU*"
BACKUP_USER="u358899"
REPOSITORY_DIR="paperless"
##########################################
LOG_DIR="/paperless/backuplogs"
LOG="$LOG_DIR/backup_storage.log"
# NOTE(review): leftover debug marker — presumably used to verify cron runs
# the script at all; confirm before removing.
echo "MOIN!" >> /paperless/test.log
if [ ! -d "$LOG_DIR" ]; then
mkdir -p "$LOG_DIR"
fi
# Resolve the script's own path; dir_path is only echoed, never used again.
full_path=$(realpath $0)
dir_path=$(dirname $full_path)
echo $dir_path
## Note: when using a backup account, change
## 'your-storagebox.de' to 'your-backup.de'.
REPOSITORY="ssh://${BACKUP_USER}@${BACKUP_USER}.your-storagebox.de:23/./backups/${REPOSITORY_DIR}"
## Timestamp used as the borg archive name
TIMESTAMP=$(date +'%Y-%m-%d_%H:%M')
##
## Mirror all stdout/stderr into the log file
##
exec > >(tee -i ${LOG})
exec 2>&1
start_time=$(date +'%Y-%m-%d %H:%M:%S')
echo "###### Backup gestartet: $start_time ######"
## Check whether a special action was requested via $1
BACKUP_SUFFIX=""
case "$1" in
# NEW_INIT: destroy the remote repository and create it from scratch.
NEW_INIT)
echo "Überprüfe, ob das Verzeichnis backups/${REPOSITORY_DIR} existiert..."
ssh -p23 ${BACKUP_USER}@${BACKUP_USER}.your-storagebox.de "[ -d backups/${REPOSITORY_DIR} ] || mkdir -p backups/${REPOSITORY_DIR}"
echo "Das Repository wird komplett gelöscht und neu angelegt..."
borg delete --force --stats $REPOSITORY
borg init --encryption=repokey $REPOSITORY
BACKUP_SUFFIX="_NEW_INIT"
;;
# INIT: create the repository only if it does not already exist.
INIT)
echo "Überprüfe, ob das Verzeichnis backups/${REPOSITORY_DIR} existiert..."
ssh -p23 ${BACKUP_USER}@${BACKUP_USER}.your-storagebox.de "[ -d backups/${REPOSITORY_DIR} ] || mkdir -p backups/${REPOSITORY_DIR}"
echo "Überprüfe, ob das Repository existiert..."
if borg info $REPOSITORY > /dev/null 2>&1; then
echo "Das Repository existiert bereits. Vorgang wird abgebrochen."
exit 1
else
echo "Das Repository wird neu angelegt..."
borg init --encryption=repokey $REPOSITORY
fi
BACKUP_SUFFIX="_INIT"
;;
# NOTE(review): '?' is an unquoted case glob, so ANY single-character
# argument (not just a literal '?') prints this help text and exits.
?)
echo -e "\n######################################"
echo -e "Verwendung des Backup-Skripts:"
echo -e "######################################"
echo -e "Ohne Parameter:"
echo -e "  Führt ein reguläres Backup durch und hängt an den Backup-Namen das aktuelle Datum und die Uhrzeit."
echo -e "\nParameter:"
echo -e "  NEW_INIT - Löscht das Repository komplett und legt es neu an."
echo -e "  INIT     - Legt das Repository neu an, wenn es nicht bereits existiert."
echo -e "  ?        - Zeigt diese Hilfemeldung an und bricht das Skript ab."
echo -e "\nBeispiele:"
echo -e "  ./backup_script.sh"
echo -e "  ./backup_script.sh NEW_INIT"
echo -e "  ./backup_script.sh INIT"
echo -e "  ./backup_script.sh ?"
echo -e "\nBackups auflisten mit:"
echo -e "  borg list ssh://${BACKUP_USER}@${BACKUP_USER}.your-storagebox.de:23/./backups/${REPOSITORY_DIR}\n"
echo -e "Rücksichern einzelner Verzeichnisse mit:"
echo -e "  cd /mytmp # Wechseln Sie in ein Testverzeichnis, um das Backup zu testen."
echo -e "  borg extract ssh://${BACKUP_USER}@${BACKUP_USER}.your-storagebox.de:23/./backups/${REPOSITORY_DIR}::${TIMESTAMP}${BACKUP_SUFFIX} etc var lib lib64 sbin usr bin"
echo -e "######################################\n"
echo -e "Komplette Rücksicherung mit:"
echo -e "cd / # Wechseln Sie in das Root-Verzeichnis, um das gesamte System wiederherzustellen."
echo -e "borg extract ssh://${BACKUP_USER}@${BACKUP_USER}.your-storagebox.de:23/./backups/${REPOSITORY_DIR}::${TIMESTAMP}${BACKUP_SUFFIX}"
echo -e "######################################\n"
echo -e "Auf den Backup-Server verbinden:"
echo -e "ssh -p23 ${BACKUP_USER}@${BACKUP_USER}.your-storagebox.de"
echo -e "Verzeichnisinhalt anzeigen:"
echo -e "ls backups/${REPOSITORY_DIR}"
echo -e "Verzeichnis der aktuellen Backups löschen:"
echo -e "rm -rf backups/${REPOSITORY_DIR}"
echo -e "######################################\n"
exit 0
;;
esac
## Any other non-empty argument becomes a suffix of the archive name
if [ -n "$1" ] && [ "$1" != "INIT" ] && [ "$1" != "NEW_INIT" ]; then
BACKUP_SUFFIX="_$1"
fi
##
## Directories to back up
##
# Only the paperless export directory is transferred.
DIRS_TO_BACKUP=(
"/paperless/paperless-ngx/export"
)
##
## Transfer files into the repository
##
echo "Übertrage Dateien ..."
borg create -v --stats \
$REPOSITORY::"${TIMESTAMP}${BACKUP_SUFFIX}" \
"${DIRS_TO_BACKUP[@]}" # \
# Duration is computed from the epoch difference of start/end timestamps.
end_time=$(date +'%Y-%m-%d %H:%M:%S')
duration=$(date -u -d @$(( $(date -d "$end_time" +%s) - $(date -d "$start_time" +%s) )) +%H:%M:%S)
echo "###### Backup beendet: $end_time ######"
echo "Time (start): $start_time"
echo "Time (end):   $end_time"
echo "Duration:     $duration"
echo -e "\n######################################"
echo -e "Backups auflisten mit:"
echo -e "borg list ssh://${BACKUP_USER}@${BACKUP_USER}.your-storagebox.de:23/./backups/${REPOSITORY_DIR}\n"
echo -e "Rücksichern einzelner Verzeichnisse mit:"
echo -e "cd /mytmp # Wechseln Sie in ein Testverzeichnis, um das Backup zu testen."
echo -e "borg extract ssh://${BACKUP_USER}@${BACKUP_USER}.your-storagebox.de:23/./backups/${REPOSITORY_DIR}::${TIMESTAMP}${BACKUP_SUFFIX} etc var lib lib64 sbin usr bin"
echo -e "######################################\n"
echo -e "Komplette Rücksicherung mit:"
echo -e "borg extract ssh://${BACKUP_USER}@${BACKUP_USER}.your-storagebox.de:23/./backups/${REPOSITORY_DIR}::${TIMESTAMP}${BACKUP_SUFFIX}"
echo -e "######################################\n"
echo -e "Auf den Backup-Server verbinden:"
echo -e "ssh -p23 ${BACKUP_USER}@${BACKUP_USER}.your-storagebox.de"
echo -e "Verzeichnisinhalt anzeigen:"
echo -e "ls backups/${REPOSITORY_DIR}"
echo -e "Verzeichnis der aktuellen Backups löschen:"
echo
echo -e "rm -rf backups/${REPOSITORY_DIR}"
echo -e "######################################\n"
# Final listing of remote archives, appended to the log file.
borg list ssh://${BACKUP_USER}@${BACKUP_USER}.your-storagebox.de:23/./backups/${REPOSITORY_DIR} >> ${LOG}
```
</details>

View File

@@ -3,29 +3,121 @@
## 1. PVE ## 1. PVE
- Location: verschlüsselt auf hetznerstoragebox gemäß Anleitung https://ralf-peter-kleinert.de/linux-server/proxmox-verschluesselt-backup.html. (Die liegt auch als Anhang im bitwarden) - Location: verschlüsselt auf hetznerstoragebox gemäß Anleitung https://ralf-peter-kleinert.de/linux-server/proxmox-verschluesselt-backup.html. (Die liegt auch als Anhang im bitwarden)
- Scedule: täglich im cron und wird 14 Tage aufgehoben - Scedule: täglich im cron und wird 14 Tage aufgehoben
- ÄNDERUNGEN siehe [/etc/fstab](../proxmox/fstab) - ÄNDERUNGEN siehe
- Skript: /root/backup-pve-configs.sh <details>
```snippet <summary>Location: /etc/fstab</summary>
--8<-- "/docs/backup_restore/proxmox/backup_pve_configs.sh"
``` ```
#QNAP
//192.168.178.254/qnapmultimedia /mnt/qnapmount_mm cifs user,credentials=/root/.credentials/qnapcreds,iocharset=utf8,noperm 0 0
//192.168.178.254/Backups /mnt/qnapmount_backups cifs user,credentials=/root/.credentials/qnapcreds,iocharset=utf8,noperm 0 0
#Hetznerbox
//u358899.your-storagebox.de/backup /mnt/hetznerbox cifs user,credentials=/root/.credentials/hetznercreds,iocharset=utf8,noperm 0 0
#//u358899.your-storagebox.de/backups /mnt/hetznerbox cifs username=u358899,password=vgceBjPMxwq2eT7k,rw
#Storagebox Crypted
//u358899.your-storagebox.de/backup/backups/proxmox /mnt/storagebox-crypted cifs credentials=/root/.credentials/hetznercreds,iocharset=utf8,rw,_netdev,uid=0,gid=0,file_mode=0660,dir_mode=0770 0 0
#Storagebox Uncrypted - wird automatisch mit Crypted verbunden
/mnt/storagebox-crypted /mnt/storagebox-nocrypt fuse./usr/bin/gocryptfs rw,nofail,auto,x-systemd.idle-timeout=10,x-systemd.automount,allow_other,quiet,passfile=/root/.gocryptfspw 0 0
```
</details>
- Log: /root/pve-backup.log - Log: /root/pve-backup.log
- Skript:
<details>
<summary>Location: /root/backup-pve-configs.sh :</summary>
```
#!/bin/bash
# Back up Proxmox VE configuration files and directories into a dated
# snapshot directory on the (gocryptfs-mounted) Hetzner storage box, then
# prune snapshots older than 14 days.
BACKUP_DIR="/mnt/storagebox-nocrypt/pve-configs-backup"
# Snapshot name, e.g. "01:00:01_2026-04-20-pve-configs-backup".
TODAY=$(date +%T_%F)-pve-configs-backup
# Absolute path: the relative "pve-backup.log" wrote into whatever the
# caller's working directory was; the docs state the log is /root/pve-backup.log.
LOGFILE="/root/pve-backup.log"
mkdir -p "$BACKUP_DIR/$TODAY"
# Files and directories to copy (rsync --relative keeps the full path layout).
FILES=(
"/root/"
"/etc/pve/"
"/etc/network/interfaces"
"/etc/hosts"
"/etc/resolv.conf"
"/etc/hostname"
"/etc/timezone"
"/etc/passwd"
"/etc/group"
"/etc/shadow"
"/root/.ssh/"
"/etc/vzdump.conf"
"/etc/ssh/sshd_config"
"/var/lib/pve-cluster/"
"/etc/ssh/"
"/etc/cron.d/"
"/etc/cron.daily/"
"/etc/cron.hourly/"
"/etc/cron.weekly/"
"/etc/cron.monthly/"
"/var/spool/cron/"
"/etc/fstab"
"/etc/default/"
"/etc/apt/sources.list"
"/etc/apt/sources.list.d/"
"/var/log/"
"/etc/systemd/"
"/etc/sysctl.conf"
"/etc/security/"
"/var/backups/"
"/etc/fail2ban/"
"/root/backup-pve-configs.sh"
)
# Paths never copied (the backup target itself plus guest disk data).
EXCLUDE_DIRS=(
"/mnt/storagebox-crypt"
"/mnt/storagebox-nocrypt"
"/var/lib/vz/images/"
"/var/lib/lxc/"
"/var/lib/vz/private/"
"/var/lib/lxcfs/"
)
# Build the rsync exclude parameters
EXCLUDE_PARAMS=()
for EXCLUDE in "${EXCLUDE_DIRS[@]}"; do
EXCLUDE_PARAMS+=(--exclude="$EXCLUDE")
done
for FILE in "${FILES[@]}"; do
if [ -e "$FILE" ]; then
echo "Kopiere $FILE..."
# Append rsync errors to the log instead of discarding them (2>/dev/null
# previously hid transfer failures entirely).
rsync -aL --relative --ignore-missing-args --safe-links "${EXCLUDE_PARAMS[@]}" "$FILE" "$BACKUP_DIR/$TODAY/" 2>>"$LOGFILE"
else
echo "Warnung: $FILE existiert nicht und wird uebersprungen." >> "$LOGFILE"
fi
done
# Prune snapshot directories older than 14 days.
find "$BACKUP_DIR" -mindepth 1 -maxdepth 1 -type d -mtime +14 -exec rm -rf {} \;
echo "Backup fuer $TODAY abgeschlossen." >> "$LOGFILE"
```
</details>
# 2. Komplette lxc und VMs #
## 2. Komplette lxc und VMs #
- Skript: GUI - Skript: GUI
- Log: GUI - Log: GUI
- Location: //192.168.178.29/Backups/proxmox - Location: /QNAP_Bckups/Backups/proxmox
- Schedule: - Schedule: 0:30, keep-daily, keep-monthly=12, keep-weekly=4, keep-yearly=2
![scedule.png](/scedule.png)
- **TODO: Borgbackup für Container** - **TODO: Borgbackup für Container**
# 3. vm_paperless #
- Skript: /paperless/backupqnap.sh
- Location: //192.168.178.29/paperless/zips;
//u358899@u358899.your-storagebox.de/backup/./backups/paperless
- Scedule: täglich 4:00 für 31 Tage
# 4. VM Homeassistent ## weitere Datenbackups
[=> Paperless Backup](paperless/paperless_backup.md)
[=> Homeassistent Backup](paperless/homeassistent_backup.md)
[=> TV-Headend](paperless/tv-headend_backup.md)

View File

@@ -0,0 +1,3 @@
## TV-Headend Backup
nur Containersicherung über [Proxmox backup](../proxmox_backup.md)

View File

@@ -30,6 +30,3 @@ Backupsystem läuft auf 2 Ebenen:
## Restore ## Restore
- Gezielt Dateien: Archiv einhängen (Mountpunkt: /home/christina/borgbackupHetzner), Dateien kopieren - Gezielt Dateien: Archiv einhängen (Mountpunkt: /home/christina/borgbackupHetzner), Dateien kopieren
- allgemeines Restore: ausgewähltes Archiv -> extract - allgemeines Restore: ausgewähltes Archiv -> extract

View File

@@ -0,0 +1,13 @@
# Backup Konfiguration Workstations
## christian-linux-mint
=> [christian-linux-mint](../workstations/christian-linux_backup.md)
## Christians Handy
=> [christian-handy_backup.md](../workstations/christian-handy_backup.md)
## Dorotheas Laptop
=> [dorothea-laptop_backup.md](../workstations/dorothea-laptop_backup.md)
## Opis PC
=> [opi-pc_backup.md](../workstations/opi-pc_backup.md)

View File

@@ -1,5 +1,4 @@
# AdguardhomeTest # Adguardhome - ist DOWN, ZUR ZEIT LÄUFT PIHOLE
## Allgemein ## Allgemein
- Image: `adguard/adguardhome:v0.107.69` - Image: `adguard/adguardhome:v0.107.69`
@@ -16,7 +15,7 @@
## Deployment ## Deployment
```bash ```bash
cd /docker/Bruchtal/docker/adguardhome cd /docker/Bruchtal/compose/adguardhome
git pull git pull
docker compose pull docker compose pull
docker compose up -d docker compose up -d

View File

@@ -1,40 +0,0 @@
# 🏗 Bruchtal Docker-Architektur
## Übersicht
Die Bruchtal-Infrastruktur läuft vollständig containerisiert auf einer VM.
Alle Dienste kommunizieren über ein dediziertes Docker-Netzwerk, nutzen Git zur Versionierung und automatisches Deploy über Webhooks.
**Hauptkomponenten:**
| Service | Containername | Funktion |
|----------------|-------------------|---------|
| Gitea | `gitea` | Git-Server für Infrastruktur & Dokumentation |
| Wiki.js | `wikijs` | Wissensmanagement & Dokumentation |
| MkDocs | `bruchtal-docs` | Statische Markdown-Dokumentation |
| Webhook | `bruchtal-webhook`| Automatisches Deploy bei Git Push |
| Docker Host | VM | Plattform für alle Container |
---
## 🔗 Netzwerke
Alle Container laufen im **gemeinsamen Docker-Netzwerk** `bruchtal-net`:
- Kommunikation per Service-Namen (`gitea`, `bruchtal-webhook`)
- Keine Abhängigkeit von Host-IP
- Isoliert von anderen VM-Netzwerken
Beispiel Docker-Compose-Netzwerkdefinition:
```yaml
networks:
bruchtal-net:
external: true
```
## Mounts
/etc/fstab:
```
```

89
docs/docker/docker.md Normal file
View File

@@ -0,0 +1,89 @@
# 🏗 Bruchtal Docker-Architektur
## Übersicht
Die Bruchtal-Infrastruktur läuft vollständig containerisiert auf einer VM.
Alle Dienste kommunizieren über ein dediziertes Docker-Netzwerk, nutzen Git zur Versionierung und automatisches Deploy über Webhooks.
**Hauptkomponenten:**
| Service | Containername | Funktion |
|----------------|-------------------|---------|
| Gitea | `gitea` | Git-Server für Infrastruktur & Dokumentation |
| Wiki.js | `wikijs` | Wissensmanagement & Dokumentation |
| MkDocs | `bruchtal-docs` | Statische Markdown-Dokumentation |
| Webhook | `bruchtal-webhook`| Automatisches Deploy bei Git Push |
| Docker Host | VM | Plattform für alle Container |
---
## IP
192.168.178.204
## derzeit belegte Ports:
| Port | Dienst | Container | Funktion | URL |
|-------|--------------------|--------------|-----------------------------------|--------------------------------|
| 9443 |Portainer | portainer | **reines Dashboard** für Docker | [portainer.seanluc.de](https://portainer.seanluc.de) |
| 1380 | Vaultwarden | vaulttwarden | Passwortmanager | [bitwarden.seanluc.de](https://bitwarden.seanluc.de) |
| 1180 | Nextcloud | nexcloud | Cloud | [nc.seanluc.de](https://nc.seanluc.de) |
| 3002 | Gitea | gitea | Repo Verwaltung | [gitea.seanluc.de](https://gitea.seanluc.de) |
| 9005 | Mkdocs | bruchtal-docs| Dokumentation | [doku.seanluc.de](doku.seanluc.de) |
---
## 🔗 Netzwerke
Alle Container laufen im **gemeinsamen Docker-Netzwerk** `bruchtal-net`:
- Kommunikation per Service-Namen (`gitea`, `bruchtal-webhook`)
- Keine Abhängigkeit von Host-IP
- Isoliert von anderen VM-Netzwerken
Beispiel Docker-Compose-Netzwerkdefinition:
```yaml
networks:
bruchtal-net:
external: true
```
## Mounts
Die Mount laufen nicht über die fstab. Grund: die QNAP geht seltsam mit Sonderzeichen in den credentials um.
Lösung:
sudo nano /etc/systemd/system/mnt-vaultwardenBackupOnQnap.mount:
``` ini
[Unit]
After=network-online.target
[Mount]
What=//192.168.178.254/Backups/docker_backups/vaultwarden
Where=/mnt/vaultwardenBackupOnQnap
Type=cifs
Options=rw,vers=3.0,username=admin,password=!!Zazen17**,uid=1001,gid=1001
[Install]
WantedBy=multi-user.target
```
dann
```
sudo systemctl daemon-reload
sudo systemctl enable mnt-vaultwardenBackupOnQnap.mount
sudo systemctl start mnt-vaultwardenBackupOnQnap.mount #Mount wird auch beim booten gestartet
sudo systemctl stop mnt-vaultwardenBackupOnQnap.mount #Mount wird gestoppt
## Docker Backup
Docker läuft als VM unter Proxmox und wird dort täglich vollständig auf der Qnap gesichert
[=> Proxmox_Backup:](../../backup_restore/proxmox/proxmox_backup.md)

View File

@@ -10,13 +10,13 @@
| Host Path | Container Path | Zweck | | Host Path | Container Path | Zweck |
|------------|----------------|--------| |------------|----------------|--------|
| /docker/Bruchtal/docker/heimdall/data/config:/config | /config | Konfiguration | | /docker/Bruchtal/compose/heimdall/data/config:/config | /config | Konfiguration |
## Deployment ## Deployment
```bash ```bash
cd /docker/Bruchtal/docker/heimdall cd /docker/Bruchtal/compose/heimdall
git pull git pull
docker compose pull docker compose pull
docker compose up -d docker compose up -d

View File

@@ -12,7 +12,7 @@
## Deployment ## Deployment
```bash ```bash
cd /docker/Bruchtal/docker/it-tools cd /docker/Bruchtal/compose/it-tools
git pull git pull
docker compose pull docker compose pull
docker compose up -d docker compose up -d

View File

@@ -0,0 +1,31 @@
# Mkdocs
## Zweck:
Dokumentation des Systems in Markdown
## /ssrv/docker/docker-compose.yml
-> liegt nicht in gesondertem Container.
-> erwartet Verzeichnis docs und mkdocs.yml (diese Datei)
``` yaml
services:
bruchtal-docs:
image: squidfunk/mkdocs-material:latest
container_name: bruchtal-docs
restart: unless-stopped
ports:
- "8005:8000"
volumes:
- /docker/Bruchtal:/docs
command: serve --dev-addr=0.0.0.0:8000
environment:
- TZ=Europe/Berlin
- WATCHDOG_FORCE_POLLING=true
```
## Backup & Restore
kein spezielles Backup notwendig.

View File

@@ -0,0 +1 @@
testtest

View File

@@ -17,7 +17,7 @@
## Deployment ## Deployment
```bash ```bash
cd /docker/Bruchtal/docker/wikijs cd /docker/Bruchtal/compose/wikijs
git pull git pull
docker compose pull docker compose pull
docker compose up -d docker compose up -d

67
docs/homepage/homepage.md Normal file
View File

@@ -0,0 +1,67 @@
# Homepage Stack
## dockerproxy
um den Status von Dockercontainern direkt auslesen zu können
- Image: `ghcr.io/tecnativa/docker-socket-proxy:latest`
- Port: `127.0.0.1:2375:2375`
- Restart Policy: `unless-stopped`
## Homepage
das eigentlich
## Volumes
| Host Path | Container Path | Zweck |
|------------|----------------|--------|
| /var/run/docker.sock:/var/run/docker.sock:ro | | |
## Deployment
```bash
cd /docker/Bruchtal/compose/heimdall
git pull
docker compose pull
docker compose up -d
```
## aktuelles Skript
```snippet
--8<-- "/docs/docker/heimdall/docker-compose.yml"
```
dockerproxy:
image: ghcr.io/tecnativa/docker-socket-proxy:latest
container_name: dockerproxy
environment:
- CONTAINERS=1 # Allow access to viewing containers
- SERVICES=1 # Allow access to viewing services (necessary when using Docker Swarm)
- TASKS=1 # Allow access to viewing tasks (necessary when using Docker Swarm)
- POST=0 # Disallow any POST operations (effectively read-only)
ports:
- 127.0.0.1:2375:2375
volumes:
- /var/run/docker.sock:/var/run/docker.sock:ro # Mounted as read-only
restart: unless-stopped
homepage:
image: ghcr.io/gethomepage/homepage:latest
container_name: homepage
ports:
- "3004:3000"
volumes:
- /srv/docker/daten/homepage:/app/config
- /srv/docker/daten/homepage/icons:/app/public/icons
- /var/run/docker.sock:/var/run/docker.sock:ro
environment:
- HOMEPAGE_ALLOWED_HOSTS=192.168.178.204:3004,localhost
restart: unless-stopped

View File

@@ -2,8 +2,7 @@
Willkommen in der Infrastruktur-Dokumentation von **Bruchtal**. Willkommen in der Infrastruktur-Dokumentation von **Bruchtal**.
## Ziel ## Inhalt ##
Diese Dokumentation beschreibt: Diese Dokumentation beschreibt:
- ⚙️ Workflows - ⚙️ Workflows

View File

@@ -0,0 +1,16 @@
# Allgemeine Einstellungen
## DHCP
### Fritzbox (derzeit aktuell)
- Range: 192.168.178.20 - 199
## feste IPs
[192.168.178.200: Proxmox](http://192.168.178.200:8006)
[192.168.178.201: vm-Pihole](http://192.168.178.201)
[192.168.178.203: vm-homeassistent](http://192.168.178.203:8123)
[192.168.178.204: vm-docker](http://192.168.178.204:9443)
[192.168.178.214: vm-jellyfin]()
[192.168.178.217: vm-docker-restore](http://192.168.178.221:9443)
[192.168.178.221: vm-paperless2](http://192.168.178.221:8000)
[192.168.178.254: QNAP](https://192.168.178.254)
### Kea-dhcp4 + Adguard # caddy

View File

@@ -0,0 +1,4 @@
# Homeassistent
## Plattform
- VM im Docker

View File

@@ -0,0 +1,52 @@
services:
broker:
image: docker.io/library/redis:8
restart: unless-stopped
volumes:
- redisdata:/data
db:
image: docker.io/library/postgres:17
restart: unless-stopped
volumes:
- /paperless/database:/var/lib/postgresql/data
environment:
POSTGRES_DB: paperless
POSTGRES_USER: paperless
POSTGRES_PASSWORD: paperless
webserver:
image: ghcr.io/paperless-ngx/paperless-ngx:latest
restart: unless-stopped
depends_on:
- db
- broker
- gotenberg
- tika
ports:
- "8000:8000"
volumes:
- /paperless/data:/usr/src/paperless/data
- /paperless/media:/usr/src/paperless/media
- ./export:/usr/src/paperless/export
- /paperless/paperless-ngx/consume:/usr/src/paperless/consume
env_file: docker-compose.env
environment:
PAPERLESS_REDIS: redis://broker:6379
PAPERLESS_DBHOST: db
PAPERLESS_TIKA_ENABLED: 1
PAPERLESS_TIKA_GOTENBERG_ENDPOINT: http://gotenberg:3000
PAPERLESS_TIKA_ENDPOINT: http://tika:9998
gotenberg:
image: docker.io/gotenberg/gotenberg:8.20
restart: unless-stopped
# The gotenberg chromium route is used to convert .eml files. We do not
# want to allow external content like tracking pixels or even javascript.
command:
- "gotenberg"
- "--chromium-disable-javascript=true"
- "--chromium-allow-list=file:///tmp/.*"
tika:
image: docker.io/apache/tika:latest
restart: unless-stopped
volumes:
redisdata:

View File

@@ -0,0 +1,17 @@
# Paperless VM
## Architektur
- läuft als VM direkt auf Proxmox
- Plattform: Ubuntu Server 24
- Paperless läuft unter Docker
## docker-compose.yml
``` snippet
--8<-- "/docs/proxmox/paperless/docker-compose.yml"
```
** docker-compose.env Datei mit Key in Bitwarden **
## Backup
==>

View File

@@ -9,6 +9,9 @@
- Docker-VM - Docker-VM
- Backup: tägliche Snapshots - Backup: tägliche Snapshots
## Root-Zugriff
derzeit noch möglich: das übliche PW
## Crontab ## Crontab
```snippet ```snippet
0 1 * * * /root/backup-pve-configs.sh 0 1 * * * /root/backup-pve-configs.sh
@@ -44,10 +47,9 @@ UUID=fe878af4-c44e-4ab8-8d81-1efb0391aaf7 /mnt/DockerDaten ext4 defaults 0
## aktuelles Skript ## aktuelles Skript
``` snippet ``` snippet
--8<-- "/docs/docker/wikijs/docker-compose.yml" --8<-- "/docs/backup_restore/proxmox/backup_pve_configs.sh"
``` ```
=> [Proxmox Backup](../backup_restore/proxmox/proxmox_backup.md).
=> [Skript:](../backup_restore/proxmox/backup_pve_configs.sh)
=> [Proxmox Backup](../backup_restore/proxmox_backup.md).

View File

@@ -0,0 +1,2 @@
# TV-Headend

View File

@@ -1,6 +1,18 @@
# DNS - DHCP Konfiguration # DNS - DHCP Konfiguration
** AKTUELL PIHOLE ALS DNS!**
## Anmerkungen zu AdGuardHome
## Pihole
- derzeit aktuell im Einsatz, Adguard & Co abgeschaltet.
-
### aktuelles Skript
```snippet
--8<-- "/docs/docker/pihole/docker-compose.yml"
```
## AdGuardHome
- die Konfiguration findet in der adguard/config/AdGuardHome.yaml statt. - die Konfiguration findet in der adguard/config/AdGuardHome.yaml statt.
**Problem**: dummerweise überschreibt die GUI diese bei jedem Start gnadenlos. **Problem**: dummerweise überschreibt die GUI diese bei jedem Start gnadenlos.
- Blaupause liegt in /docker/AdguardHome.yaml bzw. s.u. - Blaupause liegt in /docker/AdguardHome.yaml bzw. s.u.

View File

@@ -0,0 +1,3 @@
siehe [Backups Workstations](../backup_restore/workstations/workstation_backup.md)

View File

@@ -14,7 +14,6 @@ Ziel:
**Konfiguration passiert lokal in VS Code.** **Konfiguration passiert lokal in VS Code.**
Die VM ist nur noch Laufzeitumgebung. Die VM ist nur noch Laufzeitumgebung.
1. Lokal ändern, egal was 1. Lokal ändern, egal was
2. Committen & Pushen 2. Committen & Pushen
3. der Pull auf der VM wird automatisch über einen Webhook ausgeführt 3. der Pull auf der VM wird automatisch über einen Webhook ausgeführt
@@ -28,22 +27,68 @@ flowchart LR
Workspace -->|Markdown changes| MkDocs Workspace -->|Markdown changes| MkDocs
MkDocs -->|serve| Browser MkDocs -->|serve| Browser
``` ```
--- ---
## Verzeichnisstruktur
compose, scripts, docs sind im Repo. Daher als Unterordner, damit bei einem pull force die Daten nicht mit überschrieben werden
```
/srv/docker/
├─ repo
│ ├─ compose # Docker-Compose Stacks für jeden Container
│ │ ├─ nextcloud/
│ │ │ └─ docker-compose.yml
│ │ ├─ tvheadend/
│ │ │ └─ docker-compose.yml
│ │ ├─ signal-rest-api/
│ │ │ └─ docker-compose.yml
│ │ └─ ... (weitere aktive Container)
│ │
│ ├─ scripts/ # Deploy-Scripts, Webhooks, Utilities
│ │ ├─ deploy-changed-containers-final.sh
│ │ ├─ webhook-deploy.sh
│ │ └─ ... (weitere Scripts)
│ │
│ ├─ docs/ # MkDocs / Markdown Dokumentation
│ │ ├─ backup_restore
│ │ | ├─ docker
| │ | └─ docker_backup.md
| │ | ├─ hetzner
│ │ ├─ docker
│ │ | ├─ adguardhome
| │ | | └─ adguardhome.md
│ │ | ├─ heimdall
| │ │ └─ heimdall.md
| │ └─ ... (weitere .md Dateien)
│ │
│ ├─ mkdocs.yml # MkDocs Konfiguration
├─ daten/ # Docker-Volumes / persistent data
│ ├─ nextcloud/
│ │ ├─ www/ # Nextcloud Webdaten
│ │ ├─ db/ # MariaDB Daten
│ │ └─ redis/ # Redis Daten
│ ├─ tvheadend/
│ │ └─ config/ # TVHeadend config / recordings
│ ├─ signal-rest-api/
│ │ └─ data/
│ └─ ... (weitere Container-Daten)
└─ .gitignore # ignoriert daten/ und ggf. temp files
```
## Workflow "neuer Container" ## Workflow "neuer Container"
- VSCode starten in ~Bruchtal mit code . => VS startet sauber mit der Giteinstellung - VSCode starten in ~Bruchtal mit code . => VS startet sauber mit der Giteinstellung
**ALLE ÄNDERUNGEN NUR IN VS** **ALLE ÄNDERUNGEN NUR IN VS**
### neuen Containeranlegen - `compose/<Containername>` anlegen
- `Bruchtal/<Containername>` anlegen - `compose/<Containername>/docker-compose.yml` anlegen
- `Bruchtal/<Containername>/docker-compose.yml` anlegen
- docker-compose.yml editieren, - docker-compose.yml editieren,
- commit mit Message `"infra(<Containername>): docker-compose.yml neu angelegt"` - commit mit Message `"infra(<Containername>): docker-compose.yml neu angelegt"`
- push - push
### neuen Container dokumentieren ### neuen Container dokumentieren
- `Bruchtal/docs/<Containername>` anlegen - `docker/docs/<Containername>` anlegen
- `Bruchtal/docs/<Containername>/<Containername>.md` anlegen - `Bruchtal/docs/<Containername>/<Containername>.md` anlegen
- `<Containername>.md` editieren, Blaupause z.B: wikijs.md - `<Containername>.md` editieren, Blaupause z.B: wikijs.md
- commit message `docs(<Containername>): Dokumentation angelegt` - commit message `docs(<Containername>): Dokumentation angelegt`
@@ -60,14 +105,3 @@ cd /docker/Bruchtal/<Containername>
docker compose up -d docker compose up -d
``` ```
## Repository-Struktur
```
Bruchtal/
├── docker/
│ └── <Containername>/
│ └── docker-compose.yml
└── docs/
└── docker/
└── <Containername>/
└── <Containername>.md
```

View File

@@ -1,6 +1,27 @@
# Reparieren des Gitea-Repos # Reparieren des Gitea-Repos
## Ausgangslage: aus Versehen Datei auf Vm editiert anstatt in VS Code ## Ausgangslage: aus Versehen Datei auf Vm editiert anstatt in VS Code
Problem: das Repo ist auseinander gelaufen. Problem: das Repo ist auseinander gelaufen.
## Voraussetzung:
docker/gitea muss laufen
## Lösungsansatz: ## Lösungsansatz:
- **Wichtig:** dafür sorgen, dass die "korrekten" Dateien auf dem PC liegen wo VS läuft - **Wichtig:** dafür sorgen, dass die "korrekten" Dateien auf dem PC liegen wo VS läuft
- - auf der lokalen Maschine in Bruchtal:
``` snippet
git push origin main --force
```
auf der VM:
``` snippet
cd /srv/docker/repo
git fetch origin
git reset --hard origin/main
```
Optional noch alte Dateien entfernen:
```
git clean -fd
```

View File

@@ -1,6 +1,6 @@
site_name: Bruchtal site_name: Bruchtal
site_description: Infrastruktur- und Betriebsdokumentation von Bruchtal site_description: Infrastruktur- und Betriebsdokumentation von Bruchtal
site_author: Bruchtal site_author: Christian
theme: theme:
name: material name: material
@@ -15,27 +15,34 @@ nav:
- Netzwerk: - Netzwerk:
- Topologie: network/topology.md - Topologie: network/topology.md
- Tunnel: network/tunnel.md - Tunnel: network/tunnel.md
- Proxmox: - Proxmox:
- VMs: proxmox/proxmox.md - System: proxmox/proxmox.md
- Paperless: proxmox/paperless/paperless.md
- Homeassistant: proxmox/homeassistent/homeassistent.md
- Docker: - Docker:
- Architektur: docker/architecture.md - Architektur: docker/docker.md
- Adguardhome: docker/adguardhome/adguardhome.md - Adguardhome: docker/adguardhome/adguardhome.md
- Wikijs: docker/wikijs/wikijs.md
- Heimdall: docker/heimdall/heimdall.md - Heimdall: docker/heimdall/heimdall.md
- It-Tools: docker/it-tools/it-tools.md - It-Tools: docker/it-tools/it-tools.md
- PiholeTests: docker/pihole/pihole.md
- Portainer: docker/portainer/portainer.md - Portainer: docker/portainer/portainer.md
- Vaultwarden: docker/vaultwarden/vaultwarden.md - Vaultwarden: docker/vaultwarden/vaultwarden.md
- Vaultwarden_Backup: docker/vaultwarden-backup/vaultwarden-backup.md - Vaultwarden_Backup: docker/vaultwarden_backup/vaultwarden_backup.md
- Wikijs: docker/wikijs/wikijs.md - Wikijs: docker/wikijs/wikijs.md
- Backup: - Backup:
- Backup: backup/backup.md - Docker: /docker/docker_backup.md
- christian-linux: backup_restore/workstations/christian-linux_backup.md - Hetzner: backup_restore/hetzner/hetzner_backup.md
- Homeassistant: backup_restore/proxmox/homeassistant/homeassistant_backup.md
- Paperless: backup_restore/proxmox/paperless/paperless_backup.md
- Proxmox: backup_restore/proxmox/proxmox_backup.md - Proxmox: backup_restore/proxmox/proxmox_backup.md
- Storage: backup/storage.md - Workstation: backup_restore/workstation/workstation_backup.md
markdown_extensions: markdown_extensions:
- admonition
- pymdownx.highlight - pymdownx.highlight
- pymdownx.snippets: - pymdownx.snippets:
check_paths: false check_paths: false

View File

@@ -0,0 +1,69 @@
#!/bin/bash
# Auto-restart script for changed Docker-Compose stacks.
# Only running, active containers are restarted; inactive containers and
# stopped stacks are left untouched. Logs live inside the repo directory.
#test
REPO_DIR="/srv/docker"
LOGFILE="$REPO_DIR/scripts/docker-update.log"
# Containers this script must never (re)start.
INACTIVE_CONTAINERS=("adguard" "kea" "caddy" "wikijs")
log() {
  echo "$(date '+%Y-%m-%d %H:%M:%S') | $*" | tee -a "$LOGFILE"
}
log "===== Starting Auto-Restart (final) ====="
cd "$REPO_DIR" || { log "ERROR: Cannot enter $REPO_DIR"; exit 1; }
# 1) Git fetch + hard reset (bring the VM exactly onto the remote state).
#    Output goes to the log instead of being silently discarded.
git fetch --all >>"$LOGFILE" 2>&1
git reset --hard origin/main >>"$LOGFILE" 2>&1
log "Pulled latest changes and reset VM to remote state."
# 2) Determine which compose files changed in the last commit.
CHANGED=$(git diff --name-only HEAD~1 HEAD | grep -E '^compose/.+/docker-compose\.yml$' || true)
if [ -z "$CHANGED" ]; then
  log "No Compose files changed. Nothing to restart."
  exit 0
fi
# 3) Restart only changed stacks that are currently running.
for FILE in $CHANGED; do
  # Path layout is compose/<name>/docker-compose.yml, so field 2 is the name.
  CONTAINER_NAME=$(echo "$FILE" | cut -d'/' -f2)
  # Skip containers on the inactive list (word-boundary match on the joined
  # list; avoids the unquoted-array regex pitfall of "=~ ${arr[@]}").
  if [[ " ${INACTIVE_CONTAINERS[*]} " == *" ${CONTAINER_NAME} "* ]]; then
    log "Skipping inactive container: $CONTAINER_NAME"
    continue
  fi
  COMPOSE_DIR="$REPO_DIR/compose/$CONTAINER_NAME"
  COMPOSE_FILE="$COMPOSE_DIR/docker-compose.yml"
  if [ ! -d "$COMPOSE_DIR" ]; then
    log "Warning: $COMPOSE_DIR does not exist, skipping..."
    continue
  fi
  # A stack with no containers (stopped) is left alone.
  RUNNING=$(docker compose -f "$COMPOSE_FILE" ps -q)
  if [ -z "$RUNNING" ]; then
    log "Container $CONTAINER_NAME is stopped. Skipping restart."
    continue
  fi
  log "Restarting running container: $CONTAINER_NAME"
  # Use -f instead of cd'ing into each stack so the loop's working directory
  # stays stable, and test the command directly instead of inspecting $?.
  if docker compose -f "$COMPOSE_FILE" up -d >>"$LOGFILE" 2>&1; then
    log "$CONTAINER_NAME restarted successfully"
  else
    log "❌ Failed to restart $CONTAINER_NAME"
  fi
done
log "===== Auto-Restart Completed ====="

5
scripts/stop-all.sh Normal file
View File

@@ -0,0 +1,5 @@
#!/bin/bash
# Stop every docker-compose stack below /docker/Bruchtal/docker.
# Abort when the base directory is missing — without the "|| exit" the
# original script would run `docker compose down` in every subdirectory of
# whatever the current working directory happened to be.
cd /docker/Bruchtal/docker || exit 1
for d in */; do
  # With no subdirectories the glob stays literal; skip that case.
  [ -d "$d" ] || continue
  # Subshell keeps the cwd of the loop unchanged.
  (cd "$d" && docker compose down)
done

View File

@@ -2,7 +2,7 @@
set -e set -e
# test XDG_RUNTIME_DIR # test XDG_RUNTIME_DIR
LOGFILE="/var/log/bruchtal-deploy.log" LOGFILE="/srv/docker/repo/scripts/bruchtal-deploy.log"
cd /workspace cd /workspace
log() { log() {
@@ -25,11 +25,20 @@ fi
# ----------------------------- # -----------------------------
# 2⃣ Pull latest changes # 2⃣ Pull latest changes
# ----------------------------- # -----------------------------
# safe directory for git in CI environment
git config --global --add safe.directory /workspace
log "Pulling latest changes from Gitea" log "Pulling latest changes from Gitea"
git pull git pull
# ----------------------------- # -----------------------------
# 3Check for Markdown changes # 3Redeploy changed containers
# -----------------------------
#/srv/docker/scripts/redeploy-containers.sh
# -----------------------------
# 4⃣ Check for Markdown changes
# ----------------------------- # -----------------------------
log "Checking for new or modified Markdown files..." log "Checking for new or modified Markdown files..."
changed=$(git diff --name-status HEAD~1 HEAD | grep -E '^[AM]\s.*(\.md$|mkdocs\.yml$)' | awk '{print $2}' || true) changed=$(git diff --name-status HEAD~1 HEAD | grep -E '^[AM]\s.*(\.md$|mkdocs\.yml$)' | awk '{print $2}' || true)

View File

@@ -1,7 +1,8 @@
[ [
{ {
"id": "bruchtal-deploy", "id": "bruchtal-deploy",
"execute-command": "/workspace/deploy/deploy-bruchtal.sh", "execute-command": "/workspace/scripts/webhook-deploy/deploy-bruchtal.sh",
"command-working-directory": "/workspace" "command-working-directory": "/workspace"
} }
] ]