Compare commits

..

24 Commits

Author SHA1 Message Date
ee361d9a53 vault backup: 2026-04-23 21:42:51
Affected files:
.obsidian/workspace.json
99 Work/Teaching/TEP - Schulung/CAPSA SUITE proposal deep-dive.md
99 Work/Teaching/TEP - Schulung/capsa_suite_project_overview_short.md
99 Work/Teaching/TEP - Schulung/capsa_suite_visit_briefing.md
2026-04-23 21:42:51 +02:00
e280622496 vault backup: 2026-04-23 21:01:39
Affected files:
99 Work/Teaching/TEP - Schulung/CAPSA SUITE proposal deep-dive.md
2026-04-23 21:01:39 +02:00
3b7bcb23db vault backup: 2026-04-23 20:23:23
Affected files:
.obsidian/bookmarks.json
.obsidian/workspace.json
99 Work/Teaching/TEP - Schulung/CAPSA SUITE proposal deep-dive.md
2026-04-23 20:23:23 +02:00
121d2b9461 vault backup: 2026-04-22 11:25:18
Affected files:
.obsidian/workspace.json
2 Personal/Home Lab/Baerhalten/Home Assistant.md
2026-04-22 11:25:19 +02:00
9aae367408 vault backup: 2026-04-20 12:33:43
Affected files:
.obsidian/workspace.json
2 Personal/Home Lab/Backup System - Kopia Server Setup.md
2026-04-20 12:33:43 +02:00
9057585fbc vault backup: 2026-04-19 13:20:49
Affected files:
.obsidian/workspace.json
.obsidian_iphone/plugins/obsidian-spaced-repetition/data.json
.obsidian_iphone/workspace-mobile.json
2 Personal/Projects/AlpineView/Thoughts.md
2026-04-19 13:20:50 +02:00
ea2c370b29 vault backup: 2026-04-17 18:32:28
Affected files:
.obsidian_iphone/plugins/obsidian-spaced-repetition/data.json
.obsidian_iphone/workspace-mobile.json
2 Personal/Lists/Packlisten/Packliste - Berge Mehrtages.md
2 Personal/Lists/Packlisten/Packliste - Skitour.md
2026-04-17 18:32:28 +02:00
090a255e7e vault backup: 2026-04-15 09:48:06
Affected files:
.obsidian/workspace.json
.obsidian_iphone/plugins/obsidian-spaced-repetition/data.json
.obsidian_iphone/workspace-mobile.json
2 Personal/1 Skills/Obisdian/Obsidian Setup.md
2026-04-15 09:48:06 +02:00
998ff52f70 vault backup: 2026-04-14 08:55:40
Affected files:
.obsidian/workspace.json
.obsidian_iphone/plugins/obsidian-spaced-repetition/data.json
.obsidian_iphone/workspace-mobile.json
0 Journal/0 Daily/2026-04-13.md
2 Personal/Lists/Packlisten/Packliste - Skitour.md
2026-04-14 08:55:40 +02:00
39a4f8ad5a vault backup: 2026-04-06 14:57:29
Affected files:
.obsidian/workspace.json
.obsidian_iphone/plugins/obsidian-spaced-repetition/data.json
.obsidian_iphone/workspace-mobile.json
OneNote/NAS/ToDo  Verify.md
2026-04-06 14:57:29 +02:00
9420af83d6 vault backup: 2026-03-31 18:31:05
Affected files:
2 Personal/Home Lab/NAS/immich_v1.1_setup.md
2026-03-31 18:31:05 +02:00
0d1dc5990e vault backup: 2026-03-31 14:56:35
Affected files:
.obsidian/workspace.json
2 Personal/Home Lab/NAS/immich_v1.1_setup.md
2026-03-31 14:56:35 +02:00
31e9ecf447 vault backup: 2026-03-31 12:52:27
Affected files:
2 Personal/Home Lab/NAS/immich_v1_setup.md
2026-03-31 12:52:27 +02:00
174b2e34a5 vault backup: 2026-03-31 12:00:20
Affected files:
.obsidian/workspace.json
2 Personal/Home Lab/NAS/immich_v1_setup.md
2026-03-31 12:00:20 +02:00
631b817207 vault backup: 2026-03-31 01:15:14
Affected files:
.obsidian/workspace.json
2 Personal/Home Lab/NAS/homelab_backup_architecture_first_draft.md
2026-03-31 01:15:14 +02:00
3aa2abc0ae vault backup: 2026-03-30 18:45:34
Affected files:
.obsidian/workspace.json
8 Places/BusinessesDrawing 2023-10-12 16.01.52.excalidraw.md
2026-03-30 18:45:34 +02:00
6af7c7e720 vault backup: 2026-03-26 00:09:55
Affected files:
2 Personal/Lists/Business Ideas.md
2026-03-26 00:09:55 +01:00
e56528b7b3 vault backup: 2026-03-25 23:46:41
Affected files:
.obsidian/workspace.json
0 Journal/0 Daily/2026-03-20.md
2026-03-25 23:46:42 +01:00
1ed975513a vault backup: 2026-03-18 20:02:56
Affected files:
.obsidian/workspace.json
.obsidian_iphone/workspace-mobile.json
0 Journal/0 Daily/2026-03-18.md
2026-03-18 20:02:56 +01:00
5a426919d7 vault backup: 2026-03-17 16:15:43
Affected files:
.obsidian/workspace.json
2 Personal/Home Lab/Baerhalten/Home Assistant.md
Attachments/ESPSomfyRTS 2026-03-17T16_05_06.backup
2026-03-17 16:15:43 +01:00
232b4b9f32 vault backup: 2026-03-17 15:03:57
Affected files:
.obsidian/workspace.json
2 Personal/Home Lab/Baerhalten/Home Assistant -> InfluxDB -> Grafana setup and debugging notes.md
2 Personal/Home Lab/Baerhalten/Home Assistant.md
2026-03-17 15:03:57 +01:00
4482f6921e vault backup: 2026-03-17 13:29:38
Affected files:
2 Personal/Home Lab/Baerhalten/Home Assistant.md
2026-03-17 13:29:38 +01:00
34a3dc17b2 vault backup: 2026-03-17 12:50:12
Affected files:
.obsidian/workspace.json
2 Personal/Home Lab/Baerhalten/Home Assistant.md
2026-03-17 12:50:12 +01:00
dd8e2ff9d5 vault backup: 2026-03-17 12:37:47
Affected files:
.obsidian/workspace.json
.obsidian_iphone/plugins/obsidian-spaced-repetition/data.json
.obsidian_iphone/workspace-mobile.json
2026-03-17 12:37:47 +01:00
24 changed files with 5334 additions and 708 deletions

9
.obsidian/bookmarks.json vendored Normal file
View File

@@ -0,0 +1,9 @@
{
"items": [
{
"type": "folder",
"ctime": 1776967124634,
"path": "99 Work/Teaching/TEP - Schulung"
}
]
}

View File

@@ -91,32 +91,18 @@
"title": "2026-01-07"
}
},
{
"id": "b948e87b03ca1ad1",
"type": "leaf",
"state": {
"type": "markdown",
"state": {
"file": "2 Personal/Home Lab/Baerhalten/Home Assistant.md",
"mode": "source",
"source": false
},
"icon": "lucide-file",
"title": "Home Assistant"
}
},
{
"id": "f33efed5601c1085",
"type": "leaf",
"state": {
"type": "markdown",
"state": {
"file": "2 Personal/Home Lab/Homelab.md",
"mode": "source",
"file": "5 Media/8 Courses/Design Patterns by Construx.md",
"mode": "preview",
"source": false
},
"icon": "lucide-file",
"title": "Homelab"
"title": "Design Patterns by Construx"
}
},
{
@@ -162,17 +148,17 @@
}
},
{
"id": "fac43a56fe618e9d",
"id": "b948e87b03ca1ad1",
"type": "leaf",
"state": {
"type": "markdown",
"state": {
"file": "2 Personal/Home Lab/NextiShareBot.md",
"file": "2 Personal/Home Lab/Baerhalten/Home Assistant.md",
"mode": "source",
"source": false
},
"icon": "lucide-file",
"title": "NextiShareBot"
"title": "Home Assistant"
}
}
],
@@ -195,7 +181,7 @@
"state": {
"type": "file-explorer",
"state": {
"sortOrder": "alphabetical",
"sortOrder": "byModifiedTime",
"autoReveal": true
},
"icon": "lucide-folder-closed",
@@ -481,45 +467,83 @@
],
"direction": "vertical",
"x": 0,
"y": 42,
"y": 34,
"width": 900,
"height": 777,
"maximize": false,
"zoom": 0
},
{
"id": "f1b9d015e6bf9f80",
"type": "window",
"children": [
{
"id": "ead9296ecab7eab5",
"type": "tabs",
"children": [
{
"id": "a24c733c4981be03",
"type": "leaf",
"state": {
"type": "markdown",
"state": {
"file": "99 Work/Teaching/TEP - Schulung/CAPSA SUITE proposal deep-dive.md",
"mode": "source",
"source": false
},
"icon": "lucide-file",
"title": "CAPSA SUITE proposal deep-dive"
}
}
]
}
],
"direction": "vertical",
"x": 658,
"y": -1860,
"width": 1680,
"height": 1860,
"maximize": false,
"zoom": 0
}
]
},
"active": "6914f148f736c4ac",
"active": "a24c733c4981be03",
"lastOpenFiles": [
"2 Personal/Home Lab/NextiShareBot.md",
"2 Personal/Lists/Business Ideas.md",
"0 Journal/0 Daily/2026-01-29.md",
"Temporary/Untitled 4.md",
"99 Work/Jobhunt/OneSec Experience on LinkedIn.md",
"99 Work/Jobhunt/Linkedin Profile.md",
"99 Work/Jobhunt/My CV skills.md",
"99 Work/0 OneSec/OneSecNotes/Handover Planning.md",
"2 Personal/Home Lab/Devices/Dell Studio 1558.md",
"0 Journal/0 Daily/2026-02-04.md",
"99 Work/Jobhunt/Interview Questions.md",
"Temporary/My Health Products.md",
"0 Journal/0 Daily/2026-01-28.md",
"2 Personal/Lists/Packlisten/Packliste - Skitour.md",
"0 Journal/0 Daily/2026-01-27.md",
"Temporary/Madgwick Filter.md",
"0 Journal/0 Daily/2026-01-24.md",
"Temporary/Ralph Wiggum Technique for AI Coding.md",
"2 Personal/Home Lab/NAS/Backup Strategy.md",
"0 Journal/0 Daily/2026-01-10.md",
"Temporary/Material Damping.md",
"Temporary/Friction Damping.md",
"Attachments/Pasted image 20260121121234.png",
"2 Personal/Home Lab/Homelab.md",
"2 Personal/Home Lab/Baerhalten/Home Assistant.md",
"2 Personal/Lists/Media/Bücher.md",
"99 Work/Teaching/TEP - Schulung/capsa_suite_visit_briefing.md",
"99 Work/Teaching/TEP - Schulung/CAPSA SUITE proposal deep-dive.md",
"99 Work/Teaching/TEP - Schulung/capsa_suite_project_overview_short.md",
"99 Work/Teaching/TEP - Schulung",
"99 Work/Teaching",
"2 Personal/Home Lab/Backup System - Kopia Server Setup.md",
"2 Personal/1 Skills/Obisdian/Obsidian Setup.md",
"2 Personal/Projects/AlpineView/Thoughts.md",
"2 Personal/Projects/AlpineView",
"2 Personal/Home Lab/Syncthing.md",
"2 Personal/Home Lab/NextiShareBot.md",
"5 Media/8 Courses/Design Patterns by Construx.md",
"2 Personal/Lists/Business Ideas.md",
"OneNote/NAS/ToDo Verify.md",
"OneNote/NAS/Apps.md",
"OneNote/NAS/Backups.md",
"OneNote/NAS/Fotos Strategie.md",
"OneNote/NAS/Main Info.md",
"2 Personal/Home Lab/NAS/immich_v1.1_setup.md",
"2 Personal/Home Lab/NAS/immich_v1_setup.md",
"2 Personal/Home Lab/NAS/Ports Opening.md",
"2 Personal/Home Lab/NAS/Virtual Machine Hosting.md",
"2 Personal/Home Lab/NAS/SSH.md",
"2 Personal/Home Lab/NAS/NordVPN Setup.md",
"2 Personal/Home Lab/NAS/NAS Projects.md",
"2 Personal/Home Lab/NAS/Maintenance Plan.md",
"2 Personal/Home Lab/NAS/Jellyfin Installation.md",
"2 Personal/Home Lab/NAS/homelab_backup_architecture_first_draft.md",
"Dashboard Canvas.canvas",
"Attachments/ESPSomfyRTS 2026-03-17T16_05_06.backup",
"Attachments/Pasted image 20260121121234.png",
"Attachments/ESPSomfyRTS 2026-01-18T16_26_16.backup",
"Attachments/Pasted image 20260118150817.png",
"Temporary/Untitled 3.md",
"Attachments/Pasted image 20251202214228.png",
"2 Personal/1 Skills/AI",
"2 Personal/Home Lab/Baerhalten",
@@ -531,13 +555,8 @@
"Attachments/Pasted image 20251015111504.png",
"Attachments/Pasted image 20251015092212.png",
"3 Knowledge/5 AI/PromptDB",
"3 Knowledge/5 AI",
"99 Work/0 OneSec/OneSecNotes/10 Projects/TeensyFlightcontroller",
"Attachments/Pasted image 20250922115441.png",
"7 People/0_People.base",
"Attachments/Belts, Suspenders.mp3",
"Attachments/image 21.jpg",
"Dashboard Canvas.canvas",
"99 Work/0 OneSec/OneSecNotes/30 Engineering Skills/Computer Science/Untitled.canvas",
"8 Work/OneSecNotes/Temporary/Untitled.canvas"
]

View File

@@ -47,7 +47,7 @@
"maxLinkFactor": 1,
"showDebugMessages": false
},
"buryDate": "2026-03-05",
"buryDate": "2026-04-19",
"buryList": [],
"historyDeck": null
}

View File

@@ -153,12 +153,12 @@
"state": {
"type": "markdown",
"state": {
"file": "2 Personal/Lists/Packlisten/Packliste - Skitour.md",
"mode": "preview",
"file": "2 Personal/Projects/AlpineView/Thoughts.md",
"mode": "source",
"source": true
},
"icon": "lucide-file",
"title": "Packliste - Skitour"
"title": "Thoughts"
}
}
],
@@ -1845,6 +1845,86 @@
{
"id": "b89379a2c2d5def6",
"type": "leaf",
"state": {
"type": "review-queue-list-view",
"state": {},
"icon": "lucide-ghost",
"title": "review-queue-list-view"
}
},
{
"id": "c2f4ad24032ae4c0",
"type": "leaf",
"state": {
"type": "review-queue-list-view",
"state": {},
"icon": "lucide-ghost",
"title": "review-queue-list-view"
}
},
{
"id": "a1d1f3375ebefaba",
"type": "leaf",
"state": {
"type": "review-queue-list-view",
"state": {},
"icon": "lucide-ghost",
"title": "review-queue-list-view"
}
},
{
"id": "1552da422914318f",
"type": "leaf",
"state": {
"type": "review-queue-list-view",
"state": {},
"icon": "lucide-ghost",
"title": "review-queue-list-view"
}
},
{
"id": "f27b5c148e348c3e",
"type": "leaf",
"state": {
"type": "review-queue-list-view",
"state": {},
"icon": "lucide-ghost",
"title": "review-queue-list-view"
}
},
{
"id": "b4e99fd5097529dc",
"type": "leaf",
"state": {
"type": "review-queue-list-view",
"state": {},
"icon": "lucide-ghost",
"title": "review-queue-list-view"
}
},
{
"id": "6f3ed20b735267d2",
"type": "leaf",
"state": {
"type": "review-queue-list-view",
"state": {},
"icon": "lucide-ghost",
"title": "review-queue-list-view"
}
},
{
"id": "dc2b0338bdb75278",
"type": "leaf",
"state": {
"type": "review-queue-list-view",
"state": {},
"icon": "lucide-ghost",
"title": "review-queue-list-view"
}
},
{
"id": "2f3905e698cfb06e",
"type": "leaf",
"state": {
"type": "review-queue-list-view",
"state": {},
@@ -1871,8 +1951,14 @@
"periodic-notes:Open today": false
}
},
"active": "992ba06a004d0d26",
"active": "2f3905e698cfb06e",
"lastOpenFiles": [
"2 Personal/Lists/Packlisten/Packliste - Berge Mehrtages.md",
"2 Personal/Projects/AlpineView/Thoughts.md",
"2 Personal/Projects/AlpineView",
"2 Personal/Lists/Packlisten/Packliste - Skitour.md",
"0 Journal/0 Daily/2026-04-13.md",
"8 Places/Le Cut - Zürich.md",
"0 Journal/0 Daily/2026-02-08.md",
"99 Work/Jobhunt/OneSec Experience on LinkedIn.md",
"0 Journal/0 Daily/2026-02-04.md",
@@ -1880,7 +1966,6 @@
"0 Journal/0 Daily/2026-01-02.md",
"0 Journal/0 Daily/2026-01-24.md",
"2 Personal/Home Lab/Baerhalten/Home Assistant.md",
"8 Places/Le Cut - Zürich.md",
"2 Personal/Organisation/Persönliche Organisation.md",
"0 Journal/0 Daily/2025-12-10.md",
"0 Journal/0 Daily/2025-11-11.md",
@@ -1896,10 +1981,6 @@
"0 Journal/0 Daily/2025-07-23.md",
"0 Journal/0 Daily/2025-08-11.md",
"0 Journal/0 Daily/2025-07-25.md",
"5 Media/0 Books/More Effective Agile by Steve McConnell.md",
"Temporary/n8n - ideas.md",
"0 Journal/0 Daily/2025-07-22.md",
"Temporary/n8n - ideas.sync-conflict-20250721-234226-LIUMLEB.md",
"Attachments/image 20.jpg",
"Attachments/image 19.jpg",
"2 Personal/Projects/Timelapse Project",
@@ -1918,7 +1999,6 @@
"99 Work/Jobhunt/Applications",
"Dashboard Canvas.canvas",
"Attachments/Recording 20241024190036.m4a",
"2 Personal/ Personal Growth/Future Habits",
"2 Personal/ Personal Growth"
"2 Personal/ Personal Growth/Future Habits"
]
}

View File

@@ -0,0 +1,28 @@
---
aliases:
Tags:
- daily
day_grade:
Dehnen:
Sport:
Ernährung:
---
# 2026-03-18
[[2026-03-17]] <--> [[2026-03-19]]
> [!quote] Real success is finding your lifework in the work that you love.
> — David McCullough
---
## Planning
___
## Reflection
___
## Notes
-

View File

@@ -0,0 +1,27 @@
---
aliases:
Tags:
- daily
day_grade:
Dehnen:
Sport:
Ernährung:
---
# <%tp.file.title%>
[[<%tp.date.now("YYYY-MM-DD",-1)%>]] <--> [[<%tp.date.now("YYYY-MM-DD",+1)%>]]
<%tp.web.daily_quote()%>
---
## Planning
___
## Reflection
___
## Notes
-

View File

@@ -0,0 +1,27 @@
---
aliases:
Tags:
- daily
day_grade:
Dehnen:
Sport:
Ernährung:
---
# <%tp.file.title%>
[[<%tp.date.now("YYYY-MM-DD",-1)%>]] <--> [[<%tp.date.now("YYYY-MM-DD",+1)%>]]
<%tp.web.daily_quote()%>
---
## Planning
___
## Reflection
___
## Notes
-

View File

@@ -19,6 +19,8 @@ I initially thought that [[Syncthing]] is not available on my iPhone, but the ap
Now I should be able to synchronize between all devices using only Syncthing.
A concern is syncing subfolders within an already synced folder, because it could cause circular dependencies. But according to [this thread](https://forum.syncthing.net/t/sync-a-subfolder-of-an-already-synced-folder/16164) it should work just fine.
I used this instruction set to get it to work: [Sync Mac/PC and iOS using Syncthing + Möbius Sync - Share & showcase - Obsidian Forum](https://forum.obsidian.md/t/sync-mac-pc-and-ios-using-syncthing-mobius-sync/72022)
### Instructions to Setup Everything
# Backup Solution

View File

@@ -0,0 +1,947 @@
---
title: Backup System - Kopia Server Setup
created_date: 2026-04-20
updated_date: 2026-04-20
aliases:
tags:
---
# Backup System - Kopia Server Setup
## Overview
This document describes the setup we built for a self-hosted Kopia backup server in the homelab.
### Final architecture
- **Hypervisor:** Proxmox
- **Backup server runtime:** Debian VM on Proxmox (IP: 192.168.1.54 - IP managed by Pi-hole)
- **Backup repository storage:** Synology NAS via NFS
- **Backup service:** Kopia Repository Server
- **Remote/private access:** intended via Tailscale or LAN
- **First client:** MacBook
- **TLS:** self-generated Kopia TLS certificate
### Why this architecture was chosen
We first tried to run Kopia in an LXC container. That led to multiple issues:
- NFS mount permission issues with an **unprivileged LXC**
- AppArmor / systemd problems with newer Debian / systemd in LXC
- black Proxmox console / awkward LXC behavior
Because of that, we switched to a **VM**, which is the cleaner and more robust setup for this use case.
---
## Final design
### Components
- **Debian VM** runs Kopia Repository Server
- **Synology NAS** stores the actual backup repository blobs
- **Kopia clients** connect to the Repository Server over HTTPS
- **MacBook** connects as `claudio@macbook-main`
### Repository model
Important distinction:
- The **repository** itself is stored on the NAS filesystem
- The **Repository Server** is the HTTP/HTTPS layer in front of it
- Users connect to the **server**, not directly to the NAS share
This is better than mounting the share directly on each laptop because:
- clients do not depend on a mounted NAS path
- you get per-user accounts on the server
- easier multi-user setup
- easier remote use over VPN/Tailscale
---
## Synology setup
### Shared folder
A dedicated shared folder was used on Synology:
- `kopia-repository`
### NFS export
The NFS export path used was:
```bash
192.168.1.34:/volume1/kopia-repository
```
### Synology NFS settings
Recommended / used settings:
- **Privilege:** Read/Write
- **Squash:** No mapping
- **Security:** sys
- **Allow connections from non-privileged ports:** enabled if needed
Notes:
- `Squash: No mapping` is fine for a dedicated Kopia repository share.
- In the LXC attempt, permissions still failed because of UID/GID mapping issues with unprivileged containers.
---
## Failed LXC attempt and why we abandoned it
We first tried to run Kopia in a Proxmox LXC.
### What we tried
- Created an LXC
- Tried mounting NFS inside the LXC
- Then switched to mounting NFS on the Proxmox host and bind-mounting it into the LXC
- Added `/dev/net/tun` for Tailscale / ZeroTier
- Enabled `nesting=1` because of Debian/systemd issues
### Problems encountered
#### 1. Wrong mount point created via Proxmox UI
We accidentally created an extra LXC disk instead of a bind mount.
We had this wrong line:
```ini
mp0: local-lvm:vm-102-disk-1,mp=/mnt/pve/kopia-repo,size=8G
```
This was **not** a bind mount from the host.
The correct bind mount syntax was:
```ini
mp0: /mnt/pve/kopia-repo,mp=/srv/kopia-repo
```
#### 2. Console issues / black screen
The LXC booted, but the Proxmox console was black.
`pct enter` worked, but `pct console` did not work reliably.
#### 3. systemd / AppArmor issues
We saw warnings such as:
- `Systemd 257 detected. You may need to enable nesting.`
- AppArmor denials related to `userns_create`, `mount`, `journald`, `networkd`, etc.
This was partially fixed with:
```bash
pct set 102 --features nesting=1
pct restart 102
```
#### 4. NFS permission issues in unprivileged LXC
Even when the share was mounted correctly, writes failed inside the container:
```bash
touch /srv/kopia-repo/testfile
# Permission denied
```
This happened because unprivileged container root is UID-mapped and Synology/NFS did not allow writes the way we wanted.
### Conclusion
The LXC route was abandoned because it caused unnecessary complexity for a simple service.
We switched to a **Debian VM**, which is simpler and more maintainable.
---
## VM setup
### Assumptions
- Debian VM on Proxmox
- VM has network access to the Synology NAS
- NFS share exists on Synology
- Kopia repository will live on the NAS
### Base packages installed
On the VM, we installed the following:
```bash
apt update
apt install -y nfs-common curl ca-certificates gnupg nano
```
---
## NFS mount inside the VM
### Mountpoint creation
```bash
mkdir -p /srv/kopia-repo
mkdir -p /var/lib/kopia
chmod 700 /var/lib/kopia
```
### Manual test mount
```bash
mount -t nfs 192.168.1.34:/volume1/kopia-repository /srv/kopia-repo
```
### Validation commands
```bash
df -h /srv/kopia-repo
touch /srv/kopia-repo/testfile
ls -l /srv/kopia-repo/testfile
rm /srv/kopia-repo/testfile
```
At this stage, the VM approach worked properly.
### Persistent mount in `/etc/fstab`
We added:
```fstab
192.168.1.34:/volume1/kopia-repository /srv/kopia-repo nfs defaults,_netdev 0 0
```
Then tested with:
```bash
umount /srv/kopia-repo
mount -a
df -h /srv/kopia-repo
```
---
## Kopia installation on the VM
### APT repo setup
We used the official Kopia apt repository.
Commands used:
```bash
install -d -m 0755 /etc/apt/keyrings
curl -s https://kopia.io/signing-key | gpg --dearmor -o /etc/apt/keyrings/kopia-keyring.gpg
echo "deb [signed-by=/etc/apt/keyrings/kopia-keyring.gpg] http://packages.kopia.io/apt/ stable main" > /etc/apt/sources.list.d/kopia.list
apt update
apt install -y kopia
```
### Verify installation
```bash
kopia --version
```
---
## Kopia repository creation
### Repository directory
```bash
mkdir -p /srv/kopia-repo/repository
```
### Create repository
We created a filesystem repository on the NFS-backed path:
```bash
kopia repository create filesystem --path=/srv/kopia-repo/repository
```
During this step, a **repository password** was chosen.
This password is critical. It encrypts the repository contents.
---
## Environment variables
We used an environment file with exported variables for passwords.
Important shell note:
- `"$VAR"` expands the variable
- `'$VAR'` does **not** expand the variable
So this is **wrong**:
```bash
--server-password='$KOPIA_SRV_PW'
```
And this is **correct**:
```bash
--server-password="$KOPIA_SRV_PW"
```
### Variables used
Example variables:
```bash
export KOPIA_REPO_PW="..."
export KOPIA_SRV_CTRL_PW="..."
export KOPIA_SRV_PW="..."
```
And for manual startup we exported:
```bash
export KOPIA_PASSWORD="$KOPIA_REPO_PW"
```
---
## First manual Kopia server start attempts
### Initial attempt without TLS
We first tried something like:
```bash
kopia server start \
--address=0.0.0.0:51515 \
--server-control-username=server-control \
--server-control-password="$KOPIA_SRV_CTRL_PW" \
--server-username=kopia \
--server-password="$KOPIA_SRV_PW"
```
This failed with the message:
- `TLS not configured. To start server without encryption pass --insecure`
So the server did **not** actually listen on the port.
### Working manual TLS start
The working startup command was:
```bash
kopia server start \
--tls-generate-cert \
--tls-cert-file ~/my.cert \
--tls-key-file ~/my.key \
--address 0.0.0.0:51515 \
--server-control-username control
```
Notes:
- This generated a self-signed TLS cert and key.
- After generation, future starts must **not** use `--tls-generate-cert` again.
---
## Adding the first Kopia user
We created the first Mac user with:
```bash
kopia server user add claudio@macbook-main
```
This prompts for a password for that user.
This user/password is what the Mac client uses to connect.
---
## Server refresh issue and root cause
We saw this error:
```bash
kopia server refresh ...
400 Bad Request: not connected
```
And from the Mac / KopiaUI we saw:
- `not connected to a direct repository`
### Root cause
The Repository Server was starting, but the process running under systemd was **not connected to the repository**.
This happened because:
- the repository had been created and connected as user **`cef`**
- but the systemd service was running as **root** by default
- root did not have the Kopia repository config/session
### Fix
We changed the systemd service to run as **user `cef`**.
That solved the issue.
---
## TLS certificate handling
### Move cert and key to stable location
We moved the generated files out of the home directory:
```bash
sudo mkdir -p /etc/kopia
sudo mv ~/my.cert /etc/kopia/server.cert
sudo mv ~/my.key /etc/kopia/server.key
sudo chmod 600 /etc/kopia/server.key
sudo chmod 644 /etc/kopia/server.cert
```
### Important permission fix
Because the service runs as user `cef`, that user needed access to the cert and key:
```bash
sudo chown cef:cef /etc/kopia/server.cert /etc/kopia/server.key
sudo chmod 600 /etc/kopia/server.key
sudo chmod 644 /etc/kopia/server.cert
```
---
## Environment file for systemd
We created:
```bash
sudo nano /etc/kopia-server.env
```
Contents:
```bash
KOPIA_PASSWORD=YOUR_REPOSITORY_PASSWORD
KOPIA_SRV_CTRL_PW=YOUR_SERVER_CONTROL_PASSWORD
KOPIA_SRV_PW=YOUR_WEB_UI_PASSWORD
```
Permissions:
```bash
sudo chown root:cef /etc/kopia-server.env
sudo chmod 640 /etc/kopia-server.env
```
---
## Final systemd service
We created:
```bash
sudo nano /etc/systemd/system/kopia-server.service
```
Final service:
```ini
[Unit]
Description=Kopia Repository Server
After=network-online.target remote-fs.target
Wants=network-online.target
Requires=remote-fs.target
[Service]
Type=simple
User=cef
Group=cef
EnvironmentFile=/etc/kopia-server.env
ExecStart=/usr/bin/kopia server start \
--tls-cert-file=/etc/kopia/server.cert \
--tls-key-file=/etc/kopia/server.key \
--address=0.0.0.0:51515 \
--server-control-username=control \
--server-control-password=${KOPIA_SRV_CTRL_PW} \
--server-username=kopia \
--server-password=${KOPIA_SRV_PW}
Restart=always
RestartSec=5
[Install]
WantedBy=multi-user.target
```
### Enable and start
```bash
sudo systemctl daemon-reload
sudo systemctl enable --now kopia-server
sudo systemctl status kopia-server --no-pager
```
### Check if listening
```bash
ss -ltnp | grep 51515
```
---
## Certificate fingerprint
Because the TLS certificate is self-generated, clients must trust it using the SHA256 fingerprint.
### Get fingerprint
On the VM:
```bash
openssl x509 -in /etc/kopia/server.cert -noout -fingerprint -sha256 | sed 's/://g' | cut -f 2 -d =
```
Save the resulting fingerprint.
---
## Refreshing server credentials
Once the service was working, refresh needed to use:
- HTTPS
- control username/password
- server certificate fingerprint
Command:
```bash
kopia server refresh \
--address=https://127.0.0.1:51515 \
--server-control-username=control \
--server-control-password="$KOPIA_SRV_CTRL_PW" \
--server-cert-fingerprint=YOUR_FINGERPRINT
```
If you get `not connected`, the server process is not connected to the repository context. Check that the service runs as the same user that has a valid Kopia repository config.
---
## MacBook setup
### Install Kopia on macOS
We used Homebrew:
```bash
brew install kopia
brew install kopiaui
```
### Connect the Mac to the Repository Server
We used the CLI first, because it is more reliable for initial connection than KopiaUI.
Command:
```bash
kopia repository connect server \
--url=https://YOUR_VM_IP:51515 \
--server-cert-fingerprint=YOUR_CERT_FINGERPRINT \
--override-username=claudio \
--override-hostname=macbook-main
```
The login password here is the password for the Kopia server user:
- `claudio@macbook-main`
### Verify connection
```bash
kopia repository status
```
---
## MacBook first test backup
### Create a test folder
```bash
mkdir -p ~/kopia-test
echo "hello kopia" > ~/kopia-test/file1.txt
date > ~/kopia-test/file2.txt
```
### Create first snapshot
```bash
kopia snapshot create ~/kopia-test
```
### List snapshots
```bash
kopia snapshot list
```
### Test restore
```bash
mkdir -p ~/kopia-restore-test
kopia restore latest ~/kopia-restore-test
ls -la ~/kopia-restore-test
```
This validates the full chain:
- VM
- NFS mount
- repository
- Kopia Repository Server
- Mac client connection
- backup
- restore
---
## Automatic backup on the Mac
### Basic idea
The recommended model is:
1. connect the Mac to the repository once
2. define backup roots
3. use `kopia snapshot create --all` on a schedule
### Suggested first backup roots
Start simple. For example:
- `~/Documents`
- `~/Desktop`
- local project folders
Do **not** immediately back up all of `~/Library`.
### Initial snapshots for real backup roots
Example:
```bash
kopia snapshot create ~/Documents
kopia snapshot create ~/Desktop
```
### Set retention / scheduling policy
Example:
```bash
kopia policy set ~/Documents \
--snapshot-interval=12h \
--keep-latest=14 \
--keep-daily=14 \
--keep-weekly=8 \
--keep-monthly=12
kopia policy set ~/Desktop \
--snapshot-interval=12h \
--keep-latest=14 \
--keep-daily=14 \
--keep-weekly=8 \
--keep-monthly=12
```
### Robust automatic execution with launchd
Create:
```text
~/Library/LaunchAgents/com.claudio.kopia-backup.plist
```
Contents:
```xml
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
<key>Label</key>
<string>com.claudio.kopia-backup</string>
<key>ProgramArguments</key>
<array>
<string>/opt/homebrew/bin/kopia</string>
<string>snapshot</string>
<string>create</string>
<string>--all</string>
<string>--no-progress</string>
</array>
<key>StartInterval</key>
<integer>21600</integer>
<key>RunAtLoad</key>
<true/>
<key>StandardOutPath</key>
<string>/tmp/kopia-backup.out</string>
<key>StandardErrorPath</key>
<string>/tmp/kopia-backup.err</string>
</dict>
</plist>
```
Load it:
```bash
launchctl unload ~/Library/LaunchAgents/com.claudio.kopia-backup.plist 2>/dev/null || true
launchctl load ~/Library/LaunchAgents/com.claudio.kopia-backup.plist
launchctl list | grep kopia
```
This runs every 6 hours (`21600` seconds).
### Verify automatic backup
```bash
kopia snapshot list
```
And occasionally:
```bash
kopia restore latest ~/kopia-restore-test-2
```
---
## How to add another user, for example your spouse
The model is:
- create a new user on the Kopia server
- connect that user's machine with a fixed username/hostname identity
- create backup sources from that machine
### Example: spouse on Windows
Suppose you want:
- username: `partner`
- hostname: `windows-laptop`
### 1. Create the user on the Kopia server VM
On the VM:
```bash
kopia server user add partner@windows-laptop
```
Set a password when prompted.
### 2. Refresh credentials if needed
```bash
kopia server refresh \
--address=https://127.0.0.1:51515 \
--server-control-username=control \
--server-control-password="$KOPIA_SRV_CTRL_PW" \
--server-cert-fingerprint=YOUR_FINGERPRINT
```
### 3. Install Kopia on the spouse's machine
For Windows, install Kopia / KopiaUI from the official installer.
### 4. Connect that machine to the server
From CLI, the equivalent pattern is:
```bash
kopia repository connect server \
--url=https://YOUR_VM_IP:51515 \
--server-cert-fingerprint=YOUR_CERT_FINGERPRINT \
--override-username=partner \
--override-hostname=windows-laptop
```
Then enter the password for:
- `partner@windows-laptop`
### 5. Create first backup sources on that machine
For example on Windows:
- Documents
- Desktop
- Pictures
### 6. Run first backup and test restore
Do exactly the same test pattern as on the Mac:
- create first snapshot
- list snapshots
- restore a test folder
### Naming convention recommendation
Use stable names so future maintenance is simple.
Examples:
- `claudio@macbook-main`
- `claudio@windows-main`
- `partner@windows-laptop`
- `partner@android-photos` if you ever use a separate flow later
---
## Maintenance and troubleshooting
### Check service status
```bash
sudo systemctl status kopia-server --no-pager
```
### View Kopia server logs
```bash
sudo journalctl -u kopia-server -n 100 --no-pager
```
### Check if port is listening
```bash
ss -ltnp | grep 51515
```
### Check repository status as service user
```bash
kopia repository status
```
If this fails under the service user context, the Repository Server will not work correctly.
### Test local HTTPS endpoint
```bash
curl -k https://127.0.0.1:51515/
```
### Get certificate fingerprint again
```bash
openssl x509 -in /etc/kopia/server.cert -noout -fingerprint -sha256 | sed 's/://g' | cut -f 2 -d =
```
### Common failure: wrong shell quoting
Wrong:
```bash
--server-password='$KOPIA_SRV_PW'
```
Correct:
```bash
--server-password="$KOPIA_SRV_PW"
```
### Common failure: server not connected
Symptoms:
- `400 Bad Request: not connected`
- Mac / UI error: `not connected to a direct repository`
Fix:
- make sure systemd service runs as the same user that created / connected the repository
- in this setup that was `cef`
### Common failure: browser works but Mac client fails
This usually means:
- HTTPS listener is fine
- web auth is fine
- but the server has no repository connection or client trust settings are wrong
Check:
- `kopia repository status`
- systemd service user
- cert fingerprint on client
---
## Important files in this setup
### On the VM
- Repository storage:
- `/srv/kopia-repo/repository`
- NFS mountpoint:
- `/srv/kopia-repo`
- TLS cert:
- `/etc/kopia/server.cert`
- TLS key:
- `/etc/kopia/server.key`
- systemd env file:
- `/etc/kopia-server.env`
- systemd service:
- `/etc/systemd/system/kopia-server.service`
### On the Mac
- launchd job:
- `~/Library/LaunchAgents/com.claudio.kopia-backup.plist`
---
## What not to forget later
- Do not rerun `--tls-generate-cert` on every start.
- Keep the repository password safe.
- Keep the server control password safe.
- Keep the Web UI password safe.
- Keep the certificate fingerprint documented.
- Test restores periodically, not just backups.
- Do not assume browser access means repository connectivity is correct.
---
## Suggested next steps
1. Finalize the Mac backup roots
2. Test automatic backups from the Mac
3. Add the spouse's machine as a second user
4. Test restore from the spouse's machine too
5. Later consider offsite replication of the Kopia repository
6. Keep Time Machine in parallel on the Mac if you want easier full-machine restore

View File

@@ -0,0 +1,400 @@
---
title: Home Assistant -> InfluxDB -> Grafana setup and debugging notes
created_date: 2026-03-17
updated_date: 2026-03-17
aliases:
tags:
---
# Home Assistant -> InfluxDB -> Grafana setup and debugging notes
## Architecture
This setup has **three layers**:
1. **Home Assistant** generates state changes and sensor values.
2. **InfluxDB 3 Core** stores that time-series data.
3. **Grafana** reads from InfluxDB and visualizes it.
### Network layout
- **Home Assistant** runs outside the Docker Compose network.
- **InfluxDB** and **Grafana** run together in the same Docker Compose stack.
- Therefore:
- **Home Assistant -> InfluxDB** must use the **Proxmox container IP** and exposed port.
- **Grafana -> InfluxDB** should use the **Docker service name**.
## Actual endpoints
### Home Assistant -> InfluxDB
Home Assistant should connect to InfluxDB using:
- Protocol: `http`
- Host / URL: `192.168.194.120`
- Port: `8181`
- SSL verification: off
Reason: Home Assistant is outside the Docker network, so it cannot resolve `influxdb3`.
### Grafana -> InfluxDB
Grafana should connect to InfluxDB using:
- URL: `http://influxdb3:8181`
Reason: Grafana and InfluxDB are on the same Docker Compose network, so Docker DNS resolves `influxdb3`.
## Docker Compose setup
The relevant Compose structure is:
```yaml
services:
  influxdb3:
    image: influxdb:3-core
    ports:
      - "8181:8181"
  grafana:
    image: grafana/grafana:latest
    depends_on:
      - influxdb3
    ports:
      - "3000:3000"
```
This means:
- InfluxDB is reachable from outside Docker at `http://<proxmox-container-ip>:8181`
- InfluxDB is reachable from Grafana internally at `http://influxdb3:8181`
## Home Assistant InfluxDB configuration
### Important migration detail
Home Assistant is removing YAML-based InfluxDB **connection settings**.
So these connection keys should **not** stay in YAML:
- `api_version`
- `host`
- `port`
- `ssl`
- `verify_ssl`
- `ssl_ca_cert`
- `username`
- `password`
- `database`
- `token`
- `organization`
- `bucket`
- `path`
Those should be configured through **Devices & Services** in the Home Assistant UI.
### What can still stay in YAML
Behavior/settings like these can still stay in YAML:
- `measurement_attr`
- `default_measurement`
- `override_measurement`
- `include`
- `exclude`
- `tags`
- `tags_attributes`
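For instance, a minimal sketch of such a behavior-only block (the domain, glob, and tag values below are illustrative placeholders, not the actual configuration):
```yaml
# Illustrative behavior-only options - values are placeholders
influxdb:
  include:
    domains:
      - sensor
  exclude:
    entity_globs:
      - sensor.*_uptime
  tags:
    instance: baerhalten
```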
## Final Home Assistant behavior setting
To get **one table per entity/sensor**, the key YAML setting is:
```yaml
influxdb:
  measurement_attr: entity_id
```
Reason:
- Default behavior grouped data into tables like `W`, `Wh`, `V`, `%`
- That made many sensors appear "missing", because they were grouped by unit
- `measurement_attr: entity_id` changes that back to one measurement/table per entity, which is easier to use in Grafana
## InfluxDB concepts in this setup
### Databases
We used databases such as:
- `home`
- `homeassistant`
- `ha_fresh`
These are databases, not tables.
### Tables / measurements
Inside a database, tables are created automatically when data is written.
Examples after using `measurement_attr: entity_id`:
- `sensor.solarnet_power_photovoltaics`
- `sensor.solarnet_power_grid_export`
- `sensor.solarnet_power_load_consumed`
Examples from the old grouped schema:
- `W`
- `Wh`
- `V`
- `A`
## Tokens and authentication
### What is used where
- **Home Assistant** uses an InfluxDB token to write data
- **Grafana** uses an InfluxDB token to query data
- **Admin token** is used for CLI/database management
### Important note
Tokens and secrets that were exposed should be rotated.
## How we debugged the setup
## 1. Verified InfluxDB is running
We checked Docker:
```bash
docker ps
docker logs --since 0.5m influxdb3
```
This confirmed:
- Grafana container is running
- InfluxDB container is running
- InfluxDB is actively flushing writes
## 2. Verified databases exist
```bash
docker exec -it influxdb3 influxdb3 show databases --token "$INFLUX_ADMIN_TOKEN"
```
This showed databases like:
- `_internal`
- `home`
- `homeassistant`
- later `ha_fresh`
## 3. Verified Home Assistant can reach InfluxDB over the network
From Home Assistant shell:
```bash
curl http://192.168.194.120:8181/health
```
Response:
```text
{"error": "the request was not authenticated"}
```
This was **good** because it proved:
- DNS/IP was correct
- network path worked
- InfluxDB was reachable
- only authentication remained
## 4. Reconfigured Home Assistant in the UI
Home Assistant InfluxDB integration was configured in **Devices & Services** using:
- HTTP
- host/IP of Proxmox container
- port `8181`
- no SSL verification
- organization `dummy`
- bucket/database `ha_fresh` or similar
- token
## 5. Enabled debug logs in Home Assistant
We confirmed HA was writing by seeing log lines like:
```text
Wrote 8 events.
Wrote 10 events.
```
That proved:
- Home Assistant -> InfluxDB write path works
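For reference, a minimal sketch of how that debug logging can be enabled in `configuration.yaml` (the exact logger path for the integration is an assumption here):
```yaml
logger:
  default: warning
  logs:
    # assumed logger path for the InfluxDB integration
    homeassistant.components.influxdb: debug
```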
## 6. Verified writes actually landed in InfluxDB
We queried InfluxDB directly:
```bash
docker exec -it influxdb3 influxdb3 query --database ha_fresh --token "$INFLUX_ADMIN_TOKEN" 'SELECT * FROM "W" ORDER BY time DESC LIMIT 20'
```
This showed fresh rows from Home Assistant sensors, including:
- `pv_power`
- `solarnet_power_photovoltaics`
- `solarnet_power_load_consumed`
- `solarnet_power_grid_export`
That proved:
- Home Assistant -> InfluxDB works fully
- the issue was not ingestion
- the issue was schema / Grafana config
## 7. Verified Grafana container can reach InfluxDB
From inside the Grafana container, we tested:
```bash
curl -i http://influxdb3:8181/api/v3/query_sql -H "Authorization: Token YOUR_GRAFANA_TOKEN" -H "Content-Type: application/json" --data '{"db":"home","q":"SHOW TABLES"}'
```
This working proved:
- Docker networking is fine
- `influxdb3` resolves correctly
- Grafana-side token auth works
- the remaining problem was purely Grafana datasource configuration
## 8. Fixed Grafana datasource setup
### Correct Grafana datasource basics
- Query language: **SQL** or the desired mode
- URL: `http://influxdb3:8181`
- Database: the actual InfluxDB database in use
- Token: Grafana token
### Important TLS issue
Grafana originally failed with an error like:
```text
tls: first record does not look like a TLS handshake
```
That happened because Grafana tried TLS/FlightSQL behavior against a plain HTTP endpoint.
Fix:
- use the correct datasource mode
- use the right endpoint
- keep the connection consistent with the actual InfluxDB setup
## 9. Realized the schema had changed
Old schema assumption:
- one table per sensor
New schema that appeared initially:
- one table per **unit**, like `W`, `Wh`, `V`
That is why old queries stopped working.
Example old query:
```sql
SELECT mean("value")
FROM "sensor.solarnet_power_photovoltaics"
WHERE $timeFilter
GROUP BY time(5m) fill(none)
```
Equivalent query in the grouped-by-unit schema:
```sql
SELECT mean("value")
FROM "W"
WHERE
"entity_id" = 'solarnet_power_photovoltaics'
AND $timeFilter
GROUP BY time(5m) fill(none)
```
Then we changed HA back to per-entity measurements using:
```yaml
influxdb:
  measurement_attr: entity_id
```
## 10. Reset the fresh database to keep it clean
After confirming the new schema worked, the goal was to keep only actually-used tables in the fresh DB.
General approach:
- delete or recreate `ha_fresh`
- reconnect HA to it
- let HA repopulate it with only actively written entities
That leaves a clean database without old junk.
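A sketch of the reset, assuming the InfluxDB 3 Core CLI's `create database` / `delete database` subcommands (token flag as used elsewhere in these notes):
```bash
# Placeholder commands - deletion is destructive and may ask for confirmation
docker exec -it influxdb3 influxdb3 delete database ha_fresh --token "$INFLUX_ADMIN_TOKEN"
docker exec -it influxdb3 influxdb3 create database ha_fresh --token "$INFLUX_ADMIN_TOKEN"
```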
## Final recommended setup
## Home Assistant
- Configure InfluxDB connection in the UI
- Keep only behavior options in YAML
- Use:
```yaml
influxdb:
  measurement_attr: entity_id
```
if per-entity tables are desired
## InfluxDB
- Keep one clean database for HA data
- Use separate tokens for:
- admin
- Home Assistant
- Grafana
## Grafana
- Use datasource URL:
```text
http://influxdb3:8181
```
- Point it to the correct InfluxDB database
- Use the Grafana token
- Rebuild old queries if schema changed
## Quick troubleshooting checklist for the future
### If HA is not writing
1. Check HA logs for InfluxDB errors
2. Test connectivity from HA:
```bash
curl http://<influx-ip>:8181/health
```
3. Verify token / database / organization / bucket in HA UI
4. Query InfluxDB directly to see whether data is arriving
### If Grafana shows nothing
1. Verify datasource URL is `http://influxdb3:8181`
2. Test token from inside Grafana container
3. Confirm the right database is selected
4. Check whether schema is per-entity or grouped-by-unit
5. Rewrite queries accordingly
### If sensors seem missing
1. Check `SHOW TABLES`
2. Query likely shared tables like `W`, `Wh`, `V`
3. Check whether HA is grouping by unit instead of entity
4. Set:
```yaml
influxdb:
  measurement_attr: entity_id
```
if one table per sensor is preferred

View File

@@ -28,9 +28,14 @@ can this be good:
[GitHub - jomjol/AI-on-the-edge-device: Easy to use device for connecting "old" measuring units (water, power, gas, ...) to the digital world](https://github.com/jomjol/AI-on-the-edge-device?tab=readme-ov-file#setup-%EF%B8%8F)
## Somfy
I used this tutorial: [Somfy Smart DIY \| Make Shades Smart With ESPSomfy-RTS - YouTube](https://www.youtube.com/watch?v=1acVJ0xWJgs)
The open-memory function (the auto button) doesn't work. For groups I just used the prog button on the back of the remote after clicking the open-memory button and waiting (which by itself doesn't do anything).
![[Pasted image 20260118150817.png]]
Backup:
![[ESPSomfyRTS 2026-01-18T16_26_16.backup]]
![[ESPSomfyRTS 2026-03-17T16_05_06.backup]]
### Home Assistant Integration
Followed this Video: [ESPSomfy Integration with Home Assistant - YouTube](https://www.youtube.com/watch?v=ikrQwPYGyeg&t=14s)
@@ -49,9 +54,15 @@ Other resources used:
## Influx DB and Grafana
### Influx DB Setup
DB: homeassistant
user: homeassistant
pw: VYWPQuR5UWkqMNU
See Lastpass
[Long-term data & analysis in Home Assistant with Grafana 📈 & InfluxDB 🗃️ - YouTube](https://www.youtube.com/watch?v=vdtuU33aHrc) Look at this video for better dashboards.
### Tutorials
[INSANE STATISTICS In Home Assistant With Grafana! - TUTORIAL - YouTube](https://www.youtube.com/watch?v=rXF-LycbjoA)
## Backup
I created a backup user and a shared folder on the NAS at my parents' house. Home Assistant makes an automatic backup every day and keeps five backups in rotation. The encryption keys are in my LastPass.

View File

@@ -0,0 +1,665 @@
# Home Lab Backup Architecture - First Draft
## Executive summary
You do **not** have one backup problem. You have **five** different ones:
1. **Primary shared data** on the Synology NAS
2. **Virtualized workloads** on Proxmox
3. **Standalone Linux servers** (VPS + Debian server)
4. **User endpoints** (Mac, Linux laptop, Windows PC)
5. **Mobile devices** (iPhone, Android)
Treating all of them with one tool will make the whole system worse.
My recommendation is a **layered architecture**:
- **Synology NAS** = primary durable data store for family files/photos and backup landing zone
- **Local snapshots** = fast recovery from accidental deletion and ransomware-style mistakes
- **Proxmox Backup Server (PBS)** = proper backup system for VMs/LXCs
- **Offsite encrypted backup** = file/server copies to a remote target
- **Scheduled restore testing** = mandatory, not optional
- **Separate production and staging** = from day one
---
## The main design decisions
### 1) Snapshots are not backups
Snapshots are for:
- accidental deletion
- "I changed something yesterday and want it back"
- quick rollback
Backups are for:
- NAS failure
- host failure
- site disaster
- account compromise
- major corruption
So the rule is:
- **Use snapshots for short-term recovery**
- **Use true backups for 3-2-1**
---
### 2) Do not store databases on a network share
For **Nextcloud**, **Immich**, and **Authentik**:
- Keep **PostgreSQL/MariaDB/Redis** on **local SSD storage** of the VM/LXC
- Keep only the **large user data / media blobs** on NAS-backed storage if needed
- Back up databases separately and consistently
This is the clean split:
- **App state + DB** -> local VM/LXC disk -> backed up by PBS / app-aware dumps
- **User files / photos** -> NAS share -> snapshot + offsite backup
This reduces fragility a lot.
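As a minimal sketch of that split in a Compose volume layout (service names, images, and paths are placeholders, not the real Nextcloud/Immich stacks):
```yaml
# Illustrative pattern only - names and paths are placeholders
services:
  app-db:
    image: postgres:16
    volumes:
      - /var/lib/app/postgres:/var/lib/postgresql/data   # DB state on local VM/LXC SSD
  app-server:
    image: example/app:latest
    volumes:
      - /mnt/nas/app-media:/data/media                   # large blobs on the NAS-backed share
```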
---
### 3) Production and staging must be separated structurally
Do not make staging just "another docker compose file on the same data".
Recommended separation:
- different subdomains
- different VM/LXC instances
- different databases
- different storage paths/shares
- different backup namespaces / repositories
- shorter retention on staging
- no staging write access to production data
Best case:
- `prod` and `staging` live on separate Proxmox VMs/CTs
- staging can read from **sanitized copies** or test data only
---
## Recommended target architecture
```mermaid
flowchart TB
Users[Users<br/>You / spouse / family] --> NC[Nextcloud on Proxmox<br/>prod]
Users --> IM[Immich on Proxmox<br/>prod]
Users --> AK[Authentik on Proxmox<br/>prod]
Laptops[Mac / Linux / Windows laptops] --> EPB[Endpoint backup services]
Phones[iPhone / Android] --> IM
Phones --> NC
subgraph PVE[Proxmox cluster / host]
NC
IM
AK
STG[Staging VMs/LXCs]
PBS1[Local Proxmox Backup Server]
end
subgraph NAS[Synology NAS]
SH1[Family files share]
SH2[Photos/media share]
SH3[App data shares]
SNAP[Synology snapshots]
ABB[Active Backup for Business / Time Machine targets]
HB[Hyper Backup jobs]
end
NC --> SH1
IM --> SH2
NC --> SH3
SH1 --> SNAP
SH2 --> SNAP
SH3 --> SNAP
PVE --> PBS1
PBS1 --> OFFPBS[Offsite PBS<br/>preferred for Proxmox backups]
HB --> OFFSITE[Hetzner Storage Box or object storage]
VPS[VPS services] --> RESTIC[Restic/Kopia backups]
Debian[Small Debian server] --> RESTIC
RESTIC --> OFFSITE
EPB --> ABB
ABB --> SNAP
ABB --> HB
```
---
## What I would recommend for each layer
## A. Synology NAS shared folders
### Role
The NAS should be the **primary storage anchor** for user files, family data, and large photo/media payloads.
### Local protection
Use:
- **Btrfs shared-folder snapshots** via Snapshot Replication
- **Recycle Bin** for user-facing convenience
### Suggested snapshot policy
For important family shares:
- every **hour** for 48 hours
- every **day** for 30 days
- every **week** for 8 weeks
- every **month** for 6-12 months
This gives you fast rollback without huge operational complexity.
### Offsite protection
Use **Hyper Backup** from Synology to one of these:
#### Option A - Hetzner Storage Box
**Pros**
- cheap
- European provider
- supports SFTP/rsync/Borg/WebDAV/SMB
- has its own snapshots
**Cons**
- less elegant than S3-style cloud for some tools
- not object storage
- weaker ecosystem for immutability-style backup controls
#### Option B - Backblaze B2 / S3-compatible object storage
**Pros**
- easy Hyper Backup integration
- clean cloud backup flow
- common backend for many tools
**Cons**
- ongoing storage/egress economics need checking for your volume
- less aligned with your "Hetzner-like box" idea
### My call
For your case:
- **Synology NAS -> Hyper Backup -> Hetzner Storage Box** is reasonable for NAS data
- if later you want stronger cloud-native backup properties, move NAS offsite backup to **B2/S3**
---
## B. Proxmox VMs and containers
### Do not use generic file copies for this
Use **Proxmox Backup Server**.
### Why
PBS is the right tool for:
- deduplicated Proxmox backups
- restore of full VMs/CTs
- verification jobs
- prune/retention jobs
- remote sync between PBS instances
### Best local deployment
Preferred order:
1. **Dedicated small physical box for PBS**
2. **Your small Debian server repurposed for PBS**, if it can be dedicated
3. PBS in a VM only if you must
The closer PBS is to "external to the main Proxmox failure domain", the better.
### Offsite strategy for Proxmox
This is the important part:
- If you want a **clean Proxmox-native offsite design**, use a **second PBS offsite**
- A plain **Hetzner Storage Box is good for files**, but it is **not the clean native target** for PBS remote sync
### My call
For Proxmox workloads:
- **Local PBS on separate hardware (or your Debian box if dedicated)**
- **Offsite PBS in Hetzner / other remote host** for synced copies
That is the most robust design in your whole setup.
---
## C. VPS and the extra Debian server
These are not Proxmox guests, so PBS is not the natural universal answer.
Use:
- **restic** if you want simplicity, reliability, scripting, and wide backend support
- **Kopia** if you want a better GUI and endpoint UX
### What to back up on each server
For each service:
- config files
- compose files / manifests
- secrets management exports where appropriate
- database dumps or consistent DB snapshots
- application data directories
- a machine bootstrap script or IaC definition
### Recommendation
- **restic** for VPS and Linux servers is the safer default
- send encrypted backups to **Hetzner Storage Box** or **S3/B2**
Why restic over trying to force one mega-tool?
- fewer moving parts
- easy cron/systemd timers
- easy restore scripting
- good fit for Linux servers and VPSs
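As a sketch of what that looks like in practice (repository URL, password file, and paths are placeholders for a Hetzner Storage Box over SFTP):
```bash
# Placeholder job - adjust repository, password file, and paths per server
export RESTIC_REPOSITORY="sftp:u123456@u123456.your-storagebox.de:/backups/vps1"
export RESTIC_PASSWORD_FILE=/root/.restic-password

restic backup /etc /srv/app /var/backups/db-dumps        # configs, app data, DB dumps
restic forget --keep-daily 7 --keep-weekly 4 --keep-monthly 6 --prune
```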
---
## D. Nextcloud, Immich, Authentik
## 1. Nextcloud
### Recommended layout
- app + DB on local VM/LXC storage
- file data on NAS-backed storage if needed
- back up the DB, config, and data separately
### Identity
Use **Authentik via OIDC** if you want central auth.
### Important note
Nextcloud is your **file platform**, not your only backup platform.
Do not confuse sync/share with backup.
---
## 2. Immich
### Recommended layout
- app + PostgreSQL on local VM/LXC storage
- `UPLOAD_LOCATION` media on NAS-backed storage
- use Immich's built-in **database backup job**, but also back up the underlying media storage
### Identity
Immich supports **OIDC**, so Authentik can be used here too.
### Important note
Immich's own docs explicitly warn that database backup alone is not enough; you must also back up the uploaded photos/videos.
---
## 3. Authentik
### Recommended layout
- keep Authentik small and boring
- PostgreSQL local
- media/config backed up
- include exported configuration / recovery procedure in runbook
### Important note
Authentik removed its older integrated backup functionality, so you should treat it like a normal app stack: back up the DB and any relevant mounted state.
---
## E. Private computers and family computers
This is where many homelabs become annoying.
You need something that:
- users do not hate
- works automatically
- restores cleanly
- you can operate without constant support calls
### My recommendation is **not** one single tool for everything
## For Macs
Use **Time Machine to Synology**.
Why:
- native Mac UX
- low friction
- non-technical users understand it better
- good restore story for Mac users
## For Windows PCs
Use **Synology Active Backup for Business**.
Why:
- centralized management
- good Windows support
- easier for family members than a DIY CLI backup setup
## For your Linux laptop
Use one of:
- **Kopia** if you want a GUI and easy restores
- **restic** if you want maximum control
### Why I am not pushing UrBackup as the main answer
UrBackup is interesting and easy to set up, but it is weaker as a universal answer here because:
- Windows is its strongest path
- Mac support is not really its center of gravity
- it adds another platform you must trust and operate
For your environment, Synology-native endpoint backup plus Linux-specific tooling is the cleaner compromise.
---
## F. iPhone and Android
Here is the blunt answer:
A clean, self-hosted, cross-platform, full-device backup experience for iPhone + Android + easy restore is **not realistically where you want to fight this battle**.
### What I recommend instead
#### Photos/videos
- **Immich mobile backup** for photos/videos
#### Files/documents
- **Nextcloud mobile app** for file access and uploads
#### Full device state restore
- **iCloud backup** for iPhones
- **Google/Android device backup** for Android
This is one place where the practical answer beats the ideological self-hosting answer.
---
## 3-2-1 mapping for your setup
## Production family data (files/photos)
### Copy 1 - primary
- Synology shared folders
### Copy 2 - local secondary
- Synology snapshots / recycle bin / local backup target where appropriate
### Copy 3 - offsite
- Hyper Backup to Hetzner Storage Box or S3/B2
---
## Proxmox workloads
### Copy 1 - primary
- running VMs/CTs on Proxmox
### Copy 2 - local backup
- local PBS datastore
### Copy 3 - offsite
- remote PBS sync target
---
## VPS / Debian server
### Copy 1 - primary
- live server
### Copy 2 - local or local-ish backup cache/repository if desired
- optional local repository
### Copy 3 - offsite
- restic/Kopia to Hetzner Storage Box or object storage
---
## Retention suggestions
## Snapshots
- hourly x 48
- daily x 30
- weekly x 8
- monthly x 6-12
## Proxmox backups
- nightly
- keep 7 daily
- keep 4 weekly
- keep 6 monthly
- keep 1-2 yearly for critical systems
## Server file backups
- daily
- same retention as above unless data changes very fast
## Staging
- keep short
- e.g. 3 daily, 2 weekly
---
## Restore testing - mandatory design
This is the part most people skip.
You explicitly said you want testing. Good. Keep it formal.
## Level 1 - automated integrity checks
### PBS
- run **verify jobs**
- run **prune / GC** on schedule
- alert on failure
### Restic / Kopia
- run backup check / metadata verification
- restore a few files automatically to a temp path
- verify checksums
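A minimal sketch for the restic side (sample percentage and paths are placeholders):
```bash
# Verify a random subset of pack data, then restore and checksum one known file
restic check --read-data-subset=5%
restic restore latest --target /tmp/restic-verify --include /etc/hostname
sha256sum /etc/hostname /tmp/restic-verify/etc/hostname
```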
---
## Level 2 - scheduled app restore tests
Monthly or quarterly:
- restore latest VPS backup into an isolated test VM/network
- boot it
- run a smoke test script:
- service starts
- ports respond
- DB reachable
- app login page loads
- sample user file exists
Do the same for:
- Nextcloud test restore
- Immich test restore
- Authentik test restore
---
## Level 3 - disaster recovery rehearsal
Quarterly or every 6 months:
- simulate total loss of one service
- restore from documented procedure only
- measure:
- RTO: how long to restore
- RPO: how much data lost
- missing secrets / hidden manual steps
That is how you find the lies in your own system.
---
## Suggested restore-test automation pattern
### For each service, create:
- backup source definition
- restore script
- smoke-test script
- teardown script
Example flow:
1. create isolated VM/LXC on test VLAN
2. pull latest backup
3. restore service
4. run health checks
5. record success/failure and timing
6. destroy test instance
This can start simple with shell scripts and later become CI-driven.
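A rough shell skeleton of that flow (container IDs, addresses, the restore tool, and the checks are all placeholders to adapt per service):
```bash
#!/usr/bin/env bash
# Placeholder restore-test skeleton - adapt IDs, addresses, and checks per service
set -euo pipefail

TEST_CT=199                       # disposable CT cloned onto the test VLAN
SERVICE_URL="http://10.0.99.10"   # assumed address of the restored service
start=$(date +%s)

pct clone 9000 "$TEST_CT" --hostname restore-test        # 1. create isolated test instance
pct start "$TEST_CT"

# 2./3. pull the latest backup and restore it inside the test CT (tool-specific; restic shown)
pct exec "$TEST_CT" -- restic restore latest --target /srv/app

curl -fsS "$SERVICE_URL/login" > /dev/null               # 4. smoke test: login page answers

echo "$(date -Is) restore test OK in $(( $(date +%s) - start ))s" >> /var/log/restore-tests.log   # 5. record result

pct stop "$TEST_CT" && pct destroy "$TEST_CT"            # 6. destroy test instance
```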
---
## Recommended first-version architecture
If I had to choose a practical v1 for you now, I would do this:
### Storage and data
- Synology NAS as primary family data store
- Btrfs snapshots on all critical shares
- Recycle Bin on user-facing shares
- Hyper Backup nightly to **Hetzner Storage Box**
### Compute and apps
- Proxmox hosts production VMs/LXCs
- Nextcloud, Immich, Authentik each isolated in their own VM/LXC or at least separate stacks
- DBs local to compute
- large file/media data on NAS share if required
### Proxmox backup
- deploy **PBS locally** on separate hardware if possible
- nightly backups of all prod VMs/CTs
- weekly verify jobs
- remote sync to **offsite PBS**
### Other Linux systems
- restic to offsite storage
- documented DB dumps + config backups
### Endpoints
- Mac -> Time Machine to Synology
- Windows -> Active Backup for Business
- Linux laptop -> Kopia or restic to NAS/offsite
### Phones
- Immich for photos/videos
- Nextcloud for documents/files
- iCloud / Google backup for full-device state
### Environments
- production and staging separated by VM/LXC, storage path, DNS, and credentials
---
## Where this architecture is strong
- low conceptual confusion
- good restore paths
- good separation of snapshots vs backups
- avoids forcing one backup tool onto every problem
- realistic for family usage
- expandable as you add more self-hosted services
---
## Where it is still weak / uncertain
The answer is uncertain because I do not know:
- your exact Synology model and whether all required packages/features are supported well
- whether your NAS volumes are Btrfs
- whether you can dedicate hardware to PBS
- your uplink bandwidth and expected offsite backup volume
- whether your family really needs full endpoint bare-metal restore, or mostly file-level recovery
- how much operational complexity you personally are willing to own long-term
These details can change the final recommendation.
---
## Biggest architectural traps to avoid
1. **Putting all backups on the same NAS and calling it 3-2-1**
2. **Using snapshots as your only backup**
3. **Mounting NAS storage into apps without thinking about DB consistency**
4. **Using one universal backup tool for everything**
5. **No restore testing**
6. **Letting staging touch production data**
7. **Running your only PBS inside the same Proxmox failure domain without another copy**
---
## My opinionated recommendation
If you want the most solid path with a sane amount of complexity:
- **Synology snapshots + Hyper Backup** for NAS data
- **PBS + offsite PBS** for Proxmox
- **restic** for VPS and Linux servers
- **Time Machine / ABB / Kopia** for endpoints depending on OS
- **Immich + Nextcloud + Authentik** with local DBs and NAS-backed large-data storage
- **formal restore tests** every month/quarter
That is the first design I would trust with family data.
---
## Good next steps
1. Inventory every system and classify data into:
- critical
- important
- replaceable
2. Decide where PBS will run
3. Decide whether offsite for NAS data is **Hetzner Storage Box** or **B2/S3**
4. Separate prod and staging naming, storage paths, and credentials
5. Implement backup in this order:
- Synology snapshots
- PBS local backups
- offsite backups
- endpoint backups
- restore tests
6. Write one restore runbook per service
---
## Sources / references
- Proxmox Backup Server docs: https://pbs.proxmox.com/docs/
- Proxmox PBS installation / Debian / Proxmox VE integration: https://www.proxmox.com/en/products/proxmox-backup-server/get-started
- Proxmox PBS sync jobs: https://pbs.proxmox.com/docs/managing-remotes.html
- Proxmox PBS maintenance tasks: https://pbs.proxmox.com/docs/maintenance.html
- Proxmox VE vzdump docs: https://pve.proxmox.com/pve-docs/chapter-vzdump.html
- Synology Snapshot Replication: https://kb.synology.com/en-global/DSM/help/SnapshotReplication/snapshots?version=7
- Synology Snapshot Replication technical specs: https://www.synology.com/dsm/7.9/software_spec/snapshot_replication
- Synology snapshot recovery: https://kb.synology.com/DSM/tutorial/How_can_I_recover_files_from_snapshots
- Synology Recycle Bin restore: https://kb.synology.com/en-ro/DSM/tutorial/How_do_I_restore_files_deleted_from_Synology_NAS
- Synology Hyper Backup overview: https://kb.synology.com/en-global/DSM/help/HyperBackup/BackupApp_desc?version=7
- Synology Hyper Backup feature page: https://www.synology.com/en-global/dsm/feature/hyper_backup
- Synology Hyper Backup Explorer: https://kb.synology.com/en-global/DSM/help/HyperBackupExplorer/hyperbackupexplorer?version=7
- Hetzner Storage Box protocols: https://docs.hetzner.com/storage/storage-box/general/
- Hetzner Storage Box SSH/rsync/Borg: https://docs.hetzner.com/storage/storage-box/access/access-ssh-rsync-borg/
- Hetzner Storage Box overview: https://www.hetzner.com/storage/storage-box/
- Restic docs: https://restic.readthedocs.io/en/latest/
- Kopia docs/features: https://kopia.io/docs/features/
- Kopia homepage: https://kopia.io/
- UrBackup features: https://www.urbackup.org/features.html
- UrBackup admin manual: https://www.urbackup.org/administration_manual.html
- Synology Active Backup for Business PC/Mac: https://www.synology.com/en-global/dsm/feature/active-backup-business/pc
- Synology ABB Mac page: https://kb.synology.com/en-global/DSM/help/ActiveBackup/activebackup_business_pc_mac?version=7
- Synology ABB requirements/limitations: https://kb.synology.com/en-af/DSM/help/ActiveBackup/activebackup_business_requireandlimit
- Synology Time Machine guide: https://kb.synology.com/en-us/DSM/tutorial/How_to_back_up_files_from_Mac_to_Synology_NAS_with_Time_Machine
- Windows File History: https://support.microsoft.com/en-us/windows/backup-and-restore-with-file-history-7bf065bf-f1ea-0a78-c1cf-7dcf51cc8bfc
- Nextcloud backup docs: https://docs.nextcloud.com/server/32/admin_manual/maintenance/backup.html
- Nextcloud desktop/mobile sync guidance: https://docs.nextcloud.com/server/latest/user_manual/en/files/desktop_mobile_sync.html
- Nextcloud WebDAV guidance: https://docs.nextcloud.com/server/latest/user_manual/en/files/access_webdav.html
- Nextcloud public shares: https://docs.nextcloud.com/server/latest/user_manual/en/files/sharing.html
- Nextcloud OIDC / auth docs: https://docs.nextcloud.com/server/stable/admin_manual/configuration_user/index.html
- Immich backup/restore: https://docs.immich.app/administration/backup-and-restore
- Immich quick start: https://docs.immich.app/overview/quick-start
- Immich custom file locations: https://docs.immich.app/guides/custom-locations
- Immich OAuth/OIDC: https://docs.immich.app/administration/oauth/
- Authentik backup/restore: https://docs.goauthentik.io/sys-mgmt/ops/backup-restore/
- Authentik release note about removal of integrated backups: https://docs.goauthentik.io/releases/2022.2/
- Apple iPhone backup: https://support.apple.com/guide/iphone/back-up-iphone-iph3ecf67d29/ios
- Apple iPhone restore from backup: https://support.apple.com/en-us/118105
- Android backup/restore: https://support.google.com/android/answer/2819582?hl=en

View File

@@ -0,0 +1,790 @@
# Immich V1 setup on Proxmox + Synology NAS + Authentik + Pangolin
## Status
This document is the **ground truth** for the current Immich V1 deployment and the first troubleshooting reference.
Current state:
- **Platform:** Proxmox VM
- **Guest OS:** Debian 13 server, headless
- **Networking:** LAN IP via DHCP reservation, plus ZeroTier installed
- **Container runtime:** Docker + Docker Compose
- **Immich deployment:** official Immich `docker-compose.yml`
- **Storage model:**
- **media/library on Synology NAS via NFS**
- **Postgres on local VM storage**
- **Planned next steps:** Authentik OIDC login, Pangolin public reverse proxy, Synology snapshots verification
---
## Why this architecture
This is the correct V1 shape because:
- Immich recommends a **full VM** in virtualized environments, not Docker in LXC.
- Immich recommends **Docker Compose** for normal deployment.
- Immich explicitly states that the **Postgres database should stay on local SSD storage and not on a network share**.
- Synology **Btrfs snapshots** are a good fit for the media share.
- NFS is a cleaner Linux-to-Linux storage mount than SMB for this use case.
---
## Current implemented architecture
```mermaid
flowchart LR
subgraph Users
U1[Browser users]
U2[Immich mobile app users]
U3[Admin via SSH]
end
subgraph Edge
ZT[ZeroTier]
PG[Pangolin - planned]
end
subgraph Proxmox
VM[Debian 13 VM\nimmich-vm]
DC[Docker Compose]
IS[Immich Server]
IML[Immich ML]
R[(Redis)]
DB[(Postgres\nlocal disk)]
CFG[/opt/immich-app\ncompose + .env/]
NFSM[/mnt/immich-prod\nNFS mount/]
end
subgraph Synology
SHARE[Shared folder: immich-prod]
SNAP[Snapshots - to configure/verify]
end
subgraph Identity
AK[Authentik - planned]
end
U3 --> ZT --> VM
U1 --> PG --> IS
U2 --> PG --> IS
VM --> DC
DC --> IS
DC --> IML
DC --> R
DC --> DB
IS --> NFSM
IML --> NFSM
NFSM --> SHARE
SHARE --> SNAP
IS --> AK
```
---
## VM build decisions
### Guest type
- **Debian 13 server/headless**
- No desktop environment
- SSH server installed
- Standard system utilities installed
### Proxmox VM settings used/recommended
- **Machine type:** `q35`
- **BIOS:** `OVMF (UEFI)`
- **Graphics:** default / minimal, no desktop needed
- **CPU type:** ideally `host`
- acceptable fallback: `x86-64-v2-AES`
- **vCPU:** 4
- **RAM:** 8 GB recommended
- **Disk:** local SSD-backed VM disk, enough for OS + Docker + Postgres
- good V1 default: **64 GB**
- **NIC model:** VirtIO
### Why
- `q35` + `OVMF` is the modern sane default.
- Debian headless keeps the VM simple and low-maintenance.
- Immich itself does not need a GUI on the host.
- Local disk is used for DB because the DB must **not** live on NFS.
---
## Directory layout
### On the VM
```text
/opt/immich-app/
├── docker-compose.yml
├── .env
└── postgres/
/mnt/immich-prod/
├── library/
└── model-cache/ # optional, if you keep ML cache here
```
### Why this layout
- `/opt/immich-app` is for the **application deployment**, not user files.
- `/mnt/immich-prod` is the mounted NAS share.
- `postgres/` stays on **local VM storage**.
- Do **not** put the project under `/home/cef/...` for production-style operation.
---
## Installed packages / components
Installed on the VM:
- `docker`
- `docker compose`
- `zerotier`
- `nfs-common`
- `sudo`
Useful verification commands:
```bash
docker --version
docker compose version
zerotier-cli info
showmount -e 192.168.1.34
mount | grep immich
```
---
## Synology NFS setup
### NAS
- **Synology IP:** `192.168.1.34`
- **Shared folder / export:** `/volume1/immich-prod`
- **Allowed client:** `192.168.1.52`
Verified export list:
```bash
sudo showmount -e 192.168.1.34
```
Expected output:
```text
Export list for 192.168.1.34:
/volume1/Downloads 192.168.1.35/24
/volume1/immich-prod 192.168.1.52
```
### VM mountpoint
```bash
sudo mkdir -p /mnt/immich-prod
```
### Manual mount command
```bash
sudo mount -t nfs 192.168.1.34:/volume1/immich-prod /mnt/immich-prod
```
### Important typo that already happened once
Wrong:
```bash
sudo mount -t nfs 192.168.1.34:/volumel/immich-prod /mnt/immich-prod
```
Correct:
```bash
sudo mount -t nfs 192.168.1.34:/volume1/immich-prod /mnt/immich-prod
```
The mistake was **`volumel`** with letter `l` instead of **`volume1`** with number `1`.
### Recommended persistent mount in `/etc/fstab`
Use this:
```fstab
192.168.1.34:/volume1/immich-prod /mnt/immich-prod nfs rw,hard,_netdev,x-systemd.automount,noatime 0 0
```
Then test it:
```bash
sudo mount -a
mount | grep immich-prod
df -h | grep immich-prod
```
### Why these mount options
- `rw` -> read/write
- `hard` -> keep retrying if the NAS drops briefly
- `_netdev` -> network-dependent mount
- `x-systemd.automount` -> avoids ugly boot timing issues
- `noatime` -> reduces metadata writes
---
## Immich deployment files
### Project directory
```bash
cd /opt/immich-app
```
### Compose file
Use the **official Immich release** `docker-compose.yml` unchanged unless there is a specific reason to change it.
This is important because:
- the official file stays aligned with the current release
- random blog versions drift
- hand-written compose files become future maintenance debt
### Current `.env`
Current deployment values:
```dotenv
# You can find documentation for all the supported env variables at https://docs.immich.app/install/environment-variables
# The location where your uploaded files are stored
UPLOAD_LOCATION=/mnt/immich-prod/library
# The location where your database files are stored
# MUST stay on local VM storage, not on NFS
DB_DATA_LOCATION=/opt/immich-app/postgres
# Set your timezone
TZ=Europe/Zurich
# Immich Settings
IMMICH_ENV=production
# Immich version
IMMICH_VERSION=v2
# Database credentials
# Use only A-Za-z0-9 for this value
DB_PASSWORD=my-secret-pw. # See Lastpass
# Usually leave these as default unless you have a reason to change them
DB_USERNAME=postgres
DB_DATABASE_NAME=immich
```
### Why these values are correct
- `UPLOAD_LOCATION=/mnt/immich-prod/library`
- correct because media belongs on the NAS share
- `DB_DATA_LOCATION=/opt/immich-app/postgres`
- correct because DB must stay local
- `TZ=Europe/Zurich`
- good operational default
- `IMMICH_ENV=production`
- correct for this VM
- `IMMICH_VERSION=v2`
- matches current official release convention
---
## Useful Docker commands
### Start / recreate stack
```bash
cd /opt/immich-app
docker compose up -d
```
### Stop stack
```bash
cd /opt/immich-app
docker compose down
```
### See running containers
```bash
docker ps
```
### Follow logs
```bash
cd /opt/immich-app
docker compose logs -f
```
### Follow a single service log
```bash
docker compose logs -f immich-server
docker compose logs -f database
docker compose logs -f redis
docker compose logs -f immich-machine-learning
```
### Restart stack
```bash
docker compose restart
```
### Pull updated images later
```bash
cd /opt/immich-app
docker compose pull
docker compose up -d
```
---
## Current data model
### Media
Stored on NAS via:
```text
/mnt/immich-prod/library
```
This means:
- media is on Synology storage
- Synology snapshots can protect it
- the actual photos/videos do **not** live on the VM disk
### Database
Stored locally via:
```text
/opt/immich-app/postgres
```
This means:
- DB is not exposed to NFS consistency problems
- VM backup strategy must include this path
- DB and media are separate backup concerns
### Automatic Immich DB backups
Immich also stores automatic DB dumps under the upload location, typically in:
```text
UPLOAD_LOCATION/backups
```
Those backups contain **metadata only**, not the photo/video files.
So:
- NAS snapshots help protect media and DB dump files
- but media + DB are still two separate pieces of the system
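A quick way to confirm those dumps exist (the path follows from `UPLOAD_LOCATION` above; the directory only appears after the first automatic backup has run):

```bash
ls -lah /mnt/immich-prod/library/backups
```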
---
## Synology snapshots
### Goal
The `immich-prod` shared folder should have Synology snapshots enabled.
### Why
This gives:
- fast rollback after accidental deletion
- protection against bad imports or user mistakes
- short-term recovery without touching full backups
### Good V1 retention suggestion
- hourly snapshots for 24 hours
- daily snapshots for 14 days
- weekly snapshots for 8 weeks
### Important truth
Snapshots are **not enough by themselves**.
They are rollback protection, not the full backup strategy.
The real backup picture later must include:
- Synology media share backup offsite
- VM / Postgres backup
- restore testing
---
## Authentik plan
Planned next phase:
- configure Immich login through **Authentik OIDC**
- keep local Immich login enabled until OIDC is proven working
### Important future redirect URIs
When creating the Immich OIDC application/provider in Authentik, include:
```text
app.immich:///oauth-callback
https://immich.<your-domain>/auth/login
https://immich.<your-domain>/user-settings
```
The mobile callback is required for app login.
### Safe rollout rule
Do this in order:
1. verify local Immich admin login works
2. configure Authentik OIDC
3. test browser login
4. test mobile login
5. only then consider disabling password login
---
## Pangolin plan
Planned next phase:
- expose Immich publicly through **Pangolin**
- do **not** expose port `2283` directly to the internet
### Reverse proxy requirements for Immich
The reverse proxy must correctly pass:
- `Host`
- `X-Real-IP`
- `X-Forwarded-Proto`
- `X-Forwarded-For`
It also must:
- allow **large uploads**
- serve Immich on the **root of a subdomain**, not a sub-path
Correct:
```text
https://immich.example.com
```
Wrong:
```text
https://example.com/immich
```
---
## Networking notes
### DHCP reservation issue already seen
The VM originally still held the old IP lease after a router DHCP reservation change.
Fastest fix was simply:
```bash
su -
reboot
```
Reason:
- DHCP reservation does not always force immediate address change
- the client often keeps the old lease until renew/reboot/expiry
### Current relevant IPs
- **Synology NAS:** `192.168.1.34`
- **Immich VM:** `192.168.1.52`
---
## Troubleshooting logbook
### Problem: `sudo: command not found`
Cause:
- user did not yet have sudo available / configured
Fix:
```bash
su -
apt update
apt install sudo
usermod -aG sudo cef
reboot
```
---
### Problem: `apt update` permission denied / lock file errors
Cause:
- command was run as non-root user without sudo
Fix:
```bash
su -
apt update
```
---
### Problem: `showmount: command not found`
Cause:
- NFS client tools not installed yet
Fix:
```bash
sudo apt update
sudo apt install -y nfs-common
```
Then:
```bash
sudo showmount -e 192.168.1.34
```
---
### Problem: `mkdir: cannot create directory '/mnt/immich-prod': Permission denied`
Cause:
- `/mnt` requires root privileges
Fix:
```bash
sudo mkdir -p /mnt/immich-prod
```
---
### Problem: `mount.nfs: access denied by server while mounting ...`
Actual cause in this case:
- typo in mount source path: used `volumel` instead of `volume1`
Correct command:
```bash
sudo mount -t nfs 192.168.1.34:/volume1/immich-prod /mnt/immich-prod
```
If it happens again with the correct path, then check:
1. Synology NFS service enabled
2. Synology export path correct
3. Synology NFS permissions allow `192.168.1.52`
4. VM actually has IP `192.168.1.52`
5. export visible via `showmount -e 192.168.1.34`
---
### Problem: changed router DHCP reservation but VM kept old IP
Cause:
- client kept existing lease
Fix:
```bash
su -
reboot
```
Alternative debugging commands:
```bash
ip a
networkctl
systemctl status systemd-networkd
```
---
### Problem: hidden `.env` file does not show in normal `ls`
Cause:
- dotfiles are hidden by default
Fix:
```bash
ls -la /opt/immich-app
```
---
## Operational rules
### Rules to keep
1. **Never move Postgres onto the NAS share.**
2. **Keep using the official Immich compose file.**
3. **Do not improvise custom Dockerfiles for V1.**
4. **Do not expose raw Immich directly to the internet.** Use Pangolin.
5. **Do not disable local login until Authentik login is proven working.**
6. **Keep the Synology shared folder dedicated to Immich.**
7. **Treat snapshots as rollback, not as the only backup.**
### Rules for changes later
If changing `.env` or updating Immich:
```bash
cd /opt/immich-app
docker compose pull
docker compose up -d
```
If something acts weird after env changes:
```bash
docker compose up -d --force-recreate
```
---
## What still needs to be done
### Must do next
1. verify `/etc/fstab` persistent NFS mount works across reboot
2. verify Synology snapshots are enabled on `immich-prod`
3. configure Authentik OIDC
4. configure Pangolin public access
5. test large upload through the reverse proxy
6. test Immich mobile login through OIDC
### Should do soon
1. rotate the DB password because it was shared in chat
2. document exact Pangolin config once implemented
3. document exact Authentik provider/app config once implemented
4. create first restore notes for:
- Immich DB
- media share
- full VM restore
---
## Fast command reference
### Check mount
```bash
mount | grep immich
df -h | grep immich
ls -la /mnt/immich-prod
```
### Check Docker
```bash
docker ps
docker compose logs -f
```
### Restart Immich
```bash
cd /opt/immich-app
docker compose restart
```
### Full stack recreate
```bash
cd /opt/immich-app
docker compose down
docker compose up -d
```
### Reboot VM
```bash
sudo reboot
```
---
## Final blunt summary
The current setup is on the right track.
The important good decisions already made are:
- full VM instead of LXC
- Debian server instead of desktop bloat
- official Immich compose
- media on NAS via NFS
- Postgres on local storage
- clear deployment path under `/opt/immich-app`
The main things that can still hurt later are:
- forgetting to persist and verify the NFS mount properly
- forgetting to enable/test Synology snapshots
- breaking login while introducing Authentik
- exposing Immich badly when adding Pangolin
- leaving the documented DB password unchanged
This document should be updated after every meaningful change.
---
```mermaid
flowchart LR
User[User Browser] -->|HTTPS| Cloudflare[Cloudflare Edge]
Cloudflare -->|Tunnel| CFD[cloudflared in edge stack]
CFD -->|HTTP or HTTPS, but one consistent model| Traefik[Traefik]
Traefik -->|Badger auth check| Pangolin[Pangolin]
Traefik -->|Proxy via Gerbil/Newt| RemoteApps[Apps behind Pangolin]
Cloudflare -->|Direct route| Authentik[Authentik direct]
Pangolin --> Gerbil[Gerbil]
Newt[Newt on remote hosts] --> Pangolin
Newt --> Gerbil
```

View File

@@ -0,0 +1,398 @@
# Immich V1 setup on Proxmox + Synology NAS + Authentik + Pangolin
## Goal
Set up **Immich first**, on a **full Linux VM in Proxmox** (not Docker inside LXC), with:
- **media storage on a Synology NAS share**
- **database on local VM storage**
- **OIDC login via Authentik**
- **public access through Pangolin**
- **Synology snapshots enabled** for the media share
This is the right V1 shape because Immich recommends Linux, Docker Compose, and a **full VM** when virtualized. It also explicitly says the **Postgres database should ideally use local SSD storage and never a network share**. The reverse proxy must forward the right headers, allow large uploads, and Immich must be served at the root of a domain/subdomain rather than a sub-path.
Sources: Immich Requirements, Reverse Proxy, OAuth Authentication.
---
## Recommended architecture
```mermaid
flowchart LR
subgraph Internet
U[Users / Immich mobile app / browser]
end
subgraph Edge
P[Pangolin\nTLS + public entrypoint]
end
subgraph Prod[Proxmox production]
VM[Debian/Ubuntu VM\nDocker Compose\nImmich server + ML + Redis + Postgres]
DB[(Postgres data\nlocal VM disk)]
CFG[(Compose files/.env\nlocal VM disk)]
end
subgraph NAS[Synology NAS]
NFS[NFS share for Immich upload library]
SNAP[Synology snapshots]
HB[Hyper Backup / offsite replication later]
end
subgraph IdP[Identity]
AK[Authentik\nOIDC provider]
end
U --> P --> VM
VM --> AK
VM --> DB
VM --> CFG
VM --> NFS
NFS --> SNAP
SNAP --> HB
```
---
## Hard recommendations
### Do this
1. **Run Immich in a VM, not an LXC.**
2. Put **Postgres on local VM storage**.
3. Put **Immich upload/media storage on the NAS** using a **Unix-compatible mounted path**.
4. Use **OIDC with Authentik**, but **keep local password login enabled until OIDC works**.
5. Publish Immich on its own subdomain, for example `immich.example.com`.
6. Use a **dedicated Synology shared folder** for Immich media.
7. Turn on **Btrfs snapshots** on that shared folder.
### Do not do this
1. **Do not put `DB_DATA_LOCATION` on the NAS.**
2. **Do not serve Immich on a sub-path** like `https://example.com/immich`.
3. **Do not expose port 2283 directly to the internet**.
4. **Do not use the NAS share for everything**. Only media belongs there; DB does not.
---
## Best-practice V1 decision: primary upload storage vs external library
For a fresh setup, use the NAS-mounted path as **Immich's main `UPLOAD_LOCATION`**.
Use **External Libraries** only if you already have existing folders on the NAS that Immich should **index without owning**. External libraries need the directory mounted into the container too, typically by adding additional volume mounts.
Source: Immich External Library guide.
So for V1:
- `UPLOAD_LOCATION` -> NAS share mounted in the VM
- Postgres data -> local VM disk
- Optional later: extra read-only external libraries from other NAS folders
---
## V1 implementation plan
### 1. Create a dedicated Proxmox VM
Use a **Debian 12** or **Ubuntu 24.04 LTS** VM.
Suggested starting size:
- 4 vCPU
- 8 GB RAM
- 40-80 GB local SSD/NVMe disk for OS + Docker + Postgres + cache
Immich's current requirements say:
- Linux or Unix-like 64-bit OS recommended
- minimum **6 GB RAM**, recommended **8 GB**
- minimum **2 cores**, recommended **4**
- in a virtualized environment, a **full VM** is recommended
- storage should support Unix ownership/permissions
- the **Postgres database should ideally use local SSD storage and never a network share**
Source: Immich Requirements.
### 2. Create a dedicated Synology shared folder for Immich
Create something like:
- Shared folder: `immich-prod`
Inside it, keep it simple. Let Immich create its own structure under `UPLOAD_LOCATION`.
Do **not** manually invent a lot of subdirectories unless you have a reason.
### 3. Export that Synology folder to the VM
Prefer **NFS** from Synology to the Linux VM.
Reason:
- cleaner Linux permissions model than SMB for this use case
- easier predictable mount behavior inside a Linux VM
Mount it in the VM, for example:
- NAS export -> VM mountpoint: `/srv/immich-upload`
Then point Immich `UPLOAD_LOCATION` there.
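A minimal `/etc/fstab` sketch for that mount, assuming the same Synology IP and export name as the deployed setup; adjust to whatever share you actually create:

```fstab
192.168.1.34:/volume1/immich-prod /srv/immich-upload nfs rw,hard,_netdev,x-systemd.automount,noatime 0 0
```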
### 4. Keep local storage for DB and configs
Example local paths in the VM:
- `/opt/immich` -> compose files
- `/var/lib/immich-postgres` -> database data
- `/var/lib/immich-model-cache` -> model cache
The important part is that the Postgres path stays **local**, not on the NAS.
Source: Immich Requirements.
### 5. Install Docker Engine + Compose plugin in the VM
Immich requires Docker with the Compose plugin and explicitly requires the command `docker compose`; `docker-compose` is deprecated and no longer supported by Immich.
Source: Immich Requirements.
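One quick way to get there is Docker's convenience script (the apt repository method from Docker's docs is the more controlled alternative):

```bash
curl -fsSL https://get.docker.com -o get-docker.sh
sudo sh get-docker.sh
docker --version && docker compose version
```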
### 6. Deploy Immich with Docker Compose
Use the official compose example from Immich and modify only what matters:
- `UPLOAD_LOCATION=/srv/immich-upload`
- `DB_DATA_LOCATION=/var/lib/immich-postgres`
- model cache on local disk
- bind only to the VM internally; Pangolin handles public entry
### 7. Configure Pangolin
Publish Immich via Pangolin on its own subdomain.
Important reverse-proxy requirements from Immich:
- forward `Host`
- forward `X-Real-IP`
- forward `X-Forwarded-Proto`
- forward `X-Forwarded-For`
- allow **large uploads**
- Immich must be on the **root path of a domain/subdomain**
Source: Immich Reverse Proxy docs.
### 8. Configure Authentik OIDC for Immich
Immich supports OIDC and explicitly lists Authentik among supported identity providers.
Source: Immich OAuth Authentication.
Create an application/provider pair in Authentik using **OAuth2/OIDC**. Authentik recommends creating the application and provider together via **Applications -> Applications -> Create with provider**.
Source: Authentik Create an OAuth2 provider.
Use these redirect URIs in Authentik:
- `app.immich:///oauth-callback`
- `https://immich.example.com/auth/login`
- `https://immich.example.com/user-settings`
Immich requires the mobile redirect URI `app.immich:///oauth-callback` for iOS/Android app login to work properly.
Source: Immich OAuth Authentication.
Then in Immich Admin -> Settings -> OAuth:
- enable OAuth
- set issuer URL to the Authentik OIDC discovery URL
- set client ID / secret
- scope: `openid email profile`
- auto-register: on
- auto-launch: optional, keep off in V1
### 9. Keep password login enabled initially
Do not lock yourself out.
First:
- create the initial local Immich admin
- verify normal web login
- configure OIDC
- verify browser login
- verify mobile login
- only then decide whether to disable password login
Immich allows disabling password authentication instance-wide, but that can lock everyone out if OAuth is also broken.
Source: Immich System Settings.
### 10. Enable Synology snapshots
Because your Synology is Btrfs, this is a good fit.
Enable snapshots on the `immich-prod` shared folder.
Good V1 retention example:
- every hour for 24 hours
- every day for 14 days
- every week for 8 weeks
That gives you fast rollback for accidental deletion or a bad import.
Important caveat: snapshots help with rollback, but a complete backup still needs the Immich database and a real backup destination later.
---
## Concrete directory layout
### In the VM
```text
/opt/immich/ # compose + .env
/var/lib/immich-postgres/ # local postgres data
/var/lib/immich-model-cache/ # local ML cache
/srv/immich-upload/ # NFS mount from Synology
```
### On Synology
```text
Shared folder: immich-prod
└── mounted into VM as /srv/immich-upload
```
Immich will create and use directories under `UPLOAD_LOCATION`, including backups for automatic DB dumps. Immich stores automatic database backups in `UPLOAD_LOCATION/backups`, and those backups only contain metadata, not the photos/videos.
Source: Immich Backup and Restore.
---
## Example Compose shape
This is **not** a full copy of the official file. It is the shape that matters.
```yaml
services:
immich-server:
image: ghcr.io/immich-app/immich-server:release
env_file:
- .env
volumes:
- ${UPLOAD_LOCATION}:/data
ports:
- "2283:2283"
depends_on:
- redis
- database
immich-machine-learning:
image: ghcr.io/immich-app/immich-machine-learning:release
env_file:
- .env
volumes:
- /var/lib/immich-model-cache:/cache
redis:
image: redis:6.2-alpine
database:
image: ghcr.io/immich-app/postgres:14-vectorchord0.4.0-pgvectors0.8.1
env_file:
- .env
volumes:
- ${DB_DATA_LOCATION}:/var/lib/postgresql/data
```
And the key bits in `.env`:
```env
UPLOAD_LOCATION=/srv/immich-upload
DB_DATA_LOCATION=/var/lib/immich-postgres
```
Use the exact current compose file and image tags from the Immich docs/release example when you deploy, not a random blog post. Immich has changed its Postgres/vector extension story over time and currently documents VectorChord-based setups and upgrade cautions.
Sources: Immich Requirements, Upgrading, Pre-existing Postgres.
---
## Pangolin-specific guidance
Pangolin is an identity-aware remote access platform combining reverse proxy and VPN-style connectivity. For this use case, the key point is: use Pangolin only as the **public entrypoint**, and keep Immich itself private behind it.
Source: Pangolin GitHub README.
For Immich specifically, make sure Pangolin is configured so that:
- the service is published on a **dedicated host/subdomain**
- uploads are allowed to be large enough
- timeouts are not too short for big videos
- forwarded headers match what Immich expects
Because Immich documents strict reverse-proxy expectations, do not rely on defaults you have not checked.
Source: Immich Reverse Proxy docs.
---
## Authentik-specific guidance
In Authentik:
1. Create an application/provider pair with **OAuth2/OIDC**.
2. Use **Authorization Code** flow.
3. Use a **confidential** web client.
4. Add all redirect URIs you will actually use.
Immich's OAuth docs say the redirect URIs should include:
- mobile callback
- web login callback
- user settings callback
and they should contain all domains used to access Immich.
Source: Immich OAuth Authentication.
---
## What to back up for Immich in this design
You need both:
1. **`UPLOAD_LOCATION`** on the NAS
2. **the database**
Immich says a comprehensive backup includes both uploaded photos/videos and the Immich database. It also says the automatic database backups are stored in `UPLOAD_LOCATION/backups`, but these backups contain only metadata, not photos/videos.
Source: Immich Backup and Restore.
In your design that means:
- NAS snapshots protect the media path and DB backup files stored under `UPLOAD_LOCATION/backups`
- local-VM backup later must also protect `/var/lib/immich-postgres` or you rely on Immich DB dumps inside `UPLOAD_LOCATION/backups`
For V1, I would still enable Immich automatic DB backups in the UI.
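A hedged sketch of an additional manual database dump (the `immich_postgres` container name is the default from the official compose file; confirm it with `docker ps` before relying on this):

```bash
docker exec -t immich_postgres pg_dumpall --clean --if-exists --username=postgres \
  | gzip > /var/backups/immich-db-$(date +%F).sql.gz
```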
---
## Minimum test plan for today
1. VM boots and NFS mount is present at `/srv/immich-upload`
2. `docker compose up -d` starts all Immich services
3. local browser login works on `http://VM-IP:2283`
4. upload one test image and one test video
5. verify files appear on Synology share
6. configure Pangolin and verify public HTTPS access
7. configure Authentik OIDC and verify browser login
8. verify mobile app login using OIDC
9. create a manual DB backup in Immich and confirm it appears under `UPLOAD_LOCATION/backups`
10. create a Synology snapshot and verify the snapshot schedule is active
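Steps 1-3 can be sanity-checked from the shell; the IP and compose path below assume the values used elsewhere in these notes:

```bash
mount | grep immich-upload || echo "NFS mount missing"
docker compose -f /opt/immich/docker-compose.yml ps
curl -s -o /dev/null -w '%{http_code}\n' http://192.168.1.52:2283/   # expect 200
```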
---
## Final recommendation
For your **V1 today**, the clean setup is:
- **Proxmox VM** for Immich
- **Docker Compose** inside the VM
- **local VM disk** for Postgres and local runtime state
- **Synology NFS share** for `UPLOAD_LOCATION`
- **Synology Btrfs snapshots** on that shared folder
- **Pangolin** for public HTTPS exposure
- **Authentik OIDC** for login
That is the right compromise between best practice and what you want to achieve right now.
## The one thing not to compromise on
Do **not** place the Immich Postgres data on the NAS share. That is the main thing Immich explicitly warns against.
Source: Immich Requirements.

View File

@@ -60,6 +60,8 @@ Diese Liste kann auch mit `#idea/startup` oder mit `#business-idea` ergänzt wer
- course website (maybe like aihero.dev?) but for apprenticeships: woodworking, metalworking, etc. When AI replaces all PC jobs, people need something to do. --> create a marketplace for teaching. Yes, people can watch everything on YouTube, but say here in Switzerland you have a local YouTuber who can easily organize workshops and book private lessons. --> much more personal.
- gamification of software development through agents
- CV curator: have a layout, keep a master list of bullets. Depending on the role, select whichever bullets you want.
- Scuba diving: risk analysis of your dives. Get direct feedback on your previous dive (red, orange, or green smiley). Better planning tool for DCS (talked with [[Lukas Glaus]], he has a physics friend who is an absolute killer). Immediate suggestions on how to improve the dive profile to reduce risk. Simple app, good UI.
- Ski touring app with crowd intelligence: upload pictures of conditions with geo data. Feedback system: rate the risk level of past tours given the avalanche risk and the route chosen (Reduktionsmethode).
## Medtech
- use differential cameras to visualize airflow so deaf people can see what the airflow looks like when speaking and learn it properly

View File

@@ -4,17 +4,17 @@
- [x] Beeswax wraps for sandwiches
# 2. Hut
## Toiletries
- [x] Toothbrush & toothpaste
- [x] Soap / shampoo
- [x] Deodorant
- [x] Bepanthen
- [ ] Toothbrush & toothpaste
- [ ] Soap / shampoo
- [ ] Deodorant
- [ ] Bepanthen
## Living and sleeping
- [x] Hut sleeping bag liner
- [x] Fabric bag as pillow
- [x] Earplugs (Oropax)
- [x] Sleep mask
- [x] Slippers
- [x] Terry towels
- [ ] Hut sleeping bag liner
- [ ] Fabric bag as pillow
- [ ] Earplugs (Oropax)
- [ ] Sleep mask
- [ ] Slippers
- [ ] Terry towels
## Clothes
- [x] Lounge pants
- [x] Lounge sweater

View File

@@ -1,81 +1,81 @@
# Equipment
- [x] Avalanche transceiver (battery?)
- [x] Avalanche shovel
- [x] Avalanche probe
- [ ] Avalanche transceiver (battery?)
- [ ] Avalanche shovel
- [ ] Avalanche probe
- [ ] Skis/splitboard (adjusted??)
- [x] Climbing skins
- [x] Ski crampons
- [x] Poles (telescopic)
- [x] Touring boots
- [x] Backpack + rain cover
- [x] Sit pad
- [ ] Climbing skins
- [ ] Ski crampons
- [ ] Poles (telescopic)
- [ ] Touring boots
- [ ] Backpack + rain cover
- [ ] Sit pad
# Provisions
- [x] Thermos
- [ ] Water bottle
- [x] Beeswax wraps for sandwiches
- [x] Nuts and dried fruit
- [x] Emergency bars
- [ ] Gas stove
- [ ] Bialetti
- [ ] Mini raclette oven
- [ ] Thermos
- [x] Water bottle
- [ ] Beeswax wraps for sandwiches
- [ ] Nuts and dried fruit
- [ ] Emergency bars
- [x] Gas stove
- [x] Bialetti
- [x] Mini raclette oven
- [x] Pocket knife
# Clothes
- [x] Glacier glasses
- [x] Ski goggles
- [x] Helmet + brimless beanie if climbing helmet
- [x] Hardshell pants
- [x] Hardshell jacket
- [x] Breathable sweater
- [x] Down jacket
- [x] Long merino underpants
- [x] Short merino underpants
- [x] Merino shirt, short-sleeve
- [x] Merino shirt, long-sleeve
- [x] Ski socks
- [x] Spare underwear in a plastic bag
- [x] Beanie / headband
- [x] Neck gaiter / ski mask
- [x] Sun hat
- [x] Touring gloves
- [x] Hardshell gloves
- [x] Ski gloves
- [x] Blister socks
- [ ] Glacier glasses
- [ ] Ski goggles
- [ ] Helmet + brimless beanie if climbing helmet
- [ ] Hardshell pants
- [ ] Hardshell jacket
- [ ] Breathable sweater
- [ ] Down jacket
- [ ] Long merino underpants
- [ ] Short merino underpants
- [ ] Merino shirt, short-sleeve
- [ ] Merino shirt, long-sleeve
- [ ] Ski socks
- [ ] Spare underwear in a plastic bag
- [ ] Beanie / headband
- [ ] Neck gaiter / ski mask
- [ ] Sun hat
- [ ] Touring gloves
- [ ] Hardshell gloves
- [ ] Ski gloves
- [ ] Blister socks
# First aid and tools
- [x] Phone
- [ ] Phone
- [x] Cash
- [x] Tissues
- [x] Duct tape
- [x] First aid kit
- [x] Splint
- [x] Bivy sack
- [x] Sunscreen
- [x] Sun lip balm
- [x] Screwdriver tool
- [x] Small strap
- [x] Cable ties
- [x] Headlamp
- [x] Backpack/airbag max 35-45 l
- [x] Blister plasters
- [x] Carabiner
- [ ] Tissues
- [ ] Duct tape
- [ ] First aid kit
- [ ] Splint
- [ ] Bivy sack
- [ ] Sunscreen
- [ ] Sun lip balm
- [ ] Screwdriver tool
- [ ] Small strap
- [ ] Cable ties
- [ ] Headlamp
- [ ] Backpack/airbag max 35-45 l
- [ ] Blister plasters
- [ ] Carabiner
- [ ] Accessory cord
- [x] Hot glue stick
- [x] Lighter
- [ ] Hot glue stick
- [ ] Lighter
- [ ] Binoculars
- [x] Plastic bags / drybag
- [ ] Plastic bags / drybag
# Electronics
Are all devices charged the evening before?
- [x] Charging cables for all devices
- [x] Phone
- [x] Spare batteries
- [x] Power bank
- [x] GPS watch
- [ ] Charging cables for all devices
- [ ] Phone
- [ ] Spare batteries
- [ ] Power bank
- [ ] GPS watch
- [ ] Camera
## Camp
- [x] Sweatpants
- [ ] Sweatpants
- [ ] Long jogging pants
- [ ] Sports shorts
- [ ] Sports T-shirt

View File

@@ -0,0 +1,4 @@
- project photos (imitate satellite imagery)
- select a point and see all photos that show that point (otherwise too many photos)
- distinguish between tracked tour (recorded) and planned tour
- GDPR, no liability etc. (copy from Skitouren Guru)

View File

@@ -7,7 +7,9 @@ tags: [excalidraw]
==⚠ Switch to EXCALIDRAW VIEW in the MORE OPTIONS menu of this document. ⚠==
# Text Elements
# Excalidraw Data
## Text Elements
PX4 Info Node ^SHjaC7GH
getCurrentPose ^bntLlwqf
@@ -40,572 +42,78 @@ MoveSmoothActionServer ^jNLQaaLq
MoveSmoothAction ^jLt0dnKk
# Embedded files
## Embedded Files
53a29778b62e67d5d6ee80cf34453e58bcb88187: [[Pasted Image 20231012160459_976.png]]
%%
# Drawing
```json
{
"type": "excalidraw",
"version": 2,
"source": "https://github.com/zsviczian/obsidian-excalidraw-plugin/releases/tag/1.9.20",
"elements": [
{
"type": "rectangle",
"version": 70,
"versionNonce": 1706906056,
"isDeleted": false,
"id": "2x2FIJ03wD79stOrQPI2T",
"fillStyle": "hachure",
"strokeWidth": 2,
"strokeStyle": "solid",
"roughness": 1,
"opacity": 100,
"angle": 0,
"x": 563.2296109199522,
"y": -641.5260467529299,
"strokeColor": "#1e1e1e",
"backgroundColor": "transparent",
"width": 664.8743286132816,
"height": 818.5408325195318,
"seed": 1357369544,
"groupIds": [
"eTVsuRlQRF9sgbnV876J0",
"lE9VKsMY2iriwU-c_Iog-"
],
"frameId": null,
"roundness": {
"type": 3
},
"boundElements": [],
"updated": 1697119406023,
"link": null,
"locked": false
},
{
"type": "rectangle",
"version": 91,
"versionNonce": 1557106360,
"isDeleted": false,
"id": "wgrd0rloD6B_tv4cvqt1Q",
"fillStyle": "hachure",
"strokeWidth": 2,
"strokeStyle": "solid",
"roughness": 1,
"opacity": 100,
"angle": 0,
"x": 658.3203702402645,
"y": -595.8825224970703,
"strokeColor": "#1e1e1e",
"backgroundColor": "transparent",
"width": 480.7786254882816,
"height": 60.85806274414085,
"seed": 1672494792,
"groupIds": [
"eTVsuRlQRF9sgbnV876J0",
"lE9VKsMY2iriwU-c_Iog-"
],
"frameId": null,
"roundness": {
"type": 3
},
"boundElements": [
{
"type": "text",
"id": "SHjaC7GH"
}
],
"updated": 1697119406023,
"link": null,
"locked": false
},
{
"type": "text",
"version": 60,
"versionNonce": 1153423560,
"isDeleted": false,
"id": "SHjaC7GH",
"fillStyle": "hachure",
"strokeWidth": 2,
"strokeStyle": "solid",
"roughness": 1,
"opacity": 100,
"angle": 0,
"x": 767.9396481943663,
"y": -587.9534911249999,
"strokeColor": "#1e1e1e",
"backgroundColor": "transparent",
"width": 261.5400695800781,
"height": 45,
"seed": 1070409160,
"groupIds": [
"eTVsuRlQRF9sgbnV876J0",
"lE9VKsMY2iriwU-c_Iog-"
],
"frameId": null,
"roundness": null,
"boundElements": [],
"updated": 1697119406023,
"link": null,
"locked": false,
"fontSize": 36,
"fontFamily": 1,
"text": "PX4 Info Node",
"rawText": "PX4 Info Node",
"textAlign": "center",
"verticalAlign": "middle",
"containerId": "wgrd0rloD6B_tv4cvqt1Q",
"originalText": "PX4 Info Node",
"lineHeight": 1.25,
"baseline": 31
},
{
"id": "RRuBk1mpSeCly4Kvfa79J",
"type": "diamond",
"x": 556.250027179718,
"y": -474.53338623046875,
"width": 340.2777099609375,
"height": 94.3055419921875,
"angle": 0,
"strokeColor": "#1e1e1e",
"backgroundColor": "transparent",
"fillStyle": "hachure",
"strokeWidth": 1,
"strokeStyle": "solid",
"roughness": 1,
"opacity": 100,
"groupIds": [
"lE9VKsMY2iriwU-c_Iog-"
],
"frameId": null,
"roundness": {
"type": 2
},
"seed": 131086280,
"version": 102,
"versionNonce": 1247606712,
"isDeleted": false,
"boundElements": [
{
"type": "text",
"id": "bntLlwqf"
}
],
"updated": 1697119406023,
"link": null,
"locked": false
},
{
"id": "bntLlwqf",
"type": "text",
"x": 650.629536151886,
"y": -439.9570007324219,
"width": 151.3798370361328,
"height": 25,
"angle": 0,
"strokeColor": "#1e1e1e",
"backgroundColor": "transparent",
"fillStyle": "hachure",
"strokeWidth": 1,
"strokeStyle": "solid",
"roughness": 1,
"opacity": 100,
"groupIds": [
"lE9VKsMY2iriwU-c_Iog-"
],
"frameId": null,
"roundness": null,
"seed": 1735610568,
"version": 73,
"versionNonce": 1339654088,
"isDeleted": false,
"boundElements": null,
"updated": 1697119406023,
"link": null,
"locked": false,
"text": "getCurrentPose",
"rawText": "getCurrentPose",
"fontSize": 20,
"fontFamily": 1,
"textAlign": "center",
"verticalAlign": "middle",
"baseline": 17,
"containerId": "RRuBk1mpSeCly4Kvfa79J",
"originalText": "getCurrentPose",
"lineHeight": 1.25
},
{
"type": "rectangle",
"version": 50,
"versionNonce": 1609226952,
"isDeleted": false,
"id": "4QXgkAkRFkLPAcD8t0Sgc",
"fillStyle": "hachure",
"strokeWidth": 2,
"strokeStyle": "solid",
"roughness": 1,
"opacity": 100,
"angle": 0,
"x": -650.6871371269228,
"y": -597.4427185058596,
"strokeColor": "#1e1e1e",
"backgroundColor": "transparent",
"width": 664.8743286132816,
"height": 818.5408325195318,
"seed": 2101748664,
"groupIds": [
"Hul-uvlemt_cI9_Fp3WrH"
],
"frameId": null,
"roundness": {
"type": 3
},
"boundElements": [
{
"type": "text",
"id": "5knELniv"
}
],
"updated": 1697123064116,
"link": null,
"locked": false
},
{
"type": "text",
"version": 228,
"versionNonce": 307511752,
"isDeleted": false,
"id": "5knELniv",
"fillStyle": "hachure",
"strokeWidth": 2,
"strokeStyle": "solid",
"roughness": 1,
"opacity": 100,
"angle": 0,
"x": -645.6871371269228,
"y": -592.4427185058596,
"strokeColor": "#1e1e1e",
"backgroundColor": "transparent",
"width": 552.638427734375,
"height": 420,
"seed": 691596472,
"groupIds": [
"Hul-uvlemt_cI9_Fp3WrH"
],
"frameId": null,
"roundness": null,
"boundElements": [],
"updated": 1697123064116,
"link": null,
"locked": false,
"fontSize": 28,
"fontFamily": 1,
"text": "\n\n\n\n- handle_goal\n- handle_cancel\n- handle_accepted\n\nThe important part is execute:\n- for loop through trajectory and send \noffboard commands to px4\n",
"rawText": "\n\n\n\n- handle_goal\n- handle_cancel\n- handle_accepted\n\nThe important part is execute:\n- for loop through trajectory and send offboard commands to px4\n",
"textAlign": "left",
"verticalAlign": "top",
"containerId": "4QXgkAkRFkLPAcD8t0Sgc",
"originalText": "\n\n\n\n- handle_goal\n- handle_cancel\n- handle_accepted\n\nThe important part is execute:\n- for loop through trajectory and send offboard commands to px4\n",
"lineHeight": 1.25,
"baseline": 409
},
{
"type": "rectangle",
"version": 68,
"versionNonce": 553175240,
"isDeleted": false,
"id": "q_MPWFFrVxrxtrzAUTaaJ",
"fillStyle": "hachure",
"strokeWidth": 2,
"strokeStyle": "solid",
"roughness": 1,
"opacity": 100,
"angle": 0,
"x": -555.5963778066103,
"y": -551.1047057246094,
"strokeColor": "#1e1e1e",
"backgroundColor": "transparent",
"width": 480.7786254882816,
"height": 60.85806274414085,
"seed": 1038783928,
"groupIds": [
"Hul-uvlemt_cI9_Fp3WrH"
],
"frameId": null,
"roundness": {
"type": 3
},
"boundElements": [
{
"type": "text",
"id": "jNLQaaLq"
}
],
"updated": 1697123064116,
"link": null,
"locked": false
},
{
"type": "text",
"version": 41,
"versionNonce": 393506760,
"isDeleted": false,
"id": "jNLQaaLq",
"fillStyle": "hachure",
"strokeWidth": 2,
"strokeStyle": "solid",
"roughness": 1,
"opacity": 100,
"angle": 0,
"x": -528.4351381825868,
"y": -543.175674352539,
"strokeColor": "#1e1e1e",
"backgroundColor": "transparent",
"width": 426.4561462402344,
"height": 45,
"seed": 8330936,
"groupIds": [
"Hul-uvlemt_cI9_Fp3WrH"
],
"frameId": null,
"roundness": null,
"boundElements": [],
"updated": 1697123064116,
"link": null,
"locked": false,
"fontSize": 36,
"fontFamily": 1,
"text": "MoveSmoothActionServer",
"rawText": "MoveSmoothActionServer",
"textAlign": "center",
"verticalAlign": "middle",
"containerId": "q_MPWFFrVxrxtrzAUTaaJ",
"originalText": "MoveSmoothActionServer",
"lineHeight": 1.25,
"baseline": 31
},
{
"id": "T_jgB4iG3mlWPyahBp9rA",
"type": "image",
"x": -1216.7221102714539,
"y": -607.1722717285156,
"width": 349,
"height": 358,
"angle": 0,
"strokeColor": "transparent",
"backgroundColor": "transparent",
"fillStyle": "hachure",
"strokeWidth": 1,
"strokeStyle": "solid",
"roughness": 1,
"opacity": 100,
"groupIds": [],
"frameId": null,
"roundness": null,
"seed": 1488109256,
"version": 105,
"versionNonce": 76913080,
"isDeleted": false,
"boundElements": null,
"updated": 1697119498449,
"link": null,
"locked": false,
"status": "pending",
"fileId": "53a29778b62e67d5d6ee80cf34453e58bcb88187",
"scale": [
1,
1
]
},
{
"type": "rectangle",
"version": 127,
"versionNonce": 605752520,
"isDeleted": false,
"id": "oKD7end-LC_VdRYXouRuv",
"fillStyle": "hachure",
"strokeWidth": 2,
"strokeStyle": "solid",
"roughness": 1,
"opacity": 100,
"angle": 0,
"x": -659.7703280448916,
"y": 338.61279296874955,
"strokeColor": "#1e1e1e",
"backgroundColor": "transparent",
"width": 664.8743286132816,
"height": 818.5408325195318,
"seed": 193154488,
"groupIds": [
"n-mVf4NySQdfVIenFRlup"
],
"frameId": null,
"roundness": {
"type": 3
},
"boundElements": [
{
"type": "text",
"id": "CMXgBmzj"
}
],
"updated": 1697122375648,
"link": null,
"locked": false
},
{
"type": "text",
"version": 735,
"versionNonce": 534342856,
"isDeleted": false,
"id": "CMXgBmzj",
"fillStyle": "hachure",
"strokeWidth": 2,
"strokeStyle": "solid",
"roughness": 1,
"opacity": 100,
"angle": 0,
"x": -654.7703280448916,
"y": 343.61279296874955,
"strokeColor": "#1e1e1e",
"backgroundColor": "transparent",
"width": 653.0186767578125,
"height": 595,
"seed": 172021432,
"groupIds": [
"n-mVf4NySQdfVIenFRlup"
],
"frameId": null,
"roundness": null,
"boundElements": [],
"updated": 1697122769534,
"link": null,
"locked": false,
"fontSize": 28,
"fontFamily": 1,
"text": "\n\n\n\n- This class implements the Action client that \nsets the goal, and has the callbacks for \nreceiving feedback and the result. It inherits \nfrom the baseclass RosActionNode which itself\ninherits from the Behaviortree library, meaning \nit implement a Behaviortree action.\n\n- parent class calls setGoal and then\n\n- Uses the client to call the server through \nasync_send_goal\n\n",
"rawText": "\n\n\n\n- This class implements the Action client that sets the goal, and has the callbacks for receiving feedback and the result. It inherits from the baseclass RosActionNode which itself inherits from the Behaviortree library, meaning it implement a Behaviortree action.\n\n- parent class calls setGoal and then\n\n- Uses the client to call the server through async_send_goal\n\n",
"textAlign": "left",
"verticalAlign": "top",
"containerId": "oKD7end-LC_VdRYXouRuv",
"originalText": "\n\n\n\n- This class implements the Action client that sets the goal, and has the callbacks for receiving feedback and the result. It inherits from the baseclass RosActionNode which itself inherits from the Behaviortree library, meaning it implement a Behaviortree action.\n\n- parent class calls setGoal and then\n\n- Uses the client to call the server through async_send_goal\n\n",
"lineHeight": 1.25,
"baseline": 584
},
{
"type": "rectangle",
"version": 143,
"versionNonce": 672151224,
"isDeleted": false,
"id": "j7vPLVyvo3dYqfyv5dahM",
"fillStyle": "hachure",
"strokeWidth": 2,
"strokeStyle": "solid",
"roughness": 1,
"opacity": 100,
"angle": 0,
"x": -563.2908968495792,
"y": 386.33972176562474,
"strokeColor": "#1e1e1e",
"backgroundColor": "transparent",
"width": 480.7786254882816,
"height": 60.85806274414085,
"seed": 1248149432,
"groupIds": [
"n-mVf4NySQdfVIenFRlup"
],
"frameId": null,
"roundness": {
"type": 3
},
"boundElements": [
{
"type": "text",
"id": "jLt0dnKk"
}
],
"updated": 1697122374080,
"link": null,
"locked": false
},
{
"type": "text",
"version": 119,
"versionNonce": 927506360,
"isDeleted": false,
"id": "jLt0dnKk",
"fillStyle": "hachure",
"strokeWidth": 2,
"strokeStyle": "solid",
"roughness": 1,
"opacity": 100,
"angle": 0,
"x": -480.59964318746967,
"y": 394.26875313769517,
"strokeColor": "#1e1e1e",
"backgroundColor": "transparent",
"width": 315.3961181640625,
"height": 45,
"seed": 1007978680,
"groupIds": [
"n-mVf4NySQdfVIenFRlup"
],
"frameId": null,
"roundness": null,
"boundElements": [],
"updated": 1697122374080,
"link": null,
"locked": false,
"fontSize": 36,
"fontFamily": 1,
"text": "MoveSmoothAction",
"rawText": "MoveSmoothAction",
"textAlign": "center",
"verticalAlign": "middle",
"containerId": "j7vPLVyvo3dYqfyv5dahM",
"originalText": "MoveSmoothAction",
"lineHeight": 1.25,
"baseline": 31
}
],
"appState": {
"theme": "light",
"viewBackgroundColor": "#ffffff",
"currentItemStrokeColor": "#1e1e1e",
"currentItemBackgroundColor": "transparent",
"currentItemFillStyle": "hachure",
"currentItemStrokeWidth": 1,
"currentItemStrokeStyle": "solid",
"currentItemRoughness": 1,
"currentItemOpacity": 100,
"currentItemFontFamily": 1,
"currentItemFontSize": 20,
"currentItemTextAlign": "left",
"currentItemStartArrowhead": null,
"currentItemEndArrowhead": "arrow",
"scrollX": 2797.031155109408,
"scrollY": 1374.2330627441424,
"zoom": {
"value": 0.4999999999999996
},
"currentItemRoundness": "round",
"gridSize": null,
"gridColor": {
"Bold": "#C9C9C9FF",
"Regular": "#EDEDEDFF"
},
"currentStrokeOptions": null,
"previousGridSize": null,
"frameRendering": {
"enabled": true,
"clip": true,
"name": true,
"outline": true
}
},
"files": {}
}
## Drawing
```compressed-json
N4KAkARALgngDgUwgLgAQQQDwMYEMA2AlgCYBOuA7hADTgQBuCpAzoQPYB2KqATLZMzYBXUtiRoIACyhQ4zZAHoFAc0JRJQgEYA6bGwC2CgF7N6hbEcK4OCtptbErHALRY8RMpWdx8Q1TdIEfARcZgRmBShcZQUebQBGAE5tAAYaOiCEfQQOKGZuAG1wMFAwMuh4cXRA7CiOZWD0sshGFnYuNAB2eP5y1tZOADlOMW4kzoA2eIBmCc6UxN7IQmYA
EUyoBGJuADMCMKWIEm4IHkweADEASQApFOmKVc7E5igAeVIARQAFK54AFSa5R2hHw+AAyrBGhJJLhsBpAkCBFBSGwANYIADqJHU3D4xWRqIxkJg0PQgg8SIgqL8kg44XyaB6BIgbDgcLUMDGKRSh2sDSqvJZmG4AFYJtNtDweIkpgskolRdLDly0M4JgAWeLaJUTFIauZKxIyxYs15EhAAYTY+DYpBOAGJ4ghnc6qZo4WjlDSOMRrbb7RIUdZmOz
ArkqRQcZJuBNNdoABydDXTHgJqaphPxCaHSQIQjKaTcLMJnUalIJ1OipKi6bxBOHMJbMbTUWdWaKjUaw7e4RwK7ERmoIrNSAIf4ANWYQgASvhPjOLi9lJoOBOkxM7kjIPgAKKJCcAaWYAFkAJo8QikQgUACqzmwAH0rmxlM4IASALqHHbkbID7gOCEMFDh9Yh6WYIdSmaCpEG4aYCQAX0OTRhF9XdgmyXIhwKb8WSEOBiFwTZtiZTpkwTSjaxlOZ
DiIDg0UA4D8DothsAxUjUD2fADhZQhfSwE5cDSXNQn+LAoAAGX4xi0G4sJimQ4poMgWA4IkGo6gFKl+nabhjUOXShhGKp4hSetEiSCYeE6Q4VnWYISN2fYEDsziIAob1iBSUhbVWCYACFHygegNWwegAEcoHiT4qRBMESTJKQ4QRJBGxRdEsWjPF0otRKqggCljlA4RCwgodmRgtkOVgbkhRg/kyXq8oRTQCZRVLVN7nmHhyx4TVRRVbhnFFRVEw
THglV6xJ5nmaZcsy/07UdV0XTSlkPXY3shF9JbA3QYMOFDXBwygSNsrQDUExSbRyPTSaromrMcxZPMCyLNqboTDqUms5MtXLb7GwQZsmTmaaNWefEYO2/tB0KAkx0nac5wXJdmBXNcNy3JYd33I9TwvK8b3vJ8XzfD9mjwmDf1wf9OKAkCWTA8ruBU1TKngpCULQ4gMKyHI8gR0d2Y59SDok7dlnc8EAAkACtcEtToAHFZcp5pEK/Q4CKIpyyIoq
jU1lWyWXo2TUEZlizbYjjnJ41y+IE1r0FweIqVhZhxMwKSZPthSyiUsoVNggrNh9nSmAGDpUCmQyo/aYYOFGS7q1TSH2zstYNlBriXLck45cV5W1bi0EIShArYXhER1pg81MuxYhcTQaHygb4lK5OIrthK2lWaZQ5quwTk6r5eomsOF3Jk6bREmmGbeseutsyGtUOtnxVpg1Sz4mmxID4WjE9pWta3RQz1tt2m1lqDcgjrDQXzubmNW6mHUF8siY
D54etOkG16+ZCxnVTsDXOZl5jli/s1SAsMBw4URhgZGs55yLmXKudckwcaIL3AeY855LzXjvA+Z8r53zaxZLTemTEmYwRZgyGh1sYKoR2nzTCgscLU3KLrYi4DyKPW+sbWiZs/ZoCtqxdiud5KOxppwKA4JCBGCqLMH8ciLh01BKqVAlVyjhxAegb4AANDUqArgcB2GwVAwxiB13KOQCg3t9EQCMSYsxFirFsBsVSPRABBIgygY4QDELkJgkdSBQ
HMAQPxBZAn6BIMQRohw9C5FwPxJgAEJCeVIN5XybB/JBRCmFSK0VYpD2vP4AgjiTguNMeYyx1jbE7jSbLIBH1tFSgAcw0IQQ0nwR0ZAXAQgoBsBnOERRVQURCBkeUfiNiXYQFwDwD2YkJLSQYv7BAilegh2OBIGcM4hABTRPEfQcBwRWnwDADUh56B7GeDcbxnMJCODppwXuwoxTig6TyGy8RnjdAbCyLRzhIYag/tMaY91pj6gmEmTp5Qowv3gu
WKU5F5gHz1PPf+uZWn6MSGC6FopRRah/n/eFAyJ6CiPlaG++0IBOjPo0iAm0vRgRPnfEMj8Iw/nLvlE41dUpUg7llJFg8zQZU7qSAqPcqQ0jKgwsVVV2Qj1qkyHkPYaRwwQaOCAuCCYEOJsQsmZCNZgC4ZAKhCAMmW2Yn3X0A9UDs1DnibmZoQacRmGZe61145tE4NyeaLIjIcCTinbRE1P6/26FnByCB9Z5wdjzVh/MsJCzQCOZootnVBklrjaW
JxVxSXwBQCKOxTVgC1lTHWhFeEev4ZRQRNFTYwXNowiRds5L5ydnMoS0xlle1WaIhNAcK3bL4u5Qtkli2lseeLaAuaPltVFDdayW8pjVkoi9GCwKUzJEVPMHk7Zeq/1NDBRFLdtHVm0NMZ4lY5oZjTDi96+jJrj20mgGBhUJU0oDKfVa7pL5stpScQ6x1TplwSl3GEKVa5Cu/U3C9/Sv15Sg+SG0xVmalTpAq7RQ9lWjzVZ+uB8N004PxvgomRDS
akIphQmmf4rUM1tZh1hDrxFur4a2OU4pAUwWDdwZMvro6htMkvHkSQEynpmdnRyUiu3MN5imjhhRzUQB4fGv5htG0mzokO9jLbbZycTSyPRJxlBxstCIU63w2BhFlZQKpEhzNQEs6QaztmmUWNyAopReJP1eagOouJlyxiHF8f4wJwTNj2l9RE9w0SAknDicQBJTKPRhHoqZZt5RklRDSaQa1EB9mHOOac85lpLnXNubge5VI7QFn4pUyWTmLNWc
FjZuzun6QtOfWMDpfIhkjLGb5tAkzpnLGdkJDU/bHFrIttIrZykTNPOqAgWojUmX8bQNWITicTJjDTNKDU4oUwxpzpxaRBcJAak+IY5QaIfFokXGiSS3wfHYFWAmKAKRwTKGwBBiuUr+UwcRNShDr9eDUr5RIGVfd5WQVCyyYeBHtHqpZBt7gn6XbqmXdoWF3Qb172/tKXj5RgWjVnl2X530UgdVGlu9u372XoAZf+i+W0gO/o5Q/E6T9Djnoh3G
MFSYUxpgfc9J9wDiz1jLBWKsNY6yk4EO6vEZk/lXSFxqvs8DhYwVlsBZwQh6CYSgI+bAVxEiPguHAaYmJSDqzo8CBj1r9N2N5g6p1allGuoU8m9h2FdfB0RmLMOC7Rz5okKKNEHBdySQ4IQeg5bK1lFU+pvhWnqI6ZEessRzGDOSIu/JmZk2JC4FFDNwdOfh2bMDmOmCXuQNh76Anf1rcZS7eMsnKoy62yEpmmd2ThfjMwV2egKPMe48J4B9D9AA
rYNg4upD8VKGgcw/Q+8uhWGHVIeR6q1Hn6McfqnsNAaeOkwzG6P1Y0j6gXDVGnEKnAKe/fVlNS5n9LVrnw2oB3m7/QNcrOn50X1rFLAlDbA1F/jhUlzaQgM/SbE4m/niHp0hjblgU1R11Ix1X13wEN2NyyFN3N0t2t1t3t1NVU0tRdzzzd1Yxw1d0gBYXQn9zTWHFTxrQ03rSNibS6wtjoN1UMyH14lkW83GTxCVwgACyC00UR3r2a3QAAB0OAFC
```
%%

View File

@@ -0,0 +1,708 @@
# CAPSA SUITE proposal deep-dive
## Purpose of this note
This note turns the 70-page CAPSA SUITE proposal into a structured briefing you can use before your visit. It focuses on:
1. what the proposal is really trying to build
2. how the business and technical pieces fit together
3. where the project is strong, weak, risky, and AI-relevant
4. how you can position a **5 x 1h course** on AI tools, software engineering, and operations in a way that fits their actual needs
**Source:** CAPSA SUITE submitted proposal. Key themes and structure are taken from the uploaded proposal. See the original file for full detail.
---
## Executive summary
CAPSA SUITE is a proposal to build a modular software platform for **building decarbonization planning**. The core idea is to replace a slow, manual, consultant-heavy workflow with a more automated pipeline that:
- collects building data through a mobile app
- enriches it with public, private, and inferred data
- stores it in a **Digital Building Passport (DBP)**
- generates **Decarbonization and Retrofit Roadmaps (DRR)**
- estimates **whole-life carbon / scopes 1-3**
- supports implementation tracking and reporting
The proposal is really about **productizing expert consulting know-how**. ChillServices contributes software, app, and digital building passport capabilities. TEP contributes energy system, GIS, building stock modelling, techno-economic, carbon, and policy expertise.
The strongest insight in the proposal is this:
> decarbonization at scale is less blocked by lack of ideas and more blocked by fragmented data, inconsistent workflows, limited labor, and poor integration.
That is exactly where good AI tooling and good software practices can help.
---
## 1. Big-picture mental model
### CAPSA in one sentence
CAPSA SUITE is an attempt to industrialize building decarbonization planning for real estate portfolios.
### The whole system at a glance
```mermaid
flowchart LR
A[On-site building data collection\nmobile app, photos, guided input] --> B[Data completion and validation\npublic data, internal data, ML/statistical imputation]
B --> C[Digital Building Passport\ncentral repository and UI]
C --> D[DRR generation\ndecarbonization and retrofit roadmaps]
C --> E[Whole-life carbon estimation\nScopes 1-3]
D --> F[Investment packages\nfinance-ready planning]
E --> F
F --> G[Monitoring and reporting\nstatus, progress, carbon, costs]
```
This is the central logic of the proposal. The app is only the front door. The real value is in the connected system behind it.
---
## 2. The problem they are trying to solve
The proposal argues that the current market for building decarbonization planning is broken in five ways:
1. **manual data collection** is slow and expensive
2. **building data is incomplete and scattered**
3. **results depend too much on the individual expert**
4. **there are not enough qualified people** to scale the work
5. **owners of large portfolios** need consistent, comparable roadmaps, not ad-hoc reports
### Current pain points
```mermaid
flowchart TD
A[Building owner needs decarbonization plan] --> B[Manual site visit]
B --> C[Paper notes / fragmented records / missing data]
C --> D[Expert interpretation and guesswork]
D --> E[Manual calculations across multiple tools]
E --> F[Static report]
F --> G[Limited reuse, weak monitoring, poor scaling]
```
### CAPSA's intended replacement
```mermaid
flowchart TD
A[Building owner needs decarbonization plan] --> B[Structured mobile data capture]
B --> C[Automated completion and validation]
C --> D[Central Digital Building Passport]
D --> E[Automated DRR and carbon outputs]
E --> F[Monitoring, updates, portfolio comparison]
F --> G[Scalable, lower-cost, more consistent process]
```
This process shift is the heart of the proposal. They repeatedly describe it as a **process innovation**, not just a software feature set.
---
## 3. The product stack in detail
The proposal has five core building blocks.
### 3.1 Mobile data gathering module
The mobile app is meant to let non-experts or semi-experts collect data during regular site visits using guided flows, photos, and image recognition.
**What it is supposed to do**
- capture building and equipment data on site
- use image recognition for type plates, facades, windows, etc.
- reduce dependence on skilled auditors
- fit into real workflows of caretakers and service staff
**What matters strategically**
This is where AI becomes visible to the user. If the app is painful, the entire system collapses because all downstream outputs depend on upstream data quality.
### 3.2 Data completion and verification
The proposal assumes raw building data will almost always be incomplete, so it adds a second layer that fills gaps using:
- public registries
- GIS / 3D city data
- smart meter or other internal data
- synthetic building stock methods
- statistical and ML-based imputation
**Important:** this is not a nice-to-have. It is the real differentiator. Without it, CAPSA would just be another audit app.
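To make the imputation idea above concrete, here is a minimal sketch using tabular building data and scikit-learn. The library choice, column names, and values are illustrative assumptions, not taken from the proposal.
```python
# Minimal sketch: fill gaps in building records with model-based imputation.
# Assumes pandas + scikit-learn; column names and values are illustrative only.
import pandas as pd
from sklearn.experimental import enable_iterative_imputer  # noqa: F401
from sklearn.impute import IterativeImputer

buildings = pd.DataFrame({
    "year_built":            [1965, 1992, None, 2004],
    "heated_area_m2":        [450.0, None, 1200.0, 310.0],
    "heating_demand_kwh_m2": [None, 110.0, 145.0, 60.0],
})

# Iteratively model each column from the others to estimate missing values.
imputer = IterativeImputer(random_state=0)
completed = pd.DataFrame(imputer.fit_transform(buildings), columns=buildings.columns)

# In a real pipeline, imputed values should be flagged as "inferred"
# so downstream DRR logic and users can see their provenance.
print(completed.round(1))
```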
### 3.3 Digital Building Passport
The DBP is the central data model and user-facing repository. It stores, structures, synchronizes, and exposes building information. It also manages access and standard exports.
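One way to picture the DBP as a central data model is a typed record per building. This sketch uses Python dataclasses; all field names and categories are assumptions, not the proposal's actual schema.
```python
# Sketch of a Digital Building Passport record as a typed data model.
# Field names and structure are illustrative assumptions, not the real schema.
from dataclasses import dataclass, field
from datetime import date

@dataclass
class EquipmentItem:
    kind: str                  # e.g. "heat_pump", "gas_boiler"
    installed_year: int | None = None
    source: str = "on_site"    # on_site | public_registry | inferred

@dataclass
class BuildingPassport:
    building_id: str
    address: str
    year_built: int | None = None
    heated_area_m2: float | None = None
    equipment: list[EquipmentItem] = field(default_factory=list)
    last_updated: date = field(default_factory=date.today)

passport = BuildingPassport(
    building_id="CH-0001",
    address="Example street 1",
    year_built=1978,
    equipment=[EquipmentItem(kind="gas_boiler", installed_year=2003)],
)
```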
### 3.4 DRR generation module
The DRR module is meant to generate building-specific retrofit and decarbonization pathways based on rule-based logic, context data, lifecycle logic, and cost/carbon tradeoffs.
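As a toy illustration of the rule-based part, a few threshold rules can already produce an ordered measure list. The rules, thresholds, and measure names below are invented for illustration; the actual DRR logic in the proposal is far richer.
```python
# Toy sketch of rule-based retrofit roadmap generation.
# Rules, thresholds, and measure names are illustrative assumptions.
def generate_roadmap(building: dict) -> list[dict]:
    measures = []
    if building.get("heating_demand_kwh_m2", 0) > 100:
        measures.append({"measure": "envelope insulation", "priority": 1})
    if building.get("heating_system") == "oil_boiler":
        measures.append({"measure": "replace with heat pump", "priority": 1})
    if building.get("pv_installed") is False:
        measures.append({"measure": "rooftop PV", "priority": 2})
    # Order by priority so finance-ready packages can be built on top.
    return sorted(measures, key=lambda m: m["priority"])

roadmap = generate_roadmap(
    {"heating_demand_kwh_m2": 140, "heating_system": "oil_boiler", "pv_installed": False}
)
print(roadmap)
```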
### 3.5 Whole-life carbon and monitoring
CAPSA also wants to include embodied emissions and scope 3, then track progress after measures are planned or completed. This makes the platform more relevant for ESG, CSRD, and finance-linked use cases.
---
## 4. Work package map
The proposal is organized into four work packages over 30 months, with a total project cost of about EUR 1.86M.
```mermaid
gantt
title CAPSA SUITE high-level timeline
dateFormat X
axisFormat %s
section WP1 Project management and product design
WP1 active :a1, 0, 30
section WP2 Data collection and completion
WP2 active :a2, 0, 30
App alpha / beta :milestone, m1, 12, 0
App beta maturity :milestone, m2, 24, 0
DBP alpha :milestone, m3, 16, 0
DBP beta :milestone, m4, 30, 0
section WP3 DRR and carbon functionality
WP3 active :a3, 6, 24
TED Switzerland :milestone, m5, 12, 0
DRR alpha :milestone, m6, 12, 0
TED Germany :milestone, m7, 18, 0
DRR beta :milestone, m8, 20, 0
Monitoring :milestone, m9, 24, 0
section WP4 Customer-led implementation
WP4 active :a4, 0, 30
Engage first 10 clients :milestone, m10, 6, 0
Start whole-life carbon pilots :milestone, m11, 12, 0
Start Swiss anchor app pilot :milestone, m12, 9, 0
Bring SEAT to market :milestone, m13, 15, 0
DRR pilots :milestone, m14, 20, 0
```
This timeline is approximate and simplified from the work package and milestone sections.
### Budget split by work package
```mermaid
pie showData
title Total project cost by work package (EUR)
"WP1 Management and design" : 189522
"WP2 Data collection and completion" : 710598
"WP3 DRR generation and evaluation" : 636290
"WP4 Market-oriented implementation" : 323132
```
This shows where the effort sits: mostly in data/integration and DRR logic, not in generic project management.
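As a quick check, the four work package budgets sum to 189,522 + 710,598 + 636,290 + 323,132 = EUR 1,859,542, which matches the roughly EUR 1.86M total stated above.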
---
## 5. Who does what: ChillServices vs TEP
```mermaid
flowchart LR
A[ChillServices] --> A1[Mobile app]
A --> A2[Frontend and backend]
A --> A3[Digital Building Passport]
A --> A4[Commercial software delivery]
A --> A5[Project coordination]
B[TEP Energy] --> B1[Spatial energy analysis / GIS]
B --> B2[Building stock modelling]
B --> B3[Techno-economic database]
B --> B4[DRR logic and evaluation]
B --> B5[Whole-life carbon and policy/market context]
```
### Clean interpretation
- **ChillServices** is the software/product delivery side.
- **TEP** is the domain intelligence and modelling side.
That is why TEP is interesting for your visit: they likely have deep expertise but may still operate with many consulting-style, research-style, and semi-manual processes that could benefit hugely from better AI-enabled workflows.
This division of labor is described throughout the consortium and task sections.
---
## 6. What is actually innovative vs what is mostly integration
This is important because it tells you where to challenge them and where to help them.
### Truly valuable innovations
1. **Structured mobile data collection for decarbonization use cases**
2. **Gap filling via external, statistical, and model-based methods**
3. **Context-aware roadmap generation**, not just generic retrofit suggestions
4. **Linking scope 3 / whole-life carbon to retrofit planning**
5. **Turning consulting workflows into a reusable digital process**
### Less novel than they imply
1. APIs and database integration
2. standard product management and pilot loops
3. dashboards and exports
4. mobile + backend architecture in itself
### Honest assessment
The main novelty is not that each module is unprecedented. The novelty is that they are trying to **stitch together a coherent decision-making and execution system** for building decarbonization.
That means their main risk is not idea risk. It is **execution risk, data quality risk, and productization risk**.
---
## 7. The real business model
The proposal mixes two business models.
```mermaid
flowchart TD
A[CAPSA SUITE] --> B[Service-led revenue]
A --> C[Software / license-led revenue]
B --> B1[manual + semi-automated consulting projects]
B --> B2[roadmaps for portfolios]
B --> B3[data and analysis services]
C --> C1[subscription per building or unit]
C --> C2[module licensing to partners]
C --> C3[integration into partner ERP / platforms]
```
### Practical interpretation
They are not yet a pure product company. They are moving from:
**consulting and project work -> software-enabled services -> increasingly licensable modules**
That matters for your visit because the internal culture and engineering approach may still feel closer to:
- project delivery
- research and modelling
- custom client work
- prototype evolution
rather than:
- platform product engineering
- SRE / DevOps maturity
- disciplined release engineering
- strong internal developer platform standards
---
## 8. The strongest and weakest parts of the proposal
### Strongest parts
#### Strong point 1: It starts from a real bottleneck
The proposal correctly identifies that building decarbonization at scale is constrained by data fragmentation and labor intensity.
#### Strong point 2: It builds on existing assets
This is not zero-to-one fantasy. They already have app, DBP, SEAT, and building stock model components.
#### Strong point 3: It has plausible commercial channels
Hypoport, Viessmann/Carrier, housing associations, and existing clients make the go-to-market story more credible than typical grant proposals.
#### Strong point 4: It understands finance and policy pressure
The proposal is grounded in regulation, sustainability reporting, and financing use cases, not just engineering enthusiasm.
### Weakest parts
#### Weak point 1: It is over-ambitious
There are too many moving parts for 30 months and two SMEs.
#### Weak point 2: Data quality remains the Achilles heel
All outputs depend on incomplete and heterogeneous inputs. The proposal acknowledges this but still sounds optimistic.
#### Weak point 3: User trust may be harder than model accuracy
Even decent outputs can fail if users do not trust inferred data or rule-generated roadmaps.
#### Weak point 4: Productization is harder than consulting
Turning expert tacit knowledge into maintainable code, traceable logic, and reliable APIs is a very different discipline.
#### Weak point 5: Partner dependency is high
Commercial success depends a lot on external distribution and integration partners.
---
## 9. Where AI can create real leverage for them
This is probably the most useful section for your visit.
There are many possible AI use cases, but not all are equally valuable. The high-value ones are where AI reduces friction in **development**, **operations**, **knowledge work**, or **data-heavy workflows**.
### 9.1 AI opportunities inside the product
```mermaid
flowchart TD
A[Product AI opportunities] --> B[Image-assisted site data capture]
A --> C[Document extraction and normalization]
A --> D[Data validation and anomaly detection]
A --> E[Assisted missing-data inference explanations]
A --> F[Natural-language summary for management reports]
A --> G[Support assistant inside DBP]
```
#### Highest-value product AI candidates
1. **Photo-assisted field capture**
- detect type plates
- identify heating systems / windows / facade types
- propose structured entries from images
2. **Document ingestion**
- pull data from EPCs, PDFs, invoices, maintenance reports, permits
- normalize to DBP schema
3. **Quality control assistant** (a minimal sketch follows this list)
- flag inconsistent values
- detect suspicious combinations
- surface missing critical inputs before DRR generation
4. **Explainability layer**
- if data was inferred, explain from what sources and with what confidence
- this is essential for trust
5. **Report drafting**
- generate client-friendly summaries, management notes, and comparison text
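To make the quality-control idea concrete, here is a minimal sketch of rule-based input validation before DRR generation. Field names, ranges, and rules are assumptions for illustration.
```python
# Sketch: flag inconsistent or missing inputs before roadmap generation.
# Field names, plausibility ranges, and rules are illustrative assumptions.
REQUIRED_FIELDS = ["year_built", "heated_area_m2", "heating_system"]

def validate_building(record: dict) -> list[str]:
    issues = []
    for f in REQUIRED_FIELDS:
        if record.get(f) in (None, ""):
            issues.append(f"missing critical input: {f}")
    year = record.get("year_built")
    if year is not None and not (1800 <= year <= 2030):
        issues.append(f"implausible year_built: {year}")
    # Suspicious combination: a very new building with very high demand.
    if year and year > 2015 and record.get("heating_demand_kwh_m2", 0) > 150:
        issues.append("suspicious combination: new building with very high heating demand")
    return issues

print(validate_building({"year_built": 2021, "heating_demand_kwh_m2": 180}))
```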
### 9.2 AI opportunities in software development
This is likely even more relevant for your course.
```mermaid
flowchart LR
A[Developer workflow pain] --> B[AI coding assistants]
A --> C[Test generation]
A --> D[Refactoring and code comprehension]
A --> E[API and schema documentation]
A --> F[SQL / ETL / transformation help]
A --> G[Infra and CI/CD assistance]
```
#### Very practical targets
- speeding up backend boilerplate and integration code
- generating and improving unit/integration tests
- documenting APIs and data contracts
- helping with schema mapping and ETL logic
- debugging Docker, CI, and deployment problems
- accelerating GIS/data pipeline scripting
- improving developer onboarding
### 9.3 AI opportunities in operations and internal knowledge work
#### Internal knowledge base / RAG
They likely have knowledge scattered across:
- proposals
- methods
- reports
- partner docs
- public datasets
- policy documents
- code comments
- spreadsheets
A strong internal knowledge assistant could answer questions such as the following (a minimal retrieval sketch follows this list):
- what assumptions exist in the DRR engine
- where a cost coefficient comes from
- which German or Swiss data source feeds a module
- what changed between client versions
- how a model should be interpreted
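A minimal sketch of the retrieval half of such an assistant, using TF-IDF similarity over internal documents. The library choice (scikit-learn) and the document names and contents are assumptions; a production setup would use embeddings plus an LLM on top.
```python
# Sketch: retrieve the most relevant internal documents for a question.
# TF-IDF stands in for an embedding-based retriever; documents are invented.
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity

docs = {
    "drr_assumptions.md": "The DRR engine assumes a 30-year lifetime for envelope measures ...",
    "cost_sources.md": "Cost coefficients for heat pumps are taken from the Swiss techno-economic database ...",
    "release_notes.md": "Version 1.4 changed the default emission factors for district heating ...",
}

vectorizer = TfidfVectorizer()
matrix = vectorizer.fit_transform(docs.values())

def top_documents(question: str, k: int = 2) -> list[str]:
    scores = cosine_similarity(vectorizer.transform([question]), matrix)[0]
    ranked = sorted(zip(docs.keys(), scores), key=lambda x: x[1], reverse=True)
    return [name for name, _ in ranked[:k]]

print(top_documents("Where does the heat pump cost coefficient come from?"))
```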
#### Meeting and project ops
AI can help with:
- meeting summaries
- action extraction
- issue creation
- technical decision logs
- stakeholder update drafts
- synthesis of pilot feedback
#### DevOps and reliability
AI can help teams write and maintain:
- Dockerfiles
- CI pipelines
- Terraform / deployment config
- observability dashboards
- incident runbooks
- migration scripts
---
## 10. What good software practices they likely need most
Your value is probably not teaching generic “AI is cool.” It is showing how AI becomes useful **inside a disciplined engineering workflow**.
### The likely maturity gaps
Based on the proposal, they probably face some mix of the following:
1. knowledge in people's heads instead of systems
2. evolving prototypes without strong contracts or architecture boundaries
3. data models and assumptions spread across code, reports, and spreadsheets
4. limited automated tests around domain logic
5. custom client work making the product harder to standardize
6. unclear traceability from business rules to implementation
7. limited operational visibility once modules are deployed
### The software practices most relevant to them
```mermaid
mindmap
root((Good practices for CAPSA-like teams))
Architecture
clear module boundaries
API contracts
ownership per service
Data
schema versioning
lineage
assumptions marked explicitly
auditability
Quality
unit tests for rule logic
integration tests for data flows
golden datasets
regression testing
Operations
CI/CD
observability
error budgets
runbooks
Product process
design docs
decision logs
user feedback loops
release notes
AI use
human in the loop
reproducibility
traceability
secure usage policies
```
### Especially important for this product
#### A. Data lineage and explainability
Because CAPSA mixes measured, reported, inferred, and synthetic data, they need extremely clear provenance.
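A minimal sketch of field-level provenance, assuming every stored value carries its source category, confidence, and origin. Attribute names and categories are assumptions, not the project's data model.
```python
# Sketch: attach provenance to every stored value in the passport.
# Source categories and attribute names are illustrative assumptions.
from dataclasses import dataclass
from enum import Enum

class Source(str, Enum):
    MEASURED = "measured"
    REPORTED = "reported"
    INFERRED = "inferred"
    SYNTHETIC = "synthetic"

@dataclass
class TracedValue:
    value: float
    source: Source
    confidence: float   # 0.0 - 1.0
    origin: str         # dataset, model, or document the value came from

heated_area = TracedValue(
    value=1240.0,
    source=Source.INFERRED,
    confidence=0.7,
    origin="GIS footprint x storey-count model",
)
print(heated_area)
```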
#### B. Rule testing and regression protection
A DRR engine is only useful if changes do not silently break decision logic.
#### C. Strong interface contracts
Mobile app, DBP, SEAT, TED, and DRR modules should not drift semantically.
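One common way to keep module interfaces from drifting is a shared, versioned schema validated at every boundary. A sketch with pydantic follows; the library choice and field names are assumptions, not the project's actual contract.
```python
# Sketch: a shared, versioned contract for building data exchanged between
# modules (e.g. mobile app -> DBP). Library and fields are assumptions.
from pydantic import BaseModel, Field

class BuildingRecordV1(BaseModel):
    schema_version: str = "1.0"
    building_id: str
    heated_area_m2: float = Field(gt=0)
    heating_system: str
    year_built: int | None = None

# Consumers validate at the boundary; invalid payloads fail loudly here
# instead of silently corrupting downstream roadmaps.
record = BuildingRecordV1(
    building_id="CH-0001", heated_area_m2=850.0, heating_system="gas_boiler"
)
print(record)
```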
#### D. Golden test cases
They should have representative buildings and portfolios where expected outputs are known and compared over time.
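A minimal sketch of a golden-dataset regression test for the DRR logic, assuming pytest, a hypothetical `drr_engine.generate_roadmap` function, and fixture files on disk; none of these exist in the proposal and are placeholders.
```python
# Sketch: regression-protect the DRR engine with golden expected outputs.
# `drr_engine` and the fixture files under tests/golden are hypothetical.
import json
from pathlib import Path

from drr_engine import generate_roadmap  # hypothetical module under test

GOLDEN_DIR = Path("tests/golden")

def test_roadmaps_match_golden_outputs():
    for case in GOLDEN_DIR.glob("*.input.json"):
        building = json.loads(case.read_text())
        expected_path = case.parent / case.name.replace(".input.json", ".expected.json")
        expected = json.loads(expected_path.read_text())
        # Any change in the rule logic that alters a known roadmap fails here.
        assert generate_roadmap(building) == expected, f"regression in {case.name}"
```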
#### E. Release discipline
If pilot clients are involved, sloppy release processes will quickly destroy trust.
---
## 11. Suggested 5 x 1h course structure
This is the course I would suggest based on their proposal and likely needs.
### Overview
```mermaid
flowchart LR
A[Session 1\nAI for engineers and analysts] --> B[Session 2\nAI in software development workflows]
B --> C[Session 3\nGood software architecture and testing]
C --> D[Session 4\nDevOps, operations, and observability]
D --> E[Session 5\nApplying it directly to CAPSA use cases]
```
### Session 1 — AI tools that actually save time
**Goal:** demystify AI and show concrete productivity wins.
**Topics**
- where AI helps and where it hurts
- using coding assistants, chat tools, and CLI agents safely
- prompting for engineering vs research vs documentation
- AI for synthesis of policy, technical, and market documents
- using AI to summarize meetings, proposals, client feedback
**Hands-on examples for them**
- summarize a technical method paper into implementation tasks
- extract API requirements from a planning note
- generate a comparison of Swiss vs German data sources
### Session 2 — AI-assisted development workflows
**Goal:** make developers faster without destroying code quality.
**Topics**
- how to use Claude Code / ChatGPT / Cursor / Copilot style tools well
- codebase navigation and comprehension
- test generation and refactoring
- writing migrations, ETL code, and API docs
- patterns for secure and reviewable AI usage
**Hands-on examples for them**
- generate tests for a rule engine
- refactor a DBP schema mapper
- document a REST API from code
### Session 3 — Software engineering discipline for modular platforms
**Goal:** help them build a maintainable product instead of a pile of pilot features.
**Topics**
- module boundaries and service ownership
- contracts and schemas
- design docs and decision records
- regression testing for domain logic
- golden datasets and validation harnesses
**Hands-on examples for them**
- define interface contracts between DBP, TED, and DRR
- create a minimal testing strategy for building archetypes
### Session 4 — DevOps, deployment, and reliability
**Goal:** reduce operational pain and improve confidence.
**Topics**
- Docker and reproducible environments
- CI/CD basics that matter
- structured logging and tracing
- health checks, alerts, dashboards
- secrets handling and environment management
**Hands-on examples for them**
- what to log in a DRR generation pipeline (see the sketch after this list)
- how to monitor failures in data ingestion and image recognition
- how to deploy safely across staging and pilot environments
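As a starting point for the logging example above, here is a sketch of structured (JSON) logging in one DRR pipeline step using only the standard library. Event names and fields are assumptions.
```python
# Sketch: structured (JSON) logging for one DRR pipeline step.
# Event names and fields are illustrative assumptions.
import json
import logging
import time

logger = logging.getLogger("drr_pipeline")
logging.basicConfig(level=logging.INFO, format="%(message)s")

def log_event(event: str, **fields) -> None:
    logger.info(json.dumps({"event": event, "ts": time.time(), **fields}))

def run_drr_step(building_id: str) -> None:
    log_event("drr_generation_started", building_id=building_id)
    try:
        # ... roadmap generation would happen here ...
        log_event("drr_generation_finished", building_id=building_id,
                  measures=3, inferred_inputs=2)
    except Exception as exc:
        log_event("drr_generation_failed", building_id=building_id, error=str(exc))
        raise

run_drr_step("CH-0001")
```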
### Session 5 — Workshop on CAPSA-specific opportunities
**Goal:** make the training concrete and strategic.
**Topics**
- map current workflow pain points
- identify 3 quick wins and 2 longer-term bets
- design an internal AI use policy
- agree on a tooling stack and rollout plan
- decide where product AI vs internal AI makes sense
**Possible outputs of session 5**
- AI opportunity map
- engineering improvement roadmap
- coding assistant policy
- test strategy outline
- internal documentation / knowledge assistant pilot
---
## 12. Recommended positioning for your visit
You should not pitch yourself as “the AI guy.”
Better positioning:
> I can help you use AI and better engineering practices to reduce friction in development, improve reliability, and speed up the path from expert knowledge to robust software.
That fits their actual challenge much better.
### Suggested angle to emphasize
#### 1. AI is an accelerator, not the product strategy
The product still lives or dies by workflow, data quality, and trust.
#### 2. Good software practices are what make AI safe and useful
Without contracts, tests, and observability, AI speeds up the wrong things.
#### 3. Their edge is domain expertise
AI should help them operationalize and multiply that edge, not replace it.
#### 4. Start with internal leverage first
Before putting AI everywhere in the client-facing product, improve internal development, documentation, QA, and analysis workflows.
---
## 13. Smart questions to ask during the visit
### Product and workflow
- Where is the biggest bottleneck today: data capture, data cleaning, roadmap logic, or client delivery?
- Which part of CAPSA is already real product, and which part is still research/prototype?
- Where do users currently distrust the system the most?
### Engineering
- How are interfaces between modules specified today?
- How do you test that DRR outputs remain correct over time?
- Do you have golden datasets or benchmark buildings?
- How do you trace where a number in the final roadmap came from?
### Data and AI
- Which inputs are measured, user-entered, inferred, or synthetic?
- How do you explain inferred values to clients?
- Are there image/document extraction tasks where current manual effort is high?
### DevOps / operations
- How do you manage environments across pilots and production?
- What is currently painful in deployment or integration?
- What do you monitor today when a pipeline breaks?
### Team enablement
- Where do developers lose the most time?
- What kind of recurring documentation or reporting work is still manual?
- What would make a 5-session course clearly valuable for them?
---
## 14. What I would prioritize if they want quick wins
### Quick wins in 1-2 months
1. **AI-assisted internal documentation and code comprehension**
2. **Test generation for critical logic and APIs**
3. **Meeting summaries and action extraction**
4. **Template-driven design docs and ADRs**
5. **Basic CI quality gates and lint/test enforcement**
### Medium-term bets in 3-6 months
1. **Internal knowledge assistant over proposals, methods, code, and datasets**
2. **Golden test dataset framework for DRR validation**
3. **Image/document extraction workflow for site and report data**
4. **Observability for data pipelines and model assumptions**
### Longer-term product bets
1. **Explainable inference assistant inside the DBP**
2. **AI-assisted field capture in the mobile app**
3. **Natural-language portfolio analysis and reporting layer**
---
## 15. Final blunt assessment
This proposal is strategically smart. It attacks a real pain point in the building transition and sits at the intersection of policy, economics, energy, and software. It also fits TEP very well because it turns their domain know-how into a repeatable digital capability.
But it is also very ambitious. The hard problem is not “can we build some features?” It is:
- can we create a trustworthy system from messy real-world data
- can we encode expert judgment without making the system brittle
- can we keep the software maintainable as pilots and markets expand
- can we move from consulting habits to product discipline
That is exactly where your contribution can matter.
Your highest-value offer is likely:
1. help them use AI to reduce day-to-day friction
2. help them adopt stronger engineering habits
3. help them avoid turning a strong concept into an unmaintainable stack
---
## 16. Suggested one-line pitch for yourself
> I can help your team use AI and better software practices to move faster, document less painfully, test more reliably, and make complex domain logic easier to build and operate.
---
## 17. Source reminder
This briefing is based on the uploaded CAPSA SUITE proposal and summarizes its stated goals, structure, work packages, business case, and roles of ChillServices and TEP. Original file:

View File

@@ -0,0 +1,278 @@
# CAPSA SUITE — Short Project Overview
## Executive summary
CAPSA SUITE is a proposed software platform for **decarbonizing building portfolios at scale**.
Its core promise is simple:
> turn fragmented building data and manual consulting work into a structured digital workflow that helps owners, portfolio managers, investors, and banks make faster, more consistent, and more economically grounded retrofit decisions.
In practice, CAPSA aims to connect:
- **data collection in the field** (mobile app, photos, guided input)
- **central data storage** (digital building passport / secure database)
- **data completion and contextualization** (public registries, GIS, infrastructure and building-stock data)
- **decarbonization planning** (retrofit roadmaps)
- **carbon and cost evaluation** (including whole-life carbon and scope 1-3 thinking)
- **monitoring and portfolio steering** over time
The project is positioned as a bridge between **consulting, software, energy planning, and sustainable finance**.
---
## What CAPSA wants to achieve
CAPSA wants to make building decarbonization:
- **more scalable** — so large portfolios can be handled without depending entirely on scarce experts
- **more data-driven** — less guesswork, fewer scattered PDFs and Excel files
- **more standardized** — more comparable decisions across buildings and portfolios
- **more finance-ready** — clearer investment packages, better reporting, stronger basis for green finance and ESG work
- **more operational** — not just a strategy report, but a system that links planning to execution and monitoring
The public TEP positioning reinforces this. TEP describes CAPSA as a combination of **app, secure database, and insights portal** that lets users collect building information, manage it centrally, and turn it into actionable strategies across single buildings and entire portfolios. The emphasis is on connecting strategic and operational levels and integrating technical and economic considerations in one system.
---
## Why this project exists
The proposal is built on a real market pain point:
### The current situation
Building decarbonization is often still handled through:
- manual site visits
- scattered building records
- incomplete or outdated data
- expert judgment that varies from person to person
- isolated reports instead of continuous systems
That is workable for a few buildings, but weak for **large housing portfolios**, **banks**, **real-estate firms**, or **municipal planning contexts**.
### The pressure is rising
Owners and managers increasingly need:
- decarbonization strategies
- energy and emissions reporting
- investment prioritization
- portfolio-level decision support
- stronger links to policy and financing requirements
So CAPSA exists because the old way is too slow, too manual, and too inconsistent for the scale of the transition.
---
## The main selling point
The main selling point is **not just another building database**.
The real selling point is the **end-to-end process**:
1. capture building data efficiently
2. fill in missing information intelligently
3. combine building data with spatial and infrastructure context
4. generate decarbonization and retrofit pathways
5. estimate cost and carbon impact
6. store and monitor everything in a digital building passport
That is what makes CAPSA stronger than a normal audit workflow, a normal property-management system, or a normal GIS/planning tool.
TEP's public REAT description supports this broader system view. REAT is positioned as a spatial-energy analysis toolbox for municipalities, utilities, and regions that identifies local energy potentials and constraints while considering prices, policy, network paths, noise rules, water protection, and geothermal restrictions. That matters because CAPSA is not meant to operate in a vacuum — it depends on local infrastructure and feasibility context.
---
## How the system works in plain English
### 1. Data collection
Someone on site uses the CAPSA app to collect building data quickly and in a more structured way than paper or ad hoc notes.
### 2. Data completion
The system supplements missing information using external data sources, building typologies, public records, and model-based inference.
### 3. Central storage
All relevant information is stored in a **Digital Building Passport**, which becomes the central data layer.
### 4. Strategy generation
The platform generates **Decarbonization and Retrofit Roadmaps (DRRs)** for buildings or portfolios.
### 5. Cost and carbon assessment
Measures are evaluated economically and environmentally, including broader whole-life and scope-3 type thinking.
### 6. Monitoring
The roadmap is not supposed to remain a report on a shelf. CAPSA aims to track what is planned, in progress, and completed.
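Read as a pipeline, the six steps chain together roughly as in the sketch below. All functions are trivial placeholders standing in for the real modules, purely to show the flow.
```python
# Schematic sketch of the CAPSA workflow as one pipeline; every function is a
# placeholder for the corresponding real module.
def collect_on_site(visit):            # 1. data collection (app)
    return {"photos": visit.get("photos", 0)}

def complete_with_context(raw):        # 2. data completion
    return {**raw, "completed": True}

def store_in_dbp(data):                # 3. digital building passport
    return {**data, "passport_id": "CH-0001"}

def generate_drr(passport):            # 4. strategy generation
    return {**passport, "roadmap": ["envelope insulation", "heat pump"]}

def assess_cost_and_carbon(plan):      # 5. cost and carbon assessment
    return {**plan, "co2_saved_t_per_year": 42}

def track_progress(plan):              # 6. monitoring
    return {**plan, "status": "planned"}

result = track_progress(assess_cost_and_carbon(
    generate_drr(store_in_dbp(complete_with_context(collect_on_site({"photos": 12}))))
))
print(result)
```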
---
## Who it is for
CAPSA is aimed at:
- real estate portfolio owners
- housing associations
- property managers
- investors and banks
- municipalities and utilities in adjacent use cases
- consultants and implementation partners
The public TEP page explicitly frames CAPSA for **real-estate companies, owners, managers, investors, and banks**.
---
## Benefits
### Strategic benefits
- Creates a clearer basis for long-term decarbonization decisions
- Supports portfolio-level prioritization instead of one-off building-by-building firefighting
- Helps connect technical planning with capital allocation and reporting
### Operational benefits
- Faster and more structured data capture
- Better continuity of information over time
- Easier collaboration between field staff, analysts, managers, and external partners
- Less dependence on scattered documents and individual expert memory
### Economic benefits
- Lower manual effort per building over time
- Better comparability of measures and investment packages
- More useful basis for financing, budgeting, and sequencing works
### Climate and policy benefits
- Supports structured decarbonization instead of isolated measures
- Makes scope 1-3 style thinking more operational
- Helps align building action with wider policy and infrastructure context
### Business benefits for TEP and partners
- Converts consulting know-how into reusable product modules
- Opens recurring software and hybrid service revenue potential
- Strengthens positioning at the intersection of energy, policy, economics, and software
---
## Risks
### 1. Execution risk
This is an ambitious integration project. Mobile data collection, data completion, spatial context, carbon accounting, roadmap generation, APIs, and monitoring all need to work together well. That is hard.
### 2. Data quality risk
If the input data is weak, automated outputs may look polished but still be wrong or misleading. In this type of product, trust depends heavily on data quality.
### 3. Adoption risk
The building and real-estate sectors are often conservative. Even if the software works, adoption may be slower than expected because workflows, incentives, and responsibilities are messy.
### 4. Over-complexity risk
The product tries to serve many stakeholders at once: owners, managers, banks, cities, utilities, consultants. That can create a bloated product if priorities are not sharp.
### 5. Commercialization risk
A good prototype does not automatically become a scalable business. The gap between “useful in pilots” and “bought systematically” is often large.
### 6. Integration risk
CAPSA's value depends on connecting with external data sources, internal systems, and planning/finance processes. Integration work is usually harder and slower than expected.
---
## Drawbacks / likely weak points
These are not fatal flaws, but they matter.
### It may be too broad
CAPSA is trying to be:
- a data collection tool
- a digital passport
- a planning engine
- a carbon tool
- a monitoring system
- a finance-enabler
- partly also a spatial energy intelligence layer
That breadth is powerful, but also dangerous. It can dilute focus.
### The hardest part is trust, not software alone
Clients will only rely on automated pathways if they trust:
- the data
- the assumptions
- the prioritization logic
- the feasibility logic
- the cost estimates
### It may remain service-heavy longer than planned
Many such platforms claim SaaS-like scalability, but in practice remain dependent on consulting, onboarding, custom data work, and customer-specific adaptation.
### It competes with existing habits, not only competitors
Sometimes the real competitor is not another software company. It is:
- Excel
- PDFs
- consultants
- internal ad hoc workflows
- “good enough” manual processes
That is a real barrier.
---
## Different viewpoints on CAPSA
### 1. From the building owner's view
CAPSA is a way to gain a better overview, prioritize measures, and reduce chaos in building data and retrofit planning.
### 2. From the consultant's view
CAPSA can make consulting more efficient and repeatable, but also pressures traditional manual advisory work.
### 3. From the bank / investor view
CAPSA could become a more structured basis for understanding building risk, retrofit readiness, and decarbonization pathways.
### 4. From the municipality / utility view
The link to spatial-energy context matters because building-level decisions depend on heat networks, local restrictions, and renewable potentials. This is exactly where REAT complements the CAPSA vision.
### 5. From TEP's business view
CAPSA is a productization move: turning TEP's modelling, energy-planning, and strategy expertise into a more scalable digital offering.
### 6. From a software practitioner's view
CAPSA is really a systems-integration product. The key challenge is not only frontend or AI, but reliable workflows, data architecture, APIs, validation, and decision transparency.
---
## My blunt assessment
This project makes sense.
Its strongest idea is that **decarbonizing building portfolios is fundamentally a data-and-process problem**, not just an engineering calculation problem.
That is exactly why a platform like CAPSA can be valuable.
The upside is real:
- better decisions
- faster workflows
- stronger portfolio steering
- better connection between strategy, economics, and execution
But the danger is also real:
- too much scope
- too many moving parts
- too much dependence on data quality and change management
So the proposal is strong in **direction** and **market logic**, but success will depend heavily on **focus, product discipline, trust in outputs, and execution quality**.
---
## One-paragraph takeaway
CAPSA SUITE wants to become the digital backbone for building-portfolio decarbonization: a system that captures building data, enriches it with public and spatial context, translates it into retrofit and carbon strategies, and helps owners and managers act on those strategies over time. Its promise is speed, consistency, better decisions, and a stronger basis for finance and reporting. Its risks are complexity, adoption, and data trust. If it works, it can be a meaningful step from fragmented consulting work toward a scalable decarbonization operating system.
---
## Sources
- Eurostars project proposal PDF provided in this chat
- TEP Energy: CAPSA Suite page
- TEP Energy: REAT page

View File

@@ -0,0 +1,708 @@
# CAPSA SUITE proposal deep-dive
## Purpose of this note
This note turns the 70-page CAPSA SUITE proposal into a structured briefing you can use before your visit. It focuses on:
1. what the proposal is really trying to build
2. how the business and technical pieces fit together
3. where the project is strong, weak, risky, and AI-relevant
4. how you can position a **5 x 1h course** on AI tools, software engineering, and operations in a way that fits their actual needs
**Source:** CAPSA SUITE submitted proposal. Key themes and structure are taken from the uploaded proposal. See the original file for full detail.
---
## Executive summary
CAPSA SUITE is a proposal to build a modular software platform for **building decarbonization planning**. The core idea is to replace a slow, manual, consultant-heavy workflow with a more automated pipeline that:
- collects building data through a mobile app
- enriches it with public, private, and inferred data
- stores it in a **Digital Building Passport (DBP)**
- generates **Decarbonization and Retrofit Roadmaps (DRR)**
- estimates **whole-life carbon / scopes 1-3**
- supports implementation tracking and reporting
The proposal is really about **productizing expert consulting know-how**. ChillServices contributes software, app, and digital building passport capabilities. TEP contributes energy system, GIS, building stock modelling, techno-economic, carbon, and policy expertise.
The strongest insight in the proposal is this:
> decarbonization at scale is less blocked by lack of ideas and more blocked by fragmented data, inconsistent workflows, limited labor, and poor integration.
That is exactly where good AI tooling and good software practices can help.
---
## 1. Big-picture mental model
### CAPSA in one sentence
CAPSA SUITE is an attempt to industrialize building decarbonization planning for real estate portfolios.
### The whole system at a glance
```mermaid
flowchart LR
A[On-site building data collection\nmobile app, photos, guided input] --> B[Data completion and validation\npublic data, internal data, ML/statistical imputation]
B --> C[Digital Building Passport\ncentral repository and UI]
C --> D[DRR generation\ndecarbonization and retrofit roadmaps]
C --> E[Whole-life carbon estimation\nScopes 1-3]
D --> F[Investment packages\nfinance-ready planning]
E --> F
F --> G[Monitoring and reporting\nstatus, progress, carbon, costs]
```
This is the central logic of the proposal. The app is only the front door. The real value is in the connected system behind it.
---
## 2. The problem they are trying to solve
The proposal argues that the current market for building decarbonization planning is broken in five ways:
1. **manual data collection** is slow and expensive
2. **building data is incomplete and scattered**
3. **results depend too much on the individual expert**
4. **there are not enough qualified people** to scale the work
5. **owners of large portfolios** need consistent, comparable roadmaps, not ad-hoc reports
### Current pain points
```mermaid
flowchart TD
A[Building owner needs decarbonization plan] --> B[Manual site visit]
B --> C[Paper notes / fragmented records / missing data]
C --> D[Expert interpretation and guesswork]
D --> E[Manual calculations across multiple tools]
E --> F[Static report]
F --> G[Limited reuse, weak monitoring, poor scaling]
```
### CAPSA's intended replacement
```mermaid
flowchart TD
A[Building owner needs decarbonization plan] --> B[Structured mobile data capture]
B --> C[Automated completion and validation]
C --> D[Central Digital Building Passport]
D --> E[Automated DRR and carbon outputs]
E --> F[Monitoring, updates, portfolio comparison]
F --> G[Scalable, lower-cost, more consistent process]
```
This process shift is the heart of the proposal. They repeatedly describe it as a **process innovation**, not just a software feature set.
---
## 3. The product stack in detail
The proposal has five core building blocks.
### 3.1 Mobile data gathering module
The mobile app is meant to let non-experts or semi-experts collect data during regular site visits using guided flows, photos, and image recognition.
**What it is supposed to do**
- capture building and equipment data on site
- use image recognition for type plates, facades, windows, etc.
- reduce dependence on skilled auditors
- fit into real workflows of caretakers and service staff
**What matters strategically**
This is where AI becomes visible to the user. If the app is painful, the entire system collapses because all downstream outputs depend on upstream data quality.
### 3.2 Data completion and verification
The proposal assumes raw building data will almost always be incomplete, so it adds a second layer that fills gaps using:
- public registries
- GIS / 3D city data
- smart meter or other internal data
- synthetic building stock methods
- statistical and ML-based imputation
**Important:** this is not a nice-to-have. It is the real differentiator. Without it, CAPSA would just be another audit app.
### 3.3 Digital Building Passport
The DBP is the central data model and user-facing repository. It stores, structures, synchronizes, and exposes building information. It also manages access and standard exports.
### 3.4 DRR generation module
The DRR module is meant to generate building-specific retrofit and decarbonization pathways based on rule-based logic, context data, lifecycle logic, and cost/carbon tradeoffs.
### 3.5 Whole-life carbon and monitoring
CAPSA also wants to include embodied emissions and scope 3, then track progress after measures are planned or completed. This makes the platform more relevant for ESG, CSRD, and finance-linked use cases.
---
## 4. Work package map
The proposal is organized into four work packages over 30 months, with total project cost of about EUR 1.86M.
```mermaid
gantt
title CAPSA SUITE high-level timeline
dateFormat X
axisFormat %s
section WP1 Project management and product design
WP1 active :a1, 0, 30
section WP2 Data collection and completion
WP2 active :a2, 0, 30
App alpha / beta :milestone, m1, 12, 0
App beta maturity :milestone, m2, 24, 0
DBP alpha :milestone, m3, 16, 0
DBP beta :milestone, m4, 30, 0
section WP3 DRR and carbon functionality
WP3 active :a3, 6, 24
TED Switzerland :milestone, m5, 12, 0
DRR alpha :milestone, m6, 12, 0
TED Germany :milestone, m7, 18, 0
DRR beta :milestone, m8, 20, 0
Monitoring :milestone, m9, 24, 0
section WP4 Customer-led implementation
WP4 active :a4, 0, 30
Engage first 10 clients :milestone, m10, 6, 0
Start whole-life carbon pilots :milestone, m11, 12, 0
Start Swiss anchor app pilot :milestone, m12, 9, 0
Bring SEAT to market :milestone, m13, 15, 0
DRR pilots :milestone, m14, 20, 0
```
This timeline is approximate and simplified from the work package and milestone sections.
### Budget split by work package
```mermaid
pie showData
title Total project cost by work package (EUR)
"WP1 Management and design" : 189522
"WP2 Data collection and completion" : 710598
"WP3 DRR generation and evaluation" : 636290
"WP4 Market-oriented implementation" : 323132
```
This shows where the effort sits: mostly in data/integration and DRR logic, not in generic project management.
---
## 5. Who does what: ChillServices vs TEP
```mermaid
flowchart LR
A[ChillServices] --> A1[Mobile app]
A --> A2[Frontend and backend]
A --> A3[Digital Building Passport]
A --> A4[Commercial software delivery]
A --> A5[Project coordination]
B[TEP Energy] --> B1[Spatial energy analysis / GIS]
B --> B2[Building stock modelling]
B --> B3[Techno-economic database]
B --> B4[DRR logic and evaluation]
B --> B5[Whole-life carbon and policy/market context]
```
### Clean interpretation
- **ChillServices** is the software/product delivery side.
- **TEP** is the domain intelligence and modelling side.
That is why TEP is interesting for your visit: they likely have deep expertise but may still operate with many consulting-style, research-style, and semi-manual processes that could benefit hugely from better AI-enabled workflows.
This division of labor is described throughout the consortium and task sections.
---
## 6. What is actually innovative vs what is mostly integration
This is important because it tells you where to challenge them and where to help them.
### Truly valuable innovations
1. **Structured mobile data collection for decarbonization use cases**
2. **Gap filling via external, statistical, and model-based methods**
3. **Context-aware roadmap generation**, not just generic retrofit suggestions
4. **Linking scope 3 / whole-life carbon to retrofit planning**
5. **Turning consulting workflows into a reusable digital process**
### Less novel than they imply
1. APIs and database integration
2. standard product management and pilot loops
3. dashboards and exports
4. mobile + backend architecture in itself
### Honest assessment
The main novelty is not that each module is unprecedented. The novelty is that they are trying to **stitch together a coherent decision-making and execution system** for building decarbonization.
That means their main risk is not idea risk. It is **execution risk, data quality risk, and productization risk**.
---
## 7. The real business model
The proposal mixes two business models.
```mermaid
flowchart TD
A[CAPSA SUITE] --> B[Service-led revenue]
A --> C[Software / license-led revenue]
B --> B1[manual + semi-automated consulting projects]
B --> B2[roadmaps for portfolios]
B --> B3[data and analysis services]
C --> C1[subscription per building or unit]
C --> C2[module licensing to partners]
C --> C3[integration into partner ERP / platforms]
```
### Practical interpretation
They are not yet a pure product company. They are moving from:
**consulting and project work -> software-enabled services -> increasingly licensable modules**
That matters for your visit because the internal culture and engineering approach may still feel closer to:
- project delivery
- research and modelling
- custom client work
- prototype evolution
rather than:
- platform product engineering
- SRE / DevOps maturity
- disciplined release engineering
- strong internal developer platform standards
---
## 8. The strongest and weakest parts of the proposal
### Strongest parts
#### Strong point 1: It starts from a real bottleneck
The proposal correctly identifies that building decarbonization at scale is constrained by data fragmentation and labor intensity.
#### Strong point 2: It builds on existing assets
This is not zero-to-one fantasy. They already have app, DBP, SEAT, and building stock model components.
#### Strong point 3: It has plausible commercial channels
Hypoport, Viessmann/Carrier, housing associations, and existing clients make the go-to-market story more credible than typical grant proposals.
#### Strong point 4: It understands finance and policy pressure
The proposal is grounded in regulation, sustainability reporting, and financing use cases, not just engineering enthusiasm.
### Weakest parts
#### Weak point 1: It is over-ambitious
There are too many moving parts for 30 months and two SMEs.
#### Weak point 2: Data quality remains the Achilles heel
All outputs depend on incomplete and heterogeneous inputs. The proposal acknowledges this but still sounds optimistic.
#### Weak point 3: User trust may be harder than model accuracy
Even decent outputs can fail if users do not trust inferred data or rule-generated roadmaps.
#### Weak point 4: Productization is harder than consulting
Turning expert tacit knowledge into maintainable code, traceable logic, and reliable APIs is a very different discipline.
#### Weak point 5: Partner dependency is high
Commercial success depends a lot on external distribution and integration partners.
---
## 9. Where AI can create real leverage for them
This is probably the most useful section for your visit.
There are many possible AI use cases, but not all are equally valuable. The high-value ones are where AI reduces friction in **development**, **operations**, **knowledge work**, or **data-heavy workflows**.
### 9.1 AI opportunities inside the product
```mermaid
flowchart TD
A[Product AI opportunities] --> B[Image-assisted site data capture]
A --> C[Document extraction and normalization]
A --> D[Data validation and anomaly detection]
A --> E[Assisted missing-data inference explanations]
A --> F[Natural-language summary for management reports]
A --> G[Support assistant inside DBP]
```
#### Highest-value product AI candidates
1. **Photo-assisted field capture**
- detect type plates
- identify heating systems / windows / facade types
- propose structured entries from images
2. **Document ingestion**
- pull data from EPCs, PDFs, invoices, maintenance reports, permits
- normalize to DBP schema
3. **Quality control assistant**
- flag inconsistent values
- detect suspicious combinations
- surface missing critical inputs before DRR generation
4. **Explainability layer**
- if data was inferred, explain from what sources and with what confidence
- this is essential for trust
5. **Report drafting**
- generate client-friendly summaries, management notes, and comparison text
### 9.2 AI opportunities in software development
This is likely even more relevant for your course.
```mermaid
flowchart LR
A[Developer workflow pain] --> B[AI coding assistants]
A --> C[Test generation]
A --> D[Refactoring and code comprehension]
A --> E[API and schema documentation]
A --> F[SQL / ETL / transformation help]
A --> G[Infra and CI/CD assistance]
```
#### Very practical targets
- speeding up backend boilerplate and integration code
- generating and improving unit/integration tests
- documenting APIs and data contracts
- helping with schema mapping and ETL logic
- debugging Docker, CI, and deployment problems
- accelerating GIS/data pipeline scripting
- improving developer onboarding
### 9.3 AI opportunities in operations and internal knowledge work
#### Internal knowledge base / RAG
They likely have knowledge scattered across:
- proposals
- methods
- reports
- partner docs
- public datasets
- policy documents
- code comments
- spreadsheets
A strong internal knowledge assistant could answer:
- what assumptions exist in the DRR engine
- where a cost coefficient comes from
- which German or Swiss data source feeds a module
- what changed between client versions
- how a model should be interpreted
#### Meeting and project ops
AI can help with:
- meeting summaries
- action extraction
- issue creation
- technical decision logs
- stakeholder update drafts
- synthesis of pilot feedback
#### DevOps and reliability
AI can help teams write and maintain:
- Dockerfiles
- CI pipelines
- Terraform / deployment config
- observability dashboards
- incident runbooks
- migration scripts
---
## 10. What good software practices they likely need most
Your value is probably not teaching generic “AI is cool.” It is showing how AI becomes useful **inside a disciplined engineering workflow**.
### The likely maturity gaps
Based on the proposal, they probably face some mix of the following:
1. knowledge in peoples heads instead of systems
2. evolving prototypes without strong contracts or architecture boundaries
3. data models and assumptions spread across code, reports, and spreadsheets
4. limited automated tests around domain logic
5. custom client work making the product harder to standardize
6. unclear traceability from business rules to implementation
7. limited operational visibility once modules are deployed
### The software practices most relevant to them
```mermaid
mindmap
root((Good practices for CAPSA-like teams))
Architecture
clear module boundaries
API contracts
ownership per service
Data
schema versioning
lineage
assumptions marked explicitly
auditability
Quality
unit tests for rule logic
integration tests for data flows
golden datasets
regression testing
Operations
CI/CD
observability
error budgets
runbooks
Product process
design docs
decision logs
user feedback loops
release notes
AI use
human in the loop
reproducibility
traceability
secure usage policies
```
### Especially important for this product
#### A. Data lineage and explainability
Because CAPSA mixes measured, reported, inferred, and synthetic data, they need extremely clear provenance.
#### B. Rule testing and regression protection
A DRR engine is only useful if changes do not silently break decision logic.
#### C. Strong interface contracts
Mobile app, DBP, SEAT, TED, and DRR modules should not drift semantically.
#### D. Golden test cases
They should have representative buildings and portfolios where expected outputs are known and compared over time.
#### E. Release discipline
If pilot clients are involved, sloppy release processes will quickly destroy trust.
---
## 11. Suggested 5 x 1h course structure
This is the course I would suggest based on their proposal and likely needs.
### Overview
```mermaid
flowchart LR
A[Session 1\nAI for engineers and analysts] --> B[Session 2\nAI in software development workflows]
B --> C[Session 3\nGood software architecture and testing]
C --> D[Session 4\nDevOps, operations, and observability]
D --> E[Session 5\nApplying it directly to CAPSA use cases]
```
## Session 1 — AI tools that actually save time
**Goal:** demystify AI and show concrete productivity wins.
**Topics**
- where AI helps and where it hurts
- using coding assistants, chat tools, and CLI agents safely
- prompting for engineering vs research vs documentation
- AI for synthesis of policy, technical, and market documents
- using AI to summarize meetings, proposals, client feedback
**Hands-on examples for them**
- summarize a technical method paper into implementation tasks
- extract API requirements from a planning note
- generate a comparison of Swiss vs German data sources
## Session 2 — AI-assisted development workflows
**Goal:** make developers faster without destroying code quality.
**Topics**
- how to use Claude Code / ChatGPT / Cursor / Copilot style tools well
- codebase navigation and comprehension
- test generation and refactoring
- writing migrations, ETL code, and API docs
- patterns for secure and reviewable AI usage
**Hands-on examples for them**
- generate tests for a rule engine
- refactor a DBP schema mapper
- document a REST API from code
## Session 3 — Software engineering discipline for modular platforms
**Goal:** help them build a maintainable product instead of a pile of pilot features.
**Topics**
- module boundaries and service ownership
- contracts and schemas
- design docs and decision records
- regression testing for domain logic
- golden datasets and validation harnesses
**Hands-on examples for them**
- define interface contracts between DBP, TED, and DRR
- create a minimal testing strategy for building archetypes
## Session 4 — DevOps, deployment, and reliability
**Goal:** reduce operational pain and improve confidence.
**Topics**
- Docker and reproducible environments
- CI/CD basics that matter
- structured logging and tracing
- health checks, alerts, dashboards
- secrets handling and environment management
**Hands-on examples for them**
- what to log in a DRR generation pipeline
- how to monitor failures in data ingestion and image recognition
- how to deploy safely across staging and pilot environments
## Session 5 — Workshop on CAPSA-specific opportunities
**Goal:** make the training concrete and strategic.
**Topics**
- map current workflow pain points
- identify 3 quick wins and 2 longer-term bets
- design an internal AI use policy
- agree on a tooling stack and rollout plan
- decide where product AI vs internal AI makes sense
**Possible outputs of session 5**
- AI opportunity map
- engineering improvement roadmap
- coding assistant policy
- test strategy outline
- internal documentation / knowledge assistant pilot
---
## 12. Recommended positioning for your visit
You should not pitch yourself as “the AI guy.”
Better positioning:
> I can help you use AI and better engineering practices to reduce friction in development, improve reliability, and speed up the path from expert knowledge to robust software.
That fits their actual challenge much better.
### Suggested angle to emphasize
#### 1. AI is an accelerator, not the product strategy
The product still lives or dies by workflow, data quality, and trust.
#### 2. Good software practices are what make AI safe and useful
Without contracts, tests, and observability, AI speeds up the wrong things.
#### 3. Their edge is domain expertise
AI should help them operationalize and multiply that edge, not replace it.
#### 4. Start with internal leverage first
Before putting AI everywhere in the client-facing product, improve internal development, documentation, QA, and analysis workflows.
---
## 13. Smart questions to ask during the visit
### Product and workflow
- Where is the biggest bottleneck today: data capture, data cleaning, roadmap logic, or client delivery?
- Which part of CAPSA is already real product, and which part is still research/prototype?
- Where do users currently distrust the system the most?
### Engineering
- How are interfaces between modules specified today?
- How do you test that DRR outputs remain correct over time?
- Do you have golden datasets or benchmark buildings?
- How do you trace where a number in the final roadmap came from?
### Data and AI
- Which inputs are measured, user-entered, inferred, or synthetic?
- How do you explain inferred values to clients?
- Are there image/document extraction tasks where current manual effort is high?
### DevOps / operations
- How do you manage environments across pilots and production?
- What is currently painful in deployment or integration?
- What do you monitor today when a pipeline breaks?
### Team enablement
- Where do developers lose the most time?
- What kind of recurring documentation or reporting work is still manual?
- What would make a 5-session course clearly valuable for them?
---
## 14. What I would prioritize if they want quick wins
### Quick wins in 1-2 months
1. **AI-assisted internal documentation and code comprehension**
2. **Test generation for critical logic and APIs**
3. **Meeting summaries and action extraction**
4. **Template-driven design docs and ADRs** (template sketch after this list)
5. **Basic CI quality gates and lint/test enforcement**
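A lightweight ADR template in the common Nygard style could seed quick win 4; the headings below are a generic starting point to adapt to their own module and pilot terminology.

```markdown
# ADR-001: <short decision title>

## Status
Proposed | Accepted | Superseded by ADR-___

## Context
What problem or constraint forced this decision? Link the relevant
module (DBP, TED, DRR), pilot, or client requirement.

## Decision
The choice made, in one or two sentences.

## Consequences
What becomes easier, what becomes harder, and what we explicitly
accept as technical debt.
```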
### Medium-term bets in 3-6 months
1. **Internal knowledge assistant over proposals, methods, code, and datasets**
2. **Golden test dataset framework for DRR validation** (test sketch after this list)
3. **Image/document extraction workflow for site and report data**
4. **Observability for data pipelines and model assumptions**
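For bet 2, a sketch of how a golden-dataset framework could hook into pytest. The directory layout, the `generate_roadmap` entry point, and the tolerance are assumptions; the idea is to freeze a few benchmark buildings with expected roadmaps and fail CI whenever outputs drift without an approved update.

```python
# Sketch of a golden-dataset regression check for DRR outputs.
# Paths, entry point, and tolerance are illustrative assumptions.
import json
from pathlib import Path

import pytest

GOLDEN_DIR = Path("tests/golden")  # hypothetical location of frozen cases

def generate_roadmap(building: dict) -> dict:
    """Placeholder to be replaced by the real DRR pipeline call."""
    raise NotImplementedError

@pytest.mark.parametrize("case_dir", sorted(GOLDEN_DIR.glob("case_*")))
def test_roadmap_matches_golden(case_dir: Path):
    building = json.loads((case_dir / "input.json").read_text())
    expected = json.loads((case_dir / "expected_roadmap.json").read_text())
    actual = generate_roadmap(building)

    assert actual["measures"] == expected["measures"]
    # Numeric fields get a tolerance so harmless rounding does not break CI.
    assert actual["total_cost_chf"] == pytest.approx(expected["total_cost_chf"], rel=0.01)
```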
### Longer-term product bets
1. **Explainable inference assistant inside the DBP**
2. **AI-assisted field capture in the mobile app**
3. **Natural-language portfolio analysis and reporting layer**
---
## 15. Final blunt assessment
This proposal is strategically smart. It attacks a real pain point in the building transition and sits at the intersection of policy, economics, energy, and software. It also fits TEP very well because it turns their domain know-how into a repeatable digital capability.
But it is also very ambitious. The hard problem is not “can we build some features?” It is:
- can we create a trustworthy system from messy real-world data
- can we encode expert judgment without making the system brittle
- can we keep the software maintainable as pilots and markets expand
- can we move from consulting habits to product discipline
That is exactly where your contribution can matter.
Your highest-value offer is likely:
1. help them use AI to reduce day-to-day friction
2. help them adopt stronger engineering habits
3. help them avoid turning a strong concept into an unmaintainable stack
---
## 16. Suggested one-line pitch for yourself
> I can help your team use AI and better software practices to move faster, document less painfully, test more reliably, and make complex domain logic easier to build and operate.
---
## 17. Source reminder
This briefing is based on the uploaded CAPSA SUITE proposal and summarizes its stated goals, structure, work packages, business case, and roles of ChillServices and TEP. Original file: