From 8579430db94bd79c19a685e9745ec1c002b27b0e Mon Sep 17 00:00:00 2001
From: Patrick Pacher <patrick.pacher@gmail.com>
Date: Fri, 15 Mar 2024 11:55:13 +0100
Subject: [PATCH] wip: migrate to mono-repo. SPN has already been moved to spn/

---
 .ci-inject-internal-deps.sh                   |  22 -
 Gopkg.lock                                    | 405 --------
 Gopkg.toml                                    |  35 -
 assets/.gitkeep                               |   0
 cmds/hub/.gitignore                           |   3 +
 cmds/hub/build                                |  60 ++
 cmds/hub/main.go                              |  66 ++
 cmds/hub/pack                                 | 123 +++
 cmds/integrationtest/netstate.go              |   6 +-
 cmds/observation-hub/.gitignore               |   3 +
 cmds/observation-hub/Dockerfile               |  38 +
 cmds/observation-hub/apprise.go               | 257 +++++
 cmds/observation-hub/apprise_test.go          |  84 ++
 cmds/observation-hub/build                    |  60 ++
 cmds/observation-hub/main.go                  |  44 +
 cmds/observation-hub/notifications.tmpl       |  75 ++
 cmds/observation-hub/observe.go               | 407 ++++++++
 cmds/portmaster-core/main.go                  |  14 +-
 cmds/portmaster-start/main.go                 |   2 +-
 cmds/portmaster-start/recover_linux.go        |   2 +-
 cmds/portmaster-start/run.go                  |   2 +-
 cmds/portmaster-start/show.go                 |   2 +-
 cmds/portmaster-start/update.go               |   2 +-
 cmds/portmaster-start/verify.go               |   2 +-
 cmds/testsuite/.gitignore                     |   3 +
 cmds/testsuite/db.go                          |  33 +
 cmds/testsuite/login.go                       | 125 +++
 cmds/testsuite/main.go                        |  69 ++
 cmds/testsuite/report_healthcheck.go          |  51 +
 cmds/winkext-test/main.go                     |   5 +-
 desktop/angular/.gitkeep                      |   0
 desktop/tauri/.gitkeep                        |   0
 go.mod                                        |   9 +-
 go.sum                                        |   4 +
 pack                                          |   3 +
 packaging/linux/.gitkeep                      |   0
 packaging/windows/.gitkeep                    |   0
 runtime/.gitkeep                              |   1 +
 {broadcasts => service/broadcasts}/api.go     |   0
 {broadcasts => service/broadcasts}/data.go    |  12 +-
 .../broadcasts}/install_info.go               |   0
 {broadcasts => service/broadcasts}/module.go  |   0
 {broadcasts => service/broadcasts}/notify.go  |   2 +-
 {broadcasts => service/broadcasts}/state.go   |   0
 .../broadcasts}/testdata/README.md            |   0
 .../broadcasts}/testdata/notifications.yaml   |   0
 {compat => service/compat}/api.go             |   0
 {compat => service/compat}/callbacks.go       |   4 +-
 {compat => service/compat}/debug_default.go   |   0
 {compat => service/compat}/debug_linux.go     |   0
 {compat => service/compat}/debug_windows.go   |   0
 {compat => service/compat}/iptables.go        |   0
 {compat => service/compat}/iptables_test.go   |   0
 {compat => service/compat}/module.go          |   4 +-
 {compat => service/compat}/notify.go          |   4 +-
 {compat => service/compat}/selfcheck.go       |   6 +-
 {compat => service/compat}/wfpstate.go        |   0
 {compat => service/compat}/wfpstate_test.go   |   0
 {core => service/core}/api.go                 |  12 +-
 {core => service/core}/base/databases.go      |   0
 {core => service/core}/base/global.go         |   0
 {core => service/core}/base/logs.go           |   0
 {core => service/core}/base/module.go         |   0
 {core => service/core}/base/profiling.go      |   0
 {core => service/core}/config.go              |   0
 {core => service/core}/core.go                |  14 +-
 {core => service/core}/os_default.go          |   0
 {core => service/core}/os_windows.go          |   0
 {core => service/core}/pmtesting/testing.go   |   4 +-
 {detection => service/detection}/dga/lms.go   |   0
 .../detection}/dga/lms_test.go                |   0
 {firewall => service/firewall}/api.go         |  10 +-
 {firewall => service/firewall}/bypassing.go   |  10 +-
 {firewall => service/firewall}/config.go      |   4 +-
 {firewall => service/firewall}/dns.go         |  10 +-
 .../firewall}/inspection/inspection.go        |   4 +-
 .../interception/ebpf/bandwidth/bpf_bpfeb.go  |   0
 .../interception/ebpf/bandwidth/bpf_bpfeb.o   | Bin
 .../interception/ebpf/bandwidth/bpf_bpfel.go  |   0
 .../interception/ebpf/bandwidth/bpf_bpfel.o   | Bin
 .../interception/ebpf/bandwidth/interface.go  |   2 +-
 .../ebpf/connection_listener/bpf_bpfeb.go     |   0
 .../ebpf/connection_listener/bpf_bpfeb.o      | Bin
 .../ebpf/connection_listener/bpf_bpfel.go     |   0
 .../ebpf/connection_listener/bpf_bpfel.o      | Bin
 .../ebpf/connection_listener/worker.go        |   2 +-
 .../interception/ebpf/exec/bpf_bpfeb.go       |   0
 .../interception/ebpf/exec/bpf_bpfeb.o        | Bin
 .../interception/ebpf/exec/bpf_bpfel.go       |   0
 .../interception/ebpf/exec/bpf_bpfel.o        | Bin
 .../firewall}/interception/ebpf/exec/exec.go  |   0
 .../interception/ebpf/programs/bandwidth.c    |   0
 .../ebpf/programs/bpf/bpf_core_read.h         |   0
 .../ebpf/programs/bpf/bpf_helper_defs.h       |   0
 .../ebpf/programs/bpf/bpf_helpers.h           |   0
 .../ebpf/programs/bpf/bpf_tracing.h           |   0
 .../interception/ebpf/programs/exec.c         |   0
 .../interception/ebpf/programs/monitor.c      |   0
 .../interception/ebpf/programs/update.sh      |   0
 .../interception/ebpf/programs/vmlinux-x86.h  |   0
 .../interception/interception_default.go      |   4 +-
 .../interception/interception_linux.go        |  10 +-
 .../interception/interception_windows.go      |   8 +-
 .../firewall}/interception/introspection.go   |   0
 .../firewall}/interception/module.go          |   2 +-
 .../firewall}/interception/nfq/conntrack.go   |   4 +-
 .../firewall}/interception/nfq/nfq.go         |   4 +-
 .../firewall}/interception/nfq/packet.go      |   2 +-
 .../firewall}/interception/nfqueue_linux.go   |   6 +-
 .../firewall}/interception/packet_tracer.go   |   2 +-
 .../windowskext/bandwidth_stats.go            |   2 +-
 .../firewall}/interception/windowskext/doc.go |   0
 .../interception/windowskext/handler.go       |   6 +-
 .../interception/windowskext/kext.go          |   4 +-
 .../interception/windowskext/packet.go        |   4 +-
 .../interception/windowskext/service.go       |   0
 .../interception/windowskext/syscall.go       |   0
 {firewall => service/firewall}/master.go      |  18 +-
 {firewall => service/firewall}/module.go      |  10 +-
 .../firewall}/packet_handler.go               |  22 +-
 {firewall => service/firewall}/preauth.go     |   8 +-
 {firewall => service/firewall}/prompt.go      |   8 +-
 {firewall => service/firewall}/tunnel.go      |  24 +-
 {intel => service/intel}/block_reason.go      |   2 +-
 .../intel}/customlists/config.go              |   0
 {intel => service/intel}/customlists/lists.go |   2 +-
 .../intel}/customlists/module.go              |   0
 {intel => service/intel}/entity.go            |   6 +-
 {intel => service/intel}/filterlists/bloom.go |   0
 .../intel}/filterlists/cache_version.go       |   0
 .../intel}/filterlists/database.go            |   2 +-
 .../intel}/filterlists/decoder.go             |   0
 {intel => service/intel}/filterlists/index.go |   2 +-
 {intel => service/intel}/filterlists/keys.go  |   0
 .../intel}/filterlists/lookup.go              |   0
 .../intel}/filterlists/module.go              |   4 +-
 .../intel}/filterlists/module_test.go         |   0
 .../intel}/filterlists/record.go              |   0
 .../intel}/filterlists/updater.go             |   0
 .../intel}/geoip/country_info.go              |   0
 .../intel}/geoip/country_info_test.go         |   0
 {intel => service/intel}/geoip/database.go    |   2 +-
 {intel => service/intel}/geoip/location.go    |   0
 .../intel}/geoip/location_test.go             |   0
 {intel => service/intel}/geoip/lookup.go      |   0
 {intel => service/intel}/geoip/lookup_test.go |   0
 {intel => service/intel}/geoip/module.go      |   2 +-
 {intel => service/intel}/geoip/module_test.go |   2 +-
 {intel => service/intel}/geoip/regions.go     |   0
 .../intel}/geoip/regions_test.go              |   0
 {intel => service/intel}/module.go            |   2 +-
 {intel => service/intel}/resolver.go          |   0
 {nameserver => service/nameserver}/config.go  |   2 +-
 .../nameserver}/conflict.go                   |   4 +-
 {nameserver => service/nameserver}/failing.go |   4 +-
 {nameserver => service/nameserver}/metrics.go |   0
 {nameserver => service/nameserver}/module.go  |   6 +-
 .../nameserver}/nameserver.go                 |  12 +-
 .../nameserver}/nsutil/nsutil.go              |   0
 .../nameserver}/response.go                   |   2 +-
 {netenv => service/netenv}/addresses_test.go  |   0
 {netenv => service/netenv}/adresses.go        |   2 +-
 {netenv => service/netenv}/api.go             |   0
 {netenv => service/netenv}/dbus_linux.go      |   0
 {netenv => service/netenv}/dbus_linux_test.go |   0
 {netenv => service/netenv}/dialing.go         |   0
 {netenv => service/netenv}/environment.go     |   0
 .../netenv}/environment_default.go            |   0
 .../netenv}/environment_linux.go              |   2 +-
 .../netenv}/environment_linux_test.go         |   0
 .../netenv}/environment_test.go               |   0
 .../netenv}/environment_windows.go            |   0
 .../netenv}/environment_windows_test.go       |   0
 {netenv => service/netenv}/icmp_listener.go   |   2 +-
 {netenv => service/netenv}/location.go        |   6 +-
 .../netenv}/location_default.go               |   0
 {netenv => service/netenv}/location_test.go   |   0
 .../netenv}/location_windows.go               |   0
 {netenv => service/netenv}/main.go            |   0
 {netenv => service/netenv}/main_test.go       |   2 +-
 {netenv => service/netenv}/network-change.go  |   0
 {netenv => service/netenv}/notes.md           |   0
 {netenv => service/netenv}/online-status.go   |   4 +-
 .../netenv}/online-status_test.go             |   0
 {netenv => service/netenv}/os_android.go      |   3 +-
 {netenv => service/netenv}/os_default.go      |   0
 .../netquery}/active_chart_handler.go         |   2 +-
 .../netquery}/bandwidth_chart_handler.go      |   2 +-
 {netquery => service/netquery}/database.go    |  10 +-
 {netquery => service/netquery}/manager.go     |   2 +-
 {netquery => service/netquery}/module_api.go  |   2 +-
 {netquery => service/netquery}/orm/decoder.go |   0
 .../netquery}/orm/decoder_test.go             |   0
 {netquery => service/netquery}/orm/encoder.go |   0
 .../netquery}/orm/encoder_test.go             |   0
 .../netquery}/orm/query_runner.go             |   0
 .../netquery}/orm/schema_builder.go           |   0
 .../netquery}/orm/schema_builder_test.go      |   0
 {netquery => service/netquery}/query.go       |   2 +-
 .../netquery}/query_handler.go                |   2 +-
 .../netquery}/query_request.go                |   2 +-
 {netquery => service/netquery}/query_test.go  |   2 +-
 .../netquery}/runtime_query_runner.go         |   2 +-
 {network => service/network}/api.go           |  10 +-
 {network => service/network}/api_test.go      |   2 +-
 {network => service/network}/clean.go         |   6 +-
 {network => service/network}/connection.go    |  20 +-
 .../network}/connection_android.go            |  10 +-
 .../network}/connection_store.go              |   0
 {network => service/network}/database.go      |   2 +-
 {network => service/network}/dns.go           |   8 +-
 {network => service/network}/iphelper/get.go  |   2 +-
 .../network}/iphelper/iphelper.go             |   0
 .../network}/iphelper/tables.go               |   2 +-
 .../network}/iphelper/tables_test.go          |   0
 {network => service/network}/metrics.go       |   2 +-
 {network => service/network}/module.go        |   6 +-
 {network => service/network}/multicast.go     |   2 +-
 .../network}/netutils/address.go              |   2 +-
 {network => service/network}/netutils/dns.go  |   0
 .../network}/netutils/dns_test.go             |   0
 {network => service/network}/netutils/ip.go   |   0
 .../network}/netutils/ip_test.go              |   0
 .../network}/netutils/tcpassembly.go          |   0
 .../network}/packet/bandwidth.go              |   0
 {network => service/network}/packet/const.go  |   0
 .../network}/packet/info_only.go              |   0
 {network => service/network}/packet/packet.go |   0
 .../network}/packet/packetinfo.go             |   0
 {network => service/network}/packet/parse.go  |   0
 {network => service/network}/ports.go         |   0
 {network => service/network}/proc/findpid.go  |   2 +-
 .../network}/proc/pids_by_user.go             |   0
 {network => service/network}/proc/tables.go   |   2 +-
 .../network}/proc/tables_test.go              |   0
 .../network}/reference/ports.go               |   0
 .../network}/reference/protocols.go           |   0
 {network => service/network}/socket/socket.go |   0
 {network => service/network}/state/exists.go  |   4 +-
 {network => service/network}/state/info.go    |   4 +-
 {network => service/network}/state/lookup.go  |   6 +-
 .../network}/state/system_default.go          |   2 +-
 .../network}/state/system_linux.go            |   4 +-
 .../network}/state/system_windows.go          |   4 +-
 {network => service/network}/state/tcp.go     |   2 +-
 {network => service/network}/state/udp.go     |   6 +-
 {network => service/network}/status.go        |   0
 {process => service/process}/api.go           |   2 +-
 {process => service/process}/config.go        |   0
 {process => service/process}/database.go      |   2 +-
 {process => service/process}/doc.go           |   0
 {process => service/process}/executable.go    |   0
 {process => service/process}/find.go          |   8 +-
 {process => service/process}/module.go        |   2 +-
 {process => service/process}/module_test.go   |   2 +-
 {process => service/process}/process.go       |   2 +-
 .../process}/process_default.go               |   0
 {process => service/process}/process_linux.go |   0
 .../process}/process_windows.go               |   0
 {process => service/process}/profile.go       |   2 +-
 {process => service/process}/special.go       |   4 +-
 {process => service/process}/tags.go          |   2 +-
 .../process}/tags/appimage_unix.go            |   6 +-
 .../process}/tags/flatpak_unix.go             |   6 +-
 .../process}/tags/interpreter_unix.go         |   6 +-
 {process => service/process}/tags/net.go      |   4 +-
 .../process}/tags/snap_unix.go                |   6 +-
 .../process}/tags/svchost_windows.go          |   6 +-
 .../process}/tags/winstore_windows.go         |   6 +-
 {profile => service/profile}/active.go        |   0
 {profile => service/profile}/api.go           |   2 +-
 .../profile}/binmeta/convert.go               |   0
 .../profile}/binmeta/find_default.go          |   0
 .../profile}/binmeta/find_linux.go            |   0
 .../profile}/binmeta/find_linux_test.go       |   0
 .../profile}/binmeta/find_windows.go          |   0
 .../profile}/binmeta/find_windows_test.go     |   0
 {profile => service/profile}/binmeta/icon.go  |   0
 {profile => service/profile}/binmeta/icons.go |   0
 .../profile}/binmeta/locations_linux.go       |   0
 {profile => service/profile}/binmeta/name.go  |   0
 .../profile}/binmeta/name_test.go             |   0
 {profile => service/profile}/config-update.go |   4 +-
 {profile => service/profile}/config.go        |   6 +-
 {profile => service/profile}/database.go      |   0
 .../profile}/endpoints/annotations.go         |   0
 .../profile}/endpoints/endpoint-any.go        |   2 +-
 .../profile}/endpoints/endpoint-asn.go        |   2 +-
 .../profile}/endpoints/endpoint-continent.go  |   2 +-
 .../profile}/endpoints/endpoint-country.go    |   2 +-
 .../profile}/endpoints/endpoint-domain.go     |   4 +-
 .../profile}/endpoints/endpoint-ip.go         |   2 +-
 .../profile}/endpoints/endpoint-iprange.go    |   2 +-
 .../profile}/endpoints/endpoint-lists.go      |   2 +-
 .../profile}/endpoints/endpoint-scopes.go     |   4 +-
 .../profile}/endpoints/endpoint.go            |   4 +-
 .../profile}/endpoints/endpoint_test.go       |   0
 .../profile}/endpoints/endpoints.go           |   2 +-
 .../profile}/endpoints/endpoints_test.go      |   4 +-
 .../profile}/endpoints/reason.go              |   0
 {profile => service/profile}/fingerprint.go   |   0
 .../profile}/fingerprint_test.go              |   0
 {profile => service/profile}/framework.go     |   0
 .../profile}/framework_test.go                |   0
 {profile => service/profile}/get.go           |   0
 {profile => service/profile}/merge.go         |   2 +-
 {profile => service/profile}/meta.go          |   0
 {profile => service/profile}/migrations.go    |   2 +-
 {profile => service/profile}/module.go        |   6 +-
 .../profile}/profile-layered-provider.go      |   0
 .../profile}/profile-layered.go               |   4 +-
 {profile => service/profile}/profile.go       |   6 +-
 {profile => service/profile}/special.go       |   0
 {resolver => service/resolver}/api.go         |   0
 .../resolver}/block-detection.go              |   0
 {resolver => service/resolver}/compat.go      |   0
 {resolver => service/resolver}/config.go      |   6 +-
 {resolver => service/resolver}/doc.go         |   0
 {resolver => service/resolver}/failing.go     |   2 +-
 {resolver => service/resolver}/ipinfo.go      |   0
 {resolver => service/resolver}/ipinfo_test.go |   0
 {resolver => service/resolver}/main.go        |   6 +-
 {resolver => service/resolver}/main_test.go   |   2 +-
 {resolver => service/resolver}/metrics.go     |   0
 {resolver => service/resolver}/namerecord.go  |   0
 .../resolver}/namerecord_test.go              |   0
 {resolver => service/resolver}/resolve.go     |   2 +-
 .../resolver}/resolver-env.go                 |   4 +-
 .../resolver}/resolver-https.go               |   2 +-
 .../resolver}/resolver-mdns.go                |   4 +-
 .../resolver}/resolver-plain.go               |   2 +-
 .../resolver}/resolver-tcp.go                 |   2 +-
 {resolver => service/resolver}/resolver.go    |   4 +-
 .../resolver}/resolver_test.go                |   0
 {resolver => service/resolver}/resolvers.go   |   4 +-
 .../resolver}/resolvers_test.go               |   0
 {resolver => service/resolver}/reverse.go     |   0
 .../resolver}/reverse_test.go                 |   0
 {resolver => service/resolver}/rr_context.go  |   0
 {resolver => service/resolver}/rrcache.go     |   4 +-
 .../resolver}/rrcache_test.go                 |   0
 {resolver => service/resolver}/scopes.go      |   2 +-
 .../resolver}/test/resolving.bash             |   0
 {status => service/status}/module.go          |   2 +-
 {status => service/status}/provider.go        |   2 +-
 {status => service/status}/records.go         |   2 +-
 {status => service/status}/security_level.go  |   0
 {sync => service/sync}/module.go              |   0
 {sync => service/sync}/profile.go             |   4 +-
 {sync => service/sync}/setting_single.go      |   2 +-
 {sync => service/sync}/settings.go            |   2 +-
 {sync => service/sync}/util.go                |   0
 {ui => service/ui}/api.go                     |   0
 {ui => service/ui}/module.go                  |   0
 {ui => service/ui}/serve.go                   |   2 +-
 {updates => service/updates}/api.go           |   0
 .../updates}/assets/portmaster.service        |   0
 {updates => service/updates}/config.go        |   2 +-
 {updates => service/updates}/export.go        |   2 +-
 {updates => service/updates}/get.go           |   2 +-
 .../updates}/helper/electron.go               |   0
 .../updates}/helper/indexes.go                |   0
 .../updates}/helper/signing.go                |   0
 .../updates}/helper/updates.go                |   0
 {updates => service/updates}/main.go          |   2 +-
 {updates => service/updates}/notify.go        |   0
 .../updates}/os_integration_default.go        |   0
 .../updates}/os_integration_linux.go          |   0
 {updates => service/updates}/restart.go       |   0
 {updates => service/updates}/state.go         |   0
 {updates => service/updates}/upgrader.go      |   2 +-
 spn/TESTING.md                                |  26 +
 spn/TRADEMARKS                                |   5 +
 spn/access/account/auth.go                    |  65 ++
 spn/access/account/client.go                  |  14 +
 spn/access/account/types.go                   | 137 +++
 spn/access/account/view.go                    | 123 +++
 spn/access/api.go                             | 168 ++++
 spn/access/client.go                          | 550 +++++++++++
 spn/access/client_test.go                     |  79 ++
 spn/access/database.go                        | 258 +++++
 spn/access/features.go                        | 127 +++
 spn/access/module.go                          | 194 ++++
 spn/access/module_test.go                     |  13 +
 spn/access/notify.go                          | 105 ++
 spn/access/op_auth.go                         |  75 ++
 spn/access/storage.go                         | 131 +++
 spn/access/token/errors.go                    |  15 +
 spn/access/token/module_test.go               |  13 +
 spn/access/token/pblind.go                    | 552 +++++++++++
 spn/access/token/pblind_gen_test.go           |  39 +
 spn/access/token/pblind_test.go               | 260 +++++
 spn/access/token/registry.go                  | 116 +++
 spn/access/token/request.go                   | 244 +++++
 spn/access/token/request_test.go              | 125 +++
 spn/access/token/scramble.go                  | 240 +++++
 spn/access/token/scramble_gen_test.go         |  48 +
 spn/access/token/scramble_test.go             |  84 ++
 spn/access/token/token.go                     |  83 ++
 spn/access/token/token_test.go                |  33 +
 spn/access/zones.go                           | 257 +++++
 spn/cabin/config-public.go                    | 392 ++++++++
 spn/cabin/database.go                         |  98 ++
 spn/cabin/identity.go                         | 311 ++++++
 spn/cabin/identity_test.go                    | 129 +++
 spn/cabin/keys.go                             | 179 ++++
 spn/cabin/keys_test.go                        |  43 +
 spn/cabin/module.go                           |  26 +
 spn/cabin/module_test.go                      |  13 +
 spn/cabin/verification.go                     | 157 +++
 spn/cabin/verification_test.go                | 127 +++
 spn/captain/api.go                            |  68 ++
 spn/captain/bootstrap.go                      | 152 +++
 spn/captain/client.go                         | 506 ++++++++++
 spn/captain/config.go                         | 253 +++++
 spn/captain/establish.go                      | 105 ++
 spn/captain/exceptions.go                     |  28 +
 spn/captain/gossip.go                         |  38 +
 spn/captain/hooks.go                          |  47 +
 spn/captain/intel.go                          | 108 +++
 spn/captain/module.go                         | 219 +++++
 spn/captain/navigation.go                     | 306 ++++++
 spn/captain/op_gossip.go                      | 156 +++
 spn/captain/op_gossip_query.go                | 195 ++++
 spn/captain/op_publish.go                     | 183 ++++
 spn/captain/piers.go                          | 131 +++
 spn/captain/public.go                         | 247 +++++
 spn/captain/status.go                         | 154 +++
 spn/conf/map.go                               |  17 +
 spn/conf/mode.go                              |  30 +
 spn/conf/networks.go                          | 110 +++
 spn/conf/version.go                           |   9 +
 spn/crew/connect.go                           | 482 +++++++++
 spn/crew/metrics.go                           | 223 +++++
 spn/crew/module.go                            |  44 +
 spn/crew/module_test.go                       |  13 +
 spn/crew/op_connect.go                        | 585 +++++++++++
 spn/crew/op_connect_test.go                   | 115 +++
 spn/crew/op_ping.go                           | 149 +++
 spn/crew/op_ping_test.go                      |  32 +
 spn/crew/policy.go                            |  51 +
 spn/crew/sticky.go                            | 176 ++++
 spn/docks/bandwidth_test.go                   |  90 ++
 spn/docks/controller.go                       | 100 ++
 spn/docks/crane.go                            | 913 ++++++++++++++++++
 spn/docks/crane_establish.go                  |  81 ++
 spn/docks/crane_init.go                       | 339 +++++++
 spn/docks/crane_netstate.go                   | 131 +++
 spn/docks/crane_terminal.go                   | 122 +++
 spn/docks/crane_test.go                       | 267 +++++
 spn/docks/crane_verify.go                     |  85 ++
 spn/docks/cranehooks.go                       |  46 +
 spn/docks/hub_import.go                       | 189 ++++
 spn/docks/measurements.go                     | 108 +++
 spn/docks/metrics.go                          | 404 ++++++++
 spn/docks/module.go                           | 117 +++
 spn/docks/module_test.go                      |  16 +
 spn/docks/op_capacity.go                      | 356 +++++++
 spn/docks/op_capacity_test.go                 |  85 ++
 spn/docks/op_expand.go                        | 393 ++++++++
 spn/docks/op_latency.go                       | 298 ++++++
 spn/docks/op_latency_test.go                  |  59 ++
 spn/docks/op_sync_state.go                    | 150 +++
 spn/docks/op_whoami.go                        | 135 +++
 spn/docks/op_whoami_test.go                   |  24 +
 spn/docks/terminal_expansion.go               | 150 +++
 spn/docks/terminal_expansion_test.go          | 305 ++++++
 spn/hub/database.go                           | 202 ++++
 spn/hub/errors.go                             |  21 +
 spn/hub/format.go                             |  69 ++
 spn/hub/format_test.go                        |  81 ++
 spn/hub/hub.go                                | 435 +++++++++
 spn/hub/hub_test.go                           |  79 ++
 spn/hub/intel.go                              | 191 ++++
 spn/hub/intel_override.go                     |  17 +
 spn/hub/measurements.go                       | 231 +++++
 spn/hub/status.go                             | 308 ++++++
 spn/hub/transport.go                          | 152 +++
 spn/hub/transport_test.go                     | 147 +++
 spn/hub/truststores.go                        |  17 +
 spn/hub/update.go                             | 524 ++++++++++
 spn/hub/update_test.go                        |  70 ++
 spn/navigator/api.go                          | 672 +++++++++++++
 spn/navigator/api_route.go                    | 396 ++++++++
 spn/navigator/costs.go                        |  72 ++
 spn/navigator/database.go                     | 164 ++++
 spn/navigator/findnearest.go                  | 441 +++++++++
 spn/navigator/findnearest_test.go             | 124 +++
 spn/navigator/findroutes.go                   | 234 +++++
 spn/navigator/findroutes_test.go              |  54 ++
 spn/navigator/intel.go                        | 222 +++++
 spn/navigator/map.go                          | 165 ++++
 spn/navigator/map_stats.go                    |  85 ++
 spn/navigator/map_test.go                     | 279 ++++++
 spn/navigator/measurements.go                 | 144 +++
 spn/navigator/metrics.go                      | 177 ++++
 spn/navigator/module.go                       | 129 +++
 spn/navigator/module_test.go                  |  13 +
 spn/navigator/optimize.go                     | 388 ++++++++
 spn/navigator/optimize_region.go              | 224 +++++
 spn/navigator/optimize_test.go                | 188 ++++
 spn/navigator/options.go                      | 330 +++++++
 spn/navigator/pin.go                          | 269 ++++++
 spn/navigator/pin_export.go                   |  98 ++
 spn/navigator/region.go                       | 231 +++++
 spn/navigator/route.go                        | 221 +++++
 spn/navigator/routing-profiles.go             | 162 ++++
 spn/navigator/sort.go                         | 141 +++
 spn/navigator/sort_test.go                    | 112 +++
 spn/navigator/state.go                        | 426 ++++++++
 spn/navigator/state_test.go                   |  31 +
 spn/navigator/testdata/main-intel.yml         | 234 +++++
 spn/navigator/update.go                       | 776 +++++++++++++++
 spn/patrol/domains.go                         | 311 ++++++
 spn/patrol/domains_test.go                    |  67 ++
 spn/patrol/http.go                            | 186 ++++
 spn/patrol/module.go                          |  32 +
 spn/ships/connection_test.go                  | 131 +++
 spn/ships/http.go                             | 230 +++++
 spn/ships/http_info.go                        |  83 ++
 spn/ships/http_info_page.html.tmpl            | 112 +++
 spn/ships/http_info_test.go                   |  26 +
 spn/ships/http_shared.go                      | 188 ++++
 spn/ships/http_shared_test.go                 |  33 +
 spn/ships/kcp.go                              |  81 ++
 spn/ships/launch.go                           | 114 +++
 spn/ships/masking.go                          |  63 ++
 spn/ships/module.go                           |  20 +
 spn/ships/mtu.go                              |  47 +
 spn/ships/pier.go                             |  82 ++
 spn/ships/registry.go                         |  55 ++
 spn/ships/ship.go                             | 220 +++++
 spn/ships/tcp.go                              | 145 +++
 spn/ships/testship.go                         | 154 +++
 spn/ships/testship_test.go                    |  58 ++
 spn/ships/virtual_network.go                  |  43 +
 spn/sluice/module.go                          |  46 +
 spn/sluice/packet_listener.go                 | 277 ++++++
 spn/sluice/request.go                         |  78 ++
 spn/sluice/sluice.go                          | 229 +++++
 spn/sluice/sluices.go                         |  47 +
 spn/sluice/udp_listener.go                    | 334 +++++++
 spn/spn.go                                    |   1 +
 spn/terminal/control_flow.go                  | 454 +++++++++
 spn/terminal/defaults.go                      |  36 +
 spn/terminal/errors.go                        | 221 +++++
 spn/terminal/fmt.go                           |  27 +
 spn/terminal/init.go                          | 210 ++++
 spn/terminal/metrics.go                       | 117 +++
 spn/terminal/module.go                        |  80 ++
 spn/terminal/module_test.go                   |  13 +
 spn/terminal/msg.go                           | 106 ++
 spn/terminal/msgtypes.go                      |  66 ++
 spn/terminal/operation.go                     | 332 +++++++
 spn/terminal/operation_base.go                | 185 ++++
 spn/terminal/operation_counter.go             | 255 +++++
 spn/terminal/permission.go                    |  50 +
 spn/terminal/rate_limit.go                    |  39 +
 spn/terminal/session.go                       | 166 ++++
 spn/terminal/session_test.go                  |  94 ++
 spn/terminal/terminal.go                      | 909 +++++++++++++++++
 spn/terminal/terminal_test.go                 | 311 ++++++
 spn/terminal/testing.go                       | 243 +++++
 spn/terminal/upstream.go                      |  16 +
 spn/test                                      | 168 ++++
 spn/tools/Dockerfile                          |  23 +
 spn/tools/container-init.sh                   |  30 +
 spn/tools/install.sh                          | 326 +++++++
 spn/tools/start-checksum.txt                  |   1 +
 spn/tools/sysctl.conf                         |  45 +
 spn/unit/doc.go                               |  13 +
 spn/unit/scheduler.go                         | 358 +++++++
 spn/unit/scheduler_stats.go                   |  87 ++
 spn/unit/scheduler_test.go                    |  51 +
 spn/unit/unit.go                              | 103 ++
 spn/unit/unit_debug.go                        |  86 ++
 spn/unit/unit_test.go                         | 104 ++
 577 files changed, 35981 insertions(+), 818 deletions(-)
 delete mode 100755 .ci-inject-internal-deps.sh
 delete mode 100644 Gopkg.lock
 delete mode 100644 Gopkg.toml
 create mode 100644 assets/.gitkeep
 create mode 100644 cmds/hub/.gitignore
 create mode 100755 cmds/hub/build
 create mode 100644 cmds/hub/main.go
 create mode 100755 cmds/hub/pack
 create mode 100644 cmds/observation-hub/.gitignore
 create mode 100644 cmds/observation-hub/Dockerfile
 create mode 100644 cmds/observation-hub/apprise.go
 create mode 100644 cmds/observation-hub/apprise_test.go
 create mode 100755 cmds/observation-hub/build
 create mode 100644 cmds/observation-hub/main.go
 create mode 100644 cmds/observation-hub/notifications.tmpl
 create mode 100644 cmds/observation-hub/observe.go
 create mode 100644 cmds/testsuite/.gitignore
 create mode 100644 cmds/testsuite/db.go
 create mode 100644 cmds/testsuite/login.go
 create mode 100644 cmds/testsuite/main.go
 create mode 100644 cmds/testsuite/report_healthcheck.go
 create mode 100644 desktop/angular/.gitkeep
 create mode 100644 desktop/tauri/.gitkeep
 create mode 100644 packaging/linux/.gitkeep
 create mode 100644 packaging/windows/.gitkeep
 create mode 100644 runtime/.gitkeep
 rename {broadcasts => service/broadcasts}/api.go (100%)
 rename {broadcasts => service/broadcasts}/data.go (91%)
 rename {broadcasts => service/broadcasts}/install_info.go (100%)
 rename {broadcasts => service/broadcasts}/module.go (100%)
 rename {broadcasts => service/broadcasts}/notify.go (99%)
 rename {broadcasts => service/broadcasts}/state.go (100%)
 rename {broadcasts => service/broadcasts}/testdata/README.md (100%)
 rename {broadcasts => service/broadcasts}/testdata/notifications.yaml (100%)
 rename {compat => service/compat}/api.go (100%)
 rename {compat => service/compat}/callbacks.go (90%)
 rename {compat => service/compat}/debug_default.go (100%)
 rename {compat => service/compat}/debug_linux.go (100%)
 rename {compat => service/compat}/debug_windows.go (100%)
 rename {compat => service/compat}/iptables.go (100%)
 rename {compat => service/compat}/iptables_test.go (100%)
 rename {compat => service/compat}/module.go (97%)
 rename {compat => service/compat}/notify.go (98%)
 rename {compat => service/compat}/selfcheck.go (97%)
 rename {compat => service/compat}/wfpstate.go (100%)
 rename {compat => service/compat}/wfpstate_test.go (100%)
 rename {core => service/core}/api.go (96%)
 rename {core => service/core}/base/databases.go (100%)
 rename {core => service/core}/base/global.go (100%)
 rename {core => service/core}/base/logs.go (100%)
 rename {core => service/core}/base/module.go (100%)
 rename {core => service/core}/base/profiling.go (100%)
 rename {core => service/core}/config.go (100%)
 rename {core => service/core}/core.go (84%)
 rename {core => service/core}/os_default.go (100%)
 rename {core => service/core}/os_windows.go (100%)
 rename {core => service/core}/pmtesting/testing.go (96%)
 rename {detection => service/detection}/dga/lms.go (100%)
 rename {detection => service/detection}/dga/lms_test.go (100%)
 rename {firewall => service/firewall}/api.go (96%)
 rename {firewall => service/firewall}/bypassing.go (87%)
 rename {firewall => service/firewall}/config.go (98%)
 rename {firewall => service/firewall}/dns.go (97%)
 rename {firewall => service/firewall}/inspection/inspection.go (95%)
 rename {firewall => service/firewall}/interception/ebpf/bandwidth/bpf_bpfeb.go (100%)
 rename {firewall => service/firewall}/interception/ebpf/bandwidth/bpf_bpfeb.o (100%)
 rename {firewall => service/firewall}/interception/ebpf/bandwidth/bpf_bpfel.go (100%)
 rename {firewall => service/firewall}/interception/ebpf/bandwidth/bpf_bpfel.o (100%)
 rename {firewall => service/firewall}/interception/ebpf/bandwidth/interface.go (98%)
 rename {firewall => service/firewall}/interception/ebpf/connection_listener/bpf_bpfeb.go (100%)
 rename {firewall => service/firewall}/interception/ebpf/connection_listener/bpf_bpfeb.o (100%)
 rename {firewall => service/firewall}/interception/ebpf/connection_listener/bpf_bpfel.go (100%)
 rename {firewall => service/firewall}/interception/ebpf/connection_listener/bpf_bpfel.o (100%)
 rename {firewall => service/firewall}/interception/ebpf/connection_listener/worker.go (98%)
 rename {firewall => service/firewall}/interception/ebpf/exec/bpf_bpfeb.go (100%)
 rename {firewall => service/firewall}/interception/ebpf/exec/bpf_bpfeb.o (100%)
 rename {firewall => service/firewall}/interception/ebpf/exec/bpf_bpfel.go (100%)
 rename {firewall => service/firewall}/interception/ebpf/exec/bpf_bpfel.o (100%)
 rename {firewall => service/firewall}/interception/ebpf/exec/exec.go (100%)
 rename {firewall => service/firewall}/interception/ebpf/programs/bandwidth.c (100%)
 rename {firewall => service/firewall}/interception/ebpf/programs/bpf/bpf_core_read.h (100%)
 rename {firewall => service/firewall}/interception/ebpf/programs/bpf/bpf_helper_defs.h (100%)
 rename {firewall => service/firewall}/interception/ebpf/programs/bpf/bpf_helpers.h (100%)
 rename {firewall => service/firewall}/interception/ebpf/programs/bpf/bpf_tracing.h (100%)
 rename {firewall => service/firewall}/interception/ebpf/programs/exec.c (100%)
 rename {firewall => service/firewall}/interception/ebpf/programs/monitor.c (100%)
 rename {firewall => service/firewall}/interception/ebpf/programs/update.sh (100%)
 rename {firewall => service/firewall}/interception/ebpf/programs/vmlinux-x86.h (100%)
 rename {firewall => service/firewall}/interception/interception_default.go (87%)
 rename {firewall => service/firewall}/interception/interception_linux.go (77%)
 rename {firewall => service/firewall}/interception/interception_windows.go (88%)
 rename {firewall => service/firewall}/interception/introspection.go (100%)
 rename {firewall => service/firewall}/interception/module.go (96%)
 rename {firewall => service/firewall}/interception/nfq/conntrack.go (97%)
 rename {firewall => service/firewall}/interception/nfq/nfq.go (98%)
 rename {firewall => service/firewall}/interception/nfq/packet.go (98%)
 rename {firewall => service/firewall}/interception/nfqueue_linux.go (98%)
 rename {firewall => service/firewall}/interception/packet_tracer.go (95%)
 rename {firewall => service/firewall}/interception/windowskext/bandwidth_stats.go (98%)
 rename {firewall => service/firewall}/interception/windowskext/doc.go (100%)
 rename {firewall => service/firewall}/interception/windowskext/handler.go (97%)
 rename {firewall => service/firewall}/interception/windowskext/kext.go (98%)
 rename {firewall => service/firewall}/interception/windowskext/packet.go (97%)
 rename {firewall => service/firewall}/interception/windowskext/service.go (100%)
 rename {firewall => service/firewall}/interception/windowskext/syscall.go (100%)
 rename {firewall => service/firewall}/master.go (97%)
 rename {firewall => service/firewall}/module.go (94%)
 rename {firewall => service/firewall}/packet_handler.go (97%)
 rename {firewall => service/firewall}/preauth.go (93%)
 rename {firewall => service/firewall}/prompt.go (97%)
 rename {firewall => service/firewall}/tunnel.go (91%)
 rename {intel => service/intel}/block_reason.go (97%)
 rename {intel => service/intel}/customlists/config.go (100%)
 rename {intel => service/intel}/customlists/lists.go (98%)
 rename {intel => service/intel}/customlists/module.go (100%)
 rename {intel => service/intel}/entity.go (98%)
 rename {intel => service/intel}/filterlists/bloom.go (100%)
 rename {intel => service/intel}/filterlists/cache_version.go (100%)
 rename {intel => service/intel}/filterlists/database.go (99%)
 rename {intel => service/intel}/filterlists/decoder.go (100%)
 rename {intel => service/intel}/filterlists/index.go (99%)
 rename {intel => service/intel}/filterlists/keys.go (100%)
 rename {intel => service/intel}/filterlists/lookup.go (100%)
 rename {intel => service/intel}/filterlists/module.go (96%)
 rename {intel => service/intel}/filterlists/module_test.go (100%)
 rename {intel => service/intel}/filterlists/record.go (100%)
 rename {intel => service/intel}/filterlists/updater.go (100%)
 rename {intel => service/intel}/geoip/country_info.go (100%)
 rename {intel => service/intel}/geoip/country_info_test.go (100%)
 rename {intel => service/intel}/geoip/database.go (98%)
 rename {intel => service/intel}/geoip/location.go (100%)
 rename {intel => service/intel}/geoip/location_test.go (100%)
 rename {intel => service/intel}/geoip/lookup.go (100%)
 rename {intel => service/intel}/geoip/lookup_test.go (100%)
 rename {intel => service/intel}/geoip/module.go (94%)
 rename {intel => service/intel}/geoip/module_test.go (64%)
 rename {intel => service/intel}/geoip/regions.go (100%)
 rename {intel => service/intel}/geoip/regions_test.go (100%)
 rename {intel => service/intel}/module.go (82%)
 rename {intel => service/intel}/resolver.go (100%)
 rename {nameserver => service/nameserver}/config.go (97%)
 rename {nameserver => service/nameserver}/conflict.go (94%)
 rename {nameserver => service/nameserver}/failing.go (97%)
 rename {nameserver => service/nameserver}/metrics.go (100%)
 rename {nameserver => service/nameserver}/module.go (97%)
 rename {nameserver => service/nameserver}/nameserver.go (97%)
 rename {nameserver => service/nameserver}/nsutil/nsutil.go (100%)
 rename {nameserver => service/nameserver}/response.go (97%)
 rename {netenv => service/netenv}/addresses_test.go (100%)
 rename {netenv => service/netenv}/adresses.go (98%)
 rename {netenv => service/netenv}/api.go (100%)
 rename {netenv => service/netenv}/dbus_linux.go (100%)
 rename {netenv => service/netenv}/dbus_linux_test.go (100%)
 rename {netenv => service/netenv}/dialing.go (100%)
 rename {netenv => service/netenv}/environment.go (100%)
 rename {netenv => service/netenv}/environment_default.go (100%)
 rename {netenv => service/netenv}/environment_linux.go (98%)
 rename {netenv => service/netenv}/environment_linux_test.go (100%)
 rename {netenv => service/netenv}/environment_test.go (100%)
 rename {netenv => service/netenv}/environment_windows.go (100%)
 rename {netenv => service/netenv}/environment_windows_test.go (100%)
 rename {netenv => service/netenv}/icmp_listener.go (98%)
 rename {netenv => service/netenv}/location.go (98%)
 rename {netenv => service/netenv}/location_default.go (100%)
 rename {netenv => service/netenv}/location_test.go (100%)
 rename {netenv => service/netenv}/location_windows.go (100%)
 rename {netenv => service/netenv}/main.go (100%)
 rename {netenv => service/netenv}/main_test.go (65%)
 rename {netenv => service/netenv}/network-change.go (100%)
 rename {netenv => service/netenv}/notes.md (100%)
 rename {netenv => service/netenv}/online-status.go (99%)
 rename {netenv => service/netenv}/online-status_test.go (100%)
 rename {netenv => service/netenv}/os_android.go (92%)
 rename {netenv => service/netenv}/os_default.go (100%)
 rename {netquery => service/netquery}/active_chart_handler.go (98%)
 rename {netquery => service/netquery}/bandwidth_chart_handler.go (98%)
 rename {netquery => service/netquery}/database.go (98%)
 rename {netquery => service/netquery}/manager.go (99%)
 rename {netquery => service/netquery}/module_api.go (99%)
 rename {netquery => service/netquery}/orm/decoder.go (100%)
 rename {netquery => service/netquery}/orm/decoder_test.go (100%)
 rename {netquery => service/netquery}/orm/encoder.go (100%)
 rename {netquery => service/netquery}/orm/encoder_test.go (100%)
 rename {netquery => service/netquery}/orm/query_runner.go (100%)
 rename {netquery => service/netquery}/orm/schema_builder.go (100%)
 rename {netquery => service/netquery}/orm/schema_builder_test.go (100%)
 rename {netquery => service/netquery}/query.go (99%)
 rename {netquery => service/netquery}/query_handler.go (99%)
 rename {netquery => service/netquery}/query_request.go (99%)
 rename {netquery => service/netquery}/query_test.go (98%)
 rename {netquery => service/netquery}/runtime_query_runner.go (97%)
 rename {network => service/network}/api.go (97%)
 rename {network => service/network}/api_test.go (98%)
 rename {network => service/network}/clean.go (95%)
 rename {network => service/network}/connection.go (98%)
 rename {network => service/network}/connection_android.go (88%)
 rename {network => service/network}/connection_store.go (100%)
 rename {network => service/network}/database.go (98%)
 rename {network => service/network}/dns.go (97%)
 rename {network => service/network}/iphelper/get.go (96%)
 rename {network => service/network}/iphelper/iphelper.go (100%)
 rename {network => service/network}/iphelper/tables.go (99%)
 rename {network => service/network}/iphelper/tables_test.go (100%)
 rename {network => service/network}/metrics.go (98%)
 rename {network => service/network}/module.go (96%)
 rename {network => service/network}/multicast.go (96%)
 rename {network => service/network}/netutils/address.go (96%)
 rename {network => service/network}/netutils/dns.go (100%)
 rename {network => service/network}/netutils/dns_test.go (100%)
 rename {network => service/network}/netutils/ip.go (100%)
 rename {network => service/network}/netutils/ip_test.go (100%)
 rename {network => service/network}/netutils/tcpassembly.go (100%)
 rename {network => service/network}/packet/bandwidth.go (100%)
 rename {network => service/network}/packet/const.go (100%)
 rename {network => service/network}/packet/info_only.go (100%)
 rename {network => service/network}/packet/packet.go (100%)
 rename {network => service/network}/packet/packetinfo.go (100%)
 rename {network => service/network}/packet/parse.go (100%)
 rename {network => service/network}/ports.go (100%)
 rename {network => service/network}/proc/findpid.go (97%)
 rename {network => service/network}/proc/pids_by_user.go (100%)
 rename {network => service/network}/proc/tables.go (99%)
 rename {network => service/network}/proc/tables_test.go (100%)
 rename {network => service/network}/reference/ports.go (100%)
 rename {network => service/network}/reference/protocols.go (100%)
 rename {network => service/network}/socket/socket.go (100%)
 rename {network => service/network}/state/exists.go (95%)
 rename {network => service/network}/state/info.go (89%)
 rename {network => service/network}/state/lookup.go (97%)
 rename {network => service/network}/state/system_default.go (95%)
 rename {network => service/network}/state/system_linux.go (90%)
 rename {network => service/network}/state/system_windows.go (80%)
 rename {network => service/network}/state/tcp.go (97%)
 rename {network => service/network}/state/udp.go (97%)
 rename {network => service/network}/status.go (100%)
 rename {process => service/process}/api.go (98%)
 rename {process => service/process}/config.go (100%)
 rename {process => service/process}/database.go (98%)
 rename {process => service/process}/doc.go (100%)
 rename {process => service/process}/executable.go (100%)
 rename {process => service/process}/find.go (95%)
 rename {process => service/process}/module.go (91%)
 rename {process => service/process}/module_test.go (65%)
 rename {process => service/process}/process.go (99%)
 rename {process => service/process}/process_default.go (100%)
 rename {process => service/process}/process_linux.go (100%)
 rename {process => service/process}/process_windows.go (100%)
 rename {process => service/process}/profile.go (98%)
 rename {process => service/process}/special.go (96%)
 rename {process => service/process}/tags.go (97%)
 rename {process => service/process}/tags/appimage_unix.go (96%)
 rename {process => service/process}/tags/flatpak_unix.go (92%)
 rename {process => service/process}/tags/interpreter_unix.go (97%)
 rename {process => service/process}/tags/net.go (93%)
 rename {process => service/process}/tags/snap_unix.go (95%)
 rename {process => service/process}/tags/svchost_windows.go (95%)
 rename {process => service/process}/tags/winstore_windows.go (95%)
 rename {profile => service/profile}/active.go (100%)
 rename {profile => service/profile}/api.go (98%)
 rename {profile => service/profile}/binmeta/convert.go (100%)
 rename {profile => service/profile}/binmeta/find_default.go (100%)
 rename {profile => service/profile}/binmeta/find_linux.go (100%)
 rename {profile => service/profile}/binmeta/find_linux_test.go (100%)
 rename {profile => service/profile}/binmeta/find_windows.go (100%)
 rename {profile => service/profile}/binmeta/find_windows_test.go (100%)
 rename {profile => service/profile}/binmeta/icon.go (100%)
 rename {profile => service/profile}/binmeta/icons.go (100%)
 rename {profile => service/profile}/binmeta/locations_linux.go (100%)
 rename {profile => service/profile}/binmeta/name.go (100%)
 rename {profile => service/profile}/binmeta/name_test.go (100%)
 rename {profile => service/profile}/config-update.go (96%)
 rename {profile => service/profile}/config.go (99%)
 rename {profile => service/profile}/database.go (100%)
 rename {profile => service/profile}/endpoints/annotations.go (100%)
 rename {profile => service/profile}/endpoints/endpoint-any.go (92%)
 rename {profile => service/profile}/endpoints/endpoint-asn.go (96%)
 rename {profile => service/profile}/endpoints/endpoint-continent.go (96%)
 rename {profile => service/profile}/endpoints/endpoint-country.go (96%)
 rename {profile => service/profile}/endpoints/endpoint-domain.go (97%)
 rename {profile => service/profile}/endpoints/endpoint-ip.go (94%)
 rename {profile => service/profile}/endpoints/endpoint-iprange.go (94%)
 rename {profile => service/profile}/endpoints/endpoint-lists.go (95%)
 rename {profile => service/profile}/endpoints/endpoint-scopes.go (95%)
 rename {profile => service/profile}/endpoints/endpoint.go (98%)
 rename {profile => service/profile}/endpoints/endpoint_test.go (100%)
 rename {profile => service/profile}/endpoints/endpoints.go (98%)
 rename {profile => service/profile}/endpoints/endpoints_test.go (98%)
 rename {profile => service/profile}/endpoints/reason.go (100%)
 rename {profile => service/profile}/fingerprint.go (100%)
 rename {profile => service/profile}/fingerprint_test.go (100%)
 rename {profile => service/profile}/framework.go (100%)
 rename {profile => service/profile}/framework_test.go (100%)
 rename {profile => service/profile}/get.go (100%)
 rename {profile => service/profile}/merge.go (98%)
 rename {profile => service/profile}/meta.go (100%)
 rename {profile => service/profile}/migrations.go (99%)
 rename {profile => service/profile}/module.go (93%)
 rename {profile => service/profile}/profile-layered-provider.go (100%)
 rename {profile => service/profile}/profile-layered.go (99%)
 rename {profile => service/profile}/profile.go (98%)
 rename {profile => service/profile}/special.go (100%)
 rename {resolver => service/resolver}/api.go (100%)
 rename {resolver => service/resolver}/block-detection.go (100%)
 rename {resolver => service/resolver}/compat.go (100%)
 rename {resolver => service/resolver}/config.go (98%)
 rename {resolver => service/resolver}/doc.go (100%)
 rename {resolver => service/resolver}/failing.go (98%)
 rename {resolver => service/resolver}/ipinfo.go (100%)
 rename {resolver => service/resolver}/ipinfo_test.go (100%)
 rename {resolver => service/resolver}/main.go (97%)
 rename {resolver => service/resolver}/main_test.go (97%)
 rename {resolver => service/resolver}/metrics.go (100%)
 rename {resolver => service/resolver}/namerecord.go (100%)
 rename {resolver => service/resolver}/namerecord_test.go (100%)
 rename {resolver => service/resolver}/resolve.go (99%)
 rename {resolver => service/resolver}/resolver-env.go (97%)
 rename {resolver => service/resolver}/resolver-https.go (98%)
 rename {resolver => service/resolver}/resolver-mdns.go (99%)
 rename {resolver => service/resolver}/resolver-plain.go (98%)
 rename {resolver => service/resolver}/resolver-tcp.go (99%)
 rename {resolver => service/resolver}/resolver.go (98%)
 rename {resolver => service/resolver}/resolver_test.go (100%)
 rename {resolver => service/resolver}/resolvers.go (99%)
 rename {resolver => service/resolver}/resolvers_test.go (100%)
 rename {resolver => service/resolver}/reverse.go (100%)
 rename {resolver => service/resolver}/reverse_test.go (100%)
 rename {resolver => service/resolver}/rr_context.go (100%)
 rename {resolver => service/resolver}/rrcache.go (98%)
 rename {resolver => service/resolver}/rrcache_test.go (100%)
 rename {resolver => service/resolver}/scopes.go (99%)
 rename {resolver => service/resolver}/test/resolving.bash (100%)
 rename {status => service/status}/module.go (95%)
 rename {status => service/status}/provider.go (95%)
 rename {status => service/status}/records.go (92%)
 rename {status => service/status}/security_level.go (100%)
 rename {sync => service/sync}/module.go (100%)
 rename {sync => service/sync}/profile.go (99%)
 rename {sync => service/sync}/setting_single.go (99%)
 rename {sync => service/sync}/settings.go (99%)
 rename {sync => service/sync}/util.go (100%)
 rename {ui => service/ui}/api.go (100%)
 rename {ui => service/ui}/module.go (100%)
 rename {ui => service/ui}/serve.go (99%)
 rename {updates => service/updates}/api.go (100%)
 rename {updates => service/updates}/assets/portmaster.service (100%)
 rename {updates => service/updates}/config.go (99%)
 rename {updates => service/updates}/export.go (99%)
 rename {updates => service/updates}/get.go (97%)
 rename {updates => service/updates}/helper/electron.go (100%)
 rename {updates => service/updates}/helper/indexes.go (100%)
 rename {updates => service/updates}/helper/signing.go (100%)
 rename {updates => service/updates}/helper/updates.go (100%)
 rename {updates => service/updates}/main.go (99%)
 rename {updates => service/updates}/notify.go (100%)
 rename {updates => service/updates}/os_integration_default.go (100%)
 rename {updates => service/updates}/os_integration_linux.go (100%)
 rename {updates => service/updates}/restart.go (100%)
 rename {updates => service/updates}/state.go (100%)
 rename {updates => service/updates}/upgrader.go (99%)
 create mode 100644 spn/TESTING.md
 create mode 100644 spn/TRADEMARKS
 create mode 100644 spn/access/account/auth.go
 create mode 100644 spn/access/account/client.go
 create mode 100644 spn/access/account/types.go
 create mode 100644 spn/access/account/view.go
 create mode 100644 spn/access/api.go
 create mode 100644 spn/access/client.go
 create mode 100644 spn/access/client_test.go
 create mode 100644 spn/access/database.go
 create mode 100644 spn/access/features.go
 create mode 100644 spn/access/module.go
 create mode 100644 spn/access/module_test.go
 create mode 100644 spn/access/notify.go
 create mode 100644 spn/access/op_auth.go
 create mode 100644 spn/access/storage.go
 create mode 100644 spn/access/token/errors.go
 create mode 100644 spn/access/token/module_test.go
 create mode 100644 spn/access/token/pblind.go
 create mode 100644 spn/access/token/pblind_gen_test.go
 create mode 100644 spn/access/token/pblind_test.go
 create mode 100644 spn/access/token/registry.go
 create mode 100644 spn/access/token/request.go
 create mode 100644 spn/access/token/request_test.go
 create mode 100644 spn/access/token/scramble.go
 create mode 100644 spn/access/token/scramble_gen_test.go
 create mode 100644 spn/access/token/scramble_test.go
 create mode 100644 spn/access/token/token.go
 create mode 100644 spn/access/token/token_test.go
 create mode 100644 spn/access/zones.go
 create mode 100644 spn/cabin/config-public.go
 create mode 100644 spn/cabin/database.go
 create mode 100644 spn/cabin/identity.go
 create mode 100644 spn/cabin/identity_test.go
 create mode 100644 spn/cabin/keys.go
 create mode 100644 spn/cabin/keys_test.go
 create mode 100644 spn/cabin/module.go
 create mode 100644 spn/cabin/module_test.go
 create mode 100644 spn/cabin/verification.go
 create mode 100644 spn/cabin/verification_test.go
 create mode 100644 spn/captain/api.go
 create mode 100644 spn/captain/bootstrap.go
 create mode 100644 spn/captain/client.go
 create mode 100644 spn/captain/config.go
 create mode 100644 spn/captain/establish.go
 create mode 100644 spn/captain/exceptions.go
 create mode 100644 spn/captain/gossip.go
 create mode 100644 spn/captain/hooks.go
 create mode 100644 spn/captain/intel.go
 create mode 100644 spn/captain/module.go
 create mode 100644 spn/captain/navigation.go
 create mode 100644 spn/captain/op_gossip.go
 create mode 100644 spn/captain/op_gossip_query.go
 create mode 100644 spn/captain/op_publish.go
 create mode 100644 spn/captain/piers.go
 create mode 100644 spn/captain/public.go
 create mode 100644 spn/captain/status.go
 create mode 100644 spn/conf/map.go
 create mode 100644 spn/conf/mode.go
 create mode 100644 spn/conf/networks.go
 create mode 100644 spn/conf/version.go
 create mode 100644 spn/crew/connect.go
 create mode 100644 spn/crew/metrics.go
 create mode 100644 spn/crew/module.go
 create mode 100644 spn/crew/module_test.go
 create mode 100644 spn/crew/op_connect.go
 create mode 100644 spn/crew/op_connect_test.go
 create mode 100644 spn/crew/op_ping.go
 create mode 100644 spn/crew/op_ping_test.go
 create mode 100644 spn/crew/policy.go
 create mode 100644 spn/crew/sticky.go
 create mode 100644 spn/docks/bandwidth_test.go
 create mode 100644 spn/docks/controller.go
 create mode 100644 spn/docks/crane.go
 create mode 100644 spn/docks/crane_establish.go
 create mode 100644 spn/docks/crane_init.go
 create mode 100644 spn/docks/crane_netstate.go
 create mode 100644 spn/docks/crane_terminal.go
 create mode 100644 spn/docks/crane_test.go
 create mode 100644 spn/docks/crane_verify.go
 create mode 100644 spn/docks/cranehooks.go
 create mode 100644 spn/docks/hub_import.go
 create mode 100644 spn/docks/measurements.go
 create mode 100644 spn/docks/metrics.go
 create mode 100644 spn/docks/module.go
 create mode 100644 spn/docks/module_test.go
 create mode 100644 spn/docks/op_capacity.go
 create mode 100644 spn/docks/op_capacity_test.go
 create mode 100644 spn/docks/op_expand.go
 create mode 100644 spn/docks/op_latency.go
 create mode 100644 spn/docks/op_latency_test.go
 create mode 100644 spn/docks/op_sync_state.go
 create mode 100644 spn/docks/op_whoami.go
 create mode 100644 spn/docks/op_whoami_test.go
 create mode 100644 spn/docks/terminal_expansion.go
 create mode 100644 spn/docks/terminal_expansion_test.go
 create mode 100644 spn/hub/database.go
 create mode 100644 spn/hub/errors.go
 create mode 100644 spn/hub/format.go
 create mode 100644 spn/hub/format_test.go
 create mode 100644 spn/hub/hub.go
 create mode 100644 spn/hub/hub_test.go
 create mode 100644 spn/hub/intel.go
 create mode 100644 spn/hub/intel_override.go
 create mode 100644 spn/hub/measurements.go
 create mode 100644 spn/hub/status.go
 create mode 100644 spn/hub/transport.go
 create mode 100644 spn/hub/transport_test.go
 create mode 100644 spn/hub/truststores.go
 create mode 100644 spn/hub/update.go
 create mode 100644 spn/hub/update_test.go
 create mode 100644 spn/navigator/api.go
 create mode 100644 spn/navigator/api_route.go
 create mode 100644 spn/navigator/costs.go
 create mode 100644 spn/navigator/database.go
 create mode 100644 spn/navigator/findnearest.go
 create mode 100644 spn/navigator/findnearest_test.go
 create mode 100644 spn/navigator/findroutes.go
 create mode 100644 spn/navigator/findroutes_test.go
 create mode 100644 spn/navigator/intel.go
 create mode 100644 spn/navigator/map.go
 create mode 100644 spn/navigator/map_stats.go
 create mode 100644 spn/navigator/map_test.go
 create mode 100644 spn/navigator/measurements.go
 create mode 100644 spn/navigator/metrics.go
 create mode 100644 spn/navigator/module.go
 create mode 100644 spn/navigator/module_test.go
 create mode 100644 spn/navigator/optimize.go
 create mode 100644 spn/navigator/optimize_region.go
 create mode 100644 spn/navigator/optimize_test.go
 create mode 100644 spn/navigator/options.go
 create mode 100644 spn/navigator/pin.go
 create mode 100644 spn/navigator/pin_export.go
 create mode 100644 spn/navigator/region.go
 create mode 100644 spn/navigator/route.go
 create mode 100644 spn/navigator/routing-profiles.go
 create mode 100644 spn/navigator/sort.go
 create mode 100644 spn/navigator/sort_test.go
 create mode 100644 spn/navigator/state.go
 create mode 100644 spn/navigator/state_test.go
 create mode 100644 spn/navigator/testdata/main-intel.yml
 create mode 100644 spn/navigator/update.go
 create mode 100644 spn/patrol/domains.go
 create mode 100644 spn/patrol/domains_test.go
 create mode 100644 spn/patrol/http.go
 create mode 100644 spn/patrol/module.go
 create mode 100644 spn/ships/connection_test.go
 create mode 100644 spn/ships/http.go
 create mode 100644 spn/ships/http_info.go
 create mode 100644 spn/ships/http_info_page.html.tmpl
 create mode 100644 spn/ships/http_info_test.go
 create mode 100644 spn/ships/http_shared.go
 create mode 100644 spn/ships/http_shared_test.go
 create mode 100644 spn/ships/kcp.go
 create mode 100644 spn/ships/launch.go
 create mode 100644 spn/ships/masking.go
 create mode 100644 spn/ships/module.go
 create mode 100644 spn/ships/mtu.go
 create mode 100644 spn/ships/pier.go
 create mode 100644 spn/ships/registry.go
 create mode 100644 spn/ships/ship.go
 create mode 100644 spn/ships/tcp.go
 create mode 100644 spn/ships/testship.go
 create mode 100644 spn/ships/testship_test.go
 create mode 100644 spn/ships/virtual_network.go
 create mode 100644 spn/sluice/module.go
 create mode 100644 spn/sluice/packet_listener.go
 create mode 100644 spn/sluice/request.go
 create mode 100644 spn/sluice/sluice.go
 create mode 100644 spn/sluice/sluices.go
 create mode 100644 spn/sluice/udp_listener.go
 create mode 100644 spn/spn.go
 create mode 100644 spn/terminal/control_flow.go
 create mode 100644 spn/terminal/defaults.go
 create mode 100644 spn/terminal/errors.go
 create mode 100644 spn/terminal/fmt.go
 create mode 100644 spn/terminal/init.go
 create mode 100644 spn/terminal/metrics.go
 create mode 100644 spn/terminal/module.go
 create mode 100644 spn/terminal/module_test.go
 create mode 100644 spn/terminal/msg.go
 create mode 100644 spn/terminal/msgtypes.go
 create mode 100644 spn/terminal/operation.go
 create mode 100644 spn/terminal/operation_base.go
 create mode 100644 spn/terminal/operation_counter.go
 create mode 100644 spn/terminal/permission.go
 create mode 100644 spn/terminal/rate_limit.go
 create mode 100644 spn/terminal/session.go
 create mode 100644 spn/terminal/session_test.go
 create mode 100644 spn/terminal/terminal.go
 create mode 100644 spn/terminal/terminal_test.go
 create mode 100644 spn/terminal/testing.go
 create mode 100644 spn/terminal/upstream.go
 create mode 100755 spn/test
 create mode 100644 spn/tools/Dockerfile
 create mode 100755 spn/tools/container-init.sh
 create mode 100755 spn/tools/install.sh
 create mode 100644 spn/tools/start-checksum.txt
 create mode 100644 spn/tools/sysctl.conf
 create mode 100644 spn/unit/doc.go
 create mode 100644 spn/unit/scheduler.go
 create mode 100644 spn/unit/scheduler_stats.go
 create mode 100644 spn/unit/scheduler_test.go
 create mode 100644 spn/unit/unit.go
 create mode 100644 spn/unit/unit_debug.go
 create mode 100644 spn/unit/unit_test.go

diff --git a/.ci-inject-internal-deps.sh b/.ci-inject-internal-deps.sh
deleted file mode 100755
index 08304016..00000000
--- a/.ci-inject-internal-deps.sh
+++ /dev/null
@@ -1,22 +0,0 @@
-#!/bin/sh
-
-DEP_FILE="Gopkg.toml"
-
-# remove ignored internal deps
-sed -i '/ignored = \["github.com\/safing\//d' $DEP_FILE
-
-# portbase
-PORTBASE_BRANCH="develop" 
-git branch | grep "* master" >/dev/null
-if [ $? -eq 0 ]; then
-    PORTBASE_BRANCH="master" 
-fi
-echo "
-[[constraint]]
-  name = \"github.com/safing/portbase\"
-  branch = \"${PORTBASE_BRANCH}\"
-
-[[constraint]]
-  name = \"github.com/safing/spn\"
-  branch = \"${PORTBASE_BRANCH}\"
-" >> $DEP_FILE
diff --git a/Gopkg.lock b/Gopkg.lock
deleted file mode 100644
index ebf28930..00000000
--- a/Gopkg.lock
+++ /dev/null
@@ -1,405 +0,0 @@
-# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
-
-
-[[projects]]
-  digest = "1:6146fda730c18186631e91e818d995e759e7cbe27644d6871ccd469f6865c686"
-  name = "github.com/StackExchange/wmi"
-  packages = ["."]
-  pruneopts = ""
-  revision = "cbe66965904dbe8a6cd589e2298e5d8b986bd7dd"
-  version = "1.1.0"
-
-[[projects]]
-  digest = "1:e010d6b45ee6c721df761eae89961c634ceb55feff166a48d15504729309f267"
-  name = "github.com/TheTannerRyan/ring"
-  packages = ["."]
-  pruneopts = ""
-  revision = "7b27005873e31b5d5a035e166636a09e03aaf40e"
-  version = "v1.1.1"
-
-[[projects]]
-  digest = "1:21caed545a1c7ef7a2627bbb45989f689872ff6d5087d49c31340ce74c36de59"
-  name = "github.com/agext/levenshtein"
-  packages = ["."]
-  pruneopts = ""
-  revision = "52c14c47d03211d8ac1834e94601635e07c5a6ef"
-  version = "v1.2.3"
-
-[[projects]]
-  branch = "v2.1"
-  digest = "1:3fc5d0d9cb474736e8e6c2f2292e0763b5132c6e7d8cbedf7bde404a470c8c3b"
-  name = "github.com/cookieo9/resources-go"
-  packages = ["."]
-  pruneopts = ""
-  revision = "d27c04069d0d5dfe11c202dacbf745ae8d1ab181"
-
-[[projects]]
-  digest = "1:f384a8b6f89c502229e9013aa4f89ce5b5b56f09f9a4d601d7f1f026d3564fbf"
-  name = "github.com/coreos/go-iptables"
-  packages = ["iptables"]
-  pruneopts = ""
-  revision = "f901d6c2a4f2a4df092b98c33366dfba1f93d7a0"
-  version = "v0.4.5"
-
-[[projects]]
-  digest = "1:0deddd908b6b4b768cfc272c16ee61e7088a60f7fe2f06c547bd3d8e1f8b8e77"
-  name = "github.com/davecgh/go-spew"
-  packages = ["spew"]
-  pruneopts = ""
-  revision = "8991bc29aa16c548c550c7ff78260e27b9ab7c73"
-  version = "v1.1.1"
-
-[[projects]]
-  branch = "master"
-  digest = "1:c8098f53cd182561cfb128c9a5ba70e41ad2364b763f33f05c6bd54003ae6495"
-  name = "github.com/florianl/go-nfqueue"
-  packages = [
-    ".",
-    "internal/unix",
-  ]
-  pruneopts = ""
-  revision = "a2f196e98ab0ffdcb8b5252e7cbba98e45dea204"
-
-[[projects]]
-  digest = "1:b6581f9180e0f2d5549280d71819ab951db9d511478c87daca95669589d505c0"
-  name = "github.com/go-ole/go-ole"
-  packages = [
-    ".",
-    "oleutil",
-  ]
-  pruneopts = ""
-  revision = "97b6244175ae18ea6eef668034fd6565847501c9"
-  version = "v1.2.4"
-
-[[projects]]
-  digest = "1:f63933986e63230fc32512ed00bc18ea4dbb0f57b5da18561314928fd20c2ff0"
-  name = "github.com/godbus/dbus"
-  packages = ["."]
-  pruneopts = ""
-  revision = "37bf87eef99d69c4f1d3528bd66e3a87dc201472"
-  version = "v5.0.3"
-
-[[projects]]
-  digest = "1:c18de9c9afca0ab336a29cf356d566abbdc29dd4948547557ed62c0da30d3be3"
-  name = "github.com/google/gopacket"
-  packages = [
-    ".",
-    "layers",
-    "tcpassembly",
-  ]
-  pruneopts = ""
-  revision = "558173e197d46ae52f0f7c58313c96296ee16a9c"
-  version = "v1.1.18"
-
-[[projects]]
-  digest = "1:20dc576ad8f98fe64777c62f090a9b37dd67c62b23fe42b429c2c41936aa8a9c"
-  name = "github.com/google/renameio"
-  packages = ["."]
-  pruneopts = ""
-  revision = "f0e32980c006571efd537032e5f9cd8c1a92819e"
-  version = "v0.1.0"
-
-[[projects]]
-  digest = "1:8e3bd93036b4a925fe2250d3e4f38f21cadb8ef623561cd80c3c50c114b13201"
-  name = "github.com/hashicorp/errwrap"
-  packages = ["."]
-  pruneopts = ""
-  revision = "8a6fb523712970c966eefc6b39ed2c5e74880354"
-  version = "v1.0.0"
-
-[[projects]]
-  digest = "1:c6e569ffa34fcd24febd3562bff0520a104d15d1a600199cb3141debf2e58c89"
-  name = "github.com/hashicorp/go-multierror"
-  packages = ["."]
-  pruneopts = ""
-  revision = "2004d9dba6b07a5b8d133209244f376680f9d472"
-  version = "v1.1.0"
-
-[[projects]]
-  digest = "1:ebffb4b4c8ddcf66bb549464183ea2ddbac6c58a803658f67249f83395d17455"
-  name = "github.com/hashicorp/go-version"
-  packages = ["."]
-  pruneopts = ""
-  revision = "59da58cfd357de719a4d16dac30481391a56c002"
-  version = "v1.2.1"
-
-[[projects]]
-  digest = "1:870d441fe217b8e689d7949fef6e43efbc787e50f200cb1e70dbca9204a1d6be"
-  name = "github.com/inconshreveable/mousetrap"
-  packages = ["."]
-  pruneopts = ""
-  revision = "76626ae9c91c4f2a10f34cad8ce83ea42c93bb75"
-  version = "v1.0"
-
-[[projects]]
-  branch = "master"
-  digest = "1:e71cc6b377264002aec0d9c235087e51ad7a3c1fb341bb4baa84709308b94fe8"
-  name = "github.com/kardianos/osext"
-  packages = ["."]
-  pruneopts = ""
-  revision = "2bc1f35cddc0cc527b4bc3dce8578fc2a6c11384"
-
-[[projects]]
-  digest = "1:711ec17a2d8edd94cff8e2e4339d847e46acc1bb6b49ec29dcc1db78b666378b"
-  name = "github.com/mdlayher/netlink"
-  packages = [
-    ".",
-    "nlenc",
-  ]
-  pruneopts = ""
-  revision = "2a4e26491f1ba4eae173a7733ac11744cfed82b5"
-  version = "v1.2.0"
-
-[[projects]]
-  digest = "1:508f444b8e00a569a40899aaf5740348b44c305d36f36d4f002b277677deef95"
-  name = "github.com/miekg/dns"
-  packages = ["."]
-  pruneopts = ""
-  revision = "10e0aeedbee54849adab780611454192a9980443"
-  version = "v1.1.33"
-
-[[projects]]
-  digest = "1:3282ac9a9ddf5c2c0eda96693364d34fe0f8d10a0748259082a5c9fbd3e1f7e4"
-  name = "github.com/oschwald/maxminddb-golang"
-  packages = ["."]
-  pruneopts = ""
-  revision = "2e4624cc0c4105b1df1d0643ac3aadb53824dc7d"
-  version = "v1.7.0"
-
-[[projects]]
-  digest = "1:c45802472e0c06928cd997661f2af610accd85217023b1d5f6331bebce0671d3"
-  name = "github.com/pkg/errors"
-  packages = ["."]
-  pruneopts = ""
-  revision = "614d223910a179a466c1767a985424175c39b465"
-  version = "v0.9.1"
-
-[[projects]]
-  digest = "1:256484dbbcd271f9ecebc6795b2df8cad4c458dd0f5fd82a8c2fa0c29f233411"
-  name = "github.com/pmezard/go-difflib"
-  packages = ["difflib"]
-  pruneopts = ""
-  revision = "792786c7400a136282c1664665ae0a8db921c6c2"
-  version = "v1.0.0"
-
-[[projects]]
-  digest = "1:70e15b4090e254d1eada6ef156773c0888cf707c43078479114d814761b902c5"
-  name = "github.com/shirou/gopsutil"
-  packages = [
-    "cpu",
-    "internal/common",
-    "mem",
-    "net",
-    "process",
-  ]
-  pruneopts = ""
-  revision = "7e94bb8bcde053b6d6c98bda5145e9742c913c39"
-  version = "v2.20.7"
-
-[[projects]]
-  digest = "1:bff75d4f1a2d2c4b8f4b46ff5ac230b80b5fa49276f615900cba09fe4c97e66e"
-  name = "github.com/spf13/cobra"
-  packages = ["."]
-  pruneopts = ""
-  revision = "a684a6d7f5e37385d954dd3b5a14fc6912c6ab9d"
-  version = "v1.0.0"
-
-[[projects]]
-  digest = "1:688428eeb1ca80d92599eb3254bdf91b51d7e232fead3a73844c1f201a281e51"
-  name = "github.com/spf13/pflag"
-  packages = ["."]
-  pruneopts = ""
-  revision = "2e9d26c8c37aae03e3f9d4e90b7116f5accb7cab"
-  version = "v1.0.5"
-
-[[projects]]
-  digest = "1:83fd2513b9f6ae0997bf646db6b74e9e00131e31002116fda597175f25add42d"
-  name = "github.com/stretchr/testify"
-  packages = ["assert"]
-  pruneopts = ""
-  revision = "f654a9112bbeac49ca2cd45bfbe11533c4666cf8"
-  version = "v1.6.1"
-
-[[projects]]
-  digest = "1:1f11a269b089908c141f78c060991ff7bcd16545e95ee48d557e638fa846bde2"
-  name = "github.com/tevino/abool"
-  packages = ["."]
-  pruneopts = ""
-  revision = "8ae5c93531aabf12924a5b78e6dee1216bfff2f8"
-  version = "v1.2.0"
-
-[[projects]]
-  branch = "master"
-  digest = "1:21097653bd7914de1262f2429e277933507442f892815a791ce1c0dbf0a8dc20"
-  name = "github.com/umahmood/haversine"
-  packages = ["."]
-  pruneopts = ""
-  revision = "808ab04add26660fd241ddb7973886c6dd6669e8"
-
-[[projects]]
-  branch = "master"
-  digest = "1:df4642a605244e62c69ae335ac3c3cfa1c2b7ec971c3de398e1909592a961923"
-  name = "golang.org/x/crypto"
-  packages = [
-    "ed25519",
-    "ed25519/internal/edwards25519",
-  ]
-  pruneopts = ""
-  revision = "123391ffb6de907695e1066dc40c1ff09322aeb6"
-
-[[projects]]
-  digest = "1:ba49944a3238ae8f163c85b6d01d2db51cd5b09807105a3cfaacbd414744ca82"
-  name = "golang.org/x/mod"
-  packages = ["semver"]
-  pruneopts = ""
-  revision = "859b3ef565e237f9f1a0fb6b55385c497545680d"
-  version = "v0.3.0"
-
-[[projects]]
-  branch = "master"
-  digest = "1:9ee0e6bc20d85d179d19be321443639dc501a8c0ba1bac173261b57768063e79"
-  name = "golang.org/x/net"
-  packages = [
-    "bpf",
-    "icmp",
-    "idna",
-    "internal/iana",
-    "internal/socket",
-    "ipv4",
-    "ipv6",
-    "publicsuffix",
-  ]
-  pruneopts = ""
-  revision = "3edf25e44fccea9e11b919341e952fca722ef460"
-
-[[projects]]
-  branch = "master"
-  digest = "1:ae1578a64c2b241c13ab243739d05936d83825d2b6e9ff043ea3c7105666493d"
-  name = "golang.org/x/sync"
-  packages = [
-    "errgroup",
-    "singleflight",
-  ]
-  pruneopts = ""
-  revision = "6e8e738ad208923de99951fe0b48239bfd864f28"
-
-[[projects]]
-  branch = "master"
-  digest = "1:ecfcd51736bf55de713770df4580026a43f01a94c9c077b0ab10239e8a93a589"
-  name = "golang.org/x/sys"
-  packages = [
-    "internal/unsafeheader",
-    "unix",
-    "windows",
-    "windows/registry",
-    "windows/svc",
-    "windows/svc/debug",
-    "windows/svc/eventlog",
-    "windows/svc/mgr",
-  ]
-  pruneopts = ""
-  revision = "3ff754bf58a9922e2b8a1a0bd199be6c9a806123"
-
-[[projects]]
-  digest = "1:fccda34e4c58111b1908d8d69bf8d57c41c8e2542bc18ec8cd38c4fa21057f71"
-  name = "golang.org/x/text"
-  packages = [
-    "collate",
-    "collate/build",
-    "internal/colltab",
-    "internal/gen",
-    "internal/language",
-    "internal/language/compact",
-    "internal/tag",
-    "internal/triegen",
-    "internal/ucd",
-    "language",
-    "secure/bidirule",
-    "transform",
-    "unicode/bidi",
-    "unicode/cldr",
-    "unicode/norm",
-    "unicode/rangetable",
-  ]
-  pruneopts = ""
-  revision = "23ae387dee1f90d29a23c0e87ee0b46038fbed0e"
-  version = "v0.3.3"
-
-[[projects]]
-  branch = "master"
-  digest = "1:1f61b0af124800c576e5ccc355d0634413e0b71fe6fbc77694b18bd30d9aa56e"
-  name = "golang.org/x/tools"
-  packages = [
-    "go/ast/astutil",
-    "go/gcexportdata",
-    "go/internal/gcimporter",
-    "go/internal/packagesdriver",
-    "go/packages",
-    "go/types/typeutil",
-    "internal/event",
-    "internal/event/core",
-    "internal/event/keys",
-    "internal/event/label",
-    "internal/gocommand",
-    "internal/packagesinternal",
-    "internal/typesinternal",
-  ]
-  pruneopts = ""
-  revision = "d00afeaade8f1e68fb815705aa42d704c1b6df35"
-
-[[projects]]
-  branch = "master"
-  digest = "1:a5a7a1a9560c0eb1f8b32c40da2e71bd2a05b9ff9e1ea294461c7dbe0d24c6bc"
-  name = "golang.org/x/xerrors"
-  packages = [
-    ".",
-    "internal",
-  ]
-  pruneopts = ""
-  revision = "5ec99f83aff198f5fbd629d6c8d8eb38a04218ca"
-
-[[projects]]
-  branch = "v3"
-  digest = "1:2e9c4d6def1d36dcd17730e00c06b49a2e97ea5e1e639bcd24fa60fa43e33ad6"
-  name = "gopkg.in/yaml.v3"
-  packages = ["."]
-  pruneopts = ""
-  revision = "eeeca48fe7764f320e4870d231902bf9c1be2c08"
-
-[solve-meta]
-  analyzer-name = "dep"
-  analyzer-version = 1
-  input-imports = [
-    "github.com/TheTannerRyan/ring",
-    "github.com/agext/levenshtein",
-    "github.com/cookieo9/resources-go",
-    "github.com/coreos/go-iptables/iptables",
-    "github.com/florianl/go-nfqueue",
-    "github.com/godbus/dbus",
-    "github.com/google/gopacket",
-    "github.com/google/gopacket/layers",
-    "github.com/google/gopacket/tcpassembly",
-    "github.com/google/renameio",
-    "github.com/hashicorp/go-multierror",
-    "github.com/hashicorp/go-version",
-    "github.com/miekg/dns",
-    "github.com/oschwald/maxminddb-golang",
-    "github.com/shirou/gopsutil/process",
-    "github.com/spf13/cobra",
-    "github.com/stretchr/testify/assert",
-    "github.com/tevino/abool",
-    "github.com/umahmood/haversine",
-    "golang.org/x/net/icmp",
-    "golang.org/x/net/ipv4",
-    "golang.org/x/net/publicsuffix",
-    "golang.org/x/sync/errgroup",
-    "golang.org/x/sync/singleflight",
-    "golang.org/x/sys/unix",
-    "golang.org/x/sys/windows",
-    "golang.org/x/sys/windows/svc",
-    "golang.org/x/sys/windows/svc/debug",
-    "golang.org/x/sys/windows/svc/eventlog",
-    "golang.org/x/sys/windows/svc/mgr",
-  ]
-  solver-name = "gps-cdcl"
-  solver-version = 1
diff --git a/Gopkg.toml b/Gopkg.toml
deleted file mode 100644
index 80648950..00000000
--- a/Gopkg.toml
+++ /dev/null
@@ -1,35 +0,0 @@
-# Gopkg.toml example
-#
-# Refer to https://golang.github.io/dep/docs/Gopkg.toml.html
-# for detailed Gopkg.toml documentation.
-#
-# required = ["github.com/user/thing/cmd/thing"]
-# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"]
-#
-# [[constraint]]
-#   name = "github.com/user/project"
-#   version = "1.0.0"
-#
-# [[constraint]]
-#   name = "github.com/user/project2"
-#   branch = "dev"
-#   source = "github.com/myfork/project2"
-#
-# [[override]]
-#   name = "github.com/x/y"
-#   version = "2.4.0"
-#
-# [prune]
-#   non-go = false
-#   go-tests = true
-#   unused-packages = true
-
-ignored = ["github.com/safing/portbase/*", "github.com/safing/spn/*"]
-
-[[constraint]]
-  name = "github.com/florianl/go-nfqueue"
-  branch = "master" # switch back once we migrate to go.mod
-
-[[override]]
-  name = "github.com/mdlayher/netlink"
-  version = "1.2.0" # remove when github.com/florianl/go-nfqueue has updated to v1.2.0
diff --git a/assets/.gitkeep b/assets/.gitkeep
new file mode 100644
index 00000000..e69de29b
diff --git a/cmds/hub/.gitignore b/cmds/hub/.gitignore
new file mode 100644
index 00000000..41668e89
--- /dev/null
+++ b/cmds/hub/.gitignore
@@ -0,0 +1,3 @@
+# Compiled binaries
+hub
+hub.exe
diff --git a/cmds/hub/build b/cmds/hub/build
new file mode 100755
index 00000000..055874ef
--- /dev/null
+++ b/cmds/hub/build
@@ -0,0 +1,60 @@
+#!/bin/bash
+
+# get build data
+if [[ "$BUILD_COMMIT" == "" ]]; then
+  BUILD_COMMIT=$(git describe --all --long --abbrev=99 --dirty 2>/dev/null)
+fi
+if [[ "$BUILD_USER" == "" ]]; then
+  BUILD_USER=$(id -un)
+fi
+if [[ "$BUILD_HOST" == "" ]]; then
+  BUILD_HOST=$(hostname -f)
+fi
+if [[ "$BUILD_DATE" == "" ]]; then
+  BUILD_DATE=$(date +%d.%m.%Y)
+fi
+if [[ "$BUILD_SOURCE" == "" ]]; then
+  BUILD_SOURCE=$(git remote -v | grep origin | cut -f2 | cut -d" " -f1 | head -n 1)
+fi
+if [[ "$BUILD_SOURCE" == "" ]]; then
+  BUILD_SOURCE=$(git remote -v | cut -f2 | cut -d" " -f1 | head -n 1)
+fi
+BUILD_BUILDOPTIONS=$(echo $* | sed "s/ /§/g")
+
+# check
+if [[ "$BUILD_COMMIT" == "" ]]; then
+  echo "could not automatically determine BUILD_COMMIT, please supply manually as environment variable."
+  exit 1
+fi
+if [[ "$BUILD_USER" == "" ]]; then
+  echo "could not automatically determine BUILD_USER, please supply manually as environment variable."
+  exit 1
+fi
+if [[ "$BUILD_HOST" == "" ]]; then
+  echo "could not automatically determine BUILD_HOST, please supply manually as environment variable."
+  exit 1
+fi
+if [[ "$BUILD_DATE" == "" ]]; then
+  echo "could not automatically determine BUILD_DATE, please supply manually as environment variable."
+  exit 1
+fi
+if [[ "$BUILD_SOURCE" == "" ]]; then
+  echo "could not automatically determine BUILD_SOURCE, please supply manually as environment variable."
+  exit 1
+fi
+
+# set build options
+export CGO_ENABLED=0
+if [[ $1 == "dev" ]]; then
+  shift
+  export CGO_ENABLED=1
+  DEV="-race"
+fi
+
+echo "Please notice, that this build script includes metadata into the build."
+echo "This information is useful for debugging and license compliance."
+echo "Run the compiled binary with the -version flag to see the information included."
+
+# build
+BUILD_PATH="github.com/safing/portbase/info"
+go build $DEV -ldflags "-X ${BUILD_PATH}.commit=${BUILD_COMMIT} -X ${BUILD_PATH}.buildOptions=${BUILD_BUILDOPTIONS} -X ${BUILD_PATH}.buildUser=${BUILD_USER} -X ${BUILD_PATH}.buildHost=${BUILD_HOST} -X ${BUILD_PATH}.buildDate=${BUILD_DATE} -X ${BUILD_PATH}.buildSource=${BUILD_SOURCE}" $*
diff --git a/cmds/hub/main.go b/cmds/hub/main.go
new file mode 100644
index 00000000..831e3abc
--- /dev/null
+++ b/cmds/hub/main.go
@@ -0,0 +1,66 @@
+package main
+
+import (
+	"flag"
+	"fmt"
+	"os"
+	"runtime"
+
+	"github.com/safing/portbase/info"
+	"github.com/safing/portbase/metrics"
+	"github.com/safing/portbase/modules"
+	"github.com/safing/portbase/run"
+	_ "github.com/safing/portmaster/service/core/base"
+	_ "github.com/safing/portmaster/service/ui"
+	"github.com/safing/portmaster/service/updates"
+	"github.com/safing/portmaster/service/updates/helper"
+	_ "github.com/safing/portmaster/spn/captain"
+	"github.com/safing/portmaster/spn/conf"
+)
+
+func init() {
+	flag.BoolVar(&updates.RebootOnRestart, "reboot-on-restart", false, "reboot server on auto-upgrade")
+}
+
+func main() {
+	info.Set("SPN Hub", "0.7.6", "AGPLv3", true)
+
+	// Configure metrics.
+	_ = metrics.SetNamespace("hub")
+
+	// Configure updating.
+	updates.UserAgent = fmt.Sprintf("SPN Hub (%s %s)", runtime.GOOS, runtime.GOARCH)
+	helper.IntelOnly()
+
+	// Configure SPN mode.
+	conf.EnablePublicHub(true)
+	conf.EnableClient(false)
+
+	// Disable module management, as we want to start all modules.
+	modules.DisableModuleManagement()
+
+	// Configure microtask threshold.
+	// Scale with CPU/GOMAXPROCS count, but keep a baseline and minimum:
+	// CPUs -> MicroTasks
+	//    0 ->  8 (increased to minimum)
+	//    1 ->  8 (increased to minimum)
+	//    2 ->  8
+	//    3 -> 10
+	//    4 -> 12
+	//    8 -> 20
+	//   16 -> 36
+	//
+	// Start with number of GOMAXPROCS.
+	microTasksThreshold := runtime.GOMAXPROCS(0) * 2
+	// Use at least 4 microtasks based on GOMAXPROCS.
+	if microTasksThreshold < 4 {
+		microTasksThreshold = 4
+	}
+	// Add a 4 microtask baseline.
+	microTasksThreshold += 4
+	// Set threshold.
+	modules.SetMaxConcurrentMicroTasks(microTasksThreshold)
+
+	// Start.
+	os.Exit(run.Run())
+}
diff --git a/cmds/hub/pack b/cmds/hub/pack
new file mode 100755
index 00000000..73c20270
--- /dev/null
+++ b/cmds/hub/pack
@@ -0,0 +1,123 @@
+#!/bin/bash
+
+baseDir="$( cd "$(dirname "$0")" && pwd )"
+cd "$baseDir"
+
+COL_OFF="\033[0m"
+COL_BOLD="\033[01;01m"
+COL_RED="\033[31m"
+COL_GREEN="\033[32m"
+COL_YELLOW="\033[33m"
+
+destDirPart1="../../dist"
+destDirPart2="hub"
+
+function prep {
+  # output
+  output="main"
+  # get version
+  version=$(grep "info.Set" main.go | cut -d'"' -f4)
+  # build versioned file name
+  filename="spn-hub_v${version//./-}"
+  # platform
+  platform="${GOOS}_${GOARCH}"
+  if [[ $GOOS == "windows" ]]; then
+    filename="${filename}.exe"
+    output="${output}.exe"
+  fi
+  # build destination path
+  destPath=${destDirPart1}/${platform}/${destDirPart2}/$filename
+}
+
+function check {
+  prep
+
+  # check if file exists
+  if [[ -f $destPath ]]; then
+    echo "[hub] $platform v$version already built"
+  else
+    echo -e "${COL_BOLD}[hub] $platform v$version${COL_OFF}"
+  fi
+}
+
+function build {
+  prep
+
+  # check if file exists
+  if [[ -f $destPath ]]; then
+    echo "[hub] $platform already built in v$version, skipping..."
+    return
+  fi
+
+  # build
+  ./build main.go
+  if [[ $? -ne 0 ]]; then
+    echo -e "\n${COL_BOLD}[hub] $platform v$version: ${COL_RED}BUILD FAILED.${COL_OFF}"
+    exit 1
+  fi
+  mkdir -p $(dirname $destPath)
+  cp $output $destPath
+  echo -e "\n${COL_BOLD}[hub] $platform v$version: ${COL_GREEN}successfully built.${COL_OFF}"
+}
+
+function reset {
+  prep
+  
+  # delete if file exists
+  if [[ -f $destPath ]]; then
+    rm $destPath
+    echo "[hub] $platform v$version deleted."
+  fi
+}
+
+function check_all {
+  GOOS=linux GOARCH=amd64 check
+  GOOS=windows GOARCH=amd64 check
+  GOOS=darwin GOARCH=amd64 check
+  GOOS=linux GOARCH=arm64 check
+  GOOS=windows GOARCH=arm64 check
+  GOOS=darwin GOARCH=arm64 check
+}
+
+function build_all {
+  GOOS=linux GOARCH=amd64 build
+  GOOS=windows GOARCH=amd64 build
+  GOOS=darwin GOARCH=amd64 build
+  GOOS=linux GOARCH=arm64 build
+  GOOS=windows GOARCH=arm64 build
+  GOOS=darwin GOARCH=arm64 build
+}
+
+function reset_all {
+  GOOS=linux GOARCH=amd64 reset
+  GOOS=windows GOARCH=amd64 reset
+  GOOS=darwin GOARCH=amd64 reset
+  GOOS=linux GOARCH=arm64 reset
+  GOOS=windows GOARCH=arm64 reset
+  GOOS=darwin GOARCH=arm64 reset
+}
+
+case $1 in
+  "check" )
+    check_all
+    ;;
+  "build" )
+    build_all
+    ;;
+  "reset" )
+    reset_all
+    ;;
+  * )
+    echo ""
+    echo "build list:"
+    echo ""
+    check_all
+    echo ""
+    read -p "press [Enter] to start building" x
+    echo ""
+    build_all
+    echo ""
+    echo "finished building."
+    echo ""
+    ;;
+esac
diff --git a/cmds/integrationtest/netstate.go b/cmds/integrationtest/netstate.go
index f76604c7..5eaaa9c8 100644
--- a/cmds/integrationtest/netstate.go
+++ b/cmds/integrationtest/netstate.go
@@ -7,9 +7,9 @@ import (
 	processInfo "github.com/shirou/gopsutil/process"
 	"github.com/spf13/cobra"
 
-	"github.com/safing/portmaster/network/packet"
-	"github.com/safing/portmaster/network/socket"
-	"github.com/safing/portmaster/network/state"
+	"github.com/safing/portmaster/service/network/packet"
+	"github.com/safing/portmaster/service/network/socket"
+	"github.com/safing/portmaster/service/network/state"
 )
 
 func init() {
diff --git a/cmds/observation-hub/.gitignore b/cmds/observation-hub/.gitignore
new file mode 100644
index 00000000..f1b57325
--- /dev/null
+++ b/cmds/observation-hub/.gitignore
@@ -0,0 +1,3 @@
+# Compiled binaries
+observation-hub
+observation-hub.exe
diff --git a/cmds/observation-hub/Dockerfile b/cmds/observation-hub/Dockerfile
new file mode 100644
index 00000000..0ff78cb9
--- /dev/null
+++ b/cmds/observation-hub/Dockerfile
@@ -0,0 +1,38 @@
+# Docker Image for Observation Hub
+
+# Important:
+# You need to build this from the repo root!
+# Run: docker build -f cmds/observation-hub/Dockerfile -t safing/observation-hub:latest .
+# Check With: docker run -ti --rm safing/observation-hub:latest --help
+
+# golang 1.21 linux/amd64 on debian bookworm
+# https://github.com/docker-library/golang/blob/master/1.21/bookworm/Dockerfile
+FROM golang:1.21-bookworm as builder
+
+# Ensure ca-certficates are up to date
+RUN update-ca-certificates
+
+# Install dependencies
+WORKDIR $GOPATH/src/github.com/safing/portmaster/spn
+COPY go.mod .
+COPY go.sum .
+ENV GO111MODULE=on
+RUN go mod download
+RUN go mod verify
+
+# Copy source code
+COPY . .
+
+# Build the static binary
+RUN cd cmds/observation-hub && \
+CGO_ENABLED=0 ./build -o /go/bin/observation-hub
+
+# Use static image
+# https://github.com/GoogleContainerTools/distroless
+FROM gcr.io/distroless/static-debian12
+
+# Copy our static executable
+COPY --from=builder --chmod=0755 /go/bin/observation-hub /go/bin/observation-hub
+
+# Run the observation-hub binary.
+ENTRYPOINT ["/go/bin/observation-hub"]
diff --git a/cmds/observation-hub/apprise.go b/cmds/observation-hub/apprise.go
new file mode 100644
index 00000000..c7df3c19
--- /dev/null
+++ b/cmds/observation-hub/apprise.go
@@ -0,0 +1,257 @@
+package main
+
+import (
+	"bytes"
+	"crypto/tls"
+	_ "embed"
+	"errors"
+	"flag"
+	"fmt"
+	"net/http"
+	"strings"
+	"text/template"
+	"time"
+
+	"github.com/safing/portbase/apprise"
+	"github.com/safing/portbase/log"
+	"github.com/safing/portbase/modules"
+	"github.com/safing/portmaster/service/intel/geoip"
+)
+
+var (
+	appriseModule   *modules.Module
+	appriseNotifier *apprise.Notifier
+
+	appriseURL        string
+	appriseTag        string
+	appriseClientCert string
+	appriseClientKey  string
+	appriseGreet      bool
+)
+
+func init() {
+	appriseModule = modules.Register("apprise", nil, startApprise, nil)
+
+	flag.StringVar(&appriseURL, "apprise-url", "", "set the apprise URL to enable notifications via apprise")
+	flag.StringVar(&appriseTag, "apprise-tag", "", "set the apprise tag(s) according to their docs")
+	flag.StringVar(&appriseClientCert, "apprise-client-cert", "", "set the apprise client certificate")
+	flag.StringVar(&appriseClientKey, "apprise-client-key", "", "set the apprise client key")
+	flag.BoolVar(&appriseGreet, "apprise-greet", false, "send a greeting message to apprise on start")
+}
+
+func startApprise() error {
+	// Check if apprise should be configured.
+	if appriseURL == "" {
+		return nil
+	}
+	// Check if there is a tag.
+	if appriseTag == "" {
+		return errors.New("an apprise tag is required")
+	}
+
+	// Create notifier.
+	appriseNotifier = &apprise.Notifier{
+		URL:           appriseURL,
+		DefaultType:   apprise.TypeInfo,
+		DefaultTag:    appriseTag,
+		DefaultFormat: apprise.FormatMarkdown,
+		AllowUntagged: false,
+	}
+
+	if appriseClientCert != "" || appriseClientKey != "" {
+		// Load client cert from disk.
+		cert, err := tls.LoadX509KeyPair(appriseClientCert, appriseClientKey)
+		if err != nil {
+			return fmt.Errorf("failed to load client cert/key: %w", err)
+		}
+
+		// Set client cert in http client.
+		appriseNotifier.SetClient(&http.Client{
+			Transport: &http.Transport{
+				TLSClientConfig: &tls.Config{
+					MinVersion:   tls.VersionTLS12,
+					Certificates: []tls.Certificate{cert},
+				},
+			},
+			Timeout: 10 * time.Second,
+		})
+	}
+
+	if appriseGreet {
+		err := appriseNotifier.Send(appriseModule.Ctx, &apprise.Message{
+			Title: "👋 Observation Hub Reporting In",
+			Body:  "I am the Observation Hub. I am connected to the SPN and watch out for it. I will report notable changes to the network here.",
+		})
+		if err != nil {
+			log.Warningf("apprise: failed to send test message: %s", err)
+		} else {
+			log.Info("apprise: sent greeting message")
+		}
+	}
+
+	return nil
+}
+
+func reportToApprise(change *observedChange) (errs error) {
+	// Check if configured.
+	if appriseNotifier == nil {
+		return nil
+	}
+
+handleTag:
+	for _, tag := range strings.Split(appriseNotifier.DefaultTag, ",") {
+		// Check if we are shutting down.
+		if appriseModule.IsStopping() {
+			return nil
+		}
+
+		// Render notification based on tag / destination.
+		buf := &bytes.Buffer{}
+		switch {
+		case strings.HasPrefix(tag, "matrix-"):
+			if err := templates.ExecuteTemplate(buf, "matrix-notification", change); err != nil {
+				return fmt.Errorf("failed to render notification: %w", err)
+			}
+
+		case strings.HasPrefix(tag, "discord-"):
+			if err := templates.ExecuteTemplate(buf, "discord-notification", change); err != nil {
+				return fmt.Errorf("failed to render notification: %w", err)
+			}
+
+		default:
+			// Use matrix notification template as default for now.
+			if err := templates.ExecuteTemplate(buf, "matrix-notification", change); err != nil {
+				return fmt.Errorf("failed to render notification: %w", err)
+			}
+		}
+
+		// Send notification to apprise.
+		var err error
+		for i := 0; i < 3; i++ {
+			// Try three times.
+			err = appriseNotifier.Send(appriseModule.Ctx, &apprise.Message{
+				Body: buf.String(),
+				Tag:  tag,
+			})
+			if err == nil {
+				continue handleTag
+			}
+			// Wait for 5 seconds, then try again.
+			time.Sleep(5 * time.Second)
+		}
+		// Add error to errors.
+		if err != nil {
+			errs = errors.Join(errs, fmt.Errorf("| failed to send: %w", err))
+		}
+	}
+
+	return errs
+}
+
+// var (
+// 	entityTemplate = template.Must(template.New("entity").Parse(
+// 		`Entity: {{ . }}
+// {{ .IP }} [{{ .ASN }} - {{ .ASOrg }}]
+// `,
+// 	))
+
+// 	// {{ with .GetCountryInfo -}}
+// 	// {{ .Name }} ({{ .Code }})
+// 	// {{- end }}
+
+// 	matrixTemplate = template.Must(template.New("matrix observer notification").Parse(
+// 		`{{ .Title }}
+// {{ if .Summary }}
+// Details:
+// {{ .Summary }}
+
+// Note: Changes were registered at {{ .UpdateTime }} and were possibly merged.
+// {{ end }}
+
+// {{ template "entity" .UpdatedPin.EntityV4 }}
+
+// Hub Info:
+// Test: {{ .UpdatedPin.EntityV4 }}
+// {{ template "entity" .UpdatedPin.EntityV4 }}
+// {{ template "entity" .UpdatedPin.EntityV6 }}
+// `,
+// 	))
+
+// 	discordTemplate = template.Must(template.New("discord observer notification").Parse(
+// 		``,
+// 	))
+
+// 	defaultTemplate = template.Must(template.New("default observer notification").Parse(
+// 		``,
+// 	))
+// )
+
+var (
+	//go:embed notifications.tmpl
+	templateFile string
+	templates    = template.Must(template.New("notifications").Funcs(
+		template.FuncMap{
+			"joinStrings":    joinStrings,
+			"textBlock":      textBlock,
+			"getCountryInfo": getCountryInfo,
+		},
+	).Parse(templateFile))
+)
+
+func joinStrings(slice []string, sep string) string {
+	return strings.Join(slice, sep)
+}
+
+func textBlock(block, addPrefix, addSuffix string) string {
+	// Trim whitespaces.
+	block = strings.TrimSpace(block)
+
+	// Prepend and append string for every line.
+	lines := strings.Split(block, "\n")
+	for i, line := range lines {
+		lines[i] = addPrefix + line + addSuffix
+	}
+
+	// Return as block.
+	return strings.Join(lines, "\n")
+}
+
+func getCountryInfo(code string) geoip.CountryInfo {
+	// Get the country info directly instead of via the entity location,
+	// so it also works in test without the geoip module.
+	return geoip.GetCountryInfo(code)
+}
+
+// func init() {
+// 	templates = template.Must(template.New(templateFile).Parse(templateFile))
+
+// 	nt, err := templates.New("entity").Parse(
+// 		`Entity: {{ . }}
+// {{ .IP }} [{{ .ASN }} - {{ .ASOrg }}]
+// `,
+// 	)
+// 	if err != nil {
+// 		panic(err)
+// 	}
+// 	templates.AddParseTree(nt.Tree)
+
+// 	if _, err := templates.New("matrix-notification").Parse(
+// 		`{{ .Title }}
+// {{ if .Summary }}
+// Details:
+// {{ .Summary }}
+
+// Note: Changes were registered at {{ .UpdateTime }} and were possibly merged.
+// {{ end }}
+
+// {{ template "entity" .UpdatedPin.EntityV4 }}
+
+// Hub Info:
+// Test: {{ .UpdatedPin.EntityV4 }}
+// {{ template "entity" .UpdatedPin.EntityV4 }}
+// {{ template "entity" .UpdatedPin.EntityV6 }}
+// `,
+// 	); err != nil {
+// 		panic(err)
+// 	}
+// }
diff --git a/cmds/observation-hub/apprise_test.go b/cmds/observation-hub/apprise_test.go
new file mode 100644
index 00000000..e0397858
--- /dev/null
+++ b/cmds/observation-hub/apprise_test.go
@@ -0,0 +1,84 @@
+package main
+
+import (
+	"bytes"
+	"fmt"
+	"net"
+	"testing"
+	"time"
+
+	"github.com/safing/portmaster/service/intel"
+	"github.com/safing/portmaster/service/network/netutils"
+	"github.com/safing/portmaster/spn/hub"
+	"github.com/safing/portmaster/spn/navigator"
+)
+
+var observedTestChange = &observedChange{
+	Title: "Hub Changed: fogos (8uLe-zUkC)",
+	Summary: `ConnectedTo.ZwqBAzGqifBAFKFW1GQijNM18pi7BnWH34GyKBF7KB5fC5.HubID removed ZwqBAzGqifBAFKFW1GQijNM18pi7BnWH34GyKBF7KB5fC5
+	ConnectedTo.ZwqBAzGqifBAFKFW1GQijNM18pi7BnWH34GyKBF7KB5fC5.Capacity removed 3403661
+	ConnectedTo.ZwqBAzGqifBAFKFW1GQijNM18pi7BnWH34GyKBF7KB5fC5.Latency removed 252.350006ms`,
+	UpdatedPin: &navigator.PinExport{
+		ID:        "Zwtb8EKMatnMRkW1VaLh8CPV3QswD9iuRU4Sda8uLezUkC",
+		Name:      "fogos",
+		Map:       "main",
+		FirstSeen: time.Now(),
+		EntityV4: &intel.Entity{
+			IP:      net.IPv4(138, 201, 140, 70),
+			IPScope: netutils.Global,
+			Country: "DE",
+			ASN:     24940,
+			ASOrg:   "Hetzner Online GmbH",
+		},
+		States:        []string{"HasRequiredInfo", "Reachable", "Active", "Trusted"},
+		VerifiedOwner: "Safing",
+		HopDistance:   3,
+		SessionActive: false,
+		Info: &hub.Announcement{
+			ID:             "Zwtb8EKMatnMRkW1VaLh8CPV3QswD9iuRU4Sda8uLezUkC",
+			Timestamp:      1677682008,
+			Name:           "fogos",
+			Group:          "Safing",
+			ContactAddress: "abuse@safing.io",
+			ContactService: "email",
+			Hosters:        []string{"Hetzner"},
+			Datacenter:     "DE-Hetzner-FSN",
+			IPv4:           net.IPv4(138, 201, 140, 70),
+			IPv6:           net.ParseIP("2a01:4f8:172:3753::2"),
+			Transports:     []string{"tcp:17", "tcp:17017"},
+			Entry:          []string{},
+			Exit:           []string{"- * TCP/25"},
+		},
+		Status: &hub.Status{
+			Timestamp: 1694180778,
+			Version:   "0.6.19 ",
+		},
+	},
+	UpdateTime: time.Now(),
+}
+
+func TestNotificationTemplate(t *testing.T) {
+	t.Parallel()
+
+	fmt.Println("==========\nFound templates:")
+	for _, tpl := range templates.Templates() {
+		fmt.Println(tpl.Name())
+	}
+	fmt.Println("")
+
+	fmt.Println("\n\n==========\nMatrix template:")
+	matrixOutput := &bytes.Buffer{}
+	err := templates.ExecuteTemplate(matrixOutput, "matrix-notification", observedTestChange)
+	if err != nil {
+		t.Errorf("failed to render matrix template: %s", err)
+	}
+	fmt.Println(matrixOutput.String())
+
+	fmt.Println("\n\n==========\nDiscord template:")
+	discordOutput := &bytes.Buffer{}
+	err = templates.ExecuteTemplate(discordOutput, "discord-notification", observedTestChange)
+	if err != nil {
+		t.Errorf("failed to render discord template: %s", err)
+	}
+	fmt.Println(discordOutput.String())
+}
diff --git a/cmds/observation-hub/build b/cmds/observation-hub/build
new file mode 100755
index 00000000..055874ef
--- /dev/null
+++ b/cmds/observation-hub/build
@@ -0,0 +1,60 @@
+#!/bin/bash
+
+# get build data
+if [[ "$BUILD_COMMIT" == "" ]]; then
+  BUILD_COMMIT=$(git describe --all --long --abbrev=99 --dirty 2>/dev/null)
+fi
+if [[ "$BUILD_USER" == "" ]]; then
+  BUILD_USER=$(id -un)
+fi
+if [[ "$BUILD_HOST" == "" ]]; then
+  BUILD_HOST=$(hostname -f)
+fi
+if [[ "$BUILD_DATE" == "" ]]; then
+  BUILD_DATE=$(date +%d.%m.%Y)
+fi
+if [[ "$BUILD_SOURCE" == "" ]]; then
+  BUILD_SOURCE=$(git remote -v | grep origin | cut -f2 | cut -d" " -f1 | head -n 1)
+fi
+if [[ "$BUILD_SOURCE" == "" ]]; then
+  BUILD_SOURCE=$(git remote -v | cut -f2 | cut -d" " -f1 | head -n 1)
+fi
+BUILD_BUILDOPTIONS=$(echo $* | sed "s/ /§/g")
+
+# check
+if [[ "$BUILD_COMMIT" == "" ]]; then
+  echo "could not automatically determine BUILD_COMMIT, please supply manually as environment variable."
+  exit 1
+fi
+if [[ "$BUILD_USER" == "" ]]; then
+  echo "could not automatically determine BUILD_USER, please supply manually as environment variable."
+  exit 1
+fi
+if [[ "$BUILD_HOST" == "" ]]; then
+  echo "could not automatically determine BUILD_HOST, please supply manually as environment variable."
+  exit 1
+fi
+if [[ "$BUILD_DATE" == "" ]]; then
+  echo "could not automatically determine BUILD_DATE, please supply manually as environment variable."
+  exit 1
+fi
+if [[ "$BUILD_SOURCE" == "" ]]; then
+  echo "could not automatically determine BUILD_SOURCE, please supply manually as environment variable."
+  exit 1
+fi
+
+# set build options
+export CGO_ENABLED=0
+if [[ $1 == "dev" ]]; then
+  shift
+  export CGO_ENABLED=1
+  DEV="-race"
+fi
+
+echo "Please notice, that this build script includes metadata into the build."
+echo "This information is useful for debugging and license compliance."
+echo "Run the compiled binary with the -version flag to see the information included."
+
+# build
+BUILD_PATH="github.com/safing/portbase/info"
+go build $DEV -ldflags "-X ${BUILD_PATH}.commit=${BUILD_COMMIT} -X ${BUILD_PATH}.buildOptions=${BUILD_BUILDOPTIONS} -X ${BUILD_PATH}.buildUser=${BUILD_USER} -X ${BUILD_PATH}.buildHost=${BUILD_HOST} -X ${BUILD_PATH}.buildDate=${BUILD_DATE} -X ${BUILD_PATH}.buildSource=${BUILD_SOURCE}" $*
diff --git a/cmds/observation-hub/main.go b/cmds/observation-hub/main.go
new file mode 100644
index 00000000..c82f3a17
--- /dev/null
+++ b/cmds/observation-hub/main.go
@@ -0,0 +1,44 @@
+package main
+
+import (
+	"fmt"
+	"os"
+	"runtime"
+
+	"github.com/safing/portbase/api"
+	"github.com/safing/portbase/info"
+	"github.com/safing/portbase/metrics"
+	"github.com/safing/portbase/modules"
+	"github.com/safing/portbase/run"
+	"github.com/safing/portmaster/service/updates"
+	"github.com/safing/portmaster/service/updates/helper"
+	"github.com/safing/portmaster/spn/captain"
+	"github.com/safing/portmaster/spn/conf"
+	"github.com/safing/portmaster/spn/sluice"
+)
+
+func main() {
+	info.Set("SPN Observation Hub", "0.7.1", "AGPLv3", true)
+
+	// Configure metrics.
+	_ = metrics.SetNamespace("observer")
+
+	// Configure user agent.
+	updates.UserAgent = fmt.Sprintf("SPN Observation Hub (%s %s)", runtime.GOOS, runtime.GOARCH)
+	helper.IntelOnly()
+
+	// Configure SPN mode.
+	conf.EnableClient(true)
+	conf.EnablePublicHub(false)
+	captain.DisableAccount = true
+
+	// Disable unneeded listeners.
+	sluice.EnableListener = false
+	api.EnableServer = false
+
+	// Disable module management, as we want to start all modules.
+	modules.DisableModuleManagement()
+
+	// Start.
+	os.Exit(run.Run())
+}
diff --git a/cmds/observation-hub/notifications.tmpl b/cmds/observation-hub/notifications.tmpl
new file mode 100644
index 00000000..8a25175f
--- /dev/null
+++ b/cmds/observation-hub/notifications.tmpl
@@ -0,0 +1,75 @@
+{{ define "entity" -}}
+  {{ .IP }} [AS{{ .ASN }} - {{ .ASOrg }}] in {{ if .Country }}
+    {{- with getCountryInfo .Country -}}
+      {{ .Name }} ({{ .Code }}; Region {{ .Continent.Region }})
+    {{- end }}
+  {{- end }}
+{{- end }}
+
+{{ define "matrix-notification" -}}
+### 🌍 {{ .Title }}{{ if .Summary }}
+
+{{ textBlock .Summary "" "  " }}
+{{ end }}
+
+> Note: Changes were registered at {{ .UpdateTime.UTC.Format "15:04:05 02.01.2006 MST" }} and were possibly merged.
+
+##### Hub Info
+
+> Name: {{ .UpdatedPin.Name }}  
+> ID: {{ .UpdatedPin.ID }}  
+> IPv4: {{ if .UpdatedPin.EntityV4 }}{{ template "entity" .UpdatedPin.EntityV4 }}{{ end }}  
+> IPv6: {{ if .UpdatedPin.EntityV6 }}{{ template "entity" .UpdatedPin.EntityV6 }}{{ end }}  
+> Version: {{ .UpdatedPin.Status.Version }}  
+> States: {{ joinStrings .UpdatedPin.States ", " }}  
+> Status: {{ len .UpdatedPin.Status.Lanes }} Lanes, {{ len .UpdatedPin.Status.Keys }} Keys, {{ .UpdatedPin.Status.Load }} Load  
+> Verified Owner: {{ .UpdatedPin.VerifiedOwner }}  
+> Transports: {{ joinStrings .UpdatedPin.Info.Transports ", " }}  
+> Entry: {{ joinStrings .UpdatedPin.Info.Entry ", " }}  
+> Exit: {{ joinStrings .UpdatedPin.Info.Exit ", " }}  
+> Relations: {{ if .UpdatedPin.Info.Group -}}
+Group={{ .UpdatedPin.Info.Group }} {{ end }}
+
+{{- if .UpdatedPin.Info.Datacenter -}}
+Datacenter={{ .UpdatedPin.Info.Datacenter }} {{ end }}
+
+{{- if .UpdatedPin.Info.Hosters -}}
+Hosters={{ joinStrings .UpdatedPin.Info.Hosters ";" }} {{ end }}
+
+{{- if .UpdatedPin.Info.ContactAddress -}}
+Contact= {{ .UpdatedPin.Info.ContactAddress }}{{ if .UpdatedPin.Info.ContactService }} via {{ .UpdatedPin.Info.ContactService }}{{ end }}{{ end }}
+
+{{- end }}
+
+{{ define "discord-notification" -}}
+# 🌍 {{ .Title }}{{ if .Summary }}
+
+{{ .Summary }}
+{{- end }}
+
+##### Note: Changes were registered at {{ .UpdateTime.UTC.Format "15:04:05 02.01.2006 MST" }} and were possibly merged. - Hub Info:
+
+Name: {{ .UpdatedPin.Name }}
+ID: {{ .UpdatedPin.ID }}
+IPv4: {{ if .UpdatedPin.EntityV4 }}{{ template "entity" .UpdatedPin.EntityV4 }}{{ end }}
+IPv6: {{ if .UpdatedPin.EntityV6 }}{{ template "entity" .UpdatedPin.EntityV6 }}{{ end }}
+Version: {{ .UpdatedPin.Status.Version }}
+States: {{ joinStrings .UpdatedPin.States ", " }}
+Status: {{ len .UpdatedPin.Status.Lanes }} Lanes, {{ len .UpdatedPin.Status.Keys }} Keys, {{ .UpdatedPin.Status.Load }} Load
+Verified Owner: {{ .UpdatedPin.VerifiedOwner }}
+Transports: {{ joinStrings .UpdatedPin.Info.Transports ", " }}
+Entry: {{ joinStrings .UpdatedPin.Info.Entry ", " }}
+Exit: {{ joinStrings .UpdatedPin.Info.Exit ", " }}
+Relations: {{ if .UpdatedPin.Info.Group -}}
+Group={{ .UpdatedPin.Info.Group }} {{ end }}
+
+{{- if .UpdatedPin.Info.Datacenter -}}
+Datacenter={{ .UpdatedPin.Info.Datacenter }} {{ end }}
+
+{{- if .UpdatedPin.Info.Hosters -}}
+Hosters={{ joinStrings .UpdatedPin.Info.Hosters ";" }} {{ end }}
+
+{{- if .UpdatedPin.Info.ContactAddress -}}
+Contact= {{ .UpdatedPin.Info.ContactAddress }}{{ if .UpdatedPin.Info.ContactService }} via {{ .UpdatedPin.Info.ContactService }}{{ end }}{{ end }}
+
+{{- end }}
diff --git a/cmds/observation-hub/observe.go b/cmds/observation-hub/observe.go
new file mode 100644
index 00000000..371b8692
--- /dev/null
+++ b/cmds/observation-hub/observe.go
@@ -0,0 +1,407 @@
+package main
+
+import (
+	"context"
+	"errors"
+	"flag"
+	"fmt"
+	"path"
+	"strings"
+	"time"
+
+	diff "github.com/r3labs/diff/v3"
+	"golang.org/x/exp/slices"
+
+	"github.com/safing/portbase/database"
+	"github.com/safing/portbase/database/query"
+	"github.com/safing/portbase/log"
+	"github.com/safing/portbase/modules"
+	"github.com/safing/portmaster/spn/captain"
+	"github.com/safing/portmaster/spn/navigator"
+)
+
+var (
+	observerModule *modules.Module
+
+	db = database.NewInterface(&database.Options{
+		Local:    true,
+		Internal: true,
+	})
+
+	reportAllChanges bool
+
+	errNoChanges = errors.New("no changes")
+
+	reportingDelayFlag string
+	reportingDelay     = 10 * time.Minute
+)
+
+func init() {
+	observerModule = modules.Register("observer", prepObserver, startObserver, nil, "captain", "apprise")
+
+	flag.BoolVar(&reportAllChanges, "report-all-changes", false, "report all changes, not just interesting ones")
+	flag.StringVar(&reportingDelayFlag, "reporting-delay", "10m", "delay reports to summarize changes")
+}
+
+func prepObserver() error {
+	if reportingDelayFlag != "" {
+		duration, err := time.ParseDuration(reportingDelayFlag)
+		if err != nil {
+			return fmt.Errorf("failed to parse reporting-delay: %w", err)
+		}
+		reportingDelay = duration
+	}
+
+	return nil
+}
+
+func startObserver() error {
+	observerModule.StartServiceWorker("observer", 0, observerWorker)
+
+	return nil
+}
+
+type observedPin struct {
+	previous *navigator.PinExport
+	latest   *navigator.PinExport
+
+	lastUpdate         time.Time
+	lastUpdateReported bool
+}
+
+type observedChange struct {
+	Title   string
+	Summary string
+
+	UpdatedPin *navigator.PinExport
+	UpdateTime time.Time
+
+	SPNStatus *captain.SPNStatus
+}
+
+func observerWorker(ctx context.Context) error {
+	log.Info("observer: starting")
+	defer log.Info("observer: stopped")
+
+	// Subscribe to SPN status.
+	statusSub, err := db.Subscribe(query.New("runtime:spn/status"))
+	if err != nil {
+		return fmt.Errorf("failed to subscribe to spn status: %w", err)
+	}
+	defer statusSub.Cancel() //nolint:errcheck
+
+	// Get latest status.
+	latestStatus := captain.GetSPNStatus()
+
+	// Step 1: Wait for SPN to connect, if needed.
+	if latestStatus.Status != captain.StatusConnected {
+		log.Info("observer: waiting for SPN to connect")
+	waitForConnect:
+		for {
+			select {
+			case r := <-statusSub.Feed:
+				if r == nil {
+					return errors.New("status feed ended")
+				}
+
+				statusUpdate, ok := r.(*captain.SPNStatus)
+				switch {
+				case !ok:
+					log.Warningf("observer: received invalid SPN status: %s", r)
+				case statusUpdate.Status == captain.StatusFailed:
+					log.Warningf("observer: SPN failed to connect")
+				case statusUpdate.Status == captain.StatusConnected:
+					break waitForConnect
+				}
+			case <-ctx.Done():
+				return nil
+			}
+		}
+	}
+
+	// Wait for one second for the navigator to settle things.
+	log.Info("observer: connected to network, waiting for navigator")
+	time.Sleep(1 * time.Second)
+
+	// Step 2: Get current state.
+	mapQuery := query.New("map:main/")
+	q, err := db.Query(mapQuery)
+	if err != nil {
+		return fmt.Errorf("failed to start map query: %w", err)
+	}
+	defer q.Cancel()
+
+	// Put all current pins in a map.
+	observedPins := make(map[string]*observedPin)
+query:
+	for {
+		select {
+		case r := <-q.Next:
+			// Check if we are done.
+			if r == nil {
+				break query
+			}
+			// Add all pins to seen pins.
+			if pin, ok := r.(*navigator.PinExport); ok {
+				observedPins[pin.ID] = &observedPin{
+					previous: pin,
+					latest:   pin,
+				}
+			} else {
+				log.Warningf("observer: received invalid pin export: %s", r)
+			}
+		case <-ctx.Done():
+			return nil
+		}
+	}
+	if q.Err() != nil {
+		return fmt.Errorf("failed to finish map query: %w", q.Err())
+	}
+
+	// Step 3: Monitor for changes.
+	sub, err := db.Subscribe(mapQuery)
+	if err != nil {
+		return fmt.Errorf("failed to start map sub: %w", err)
+	}
+	defer sub.Cancel() //nolint:errcheck
+
+	// Start ticker for checking for changes.
+	reportChangesTicker := time.NewTicker(10 * time.Second)
+	defer reportChangesTicker.Stop()
+
+	log.Info("observer: listening for hub changes")
+	for {
+		select {
+		case <-ctx.Done():
+			return nil
+
+		case r := <-statusSub.Feed:
+			// Keep SPN connection status up to date.
+			if r == nil {
+				return errors.New("status feed ended")
+			}
+			if statusUpdate, ok := r.(*captain.SPNStatus); ok {
+				latestStatus = statusUpdate
+				log.Infof("observer: SPN status is now %s", statusUpdate.Status)
+			} else {
+				log.Warningf("observer: received invalid pin export: %s", r)
+			}
+
+		case r := <-sub.Feed:
+			// Save all observed pins.
+			switch {
+			case r == nil:
+				return errors.New("pin feed ended")
+			case r.Meta().IsDeleted():
+				delete(observedPins, path.Base(r.DatabaseKey()))
+			default:
+				if pin, ok := r.(*navigator.PinExport); ok {
+					existingObservedPin, ok := observedPins[pin.ID]
+					if ok {
+						// Update previously observed Hub.
+						existingObservedPin.latest = pin
+						existingObservedPin.lastUpdate = time.Now()
+						existingObservedPin.lastUpdateReported = false
+					} else {
+						// Add new Hub.
+						observedPins[pin.ID] = &observedPin{
+							latest:     pin,
+							lastUpdate: time.Now(),
+						}
+					}
+				} else {
+					log.Warningf("observer: received invalid pin export: %s", r)
+				}
+			}
+
+		case <-reportChangesTicker.C:
+			// Report changed pins.
+
+			for _, observedPin := range observedPins {
+				// Check if context was canceled.
+				select {
+				case <-ctx.Done():
+					return nil
+				default:
+				}
+
+				switch {
+				case observedPin.lastUpdateReported:
+					// Change already reported.
+				case time.Since(observedPin.lastUpdate) < reportingDelay:
+					// Only report changes if older than the configured delay.
+				default:
+					// Format and report.
+					title, changes, err := formatPinChanges(observedPin.previous, observedPin.latest)
+					if err != nil {
+						if !errors.Is(err, errNoChanges) {
+							log.Warningf("observer: failed to format pin changes: %s", err)
+						}
+					} else {
+						// Report changes.
+						reportChanges(&observedChange{
+							Title:      title,
+							Summary:    changes,
+							UpdatedPin: observedPin.latest,
+							UpdateTime: observedPin.lastUpdate,
+							SPNStatus:  latestStatus,
+						})
+					}
+
+					// Update observed pin.
+					observedPin.previous = observedPin.latest
+					observedPin.lastUpdateReported = true
+				}
+			}
+		}
+	}
+}
+
+func reportChanges(change *observedChange) {
+	// Log changes.
+	log.Infof("observer:\n%s\n%s", change.Title, change.Summary)
+
+	// Report via Apprise.
+	err := reportToApprise(change)
+	if err != nil {
+		log.Warningf("observer: failed to report changes to apprise: %s", err)
+	}
+}
+
+var (
+	ignoreChangesIn = []string{
+		"ConnectedTo",
+		"HopDistance",
+		"Info.entryPolicy", // Alternatively, ignore "Info.Entry"
+		"Info.exitPolicy",  // Alternatively, ignore "Info.Exit"
+		"Info.parsedTransports",
+		"Info.Timestamp",
+		"SessionActive",
+		"Status.Keys",
+		"Status.Lanes",
+		"Status.Load",
+		"Status.Timestamp",
+	}
+
+	ignoreStates = []string{
+		"IsHomeHub",
+		"Failing",
+	}
+)
+
+func ignoreChange(path string) bool {
+	if reportAllChanges {
+		return false
+	}
+
+	for _, pathPrefix := range ignoreChangesIn {
+		if strings.HasPrefix(path, pathPrefix) {
+			return true
+		}
+	}
+	return false
+}
+
+func formatPinChanges(from, to *navigator.PinExport) (title, changes string, err error) {
+	// Return immediately if pin is new.
+	if from == nil {
+		return fmt.Sprintf("New Hub: %s", makeHubName(to.Name, to.ID)), "", nil
+	}
+
+	// Find notable changes.
+	changelog, err := diff.Diff(from, to)
+	if err != nil {
+		return "", "", fmt.Errorf("failed to diff: %w", err)
+	}
+	if len(changelog) > 0 {
+		// Build changelog message.
+		changes := make([]string, 0, len(changelog))
+		for _, change := range changelog {
+			// Create path to changed field.
+			fullPath := strings.Join(change.Path, ".")
+
+			// Check if this path should be ignored.
+			if ignoreChange(fullPath) {
+				continue
+			}
+
+			// Add to reportable changes.
+			changeMsg := formatChange(change, fullPath)
+			if changeMsg != "" {
+				changes = append(changes, changeMsg)
+			}
+		}
+
+		// Log the changes, if there are any left.
+		if len(changes) > 0 {
+			return fmt.Sprintf("Hub Changed: %s", makeHubName(to.Name, to.ID)),
+				strings.Join(changes, "\n"),
+				nil
+		}
+	}
+
+	return "", "", errNoChanges
+}
+
+func formatChange(change diff.Change, fullPath string) string {
+	switch {
+	case strings.HasPrefix(fullPath, "States"):
+		switch change.Type {
+		case diff.CREATE:
+			return formatState(fmt.Sprintf("%v", change.To), true)
+		case diff.UPDATE:
+			a := formatState(fmt.Sprintf("%v", change.To), true)
+			b := formatState(fmt.Sprintf("%v", change.From), false)
+			switch {
+			case a != "" && b != "":
+				return a + "\n" + b
+			case a != "":
+				return a
+			case b != "":
+				return b
+			}
+		case diff.DELETE:
+			return formatState(fmt.Sprintf("%v", change.From), false)
+		}
+
+	default:
+		switch change.Type {
+		case diff.CREATE:
+			return fmt.Sprintf("%s added %v", fullPath, change.To)
+		case diff.UPDATE:
+			return fmt.Sprintf("%s changed from %v to %v", fullPath, change.From, change.To)
+		case diff.DELETE:
+			return fmt.Sprintf("%s removed %v", fullPath, change.From)
+		}
+	}
+
+	return ""
+}
+
+func formatState(name string, isSet bool) string {
+	// Check if state should be ignored.
+	if !reportAllChanges && slices.Contains[[]string, string](ignoreStates, name) {
+		return ""
+	}
+
+	if isSet {
+		return fmt.Sprintf("State is %v", name)
+	}
+	return fmt.Sprintf("State is NOT %v", name)
+}
+
+func makeHubName(name, id string) string {
+	shortenedID := id[len(id)-8:len(id)-4] +
+		"-" +
+		id[len(id)-4:]
+
+	// Be more careful, as the Hub name is user input.
+	switch {
+	case name == "":
+		return shortenedID
+	case len(name) > 16:
+		return fmt.Sprintf("%s (%s)", name[:16], shortenedID)
+	default:
+		return fmt.Sprintf("%s (%s)", name, shortenedID)
+	}
+}
diff --git a/cmds/portmaster-core/main.go b/cmds/portmaster-core/main.go
index 517f604a..62d05e5d 100644
--- a/cmds/portmaster-core/main.go
+++ b/cmds/portmaster-core/main.go
@@ -10,16 +10,16 @@ import (
 	"github.com/safing/portbase/log"
 	"github.com/safing/portbase/metrics"
 	"github.com/safing/portbase/run"
-	"github.com/safing/portmaster/updates"
-	"github.com/safing/spn/conf"
+	"github.com/safing/portmaster/service/updates"
+	"github.com/safing/portmaster/spn/conf"
 
 	// Include packages here.
 	_ "github.com/safing/portbase/modules/subsystems"
-	_ "github.com/safing/portmaster/core"
-	_ "github.com/safing/portmaster/firewall"
-	_ "github.com/safing/portmaster/nameserver"
-	_ "github.com/safing/portmaster/ui"
-	_ "github.com/safing/spn/captain"
+	_ "github.com/safing/portmaster/service/core"
+	_ "github.com/safing/portmaster/service/firewall"
+	_ "github.com/safing/portmaster/service/nameserver"
+	_ "github.com/safing/portmaster/service/ui"
+	_ "github.com/safing/portmaster/spn/captain"
 )
 
 func main() {
diff --git a/cmds/portmaster-start/main.go b/cmds/portmaster-start/main.go
index 246b2075..1e0c66aa 100644
--- a/cmds/portmaster-start/main.go
+++ b/cmds/portmaster-start/main.go
@@ -20,7 +20,7 @@ import (
 	portlog "github.com/safing/portbase/log"
 	"github.com/safing/portbase/updater"
 	"github.com/safing/portbase/utils"
-	"github.com/safing/portmaster/updates/helper"
+	"github.com/safing/portmaster/service/updates/helper"
 )
 
 var (
diff --git a/cmds/portmaster-start/recover_linux.go b/cmds/portmaster-start/recover_linux.go
index ecb9a219..96719bd8 100644
--- a/cmds/portmaster-start/recover_linux.go
+++ b/cmds/portmaster-start/recover_linux.go
@@ -9,7 +9,7 @@ import (
 	"github.com/hashicorp/go-multierror"
 	"github.com/spf13/cobra"
 
-	"github.com/safing/portmaster/firewall/interception"
+	"github.com/safing/portmaster/service/firewall/interception"
 )
 
 var recoverIPTablesCmd = &cobra.Command{
diff --git a/cmds/portmaster-start/run.go b/cmds/portmaster-start/run.go
index 0536f30a..12606d8e 100644
--- a/cmds/portmaster-start/run.go
+++ b/cmds/portmaster-start/run.go
@@ -16,7 +16,7 @@ import (
 	"github.com/spf13/cobra"
 	"github.com/tevino/abool"
 
-	"github.com/safing/portmaster/updates/helper"
+	"github.com/safing/portmaster/service/updates/helper"
 )
 
 const (
diff --git a/cmds/portmaster-start/show.go b/cmds/portmaster-start/show.go
index 207b7183..7ae6fc85 100644
--- a/cmds/portmaster-start/show.go
+++ b/cmds/portmaster-start/show.go
@@ -6,7 +6,7 @@ import (
 
 	"github.com/spf13/cobra"
 
-	"github.com/safing/portmaster/updates/helper"
+	"github.com/safing/portmaster/service/updates/helper"
 )
 
 func init() {
diff --git a/cmds/portmaster-start/update.go b/cmds/portmaster-start/update.go
index 805e0cae..bbcec860 100644
--- a/cmds/portmaster-start/update.go
+++ b/cmds/portmaster-start/update.go
@@ -10,7 +10,7 @@ import (
 
 	portlog "github.com/safing/portbase/log"
 	"github.com/safing/portbase/updater"
-	"github.com/safing/portmaster/updates/helper"
+	"github.com/safing/portmaster/service/updates/helper"
 )
 
 var (
diff --git a/cmds/portmaster-start/verify.go b/cmds/portmaster-start/verify.go
index ded921b8..7fb7be08 100644
--- a/cmds/portmaster-start/verify.go
+++ b/cmds/portmaster-start/verify.go
@@ -15,7 +15,7 @@ import (
 	"github.com/safing/jess/filesig"
 	portlog "github.com/safing/portbase/log"
 	"github.com/safing/portbase/updater"
-	"github.com/safing/portmaster/updates/helper"
+	"github.com/safing/portmaster/service/updates/helper"
 )
 
 var (
diff --git a/cmds/testsuite/.gitignore b/cmds/testsuite/.gitignore
new file mode 100644
index 00000000..08e00271
--- /dev/null
+++ b/cmds/testsuite/.gitignore
@@ -0,0 +1,3 @@
+# Compiled binaries
+testsuite
+testsuite.exe
diff --git a/cmds/testsuite/db.go b/cmds/testsuite/db.go
new file mode 100644
index 00000000..848e4d89
--- /dev/null
+++ b/cmds/testsuite/db.go
@@ -0,0 +1,33 @@
+package main
+
+import (
+	"github.com/safing/portbase/database"
+	_ "github.com/safing/portbase/database/storage/hashmap"
+)
+
+func setupDatabases(path string) error {
+	err := database.InitializeWithPath(path)
+	if err != nil {
+		return err
+	}
+
+	_, err = database.Register(&database.Database{
+		Name:        "core",
+		Description: "Holds core data, such as settings and profiles",
+		StorageType: "hashmap",
+	})
+	if err != nil {
+		return err
+	}
+
+	_, err = database.Register(&database.Database{
+		Name:        "cache",
+		Description: "Cached data, such as Intelligence and DNS Records",
+		StorageType: "hashmap",
+	})
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
diff --git a/cmds/testsuite/login.go b/cmds/testsuite/login.go
new file mode 100644
index 00000000..bf1e5ef3
--- /dev/null
+++ b/cmds/testsuite/login.go
@@ -0,0 +1,125 @@
+package main
+
+import (
+	"errors"
+	"fmt"
+	"log"
+
+	"github.com/spf13/cobra"
+
+	"github.com/safing/portmaster/spn/access"
+	"github.com/safing/portmaster/spn/access/account"
+)
+
+var (
+	loginCmd = &cobra.Command{
+		Use:   "login",
+		Short: "Test login and token issuing",
+		RunE:  runTestCommand(testLogin),
+	}
+
+	loginUsername string
+	loginPassword string
+	loginDeviceID string
+)
+
+func init() {
+	rootCmd.AddCommand(loginCmd)
+
+	// Add flags for login options.
+	flags := loginCmd.Flags()
+	flags.StringVar(&loginUsername, "username", "", "set username to use for the login test")
+	flags.StringVar(&loginPassword, "password", "", "set password to use for the login test")
+	flags.StringVar(&loginDeviceID, "device-id", "", "set device ID to use for the login test")
+
+	// Mark all as required.
+	_ = loginCmd.MarkFlagRequired("username")
+	_ = loginCmd.MarkFlagRequired("password")
+	_ = loginCmd.MarkFlagRequired("device-id")
+}
+
+func testLogin(cmd *cobra.Command, args []string) (err error) {
+	// Init token zones.
+	err = access.InitializeZones()
+	if err != nil {
+		return fmt.Errorf("failed to initialize token zones: %w", err)
+	}
+
+	// Set initial user object in order to set the device ID that should be used for login.
+	initialUser := &access.UserRecord{
+		User: &account.User{
+			Username: loginUsername,
+			Device: &account.Device{
+				ID: loginDeviceID,
+			},
+		},
+	}
+	err = initialUser.Save()
+	if err != nil {
+		return fmt.Errorf("failed to save initial user with device ID: %w", err)
+	}
+
+	// Login.
+	_, _, err = access.Login(loginUsername, loginPassword)
+	if err != nil {
+		return fmt.Errorf("login failed: %w", err)
+	}
+
+	// Check user.
+	user, err := access.GetUser()
+	if err != nil {
+		return fmt.Errorf("failed to get user after login: %w", err)
+	}
+	if verbose {
+		log.Printf("user (from login): %+v", user.User)
+		log.Printf("device (from login): %+v", user.User.Device)
+	}
+
+	// Check if the device ID is unchanged.
+	if user.Device.ID != loginDeviceID {
+		return errors.New("device ID changed")
+	}
+
+	// Check Auth Token.
+	authToken, err := access.GetAuthToken()
+	if err != nil {
+		return fmt.Errorf("failed to get auth token after login: %w", err)
+	}
+	if verbose {
+		log.Printf("auth token (from login): %+v", authToken.Token)
+	}
+	firstAuthToken := authToken.Token.Token
+
+	// Update User.
+	_, _, err = access.UpdateUser()
+	if err != nil {
+		return fmt.Errorf("failed to update user: %w", err)
+	}
+
+	// Check if we received a new Auth Token.
+	authToken, err = access.GetAuthToken()
+	if err != nil {
+		return fmt.Errorf("failed to get auth token after user update: %w", err)
+	}
+	if verbose {
+		log.Printf("auth token (from update): %+v", authToken.Token)
+	}
+	if authToken.Token.Token == firstAuthToken {
+		return errors.New("auth token did not change after update")
+	}
+
+	// Get Tokens.
+	err = access.UpdateTokens()
+	if err != nil {
+		return fmt.Errorf("failed to update tokens: %w", err)
+	}
+	regular, fallback := access.GetTokenAmount(access.ExpandAndConnectZones)
+	if verbose {
+		log.Printf("received tokens: %d regular, %d fallback", regular, fallback)
+	}
+	if regular == 0 || fallback == 0 {
+		return fmt.Errorf("not enough tokens after fetching: %d regular, %d fallback", regular, fallback)
+	}
+
+	return nil
+}
diff --git a/cmds/testsuite/main.go b/cmds/testsuite/main.go
new file mode 100644
index 00000000..d4edcead
--- /dev/null
+++ b/cmds/testsuite/main.go
@@ -0,0 +1,69 @@
+package main
+
+import (
+	"fmt"
+	"log"
+	"os"
+
+	"github.com/spf13/cobra"
+)
+
+var (
+	rootCmd = &cobra.Command{
+		Use:   "testsuite",
+		Short: "An integration and end-to-end test tool for the SPN",
+	}
+
+	verbose bool
+)
+
+func runTestCommand(cmdFunc func(cmd *cobra.Command, args []string) error) func(cmd *cobra.Command, args []string) error {
+	return func(cmd *cobra.Command, args []string) error {
+		// Setup
+		dbDir, err := os.MkdirTemp("", "spn-testsuite-")
+		if err != nil {
+			makeReports(cmd, fmt.Errorf("internal test error: failed to setup databases: %w", err))
+			return err
+		}
+		if err = setupDatabases(dbDir); err != nil {
+			makeReports(cmd, fmt.Errorf("internal test error: failed to setup databases: %w", err))
+			return err
+		}
+
+		// Run Test
+		err = cmdFunc(cmd, args)
+		if err != nil {
+			log.Printf("test failed: %s", err)
+		}
+
+		// Report
+		makeReports(cmd, err)
+
+		// Cleanup and return more important error.
+		cleanUpErr := os.RemoveAll(dbDir)
+		if cleanUpErr != nil {
+			// Only log if the test failed, so we can return the more important error
+			if err == nil {
+				return cleanUpErr
+			}
+			log.Printf("cleanup failed: %s", cleanUpErr)
+		}
+
+		return err
+	}
+}
+
+func makeReports(cmd *cobra.Command, err error) {
+	reportToHealthCheckIfEnabled(cmd, err)
+}
+
+func init() {
+	flags := rootCmd.PersistentFlags()
+	flags.BoolVarP(&verbose, "verbose", "v", false, "enable verbose logging")
+}
+
+func main() {
+	if err := rootCmd.Execute(); err != nil {
+		os.Exit(1)
+	}
+}
diff --git a/cmds/testsuite/report_healthcheck.go b/cmds/testsuite/report_healthcheck.go
new file mode 100644
index 00000000..4ca9eb4a
--- /dev/null
+++ b/cmds/testsuite/report_healthcheck.go
@@ -0,0 +1,51 @@
+package main
+
+import (
+	"log"
+	"net/http"
+	"strings"
+
+	"github.com/spf13/cobra"
+)
+
+var healthCheckReportURL string
+
+func init() {
+	flags := rootCmd.PersistentFlags()
+	flags.StringVar(&healthCheckReportURL, "report-to-healthcheck", "", "report to the given healthchecks URL")
+}
+
+func reportToHealthCheckIfEnabled(_ *cobra.Command, failureErr error) {
+	if healthCheckReportURL == "" {
+		return
+	}
+
+	if failureErr != nil {
+		// Report failure.
+		resp, err := http.Post(
+			healthCheckReportURL+"/fail",
+			"text/plain; charset=utf-8",
+			strings.NewReader(failureErr.Error()),
+		)
+		if err != nil {
+			log.Printf("failed to report failure to healthcheck at %q: %s", healthCheckReportURL, err)
+			return
+		}
+		_ = resp.Body.Close()
+
+		// Always log that we've reported the error.
+		log.Printf("reported failure to healthcheck at %q", healthCheckReportURL)
+	} else {
+		// Report success.
+		resp, err := http.Get(healthCheckReportURL) //nolint:gosec
+		if err != nil {
+			log.Printf("failed to report success to healthcheck at %q: %s", healthCheckReportURL, err)
+			return
+		}
+		_ = resp.Body.Close()
+
+		if verbose {
+			log.Printf("reported success to healthcheck at %q", healthCheckReportURL)
+		}
+	}
+}
diff --git a/cmds/winkext-test/main.go b/cmds/winkext-test/main.go
index 8c7bf2cf..0a3d8c4b 100644
--- a/cmds/winkext-test/main.go
+++ b/cmds/winkext-test/main.go
@@ -1,3 +1,4 @@
+//go:build windows
 // +build windows
 
 package main
@@ -11,8 +12,8 @@ import (
 	"syscall"
 
 	"github.com/safing/portbase/log"
-	"github.com/safing/portmaster/firewall/interception/windowskext"
-	"github.com/safing/portmaster/network/packet"
+	"github.com/safing/portmaster/service/firewall/interception/windowskext"
+	"github.com/safing/portmaster/service/network/packet"
 )
 
 var (
diff --git a/desktop/angular/.gitkeep b/desktop/angular/.gitkeep
new file mode 100644
index 00000000..e69de29b
diff --git a/desktop/tauri/.gitkeep b/desktop/tauri/.gitkeep
new file mode 100644
index 00000000..e69de29b
diff --git a/go.mod b/go.mod
index 9421ce7f..6a1fc4e3 100644
--- a/go.mod
+++ b/go.mod
@@ -10,6 +10,7 @@ replace github.com/tc-hib/winres => github.com/dhaavi/winres v0.2.2
 require (
 	github.com/Xuanwo/go-locale v1.1.0
 	github.com/agext/levenshtein v1.2.3
+	github.com/awalterschulze/gographviz v2.0.3+incompatible
 	github.com/cilium/ebpf v0.12.3
 	github.com/coreos/go-iptables v0.7.0
 	github.com/florianl/go-conntrack v0.4.0
@@ -25,11 +26,13 @@ require (
 	github.com/mat/besticon v3.12.0+incompatible
 	github.com/miekg/dns v1.1.57
 	github.com/mitchellh/go-server-timing v1.0.1
+	github.com/mr-tron/base58 v1.2.0
 	github.com/oschwald/maxminddb-golang v1.12.0
+	github.com/r3labs/diff/v3 v3.0.1
+	github.com/rot256/pblind v0.0.0-20231024115251-cd3f239f28c1
 	github.com/safing/jess v0.3.3
 	github.com/safing/portbase v0.18.9
 	github.com/safing/portmaster-android/go v0.0.0-20230830120134-3226ceac3bec
-	github.com/safing/spn v0.7.5
 	github.com/shirou/gopsutil v3.21.11+incompatible
 	github.com/spf13/cobra v1.8.0
 	github.com/spkg/zipfs v0.7.1
@@ -53,8 +56,8 @@ require (
 	github.com/aead/serpent v0.0.0-20160714141033-fba169763ea6 // indirect
 	github.com/alessio/shellescape v1.4.2 // indirect
 	github.com/armon/go-radix v1.0.0 // indirect
-	github.com/awalterschulze/gographviz v2.0.3+incompatible // indirect
 	github.com/bluele/gcache v0.0.2 // indirect
+	github.com/brianvoe/gofakeit v3.18.0+incompatible // indirect
 	github.com/danieljoos/wincred v1.2.1 // indirect
 	github.com/davecgh/go-spew v1.1.1 // indirect
 	github.com/dustin/go-humanize v1.0.1 // indirect
@@ -79,12 +82,10 @@ require (
 	github.com/mdlayher/socket v0.5.0 // indirect
 	github.com/mitchellh/copystructure v1.2.0 // indirect
 	github.com/mitchellh/reflectwalk v1.0.2 // indirect
-	github.com/mr-tron/base58 v1.2.0 // indirect
 	github.com/nfnt/resize v0.0.0-20180221191011-83c6a9932646 // indirect
 	github.com/pkg/errors v0.9.1 // indirect
 	github.com/pmezard/go-difflib v1.0.0 // indirect
 	github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect
-	github.com/rot256/pblind v0.0.0-20231024115251-cd3f239f28c1 // indirect
 	github.com/satori/go.uuid v1.2.0 // indirect
 	github.com/seehuhn/fortuna v1.0.1 // indirect
 	github.com/seehuhn/sha256d v1.0.0 // indirect
diff --git a/go.sum b/go.sum
index d9fdd161..e4c9c316 100644
--- a/go.sum
+++ b/go.sum
@@ -203,6 +203,8 @@ github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
 github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
 github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/r3labs/diff/v3 v3.0.1 h1:CBKqf3XmNRHXKmdU7mZP1w7TV0pDyVCis1AUHtA4Xtg=
+github.com/r3labs/diff/v3 v3.0.1/go.mod h1:f1S9bourRbiM66NskseyUdo0fTmEE0qKrikYJX63dgo=
 github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE=
 github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
 github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8=
@@ -246,6 +248,7 @@ github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c=
 github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
 github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
 github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
+github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
 github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
 github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
 github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
@@ -275,6 +278,7 @@ github.com/valyala/histogram v1.2.0 h1:wyYGAZZt3CpwUiIb9AU/Zbllg1llXyrtApRS815OL
 github.com/valyala/histogram v1.2.0/go.mod h1:Hb4kBwb4UxsaNbbbh+RRz8ZR6pdodR57tzWUS3BUzXY=
 github.com/vincent-petithory/dataurl v1.0.0 h1:cXw+kPto8NLuJtlMsI152irrVw9fRDX8AbShPRpg2CI=
 github.com/vincent-petithory/dataurl v1.0.0/go.mod h1:FHafX5vmDzyP+1CQATJn7WFKc9CvnvxyvZy6I1MrG/U=
+github.com/vmihailenco/msgpack/v5 v5.3.5/go.mod h1:7xyJ9e+0+9SaZT0Wt1RGleJXzli6Q/V5KbhBonMG9jc=
 github.com/vmihailenco/msgpack/v5 v5.4.1 h1:cQriyiUvjTwOHg8QZaPihLWeRAAVoCpE00IUPn0Bjt8=
 github.com/vmihailenco/msgpack/v5 v5.4.1/go.mod h1:GaZTsDaehaPpQVyxrf5mtQlH+pc21PIudVV/E3rRQok=
 github.com/vmihailenco/tagparser/v2 v2.0.0 h1:y09buUbR+b5aycVFQs/g70pqKVZNBmxwAhO7/IwNM9g=
diff --git a/pack b/pack
index 493b77f3..e23f1286 100755
--- a/pack
+++ b/pack
@@ -24,16 +24,19 @@ function safe_execute {
 function check {
   ./cmds/portmaster-core/pack check
   ./cmds/portmaster-start/pack check
+  ./cmds/hub/pack check
 }
 
 function build {
   safe_execute ./cmds/portmaster-core/pack build
   safe_execute ./cmds/portmaster-start/pack build
+  safe_execute ./cmds/hub/pack build
 }
 
 function reset {
   ./cmds/portmaster-core/pack reset
   ./cmds/portmaster-start/pack reset
+  ./cmds/hub/pack reset
 }
 
 case $1 in
diff --git a/packaging/linux/.gitkeep b/packaging/linux/.gitkeep
new file mode 100644
index 00000000..e69de29b
diff --git a/packaging/windows/.gitkeep b/packaging/windows/.gitkeep
new file mode 100644
index 00000000..e69de29b
diff --git a/runtime/.gitkeep b/runtime/.gitkeep
new file mode 100644
index 00000000..0d64ac96
--- /dev/null
+++ b/runtime/.gitkeep
@@ -0,0 +1 @@
+The new portbase should land here.
\ No newline at end of file
diff --git a/broadcasts/api.go b/service/broadcasts/api.go
similarity index 100%
rename from broadcasts/api.go
rename to service/broadcasts/api.go
diff --git a/broadcasts/data.go b/service/broadcasts/data.go
similarity index 91%
rename from broadcasts/data.go
rename to service/broadcasts/data.go
index a04c7820..22faf458 100644
--- a/broadcasts/data.go
+++ b/service/broadcasts/data.go
@@ -5,12 +5,12 @@ import (
 	"time"
 
 	"github.com/safing/portbase/config"
-	"github.com/safing/portmaster/intel/geoip"
-	"github.com/safing/portmaster/netenv"
-	"github.com/safing/portmaster/updates"
-	"github.com/safing/spn/access"
-	"github.com/safing/spn/access/account"
-	"github.com/safing/spn/captain"
+	"github.com/safing/portmaster/service/intel/geoip"
+	"github.com/safing/portmaster/service/netenv"
+	"github.com/safing/portmaster/service/updates"
+	"github.com/safing/portmaster/spn/access"
+	"github.com/safing/portmaster/spn/access/account"
+	"github.com/safing/portmaster/spn/captain"
 )
 
 var portmasterStarted = time.Now()
diff --git a/broadcasts/install_info.go b/service/broadcasts/install_info.go
similarity index 100%
rename from broadcasts/install_info.go
rename to service/broadcasts/install_info.go
diff --git a/broadcasts/module.go b/service/broadcasts/module.go
similarity index 100%
rename from broadcasts/module.go
rename to service/broadcasts/module.go
diff --git a/broadcasts/notify.go b/service/broadcasts/notify.go
similarity index 99%
rename from broadcasts/notify.go
rename to service/broadcasts/notify.go
index cd6c38f2..4e359139 100644
--- a/broadcasts/notify.go
+++ b/service/broadcasts/notify.go
@@ -18,7 +18,7 @@ import (
 	"github.com/safing/portbase/log"
 	"github.com/safing/portbase/modules"
 	"github.com/safing/portbase/notifications"
-	"github.com/safing/portmaster/updates"
+	"github.com/safing/portmaster/service/updates"
 )
 
 const (
diff --git a/broadcasts/state.go b/service/broadcasts/state.go
similarity index 100%
rename from broadcasts/state.go
rename to service/broadcasts/state.go
diff --git a/broadcasts/testdata/README.md b/service/broadcasts/testdata/README.md
similarity index 100%
rename from broadcasts/testdata/README.md
rename to service/broadcasts/testdata/README.md
diff --git a/broadcasts/testdata/notifications.yaml b/service/broadcasts/testdata/notifications.yaml
similarity index 100%
rename from broadcasts/testdata/notifications.yaml
rename to service/broadcasts/testdata/notifications.yaml
diff --git a/compat/api.go b/service/compat/api.go
similarity index 100%
rename from compat/api.go
rename to service/compat/api.go
diff --git a/compat/callbacks.go b/service/compat/callbacks.go
similarity index 90%
rename from compat/callbacks.go
rename to service/compat/callbacks.go
index e997ff8f..2abfa858 100644
--- a/compat/callbacks.go
+++ b/service/compat/callbacks.go
@@ -3,8 +3,8 @@ package compat
 import (
 	"net"
 
-	"github.com/safing/portmaster/network/packet"
-	"github.com/safing/portmaster/process"
+	"github.com/safing/portmaster/service/network/packet"
+	"github.com/safing/portmaster/service/process"
 )
 
 // SubmitSystemIntegrationCheckPacket submit a packet for the system integrity check.
diff --git a/compat/debug_default.go b/service/compat/debug_default.go
similarity index 100%
rename from compat/debug_default.go
rename to service/compat/debug_default.go
diff --git a/compat/debug_linux.go b/service/compat/debug_linux.go
similarity index 100%
rename from compat/debug_linux.go
rename to service/compat/debug_linux.go
diff --git a/compat/debug_windows.go b/service/compat/debug_windows.go
similarity index 100%
rename from compat/debug_windows.go
rename to service/compat/debug_windows.go
diff --git a/compat/iptables.go b/service/compat/iptables.go
similarity index 100%
rename from compat/iptables.go
rename to service/compat/iptables.go
diff --git a/compat/iptables_test.go b/service/compat/iptables_test.go
similarity index 100%
rename from compat/iptables_test.go
rename to service/compat/iptables_test.go
diff --git a/compat/module.go b/service/compat/module.go
similarity index 97%
rename from compat/module.go
rename to service/compat/module.go
index c159d02f..b8b95090 100644
--- a/compat/module.go
+++ b/service/compat/module.go
@@ -9,8 +9,8 @@ import (
 
 	"github.com/safing/portbase/log"
 	"github.com/safing/portbase/modules"
-	"github.com/safing/portmaster/netenv"
-	"github.com/safing/portmaster/resolver"
+	"github.com/safing/portmaster/service/netenv"
+	"github.com/safing/portmaster/service/resolver"
 )
 
 var (
diff --git a/compat/notify.go b/service/compat/notify.go
similarity index 98%
rename from compat/notify.go
rename to service/compat/notify.go
index 39157648..f26f0ea3 100644
--- a/compat/notify.go
+++ b/service/compat/notify.go
@@ -12,8 +12,8 @@ import (
 	"github.com/safing/portbase/log"
 	"github.com/safing/portbase/modules"
 	"github.com/safing/portbase/notifications"
-	"github.com/safing/portmaster/process"
-	"github.com/safing/portmaster/profile"
+	"github.com/safing/portmaster/service/process"
+	"github.com/safing/portmaster/service/profile"
 )
 
 type baseIssue struct {
diff --git a/compat/selfcheck.go b/service/compat/selfcheck.go
similarity index 97%
rename from compat/selfcheck.go
rename to service/compat/selfcheck.go
index e26c1ed5..f4775cdc 100644
--- a/compat/selfcheck.go
+++ b/service/compat/selfcheck.go
@@ -12,9 +12,9 @@ import (
 
 	"github.com/safing/portbase/log"
 	"github.com/safing/portbase/rng"
-	"github.com/safing/portmaster/netenv"
-	"github.com/safing/portmaster/network/packet"
-	"github.com/safing/portmaster/resolver"
+	"github.com/safing/portmaster/service/netenv"
+	"github.com/safing/portmaster/service/network/packet"
+	"github.com/safing/portmaster/service/resolver"
 )
 
 var (
diff --git a/compat/wfpstate.go b/service/compat/wfpstate.go
similarity index 100%
rename from compat/wfpstate.go
rename to service/compat/wfpstate.go
diff --git a/compat/wfpstate_test.go b/service/compat/wfpstate_test.go
similarity index 100%
rename from compat/wfpstate_test.go
rename to service/compat/wfpstate_test.go
diff --git a/core/api.go b/service/core/api.go
similarity index 96%
rename from core/api.go
rename to service/core/api.go
index 6a653909..8e7d24bc 100644
--- a/core/api.go
+++ b/service/core/api.go
@@ -15,12 +15,12 @@ import (
 	"github.com/safing/portbase/notifications"
 	"github.com/safing/portbase/rng"
 	"github.com/safing/portbase/utils/debug"
-	"github.com/safing/portmaster/compat"
-	"github.com/safing/portmaster/process"
-	"github.com/safing/portmaster/resolver"
-	"github.com/safing/portmaster/status"
-	"github.com/safing/portmaster/updates"
-	"github.com/safing/spn/captain"
+	"github.com/safing/portmaster/service/compat"
+	"github.com/safing/portmaster/service/process"
+	"github.com/safing/portmaster/service/resolver"
+	"github.com/safing/portmaster/service/status"
+	"github.com/safing/portmaster/service/updates"
+	"github.com/safing/portmaster/spn/captain"
 )
 
 func registerAPIEndpoints() error {
diff --git a/core/base/databases.go b/service/core/base/databases.go
similarity index 100%
rename from core/base/databases.go
rename to service/core/base/databases.go
diff --git a/core/base/global.go b/service/core/base/global.go
similarity index 100%
rename from core/base/global.go
rename to service/core/base/global.go
diff --git a/core/base/logs.go b/service/core/base/logs.go
similarity index 100%
rename from core/base/logs.go
rename to service/core/base/logs.go
diff --git a/core/base/module.go b/service/core/base/module.go
similarity index 100%
rename from core/base/module.go
rename to service/core/base/module.go
diff --git a/core/base/profiling.go b/service/core/base/profiling.go
similarity index 100%
rename from core/base/profiling.go
rename to service/core/base/profiling.go
diff --git a/core/config.go b/service/core/config.go
similarity index 100%
rename from core/config.go
rename to service/core/config.go
diff --git a/core/core.go b/service/core/core.go
similarity index 84%
rename from core/core.go
rename to service/core/core.go
index d0c95418..ff535759 100644
--- a/core/core.go
+++ b/service/core/core.go
@@ -9,13 +9,13 @@ import (
 	"github.com/safing/portbase/metrics"
 	"github.com/safing/portbase/modules"
 	"github.com/safing/portbase/modules/subsystems"
-	_ "github.com/safing/portmaster/broadcasts"
-	_ "github.com/safing/portmaster/netenv"
-	_ "github.com/safing/portmaster/netquery"
-	_ "github.com/safing/portmaster/status"
-	_ "github.com/safing/portmaster/sync"
-	_ "github.com/safing/portmaster/ui"
-	"github.com/safing/portmaster/updates"
+	_ "github.com/safing/portmaster/service/broadcasts"
+	_ "github.com/safing/portmaster/service/netenv"
+	_ "github.com/safing/portmaster/service/netquery"
+	_ "github.com/safing/portmaster/service/status"
+	_ "github.com/safing/portmaster/service/sync"
+	_ "github.com/safing/portmaster/service/ui"
+	"github.com/safing/portmaster/service/updates"
 )
 
 const (
diff --git a/core/os_default.go b/service/core/os_default.go
similarity index 100%
rename from core/os_default.go
rename to service/core/os_default.go
diff --git a/core/os_windows.go b/service/core/os_windows.go
similarity index 100%
rename from core/os_windows.go
rename to service/core/os_windows.go
diff --git a/core/pmtesting/testing.go b/service/core/pmtesting/testing.go
similarity index 96%
rename from core/pmtesting/testing.go
rename to service/core/pmtesting/testing.go
index 9b597c83..16253f86 100644
--- a/core/pmtesting/testing.go
+++ b/service/core/pmtesting/testing.go
@@ -7,7 +7,7 @@
 //	import (
 //		"testing"
 //
-//		"github.com/safing/portmaster/core/pmtesting"
+//		"github.com/safing/portmaster/service/core/pmtesting"
 //	)
 //
 //	func TestMain(m *testing.M) {
@@ -27,7 +27,7 @@ import (
 	"github.com/safing/portbase/dataroot"
 	"github.com/safing/portbase/log"
 	"github.com/safing/portbase/modules"
-	"github.com/safing/portmaster/core/base"
+	"github.com/safing/portmaster/service/core/base"
 )
 
 var printStackOnExit bool
diff --git a/detection/dga/lms.go b/service/detection/dga/lms.go
similarity index 100%
rename from detection/dga/lms.go
rename to service/detection/dga/lms.go
diff --git a/detection/dga/lms_test.go b/service/detection/dga/lms_test.go
similarity index 100%
rename from detection/dga/lms_test.go
rename to service/detection/dga/lms_test.go
diff --git a/firewall/api.go b/service/firewall/api.go
similarity index 96%
rename from firewall/api.go
rename to service/firewall/api.go
index b17efe6d..949e168f 100644
--- a/firewall/api.go
+++ b/service/firewall/api.go
@@ -13,11 +13,11 @@ import (
 	"github.com/safing/portbase/dataroot"
 	"github.com/safing/portbase/log"
 	"github.com/safing/portbase/utils"
-	"github.com/safing/portmaster/netenv"
-	"github.com/safing/portmaster/network/netutils"
-	"github.com/safing/portmaster/network/packet"
-	"github.com/safing/portmaster/process"
-	"github.com/safing/portmaster/updates"
+	"github.com/safing/portmaster/service/netenv"
+	"github.com/safing/portmaster/service/network/netutils"
+	"github.com/safing/portmaster/service/network/packet"
+	"github.com/safing/portmaster/service/process"
+	"github.com/safing/portmaster/service/updates"
 )
 
 const (
diff --git a/firewall/bypassing.go b/service/firewall/bypassing.go
similarity index 87%
rename from firewall/bypassing.go
rename to service/firewall/bypassing.go
index cf8502cb..415fc6c8 100644
--- a/firewall/bypassing.go
+++ b/service/firewall/bypassing.go
@@ -4,11 +4,11 @@ import (
 	"context"
 	"strings"
 
-	"github.com/safing/portmaster/compat"
-	"github.com/safing/portmaster/nameserver/nsutil"
-	"github.com/safing/portmaster/network"
-	"github.com/safing/portmaster/network/packet"
-	"github.com/safing/portmaster/profile/endpoints"
+	"github.com/safing/portmaster/service/compat"
+	"github.com/safing/portmaster/service/nameserver/nsutil"
+	"github.com/safing/portmaster/service/network"
+	"github.com/safing/portmaster/service/network/packet"
+	"github.com/safing/portmaster/service/profile/endpoints"
 )
 
 var resolverFilterLists = []string{"17-DNS"}
diff --git a/firewall/config.go b/service/firewall/config.go
similarity index 98%
rename from firewall/config.go
rename to service/firewall/config.go
index 4e3ca653..960c000b 100644
--- a/firewall/config.go
+++ b/service/firewall/config.go
@@ -6,8 +6,8 @@ import (
 	"github.com/safing/portbase/api"
 	"github.com/safing/portbase/config"
 	"github.com/safing/portbase/notifications"
-	"github.com/safing/portmaster/core"
-	"github.com/safing/spn/captain"
+	"github.com/safing/portmaster/service/core"
+	"github.com/safing/portmaster/spn/captain"
 )
 
 // Configuration Keys.
diff --git a/firewall/dns.go b/service/firewall/dns.go
similarity index 97%
rename from firewall/dns.go
rename to service/firewall/dns.go
index b0ac071a..3712165d 100644
--- a/firewall/dns.go
+++ b/service/firewall/dns.go
@@ -10,11 +10,11 @@ import (
 
 	"github.com/safing/portbase/database"
 	"github.com/safing/portbase/log"
-	"github.com/safing/portmaster/network"
-	"github.com/safing/portmaster/network/netutils"
-	"github.com/safing/portmaster/profile"
-	"github.com/safing/portmaster/profile/endpoints"
-	"github.com/safing/portmaster/resolver"
+	"github.com/safing/portmaster/service/network"
+	"github.com/safing/portmaster/service/network/netutils"
+	"github.com/safing/portmaster/service/profile"
+	"github.com/safing/portmaster/service/profile/endpoints"
+	"github.com/safing/portmaster/service/resolver"
 )
 
 func filterDNSSection(
diff --git a/firewall/inspection/inspection.go b/service/firewall/inspection/inspection.go
similarity index 95%
rename from firewall/inspection/inspection.go
rename to service/firewall/inspection/inspection.go
index 92de0345..44855ba4 100644
--- a/firewall/inspection/inspection.go
+++ b/service/firewall/inspection/inspection.go
@@ -3,8 +3,8 @@ package inspection
 import (
 	"sync"
 
-	"github.com/safing/portmaster/network"
-	"github.com/safing/portmaster/network/packet"
+	"github.com/safing/portmaster/service/network"
+	"github.com/safing/portmaster/service/network/packet"
 )
 
 //nolint:golint,stylecheck // FIXME
diff --git a/firewall/interception/ebpf/bandwidth/bpf_bpfeb.go b/service/firewall/interception/ebpf/bandwidth/bpf_bpfeb.go
similarity index 100%
rename from firewall/interception/ebpf/bandwidth/bpf_bpfeb.go
rename to service/firewall/interception/ebpf/bandwidth/bpf_bpfeb.go
diff --git a/firewall/interception/ebpf/bandwidth/bpf_bpfeb.o b/service/firewall/interception/ebpf/bandwidth/bpf_bpfeb.o
similarity index 100%
rename from firewall/interception/ebpf/bandwidth/bpf_bpfeb.o
rename to service/firewall/interception/ebpf/bandwidth/bpf_bpfeb.o
diff --git a/firewall/interception/ebpf/bandwidth/bpf_bpfel.go b/service/firewall/interception/ebpf/bandwidth/bpf_bpfel.go
similarity index 100%
rename from firewall/interception/ebpf/bandwidth/bpf_bpfel.go
rename to service/firewall/interception/ebpf/bandwidth/bpf_bpfel.go
diff --git a/firewall/interception/ebpf/bandwidth/bpf_bpfel.o b/service/firewall/interception/ebpf/bandwidth/bpf_bpfel.o
similarity index 100%
rename from firewall/interception/ebpf/bandwidth/bpf_bpfel.o
rename to service/firewall/interception/ebpf/bandwidth/bpf_bpfel.o
diff --git a/firewall/interception/ebpf/bandwidth/interface.go b/service/firewall/interception/ebpf/bandwidth/interface.go
similarity index 98%
rename from firewall/interception/ebpf/bandwidth/interface.go
rename to service/firewall/interception/ebpf/bandwidth/interface.go
index 3a08bbad..e1473dbd 100644
--- a/firewall/interception/ebpf/bandwidth/interface.go
+++ b/service/firewall/interception/ebpf/bandwidth/interface.go
@@ -16,7 +16,7 @@ import (
 	"golang.org/x/sys/unix"
 
 	"github.com/safing/portbase/log"
-	"github.com/safing/portmaster/network/packet"
+	"github.com/safing/portmaster/service/network/packet"
 )
 
 //go:generate go run github.com/cilium/ebpf/cmd/bpf2go -cc clang -cflags "-O2 -g -Wall -Werror" bpf ../programs/bandwidth.c
diff --git a/firewall/interception/ebpf/connection_listener/bpf_bpfeb.go b/service/firewall/interception/ebpf/connection_listener/bpf_bpfeb.go
similarity index 100%
rename from firewall/interception/ebpf/connection_listener/bpf_bpfeb.go
rename to service/firewall/interception/ebpf/connection_listener/bpf_bpfeb.go
diff --git a/firewall/interception/ebpf/connection_listener/bpf_bpfeb.o b/service/firewall/interception/ebpf/connection_listener/bpf_bpfeb.o
similarity index 100%
rename from firewall/interception/ebpf/connection_listener/bpf_bpfeb.o
rename to service/firewall/interception/ebpf/connection_listener/bpf_bpfeb.o
diff --git a/firewall/interception/ebpf/connection_listener/bpf_bpfel.go b/service/firewall/interception/ebpf/connection_listener/bpf_bpfel.go
similarity index 100%
rename from firewall/interception/ebpf/connection_listener/bpf_bpfel.go
rename to service/firewall/interception/ebpf/connection_listener/bpf_bpfel.go
diff --git a/firewall/interception/ebpf/connection_listener/bpf_bpfel.o b/service/firewall/interception/ebpf/connection_listener/bpf_bpfel.o
similarity index 100%
rename from firewall/interception/ebpf/connection_listener/bpf_bpfel.o
rename to service/firewall/interception/ebpf/connection_listener/bpf_bpfel.o
diff --git a/firewall/interception/ebpf/connection_listener/worker.go b/service/firewall/interception/ebpf/connection_listener/worker.go
similarity index 98%
rename from firewall/interception/ebpf/connection_listener/worker.go
rename to service/firewall/interception/ebpf/connection_listener/worker.go
index bee03f12..aadfd57f 100644
--- a/firewall/interception/ebpf/connection_listener/worker.go
+++ b/service/firewall/interception/ebpf/connection_listener/worker.go
@@ -15,7 +15,7 @@ import (
 	"github.com/cilium/ebpf/rlimit"
 
 	"github.com/safing/portbase/log"
-	"github.com/safing/portmaster/network/packet"
+	"github.com/safing/portmaster/service/network/packet"
 )
 
 //go:generate go run github.com/cilium/ebpf/cmd/bpf2go -cc clang -cflags "-O2 -g -Wall -Werror" -type Event bpf ../programs/monitor.c
diff --git a/firewall/interception/ebpf/exec/bpf_bpfeb.go b/service/firewall/interception/ebpf/exec/bpf_bpfeb.go
similarity index 100%
rename from firewall/interception/ebpf/exec/bpf_bpfeb.go
rename to service/firewall/interception/ebpf/exec/bpf_bpfeb.go
diff --git a/firewall/interception/ebpf/exec/bpf_bpfeb.o b/service/firewall/interception/ebpf/exec/bpf_bpfeb.o
similarity index 100%
rename from firewall/interception/ebpf/exec/bpf_bpfeb.o
rename to service/firewall/interception/ebpf/exec/bpf_bpfeb.o
diff --git a/firewall/interception/ebpf/exec/bpf_bpfel.go b/service/firewall/interception/ebpf/exec/bpf_bpfel.go
similarity index 100%
rename from firewall/interception/ebpf/exec/bpf_bpfel.go
rename to service/firewall/interception/ebpf/exec/bpf_bpfel.go
diff --git a/firewall/interception/ebpf/exec/bpf_bpfel.o b/service/firewall/interception/ebpf/exec/bpf_bpfel.o
similarity index 100%
rename from firewall/interception/ebpf/exec/bpf_bpfel.o
rename to service/firewall/interception/ebpf/exec/bpf_bpfel.o
diff --git a/firewall/interception/ebpf/exec/exec.go b/service/firewall/interception/ebpf/exec/exec.go
similarity index 100%
rename from firewall/interception/ebpf/exec/exec.go
rename to service/firewall/interception/ebpf/exec/exec.go
diff --git a/firewall/interception/ebpf/programs/bandwidth.c b/service/firewall/interception/ebpf/programs/bandwidth.c
similarity index 100%
rename from firewall/interception/ebpf/programs/bandwidth.c
rename to service/firewall/interception/ebpf/programs/bandwidth.c
diff --git a/firewall/interception/ebpf/programs/bpf/bpf_core_read.h b/service/firewall/interception/ebpf/programs/bpf/bpf_core_read.h
similarity index 100%
rename from firewall/interception/ebpf/programs/bpf/bpf_core_read.h
rename to service/firewall/interception/ebpf/programs/bpf/bpf_core_read.h
diff --git a/firewall/interception/ebpf/programs/bpf/bpf_helper_defs.h b/service/firewall/interception/ebpf/programs/bpf/bpf_helper_defs.h
similarity index 100%
rename from firewall/interception/ebpf/programs/bpf/bpf_helper_defs.h
rename to service/firewall/interception/ebpf/programs/bpf/bpf_helper_defs.h
diff --git a/firewall/interception/ebpf/programs/bpf/bpf_helpers.h b/service/firewall/interception/ebpf/programs/bpf/bpf_helpers.h
similarity index 100%
rename from firewall/interception/ebpf/programs/bpf/bpf_helpers.h
rename to service/firewall/interception/ebpf/programs/bpf/bpf_helpers.h
diff --git a/firewall/interception/ebpf/programs/bpf/bpf_tracing.h b/service/firewall/interception/ebpf/programs/bpf/bpf_tracing.h
similarity index 100%
rename from firewall/interception/ebpf/programs/bpf/bpf_tracing.h
rename to service/firewall/interception/ebpf/programs/bpf/bpf_tracing.h
diff --git a/firewall/interception/ebpf/programs/exec.c b/service/firewall/interception/ebpf/programs/exec.c
similarity index 100%
rename from firewall/interception/ebpf/programs/exec.c
rename to service/firewall/interception/ebpf/programs/exec.c
diff --git a/firewall/interception/ebpf/programs/monitor.c b/service/firewall/interception/ebpf/programs/monitor.c
similarity index 100%
rename from firewall/interception/ebpf/programs/monitor.c
rename to service/firewall/interception/ebpf/programs/monitor.c
diff --git a/firewall/interception/ebpf/programs/update.sh b/service/firewall/interception/ebpf/programs/update.sh
similarity index 100%
rename from firewall/interception/ebpf/programs/update.sh
rename to service/firewall/interception/ebpf/programs/update.sh
diff --git a/firewall/interception/ebpf/programs/vmlinux-x86.h b/service/firewall/interception/ebpf/programs/vmlinux-x86.h
similarity index 100%
rename from firewall/interception/ebpf/programs/vmlinux-x86.h
rename to service/firewall/interception/ebpf/programs/vmlinux-x86.h
diff --git a/firewall/interception/interception_default.go b/service/firewall/interception/interception_default.go
similarity index 87%
rename from firewall/interception/interception_default.go
rename to service/firewall/interception/interception_default.go
index 222a041c..a4a93f44 100644
--- a/firewall/interception/interception_default.go
+++ b/service/firewall/interception/interception_default.go
@@ -4,8 +4,8 @@ package interception
 
 import (
 	"github.com/safing/portbase/log"
-	"github.com/safing/portmaster/network"
-	"github.com/safing/portmaster/network/packet"
+	"github.com/safing/portmaster/service/network"
+	"github.com/safing/portmaster/service/network/packet"
 )
 
 // start starts the interception.
diff --git a/firewall/interception/interception_linux.go b/service/firewall/interception/interception_linux.go
similarity index 77%
rename from firewall/interception/interception_linux.go
rename to service/firewall/interception/interception_linux.go
index 128f6649..66ca5b7e 100644
--- a/firewall/interception/interception_linux.go
+++ b/service/firewall/interception/interception_linux.go
@@ -4,11 +4,11 @@ import (
 	"context"
 	"time"
 
-	bandwidth "github.com/safing/portmaster/firewall/interception/ebpf/bandwidth"
-	conn_listener "github.com/safing/portmaster/firewall/interception/ebpf/connection_listener"
-	"github.com/safing/portmaster/firewall/interception/nfq"
-	"github.com/safing/portmaster/network"
-	"github.com/safing/portmaster/network/packet"
+	bandwidth "github.com/safing/portmaster/service/firewall/interception/ebpf/bandwidth"
+	conn_listener "github.com/safing/portmaster/service/firewall/interception/ebpf/connection_listener"
+	"github.com/safing/portmaster/service/firewall/interception/nfq"
+	"github.com/safing/portmaster/service/network"
+	"github.com/safing/portmaster/service/network/packet"
 )
 
 // start starts the interception.
diff --git a/firewall/interception/interception_windows.go b/service/firewall/interception/interception_windows.go
similarity index 88%
rename from firewall/interception/interception_windows.go
rename to service/firewall/interception/interception_windows.go
index 069f5c01..71033c1a 100644
--- a/firewall/interception/interception_windows.go
+++ b/service/firewall/interception/interception_windows.go
@@ -5,10 +5,10 @@ import (
 	"fmt"
 	"time"
 
-	"github.com/safing/portmaster/firewall/interception/windowskext"
-	"github.com/safing/portmaster/network"
-	"github.com/safing/portmaster/network/packet"
-	"github.com/safing/portmaster/updates"
+	"github.com/safing/portmaster/service/firewall/interception/windowskext"
+	"github.com/safing/portmaster/service/network"
+	"github.com/safing/portmaster/service/network/packet"
+	"github.com/safing/portmaster/service/updates"
 )
 
 // start starts the interception.
diff --git a/firewall/interception/introspection.go b/service/firewall/interception/introspection.go
similarity index 100%
rename from firewall/interception/introspection.go
rename to service/firewall/interception/introspection.go
diff --git a/firewall/interception/module.go b/service/firewall/interception/module.go
similarity index 96%
rename from firewall/interception/module.go
rename to service/firewall/interception/module.go
index 0b0e86d0..2802defa 100644
--- a/firewall/interception/module.go
+++ b/service/firewall/interception/module.go
@@ -5,7 +5,7 @@ import (
 
 	"github.com/safing/portbase/log"
 	"github.com/safing/portbase/modules"
-	"github.com/safing/portmaster/network/packet"
+	"github.com/safing/portmaster/service/network/packet"
 )
 
 var (
diff --git a/firewall/interception/nfq/conntrack.go b/service/firewall/interception/nfq/conntrack.go
similarity index 97%
rename from firewall/interception/nfq/conntrack.go
rename to service/firewall/interception/nfq/conntrack.go
index b71651ec..6959d328 100644
--- a/firewall/interception/nfq/conntrack.go
+++ b/service/firewall/interception/nfq/conntrack.go
@@ -9,8 +9,8 @@ import (
 	ct "github.com/florianl/go-conntrack"
 
 	"github.com/safing/portbase/log"
-	"github.com/safing/portmaster/netenv"
-	"github.com/safing/portmaster/network"
+	"github.com/safing/portmaster/service/netenv"
+	"github.com/safing/portmaster/service/network"
 )
 
 var nfct *ct.Nfct // Conntrack handler. NFCT: Network Filter Connection Tracking.
diff --git a/firewall/interception/nfq/nfq.go b/service/firewall/interception/nfq/nfq.go
similarity index 98%
rename from firewall/interception/nfq/nfq.go
rename to service/firewall/interception/nfq/nfq.go
index 184e15f9..f7579920 100644
--- a/firewall/interception/nfq/nfq.go
+++ b/service/firewall/interception/nfq/nfq.go
@@ -15,8 +15,8 @@ import (
 	"golang.org/x/sys/unix"
 
 	"github.com/safing/portbase/log"
-	pmpacket "github.com/safing/portmaster/network/packet"
-	"github.com/safing/portmaster/process"
+	pmpacket "github.com/safing/portmaster/service/network/packet"
+	"github.com/safing/portmaster/service/process"
 )
 
 // Queue wraps a nfqueue.
diff --git a/firewall/interception/nfq/packet.go b/service/firewall/interception/nfq/packet.go
similarity index 98%
rename from firewall/interception/nfq/packet.go
rename to service/firewall/interception/nfq/packet.go
index 8baeff5b..af3d5fac 100644
--- a/firewall/interception/nfq/packet.go
+++ b/service/firewall/interception/nfq/packet.go
@@ -11,7 +11,7 @@ import (
 	"github.com/tevino/abool"
 
 	"github.com/safing/portbase/log"
-	pmpacket "github.com/safing/portmaster/network/packet"
+	pmpacket "github.com/safing/portmaster/service/network/packet"
 )
 
 // Firewalling marks used by the Portmaster.
diff --git a/firewall/interception/nfqueue_linux.go b/service/firewall/interception/nfqueue_linux.go
similarity index 98%
rename from firewall/interception/nfqueue_linux.go
rename to service/firewall/interception/nfqueue_linux.go
index 2e632813..537bbcb7 100644
--- a/firewall/interception/nfqueue_linux.go
+++ b/service/firewall/interception/nfqueue_linux.go
@@ -11,9 +11,9 @@ import (
 	"github.com/hashicorp/go-multierror"
 
 	"github.com/safing/portbase/log"
-	"github.com/safing/portmaster/firewall/interception/nfq"
-	"github.com/safing/portmaster/netenv"
-	"github.com/safing/portmaster/network/packet"
+	"github.com/safing/portmaster/service/firewall/interception/nfq"
+	"github.com/safing/portmaster/service/netenv"
+	"github.com/safing/portmaster/service/network/packet"
 )
 
 var (
diff --git a/firewall/interception/packet_tracer.go b/service/firewall/interception/packet_tracer.go
similarity index 95%
rename from firewall/interception/packet_tracer.go
rename to service/firewall/interception/packet_tracer.go
index 4d822a42..b90dfbf7 100644
--- a/firewall/interception/packet_tracer.go
+++ b/service/firewall/interception/packet_tracer.go
@@ -3,7 +3,7 @@ package interception
 import (
 	"time"
 
-	"github.com/safing/portmaster/network/packet"
+	"github.com/safing/portmaster/service/network/packet"
 )
 
 type tracedPacket struct {
diff --git a/firewall/interception/windowskext/bandwidth_stats.go b/service/firewall/interception/windowskext/bandwidth_stats.go
similarity index 98%
rename from firewall/interception/windowskext/bandwidth_stats.go
rename to service/firewall/interception/windowskext/bandwidth_stats.go
index 2a1bddc0..f1fb856b 100644
--- a/firewall/interception/windowskext/bandwidth_stats.go
+++ b/service/firewall/interception/windowskext/bandwidth_stats.go
@@ -10,7 +10,7 @@ import (
 	"time"
 
 	"github.com/safing/portbase/log"
-	"github.com/safing/portmaster/network/packet"
+	"github.com/safing/portmaster/service/network/packet"
 )
 
 type Rxtxdata struct {
diff --git a/firewall/interception/windowskext/doc.go b/service/firewall/interception/windowskext/doc.go
similarity index 100%
rename from firewall/interception/windowskext/doc.go
rename to service/firewall/interception/windowskext/doc.go
diff --git a/firewall/interception/windowskext/handler.go b/service/firewall/interception/windowskext/handler.go
similarity index 97%
rename from firewall/interception/windowskext/handler.go
rename to service/firewall/interception/windowskext/handler.go
index f5d66761..a5a8de74 100644
--- a/firewall/interception/windowskext/handler.go
+++ b/service/firewall/interception/windowskext/handler.go
@@ -12,13 +12,13 @@ import (
 	"time"
 	"unsafe"
 
-	"github.com/safing/portmaster/process"
+	"github.com/safing/portmaster/service/process"
 
 	"github.com/tevino/abool"
 
 	"github.com/safing/portbase/log"
-	"github.com/safing/portmaster/network"
-	"github.com/safing/portmaster/network/packet"
+	"github.com/safing/portmaster/service/network"
+	"github.com/safing/portmaster/service/network/packet"
 )
 
 const (
diff --git a/firewall/interception/windowskext/kext.go b/service/firewall/interception/windowskext/kext.go
similarity index 98%
rename from firewall/interception/windowskext/kext.go
rename to service/firewall/interception/windowskext/kext.go
index a7e6a1c3..7699c35a 100644
--- a/firewall/interception/windowskext/kext.go
+++ b/service/firewall/interception/windowskext/kext.go
@@ -11,8 +11,8 @@ import (
 	"unsafe"
 
 	"github.com/safing/portbase/log"
-	"github.com/safing/portmaster/network"
-	"github.com/safing/portmaster/network/packet"
+	"github.com/safing/portmaster/service/network"
+	"github.com/safing/portmaster/service/network/packet"
 	"golang.org/x/sys/windows"
 )
 
diff --git a/firewall/interception/windowskext/packet.go b/service/firewall/interception/windowskext/packet.go
similarity index 97%
rename from firewall/interception/windowskext/packet.go
rename to service/firewall/interception/windowskext/packet.go
index 6c7b24da..5f96e784 100644
--- a/firewall/interception/windowskext/packet.go
+++ b/service/firewall/interception/windowskext/packet.go
@@ -9,8 +9,8 @@ import (
 	"github.com/tevino/abool"
 
 	"github.com/safing/portbase/log"
-	"github.com/safing/portmaster/network"
-	"github.com/safing/portmaster/network/packet"
+	"github.com/safing/portmaster/service/network"
+	"github.com/safing/portmaster/service/network/packet"
 )
 
 // Packet represents an IP packet.
diff --git a/firewall/interception/windowskext/service.go b/service/firewall/interception/windowskext/service.go
similarity index 100%
rename from firewall/interception/windowskext/service.go
rename to service/firewall/interception/windowskext/service.go
diff --git a/firewall/interception/windowskext/syscall.go b/service/firewall/interception/windowskext/syscall.go
similarity index 100%
rename from firewall/interception/windowskext/syscall.go
rename to service/firewall/interception/windowskext/syscall.go
diff --git a/firewall/master.go b/service/firewall/master.go
similarity index 97%
rename from firewall/master.go
rename to service/firewall/master.go
index 8c4b1e59..6549194f 100644
--- a/firewall/master.go
+++ b/service/firewall/master.go
@@ -12,15 +12,15 @@ import (
 	"golang.org/x/net/publicsuffix"
 
 	"github.com/safing/portbase/log"
-	"github.com/safing/portmaster/detection/dga"
-	"github.com/safing/portmaster/intel/customlists"
-	"github.com/safing/portmaster/intel/filterlists"
-	"github.com/safing/portmaster/netenv"
-	"github.com/safing/portmaster/network"
-	"github.com/safing/portmaster/network/netutils"
-	"github.com/safing/portmaster/network/packet"
-	"github.com/safing/portmaster/profile"
-	"github.com/safing/portmaster/profile/endpoints"
+	"github.com/safing/portmaster/service/detection/dga"
+	"github.com/safing/portmaster/service/intel/customlists"
+	"github.com/safing/portmaster/service/intel/filterlists"
+	"github.com/safing/portmaster/service/netenv"
+	"github.com/safing/portmaster/service/network"
+	"github.com/safing/portmaster/service/network/netutils"
+	"github.com/safing/portmaster/service/network/packet"
+	"github.com/safing/portmaster/service/profile"
+	"github.com/safing/portmaster/service/profile/endpoints"
 )
 
 const noReasonOptionKey = ""
diff --git a/firewall/module.go b/service/firewall/module.go
similarity index 94%
rename from firewall/module.go
rename to service/firewall/module.go
index de6ca88a..73292967 100644
--- a/firewall/module.go
+++ b/service/firewall/module.go
@@ -9,11 +9,11 @@ import (
 	"github.com/safing/portbase/log"
 	"github.com/safing/portbase/modules"
 	"github.com/safing/portbase/modules/subsystems"
-	_ "github.com/safing/portmaster/core"
-	"github.com/safing/portmaster/network"
-	"github.com/safing/portmaster/profile"
-	"github.com/safing/spn/access"
-	"github.com/safing/spn/captain"
+	_ "github.com/safing/portmaster/service/core"
+	"github.com/safing/portmaster/service/network"
+	"github.com/safing/portmaster/service/profile"
+	"github.com/safing/portmaster/spn/access"
+	"github.com/safing/portmaster/spn/captain"
 )
 
 var module *modules.Module
diff --git a/firewall/packet_handler.go b/service/firewall/packet_handler.go
similarity index 97%
rename from firewall/packet_handler.go
rename to service/firewall/packet_handler.go
index 65105e15..22d9ce37 100644
--- a/firewall/packet_handler.go
+++ b/service/firewall/packet_handler.go
@@ -13,17 +13,17 @@ import (
 	"github.com/tevino/abool"
 
 	"github.com/safing/portbase/log"
-	"github.com/safing/portmaster/compat"
-	_ "github.com/safing/portmaster/core/base"
-	"github.com/safing/portmaster/firewall/inspection"
-	"github.com/safing/portmaster/firewall/interception"
-	"github.com/safing/portmaster/netenv"
-	"github.com/safing/portmaster/netquery"
-	"github.com/safing/portmaster/network"
-	"github.com/safing/portmaster/network/netutils"
-	"github.com/safing/portmaster/network/packet"
-	"github.com/safing/portmaster/process"
-	"github.com/safing/spn/access"
+	"github.com/safing/portmaster/service/compat"
+	_ "github.com/safing/portmaster/service/core/base"
+	"github.com/safing/portmaster/service/firewall/inspection"
+	"github.com/safing/portmaster/service/firewall/interception"
+	"github.com/safing/portmaster/service/netenv"
+	"github.com/safing/portmaster/service/netquery"
+	"github.com/safing/portmaster/service/network"
+	"github.com/safing/portmaster/service/network/netutils"
+	"github.com/safing/portmaster/service/network/packet"
+	"github.com/safing/portmaster/service/process"
+	"github.com/safing/portmaster/spn/access"
 )
 
 var (
diff --git a/firewall/preauth.go b/service/firewall/preauth.go
similarity index 93%
rename from firewall/preauth.go
rename to service/firewall/preauth.go
index 3ee749a6..a265350f 100644
--- a/firewall/preauth.go
+++ b/service/firewall/preauth.go
@@ -6,10 +6,10 @@ import (
 	"strconv"
 	"sync"
 
-	"github.com/safing/portmaster/netenv"
-	"github.com/safing/portmaster/network"
-	"github.com/safing/portmaster/network/packet"
-	"github.com/safing/portmaster/resolver"
+	"github.com/safing/portmaster/service/netenv"
+	"github.com/safing/portmaster/service/network"
+	"github.com/safing/portmaster/service/network/packet"
+	"github.com/safing/portmaster/service/resolver"
 )
 
 var (
diff --git a/firewall/prompt.go b/service/firewall/prompt.go
similarity index 97%
rename from firewall/prompt.go
rename to service/firewall/prompt.go
index 0b2b4ef7..51d6a12a 100644
--- a/firewall/prompt.go
+++ b/service/firewall/prompt.go
@@ -8,10 +8,10 @@ import (
 
 	"github.com/safing/portbase/log"
 	"github.com/safing/portbase/notifications"
-	"github.com/safing/portmaster/intel"
-	"github.com/safing/portmaster/network"
-	"github.com/safing/portmaster/profile"
-	"github.com/safing/portmaster/profile/endpoints"
+	"github.com/safing/portmaster/service/intel"
+	"github.com/safing/portmaster/service/network"
+	"github.com/safing/portmaster/service/profile"
+	"github.com/safing/portmaster/service/profile/endpoints"
 )
 
 const (
diff --git a/firewall/tunnel.go b/service/firewall/tunnel.go
similarity index 91%
rename from firewall/tunnel.go
rename to service/firewall/tunnel.go
index 013bec7b..46b5864a 100644
--- a/firewall/tunnel.go
+++ b/service/firewall/tunnel.go
@@ -5,18 +5,18 @@ import (
 	"errors"
 
 	"github.com/safing/portbase/log"
-	"github.com/safing/portmaster/intel"
-	"github.com/safing/portmaster/netenv"
-	"github.com/safing/portmaster/network"
-	"github.com/safing/portmaster/network/packet"
-	"github.com/safing/portmaster/process"
-	"github.com/safing/portmaster/profile"
-	"github.com/safing/portmaster/profile/endpoints"
-	"github.com/safing/portmaster/resolver"
-	"github.com/safing/spn/captain"
-	"github.com/safing/spn/crew"
-	"github.com/safing/spn/navigator"
-	"github.com/safing/spn/sluice"
+	"github.com/safing/portmaster/service/intel"
+	"github.com/safing/portmaster/service/netenv"
+	"github.com/safing/portmaster/service/network"
+	"github.com/safing/portmaster/service/network/packet"
+	"github.com/safing/portmaster/service/process"
+	"github.com/safing/portmaster/service/profile"
+	"github.com/safing/portmaster/service/profile/endpoints"
+	"github.com/safing/portmaster/service/resolver"
+	"github.com/safing/portmaster/spn/captain"
+	"github.com/safing/portmaster/spn/crew"
+	"github.com/safing/portmaster/spn/navigator"
+	"github.com/safing/portmaster/spn/sluice"
 )
 
 func checkTunneling(ctx context.Context, conn *network.Connection) {
diff --git a/intel/block_reason.go b/service/intel/block_reason.go
similarity index 97%
rename from intel/block_reason.go
rename to service/intel/block_reason.go
index b29ef279..5cabbddf 100644
--- a/intel/block_reason.go
+++ b/service/intel/block_reason.go
@@ -9,7 +9,7 @@ import (
 	"github.com/miekg/dns"
 
 	"github.com/safing/portbase/log"
-	"github.com/safing/portmaster/nameserver/nsutil"
+	"github.com/safing/portmaster/service/nameserver/nsutil"
 )
 
 // ListMatch represents an entity that has been
diff --git a/intel/customlists/config.go b/service/intel/customlists/config.go
similarity index 100%
rename from intel/customlists/config.go
rename to service/intel/customlists/config.go
diff --git a/intel/customlists/lists.go b/service/intel/customlists/lists.go
similarity index 98%
rename from intel/customlists/lists.go
rename to service/intel/customlists/lists.go
index c13a8cd5..33170dd7 100644
--- a/intel/customlists/lists.go
+++ b/service/intel/customlists/lists.go
@@ -12,7 +12,7 @@ import (
 
 	"github.com/safing/portbase/log"
 	"github.com/safing/portbase/notifications"
-	"github.com/safing/portmaster/network/netutils"
+	"github.com/safing/portmaster/service/network/netutils"
 )
 
 var (
diff --git a/intel/customlists/module.go b/service/intel/customlists/module.go
similarity index 100%
rename from intel/customlists/module.go
rename to service/intel/customlists/module.go
diff --git a/intel/entity.go b/service/intel/entity.go
similarity index 98%
rename from intel/entity.go
rename to service/intel/entity.go
index d89be9f6..5311881a 100644
--- a/intel/entity.go
+++ b/service/intel/entity.go
@@ -11,9 +11,9 @@ import (
 	"golang.org/x/net/publicsuffix"
 
 	"github.com/safing/portbase/log"
-	"github.com/safing/portmaster/intel/filterlists"
-	"github.com/safing/portmaster/intel/geoip"
-	"github.com/safing/portmaster/network/netutils"
+	"github.com/safing/portmaster/service/intel/filterlists"
+	"github.com/safing/portmaster/service/intel/geoip"
+	"github.com/safing/portmaster/service/network/netutils"
 )
 
 // Entity describes a remote endpoint in many different ways.
diff --git a/intel/filterlists/bloom.go b/service/intel/filterlists/bloom.go
similarity index 100%
rename from intel/filterlists/bloom.go
rename to service/intel/filterlists/bloom.go
diff --git a/intel/filterlists/cache_version.go b/service/intel/filterlists/cache_version.go
similarity index 100%
rename from intel/filterlists/cache_version.go
rename to service/intel/filterlists/cache_version.go
diff --git a/intel/filterlists/database.go b/service/intel/filterlists/database.go
similarity index 99%
rename from intel/filterlists/database.go
rename to service/intel/filterlists/database.go
index 73330440..8b08f323 100644
--- a/intel/filterlists/database.go
+++ b/service/intel/filterlists/database.go
@@ -15,7 +15,7 @@ import (
 	"github.com/safing/portbase/database/record"
 	"github.com/safing/portbase/log"
 	"github.com/safing/portbase/updater"
-	"github.com/safing/portmaster/updates"
+	"github.com/safing/portmaster/service/updates"
 )
 
 const (
diff --git a/intel/filterlists/decoder.go b/service/intel/filterlists/decoder.go
similarity index 100%
rename from intel/filterlists/decoder.go
rename to service/intel/filterlists/decoder.go
diff --git a/intel/filterlists/index.go b/service/intel/filterlists/index.go
similarity index 99%
rename from intel/filterlists/index.go
rename to service/intel/filterlists/index.go
index 095e3ebd..e5a593b6 100644
--- a/intel/filterlists/index.go
+++ b/service/intel/filterlists/index.go
@@ -12,7 +12,7 @@ import (
 	"github.com/safing/portbase/formats/dsd"
 	"github.com/safing/portbase/log"
 	"github.com/safing/portbase/updater"
-	"github.com/safing/portmaster/updates"
+	"github.com/safing/portmaster/service/updates"
 )
 
 // the following definitions are copied from the intelhub repository
diff --git a/intel/filterlists/keys.go b/service/intel/filterlists/keys.go
similarity index 100%
rename from intel/filterlists/keys.go
rename to service/intel/filterlists/keys.go
diff --git a/intel/filterlists/lookup.go b/service/intel/filterlists/lookup.go
similarity index 100%
rename from intel/filterlists/lookup.go
rename to service/intel/filterlists/lookup.go
diff --git a/intel/filterlists/module.go b/service/intel/filterlists/module.go
similarity index 96%
rename from intel/filterlists/module.go
rename to service/intel/filterlists/module.go
index 6f5568aa..a7846ee4 100644
--- a/intel/filterlists/module.go
+++ b/service/intel/filterlists/module.go
@@ -8,8 +8,8 @@ import (
 
 	"github.com/safing/portbase/log"
 	"github.com/safing/portbase/modules"
-	"github.com/safing/portmaster/netenv"
-	"github.com/safing/portmaster/updates"
+	"github.com/safing/portmaster/service/netenv"
+	"github.com/safing/portmaster/service/updates"
 )
 
 var module *modules.Module
diff --git a/intel/filterlists/module_test.go b/service/intel/filterlists/module_test.go
similarity index 100%
rename from intel/filterlists/module_test.go
rename to service/intel/filterlists/module_test.go
diff --git a/intel/filterlists/record.go b/service/intel/filterlists/record.go
similarity index 100%
rename from intel/filterlists/record.go
rename to service/intel/filterlists/record.go
diff --git a/intel/filterlists/updater.go b/service/intel/filterlists/updater.go
similarity index 100%
rename from intel/filterlists/updater.go
rename to service/intel/filterlists/updater.go
diff --git a/intel/geoip/country_info.go b/service/intel/geoip/country_info.go
similarity index 100%
rename from intel/geoip/country_info.go
rename to service/intel/geoip/country_info.go
diff --git a/intel/geoip/country_info_test.go b/service/intel/geoip/country_info_test.go
similarity index 100%
rename from intel/geoip/country_info_test.go
rename to service/intel/geoip/country_info_test.go
diff --git a/intel/geoip/database.go b/service/intel/geoip/database.go
similarity index 98%
rename from intel/geoip/database.go
rename to service/intel/geoip/database.go
index 61bde277..57b08578 100644
--- a/intel/geoip/database.go
+++ b/service/intel/geoip/database.go
@@ -10,7 +10,7 @@ import (
 
 	"github.com/safing/portbase/log"
 	"github.com/safing/portbase/updater"
-	"github.com/safing/portmaster/updates"
+	"github.com/safing/portmaster/service/updates"
 )
 
 var worker *updateWorker
diff --git a/intel/geoip/location.go b/service/intel/geoip/location.go
similarity index 100%
rename from intel/geoip/location.go
rename to service/intel/geoip/location.go
diff --git a/intel/geoip/location_test.go b/service/intel/geoip/location_test.go
similarity index 100%
rename from intel/geoip/location_test.go
rename to service/intel/geoip/location_test.go
diff --git a/intel/geoip/lookup.go b/service/intel/geoip/lookup.go
similarity index 100%
rename from intel/geoip/lookup.go
rename to service/intel/geoip/lookup.go
diff --git a/intel/geoip/lookup_test.go b/service/intel/geoip/lookup_test.go
similarity index 100%
rename from intel/geoip/lookup_test.go
rename to service/intel/geoip/lookup_test.go
diff --git a/intel/geoip/module.go b/service/intel/geoip/module.go
similarity index 94%
rename from intel/geoip/module.go
rename to service/intel/geoip/module.go
index 0c65f1af..c5d44e00 100644
--- a/intel/geoip/module.go
+++ b/service/intel/geoip/module.go
@@ -5,7 +5,7 @@ import (
 
 	"github.com/safing/portbase/api"
 	"github.com/safing/portbase/modules"
-	"github.com/safing/portmaster/updates"
+	"github.com/safing/portmaster/service/updates"
 )
 
 var module *modules.Module
diff --git a/intel/geoip/module_test.go b/service/intel/geoip/module_test.go
similarity index 64%
rename from intel/geoip/module_test.go
rename to service/intel/geoip/module_test.go
index c1ae951b..c223d920 100644
--- a/intel/geoip/module_test.go
+++ b/service/intel/geoip/module_test.go
@@ -3,7 +3,7 @@ package geoip
 import (
 	"testing"
 
-	"github.com/safing/portmaster/core/pmtesting"
+	"github.com/safing/portmaster/service/core/pmtesting"
 )
 
 func TestMain(m *testing.M) {
diff --git a/intel/geoip/regions.go b/service/intel/geoip/regions.go
similarity index 100%
rename from intel/geoip/regions.go
rename to service/intel/geoip/regions.go
diff --git a/intel/geoip/regions_test.go b/service/intel/geoip/regions_test.go
similarity index 100%
rename from intel/geoip/regions_test.go
rename to service/intel/geoip/regions_test.go
diff --git a/intel/module.go b/service/intel/module.go
similarity index 82%
rename from intel/module.go
rename to service/intel/module.go
index ceec6b64..35c2d75c 100644
--- a/intel/module.go
+++ b/service/intel/module.go
@@ -2,7 +2,7 @@ package intel
 
 import (
 	"github.com/safing/portbase/modules"
-	_ "github.com/safing/portmaster/intel/customlists"
+	_ "github.com/safing/portmaster/service/intel/customlists"
 )
 
 // Module of this package. Export needed for testing of the endpoints package.
diff --git a/intel/resolver.go b/service/intel/resolver.go
similarity index 100%
rename from intel/resolver.go
rename to service/intel/resolver.go
diff --git a/nameserver/config.go b/service/nameserver/config.go
similarity index 97%
rename from nameserver/config.go
rename to service/nameserver/config.go
index c466a154..3e13044a 100644
--- a/nameserver/config.go
+++ b/service/nameserver/config.go
@@ -5,7 +5,7 @@ import (
 	"runtime"
 
 	"github.com/safing/portbase/config"
-	"github.com/safing/portmaster/core"
+	"github.com/safing/portmaster/service/core"
 )
 
 // CfgDefaultNameserverAddressKey is the config key for the listen address..
diff --git a/nameserver/conflict.go b/service/nameserver/conflict.go
similarity index 94%
rename from nameserver/conflict.go
rename to service/nameserver/conflict.go
index e02e1fd5..f716f7eb 100644
--- a/nameserver/conflict.go
+++ b/service/nameserver/conflict.go
@@ -7,8 +7,8 @@ import (
 	processInfo "github.com/shirou/gopsutil/process"
 
 	"github.com/safing/portbase/log"
-	"github.com/safing/portmaster/network/packet"
-	"github.com/safing/portmaster/network/state"
+	"github.com/safing/portmaster/service/network/packet"
+	"github.com/safing/portmaster/service/network/state"
 )
 
 var commonResolverIPs = []net.IP{
diff --git a/nameserver/failing.go b/service/nameserver/failing.go
similarity index 97%
rename from nameserver/failing.go
rename to service/nameserver/failing.go
index 1880dc96..2637a61f 100644
--- a/nameserver/failing.go
+++ b/service/nameserver/failing.go
@@ -4,8 +4,8 @@ import (
 	"sync"
 	"time"
 
-	"github.com/safing/portmaster/netenv"
-	"github.com/safing/portmaster/resolver"
+	"github.com/safing/portmaster/service/netenv"
+	"github.com/safing/portmaster/service/resolver"
 )
 
 type failingQuery struct {
diff --git a/nameserver/metrics.go b/service/nameserver/metrics.go
similarity index 100%
rename from nameserver/metrics.go
rename to service/nameserver/metrics.go
diff --git a/nameserver/module.go b/service/nameserver/module.go
similarity index 97%
rename from nameserver/module.go
rename to service/nameserver/module.go
index ed7eb740..287ba48e 100644
--- a/nameserver/module.go
+++ b/service/nameserver/module.go
@@ -14,9 +14,9 @@ import (
 	"github.com/safing/portbase/modules"
 	"github.com/safing/portbase/modules/subsystems"
 	"github.com/safing/portbase/notifications"
-	"github.com/safing/portmaster/compat"
-	"github.com/safing/portmaster/firewall"
-	"github.com/safing/portmaster/netenv"
+	"github.com/safing/portmaster/service/compat"
+	"github.com/safing/portmaster/service/firewall"
+	"github.com/safing/portmaster/service/netenv"
 )
 
 var (
diff --git a/nameserver/nameserver.go b/service/nameserver/nameserver.go
similarity index 97%
rename from nameserver/nameserver.go
rename to service/nameserver/nameserver.go
index 464db782..55195756 100644
--- a/nameserver/nameserver.go
+++ b/service/nameserver/nameserver.go
@@ -11,12 +11,12 @@ import (
 	"github.com/miekg/dns"
 
 	"github.com/safing/portbase/log"
-	"github.com/safing/portmaster/firewall"
-	"github.com/safing/portmaster/nameserver/nsutil"
-	"github.com/safing/portmaster/netenv"
-	"github.com/safing/portmaster/network"
-	"github.com/safing/portmaster/network/netutils"
-	"github.com/safing/portmaster/resolver"
+	"github.com/safing/portmaster/service/firewall"
+	"github.com/safing/portmaster/service/nameserver/nsutil"
+	"github.com/safing/portmaster/service/netenv"
+	"github.com/safing/portmaster/service/network"
+	"github.com/safing/portmaster/service/network/netutils"
+	"github.com/safing/portmaster/service/resolver"
 )
 
 var hostname string
diff --git a/nameserver/nsutil/nsutil.go b/service/nameserver/nsutil/nsutil.go
similarity index 100%
rename from nameserver/nsutil/nsutil.go
rename to service/nameserver/nsutil/nsutil.go
diff --git a/nameserver/response.go b/service/nameserver/response.go
similarity index 97%
rename from nameserver/response.go
rename to service/nameserver/response.go
index 92dd80af..85daf140 100644
--- a/nameserver/response.go
+++ b/service/nameserver/response.go
@@ -7,7 +7,7 @@ import (
 	"github.com/miekg/dns"
 
 	"github.com/safing/portbase/log"
-	"github.com/safing/portmaster/nameserver/nsutil"
+	"github.com/safing/portmaster/service/nameserver/nsutil"
 )
 
 // sendResponse sends a response to query using w. The response message is
diff --git a/netenv/addresses_test.go b/service/netenv/addresses_test.go
similarity index 100%
rename from netenv/addresses_test.go
rename to service/netenv/addresses_test.go
diff --git a/netenv/adresses.go b/service/netenv/adresses.go
similarity index 98%
rename from netenv/adresses.go
rename to service/netenv/adresses.go
index b050ad33..902dd0da 100644
--- a/netenv/adresses.go
+++ b/service/netenv/adresses.go
@@ -7,7 +7,7 @@ import (
 	"time"
 
 	"github.com/safing/portbase/log"
-	"github.com/safing/portmaster/network/netutils"
+	"github.com/safing/portmaster/service/network/netutils"
 )
 
 // GetAssignedAddresses returns the assigned IPv4 and IPv6 addresses of the host.
diff --git a/netenv/api.go b/service/netenv/api.go
similarity index 100%
rename from netenv/api.go
rename to service/netenv/api.go
diff --git a/netenv/dbus_linux.go b/service/netenv/dbus_linux.go
similarity index 100%
rename from netenv/dbus_linux.go
rename to service/netenv/dbus_linux.go
diff --git a/netenv/dbus_linux_test.go b/service/netenv/dbus_linux_test.go
similarity index 100%
rename from netenv/dbus_linux_test.go
rename to service/netenv/dbus_linux_test.go
diff --git a/netenv/dialing.go b/service/netenv/dialing.go
similarity index 100%
rename from netenv/dialing.go
rename to service/netenv/dialing.go
diff --git a/netenv/environment.go b/service/netenv/environment.go
similarity index 100%
rename from netenv/environment.go
rename to service/netenv/environment.go
diff --git a/netenv/environment_default.go b/service/netenv/environment_default.go
similarity index 100%
rename from netenv/environment_default.go
rename to service/netenv/environment_default.go
diff --git a/netenv/environment_linux.go b/service/netenv/environment_linux.go
similarity index 98%
rename from netenv/environment_linux.go
rename to service/netenv/environment_linux.go
index d6b57b91..5f39875b 100644
--- a/netenv/environment_linux.go
+++ b/service/netenv/environment_linux.go
@@ -11,7 +11,7 @@ import (
 	"github.com/miekg/dns"
 
 	"github.com/safing/portbase/log"
-	"github.com/safing/portmaster/network/netutils"
+	"github.com/safing/portmaster/service/network/netutils"
 )
 
 var (
diff --git a/netenv/environment_linux_test.go b/service/netenv/environment_linux_test.go
similarity index 100%
rename from netenv/environment_linux_test.go
rename to service/netenv/environment_linux_test.go
diff --git a/netenv/environment_test.go b/service/netenv/environment_test.go
similarity index 100%
rename from netenv/environment_test.go
rename to service/netenv/environment_test.go
diff --git a/netenv/environment_windows.go b/service/netenv/environment_windows.go
similarity index 100%
rename from netenv/environment_windows.go
rename to service/netenv/environment_windows.go
diff --git a/netenv/environment_windows_test.go b/service/netenv/environment_windows_test.go
similarity index 100%
rename from netenv/environment_windows_test.go
rename to service/netenv/environment_windows_test.go
diff --git a/netenv/icmp_listener.go b/service/netenv/icmp_listener.go
similarity index 98%
rename from netenv/icmp_listener.go
rename to service/netenv/icmp_listener.go
index ca90b1e4..d1716d8a 100644
--- a/netenv/icmp_listener.go
+++ b/service/netenv/icmp_listener.go
@@ -7,7 +7,7 @@ import (
 	"github.com/tevino/abool"
 
 	"github.com/safing/portbase/log"
-	"github.com/safing/portmaster/network/packet"
+	"github.com/safing/portmaster/service/network/packet"
 )
 
 /*
diff --git a/netenv/location.go b/service/netenv/location.go
similarity index 98%
rename from netenv/location.go
rename to service/netenv/location.go
index 23de17ff..276e33a3 100644
--- a/netenv/location.go
+++ b/service/netenv/location.go
@@ -14,9 +14,9 @@ import (
 
 	"github.com/safing/portbase/log"
 	"github.com/safing/portbase/rng"
-	"github.com/safing/portmaster/intel/geoip"
-	"github.com/safing/portmaster/network/netutils"
-	"github.com/safing/portmaster/network/packet"
+	"github.com/safing/portmaster/service/intel/geoip"
+	"github.com/safing/portmaster/service/network/netutils"
+	"github.com/safing/portmaster/service/network/packet"
 )
 
 var (
diff --git a/netenv/location_default.go b/service/netenv/location_default.go
similarity index 100%
rename from netenv/location_default.go
rename to service/netenv/location_default.go
diff --git a/netenv/location_test.go b/service/netenv/location_test.go
similarity index 100%
rename from netenv/location_test.go
rename to service/netenv/location_test.go
diff --git a/netenv/location_windows.go b/service/netenv/location_windows.go
similarity index 100%
rename from netenv/location_windows.go
rename to service/netenv/location_windows.go
diff --git a/netenv/main.go b/service/netenv/main.go
similarity index 100%
rename from netenv/main.go
rename to service/netenv/main.go
diff --git a/netenv/main_test.go b/service/netenv/main_test.go
similarity index 65%
rename from netenv/main_test.go
rename to service/netenv/main_test.go
index 1ee7b730..64588b38 100644
--- a/netenv/main_test.go
+++ b/service/netenv/main_test.go
@@ -3,7 +3,7 @@ package netenv
 import (
 	"testing"
 
-	"github.com/safing/portmaster/core/pmtesting"
+	"github.com/safing/portmaster/service/core/pmtesting"
 )
 
 func TestMain(m *testing.M) {
diff --git a/netenv/network-change.go b/service/netenv/network-change.go
similarity index 100%
rename from netenv/network-change.go
rename to service/netenv/network-change.go
diff --git a/netenv/notes.md b/service/netenv/notes.md
similarity index 100%
rename from netenv/notes.md
rename to service/netenv/notes.md
diff --git a/netenv/online-status.go b/service/netenv/online-status.go
similarity index 99%
rename from netenv/online-status.go
rename to service/netenv/online-status.go
index 7ec4a3f4..fac5e170 100644
--- a/netenv/online-status.go
+++ b/service/netenv/online-status.go
@@ -15,8 +15,8 @@ import (
 
 	"github.com/safing/portbase/log"
 	"github.com/safing/portbase/notifications"
-	"github.com/safing/portmaster/network/netutils"
-	"github.com/safing/portmaster/updates"
+	"github.com/safing/portmaster/service/network/netutils"
+	"github.com/safing/portmaster/service/updates"
 )
 
 // OnlineStatus represent a state of connectivity to the Internet.
diff --git a/netenv/online-status_test.go b/service/netenv/online-status_test.go
similarity index 100%
rename from netenv/online-status_test.go
rename to service/netenv/online-status_test.go
diff --git a/netenv/os_android.go b/service/netenv/os_android.go
similarity index 92%
rename from netenv/os_android.go
rename to service/netenv/os_android.go
index 84c36958..aceed896 100644
--- a/netenv/os_android.go
+++ b/service/netenv/os_android.go
@@ -1,9 +1,10 @@
 package netenv
 
 import (
-	"github.com/safing/portmaster-android/go/app_interface"
 	"net"
 	"time"
+
+	"github.com/safing/portmaster-android/go/app_interface"
 )
 
 var (
diff --git a/netenv/os_default.go b/service/netenv/os_default.go
similarity index 100%
rename from netenv/os_default.go
rename to service/netenv/os_default.go
diff --git a/netquery/active_chart_handler.go b/service/netquery/active_chart_handler.go
similarity index 98%
rename from netquery/active_chart_handler.go
rename to service/netquery/active_chart_handler.go
index 08628394..2d2fb682 100644
--- a/netquery/active_chart_handler.go
+++ b/service/netquery/active_chart_handler.go
@@ -10,7 +10,7 @@ import (
 	"net/http"
 	"strings"
 
-	"github.com/safing/portmaster/netquery/orm"
+	"github.com/safing/portmaster/service/netquery/orm"
 )
 
 // ActiveChartHandler handles requests for connection charts.
diff --git a/netquery/bandwidth_chart_handler.go b/service/netquery/bandwidth_chart_handler.go
similarity index 98%
rename from netquery/bandwidth_chart_handler.go
rename to service/netquery/bandwidth_chart_handler.go
index 5bb5b526..615682e6 100644
--- a/netquery/bandwidth_chart_handler.go
+++ b/service/netquery/bandwidth_chart_handler.go
@@ -10,7 +10,7 @@ import (
 	"net/http"
 	"strings"
 
-	"github.com/safing/portmaster/netquery/orm"
+	"github.com/safing/portmaster/service/netquery/orm"
 )
 
 // BandwidthChartHandler handles requests for connection charts.
diff --git a/netquery/database.go b/service/netquery/database.go
similarity index 98%
rename from netquery/database.go
rename to service/netquery/database.go
index e3287345..cb9f4039 100644
--- a/netquery/database.go
+++ b/service/netquery/database.go
@@ -19,11 +19,11 @@ import (
 	"github.com/safing/portbase/config"
 	"github.com/safing/portbase/dataroot"
 	"github.com/safing/portbase/log"
-	"github.com/safing/portmaster/netquery/orm"
-	"github.com/safing/portmaster/network"
-	"github.com/safing/portmaster/network/netutils"
-	"github.com/safing/portmaster/network/packet"
-	"github.com/safing/portmaster/profile"
+	"github.com/safing/portmaster/service/netquery/orm"
+	"github.com/safing/portmaster/service/network"
+	"github.com/safing/portmaster/service/network/netutils"
+	"github.com/safing/portmaster/service/network/packet"
+	"github.com/safing/portmaster/service/profile"
 )
 
 // InMemory is the "file path" to open a new in-memory database.
diff --git a/netquery/manager.go b/service/netquery/manager.go
similarity index 99%
rename from netquery/manager.go
rename to service/netquery/manager.go
index 34b779b5..76403e03 100644
--- a/netquery/manager.go
+++ b/service/netquery/manager.go
@@ -10,7 +10,7 @@ import (
 	"github.com/safing/portbase/formats/dsd"
 	"github.com/safing/portbase/log"
 	"github.com/safing/portbase/runtime"
-	"github.com/safing/portmaster/network"
+	"github.com/safing/portmaster/service/network"
 )
 
 type (
diff --git a/netquery/module_api.go b/service/netquery/module_api.go
similarity index 99%
rename from netquery/module_api.go
rename to service/netquery/module_api.go
index b4e56b02..00950a01 100644
--- a/netquery/module_api.go
+++ b/service/netquery/module_api.go
@@ -17,7 +17,7 @@ import (
 	"github.com/safing/portbase/modules"
 	"github.com/safing/portbase/modules/subsystems"
 	"github.com/safing/portbase/runtime"
-	"github.com/safing/portmaster/network"
+	"github.com/safing/portmaster/service/network"
 )
 
 // DefaultModule is the default netquery module.
diff --git a/netquery/orm/decoder.go b/service/netquery/orm/decoder.go
similarity index 100%
rename from netquery/orm/decoder.go
rename to service/netquery/orm/decoder.go
diff --git a/netquery/orm/decoder_test.go b/service/netquery/orm/decoder_test.go
similarity index 100%
rename from netquery/orm/decoder_test.go
rename to service/netquery/orm/decoder_test.go
diff --git a/netquery/orm/encoder.go b/service/netquery/orm/encoder.go
similarity index 100%
rename from netquery/orm/encoder.go
rename to service/netquery/orm/encoder.go
diff --git a/netquery/orm/encoder_test.go b/service/netquery/orm/encoder_test.go
similarity index 100%
rename from netquery/orm/encoder_test.go
rename to service/netquery/orm/encoder_test.go
diff --git a/netquery/orm/query_runner.go b/service/netquery/orm/query_runner.go
similarity index 100%
rename from netquery/orm/query_runner.go
rename to service/netquery/orm/query_runner.go
diff --git a/netquery/orm/schema_builder.go b/service/netquery/orm/schema_builder.go
similarity index 100%
rename from netquery/orm/schema_builder.go
rename to service/netquery/orm/schema_builder.go
diff --git a/netquery/orm/schema_builder_test.go b/service/netquery/orm/schema_builder_test.go
similarity index 100%
rename from netquery/orm/schema_builder_test.go
rename to service/netquery/orm/schema_builder_test.go
diff --git a/netquery/query.go b/service/netquery/query.go
similarity index 99%
rename from netquery/query.go
rename to service/netquery/query.go
index 2b81bfb1..cb84ac30 100644
--- a/netquery/query.go
+++ b/service/netquery/query.go
@@ -13,7 +13,7 @@ import (
 	"golang.org/x/exp/slices"
 	"zombiezen.com/go/sqlite"
 
-	"github.com/safing/portmaster/netquery/orm"
+	"github.com/safing/portmaster/service/netquery/orm"
 )
 
 // DatabaseName is a database name constant.
diff --git a/netquery/query_handler.go b/service/netquery/query_handler.go
similarity index 99%
rename from netquery/query_handler.go
rename to service/netquery/query_handler.go
index 8e704d3e..68b1feb2 100644
--- a/netquery/query_handler.go
+++ b/service/netquery/query_handler.go
@@ -14,7 +14,7 @@ import (
 	servertiming "github.com/mitchellh/go-server-timing"
 
 	"github.com/safing/portbase/log"
-	"github.com/safing/portmaster/netquery/orm"
+	"github.com/safing/portmaster/service/netquery/orm"
 )
 
 var charOnlyRegexp = regexp.MustCompile("[a-zA-Z]+")
diff --git a/netquery/query_request.go b/service/netquery/query_request.go
similarity index 99%
rename from netquery/query_request.go
rename to service/netquery/query_request.go
index ea5162a9..97fc5789 100644
--- a/netquery/query_request.go
+++ b/service/netquery/query_request.go
@@ -7,7 +7,7 @@ import (
 
 	"golang.org/x/exp/slices"
 
-	"github.com/safing/portmaster/netquery/orm"
+	"github.com/safing/portmaster/service/netquery/orm"
 )
 
 type (
diff --git a/netquery/query_test.go b/service/netquery/query_test.go
similarity index 98%
rename from netquery/query_test.go
rename to service/netquery/query_test.go
index afd65b4f..bc9fde27 100644
--- a/netquery/query_test.go
+++ b/service/netquery/query_test.go
@@ -10,7 +10,7 @@ import (
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 
-	"github.com/safing/portmaster/netquery/orm"
+	"github.com/safing/portmaster/service/netquery/orm"
 )
 
 func TestUnmarshalQuery(t *testing.T) { //nolint:tparallel
diff --git a/netquery/runtime_query_runner.go b/service/netquery/runtime_query_runner.go
similarity index 97%
rename from netquery/runtime_query_runner.go
rename to service/netquery/runtime_query_runner.go
index 3b443ec5..67ba449b 100644
--- a/netquery/runtime_query_runner.go
+++ b/service/netquery/runtime_query_runner.go
@@ -10,7 +10,7 @@ import (
 	"github.com/safing/portbase/formats/dsd"
 	"github.com/safing/portbase/log"
 	"github.com/safing/portbase/runtime"
-	"github.com/safing/portmaster/netquery/orm"
+	"github.com/safing/portmaster/service/netquery/orm"
 )
 
 // RuntimeQueryRunner provides a simple interface for the runtime database
diff --git a/network/api.go b/service/network/api.go
similarity index 97%
rename from network/api.go
rename to service/network/api.go
index c59b5aaf..afb2d610 100644
--- a/network/api.go
+++ b/service/network/api.go
@@ -12,11 +12,11 @@ import (
 	"github.com/safing/portbase/config"
 	"github.com/safing/portbase/database/query"
 	"github.com/safing/portbase/utils/debug"
-	"github.com/safing/portmaster/network/state"
-	"github.com/safing/portmaster/process"
-	"github.com/safing/portmaster/resolver"
-	"github.com/safing/portmaster/status"
-	"github.com/safing/portmaster/updates"
+	"github.com/safing/portmaster/service/network/state"
+	"github.com/safing/portmaster/service/process"
+	"github.com/safing/portmaster/service/resolver"
+	"github.com/safing/portmaster/service/status"
+	"github.com/safing/portmaster/service/updates"
 )
 
 func registerAPIEndpoints() error {
diff --git a/network/api_test.go b/service/network/api_test.go
similarity index 98%
rename from network/api_test.go
rename to service/network/api_test.go
index 62647527..c44109b0 100644
--- a/network/api_test.go
+++ b/service/network/api_test.go
@@ -5,7 +5,7 @@ import (
 	"net"
 	"testing"
 
-	"github.com/safing/portmaster/intel"
+	"github.com/safing/portmaster/service/intel"
 )
 
 func TestDebugInfoLineFormatting(t *testing.T) {
diff --git a/network/clean.go b/service/network/clean.go
similarity index 95%
rename from network/clean.go
rename to service/network/clean.go
index b15fbaa0..9901b00b 100644
--- a/network/clean.go
+++ b/service/network/clean.go
@@ -5,9 +5,9 @@ import (
 	"time"
 
 	"github.com/safing/portbase/log"
-	"github.com/safing/portmaster/network/packet"
-	"github.com/safing/portmaster/network/state"
-	"github.com/safing/portmaster/process"
+	"github.com/safing/portmaster/service/network/packet"
+	"github.com/safing/portmaster/service/network/state"
+	"github.com/safing/portmaster/service/process"
 )
 
 const (
diff --git a/network/connection.go b/service/network/connection.go
similarity index 98%
rename from network/connection.go
rename to service/network/connection.go
index b1ed96fe..32ba8ee9 100644
--- a/network/connection.go
+++ b/service/network/connection.go
@@ -13,16 +13,16 @@ import (
 	"github.com/safing/portbase/database/record"
 	"github.com/safing/portbase/log"
 	"github.com/safing/portbase/notifications"
-	"github.com/safing/portmaster/intel"
-	"github.com/safing/portmaster/netenv"
-	"github.com/safing/portmaster/network/netutils"
-	"github.com/safing/portmaster/network/packet"
-	"github.com/safing/portmaster/process"
-	_ "github.com/safing/portmaster/process/tags"
-	"github.com/safing/portmaster/resolver"
-	"github.com/safing/spn/access"
-	"github.com/safing/spn/access/account"
-	"github.com/safing/spn/navigator"
+	"github.com/safing/portmaster/service/intel"
+	"github.com/safing/portmaster/service/netenv"
+	"github.com/safing/portmaster/service/network/netutils"
+	"github.com/safing/portmaster/service/network/packet"
+	"github.com/safing/portmaster/service/process"
+	_ "github.com/safing/portmaster/service/process/tags"
+	"github.com/safing/portmaster/service/resolver"
+	"github.com/safing/portmaster/spn/access"
+	"github.com/safing/portmaster/spn/access/account"
+	"github.com/safing/portmaster/spn/navigator"
 )
 
 // FirewallHandler defines the function signature for a firewall
diff --git a/network/connection_android.go b/service/network/connection_android.go
similarity index 88%
rename from network/connection_android.go
rename to service/network/connection_android.go
index 71b16ed4..bbd49864 100644
--- a/network/connection_android.go
+++ b/service/network/connection_android.go
@@ -6,11 +6,11 @@ import (
 	"net"
 	"time"
 
-	"github.com/safing/portmaster/intel"
-	"github.com/safing/portmaster/network/netutils"
-	"github.com/safing/portmaster/network/packet"
-	"github.com/safing/portmaster/process"
-	"github.com/safing/spn/navigator"
+	"github.com/safing/portmaster/service/intel"
+	"github.com/safing/portmaster/service/network/netutils"
+	"github.com/safing/portmaster/service/network/packet"
+	"github.com/safing/portmaster/service/process"
+	"github.com/safing/portmaster/spn/navigator"
 	"github.com/tevino/abool"
 )
 
diff --git a/network/connection_store.go b/service/network/connection_store.go
similarity index 100%
rename from network/connection_store.go
rename to service/network/connection_store.go
diff --git a/network/database.go b/service/network/database.go
similarity index 98%
rename from network/database.go
rename to service/network/database.go
index 457b2693..9b098d48 100644
--- a/network/database.go
+++ b/service/network/database.go
@@ -11,7 +11,7 @@ import (
 	"github.com/safing/portbase/database/query"
 	"github.com/safing/portbase/database/record"
 	"github.com/safing/portbase/database/storage"
-	"github.com/safing/portmaster/process"
+	"github.com/safing/portmaster/service/process"
 )
 
 const (
diff --git a/network/dns.go b/service/network/dns.go
similarity index 97%
rename from network/dns.go
rename to service/network/dns.go
index a0bef466..201dd25b 100644
--- a/network/dns.go
+++ b/service/network/dns.go
@@ -11,10 +11,10 @@ import (
 	"golang.org/x/exp/slices"
 
 	"github.com/safing/portbase/log"
-	"github.com/safing/portmaster/nameserver/nsutil"
-	"github.com/safing/portmaster/network/packet"
-	"github.com/safing/portmaster/process"
-	"github.com/safing/portmaster/resolver"
+	"github.com/safing/portmaster/service/nameserver/nsutil"
+	"github.com/safing/portmaster/service/network/packet"
+	"github.com/safing/portmaster/service/process"
+	"github.com/safing/portmaster/service/resolver"
 )
 
 var (
diff --git a/network/iphelper/get.go b/service/network/iphelper/get.go
similarity index 96%
rename from network/iphelper/get.go
rename to service/network/iphelper/get.go
index 31f1c925..e78c70fc 100644
--- a/network/iphelper/get.go
+++ b/service/network/iphelper/get.go
@@ -5,7 +5,7 @@ package iphelper
 import (
 	"sync"
 
-	"github.com/safing/portmaster/network/socket"
+	"github.com/safing/portmaster/service/network/socket"
 )
 
 var (
diff --git a/network/iphelper/iphelper.go b/service/network/iphelper/iphelper.go
similarity index 100%
rename from network/iphelper/iphelper.go
rename to service/network/iphelper/iphelper.go
diff --git a/network/iphelper/tables.go b/service/network/iphelper/tables.go
similarity index 99%
rename from network/iphelper/tables.go
rename to service/network/iphelper/tables.go
index 94998d7e..9e082173 100644
--- a/network/iphelper/tables.go
+++ b/service/network/iphelper/tables.go
@@ -11,7 +11,7 @@ import (
 	"unsafe"
 
 	"github.com/safing/portbase/log"
-	"github.com/safing/portmaster/network/socket"
+	"github.com/safing/portmaster/service/network/socket"
 
 	"golang.org/x/sys/windows"
 )
diff --git a/network/iphelper/tables_test.go b/service/network/iphelper/tables_test.go
similarity index 100%
rename from network/iphelper/tables_test.go
rename to service/network/iphelper/tables_test.go
diff --git a/network/metrics.go b/service/network/metrics.go
similarity index 98%
rename from network/metrics.go
rename to service/network/metrics.go
index 66f19e1b..5ffa1880 100644
--- a/network/metrics.go
+++ b/service/network/metrics.go
@@ -4,7 +4,7 @@ import (
 	"github.com/safing/portbase/api"
 	"github.com/safing/portbase/config"
 	"github.com/safing/portbase/metrics"
-	"github.com/safing/portmaster/process"
+	"github.com/safing/portmaster/service/process"
 )
 
 var (
diff --git a/network/module.go b/service/network/module.go
similarity index 96%
rename from network/module.go
rename to service/network/module.go
index 1a7fe891..bebcb467 100644
--- a/network/module.go
+++ b/service/network/module.go
@@ -8,9 +8,9 @@ import (
 
 	"github.com/safing/portbase/log"
 	"github.com/safing/portbase/modules"
-	"github.com/safing/portmaster/netenv"
-	"github.com/safing/portmaster/network/state"
-	"github.com/safing/portmaster/profile"
+	"github.com/safing/portmaster/service/netenv"
+	"github.com/safing/portmaster/service/network/state"
+	"github.com/safing/portmaster/service/profile"
 )
 
 var (
diff --git a/network/multicast.go b/service/network/multicast.go
similarity index 96%
rename from network/multicast.go
rename to service/network/multicast.go
index d7c8f9a7..d12809a9 100644
--- a/network/multicast.go
+++ b/service/network/multicast.go
@@ -3,7 +3,7 @@ package network
 import (
 	"net"
 
-	"github.com/safing/portmaster/network/netutils"
+	"github.com/safing/portmaster/service/network/netutils"
 )
 
 // GetMulticastRequestConn searches for and returns the requesting connnection
diff --git a/network/netutils/address.go b/service/network/netutils/address.go
similarity index 96%
rename from network/netutils/address.go
rename to service/network/netutils/address.go
index 3d89c39c..44337392 100644
--- a/network/netutils/address.go
+++ b/service/network/netutils/address.go
@@ -5,7 +5,7 @@ import (
 	"net"
 	"strconv"
 
-	"github.com/safing/portmaster/network/packet"
+	"github.com/safing/portmaster/service/network/packet"
 )
 
 var errInvalidIP = errors.New("invalid IP address")
diff --git a/network/netutils/dns.go b/service/network/netutils/dns.go
similarity index 100%
rename from network/netutils/dns.go
rename to service/network/netutils/dns.go
diff --git a/network/netutils/dns_test.go b/service/network/netutils/dns_test.go
similarity index 100%
rename from network/netutils/dns_test.go
rename to service/network/netutils/dns_test.go
diff --git a/network/netutils/ip.go b/service/network/netutils/ip.go
similarity index 100%
rename from network/netutils/ip.go
rename to service/network/netutils/ip.go
diff --git a/network/netutils/ip_test.go b/service/network/netutils/ip_test.go
similarity index 100%
rename from network/netutils/ip_test.go
rename to service/network/netutils/ip_test.go
diff --git a/network/netutils/tcpassembly.go b/service/network/netutils/tcpassembly.go
similarity index 100%
rename from network/netutils/tcpassembly.go
rename to service/network/netutils/tcpassembly.go
diff --git a/network/packet/bandwidth.go b/service/network/packet/bandwidth.go
similarity index 100%
rename from network/packet/bandwidth.go
rename to service/network/packet/bandwidth.go
diff --git a/network/packet/const.go b/service/network/packet/const.go
similarity index 100%
rename from network/packet/const.go
rename to service/network/packet/const.go
diff --git a/network/packet/info_only.go b/service/network/packet/info_only.go
similarity index 100%
rename from network/packet/info_only.go
rename to service/network/packet/info_only.go
diff --git a/network/packet/packet.go b/service/network/packet/packet.go
similarity index 100%
rename from network/packet/packet.go
rename to service/network/packet/packet.go
diff --git a/network/packet/packetinfo.go b/service/network/packet/packetinfo.go
similarity index 100%
rename from network/packet/packetinfo.go
rename to service/network/packet/packetinfo.go
diff --git a/network/packet/parse.go b/service/network/packet/parse.go
similarity index 100%
rename from network/packet/parse.go
rename to service/network/packet/parse.go
diff --git a/network/ports.go b/service/network/ports.go
similarity index 100%
rename from network/ports.go
rename to service/network/ports.go
diff --git a/network/proc/findpid.go b/service/network/proc/findpid.go
similarity index 97%
rename from network/proc/findpid.go
rename to service/network/proc/findpid.go
index 2fbb7130..e5cd5185 100644
--- a/network/proc/findpid.go
+++ b/service/network/proc/findpid.go
@@ -9,7 +9,7 @@ import (
 	"strconv"
 
 	"github.com/safing/portbase/log"
-	"github.com/safing/portmaster/network/socket"
+	"github.com/safing/portmaster/service/network/socket"
 )
 
 // GetPID returns the already existing pid of the given socket info or searches for it.
diff --git a/network/proc/pids_by_user.go b/service/network/proc/pids_by_user.go
similarity index 100%
rename from network/proc/pids_by_user.go
rename to service/network/proc/pids_by_user.go
diff --git a/network/proc/tables.go b/service/network/proc/tables.go
similarity index 99%
rename from network/proc/tables.go
rename to service/network/proc/tables.go
index 62c4a4c5..2569a7f0 100644
--- a/network/proc/tables.go
+++ b/service/network/proc/tables.go
@@ -13,7 +13,7 @@ import (
 	"unicode"
 
 	"github.com/safing/portbase/log"
-	"github.com/safing/portmaster/network/socket"
+	"github.com/safing/portmaster/service/network/socket"
 )
 
 /*
diff --git a/network/proc/tables_test.go b/service/network/proc/tables_test.go
similarity index 100%
rename from network/proc/tables_test.go
rename to service/network/proc/tables_test.go
diff --git a/network/reference/ports.go b/service/network/reference/ports.go
similarity index 100%
rename from network/reference/ports.go
rename to service/network/reference/ports.go
diff --git a/network/reference/protocols.go b/service/network/reference/protocols.go
similarity index 100%
rename from network/reference/protocols.go
rename to service/network/reference/protocols.go
diff --git a/network/socket/socket.go b/service/network/socket/socket.go
similarity index 100%
rename from network/socket/socket.go
rename to service/network/socket/socket.go
diff --git a/network/state/exists.go b/service/network/state/exists.go
similarity index 95%
rename from network/state/exists.go
rename to service/network/state/exists.go
index ed0c48c3..cbe81239 100644
--- a/network/state/exists.go
+++ b/service/network/state/exists.go
@@ -3,8 +3,8 @@ package state
 import (
 	"time"
 
-	"github.com/safing/portmaster/network/packet"
-	"github.com/safing/portmaster/network/socket"
+	"github.com/safing/portmaster/service/network/packet"
+	"github.com/safing/portmaster/service/network/socket"
 )
 
 const (
diff --git a/network/state/info.go b/service/network/state/info.go
similarity index 89%
rename from network/state/info.go
rename to service/network/state/info.go
index 483cd66e..306c36a0 100644
--- a/network/state/info.go
+++ b/service/network/state/info.go
@@ -4,8 +4,8 @@ import (
 	"sync"
 
 	"github.com/safing/portbase/database/record"
-	"github.com/safing/portmaster/netenv"
-	"github.com/safing/portmaster/network/socket"
+	"github.com/safing/portmaster/service/netenv"
+	"github.com/safing/portmaster/service/network/socket"
 )
 
 // Info holds network state information as provided by the system.
diff --git a/network/state/lookup.go b/service/network/state/lookup.go
similarity index 97%
rename from network/state/lookup.go
rename to service/network/state/lookup.go
index 35006b2c..39f3d2d9 100644
--- a/network/state/lookup.go
+++ b/service/network/state/lookup.go
@@ -3,9 +3,9 @@ package state
 import (
 	"errors"
 
-	"github.com/safing/portmaster/network/netutils"
-	"github.com/safing/portmaster/network/packet"
-	"github.com/safing/portmaster/network/socket"
+	"github.com/safing/portmaster/service/network/netutils"
+	"github.com/safing/portmaster/service/network/packet"
+	"github.com/safing/portmaster/service/network/socket"
 )
 
 // - TCP
diff --git a/network/state/system_default.go b/service/network/state/system_default.go
similarity index 95%
rename from network/state/system_default.go
rename to service/network/state/system_default.go
index 4b798996..9ccf96c9 100644
--- a/network/state/system_default.go
+++ b/service/network/state/system_default.go
@@ -7,7 +7,7 @@ import (
 	"time"
 
 	"github.com/safing/portbase/config"
-	"github.com/safing/portmaster/network/socket"
+	"github.com/safing/portmaster/service/network/socket"
 )
 
 func init() {
diff --git a/network/state/system_linux.go b/service/network/state/system_linux.go
similarity index 90%
rename from network/state/system_linux.go
rename to service/network/state/system_linux.go
index c3e792a8..6c6bfe6f 100644
--- a/network/state/system_linux.go
+++ b/service/network/state/system_linux.go
@@ -3,8 +3,8 @@ package state
 import (
 	"time"
 
-	"github.com/safing/portmaster/network/proc"
-	"github.com/safing/portmaster/network/socket"
+	"github.com/safing/portmaster/service/network/proc"
+	"github.com/safing/portmaster/service/network/socket"
 )
 
 var (
diff --git a/network/state/system_windows.go b/service/network/state/system_windows.go
similarity index 80%
rename from network/state/system_windows.go
rename to service/network/state/system_windows.go
index 2a95a01e..fea998dd 100644
--- a/network/state/system_windows.go
+++ b/service/network/state/system_windows.go
@@ -1,8 +1,8 @@
 package state
 
 import (
-	"github.com/safing/portmaster/network/iphelper"
-	"github.com/safing/portmaster/network/socket"
+	"github.com/safing/portmaster/service/network/iphelper"
+	"github.com/safing/portmaster/service/network/socket"
 )
 
 var (
diff --git a/network/state/tcp.go b/service/network/state/tcp.go
similarity index 97%
rename from network/state/tcp.go
rename to service/network/state/tcp.go
index 5f8c03d7..33e053be 100644
--- a/network/state/tcp.go
+++ b/service/network/state/tcp.go
@@ -8,7 +8,7 @@ import (
 
 	"github.com/safing/portbase/log"
 	"github.com/safing/portbase/utils"
-	"github.com/safing/portmaster/network/socket"
+	"github.com/safing/portmaster/service/network/socket"
 )
 
 const (
diff --git a/network/state/udp.go b/service/network/state/udp.go
similarity index 97%
rename from network/state/udp.go
rename to service/network/state/udp.go
index 40696820..ce7139e4 100644
--- a/network/state/udp.go
+++ b/service/network/state/udp.go
@@ -10,9 +10,9 @@ import (
 
 	"github.com/safing/portbase/log"
 	"github.com/safing/portbase/utils"
-	"github.com/safing/portmaster/netenv"
-	"github.com/safing/portmaster/network/packet"
-	"github.com/safing/portmaster/network/socket"
+	"github.com/safing/portmaster/service/netenv"
+	"github.com/safing/portmaster/service/network/packet"
+	"github.com/safing/portmaster/service/network/socket"
 )
 
 type udpTable struct {
diff --git a/network/status.go b/service/network/status.go
similarity index 100%
rename from network/status.go
rename to service/network/status.go
diff --git a/process/api.go b/service/process/api.go
similarity index 98%
rename from process/api.go
rename to service/process/api.go
index 0f5c43a4..b687ae83 100644
--- a/process/api.go
+++ b/service/process/api.go
@@ -7,7 +7,7 @@ import (
 	"strconv"
 
 	"github.com/safing/portbase/api"
-	"github.com/safing/portmaster/profile"
+	"github.com/safing/portmaster/service/profile"
 )
 
 func registerAPIEndpoints() error {
diff --git a/process/config.go b/service/process/config.go
similarity index 100%
rename from process/config.go
rename to service/process/config.go
diff --git a/process/database.go b/service/process/database.go
similarity index 98%
rename from process/database.go
rename to service/process/database.go
index 2041c6cf..82a6dcb8 100644
--- a/process/database.go
+++ b/service/process/database.go
@@ -13,7 +13,7 @@ import (
 
 	"github.com/safing/portbase/database"
 	"github.com/safing/portbase/log"
-	"github.com/safing/portmaster/profile"
+	"github.com/safing/portmaster/service/profile"
 )
 
 const processDatabaseNamespace = "network:tree"
diff --git a/process/doc.go b/service/process/doc.go
similarity index 100%
rename from process/doc.go
rename to service/process/doc.go
diff --git a/process/executable.go b/service/process/executable.go
similarity index 100%
rename from process/executable.go
rename to service/process/executable.go
diff --git a/process/find.go b/service/process/find.go
similarity index 95%
rename from process/find.go
rename to service/process/find.go
index be5afdb6..98681832 100644
--- a/process/find.go
+++ b/service/process/find.go
@@ -8,10 +8,10 @@ import (
 
 	"github.com/safing/portbase/api"
 	"github.com/safing/portbase/log"
-	"github.com/safing/portmaster/network/netutils"
-	"github.com/safing/portmaster/network/packet"
-	"github.com/safing/portmaster/network/state"
-	"github.com/safing/portmaster/profile"
+	"github.com/safing/portmaster/service/network/netutils"
+	"github.com/safing/portmaster/service/network/packet"
+	"github.com/safing/portmaster/service/network/state"
+	"github.com/safing/portmaster/service/profile"
 )
 
 // GetProcessWithProfile returns the process, including the profile.
diff --git a/process/module.go b/service/process/module.go
similarity index 91%
rename from process/module.go
rename to service/process/module.go
index b33be8ca..cef4fe2a 100644
--- a/process/module.go
+++ b/service/process/module.go
@@ -4,7 +4,7 @@ import (
 	"os"
 
 	"github.com/safing/portbase/modules"
-	"github.com/safing/portmaster/updates"
+	"github.com/safing/portmaster/service/updates"
 )
 
 var (
diff --git a/process/module_test.go b/service/process/module_test.go
similarity index 65%
rename from process/module_test.go
rename to service/process/module_test.go
index fc33c7bd..f2350d94 100644
--- a/process/module_test.go
+++ b/service/process/module_test.go
@@ -3,7 +3,7 @@ package process
 import (
 	"testing"
 
-	"github.com/safing/portmaster/core/pmtesting"
+	"github.com/safing/portmaster/service/core/pmtesting"
 )
 
 func TestMain(m *testing.M) {
diff --git a/process/process.go b/service/process/process.go
similarity index 99%
rename from process/process.go
rename to service/process/process.go
index b7d0cf41..4508310e 100644
--- a/process/process.go
+++ b/service/process/process.go
@@ -15,7 +15,7 @@ import (
 
 	"github.com/safing/portbase/database/record"
 	"github.com/safing/portbase/log"
-	"github.com/safing/portmaster/profile"
+	"github.com/safing/portmaster/service/profile"
 )
 
 const onLinux = runtime.GOOS == "linux"
diff --git a/process/process_default.go b/service/process/process_default.go
similarity index 100%
rename from process/process_default.go
rename to service/process/process_default.go
diff --git a/process/process_linux.go b/service/process/process_linux.go
similarity index 100%
rename from process/process_linux.go
rename to service/process/process_linux.go
diff --git a/process/process_windows.go b/service/process/process_windows.go
similarity index 100%
rename from process/process_windows.go
rename to service/process/process_windows.go
diff --git a/process/profile.go b/service/process/profile.go
similarity index 98%
rename from process/profile.go
rename to service/process/profile.go
index 27c0f985..53599913 100644
--- a/process/profile.go
+++ b/service/process/profile.go
@@ -8,7 +8,7 @@ import (
 	"strings"
 
 	"github.com/safing/portbase/log"
-	"github.com/safing/portmaster/profile"
+	"github.com/safing/portmaster/service/profile"
 )
 
 var ownPID = os.Getpid()
diff --git a/process/special.go b/service/process/special.go
similarity index 96%
rename from process/special.go
rename to service/process/special.go
index aa35160a..5733c2ba 100644
--- a/process/special.go
+++ b/service/process/special.go
@@ -7,8 +7,8 @@ import (
 	"golang.org/x/sync/singleflight"
 
 	"github.com/safing/portbase/log"
-	"github.com/safing/portmaster/network/socket"
-	"github.com/safing/portmaster/profile"
+	"github.com/safing/portmaster/service/network/socket"
+	"github.com/safing/portmaster/service/profile"
 )
 
 const (
diff --git a/process/tags.go b/service/process/tags.go
similarity index 97%
rename from process/tags.go
rename to service/process/tags.go
index 0eea7f49..dd8a43c5 100644
--- a/process/tags.go
+++ b/service/process/tags.go
@@ -4,7 +4,7 @@ import (
 	"errors"
 	"sync"
 
-	"github.com/safing/portmaster/profile"
+	"github.com/safing/portmaster/service/profile"
 )
 
 var (
diff --git a/process/tags/appimage_unix.go b/service/process/tags/appimage_unix.go
similarity index 96%
rename from process/tags/appimage_unix.go
rename to service/process/tags/appimage_unix.go
index 17cbaba2..1e1bd259 100644
--- a/process/tags/appimage_unix.go
+++ b/service/process/tags/appimage_unix.go
@@ -8,9 +8,9 @@ import (
 	"strings"
 
 	"github.com/safing/portbase/log"
-	"github.com/safing/portmaster/process"
-	"github.com/safing/portmaster/profile"
-	"github.com/safing/portmaster/profile/binmeta"
+	"github.com/safing/portmaster/service/process"
+	"github.com/safing/portmaster/service/profile"
+	"github.com/safing/portmaster/service/profile/binmeta"
 )
 
 func init() {
diff --git a/process/tags/flatpak_unix.go b/service/process/tags/flatpak_unix.go
similarity index 92%
rename from process/tags/flatpak_unix.go
rename to service/process/tags/flatpak_unix.go
index 78eafe53..ea9e9c5a 100644
--- a/process/tags/flatpak_unix.go
+++ b/service/process/tags/flatpak_unix.go
@@ -3,9 +3,9 @@ package tags
 import (
 	"strings"
 
-	"github.com/safing/portmaster/process"
-	"github.com/safing/portmaster/profile"
-	"github.com/safing/portmaster/profile/binmeta"
+	"github.com/safing/portmaster/service/process"
+	"github.com/safing/portmaster/service/profile"
+	"github.com/safing/portmaster/service/profile/binmeta"
 )
 
 func init() {
diff --git a/process/tags/interpreter_unix.go b/service/process/tags/interpreter_unix.go
similarity index 97%
rename from process/tags/interpreter_unix.go
rename to service/process/tags/interpreter_unix.go
index 7e9dfdfc..7e5c28b9 100644
--- a/process/tags/interpreter_unix.go
+++ b/service/process/tags/interpreter_unix.go
@@ -12,9 +12,9 @@ import (
 
 	"github.com/google/shlex"
 
-	"github.com/safing/portmaster/process"
-	"github.com/safing/portmaster/profile"
-	"github.com/safing/portmaster/profile/binmeta"
+	"github.com/safing/portmaster/service/process"
+	"github.com/safing/portmaster/service/profile"
+	"github.com/safing/portmaster/service/profile/binmeta"
 )
 
 func init() {
diff --git a/process/tags/net.go b/service/process/tags/net.go
similarity index 93%
rename from process/tags/net.go
rename to service/process/tags/net.go
index 8c6196e5..ce608513 100644
--- a/process/tags/net.go
+++ b/service/process/tags/net.go
@@ -1,8 +1,8 @@
 package tags
 
 import (
-	"github.com/safing/portmaster/process"
-	"github.com/safing/portmaster/profile"
+	"github.com/safing/portmaster/service/process"
+	"github.com/safing/portmaster/service/profile"
 )
 
 func init() {
diff --git a/process/tags/snap_unix.go b/service/process/tags/snap_unix.go
similarity index 95%
rename from process/tags/snap_unix.go
rename to service/process/tags/snap_unix.go
index 70e65299..667ac485 100644
--- a/process/tags/snap_unix.go
+++ b/service/process/tags/snap_unix.go
@@ -3,9 +3,9 @@ package tags
 import (
 	"strings"
 
-	"github.com/safing/portmaster/process"
-	"github.com/safing/portmaster/profile"
-	"github.com/safing/portmaster/profile/binmeta"
+	"github.com/safing/portmaster/service/process"
+	"github.com/safing/portmaster/service/profile"
+	"github.com/safing/portmaster/service/profile/binmeta"
 )
 
 func init() {
diff --git a/process/tags/svchost_windows.go b/service/process/tags/svchost_windows.go
similarity index 95%
rename from process/tags/svchost_windows.go
rename to service/process/tags/svchost_windows.go
index 44071228..83087cbc 100644
--- a/process/tags/svchost_windows.go
+++ b/service/process/tags/svchost_windows.go
@@ -7,9 +7,9 @@ import (
 
 	"github.com/safing/portbase/log"
 	"github.com/safing/portbase/utils/osdetail"
-	"github.com/safing/portmaster/process"
-	"github.com/safing/portmaster/profile"
-	"github.com/safing/portmaster/profile/binmeta"
+	"github.com/safing/portmaster/service/process"
+	"github.com/safing/portmaster/service/profile"
+	"github.com/safing/portmaster/service/profile/binmeta"
 )
 
 func init() {
diff --git a/process/tags/winstore_windows.go b/service/process/tags/winstore_windows.go
similarity index 95%
rename from process/tags/winstore_windows.go
rename to service/process/tags/winstore_windows.go
index 0948be97..e41995c8 100644
--- a/process/tags/winstore_windows.go
+++ b/service/process/tags/winstore_windows.go
@@ -6,9 +6,9 @@ import (
 
 	"github.com/safing/portbase/log"
 	"github.com/safing/portbase/utils"
-	"github.com/safing/portmaster/process"
-	"github.com/safing/portmaster/profile"
-	"github.com/safing/portmaster/profile/binmeta"
+	"github.com/safing/portmaster/service/process"
+	"github.com/safing/portmaster/service/profile"
+	"github.com/safing/portmaster/service/profile/binmeta"
 )
 
 func init() {
diff --git a/profile/active.go b/service/profile/active.go
similarity index 100%
rename from profile/active.go
rename to service/profile/active.go
diff --git a/profile/api.go b/service/profile/api.go
similarity index 98%
rename from profile/api.go
rename to service/profile/api.go
index ca43031e..7b02e914 100644
--- a/profile/api.go
+++ b/service/profile/api.go
@@ -10,7 +10,7 @@ import (
 	"github.com/safing/portbase/api"
 	"github.com/safing/portbase/formats/dsd"
 	"github.com/safing/portbase/utils"
-	"github.com/safing/portmaster/profile/binmeta"
+	"github.com/safing/portmaster/service/profile/binmeta"
 )
 
 func registerAPIEndpoints() error {
diff --git a/profile/binmeta/convert.go b/service/profile/binmeta/convert.go
similarity index 100%
rename from profile/binmeta/convert.go
rename to service/profile/binmeta/convert.go
diff --git a/profile/binmeta/find_default.go b/service/profile/binmeta/find_default.go
similarity index 100%
rename from profile/binmeta/find_default.go
rename to service/profile/binmeta/find_default.go
diff --git a/profile/binmeta/find_linux.go b/service/profile/binmeta/find_linux.go
similarity index 100%
rename from profile/binmeta/find_linux.go
rename to service/profile/binmeta/find_linux.go
diff --git a/profile/binmeta/find_linux_test.go b/service/profile/binmeta/find_linux_test.go
similarity index 100%
rename from profile/binmeta/find_linux_test.go
rename to service/profile/binmeta/find_linux_test.go
diff --git a/profile/binmeta/find_windows.go b/service/profile/binmeta/find_windows.go
similarity index 100%
rename from profile/binmeta/find_windows.go
rename to service/profile/binmeta/find_windows.go
diff --git a/profile/binmeta/find_windows_test.go b/service/profile/binmeta/find_windows_test.go
similarity index 100%
rename from profile/binmeta/find_windows_test.go
rename to service/profile/binmeta/find_windows_test.go
diff --git a/profile/binmeta/icon.go b/service/profile/binmeta/icon.go
similarity index 100%
rename from profile/binmeta/icon.go
rename to service/profile/binmeta/icon.go
diff --git a/profile/binmeta/icons.go b/service/profile/binmeta/icons.go
similarity index 100%
rename from profile/binmeta/icons.go
rename to service/profile/binmeta/icons.go
diff --git a/profile/binmeta/locations_linux.go b/service/profile/binmeta/locations_linux.go
similarity index 100%
rename from profile/binmeta/locations_linux.go
rename to service/profile/binmeta/locations_linux.go
diff --git a/profile/binmeta/name.go b/service/profile/binmeta/name.go
similarity index 100%
rename from profile/binmeta/name.go
rename to service/profile/binmeta/name.go
diff --git a/profile/binmeta/name_test.go b/service/profile/binmeta/name_test.go
similarity index 100%
rename from profile/binmeta/name_test.go
rename to service/profile/binmeta/name_test.go
diff --git a/profile/config-update.go b/service/profile/config-update.go
similarity index 96%
rename from profile/config-update.go
rename to service/profile/config-update.go
index 3a6cd246..3c31603c 100644
--- a/profile/config-update.go
+++ b/service/profile/config-update.go
@@ -7,8 +7,8 @@ import (
 	"time"
 
 	"github.com/safing/portbase/modules"
-	"github.com/safing/portmaster/intel/filterlists"
-	"github.com/safing/portmaster/profile/endpoints"
+	"github.com/safing/portmaster/service/intel/filterlists"
+	"github.com/safing/portmaster/service/profile/endpoints"
 )
 
 var (
diff --git a/profile/config.go b/service/profile/config.go
similarity index 99%
rename from profile/config.go
rename to service/profile/config.go
index 18495ae9..a2b5da0a 100644
--- a/profile/config.go
+++ b/service/profile/config.go
@@ -4,9 +4,9 @@ import (
 	"strings"
 
 	"github.com/safing/portbase/config"
-	"github.com/safing/portmaster/profile/endpoints"
-	"github.com/safing/portmaster/status"
-	"github.com/safing/spn/access/account"
+	"github.com/safing/portmaster/service/profile/endpoints"
+	"github.com/safing/portmaster/service/status"
+	"github.com/safing/portmaster/spn/access/account"
 )
 
 // Configuration Keys.
diff --git a/profile/database.go b/service/profile/database.go
similarity index 100%
rename from profile/database.go
rename to service/profile/database.go
diff --git a/profile/endpoints/annotations.go b/service/profile/endpoints/annotations.go
similarity index 100%
rename from profile/endpoints/annotations.go
rename to service/profile/endpoints/annotations.go
diff --git a/profile/endpoints/endpoint-any.go b/service/profile/endpoints/endpoint-any.go
similarity index 92%
rename from profile/endpoints/endpoint-any.go
rename to service/profile/endpoints/endpoint-any.go
index 14960489..7ec64688 100644
--- a/profile/endpoints/endpoint-any.go
+++ b/service/profile/endpoints/endpoint-any.go
@@ -3,7 +3,7 @@ package endpoints
 import (
 	"context"
 
-	"github.com/safing/portmaster/intel"
+	"github.com/safing/portmaster/service/intel"
 )
 
 // EndpointAny matches anything.
diff --git a/profile/endpoints/endpoint-asn.go b/service/profile/endpoints/endpoint-asn.go
similarity index 96%
rename from profile/endpoints/endpoint-asn.go
rename to service/profile/endpoints/endpoint-asn.go
index 5341f81b..20864d72 100644
--- a/profile/endpoints/endpoint-asn.go
+++ b/service/profile/endpoints/endpoint-asn.go
@@ -7,7 +7,7 @@ import (
 	"strconv"
 	"strings"
 
-	"github.com/safing/portmaster/intel"
+	"github.com/safing/portmaster/service/intel"
 )
 
 var asnRegex = regexp.MustCompile("^AS[0-9]+$")
diff --git a/profile/endpoints/endpoint-continent.go b/service/profile/endpoints/endpoint-continent.go
similarity index 96%
rename from profile/endpoints/endpoint-continent.go
rename to service/profile/endpoints/endpoint-continent.go
index f241cfa2..4ba244da 100644
--- a/profile/endpoints/endpoint-continent.go
+++ b/service/profile/endpoints/endpoint-continent.go
@@ -6,7 +6,7 @@ import (
 	"regexp"
 	"strings"
 
-	"github.com/safing/portmaster/intel"
+	"github.com/safing/portmaster/service/intel"
 )
 
 var (
diff --git a/profile/endpoints/endpoint-country.go b/service/profile/endpoints/endpoint-country.go
similarity index 96%
rename from profile/endpoints/endpoint-country.go
rename to service/profile/endpoints/endpoint-country.go
index c8e1f6df..60a478cf 100644
--- a/profile/endpoints/endpoint-country.go
+++ b/service/profile/endpoints/endpoint-country.go
@@ -6,7 +6,7 @@ import (
 	"regexp"
 	"strings"
 
-	"github.com/safing/portmaster/intel"
+	"github.com/safing/portmaster/service/intel"
 )
 
 var countryRegex = regexp.MustCompile(`^[A-Z]{2}$`)
diff --git a/profile/endpoints/endpoint-domain.go b/service/profile/endpoints/endpoint-domain.go
similarity index 97%
rename from profile/endpoints/endpoint-domain.go
rename to service/profile/endpoints/endpoint-domain.go
index d82ccb5b..cdb6f248 100644
--- a/profile/endpoints/endpoint-domain.go
+++ b/service/profile/endpoints/endpoint-domain.go
@@ -6,8 +6,8 @@ import (
 	"regexp"
 	"strings"
 
-	"github.com/safing/portmaster/intel"
-	"github.com/safing/portmaster/network/netutils"
+	"github.com/safing/portmaster/service/intel"
+	"github.com/safing/portmaster/service/network/netutils"
 )
 
 const (
diff --git a/profile/endpoints/endpoint-ip.go b/service/profile/endpoints/endpoint-ip.go
similarity index 94%
rename from profile/endpoints/endpoint-ip.go
rename to service/profile/endpoints/endpoint-ip.go
index 9797eb8d..78706932 100644
--- a/profile/endpoints/endpoint-ip.go
+++ b/service/profile/endpoints/endpoint-ip.go
@@ -4,7 +4,7 @@ import (
 	"context"
 	"net"
 
-	"github.com/safing/portmaster/intel"
+	"github.com/safing/portmaster/service/intel"
 )
 
 // EndpointIP matches IPs.
diff --git a/profile/endpoints/endpoint-iprange.go b/service/profile/endpoints/endpoint-iprange.go
similarity index 94%
rename from profile/endpoints/endpoint-iprange.go
rename to service/profile/endpoints/endpoint-iprange.go
index 6a0b713a..14503bd8 100644
--- a/profile/endpoints/endpoint-iprange.go
+++ b/service/profile/endpoints/endpoint-iprange.go
@@ -4,7 +4,7 @@ import (
 	"context"
 	"net"
 
-	"github.com/safing/portmaster/intel"
+	"github.com/safing/portmaster/service/intel"
 )
 
 // EndpointIPRange matches IP ranges.
diff --git a/profile/endpoints/endpoint-lists.go b/service/profile/endpoints/endpoint-lists.go
similarity index 95%
rename from profile/endpoints/endpoint-lists.go
rename to service/profile/endpoints/endpoint-lists.go
index 58e48be7..8aedb0ee 100644
--- a/profile/endpoints/endpoint-lists.go
+++ b/service/profile/endpoints/endpoint-lists.go
@@ -4,7 +4,7 @@ import (
 	"context"
 	"strings"
 
-	"github.com/safing/portmaster/intel"
+	"github.com/safing/portmaster/service/intel"
 )
 
 // EndpointLists matches endpoint lists.
diff --git a/profile/endpoints/endpoint-scopes.go b/service/profile/endpoints/endpoint-scopes.go
similarity index 95%
rename from profile/endpoints/endpoint-scopes.go
rename to service/profile/endpoints/endpoint-scopes.go
index c969b408..b506e2a1 100644
--- a/profile/endpoints/endpoint-scopes.go
+++ b/service/profile/endpoints/endpoint-scopes.go
@@ -4,8 +4,8 @@ import (
 	"context"
 	"strings"
 
-	"github.com/safing/portmaster/intel"
-	"github.com/safing/portmaster/network/netutils"
+	"github.com/safing/portmaster/service/intel"
+	"github.com/safing/portmaster/service/network/netutils"
 )
 
 const (
diff --git a/profile/endpoints/endpoint.go b/service/profile/endpoints/endpoint.go
similarity index 98%
rename from profile/endpoints/endpoint.go
rename to service/profile/endpoints/endpoint.go
index b893a634..962d78e2 100644
--- a/profile/endpoints/endpoint.go
+++ b/service/profile/endpoints/endpoint.go
@@ -6,8 +6,8 @@ import (
 	"strconv"
 	"strings"
 
-	"github.com/safing/portmaster/intel"
-	"github.com/safing/portmaster/network/reference"
+	"github.com/safing/portmaster/service/intel"
+	"github.com/safing/portmaster/service/network/reference"
 )
 
 // Endpoint describes an Endpoint Matcher.
diff --git a/profile/endpoints/endpoint_test.go b/service/profile/endpoints/endpoint_test.go
similarity index 100%
rename from profile/endpoints/endpoint_test.go
rename to service/profile/endpoints/endpoint_test.go
diff --git a/profile/endpoints/endpoints.go b/service/profile/endpoints/endpoints.go
similarity index 98%
rename from profile/endpoints/endpoints.go
rename to service/profile/endpoints/endpoints.go
index 6ed3ad04..17649675 100644
--- a/profile/endpoints/endpoints.go
+++ b/service/profile/endpoints/endpoints.go
@@ -6,7 +6,7 @@ import (
 	"fmt"
 	"strings"
 
-	"github.com/safing/portmaster/intel"
+	"github.com/safing/portmaster/service/intel"
 )
 
 // Endpoints is a list of permitted or denied endpoints.
diff --git a/profile/endpoints/endpoints_test.go b/service/profile/endpoints/endpoints_test.go
similarity index 98%
rename from profile/endpoints/endpoints_test.go
rename to service/profile/endpoints/endpoints_test.go
index 342d81d8..8dafe10d 100644
--- a/profile/endpoints/endpoints_test.go
+++ b/service/profile/endpoints/endpoints_test.go
@@ -8,8 +8,8 @@ import (
 
 	"github.com/stretchr/testify/assert"
 
-	"github.com/safing/portmaster/core/pmtesting"
-	"github.com/safing/portmaster/intel"
+	"github.com/safing/portmaster/service/core/pmtesting"
+	"github.com/safing/portmaster/service/intel"
 )
 
 func TestMain(m *testing.M) {
diff --git a/profile/endpoints/reason.go b/service/profile/endpoints/reason.go
similarity index 100%
rename from profile/endpoints/reason.go
rename to service/profile/endpoints/reason.go
diff --git a/profile/fingerprint.go b/service/profile/fingerprint.go
similarity index 100%
rename from profile/fingerprint.go
rename to service/profile/fingerprint.go
diff --git a/profile/fingerprint_test.go b/service/profile/fingerprint_test.go
similarity index 100%
rename from profile/fingerprint_test.go
rename to service/profile/fingerprint_test.go
diff --git a/profile/framework.go b/service/profile/framework.go
similarity index 100%
rename from profile/framework.go
rename to service/profile/framework.go
diff --git a/profile/framework_test.go b/service/profile/framework_test.go
similarity index 100%
rename from profile/framework_test.go
rename to service/profile/framework_test.go
diff --git a/profile/get.go b/service/profile/get.go
similarity index 100%
rename from profile/get.go
rename to service/profile/get.go
diff --git a/profile/merge.go b/service/profile/merge.go
similarity index 98%
rename from profile/merge.go
rename to service/profile/merge.go
index 420d64f6..5e995182 100644
--- a/profile/merge.go
+++ b/service/profile/merge.go
@@ -7,7 +7,7 @@ import (
 	"time"
 
 	"github.com/safing/portbase/database/record"
-	"github.com/safing/portmaster/profile/binmeta"
+	"github.com/safing/portmaster/service/profile/binmeta"
 )
 
 // MergeProfiles merges multiple profiles into a new one.
diff --git a/profile/meta.go b/service/profile/meta.go
similarity index 100%
rename from profile/meta.go
rename to service/profile/meta.go
diff --git a/profile/migrations.go b/service/profile/migrations.go
similarity index 99%
rename from profile/migrations.go
rename to service/profile/migrations.go
index e9b1344d..5eb94313 100644
--- a/profile/migrations.go
+++ b/service/profile/migrations.go
@@ -11,7 +11,7 @@ import (
 	"github.com/safing/portbase/database/migration"
 	"github.com/safing/portbase/database/query"
 	"github.com/safing/portbase/log"
-	"github.com/safing/portmaster/profile/binmeta"
+	"github.com/safing/portmaster/service/profile/binmeta"
 )
 
 func registerMigrations() error {
diff --git a/profile/module.go b/service/profile/module.go
similarity index 93%
rename from profile/module.go
rename to service/profile/module.go
index 547944b1..4465750d 100644
--- a/profile/module.go
+++ b/service/profile/module.go
@@ -10,9 +10,9 @@ import (
 	"github.com/safing/portbase/dataroot"
 	"github.com/safing/portbase/log"
 	"github.com/safing/portbase/modules"
-	_ "github.com/safing/portmaster/core/base"
-	"github.com/safing/portmaster/profile/binmeta"
-	"github.com/safing/portmaster/updates"
+	_ "github.com/safing/portmaster/service/core/base"
+	"github.com/safing/portmaster/service/profile/binmeta"
+	"github.com/safing/portmaster/service/updates"
 )
 
 var (
diff --git a/profile/profile-layered-provider.go b/service/profile/profile-layered-provider.go
similarity index 100%
rename from profile/profile-layered-provider.go
rename to service/profile/profile-layered-provider.go
diff --git a/profile/profile-layered.go b/service/profile/profile-layered.go
similarity index 99%
rename from profile/profile-layered.go
rename to service/profile/profile-layered.go
index acd88da3..2635aed5 100644
--- a/profile/profile-layered.go
+++ b/service/profile/profile-layered.go
@@ -9,8 +9,8 @@ import (
 	"github.com/safing/portbase/database/record"
 	"github.com/safing/portbase/log"
 	"github.com/safing/portbase/runtime"
-	"github.com/safing/portmaster/intel"
-	"github.com/safing/portmaster/profile/endpoints"
+	"github.com/safing/portmaster/service/intel"
+	"github.com/safing/portmaster/service/profile/endpoints"
 )
 
 // LayeredProfile combines multiple Profiles.
diff --git a/profile/profile.go b/service/profile/profile.go
similarity index 98%
rename from profile/profile.go
rename to service/profile/profile.go
index 95e2b762..fff41908 100644
--- a/profile/profile.go
+++ b/service/profile/profile.go
@@ -15,9 +15,9 @@ import (
 	"github.com/safing/portbase/database/record"
 	"github.com/safing/portbase/log"
 	"github.com/safing/portbase/utils"
-	"github.com/safing/portmaster/intel/filterlists"
-	"github.com/safing/portmaster/profile/binmeta"
-	"github.com/safing/portmaster/profile/endpoints"
+	"github.com/safing/portmaster/service/intel/filterlists"
+	"github.com/safing/portmaster/service/profile/binmeta"
+	"github.com/safing/portmaster/service/profile/endpoints"
 )
 
 // ProfileSource is the source of the profile.
diff --git a/profile/special.go b/service/profile/special.go
similarity index 100%
rename from profile/special.go
rename to service/profile/special.go
diff --git a/resolver/api.go b/service/resolver/api.go
similarity index 100%
rename from resolver/api.go
rename to service/resolver/api.go
diff --git a/resolver/block-detection.go b/service/resolver/block-detection.go
similarity index 100%
rename from resolver/block-detection.go
rename to service/resolver/block-detection.go
diff --git a/resolver/compat.go b/service/resolver/compat.go
similarity index 100%
rename from resolver/compat.go
rename to service/resolver/compat.go
diff --git a/resolver/config.go b/service/resolver/config.go
similarity index 98%
rename from resolver/config.go
rename to service/resolver/config.go
index 135c7c27..b5538d7d 100644
--- a/resolver/config.go
+++ b/service/resolver/config.go
@@ -6,8 +6,8 @@ import (
 	"strings"
 
 	"github.com/safing/portbase/config"
-	"github.com/safing/portmaster/netenv"
-	"github.com/safing/portmaster/status"
+	"github.com/safing/portmaster/service/netenv"
+	"github.com/safing/portmaster/service/status"
 )
 
 // Configuration Keys.
@@ -23,7 +23,7 @@ var (
 
 		// We encourage everyone who has the technical abilities to set their own preferred servers.
 		// For a list of configuration options, see
 		// https://github.com/safing/portmaster/wiki/DNS-Server-Settings
 
 		// Quad9 (encrypted DNS)
 		// "dot://dns.quad9.net?ip=9.9.9.9&name=Quad9&blockedif=empty",
diff --git a/resolver/doc.go b/service/resolver/doc.go
similarity index 100%
rename from resolver/doc.go
rename to service/resolver/doc.go
diff --git a/resolver/failing.go b/service/resolver/failing.go
similarity index 98%
rename from resolver/failing.go
rename to service/resolver/failing.go
index 33cee5b5..2f1ff87b 100644
--- a/resolver/failing.go
+++ b/service/resolver/failing.go
@@ -6,7 +6,7 @@ import (
 
 	"github.com/safing/portbase/log"
 	"github.com/safing/portbase/modules"
-	"github.com/safing/portmaster/netenv"
+	"github.com/safing/portmaster/service/netenv"
 )
 
 var (
diff --git a/resolver/ipinfo.go b/service/resolver/ipinfo.go
similarity index 100%
rename from resolver/ipinfo.go
rename to service/resolver/ipinfo.go
diff --git a/resolver/ipinfo_test.go b/service/resolver/ipinfo_test.go
similarity index 100%
rename from resolver/ipinfo_test.go
rename to service/resolver/ipinfo_test.go
diff --git a/resolver/main.go b/service/resolver/main.go
similarity index 97%
rename from resolver/main.go
rename to service/resolver/main.go
index 2daab556..693797b5 100644
--- a/resolver/main.go
+++ b/service/resolver/main.go
@@ -14,9 +14,9 @@ import (
 	"github.com/safing/portbase/modules"
 	"github.com/safing/portbase/notifications"
 	"github.com/safing/portbase/utils/debug"
-	_ "github.com/safing/portmaster/core/base"
-	"github.com/safing/portmaster/intel"
-	"github.com/safing/portmaster/netenv"
+	_ "github.com/safing/portmaster/service/core/base"
+	"github.com/safing/portmaster/service/intel"
+	"github.com/safing/portmaster/service/netenv"
 )
 
 var module *modules.Module
diff --git a/resolver/main_test.go b/service/resolver/main_test.go
similarity index 97%
rename from resolver/main_test.go
rename to service/resolver/main_test.go
index 57168227..2a2dbe44 100644
--- a/resolver/main_test.go
+++ b/service/resolver/main_test.go
@@ -3,7 +3,7 @@ package resolver
 import (
 	"testing"
 
-	"github.com/safing/portmaster/core/pmtesting"
+	"github.com/safing/portmaster/service/core/pmtesting"
 )
 
 var domainFeed = make(chan string)
diff --git a/resolver/metrics.go b/service/resolver/metrics.go
similarity index 100%
rename from resolver/metrics.go
rename to service/resolver/metrics.go
diff --git a/resolver/namerecord.go b/service/resolver/namerecord.go
similarity index 100%
rename from resolver/namerecord.go
rename to service/resolver/namerecord.go
diff --git a/resolver/namerecord_test.go b/service/resolver/namerecord_test.go
similarity index 100%
rename from resolver/namerecord_test.go
rename to service/resolver/namerecord_test.go
diff --git a/resolver/resolve.go b/service/resolver/resolve.go
similarity index 99%
rename from resolver/resolve.go
rename to service/resolver/resolve.go
index b9feb0a9..fe3e11ff 100644
--- a/resolver/resolve.go
+++ b/service/resolver/resolve.go
@@ -14,7 +14,7 @@ import (
 
 	"github.com/safing/portbase/database"
 	"github.com/safing/portbase/log"
-	"github.com/safing/portmaster/netenv"
+	"github.com/safing/portmaster/service/netenv"
 )
 
 // Errors.
diff --git a/resolver/resolver-env.go b/service/resolver/resolver-env.go
similarity index 97%
rename from resolver/resolver-env.go
rename to service/resolver/resolver-env.go
index d976d311..01f58ea7 100644
--- a/resolver/resolver-env.go
+++ b/service/resolver/resolver-env.go
@@ -9,8 +9,8 @@ import (
 	"github.com/miekg/dns"
 
 	"github.com/safing/portbase/log"
-	"github.com/safing/portmaster/netenv"
-	"github.com/safing/portmaster/network/netutils"
+	"github.com/safing/portmaster/service/netenv"
+	"github.com/safing/portmaster/service/network/netutils"
 )
 
 const (
diff --git a/resolver/resolver-https.go b/service/resolver/resolver-https.go
similarity index 98%
rename from resolver/resolver-https.go
rename to service/resolver/resolver-https.go
index 2d40aac0..ed04bf92 100644
--- a/resolver/resolver-https.go
+++ b/service/resolver/resolver-https.go
@@ -14,7 +14,7 @@ import (
 	"github.com/miekg/dns"
 
 	"github.com/safing/portbase/log"
-	"github.com/safing/portmaster/netenv"
+	"github.com/safing/portmaster/service/netenv"
 )
 
 // HTTPSResolver is a resolver using just a single tcp connection with pipelining.
diff --git a/resolver/resolver-mdns.go b/service/resolver/resolver-mdns.go
similarity index 99%
rename from resolver/resolver-mdns.go
rename to service/resolver/resolver-mdns.go
index 2e01122a..17f034c8 100644
--- a/resolver/resolver-mdns.go
+++ b/service/resolver/resolver-mdns.go
@@ -12,8 +12,8 @@ import (
 	"github.com/miekg/dns"
 
 	"github.com/safing/portbase/log"
-	"github.com/safing/portmaster/netenv"
-	"github.com/safing/portmaster/network/netutils"
+	"github.com/safing/portmaster/service/netenv"
+	"github.com/safing/portmaster/service/network/netutils"
 )
 
 // DNS Classes.
diff --git a/resolver/resolver-plain.go b/service/resolver/resolver-plain.go
similarity index 98%
rename from resolver/resolver-plain.go
rename to service/resolver/resolver-plain.go
index 614f30b2..56f85458 100644
--- a/resolver/resolver-plain.go
+++ b/service/resolver/resolver-plain.go
@@ -9,7 +9,7 @@ import (
 	"github.com/miekg/dns"
 
 	"github.com/safing/portbase/log"
-	"github.com/safing/portmaster/netenv"
+	"github.com/safing/portmaster/service/netenv"
 )
 
 var (
diff --git a/resolver/resolver-tcp.go b/service/resolver/resolver-tcp.go
similarity index 99%
rename from resolver/resolver-tcp.go
rename to service/resolver/resolver-tcp.go
index aed64e2d..271d8808 100644
--- a/resolver/resolver-tcp.go
+++ b/service/resolver/resolver-tcp.go
@@ -13,7 +13,7 @@ import (
 	"github.com/tevino/abool"
 
 	"github.com/safing/portbase/log"
-	"github.com/safing/portmaster/netenv"
+	"github.com/safing/portmaster/service/netenv"
 )
 
 const (
diff --git a/resolver/resolver.go b/service/resolver/resolver.go
similarity index 98%
rename from resolver/resolver.go
rename to service/resolver/resolver.go
index e899f480..3474fd30 100644
--- a/resolver/resolver.go
+++ b/service/resolver/resolver.go
@@ -11,8 +11,8 @@ import (
 	"github.com/tevino/abool"
 
 	"github.com/safing/portbase/utils"
-	"github.com/safing/portmaster/netenv"
-	"github.com/safing/portmaster/network/netutils"
+	"github.com/safing/portmaster/service/netenv"
+	"github.com/safing/portmaster/service/network/netutils"
 )
 
 // DNS Resolver Attributes.
diff --git a/resolver/resolver_test.go b/service/resolver/resolver_test.go
similarity index 100%
rename from resolver/resolver_test.go
rename to service/resolver/resolver_test.go
diff --git a/resolver/resolvers.go b/service/resolver/resolvers.go
similarity index 99%
rename from resolver/resolvers.go
rename to service/resolver/resolvers.go
index 10226b35..93edf2a1 100644
--- a/resolver/resolvers.go
+++ b/service/resolver/resolvers.go
@@ -15,8 +15,8 @@ import (
 
 	"github.com/safing/portbase/log"
 	"github.com/safing/portbase/utils"
-	"github.com/safing/portmaster/netenv"
-	"github.com/safing/portmaster/network/netutils"
+	"github.com/safing/portmaster/service/netenv"
+	"github.com/safing/portmaster/service/network/netutils"
 )
 
 const maxSearchDomains = 100
diff --git a/resolver/resolvers_test.go b/service/resolver/resolvers_test.go
similarity index 100%
rename from resolver/resolvers_test.go
rename to service/resolver/resolvers_test.go
diff --git a/resolver/reverse.go b/service/resolver/reverse.go
similarity index 100%
rename from resolver/reverse.go
rename to service/resolver/reverse.go
diff --git a/resolver/reverse_test.go b/service/resolver/reverse_test.go
similarity index 100%
rename from resolver/reverse_test.go
rename to service/resolver/reverse_test.go
diff --git a/resolver/rr_context.go b/service/resolver/rr_context.go
similarity index 100%
rename from resolver/rr_context.go
rename to service/resolver/rr_context.go
diff --git a/resolver/rrcache.go b/service/resolver/rrcache.go
similarity index 98%
rename from resolver/rrcache.go
rename to service/resolver/rrcache.go
index 1b6fdc3d..36b46e31 100644
--- a/resolver/rrcache.go
+++ b/service/resolver/rrcache.go
@@ -9,8 +9,8 @@ import (
 	"github.com/miekg/dns"
 
 	"github.com/safing/portbase/log"
-	"github.com/safing/portmaster/nameserver/nsutil"
-	"github.com/safing/portmaster/netenv"
+	"github.com/safing/portmaster/service/nameserver/nsutil"
+	"github.com/safing/portmaster/service/netenv"
 )
 
 // RRCache is a single-use structure to hold a DNS response.
diff --git a/resolver/rrcache_test.go b/service/resolver/rrcache_test.go
similarity index 100%
rename from resolver/rrcache_test.go
rename to service/resolver/rrcache_test.go
diff --git a/resolver/scopes.go b/service/resolver/scopes.go
similarity index 99%
rename from resolver/scopes.go
rename to service/resolver/scopes.go
index 044b83fc..ac1391b1 100644
--- a/resolver/scopes.go
+++ b/service/resolver/scopes.go
@@ -8,7 +8,7 @@ import (
 	"github.com/miekg/dns"
 
 	"github.com/safing/portbase/log"
-	"github.com/safing/portmaster/netenv"
+	"github.com/safing/portmaster/service/netenv"
 )
 
 // Domain Scopes.
diff --git a/resolver/test/resolving.bash b/service/resolver/test/resolving.bash
similarity index 100%
rename from resolver/test/resolving.bash
rename to service/resolver/test/resolving.bash
diff --git a/status/module.go b/service/status/module.go
similarity index 95%
rename from status/module.go
rename to service/status/module.go
index bc823832..2465d09b 100644
--- a/status/module.go
+++ b/service/status/module.go
@@ -6,7 +6,7 @@ import (
 
 	"github.com/safing/portbase/modules"
 	"github.com/safing/portbase/utils/debug"
-	"github.com/safing/portmaster/netenv"
+	"github.com/safing/portmaster/service/netenv"
 )
 
 var module *modules.Module
diff --git a/status/provider.go b/service/status/provider.go
similarity index 95%
rename from status/provider.go
rename to service/status/provider.go
index fbe8d84f..5130560e 100644
--- a/status/provider.go
+++ b/service/status/provider.go
@@ -3,7 +3,7 @@ package status
 import (
 	"github.com/safing/portbase/database/record"
 	"github.com/safing/portbase/runtime"
-	"github.com/safing/portmaster/netenv"
+	"github.com/safing/portmaster/service/netenv"
 )
 
 var pushUpdate runtime.PushFunc
diff --git a/status/records.go b/service/status/records.go
similarity index 92%
rename from status/records.go
rename to service/status/records.go
index 63f3f9fd..56f19e5f 100644
--- a/status/records.go
+++ b/service/status/records.go
@@ -4,7 +4,7 @@ import (
 	"sync"
 
 	"github.com/safing/portbase/database/record"
-	"github.com/safing/portmaster/netenv"
+	"github.com/safing/portmaster/service/netenv"
 )
 
 // SystemStatusRecord describes the overall status of the Portmaster.
diff --git a/status/security_level.go b/service/status/security_level.go
similarity index 100%
rename from status/security_level.go
rename to service/status/security_level.go
diff --git a/sync/module.go b/service/sync/module.go
similarity index 100%
rename from sync/module.go
rename to service/sync/module.go
diff --git a/sync/profile.go b/service/sync/profile.go
similarity index 99%
rename from sync/profile.go
rename to service/sync/profile.go
index bfc76893..22a6472b 100644
--- a/sync/profile.go
+++ b/service/sync/profile.go
@@ -13,8 +13,8 @@ import (
 	"github.com/safing/portbase/api"
 	"github.com/safing/portbase/config"
 	"github.com/safing/portbase/log"
-	"github.com/safing/portmaster/profile"
-	"github.com/safing/portmaster/profile/binmeta"
+	"github.com/safing/portmaster/service/profile"
+	"github.com/safing/portmaster/service/profile/binmeta"
 )
 
 // ProfileExport holds an export of a profile.
diff --git a/sync/setting_single.go b/service/sync/setting_single.go
similarity index 99%
rename from sync/setting_single.go
rename to service/sync/setting_single.go
index 24cd0cbc..8911d6e4 100644
--- a/sync/setting_single.go
+++ b/service/sync/setting_single.go
@@ -10,7 +10,7 @@ import (
 	"github.com/safing/portbase/api"
 	"github.com/safing/portbase/config"
 	"github.com/safing/portbase/formats/dsd"
-	"github.com/safing/portmaster/profile"
+	"github.com/safing/portmaster/service/profile"
 )
 
 // SingleSettingExport holds an export of a single setting.
diff --git a/sync/settings.go b/service/sync/settings.go
similarity index 99%
rename from sync/settings.go
rename to service/sync/settings.go
index 795d94bb..4e640d09 100644
--- a/sync/settings.go
+++ b/service/sync/settings.go
@@ -10,7 +10,7 @@ import (
 
 	"github.com/safing/portbase/api"
 	"github.com/safing/portbase/config"
-	"github.com/safing/portmaster/profile"
+	"github.com/safing/portmaster/service/profile"
 )
 
 // SettingsExport holds an export of settings.
diff --git a/sync/util.go b/service/sync/util.go
similarity index 100%
rename from sync/util.go
rename to service/sync/util.go
diff --git a/ui/api.go b/service/ui/api.go
similarity index 100%
rename from ui/api.go
rename to service/ui/api.go
diff --git a/ui/module.go b/service/ui/module.go
similarity index 100%
rename from ui/module.go
rename to service/ui/module.go
diff --git a/ui/serve.go b/service/ui/serve.go
similarity index 99%
rename from ui/serve.go
rename to service/ui/serve.go
index 2fe7f710..1e9e5861 100644
--- a/ui/serve.go
+++ b/service/ui/serve.go
@@ -18,7 +18,7 @@ import (
 	"github.com/safing/portbase/modules"
 	"github.com/safing/portbase/updater"
 	"github.com/safing/portbase/utils"
-	"github.com/safing/portmaster/updates"
+	"github.com/safing/portmaster/service/updates"
 )
 
 var (
diff --git a/updates/api.go b/service/updates/api.go
similarity index 100%
rename from updates/api.go
rename to service/updates/api.go
diff --git a/updates/assets/portmaster.service b/service/updates/assets/portmaster.service
similarity index 100%
rename from updates/assets/portmaster.service
rename to service/updates/assets/portmaster.service
diff --git a/updates/config.go b/service/updates/config.go
similarity index 99%
rename from updates/config.go
rename to service/updates/config.go
index a8fff098..c06e7793 100644
--- a/updates/config.go
+++ b/service/updates/config.go
@@ -7,7 +7,7 @@ import (
 
 	"github.com/safing/portbase/config"
 	"github.com/safing/portbase/log"
-	"github.com/safing/portmaster/updates/helper"
+	"github.com/safing/portmaster/service/updates/helper"
 )
 
 const cfgDevModeKey = "core/devMode"
diff --git a/updates/export.go b/service/updates/export.go
similarity index 99%
rename from updates/export.go
rename to service/updates/export.go
index e17113d1..0f355d58 100644
--- a/updates/export.go
+++ b/service/updates/export.go
@@ -12,7 +12,7 @@ import (
 	"github.com/safing/portbase/log"
 	"github.com/safing/portbase/updater"
 	"github.com/safing/portbase/utils/debug"
-	"github.com/safing/portmaster/updates/helper"
+	"github.com/safing/portmaster/service/updates/helper"
 )
 
 const (
diff --git a/updates/get.go b/service/updates/get.go
similarity index 97%
rename from updates/get.go
rename to service/updates/get.go
index 2cf7acf7..c133ae1f 100644
--- a/updates/get.go
+++ b/service/updates/get.go
@@ -4,7 +4,7 @@ import (
 	"path"
 
 	"github.com/safing/portbase/updater"
-	"github.com/safing/portmaster/updates/helper"
+	"github.com/safing/portmaster/service/updates/helper"
 )
 
 // GetPlatformFile returns the latest platform specific file identified by the given identifier.
diff --git a/updates/helper/electron.go b/service/updates/helper/electron.go
similarity index 100%
rename from updates/helper/electron.go
rename to service/updates/helper/electron.go
diff --git a/updates/helper/indexes.go b/service/updates/helper/indexes.go
similarity index 100%
rename from updates/helper/indexes.go
rename to service/updates/helper/indexes.go
diff --git a/updates/helper/signing.go b/service/updates/helper/signing.go
similarity index 100%
rename from updates/helper/signing.go
rename to service/updates/helper/signing.go
diff --git a/updates/helper/updates.go b/service/updates/helper/updates.go
similarity index 100%
rename from updates/helper/updates.go
rename to service/updates/helper/updates.go
diff --git a/updates/main.go b/service/updates/main.go
similarity index 99%
rename from updates/main.go
rename to service/updates/main.go
index 02f46075..95c20f04 100644
--- a/updates/main.go
+++ b/service/updates/main.go
@@ -14,7 +14,7 @@ import (
 	"github.com/safing/portbase/log"
 	"github.com/safing/portbase/modules"
 	"github.com/safing/portbase/updater"
-	"github.com/safing/portmaster/updates/helper"
+	"github.com/safing/portmaster/service/updates/helper"
 )
 
 const (
diff --git a/updates/notify.go b/service/updates/notify.go
similarity index 100%
rename from updates/notify.go
rename to service/updates/notify.go
diff --git a/updates/os_integration_default.go b/service/updates/os_integration_default.go
similarity index 100%
rename from updates/os_integration_default.go
rename to service/updates/os_integration_default.go
diff --git a/updates/os_integration_linux.go b/service/updates/os_integration_linux.go
similarity index 100%
rename from updates/os_integration_linux.go
rename to service/updates/os_integration_linux.go
diff --git a/updates/restart.go b/service/updates/restart.go
similarity index 100%
rename from updates/restart.go
rename to service/updates/restart.go
diff --git a/updates/state.go b/service/updates/state.go
similarity index 100%
rename from updates/state.go
rename to service/updates/state.go
diff --git a/updates/upgrader.go b/service/updates/upgrader.go
similarity index 99%
rename from updates/upgrader.go
rename to service/updates/upgrader.go
index d350b760..9467dc73 100644
--- a/updates/upgrader.go
+++ b/service/updates/upgrader.go
@@ -21,7 +21,7 @@ import (
 	"github.com/safing/portbase/rng"
 	"github.com/safing/portbase/updater"
 	"github.com/safing/portbase/utils/renameio"
-	"github.com/safing/portmaster/updates/helper"
+	"github.com/safing/portmaster/service/updates/helper"
 )
 
 const (
diff --git a/spn/TESTING.md b/spn/TESTING.md
new file mode 100644
index 00000000..88a82c33
--- /dev/null
+++ b/spn/TESTING.md
@@ -0,0 +1,26 @@
+# Testing SPN
+
+This page documents ways to test if the SPN works as intended.
+
+⚠ Work in Progress. Currently we are just collecting helpful things we find.
+
+## Test Multi-Identity Routing
+
+In order to test if the multi-identity routing is working, you can request multiple websites to display your public IP.
+If they show different values, multi-identity routing is working.
+
+### Websites
+
+- <https://icanhazip.com>
+- <https://ipecho.net>
+- <https://ipinfo.io>
+- <https://ipinfo.tw>
+
+### Terminal
+
+```sh
+curl https://icanhazip.com
+curl https://ipecho.net/plain
+curl https://ipinfo.io/ip
+curl https://ipinfo.tw/ip
+```
diff --git a/spn/TRADEMARKS b/spn/TRADEMARKS
new file mode 100644
index 00000000..1bff5e79
--- /dev/null
+++ b/spn/TRADEMARKS
@@ -0,0 +1,5 @@
+The names "Safing", "Portmaster", "SPN" and their logos are trademarks owned by Safing ICS Technologies GmbH (Austria).
+
+Although our code is free, it is very important that we strictly enforce our trademark rights, in order to be able to protect our users against people who use the marks to commit fraud. This means that, while you have considerable freedom to redistribute and modify our software, there are tight restrictions on your ability to use our names and logos in ways which fall in the domain of trademark law, even when built into binaries that we provide.
+
+This file is licensed under a Creative Commons Attribution-ShareAlike 4.0 International License. Parts of it were taken from https://www.mozilla.org/en-US/foundation/licensing/.
diff --git a/spn/access/account/auth.go b/spn/access/account/auth.go
new file mode 100644
index 00000000..d93e6bf5
--- /dev/null
+++ b/spn/access/account/auth.go
@@ -0,0 +1,65 @@
+package account
+
+import (
+	"errors"
+	"net/http"
+)
+
+// Authentication Headers.
+const (
+	AuthHeaderDevice              = "Device-17"
+	AuthHeaderToken               = "Token-17"
+	AuthHeaderNextToken           = "Next-Token-17"
+	AuthHeaderNextTokenDeprecated = "Next_token_17"
+)
+
+// Errors.
+var (
+	ErrMissingDeviceID = errors.New("missing device ID")
+	ErrMissingToken    = errors.New("missing token")
+)
+
+// AuthToken holds an authentication token.
+type AuthToken struct {
+	Device string
+	Token  string
+}
+
+// GetAuthTokenFromRequest extracts an authentication token from a request.
+func GetAuthTokenFromRequest(request *http.Request) (*AuthToken, error) {
+	device := request.Header.Get(AuthHeaderDevice)
+	if device == "" {
+		return nil, ErrMissingDeviceID
+	}
+	token := request.Header.Get(AuthHeaderToken)
+	if token == "" {
+		return nil, ErrMissingToken
+	}
+
+	return &AuthToken{
+		Device: device,
+		Token:  token,
+	}, nil
+}
+
+// ApplyTo applies the authentication token to a request.
+func (at *AuthToken) ApplyTo(request *http.Request) {
+	request.Header.Set(AuthHeaderDevice, at.Device)
+	request.Header.Set(AuthHeaderToken, at.Token)
+}
+
+// GetNextTokenFromResponse extracts an authentication token from a response.
+func GetNextTokenFromResponse(resp *http.Response) (token string, ok bool) {
+	token = resp.Header.Get(AuthHeaderNextToken)
+	if token == "" {
+		// TODO: Remove when fixed on server.
+		token = resp.Header.Get(AuthHeaderNextTokenDeprecated)
+	}
+
+	return token, token != ""
+}
+
+// ApplyNextTokenToResponse applies the next authentication token to a response.
+func ApplyNextTokenToResponse(w http.ResponseWriter, token string) {
+	w.Header().Set(AuthHeaderNextToken, token)
+}
diff --git a/spn/access/account/client.go b/spn/access/account/client.go
new file mode 100644
index 00000000..d6d0f879
--- /dev/null
+++ b/spn/access/account/client.go
@@ -0,0 +1,14 @@
+package account
+
+// Customer Agent URLs.
+const (
+	CAAuthenticateURL = "/authenticate"
+	CAProfileURL      = "/user/profile"
+	CAGetTokensURL    = "/tokens"
+)
+
+// Customer Hub URLs.
+const (
+	CHAuthenticateURL = "/v1/authenticate"
+	CHUserProfileURL  = "/v1/user_profile"
+)
diff --git a/spn/access/account/types.go b/spn/access/account/types.go
new file mode 100644
index 00000000..f92f9f65
--- /dev/null
+++ b/spn/access/account/types.go
@@ -0,0 +1,137 @@
+package account
+
+import (
+	"time"
+
+	"golang.org/x/exp/slices"
+)
+
+// User, Subscription and Charge states.
+const (
+	// UserStateNone is only used within Portmaster for saving information for
+	// logging into the same device.
+	UserStateNone      = ""
+	UserStateFresh     = "fresh"
+	UserStateQueued    = "queued"
+	UserStateApproved  = "approved"
+	UserStateSuspended = "suspended"
+	UserStateLoggedOut = "loggedout" // Portmaster only.
+
+	SubscriptionStateManual    = "manual"    // Manual renewal.
+	SubscriptionStateActive    = "active"    // Automatic renewal.
+	SubscriptionStateCancelled = "cancelled" // Automatic, but cancelled.
+
+	ChargeStatePending   = "pending"
+	ChargeStateCompleted = "completed"
+	ChargeStateDead      = "dead"
+)
+
+// Agent and Hub return statuses.
+const (
+	// StatusInvalidAuth [401 Unauthorized] is returned when the credentials are
+	// invalid or the user was logged out.
+	StatusInvalidAuth = 401
+	// StatusNoAccess [403 Forbidden] is returned when the user does not have
+	// an active subscription or the subscription does not include the required
+	// feature for the request.
+	StatusNoAccess = 403
+	// StatusInvalidDevice [410 Gone] is returned when the device trying to
+	// log into does not exist.
+	StatusInvalidDevice = 410
+	// StatusReachedDeviceLimit [409 Conflict] is returned when the device limit is reached.
+	StatusReachedDeviceLimit = 409
+	// StatusDeviceInactive [423 Locked] is returned when the device is locked.
+	StatusDeviceInactive = 423
+	// StatusNotLoggedIn [412 Precondition Failed] is returned by the Portmaster if an action requires being logged in, but the user is not logged in.
+	StatusNotLoggedIn = 412
+
+	// StatusUnknownError is a special status code that signifies an unknown or
+	// unexpected error by the API.
+	StatusUnknownError = -1
+	// StatusConnectionError is a special status code that signifies a
+	// connection error.
+	StatusConnectionError = -2
+)
+
+// User describes an SPN user account.
+type User struct {
+	Username     string        `json:"username"`
+	State        string        `json:"state"`
+	Balance      int           `json:"balance"`
+	Device       *Device       `json:"device"`
+	Subscription *Subscription `json:"subscription"`
+	CurrentPlan  *Plan         `json:"current_plan"`
+	NextPlan     *Plan         `json:"next_plan"`
+	View         *View         `json:"view"`
+}
+
+// MayUseSPN returns whether the user may currently use the SPN.
+func (u *User) MayUseSPN() bool {
+	return u.MayUse(FeatureSPN)
+}
+
+// MayUsePrioritySupport returns whether the user may currently use the priority support.
+func (u *User) MayUsePrioritySupport() bool {
+	return u.MayUse(FeatureSafingSupport)
+}
+
+// MayUse returns whether the user may currently use the feature identified by
+// the given feature ID.
+// Leave feature ID empty to check without feature.
+func (u *User) MayUse(featureID FeatureID) bool {
+	switch {
+	case u == nil:
+		// We need a user, obviously.
+	case u.State != UserStateApproved:
+		// Only approved users may use the SPN.
+	case u.Subscription == nil:
+		// Need a subscription.
+	case u.Subscription.EndsAt == nil:
+	case time.Now().After(*u.Subscription.EndsAt):
+		// Subscription needs to be active.
+	case u.CurrentPlan == nil:
+		// Need a plan / package.
+	case featureID != "" &&
+		!slices.Contains(u.CurrentPlan.FeatureIDs, featureID):
+		// Required feature ID must be in plan / package feature IDs.
+	default:
+		// All checks passed!
+		return true
+	}
+	return false
+}
+
+// Device describes a device of an SPN user.
+type Device struct {
+	Name string `json:"name"`
+	ID   string `json:"id"`
+}
+
+// Subscription describes an SPN subscription.
+type Subscription struct {
+	EndsAt          *time.Time `json:"ends_at"`
+	State           string     `json:"state"`
+	NextBillingDate *time.Time `json:"next_billing_date"`
+	PaymentProvider string     `json:"payment_provider"`
+}
+
+// FeatureID defines a feature that requires a plan/subscription.
+type FeatureID string
+
+// A list of all supported features.
+const (
+	FeatureSPN           = FeatureID("spn")
+	FeatureSafingSupport = FeatureID("support")
+	FeatureHistory       = FeatureID("history")
+	FeatureBWVis         = FeatureID("bw-vis")
+	FeatureVPNCompat     = FeatureID("vpn-compat")
+)
+
+// Plan describes an SPN subscription plan.
+type Plan struct {
+	Name       string      `json:"name"`
+	Amount     int         `json:"amount"`
+	Months     int         `json:"months"`
+	Renewable  bool        `json:"renewable"`
+	FeatureIDs []FeatureID `json:"feature_ids"`
+}
diff --git a/spn/access/account/view.go b/spn/access/account/view.go
new file mode 100644
index 00000000..818bdfa6
--- /dev/null
+++ b/spn/access/account/view.go
@@ -0,0 +1,123 @@
+package account
+
+import (
+	"fmt"
+	"strings"
+	"time"
+)
+
+// View holds metadata that assists in displaying account information.
+type View struct {
+	Message           string
+	ShowAccountData   bool
+	ShowAccountButton bool
+	ShowLoginButton   bool
+	ShowRefreshButton bool
+	ShowLogoutButton  bool
+}
+
+// UpdateView updates the view and handles plan/package fallbacks.
+func (u *User) UpdateView(requestStatusCode int) {
+	v := &View{}
+
+	// Clean up naming and fallbacks when finished.
+	defer func() {
+		// Display "Free" package if no plan is set or if it expired.
+		switch {
+		case u.CurrentPlan == nil,
+			u.Subscription == nil,
+			u.Subscription.EndsAt == nil:
+			// Reset to free plan.
+			u.CurrentPlan = &Plan{
+				Name: "Free",
+			}
+			u.Subscription = nil
+
+		case u.Subscription.NextBillingDate != nil:
+			// Subscription is on auto-renew.
+			// Wait for update from server.
+
+		case time.Since(*u.Subscription.EndsAt) > 0:
+			// Reset to free plan.
+			u.CurrentPlan = &Plan{
+				Name: "Free",
+			}
+			u.Subscription = nil
+		}
+
+		// Prepend "Portmaster " to plan name.
+		// TODO: Remove when Plan/Package naming has been updated.
+		if u.CurrentPlan != nil && !strings.HasPrefix(u.CurrentPlan.Name, "Portmaster ") {
+			u.CurrentPlan.Name = "Portmaster " + u.CurrentPlan.Name
+		}
+
+		// Apply new view to user.
+		u.View = v
+	}()
+
+	// Set view data based on return code.
+	switch requestStatusCode {
+	case StatusInvalidAuth, StatusInvalidDevice, StatusDeviceInactive:
+		// Account deleted or Device inactive or deleted.
+		// When using token based auth, there is no difference between these cases.
+		v.Message = "This device may have been deactivated or removed from your account. Please log in again."
+		v.ShowAccountData = true
+		v.ShowAccountButton = true
+		v.ShowLoginButton = true
+		v.ShowLogoutButton = true
+		return
+
+	case StatusUnknownError:
+		v.Message = "There is an unknown error in the communication with the account server. The shown information may not be accurate. "
+
+	case StatusConnectionError:
+		v.Message = "Portmaster could not connect to the account server. The shown information may not be accurate. "
+	}
+
+	// Set view data based on profile data.
+	switch {
+	case u.State == UserStateLoggedOut:
+		// User logged out.
+		v.ShowAccountButton = true
+		v.ShowLoginButton = true
+		return
+
+	case u.State == UserStateSuspended:
+		// Account is suspended.
+		v.Message += fmt.Sprintf("Your account (%s) was suspended. Please contact support for details.", u.Username)
+		v.ShowAccountButton = true
+		v.ShowRefreshButton = true
+		v.ShowLogoutButton = true
+		return
+
+	case u.Subscription == nil || u.Subscription.EndsAt == nil:
+		// Account has never had a subscription.
+		v.Message += "Get more features. Upgrade today."
+
+	case u.Subscription.NextBillingDate != nil:
+		switch {
+		case time.Since(*u.Subscription.NextBillingDate) > 0:
+			v.Message += "Your auto-renewal seems to be delayed. Please refresh and check the status of your payment. Payment information may be delayed."
+		case time.Until(*u.Subscription.NextBillingDate) < 24*time.Hour:
+			v.Message += "Your subscription will auto-renew soon. Please note that payment information may be delayed."
+		}
+
+	case time.Since(*u.Subscription.EndsAt) > 0:
+		// Subscription expired.
+		if u.CurrentPlan != nil {
+			v.Message += fmt.Sprintf("Your package %s has ended. Extend it on the Account Page.", u.CurrentPlan.Name)
+		} else {
+			v.Message += "Your package has ended. Extend it on the Account Page."
+		}
+
+	case time.Until(*u.Subscription.EndsAt) < 7*24*time.Hour:
+		// Add generic ending soon message if the package ends in less than 7 days.
+		v.Message += "Your package ends soon. Extend it on the Account Page."
+	}
+
+	// Defaults for generally good accounts.
+	v.ShowAccountData = true
+	v.ShowAccountButton = true
+	v.ShowRefreshButton = true
+	v.ShowLogoutButton = true
+}
diff --git a/spn/access/api.go b/spn/access/api.go
new file mode 100644
index 00000000..e38a8c9f
--- /dev/null
+++ b/spn/access/api.go
@@ -0,0 +1,168 @@
+package access
+
+import (
+	"fmt"
+	"net/http"
+
+	"github.com/safing/portbase/api"
+	"github.com/safing/portbase/database/record"
+	"github.com/safing/portbase/log"
+	"github.com/safing/portmaster/spn/access/account"
+)
+
+func registerAPIEndpoints() error {
+	if err := api.RegisterEndpoint(api.Endpoint{
+		Path:        `spn/account/login`,
+		Write:       api.PermitAdmin,
+		WriteMethod: http.MethodPost,
+		HandlerFunc: handleLogin,
+		Name:        "SPN Login",
+		Description: "Log into your SPN account.",
+	}); err != nil {
+		return err
+	}
+
+	if err := api.RegisterEndpoint(api.Endpoint{
+		Path:        `spn/account/logout`,
+		Write:       api.PermitAdmin,
+		WriteMethod: http.MethodDelete,
+		ActionFunc:  handleLogout,
+		Name:        "SPN Logout",
+		Description: "Logout from your SPN account.",
+		Parameters: []api.Parameter{
+			{
+				Method:      http.MethodDelete,
+				Field:       "purge",
+				Value:       "",
+				Description: "If set, account data is purged. Otherwise, the username and device ID are kept in order to log into the same device when logging in with the same user again.",
+			},
+		},
+	}); err != nil {
+		return err
+	}
+
+	if err := api.RegisterEndpoint(api.Endpoint{
+		Path:        `spn/account/user/profile`,
+		Read:        api.PermitUser,
+		ReadMethod:  http.MethodGet,
+		RecordFunc:  handleGetUserProfile,
+		Name:        "SPN User Profile",
+		Description: "Get the user profile of the logged in SPN account.",
+		Parameters: []api.Parameter{
+			{
+				Method:      http.MethodGet,
+				Field:       "refresh",
+				Value:       "",
+				Description: "If set, the user profile is freshly fetched from the account server.",
+			},
+		},
+	}); err != nil {
+		return err
+	}
+
+	if err := api.RegisterEndpoint(api.Endpoint{
+		Path:       `account/features`,
+		Read:       api.PermitUser,
+		ReadMethod: http.MethodGet,
+		StructFunc: func(_ *api.Request) (i interface{}, err error) {
+			return struct {
+				Features []Feature
+			}{
+				Features: features,
+			}, nil
+		},
+		Name:        "Get Account Features",
+		Description: "Returns all account features.",
+	}); err != nil {
+		return err
+	}
+
+	if err := api.RegisterEndpoint(api.Endpoint{
+		Path:       `account/features/{id:[A-Za-z0-9_-]+}/icon`,
+		Read:       api.PermitUser,
+		ReadMethod: http.MethodGet,
+		Name:       "Returns the image of the feature",
+		MimeType:   "image/svg+xml",
+		DataFunc: func(ar *api.Request) (data []byte, err error) {
+			featureID, ok := ar.URLVars["id"]
+			if !ok {
+				return nil, fmt.Errorf("invalid feature id")
+			}
+
+			for _, feature := range features {
+				if feature.ID == featureID {
+					return []byte(feature.icon), nil
+				}
+			}
+
+			return nil, fmt.Errorf("feature id not found")
+		},
+	}); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func handleLogin(w http.ResponseWriter, r *http.Request) {
+	// Get username and password.
+	username, password, ok := r.BasicAuth()
+	// Request authentication, if credentials were omitted.
+	if !ok || username == "" || password == "" {
+		w.Header().Set("WWW-Authenticate", "Basic realm=SPN Login")
+		http.Error(w, "Login with your SPN account.", http.StatusUnauthorized)
+		return
+	}
+
+	// Process login.
+	user, code, err := Login(username, password)
+	if err != nil {
+		log.Warningf("spn/access: failed to login: %s", err)
+		if code == 0 {
+			http.Error(w, "Internal error: "+err.Error(), http.StatusInternalServerError)
+		} else {
+			http.Error(w, err.Error(), code)
+		}
+		return
+	}
+
+	// Return success.
+	_, _ = w.Write([]byte(
+		fmt.Sprintf("Now logged in as %s as device %s", user.Username, user.Device.Name),
+	))
+}
+
+func handleLogout(ar *api.Request) (msg string, err error) {
+	purge := ar.URL.Query().Get("purge") != ""
+	err = Logout(false, purge)
+	switch {
+	case err != nil:
+		log.Warningf("spn/access: failed to logout: %s", err)
+		return "", err
+	case purge:
+		return "Logged out and user data purged.", nil
+	default:
+		return "Logged out.", nil
+	}
+}
+
+func handleGetUserProfile(ar *api.Request) (r record.Record, err error) {
+	// Check if we are already authenticated.
+	user, err := GetUser()
+	if err != nil || user.State == account.UserStateNone {
+		return nil, api.ErrorWithStatus(
+			ErrNotLoggedIn,
+			account.StatusInvalidAuth,
+		)
+	}
+
+	// Should we refresh the user profile?
+	if ar.URL.Query().Get("refresh") != "" {
+		user, _, err = UpdateUser()
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	return user, nil
+}
diff --git a/spn/access/client.go b/spn/access/client.go
new file mode 100644
index 00000000..f22bb9e9
--- /dev/null
+++ b/spn/access/client.go
@@ -0,0 +1,550 @@
+package access
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"net/http"
+	"sync"
+	"time"
+
+	"github.com/safing/portbase/database"
+	"github.com/safing/portbase/formats/dsd"
+	"github.com/safing/portbase/log"
+	"github.com/safing/portmaster/spn/access/account"
+	"github.com/safing/portmaster/spn/access/token"
+)
+
+// Client URLs.
+const (
+	AccountServer         = "https://api.account.safing.io"
+	LoginPath             = "/api/v1/authenticate"
+	UserProfilePath       = "/api/v1/user/profile"
+	TokenRequestSetupPath = "/api/v1/token/request/setup" //nolint:gosec
+	TokenRequestIssuePath = "/api/v1/token/request/issue" //nolint:gosec
+	HealthCheckPath       = "/api/v1/health"
+
+	defaultDataFormat     = dsd.CBOR
+	defaultRequestTimeout = 30 * time.Second
+)
+
+var (
+	accountClient     = &http.Client{}
+	clientRequestLock sync.Mutex
+
+	// EnableAfterLogin automatically enables the SPN subsystem/module after login.
+	EnableAfterLogin = true
+)
+
+type clientRequestOptions struct {
+	method               string
+	url                  string
+	send                 interface{}
+	recv                 interface{}
+	requestTimeout       time.Duration
+	dataFormat           uint8
+	setAuthToken         bool
+	requireNextAuthToken bool
+	logoutOnAuthError    bool
+	requestSetupFunc     func(*http.Request) error
+}
+
+func makeClientRequest(opts *clientRequestOptions) (resp *http.Response, err error) {
+	// Get request timeout.
+	if opts.requestTimeout == 0 {
+		opts.requestTimeout = defaultRequestTimeout
+	}
+	// Get context for request.
+	var ctx context.Context
+	var cancel context.CancelFunc
+	if module.Online() {
+		// Only use module context if online.
+		ctx, cancel = context.WithTimeout(module.Ctx, opts.requestTimeout)
+		defer cancel()
+	} else {
+		// Otherwise, use the background context.
+		ctx, cancel = context.WithTimeout(context.Background(), opts.requestTimeout)
+		defer cancel()
+	}
+
+	// Create new request.
+	request, err := http.NewRequestWithContext(ctx, opts.method, opts.url, nil)
+	if err != nil {
+		return nil, fmt.Errorf("failed to create request structure: %w", err)
+	}
+
+	// Prepare body and content type.
+	if opts.dataFormat == dsd.AUTO {
+		opts.dataFormat = defaultDataFormat
+	}
+	if opts.send != nil {
+		// Add data to body.
+		err = dsd.DumpToHTTPRequest(request, opts.send, opts.dataFormat)
+		if err != nil {
+			return nil, fmt.Errorf("failed to add request body: %w", err)
+		}
+	} else {
+		// Set requested HTTP response format.
+		_, err = dsd.RequestHTTPResponseFormat(request, opts.dataFormat)
+		if err != nil {
+			return nil, fmt.Errorf("failed to set requested response format: %w", err)
+		}
+	}
+
+	// Get auth token to apply to request.
+	var authToken *AuthTokenRecord
+	if opts.setAuthToken {
+		authToken, err = GetAuthToken()
+		if err != nil {
+			return nil, ErrNotLoggedIn
+		}
+		authToken.Token.ApplyTo(request)
+	}
+
+	// Do any additional custom request setup.
+	if opts.requestSetupFunc != nil {
+		err = opts.requestSetupFunc(request)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	// Make request.
+	resp, err = accountClient.Do(request)
+	if err != nil {
+		updateUserWithFailedRequest(account.StatusConnectionError, false)
+		tokenIssuerFailed()
+		return nil, fmt.Errorf("http request failed: %w", err)
+	}
+	log.Debugf("spn/access: request to %s returned %s", request.URL, resp.Status)
+	defer func() {
+		_ = resp.Body.Close()
+	}()
+	// Handle request error.
+	switch resp.StatusCode {
+	case http.StatusOK, http.StatusCreated:
+		// All good!
+
+	case account.StatusInvalidAuth, account.StatusInvalidDevice:
+		// Wrong username / password.
+		updateUserWithFailedRequest(resp.StatusCode, true)
+		return resp, ErrInvalidCredentials
+
+	case account.StatusReachedDeviceLimit:
+		// Device limit is reached.
+		updateUserWithFailedRequest(resp.StatusCode, true)
+		return resp, ErrDeviceLimitReached
+
+	case account.StatusDeviceInactive:
+		// Device is locked.
+		updateUserWithFailedRequest(resp.StatusCode, true)
+		return resp, ErrDeviceIsLocked
+
+	default:
+		updateUserWithFailedRequest(account.StatusUnknownError, false)
+		tokenIssuerFailed()
+		return resp, fmt.Errorf("unexpected reply: [%d] %s", resp.StatusCode, resp.Status)
+	}
+
+	// Save next auth token.
+	if authToken != nil {
+		err = authToken.Update(resp)
+		if err != nil {
+			if errors.Is(err, account.ErrMissingToken) {
+				if opts.requireNextAuthToken {
+					return resp, fmt.Errorf("failed to save next auth token: %w", err)
+				}
+			} else {
+				return resp, fmt.Errorf("failed to save next auth token: %w", err)
+			}
+		}
+	} else if opts.requireNextAuthToken {
+		return resp, fmt.Errorf("failed to save next auth token: %w", account.ErrMissingToken)
+	}
+
+	// Load response data.
+	if opts.recv != nil {
+		_, err = dsd.LoadFromHTTPResponse(resp, opts.recv)
+		if err != nil {
+			return resp, fmt.Errorf("failed to parse response: %w", err)
+		}
+	}
+
+	tokenIssuerIsFailing.UnSet()
+	return resp, nil
+}
+
+func updateUserWithFailedRequest(statusCode int, disableSubscription bool) {
+	// Get user from database.
+	user, err := GetUser()
+	if err != nil {
+		if !errors.Is(err, ErrNotLoggedIn) {
+			log.Warningf("spn/access: failed to get user to update with failed request: %s", err)
+		}
+		return
+	}
+
+	func() {
+		user.Lock()
+		defer user.Unlock()
+
+		// Ignore update if user state is undefined or logged out.
+		if user.State == "" || user.State == account.UserStateLoggedOut {
+			return
+		}
+
+		// Disable the subscription if desired.
+		if disableSubscription && user.Subscription != nil {
+			user.Subscription.EndsAt = nil
+		}
+
+		// Update view with the status code and save user.
+		user.UpdateView(statusCode)
+	}()
+
+	err = user.Save()
+	if err != nil {
+		log.Warningf("spn/access: failed to save user after update with failed request: %s", err)
+	}
+}
+
+// Login logs the user into the SPN account with the given username and password.
+func Login(username, password string) (user *UserRecord, code int, err error) {
+	clientRequestLock.Lock()
+	defer clientRequestLock.Unlock()
+
+	// Trigger account update when done.
+	defer module.TriggerEvent(AccountUpdateEvent, nil)
+
+	// Get previous user.
+	previousUser, err := GetUser()
+	if err != nil {
+		if !errors.Is(err, ErrNotLoggedIn) {
+			log.Warningf("spn/access: failed to get previous user for re-login: %s", err)
+		}
+		previousUser = nil
+	}
+
+	// Create request options.
+	userAccount := &account.User{}
+	requestOptions := &clientRequestOptions{
+		method:     http.MethodPost,
+		url:        AccountServer + LoginPath,
+		recv:       userAccount,
+		dataFormat: dsd.JSON,
+		requestSetupFunc: func(request *http.Request) error {
+			// Add username and password.
+			request.SetBasicAuth(username, password)
+
+			// Try to reuse the device ID, if the username matches the previous user.
+			if previousUser != nil && username == previousUser.Username {
+				request.Header.Set(account.AuthHeaderDevice, previousUser.Device.ID)
+			}
+
+			return nil
+		},
+	}
+
+	// Make request.
+	resp, err := makeClientRequest(requestOptions) //nolint:bodyclose // Body is closed in function.
+	if err != nil {
+		if resp != nil && resp.StatusCode == account.StatusInvalidDevice {
+			// Try again without the previous device ID.
+			previousUser = nil
+			log.Info("spn/access: retrying log in without re-using previous device ID")
+			resp, err = makeClientRequest(requestOptions) //nolint:bodyclose // Body is closed in function.
+		}
+		if err != nil {
+			if resp != nil {
+				return nil, resp.StatusCode, err
+			}
+			return nil, 0, err
+		}
+	}
+
+	// Save new user.
+	now := time.Now()
+	user = &UserRecord{
+		User:       userAccount,
+		LoggedInAt: &now,
+	}
+
+	user.UpdateView(0)
+	err = user.Save()
+	if err != nil {
+		return user, resp.StatusCode, fmt.Errorf("failed to save new user profile: %w", err)
+	}
+
+	// Save initial auth token.
+	err = SaveNewAuthToken(user.Device.ID, resp)
+	if err != nil {
+		return user, resp.StatusCode, fmt.Errorf("failed to save initial auth token: %w", err)
+	}
+
+	// Enable the SPN right after login.
+	if user.MayUseSPN() && EnableAfterLogin {
+		enableSPN()
+	}
+
+	log.Infof("spn/access: logged in as %q on device %q", user.Username, user.Device.Name)
+	return user, resp.StatusCode, nil
+}
+
+// Logout logs the user out of the SPN account.
+// Specify "shallow" to keep user data in order to display data in the
+// UI - preferably when logged out by the server.
+// Specify "purge" in order to fully delete all user account data, even
+// the device ID so that logging in again will create a new device.
+func Logout(shallow, purge bool) error {
+	clientRequestLock.Lock()
+	defer clientRequestLock.Unlock()
+
+	// Trigger account update when done.
+	defer module.TriggerEvent(AccountUpdateEvent, nil)
+
+	// Clear caches.
+	clearUserCaches()
+
+	// Clear tokens.
+	clearTokens()
+
+	// Delete auth token.
+	err := db.Delete(authTokenRecordKey)
+	if err != nil && !errors.Is(err, database.ErrNotFound) {
+		return fmt.Errorf("failed to delete auth token: %w", err)
+	}
+
+	// Delete all user data if purging.
+	if purge {
+		err := db.Delete(userRecordKey)
+		if err != nil && !errors.Is(err, database.ErrNotFound) {
+			return fmt.Errorf("failed to delete user: %w", err)
+		}
+
+		// Disable SPN when the user logs out directly.
+		disableSPN()
+
+		log.Info("spn/access: logged out and purged data")
+		return nil
+	}
+
+	// Else, just update the user.
+	user, err := GetUser()
+	if err != nil {
+		if errors.Is(err, ErrNotLoggedIn) {
+			return nil
+		}
+		return fmt.Errorf("failed to load user for logout: %w", err)
+	}
+
+	func() {
+		user.Lock()
+		defer user.Unlock()
+
+		if shallow {
+			// Shallow logout: User stays logged in the UI to display status when
+			// logged out from the Portmaster or Customer Hub.
+			user.User.State = account.UserStateLoggedOut
+		} else {
+			// Proper logout: User is logged out from UI.
+			// Reset all user data, except for username and device ID in order to log
+			// into the same device again.
+			user.User = &account.User{
+				Username: user.Username,
+				Device: &account.Device{
+					ID: user.Device.ID,
+				},
+			}
+			user.LoggedInAt = &time.Time{}
+		}
+		user.UpdateView(0)
+	}()
+	err = user.Save()
+	if err != nil {
+		return fmt.Errorf("failed to save user for logout: %w", err)
+	}
+
+	if shallow {
+		log.Info("spn/access: logged out shallow")
+	} else {
+		log.Info("spn/access: logged out")
+
+		// Disable SPN when the user logs out directly.
+		disableSPN()
+	}
+
+	return nil
+}
+
+// UpdateUser fetches the current user information from the server.
+// It returns the updated (or newly created) user record together with the
+// HTTP status code of the profile request. The AccountUpdateEvent is always
+// triggered when this function returns, even on failure.
+func UpdateUser() (user *UserRecord, statusCode int, err error) {
+	clientRequestLock.Lock()
+	defer clientRequestLock.Unlock()
+
+	// Trigger account update when done.
+	defer module.TriggerEvent(AccountUpdateEvent, nil)
+
+	// Create request options.
+	userData := &account.User{}
+	requestOptions := &clientRequestOptions{
+		method:               http.MethodGet,
+		url:                  AccountServer + UserProfilePath,
+		recv:                 userData,
+		dataFormat:           dsd.JSON,
+		setAuthToken:         true,
+		requireNextAuthToken: true,
+		logoutOnAuthError:    true,
+	}
+
+	// Make request.
+	resp, err := makeClientRequest(requestOptions) //nolint:bodyclose // Body is closed in function.
+	if err != nil {
+		// Report the status code if a response was received at all.
+		if resp != nil {
+			return nil, resp.StatusCode, err
+		}
+		return nil, 0, err
+	}
+
+	// Save to previous user, if exists.
+	// Updating in place keeps record metadata such as LoggedInAt and
+	// LastNotifiedOfEnd intact.
+	previousUser, err := GetUser()
+	if err == nil {
+		func() {
+			previousUser.Lock()
+			defer previousUser.Unlock()
+			previousUser.User = userData
+			previousUser.UpdateView(resp.StatusCode)
+		}()
+		// A failed save is only logged, as the in-memory record is already updated.
+		err := previousUser.Save()
+		if err != nil {
+			log.Warningf("spn/access: failed to save updated user profile: %s", err)
+		}
+
+		// Notify user of nearing end of package.
+		notifyOfPackageEnd(previousUser)
+
+		log.Infof("spn/access: got user profile, updated existing")
+		return previousUser, resp.StatusCode, nil
+	}
+
+	// Else, save as new user.
+	now := time.Now()
+	newUser := &UserRecord{
+		User:       userData,
+		LoggedInAt: &now,
+	}
+	newUser.UpdateView(resp.StatusCode)
+	err = newUser.Save()
+	if err != nil {
+		log.Warningf("spn/access: failed to save new user profile: %s", err)
+	}
+
+	// Notify user of nearing end of package.
+	notifyOfPackageEnd(newUser)
+
+	log.Infof("spn/access: got user profile, saved as new")
+	return newUser, resp.StatusCode, nil
+}
+
+// UpdateTokens fetches more tokens for handlers that need it.
+// It requires a logged in user that may use the SPN and is a no-op when no
+// handler requires new tokens.
+func UpdateTokens() error {
+	clientRequestLock.Lock()
+	defer clientRequestLock.Unlock()
+
+	// Check if the user may request tokens.
+	user, err := GetUser()
+	if err != nil {
+		return fmt.Errorf("failed to get user: %w", err)
+	}
+	if !user.MayUseTheSPN() {
+		return ErrMayNotUseSPN
+	}
+
+	// Create setup request, return if not required.
+	setupRequest, setupRequired := token.CreateSetupRequest()
+	var setupResponse *token.SetupResponse
+	if setupRequired {
+		// Request setup data.
+		setupResponse = &token.SetupResponse{}
+		_, err := makeClientRequest(&clientRequestOptions{ //nolint:bodyclose // Body is closed in function.
+			method:            http.MethodPost,
+			url:               AccountServer + TokenRequestSetupPath,
+			send:              setupRequest,
+			recv:              setupResponse,
+			dataFormat:        dsd.MsgPack,
+			setAuthToken:      true,
+			logoutOnAuthError: true,
+		})
+		if err != nil {
+			return fmt.Errorf("failed to request setup data: %w", err)
+		}
+	}
+
+	// Create request for issuing new tokens.
+	tokenRequest, requestRequired, err := token.CreateTokenRequest(setupResponse)
+	if err != nil {
+		return fmt.Errorf("failed to create token request: %w", err)
+	}
+	if !requestRequired {
+		// All handlers have enough tokens.
+		return nil
+	}
+
+	// Request issuing new tokens.
+	issuedTokens := &token.IssuedTokens{}
+	_, err = makeClientRequest(&clientRequestOptions{ //nolint:bodyclose // Body is closed in function.
+		method:            http.MethodPost,
+		url:               AccountServer + TokenRequestIssuePath,
+		send:              tokenRequest,
+		recv:              issuedTokens,
+		dataFormat:        dsd.MsgPack,
+		setAuthToken:      true,
+		logoutOnAuthError: true,
+	})
+	if err != nil {
+		return fmt.Errorf("failed to request tokens: %w", err)
+	}
+
+	// Save tokens to handlers.
+	err = token.ProcessIssuedTokens(issuedTokens)
+	if err != nil {
+		return fmt.Errorf("failed to process issued tokens: %w", err)
+	}
+
+	// Log new status.
+	regular, fallback := GetTokenAmount(ExpandAndConnectZones)
+	log.Infof(
+		"spn/access: got new tokens, now at %d regular and %d fallback tokens for expand and connect",
+		regular,
+		fallback,
+	)
+
+	return nil
+}
+
+var (
+	// lastHealthCheckExpires is the time until which the last health check
+	// result may be reused without contacting the server again.
+	lastHealthCheckExpires          time.Time
+	lastHealthCheckLock             sync.Mutex
+	lastHealthCheckValidityDuration = 30 * time.Second
+)
+
+// healthCheck reports whether the token issuer is currently considered
+// healthy, based on the tokenIssuerIsFailing flag. A cached result is
+// returned while the previous check is still valid.
+// NOTE(review): the request's outcome only surfaces via tokenIssuerIsFailing,
+// which is presumably updated inside makeClientRequest — confirm.
+func healthCheck() (ok bool) {
+	lastHealthCheckLock.Lock()
+	defer lastHealthCheckLock.Unlock()
+
+	// Return current value if recently checked.
+	if time.Now().Before(lastHealthCheckExpires) {
+		return tokenIssuerIsFailing.IsNotSet()
+	}
+
+	// Check health.
+	_, err := makeClientRequest(&clientRequestOptions{ //nolint:bodyclose // Body is closed in function.
+		method: http.MethodGet,
+		url:    AccountServer + HealthCheckPath,
+	})
+	if err != nil {
+		log.Warningf("spn/access: token issuer health check failed: %s", err)
+	}
+	// Update health check expiry.
+	lastHealthCheckExpires = time.Now().Add(lastHealthCheckValidityDuration)
+
+	return tokenIssuerIsFailing.IsNotSet()
+}
diff --git a/spn/access/client_test.go b/spn/access/client_test.go
new file mode 100644
index 00000000..93c5e81e
--- /dev/null
+++ b/spn/access/client_test.go
@@ -0,0 +1,79 @@
+package access
+
+import (
+	"os"
+	"testing"
+)
+
+var (
+	testUsername = os.Getenv("SPN_TEST_USERNAME")
+	testPassword = os.Getenv("SPN_TEST_PASSWORD")
+)
+
+// TestClient exercises the full login/refresh/logout cycle against the live
+// account server. It is skipped in short mode and requires the
+// SPN_TEST_USERNAME and SPN_TEST_PASSWORD environment variables.
+func TestClient(t *testing.T) {
+	// Skip test in CI.
+	if testing.Short() {
+		t.Skip()
+	}
+	t.Parallel()
+
+	if testUsername == "" || testPassword == "" {
+		t.Fatal("test username or password not configured")
+	}
+
+	loginAndRefresh(t, true, 5)
+	clearUserCaches()
+	loginAndRefresh(t, false, 1)
+
+	// Plain logout without purging.
+	err := Logout(false, false)
+	if err != nil {
+		t.Fatalf("failed to log out: %s", err)
+	}
+	t.Logf("logged out")
+
+	loginAndRefresh(t, true, 1)
+
+	// Logout with purging all user data.
+	err = Logout(false, true)
+	if err != nil {
+		t.Fatalf("failed to log out: %s", err)
+	}
+	t.Logf("logged out with purge")
+
+	loginAndRefresh(t, true, 1)
+}
+
+// loginAndRefresh optionally logs in with the configured test credentials and
+// then refreshes the user profile refreshTimes times, failing the test on any
+// error along the way.
+func loginAndRefresh(t *testing.T, doLogin bool, refreshTimes int) {
+	t.Helper()
+
+	if doLogin {
+		_, _, err := Login(testUsername, testPassword)
+		if err != nil {
+			t.Fatalf("login failed: %s", err)
+		}
+		user, err := GetUser()
+		if err != nil {
+			t.Fatalf("failed to get user: %s", err)
+		}
+		t.Logf("user (from login): %+v", user.User)
+		t.Logf("device (from login): %+v", user.User.Device)
+		authToken, err := GetAuthToken()
+		if err != nil {
+			t.Fatalf("failed to get auth token: %s", err)
+		}
+		t.Logf("auth token: %+v", authToken.Token)
+	}
+
+	for i := 0; i < refreshTimes; i++ {
+		user, _, err := UpdateUser()
+		if err != nil {
+			t.Fatalf("getting profile failed: %s", err)
+		}
+		t.Logf("user (from refresh): %+v", user.User)
+
+		authToken, err := GetAuthToken()
+		if err != nil {
+			t.Fatalf("failed to get auth token: %s", err)
+		}
+		t.Logf("auth token: %+v", authToken.Token)
+	}
+}
diff --git a/spn/access/database.go b/spn/access/database.go
new file mode 100644
index 00000000..be5ea95a
--- /dev/null
+++ b/spn/access/database.go
@@ -0,0 +1,258 @@
+package access
+
+import (
+	"errors"
+	"fmt"
+	"net/http"
+	"sync"
+	"time"
+
+	"github.com/safing/portbase/database"
+	"github.com/safing/portbase/database/record"
+	"github.com/safing/portmaster/spn/access/account"
+)
+
+const (
+	// Database keys for the user account, the auth token and the per-zone
+	// token storages (the template takes the zone name).
+	userRecordKey           = "core:spn/account/user"
+	authTokenRecordKey      = "core:spn/account/authtoken" //nolint:gosec // Not a credential.
+	tokenStorageKeyTemplate = "core:spn/account/tokens/%s" //nolint:gosec // Not a credential.
+)
+
+// db is the shared database interface for all account related records.
+var db = database.NewInterface(&database.Options{
+	Local:    true,
+	Internal: true,
+})
+
+// UserRecord holds a SPN user account.
+type UserRecord struct {
+	record.Base
+	sync.Mutex
+
+	*account.User
+
+	// LastNotifiedOfEnd is when the user was last notified that their
+	// package is about to end.
+	LastNotifiedOfEnd *time.Time
+	// LoggedInAt is when the user logged in.
+	LoggedInAt        *time.Time
+}
+
+// MayUseSPN returns whether the user may currently use the SPN.
+// Safe to call on a nil receiver; returns false then.
+func (user *UserRecord) MayUseSPN() bool {
+	// Shadow this function in order to allow calls on a nil user.
+	if user == nil || user.User == nil {
+		return false
+	}
+	return user.User.MayUseSPN()
+}
+
+// MayUsePrioritySupport returns whether the user may currently use the priority support.
+// Safe to call on a nil receiver; returns false then.
+func (user *UserRecord) MayUsePrioritySupport() bool {
+	// Shadow this function in order to allow calls on a nil user.
+	if user == nil || user.User == nil {
+		return false
+	}
+	return user.User.MayUsePrioritySupport()
+}
+
+// MayUse returns whether the user may currently use the feature identified by
+// the given feature ID.
+// Leave feature ID empty to check without feature.
+// Safe to call on a nil receiver; returns false then.
+func (user *UserRecord) MayUse(featureID account.FeatureID) bool {
+	// Shadow this function in order to allow calls on a nil user.
+	if user == nil || user.User == nil {
+		return false
+	}
+	return user.User.MayUse(featureID)
+}
+
+// AuthTokenRecord holds an authentication token.
+type AuthTokenRecord struct {
+	record.Base
+	sync.Mutex
+
+	// Token is the current auth token, including the device it belongs to.
+	Token *account.AuthToken
+}
+
+// GetToken returns the token from the record.
+// The record is locked while reading.
+func (authToken *AuthTokenRecord) GetToken() *account.AuthToken {
+	authToken.Lock()
+	defer authToken.Unlock()
+
+	return authToken.Token
+}
+
+// SaveNewAuthToken saves a new auth token to the database.
+// The token is extracted from the given HTTP response and bound to the given
+// device ID. Returns account.ErrMissingToken when the response carries no
+// next token.
+func SaveNewAuthToken(deviceID string, resp *http.Response) error {
+	token, ok := account.GetNextTokenFromResponse(resp)
+	if !ok {
+		return account.ErrMissingToken
+	}
+
+	newAuthToken := &AuthTokenRecord{
+		Token: &account.AuthToken{
+			Device: deviceID,
+			Token:  token,
+		},
+	}
+	return newAuthToken.Save()
+}
+
+// Update updates an existing auth token with the next token from a response.
+// The device ID is kept; only the token value is replaced. The record is
+// saved to the database afterwards.
+func (authToken *AuthTokenRecord) Update(resp *http.Response) error {
+	token, ok := account.GetNextTokenFromResponse(resp)
+	if !ok {
+		return account.ErrMissingToken
+	}
+
+	// Update token with new account.AuthToken.
+	// Locking happens in a closure so that Save() is called unlocked.
+	func() {
+		authToken.Lock()
+		defer authToken.Unlock()
+
+		authToken.Token = &account.AuthToken{
+			Device: authToken.Token.Device,
+			Token:  token,
+		}
+	}()
+
+	return authToken.Save()
+}
+
+var (
+	// accountCacheLock guards the cached user and auth token below.
+	accountCacheLock sync.Mutex
+
+	// cachedUser is only meaningful while cachedUserSet is true; it may then
+	// still be nil, meaning "known to be not logged in".
+	cachedUser    *UserRecord
+	cachedUserSet bool
+
+	cachedAuthToken *AuthTokenRecord
+)
+
+// clearUserCaches resets the in-memory user and auth token caches, forcing
+// the next access to load from the database again.
+func clearUserCaches() {
+	accountCacheLock.Lock()
+	defer accountCacheLock.Unlock()
+
+	cachedUser = nil
+	cachedUserSet = false
+	cachedAuthToken = nil
+}
+
+// GetUser returns the current user account.
+// It returns ErrNotLoggedIn when no user is logged in.
+func GetUser() (*UserRecord, error) {
+	// Check cache.
+	accountCacheLock.Lock()
+	defer accountCacheLock.Unlock()
+	if cachedUserSet {
+		if cachedUser == nil {
+			// Cache remembers that no user is logged in.
+			return nil, ErrNotLoggedIn
+		}
+		return cachedUser, nil
+	}
+
+	// Load from disk.
+	r, err := db.Get(userRecordKey)
+	if err != nil {
+		if errors.Is(err, database.ErrNotFound) {
+			// Remember the "not logged in" state in the cache.
+			cachedUser = nil
+			cachedUserSet = true
+			return nil, ErrNotLoggedIn
+		}
+		return nil, err
+	}
+
+	// Unwrap record.
+	if r.IsWrapped() {
+		// only allocate a new struct, if we need it
+		newUser := &UserRecord{}
+		err = record.Unwrap(r, newUser)
+		if err != nil {
+			return nil, err
+		}
+		cachedUser = newUser
+		cachedUserSet = true
+		return cachedUser, nil
+	}
+
+	// Or adjust type.
+	newUser, ok := r.(*UserRecord)
+	if !ok {
+		return nil, fmt.Errorf("record not of type *UserRecord, but %T", r)
+	}
+	cachedUser = newUser
+	cachedUserSet = true
+	return cachedUser, nil
+}
+
+// Save saves the User.
+// The in-memory cache is updated first, so the record is served even if
+// writing to the database fails.
+func (user *UserRecord) Save() error {
+	// Update cache.
+	accountCacheLock.Lock()
+	defer accountCacheLock.Unlock()
+	cachedUser = user
+	cachedUserSet = true
+
+	// Update view if unset.
+	if user.View == nil {
+		user.UpdateView(0)
+	}
+
+	// Set, check and update metadata.
+	if !user.KeyIsSet() {
+		user.SetKey(userRecordKey)
+	}
+	user.UpdateMeta()
+
+	return db.Put(user)
+}
+
+// GetAuthToken returns the current auth token.
+// Results are cached in memory after the first load from the database.
+func GetAuthToken() (*AuthTokenRecord, error) {
+	// Check cache.
+	accountCacheLock.Lock()
+	defer accountCacheLock.Unlock()
+	if cachedAuthToken != nil {
+		return cachedAuthToken, nil
+	}
+
+	// Load from disk.
+	r, err := db.Get(authTokenRecordKey)
+	if err != nil {
+		return nil, err
+	}
+
+	// Unwrap record.
+	if r.IsWrapped() {
+		// only allocate a new struct, if we need it
+		newAuthRecord := &AuthTokenRecord{}
+		err = record.Unwrap(r, newAuthRecord)
+		if err != nil {
+			return nil, err
+		}
+		cachedAuthToken = newAuthRecord
+		return newAuthRecord, nil
+	}
+
+	// Or adjust type.
+	newAuthRecord, ok := r.(*AuthTokenRecord)
+	if !ok {
+		return nil, fmt.Errorf("record not of type *AuthTokenRecord, but %T", r)
+	}
+	cachedAuthToken = newAuthRecord
+	return newAuthRecord, nil
+}
+
+// Save saves the auth token to the database.
+// The record is marked as secret and crown jewel so that it is treated as
+// sensitive data by the database layer.
+func (authToken *AuthTokenRecord) Save() error {
+	// Update cache.
+	accountCacheLock.Lock()
+	defer accountCacheLock.Unlock()
+	cachedAuthToken = authToken
+
+	// Set, check and update metadata.
+	if !authToken.KeyIsSet() {
+		authToken.SetKey(authTokenRecordKey)
+	}
+	authToken.UpdateMeta()
+	authToken.Meta().MakeSecret()
+	authToken.Meta().MakeCrownJewel()
+
+	return db.Put(authToken)
+}
diff --git a/spn/access/features.go b/spn/access/features.go
new file mode 100644
index 00000000..a26805e1
--- /dev/null
+++ b/spn/access/features.go
@@ -0,0 +1,127 @@
+package access
+
+import "github.com/safing/portmaster/spn/access/account"
+
+// Feature describes a notable part of the program.
+type Feature struct {
+	Name              string
+	ID                string
+	// RequiredFeatureID is the account feature required to use this feature.
+	RequiredFeatureID account.FeatureID
+	// ConfigKey and ConfigScope link the feature to its settings.
+	ConfigKey         string
+	ConfigScope       string
+	// InPackage is the smallest package that includes this feature.
+	InPackage         *Package
+	Comment           string
+	Beta              bool
+	ComingSoon        bool
+	// icon is an inline SVG used by the UI.
+	icon              string
+}
+
+// Package combines a set of features.
+type Package struct {
+	Name     string
+	HexColor string
+	InfoURL  string
+}
+
+var (
+	infoURL     = "https://safing.io/pricing/"
+	packageFree = &Package{
+		Name:     "Free",
+		HexColor: "#ffffff",
+		InfoURL:  infoURL,
+	}
+	packagePlus = &Package{
+		Name:     "Plus",
+		HexColor: "#2fcfae",
+		InfoURL:  infoURL,
+	}
+	packagePro = &Package{
+		Name:     "Pro",
+		HexColor: "#029ad0",
+		InfoURL:  infoURL,
+	}
+	features = []Feature{
+		{
+			Name:        "Secure DNS",
+			ID:          "dns",
+			ConfigScope: "dns/",
+			InPackage:   packageFree,
+			icon: `
+			    <svg xmlns="http://www.w3.org/2000/svg" fill="none" viewBox="0 0 24 24" stroke-width="1.5" stroke="currentColor">
+    			  <path stroke-linecap="round" stroke-linejoin="round"
+    			    d="M12 21a9.004 9.004 0 008.716-6.747M12 21a9.004 9.004 0 01-8.716-6.747M12 21c2.485 0 4.5-4.03 4.5-9S14.485 3 12 3m0 18c-2.485 0-4.5-4.03-4.5-9S9.515 3 12 3m0 0a8.997 8.997 0 017.843 4.582M12 3a8.997 8.997 0 00-7.843 4.582m15.686 0A11.953 11.953 0 0112 10.5c-2.998 0-5.74-1.1-7.843-2.918m15.686 0A8.959 8.959 0 0121 12c0 .778-.099 1.533-.284 2.253m0 0A17.919 17.919 0 0112 16.5c-3.162 0-6.133-.815-8.716-2.247m0 0A9.015 9.015 0 013 12c0-1.605.42-3.113 1.157-4.418" />
+    			</svg>
+			`,
+		},
+		{
+			Name:        "Privacy Filter",
+			ID:          "filter",
+			ConfigScope: "filter/",
+			InPackage:   packageFree,
+			icon: `
+<svg xmlns="http://www.w3.org/2000/svg" fill="none" viewBox="0 0 24 24" stroke-width="1.5" stroke="currentColor">
+  <path stroke-linecap="round" stroke-linejoin="round" d="M3.98 8.223A10.477 10.477 0 001.934 12C3.226 16.338 7.244 19.5 12 19.5c.993 0 1.953-.138 2.863-.395M6.228 6.228A10.45 10.45 0 0112 4.5c4.756 0 8.773 3.162 10.065 7.498a10.523 10.523 0 01-4.293 5.774M6.228 6.228L3 3m3.228 3.228l3.65 3.65m7.894 7.894L21 21m-3.228-3.228l-3.65-3.65m0 0a3 3 0 10-4.243-4.243m4.242 4.242L9.88 9.88" />
+</svg>
+			`,
+		},
+		{
+			Name:              "Network History",
+			ID:                string(account.FeatureHistory),
+			RequiredFeatureID: account.FeatureHistory,
+			ConfigKey:         "history/enable",
+			ConfigScope:       "history/",
+			InPackage:         packagePlus,
+			icon: `
+<svg xmlns="http://www.w3.org/2000/svg" fill="none" viewBox="0 0 24 24" stroke-width="1.5" stroke="currentColor">
+  <path stroke-linecap="round" stroke-linejoin="round"
+    d="M12 6.042A8.967 8.967 0 006 3.75c-1.052 0-2.062.18-3 .512v14.25A8.987 8.987 0 016 18c2.305 0 4.408.867 6 2.292m0-14.25a8.966 8.966 0 016-2.292c1.052 0 2.062.18 3 .512v14.25A8.987 8.987 0 0018 18a8.967 8.967 0 00-6 2.292m0-14.25v14.25" />
+</svg>	
+			`,
+		},
+		{
+			Name:              "Bandwidth Visibility",
+			ID:                string(account.FeatureBWVis),
+			RequiredFeatureID: account.FeatureBWVis,
+			InPackage:         packagePlus,
+			Beta:              true,
+			icon: `
+    <svg xmlns="http://www.w3.org/2000/svg" fill="none" viewBox="0 0 24 24" stroke-width="1.5" stroke="currentColor">
+      <path stroke-linecap="round" stroke-linejoin="round"
+        d="M3 13.125C3 12.504 3.504 12 4.125 12h2.25c.621 0 1.125.504 1.125 1.125v6.75C7.5 20.496 6.996 21 6.375 21h-2.25A1.125 1.125 0 013 19.875v-6.75zM9.75 8.625c0-.621.504-1.125 1.125-1.125h2.25c.621 0 1.125.504 1.125 1.125v11.25c0 .621-.504 1.125-1.125 1.125h-2.25a1.125 1.125 0 01-1.125-1.125V8.625zM16.5 4.125c0-.621.504-1.125 1.125-1.125h2.25C20.496 3 21 3.504 21 4.125v15.75c0 .621-.504 1.125-1.125 1.125h-2.25a1.125 1.125 0 01-1.125-1.125V4.125z" />
+    </svg>
+			`,
+		},
+		{
+			Name:              "Safing Support",
+			ID:                string(account.FeatureSafingSupport),
+			RequiredFeatureID: account.FeatureSafingSupport,
+			InPackage:         packagePlus,
+			icon: `
+    <svg xmlns="http://www.w3.org/2000/svg" fill="none" viewBox="0 0 24 24" stroke-width="1.5" stroke="currentColor">
+      <path stroke-linecap="round" stroke-linejoin="round"
+        d="M15.75 6a3.75 3.75 0 11-7.5 0 3.75 3.75 0 017.5 0zM4.501 20.118a7.5 7.5 0 0114.998 0A17.933 17.933 0 0112 21.75c-2.676 0-5.216-.584-7.499-1.632z" />
+    </svg>	
+			`,
+		},
+		{
+			Name:              "Safing Privacy Network",
+			ID:                string(account.FeatureSPN),
+			RequiredFeatureID: account.FeatureSPN,
+			ConfigKey:         "spn/enable",
+			ConfigScope:       "spn/",
+			InPackage:         packagePro,
+			icon: `
+    <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24" stroke="currentColor" class="text-green-300">
+      <g fill="none" stroke-linecap="round" stroke-linejoin="round" stroke-width="1.5">
+        <path
+          d="M6.488 15.581c.782.781.782 2.048 0 2.829-.782.781-2.049.781-2.83 0-.782-.781-.782-2.048 0-2.829.781-.781 2.048-.781 2.83 0M13.415 3.586c.782.781.782 2.048 0 2.829-.782.781-2.049.781-2.83 0-.782-.781-.782-2.048 0-2.829.781-.781 2.049-.781 2.83 0M20.343 15.58c.782.781.782 2.048 0 2.829-.782.781-2.049.781-2.83 0-.782-.781-.782-2.048 0-2.829.781-.781 2.048-.781 2.83 0">
+        </path>
+        <path
+          d="M17.721 18.581C16.269 20.071 14.246 21 12 21c-1.146 0-2.231-.246-3.215-.68M4.293 15.152c-.56-1.999-.352-4.21.769-6.151.574-.995 1.334-1.814 2.205-2.449M13.975 5.254c2.017.512 3.834 1.799 4.957 3.743.569.985.899 2.041 1.018 3.103">
+        </path>
+      </g>
+    </svg>
+			`,
+		},
+	}
+)
diff --git a/spn/access/module.go b/spn/access/module.go
new file mode 100644
index 00000000..3f935f33
--- /dev/null
+++ b/spn/access/module.go
@@ -0,0 +1,194 @@
+package access
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"time"
+
+	"github.com/tevino/abool"
+
+	"github.com/safing/portbase/config"
+	"github.com/safing/portbase/log"
+	"github.com/safing/portbase/modules"
+	"github.com/safing/portmaster/spn/access/account"
+	"github.com/safing/portmaster/spn/access/token"
+	"github.com/safing/portmaster/spn/conf"
+)
+
+var (
+	module *modules.Module
+
+	accountUpdateTask *modules.Task
+
+	tokenIssuerIsFailing     = abool.New()
+	tokenIssuerRetryDuration = 10 * time.Minute
+
+	// AccountUpdateEvent is fired when the account has changed in any way.
+	AccountUpdateEvent = "account update"
+)
+
+// Errors.
+var (
+	ErrDeviceIsLocked       = errors.New("device is locked")
+	ErrDeviceLimitReached   = errors.New("device limit reached")
+	ErrFallbackNotAvailable = errors.New("fallback tokens not available, token issuer is online")
+	ErrInvalidCredentials   = errors.New("invalid credentials")
+	ErrMayNotUseSPN         = errors.New("may not use SPN")
+	ErrNotLoggedIn          = errors.New("not logged in")
+)
+
+func init() {
+	module = modules.Register("access", prep, start, stop, "terminal")
+}
+
+// prep registers the account update event and, on clients, the API endpoints.
+func prep() error {
+	module.RegisterEvent(AccountUpdateEvent, true)
+
+	// Register API handlers.
+	if conf.Client() {
+		err := registerAPIEndpoints()
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// start initializes the token zones and, on clients, loads stored tokens and
+// schedules the periodic account update task.
+func start() error {
+	// Initialize zones.
+	if err := InitializeZones(); err != nil {
+		return err
+	}
+
+	if conf.Client() {
+		// Load tokens from database.
+		loadTokens()
+
+		// Register new task.
+		// Runs daily; the first run is one minute after start.
+		accountUpdateTask = module.NewTask(
+			"update account",
+			UpdateAccount,
+		).Repeat(24 * time.Hour).Schedule(time.Now().Add(1 * time.Minute))
+	}
+
+	return nil
+}
+
+// stop cancels the account update task, persists tokens (on clients) and
+// resets the token zone registry.
+func stop() error {
+	if conf.Client() {
+		// Stop account update task.
+		accountUpdateTask.Cancel()
+		accountUpdateTask = nil
+
+		// Store tokens to database.
+		storeTokens()
+	}
+
+	// Reset zones.
+	token.ResetRegistry()
+
+	return nil
+}
+
+// UpdateAccount updates the user account and fetches new tokens, if needed.
+// The task parameter may be nil when called directly (outside the task
+// scheduler, as the deferred retry logic already acknowledges); in that case
+// no rescheduling is performed.
+func UpdateAccount(_ context.Context, task *modules.Task) error {
+	// Retry sooner if the token issuer is failing.
+	defer func() {
+		if tokenIssuerIsFailing.IsSet() && task != nil {
+			task.Schedule(time.Now().Add(tokenIssuerRetryDuration))
+		}
+	}()
+
+	// Get current user.
+	u, err := GetUser()
+	if err == nil {
+		// Do not update if we just updated.
+		if time.Since(time.Unix(u.Meta().Modified, 0)) < 2*time.Minute {
+			return nil
+		}
+	}
+
+	u, _, err = UpdateUser()
+	if err != nil {
+		return fmt.Errorf("failed to update user profile: %w", err)
+	}
+
+	err = UpdateTokens()
+	if err != nil {
+		return fmt.Errorf("failed to get tokens: %w", err)
+	}
+
+	// Schedule next check, but only when running as a task.
+	// Without this guard, the cases below would panic on a nil task.
+	if task == nil {
+		return nil
+	}
+	switch {
+	case u == nil: // No user.
+	case u.Subscription == nil: // No subscription.
+	case u.Subscription.EndsAt == nil: // Subscription not active.
+
+	case time.Until(*u.Subscription.EndsAt) < 24*time.Hour &&
+		time.Since(*u.Subscription.EndsAt) < 24*time.Hour:
+		// Update account every hour 24h hours before and after the subscription ends.
+		task.Schedule(time.Now().Add(time.Hour))
+
+	case u.Subscription.NextBillingDate == nil: // No auto-subscription.
+
+	case time.Until(*u.Subscription.NextBillingDate) < 24*time.Hour &&
+		time.Since(*u.Subscription.NextBillingDate) < 24*time.Hour:
+		// Update account every hour 24h hours before and after the next billing date.
+		task.Schedule(time.Now().Add(time.Hour))
+	}
+
+	return nil
+}
+
+// enableSPN sets the spn/enable config option; failures are only logged.
+func enableSPN() {
+	err := config.SetConfigOption("spn/enable", true)
+	if err != nil {
+		log.Warningf("spn/access: failed to enable the SPN during login: %s", err)
+	}
+}
+
+// disableSPN unsets the spn/enable config option; failures are only logged.
+func disableSPN() {
+	err := config.SetConfigOption("spn/enable", false)
+	if err != nil {
+		log.Warningf("spn/access: failed to disable the SPN during logout: %s", err)
+	}
+}
+
+// TokenIssuerIsFailing returns whether token issuing is currently failing.
+func TokenIssuerIsFailing() bool {
+	return tokenIssuerIsFailing.IsSet()
+}
+
+// tokenIssuerFailed marks the token issuer as failing and schedules an
+// earlier account update. Only the first call after a recovery has any
+// effect, and nothing is scheduled while the module is not online.
+func tokenIssuerFailed() {
+	if !tokenIssuerIsFailing.SetToIf(false, true) {
+		return
+	}
+	if !module.Online() {
+		return
+	}
+
+	accountUpdateTask.Schedule(time.Now().Add(tokenIssuerRetryDuration))
+}
+
+// IsLoggedIn returns whether a User is currently logged in.
+// Safe to call on a nil receiver; returns false then, consistent with the
+// other UserRecord helpers (e.g. MayUseSPN).
+func (user *UserRecord) IsLoggedIn() bool {
+	// Shadow this function in order to allow calls on a nil user.
+	if user == nil || user.User == nil {
+		return false
+	}
+
+	user.Lock()
+	defer user.Unlock()
+
+	switch user.State {
+	case account.UserStateNone, account.UserStateLoggedOut:
+		return false
+	default:
+		return true
+	}
+}
+
+// MayUseTheSPN returns whether the currently logged in User may use the SPN.
+// Safe to call on a nil receiver; returns false then, consistent with the
+// other UserRecord helpers (e.g. MayUseSPN).
+func (user *UserRecord) MayUseTheSPN() bool {
+	// Shadow this function in order to allow calls on a nil user.
+	if user == nil || user.User == nil {
+		return false
+	}
+
+	user.Lock()
+	defer user.Unlock()
+
+	return user.User.MayUseSPN()
+}
diff --git a/spn/access/module_test.go b/spn/access/module_test.go
new file mode 100644
index 00000000..59d69be6
--- /dev/null
+++ b/spn/access/module_test.go
@@ -0,0 +1,13 @@
+package access
+
+import (
+	"testing"
+
+	"github.com/safing/portmaster/service/core/pmtesting"
+	"github.com/safing/portmaster/spn/conf"
+)
+
+func TestMain(m *testing.M) {
+	conf.EnableClient(true)
+	pmtesting.TestMain(m, module)
+}
diff --git a/spn/access/notify.go b/spn/access/notify.go
new file mode 100644
index 00000000..978a2f16
--- /dev/null
+++ b/spn/access/notify.go
@@ -0,0 +1,105 @@
+package access
+
+import (
+	"fmt"
+	"strings"
+	"time"
+
+	"github.com/safing/portbase/log"
+	"github.com/safing/portbase/notifications"
+)
+
+const (
+	day  = 24 * time.Hour
+	week = 7 * day
+
+	endOfPackageNearNotifID = "access:end-of-package-near"
+)
+
+// notifyOfPackageEnd sends a notification when the user's package is about
+// to expire. Cadence: weekly reminders in the last 4 weeks (only for
+// packages of 6+ months), then every two days in the last week, upgrading to
+// a warning in the last 4 days. The time of the last notification is stored
+// on the user record to enforce this cadence.
+func notifyOfPackageEnd(u *UserRecord) {
+	// TODO: Check if subscription auto-renews.
+
+	// Skip if there is not active subscription or if it has ended already.
+	switch {
+	case u.Subscription == nil, // No subscription.
+		u.Subscription.EndsAt == nil,             // Subscription not active.
+		u.Subscription.NextBillingDate != nil,    // Subscription is auto-renewing.
+		time.Now().After(*u.Subscription.EndsAt): // Subscription has ended.
+		return
+	}
+
+	// Calculate durations.
+	sinceLastNotified := 52 * week // Never.
+	if u.LastNotifiedOfEnd != nil {
+		sinceLastNotified = time.Since(*u.LastNotifiedOfEnd)
+	}
+	untilEnd := time.Until(*u.Subscription.EndsAt)
+
+	// Notify every two days in the week before end.
+	notifType := notifications.Info
+	switch {
+	case untilEnd < week && sinceLastNotified > 2*day:
+		// Notify 7, 5, 3 and 1 days before end.
+		if untilEnd < 4*day {
+			notifType = notifications.Warning
+		}
+		// Falls through into the shared notification code below.
+		fallthrough
+
+	case u.CurrentPlan != nil && u.CurrentPlan.Months >= 6 &&
+		untilEnd < 4*week && sinceLastNotified > week:
+		// Notify 4, 3 and 2 weeks before end - on long running packages.
+
+		// Get names and messages.
+		packageNameTitle := "Portmaster Package"
+		if u.CurrentPlan != nil {
+			packageNameTitle = u.CurrentPlan.Name
+		}
+		packageNameBody := packageNameTitle
+		if !strings.HasSuffix(packageNameBody, " Package") {
+			packageNameBody += " Package"
+		}
+
+		// Phrase the remaining time in a human friendly way.
+		var endsText string
+		daysUntilEnd := untilEnd / day
+		switch daysUntilEnd { //nolint:exhaustive
+		case 0:
+			endsText = "today"
+		case 1:
+			endsText = "tomorrow"
+		default:
+			endsText = fmt.Sprintf("in %d days", daysUntilEnd)
+		}
+
+		// Send notification.
+		notifications.Notify(&notifications.Notification{
+			EventID: endOfPackageNearNotifID,
+			Type:    notifType,
+			Title:   fmt.Sprintf("%s About to Expire", packageNameTitle),
+			Message: fmt.Sprintf(
+				"Your current %s ends %s. Extend it to keep your full privacy protections.",
+				packageNameBody,
+				endsText,
+			),
+			ShowOnSystem: notifType == notifications.Warning,
+			AvailableActions: []*notifications.Action{
+				{
+					Text:    "Open Account Page",
+					Type:    notifications.ActionTypeOpenURL,
+					Payload: "https://account.safing.io",
+				},
+				{
+					ID:   "ack",
+					Text: "Got it!",
+				},
+			},
+		})
+
+		// Save that we sent a notification.
+		now := time.Now()
+		u.LastNotifiedOfEnd = &now
+		err := u.Save()
+		if err != nil {
+			log.Warningf("spn/access: failed to save user after sending subscription ending soon notification: %s", err)
+		}
+	}
+}
diff --git a/spn/access/op_auth.go b/spn/access/op_auth.go
new file mode 100644
index 00000000..764c73c3
--- /dev/null
+++ b/spn/access/op_auth.go
@@ -0,0 +1,75 @@
+package access
+
+import (
+	"time"
+
+	"github.com/safing/portbase/container"
+	"github.com/safing/portbase/log"
+	"github.com/safing/portmaster/spn/access/token"
+	"github.com/safing/portmaster/spn/terminal"
+)
+
+// OpTypeAccessCodeAuth is the type ID of the auth operation.
+const OpTypeAccessCodeAuth = "auth"
+
+func init() {
+	terminal.RegisterOpType(terminal.OperationFactory{
+		Type:  OpTypeAccessCodeAuth,
+		Start: checkAccessCode,
+	})
+}
+
+// AuthorizeOp is used to authorize a session.
+type AuthorizeOp struct {
+	terminal.OneOffOperationBase
+}
+
+// Type returns the type ID.
+func (op *AuthorizeOp) Type() string {
+	return OpTypeAccessCodeAuth
+}
+
+// AuthorizeToTerminal starts an authorization operation.
+// It fetches an access token for the expand-and-connect zones and sends it
+// to the given terminal with a 10 second timeout.
+func AuthorizeToTerminal(t terminal.Terminal) (*AuthorizeOp, *terminal.Error) {
+	op := &AuthorizeOp{}
+	op.Init()
+
+	newToken, err := GetToken(ExpandAndConnectZones)
+	if err != nil {
+		return nil, terminal.ErrInternalError.With("failed to get access token: %w", err)
+	}
+
+	tErr := t.StartOperation(op, container.New(newToken.Raw()), 10*time.Second)
+	if tErr != nil {
+		return nil, terminal.ErrInternalError.With("failed to init auth op: %w", tErr)
+	}
+
+	return op, nil
+}
+
+// checkAccessCode is the server side of the auth operation: it parses and
+// verifies the received access token and grants the resulting permission to
+// the terminal. It returns no operation, ending with an explicit ack on
+// success.
+func checkAccessCode(t terminal.Terminal, opID uint32, initData *container.Container) (terminal.Operation, *terminal.Error) {
+	// Parse provided access token.
+	receivedToken, err := token.ParseRawToken(initData.CompileData())
+	if err != nil {
+		return nil, terminal.ErrMalformedData.With("failed to parse access token: %w", err)
+	}
+
+	// Check if token is valid.
+	granted, err := VerifyToken(receivedToken)
+	if err != nil {
+		return nil, terminal.ErrPermissionDenied.With("invalid access token: %w", err)
+	}
+
+	// Get the authorizing terminal for applying the granted permission.
+	authTerm, ok := t.(terminal.AuthorizingTerminal)
+	if !ok {
+		return nil, terminal.ErrIncorrectUsage.With("terminal does not handle authorization")
+	}
+
+	// Grant permissions.
+	authTerm.GrantPermission(granted)
+	log.Debugf("spn/access: granted %s permissions via %s zone", t.FmtID(), receivedToken.Zone)
+
+	// End successfully.
+	return nil, terminal.ErrExplicitAck
+}
diff --git a/spn/access/storage.go b/spn/access/storage.go
new file mode 100644
index 00000000..fcbb7edc
--- /dev/null
+++ b/spn/access/storage.go
@@ -0,0 +1,131 @@
+package access
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"time"
+
+	"github.com/safing/portbase/database"
+	"github.com/safing/portbase/database/query"
+	"github.com/safing/portbase/database/record"
+	"github.com/safing/portbase/formats/dsd"
+	"github.com/safing/portbase/log"
+	"github.com/safing/portmaster/spn/access/token"
+)
+
+// loadTokens loads stored tokens from the database into the token handlers
+// of all persistent zones. Failures only affect the current zone; loading
+// continues with the next one.
+func loadTokens() {
+	for _, zone := range persistentZones {
+		// Get handler of zone.
+		handler, ok := token.GetHandler(zone)
+		if !ok {
+			log.Warningf("spn/access: could not find zone %s for loading tokens", zone)
+			continue
+		}
+
+		// Get data from database.
+		r, err := db.Get(fmt.Sprintf(tokenStorageKeyTemplate, zone))
+		if err != nil {
+			if errors.Is(err, database.ErrNotFound) {
+				log.Debugf("spn/access: no %s tokens to load", zone)
+			} else {
+				log.Warningf("spn/access: failed to load %s tokens: %s", zone, err)
+			}
+			continue
+		}
+
+		// Get wrapper.
+		wrapper, ok := r.(*record.Wrapper)
+		if !ok {
+			log.Warningf("spn/access: failed to parse %s tokens: expected wrapper, got %T", zone, r)
+			continue
+		}
+
+		// Load into handler.
+		err = handler.Load(wrapper.Data)
+		if err != nil {
+			log.Warningf("spn/access: failed to load %s tokens: %s", zone, err)
+			// Skip the success log below on failure.
+			continue
+		}
+		log.Infof("spn/access: loaded %d %s tokens", handler.Amount(), zone)
+	}
+}
+
+// storeTokens persists the tokens of all persistent zones to the database.
+// Zones without tokens have their possible old entry removed instead.
+// Stored tokens are marked sensitive and expire after one month.
+func storeTokens() {
+	for _, zone := range persistentZones {
+		// Get handler of zone.
+		handler, ok := token.GetHandler(zone)
+		if !ok {
+			log.Warningf("spn/access: could not find zone %s for storing tokens", zone)
+			continue
+		}
+
+		// Generate storage key.
+		storageKey := fmt.Sprintf(tokenStorageKeyTemplate, zone)
+
+		// Check if there is data to save.
+		amount := handler.Amount()
+		if amount == 0 {
+			// Remove possible old entry from database.
+			// A missing entry is expected and not worth a warning.
+			err := db.Delete(storageKey)
+			if err != nil && !errors.Is(err, database.ErrNotFound) {
+				log.Warningf("spn/access: failed to delete possible old %s tokens from storage: %s", zone, err)
+			}
+			log.Debugf("spn/access: no %s tokens to store", zone)
+			continue
+		}
+
+		// Export data.
+		data, err := handler.Save()
+		if err != nil {
+			log.Warningf("spn/access: failed to export %s tokens for storing: %s", zone, err)
+			continue
+		}
+
+		// Wrap data into raw record.
+		r, err := record.NewWrapper(storageKey, nil, dsd.RAW, data)
+		if err != nil {
+			log.Warningf("spn/access: failed to prepare %s token export for storing: %s", zone, err)
+			continue
+		}
+
+		// Let tokens expire after one month.
+		// This will regularly happen when we switch zones.
+		r.UpdateMeta()
+		r.Meta().MakeSecret()
+		r.Meta().MakeCrownJewel()
+		r.Meta().SetRelativateExpiry(30 * 86400)
+
+		// Save to database.
+		err = db.Put(r)
+		if err != nil {
+			log.Warningf("spn/access: failed to store %s tokens: %s", zone, err)
+			continue
+		}
+
+		log.Infof("spn/access: stored %d %s tokens", amount, zone)
+	}
+}
+
+// clearTokens removes all tokens from the in-memory handlers of the
+// persistent zones and purges their database storage (with a 10s timeout).
+func clearTokens() {
+	for _, zone := range persistentZones {
+		// Get handler of zone.
+		handler, ok := token.GetHandler(zone)
+		if !ok {
+			log.Warningf("spn/access: could not find zone %s for clearing tokens", zone)
+			continue
+		}
+
+		// Clear tokens.
+		handler.Clear()
+	}
+
+	// Purge database storage prefix.
+	// The empty zone name turns the key template into the common prefix.
+	ctx, cancel := context.WithTimeout(module.Ctx, 10*time.Second)
+	defer cancel()
+	n, err := db.Purge(ctx, query.New(fmt.Sprintf(tokenStorageKeyTemplate, "")))
+	if err != nil {
+		log.Warningf("spn/access: failed to clear token storages: %s", err)
+		return
+	}
+	log.Infof("spn/access: cleared %d token storages", n)
+}
diff --git a/spn/access/token/errors.go b/spn/access/token/errors.go
new file mode 100644
index 00000000..b19fbb28
--- /dev/null
+++ b/spn/access/token/errors.go
@@ -0,0 +1,15 @@
+package token
+
+import "errors"
+
+// Errors returned by token handlers and the handler registry.
+var (
+	ErrEmpty          = errors.New("token storage is empty")
+	ErrNoZone         = errors.New("no zone specified")
+	ErrTokenInvalid   = errors.New("token is invalid")
+	ErrTokenMalformed = errors.New("token malformed")
+	ErrTokenUsed      = errors.New("token already used")
+	ErrZoneMismatch   = errors.New("zone mismatch")
+	ErrZoneTaken      = errors.New("zone taken")
+	ErrZoneUnknown    = errors.New("zone unknown")
+)
diff --git a/spn/access/token/module_test.go b/spn/access/token/module_test.go
new file mode 100644
index 00000000..bb79d76f
--- /dev/null
+++ b/spn/access/token/module_test.go
@@ -0,0 +1,13 @@
+package token
+
+import (
+	"testing"
+
+	"github.com/safing/portbase/modules"
+	"github.com/safing/portmaster/service/core/pmtesting"
+)
+
+// TestMain registers a minimal "token" module (depending only on the rng
+// module) and runs the package tests within the portmaster test harness.
+func TestMain(m *testing.M) {
+	module := modules.Register("token", nil, nil, nil, "rng")
+	pmtesting.TestMain(m, module)
+}
diff --git a/spn/access/token/pblind.go b/spn/access/token/pblind.go
new file mode 100644
index 00000000..71f137a3
--- /dev/null
+++ b/spn/access/token/pblind.go
@@ -0,0 +1,552 @@
+package token
+
+import (
+	"crypto/elliptic"
+	"crypto/rand"
+	"errors"
+	"fmt"
+	"math"
+	"math/big"
+	mrand "math/rand"
+	"sync"
+
+	"github.com/mr-tron/base58"
+	"github.com/rot256/pblind"
+
+	"github.com/safing/portbase/container"
+	"github.com/safing/portbase/formats/dsd"
+)
+
+// pblindSecretSize is the byte size of the random secret of each token.
+const pblindSecretSize = 32
+
+// PBlindToken is token based on the pblind library.
+// Token holds the random secret, Signature the issuer signature over it;
+// Serial is only set when the handler uses serials.
+type PBlindToken struct {
+	Serial    int               `json:"N,omitempty"`
+	Token     []byte            `json:"T,omitempty"`
+	Signature *pblind.Signature `json:"S,omitempty"`
+}
+
+// Pack packs the token.
+func (pbt *PBlindToken) Pack() ([]byte, error) {
+	return dsd.Dump(pbt, dsd.CBOR)
+}
+
+// UnpackPBlindToken deserializes a packed pblind token.
+func UnpackPBlindToken(token []byte) (*PBlindToken, error) {
+	unpacked := &PBlindToken{}
+	if _, err := dsd.Load(token, unpacked); err != nil {
+		return nil, err
+	}
+	return unpacked, nil
+}
+
+// PBlindHandler is a handler for the pblind tokens.
+type PBlindHandler struct {
+	sync.Mutex
+	opts *PBlindOptions
+
+	// publicKey is always set; privateKey is only set on the issuing side.
+	publicKey  *pblind.PublicKey
+	privateKey *pblind.SecretKey
+
+	// storageLock guards Storage.
+	storageLock sync.Mutex
+	Storage     []*PBlindToken
+
+	// Client request state, guarded by requestStateLock.
+	requestStateLock sync.Mutex
+	requestState     []RequestState
+}
+
+// PBlindOptions are options for the PBlindHandler.
+// Either Curve or CurveName must be set, and either PrivateKey (issuer)
+// or PublicKey (client/verifier) must be supplied.
+type PBlindOptions struct {
+	Zone                  string
+	CurveName             string
+	Curve                 elliptic.Curve
+	PublicKey             string
+	PrivateKey            string
+	BatchSize             int
+	UseSerials            bool
+	RandomizeOrder        bool
+	Fallback              bool
+	SignalShouldRequest   func(Handler)
+	DoubleSpendProtection func([]byte) error
+}
+
+// PBlindSignerState is a signer state, holding one signer per batch slot.
+type PBlindSignerState struct {
+	signers []*pblind.StateSigner
+}
+
+// PBlindSetupResponse is a setup response.
+type PBlindSetupResponse struct {
+	Msgs []*pblind.Message1
+}
+
+// PBlindTokenRequest is a token request.
+type PBlindTokenRequest struct {
+	Msgs []*pblind.Message2
+}
+
+// IssuedPBlindTokens are issued pblind tokens.
+type IssuedPBlindTokens struct {
+	Msgs []*pblind.Message3
+}
+
+// RequestState holds the client-side state of one token being requested.
+type RequestState struct {
+	Token []byte
+	State *pblind.StateRequester
+}
+
+// NewPBlindHandler creates a new pblind handler.
+// The curve must be given either directly or by name, and either a private
+// key (issuer) or a public key (client/verifier) must be supplied.
+func NewPBlindHandler(opts PBlindOptions) (*PBlindHandler, error) {
+	pbh := &PBlindHandler{
+		opts: &opts,
+	}
+
+	// Check curve, get from name.
+	if opts.Curve == nil {
+		switch opts.CurveName {
+		case "P-256":
+			opts.Curve = elliptic.P256()
+		case "P-384":
+			opts.Curve = elliptic.P384()
+		case "P-521":
+			opts.Curve = elliptic.P521()
+		default:
+			return nil, errors.New("no curve supplied")
+		}
+	} else if opts.CurveName != "" {
+		return nil, errors.New("both curve and curve name supplied")
+	}
+
+	// Load keys.
+	switch {
+	case pbh.opts.PrivateKey != "":
+		keyData, err := base58.Decode(pbh.opts.PrivateKey)
+		if err != nil {
+			return nil, fmt.Errorf("failed to decode private key: %w", err)
+		}
+		privateKey := pblind.SecretKeyFromBytes(pbh.opts.Curve, keyData)
+		pbh.privateKey = &privateKey
+		publicKey := pbh.privateKey.GetPublicKey()
+		pbh.publicKey = &publicKey
+
+		// Check public key if also provided.
+		if pbh.opts.PublicKey != "" {
+			if pbh.opts.PublicKey != base58.Encode(pbh.publicKey.Bytes()) {
+				return nil, errors.New("private and public mismatch")
+			}
+		}
+
+	case pbh.opts.PublicKey != "":
+		keyData, err := base58.Decode(pbh.opts.PublicKey)
+		if err != nil {
+			return nil, fmt.Errorf("failed to decode public key: %w", err)
+		}
+		publicKey, err := pblind.PublicKeyFromBytes(pbh.opts.Curve, keyData)
+		if err != nil {
+			return nil, fmt.Errorf("failed to decode public key: %w", err)
+		}
+		pbh.publicKey = &publicKey
+
+	default:
+		return nil, errors.New("no key supplied")
+	}
+
+	return pbh, nil
+}
+
+// makeInfo builds the public metadata point that binds a signature to this
+// handler's zone (and, when enabled, to the given batch serial).
+func (pbh *PBlindHandler) makeInfo(serial int) (*pblind.Info, error) {
+	// Gather data for info.
+	infoData := container.New()
+	infoData.AppendAsBlock([]byte(pbh.opts.Zone))
+	if pbh.opts.UseSerials {
+		infoData.AppendInt(serial)
+	}
+
+	// Compress to point.
+	info, err := pblind.CompressInfo(pbh.opts.Curve, infoData.CompileData())
+	if err != nil {
+		return nil, fmt.Errorf("failed to compress info: %w", err)
+	}
+
+	return &info, nil
+}
+
+// Zone returns the zone name.
+func (pbh *PBlindHandler) Zone() string {
+	return pbh.opts.Zone
+}
+
+// ShouldRequest returns whether the new tokens should be requested.
+// It is the lock-acquiring wrapper around shouldRequest.
+func (pbh *PBlindHandler) ShouldRequest() bool {
+	pbh.storageLock.Lock()
+	defer pbh.storageLock.Unlock()
+
+	return pbh.shouldRequest()
+}
+
+// shouldRequest reports whether the storage is at or below 10% of the
+// batch size. Callers must hold storageLock.
+func (pbh *PBlindHandler) shouldRequest() bool {
+	// Compare via multiplication: the previous integer division
+	// (BatchSize/len > 10) truncated and only triggered well below 10%
+	// (eg. 1000/99 == 10, which did not trigger).
+	return len(pbh.Storage)*10 <= pbh.opts.BatchSize
+}
+
+// Amount returns the current amount of tokens in this handler.
+func (pbh *PBlindHandler) Amount() int {
+	pbh.storageLock.Lock()
+	defer pbh.storageLock.Unlock()
+
+	return len(pbh.Storage)
+}
+
+// IsFallback returns whether this handler should only be used as a fallback.
+func (pbh *PBlindHandler) IsFallback() bool {
+	return pbh.opts.Fallback
+}
+
+// CreateSetup sets up signers for a request.
+// It may only be called on a handler that holds the private key.
+func (pbh *PBlindHandler) CreateSetup() (state *PBlindSignerState, setupResponse *PBlindSetupResponse, err error) {
+	// Guard against being called on a verify-only handler, which would
+	// otherwise panic on the nil private key below.
+	if pbh.privateKey == nil {
+		return nil, nil, errors.New("no private key for issuing")
+	}
+
+	state = &PBlindSignerState{
+		signers: make([]*pblind.StateSigner, pbh.opts.BatchSize),
+	}
+	setupResponse = &PBlindSetupResponse{
+		Msgs: make([]*pblind.Message1, pbh.opts.BatchSize),
+	}
+
+	// Go through the batch.
+	for i := 0; i < pbh.opts.BatchSize; i++ {
+		info, err := pbh.makeInfo(i + 1)
+		if err != nil {
+			return nil, nil, fmt.Errorf("failed to create info #%d: %w", i, err)
+		}
+
+		// Create signer.
+		signer, err := pblind.CreateSigner(*pbh.privateKey, *info)
+		if err != nil {
+			return nil, nil, fmt.Errorf("failed to create signer #%d: %w", i, err)
+		}
+		state.signers[i] = signer
+
+		// Create request setup.
+		setupMsg, err := signer.CreateMessage1()
+		if err != nil {
+			return nil, nil, fmt.Errorf("failed to create setup msg #%d: %w", i, err)
+		}
+		setupResponse.Msgs[i] = &setupMsg
+	}
+
+	return state, setupResponse, nil
+}
+
+// CreateTokenRequest creates a token request to be sent to the token server.
+// It resets and repopulates the handler's client request state, which is
+// later consumed by ProcessIssuedTokens.
+func (pbh *PBlindHandler) CreateTokenRequest(requestSetup *PBlindSetupResponse) (request *PBlindTokenRequest, err error) {
+	// Check request setup data.
+	if len(requestSetup.Msgs) != pbh.opts.BatchSize {
+		return nil, fmt.Errorf("invalid request setup msg count of %d", len(requestSetup.Msgs))
+	}
+
+	// Lock and reset the request state.
+	pbh.requestStateLock.Lock()
+	defer pbh.requestStateLock.Unlock()
+	pbh.requestState = make([]RequestState, pbh.opts.BatchSize)
+	request = &PBlindTokenRequest{
+		Msgs: make([]*pblind.Message2, pbh.opts.BatchSize),
+	}
+
+	// Go through the batch.
+	for i := 0; i < pbh.opts.BatchSize; i++ {
+		// Check if we have setup data.
+		if requestSetup.Msgs[i] == nil {
+			return nil, fmt.Errorf("missing setup data #%d", i)
+		}
+
+		// Generate secret token.
+		token := make([]byte, pblindSecretSize)
+		n, err := rand.Read(token) //nolint:gosec // False positive - check the imports.
+		if err != nil {
+			return nil, fmt.Errorf("failed to get random token #%d: %w", i, err)
+		}
+		if n != pblindSecretSize {
+			return nil, fmt.Errorf("failed to get full random token #%d: only got %d bytes", i, n)
+		}
+		pbh.requestState[i].Token = token
+
+		// Create public metadata.
+		info, err := pbh.makeInfo(i + 1)
+		if err != nil {
+			return nil, fmt.Errorf("failed to make token info #%d: %w", i, err)
+		}
+
+		// Create request and request state.
+		requester, err := pblind.CreateRequester(*pbh.publicKey, *info, token)
+		if err != nil {
+			return nil, fmt.Errorf("failed to create request state #%d: %w", i, err)
+		}
+		pbh.requestState[i].State = requester
+
+		err = requester.ProcessMessage1(*requestSetup.Msgs[i])
+		if err != nil {
+			return nil, fmt.Errorf("failed to process setup message #%d: %w", i, err)
+		}
+
+		// Create request message.
+		requestMsg, err := requester.CreateMessage2()
+		if err != nil {
+			return nil, fmt.Errorf("failed to create request message #%d: %w", i, err)
+		}
+		request.Msgs[i] = &requestMsg
+	}
+
+	return request, nil
+}
+
+// IssueTokens signs the requested tokens.
+func (pbh *PBlindHandler) IssueTokens(state *PBlindSignerState, request *PBlindTokenRequest) (response *IssuedPBlindTokens, err error) {
+	// Check request data.
+	if len(request.Msgs) != pbh.opts.BatchSize {
+		return nil, fmt.Errorf("invalid request msg count of %d", len(request.Msgs))
+	}
+	if len(state.signers) != pbh.opts.BatchSize {
+		// Report the signer state count, not the request msg count.
+		return nil, fmt.Errorf("invalid request state count of %d", len(state.signers))
+	}
+
+	// Create response.
+	response = &IssuedPBlindTokens{
+		Msgs: make([]*pblind.Message3, pbh.opts.BatchSize),
+	}
+
+	// Go through the batch.
+	for i := 0; i < pbh.opts.BatchSize; i++ {
+		// Check if we have request data.
+		if request.Msgs[i] == nil {
+			return nil, fmt.Errorf("missing request data #%d", i)
+		}
+
+		// Process request msg.
+		err = state.signers[i].ProcessMessage2(*request.Msgs[i])
+		if err != nil {
+			return nil, fmt.Errorf("failed to process request msg #%d: %w", i, err)
+		}
+
+		// Issue token.
+		responseMsg, err := state.signers[i].CreateMessage3()
+		if err != nil {
+			return nil, fmt.Errorf("failed to issue token #%d: %w", i, err)
+		}
+		response.Msgs[i] = &responseMsg
+	}
+
+	return response, nil
+}
+
+// ProcessIssuedTokens processes the issued token from the server.
+// All signatures are finalized and verified before any token is added to
+// storage, so a faulty batch is rejected as a whole.
+func (pbh *PBlindHandler) ProcessIssuedTokens(issuedTokens *IssuedPBlindTokens) error {
+	// Check data.
+	if len(issuedTokens.Msgs) != pbh.opts.BatchSize {
+		return fmt.Errorf("invalid issued token count of %d", len(issuedTokens.Msgs))
+	}
+
+	// Step 1: Process issued tokens.
+
+	// Lock and reset the request state.
+	pbh.requestStateLock.Lock()
+	defer pbh.requestStateLock.Unlock()
+	defer func() {
+		pbh.requestState = make([]RequestState, pbh.opts.BatchSize)
+	}()
+
+	// Ensure a matching request state exists (ie. CreateTokenRequest ran),
+	// as the indexed accesses below would otherwise panic.
+	if len(pbh.requestState) != pbh.opts.BatchSize {
+		return errors.New("no matching request state")
+	}
+
+	finalizedTokens := make([]*PBlindToken, pbh.opts.BatchSize)
+
+	// Go through the batch.
+	for i := 0; i < pbh.opts.BatchSize; i++ {
+		// Check that we have issued token data and a request state, as the
+		// server response is untrusted and must not trigger a nil deref.
+		if issuedTokens.Msgs[i] == nil {
+			return fmt.Errorf("missing issued token #%d", i)
+		}
+		if pbh.requestState[i].State == nil {
+			return fmt.Errorf("missing request state #%d", i)
+		}
+
+		// Finalize token.
+		err := pbh.requestState[i].State.ProcessMessage3(*issuedTokens.Msgs[i])
+		if err != nil {
+			return fmt.Errorf("failed to create final signature #%d: %w", i, err)
+		}
+
+		// Get and check final signature.
+		signature, err := pbh.requestState[i].State.Signature()
+		if err != nil {
+			return fmt.Errorf("failed to create final signature #%d: %w", i, err)
+		}
+		info, err := pbh.makeInfo(i + 1)
+		if err != nil {
+			return fmt.Errorf("failed to make token info #%d: %w", i, err)
+		}
+		if !pbh.publicKey.Check(signature, *info, pbh.requestState[i].Token) {
+			return fmt.Errorf("invalid signature on #%d", i)
+		}
+
+		// Save to temporary slice.
+		newToken := &PBlindToken{
+			Token:     pbh.requestState[i].Token,
+			Signature: &signature,
+		}
+		if pbh.opts.UseSerials {
+			newToken.Serial = i + 1
+		}
+		finalizedTokens[i] = newToken
+	}
+
+	// Step 2: Randomize received tokens
+
+	if pbh.opts.RandomizeOrder {
+		rInt, err := rand.Int(rand.Reader, big.NewInt(math.MaxInt64))
+		if err != nil {
+			return fmt.Errorf("failed to get seed for shuffle: %w", err)
+		}
+		mr := mrand.New(mrand.NewSource(rInt.Int64())) //nolint:gosec
+		mr.Shuffle(len(finalizedTokens), func(i, j int) {
+			finalizedTokens[i], finalizedTokens[j] = finalizedTokens[j], finalizedTokens[i]
+		})
+	}
+
+	// Step 3: Add tokens to storage.
+
+	// Wait for all processing to be complete, as using tokens from a faulty
+	// batch can be dangerous, as the server could be doing this purposely to
+	// create conditions that may benefit an attacker.
+
+	pbh.storageLock.Lock()
+	defer pbh.storageLock.Unlock()
+
+	// Add finalized tokens to storage.
+	pbh.Storage = append(pbh.Storage, finalizedTokens...)
+
+	return nil
+}
+
+// GetToken returns a token and removes it from storage.
+// It returns ErrEmpty when no tokens are available and may trigger the
+// SignalShouldRequest callback when supply runs low.
+func (pbh *PBlindHandler) GetToken() (token *Token, err error) {
+	pbh.storageLock.Lock()
+	defer pbh.storageLock.Unlock()
+
+	// Check if we have supply.
+	if len(pbh.Storage) == 0 {
+		return nil, ErrEmpty
+	}
+
+	// Pack token.
+	data, err := pbh.Storage[0].Pack()
+	if err != nil {
+		return nil, fmt.Errorf("failed to pack token: %w", err)
+	}
+
+	// Shift to next token.
+	pbh.Storage = pbh.Storage[1:]
+
+	// Check if we should signal that we should request tokens.
+	if pbh.opts.SignalShouldRequest != nil && pbh.shouldRequest() {
+		pbh.opts.SignalShouldRequest(pbh)
+	}
+
+	return &Token{
+		Zone: pbh.opts.Zone,
+		Data: data,
+	}, nil
+}
+
+// Verify verifies the given token.
+// The token is untrusted input, so all fields are validated before use.
+func (pbh *PBlindHandler) Verify(token *Token) error {
+	// Check if zone matches.
+	if token.Zone != pbh.opts.Zone {
+		return ErrZoneMismatch
+	}
+
+	// Unpack token.
+	t, err := UnpackPBlindToken(token.Data)
+	if err != nil {
+		return fmt.Errorf("%w: %w", ErrTokenMalformed, err)
+	}
+
+	// Ensure required fields are present, as dereferencing a nil signature
+	// below would panic on malicious input.
+	if t.Signature == nil || len(t.Token) == 0 {
+		return fmt.Errorf("%w: missing token data or signature", ErrTokenMalformed)
+	}
+
+	// Check if serial is valid.
+	switch {
+	case pbh.opts.UseSerials && t.Serial > 0 && t.Serial <= pbh.opts.BatchSize:
+		// Using serials in accepted range.
+	case !pbh.opts.UseSerials && t.Serial == 0:
+		// Not using serials and serial is zero.
+	default:
+		return fmt.Errorf("%w: invalid serial", ErrTokenMalformed)
+	}
+
+	// Build info for checking signature.
+	info, err := pbh.makeInfo(t.Serial)
+	if err != nil {
+		return fmt.Errorf("%w: %w", ErrTokenMalformed, err)
+	}
+
+	// Check signature.
+	if !pbh.publicKey.Check(*t.Signature, *info, t.Token) {
+		return ErrTokenInvalid
+	}
+
+	// Check for double spending.
+	if pbh.opts.DoubleSpendProtection != nil {
+		if err := pbh.opts.DoubleSpendProtection(t.Token); err != nil {
+			return fmt.Errorf("%w: %w", ErrTokenUsed, err)
+		}
+	}
+
+	return nil
+}
+
+// PBlindStorage is a storage for pblind tokens.
+type PBlindStorage struct {
+	Storage []*PBlindToken
+}
+
+// Save serializes and returns the current tokens.
+// It returns ErrEmpty when there is nothing to store.
+func (pbh *PBlindHandler) Save() ([]byte, error) {
+	pbh.storageLock.Lock()
+	defer pbh.storageLock.Unlock()
+
+	if len(pbh.Storage) == 0 {
+		return nil, ErrEmpty
+	}
+
+	s := &PBlindStorage{
+		Storage: pbh.Storage,
+	}
+
+	return dsd.Dump(s, dsd.CBOR)
+}
+
+// Load loads the given tokens into the handler, replacing current storage.
+// All signatures are verified before the storage is swapped in.
+func (pbh *PBlindHandler) Load(data []byte) error {
+	pbh.storageLock.Lock()
+	defer pbh.storageLock.Unlock()
+
+	s := &PBlindStorage{}
+	_, err := dsd.Load(data, s)
+	if err != nil {
+		return err
+	}
+
+	// Check signatures on load.
+	for _, t := range s.Storage {
+		// Reject malformed entries first, as dereferencing a nil signature
+		// below would panic.
+		if t == nil || t.Signature == nil {
+			return ErrTokenInvalid
+		}
+
+		// Build info for checking signature.
+		info, err := pbh.makeInfo(t.Serial)
+		if err != nil {
+			return err
+		}
+
+		// Check signature.
+		if !pbh.publicKey.Check(*t.Signature, *info, t.Token) {
+			return ErrTokenInvalid
+		}
+	}
+
+	pbh.Storage = s.Storage
+	return nil
+}
+
+// Clear removes all stored tokens from the handler.
+func (pbh *PBlindHandler) Clear() {
+	pbh.storageLock.Lock()
+	pbh.Storage = nil
+	pbh.storageLock.Unlock()
+}
diff --git a/spn/access/token/pblind_gen_test.go b/spn/access/token/pblind_gen_test.go
new file mode 100644
index 00000000..416213ae
--- /dev/null
+++ b/spn/access/token/pblind_gen_test.go
@@ -0,0 +1,39 @@
+package token
+
+import (
+	"crypto/elliptic"
+	"fmt"
+	"testing"
+
+	"github.com/mr-tron/base58"
+	"github.com/rot256/pblind"
+)
+
+// TestGeneratePBlindKeys is a developer helper that generates and prints a
+// fresh base58-encoded key pair for each supported curve.
+func TestGeneratePBlindKeys(t *testing.T) {
+	t.Parallel()
+
+	for _, curve := range []elliptic.Curve{
+		elliptic.P256(),
+		elliptic.P384(),
+		elliptic.P521(),
+	} {
+		privateKey, err := pblind.NewSecretKey(curve)
+		if err != nil {
+			t.Fatal(err)
+		}
+		publicKey := privateKey.GetPublicKey()
+
+		fmt.Printf(
+			"%s (%dbit) private key: %s\n",
+			curve.Params().Name,
+			curve.Params().BitSize,
+			base58.Encode(privateKey.Bytes()),
+		)
+		fmt.Printf(
+			"%s (%dbit) public key: %s\n",
+			curve.Params().Name,
+			curve.Params().BitSize,
+			base58.Encode(publicKey.Bytes()),
+		)
+	}
+}
diff --git a/spn/access/token/pblind_test.go b/spn/access/token/pblind_test.go
new file mode 100644
index 00000000..b25ac71b
--- /dev/null
+++ b/spn/access/token/pblind_test.go
@@ -0,0 +1,260 @@
+package token
+
+import (
+	"crypto/elliptic"
+	"encoding/asn1"
+	"testing"
+	"time"
+
+	"github.com/rot256/pblind"
+)
+
+// PBlindTestZone is the zone name used by the pblind tests.
+const PBlindTestZone = "test-pblind"
+
+// init registers a combined issuer/verifier test handler, as other package
+// tests (eg. the request roundtrip test) rely on it being present.
+func init() {
+	// Combined testing config.
+
+	h, err := NewPBlindHandler(PBlindOptions{
+		Zone:           PBlindTestZone,
+		Curve:          elliptic.P256(),
+		PrivateKey:     "HbwGtLsqek1Fdwuz1MhNQfiY7tj9EpWHeMWHPZ9c6KYY",
+		UseSerials:     true,
+		BatchSize:      1000,
+		RandomizeOrder: true,
+	})
+	if err != nil {
+		panic(err)
+	}
+
+	err = RegisterPBlindHandler(h)
+	if err != nil {
+		panic(err)
+	}
+}
+
+// TestPBlind plays through the full issue/request/verify cycle using three
+// separate handlers: issuer (private key), client and verifier (public key).
+func TestPBlind(t *testing.T) {
+	t.Parallel()
+
+	opts := &PBlindOptions{
+		Zone:           PBlindTestZone,
+		Curve:          elliptic.P256(),
+		UseSerials:     true,
+		BatchSize:      1000,
+		RandomizeOrder: true,
+	}
+
+	// Issuer
+	opts.PrivateKey = "HbwGtLsqek1Fdwuz1MhNQfiY7tj9EpWHeMWHPZ9c6KYY"
+	issuer, err := NewPBlindHandler(*opts)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Client
+	opts.PrivateKey = ""
+	opts.PublicKey = "285oMDh3w5mxyFgpmmURifKfhkcqwwsdnePpPZ6Nqm8cc"
+	client, err := NewPBlindHandler(*opts)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Verifier
+	verifier, err := NewPBlindHandler(*opts)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Play through the whole use case.
+
+	signerState, setupResponse, err := issuer.CreateSetup()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	request, err := client.CreateTokenRequest(setupResponse)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	issuedTokens, err := issuer.IssueTokens(signerState, request)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	err = client.ProcessIssuedTokens(issuedTokens)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	token, err := client.GetToken()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	err = verifier.Verify(token)
+	if err != nil {
+		t.Fatal(err)
+	}
+}
+
+// TestPBlindLibrary exercises the raw pblind library message flow
+// (Message1..3 with ASN.1 serialization between each step) for a whole
+// batch and logs rough timing and size numbers for each phase.
+func TestPBlindLibrary(t *testing.T) {
+	t.Parallel()
+
+	// generate a key-pair
+
+	curve := elliptic.P256()
+
+	sk, _ := pblind.NewSecretKey(curve)
+	pk := sk.GetPublicKey()
+
+	msgStr := []byte("128b_accesstoken")
+	infoStr := []byte("v=1 serial=12345")
+	info, err := pblind.CompressInfo(curve, infoStr)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	totalStart := time.Now()
+	batchSize := 1000
+
+	signers := make([]*pblind.StateSigner, batchSize)
+	requesters := make([]*pblind.StateRequester, batchSize)
+	toServer := make([][]byte, batchSize)
+	toClient := make([][]byte, batchSize)
+
+	// Create signers and prep requests.
+	start := time.Now()
+	for i := 0; i < batchSize; i++ {
+		signer, err := pblind.CreateSigner(sk, info)
+		if err != nil {
+			t.Fatal(err)
+		}
+		signers[i] = signer
+
+		msg1S, err := signer.CreateMessage1()
+		if err != nil {
+			t.Fatal(err)
+		}
+		ser1S, err := asn1.Marshal(msg1S)
+		if err != nil {
+			t.Fatal(err)
+		}
+		toClient[i] = ser1S
+	}
+	t.Logf("created %d signers and request preps in %s", batchSize, time.Since(start))
+	t.Logf("sending %d bytes to client", lenOfByteSlices(toClient))
+
+	// Create requesters and create requests.
+	start = time.Now()
+	for i := 0; i < batchSize; i++ {
+		requester, err := pblind.CreateRequester(pk, info, msgStr)
+		if err != nil {
+			t.Fatal(err)
+		}
+		requesters[i] = requester
+
+		var msg1R pblind.Message1
+		_, err = asn1.Unmarshal(toClient[i], &msg1R)
+		if err != nil {
+			t.Fatal(err)
+		}
+		err = requester.ProcessMessage1(msg1R)
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		msg2R, err := requester.CreateMessage2()
+		if err != nil {
+			t.Fatal(err)
+		}
+		ser2R, err := asn1.Marshal(msg2R)
+		if err != nil {
+			t.Fatal(err)
+		}
+		toServer[i] = ser2R
+	}
+	t.Logf("created %d requesters and requests in %s", batchSize, time.Since(start))
+	t.Logf("sending %d bytes to server", lenOfByteSlices(toServer))
+
+	// Sign requests
+	start = time.Now()
+	for i := 0; i < batchSize; i++ {
+		var msg2S pblind.Message2
+		_, err = asn1.Unmarshal(toServer[i], &msg2S)
+		if err != nil {
+			t.Fatal(err)
+		}
+		err = signers[i].ProcessMessage2(msg2S)
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		msg3S, err := signers[i].CreateMessage3()
+		if err != nil {
+			t.Fatal(err)
+		}
+		ser3S, err := asn1.Marshal(msg3S)
+		if err != nil {
+			t.Fatal(err)
+		}
+		toClient[i] = ser3S
+	}
+	t.Logf("signed %d requests in %s", batchSize, time.Since(start))
+	t.Logf("sending %d bytes to client", lenOfByteSlices(toClient))
+
+	// Verify signed requests
+	start = time.Now()
+	for i := 0; i < batchSize; i++ {
+		var msg3R pblind.Message3
+		_, err := asn1.Unmarshal(toClient[i], &msg3R)
+		if err != nil {
+			t.Fatal(err)
+		}
+		err = requesters[i].ProcessMessage3(msg3R)
+		if err != nil {
+			t.Fatal(err)
+		}
+		signature, err := requesters[i].Signature()
+		if err != nil {
+			t.Fatal(err)
+		}
+		sig, err := asn1.Marshal(signature)
+		if err != nil {
+			t.Fatal(err)
+		}
+		toServer[i] = sig
+
+		// check signature
+		if !pk.Check(signature, info, msgStr) {
+			t.Fatal("signature invalid")
+		}
+	}
+	t.Logf("finalized and verified %d signed tokens in %s", batchSize, time.Since(start))
+	t.Logf("stored %d signed tokens in %d bytes", batchSize, lenOfByteSlices(toServer))
+
+	// Verify on server
+	start = time.Now()
+	for i := 0; i < batchSize; i++ {
+		var sig pblind.Signature
+		_, err := asn1.Unmarshal(toServer[i], &sig)
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		// check signature
+		if !pk.Check(sig, info, msgStr) {
+			t.Fatal("signature invalid")
+		}
+	}
+	t.Logf("verified %d signed tokens in %s", batchSize, time.Since(start))
+
+	t.Logf("process complete")
+	t.Logf("simulated the whole process for %d tokens in %s", batchSize, time.Since(totalStart))
+}
+
+// lenOfByteSlices returns the total number of bytes across all slices.
+func lenOfByteSlices(v [][]byte) int {
+	total := 0
+	for _, part := range v {
+		total += len(part)
+	}
+	return total
+}
diff --git a/spn/access/token/registry.go b/spn/access/token/registry.go
new file mode 100644
index 00000000..d20ec6f0
--- /dev/null
+++ b/spn/access/token/registry.go
@@ -0,0 +1,116 @@
+package token
+
+import "sync"
+
+// Handler represents a token handling system.
+// Implementations must be safe for concurrent use.
+type Handler interface {
+	// Zone returns the zone name.
+	Zone() string
+
+	// ShouldRequest returns whether the new tokens should be requested.
+	ShouldRequest() bool
+
+	// Amount returns the current amount of tokens in this handler.
+	Amount() int
+
+	// IsFallback returns whether this handler should only be used as a fallback.
+	IsFallback() bool
+
+	// GetToken returns a token.
+	GetToken() (token *Token, err error)
+
+	// Verify verifies the given token.
+	Verify(token *Token) error
+
+	// Save serializes and returns the current tokens.
+	Save() ([]byte, error)
+
+	// Load loads the given tokens into the handler.
+	Load(data []byte) error
+
+	// Clear clears all the tokens in the handler.
+	Clear()
+}
+
+var (
+	// registry maps zone names to their handlers; the typed registries
+	// below additionally hold the handlers by concrete type.
+	registry         map[string]Handler
+	pblindRegistry   []*PBlindHandler
+	scrambleRegistry []*ScrambleHandler
+
+	// registryLock guards all registry variables above.
+	registryLock sync.RWMutex
+)
+
+func init() {
+	initRegistry()
+}
+
+// initRegistry (re-)creates the empty registry state.
+// Callers must hold registryLock (except during package init).
+func initRegistry() {
+	registry = make(map[string]Handler)
+	pblindRegistry = make([]*PBlindHandler, 0, 1)
+	scrambleRegistry = make([]*ScrambleHandler, 0, 1)
+}
+
+// RegisterPBlindHandler registers a pblind handler with the registry.
+// It returns ErrNoZone or ErrZoneTaken on invalid or duplicate zones.
+func RegisterPBlindHandler(h *PBlindHandler) error {
+	registryLock.Lock()
+	defer registryLock.Unlock()
+
+	if err := registerHandler(h, h.opts.Zone); err != nil {
+		return err
+	}
+
+	pblindRegistry = append(pblindRegistry, h)
+	return nil
+}
+
+// RegisterScrambleHandler registers a scramble handler with the registry.
+// It returns ErrNoZone or ErrZoneTaken on invalid or duplicate zones.
+func RegisterScrambleHandler(h *ScrambleHandler) error {
+	registryLock.Lock()
+	defer registryLock.Unlock()
+
+	if err := registerHandler(h, h.opts.Zone); err != nil {
+		return err
+	}
+
+	scrambleRegistry = append(scrambleRegistry, h)
+	return nil
+}
+
+// registerHandler adds the handler to the zone registry.
+// Callers must hold registryLock.
+func registerHandler(h Handler, zone string) error {
+	if zone == "" {
+		return ErrNoZone
+	}
+	if _, exists := registry[zone]; exists {
+		return ErrZoneTaken
+	}
+
+	registry[zone] = h
+	return nil
+}
+
+// GetHandler returns the handler of the given zone.
+func GetHandler(zone string) (Handler, bool) {
+	registryLock.RLock()
+	defer registryLock.RUnlock()
+
+	h, ok := registry[zone]
+	return h, ok
+}
+
+// ResetRegistry resets the token handler registry.
+// It is primarily meant for testing.
+func ResetRegistry() {
+	registryLock.Lock()
+	defer registryLock.Unlock()
+
+	initRegistry()
+}
+
+// RegistrySize returns the amount of handler registered.
+func RegistrySize() int {
+	// A read lock suffices for reading the map size, matching GetHandler.
+	registryLock.RLock()
+	defer registryLock.RUnlock()
+
+	return len(registry)
+}
diff --git a/spn/access/token/request.go b/spn/access/token/request.go
new file mode 100644
index 00000000..70e9422a
--- /dev/null
+++ b/spn/access/token/request.go
@@ -0,0 +1,244 @@
+package token
+
+import (
+	"crypto/rand"
+	"errors"
+	"fmt"
+
+	"github.com/mr-tron/base58"
+)
+
+// sessionIDSize is the byte size of the random session ID.
+const sessionIDSize = 32
+
+// RequestHandlingState is a request handling state, keyed by zone.
+type RequestHandlingState struct {
+	SessionID string
+	PBlind    map[string]*PBlindSignerState
+}
+
+// SetupRequest is a setup request, listing the zones that need setup.
+type SetupRequest struct {
+	PBlind map[string]struct{} `json:"PB,omitempty"`
+}
+
+// SetupResponse is a setup response, keyed by zone.
+type SetupResponse struct {
+	SessionID string                          `json:"ID,omitempty"`
+	PBlind    map[string]*PBlindSetupResponse `json:"PB,omitempty"`
+}
+
+// TokenRequest is a token request, keyed by zone.
+type TokenRequest struct { //nolint:golint // Be explicit.
+	SessionID string                           `json:"ID,omitempty"`
+	PBlind    map[string]*PBlindTokenRequest   `json:"PB,omitempty"`
+	Scramble  map[string]*ScrambleTokenRequest `json:"S,omitempty"`
+}
+
+// IssuedTokens are issued tokens, keyed by zone.
+type IssuedTokens struct {
+	PBlind   map[string]*IssuedPBlindTokens   `json:"PB,omitempty"`
+	Scramble map[string]*IssuedScrambleTokens `json:"SC,omitempty"`
+}
+
+// CreateSetupRequest creates a combined setup request for all registered tokens, if needed.
+// Only pblind handlers require a setup phase; scramble handlers do not.
+func CreateSetupRequest() (request *SetupRequest, setupRequired bool) {
+	registryLock.RLock()
+	defer registryLock.RUnlock()
+
+	request = &SetupRequest{
+		PBlind: make(map[string]struct{}, len(pblindRegistry)),
+	}
+
+	// Go through handlers and create request setups.
+	for _, pblindHandler := range pblindRegistry {
+		// Check if we need to request with this handler.
+		if pblindHandler.ShouldRequest() {
+			request.PBlind[pblindHandler.Zone()] = struct{}{}
+			setupRequired = true
+		}
+	}
+
+	return
+}
+
+// HandleSetupRequest handles a setup request for all registered tokens.
+// It creates a fresh session ID and per-zone signer state that must be kept
+// for the matching IssueTokens call.
+func HandleSetupRequest(request *SetupRequest) (*RequestHandlingState, *SetupResponse, error) {
+	registryLock.RLock()
+	defer registryLock.RUnlock()
+
+	// Generate session token.
+	randomID := make([]byte, sessionIDSize)
+	n, err := rand.Read(randomID)
+	if err != nil {
+		return nil, nil, fmt.Errorf("failed to generate session ID: %w", err)
+	}
+	if n != sessionIDSize {
+		return nil, nil, fmt.Errorf("failed to get full session ID: only got %d bytes", n)
+	}
+	sessionID := base58.Encode(randomID)
+
+	// Create state and response.
+	state := &RequestHandlingState{
+		SessionID: sessionID,
+		PBlind:    make(map[string]*PBlindSignerState, len(pblindRegistry)),
+	}
+	setup := &SetupResponse{
+		SessionID: sessionID,
+		PBlind:    make(map[string]*PBlindSetupResponse, len(pblindRegistry)),
+	}
+
+	// Go through handlers and create setups.
+	for _, pblindHandler := range pblindRegistry {
+		// Check if we have a request for this handler.
+		_, ok := request.PBlind[pblindHandler.Zone()]
+		if !ok {
+			continue
+		}
+
+		pblindState, pblindSetup, err := pblindHandler.CreateSetup()
+		if err != nil {
+			return nil, nil, fmt.Errorf("failed to create setup for %s: %w", pblindHandler.Zone(), err)
+		}
+
+		state.PBlind[pblindHandler.Zone()] = pblindState
+		setup.PBlind[pblindHandler.Zone()] = pblindSetup
+	}
+
+	return state, setup, nil
+}
+
+// CreateTokenRequest creates a token request for all registered tokens.
+// The setup response may be nil; scramble requests do not need a setup.
+func CreateTokenRequest(setup *SetupResponse) (request *TokenRequest, requestRequired bool, err error) {
+	registryLock.RLock()
+	defer registryLock.RUnlock()
+
+	// Check setup data.
+	if setup != nil && setup.SessionID == "" {
+		return nil, false, errors.New("setup data is missing a session ID")
+	}
+
+	// Create token request.
+	request = &TokenRequest{
+		PBlind:   make(map[string]*PBlindTokenRequest, len(pblindRegistry)),
+		Scramble: make(map[string]*ScrambleTokenRequest, len(scrambleRegistry)),
+	}
+	if setup != nil {
+		request.SessionID = setup.SessionID
+	}
+
+	// Go through handlers and create requests.
+	if setup != nil {
+		for _, pblindHandler := range pblindRegistry {
+			// Check if we have setup data for this handler.
+			pblindSetup, ok := setup.PBlind[pblindHandler.Zone()]
+			if !ok {
+				// TODO: Abort if we should have received request data.
+				continue
+			}
+
+			// Create request.
+			pblindRequest, err := pblindHandler.CreateTokenRequest(pblindSetup)
+			if err != nil {
+				return nil, false, fmt.Errorf("failed to create token request for %s: %w", pblindHandler.Zone(), err)
+			}
+
+			requestRequired = true
+			request.PBlind[pblindHandler.Zone()] = pblindRequest
+		}
+	}
+	for _, scrambleHandler := range scrambleRegistry {
+		// Check if we need to request with this handler.
+		if scrambleHandler.ShouldRequest() {
+			requestRequired = true
+			request.Scramble[scrambleHandler.Zone()] = scrambleHandler.CreateTokenRequest()
+		}
+	}
+
+	return request, requestRequired, nil
+}
+
+// IssueTokens issues tokens for all registered tokens.
+// Zones missing from the state or request are silently skipped.
+func IssueTokens(state *RequestHandlingState, request *TokenRequest) (response *IssuedTokens, err error) {
+	registryLock.RLock()
+	defer registryLock.RUnlock()
+
+	// Create token response.
+	response = &IssuedTokens{
+		PBlind:   make(map[string]*IssuedPBlindTokens, len(pblindRegistry)),
+		Scramble: make(map[string]*IssuedScrambleTokens, len(scrambleRegistry)),
+	}
+
+	// Go through handlers and issue tokens.
+	for _, pblindHandler := range pblindRegistry {
+		// Check if we have all the data for issuing.
+		pblindState, ok := state.PBlind[pblindHandler.Zone()]
+		if !ok {
+			continue
+		}
+		pblindRequest, ok := request.PBlind[pblindHandler.Zone()]
+		if !ok {
+			continue
+		}
+
+		// Issue tokens.
+		pblindTokens, err := pblindHandler.IssueTokens(pblindState, pblindRequest)
+		if err != nil {
+			return nil, fmt.Errorf("failed to issue tokens for %s: %w", pblindHandler.Zone(), err)
+		}
+
+		response.PBlind[pblindHandler.Zone()] = pblindTokens
+	}
+	for _, scrambleHandler := range scrambleRegistry {
+		// Check if we have all the data for issuing.
+		scrambleRequest, ok := request.Scramble[scrambleHandler.Zone()]
+		if !ok {
+			continue
+		}
+
+		// Issue tokens.
+		scrambleTokens, err := scrambleHandler.IssueTokens(scrambleRequest)
+		if err != nil {
+			return nil, fmt.Errorf("failed to issue tokens for %s: %w", scrambleHandler.Zone(), err)
+		}
+
+		response.Scramble[scrambleHandler.Zone()] = scrambleTokens
+	}
+
+	return response, nil
+}
+
+// ProcessIssuedTokens processes issued tokens for all registered tokens.
+// Zones missing from the response are silently skipped.
+func ProcessIssuedTokens(response *IssuedTokens) error {
+	registryLock.RLock()
+	defer registryLock.RUnlock()
+
+	// Go through handlers and process their issued tokens.
+	for _, pblindHandler := range pblindRegistry {
+		// Check if we received tokens.
+		pblindResponse, ok := response.PBlind[pblindHandler.Zone()]
+		if !ok {
+			continue
+		}
+
+		// Process issued tokens.
+		err := pblindHandler.ProcessIssuedTokens(pblindResponse)
+		if err != nil {
+			return fmt.Errorf("failed to process issued tokens for %s: %w", pblindHandler.Zone(), err)
+		}
+	}
+	for _, scrambleHandler := range scrambleRegistry {
+		// Check if we received tokens.
+		scrambleResponse, ok := response.Scramble[scrambleHandler.Zone()]
+		if !ok {
+			continue
+		}
+
+		// Process issued tokens.
+		err := scrambleHandler.ProcessIssuedTokens(scrambleResponse)
+		if err != nil {
+			return fmt.Errorf("failed to process issued tokens for %s: %w", scrambleHandler.Zone(), err)
+		}
+	}
+
+	return nil
+}
diff --git a/spn/access/token/request_test.go b/spn/access/token/request_test.go
new file mode 100644
index 00000000..7040672a
--- /dev/null
+++ b/spn/access/token/request_test.go
@@ -0,0 +1,125 @@
+package token
+
+import (
+	"testing"
+	"time"
+
+	"github.com/safing/portbase/formats/dsd"
+)
+
+// TestFull simulates the complete token lifecycle between client and server:
+// setup request/response, token request, token issuing, processing of the
+// issued tokens, and finally token usage (get, serialize, parse, verify) for
+// both the pblind and scramble test zones. Every message is dumped and
+// re-loaded via DSD/CBOR to mimic going over the wire.
+func TestFull(t *testing.T) {
+	t.Parallel()
+
+	testStart := time.Now()
+
+	// Roundtrip 1
+
+	start := time.Now()
+	setupRequest, setupRequired := CreateSetupRequest()
+	if !setupRequired {
+		t.Fatal("setup should be required")
+	}
+	setupRequestData, err := dsd.Dump(setupRequest, dsd.CBOR)
+	if err != nil {
+		t.Fatal(err)
+	}
+	setupRequest = nil // nolint:ineffassign,wastedassign // Just to be sure.
+	t.Logf("setupRequest: %s, %d bytes", time.Since(start), len(setupRequestData))
+
+	start = time.Now()
+	loadedSetupRequest := &SetupRequest{}
+	_, err = dsd.Load(setupRequestData, loadedSetupRequest)
+	if err != nil {
+		t.Fatal(err)
+	}
+	serverState, setupResponse, err := HandleSetupRequest(loadedSetupRequest)
+	if err != nil {
+		t.Fatal(err)
+	}
+	setupResponseData, err := dsd.Dump(setupResponse, dsd.CBOR)
+	if err != nil {
+		t.Fatal(err)
+	}
+	setupResponse = nil // nolint:ineffassign,wastedassign // Just to be sure.
+	t.Logf("setupResponse: %s, %d bytes", time.Since(start), len(setupResponseData))
+
+	// Roundtrip 2
+
+	start = time.Now()
+	loadedSetupResponse := &SetupResponse{}
+	_, err = dsd.Load(setupResponseData, loadedSetupResponse)
+	if err != nil {
+		t.Fatal(err)
+	}
+	request, requestRequired, err := CreateTokenRequest(loadedSetupResponse)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !requestRequired {
+		t.Fatal("request should be required")
+	}
+	requestData, err := dsd.Dump(request, dsd.CBOR)
+	if err != nil {
+		t.Fatal(err)
+	}
+	request = nil // nolint:ineffassign,wastedassign // Just to be sure.
+	t.Logf("request: %s, %d bytes", time.Since(start), len(requestData))
+
+	start = time.Now()
+	loadedRequest := &TokenRequest{}
+	_, err = dsd.Load(requestData, loadedRequest)
+	if err != nil {
+		t.Fatal(err)
+	}
+	response, err := IssueTokens(serverState, loadedRequest)
+	if err != nil {
+		t.Fatal(err)
+	}
+	responseData, err := dsd.Dump(response, dsd.CBOR)
+	if err != nil {
+		t.Fatal(err)
+	}
+	response = nil // nolint:ineffassign,wastedassign // Just to be sure.
+	t.Logf("response: %s, %d bytes", time.Since(start), len(responseData))
+
+	start = time.Now()
+	loadedResponse := &IssuedTokens{}
+	_, err = dsd.Load(responseData, loadedResponse)
+	if err != nil {
+		t.Fatal(err)
+	}
+	err = ProcessIssuedTokens(loadedResponse)
+	if err != nil {
+		t.Fatal(err)
+	}
+	t.Logf("processing: %s", time.Since(start))
+
+	// Token Usage
+
+	for _, testZone := range []string{
+		PBlindTestZone,
+		ScrambleTestZone,
+	} {
+		start = time.Now()
+
+		token, err := GetToken(testZone)
+		if err != nil {
+			t.Fatal(err)
+		}
+		tokenData := token.Raw()
+		token = nil // nolint:wastedassign // Just to be sure.
+
+		loadedToken, err := ParseRawToken(tokenData)
+		if err != nil {
+			t.Fatal(err)
+		}
+		err = VerifyToken(loadedToken)
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		t.Logf("using %s token: %s", testZone, time.Since(start))
+	}
+
+	t.Logf("full simulation took %s", time.Since(testStart))
+}
diff --git a/spn/access/token/scramble.go b/spn/access/token/scramble.go
new file mode 100644
index 00000000..df96bcc6
--- /dev/null
+++ b/spn/access/token/scramble.go
@@ -0,0 +1,240 @@
+package token
+
+import (
+	"fmt"
+	"sync"
+
+	"github.com/mr-tron/base58"
+
+	"github.com/safing/jess/lhash"
+	"github.com/safing/portbase/formats/dsd"
+)
+
+const (
+	// scrambleSecretSize is the size of generated scramble secrets in bytes.
+	scrambleSecretSize = 32
+)
+
+// ScrambleToken is a token based on hashing: the secret token is verified by
+// comparing its hash against a registered verifier.
+type ScrambleToken struct {
+	// Token holds the secret token bytes.
+	Token []byte
+}
+
+// Pack packs the token into its raw wire format, which for scramble tokens
+// is simply the secret token bytes themselves.
+// Note: receiver renamed from "pbt" (copy-paste from the pblind token type)
+// to "st" for receiver-naming consistency.
+func (st *ScrambleToken) Pack() ([]byte, error) {
+	return st.Token, nil
+}
+
+// UnpackScrambleToken deserializes a raw scramble token.
+// The raw form is the token's secret bytes themselves, so no decoding is
+// necessary.
+func UnpackScrambleToken(token []byte) (*ScrambleToken, error) {
+	st := &ScrambleToken{Token: token}
+	return st, nil
+}
+
+// ScrambleHandler is a handler for the scramble tokens.
+type ScrambleHandler struct {
+	sync.Mutex
+	opts *ScrambleOptions
+
+	// storageLock guards Storage.
+	storageLock sync.Mutex
+	// Storage holds the secret tokens that can be handed out via GetToken.
+	Storage     []*ScrambleToken
+
+	// verifiersLock guards verifiers.
+	verifiersLock sync.RWMutex
+	// verifiers maps the scrambled (hashed) token bytes to their token.
+	verifiers     map[string]*ScrambleToken
+}
+
+// ScrambleOptions are options for the ScrambleHandler.
+type ScrambleOptions struct {
+	// Zone is the name of the token zone this handler serves.
+	Zone             string
+	// Algorithm is the hash algorithm used to scramble tokens.
+	Algorithm        lhash.Algorithm
+	// InitialTokens are base58-encoded secret tokens to preload into storage.
+	InitialTokens    []string
+	// InitialVerifiers are base58-encoded scrambled tokens for verification only.
+	InitialVerifiers []string
+	// Fallback marks this handler to be used only as a fallback.
+	Fallback         bool
+}
+
+// ScrambleTokenRequest is a token request. It is empty, as scramble token
+// requests carry no client state.
+type ScrambleTokenRequest struct{}
+
+// IssuedScrambleTokens are issued scrambled tokens.
+type IssuedScrambleTokens struct {
+	Tokens []*ScrambleToken
+}
+
+// NewScrambleHandler creates a new scramble handler.
+// Initial tokens are added to both the local storage and the verifiers, while
+// initial verifiers are only registered for verification.
+func NewScrambleHandler(opts ScrambleOptions) (*ScrambleHandler, error) {
+	sh := &ScrambleHandler{
+		opts:      &opts,
+		verifiers: make(map[string]*ScrambleToken, len(opts.InitialTokens)+len(opts.InitialVerifiers)),
+	}
+
+	// Add initial tokens.
+	sh.Storage = make([]*ScrambleToken, len(opts.InitialTokens))
+	for i, token := range opts.InitialTokens {
+		// Add to storage.
+		tokenData, err := base58.Decode(token)
+		if err != nil {
+			return nil, fmt.Errorf("failed to decode initial token %q: %w", token, err)
+		}
+		sh.Storage[i] = &ScrambleToken{
+			Token: tokenData,
+		}
+
+		// Add to verifiers, keyed by the hash ("scramble") of the token data.
+		scrambledToken := lhash.Digest(sh.opts.Algorithm, tokenData).Bytes()
+		sh.verifiers[string(scrambledToken)] = sh.Storage[i]
+	}
+
+	// Add initial verifiers. These are inserted as map keys directly, so they
+	// must already be in scrambled (hashed) form.
+	for _, verifier := range opts.InitialVerifiers {
+		verifierData, err := base58.Decode(verifier)
+		if err != nil {
+			return nil, fmt.Errorf("failed to decode verifier %q: %w", verifier, err)
+		}
+		sh.verifiers[string(verifierData)] = &ScrambleToken{}
+	}
+
+	return sh, nil
+}
+
+// Zone returns the name of the token zone this handler serves.
+func (sh *ScrambleHandler) Zone() string {
+	zone := sh.opts.Zone
+	return zone
+}
+
+// ShouldRequest reports whether new tokens should be requested, which is
+// the case when the handler has run out of tokens.
+func (sh *ScrambleHandler) ShouldRequest() bool {
+	sh.storageLock.Lock()
+	defer sh.storageLock.Unlock()
+
+	empty := len(sh.Storage) == 0
+	return empty
+}
+
+// Amount returns how many tokens this handler currently holds.
+func (sh *ScrambleHandler) Amount() int {
+	sh.storageLock.Lock()
+	defer sh.storageLock.Unlock()
+
+	amount := len(sh.Storage)
+	return amount
+}
+
+// IsFallback reports whether this handler is configured to be used only as
+// a fallback when regular token zones are unavailable.
+func (sh *ScrambleHandler) IsFallback() bool {
+	return sh.opts.Fallback
+}
+
+// CreateTokenRequest creates a token request to be sent to the token server.
+// Scramble token requests carry no client state, so the request is empty.
+func (sh *ScrambleHandler) CreateTokenRequest() (request *ScrambleTokenRequest) {
+	return new(ScrambleTokenRequest)
+}
+
+// IssueTokens signs the requested tokens.
+// For scramble zones the server hands out a copy of its secret tokens;
+// clients later prove possession by presenting the pre-image of a registered
+// verifier hash.
+func (sh *ScrambleHandler) IssueTokens(request *ScrambleTokenRequest) (response *IssuedScrambleTokens, err error) {
+	// Storage is guarded by storageLock everywhere else; take it here too to
+	// avoid racing with concurrent Load/Clear/ProcessIssuedTokens.
+	sh.storageLock.Lock()
+	defer sh.storageLock.Unlock()
+
+	// Copy the storage, so the returned slice is not affected by later updates.
+	tokens := make([]*ScrambleToken, len(sh.Storage))
+	copy(tokens, sh.Storage)
+
+	return &IssuedScrambleTokens{
+		Tokens: tokens,
+	}, nil
+}
+
+// ProcessIssuedTokens processes the issued tokens from the server.
+// Every received token is checked against the registered verifiers before
+// the local token storage is replaced.
+func (sh *ScrambleHandler) ProcessIssuedTokens(issuedTokens *IssuedScrambleTokens) error {
+	sh.verifiersLock.RLock()
+	defer sh.verifiersLock.RUnlock()
+
+	// Validate tokens.
+	for i, newToken := range issuedTokens.Tokens {
+		// Scramble token.
+		scrambledToken := lhash.Digest(sh.opts.Algorithm, newToken.Token).Bytes()
+
+		// Check if token is valid.
+		_, ok := sh.verifiers[string(scrambledToken)]
+		if !ok {
+			return fmt.Errorf("invalid token on #%d", i)
+		}
+	}
+
+	// Copy to storage. Storage is guarded by storageLock (not verifiersLock)
+	// everywhere else, so take it for the write to avoid a data race.
+	sh.storageLock.Lock()
+	defer sh.storageLock.Unlock()
+	sh.Storage = issuedTokens.Tokens
+
+	return nil
+}
+
+// Verify verifies the given token.
+// The token data is hashed with the configured algorithm and looked up in
+// the registered verifiers.
+func (sh *ScrambleHandler) Verify(token *Token) error {
+	if token.Zone != sh.opts.Zone {
+		return ErrZoneMismatch
+	}
+
+	// Hash the data.
+	scrambledToken := lhash.Digest(sh.opts.Algorithm, token.Data).Bytes()
+
+	sh.verifiersLock.RLock()
+	defer sh.verifiersLock.RUnlock()
+
+	// Check if token is valid.
+	_, ok := sh.verifiers[string(scrambledToken)]
+	if !ok {
+		return ErrTokenInvalid
+	}
+
+	return nil
+}
+
+// GetToken returns a token.
+// Returns ErrEmpty when the handler holds no tokens.
+// NOTE(review): the first token is returned but never removed from storage,
+// so the same token is handed out repeatedly — presumably scramble tokens
+// are reusable; confirm this is intended.
+func (sh *ScrambleHandler) GetToken() (*Token, error) {
+	sh.storageLock.Lock()
+	defer sh.storageLock.Unlock()
+
+	if len(sh.Storage) == 0 {
+		return nil, ErrEmpty
+	}
+
+	return &Token{
+		Zone: sh.opts.Zone,
+		Data: sh.Storage[0].Token,
+	}, nil
+}
+
+// ScrambleStorage is a storage for scramble tokens.
+// It is the serialization container used by Save and Load.
+type ScrambleStorage struct {
+	Storage []*ScrambleToken
+}
+
+// Save serializes and returns the current tokens.
+// Returns ErrEmpty when there are no tokens to save.
+func (sh *ScrambleHandler) Save() ([]byte, error) {
+	sh.storageLock.Lock()
+	defer sh.storageLock.Unlock()
+
+	if len(sh.Storage) == 0 {
+		return nil, ErrEmpty
+	}
+
+	s := &ScrambleStorage{
+		Storage: sh.Storage,
+	}
+
+	return dsd.Dump(s, dsd.CBOR)
+}
+
+// Load loads the given serialized tokens into the handler, replacing the
+// current storage.
+func (sh *ScrambleHandler) Load(data []byte) error {
+	sh.storageLock.Lock()
+	defer sh.storageLock.Unlock()
+
+	s := &ScrambleStorage{}
+	_, err := dsd.Load(data, s)
+	if err != nil {
+		return err
+	}
+
+	sh.Storage = s.Storage
+	return nil
+}
+
+// Clear drops all tokens currently held by the handler.
+func (sh *ScrambleHandler) Clear() {
+	sh.storageLock.Lock()
+	sh.Storage = nil
+	sh.storageLock.Unlock()
+}
diff --git a/spn/access/token/scramble_gen_test.go b/spn/access/token/scramble_gen_test.go
new file mode 100644
index 00000000..91a7d32e
--- /dev/null
+++ b/spn/access/token/scramble_gen_test.go
@@ -0,0 +1,48 @@
+package token
+
+import (
+	"crypto/rand"
+	"fmt"
+	"testing"
+
+	"github.com/mr-tron/base58"
+
+	"github.com/safing/jess/lhash"
+)
+
+// genAlgs pairs a hash algorithm with a printable name for key generation.
+type genAlgs struct {
+	alg  lhash.Algorithm
+	name string
+}
+
+// TestGenerateScrambleKeys generates a fresh secret/scrambled token pair for
+// several hash algorithms and prints them. It is a helper for creating new
+// zone configurations rather than an actual test; output goes to stdout via
+// fmt.Printf, presumably so the generated keys are always visible.
+func TestGenerateScrambleKeys(t *testing.T) {
+	t.Parallel()
+
+	for _, alg := range []genAlgs{
+		{alg: lhash.SHA2_256, name: "SHA2_256"},
+		{alg: lhash.SHA3_256, name: "SHA3_256"},
+		{alg: lhash.SHA3_512, name: "SHA3_512"},
+		{alg: lhash.BLAKE2b_256, name: "BLAKE2b_256"},
+	} {
+		token := make([]byte, scrambleSecretSize)
+		n, err := rand.Read(token)
+		if err != nil {
+			t.Fatal(err)
+		}
+		if n != scrambleSecretSize {
+			t.Fatalf("only got %d bytes", n)
+		}
+		scrambledToken := lhash.Digest(alg.alg, token).Bytes()
+
+		fmt.Printf(
+			"%s secret token: %s\n",
+			alg.name,
+			base58.Encode(token),
+		)
+		fmt.Printf(
+			"%s scrambled (public) token: %s\n",
+			alg.name,
+			base58.Encode(scrambledToken),
+		)
+	}
+}
diff --git a/spn/access/token/scramble_test.go b/spn/access/token/scramble_test.go
new file mode 100644
index 00000000..765d7007
--- /dev/null
+++ b/spn/access/token/scramble_test.go
@@ -0,0 +1,84 @@
+package token
+
+import (
+	"testing"
+
+	"github.com/safing/jess/lhash"
+)
+
+// ScrambleTestZone is the name of the scramble token zone used in tests.
+const ScrambleTestZone = "test-scramble"
+
+func init() {
+	// Combined testing config.
+	// Registers a scramble handler for the shared test zone in the package
+	// registry so that cross-handler tests (e.g. TestFull) can use it.
+
+	h, err := NewScrambleHandler(ScrambleOptions{
+		Zone:          ScrambleTestZone,
+		Algorithm:     lhash.SHA2_256,
+		InitialTokens: []string{"2VqJ8BvDew1tUpytZhR7tuvq7ToPpW3tQtHvu3veE3iW"},
+	})
+	if err != nil {
+		panic(err)
+	}
+
+	err = RegisterScrambleHandler(h)
+	if err != nil {
+		panic(err)
+	}
+}
+
+// TestScramble plays through the whole scramble token use case with three
+// separate parties: an issuer holding the secret token, a client knowing
+// only the scrambled verifier, and an independent verifier.
+func TestScramble(t *testing.T) {
+	t.Parallel()
+
+	opts := &ScrambleOptions{
+		Zone:      ScrambleTestZone,
+		Algorithm: lhash.SHA2_256,
+	}
+
+	// Issuer: holds the secret token.
+	opts.InitialTokens = []string{"2VqJ8BvDew1tUpytZhR7tuvq7ToPpW3tQtHvu3veE3iW"}
+	issuer, err := NewScrambleHandler(*opts)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Client: only knows the scrambled verifier.
+	opts.InitialTokens = nil
+	opts.InitialVerifiers = []string{"Cy9tz37Xq9NiXGDRU9yicjGU62GjXskE9KqUmuoddSxaE3"}
+	client, err := NewScrambleHandler(*opts)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Verifier: same configuration as the client.
+	verifier, err := NewScrambleHandler(*opts)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Play through the whole use case.
+
+	// CreateTokenRequest does not return an error, so there is nothing to
+	// check here. (Removed a stale err re-check that tested the previous
+	// call's error a second time.)
+	request := client.CreateTokenRequest()
+
+	issuedTokens, err := issuer.IssueTokens(request)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	err = client.ProcessIssuedTokens(issuedTokens)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	token, err := client.GetToken()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	err = verifier.Verify(token)
+	if err != nil {
+		t.Fatal(err)
+	}
+}
diff --git a/spn/access/token/token.go b/spn/access/token/token.go
new file mode 100644
index 00000000..b93ed194
--- /dev/null
+++ b/spn/access/token/token.go
@@ -0,0 +1,83 @@
+package token
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"strings"
+
+	"github.com/mr-tron/base58"
+
+	"github.com/safing/portbase/container"
+)
+
+// Token represents a token, consisting of a zone (name) and some data.
+type Token struct {
+	// Zone is the name of the token zone this token belongs to.
+	Zone string
+	// Data holds the zone-specific raw token data.
+	Data []byte
+}
+
+// GetToken returns a token of the given zone.
+// Returns ErrZoneUnknown when no handler is registered for the zone.
+func GetToken(zone string) (*Token, error) {
+	handler, ok := GetHandler(zone)
+	if !ok {
+		return nil, ErrZoneUnknown
+	}
+	return handler.GetToken()
+}
+
+// VerifyToken verifies the given token using the handler registered for the
+// token's zone. Returns ErrZoneUnknown when no handler is registered.
+func VerifyToken(token *Token) error {
+	handler, ok := GetHandler(token.Zone)
+	if !ok {
+		return ErrZoneUnknown
+	}
+	return handler.Verify(token)
+}
+
+// Raw returns the raw format of the token: the zone name, a ":" separator
+// and the raw token data.
+// NOTE(review): parsing splits at the first ":", so zone names must not
+// contain ":" — confirm zone naming rules enforce this.
+func (c *Token) Raw() []byte {
+	cont := container.New()
+	cont.Append([]byte(c.Zone))
+	cont.Append([]byte(":"))
+	cont.Append(c.Data)
+	return cont.CompileData()
+}
+
+// String returns the stringified format of the token: the zone name, a ":"
+// separator and the base58-encoded token data.
+func (c *Token) String() string {
+	encoded := base58.Encode(c.Data)
+	return c.Zone + ":" + encoded
+}
+
+// ParseRawToken parses a raw token.
+// The expected format is the zone name and the raw token data, separated by
+// the first ":".
+func ParseRawToken(code []byte) (*Token, error) {
+	// bytes.Cut is the idiomatic split-once and avoids the SplitN slice.
+	zone, data, found := bytes.Cut(code, []byte(":"))
+	if !found {
+		return nil, errors.New("invalid code format: zone/data separator missing")
+	}
+
+	return &Token{
+		Zone: string(zone),
+		Data: data,
+	}, nil
+}
+
+// ParseToken parses a stringified token.
+// The expected format is the zone name and the base58-encoded token data,
+// separated by the first ":".
+func ParseToken(code string) (*Token, error) {
+	// strings.Cut is the idiomatic split-once and avoids the SplitN slice.
+	zone, encoded, found := strings.Cut(code, ":")
+	if !found {
+		return nil, errors.New("invalid code format: zone/data separator missing")
+	}
+
+	data, err := base58.Decode(encoded)
+	if err != nil {
+		return nil, fmt.Errorf("invalid code format: %w", err)
+	}
+
+	return &Token{
+		Zone: zone,
+		Data: data,
+	}, nil
+}
diff --git a/spn/access/token/token_test.go b/spn/access/token/token_test.go
new file mode 100644
index 00000000..b132265a
--- /dev/null
+++ b/spn/access/token/token_test.go
@@ -0,0 +1,33 @@
+package token
+
+import (
+	"testing"
+
+	"github.com/safing/portbase/rng"
+)
+
+// TestToken checks that both serialization formats of a token (stringified
+// and raw) roundtrip losslessly through their respective parsers.
+func TestToken(t *testing.T) {
+	t.Parallel()
+
+	randomData, err := rng.Bytes(32)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	c := &Token{
+		Zone: "test",
+		Data: randomData,
+	}
+
+	// Stringified roundtrip must preserve zone and data.
+	// (Previously the parse results were discarded and only the absence of an
+	// error was checked.)
+	s := c.String()
+	parsed, err := ParseToken(s)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if parsed.Zone != c.Zone || string(parsed.Data) != string(c.Data) {
+		t.Fatalf("string roundtrip mismatch: %+v != %+v", parsed, c)
+	}
+
+	// Raw roundtrip must preserve zone and data.
+	r := c.Raw()
+	parsedRaw, err := ParseRawToken(r)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if parsedRaw.Zone != c.Zone || string(parsedRaw.Data) != string(c.Data) {
+		t.Fatalf("raw roundtrip mismatch: %+v != %+v", parsedRaw, c)
+	}
+}
diff --git a/spn/access/zones.go b/spn/access/zones.go
new file mode 100644
index 00000000..1f9c954b
--- /dev/null
+++ b/spn/access/zones.go
@@ -0,0 +1,257 @@
+package access
+
+import (
+	"errors"
+	"fmt"
+	"os"
+	"strings"
+
+	"github.com/tevino/abool"
+
+	"github.com/safing/jess/lhash"
+	"github.com/safing/portbase/log"
+	"github.com/safing/portmaster/spn/access/token"
+	"github.com/safing/portmaster/spn/conf"
+	"github.com/safing/portmaster/spn/terminal"
+)
+
+var (
+	// ExpandAndConnectZones are the zones that grant access to the expand and
+	// connect operations.
+	ExpandAndConnectZones = []string{"pblind1", "alpha2", "fallback1"}
+
+	// zonePermissions maps zone names to the permissions their tokens grant.
+	zonePermissions = map[string]terminal.Permission{
+		"pblind1":   terminal.AddPermissions(terminal.MayExpand, terminal.MayConnect),
+		"alpha2":    terminal.AddPermissions(terminal.MayExpand, terminal.MayConnect),
+		"fallback1": terminal.AddPermissions(terminal.MayExpand, terminal.MayConnect),
+	}
+	// persistentZones lists the zones treated as persistent — NOTE(review):
+	// consumer of this variable is not visible here; confirm semantics.
+	persistentZones = ExpandAndConnectZones
+
+	// enableTestMode signals that only the test zone should be registered.
+	enableTestMode = abool.New()
+)
+
+// EnableTestMode enables the test mode, leading the access module to only
+// register a test zone.
+// This should not be used to test the access module itself.
+// To take effect it must be called before InitializeZones, which reads the
+// flag.
+func EnableTestMode() {
+	enableTestMode.Set()
+}
+
+// InitializeZones initializes the permission zones.
+// It initializes the test zones, if EnableTestMode was called before.
+// Must only be called once.
+func InitializeZones() error {
+	// Check if we are testing.
+	if enableTestMode.IsSet() {
+		return initializeTestZone()
+	}
+
+	// Special client zone config.
+	// On clients, the pblind handler signals when tokens should be requested,
+	// which triggers an account update (see shouldRequestTokensHandler).
+	var requestSignalHandler func(token.Handler)
+	if conf.Client() {
+		requestSignalHandler = shouldRequestTokensHandler
+	}
+
+	// Register pblind1 as the first primary zone.
+	ph, err := token.NewPBlindHandler(token.PBlindOptions{
+		Zone:                "pblind1",
+		CurveName:           "P-256",
+		PublicKey:           "eXoJXzXbM66UEsM2eVi9HwyBPLMfVnNrC7gNrsfMUJDs",
+		UseSerials:          true,
+		BatchSize:           1000,
+		RandomizeOrder:      true,
+		SignalShouldRequest: requestSignalHandler,
+	})
+	if err != nil {
+		return fmt.Errorf("failed to create pblind1 token handler: %w", err)
+	}
+	err = token.RegisterPBlindHandler(ph)
+	if err != nil {
+		return fmt.Errorf("failed to register pblind1 token handler: %w", err)
+	}
+
+	// Register fallback1 zone as fallback when the issuer is not available.
+	sh, err := token.NewScrambleHandler(token.ScrambleOptions{
+		Zone:             "fallback1",
+		Algorithm:        lhash.BLAKE2b_256,
+		InitialVerifiers: []string{"ZwkQoaAttVBMURzeLzNXokFBMAMUUwECfM1iHojcVKBmjk"},
+		Fallback:         true,
+	})
+	if err != nil {
+		return fmt.Errorf("failed to create fallback1 token handler: %w", err)
+	}
+	err = token.RegisterScrambleHandler(sh)
+	if err != nil {
+		return fmt.Errorf("failed to register fallback1 token handler: %w", err)
+	}
+
+	// Register alpha2 zone for transition phase.
+	sh, err = token.NewScrambleHandler(token.ScrambleOptions{
+		Zone:             "alpha2",
+		Algorithm:        lhash.BLAKE2b_256,
+		InitialVerifiers: []string{"ZwojEvXZmAv7SZdNe7m94Xzu7F9J8vULqKf7QYtoTpN2tH"},
+	})
+	if err != nil {
+		return fmt.Errorf("failed to create alpha2 token handler: %w", err)
+	}
+	err = token.RegisterScrambleHandler(sh)
+	if err != nil {
+		return fmt.Errorf("failed to register alpha2 token handler: %w", err)
+	}
+
+	return nil
+}
+
+// initializeTestZone registers a single scramble test zone ("unittest") in
+// place of the production zones. It refuses to run outside of a test binary
+// and when handlers are already registered.
+func initializeTestZone() error {
+	// Safeguard checks if we should really enable the test zone.
+	if !strings.HasSuffix(os.Args[0], ".test") {
+		return errors.New("tried to enable test mode, but no test binary was detected")
+	}
+	if token.RegistrySize() > 0 {
+		return fmt.Errorf("tried to enable test zone, but %d handlers are already registered", token.RegistrySize())
+	}
+
+	// Reset zones.
+	token.ResetRegistry()
+
+	// Set eligible zones.
+	ExpandAndConnectZones = []string{"unittest"}
+	zonePermissions = map[string]terminal.Permission{
+		"unittest": terminal.AddPermissions(terminal.MayExpand, terminal.MayConnect),
+	}
+
+	// Register unittest zone as for testing.
+	sh, err := token.NewScrambleHandler(token.ScrambleOptions{
+		Zone:             "unittest",
+		Algorithm:        lhash.BLAKE2b_256,
+		InitialTokens:    []string{"6jFqLA93uSLL52utGKrvctG3ZfopSQ8WFqjsRK1c2Svt"},
+		InitialVerifiers: []string{"ZwoEoL59sr81s7WnF2vydGzjeejE3u8CqVafig1NTQzUr7"},
+	})
+	if err != nil {
+		return fmt.Errorf("failed to create unittest token handler: %w", err)
+	}
+	err = token.RegisterScrambleHandler(sh)
+	if err != nil {
+		return fmt.Errorf("failed to register unittest token handler: %w", err)
+	}
+
+	return nil
+}
+
+// shouldRequestTokensHandler triggers an account update, as part of which
+// new tokens are requested.
+func shouldRequestTokensHandler(_ token.Handler) {
+	// accountUpdateTask is always set in client mode and when the module is online.
+	// Check if it's set in case this gets executed in other circumstances.
+	if accountUpdateTask == nil {
+		log.Warningf("spn/access: trying to trigger account update, but the task is not available")
+		return
+	}
+
+	accountUpdateTask.StartASAP()
+}
+
+// GetTokenAmount returns the amount of tokens for the given zones, split
+// into regular and fallback tokens.
+func GetTokenAmount(zones []string) (regular, fallback int) {
+	for _, zone := range zones {
+		// Get handler and check if it should be used.
+		handler, ok := token.GetHandler(zone)
+		if !ok {
+			log.Warningf("spn/access: use of non-registered zone %q", zone)
+			continue
+		}
+
+		// Count fallback and regular tokens separately.
+		if handler.IsFallback() {
+			fallback += handler.Amount()
+		} else {
+			regular += handler.Amount()
+		}
+	}
+
+	return regular, fallback
+}
+
+// ShouldRequest reports whether tokens should be requested for any of the
+// given zones.
+func ShouldRequest(zones []string) (shouldRequest bool) {
+	for _, zone := range zones {
+		// Get handler and check if it should be used.
+		handler, ok := token.GetHandler(zone)
+		if !ok {
+			log.Warningf("spn/access: use of non-registered zone %q", zone)
+			continue
+		}
+
+		// Deliberately check every zone instead of returning early, as this is
+		// the common case anyway and helps catch zone misconfiguration.
+		if handler.ShouldRequest() {
+			shouldRequest = true
+		}
+	}
+
+	return shouldRequest
+}
+
+// GetToken returns a token of one of the given zones.
+// Zones are tried in the given order; fallback zones are skipped while the
+// token issuer is healthy. If all eligible zones fail, the last error is
+// returned; if no zone could be tried at all, token.ErrEmpty is returned.
+func GetToken(zones []string) (t *token.Token, err error) {
+handlerSelection:
+	for _, zone := range zones {
+		// Get handler and check if it should be used.
+		handler, ok := token.GetHandler(zone)
+		switch {
+		case !ok:
+			log.Warningf("spn/access: use of non-registered zone %q", zone)
+			continue handlerSelection
+		case handler.IsFallback() && !TokenIssuerIsFailing():
+			// Skip fallback zone if everything works.
+			continue handlerSelection
+		}
+
+		// Get token from handler.
+		// On failure, err (a named return) keeps the last error for below.
+		t, err = token.GetToken(zone)
+		if err == nil {
+			return t, nil
+		}
+	}
+
+	// Return existing error, if exists.
+	if err != nil {
+		return nil, err
+	}
+	return nil, token.ErrEmpty
+}
+
+// VerifyRawToken parses and verifies a raw token and returns the permissions
+// granted by its zone.
+func VerifyRawToken(data []byte) (granted terminal.Permission, err error) {
+	t, err := token.ParseRawToken(data)
+	if err != nil {
+		// Use terminal.NoPermission instead of a bare 0 for consistency with
+		// the other error paths in this file.
+		return terminal.NoPermission, fmt.Errorf("failed to parse token: %w", err)
+	}
+
+	return VerifyToken(t)
+}
+
+// VerifyToken verifies a token and returns the permissions granted by its
+// zone. Fallback tokens are only accepted while the issuer health check
+// reports a failure.
+func VerifyToken(t *token.Token) (granted terminal.Permission, err error) {
+	handler, ok := token.GetHandler(t.Zone)
+	if !ok {
+		return terminal.NoPermission, token.ErrZoneUnknown
+	}
+
+	// Check if the token is a fallback token.
+	if handler.IsFallback() && !healthCheck() {
+		return terminal.NoPermission, ErrFallbackNotAvailable
+	}
+
+	// Verify token.
+	err = handler.Verify(t)
+	if err != nil {
+		// Use terminal.NoPermission instead of a bare 0 for consistency with
+		// the other error paths in this file.
+		return terminal.NoPermission, fmt.Errorf("failed to verify token: %w", err)
+	}
+
+	// Return permission of zone.
+	granted, ok = zonePermissions[t.Zone]
+	if !ok {
+		return terminal.NoPermission, nil
+	}
+	return granted, nil
+}
diff --git a/spn/cabin/config-public.go b/spn/cabin/config-public.go
new file mode 100644
index 00000000..4ae733ae
--- /dev/null
+++ b/spn/cabin/config-public.go
@@ -0,0 +1,392 @@
+package cabin
+
+import (
+	"fmt"
+	"net"
+	"os"
+
+	"github.com/safing/portbase/config"
+	"github.com/safing/portbase/log"
+	"github.com/safing/portmaster/service/netenv"
+	"github.com/safing/portmaster/service/profile/endpoints"
+	"github.com/safing/portmaster/spn/hub"
+)
+
+// Configuration Keys.
+// Each option comes as a quadruple: key, getter, default value and display
+// order for the UI.
+var (
+	// Name of the node.
+	publicCfgOptionNameKey     = "spn/publicHub/name"
+	publicCfgOptionName        config.StringOption
+	publicCfgOptionNameDefault = ""
+	publicCfgOptionNameOrder   = 512
+
+	// Person or organisation, who is in control of the node (should be same for all nodes of this person or organisation).
+	publicCfgOptionGroupKey     = "spn/publicHub/group"
+	publicCfgOptionGroup        config.StringOption
+	publicCfgOptionGroupDefault = ""
+	publicCfgOptionGroupOrder   = 513
+
+	// Contact possibility (recommended, but optional).
+	publicCfgOptionContactAddressKey     = "spn/publicHub/contactAddress"
+	publicCfgOptionContactAddress        config.StringOption
+	publicCfgOptionContactAddressDefault = ""
+	publicCfgOptionContactAddressOrder   = 514
+
+	// Type of service of the contact address, if not email.
+	publicCfgOptionContactServiceKey     = "spn/publicHub/contactService"
+	publicCfgOptionContactService        config.StringOption
+	publicCfgOptionContactServiceDefault = ""
+	publicCfgOptionContactServiceOrder   = 515
+
+	// Hosters - supply chain (reseller, hosting provider, datacenter operator, ...).
+	publicCfgOptionHostersKey     = "spn/publicHub/hosters"
+	publicCfgOptionHosters        config.StringArrayOption
+	publicCfgOptionHostersDefault = []string{}
+	publicCfgOptionHostersOrder   = 516
+
+	// Datacenter identifier of the hosting datacenter.
+	// Format: CC-COMPANY-INTERNALCODE
+	// E.g.: DE-Hetzner-FSN1-DC5
+	publicCfgOptionDatacenterKey     = "spn/publicHub/datacenter"
+	publicCfgOptionDatacenter        config.StringOption
+	publicCfgOptionDatacenterDefault = ""
+	publicCfgOptionDatacenterOrder   = 517
+
+	// Network Location and Access.
+
+	// IPv4 must be global and accessible.
+	publicCfgOptionIPv4Key     = "spn/publicHub/ip4"
+	publicCfgOptionIPv4        config.StringOption
+	publicCfgOptionIPv4Default = ""
+	publicCfgOptionIPv4Order   = 518
+
+	// IPv6 must be global and accessible.
+	publicCfgOptionIPv6Key     = "spn/publicHub/ip6"
+	publicCfgOptionIPv6        config.StringOption
+	publicCfgOptionIPv6Default = ""
+	publicCfgOptionIPv6Order   = 519
+
+	// Transports.
+	publicCfgOptionTransportsKey     = "spn/publicHub/transports"
+	publicCfgOptionTransports        config.StringArrayOption
+	publicCfgOptionTransportsDefault = []string{
+		"tcp:17",
+	}
+	publicCfgOptionTransportsOrder = 520
+
+	// Entry Policy.
+	publicCfgOptionEntryKey     = "spn/publicHub/entry"
+	publicCfgOptionEntry        config.StringArrayOption
+	publicCfgOptionEntryDefault = []string{}
+	publicCfgOptionEntryOrder   = 521
+
+	// Exit Policy.
+	publicCfgOptionExitKey     = "spn/publicHub/exit"
+	publicCfgOptionExit        config.StringArrayOption
+	publicCfgOptionExitDefault = []string{"- * TCP/25"}
+	publicCfgOptionExitOrder   = 522
+
+	// Allow Unencrypted.
+	publicCfgOptionAllowUnencryptedKey     = "spn/publicHub/allowUnencrypted"
+	publicCfgOptionAllowUnencrypted        config.BoolOption
+	publicCfgOptionAllowUnencryptedDefault = false
+	publicCfgOptionAllowUnencryptedOrder   = 523
+)
+
+func prepPublicHubConfig() error {
+	err := config.Register(&config.Option{
+		Name:            "Name",
+		Key:             publicCfgOptionNameKey,
+		Description:     "Human readable name of the Hub.",
+		OptType:         config.OptTypeString,
+		ExpertiseLevel:  config.ExpertiseLevelExpert,
+		RequiresRestart: true,
+		DefaultValue:    publicCfgOptionNameDefault,
+		Annotations: config.Annotations{
+			config.DisplayOrderAnnotation: publicCfgOptionNameOrder,
+		},
+	})
+	if err != nil {
+		return err
+	}
+	publicCfgOptionName = config.GetAsString(publicCfgOptionNameKey, publicCfgOptionNameDefault)
+
+	err = config.Register(&config.Option{
+		Name:            "Group",
+		Key:             publicCfgOptionGroupKey,
+		Description:     "Name of the hub group this Hub belongs to.",
+		OptType:         config.OptTypeString,
+		ExpertiseLevel:  config.ExpertiseLevelExpert,
+		RequiresRestart: true,
+		DefaultValue:    publicCfgOptionGroupDefault,
+		Annotations: config.Annotations{
+			config.DisplayOrderAnnotation: publicCfgOptionGroupOrder,
+		},
+	})
+	if err != nil {
+		return err
+	}
+	publicCfgOptionGroup = config.GetAsString(publicCfgOptionGroupKey, publicCfgOptionGroupDefault)
+
+	err = config.Register(&config.Option{
+		Name:            "Contact Address",
+		Key:             publicCfgOptionContactAddressKey,
+		Description:     "Contact address where the Hub operator can be reached.",
+		OptType:         config.OptTypeString,
+		ExpertiseLevel:  config.ExpertiseLevelExpert,
+		RequiresRestart: true,
+		DefaultValue:    publicCfgOptionContactAddressDefault,
+		Annotations: config.Annotations{
+			config.DisplayOrderAnnotation: publicCfgOptionContactAddressOrder,
+		},
+	})
+	if err != nil {
+		return err
+	}
+	publicCfgOptionContactAddress = config.GetAsString(publicCfgOptionContactAddressKey, publicCfgOptionContactAddressDefault)
+
+	err = config.Register(&config.Option{
+		Name:            "Contact Service",
+		Key:             publicCfgOptionContactServiceKey,
+		Description:     "Name of the service the contact address corresponds to, if not email.",
+		OptType:         config.OptTypeString,
+		ExpertiseLevel:  config.ExpertiseLevelExpert,
+		RequiresRestart: true,
+		DefaultValue:    publicCfgOptionContactServiceDefault,
+		Annotations: config.Annotations{
+			config.DisplayOrderAnnotation: publicCfgOptionContactServiceOrder,
+		},
+	})
+	if err != nil {
+		return err
+	}
+	publicCfgOptionContactService = config.GetAsString(publicCfgOptionContactServiceKey, publicCfgOptionContactServiceDefault)
+
+	err = config.Register(&config.Option{
+		Name:            "Hosters",
+		Key:             publicCfgOptionHostersKey,
+		Description:     "List of all involved entities and organisations that are involved in hosting this Hub.",
+		OptType:         config.OptTypeStringArray,
+		ExpertiseLevel:  config.ExpertiseLevelExpert,
+		RequiresRestart: true,
+		DefaultValue:    publicCfgOptionHostersDefault,
+		Annotations: config.Annotations{
+			config.DisplayOrderAnnotation: publicCfgOptionHostersOrder,
+		},
+	})
+	if err != nil {
+		return err
+	}
+	publicCfgOptionHosters = config.GetAsStringArray(publicCfgOptionHostersKey, publicCfgOptionHostersDefault)
+
+	err = config.Register(&config.Option{
+		Name:            "Datacenter",
+		Key:             publicCfgOptionDatacenterKey,
+		Description:     "Identifier of the datacenter this Hub is hosted in.",
+		OptType:         config.OptTypeString,
+		ExpertiseLevel:  config.ExpertiseLevelExpert,
+		RequiresRestart: true,
+		DefaultValue:    publicCfgOptionDatacenterDefault,
+		Annotations: config.Annotations{
+			config.DisplayOrderAnnotation: publicCfgOptionDatacenterOrder,
+		},
+	})
+	if err != nil {
+		return err
+	}
+	publicCfgOptionDatacenter = config.GetAsString(publicCfgOptionDatacenterKey, publicCfgOptionDatacenterDefault)
+
+	err = config.Register(&config.Option{
+		Name:            "IPv4",
+		Key:             publicCfgOptionIPv4Key,
+		Description:     "IPv4 address of this Hub. Must be globally reachable.",
+		OptType:         config.OptTypeString,
+		ExpertiseLevel:  config.ExpertiseLevelExpert,
+		RequiresRestart: true,
+		DefaultValue:    publicCfgOptionIPv4Default,
+		Annotations: config.Annotations{
+			config.DisplayOrderAnnotation: publicCfgOptionIPv4Order,
+		},
+	})
+	if err != nil {
+		return err
+	}
+	publicCfgOptionIPv4 = config.GetAsString(publicCfgOptionIPv4Key, publicCfgOptionIPv4Default)
+
+	err = config.Register(&config.Option{
+		Name:            "IPv6",
+		Key:             publicCfgOptionIPv6Key,
+		Description:     "IPv6 address of this Hub. Must be globally reachable.",
+		OptType:         config.OptTypeString,
+		ExpertiseLevel:  config.ExpertiseLevelExpert,
+		RequiresRestart: true,
+		DefaultValue:    publicCfgOptionIPv6Default,
+		Annotations: config.Annotations{
+			config.DisplayOrderAnnotation: publicCfgOptionIPv6Order,
+		},
+	})
+	if err != nil {
+		return err
+	}
+	publicCfgOptionIPv6 = config.GetAsString(publicCfgOptionIPv6Key, publicCfgOptionIPv6Default)
+
+	err = config.Register(&config.Option{
+		Name:            "Transports",
+		Key:             publicCfgOptionTransportsKey,
+		Description:     "List of transports this Hub supports.",
+		OptType:         config.OptTypeStringArray,
+		ExpertiseLevel:  config.ExpertiseLevelExpert,
+		RequiresRestart: true,
+		DefaultValue:    publicCfgOptionTransportsDefault,
+		ValidationFunc: func(value any) error {
+			if transports, ok := value.([]string); ok {
+				for i, transport := range transports {
+					if _, err := hub.ParseTransport(transport); err != nil {
+						return fmt.Errorf("failed to parse transport #%d: %w", i, err)
+					}
+				}
+			} else {
+				return fmt.Errorf("not a []string, but %T", value)
+			}
+			return nil
+		},
+		Annotations: config.Annotations{
+			config.DisplayOrderAnnotation: publicCfgOptionTransportsOrder,
+		},
+	})
+	if err != nil {
+		return err
+	}
+	publicCfgOptionTransports = config.GetAsStringArray(publicCfgOptionTransportsKey, publicCfgOptionTransportsDefault)
+
+	err = config.Register(&config.Option{
+		Name:            "Entry",
+		Key:             publicCfgOptionEntryKey,
+		Description:     "Define an entry policy. The format is the same for the endpoint lists. Default is permit.",
+		OptType:         config.OptTypeStringArray,
+		ExpertiseLevel:  config.ExpertiseLevelExpert,
+		RequiresRestart: true,
+		DefaultValue:    publicCfgOptionEntryDefault,
+		Annotations: config.Annotations{
+			config.DisplayOrderAnnotation: publicCfgOptionEntryOrder,
+			config.DisplayHintAnnotation:  endpoints.DisplayHintEndpointList,
+		},
+	})
+	if err != nil {
+		return err
+	}
+	publicCfgOptionEntry = config.GetAsStringArray(publicCfgOptionEntryKey, publicCfgOptionEntryDefault)
+
+	err = config.Register(&config.Option{
+		Name:            "Exit",
+		Key:             publicCfgOptionExitKey,
+		Description:     "Define an exit policy. The format is the same for the endpoint lists. Default is permit.",
+		OptType:         config.OptTypeStringArray,
+		ExpertiseLevel:  config.ExpertiseLevelExpert,
+		RequiresRestart: true,
+		DefaultValue:    publicCfgOptionExitDefault,
+		Annotations: config.Annotations{
+			config.DisplayOrderAnnotation: publicCfgOptionExitOrder,
+			config.DisplayHintAnnotation:  endpoints.DisplayHintEndpointList,
+		},
+	})
+	if err != nil {
+		return err
+	}
+	publicCfgOptionExit = config.GetAsStringArray(publicCfgOptionExitKey, publicCfgOptionExitDefault)
+
+	err = config.Register(&config.Option{
+		Name:            "Allow Unencrypted Connections",
+		Key:             publicCfgOptionAllowUnencryptedKey,
+		Description:     "Advertise that this Hub is available for handling unencrypted connections, as detected by clients.",
+		OptType:         config.OptTypeBool,
+		ExpertiseLevel:  config.ExpertiseLevelExpert,
+		RequiresRestart: true,
+		DefaultValue:    publicCfgOptionAllowUnencryptedDefault,
+		Annotations: config.Annotations{
+			config.DisplayOrderAnnotation: publicCfgOptionAllowUnencryptedOrder,
+		},
+	})
+	if err != nil {
+		return err
+	}
+	publicCfgOptionAllowUnencrypted = config.GetAsBool(publicCfgOptionAllowUnencryptedKey, publicCfgOptionAllowUnencryptedDefault)
+
+	// update defaults from system
+	setDynamicPublicDefaults()
+
+	return nil
+}
+
+// getPublicHubInfo assembles a hub.Announcement from the currently
+// configured values of this public Hub.
+func getPublicHubInfo() *hub.Announcement {
+	// Collect all configured values.
+	info := &hub.Announcement{
+		Name:           publicCfgOptionName(),
+		Group:          publicCfgOptionGroup(),
+		ContactAddress: publicCfgOptionContactAddress(),
+		ContactService: publicCfgOptionContactService(),
+		Hosters:        publicCfgOptionHosters(),
+		Datacenter:     publicCfgOptionDatacenter(),
+		Transports:     publicCfgOptionTransports(),
+		Entry:          publicCfgOptionEntry(),
+		Exit:           publicCfgOptionExit(),
+		Flags:          []string{},
+	}
+
+	if publicCfgOptionAllowUnencrypted() {
+		info.Flags = append(info.Flags, hub.FlagAllowUnencrypted)
+	}
+
+	// parseConfiguredIP parses a configured IP address, logging a warning
+	// on invalid values and returning nil for empty or invalid input.
+	parseConfiguredIP := func(key, value string) net.IP {
+		if value == "" {
+			return nil
+		}
+		parsed := net.ParseIP(value)
+		if parsed == nil {
+			log.Warningf("spn/cabin: invalid %s config: %s", key, value)
+		}
+		return parsed
+	}
+
+	if ip := parseConfiguredIP(publicCfgOptionIPv4Key, publicCfgOptionIPv4()); ip != nil {
+		info.IPv4 = ip
+	}
+	if ip := parseConfiguredIP(publicCfgOptionIPv6Key, publicCfgOptionIPv6()); ip != nil {
+		info.IPv6 = ip
+	}
+
+	return info
+}
+
+// setDynamicPublicDefaults updates config defaults from the current system
+// state: the hostname becomes the default Hub name, and - if exactly one
+// global IPv4/IPv6 address is assigned - that address becomes the default
+// for the respective IP option.
+func setDynamicPublicDefaults() {
+	// Use the system hostname as the default Hub name.
+	hostname, err := os.Hostname()
+	if err == nil {
+		err := config.SetDefaultConfigOption(publicCfgOptionNameKey, hostname)
+		if err != nil {
+			// Fix: include the actual error in the log message.
+			log.Warningf("spn/cabin: failed to set %s default to %s: %s", publicCfgOptionNameKey, hostname, err)
+		}
+	}
+
+	// Use assigned global addresses as IP defaults, but only when the
+	// choice is unambiguous (exactly one address per family).
+	v4IPs, v6IPs, err := netenv.GetAssignedGlobalAddresses()
+	if err != nil {
+		log.Warningf("spn/cabin: failed to get assigned addresses: %s", err)
+		return
+	}
+	if len(v4IPs) == 1 {
+		err = config.SetDefaultConfigOption(publicCfgOptionIPv4Key, v4IPs[0].String())
+		if err != nil {
+			log.Warningf("spn/cabin: failed to set %s default to %s: %s", publicCfgOptionIPv4Key, v4IPs[0].String(), err)
+		}
+	}
+	if len(v6IPs) == 1 {
+		err = config.SetDefaultConfigOption(publicCfgOptionIPv6Key, v6IPs[0].String())
+		if err != nil {
+			log.Warningf("spn/cabin: failed to set %s default to %s: %s", publicCfgOptionIPv6Key, v6IPs[0].String(), err)
+		}
+	}
+}
diff --git a/spn/cabin/database.go b/spn/cabin/database.go
new file mode 100644
index 00000000..41097530
--- /dev/null
+++ b/spn/cabin/database.go
@@ -0,0 +1,98 @@
+package cabin
+
+import (
+	"errors"
+	"fmt"
+
+	"github.com/safing/portbase/database"
+	"github.com/safing/portbase/database/record"
+	"github.com/safing/portmaster/spn/hub"
+)
+
+// db is the shared database interface used to store and load identities.
+var db = database.NewInterface(nil)
+
+// LoadIdentity loads an identity from the database under the given key.
+// The returned changed flag reports whether the initial maintenance run
+// modified the identity, in which case it should be saved again.
+func LoadIdentity(key string) (id *Identity, changed bool, err error) {
+	r, err := db.Get(key)
+	if err != nil {
+		return nil, false, err
+	}
+	id, err = EnsureIdentity(r)
+	if err != nil {
+		return nil, false, fmt.Errorf("failed to parse identity: %w", err)
+	}
+
+	// Reject identities that are missing required fields.
+	if err := checkLoadedIdentity(id); err != nil {
+		return nil, false, err
+	}
+
+	// Run an initial maintenance routine.
+	infoChanged, err := id.MaintainAnnouncement(nil, true)
+	if err != nil {
+		return nil, false, fmt.Errorf("failed to initialize announcement: %w", err)
+	}
+	statusChanged, err := id.MaintainStatus(nil, nil, nil, true)
+	if err != nil {
+		return nil, false, fmt.Errorf("failed to initialize status: %w", err)
+	}
+
+	// Reset the measurement values of the Hub.
+	measurements := id.Hub.GetMeasurements()
+	measurements.SetLatency(0)
+	measurements.SetCapacity(0)
+	measurements.SetCalculatedCost(hub.MaxCalculatedCost)
+
+	return id, infoChanged || statusChanged, nil
+}
+
+// checkLoadedIdentity verifies that all required fields of a loaded
+// identity are present and consistent with each other.
+func checkLoadedIdentity(id *Identity) error {
+	switch {
+	case id.Hub == nil:
+		return errors.New("missing id.Hub")
+	case id.Signet == nil:
+		return errors.New("missing id.Signet")
+	case id.Hub.Info == nil:
+		return errors.New("missing hub.Info")
+	case id.Hub.Status == nil:
+		return errors.New("missing hub.Status")
+	case id.ID != id.Hub.ID:
+		return errors.New("hub.ID mismatch")
+	case id.ID != id.Hub.Info.ID:
+		return errors.New("hub.Info.ID mismatch")
+	case id.Map == "":
+		return errors.New("invalid id.Map")
+	case id.Hub.Map == "":
+		return errors.New("invalid hub.Map")
+	case id.Hub.FirstSeen.IsZero():
+		return errors.New("missing hub.FirstSeen")
+	case id.Hub.Info.Timestamp == 0:
+		return errors.New("missing hub.Info.Timestamp")
+	case id.Hub.Status.Timestamp == 0:
+		return errors.New("missing hub.Status.Timestamp")
+	}
+	return nil
+}
+
+// EnsureIdentity makes sure a database record is an Identity.
+func EnsureIdentity(r record.Record) (*Identity, error) {
+	// Wrapped records must be unwrapped into a fresh struct.
+	if r.IsWrapped() {
+		id := &Identity{}
+		if err := record.Unwrap(r, id); err != nil {
+			return nil, err
+		}
+		return id, nil
+	}
+
+	// Otherwise, the record must already be of the correct type.
+	if id, ok := r.(*Identity); ok {
+		return id, nil
+	}
+	return nil, fmt.Errorf("record not of type *Identity, but %T", r)
+}
+
+// Save writes the Identity to the database under its configured key.
+func (id *Identity) Save() error {
+	if id.KeyIsSet() {
+		return db.Put(id)
+	}
+	return errors.New("no key set")
+}
diff --git a/spn/cabin/identity.go b/spn/cabin/identity.go
new file mode 100644
index 00000000..0be583cf
--- /dev/null
+++ b/spn/cabin/identity.go
@@ -0,0 +1,311 @@
+package cabin
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"time"
+
+	"github.com/safing/jess"
+	"github.com/safing/jess/tools"
+	"github.com/safing/portbase/database/record"
+	"github.com/safing/portbase/info"
+	"github.com/safing/portbase/log"
+	"github.com/safing/portmaster/spn/conf"
+	"github.com/safing/portmaster/spn/hub"
+)
+
+const (
+	// DefaultIDKeyScheme is the default jess tool for creating ID keys.
+	DefaultIDKeyScheme = "Ed25519"
+
+	// DefaultIDKeySecurityLevel is the default security level for creating ID keys.
+	DefaultIDKeySecurityLevel = 256 // Ed25519's security level is fixed; this setting is ignored.
+)
+
+// Identity holds the identity of a Hub, including its private keys.
+type Identity struct {
+	record.Base
+
+	// ID is the ID of this Hub; it mirrors Hub.ID.
+	ID     string
+	// Map is the name of the SPN map this identity belongs to.
+	Map    string
+	// Hub is the public Hub entry of this identity.
+	Hub    *hub.Hub
+	// Signet holds the private ID key of this Hub.
+	Signet *jess.Signet
+
+	// ExchKeys holds the private exchange keys, indexed by key ID.
+	ExchKeys map[string]*ExchKey
+
+	// infoExportCache caches the latest signed announcement export.
+	infoExportCache   []byte
+	// statusExportCache caches the latest signed status export.
+	statusExportCache []byte
+}
+
+// Lock locks the Identity through the Hub lock.
+// Note: the Identity has no lock of its own; it shares the Hub's lock.
+func (id *Identity) Lock() {
+	id.Hub.Lock()
+}
+
+// Unlock unlocks the Identity through the Hub lock.
+func (id *Identity) Unlock() {
+	id.Hub.Unlock()
+}
+
+// ExchKey holds the private information of a HubKey.
+type ExchKey struct {
+	// Created is when this exchange key was generated.
+	Created time.Time
+	// Expires is when this exchange key becomes invalid for use by clients.
+	Expires time.Time
+	// key is the private exchange key itself; set to nil after being burnt.
+	key     *jess.Signet
+	// tool is the jess tool used to create and manage this key.
+	tool    *tools.Tool
+}
+
+// CreateIdentity creates a new identity for the given map.
+// NOTE(review): ctx is currently unused - confirm whether it should be
+// threaded into key creation or removed.
+func CreateIdentity(ctx context.Context, mapName string) (*Identity, error) {
+	id := &Identity{
+		Map:      mapName,
+		ExchKeys: make(map[string]*ExchKey),
+	}
+
+	// Create the ID key pair: the signet holds the private part, the
+	// recipient the public part.
+	signet, recipient, err := hub.CreateHubSignet(DefaultIDKeyScheme, DefaultIDKeySecurityLevel)
+	if err != nil {
+		return nil, err
+	}
+	id.Signet = signet
+	id.ID = signet.ID
+	id.Hub = &hub.Hub{
+		ID:        id.ID,
+		Map:       mapName,
+		PublicKey: recipient,
+	}
+
+	// Run the initial maintenance routine to populate the announcement and
+	// the status (which also creates the first set of exchange keys).
+	_, err = id.MaintainAnnouncement(nil, true)
+	if err != nil {
+		return nil, fmt.Errorf("failed to initialize announcement: %w", err)
+	}
+	_, err = id.MaintainStatus([]*hub.Lane{}, new(int), nil, true)
+	if err != nil {
+		return nil, fmt.Errorf("failed to initialize status: %w", err)
+	}
+
+	return id, nil
+}
+
+// MaintainAnnouncement maintains the Hub's Announcement and returns whether
+// there was a change that should be communicated to other Hubs.
+// If newInfo is nil, it will be derived from configuration.
+// If selfcheck is set, the announcement is re-exported and re-applied even
+// when nothing changed.
+func (id *Identity) MaintainAnnouncement(newInfo *hub.Announcement, selfcheck bool) (changed bool, err error) {
+	id.Lock()
+	defer id.Unlock()
+
+	// Populate new info with data.
+	if newInfo == nil {
+		newInfo = getPublicHubInfo()
+	}
+	newInfo.ID = id.Hub.ID
+	// Carry over the previous timestamp so that it does not influence the
+	// equality check below.
+	if id.Hub.Info != nil {
+		newInfo.Timestamp = id.Hub.Info.Timestamp
+	}
+	if !newInfo.Equal(id.Hub.Info) {
+		changed = true
+	}
+
+	if changed {
+		// Update timestamp.
+		newInfo.Timestamp = time.Now().Unix()
+	}
+
+	if changed || selfcheck {
+		// Export new data.
+		newInfoData, err := newInfo.Export(id.signingEnvelope())
+		if err != nil {
+			return false, fmt.Errorf("failed to export: %w", err)
+		}
+
+		// Apply the announcement as all other Hubs would in order to check if it's valid.
+		_, _, _, err = hub.ApplyAnnouncement(id.Hub, newInfoData, conf.MainMapName, conf.MainMapScope, true)
+		if err != nil {
+			return false, fmt.Errorf("failed to apply new announcement: %w", err)
+		}
+		id.infoExportCache = newInfoData
+
+		// Save message to hub message storage.
+		err = hub.SaveHubMsg(id.ID, conf.MainMapName, hub.MsgTypeAnnouncement, newInfoData)
+		if err != nil {
+			log.Warningf("spn/cabin: failed to save own new/updated announcement of %s: %s", id.ID, err)
+		}
+	}
+
+	return changed, nil
+}
+
+// MaintainStatus maintains the Hub's Status and returns whether there was a change that should be communicated to other Hubs.
+// A nil lanes or load value leaves the respective field untouched.
+// Note: unlike lanes and load, flags are always applied, so a nil flags
+// value clears previously set flags.
+// If selfcheck is set, the status is re-exported and re-applied even when
+// nothing changed.
+func (id *Identity) MaintainStatus(lanes []*hub.Lane, load *int, flags []string, selfcheck bool) (changed bool, err error) {
+	id.Lock()
+	defer id.Unlock()
+
+	// Create a new status or make a copy of the status for editing.
+	var newStatus *hub.Status
+	if id.Hub.Status != nil {
+		newStatus = id.Hub.Status.Copy()
+	} else {
+		newStatus = &hub.Status{}
+	}
+
+	// Update software version.
+	if newStatus.Version != info.Version() {
+		newStatus.Version = info.Version()
+		changed = true
+	}
+
+	// Update keys (creates/rotates exchange keys as needed).
+	keysChanged, err := id.MaintainExchKeys(newStatus, time.Now())
+	if err != nil {
+		return false, fmt.Errorf("failed to maintain keys: %w", err)
+	}
+	if keysChanged {
+		changed = true
+	}
+
+	// Update lanes.
+	if lanes != nil && !hub.LanesEqual(newStatus.Lanes, lanes) {
+		newStatus.Lanes = lanes
+		changed = true
+	}
+
+	// Update load.
+	if load != nil && newStatus.Load != *load {
+		newStatus.Load = *load
+		changed = true
+	}
+
+	// Update flags.
+	if !hub.FlagsEqual(newStatus.Flags, flags) {
+		newStatus.Flags = flags
+		changed = true
+	}
+
+	// Update timestamp if something changed.
+	if changed {
+		newStatus.Timestamp = time.Now().Unix()
+	}
+
+	if changed || selfcheck {
+		// Export new data.
+		newStatusData, err := newStatus.Export(id.signingEnvelope())
+		if err != nil {
+			return false, fmt.Errorf("failed to export: %w", err)
+		}
+
+		// Apply the status as all other Hubs would in order to check if it's valid.
+		_, _, _, err = hub.ApplyStatus(id.Hub, newStatusData, conf.MainMapName, conf.MainMapScope, true)
+		if err != nil {
+			return false, fmt.Errorf("failed to apply new status: %w", err)
+		}
+		id.statusExportCache = newStatusData
+
+		// Save message to hub message storage.
+		err = hub.SaveHubMsg(id.ID, conf.MainMapName, hub.MsgTypeStatus, newStatusData)
+		if err != nil {
+			log.Warningf("spn/cabin: failed to save own new/updated status: %s", err)
+		}
+	}
+
+	return changed, nil
+}
+
+// MakeOfflineStatus creates and signs an offline status message.
+func (id *Identity) MakeOfflineStatus() (offlineStatusExport []byte, err error) {
+	// Build a status that only carries the offline flag.
+	offline := &hub.Status{
+		Timestamp: time.Now().Unix(),
+		Version:   info.Version(),
+		Flags:     []string{hub.FlagOffline},
+	}
+
+	// Sign and serialize it.
+	offlineStatusExport, err = offline.Export(id.signingEnvelope())
+	if err != nil {
+		return nil, fmt.Errorf("failed to export: %w", err)
+	}
+	return offlineStatusExport, nil
+}
+
+// signingEnvelope returns a jess envelope configured for signing messages
+// with the identity's private ID key.
+func (id *Identity) signingEnvelope() *jess.Envelope {
+	env := jess.NewUnconfiguredEnvelope()
+	env.SuiteID = jess.SuiteSignV1
+	env.Senders = []*jess.Signet{id.Signet}
+
+	return env
+}
+
+// ExportAnnouncement returns the cached signed announcement export.
+// It fails if no announcement has been exported yet.
+func (id *Identity) ExportAnnouncement() ([]byte, error) {
+	id.Lock()
+	defer id.Unlock()
+
+	if export := id.infoExportCache; export != nil {
+		return export, nil
+	}
+	return nil, errors.New("announcement not exported")
+}
+
+// ExportStatus returns the cached signed status export.
+// It fails if no status has been exported yet.
+func (id *Identity) ExportStatus() ([]byte, error) {
+	id.Lock()
+	defer id.Unlock()
+
+	if export := id.statusExportCache; export != nil {
+		return export, nil
+	}
+	return nil, errors.New("status not exported")
+}
+
+// SignHubMsg signs a data blob with the identity's private ID key using the
+// identity's signing envelope.
+func (id *Identity) SignHubMsg(data []byte) ([]byte, error) {
+	return hub.SignHubMsg(data, id.signingEnvelope(), false)
+}
+
+// GetSignet returns the private exchange key with the given ID.
+// Only private keys are served; requesting a recipient (public) key fails.
+func (id *Identity) GetSignet(keyID string, recipient bool) (*jess.Signet, error) {
+	if recipient {
+		return nil, errors.New("cabin.Identity only serves private keys")
+	}
+
+	id.Lock()
+	defer id.Unlock()
+
+	exchKey, ok := id.ExchKeys[keyID]
+	switch {
+	case !ok:
+		return nil, errors.New("the requested key does not exist")
+	case time.Now().After(exchKey.Expires) || exchKey.key == nil:
+		return nil, errors.New("the requested key has expired")
+	}
+	return exchKey.key, nil
+}
+
+// toHubKey exports the public part of an exchange key as a hub.Key.
+func (ek *ExchKey) toHubKey() (*hub.Key, error) {
+	if ek.key == nil {
+		return nil, errors.New("no key")
+	}
+
+	// Derive the public key from the private exchange key.
+	rcpt, err := ek.key.AsRecipient()
+	if err != nil {
+		return nil, err
+	}
+	if err := rcpt.StoreKey(); err != nil {
+		return nil, err
+	}
+
+	// Repackage for use in the hub status.
+	return &hub.Key{
+		Scheme:  rcpt.Scheme,
+		Key:     rcpt.Key,
+		Expires: ek.Expires.Unix(),
+	}, nil
+}
diff --git a/spn/cabin/identity_test.go b/spn/cabin/identity_test.go
new file mode 100644
index 00000000..6ad0530d
--- /dev/null
+++ b/spn/cabin/identity_test.go
@@ -0,0 +1,129 @@
+package cabin
+
+import (
+	"fmt"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+
+	"github.com/safing/portmaster/spn/conf"
+	"github.com/safing/portmaster/spn/hub"
+)
+
+// TestIdentity exercises the full identity lifecycle: creation, field
+// validation, maintenance idempotence, exporting, and the database
+// round-trip via Save and LoadIdentity.
+func TestIdentity(t *testing.T) {
+	t.Parallel()
+
+	// Register config options for public hub.
+	if err := prepPublicHubConfig(); err != nil {
+		t.Fatal(err)
+	}
+
+	// Create new identity.
+	identityTestKey := "core:spn/public/identity"
+	id, err := CreateIdentity(module.Ctx, conf.MainMapName)
+	if err != nil {
+		t.Fatal(err)
+	}
+	id.SetKey(identityTestKey)
+
+	// Check values
+	// Identity
+	assert.NotEmpty(t, id.ID, "id.ID must be set")
+	assert.NotEmpty(t, id.Map, "id.Map must be set")
+	assert.NotNil(t, id.Signet, "id.Signet must be set")
+	assert.NotNil(t, id.infoExportCache, "id.infoExportCache must be set")
+	assert.NotNil(t, id.statusExportCache, "id.statusExportCache must be set")
+	// Hub
+	assert.NotEmpty(t, id.Hub.ID, "hub.ID must be set")
+	assert.NotEmpty(t, id.Hub.Map, "hub.Map must be set")
+	assert.NotZero(t, id.Hub.FirstSeen, "hub.FirstSeen must be set")
+	// Info
+	assert.NotEmpty(t, id.Hub.Info.ID, "info.ID must be set")
+	assert.NotEqual(t, 0, id.Hub.Info.Timestamp, "info.Timestamp must be set")
+	assert.NotEqual(t, "", id.Hub.Info.Name, "info.Name must be set (to hostname)")
+	// Status
+	assert.NotEqual(t, 0, id.Hub.Status.Timestamp, "status.Timestamp must be set")
+	assert.NotEmpty(t, id.Hub.Status.Keys, "status.Keys must be set")
+
+	fmt.Printf("id: %+v\n", id)
+	fmt.Printf("id.hub: %+v\n", id.Hub)
+	fmt.Printf("id.Hub.Info: %+v\n", id.Hub.Info)
+	fmt.Printf("id.Hub.Status: %+v\n", id.Hub.Status)
+
+	// Maintenance already ran during creation, so nothing should change now.
+	changed, err := id.MaintainAnnouncement(nil, false)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if changed {
+		t.Error("unexpected change of announcement")
+	}
+	changed, err = id.MaintainStatus(nil, nil, nil, false)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if changed {
+		t.Error("unexpected change of status")
+	}
+
+	// Change lanes.
+	lanes := []*hub.Lane{
+		{
+			ID:       "A",
+			Capacity: 1,
+			Latency:  2,
+		},
+		{
+			ID:       "B",
+			Capacity: 3,
+			Latency:  4,
+		},
+		{
+			ID:       "C",
+			Capacity: 5,
+			Latency:  6,
+		},
+	}
+	changed, err = id.MaintainStatus(lanes, new(int), nil, false)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !changed {
+		t.Error("status should have changed")
+	}
+
+	// Change nothing: repeating the same update must report no change.
+	changed, err = id.MaintainStatus(lanes, new(int), nil, false)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if changed {
+		t.Error("unexpected change of status")
+	}
+
+	// Exporting
+	_, err = id.ExportAnnouncement()
+	if err != nil {
+		t.Fatal(err)
+	}
+	_, err = id.ExportStatus()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Save to and load from database.
+	err = id.Save()
+	if err != nil {
+		t.Fatal(err)
+	}
+	id2, changed, err := LoadIdentity(identityTestKey)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if changed {
+		t.Error("unexpected change")
+	}
+
+	// Check if they match
+	assert.Equal(t, id, id2, "identities should be equal")
+}
diff --git a/spn/cabin/keys.go b/spn/cabin/keys.go
new file mode 100644
index 00000000..67d203a4
--- /dev/null
+++ b/spn/cabin/keys.go
@@ -0,0 +1,179 @@
+package cabin
+
+import (
+	"encoding/base64"
+	"errors"
+	"fmt"
+	"time"
+
+	"github.com/safing/jess"
+	"github.com/safing/jess/tools"
+	"github.com/safing/portbase/log"
+	"github.com/safing/portbase/rng"
+	"github.com/safing/portmaster/spn/hub"
+)
+
+// providedExchKeyScheme describes a jess exchange key scheme offered by
+// this Hub, together with its resolved tool.
+type providedExchKeyScheme struct {
+	id            string
+	securityLevel int //nolint:structcheck // TODO
+	tool          *tools.Tool
+}
+
+var (
+	// validFor defines how long keys are valid for use by clients.
+	validFor = 48 * time.Hour // 2 days
+	// renewBeforeExpiry defines the duration how long before expiry keys should be renewed.
+	renewBeforeExpiry = 24 * time.Hour // 1 day
+
+	// burnAfter defines how long after expiry keys are burnt/deleted.
+	burnAfter = 12 * time.Hour // 1/2 day
+	// reuseAfter defines how long IDs should be blocked after expiry (and not be reused for new keys).
+	reuseAfter = 2 * 7 * 24 * time.Hour // 2 weeks
+
+	// provideExchKeySchemes defines the jess tools for creating exchange keys.
+	// The tool fields are resolved by initProvidedExchKeySchemes.
+	provideExchKeySchemes = []*providedExchKeyScheme{
+		{
+			id:            "ECDH-X25519",
+			securityLevel: 128, // informative only, security level of ECDH-X25519 is fixed
+		},
+		// TODO: test with rsa keys
+	}
+)
+
+func initProvidedExchKeySchemes() error {
+	for _, eks := range provideExchKeySchemes {
+		tool, err := tools.Get(eks.id)
+		if err != nil {
+			return err
+		}
+		eks.tool = tool
+	}
+	return nil
+}
+
+// MaintainExchKeys maintains the exchange keys, creating new ones and
+// deprecating and deleting old ones, and exports the current keys into
+// newStatus. Callers are expected to hold the identity lock (see
+// MaintainStatus).
+func (id *Identity) MaintainExchKeys(newStatus *hub.Status, now time.Time) (changed bool, err error) {
+	// create Keys map
+	if id.ExchKeys == nil {
+		id.ExchKeys = make(map[string]*ExchKey)
+	}
+
+	// Lifecycle management: burn private key material after burnAfter past
+	// expiry, and free the key ID for reuse after reuseAfter past expiry.
+	for keyID, exchKey := range id.ExchKeys {
+		if exchKey.key != nil && now.After(exchKey.Expires.Add(burnAfter)) {
+			// delete key
+			err := exchKey.tool.StaticLogic.BurnKey(exchKey.key)
+			if err != nil {
+				log.Warningf(
+					"spn/cabin: failed to burn key %s (%s) of %s: %s",
+					keyID,
+					exchKey.tool.Info.Name,
+					id.Hub.ID,
+					err,
+				)
+			}
+			// remove reference
+			exchKey.key = nil
+		}
+		if now.After(exchKey.Expires.Add(reuseAfter)) {
+			// remove key
+			delete(id.ExchKeys, keyID)
+		}
+	}
+
+	// For every provided scheme, ensure there is a key that is still far
+	// enough from expiry (more than renewBeforeExpiry remaining).
+	for _, eks := range provideExchKeySchemes {
+		found := false
+		for _, exchKey := range id.ExchKeys {
+			if exchKey.key != nil &&
+				exchKey.key.Scheme == eks.id &&
+				now.Before(exchKey.Expires.Add(-renewBeforeExpiry)) {
+				found = true
+				break
+			}
+		}
+
+		if !found {
+			err := id.createExchKey(eks, now)
+			if err != nil {
+				return false, fmt.Errorf("failed to create %s exchange key: %w", eks.tool.Info.Name, err)
+			}
+			changed = true
+		}
+	}
+
+	// export most recent keys to HubStatus
+	if changed || len(newStatus.Keys) == 0 {
+		// reset
+		newStatus.Keys = make(map[string]*hub.Key)
+
+		// find longest valid key for every provided scheme
+		for _, eks := range provideExchKeySchemes {
+			// find key of scheme that is valid the longest
+			longestValid := &ExchKey{
+				Expires: now,
+			}
+			for _, exchKey := range id.ExchKeys {
+				if exchKey.key != nil &&
+					exchKey.key.Scheme == eks.id &&
+					exchKey.Expires.After(longestValid.Expires) {
+					longestValid = exchKey
+				}
+			}
+
+			// check result
+			if longestValid.key == nil {
+				log.Warningf("spn/cabin: could not find export candidate for exchange key scheme %s", eks.id)
+				continue
+			}
+
+			// export
+			hubKey, err := longestValid.toHubKey()
+			if err != nil {
+				return false, fmt.Errorf("failed to export %s exchange key: %w", longestValid.tool.Info.Name, err)
+			}
+			// add
+			newStatus.Keys[longestValid.key.ID] = hubKey
+		}
+	}
+
+	return changed, nil
+}
+
+// createExchKey generates a new exchange key for the given scheme and adds
+// it to the identity's key map under a fresh random key ID. The key is
+// valid from now until now plus validFor.
+func (id *Identity) createExchKey(eks *providedExchKeyScheme, now time.Time) error {
+	// Find an unused random key ID.
+	// Fix: keyID is only set when a free ID was found. Previously, a
+	// colliding candidate was kept in keyID, so the exhaustion check below
+	// could never trigger and an existing key could be overwritten.
+	var keyID string
+	for i := 0; i < 1000000; i++ { // not forever
+		// generate new ID
+		b, err := rng.Bytes(3)
+		if err != nil {
+			return fmt.Errorf("failed to get random data for key ID: %w", err)
+		}
+		candidate := base64.RawURLEncoding.EncodeToString(b)
+		if _, exists := id.ExchKeys[candidate]; !exists {
+			keyID = candidate
+			break
+		}
+	}
+	if keyID == "" {
+		return errors.New("unable to find available exchange key ID")
+	}
+
+	// Generate the private exchange key.
+	signet := jess.NewSignetBase(eks.tool)
+	signet.ID = keyID
+	// TODO: use security level for key generation
+	if err := signet.GenerateKey(); err != nil {
+		return fmt.Errorf("failed to get new exchange key: %w", err)
+	}
+
+	// Register the new key in the identity's key map.
+	id.ExchKeys[keyID] = &ExchKey{
+		Created: now,
+		Expires: now.Add(validFor),
+		key:     signet,
+		tool:    eks.tool,
+	}
+	return nil
+}
diff --git a/spn/cabin/keys_test.go b/spn/cabin/keys_test.go
new file mode 100644
index 00000000..c1622fe6
--- /dev/null
+++ b/spn/cabin/keys_test.go
@@ -0,0 +1,43 @@
+package cabin
+
+import (
+	"testing"
+	"time"
+
+	"github.com/safing/portmaster/spn/conf"
+)
+
+// TestKeyMaintenance simulates hourly maintenance ticks over many
+// iterations and checks that key rotation stays within expected bounds.
+func TestKeyMaintenance(t *testing.T) {
+	t.Parallel()
+
+	id, err := CreateIdentity(module.Ctx, conf.MainMapName)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	iterations := 1000
+	changeCnt := 0
+
+	now := time.Now()
+	for i := 0; i < iterations; i++ {
+		changed, err := id.MaintainExchKeys(id.Hub.Status, now)
+		if err != nil {
+			t.Fatal(err)
+		}
+		if changed {
+			changeCnt++
+			t.Logf("===== exchange keys updated at %s:\n", now)
+			for keyID, exchKey := range id.ExchKeys {
+				t.Logf("[%s] %s %v\n", exchKey.Created, keyID, exchKey.key)
+			}
+		}
+		now = now.Add(1 * time.Hour)
+	}
+
+	// Fix: guard against division by zero below - zero rotations over the
+	// whole simulation would itself be a bug.
+	if changeCnt == 0 {
+		t.Fatal("exchange keys were never updated")
+	}
+	if iterations/changeCnt > 25 { // one new key every 24 hours/ticks
+		t.Fatal("more changes than expected")
+	}
+	if len(id.ExchKeys) > 17 { // one new key every day for two weeks + 3 in use
+		t.Fatal("more keys than expected")
+	}
+}
diff --git a/spn/cabin/module.go b/spn/cabin/module.go
new file mode 100644
index 00000000..8644502f
--- /dev/null
+++ b/spn/cabin/module.go
@@ -0,0 +1,26 @@
+package cabin
+
+import (
+	"github.com/safing/portbase/modules"
+	"github.com/safing/portmaster/spn/conf"
+)
+
+// module is the cabin module instance.
+var module *modules.Module
+
+func init() {
+	// Register the cabin module with only a prep function (no start/stop);
+	// it depends on the base and rng modules.
+	module = modules.Register("cabin", prep, nil, nil, "base", "rng")
+}
+
+// prep initializes the exchange key schemes and, on public Hubs, registers
+// the public Hub config options.
+func prep() error {
+	if err := initProvidedExchKeySchemes(); err != nil {
+		return err
+	}
+
+	if !conf.PublicHub() {
+		return nil
+	}
+	return prepPublicHubConfig()
+}
diff --git a/spn/cabin/module_test.go b/spn/cabin/module_test.go
new file mode 100644
index 00000000..c2d66ed1
--- /dev/null
+++ b/spn/cabin/module_test.go
@@ -0,0 +1,13 @@
+package cabin
+
+import (
+	"testing"
+
+	"github.com/safing/portmaster/service/core/pmtesting"
+	"github.com/safing/portmaster/spn/conf"
+)
+
+// TestMain runs the cabin module tests with public Hub mode enabled, as
+// parts of the module (see prep) are only active on public Hubs.
+func TestMain(m *testing.M) {
+	conf.EnablePublicHub(true)
+	pmtesting.TestMain(m, module)
+}
diff --git a/spn/cabin/verification.go b/spn/cabin/verification.go
new file mode 100644
index 00000000..07a993ea
--- /dev/null
+++ b/spn/cabin/verification.go
@@ -0,0 +1,157 @@
+package cabin
+
+import (
+	"crypto/subtle"
+	"errors"
+	"fmt"
+
+	"github.com/safing/jess"
+	"github.com/safing/portbase/formats/dsd"
+	"github.com/safing/portbase/rng"
+	"github.com/safing/portmaster/spn/hub"
+)
+
+var (
+	// verificationChallengeSize is the size of newly generated challenges.
+	verificationChallengeSize    = 32
+	// verificationChallengeMinSize is the minimum accepted challenge size.
+	verificationChallengeMinSize = 16
+	// verificationSigningSuite is the jess suite used to sign responses.
+	verificationSigningSuite     = jess.SuiteSignV1
+	// verificationRequirements relaxes all requirements except, presumably,
+	// sender authentication - responses are signed, not encrypted.
+	verificationRequirements     = jess.NewRequirements().
+					Remove(jess.Confidentiality).
+					Remove(jess.Integrity).
+					Remove(jess.RecipientAuthentication)
+)
+
+// Verification is used to verify certain aspects of another Hub.
+type Verification struct {
+	// Challenge is a random value chosen by the client.
+	Challenge []byte `json:"c"`
+	// Purpose defines the purpose of the verification. Protects against using verification for other purposes.
+	Purpose string `json:"p"`
+	// ClientReference is an optional field for exchanging metadata about the client. Protects against forwarding/relay attacks.
+	ClientReference string `json:"cr"`
+	// ServerReference is an optional field for exchanging metadata about the server. Protects against forwarding/relay attacks.
+	ServerReference string `json:"sr"`
+}
+
+// CreateVerificationRequest creates a new verification request with the
+// given purpose and references. It returns the verification state (kept by
+// the client) and the serialized request to send to the server.
+func CreateVerificationRequest(purpose, clientReference, serverReference string) (v *Verification, request []byte, err error) {
+	// Pick a fresh random challenge for this request.
+	challenge, err := rng.Bytes(verificationChallengeSize)
+	if err != nil {
+		return nil, nil, fmt.Errorf("failed to generate challenge: %w", err)
+	}
+
+	v = &Verification{
+		Challenge:       challenge,
+		Purpose:         purpose,
+		ClientReference: clientReference,
+	}
+
+	// Serialize before the server reference is set: the server reference is
+	// not transmitted, the server has to supply it on its own.
+	if request, err = dsd.Dump(v, dsd.JSON); err != nil {
+		return nil, nil, fmt.Errorf("failed to serialize verification request: %w", err)
+	}
+	v.ServerReference = serverReference
+
+	return v, request, nil
+}
+
+// SignVerificationRequest signs a verification request.
+// The purpose and references must match the request, else the verification
+// will fail.
+func (id *Identity) SignVerificationRequest(request []byte, purpose, clientReference, serverReference string) (response []byte, err error) {
+	// Parse request.
+	v := new(Verification)
+	_, err = dsd.Load(request, v)
+	if err != nil {
+		return nil, fmt.Errorf("failed to parse request: %w", err)
+	}
+
+	// Validate request against the expected values.
+	if len(v.Challenge) < verificationChallengeMinSize {
+		return nil, errors.New("challenge too small")
+	}
+	if v.Purpose != purpose {
+		return nil, errors.New("purpose mismatch")
+	}
+	if v.ClientReference != clientReference {
+		return nil, errors.New("client reference mismatch")
+	}
+
+	// Assign server reference and serialize; the client will check this
+	// value against its own expected server reference.
+	v.ServerReference = serverReference
+	dataToSign, err := dsd.Dump(v, dsd.JSON)
+	if err != nil {
+		return nil, fmt.Errorf("failed to serialize verification response: %w", err)
+	}
+
+	// Sign response with the identity's private ID key.
+	e := jess.NewUnconfiguredEnvelope()
+	e.SuiteID = verificationSigningSuite
+	e.Senders = []*jess.Signet{id.Signet}
+	jession, err := e.Correspondence(nil)
+	if err != nil {
+		return nil, fmt.Errorf("failed to setup signer: %w", err)
+	}
+	letter, err := jession.Close(dataToSign)
+	if err != nil {
+		return nil, fmt.Errorf("failed to sign: %w", err)
+	}
+
+	// Serialize and return.
+	signedResponse, err := letter.ToDSD(dsd.JSON)
+	if err != nil {
+		return nil, fmt.Errorf("failed to serialize letter: %w", err)
+	}
+
+	return signedResponse, nil
+}
+
+// Verify verifies the verification response and checks if everything is valid.
+// The signature is checked against the given Hub's public key, then all
+// fields of the response are compared against the original request.
+func (v *Verification) Verify(response []byte, h *hub.Hub) error {
+	// Parse response.
+	letter, err := jess.LetterFromDSD(response)
+	if err != nil {
+		return fmt.Errorf("failed to parse response: %w", err)
+	}
+
+	// Verify the signature using only the Hub's public key as trust anchor.
+	responseData, err := letter.Open(
+		verificationRequirements,
+		&hub.SingleTrustStore{
+			Signet: h.PublicKey,
+		},
+	)
+	if err != nil {
+		return fmt.Errorf("failed to verify response: %w", err)
+	}
+
+	// Parse verified response.
+	responseV := new(Verification)
+	_, err = dsd.Load(responseData, responseV)
+	if err != nil {
+		return fmt.Errorf("failed to parse verified response: %w", err)
+	}
+
+	// Validate the response against the original request.
+	// Comparisons use subtle.ConstantTimeCompare so that comparison timing
+	// does not leak information about the expected values.
+	if subtle.ConstantTimeCompare(v.Challenge, responseV.Challenge) != 1 {
+		return errors.New("challenge mismatch")
+	}
+	if subtle.ConstantTimeCompare([]byte(v.Purpose), []byte(responseV.Purpose)) != 1 {
+		return errors.New("purpose mismatch")
+	}
+	if subtle.ConstantTimeCompare([]byte(v.ClientReference), []byte(responseV.ClientReference)) != 1 {
+		return errors.New("client reference mismatch")
+	}
+	if subtle.ConstantTimeCompare([]byte(v.ServerReference), []byte(responseV.ServerReference)) != 1 {
+		return errors.New("server reference mismatch")
+	}
+
+	return nil
+}
diff --git a/spn/cabin/verification_test.go b/spn/cabin/verification_test.go
new file mode 100644
index 00000000..cb743a3d
--- /dev/null
+++ b/spn/cabin/verification_test.go
@@ -0,0 +1,127 @@
+package cabin
+
+import (
+	"fmt"
+	"testing"
+)
+
+// TestVerification exercises the verification round trip: a matching
+// request/response pair must verify, while any mismatch in purpose, client
+// reference, server reference, or challenge must fail.
+func TestVerification(t *testing.T) {
+	t.Parallel()
+
+	id, err := CreateIdentity(module.Ctx, "test")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Happy path: signer uses the exact values from the request.
+	if err := testVerificationWith(
+		t, id,
+		"a", "b", "c",
+		"a", "b", "c",
+		"", "", "", nil,
+	); err != nil {
+		t.Fatal(err)
+	}
+
+	// The signer deviates from the requested values (second triple differs).
+	if err := testVerificationWith(
+		t, id,
+		"a", "b", "c",
+		"x", "b", "c",
+		"", "", "", nil,
+	); err == nil {
+		t.Fatal("should fail on purpose mismatch")
+	}
+
+	if err := testVerificationWith(
+		t, id,
+		"a", "b", "c",
+		"a", "x", "c",
+		"", "", "", nil,
+	); err == nil {
+		t.Fatal("should fail on client ref mismatch")
+	}
+
+	if err := testVerificationWith(
+		t, id,
+		"a", "b", "c",
+		"a", "b", "x",
+		"", "", "", nil,
+	); err == nil {
+		t.Fatal("should fail on server ref mismatch")
+	}
+
+	// Simulated tampering: the local verification state is overwritten
+	// (mitm* values) after the response was signed.
+	if err := testVerificationWith(
+		t, id,
+		"a", "b", "c",
+		"a", "b", "c",
+		"x", "", "", nil,
+	); err == nil {
+		t.Fatal("should fail on purpose mismatch")
+	}
+
+	if err := testVerificationWith(
+		t, id,
+		"a", "b", "c",
+		"a", "b", "c",
+		"", "x", "", nil,
+	); err == nil {
+		t.Fatal("should fail on client ref mismatch")
+	}
+
+	if err := testVerificationWith(
+		t, id,
+		"a", "b", "c",
+		"a", "b", "c",
+		"", "", "x", nil,
+	); err == nil {
+		t.Fatal("should fail on server ref mismatch")
+	}
+
+	if err := testVerificationWith(
+		t, id,
+		"a", "b", "c",
+		"a", "b", "c",
+		"", "", "", []byte{1, 2, 3, 4},
+	); err == nil {
+		t.Fatal("should fail on challenge mismatch")
+	}
+}
+
+// testVerificationWith runs a single verification round trip.
+//
+// The first triple (purpose1, clientRef1, serverRef1) creates the request,
+// the second triple is used when signing the response, and non-zero mitm*
+// values overwrite the local verification state before verifying - which
+// simulates in-transit tampering. A nil return means verification succeeded.
+func testVerificationWith(
+	t *testing.T, id *Identity,
+	purpose1, clientRef1, serverRef1 string, //nolint:unparam
+	purpose2, clientRef2, serverRef2 string,
+	mitmPurpose, mitmClientRef, mitmServerRef string,
+	mitmChallenge []byte,
+) error {
+	t.Helper()
+
+	v, request, err := CreateVerificationRequest(purpose1, clientRef1, serverRef1)
+	if err != nil {
+		return fmt.Errorf("failed to create verification request: %w", err)
+	}
+
+	response, err := id.SignVerificationRequest(request, purpose2, clientRef2, serverRef2)
+	if err != nil {
+		return fmt.Errorf("failed to sign verification response: %w", err)
+	}
+
+	// Apply simulated man-in-the-middle modifications, if any.
+	if mitmPurpose != "" {
+		v.Purpose = mitmPurpose
+	}
+	if mitmClientRef != "" {
+		v.ClientReference = mitmClientRef
+	}
+	if mitmServerRef != "" {
+		v.ServerReference = mitmServerRef
+	}
+	if mitmChallenge != nil {
+		v.Challenge = mitmChallenge
+	}
+
+	err = v.Verify(response, id.Hub)
+	if err != nil {
+		return fmt.Errorf("failed to verify: %w", err)
+	}
+
+	return nil
+}
diff --git a/spn/captain/api.go b/spn/captain/api.go
new file mode 100644
index 00000000..dcc412d8
--- /dev/null
+++ b/spn/captain/api.go
@@ -0,0 +1,68 @@
+package captain
+
+import (
+	"errors"
+	"fmt"
+
+	"github.com/safing/portbase/api"
+	"github.com/safing/portbase/database"
+	"github.com/safing/portbase/database/query"
+	"github.com/safing/portbase/modules"
+)
+
+const (
+	// apiPathForSPNReInit is the API endpoint path for re-initializing the SPN.
+	apiPathForSPNReInit = "spn/reinit"
+)
+
+// registerAPIEndpoints registers the captain module's HTTP API endpoints.
+func registerAPIEndpoints() error {
+	if err := api.RegisterEndpoint(api.Endpoint{
+		Path:  apiPathForSPNReInit,
+		Write: api.PermitAdmin,
+		// BelongsTo:   module, // Do not attach to module, as this must run outside of the module.
+		ActionFunc:  handleReInit,
+		Name:        "Re-initialize SPN",
+		Description: "Stops the SPN, resets all caches and starts it again. The SPN account and settings are not changed.",
+	}); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// handleReInit stops the SPN module, purges the SPN cache database and starts
+// the module again. SPN account data and settings are left untouched.
+func handleReInit(ar *api.Request) (msg string, err error) {
+	// Disable module and check
+	changed := module.Disable()
+	if !changed {
+		return "", errors.New("can only re-initialize when the SPN is enabled")
+	}
+
+	// Run module manager.
+	// This applies the disabled state and actually stops the module.
+	err = modules.ManageModules()
+	if err != nil {
+		return "", fmt.Errorf("failed to stop SPN: %w", err)
+	}
+
+	// Delete SPN cache.
+	db := database.NewInterface(&database.Options{
+		Local:    true,
+		Internal: true,
+	})
+	deletedRecords, err := db.Purge(ar.Context(), query.New("cache:spn/"))
+	if err != nil {
+		// NOTE(review): the module remains disabled if the purge fails -
+		// confirm this is the intended behavior.
+		return "", fmt.Errorf("failed to delete SPN cache: %w", err)
+	}
+
+	// Enable module.
+	module.Enable()
+
+	// Run module manager.
+	// This applies the enabled state and restarts the module.
+	err = modules.ManageModules()
+	if err != nil {
+		return "", fmt.Errorf("failed to start SPN after cache reset: %w", err)
+	}
+
+	return fmt.Sprintf(
+		"Completed SPN re-initialization and deleted %d cache records in the process.",
+		deletedRecords,
+	), nil
+}
diff --git a/spn/captain/bootstrap.go b/spn/captain/bootstrap.go
new file mode 100644
index 00000000..c7096116
--- /dev/null
+++ b/spn/captain/bootstrap.go
@@ -0,0 +1,152 @@
+package captain
+
+import (
+	"errors"
+	"flag"
+	"fmt"
+	"io/fs"
+	"os"
+
+	"github.com/safing/portbase/formats/dsd"
+	"github.com/safing/portbase/log"
+	"github.com/safing/portmaster/spn/conf"
+	"github.com/safing/portmaster/spn/hub"
+	"github.com/safing/portmaster/spn/navigator"
+)
+
+// BootstrapFile is used for sideloading bootstrap data.
+type BootstrapFile struct {
+	// Main holds the bootstrap entry for the main SPN map.
+	Main BootstrapFileEntry
+}
+
+// BootstrapFileEntry is the bootstrap data structure for one map.
+type BootstrapFileEntry struct {
+	// Hubs holds bootstrap hub transport addresses with the hub ID attached.
+	Hubs []string
+}
+
+var (
+	// Command line flag values; registered in init below.
+	bootstrapHubFlag  string
+	bootstrapFileFlag string
+)
+
+// init registers the bootstrap-related command line flags.
+func init() {
+	flag.StringVar(&bootstrapHubFlag, "bootstrap-hub", "", "transport address of hub for bootstrapping with the hub ID in the fragment")
+	flag.StringVar(&bootstrapFileFlag, "bootstrap-file", "", "bootstrap file containing bootstrap hubs - will be initialized if running a public hub and it doesn't exist")
+}
+
+// prepBootstrapHubFlag checks the bootstrap-hub argument if it is valid.
+// It only validates; the flag is applied later by processBootstrapHubFlag.
+func prepBootstrapHubFlag() error {
+	if bootstrapHubFlag != "" {
+		_, _, _, err := hub.ParseBootstrapHub(bootstrapHubFlag)
+		return err
+	}
+	return nil
+}
+
+// processBootstrapHubFlag processes the bootstrap-hub argument by adding the
+// given hub to the main navigator map as a bootstrap hub.
+func processBootstrapHubFlag() error {
+	if bootstrapHubFlag != "" {
+		return navigator.Main.AddBootstrapHubs([]string{bootstrapHubFlag})
+	}
+	return nil
+}
+
+// processBootstrapFileFlag processes the bootstrap-file argument: if the file
+// exists it is loaded, otherwise it is created (public hubs only).
+func processBootstrapFileFlag() error {
+	if bootstrapFileFlag == "" {
+		return nil
+	}
+
+	_, err := os.Stat(bootstrapFileFlag)
+	if err != nil {
+		if errors.Is(err, fs.ErrNotExist) {
+			// File does not exist yet - initialize it with our own identity.
+			return createBootstrapFile(bootstrapFileFlag)
+		}
+		return fmt.Errorf("failed to access bootstrap hub file: %w", err)
+	}
+
+	return loadBootstrapFile(bootstrapFileFlag)
+}
+
+// bootstrapWithUpdates loads bootstrap hubs from the updates server and imports them.
+// It is mutually exclusive with the bootstrap-file argument.
+func bootstrapWithUpdates() error {
+	if bootstrapFileFlag != "" {
+		return errors.New("using the bootstrap-file argument disables bootstrapping via the update system")
+	}
+
+	return updateSPNIntel(module.Ctx, nil)
+}
+
+// loadBootstrapFile loads a file with bootstrap hub entries and imports them
+// into the main navigator map. It fails if the file holds no hubs for the
+// main map.
+func loadBootstrapFile(filename string) (err error) {
+	// Load bootstrap file from disk and parse it.
+	data, err := os.ReadFile(filename)
+	if err != nil {
+		return fmt.Errorf("failed to load bootstrap file: %w", err)
+	}
+	bootstrapFile := &BootstrapFile{}
+	_, err = dsd.Load(data, bootstrapFile)
+	if err != nil {
+		return fmt.Errorf("failed to parse bootstrap file: %w", err)
+	}
+	if len(bootstrapFile.Main.Hubs) == 0 {
+		return errors.New("bootstrap holds no hubs for main map")
+	}
+
+	// Add Hubs to map.
+	err = navigator.Main.AddBootstrapHubs(bootstrapFile.Main.Hubs)
+	if err == nil {
+		log.Infof("spn/captain: loaded bootstrap file %s", filename)
+	}
+	return err
+}
+
+// createBootstrapFile save a bootstrap hub file with an entry of the public identity.
+// It is a no-op on non-public hubs, as only public hubs have an identity to
+// advertise for bootstrapping.
+func createBootstrapFile(filename string) error {
+	if !conf.PublicHub() {
+		log.Infof("spn/captain: skipped writing a bootstrap hub file, as this is not a public hub")
+		return nil
+	}
+
+	// create bootstrap hub
+	if len(publicIdentity.Hub.Info.Transports) == 0 {
+		return errors.New("public identity has no transports available")
+	}
+	// parse first transport
+	t, err := hub.ParseTransport(publicIdentity.Hub.Info.Transports[0])
+	if err != nil {
+		return fmt.Errorf("failed to parse transport of public identity: %w", err)
+	}
+	// add IP address (IPv4 preferred; IPv6 is bracketed for the address form)
+	switch {
+	case publicIdentity.Hub.Info.IPv4 != nil:
+		t.Domain = publicIdentity.Hub.Info.IPv4.String()
+	case publicIdentity.Hub.Info.IPv6 != nil:
+		t.Domain = "[" + publicIdentity.Hub.Info.IPv6.String() + "]"
+	default:
+		return errors.New("public identity has no IP address available")
+	}
+	// add Hub ID
+	t.Option = publicIdentity.Hub.ID
+	// put together
+	bs := &BootstrapFile{
+		Main: BootstrapFileEntry{
+			Hubs: []string{t.String()},
+		},
+	}
+
+	// serialize
+	fileData, err := dsd.Dump(bs, dsd.JSON)
+	if err != nil {
+		return err
+	}
+
+	// save to disk
+	err = os.WriteFile(filename, fileData, 0o0664) //nolint:gosec // Should be able to be read by others.
+	if err != nil {
+		return err
+	}
+
+	log.Infof("spn/captain: created bootstrap file %s", filename)
+	return nil
+}
diff --git a/spn/captain/client.go b/spn/captain/client.go
new file mode 100644
index 00000000..b30e4e98
--- /dev/null
+++ b/spn/captain/client.go
@@ -0,0 +1,506 @@
+package captain
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"time"
+
+	"github.com/tevino/abool"
+
+	"github.com/safing/portbase/log"
+	"github.com/safing/portbase/notifications"
+	"github.com/safing/portmaster/service/netenv"
+	"github.com/safing/portmaster/service/network/netutils"
+	"github.com/safing/portmaster/spn/access"
+	"github.com/safing/portmaster/spn/crew"
+	"github.com/safing/portmaster/spn/docks"
+	"github.com/safing/portmaster/spn/navigator"
+	"github.com/safing/portmaster/spn/terminal"
+)
+
+var (
+	// ready signals whether the SPN client is fully connected and usable.
+	ready = abool.New()
+
+	// Reusable notification actions for login-related notifications.
+	spnLoginButton = notifications.Action{
+		Text:    "Login",
+		Type:    notifications.ActionTypeOpenPage,
+		Payload: "spn",
+	}
+	spnOpenAccountPage = notifications.Action{
+		Text:    "Open Account Page",
+		Type:    notifications.ActionTypeOpenURL,
+		Payload: "https://account.safing.io",
+	}
+)
+
+// ClientReady signifies if the SPN client is fully ready to handle connections.
+func ClientReady() bool {
+	return ready.IsSet()
+}
+
+type (
+	// clientComponentFunc is one step of the client connection state machine.
+	clientComponentFunc   func(ctx context.Context) clientComponentResult
+	// clientComponentResult tells the state machine how to proceed.
+	clientComponentResult uint8
+)
+
+const (
+	clientResultOk        clientComponentResult = iota // Continue and clean module status.
+	clientResultRetry                                  // Go back to start of current step, don't clear module status.
+	clientResultReconnect                              // Stop current connection and start from zero.
+	clientResultShutdown                               // SPN Module is shutting down.
+)
+
+var (
+	// Timing knobs for the client connection/health-check loop.
+	clientNetworkChangedFlag               = netenv.GetNetworkChangedFlag()
+	clientIneligibleAccountUpdateDelay     = 1 * time.Minute
+	clientRetryConnectBackoffDuration      = 5 * time.Second
+	clientInitialHealthCheckDelay          = 10 * time.Second
+	clientHealthCheckTickDuration          = 1 * time.Minute
+	clientHealthCheckTickDurationSleepMode = 5 * time.Minute
+	clientHealthCheckTimeout               = 15 * time.Second
+
+	// clientHealthCheckTrigger requests an out-of-band health check; buffered
+	// with size 1 so triggers coalesce instead of blocking.
+	clientHealthCheckTrigger = make(chan struct{}, 1)
+	// lastHealthCheck records when the home hub was last successfully pinged.
+	lastHealthCheck          time.Time
+)
+
+// triggerClientHealthCheck requests an immediate health check.
+// The non-blocking send coalesces concurrent triggers into one.
+func triggerClientHealthCheck() {
+	select {
+	case clientHealthCheckTrigger <- struct{}{}:
+	default:
+	}
+}
+
+// clientManager is the long-running worker that connects the client to the
+// SPN and keeps the connection healthy. It runs two nested loops: the outer
+// "reconnect" loop (re)establishes a connection to a home hub, the inner loop
+// periodically checks connection health and account status until a reconnect
+// or shutdown is required.
+func clientManager(ctx context.Context) error {
+	// Always leave a clean state behind: mark not ready, reset status and
+	// tear down the home hub connection.
+	defer func() {
+		ready.UnSet()
+		netenv.ConnectedToSPN.UnSet()
+		resetSPNStatus(StatusDisabled, true)
+		module.Resolve("")
+		clientStopHomeHub(ctx)
+	}()
+
+	module.Hint(
+		"spn:establishing-home-hub",
+		"Connecting to SPN...",
+		"Connecting to the SPN network is in progress.",
+	)
+
+	// TODO: When we are starting and the SPN module is faster online than the
+	// nameserver, then updating the account will fail as the DNS query is
+	// redirected to a closed port.
+	// We also can't add the nameserver as a module dependency, as the nameserver
+	// is not part of the server.
+	select {
+	case <-time.After(1 * time.Second):
+	case <-ctx.Done():
+		return nil
+	}
+
+	// NOTE(review): the ticker is never explicitly stopped - confirm that
+	// module.NewSleepyTicker cleans up when the worker returns.
+	healthCheckTicker := module.NewSleepyTicker(clientHealthCheckTickDuration, clientHealthCheckTickDurationSleepMode)
+
+reconnect:
+	for {
+		// Check if we are shutting down.
+		select {
+		case <-ctx.Done():
+			return nil
+		default:
+		}
+
+		// Reset SPN status.
+		if ready.SetToIf(true, false) {
+			netenv.ConnectedToSPN.UnSet()
+			log.Info("spn/captain: client not ready")
+		}
+		resetSPNStatus(StatusConnecting, true)
+
+		// Check everything and connect to the SPN.
+		for _, clientFunc := range []clientComponentFunc{
+			clientStopHomeHub,
+			clientCheckNetworkReady,
+			clientCheckAccountAndTokens,
+			clientConnectToHomeHub,
+			clientSetActiveConnectionStatus,
+		} {
+			switch clientFunc(ctx) {
+			case clientResultOk:
+				// Continue
+			case clientResultRetry, clientResultReconnect:
+				// Wait for a short time to not loop too quickly.
+				select {
+				case <-time.After(clientRetryConnectBackoffDuration):
+					continue reconnect
+				case <-ctx.Done():
+					return nil
+				}
+			case clientResultShutdown:
+				return nil
+			}
+		}
+
+		log.Info("spn/captain: client is ready")
+		ready.Set()
+		netenv.ConnectedToSPN.Set()
+
+		module.TriggerEvent(SPNConnectedEvent, nil)
+		module.StartWorker("update quick setting countries", navigator.Main.UpdateConfigQuickSettings)
+
+		// Reset last health check value, as we have just connected.
+		lastHealthCheck = time.Now()
+
+		// Back off before starting initial health checks.
+		select {
+		case <-time.After(clientInitialHealthCheckDelay):
+		case <-ctx.Done():
+			return nil
+		}
+
+		for {
+			// Check health of the current SPN connection and monitor the user status.
+		maintainers:
+			for _, clientFunc := range []clientComponentFunc{
+				clientCheckHomeHubConnection,
+				clientCheckAccountAndTokens,
+				clientSetActiveConnectionStatus,
+			} {
+				switch clientFunc(ctx) {
+				case clientResultOk:
+					// Continue
+				case clientResultRetry:
+					// Abort and wait for the next run.
+					break maintainers
+				case clientResultReconnect:
+					continue reconnect
+				case clientResultShutdown:
+					return nil
+				}
+			}
+
+			// Wait for signal to run maintenance again.
+			select {
+			case <-healthCheckTicker.Wait():
+			case <-clientHealthCheckTrigger:
+			case <-crew.ConnectErrors():
+			case <-clientNetworkChangedFlag.Signal():
+				clientNetworkChangedFlag.Refresh()
+			case <-ctx.Done():
+				return nil
+			}
+		}
+	}
+}
+
+// clientCheckNetworkReady checks whether the device is sufficiently online to
+// connect to the SPN. Offline/limited states trigger a short backoff retry.
+func clientCheckNetworkReady(ctx context.Context) clientComponentResult {
+	// Check if we are online enough for connecting.
+	switch netenv.GetOnlineStatus() { //nolint:exhaustive
+	case netenv.StatusOffline,
+		netenv.StatusLimited:
+		select {
+		case <-ctx.Done():
+			return clientResultShutdown
+		case <-time.After(1 * time.Second):
+			return clientResultRetry
+		}
+	}
+
+	return clientResultOk
+}
+
+// DisableAccount disables using any account related SPN functionality.
+// Attempts to use the same will result in errors.
+var DisableAccount bool
+
+// clientCheckAccountAndTokens verifies that the user is logged in, eligible
+// to use the SPN, and that enough access tokens are available. It attaches
+// user-facing notifications to the module on any failure.
+func clientCheckAccountAndTokens(ctx context.Context) clientComponentResult {
+	if DisableAccount {
+		return clientResultOk
+	}
+
+	// Get SPN user.
+	// Not being logged in is handled separately below, so it is not treated
+	// as an internal error here.
+	user, err := access.GetUser()
+	if err != nil && !errors.Is(err, access.ErrNotLoggedIn) {
+		notifications.NotifyError(
+			"spn:failed-to-get-user",
+			"SPN Internal Error",
+			`Please restart Portmaster.`,
+			// TODO: Add restart button.
+			// TODO: Use special UI restart action in order to reload UI on restart.
+		).AttachToModule(module)
+		resetSPNStatus(StatusFailed, true)
+		log.Errorf("spn/captain: client internal error: %s", err)
+		return clientResultReconnect
+	}
+
+	// Check if user is logged in.
+	if user == nil || !user.IsLoggedIn() {
+		notifications.NotifyWarn(
+			"spn:not-logged-in",
+			"SPN Login Required",
+			`Please log in to access the SPN.`,
+			spnLoginButton,
+		).AttachToModule(module)
+		resetSPNStatus(StatusFailed, true)
+		log.Warningf("spn/captain: enabled but not logged in")
+		return clientResultReconnect
+	}
+
+	// Check if user is eligible.
+	if !user.MayUseTheSPN() {
+		// Update user in case there was a change.
+		// Only update here if we need to - there is an update task in the access
+		// module for periodic updates.
+		if time.Now().Add(-clientIneligibleAccountUpdateDelay).After(time.Unix(user.Meta().Modified, 0)) {
+			_, _, err := access.UpdateUser()
+			if err != nil {
+				notifications.NotifyError(
+					"spn:failed-to-update-user",
+					"SPN Account Server Error",
+					fmt.Sprintf(`The status of your SPN account could not be updated: %s`, err),
+				).AttachToModule(module)
+				resetSPNStatus(StatusFailed, true)
+				log.Errorf("spn/captain: failed to update ineligible account: %s", err)
+				return clientResultReconnect
+			}
+		}
+
+		// Check if user is eligible after a possible update.
+		if !user.MayUseTheSPN() {
+
+			// If package is generally valid, then the current package does not have access to the SPN.
+			if user.MayUse("") {
+				notifications.NotifyError(
+					"spn:package-not-eligible",
+					"SPN Not Included In Package",
+					"Your current Portmaster Package does not include access to the SPN. Please upgrade your package on the Account Page.",
+					spnOpenAccountPage,
+				).AttachToModule(module)
+				resetSPNStatus(StatusFailed, true)
+				return clientResultReconnect
+			}
+
+			// Otherwise, include the message from the user view.
+			message := "There is an issue with your Portmaster Package. Please check the Account Page."
+			if user.View != nil && user.View.Message != "" {
+				message = user.View.Message
+			}
+			notifications.NotifyError(
+				"spn:subscription-inactive",
+				"Portmaster Package Issue",
+				"Cannot enable SPN: "+message,
+				spnOpenAccountPage,
+			).AttachToModule(module)
+			resetSPNStatus(StatusFailed, true)
+			return clientResultReconnect
+		}
+	}
+
+	// Check if we have enough tokens.
+	if access.ShouldRequest(access.ExpandAndConnectZones) {
+		err := access.UpdateTokens()
+		if err != nil {
+			log.Errorf("spn/captain: failed to get tokens: %s", err)
+
+			// There was an error updating the account.
+			// Check if we have enough tokens to continue anyway.
+			regular, _ := access.GetTokenAmount(access.ExpandAndConnectZones)
+			if regular == 0 /* && fallback == 0 */ { // TODO: Add fallback token check when fallback was tested on servers.
+				notifications.NotifyError(
+					"spn:tokens-exhausted",
+					"SPN Access Tokens Exhausted",
+					`The Portmaster failed to get new access tokens to access the SPN. The Portmaster will automatically retry to get new access tokens.`,
+				).AttachToModule(module)
+				resetSPNStatus(StatusFailed, false)
+			}
+			return clientResultRetry
+		}
+	}
+
+	return clientResultOk
+}
+
+// clientStopHomeHub stops the crane (and all connected terminals) to the
+// current home hub, if any. It always reports clientResultOk.
+func clientStopHomeHub(ctx context.Context) clientComponentResult {
+	// Don't use the context in this function, as it will likely be canceled
+	// already and would disrupt any context usage in here.
+
+	// Get crane connecting to home.
+	home, _ := navigator.Main.GetHome()
+	if home == nil {
+		return clientResultOk
+	}
+	crane := docks.GetAssignedCrane(home.Hub.ID)
+	if crane == nil {
+		return clientResultOk
+	}
+
+	// Stop crane and all connected terminals.
+	crane.Stop(nil)
+	return clientResultOk
+}
+
+// clientConnectToHomeHub establishes a connection to a home hub and attaches
+// a matching user notification to the module on failure.
+func clientConnectToHomeHub(ctx context.Context) clientComponentResult {
+	err := establishHomeHub(ctx)
+	if err != nil {
+		log.Errorf("spn/captain: failed to establish connection to home hub: %s", err)
+		resetSPNStatus(StatusFailed, true)
+
+		// Pick the notification that best matches the failure cause.
+		switch {
+		case errors.Is(err, ErrAllHomeHubsExcluded):
+			notifications.NotifyError(
+				"spn:all-home-hubs-excluded",
+				"All Home Nodes Excluded",
+				"Your current Home Node Rules exclude all available and eligible SPN Nodes. Please change your rules to allow for at least one available and eligible Home Node.",
+				notifications.Action{
+					Text: "Configure",
+					Type: notifications.ActionTypeOpenSetting,
+					Payload: &notifications.ActionTypeOpenSettingPayload{
+						Key: CfgOptionHomeHubPolicyKey,
+					},
+				},
+			).AttachToModule(module)
+
+		case errors.Is(err, ErrReInitSPNSuggested):
+			notifications.NotifyError(
+				"spn:cannot-bootstrap",
+				"SPN Cannot Bootstrap",
+				"The local state of the SPN network is likely outdated. Portmaster was not able to identify a server to connect to. Please re-initialize the SPN using the tools menu or the button on the notification.",
+				notifications.Action{
+					ID:   "re-init",
+					Text: "Re-Init SPN",
+					Type: notifications.ActionTypeWebhook,
+					Payload: &notifications.ActionTypeWebhookPayload{
+						URL:          apiPathForSPNReInit,
+						ResultAction: "display",
+					},
+				},
+			).AttachToModule(module)
+
+		default:
+			notifications.NotifyWarn(
+				"spn:home-hub-failure",
+				"SPN Failed to Connect",
+				fmt.Sprintf("Failed to connect to a home hub: %s. The Portmaster will retry to connect automatically.", err),
+			).AttachToModule(module)
+		}
+
+		return clientResultReconnect
+	}
+
+	// Log new connection.
+	home, _ := navigator.Main.GetHome()
+	if home != nil {
+		log.Infof("spn/captain: established new home %s", home.Hub)
+	}
+
+	return clientResultOk
+}
+
+// clientSetActiveConnectionStatus updates the shared SPN status with the
+// details of the active home hub connection and pushes the update, but only
+// when the status actually changed.
+func clientSetActiveConnectionStatus(ctx context.Context) clientComponentResult {
+	// Get current home.
+	home, homeTerminal := navigator.Main.GetHome()
+	if home == nil || homeTerminal == nil {
+		return clientResultReconnect
+	}
+
+	// Resolve any connection error.
+	module.Resolve("")
+
+	// Update SPN Status with connection information, if not already correctly set.
+	spnStatus.Lock()
+	defer spnStatus.Unlock()
+
+	if spnStatus.Status != StatusConnected || spnStatus.HomeHubID != home.Hub.ID {
+		// Fill connection status data.
+		spnStatus.Status = StatusConnected
+		spnStatus.HomeHubID = home.Hub.ID
+		spnStatus.HomeHubName = home.Hub.Info.Name
+
+		connectedIP, _, err := netutils.IPPortFromAddr(homeTerminal.RemoteAddr())
+		if err != nil {
+			// Fall back to the raw address string if it cannot be split.
+			spnStatus.ConnectedIP = homeTerminal.RemoteAddr().String()
+		} else {
+			spnStatus.ConnectedIP = connectedIP.String()
+		}
+		spnStatus.ConnectedTransport = homeTerminal.Transport().String()
+
+		// NOTE(review): connectedIP may be nil here if IPPortFromAddr failed
+		// above - confirm that GetLocation handles a nil IP.
+		geoLoc := home.GetLocation(connectedIP)
+		if geoLoc != nil {
+			spnStatus.ConnectedCountry = &geoLoc.Country
+		}
+
+		now := time.Now()
+		spnStatus.ConnectedSince = &now
+
+		// Push new status.
+		pushSPNStatusUpdate()
+	}
+
+	return clientResultOk
+}
+
+// clientCheckHomeHubConnection verifies the health of the current home hub
+// connection by pinging it, and decides whether a reconnect is needed.
+func clientCheckHomeHubConnection(ctx context.Context) clientComponentResult {
+	// Check the status of the Home Hub.
+	home, homeTerminal := navigator.Main.GetHome()
+	if home == nil || homeTerminal == nil || homeTerminal.IsBeingAbandoned() {
+		return clientResultReconnect
+	}
+
+	// Get crane controller for health check.
+	crane := docks.GetAssignedCrane(home.Hub.ID)
+	if crane == nil {
+		log.Errorf("spn/captain: could not find home hub crane for health check")
+		return clientResultOk
+	}
+
+	// Ping home hub.
+	latency, tErr := pingHome(ctx, crane.Controller, clientHealthCheckTimeout)
+	if tErr != nil {
+		log.Warningf("spn/captain: failed to ping home hub: %s", tErr)
+
+		// Prepare to reconnect to the network.
+
+		// Reset all failing states, as these might have been caused by the failing home hub.
+		navigator.Main.ResetFailingStates(ctx)
+
+		// If the last health check is clearly too long ago, assume that the device was sleeping and do not set the home node to failing yet.
+		if time.Since(lastHealthCheck) > clientHealthCheckTickDuration+
+			clientHealthCheckTickDurationSleepMode+
+			(clientHealthCheckTimeout*2) {
+			return clientResultReconnect
+		}
+
+		// Mark the home hub itself as failing, as we want to try to connect to somewhere else.
+		home.MarkAsFailingFor(5 * time.Minute)
+
+		return clientResultReconnect
+	}
+	lastHealthCheck = time.Now()
+
+	log.Debugf("spn/captain: pinged home hub in %s", latency)
+	return clientResultOk
+}
+
+// pingHome sends a ping operation over the given terminal and waits up to
+// timeout for the reply. It returns the measured round-trip latency, or a
+// terminal error on cancellation, timeout or unexpected response.
+func pingHome(ctx context.Context, t terminal.Terminal, timeout time.Duration) (latency time.Duration, err *terminal.Error) {
+	started := time.Now()
+
+	// Start ping operation.
+	pingOp, tErr := crew.NewPingOp(t)
+	if tErr != nil {
+		return 0, tErr
+	}
+
+	// Wait for response.
+	select {
+	case <-ctx.Done():
+		return 0, terminal.ErrCanceled
+	case <-time.After(timeout):
+		return 0, terminal.ErrTimeout
+	case result := <-pingOp.Result:
+		// A successful ping is reported as an explicit ack.
+		if result.Is(terminal.ErrExplicitAck) {
+			return time.Since(started), nil
+		}
+		if result.IsOK() {
+			return 0, result.Wrap("unexpected response")
+		}
+		return 0, result
+	}
+}
diff --git a/spn/captain/config.go b/spn/captain/config.go
new file mode 100644
index 00000000..09e6f490
--- /dev/null
+++ b/spn/captain/config.go
@@ -0,0 +1,253 @@
+package captain
+
+import (
+	"sync"
+
+	"github.com/safing/portbase/config"
+	"github.com/safing/portmaster/service/profile"
+	"github.com/safing/portmaster/service/profile/endpoints"
+	"github.com/safing/portmaster/spn/conf"
+	"github.com/safing/portmaster/spn/navigator"
+)
+
+var (
+	// CfgOptionEnableSPNKey is the configuration key for the SPN module.
+	CfgOptionEnableSPNKey   = "spn/enable"
+	cfgOptionEnableSPNOrder = 128
+
+	// CfgOptionHomeHubPolicyKey is the configuration key for the SPN home policy.
+	CfgOptionHomeHubPolicyKey   = "spn/homePolicy"
+	cfgOptionHomeHubPolicy      config.StringArrayOption
+	cfgOptionHomeHubPolicyOrder = 145
+
+	// CfgOptionDNSExitHubPolicyKey is the configuration key for the SPN DNS exit policy.
+	CfgOptionDNSExitHubPolicyKey   = "spn/dnsExitPolicy"
+	cfgOptionDNSExitHubPolicy      config.StringArrayOption
+	cfgOptionDNSExitHubPolicyOrder = 148
+
+	// CfgOptionUseCommunityNodesKey is the configuration key for whether to use community nodes.
+	CfgOptionUseCommunityNodesKey   = "spn/useCommunityNodes"
+	cfgOptionUseCommunityNodes      config.BoolOption
+	cfgOptionUseCommunityNodesOrder = 149
+
+	// NonCommunityVerifiedOwners holds a list of verified owners that are not
+	// considered "community".
+	NonCommunityVerifiedOwners = []string{"Safing"}
+
+	// CfgOptionTrustNodeNodesKey is the configuration key for whether additional trusted nodes.
+	CfgOptionTrustNodeNodesKey   = "spn/trustNodes"
+	cfgOptionTrustNodeNodes      config.StringArrayOption
+	cfgOptionTrustNodeNodesOrder = 150
+
+	// Special Access Code.
+	cfgOptionSpecialAccessCodeKey     = "spn/specialAccessCode"
+	cfgOptionSpecialAccessCodeDefault = "none"
+	cfgOptionSpecialAccessCode        config.StringOption //nolint:unused // Linter, you drunk?
+	cfgOptionSpecialAccessCodeOrder   = 160
+
+	// IPv6 must be global and accessible.
+	cfgOptionBindToAdvertisedKey     = "spn/publicHub/bindToAdvertised"
+	cfgOptionBindToAdvertised        config.BoolOption
+	cfgOptionBindToAdvertisedDefault = false
+	cfgOptionBindToAdvertisedOrder   = 161
+
+	// Config options for use.
+	// cfgOption* getter functions are assigned in prepConfig after the
+	// corresponding options are registered.
+	cfgOptionRoutingAlgorithm config.StringOption
+)
+
+// prepConfig registers all captain configuration options and assigns the
+// corresponding typed getter functions. Public-hub-only options are only
+// registered when running as a public hub.
+func prepConfig() error {
+	// Home Node Rules
+	err := config.Register(&config.Option{
+		Name: "Home Node Rules",
+		Key:  CfgOptionHomeHubPolicyKey,
+		Description: `Customize which countries should or should not be used for your Home Node. The Home Node is your entry into the SPN. You connect directly to it and all your connections are routed through it.
+
+By default, the Portmaster tries to choose the nearest node as your Home Node in order to reduce your exposure to the open Internet.
+
+Reconnect to the SPN in order to apply new rules.`,
+		Help:            profile.SPNRulesHelp,
+		Sensitive:       true,
+		OptType:         config.OptTypeStringArray,
+		RequiresRestart: true,
+		ExpertiseLevel:  config.ExpertiseLevelExpert,
+		DefaultValue:    []string{},
+		Annotations: config.Annotations{
+			config.CategoryAnnotation:                    "Routing",
+			config.DisplayOrderAnnotation:                cfgOptionHomeHubPolicyOrder,
+			config.DisplayHintAnnotation:                 endpoints.DisplayHintEndpointList,
+			config.QuickSettingsAnnotation:               profile.SPNRulesQuickSettings,
+			endpoints.EndpointListVerdictNamesAnnotation: profile.SPNRulesVerdictNames,
+		},
+		ValidationRegex: endpoints.ListEntryValidationRegex,
+		ValidationFunc:  endpoints.ValidateEndpointListConfigOption,
+	})
+	if err != nil {
+		return err
+	}
+	cfgOptionHomeHubPolicy = config.Concurrent.GetAsStringArray(CfgOptionHomeHubPolicyKey, []string{})
+
+	// DNS Exit Node Rules
+	err = config.Register(&config.Option{
+		Name: "DNS Exit Node Rules",
+		Key:  CfgOptionDNSExitHubPolicyKey,
+		Description: `Customize which countries should or should not be used as DNS Exit Nodes.
+
+By default, the Portmaster will exit DNS requests directly at your Home Node in order to keep them fast and close to your location. This is important, as DNS resolution often takes your approximate location into account when deciding which optimized DNS records are returned to you. As the Portmaster encrypts your DNS requests by default, you effectively gain a two-hop security level for your DNS requests in order to protect your privacy.
+
+This setting mainly exists for when you need to simulate your presence in another location on a lower level too. This might be necessary to defeat more intelligent geo-blocking systems.`,
+		Help:            profile.SPNRulesHelp,
+		Sensitive:       true,
+		OptType:         config.OptTypeStringArray,
+		RequiresRestart: true,
+		ExpertiseLevel:  config.ExpertiseLevelExpert,
+		DefaultValue:    []string{},
+		Annotations: config.Annotations{
+			config.CategoryAnnotation:                    "Routing",
+			config.DisplayOrderAnnotation:                cfgOptionDNSExitHubPolicyOrder,
+			config.DisplayHintAnnotation:                 endpoints.DisplayHintEndpointList,
+			config.QuickSettingsAnnotation:               profile.SPNRulesQuickSettings,
+			endpoints.EndpointListVerdictNamesAnnotation: profile.SPNRulesVerdictNames,
+		},
+		ValidationRegex: endpoints.ListEntryValidationRegex,
+		ValidationFunc:  endpoints.ValidateEndpointListConfigOption,
+	})
+	if err != nil {
+		return err
+	}
+	cfgOptionDNSExitHubPolicy = config.Concurrent.GetAsStringArray(CfgOptionDNSExitHubPolicyKey, []string{})
+
+	err = config.Register(&config.Option{
+		Name:            "Use Community Nodes",
+		Key:             CfgOptionUseCommunityNodesKey,
+		Description:     "Use nodes (servers) not operated by Safing themselves. The use of community nodes is recommended as it diversifies the ownership of the nodes you use for your connections and further strengthens your privacy. Plain connections (eg. http, smtp, ...) will never exit via community nodes, making this setting safe to use.",
+		Sensitive:       true,
+		OptType:         config.OptTypeBool,
+		RequiresRestart: true,
+		DefaultValue:    true,
+		Annotations: config.Annotations{
+			config.DisplayOrderAnnotation: cfgOptionUseCommunityNodesOrder,
+			config.CategoryAnnotation:     "Routing",
+		},
+	})
+	if err != nil {
+		return err
+	}
+	cfgOptionUseCommunityNodes = config.Concurrent.GetAsBool(CfgOptionUseCommunityNodesKey, true)
+
+	err = config.Register(&config.Option{
+		Name:           "Trust Nodes",
+		Key:            CfgOptionTrustNodeNodesKey,
+		Description:    "Specify which community nodes to additionally trust. These nodes may then also be used as a Home Node, as well as an Exit Node for unencrypted connections.",
+		Help:           "You can specify nodes by their ID or their verified operator.",
+		Sensitive:      true,
+		OptType:        config.OptTypeStringArray,
+		ExpertiseLevel: config.ExpertiseLevelExpert,
+		DefaultValue:   []string{},
+		Annotations: config.Annotations{
+			config.DisplayOrderAnnotation: cfgOptionTrustNodeNodesOrder,
+			config.CategoryAnnotation:     "Routing",
+		},
+	})
+	if err != nil {
+		return err
+	}
+	cfgOptionTrustNodeNodes = config.Concurrent.GetAsStringArray(CfgOptionTrustNodeNodesKey, []string{})
+
+	err = config.Register(&config.Option{
+		Name:         "Special Access Code",
+		Key:          cfgOptionSpecialAccessCodeKey,
+		Description:  "Special Access Codes grant access to the SPN for testing or evaluation purposes.",
+		Sensitive:    true,
+		OptType:      config.OptTypeString,
+		DefaultValue: cfgOptionSpecialAccessCodeDefault,
+		Annotations: config.Annotations{
+			config.DisplayOrderAnnotation: cfgOptionSpecialAccessCodeOrder,
+			config.CategoryAnnotation:     "Advanced",
+		},
+	})
+	if err != nil {
+		return err
+	}
+	// NOTE(review): the getter fallback is "" while the registered option's
+	// default is cfgOptionSpecialAccessCodeDefault ("none") - confirm this
+	// inconsistency is intended.
+	cfgOptionSpecialAccessCode = config.Concurrent.GetAsString(cfgOptionSpecialAccessCodeKey, "")
+
+	if conf.PublicHub() {
+		err = config.Register(&config.Option{
+			Name:            "Connect From Advertised IPs Only",
+			Key:             cfgOptionBindToAdvertisedKey,
+			Description:     "Only connect from (bind to) the advertised IP addresses.",
+			OptType:         config.OptTypeBool,
+			ExpertiseLevel:  config.ExpertiseLevelExpert,
+			DefaultValue:    cfgOptionBindToAdvertisedDefault,
+			RequiresRestart: true,
+			Annotations: config.Annotations{
+				config.DisplayOrderAnnotation: cfgOptionBindToAdvertisedOrder,
+			},
+		})
+		if err != nil {
+			return err
+		}
+		cfgOptionBindToAdvertised = config.GetAsBool(cfgOptionBindToAdvertisedKey, cfgOptionBindToAdvertisedDefault)
+	}
+
+	// Config options for use.
+	cfgOptionRoutingAlgorithm = config.Concurrent.GetAsString(profile.CfgOptionRoutingAlgorithmKey, navigator.DefaultRoutingProfileID)
+
+	return nil
+}
+
+var (
+	// Cached parsed home hub policy, guarded by homeHubPolicyLock.
+	homeHubPolicy           endpoints.Endpoints
+	homeHubPolicyLock       sync.Mutex
+	// homeHubPolicyConfigFlag tracks whether the cached value is still
+	// based on the current configuration.
+	homeHubPolicyConfigFlag = config.NewValidityFlag()
+)
+
+// getHomeHubPolicy returns the current home hub policy, re-parsing the
+// configured endpoint list only when the configuration changed.
+func getHomeHubPolicy() (endpoints.Endpoints, error) {
+	homeHubPolicyLock.Lock()
+	defer homeHubPolicyLock.Unlock()
+
+	// Return cached value if config is still valid.
+	if homeHubPolicyConfigFlag.IsValid() {
+		return homeHubPolicy, nil
+	}
+	homeHubPolicyConfigFlag.Refresh()
+
+	// Parse new policy.
+	policy, err := endpoints.ParseEndpoints(cfgOptionHomeHubPolicy())
+	if err != nil {
+		// Invalidate the cache so a bad config is not served to callers.
+		homeHubPolicy = nil
+		return nil, err
+	}
+
+	// Save and return the new policy.
+	homeHubPolicy = policy
+	return homeHubPolicy, nil
+}
+
+var (
+	// Cached parsed DNS exit hub policy, guarded by dnsExitHubPolicyLock.
+	dnsExitHubPolicy           endpoints.Endpoints
+	dnsExitHubPolicyLock       sync.Mutex
+	// dnsExitHubPolicyConfigFlag tracks whether the cached value is still
+	// based on the current configuration.
+	dnsExitHubPolicyConfigFlag = config.NewValidityFlag()
+)
+
+// GetDNSExitHubPolicy return the current DNS exit policy.
+// The parsed policy is cached and only re-parsed when the config changed.
+func GetDNSExitHubPolicy() (endpoints.Endpoints, error) {
+	dnsExitHubPolicyLock.Lock()
+	defer dnsExitHubPolicyLock.Unlock()
+
+	// Return cached value if config is still valid.
+	if dnsExitHubPolicyConfigFlag.IsValid() {
+		return dnsExitHubPolicy, nil
+	}
+	dnsExitHubPolicyConfigFlag.Refresh()
+
+	// Parse new policy.
+	policy, err := endpoints.ParseEndpoints(cfgOptionDNSExitHubPolicy())
+	if err != nil {
+		// Invalidate the cache so a bad config is not served to callers.
+		dnsExitHubPolicy = nil
+		return nil, err
+	}
+
+	// Save and return the new policy.
+	dnsExitHubPolicy = policy
+	return dnsExitHubPolicy, nil
+}
diff --git a/spn/captain/establish.go b/spn/captain/establish.go
new file mode 100644
index 00000000..479098a5
--- /dev/null
+++ b/spn/captain/establish.go
@@ -0,0 +1,105 @@
+package captain
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"time"
+
+	"github.com/safing/portbase/log"
+	"github.com/safing/portmaster/spn/conf"
+	"github.com/safing/portmaster/spn/docks"
+	"github.com/safing/portmaster/spn/hub"
+	"github.com/safing/portmaster/spn/ships"
+	"github.com/safing/portmaster/spn/terminal"
+)
+
+// EstablishCrane establishes a crane to another Hub.
+func EstablishCrane(callerCtx context.Context, dst *hub.Hub) (*docks.Crane, error) {
+	if conf.PublicHub() && dst.ID == publicIdentity.ID {
+		return nil, errors.New("connecting to self")
+	}
+	if docks.GetAssignedCrane(dst.ID) != nil {
+		return nil, fmt.Errorf("route to %s already exists", dst.ID)
+	}
+
+	ship, err := ships.Launch(callerCtx, dst, nil, nil)
+	if err != nil {
+		return nil, fmt.Errorf("failed to launch ship: %w", err)
+	}
+
+	// On pure clients, mark all ships as public in order to show unmasked data in logs.
+	if conf.Client() && !conf.PublicHub() {
+		ship.MarkPublic()
+	}
+
+	crane, err := docks.NewCrane(ship, dst, publicIdentity)
+	if err != nil {
+		return nil, fmt.Errorf("failed to create crane: %w", err)
+	}
+
+	err = crane.Start(callerCtx)
+	if err != nil {
+		return nil, fmt.Errorf("failed to start crane: %w", err)
+	}
+
+	// Start gossip op for live map updates.
+	_, tErr := NewGossipOp(crane.Controller)
+	if tErr != nil {
+		crane.Stop(tErr)
+		return nil, fmt.Errorf("failed to start gossip op: %w", tErr)
+	}
+
+	return crane, nil
+}
+
+// EstablishPublicLane establishes a crane to another Hub and publishes it.
+func EstablishPublicLane(ctx context.Context, dst *hub.Hub) (*docks.Crane, *terminal.Error) {
+	// Create new context with timeout.
+	// The maximum timeout is a worst case safeguard.
+	// Keep in mind that multiple IPs and protocols may be tried in all configurations.
+	// Some servers will be (possibly on purpose) hard to reach.
+	ctx, cancel := context.WithTimeout(ctx, 5*time.Minute)
+	defer cancel()
+
+	// Connect to destination and establish communication.
+	crane, err := EstablishCrane(ctx, dst)
+	if err != nil {
+		return nil, terminal.ErrInternalError.With("failed to establish crane: %w", err)
+	}
+
+	// Publish as Lane.
+	publishOp, tErr := NewPublishOp(crane.Controller, publicIdentity)
+	if tErr != nil {
+		return nil, terminal.ErrInternalError.With("failed to publish: %w", err)
+	}
+
+	// Wait for publishing to complete.
+	select {
+	case tErr := <-publishOp.Result():
+		if !tErr.Is(terminal.ErrExplicitAck) {
+			// Stop crane again, because we failed to publish it.
+			defer crane.Stop(nil)
+			return nil, terminal.ErrInternalError.With("failed to publish lane: %w", tErr)
+		}
+
+	case <-crane.Controller.Ctx().Done():
+		defer crane.Stop(nil)
+		return nil, terminal.ErrStopping
+
+	case <-ctx.Done():
+		defer crane.Stop(nil)
+		if errors.Is(ctx.Err(), context.DeadlineExceeded) {
+			return nil, terminal.ErrTimeout
+		}
+		return nil, terminal.ErrCanceled
+	}
+
+	// Query all gossip msgs.
+	_, tErr = NewGossipQueryOp(crane.Controller)
+	if tErr != nil {
+		log.Warningf("spn/captain: failed to start initial gossip query: %s", tErr)
+	}
+
+	return crane, nil
+}
diff --git a/spn/captain/exceptions.go b/spn/captain/exceptions.go
new file mode 100644
index 00000000..bde30950
--- /dev/null
+++ b/spn/captain/exceptions.go
@@ -0,0 +1,28 @@
+package captain
+
+import (
+	"net"
+	"sync"
+)
+
+var (
+	exceptionLock sync.Mutex
+	exceptIPv4    net.IP
+	exceptIPv6    net.IP
+)
+
+func setExceptions(ipv4, ipv6 net.IP) {
+	exceptionLock.Lock()
+	defer exceptionLock.Unlock()
+
+	exceptIPv4 = ipv4
+	exceptIPv6 = ipv6
+}
+
+// IsExcepted checks if the given IP is currently excepted from the SPN.
+func IsExcepted(ip net.IP) bool {
+	exceptionLock.Lock()
+	defer exceptionLock.Unlock()
+
+	return ip.Equal(exceptIPv4) || ip.Equal(exceptIPv6)
+}
diff --git a/spn/captain/gossip.go b/spn/captain/gossip.go
new file mode 100644
index 00000000..3279367a
--- /dev/null
+++ b/spn/captain/gossip.go
@@ -0,0 +1,38 @@
+package captain
+
+import (
+	"sync"
+)
+
+var (
+	gossipOps     = make(map[string]*GossipOp)
+	gossipOpsLock sync.RWMutex
+)
+
+func registerGossipOp(craneID string, op *GossipOp) {
+	gossipOpsLock.Lock()
+	defer gossipOpsLock.Unlock()
+
+	gossipOps[craneID] = op
+}
+
+func deleteGossipOp(craneID string) {
+	gossipOpsLock.Lock()
+	defer gossipOpsLock.Unlock()
+
+	delete(gossipOps, craneID)
+}
+
+func gossipRelayMsg(receivedFrom string, msgType GossipMsgType, data []byte) {
+	gossipOpsLock.RLock()
+	defer gossipOpsLock.RUnlock()
+
+	for craneID, gossipOp := range gossipOps {
+		// Don't return same msg back to sender.
+		if craneID == receivedFrom {
+			continue
+		}
+
+		gossipOp.sendMsg(msgType, data)
+	}
+}
diff --git a/spn/captain/hooks.go b/spn/captain/hooks.go
new file mode 100644
index 00000000..6a60f7ea
--- /dev/null
+++ b/spn/captain/hooks.go
@@ -0,0 +1,47 @@
+package captain
+
+import (
+	"time"
+
+	"github.com/safing/portmaster/service/updates"
+	"github.com/safing/portmaster/spn/conf"
+	"github.com/safing/portmaster/spn/docks"
+)
+
+func startDockHooks() {
+	docks.RegisterCraneUpdateHook(handleCraneUpdate)
+}
+
+func stopDockHooks() {
+	docks.ResetCraneUpdateHook()
+}
+
+func handleCraneUpdate(crane *docks.Crane) {
+	if crane == nil {
+		return
+	}
+
+	if conf.Client() && crane.Controller != nil && crane.Controller.Abandoning.IsSet() {
+		// Check connection to home hub.
+		triggerClientHealthCheck()
+	}
+
+	if conf.PublicHub() && crane.Public() {
+		// Update Hub status.
+		updateConnectionStatus()
+	}
+}
+
+func updateConnectionStatus() {
+	// Delay updating status for a better chance to combine multiple changes.
+	statusUpdateTask.Schedule(time.Now().Add(maintainStatusUpdateDelay))
+
+	// Check if we lost all connections and trigger a pending restart if we did.
+	for _, crane := range docks.GetAllAssignedCranes() {
+		if crane.Public() && !crane.Stopped() {
+			// There is at least one public and active crane, so don't restart now.
+			return
+		}
+	}
+	updates.TriggerRestartIfPending()
+}
diff --git a/spn/captain/intel.go b/spn/captain/intel.go
new file mode 100644
index 00000000..fe743c1b
--- /dev/null
+++ b/spn/captain/intel.go
@@ -0,0 +1,108 @@
+package captain
+
+import (
+	"context"
+	"fmt"
+	"os"
+	"sync"
+
+	"github.com/safing/portbase/config"
+	"github.com/safing/portbase/updater"
+	"github.com/safing/portmaster/service/updates"
+	"github.com/safing/portmaster/spn/conf"
+	"github.com/safing/portmaster/spn/hub"
+	"github.com/safing/portmaster/spn/navigator"
+	"github.com/safing/portmaster/spn/ships"
+)
+
+var (
+	intelResource           *updater.File
+	intelResourcePath       = "intel/spn/main-intel.yaml"
+	intelResourceMapName    = "main"
+	intelResourceUpdateLock sync.Mutex
+)
+
+func registerIntelUpdateHook() error {
+	if err := module.RegisterEventHook(
+		updates.ModuleName,
+		updates.ResourceUpdateEvent,
+		"update SPN intel",
+		updateSPNIntel,
+	); err != nil {
+		return err
+	}
+
+	if err := module.RegisterEventHook(
+		"config",
+		config.ChangeEvent,
+		"update SPN intel",
+		updateSPNIntel,
+	); err != nil {
+		return err
+	}
+
+	return nil
+}
+
// updateSPNIntel fetches, parses and applies the SPN intel update resource.
// It is registered as an event hook for both resource updates and config
// changes; the context and event payload arguments are unused.
// It is a no-op when the cached resource is already up to date.
func updateSPNIntel(ctx context.Context, _ interface{}) (err error) {
	intelResourceUpdateLock.Lock()
	defer intelResourceUpdateLock.Unlock()

	// Only update SPN intel when using the matching map.
	if conf.MainMapName != intelResourceMapName {
		return fmt.Errorf("intel resource not for map %q", conf.MainMapName)
	}

	// Check if there is something to do.
	if intelResource != nil && !intelResource.UpgradeAvailable() {
		return nil
	}

	// Get intel file and load it from disk.
	// Note: this assigns the package-level intelResource cache.
	intelResource, err = updates.GetFile(intelResourcePath)
	if err != nil {
		return fmt.Errorf("failed to get SPN intel update: %w", err)
	}
	intelData, err := os.ReadFile(intelResource.Path())
	if err != nil {
		return fmt.Errorf("failed to load SPN intel update: %w", err)
	}

	// Parse and apply intel data.
	intel, err := hub.ParseIntel(intelData)
	if err != nil {
		return fmt.Errorf("failed to parse SPN intel update: %w", err)
	}

	// Apply the virtual network config (public Hubs only) and hand the
	// parsed intel to the navigator map.
	setVirtualNetworkConfig(intel.VirtualNetworks)
	return navigator.Main.UpdateIntel(intel, cfgOptionTrustNodeNodes())
}

// resetSPNIntel clears the cached intel resource so that it is fully
// re-loaded and re-applied on the next start.
func resetSPNIntel() {
	intelResourceUpdateLock.Lock()
	defer intelResourceUpdateLock.Unlock()

	intelResource = nil
}
+
+func setVirtualNetworkConfig(configs []*hub.VirtualNetworkConfig) {
+	// Do nothing if not public Hub.
+	if !conf.PublicHub() {
+		return
+	}
+	// Reset if there are no virtual networks configured.
+	if len(configs) == 0 {
+		ships.SetVirtualNetworkConfig(nil)
+	}
+
+	// Check if we are in a virtual network.
+	for _, config := range configs {
+		if _, ok := config.Mapping[publicIdentity.Hub.ID]; ok {
+			ships.SetVirtualNetworkConfig(config)
+			return
+		}
+	}
+
+	// If not, reset - we might have been in one before.
+	ships.SetVirtualNetworkConfig(nil)
+}
diff --git a/spn/captain/module.go b/spn/captain/module.go
new file mode 100644
index 00000000..356eb199
--- /dev/null
+++ b/spn/captain/module.go
@@ -0,0 +1,219 @@
+package captain
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"net"
+	"net/http"
+	"time"
+
+	"github.com/safing/portbase/api"
+	"github.com/safing/portbase/config"
+	"github.com/safing/portbase/log"
+	"github.com/safing/portbase/modules"
+	"github.com/safing/portbase/modules/subsystems"
+	"github.com/safing/portbase/rng"
+	"github.com/safing/portmaster/service/network/netutils"
+	"github.com/safing/portmaster/spn/conf"
+	"github.com/safing/portmaster/spn/crew"
+	"github.com/safing/portmaster/spn/navigator"
+	"github.com/safing/portmaster/spn/patrol"
+	"github.com/safing/portmaster/spn/ships"
+	_ "github.com/safing/portmaster/spn/sluice"
+)
+
// controlledFailureExitCode is the exit status set for unrecoverable
// failures that should not be retried (see start).
const controlledFailureExitCode = 24

// module is the captain module instance, registered in init.
var module *modules.Module

// SPNConnectedEvent is the name of the event that is fired when the SPN has connected and is ready.
const SPNConnectedEvent = "spn connect"

func init() {
	// The captain module coordinates the SPN and depends on all other SPN modules.
	module = modules.Register("captain", prep, start, stop, "base", "terminal", "cabin", "ships", "docks", "crew", "navigator", "sluice", "patrol", "netenv")
	module.RegisterEvent(SPNConnectedEvent, false)
	// Register the SPN subsystem with its enable/disable toggle option.
	subsystems.Register(
		"spn",
		"SPN",
		"Safing Privacy Network",
		module,
		"config:spn/",
		&config.Option{
			Name:         "SPN Module",
			Key:          CfgOptionEnableSPNKey,
			Description:  "Start the Safing Privacy Network module. If turned off, the SPN is fully disabled on this device.",
			OptType:      config.OptTypeBool,
			DefaultValue: false,
			Annotations: config.Annotations{
				config.DisplayOrderAnnotation: cfgOptionEnableSPNOrder,
				config.CategoryAnnotation:     "General",
			},
		},
	)
}
+
// prep prepares the captain module: it parses the bootstrap hub flag,
// registers status providers and API endpoints, and - on public Hubs -
// installs API authentication and a hub status maintenance trigger.
// It finishes by preparing the module configuration.
func prep() error {
	// Check if we can parse the bootstrap hub flag.
	if err := prepBootstrapHubFlag(); err != nil {
		return err
	}

	// Register SPN status provider.
	if err := registerSPNStatusProvider(); err != nil {
		return err
	}

	// Register API endpoints.
	if err := registerAPIEndpoints(); err != nil {
		return err
	}

	if conf.PublicHub() {
		// Register API authenticator.
		if err := api.SetAuthenticator(apiAuthenticator); err != nil {
			return err
		}

		// Re-run hub status maintenance whenever patrol signals a change.
		if err := module.RegisterEventHook(
			"patrol",
			patrol.ChangeSignalEventName,
			"trigger hub status maintenance",
			func(_ context.Context, _ any) error {
				TriggerHubStatusMaintenance()
				return nil
			},
		); err != nil {
			return err
		}
	}

	return prepConfig()
}
+
// start brings up the captain module: traffic masking, SPN intel, the public
// identity and piers (on public Hubs), dock hooks, bootstrapping, the network
// optimizer task (public Hubs) and the client manager (clients).
func start() error {
	// Enable ship masking with fresh random bytes.
	maskingBytes, err := rng.Bytes(16)
	if err != nil {
		return fmt.Errorf("failed to get random bytes for masking: %w", err)
	}
	ships.EnableMasking(maskingBytes)

	// Initialize intel.
	if err := registerIntelUpdateHook(); err != nil {
		return err
	}
	// An intel update failure is non-fatal; it will be retried via the hooks.
	if err := updateSPNIntel(module.Ctx, nil); err != nil {
		log.Errorf("spn/captain: failed to update SPN intel: %s", err)
	}

	// Initialize identity and piers.
	if conf.PublicHub() {
		// Load identity.
		if err := loadPublicIdentity(); err != nil {
			// We cannot recover from this, set controlled failure (do not retry).
			modules.SetExitStatusCode(controlledFailureExitCode)

			return err
		}

		// Check if any networks are configured.
		if !conf.HubHasIPv4() && !conf.HubHasIPv6() {
			// We cannot recover from this, set controlled failure (do not retry).
			modules.SetExitStatusCode(controlledFailureExitCode)

			return errors.New("no IP addresses for Hub configured (or detected)")
		}

		// Start management of identity and piers.
		if err := prepPublicIdentityMgmt(); err != nil {
			return err
		}
		// Set ID to display on http info page.
		ships.DisplayHubID = publicIdentity.ID
		// Start listeners.
		if err := startPiers(); err != nil {
			return err
		}

		// Enable connect operation.
		crew.EnableConnecting(publicIdentity.Hub)
	}

	// Subscribe to updates of cranes.
	startDockHooks()

	// bootstrapping
	if err := processBootstrapHubFlag(); err != nil {
		return err
	}
	if err := processBootstrapFileFlag(); err != nil {
		return err
	}

	// network optimizer
	if conf.PublicHub() {
		module.NewTask("optimize network", optimizeNetwork).
			Repeat(1 * time.Minute).
			Schedule(time.Now().Add(15 * time.Second))
	}

	// client + home hub manager
	if conf.Client() {
		module.StartServiceWorker("client manager", 0, clientManager)

		// Reset failing hubs when the network changes while not connected.
		if err := module.RegisterEventHook(
			"netenv",
			"network changed",
			"reset failing hubs",
			func(_ context.Context, _ interface{}) error {
				if ready.IsNotSet() {
					navigator.Main.ResetFailingStates(module.Ctx)
				}
				return nil
			},
		); err != nil {
			return err
		}
	}

	return nil
}
+
+func stop() error {
+	// Reset intel resource so that it is loaded again when starting.
+	resetSPNIntel()
+
+	// Unregister crane update hook.
+	stopDockHooks()
+
+	// Send shutdown status message.
+	if conf.PublicHub() {
+		publishShutdownStatus()
+		stopPiers()
+	}
+
+	return nil
+}
+
+// apiAuthenticator grants User permissions for local API requests.
+func apiAuthenticator(r *http.Request, s *http.Server) (*api.AuthToken, error) {
+	// Get remote IP.
+	host, _, err := net.SplitHostPort(r.RemoteAddr)
+	if err != nil {
+		return nil, fmt.Errorf("failed to split host/port: %w", err)
+	}
+	remoteIP := net.ParseIP(host)
+	if remoteIP == nil {
+		return nil, fmt.Errorf("failed to parse remote address %s", host)
+	}
+
+	if !netutils.GetIPScope(remoteIP).IsLocalhost() {
+		return nil, api.ErrAPIAccessDeniedMessage
+	}
+
+	return &api.AuthToken{
+		Read:  api.PermitUser,
+		Write: api.PermitUser,
+	}, nil
+}
diff --git a/spn/captain/navigation.go b/spn/captain/navigation.go
new file mode 100644
index 00000000..e60267fa
--- /dev/null
+++ b/spn/captain/navigation.go
@@ -0,0 +1,306 @@
+package captain
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"time"
+
+	"github.com/safing/portbase/log"
+	"github.com/safing/portbase/modules"
+	"github.com/safing/portmaster/service/intel"
+	"github.com/safing/portmaster/service/netenv"
+	"github.com/safing/portmaster/service/profile/endpoints"
+	"github.com/safing/portmaster/spn/access"
+	"github.com/safing/portmaster/spn/docks"
+	"github.com/safing/portmaster/spn/hub"
+	"github.com/safing/portmaster/spn/navigator"
+	"github.com/safing/portmaster/spn/terminal"
+)
+
+const stopCraneAfterBeingUnsuggestedFor = 6 * time.Hour
+
+var (
+	// ErrAllHomeHubsExcluded is returned when all available home hubs were excluded.
+	ErrAllHomeHubsExcluded = errors.New("all home hubs are excluded")
+
+	// ErrReInitSPNSuggested is returned when no home hub can be found, even without rules.
+	ErrReInitSPNSuggested = errors.New("SPN re-init suggested")
+)
+
+func establishHomeHub(ctx context.Context) error {
+	// Get own IP.
+	locations, ok := netenv.GetInternetLocation()
+	if !ok || len(locations.All) == 0 {
+		return errors.New("failed to locate own device")
+	}
+	log.Debugf(
+		"spn/captain: looking for new home hub near %s and %s",
+		locations.BestV4(),
+		locations.BestV6(),
+	)
+
+	// Get own entity.
+	// Checking the entity against the entry policies is somewhat hit and miss
+	// anyway, as the device location is an approximation.
+	var myEntity *intel.Entity
+	if dl := locations.BestV4(); dl != nil && dl.IP != nil {
+		myEntity = (&intel.Entity{IP: dl.IP}).Init(0)
+		myEntity.FetchData(ctx)
+	} else if dl := locations.BestV6(); dl != nil && dl.IP != nil {
+		myEntity = (&intel.Entity{IP: dl.IP}).Init(0)
+		myEntity.FetchData(ctx)
+	}
+
+	// Get home hub policy for selecting the home hub.
+	homePolicy, err := getHomeHubPolicy()
+	if err != nil {
+		return err
+	}
+
+	// Build navigation options for searching for a home hub.
+	opts := &navigator.Options{
+		Home: &navigator.HomeHubOptions{
+			HubPolicies:        []endpoints.Endpoints{homePolicy},
+			CheckHubPolicyWith: myEntity,
+		},
+	}
+
+	// Add requirement to only use Safing nodes when not using community nodes.
+	if !cfgOptionUseCommunityNodes() {
+		opts.Home.RequireVerifiedOwners = NonCommunityVerifiedOwners
+	}
+
+	// Require a trusted home node when the routing profile requires less than two hops.
+	routingProfile := navigator.GetRoutingProfile(cfgOptionRoutingAlgorithm())
+	if routingProfile.MinHops < 2 {
+		opts.Home.Regard = opts.Home.Regard.Add(navigator.StateTrusted)
+	}
+
+	// Find nearby hubs.
+findCandidates:
+	candidates, err := navigator.Main.FindNearestHubs(
+		locations.BestV4().LocationOrNil(),
+		locations.BestV6().LocationOrNil(),
+		opts, navigator.HomeHub,
+	)
+	if err != nil {
+		switch {
+		case errors.Is(err, navigator.ErrEmptyMap):
+			// bootstrap to the network!
+			err := bootstrapWithUpdates()
+			if err != nil {
+				return err
+			}
+			goto findCandidates
+
+		case errors.Is(err, navigator.ErrAllPinsDisregarded):
+			if len(homePolicy) > 0 {
+				return ErrAllHomeHubsExcluded
+			}
+			return ErrReInitSPNSuggested
+
+		default:
+			return fmt.Errorf("failed to find nearby hubs: %w", err)
+		}
+	}
+
+	// Try connecting to a hub.
+	var tries int
+	var candidate *hub.Hub
+	for tries, candidate = range candidates {
+		err = connectToHomeHub(ctx, candidate)
+		if err != nil {
+			// Check if context is canceled.
+			if ctx.Err() != nil {
+				return ctx.Err()
+			}
+			// Check if the SPN protocol is stopping again.
+			if errors.Is(err, terminal.ErrStopping) {
+				return err
+			}
+			log.Warningf("spn/captain: failed to connect to %s as new home: %s", candidate, err)
+		} else {
+			log.Infof("spn/captain: established connection to %s as new home with %d failed tries", candidate, tries)
+			return nil
+		}
+	}
+	if err != nil {
+		return fmt.Errorf("failed to connect to a new home hub - tried %d hubs: %w", tries+1, err)
+	}
+	return fmt.Errorf("no home hub candidates available")
+}
+
+func connectToHomeHub(ctx context.Context, dst *hub.Hub) error {
+	// Create new context with timeout.
+	// The maximum timeout is a worst case safeguard.
+	// Keep in mind that multiple IPs and protocols may be tried in all configurations.
+	// Some servers will be (possibly on purpose) hard to reach.
+	ctx, cancel := context.WithTimeout(ctx, 5*time.Minute)
+	defer cancel()
+
+	// Set and clean up exceptions.
+	setExceptions(dst.Info.IPv4, dst.Info.IPv6)
+	defer setExceptions(nil, nil)
+
+	// Connect to hub.
+	crane, err := EstablishCrane(ctx, dst)
+	if err != nil {
+		return err
+	}
+
+	// Cleanup connection in case of failure.
+	var success bool
+	defer func() {
+		if !success {
+			crane.Stop(nil)
+		}
+	}()
+
+	// Query all gossip msgs on first connection.
+	gossipQuery, tErr := NewGossipQueryOp(crane.Controller)
+	if tErr != nil {
+		log.Warningf("spn/captain: failed to start initial gossip query: %s", tErr)
+	}
+	// Wait for gossip query to complete.
+	select {
+	case <-gossipQuery.ctx.Done():
+	case <-ctx.Done():
+		return context.Canceled
+	}
+
+	// Create communication terminal.
+	homeTerminal, initData, tErr := docks.NewLocalCraneTerminal(crane, nil, terminal.DefaultHomeHubTerminalOpts())
+	if tErr != nil {
+		return tErr.Wrap("failed to create home terminal")
+	}
+	tErr = crane.EstablishNewTerminal(homeTerminal, initData)
+	if tErr != nil {
+		return tErr.Wrap("failed to connect home terminal")
+	}
+
+	if !DisableAccount {
+		// Authenticate to home hub.
+		authOp, tErr := access.AuthorizeToTerminal(homeTerminal)
+		if tErr != nil {
+			return tErr.Wrap("failed to authorize")
+		}
+		select {
+		case tErr := <-authOp.Result:
+			if !tErr.Is(terminal.ErrExplicitAck) {
+				return tErr.Wrap("failed to authenticate to")
+			}
+		case <-time.After(3 * time.Second):
+			return terminal.ErrTimeout.With("waiting for auth to complete")
+		case <-ctx.Done():
+			return terminal.ErrStopping
+		}
+	}
+
+	// Set new home on map.
+	ok := navigator.Main.SetHome(dst.ID, homeTerminal)
+	if !ok {
+		return fmt.Errorf("failed to set home hub on map")
+	}
+
+	// Assign crane to home hub in order to query it later.
+	docks.AssignCrane(crane.ConnectedHub.ID, crane)
+
+	success = true
+	return nil
+}
+
// optimizeNetwork runs one network optimization round: it asks the navigator
// for suggested connections, establishes new lanes within the allowed budget,
// and retires cranes that have not been suggested for a while. It is a no-op
// until the public identity is loaded.
func optimizeNetwork(ctx context.Context, task *modules.Task) error {
	if publicIdentity == nil {
		return nil
	}

optimize:
	result, err := navigator.Main.Optimize(nil)
	if err != nil {
		if errors.Is(err, navigator.ErrEmptyMap) {
			// bootstrap to the network!
			err := bootstrapWithUpdates()
			if err != nil {
				return err
			}
			goto optimize
		}

		return err
	}

	// Create any new connections.
	var createdConnections int
	var attemptedConnections int
	for _, connectTo := range result.SuggestedConnections {
		// Skip duplicates.
		if connectTo.Duplicate {
			continue
		}

		// Check if connection already exists.
		crane := docks.GetAssignedCrane(connectTo.Hub.ID)
		if crane != nil {
			// Update last suggested timestamp.
			crane.NetState.UpdateLastSuggestedAt()
			// Continue crane if stopping.
			if crane.AbortStopping() {
				log.Infof("spn/captain: optimization aborted retiring of %s, removed stopping mark", crane)
				crane.NotifyUpdate()
			}

			// Create new connections if we have connects left.
		} else if createdConnections < result.MaxConnect {
			attemptedConnections++

			crane, tErr := EstablishPublicLane(ctx, connectTo.Hub)
			if !tErr.IsOK() {
				log.Warningf("spn/captain: failed to establish lane to %s: %s", connectTo.Hub, tErr)
			} else {
				createdConnections++
				crane.NetState.UpdateLastSuggestedAt()

				log.Infof("spn/captain: established lane to %s", connectTo.Hub)
			}
		}
	}

	// Log optimization result.
	if attemptedConnections > 0 {
		log.Infof(
			"spn/captain: created %d/%d new connections for %s optimization",
			createdConnections,
			attemptedConnections,
			result.Purpose)
	} else {
		log.Infof(
			"spn/captain: checked %d connections for %s optimization",
			len(result.SuggestedConnections),
			result.Purpose,
		)
	}

	// Retire cranes if unsuggested for a while.
	// Retirement is gradual: request stopping -> mark stopping -> stop.
	if result.StopOthers {
		for _, crane := range docks.GetAllAssignedCranes() {
			switch {
			case crane.Stopped():
				// Crane already stopped.
			case crane.IsStopping():
				// Crane is stopping, forcibly stop if mine and suggested.
				if crane.IsMine() && crane.NetState.StopSuggested() {
					crane.Stop(nil)
				}
			case crane.IsMine() && crane.NetState.StoppingSuggested():
				// Mark as stopping if mine and suggested.
				crane.MarkStopping()
			case crane.NetState.RequestStoppingSuggested(stopCraneAfterBeingUnsuggestedFor):
				// Mark as stopping requested.
				crane.MarkStoppingRequested()
			}
		}
	}

	return nil
}
diff --git a/spn/captain/op_gossip.go b/spn/captain/op_gossip.go
new file mode 100644
index 00000000..e5fb4377
--- /dev/null
+++ b/spn/captain/op_gossip.go
@@ -0,0 +1,156 @@
+package captain
+
+import (
+	"time"
+
+	"github.com/safing/portbase/container"
+	"github.com/safing/portbase/formats/varint"
+	"github.com/safing/portbase/log"
+	"github.com/safing/portmaster/spn/conf"
+	"github.com/safing/portmaster/spn/docks"
+	"github.com/safing/portmaster/spn/hub"
+	"github.com/safing/portmaster/spn/terminal"
+)
+
// GossipOpType is the type ID of the gossip operation.
const GossipOpType string = "gossip"

// GossipMsgType is the gossip message type.
type GossipMsgType uint8

// Gossip Message Types.
const (
	GossipHubAnnouncementMsg GossipMsgType = 1
	GossipHubStatusMsg       GossipMsgType = 2
)

// String returns a human readable name for the gossip message type.
func (msgType GossipMsgType) String() string {
	switch msgType {
	case GossipHubAnnouncementMsg:
		return "hub announcement"
	case GossipHubStatusMsg:
		return "hub status"
	}
	return "unknown gossip msg"
}
+
// GossipOp is used to gossip Hub messages.
type GossipOp struct {
	terminal.OperationBase

	// craneID identifies the crane this op runs on; it is the key in the
	// package-level gossip op registry.
	craneID string
}

// Type returns the type ID.
func (op *GossipOp) Type() string {
	return GossipOpType
}

func init() {
	// Gossip ops may only run on crane controllers.
	terminal.RegisterOpType(terminal.OperationFactory{
		Type:     GossipOpType,
		Requires: terminal.IsCraneController,
		Start:    runGossipOp,
	})
}

// NewGossipOp start a new gossip operation.
func NewGossipOp(controller *docks.CraneControllerTerminal) (*GossipOp, *terminal.Error) {
	// Create and init.
	op := &GossipOp{
		craneID: controller.Crane.ID,
	}
	err := controller.StartOperation(op, nil, 1*time.Minute)
	if err != nil {
		return nil, err
	}
	// Init the operation base after StartOperation, which assigns the
	// operation ID used here.
	op.InitOperationBase(controller, op.ID())

	// Register and return.
	registerGossipOp(controller.Crane.ID, op)
	return op, nil
}

// runGossipOp starts a gossip operation that was requested by the remote side.
func runGossipOp(t terminal.Terminal, opID uint32, data *container.Container) (terminal.Operation, *terminal.Error) {
	// Check if we are run by a controller.
	controller, ok := t.(*docks.CraneControllerTerminal)
	if !ok {
		return nil, terminal.ErrIncorrectUsage.With("gossip op may only be started by a crane controller terminal, but was started by %T", t)
	}

	// Create, init, register and return.
	op := &GossipOp{
		craneID: controller.Crane.ID,
	}
	op.InitOperationBase(t, opID)
	registerGossipOp(controller.Crane.ID, op)
	return op, nil
}
+
+func (op *GossipOp) sendMsg(msgType GossipMsgType, data []byte) {
+	// Create message.
+	msg := op.NewEmptyMsg()
+	msg.Data = container.New(
+		varint.Pack8(uint8(msgType)),
+		data,
+	)
+	msg.Unit.MakeHighPriority()
+
+	// Send.
+	err := op.Send(msg, 1*time.Second)
+	if err != nil {
+		log.Debugf("spn/captain: failed to forward %s via %s: %s", msgType, op.craneID, err)
+	}
+}
+
// Deliver delivers a message to the operation.
// The message is parsed, imported into the local hub database and - when new
// and valid - relayed to all other gossip ops.
func (op *GossipOp) Deliver(msg *terminal.Msg) *terminal.Error {
	defer msg.Finish()

	// First byte is the gossip message type.
	gossipMsgTypeN, err := msg.Data.GetNextN8()
	if err != nil {
		return terminal.ErrMalformedData.With("failed to parse gossip message type")
	}
	gossipMsgType := GossipMsgType(gossipMsgTypeN)

	// Prepare data.
	// The remaining data is either an announcement or a status, depending on
	// the message type.
	data := msg.Data.CompileData()
	var announcementData, statusData []byte
	switch gossipMsgType {
	case GossipHubAnnouncementMsg:
		announcementData = data
	case GossipHubStatusMsg:
		statusData = data
	default:
		log.Warningf("spn/captain: received unknown gossip message type from %s: %d", op.craneID, gossipMsgType)
		return nil
	}

	// Import and verify.
	h, forward, tErr := docks.ImportAndVerifyHubInfo(module.Ctx, "", announcementData, statusData, conf.MainMapName, conf.MainMapScope)
	if tErr != nil {
		if tErr.Is(hub.ErrOldData) {
			log.Debugf("spn/captain: ignoring old %s from %s", gossipMsgType, op.craneID)
		} else {
			log.Warningf("spn/captain: failed to import %s from %s: %s", gossipMsgType, op.craneID, tErr)
		}
	} else if forward {
		// Only log if we received something to save/forward.
		log.Infof("spn/captain: received %s for %s", gossipMsgType, h)
	}

	// Relay data.
	// op.craneID is excluded so the message is not echoed back to the sender.
	if forward {
		gossipRelayMsg(op.craneID, gossipMsgType, data)
	}
	return nil
}

// HandleStop gives the operation the ability to cleanly shut down.
// The returned error is the error to send to the other side.
// Should never be called directly. Call Stop() instead.
func (op *GossipOp) HandleStop(err *terminal.Error) (errorToSend *terminal.Error) {
	// Remove this op from the gossip registry so it no longer receives relays.
	deleteGossipOp(op.craneID)
	return err
}
diff --git a/spn/captain/op_gossip_query.go b/spn/captain/op_gossip_query.go
new file mode 100644
index 00000000..aaadbc21
--- /dev/null
+++ b/spn/captain/op_gossip_query.go
@@ -0,0 +1,195 @@
+package captain
+
+import (
+	"context"
+	"strings"
+	"time"
+
+	"github.com/safing/portbase/container"
+	"github.com/safing/portbase/formats/varint"
+	"github.com/safing/portbase/log"
+	"github.com/safing/portmaster/spn/conf"
+	"github.com/safing/portmaster/spn/docks"
+	"github.com/safing/portmaster/spn/hub"
+	"github.com/safing/portmaster/spn/terminal"
+)
+
// GossipQueryOpType is the type ID of the gossip query operation.
const GossipQueryOpType string = "gossip/query"

// GossipQueryOp is used to query gossip messages.
type GossipQueryOp struct {
	terminal.OperationBase

	t         terminal.Terminal // terminal the op runs on
	client    bool              // true on the side that requested the query
	importCnt int               // entries successfully imported (client side)

	// ctx is canceled in HandleStop and signals completion to waiters.
	ctx       context.Context
	cancelCtx context.CancelFunc
}

// Type returns the type ID.
func (op *GossipQueryOp) Type() string {
	return GossipQueryOpType
}

func init() {
	// Gossip query ops may only run on crane controllers.
	terminal.RegisterOpType(terminal.OperationFactory{
		Type:     GossipQueryOpType,
		Requires: terminal.IsCraneController,
		Start:    runGossipQueryOp,
	})
}
+
// NewGossipQueryOp starts a new gossip query operation.
// The op's ctx is derived from the terminal's context and is canceled when
// the operation stops, so callers can wait on op.ctx.Done().
func NewGossipQueryOp(t terminal.Terminal) (*GossipQueryOp, *terminal.Error) {
	// Create and init.
	op := &GossipQueryOp{
		t:      t,
		client: true,
	}
	op.ctx, op.cancelCtx = context.WithCancel(t.Ctx())
	err := t.StartOperation(op, nil, 1*time.Minute)
	if err != nil {
		return nil, err
	}
	return op, nil
}

// runGossipQueryOp answers a gossip query requested by the remote side by
// streaming all stored gossip messages back in a worker.
func runGossipQueryOp(t terminal.Terminal, opID uint32, data *container.Container) (terminal.Operation, *terminal.Error) {
	// Create, init, register and return.
	op := &GossipQueryOp{t: t}
	op.ctx, op.cancelCtx = context.WithCancel(t.Ctx())
	op.InitOperationBase(t, opID)

	module.StartWorker("gossip query handler", op.handler)

	return op, nil
}
+
+func (op *GossipQueryOp) handler(_ context.Context) error {
+	tErr := op.sendMsgs(hub.MsgTypeAnnouncement)
+	if tErr != nil {
+		op.Stop(op, tErr)
+		return nil // Clean worker exit.
+	}
+
+	tErr = op.sendMsgs(hub.MsgTypeStatus)
+	if tErr != nil {
+		op.Stop(op, tErr)
+		return nil // Clean worker exit.
+	}
+
+	op.Stop(op, nil)
+	return nil // Clean worker exit.
+}
+
// sendMsgs streams all stored gossip messages of the given type to the
// remote side. It returns nil when the iterator is exhausted, or an error
// when a send fails or the operation context is canceled.
func (op *GossipQueryOp) sendMsgs(msgType hub.MsgType) *terminal.Error {
	it, err := hub.QueryRawGossipMsgs(conf.MainMapName, msgType)
	if err != nil {
		return terminal.ErrInternalError.With("failed to query: %w", err)
	}
	defer it.Cancel()

iterating:
	for {
		select {
		case r := <-it.Next:
			// Check if we are done.
			// A nil record marks the end of the iterator.
			if r == nil {
				return nil
			}

			// Ensure we're handling a hub msg.
			hubMsg, err := hub.EnsureHubMsg(r)
			if err != nil {
				log.Warningf("spn/captain: failed to load hub msg: %s", err)
				continue iterating
			}

			// Create gossip msg.
			// Records with an unexpected type are logged and skipped (c stays nil).
			var c *container.Container
			switch hubMsg.Type {
			case hub.MsgTypeAnnouncement:
				c = container.New(
					varint.Pack8(uint8(GossipHubAnnouncementMsg)),
					hubMsg.Data,
				)
			case hub.MsgTypeStatus:
				c = container.New(
					varint.Pack8(uint8(GossipHubStatusMsg)),
					hubMsg.Data,
				)
			default:
				log.Warningf("spn/captain: unknown hub msg for gossip query at %q: %s", hubMsg.Key(), hubMsg.Type)
			}

			// Send msg.
			if c != nil {
				msg := op.NewEmptyMsg()
				msg.Unit.MakeHighPriority()
				msg.Data = c
				tErr := op.Send(msg, 1*time.Second)
				if tErr != nil {
					return tErr.Wrap("failed to send msg")
				}
			}

		case <-op.ctx.Done():
			return terminal.ErrStopping
		}
	}
}
+
// Deliver delivers the message to the operation.
// Received gossip messages are imported into the local hub database and,
// when new and valid, relayed to the other gossip ops.
func (op *GossipQueryOp) Deliver(msg *terminal.Msg) *terminal.Error {
	defer msg.Finish()

	// First byte is the gossip message type.
	gossipMsgTypeN, err := msg.Data.GetNextN8()
	if err != nil {
		return terminal.ErrMalformedData.With("failed to parse gossip message type")
	}
	gossipMsgType := GossipMsgType(gossipMsgTypeN)

	// Prepare data.
	// The remaining data is either an announcement or a status.
	data := msg.Data.CompileData()
	var announcementData, statusData []byte
	switch gossipMsgType {
	case GossipHubAnnouncementMsg:
		announcementData = data
	case GossipHubStatusMsg:
		statusData = data
	default:
		log.Warningf("spn/captain: received unknown gossip message type from gossip query: %d", gossipMsgType)
		return nil
	}

	// Import and verify.
	h, forward, tErr := docks.ImportAndVerifyHubInfo(module.Ctx, "", announcementData, statusData, conf.MainMapName, conf.MainMapScope)
	if tErr != nil {
		log.Warningf("spn/captain: failed to import %s from gossip query: %s", gossipMsgType, tErr)
	} else {
		log.Infof("spn/captain: received %s for %s from gossip query", gossipMsgType, h)
		op.importCnt++
	}

	// Relay data.
	if forward {
		// TODO: Find better way to get craneID.
		craneID := strings.SplitN(op.t.FmtID(), "#", 2)[0]
		gossipRelayMsg(craneID, gossipMsgType, data)
	}
	return nil
}

// HandleStop gives the operation the ability to cleanly shut down.
// The returned error is the error to send to the other side.
// Should never be called directly. Call Stop() instead.
func (op *GossipQueryOp) HandleStop(err *terminal.Error) (errorToSend *terminal.Error) {
	// Only the requesting side reports the import count.
	if op.client {
		log.Infof("spn/captain: gossip query imported %d entries", op.importCnt)
	}
	// Cancel op.ctx to release anyone waiting for the query to finish.
	op.cancelCtx()
	return err
}
diff --git a/spn/captain/op_publish.go b/spn/captain/op_publish.go
new file mode 100644
index 00000000..178d1e88
--- /dev/null
+++ b/spn/captain/op_publish.go
@@ -0,0 +1,183 @@
+package captain
+
+import (
+	"time"
+
+	"github.com/safing/portbase/container"
+	"github.com/safing/portmaster/spn/cabin"
+	"github.com/safing/portmaster/spn/conf"
+	"github.com/safing/portmaster/spn/docks"
+	"github.com/safing/portmaster/spn/hub"
+	"github.com/safing/portmaster/spn/terminal"
+)
+
+// PublishOpType is the type ID of the publish operation.
+// It is registered with the terminal operation registry in init().
+const PublishOpType string = "publish"
+
+// PublishOp is used to publish a connection.
+type PublishOp struct {
+	terminal.OperationBase
+	controller *docks.CraneControllerTerminal
+
+	// identity is only set on the initiating (client) side, see NewPublishOp.
+	identity      *cabin.Identity
+	// requestingHub and verification are only set on the receiving (server)
+	// side, see runPublishOp.
+	requestingHub *hub.Hub
+	verification  *cabin.Verification
+	// result receives the final (end) error of the operation; buffered with
+	// size 1 so HandleStop never blocks on it.
+	result        chan *terminal.Error
+}
+
+// Type returns the type ID.
+// It identifies the publish operation in the terminal operation registry.
+func (op *PublishOp) Type() string {
+	return PublishOpType
+}
+
+func init() {
+	// Register the publish operation so remote terminals can start it by
+	// type ID. It may only be started on crane controller terminals.
+	terminal.RegisterOpType(terminal.OperationFactory{
+		Type:     PublishOpType,
+		Requires: terminal.IsCraneController,
+		Start:    runPublishOp,
+	})
+}
+
+// NewPublishOp starts a new publish operation on the given crane controller.
+// It sends the identity's hub announcement and status to the remote side and
+// returns the running operation; the outcome is reported via Result().
+func NewPublishOp(controller *docks.CraneControllerTerminal, identity *cabin.Identity) (*PublishOp, *terminal.Error) {
+	// Create and init.
+	op := &PublishOp{
+		controller: controller,
+		identity:   identity,
+		result:     make(chan *terminal.Error, 1),
+	}
+	msg := container.New()
+
+	// Add Hub Announcement.
+	announcementData, err := identity.ExportAnnouncement()
+	if err != nil {
+		return nil, terminal.ErrInternalError.With("failed to export announcement: %w", err)
+	}
+	msg.AppendAsBlock(announcementData)
+
+	// Add Hub Status.
+	statusData, err := identity.ExportStatus()
+	if err != nil {
+		return nil, terminal.ErrInternalError.With("failed to export status: %w", err)
+	}
+	msg.AppendAsBlock(statusData)
+
+	tErr := controller.StartOperation(op, msg, 10*time.Second)
+	if tErr != nil {
+		return nil, tErr
+	}
+	return op, nil
+}
+
+// runPublishOp handles an incoming publish operation (server side).
+// It imports and verifies the announced hub info, relays it via gossip if
+// requested by the import, and replies with a verification request that the
+// client must sign (checked in Deliver).
+func runPublishOp(t terminal.Terminal, opID uint32, data *container.Container) (terminal.Operation, *terminal.Error) {
+	// Check if we are run by a controller.
+	controller, ok := t.(*docks.CraneControllerTerminal)
+	if !ok {
+		return nil, terminal.ErrIncorrectUsage.With("publish op may only be started by a crane controller terminal, but was started by %T", t)
+	}
+
+	// Parse and import Announcement and Status.
+	announcementData, err := data.GetNextBlock()
+	if err != nil {
+		return nil, terminal.ErrMalformedData.With("failed to get announcement: %w", err)
+	}
+	statusData, err := data.GetNextBlock()
+	if err != nil {
+		return nil, terminal.ErrMalformedData.With("failed to get status: %w", err)
+	}
+	h, forward, tErr := docks.ImportAndVerifyHubInfo(module.Ctx, "", announcementData, statusData, conf.MainMapName, conf.MainMapScope)
+	if tErr != nil {
+		return nil, tErr.Wrap("failed to import and verify hub")
+	}
+	// Update reference in case it was changed by the import.
+	controller.Crane.ConnectedHub = h
+
+	// Relay data.
+	if forward {
+		gossipRelayMsg(controller.Crane.ID, GossipHubAnnouncementMsg, announcementData)
+		gossipRelayMsg(controller.Crane.ID, GossipHubStatusMsg, statusData)
+	}
+
+	// Create verification request.
+	v, request, err := cabin.CreateVerificationRequest(PublishOpType, "", "")
+	if err != nil {
+		return nil, terminal.ErrInternalError.With("failed to create verification request: %w", err)
+	}
+
+	// Create operation.
+	op := &PublishOp{
+		controller:    controller,
+		requestingHub: h,
+		verification:  v,
+		result:        make(chan *terminal.Error, 1),
+	}
+	op.InitOperationBase(controller, opID)
+
+	// Reply with verification request.
+	tErr = op.Send(op.NewMsg(request), 10*time.Second)
+	if tErr != nil {
+		return nil, tErr.Wrap("failed to send verification request")
+	}
+
+	return op, nil
+}
+
+// Deliver delivers a message to the operation.
+// The client side signs the received verification request and replies with
+// the signature; the server side verifies the signed request and ends the
+// operation with ErrExplicitAck on success (handled in HandleStop).
+func (op *PublishOp) Deliver(msg *terminal.Msg) *terminal.Error {
+	defer msg.Finish()
+
+	if op.identity != nil {
+		// Client
+
+		// Sign the received verification request.
+		response, err := op.identity.SignVerificationRequest(msg.Data.CompileData(), PublishOpType, "", "")
+		if err != nil {
+			return terminal.ErrPermissionDenied.With("signing verification request failed: %w", err)
+		}
+
+		return op.Send(op.NewMsg(response), 10*time.Second)
+	} else if op.requestingHub != nil {
+		// Server
+
+		// Verify the signed request.
+		err := op.verification.Verify(msg.Data.CompileData(), op.requestingHub)
+		if err != nil {
+			return terminal.ErrPermissionDenied.With("checking verification request failed: %w", err)
+		}
+		return terminal.ErrExplicitAck
+	}
+
+	// Neither client nor server fields are set - this should not happen.
+	return terminal.ErrInternalError.With("invalid operation state")
+}
+
+// Result returns the result (end error) of the operation.
+// The channel is buffered with size 1 and receives exactly one value when
+// the operation stops.
+func (op *PublishOp) Result() <-chan *terminal.Error {
+	return op.result
+}
+
+// HandleStop gives the operation the ability to cleanly shut down.
+// The returned error is the error to send to the other side.
+// Should never be called directly. Call Stop() instead.
+// On a successful verification (ErrExplicitAck) it publishes the crane.
+func (op *PublishOp) HandleStop(tErr *terminal.Error) (errorToSend *terminal.Error) {
+	if tErr.Is(terminal.ErrExplicitAck) {
+		// TODO: Check for concurrent access.
+		if op.controller.Crane.ConnectedHub == nil {
+			op.controller.Crane.ConnectedHub = op.requestingHub
+		}
+
+		// Publish crane, abort if it fails.
+		err := op.controller.Crane.Publish()
+		if err != nil {
+			tErr = terminal.ErrInternalError.With("failed to publish crane: %w", err)
+			op.controller.Crane.Stop(tErr)
+		} else {
+			op.controller.Crane.NotifyUpdate()
+		}
+	}
+
+	// Non-blocking send: the buffered channel holds one result; further
+	// stops must not block here.
+	select {
+	case op.result <- tErr:
+	default:
+	}
+	return tErr
+}
diff --git a/spn/captain/piers.go b/spn/captain/piers.go
new file mode 100644
index 00000000..b0c994bf
--- /dev/null
+++ b/spn/captain/piers.go
@@ -0,0 +1,131 @@
+package captain
+
+import (
+	"context"
+	"errors"
+	"fmt"
+
+	"github.com/safing/portbase/log"
+	"github.com/safing/portmaster/service/intel"
+	"github.com/safing/portmaster/service/network/netutils"
+	"github.com/safing/portmaster/service/profile/endpoints"
+	"github.com/safing/portmaster/spn/docks"
+	"github.com/safing/portmaster/spn/hub"
+	"github.com/safing/portmaster/spn/ships"
+)
+
+var (
+	dockingRequests = make(chan ships.Ship, 100)
+	piers           []ships.Pier
+)
+
+// startPiers establishes a listening pier for every transport defined on the
+// public identity and starts the worker that handles docking requests.
+// It fails if no transports are defined or any pier cannot be established.
+func startPiers() error {
+	// Get and check transports.
+	transports := publicIdentity.Hub.Info.Transports
+	if len(transports) == 0 {
+		return errors.New("no transports defined")
+	}
+
+	piers = make([]ships.Pier, 0, len(transports))
+	for _, t := range transports {
+		// Parse transport.
+		transport, err := hub.ParseTransport(t)
+		if err != nil {
+			return fmt.Errorf("cannot build pier for invalid transport %q: %w", t, err)
+		}
+
+		// Establish pier / listener.
+		// All piers feed into the shared dockingRequests channel.
+		pier, err := ships.EstablishPier(transport, dockingRequests)
+		if err != nil {
+			return fmt.Errorf("failed to establish pier for transport %q: %w", t, err)
+		}
+
+		piers = append(piers, pier)
+		log.Infof("spn/captain: pier for transport %q built", t)
+	}
+
+	// Start worker to handle docking requests.
+	module.StartServiceWorker("docking request handler", 0, dockingRequestHandler)
+
+	return nil
+}
+
+// stopPiers abolishes all piers previously established by startPiers.
+func stopPiers() {
+	for _, pier := range piers {
+		pier.Abolish()
+	}
+}
+
+// dockingRequestHandler receives ships from the piers, checks them against
+// the entry policy and hands permitted ships over to crane commissioning.
+// It runs as a service worker until ctx is canceled.
+func dockingRequestHandler(ctx context.Context) error {
+	// Sink all waiting ships when this worker ends.
+	// But don't be destructive so the service worker could recover.
+	defer func() {
+		for {
+			select {
+			case ship := <-dockingRequests:
+				if ship != nil {
+					ship.Sink()
+				}
+			default:
+				return
+			}
+		}
+	}()
+
+	for {
+		select {
+		case <-ctx.Done():
+			return nil
+		case ship := <-dockingRequests:
+			// Ignore nil ships.
+			if ship == nil {
+				continue
+			}
+
+			// Denied ships are only logged; the handler keeps serving.
+			if err := checkDockingPermission(ctx, ship); err != nil {
+				log.Warningf("spn/captain: denied ship from %s to dock at pier %s: %s", ship.RemoteAddr(), ship.Transport().String(), err)
+			} else {
+				handleDockingRequest(ship)
+			}
+		}
+	}
+}
+
+// checkDockingPermission matches the ship's remote address against the
+// public identity's entry policy. It returns an error if the address cannot
+// be parsed or the policy explicitly denies the connection.
+func checkDockingPermission(ctx context.Context, ship ships.Ship) error {
+	remoteIP, remotePort, err := netutils.IPPortFromAddr(ship.RemoteAddr())
+	if err != nil {
+		return fmt.Errorf("failed to parse remote IP: %w", err)
+	}
+
+	// Create entity.
+	// NOTE(review): Init is passed the pier's transport port - presumably as
+	// the destination port of the incoming connection; confirm with intel.Entity.
+	entity := (&intel.Entity{
+		IP:       remoteIP,
+		Protocol: uint8(netutils.ProtocolFromNetwork(ship.RemoteAddr().Network())),
+		Port:     remotePort,
+	}).Init(ship.Transport().Port)
+	entity.FetchData(ctx)
+
+	// Check against policy.
+	// Only an explicit Denied result blocks docking.
+	result, reason := publicIdentity.Hub.GetInfo().EntryPolicy().Match(ctx, entity)
+	if result == endpoints.Denied {
+		return fmt.Errorf("entry policy violated: %s", reason)
+	}
+
+	return nil
+}
+
+// handleDockingRequest commissions a crane for a ship that passed the entry
+// policy check and starts it in a worker. Crane startup errors are handled
+// internally by the crane itself.
+func handleDockingRequest(ship ships.Ship) {
+	// Fixed typo in log message: "pemitting" -> "permitting".
+	log.Infof("spn/captain: permitting %s to dock", ship)
+
+	crane, err := docks.NewCrane(ship, nil, publicIdentity)
+	if err != nil {
+		log.Warningf("spn/captain: failed to commission crane for %s: %s", ship, err)
+		return
+	}
+
+	module.StartWorker("start crane", func(ctx context.Context) error {
+		_ = crane.Start(ctx)
+		// Crane handles errors internally.
+		return nil
+	})
+}
diff --git a/spn/captain/public.go b/spn/captain/public.go
new file mode 100644
index 00000000..04710d9f
--- /dev/null
+++ b/spn/captain/public.go
@@ -0,0 +1,247 @@
+package captain
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"sort"
+	"time"
+
+	"github.com/safing/portbase/database"
+	"github.com/safing/portbase/log"
+	"github.com/safing/portbase/metrics"
+	"github.com/safing/portbase/modules"
+	"github.com/safing/portmaster/spn/cabin"
+	"github.com/safing/portmaster/spn/conf"
+	"github.com/safing/portmaster/spn/docks"
+	"github.com/safing/portmaster/spn/hub"
+	"github.com/safing/portmaster/spn/navigator"
+	"github.com/safing/portmaster/spn/patrol"
+)
+
+const (
+	maintainStatusInterval    = 15 * time.Minute
+	maintainStatusUpdateDelay = 5 * time.Second
+)
+
+var (
+	publicIdentity    *cabin.Identity
+	publicIdentityKey = "core:spn/public/identity"
+
+	publicIdentityUpdateTask *modules.Task
+	statusUpdateTask         *modules.Task
+)
+
+// loadPublicIdentity loads the public hub identity from the database, or
+// creates and persists a new one if none exists. It then configures the
+// available networks and registers the identity as Home Hub on the main map.
+func loadPublicIdentity() (err error) {
+	var changed bool
+
+	publicIdentity, changed, err = cabin.LoadIdentity(publicIdentityKey)
+	switch {
+	case err == nil:
+		// load was successful
+		log.Infof("spn/captain: loaded public hub identity %s", publicIdentity.Hub.ID)
+	case errors.Is(err, database.ErrNotFound):
+		// does not exist, create new
+		publicIdentity, err = cabin.CreateIdentity(module.Ctx, conf.MainMapName)
+		if err != nil {
+			return fmt.Errorf("failed to create new identity: %w", err)
+		}
+		publicIdentity.SetKey(publicIdentityKey)
+		changed = true
+
+		log.Infof("spn/captain: created new public hub identity %s", publicIdentity.ID)
+	default:
+		// loading error, abort
+		return fmt.Errorf("failed to load public identity: %w", err)
+	}
+
+	// Save to database if the identity changed.
+	if changed {
+		err = publicIdentity.Save()
+		if err != nil {
+			return fmt.Errorf("failed to save new/updated identity to database: %w", err)
+		}
+	}
+
+	// Set available networks.
+	conf.SetHubNetworks(
+		publicIdentity.Hub.Info.IPv4 != nil,
+		publicIdentity.Hub.Info.IPv6 != nil,
+	)
+	if cfgOptionBindToAdvertised() {
+		conf.SetBindAddr(publicIdentity.Hub.Info.IPv4, publicIdentity.Hub.Info.IPv6)
+	}
+
+	// Set Home Hub before updating the hub on the map, as this would trigger a
+	// recalculation without a Home Hub.
+	ok := navigator.Main.SetHome(publicIdentity.ID, nil)
+	// Always update the navigator in any case in order to sync the reference to
+	// the active struct of the identity.
+	navigator.Main.UpdateHub(publicIdentity.Hub)
+	// Setting the Home Hub will have failed if the identity was only just
+	// created - try again if it failed.
+	if !ok {
+		ok = navigator.Main.SetHome(publicIdentity.ID, nil)
+		if !ok {
+			return errors.New("failed to set self as home hub")
+		}
+	}
+
+	return nil
+}
+
+// prepPublicIdentityMgmt creates the maintenance tasks for the public
+// identity (announcement) and its status, and hooks config changes to a
+// delayed identity update.
+func prepPublicIdentityMgmt() error {
+	publicIdentityUpdateTask = module.NewTask(
+		"maintain public identity",
+		maintainPublicIdentity,
+	)
+
+	// Status maintenance runs periodically; see maintainStatusInterval.
+	statusUpdateTask = module.NewTask(
+		"maintain public status",
+		maintainPublicStatus,
+	).Repeat(maintainStatusInterval)
+
+	return module.RegisterEventHook(
+		"config",
+		"config change",
+		"update public identity from config",
+		func(_ context.Context, _ interface{}) error {
+			// trigger update in 5 minutes
+			publicIdentityUpdateTask.Schedule(time.Now().Add(5 * time.Minute))
+			return nil
+		},
+	)
+}
+
+// TriggerHubStatusMaintenance queues the Hub status update task to be executed.
+// It is a no-op when the task has not been created (prepPublicIdentityMgmt
+// has not run).
+func TriggerHubStatusMaintenance() {
+	if statusUpdateTask != nil {
+		statusUpdateTask.Queue()
+	}
+}
+
+// maintainPublicIdentity runs announcement maintenance on the public
+// identity. If the announcement changed, it updates the map and relays the
+// new announcement to all connected Hubs via gossip.
+func maintainPublicIdentity(ctx context.Context, task *modules.Task) error {
+	changed, err := publicIdentity.MaintainAnnouncement(nil, false)
+	if err != nil {
+		return fmt.Errorf("failed to maintain announcement: %w", err)
+	}
+
+	if !changed {
+		return nil
+	}
+
+	// Update on map.
+	navigator.Main.UpdateHub(publicIdentity.Hub)
+	log.Debug("spn/captain: updated own hub on map after announcement change")
+
+	// export announcement
+	announcementData, err := publicIdentity.ExportAnnouncement()
+	if err != nil {
+		return fmt.Errorf("failed to export announcement: %w", err)
+	}
+
+	// forward to other connected Hubs
+	// An empty crane ID means the message did not come in via a crane.
+	gossipRelayMsg("", GossipHubAnnouncementMsg, announcementData)
+
+	return nil
+}
+
+// maintainPublicStatus collects the current lanes, system load and flags,
+// runs status maintenance on the public identity and - if the status
+// changed - updates the map and relays the new status via gossip.
+func maintainPublicStatus(ctx context.Context, task *modules.Task) error {
+	// Get current lanes.
+	cranes := docks.GetAllAssignedCranes()
+	lanes := make([]*hub.Lane, 0, len(cranes))
+	for _, crane := range cranes {
+		// Ignore private, stopped or stopping cranes.
+		if !crane.Public() || crane.Stopped() || crane.IsStopping() {
+			continue
+		}
+
+		// Get measurements.
+		// NOTE(review): assumes ConnectedHub is non-nil for assigned cranes -
+		// confirm GetAllAssignedCranes guarantees this.
+		measurements := crane.ConnectedHub.GetMeasurements()
+		latency, _ := measurements.GetLatency()
+		capacity, _ := measurements.GetCapacity()
+
+		// Add crane lane.
+		lanes = append(lanes, &hub.Lane{
+			ID:       crane.ConnectedHub.ID,
+			Latency:  latency,
+			Capacity: capacity,
+		})
+	}
+	// Sort Lanes for comparing.
+	hub.SortLanes(lanes)
+
+	// Get system load and convert to fixed steps.
+	// -1 signals that no load average is available on this platform.
+	var load int
+	loadAvg, ok := metrics.LoadAvg15()
+	switch {
+	case !ok:
+		load = -1
+	case loadAvg >= 1:
+		load = 100
+	case loadAvg >= 0.95:
+		load = 95
+	case loadAvg >= 0.8:
+		load = 80
+	default:
+		load = 0
+	}
+	if loadAvg >= 0.8 {
+		log.Warningf("spn/captain: publishing 15m system load average of %.2f as %d", loadAvg, load)
+	}
+
+	// Set flags.
+	var flags []string
+	if !patrol.HTTPSConnectivityConfirmed() {
+		flags = append(flags, hub.FlagNetError)
+	}
+	// Sort flags for comparing.
+	sort.Strings(flags)
+
+	// Run maintenance with the new data.
+	changed, err := publicIdentity.MaintainStatus(lanes, &load, flags, false)
+	if err != nil {
+		return fmt.Errorf("failed to maintain status: %w", err)
+	}
+
+	if !changed {
+		return nil
+	}
+
+	// Update on map.
+	navigator.Main.UpdateHub(publicIdentity.Hub)
+	log.Debug("spn/captain: updated own hub on map after status change")
+
+	// export status
+	statusData, err := publicIdentity.ExportStatus()
+	if err != nil {
+		return fmt.Errorf("failed to export status: %w", err)
+	}
+
+	// forward to other connected Hubs
+	gossipRelayMsg("", GossipHubStatusMsg, statusData)
+
+	log.Infof(
+		"spn/captain: updated status with load %d and current lanes: %v",
+		publicIdentity.Hub.Status.Load,
+		publicIdentity.Hub.Status.Lanes,
+	)
+	return nil
+}
+
+// publishShutdownStatus broadcasts an offline status to all connected Hubs
+// during shutdown, then waits briefly so the message can propagate.
+func publishShutdownStatus() {
+	// Create offline status.
+	offlineStatusData, err := publicIdentity.MakeOfflineStatus()
+	if err != nil {
+		log.Errorf("spn/captain: failed to create offline status: %s", err)
+		return
+	}
+
+	// Forward to other connected Hubs.
+	gossipRelayMsg("", GossipHubStatusMsg, offlineStatusData)
+
+	// Leave some time for the message to broadcast.
+	time.Sleep(2 * time.Second)
+
+	log.Infof("spn/captain: broadcasted offline status")
+}
diff --git a/spn/captain/status.go b/spn/captain/status.go
new file mode 100644
index 00000000..99b6632c
--- /dev/null
+++ b/spn/captain/status.go
@@ -0,0 +1,154 @@
+package captain
+
+import (
+	"fmt"
+	"sort"
+	"sync"
+	"time"
+
+	"github.com/safing/portbase/config"
+	"github.com/safing/portbase/database/record"
+	"github.com/safing/portbase/runtime"
+	"github.com/safing/portbase/utils/debug"
+	"github.com/safing/portmaster/service/intel/geoip"
+	"github.com/safing/portmaster/spn/conf"
+	"github.com/safing/portmaster/spn/navigator"
+)
+
+// SPNStatus holds SPN status information.
+// It is exposed as a runtime database record; all fields are protected by
+// the embedded mutex.
+type SPNStatus struct {
+	record.Base
+	sync.Mutex
+
+	Status             SPNStatusName
+	HomeHubID          string
+	HomeHubName        string
+	ConnectedIP        string
+	ConnectedTransport string
+	ConnectedCountry   *geoip.CountryInfo
+	// ConnectedSince is nil when not connected.
+	ConnectedSince     *time.Time
+}
+
+// SPNStatusName is a SPN status.
+type SPNStatusName string
+
+// Possible SPN status values.
+const (
+	StatusFailed     SPNStatusName = "failed"
+	StatusDisabled   SPNStatusName = "disabled"
+	StatusConnecting SPNStatusName = "connecting"
+	StatusConnected  SPNStatusName = "connected"
+)
+
+var (
+	spnStatus = &SPNStatus{
+		Status: StatusDisabled,
+	}
+	spnStatusPushFunc runtime.PushFunc
+)
+
+// registerSPNStatusProvider registers the SPN status as a runtime record at
+// "runtime:spn/status" and stores the push function used to publish updates.
+func registerSPNStatusProvider() (err error) {
+	spnStatus.SetKey("runtime:spn/status")
+	spnStatus.UpdateMeta()
+	spnStatusPushFunc, err = runtime.Register("spn/status", runtime.ProvideRecord(spnStatus))
+	return
+}
+
+// resetSPNStatus clears all connection details and sets the given status.
+// Unless overrideEvenIfConnected is set, an active "connected" status is
+// left untouched. The new status is pushed to subscribers.
+func resetSPNStatus(statusName SPNStatusName, overrideEvenIfConnected bool) {
+	// Lock for updating values.
+	spnStatus.Lock()
+	defer spnStatus.Unlock()
+
+	// Ignore when connected and not overriding.
+	if !overrideEvenIfConnected && spnStatus.Status == StatusConnected {
+		return
+	}
+
+	// Reset status.
+	spnStatus.Status = statusName
+	spnStatus.HomeHubID = ""
+	spnStatus.HomeHubName = ""
+	spnStatus.ConnectedIP = ""
+	spnStatus.ConnectedTransport = ""
+	spnStatus.ConnectedCountry = nil
+	spnStatus.ConnectedSince = nil
+
+	// Push new status.
+	pushSPNStatusUpdate()
+}
+
+// pushSPNStatusUpdate pushes an update of spnStatus, which must be locked.
+// It refreshes the record metadata before pushing.
+func pushSPNStatusUpdate() {
+	spnStatus.UpdateMeta()
+	spnStatusPushFunc(spnStatus)
+}
+
+// GetSPNStatus returns the current SPN status.
+// A field-wise copy is returned (instead of copying the struct) so the
+// embedded mutex and record.Base are not copied, and callers get a snapshot
+// that is safe to use without locking.
+func GetSPNStatus() *SPNStatus {
+	spnStatus.Lock()
+	defer spnStatus.Unlock()
+
+	return &SPNStatus{
+		Status:             spnStatus.Status,
+		HomeHubID:          spnStatus.HomeHubID,
+		HomeHubName:        spnStatus.HomeHubName,
+		ConnectedIP:        spnStatus.ConnectedIP,
+		ConnectedTransport: spnStatus.ConnectedTransport,
+		ConnectedCountry:   spnStatus.ConnectedCountry,
+		ConnectedSince:     spnStatus.ConnectedSince,
+	}
+}
+
+// AddToDebugInfo adds the SPN status to the given debug.Info.
+// It includes the connection details, the conf package flags, and - if the
+// main map is initialized - a summary of map and hub states.
+func AddToDebugInfo(di *debug.Info) {
+	spnStatus.Lock()
+	defer spnStatus.Unlock()
+
+	// Check if SPN module is enabled.
+	var moduleStatus string
+	spnEnabled := config.GetAsBool(CfgOptionEnableSPNKey, false)
+	if spnEnabled() {
+		moduleStatus = "enabled"
+	} else {
+		moduleStatus = "disabled"
+	}
+
+	// Collect status data.
+	lines := make([]string, 0, 20)
+	lines = append(lines, fmt.Sprintf("HomeHubID:    %v", spnStatus.HomeHubID))
+	lines = append(lines, fmt.Sprintf("HomeHubName:  %v", spnStatus.HomeHubName))
+	lines = append(lines, fmt.Sprintf("HomeHubIP:    %v", spnStatus.ConnectedIP))
+	lines = append(lines, fmt.Sprintf("Transport:    %v", spnStatus.ConnectedTransport))
+	if spnStatus.ConnectedSince != nil {
+		lines = append(lines, fmt.Sprintf("Connected:    %v ago", time.Since(*spnStatus.ConnectedSince).Round(time.Minute)))
+	}
+	lines = append(lines, "---")
+	lines = append(lines, fmt.Sprintf("Client:       %v", conf.Client()))
+	lines = append(lines, fmt.Sprintf("PublicHub:    %v", conf.PublicHub()))
+	lines = append(lines, fmt.Sprintf("HubHasIPv4:   %v", conf.HubHasIPv4()))
+	lines = append(lines, fmt.Sprintf("HubHasIPv6:   %v", conf.HubHasIPv6()))
+
+	// Collect status data of map.
+	if navigator.Main != nil {
+		lines = append(lines, "---")
+		mainMapStats := navigator.Main.Stats()
+		lines = append(lines, fmt.Sprintf("Map %s:", navigator.Main.Name))
+		lines = append(lines, fmt.Sprintf("Active Terminals: %d Hubs", mainMapStats.ActiveTerminals))
+		// Collect hub states.
+		// Only states with at least one hub are listed.
+		mapStateSummary := make([]string, 0, len(mainMapStats.States))
+		for state, cnt := range mainMapStats.States {
+			if cnt > 0 {
+				mapStateSummary = append(mapStateSummary, fmt.Sprintf("State %s: %d Hubs", state, cnt))
+			}
+		}
+		// Sort for deterministic output, as map iteration order is random.
+		sort.Strings(mapStateSummary)
+		lines = append(lines, mapStateSummary...)
+	}
+
+	// Add all data as section.
+	di.AddSection(
+		fmt.Sprintf("SPN: %s (module %s)", spnStatus.Status, moduleStatus),
+		debug.UseCodeSection|debug.AddContentLineBreaks,
+		lines...,
+	)
+}
diff --git a/spn/conf/map.go b/spn/conf/map.go
new file mode 100644
index 00000000..e720be1a
--- /dev/null
+++ b/spn/conf/map.go
@@ -0,0 +1,17 @@
+package conf
+
+import (
+	"flag"
+
+	"github.com/safing/portmaster/spn/hub"
+)
+
+// Primary Map Configuration.
+var (
+	MainMapName  = "main"
+	MainMapScope = hub.ScopePublic
+)
+
+func init() {
+	// The -spn-map flag overrides MainMapName; the default matches the
+	// package-level default above.
+	flag.StringVar(&MainMapName, "spn-map", "main", "set main SPN map - use only for testing")
+}
diff --git a/spn/conf/mode.go b/spn/conf/mode.go
new file mode 100644
index 00000000..cc1248bb
--- /dev/null
+++ b/spn/conf/mode.go
@@ -0,0 +1,30 @@
+package conf
+
+import (
+	"github.com/tevino/abool"
+)
+
+var (
+	publicHub = abool.New()
+	client    = abool.New()
+)
+
+// PublicHub returns whether this is a public Hub.
+// The flag is set via EnablePublicHub.
+func PublicHub() bool {
+	return publicHub.IsSet()
+}
+
+// EnablePublicHub enables the public hub mode.
+// Passing false disables it again.
+func EnablePublicHub(enable bool) {
+	publicHub.SetTo(enable)
+}
+
+// Client returns whether this is a client.
+// The flag is set via EnableClient.
+func Client() bool {
+	return client.IsSet()
+}
+
+// EnableClient enables the client mode.
+// Passing false disables it again.
+func EnableClient(enable bool) {
+	client.SetTo(enable)
+}
diff --git a/spn/conf/networks.go b/spn/conf/networks.go
new file mode 100644
index 00000000..379395c3
--- /dev/null
+++ b/spn/conf/networks.go
@@ -0,0 +1,110 @@
+package conf
+
+import (
+	"net"
+	"sync"
+
+	"github.com/tevino/abool"
+)
+
+var (
+	hubHasV4 = abool.New()
+	hubHasV6 = abool.New()
+)
+
+// SetHubNetworks sets the available IP networks on the Hub.
+// Queried via HubHasIPv4 and HubHasIPv6.
+func SetHubNetworks(v4, v6 bool) {
+	hubHasV4.SetTo(v4)
+	hubHasV6.SetTo(v6)
+}
+
+// HubHasIPv4 returns whether the Hub has IPv4 support.
+// Set via SetHubNetworks.
+func HubHasIPv4() bool {
+	return hubHasV4.IsSet()
+}
+
+// HubHasIPv6 returns whether the Hub has IPv6 support.
+// Set via SetHubNetworks.
+func HubHasIPv6() bool {
+	return hubHasV6.IsSet()
+}
+
+var (
+	bindIPv4   net.IP
+	bindIPv6   net.IP
+	bindIPLock sync.Mutex
+)
+
+// SetBindAddr sets the preferred connect (bind) addresses.
+// Either address may be nil to indicate no preference for that IP version.
+func SetBindAddr(ip4, ip6 net.IP) {
+	bindIPLock.Lock()
+	defer bindIPLock.Unlock()
+
+	bindIPv4 = ip4
+	bindIPv6 = ip6
+}
+
+// BindAddrIsSet returns whether any bind address is set.
+// It reports true if either the IPv4 or the IPv6 address is non-nil.
+func BindAddrIsSet() bool {
+	bindIPLock.Lock()
+	defer bindIPLock.Unlock()
+
+	return bindIPv4 != nil || bindIPv6 != nil
+}
+
+// GetBindAddr returns an address with the preferred binding address for the
+// given dial network.
+// The dial network must have a suffix specifying the IP version.
+func GetBindAddr(dialNetwork string) net.Addr {
+	bindIPLock.Lock()
+	defer bindIPLock.Unlock()
+
+	switch dialNetwork {
+	case "ip4":
+		if bindIPv4 != nil {
+			return &net.IPAddr{IP: bindIPv4}
+		}
+	case "ip6":
+		if bindIPv6 != nil {
+			return &net.IPAddr{IP: bindIPv6}
+		}
+	case "tcp4":
+		if bindIPv4 != nil {
+			return &net.TCPAddr{IP: bindIPv4}
+		}
+	case "tcp6":
+		if bindIPv6 != nil {
+			return &net.TCPAddr{IP: bindIPv6}
+		}
+	case "udp4":
+		if bindIPv4 != nil {
+			return &net.UDPAddr{IP: bindIPv4}
+		}
+	case "udp6":
+		if bindIPv6 != nil {
+			return &net.UDPAddr{IP: bindIPv6}
+		}
+	}
+
+	return nil
+}
+
+// GetBindIPs returns the preferred binding IPs.
+// Returns a slice with a single nil IP if no preferred binding IPs are set.
+func GetBindIPs() []net.IP {
+	bindIPLock.Lock()
+	defer bindIPLock.Unlock()
+
+	// Collect whichever addresses are configured, IPv4 first.
+	ips := make([]net.IP, 0, 2)
+	if bindIPv4 != nil {
+		ips = append(ips, bindIPv4)
+	}
+	if bindIPv6 != nil {
+		ips = append(ips, bindIPv6)
+	}
+
+	// No preference set: signal "bind to anything" with a single nil IP.
+	if len(ips) == 0 {
+		return []net.IP{nil}
+	}
+	return ips
+}
diff --git a/spn/conf/version.go b/spn/conf/version.go
new file mode 100644
index 00000000..ec5f3f03
--- /dev/null
+++ b/spn/conf/version.go
@@ -0,0 +1,9 @@
+package conf
+
+// Protocol version constants.
+const (
+	// VersionOne is the first protocol version.
+	VersionOne = 1
+
+	// CurrentVersion always holds the newest version in production.
+	CurrentVersion = 1
+)
diff --git a/spn/crew/connect.go b/spn/crew/connect.go
new file mode 100644
index 00000000..96239931
--- /dev/null
+++ b/spn/crew/connect.go
@@ -0,0 +1,482 @@
+package crew
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"net"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/safing/portbase/log"
+	"github.com/safing/portmaster/service/network"
+	"github.com/safing/portmaster/service/profile/endpoints"
+	"github.com/safing/portmaster/spn/access"
+	"github.com/safing/portmaster/spn/docks"
+	"github.com/safing/portmaster/spn/navigator"
+	"github.com/safing/portmaster/spn/terminal"
+)
+
+// connectLock locks all routing operations to mitigate racy stuff for now.
+// TODO: Find a nice way to parallelize route creation.
+var connectLock sync.Mutex
+
+// HandleSluiceRequest handles a sluice request to build a tunnel.
+// A nil conn signals shutdown: the connection is marked as failed instead of
+// being tunneled. Otherwise a Tunnel is created and handled in a worker.
+func HandleSluiceRequest(connInfo *network.Connection, conn net.Conn) {
+	if conn == nil {
+		log.Debugf("spn/crew: closing tunnel for %s before starting because of shutdown", connInfo)
+
+		// This is called within the connInfo lock.
+		connInfo.Failed("tunnel entry closed", "")
+		connInfo.SaveWhenFinished()
+		return
+	}
+
+	t := &Tunnel{
+		connInfo: connInfo,
+		conn:     conn,
+	}
+	module.StartWorker("tunnel handler", t.connectWorker)
+}
+
+// Tunnel represents the local information and endpoint of a data tunnel.
+type Tunnel struct {
+	connInfo *network.Connection
+	conn     net.Conn
+
+	// Destination data, populated by establish().
+	dstPin      *navigator.Pin
+	dstTerminal terminal.Terminal
+	route       *navigator.Route
+	// failedTries counts routes tried before one succeeded.
+	failedTries int
+	// stickied is set when the destination stuck to a previously used Hub.
+	stickied    bool
+}
+
+// connectWorker establishes a route through the SPN for the tunnel and then
+// connects through it. It is run as a worker per sluice request.
+// Failures are recorded on the connection info; only a connect-op failure is
+// returned as worker error.
+func (t *Tunnel) connectWorker(ctx context.Context) (err error) {
+	// Get tracing logger.
+	ctx, tracer := log.AddTracer(ctx)
+	defer tracer.Submit()
+
+	// Save start time.
+	started := time.Now()
+
+	// Check the status of the Home Hub.
+	home, homeTerminal := navigator.Main.GetHome()
+	if home == nil || homeTerminal == nil || homeTerminal.IsBeingAbandoned() {
+		reportConnectError(terminal.ErrUnknownError.With("home terminal is abandoned"))
+
+		t.connInfo.Lock()
+		defer t.connInfo.Unlock()
+		t.connInfo.Failed("SPN not ready for tunneling", "")
+		t.connInfo.Save()
+
+		tracer.Infof("spn/crew: not tunneling %s, as the SPN is not ready", t.connInfo)
+		return nil
+	}
+
+	// Create path through the SPN.
+	err = t.establish(ctx)
+	if err != nil {
+		log.Warningf("spn/crew: failed to establish route for %s: %s", t.connInfo, err)
+
+		// TODO: Clean this up.
+		t.connInfo.Lock()
+		defer t.connInfo.Unlock()
+		t.connInfo.Failed(fmt.Sprintf("SPN failed to establish route: %s", err), "")
+		t.connInfo.Save()
+
+		tracer.Warningf("spn/crew: failed to establish route for %s: %s", t.connInfo, err)
+		return nil
+	}
+
+	// Connect via established tunnel.
+	_, tErr := NewConnectOp(t)
+	if tErr != nil {
+		tErr = tErr.Wrap("failed to initialize tunnel")
+		reportConnectError(tErr)
+
+		t.connInfo.Lock()
+		defer t.connInfo.Unlock()
+		t.connInfo.Failed(fmt.Sprintf("SPN failed to initialize data tunnel (connect op): %s", tErr.Error()), "")
+		t.connInfo.Save()
+
+		// TODO: try with another route?
+		// Bugfix: log the operation error tErr here - err is always nil at
+		// this point, so the previous log line always printed "%!s(<nil>)".
+		tracer.Warningf("spn/crew: failed to initialize data tunnel (connect op) for %s: %s", t.connInfo, tErr)
+		return tErr
+	}
+
+	// Report time taken to find, build and check route and send connect request.
+	connectOpTTCRDurationHistogram.UpdateDuration(started)
+
+	t.connInfo.Lock()
+	defer t.connInfo.Unlock()
+	addTunnelContextToConnection(t)
+	t.connInfo.Save()
+
+	tracer.Infof("spn/crew: connected %s via %s", t.connInfo, t.dstPin.Hub)
+	return nil
+}
+
+// establish finds and builds a route to the tunnel's destination.
+// It honors "sticky" Hub decisions: a stickied Hub is reused (or routed to)
+// and an avoided Hub is excluded via an additional Hub policy. Routes are
+// tried in order until one can be established.
+func (t *Tunnel) establish(ctx context.Context) (err error) {
+	var routes *navigator.Routes
+
+	// Check if the destination sticks to a Hub.
+	sticksTo := getStickiedHub(t.connInfo)
+	switch {
+	case sticksTo == nil:
+		// Continue.
+
+	case sticksTo.Avoid:
+		log.Tracer(ctx).Tracef("spn/crew: avoiding %s", sticksTo.Pin.Hub)
+
+		// Avoid this Hub.
+		// TODO: Remember more than one hub to avoid.
+		avoidPolicy := []endpoints.Endpoint{
+			&endpoints.EndpointDomain{
+				OriginalValue: sticksTo.Pin.Hub.ID,
+				Domain:        strings.ToLower(sticksTo.Pin.Hub.ID) + ".",
+			},
+		}
+
+		// Append to policies.
+		t.connInfo.TunnelOpts.Destination.HubPolicies = append(t.connInfo.TunnelOpts.Destination.HubPolicies, avoidPolicy)
+
+	default:
+		log.Tracer(ctx).Tracef("spn/crew: using stickied %s", sticksTo.Pin.Hub)
+
+		// Check if the stickied Hub has an active terminal.
+		// If so, reuse it directly without building a new route.
+		dstTerminal := sticksTo.Pin.GetActiveTerminal()
+		if dstTerminal != nil {
+			t.dstPin = sticksTo.Pin
+			t.dstTerminal = dstTerminal
+			t.route = sticksTo.Route
+			t.stickied = true
+			return nil
+		}
+
+		// If not, attempt to find a route to the stickied hub.
+		// On failure, fall through to regular route finding below.
+		routes, err = navigator.Main.FindRouteToHub(
+			sticksTo.Pin.Hub.ID,
+			t.connInfo.TunnelOpts,
+		)
+		if err != nil {
+			log.Tracer(ctx).Tracef("spn/crew: failed to find route to stickied %s: %s", sticksTo.Pin.Hub, err)
+			routes = nil
+		} else {
+			t.stickied = true
+		}
+	}
+
+	// Find possible routes to destination.
+	if routes == nil {
+		log.Tracer(ctx).Trace("spn/crew: finding routes...")
+		routes, err = navigator.Main.FindRoutes(
+			t.connInfo.Entity.IP,
+			t.connInfo.TunnelOpts,
+		)
+		if err != nil {
+			return fmt.Errorf("failed to find routes to %s: %w", t.connInfo.Entity.IP, err)
+		}
+	}
+
+	// Check if routes are okay (again).
+	if len(routes.All) == 0 {
+		return fmt.Errorf("no routes to %s", t.connInfo.Entity.IP)
+	}
+
+	// Try routes until one succeeds.
+	log.Tracer(ctx).Trace("spn/crew: establishing route...")
+	var dstPin *navigator.Pin
+	var dstTerminal terminal.Terminal
+	for tries, route := range routes.All {
+		dstPin, dstTerminal, err = establishRoute(route)
+		if err != nil {
+			continue
+		}
+
+		// Assign route data to tunnel.
+		t.dstPin = dstPin
+		t.dstTerminal = dstTerminal
+		t.route = route
+		t.failedTries = tries
+
+		// Push changes to Pins and return.
+		navigator.Main.PushPinChanges()
+		return nil
+	}
+
+	// err holds the failure of the last attempted route.
+	return fmt.Errorf("failed to establish a route to %s: %w", t.connInfo.Entity.IP, err)
+}
+
+// hopCheck records a pending check for one hop of a route being built:
+// either an authorization op (new expansion) or a ping op (reused terminal).
+// Results are collected after all hops were initiated.
+type hopCheck struct {
+	pin       *navigator.Pin
+	route     *navigator.Route
+	expansion *docks.ExpansionTerminal
+	authOp    *access.AuthorizeOp
+	pingOp    *PingOp
+}
+
+// establishRoute builds the given route hop by hop, reusing existing
+// connections to hubs where possible, and returns the final (destination)
+// pin and terminal. Reused terminals are re-checked with a ping if their
+// reachability was not confirmed recently. Only one route may be established
+// at a time (guarded by connectLock).
+func establishRoute(route *navigator.Route) (dstPin *navigator.Pin, dstTerminal terminal.Terminal, err error) {
+	connectLock.Lock()
+	defer connectLock.Unlock()
+
+	// Check for path length.
+	if len(route.Path) < 1 {
+		return nil, nil, errors.New("path too short")
+	}
+
+	// Check for failing hubs in path.
+	for _, hop := range route.Path[1:] {
+		if hop.Pin().GetState().Has(navigator.StateFailing) {
+			return nil, nil, fmt.Errorf("failing hub in path: %s", hop.Pin().Hub.Name())
+		}
+	}
+
+	// Get home hub.
+	previousHop, homeTerminal := navigator.Main.GetHome()
+	if previousHop == nil || homeTerminal == nil {
+		return nil, nil, navigator.ErrHomeHubUnset
+	}
+	// Convert to interface for later use.
+	var previousTerminal terminal.Terminal = homeTerminal
+
+	// Check if first hub in path is the home hub.
+	if route.Path[0].HubID != previousHop.Hub.ID {
+		return nil, nil, errors.New("path start does not match home hub")
+	}
+
+	// Check if path only exists of home hub.
+	if len(route.Path) == 1 {
+		return previousHop, previousTerminal, nil
+	}
+
+	// TODO: Check what needs locking.
+
+	// Build path and save created paths.
+	hopChecks := make([]*hopCheck, 0, len(route.Path)-1)
+	for i, hop := range route.Path[1:] {
+		// Check if we already have a connection to the Hub.
+		activeTerminal := hop.Pin().GetActiveTerminal()
+		if activeTerminal != nil {
+			// Ping terminal if not recently checked.
+			if activeTerminal.NeedsReachableCheck(1 * time.Minute) {
+				pingOp, tErr := NewPingOp(activeTerminal)
+				if tErr.IsError() {
+					return nil, nil, tErr.Wrap("failed start ping to %s", hop.Pin())
+				}
+				// Add for checking results later.
+				hopChecks = append(hopChecks, &hopCheck{
+					pin:       hop.Pin(),
+					route:     route.CopyUpTo(i + 2),
+					expansion: activeTerminal,
+					pingOp:    pingOp,
+				})
+			}
+
+			previousHop = hop.Pin()
+			previousTerminal = activeTerminal
+			continue
+		}
+
+		// Expand to next Hub.
+		expansion, authOp, tErr := expand(previousTerminal, previousHop, hop.Pin())
+		if tErr != nil {
+			return nil, nil, tErr.Wrap("failed to expand to %s", hop.Pin())
+		}
+
+		// Add for checking results later.
+		hopChecks = append(hopChecks, &hopCheck{
+			pin:       hop.Pin(),
+			route:     route.CopyUpTo(i + 2),
+			expansion: expansion,
+			authOp:    authOp,
+		})
+
+		// Save previous pin for next loop or end.
+		previousHop = hop.Pin()
+		previousTerminal = expansion
+	}
+
+	// Check results.
+	for _, check := range hopChecks {
+		switch {
+		case check.authOp != nil:
+			// Wait for authOp result.
+			select {
+			case tErr := <-check.authOp.Result:
+				switch {
+				case tErr.IsError():
+					// There was a network or authentication error.
+					check.pin.MarkAsFailingFor(3 * time.Minute)
+					log.Warningf("spn/crew: failed to auth to %s: %s", check.pin.Hub, tErr)
+					return nil, nil, tErr.Wrap("failed to authenticate to %s: %w", check.pin.Hub, tErr)
+
+				case tErr.Is(terminal.ErrExplicitAck):
+					// Authentication was successful.
+
+				default:
+					// Authentication was aborted.
+					// Guard against a nil result before logging and wrapping it.
+					// (Was "tErr != nil", which clobbered real abort errors and
+					// left a nil tErr to be dereferenced below.)
+					if tErr == nil {
+						tErr = terminal.ErrUnknownError
+					}
+					log.Warningf("spn/crew: auth to %s aborted with %s", check.pin.Hub, tErr)
+					return nil, nil, tErr.Wrap("authentication to %s aborted: %w", check.pin.Hub, tErr)
+				}
+
+			case <-time.After(5 * time.Second):
+				// Mark as failing for just a minute, until server load may be less.
+				check.pin.MarkAsFailingFor(1 * time.Minute)
+				log.Warningf("spn/crew: auth to %s timed out", check.pin.Hub)
+
+				return nil, nil, terminal.ErrTimeout.With("waiting for auth to %s", check.pin.Hub)
+			}
+
+			// Add terminal extension to the map.
+			check.pin.SetActiveTerminal(&navigator.PinConnection{
+				Terminal: check.expansion,
+				Route:    check.route,
+			})
+			check.expansion.MarkReachable()
+			log.Infof("spn/crew: added conn to %s via %s", check.pin, check.route)
+
+		case check.pingOp != nil:
+			// Wait for ping result.
+			select {
+			case tErr := <-check.pingOp.Result:
+				if !tErr.Is(terminal.ErrExplicitAck) {
+					// Mark as failing long enough to expire connections and session and shutdown connections.
+					// TODO: Should we forcibly disconnect instead?
+					// TODO: This might also be triggered if a relay fails and ends the operation.
+					check.pin.MarkAsFailingFor(7 * time.Minute)
+					// Forget about existing active terminal, re-create if needed.
+					check.pin.SetActiveTerminal(nil)
+					log.Warningf("spn/crew: failed to check reachability of %s: %s", check.pin.Hub, tErr)
+
+					return nil, nil, tErr.Wrap("failed to check reachability of %s: %w", check.pin.Hub, tErr)
+				}
+
+			case <-time.After(5 * time.Second):
+				// Mark as failing for just a minute, until server load may be less.
+				check.pin.MarkAsFailingFor(1 * time.Minute)
+				// Forget about existing active terminal, re-create if needed.
+				check.pin.SetActiveTerminal(nil)
+				log.Warningf("spn/crew: reachability check to %s timed out", check.pin.Hub)
+
+				return nil, nil, terminal.ErrTimeout.With("waiting for ping to %s", check.pin.Hub)
+			}
+
+			check.expansion.MarkReachable()
+			log.Debugf("spn/crew: checked conn to %s via %s", check.pin.Hub, check.route)
+
+		default:
+			log.Errorf("spn/crew: invalid hop check for %s", check.pin.Hub)
+			return nil, nil, terminal.ErrInternalError.With("invalid hop check")
+		}
+	}
+
+	// Return last hop.
+	return previousHop, previousTerminal, nil
+}
+
+// expand opens an expansion from fromTerminal to the hub of the "to" pin and
+// starts authorization on the newly created expansion terminal. The "from"
+// pin is only used for logging.
+func expand(fromTerminal terminal.Terminal, from, to *navigator.Pin) (expansion *docks.ExpansionTerminal, authOp *access.AuthorizeOp, tErr *terminal.Error) {
+	// Expand the existing connection to the next hub.
+	if expansion, tErr = docks.ExpandTo(fromTerminal, to.Hub.ID, to.Hub); tErr != nil {
+		return nil, nil, tErr.Wrap("failed to expand to %s", to.Hub)
+	}
+
+	// Start authorizing on the new terminal; abandon it if that fails.
+	if authOp, tErr = access.AuthorizeToTerminal(expansion); tErr != nil {
+		expansion.Abandon(nil)
+		return nil, nil, tErr.Wrap("failed to authorize")
+	}
+
+	log.Infof("spn/crew: expanded to %s (from %s)", to.Hub, from.Hub)
+	return expansion, authOp, nil
+}
+
+// TunnelContext holds additional information about the tunnel to be added to a
+// connection.
+type TunnelContext struct {
+	// Path lists all hops of the route, in order.
+	Path       []*TunnelContextHop
+	// PathCost is the total cost of the route.
+	PathCost   float32
+	// RoutingAlg is the name of the algorithm that computed the route.
+	RoutingAlg string
+
+	// tunnel is the underlying tunnel, used by StopTunnel.
+	tunnel *Tunnel
+}
+
+// GetExitNodeID returns the ID of the exit node.
+// It returns an empty string in case no path exists.
+func (tc *TunnelContext) GetExitNodeID() string {
+	if last := len(tc.Path) - 1; last >= 0 {
+		return tc.Path[last].ID
+	}
+	return ""
+}
+
+// StopTunnel stops the tunnel by closing its underlying connection, if any.
+func (tc *TunnelContext) StopTunnel() error {
+	if tc.tunnel == nil || tc.tunnel.conn == nil {
+		return nil
+	}
+	return tc.tunnel.conn.Close()
+}
+
+// TunnelContextHop holds hop data for TunnelContext.
+type TunnelContextHop struct {
+	// ID is the hub ID of the hop.
+	ID   string
+	// Name is the hub's name.
+	Name string
+	// IPv4 and IPv6 hold per-address-family details, if available.
+	IPv4 *TunnelContextHopIPInfo `json:",omitempty"`
+	IPv6 *TunnelContextHopIPInfo `json:",omitempty"`
+}
+
+// TunnelContextHopIPInfo holds hop IP data for TunnelContextHop.
+type TunnelContextHopIPInfo struct {
+	// IP is the hub's address in this address family.
+	IP      net.IP
+	// Country is the country code of the hub location, if known.
+	Country string
+	// ASN is the autonomous system number, if known.
+	ASN     uint
+	// ASOwner is the autonomous system organization, if known.
+	ASOwner string
+}
+
+// addTunnelContextToConnection builds a TunnelContext from the tunnel's
+// route and attaches it to the tunnel's connection info.
+func addTunnelContextToConnection(t *Tunnel) {
+	// Build the basic tunnel context from the route.
+	tunnelCtx := &TunnelContext{
+		Path:       make([]*TunnelContextHop, len(t.route.Path)),
+		PathCost:   t.route.TotalCost,
+		RoutingAlg: t.route.Algorithm,
+		tunnel:     t,
+	}
+	t.connInfo.TunnelContext = tunnelCtx
+
+	// Fill in one context entry per hop of the route.
+	for i, hop := range t.route.Path {
+		pin := hop.Pin()
+		hopCtx := &TunnelContextHop{
+			ID:   hop.HubID,
+			Name: pin.Hub.Info.Name,
+		}
+		tunnelCtx.Path[i] = hopCtx
+
+		// IPv4 details, if the hub has an IPv4 address.
+		if ip4 := pin.Hub.Info.IPv4; ip4 != nil {
+			info := &TunnelContextHopIPInfo{IP: ip4}
+			if loc := pin.LocationV4; loc != nil {
+				info.Country = loc.Country.Code
+				info.ASN = loc.AutonomousSystemNumber
+				info.ASOwner = loc.AutonomousSystemOrganization
+			}
+			hopCtx.IPv4 = info
+		}
+
+		// IPv6 details, if the hub has an IPv6 address.
+		if ip6 := pin.Hub.Info.IPv6; ip6 != nil {
+			info := &TunnelContextHopIPInfo{IP: ip6}
+			if loc := pin.LocationV6; loc != nil {
+				info.Country = loc.Country.Code
+				info.ASN = loc.AutonomousSystemNumber
+				info.ASOwner = loc.AutonomousSystemOrganization
+			}
+			hopCtx.IPv6 = info
+		}
+	}
+}
diff --git a/spn/crew/metrics.go b/spn/crew/metrics.go
new file mode 100644
index 00000000..b9549d1e
--- /dev/null
+++ b/spn/crew/metrics.go
@@ -0,0 +1,223 @@
+package crew
+
+import (
+	"sync/atomic"
+
+	"github.com/tevino/abool"
+
+	"github.com/safing/portbase/api"
+	"github.com/safing/portbase/metrics"
+)
+
+var (
+	// Connect op result counters; the labeled variants are used on servers.
+	connectOpCnt            *metrics.Counter
+	connectOpCntError       *metrics.Counter
+	connectOpCntBadRequest  *metrics.Counter
+	connectOpCntCanceled    *metrics.Counter
+	connectOpCntFailed      *metrics.Counter
+	connectOpCntConnected   *metrics.Counter
+	connectOpCntRateLimited *metrics.Counter
+
+	// Total traffic counters for connect ops.
+	connectOpIncomingBytes *metrics.Counter
+	connectOpOutgoingBytes *metrics.Counter
+
+	// Duration and data size histograms for connect ops.
+	connectOpTTCRDurationHistogram *metrics.Histogram
+	connectOpTTFBDurationHistogram *metrics.Histogram
+	connectOpDurationHistogram     *metrics.Histogram
+	connectOpIncomingDataHistogram *metrics.Histogram
+	connectOpOutgoingDataHistogram *metrics.Histogram
+
+	// metricsRegistered guards against double registration.
+	metricsRegistered = abool.New()
+)
+
+// registerMetrics registers all connect operation metrics exactly once;
+// repeated calls are no-ops.
+func registerMetrics() (err error) {
+	// Only register metrics once.
+	if !metricsRegistered.SetToIf(false, true) {
+		return nil
+	}
+
+	// Connect op count as seen on the client (unlabeled).
+	connectOpCnt, err = metrics.NewCounter(
+		"spn/op/connect/total",
+		nil,
+		&metrics.Options{
+			Name:       "SPN Total Connect Operations",
+			InternalID: "spn_connect_count",
+			Permission: api.PermitUser,
+			Persist:    true,
+		},
+	)
+	if err != nil {
+		return err
+	}
+
+	// Connect op counts as seen on the server, labeled by result.
+	connectOpCntOptions := &metrics.Options{
+		Name:       "SPN Total Connect Operations",
+		Permission: api.PermitUser,
+		Persist:    true,
+	}
+	resultCounters := []struct {
+		result string
+		dst    **metrics.Counter
+	}{
+		{"error", &connectOpCntError},
+		{"bad_request", &connectOpCntBadRequest},
+		{"canceled", &connectOpCntCanceled},
+		{"failed", &connectOpCntFailed},
+		{"connected", &connectOpCntConnected},
+		{"rate_limited", &connectOpCntRateLimited},
+	}
+	for _, rc := range resultCounters {
+		*rc.dst, err = metrics.NewCounter(
+			"spn/op/connect/total",
+			map[string]string{"result": rc.result},
+			connectOpCntOptions,
+		)
+		if err != nil {
+			return err
+		}
+	}
+
+	// Gauge reporting the currently active connect operations.
+	_, err = metrics.NewGauge(
+		"spn/op/connect/active",
+		nil,
+		getActiveConnectOpsStat,
+		&metrics.Options{
+			Name:       "SPN Active Connect Operations",
+			Permission: api.PermitUser,
+		},
+	)
+	if err != nil {
+		return err
+	}
+
+	// Total traffic counters.
+	connectOpIncomingBytes, err = metrics.NewCounter(
+		"spn/op/connect/incoming/bytes",
+		nil,
+		&metrics.Options{
+			Name:       "SPN Connect Operation Incoming Bytes",
+			InternalID: "spn_connect_in_bytes",
+			Permission: api.PermitUser,
+			Persist:    true,
+		},
+	)
+	if err != nil {
+		return err
+	}
+	connectOpOutgoingBytes, err = metrics.NewCounter(
+		"spn/op/connect/outgoing/bytes",
+		nil,
+		&metrics.Options{
+			Name:       "SPN Connect Operation Outgoing Bytes",
+			InternalID: "spn_connect_out_bytes",
+			Permission: api.PermitUser,
+			Persist:    true,
+		},
+	)
+	if err != nil {
+		return err
+	}
+
+	// Duration and data size histograms.
+	histograms := []struct {
+		id   string
+		name string
+		dst  **metrics.Histogram
+	}{
+		{"spn/op/connect/histogram/ttcr/seconds", "SPN Connect Operation time-to-connect-request Histogram", &connectOpTTCRDurationHistogram},
+		{"spn/op/connect/histogram/ttfb/seconds", "SPN Connect Operation time-to-first-byte (from TTCR) Histogram", &connectOpTTFBDurationHistogram},
+		{"spn/op/connect/histogram/duration/seconds", "SPN Connect Operation Duration Histogram", &connectOpDurationHistogram},
+		{"spn/op/connect/histogram/incoming/bytes", "SPN Connect Operation Downloaded Data Histogram", &connectOpIncomingDataHistogram},
+		{"spn/op/connect/histogram/outgoing/bytes", "SPN Connect Operation Outgoing Data Histogram", &connectOpOutgoingDataHistogram},
+	}
+	for _, h := range histograms {
+		*h.dst, err = metrics.NewHistogram(
+			h.id,
+			nil,
+			&metrics.Options{
+				Name:       h.name,
+				Permission: api.PermitUser,
+			},
+		)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// getActiveConnectOpsStat reports the number of currently active connect
+// operations for the metrics gauge.
+func getActiveConnectOpsStat() float64 {
+	active := atomic.LoadInt64(activeConnectOps)
+	return float64(active)
+}
diff --git a/spn/crew/module.go b/spn/crew/module.go
new file mode 100644
index 00000000..10d4ebed
--- /dev/null
+++ b/spn/crew/module.go
@@ -0,0 +1,44 @@
+package crew
+
+import (
+	"time"
+
+	"github.com/safing/portbase/modules"
+	"github.com/safing/portmaster/spn/terminal"
+)
+
+// module is the crew module instance, registered in init.
+var module *modules.Module
+
+func init() {
+	// Register the crew module and the modules it depends on.
+	module = modules.Register("crew", nil, start, stop, "terminal", "docks", "navigator", "intel", "cabin")
+}
+
+// start schedules the periodic sticky hub cleanup and registers the crew
+// metrics.
+func start() error {
+	// Periodically clean up expired sticky hub entries.
+	task := module.NewTask("sticky cleaner", cleanStickyHubs)
+	task.Repeat(10 * time.Minute)
+
+	return registerMetrics()
+}
+
+// stop clears all sticky hub entries and stops the terminal scheduler.
+func stop() error {
+	clearStickyHubs()
+	terminal.StopScheduler()
+	return nil
+}
+
+// connectErrors buffers errors reported by connect operations for ConnectErrors.
+var connectErrors = make(chan *terminal.Error, 10)
+
+// reportConnectError submits tErr to the connectErrors channel without ever
+// blocking; the error is dropped if the buffer is full.
+func reportConnectError(tErr *terminal.Error) {
+	select {
+	case connectErrors <- tErr:
+	default:
+		// Buffer is full - drop the error.
+	}
+}
+
+// ConnectErrors returns errors of connect operations.
+// It only has a small and shared buffer and may only be used for indications,
+// not for full monitoring - errors may be dropped when the buffer is full.
+func ConnectErrors() <-chan *terminal.Error {
+	return connectErrors
+}
diff --git a/spn/crew/module_test.go b/spn/crew/module_test.go
new file mode 100644
index 00000000..7c0a7ad7
--- /dev/null
+++ b/spn/crew/module_test.go
@@ -0,0 +1,13 @@
+package crew
+
+import (
+	"testing"
+
+	"github.com/safing/portmaster/service/core/pmtesting"
+	"github.com/safing/portmaster/spn/conf"
+)
+
+// TestMain runs the package tests with the public hub configuration enabled,
+// using the project's test harness for module setup.
+func TestMain(m *testing.M) {
+	conf.EnablePublicHub(true)
+	pmtesting.TestMain(m, module)
+}
diff --git a/spn/crew/op_connect.go b/spn/crew/op_connect.go
new file mode 100644
index 00000000..df5e4dbf
--- /dev/null
+++ b/spn/crew/op_connect.go
@@ -0,0 +1,585 @@
+package crew
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"io"
+	"net"
+	"strconv"
+	"sync/atomic"
+	"time"
+
+	"github.com/safing/portbase/container"
+	"github.com/safing/portbase/formats/dsd"
+	"github.com/safing/portbase/log"
+	"github.com/safing/portmaster/service/network/netutils"
+	"github.com/safing/portmaster/service/network/packet"
+	"github.com/safing/portmaster/spn/conf"
+	"github.com/safing/portmaster/spn/terminal"
+)
+
+// ConnectOpType is the type ID for the connection operation.
+const ConnectOpType string = "connect"
+
+// activeConnectOps counts the currently active connect operations.
+var activeConnectOps = new(int64)
+
+// ConnectOp is used to connect data tunnels to servers on the Internet.
+type ConnectOp struct {
+	terminal.OperationBase
+
+	// Flow Control
+	dfq *terminal.DuplexFlowQueue
+
+	// Context and shutdown handling
+	// ctx is the context of the Terminal.
+	ctx context.Context
+	// cancelCtx cancels ctx.
+	cancelCtx context.CancelFunc
+	// doneWriting signals that the writer has finished writing.
+	doneWriting chan struct{}
+
+	// Metrics
+	// incomingTraffic counts the bytes read from conn.
+	incomingTraffic atomic.Uint64
+	// outgoingTraffic counts the bytes written to conn.
+	outgoingTraffic atomic.Uint64
+	// started is the time the operation was started, used for duration metrics.
+	started         time.Time
+
+	// Connection
+	// t is the terminal this operation runs on.
+	t       terminal.Terminal
+	// conn is the app connection on the client side, or the dialed
+	// destination connection on the server side.
+	conn    net.Conn
+	// request holds the connect request this operation executes.
+	request *ConnectRequest
+	// entry is true on the client (entry) side, false on the exit hub.
+	entry   bool
+	// tunnel is only set on the client side.
+	tunnel  *Tunnel
+}
+
+// Type returns the type ID (ConnectOpType).
+func (op *ConnectOp) Type() string {
+	return ConnectOpType
+}
+
+// Ctx returns the operation context, which is canceled when the operation
+// stops.
+func (op *ConnectOp) Ctx() context.Context {
+	return op.ctx
+}
+
+// ConnectRequest holds all the information necessary for a connect operation.
+type ConnectRequest struct {
+	// Domain is the destination domain, if known.
+	Domain              string            `json:"d,omitempty"`
+	// IP is the destination IP address.
+	IP                  net.IP            `json:"ip,omitempty"`
+	// UsePriorityDataMsgs signals that priority data messages may be used.
+	UsePriorityDataMsgs bool              `json:"pr,omitempty"`
+	// Protocol is the IP protocol; only TCP and UDP are supported for dialing.
+	Protocol            packet.IPProtocol `json:"p,omitempty"`
+	// Port is the destination port.
+	Port                uint16            `json:"po,omitempty"`
+	// QueueSize is the flow control queue size to use for the operation.
+	QueueSize           uint32            `json:"qs,omitempty"`
+}
+
+// DialNetwork returns the Go network name ("tcp4", "udp4", "tcp6" or "udp6")
+// to use for dialing this request, or an empty string if the protocol is not
+// supported.
+func (r *ConnectRequest) DialNetwork() string {
+	// Pick the address family suffix based on the IP version.
+	suffix := "6"
+	if r.IP.To4() != nil {
+		suffix = "4"
+	}
+
+	switch r.Protocol { //nolint:exhaustive // Only looking for supported protocols.
+	case packet.TCP:
+		return "tcp" + suffix
+	case packet.UDP:
+		return "udp" + suffix
+	default:
+		return ""
+	}
+}
+
+// Address returns the "host:port" network address of the connect request.
+func (r *ConnectRequest) Address() string {
+	return net.JoinHostPort(r.IP.String(), strconv.Itoa(int(r.Port)))
+}
+
+// String returns a human-readable representation of the request, including
+// the domain when it is known.
+func (r *ConnectRequest) String() string {
+	if r.Domain == "" {
+		return fmt.Sprintf("%s %s", r.Protocol, r.Address())
+	}
+	return fmt.Sprintf("%s (%s %s)", r.Domain, r.Protocol, r.Address())
+}
+
+func init() {
+	// Register the connect operation, requiring the MayConnect permission.
+	terminal.RegisterOpType(terminal.OperationFactory{
+		Type:     ConnectOpType,
+		Requires: terminal.MayConnect,
+		Start:    startConnectOp,
+	})
+}
+
+// NewConnectOp starts a new connect operation on the client side, tunneling
+// the tunnel's app connection to its destination via the tunnel's
+// destination terminal.
+func NewConnectOp(tunnel *Tunnel) (*ConnectOp, *terminal.Error) {
+	// Submit metrics.
+	connectOpCnt.Inc()
+
+	// Create request from the connection's entity.
+	request := &ConnectRequest{
+		Domain:              tunnel.connInfo.Entity.Domain,
+		IP:                  tunnel.connInfo.Entity.IP,
+		Protocol:            packet.IPProtocol(tunnel.connInfo.Entity.Protocol),
+		Port:                tunnel.connInfo.Entity.Port,
+		UsePriorityDataMsgs: terminal.UsePriorityDataMsgs,
+	}
+
+	// Set defaults.
+	if request.QueueSize == 0 {
+		request.QueueSize = terminal.DefaultQueueSize
+	}
+
+	// Create new op.
+	op := &ConnectOp{
+		doneWriting: make(chan struct{}),
+		t:           tunnel.dstTerminal,
+		conn:        tunnel.conn,
+		request:     request,
+		entry:       true,
+		tunnel:      tunnel,
+	}
+	op.ctx, op.cancelCtx = context.WithCancel(module.Ctx)
+	op.dfq = terminal.NewDuplexFlowQueue(op.Ctx(), request.QueueSize, op.submitUpstream)
+
+	// Prepare init msg.
+	data, err := dsd.Dump(request, dsd.CBOR)
+	if err != nil {
+		return nil, terminal.ErrInternalError.With("failed to pack connect request: %w", err)
+	}
+
+	// Initialize the operation on the destination terminal.
+	// Check tErr (the result of StartOperation), not the stale err from
+	// dsd.Dump above, which is always nil at this point.
+	tErr := op.t.StartOperation(op, container.New(data), 5*time.Second)
+	if tErr != nil {
+		return nil, tErr
+	}
+
+	// Setup metrics.
+	op.started = time.Now()
+
+	// Start the data transfer workers.
+	module.StartWorker("connect op conn reader", op.connReader)
+	module.StartWorker("connect op conn writer", op.connWriter)
+	module.StartWorker("connect op flow handler", op.dfq.FlowHandler)
+
+	log.Infof("spn/crew: connected to %s via %s", request, tunnel.dstPin.Hub)
+	return op, nil
+}
+
+// startConnectOp is the operation factory for incoming connect requests on a
+// hub. It verifies that this is a public hub, parses and sanity-checks the
+// request, and defers the remaining setup (rate limiting, dialing) to a
+// worker so the terminal handler is not blocked.
+func startConnectOp(t terminal.Terminal, opID uint32, data *container.Container) (terminal.Operation, *terminal.Error) {
+	// Check if we are running a public hub.
+	if !conf.PublicHub() {
+		return nil, terminal.ErrPermissionDenied.With("connecting is only allowed on public hubs")
+	}
+
+	// Parse connect request.
+	request := &ConnectRequest{}
+	_, err := dsd.Load(data.CompileData(), request)
+	if err != nil {
+		connectOpCntError.Inc() // More like a protocol/system error than a bad request.
+		return nil, terminal.ErrMalformedData.With("failed to parse connect request: %w", err)
+	}
+	if request.QueueSize == 0 || request.QueueSize > terminal.MaxQueueSize {
+		connectOpCntError.Inc() // More like a protocol/system error than a bad request.
+		return nil, terminal.ErrInvalidOptions.With("invalid queue size of %d", request.QueueSize)
+	}
+
+	// Check if IP seems valid.
+	if len(request.IP) != net.IPv4len && len(request.IP) != net.IPv6len {
+		connectOpCntError.Inc() // More like a protocol/system error than a bad request.
+		return nil, terminal.ErrInvalidOptions.With("ip address is not valid")
+	}
+
+	// Create and initialize operation.
+	op := &ConnectOp{
+		doneWriting: make(chan struct{}),
+		t:           t,
+		request:     request,
+	}
+	op.InitOperationBase(t, opID)
+	op.ctx, op.cancelCtx = context.WithCancel(t.Ctx())
+	op.dfq = terminal.NewDuplexFlowQueue(op.Ctx(), request.QueueSize, op.submitUpstream)
+
+	// Start worker to complete setting up the connection.
+	module.StartWorker("connect op setup", op.handleSetup)
+
+	return op, nil
+}
+
+// handleSetup runs as a worker and completes the connect operation setup
+// within the session's concurrency limit. It always returns nil, as all
+// failures are reported via op.Stop instead.
+func (op *ConnectOp) handleSetup(_ context.Context) error {
+	// Get terminal session for rate limiting.
+	var session *terminal.Session
+	if sessionTerm, ok := op.t.(terminal.SessionTerminal); ok {
+		session = sessionTerm.GetSession()
+	} else {
+		connectOpCntError.Inc()
+		log.Errorf("spn/crew: %T is not a session terminal, aborting op %s#%d", op.t, op.t.FmtID(), op.ID())
+		op.Stop(op, terminal.ErrInternalError.With("no session available"))
+		return nil
+	}
+
+	// Limit concurrency of connecting.
+	cancelErr := session.LimitConcurrency(op.Ctx(), func() {
+		op.setup(session)
+	})
+
+	// If context was canceled, stop operation.
+	if cancelErr != nil {
+		connectOpCntCanceled.Inc()
+		op.Stop(op, terminal.ErrCanceled.With(cancelErr.Error()))
+	}
+
+	// Do not return a worker error.
+	return nil
+}
+
+// setup validates the incoming connect request (rate limits, global IP
+// scope, exit policy), dials the requested destination and starts the data
+// transfer workers. All failures stop the operation and update the result
+// counters; suspicious requests are additionally reported to the session.
+func (op *ConnectOp) setup(session *terminal.Session) {
+	// Rate limit before connecting.
+	if tErr := session.RateLimit(); tErr != nil {
+		// Add rate limit info to error.
+		if tErr.Is(terminal.ErrRateLimited) {
+			connectOpCntRateLimited.Inc()
+			op.Stop(op, tErr.With(session.RateLimitInfo()))
+			return
+		}
+
+		connectOpCntError.Inc()
+		op.Stop(op, tErr)
+		return
+	}
+
+	// Check if connection target is in global scope.
+	ipScope := netutils.GetIPScope(op.request.IP)
+	if ipScope != netutils.Global {
+		session.ReportSuspiciousActivity(terminal.SusFactorQuiteUnusual)
+		connectOpCntBadRequest.Inc()
+		op.Stop(op, terminal.ErrPermissionDenied.With("denied request to connect to non-global IP %s", op.request.IP))
+		return
+	}
+
+	// Check exit policy.
+	if tErr := checkExitPolicy(op.request); tErr != nil {
+		session.ReportSuspiciousActivity(terminal.SusFactorQuiteUnusual)
+		connectOpCntBadRequest.Inc()
+		op.Stop(op, tErr)
+		return
+	}
+
+	// Check one last time before connecting if operation was not canceled.
+	if op.Ctx().Err() != nil {
+		op.Stop(op, terminal.ErrCanceled.With(op.Ctx().Err().Error()))
+		connectOpCntCanceled.Inc()
+		return
+	}
+
+	// Connect to destination.
+	dialNet := op.request.DialNetwork()
+	if dialNet == "" {
+		session.ReportSuspiciousActivity(terminal.SusFactorCommon)
+		connectOpCntBadRequest.Inc()
+		op.Stop(op, terminal.ErrIncorrectUsage.With("protocol %s is not supported", op.request.Protocol))
+		return
+	}
+	dialer := &net.Dialer{
+		Timeout:       10 * time.Second,
+		LocalAddr:     conf.GetBindAddr(dialNet),
+		FallbackDelay: -1, // Disables Fast Fallback from IPv6 to IPv4.
+		KeepAlive:     -1, // Disable keep-alive.
+	}
+	conn, err := dialer.DialContext(op.Ctx(), dialNet, op.request.Address())
+	if err != nil {
+		// Connection errors are common, but still a bit suspicious.
+		var netError net.Error
+		switch {
+		case errors.As(err, &netError) && netError.Timeout():
+			session.ReportSuspiciousActivity(terminal.SusFactorCommon)
+			connectOpCntFailed.Inc()
+		case errors.Is(err, context.Canceled):
+			session.ReportSuspiciousActivity(terminal.SusFactorCommon)
+			connectOpCntCanceled.Inc()
+		default:
+			session.ReportSuspiciousActivity(terminal.SusFactorWeirdButOK)
+			connectOpCntFailed.Inc()
+		}
+
+		op.Stop(op, terminal.ErrConnectionError.With("failed to connect to %s: %w", op.request, err))
+		return
+	}
+	op.conn = conn
+
+	// Start the data transfer workers.
+	module.StartWorker("connect op conn reader", op.connReader)
+	module.StartWorker("connect op conn writer", op.connWriter)
+	module.StartWorker("connect op flow handler", op.dfq.FlowHandler)
+
+	connectOpCntConnected.Inc()
+	log.Infof("spn/crew: connected op %s#%d to %s", op.t.FmtID(), op.ID(), op.request)
+}
+
+// submitUpstream sends msg to the other side of the operation, stopping the
+// operation if the send fails.
+func (op *ConnectOp) submitUpstream(msg *terminal.Msg, timeout time.Duration) {
+	if err := op.Send(msg, timeout); err != nil {
+		msg.Finish()
+		op.Stop(op, err.Wrap("failed to send data (op) read from %s", op.connectedType()))
+	}
+}
+
+const (
+	// readBufSize is the buffer size for a single read from the connection.
+	readBufSize = 1500
+
+	// High priority up to first 10MB.
+	highPrioThreshold = 10_000_000
+
+	// Rate limit to 128 Mbit/s after 1GB traffic.
+	// Do NOT use time.Sleep per packet, as it is very inaccurate and will sleep a lot longer than desired.
+	rateLimitThreshold = 1_000_000_000
+	rateLimitMaxMbit   = 128
+)
+
+// connReader runs as a worker and reads data from the connection, sending it
+// to the other side of the operation via the duplex flow queue. It submits
+// traffic metrics and rate-limits once the traffic threshold is exceeded.
+// It always returns nil; failures are reported via op.Stop.
+func (op *ConnectOp) connReader(_ context.Context) error {
+	// Metrics setup and submitting.
+	atomic.AddInt64(activeConnectOps, 1)
+	defer func() {
+		atomic.AddInt64(activeConnectOps, -1)
+		connectOpDurationHistogram.UpdateDuration(op.started)
+		connectOpIncomingDataHistogram.Update(float64(op.incomingTraffic.Load()))
+	}()
+
+	rateLimiter := terminal.NewRateLimiter(rateLimitMaxMbit)
+
+	for {
+		// Read from connection.
+		// A fresh buffer per read, as it is handed to the message below
+		// (presumably the msg takes ownership of the slice - do not reuse).
+		buf := make([]byte, readBufSize)
+		n, err := op.conn.Read(buf)
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				op.Stop(op, terminal.ErrStopping.With("connection to %s was closed on read", op.connectedType()))
+			} else {
+				op.Stop(op, terminal.ErrConnectionError.With("failed to read from %s: %w", op.connectedType(), err))
+			}
+			return nil
+		}
+		if n == 0 {
+			log.Tracef("spn/crew: connect op %s>%d read 0 bytes from %s", op.t.FmtID(), op.ID(), op.connectedType())
+			continue
+		}
+
+		// Submit metrics.
+		connectOpIncomingBytes.Add(n)
+		inBytes := op.incomingTraffic.Add(uint64(n))
+
+		// Rate limit if over threshold.
+		if inBytes > rateLimitThreshold {
+			rateLimiter.Limit(uint64(n))
+		}
+
+		// Create message from data.
+		msg := op.NewMsg(buf[:n])
+
+		// Define priority and possibly wait for slot.
+		switch {
+		case inBytes > highPrioThreshold:
+			msg.Unit.WaitForSlot()
+		case op.request.UsePriorityDataMsgs:
+			msg.Unit.MakeHighPriority()
+		}
+
+		// Send packet.
+		tErr := op.dfq.Send(
+			msg,
+			30*time.Second,
+		)
+		if tErr != nil {
+			msg.Finish()
+			op.Stop(op, tErr.Wrap("failed to send data (dfq) from %s", op.connectedType()))
+			return nil
+		}
+	}
+}
+
+// Deliver delivers a message to the operation, handing it to the duplex flow
+// queue for the connection writer to pick up.
+func (op *ConnectOp) Deliver(msg *terminal.Msg) *terminal.Error {
+	return op.dfq.Deliver(msg)
+}
+
+// connWriter runs as a worker and writes data received from the other side
+// of the operation to the connection. It submits traffic metrics,
+// rate-limits above the traffic threshold and, on the client, triggers
+// sticky hub handling after the first bytes arrive.
+// It always returns nil; failures are reported via op.Stop.
+func (op *ConnectOp) connWriter(_ context.Context) error {
+	// Metrics submitting.
+	defer func() {
+		connectOpOutgoingDataHistogram.Update(float64(op.outgoingTraffic.Load()))
+	}()
+
+	defer func() {
+		// Signal that we are done with writing.
+		close(op.doneWriting)
+		// Close connection.
+		_ = op.conn.Close()
+	}()
+
+	var msg *terminal.Msg
+	// NOTE(review): the receiver of a deferred method call is evaluated at
+	// the defer statement, so this always calls Finish on a nil *Msg - it
+	// never finishes the last received msg. If that is the intent, wrap it
+	// in a closure; confirm that (*terminal.Msg).Finish is nil-safe.
+	defer msg.Finish()
+
+	rateLimiter := terminal.NewRateLimiter(rateLimitMaxMbit)
+
+writing:
+	for {
+		msg.Finish()
+
+		select {
+		case msg = <-op.dfq.Receive():
+		case <-op.ctx.Done():
+			op.Stop(op, terminal.ErrCanceled)
+			return nil
+		default:
+			// Handle all data before also listening for the context cancel.
+			// This ensures all data is written properly before stopping.
+			select {
+			case msg = <-op.dfq.Receive():
+			case op.doneWriting <- struct{}{}:
+				op.Stop(op, terminal.ErrStopping)
+				return nil
+			case <-op.ctx.Done():
+				op.Stop(op, terminal.ErrCanceled)
+				return nil
+			}
+		}
+
+		// TODO: Instead of compiling data here again, can we send it as in the container?
+		data := msg.Data.CompileData()
+		if len(data) == 0 {
+			continue writing
+		}
+
+		// Submit metrics.
+		connectOpOutgoingBytes.Add(len(data))
+		out := op.outgoingTraffic.Add(uint64(len(data)))
+
+		// Rate limit if over threshold.
+		if out > rateLimitThreshold {
+			rateLimiter.Limit(uint64(len(data)))
+		}
+
+		// Special handling after first data was received on client.
+		// (out equals len(data) only for the very first write.)
+		if op.entry &&
+			out == uint64(len(data)) {
+			// Report time taken to receive first byte.
+			connectOpTTFBDurationHistogram.UpdateDuration(op.started)
+
+			// If not stickied yet, stick destination to Hub.
+			if !op.tunnel.stickied {
+				op.tunnel.stickDestinationToHub()
+			}
+		}
+
+		// Send all given data, retrying on short writes.
+		for {
+			n, err := op.conn.Write(data)
+			switch {
+			case err != nil:
+				if errors.Is(err, io.EOF) {
+					op.Stop(op, terminal.ErrStopping.With("connection to %s was closed on write", op.connectedType()))
+				} else {
+					op.Stop(op, terminal.ErrConnectionError.With("failed to send to %s: %w", op.connectedType(), err))
+				}
+				return nil
+			case n == 0:
+				op.Stop(op, terminal.ErrConnectionError.With("sent 0 bytes to %s", op.connectedType()))
+				return nil
+			case n < len(data):
+				// If not all data was sent, try again.
+				log.Debugf("spn/crew: %s#%d only sent %d/%d bytes to %s", op.t.FmtID(), op.ID(), n, len(data), op.connectedType())
+				data = data[n:]
+			default:
+				continue writing
+			}
+		}
+	}
+}
+
+// connectedType names the connected peer side ("origin" on the client,
+// "destination" on the exit hub) for log and error messages.
+func (op *ConnectOp) connectedType() string {
+	if !op.entry {
+		return "destination"
+	}
+	return "origin"
+}
+
+// HandleStop gives the operation the ability to cleanly shut down.
+// The returned error is the error to send to the other side.
+// Should never be called directly. Call Stop() instead.
+func (op *ConnectOp) HandleStop(err *terminal.Error) (errorToSend *terminal.Error) {
+	if err.IsError() {
+		reportConnectError(err)
+	}
+
+	// If the connection has sent or received any data so far, finish the data
+	// flows as it makes sense.
+	if op.incomingTraffic.Load() > 0 || op.outgoingTraffic.Load() > 0 {
+		// If the op was ended locally, send all data before closing.
+		// If the op was ended remotely, don't bother sending remaining data.
+		if !err.IsExternal() {
+			// Flushing could mean sending a full buffer of 50000 packets.
+			op.dfq.Flush(5 * time.Minute)
+		}
+
+		// If the op was ended remotely, write all remaining received data.
+		// If the op was ended locally, don't bother writing remaining data.
+		if err.IsExternal() {
+			select {
+			case <-op.doneWriting:
+			default:
+				select {
+				case <-op.doneWriting:
+				case <-time.After(5 * time.Second):
+				}
+			}
+		}
+	}
+
+	// Cancel workers.
+	op.cancelCtx()
+
+	// Special client-side handling.
+	if op.entry {
+		// Mark the connection as failed if there was an error and no data was sent to the app yet.
+		if err.IsError() && op.outgoingTraffic.Load() == 0 {
+			// Set connection to failed and save it to propagate the update.
+			c := op.tunnel.connInfo
+			func() {
+				c.Lock()
+				defer c.Unlock()
+
+				if err.IsExternal() {
+					c.Failed(fmt.Sprintf(
+						"the exit node reported an error: %s", err,
+					), "")
+				} else {
+					c.Failed(fmt.Sprintf(
+						"connection failed locally: %s", err,
+					), "")
+				}
+
+				c.Save()
+			}()
+		}
+
+		// Avoid connecting to the destination via this Hub if:
+		// - The error is external - ie. from the server.
+		// - The error is a connection error.
+		// - No data was received.
+		// This indicates that there is some network level issue that we can
+		// possibly work around by using another exit node.
+		if err.IsError() && err.IsExternal() &&
+			err.Is(terminal.ErrConnectionError) &&
+			op.outgoingTraffic.Load() == 0 {
+			op.tunnel.avoidDestinationHub()
+		}
+
+		// Don't leak local errors to the server.
+		if !err.IsExternal() {
+			// Change error that is reported.
+			return terminal.ErrStopping
+		}
+	}
+
+	return err
+}
diff --git a/spn/crew/op_connect_test.go b/spn/crew/op_connect_test.go
new file mode 100644
index 00000000..7205ea9a
--- /dev/null
+++ b/spn/crew/op_connect_test.go
@@ -0,0 +1,115 @@
+package crew
+
+import (
+	"fmt"
+	"net"
+	"net/http"
+	"net/url"
+	"testing"
+	"time"
+
+	"github.com/safing/portmaster/service/intel"
+	"github.com/safing/portmaster/service/network"
+	"github.com/safing/portmaster/spn/cabin"
+	"github.com/safing/portmaster/spn/conf"
+	"github.com/safing/portmaster/spn/hub"
+	"github.com/safing/portmaster/spn/navigator"
+	"github.com/safing/portmaster/spn/terminal"
+)
+
+const (
+	testPadding   = 8
+	testQueueSize = 10
+)
+
+// TestConnectOp exercises the connect operation end-to-end: it creates a
+// local test terminal pair, announces a test identity whose exit policy only
+// permits port 80, tunnels an HTTP HEAD request to orf.at through the op,
+// and verifies that a non-empty response comes back.
+// NOTE(review): requires real network access to orf.at; skipped in -short mode.
+func TestConnectOp(t *testing.T) {
+	t.Parallel()
+
+	if testing.Short() {
+		t.Skip("skipping test in short mode, as it interacts with the network")
+	}
+
+	// Create test terminal pair.
+	a, b, err := terminal.NewSimpleTestTerminalPair(0, 0,
+		&terminal.TerminalOpts{
+			FlowControl:     terminal.FlowControlDFQ,
+			FlowControlSize: testQueueSize,
+			Padding:         testPadding,
+		},
+	)
+	if err != nil {
+		t.Fatalf("failed to create test terminal pair: %s", err)
+	}
+
+	// Set up connect op.
+	b.GrantPermission(terminal.MayConnect)
+	conf.EnablePublicHub(true)
+	// module.Ctx is the package-level module context declared elsewhere in this package.
+	identity, err := cabin.CreateIdentity(module.Ctx, "test")
+	if err != nil {
+		t.Fatalf("failed to create identity: %s", err)
+	}
+	// Announce an exit policy that only allows TCP to port 80.
+	_, err = identity.MaintainAnnouncement(&hub.Announcement{
+		Transports: []string{
+			"tcp:17",
+		},
+		Exit: []string{
+			"+ * */80",
+			"- *",
+		},
+	}, true)
+	if err != nil {
+		t.Fatalf("failed to update identity: %s", err)
+	}
+	EnableConnecting(identity.Hub)
+
+	// Loop bound is 1, so the body runs once; presumably kept as a loop for
+	// easy manual repetition — TODO confirm.
+	for i := 0; i < 1; i++ {
+		appConn, sluiceConn := net.Pipe()
+		_, tErr := NewConnectOp(&Tunnel{
+			connInfo: &network.Connection{
+				Entity: (&intel.Entity{
+					Protocol: 6,
+					Port:     80,
+					Domain:   "orf.at.",
+					IP:       net.IPv4(194, 232, 104, 142),
+				}).Init(0),
+			},
+			conn:        sluiceConn,
+			dstTerminal: a,
+			dstPin: &navigator.Pin{
+				Hub: identity.Hub,
+			},
+		})
+		if tErr != nil {
+			t.Fatalf("failed to start connect op: %s", tErr)
+		}
+
+		// Send request.
+		requestURL, err := url.Parse("http://orf.at/")
+		if err != nil {
+			t.Fatalf("failed to parse request url: %s", err)
+		}
+		r := http.Request{
+			Method: http.MethodHead,
+			URL:    requestURL,
+		}
+		err = r.Write(appConn)
+		if err != nil {
+			t.Fatalf("failed to write request: %s", err)
+		}
+
+		// Recv response.
+		data := make([]byte, 1500)
+		n, err := appConn.Read(data)
+		if err != nil {
+			t.Fatalf("failed to read request: %s", err)
+		}
+		if n == 0 {
+			t.Fatal("received empty reply")
+		}
+
+		t.Log("received data:")
+		fmt.Println(string(data[:n]))
+
+		// Give async workers a moment to settle before the next iteration.
+		time.Sleep(500 * time.Millisecond)
+	}
+}
diff --git a/spn/crew/op_ping.go b/spn/crew/op_ping.go
new file mode 100644
index 00000000..84ee4f6e
--- /dev/null
+++ b/spn/crew/op_ping.go
@@ -0,0 +1,149 @@
+package crew
+
+import (
+	"crypto/subtle"
+	"time"
+
+	"github.com/safing/portbase/container"
+	"github.com/safing/portbase/formats/dsd"
+	"github.com/safing/portbase/rng"
+	"github.com/safing/portmaster/spn/terminal"
+)
+
+const (
+	// PingOpType is the type ID of the latency test operation.
+	PingOpType = "ping"
+
+	pingOpNonceSize = 16
+	pingOpTimeout   = 3 * time.Second
+)
+
+// PingOp is used to measure latency.
+type PingOp struct {
+	terminal.OneOffOperationBase
+
+	// started is set to the creation time of the op; presumably read by the
+	// initiator to derive the round-trip time — TODO confirm against callers.
+	started time.Time
+	// nonce is the random value the remote side must echo back, used to
+	// match the response to this request.
+	nonce   []byte
+}
+
+// PingOpRequest is a ping request.
+type PingOpRequest struct {
+	Nonce []byte `json:"n,omitempty"`
+}
+
+// PingOpResponse is a ping response.
+type PingOpResponse struct {
+	Nonce []byte    `json:"n,omitempty"`
+	Time  time.Time `json:"t,omitempty"`
+}
+
+// Type returns the type ID.
+func (op *PingOp) Type() string {
+	return PingOpType
+}
+
+// Register the ping operation factory so remote terminals can start it.
+func init() {
+	terminal.RegisterOpType(terminal.OperationFactory{
+		Type:  PingOpType,
+		Start: startPingOp,
+	})
+}
+
+// NewPingOp runs a latency test.
+// It generates a random nonce, sends it to the remote side of the given
+// terminal as a CBOR-encoded PingOpRequest, and returns the started op.
+// The result is delivered asynchronously via the OneOffOperationBase.
+func NewPingOp(t terminal.Terminal) (*PingOp, *terminal.Error) {
+	// Generate nonce.
+	nonce, err := rng.Bytes(pingOpNonceSize)
+	if err != nil {
+		return nil, terminal.ErrInternalError.With("failed to generate ping nonce: %w", err)
+	}
+
+	// Create operation and init.
+	op := &PingOp{
+		started: time.Now().UTC(),
+		nonce:   nonce,
+	}
+	op.OneOffOperationBase.Init()
+
+	// Create request.
+	pingRequest, err := dsd.Dump(&PingOpRequest{
+		Nonce: op.nonce,
+	}, dsd.CBOR)
+	if err != nil {
+		return nil, terminal.ErrInternalError.With("failed to create ping request: %w", err)
+	}
+
+	// Send ping.
+	tErr := t.StartOperation(op, container.New(pingRequest), pingOpTimeout)
+	if tErr != nil {
+		return nil, tErr
+	}
+
+	return op, nil
+}
+
+// Deliver delivers a message to the operation.
+// It parses the ping response, verifies that the echoed nonce matches the
+// one sent, and returns ErrExplicitAck, which this op uses as its internal
+// success signal (see HandleStop).
+func (op *PingOp) Deliver(msg *terminal.Msg) *terminal.Error {
+	defer msg.Finish()
+
+	// Parse response.
+	response := &PingOpResponse{}
+	_, err := dsd.Load(msg.Data.CompileData(), response)
+	if err != nil {
+		return terminal.ErrMalformedData.With("failed to parse ping response: %w", err)
+	}
+
+	// Check if the nonce matches.
+	// Constant-time comparison avoids leaking match position via timing.
+	if subtle.ConstantTimeCompare(op.nonce, response.Nonce) != 1 {
+		return terminal.ErrIntegrity.With("ping nonce mismatched")
+	}
+
+	return terminal.ErrExplicitAck
+}
+
+// startPingOp handles an incoming ping request: it echoes the request nonce
+// together with the current UTC time back to the initiator.
+// Returning a nil operation indicates the op is one-shot and already done.
+func startPingOp(t terminal.Terminal, opID uint32, data *container.Container) (terminal.Operation, *terminal.Error) {
+	// Parse request.
+	request := &PingOpRequest{}
+	_, err := dsd.Load(data.CompileData(), request)
+	if err != nil {
+		return nil, terminal.ErrMalformedData.With("failed to parse ping request: %w", err)
+	}
+
+	// Create response.
+	response, err := dsd.Dump(&PingOpResponse{
+		Nonce: request.Nonce,
+		Time:  time.Now().UTC(),
+	}, dsd.CBOR)
+	if err != nil {
+		return nil, terminal.ErrInternalError.With("failed to create ping response: %w", err)
+	}
+
+	// Send response.
+	// High priority so latency measurements are not skewed by queueing.
+	msg := terminal.NewMsg(response)
+	msg.FlowID = opID
+	msg.Unit.MakeHighPriority()
+	if terminal.UsePriorityDataMsgs {
+		msg.Type = terminal.MsgTypePriorityData
+	}
+	tErr := t.Send(msg, pingOpTimeout)
+	if tErr != nil {
+		// Finish message unit on failure.
+		msg.Finish()
+		return nil, tErr.With("failed to send ping response")
+	}
+
+	// Operation is just one response and finished successfully.
+	return nil, nil
+}
+
+// HandleStop gives the operation the ability to cleanly shut down.
+// The returned error is the error to send to the other side.
+// Should never be called directly. Call Stop() instead.
+func (op *PingOp) HandleStop(err *terminal.Error) (errorToSend *terminal.Error) {
+	// Prevent remote from sending explicit ack, as we use it as a success signal internally.
+	// Replace it with a plain (external) stopping error before delegating.
+	if err.Is(terminal.ErrExplicitAck) && err.IsExternal() {
+		err = terminal.ErrStopping.AsExternal()
+	}
+
+	// Continue with usual handling of inherited base.
+	return op.OneOffOperationBase.HandleStop(err)
+}
diff --git a/spn/crew/op_ping_test.go b/spn/crew/op_ping_test.go
new file mode 100644
index 00000000..f9d6dfb4
--- /dev/null
+++ b/spn/crew/op_ping_test.go
@@ -0,0 +1,32 @@
+package crew
+
+import (
+	"testing"
+	"time"
+
+	"github.com/safing/portmaster/spn/terminal"
+)
+
+// TestPingOp runs a ping over a local test terminal pair and waits for the
+// asynchronous result with a timeout.
+func TestPingOp(t *testing.T) {
+	t.Parallel()
+
+	// Create test terminal pair.
+	a, _, err := terminal.NewSimpleTestTerminalPair(0, 0, nil)
+	if err != nil {
+		t.Fatalf("failed to create test terminal pair: %s", err)
+	}
+
+	// Create ping op.
+	// IsError is presumably nil-receiver safe on *terminal.Error — TODO confirm.
+	op, tErr := NewPingOp(a)
+	if tErr.IsError() {
+		t.Fatal(tErr)
+	}
+
+	// Wait for result.
+	select {
+	case result := <-op.Result:
+		t.Logf("ping result: %s", result.Error())
+	case <-time.After(pingOpTimeout):
+		t.Fatal("timed out")
+	}
+}
diff --git a/spn/crew/policy.go b/spn/crew/policy.go
new file mode 100644
index 00000000..5a741164
--- /dev/null
+++ b/spn/crew/policy.go
@@ -0,0 +1,51 @@
+package crew
+
+import (
+	"context"
+	"sync"
+
+	"github.com/safing/portmaster/service/intel"
+	"github.com/safing/portmaster/service/profile/endpoints"
+	"github.com/safing/portmaster/spn/hub"
+	"github.com/safing/portmaster/spn/terminal"
+)
+
+var (
+	connectingHubLock sync.Mutex
+	connectingHub     *hub.Hub
+)
+
+// EnableConnecting enables handling of connect requests on this Hub by
+// setting the identity that exit-policy checks are performed against.
+func EnableConnecting(my *hub.Hub) {
+	connectingHubLock.Lock()
+	connectingHub = my
+	connectingHubLock.Unlock()
+}
+
+// checkExitPolicy returns nil if the given connect request is permitted by
+// this Hub's announced exit policy, or a permission-denied error otherwise.
+// Connect requests are rejected outright while no connecting Hub is set.
+func checkExitPolicy(request *ConnectRequest) *terminal.Error {
+	connectingHubLock.Lock()
+	defer connectingHubLock.Unlock()
+
+	// Check if connect requests are allowed.
+	if connectingHub == nil {
+		return terminal.ErrPermissionDenied.With("connect requests disabled")
+	}
+
+	// Create entity.
+	entity := (&intel.Entity{
+		IP:       request.IP,
+		Protocol: uint8(request.Protocol),
+		Port:     request.Port,
+		Domain:   request.Domain,
+	}).Init(0)
+	// Best-effort enrichment; any failure inside FetchData is not surfaced here.
+	entity.FetchData(context.TODO())
+
+	// Check against policy.
+	// Only an explicit Denied result blocks; any other result is allowed through.
+	result, reason := connectingHub.GetInfo().ExitPolicy().Match(context.TODO(), entity)
+	if result == endpoints.Denied {
+		return terminal.ErrPermissionDenied.With("connect request for %s violates the exit policy: %s", request, reason)
+	}
+
+	return nil
+}
diff --git a/spn/crew/sticky.go b/spn/crew/sticky.go
new file mode 100644
index 00000000..598476fa
--- /dev/null
+++ b/spn/crew/sticky.go
@@ -0,0 +1,176 @@
+package crew
+
+import (
+	"context"
+	"fmt"
+	"sync"
+	"time"
+
+	"github.com/safing/portbase/log"
+	"github.com/safing/portbase/modules"
+	"github.com/safing/portmaster/service/network"
+	"github.com/safing/portmaster/service/network/packet"
+	"github.com/safing/portmaster/spn/navigator"
+)
+
+const (
+	stickyTTL = 1 * time.Hour
+)
+
+var (
+	stickyIPs     = make(map[string]*stickyHub)
+	stickyDomains = make(map[string]*stickyHub)
+	stickyLock    sync.Mutex
+)
+
+// stickyHub is an entry in the sticky registries, remembering which Hub (and
+// optionally which route) a profile/destination pair last used.
+type stickyHub struct {
+	Pin      *navigator.Pin
+	// Route may be nil for avoid-entries (see avoidDestinationHub).
+	Route    *navigator.Route
+	LastSeen time.Time
+	// Avoid marks the Hub as one to be avoided instead of reused.
+	Avoid    bool
+}
+
+// isExpired reports whether this sticky entry is older than stickyTTL.
+func (sh *stickyHub) isExpired() bool {
+	return sh.LastSeen.Before(time.Now().Add(-stickyTTL))
+}
+
+// makeStickyIPKey builds the sticky registry key for the connection's profile
+// and destination IP in the form "<source>/<profile-id>><ip>", falling back
+// to "?><ip>" when no profile is available.
+func makeStickyIPKey(conn *network.Connection) string {
+	p := conn.Process().Profile()
+	if p == nil {
+		return "?>" + string(conn.Entity.IP)
+	}
+
+	local := p.LocalProfile()
+	return fmt.Sprintf("%s/%s>%s", local.Source, local.ID, conn.Entity.IP)
+}
+
+// makeStickyDomainKey builds the sticky registry key for the connection's
+// profile and destination domain in the form "<source>/<profile-id>><domain>",
+// falling back to "?><domain>" when no profile is available.
+func makeStickyDomainKey(conn *network.Connection) string {
+	p := conn.Process().Profile()
+	if p == nil {
+		return "?>" + conn.Entity.Domain
+	}
+
+	local := p.LocalProfile()
+	return fmt.Sprintf("%s/%s>%s", local.Source, local.ID, conn.Entity.Domain)
+}
+
+// getStickiedHub returns the sticky hub entry for the given connection, or
+// nil if there is none or it no longer fits the connection (needed IP
+// version, current tunnel options). Entries are looked up first by
+// profile+IP, then by profile+domain. A returned entry may have Avoid set,
+// in which case the Hub must be avoided rather than reused.
+func getStickiedHub(conn *network.Connection) (sticksTo *stickyHub) {
+	stickyLock.Lock()
+	defer stickyLock.Unlock()
+
+	// Check if IP is sticky.
+	sticksTo = stickyIPs[makeStickyIPKey(conn)] // byte comparison
+	if sticksTo != nil && !sticksTo.isExpired() {
+		sticksTo.LastSeen = time.Now()
+	}
+	// NOTE(review): an expired IP entry is not discarded here and is still
+	// used below; expired entries are only removed by the cleanup task —
+	// TODO confirm this is intended.
+
+	// If the IP did not stick and we have a domain, check if that sticks.
+	// Assign to the outer sticksTo instead of shadowing it with :=, so the
+	// domain lookup result is actually used. (Previously the `sticksTo, ok :=`
+	// form shadowed the named return and domain stickiness never took effect.)
+	if sticksTo == nil && conn.Entity.Domain != "" {
+		domainSticksTo, ok := stickyDomains[makeStickyDomainKey(conn)]
+		if ok && !domainSticksTo.isExpired() {
+			domainSticksTo.LastSeen = time.Now()
+			sticksTo = domainSticksTo
+		}
+	}
+
+	// If nothing sticked, return now.
+	if sticksTo == nil {
+		return nil
+	}
+
+	// Get intel from map before locking pin to avoid simultaneous locking.
+	mapIntel := navigator.Main.GetIntel()
+
+	// Lock Pin for checking.
+	sticksTo.Pin.Lock()
+	defer sticksTo.Pin.Unlock()
+
+	// Check if the stickied Hub supports the needed IP version.
+	switch {
+	case conn.IPVersion == packet.IPv4 && sticksTo.Pin.EntityV4 == nil:
+		// Connection is IPv4, but stickied Hub has no IPv4.
+		return nil
+	case conn.IPVersion == packet.IPv6 && sticksTo.Pin.EntityV6 == nil:
+		// Connection is IPv6, but stickied Hub has no IPv6.
+		return nil
+	}
+
+	// Disregard the stickied Hub if it does not match the current tunnel options.
+	matcher := conn.TunnelOpts.Destination.Matcher(mapIntel)
+	if !matcher(sticksTo.Pin) {
+		return nil
+	}
+
+	// Return fully checked stickied Hub.
+	return sticksTo
+}
+
+// stickDestinationToHub records the tunnel's destination Hub and route in the
+// sticky registries, keyed by profile+IP and — if present — profile+domain,
+// so future tunnels to the same destination reuse the same Hub.
+func (t *Tunnel) stickDestinationToHub() {
+	stickyLock.Lock()
+	defer stickyLock.Unlock()
+
+	// Stick to IP.
+	ipKey := makeStickyIPKey(t.connInfo)
+	stickyIPs[ipKey] = &stickyHub{
+		Pin:      t.dstPin,
+		Route:    t.route,
+		LastSeen: time.Now(),
+	}
+	log.Infof("spn/crew: sticking %s to %s", ipKey, t.dstPin.Hub)
+
+	// Stick to Domain, if present.
+	if t.connInfo.Entity.Domain != "" {
+		domainKey := makeStickyDomainKey(t.connInfo)
+		stickyDomains[domainKey] = &stickyHub{
+			Pin:      t.dstPin,
+			Route:    t.route,
+			LastSeen: time.Now(),
+		}
+		log.Infof("spn/crew: sticking %s to %s", domainKey, t.dstPin.Hub)
+	}
+}
+
+// avoidDestinationHub marks the tunnel's destination Hub as one to be avoided
+// for this profile/IP pair, overwriting any existing sticky entry. Only the
+// IP registry is touched; any domain-keyed entry is left as-is.
+func (t *Tunnel) avoidDestinationHub() {
+	stickyLock.Lock()
+	defer stickyLock.Unlock()
+
+	// Record an avoid-entry for the Hub/IP pair (no Route is stored).
+	ipKey := makeStickyIPKey(t.connInfo)
+	stickyIPs[ipKey] = &stickyHub{
+		Pin:      t.dstPin,
+		LastSeen: time.Now(),
+		Avoid:    true,
+	}
+	log.Warningf("spn/crew: avoiding %s for %s", t.dstPin.Hub, ipKey)
+}
+
+// cleanStickyHubs removes expired entries from both sticky registries.
+// It is intended to run as a periodic module task; it always returns nil.
+func cleanStickyHubs(ctx context.Context, task *modules.Task) error {
+	stickyLock.Lock()
+	defer stickyLock.Unlock()
+
+	for key, entry := range stickyIPs {
+		if entry.isExpired() {
+			delete(stickyIPs, key)
+		}
+	}
+	for key, entry := range stickyDomains {
+		if entry.isExpired() {
+			delete(stickyDomains, key)
+		}
+	}
+
+	return nil
+}
+
+// clearStickyHubs drops every entry from both sticky registries.
+func clearStickyHubs() {
+	stickyLock.Lock()
+	defer stickyLock.Unlock()
+
+	for key := range stickyIPs {
+		delete(stickyIPs, key)
+	}
+	for key := range stickyDomains {
+		delete(stickyDomains, key)
+	}
+}
diff --git a/spn/docks/bandwidth_test.go b/spn/docks/bandwidth_test.go
new file mode 100644
index 00000000..60101f1c
--- /dev/null
+++ b/spn/docks/bandwidth_test.go
@@ -0,0 +1,90 @@
+package docks
+
+import (
+	"testing"
+	"time"
+
+	"github.com/tevino/abool"
+
+	"github.com/safing/portbase/container"
+	"github.com/safing/portbase/formats/dsd"
+	"github.com/safing/portmaster/spn/terminal"
+)
+
+// TestEffectiveBandwidth reuses the capacity test operation (with its sender
+// disabled) to measure effective bandwidth over a delayed test terminal pair,
+// then checks the measurement against a theoretical expectation derived from
+// queue size, message size and artificial delay.
+func TestEffectiveBandwidth(t *testing.T) { //nolint:paralleltest // Run alone.
+	// Skip in CI.
+	if testing.Short() {
+		t.Skip()
+	}
+
+	var (
+		bwTestDelay            = 50 * time.Millisecond
+		bwTestQueueSize uint32 = 1000
+		bwTestVolume           = 10000000 // 10MB
+		bwTestTime             = 10 * time.Second
+	)
+
+	// Create test terminal pair.
+	a, b, err := terminal.NewSimpleTestTerminalPair(
+		bwTestDelay,
+		int(bwTestQueueSize),
+		&terminal.TerminalOpts{
+			FlowControl:     terminal.FlowControlDFQ,
+			FlowControlSize: bwTestQueueSize,
+		},
+	)
+	if err != nil {
+		t.Fatalf("failed to create test terminal pair: %s", err)
+	}
+
+	// Grant permission for op on remote terminal and start op.
+	b.GrantPermission(terminal.IsCraneController)
+
+	// Re-use the capacity test for the bandwidth test.
+	op := &CapacityTestOp{
+		opts: &CapacityTestOptions{
+			TestVolume: bwTestVolume,
+			MaxTime:    bwTestTime,
+			testing:    true,
+		},
+		recvQueue:       make(chan *terminal.Msg),
+		dataSent:        new(int64),
+		dataSentWasAckd: abool.New(),
+		result:          make(chan *terminal.Error, 1),
+	}
+	// Disable sender again.
+	// Pre-marking the sender as started/acked means only the remote side sends.
+	op.senderStarted = true
+	op.dataSentWasAckd.Set()
+	// Make capacity test request.
+	request, err := dsd.Dump(op.opts, dsd.CBOR)
+	if err != nil {
+		t.Fatal(terminal.ErrInternalError.With("failed to serialize capactity test options: %w", err))
+	}
+	// Send test request.
+	tErr := a.StartOperation(op, container.New(request), 1*time.Second)
+	if tErr != nil {
+		t.Fatal(tErr)
+	}
+	// Start handler.
+	module.StartWorker("op capacity handler", op.handler)
+
+	// Wait for result and check error.
+	tErr = <-op.Result()
+	if !tErr.IsOK() {
+		t.Fatalf("op failed: %s", tErr)
+	}
+	t.Logf("measured capacity: %d bit/s", op.testResult)
+
+	// Calculate expected bandwidth.
+	expectedBitsPerSecond := (float64(capacityTestMsgSize*8*int64(bwTestQueueSize)) / float64(bwTestDelay)) * float64(time.Second)
+	t.Logf("expected capacity: %f bit/s", expectedBitsPerSecond)
+
+	// Check if measured bandwidth is within parameters.
+	if float64(op.testResult) > expectedBitsPerSecond*1.6 {
+		t.Fatal("measured capacity too high")
+	}
+	// TODO: Check if we can raise this to at least 90%.
+	if float64(op.testResult) < expectedBitsPerSecond*0.2 {
+		t.Fatal("measured capacity too low")
+	}
+}
diff --git a/spn/docks/controller.go b/spn/docks/controller.go
new file mode 100644
index 00000000..05e18e39
--- /dev/null
+++ b/spn/docks/controller.go
@@ -0,0 +1,100 @@
+package docks
+
+import (
+	"github.com/safing/portbase/container"
+	"github.com/safing/portmaster/spn/terminal"
+)
+
+// CraneControllerTerminal is a terminal for the crane itself.
+type CraneControllerTerminal struct {
+	*terminal.TerminalBase
+
+	Crane *Crane
+}
+
+// NewLocalCraneControllerTerminal returns a new local crane controller.
+// The controller terminal always uses ID 0 and sends through the crane's
+// important-message path. The returned container holds the init data to be
+// delivered to the remote side.
+func NewLocalCraneControllerTerminal(
+	crane *Crane,
+	initMsg *terminal.TerminalOpts,
+) (*CraneControllerTerminal, *container.Container, *terminal.Error) {
+	// Remove unnecessary options from the crane controller.
+	initMsg.Padding = 0
+
+	// Create Terminal Base.
+	t, initData, err := terminal.NewLocalBaseTerminal(
+		crane.ctx,
+		0,
+		crane.ID,
+		nil,
+		initMsg,
+		terminal.UpstreamSendFunc(crane.sendImportantTerminalMsg),
+	)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	return initCraneController(crane, t, initMsg), initData, nil
+}
+
+// NewRemoteCraneControllerTerminal returns a new remote crane controller.
+// It is the counterpart to NewLocalCraneControllerTerminal: it consumes the
+// init data produced by the local side and returns the negotiated options.
+func NewRemoteCraneControllerTerminal(
+	crane *Crane,
+	initData *container.Container,
+) (*CraneControllerTerminal, *terminal.TerminalOpts, *terminal.Error) {
+	// Create Terminal Base.
+	t, initMsg, err := terminal.NewRemoteBaseTerminal(
+		crane.ctx,
+		0,
+		crane.ID,
+		nil,
+		initData,
+		terminal.UpstreamSendFunc(crane.sendImportantTerminalMsg),
+	)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	return initCraneController(crane, t, initMsg), initMsg, nil
+}
+
+// initCraneController wires a freshly created terminal base up as the crane's
+// controller: it registers the controller in the crane's terminal registry,
+// copies the negotiated options onto the crane, grants the controller
+// permission, and starts the terminal workers.
+func initCraneController(
+	crane *Crane,
+	t *terminal.TerminalBase,
+	initMsg *terminal.TerminalOpts,
+) *CraneControllerTerminal {
+	// Create Crane Terminal and assign it as the extended Terminal.
+	cct := &CraneControllerTerminal{
+		TerminalBase: t,
+		Crane:        crane,
+	}
+	t.SetTerminalExtension(cct)
+
+	// Assign controller to crane.
+	// Controllers are created with terminal ID 0 (see the New*ControllerTerminal constructors).
+	crane.Controller = cct
+	crane.terminals[cct.ID()] = cct
+
+	// Copy the options to the crane itself.
+	crane.opts = *initMsg
+
+	// Grant crane controller permission.
+	t.GrantPermission(terminal.IsCraneController)
+
+	// Start workers.
+	t.StartWorkers(module, "crane controller terminal")
+
+	return cct
+}
+
+// HandleAbandon gives the terminal the ability to cleanly shut down.
+// The controller delegates to the crane using its fixed terminal ID 0.
+func (controller *CraneControllerTerminal) HandleAbandon(err *terminal.Error) (errorToSend *terminal.Error) {
+	// Abandon terminal.
+	controller.Crane.AbandonTerminal(0, err)
+
+	return err
+}
+
+// HandleDestruction gives the terminal the ability to clean up.
+// Destroying the controller stops the whole crane; the given error is not
+// forwarded — the crane is stopped without an error.
+func (controller *CraneControllerTerminal) HandleDestruction(err *terminal.Error) {
+	// Stop controlled crane.
+	controller.Crane.Stop(nil)
+}
diff --git a/spn/docks/crane.go b/spn/docks/crane.go
new file mode 100644
index 00000000..34dab6d3
--- /dev/null
+++ b/spn/docks/crane.go
@@ -0,0 +1,913 @@
+package docks
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"io"
+	"net"
+	"sync"
+	"time"
+
+	"github.com/tevino/abool"
+
+	"github.com/safing/jess"
+	"github.com/safing/portbase/container"
+	"github.com/safing/portbase/formats/varint"
+	"github.com/safing/portbase/log"
+	"github.com/safing/portbase/rng"
+	"github.com/safing/portmaster/spn/cabin"
+	"github.com/safing/portmaster/spn/hub"
+	"github.com/safing/portmaster/spn/ships"
+	"github.com/safing/portmaster/spn/terminal"
+)
+
+const (
+	// QOTD holds the quote of the day to return on idling unused connections.
+	QOTD = "Privacy is not an option, and it shouldn't be the price we accept for just getting on the Internet.\nGary Kovacs\n"
+
+	// maxUnloadSize defines the maximum size of a message to unload.
+	maxUnloadSize            = 16384
+	maxSegmentLength         = 16384
+	maxCraneStoppingDuration = 6 * time.Hour
+	maxCraneStopDuration     = 10 * time.Second
+)
+
+var (
+	// optimalMinLoadSize defines minimum for Crane.targetLoadSize.
+	optimalMinLoadSize = 3072 // Targeting around 4096.
+
+	// loadingMaxWaitDuration is the maximum time a crane will wait for
+	// additional data to send.
+	loadingMaxWaitDuration = 5 * time.Millisecond
+)
+
+// Errors.
+var (
+	ErrDone = errors.New("crane is done")
+)
+
+// Crane is the primary duplexer and connection manager.
+type Crane struct {
+	// ID is the ID of the Crane.
+	ID string
+	// opts holds options.
+	opts terminal.TerminalOpts
+
+	// ctx is the context of the Terminal.
+	ctx context.Context
+	// cancelCtx cancels ctx.
+	cancelCtx context.CancelFunc
+	// stopping indicates if the Crane will be stopped soon. The Crane may still
+	// be used until stopped, but must not be advertised anymore.
+	stopping *abool.AtomicBool
+	// stopped indicates if the Crane has been stopped. Whoever stopped the Crane
+	// already took care of notifying everyone, so a silent fail is normally the
+	// best response.
+	stopped *abool.AtomicBool
+	// authenticated indicates if there has been any successful authentication.
+	authenticated *abool.AtomicBool
+
+	// ConnectedHub is the identity of the remote Hub.
+	ConnectedHub *hub.Hub
+	// NetState holds the network optimization state.
+	// It must always be set and the reference must not be changed.
+	// Access to fields within are coordinated by itself.
+	NetState *NetworkOptimizationState
+	// identity is identity of this instance and is usually only populated on a server.
+	identity *cabin.Identity
+
+	// jession is the jess session used for encryption.
+	jession *jess.Session
+	// jessionLock locks jession.
+	jessionLock sync.Mutex
+
+	// Controller is the Crane's Controller Terminal.
+	Controller *CraneControllerTerminal
+
+	// ship represents the underlying physical connection.
+	ship ships.Ship
+	// unloading moves containers from the ship to the crane.
+	unloading chan *container.Container
+	// loading moves containers from the crane to the ship.
+	loading chan *container.Container
+	// terminalMsgs holds containers from terminals waiting to be loaded.
+	terminalMsgs chan *terminal.Msg
+	// controllerMsgs holds important containers from terminals waiting to be loaded.
+	controllerMsgs chan *terminal.Msg
+
+	// terminals holds all the connected terminals.
+	terminals map[uint32]terminal.Terminal
+	// terminalsLock locks terminals.
+	terminalsLock sync.Mutex
+	// nextTerminalID holds the next terminal ID.
+	nextTerminalID uint32
+
+	// targetLoadSize defines the optimal loading size.
+	targetLoadSize int
+}
+
+// NewCrane returns a new crane.
+// The crane runs in module context, registers itself in the global crane
+// registry, and derives its target load size from the ship's MTU, accounting
+// for encryption and length-encoding overhead.
+// connectedHub and id may be nil; id is usually only set on a server.
+func NewCrane(ship ships.Ship, connectedHub *hub.Hub, id *cabin.Identity) (*Crane, error) {
+	// Cranes always run in module context.
+	ctx, cancelCtx := context.WithCancel(module.Ctx)
+
+	newCrane := &Crane{
+		ctx:           ctx,
+		cancelCtx:     cancelCtx,
+		stopping:      abool.NewBool(false),
+		stopped:       abool.NewBool(false),
+		authenticated: abool.NewBool(false),
+
+		ConnectedHub: connectedHub,
+		NetState:     newNetworkOptimizationState(),
+		identity:     id,
+
+		ship:           ship,
+		unloading:      make(chan *container.Container),
+		loading:        make(chan *container.Container, 100),
+		terminalMsgs:   make(chan *terminal.Msg, 100),
+		controllerMsgs: make(chan *terminal.Msg, 100),
+
+		terminals: make(map[uint32]terminal.Terminal),
+	}
+	err := registerCrane(newCrane)
+	if err != nil {
+		return nil, fmt.Errorf("failed to register crane: %w", err)
+	}
+
+	// Shift next terminal IDs on the server.
+	// This keeps client-assigned and server-assigned IDs in disjoint ranges.
+	if !ship.IsMine() {
+		newCrane.nextTerminalID += 4
+	}
+
+	// Calculate target load size.
+	loadSize := ship.LoadSize()
+	if loadSize <= 0 {
+		loadSize = ships.BaseMTU
+	}
+	newCrane.targetLoadSize = loadSize
+	// Use the largest multiple of the MTU that reaches the optimal minimum.
+	for newCrane.targetLoadSize < optimalMinLoadSize {
+		newCrane.targetLoadSize += loadSize
+	}
+	// Subtract overhead needed for encryption.
+	newCrane.targetLoadSize -= 25 // Manually tested for jess.SuiteWireV1
+	// Subtract space needed for length encoding the final chunk.
+	newCrane.targetLoadSize -= varint.EncodedSize(uint64(newCrane.targetLoadSize))
+
+	return newCrane, nil
+}
+
+// IsMine returns whether the crane was started on this side.
+func (crane *Crane) IsMine() bool {
+	return crane.ship.IsMine()
+}
+
+// Public returns whether the crane has been published.
+func (crane *Crane) Public() bool {
+	return crane.ship.Public()
+}
+
+// IsStopping returns whether the crane is stopping.
+func (crane *Crane) IsStopping() bool {
+	return crane.stopping.IsSet()
+}
+
+// MarkStoppingRequested marks the crane as stopping requested.
+// The state change is synced to the peer via a sync-state operation.
+func (crane *Crane) MarkStoppingRequested() {
+	crane.NetState.lock.Lock()
+	defer crane.NetState.lock.Unlock()
+
+	if !crane.NetState.stoppingRequested {
+		crane.NetState.stoppingRequested = true
+		crane.startSyncStateOp()
+	}
+}
+
+// MarkStopping marks the crane as stopping.
+// Returns false if the crane is not owned by this side or was already
+// marked as stopping.
+func (crane *Crane) MarkStopping() (stopping bool) {
+	// Can only stop owned cranes.
+	if !crane.IsMine() {
+		return false
+	}
+
+	if !crane.stopping.SetToIf(false, true) {
+		return false
+	}
+
+	crane.NetState.lock.Lock()
+	defer crane.NetState.lock.Unlock()
+	crane.NetState.markedStoppingAt = time.Now()
+
+	crane.startSyncStateOp()
+	return true
+}
+
+// AbortStopping aborts the stopping.
+// The return value only reflects whether the local stopping flag was
+// flipped back; a cleared remote stopping request alone is not reported,
+// though either change triggers a state sync.
+func (crane *Crane) AbortStopping() (aborted bool) {
+	aborted = crane.stopping.SetToIf(true, false)
+
+	crane.NetState.lock.Lock()
+	defer crane.NetState.lock.Unlock()
+
+	abortedStoppingRequest := crane.NetState.stoppingRequested
+	crane.NetState.stoppingRequested = false
+	crane.NetState.markedStoppingAt = time.Time{}
+
+	// Sync if any state changed.
+	if aborted || abortedStoppingRequest {
+		crane.startSyncStateOp()
+	}
+
+	return aborted
+}
+
+// Authenticated returns whether the other side of the crane has authenticated
+// itself with an access code.
+func (crane *Crane) Authenticated() bool {
+	return crane.authenticated.IsSet()
+}
+
+// Publish publishes the connection as a lane.
+// It marks the underlying ship as public and assigns the crane to the
+// connected Hub's ID so others can use it. Fails if no connected Hub is set.
+func (crane *Crane) Publish() error {
+	// Check if crane is connected.
+	if crane.ConnectedHub == nil {
+		return fmt.Errorf("spn/docks: %s: cannot publish without defined connected hub", crane)
+	}
+
+	// Submit metrics.
+	// Only count the transition from non-public to public.
+	if !crane.Public() {
+		newPublicCranes.Inc()
+	}
+
+	// Mark crane as public.
+	// Capture the masked address before going public, for the log line below.
+	maskedID := crane.ship.MaskAddress(crane.ship.RemoteAddr())
+	crane.ship.MarkPublic()
+
+	// Assign crane to make it available to others.
+	AssignCrane(crane.ConnectedHub.ID, crane)
+
+	log.Infof("spn/docks: %s (was %s) is now public", crane, maskedID)
+	return nil
+}
+
+// LocalAddr returns the ship's local address.
+func (crane *Crane) LocalAddr() net.Addr {
+	return crane.ship.LocalAddr()
+}
+
+// RemoteAddr returns the ship's remote address.
+func (crane *Crane) RemoteAddr() net.Addr {
+	return crane.ship.RemoteAddr()
+}
+
+// Transport returns the ship's transport.
+func (crane *Crane) Transport() *hub.Transport {
+	return crane.ship.Transport()
+}
+
+// getNextTerminalID returns the next free terminal ID, advancing in steps
+// of 8 and skipping IDs that are still in use.
+func (crane *Crane) getNextTerminalID() uint32 {
+	crane.terminalsLock.Lock()
+	defer crane.terminalsLock.Unlock()
+
+	for {
+		crane.nextTerminalID += 8
+		if _, inUse := crane.terminals[crane.nextTerminalID]; !inUse {
+			return crane.nextTerminalID
+		}
+	}
+}
+
+// terminalCount returns the number of registered terminals (including the controller).
+func (crane *Crane) terminalCount() int {
+	crane.terminalsLock.Lock()
+	defer crane.terminalsLock.Unlock()
+
+	return len(crane.terminals)
+}
+
+// getTerminal returns the registered terminal with the given ID, if any.
+func (crane *Crane) getTerminal(id uint32) (t terminal.Terminal, ok bool) {
+	crane.terminalsLock.Lock()
+	defer crane.terminalsLock.Unlock()
+
+	t, ok = crane.terminals[id]
+	return
+}
+
+// setTerminal registers the given terminal under its own ID.
+func (crane *Crane) setTerminal(t terminal.Terminal) {
+	crane.terminalsLock.Lock()
+	defer crane.terminalsLock.Unlock()
+
+	crane.terminals[t.ID()] = t
+}
+
+// deleteTerminal removes and returns the terminal with the given ID, if registered.
+func (crane *Crane) deleteTerminal(id uint32) (t terminal.Terminal, ok bool) {
+	crane.terminalsLock.Lock()
+	defer crane.terminalsLock.Unlock()
+
+	t, ok = crane.terminals[id]
+	if ok {
+		delete(crane.terminals, id)
+		return t, true
+	}
+	return nil, false
+}
+
+// AbandonTerminal abandons the terminal with the given ID.
+// It is called twice per terminal: once while the terminal is still
+// registered (triggering its abandon), and once after the terminal finished
+// shutting down (possibly triggering crane retirement).
+func (crane *Crane) AbandonTerminal(id uint32, err *terminal.Error) {
+	// Get active terminal.
+	t, ok := crane.deleteTerminal(id)
+	if ok {
+		// If the terminal was registered, abandon it.
+
+		// Log reason the terminal is ending. Override stopping error with nil.
+		switch {
+		case err == nil || err.IsOK():
+			log.Debugf("spn/docks: %T %s is being abandoned", t, t.FmtID())
+		case err.Is(terminal.ErrStopping):
+			err = nil
+			log.Debugf("spn/docks: %T %s is being abandoned by peer", t, t.FmtID())
+		case err.Is(terminal.ErrNoActivity):
+			err = nil
+			log.Debugf("spn/docks: %T %s is being abandoned due to no activity", t, t.FmtID())
+		default:
+			log.Warningf("spn/docks: %T %s: %s", t, t.FmtID(), err)
+		}
+
+		// Call the terminal's abandon function.
+		t.Abandon(err)
+	} else { //nolint:gocritic
+		// When a crane terminal is abandoned, it calls crane.AbandonTerminal when
+		// finished. This time, the terminal won't be in the registry anymore and
+		// it finished shutting down, so we can now check if the crane needs to be
+		// stopped.
+
+		// If the crane is stopping, check if we can stop.
+		// We can stop when all terminals are abandoned or after a timeout.
+		// FYI: The crane controller will always take up one slot.
+		if crane.stopping.IsSet() &&
+			crane.terminalCount() <= 1 {
+			// Stop the crane in worker, so the caller can do some work.
+			module.StartWorker("retire crane", func(_ context.Context) error {
+				// Let enough time for the last errors to be sent, as terminals are abandoned in a goroutine.
+				time.Sleep(3 * time.Second)
+				crane.Stop(nil)
+				return nil
+			})
+		}
+	}
+}
+
+// sendImportantTerminalMsg queues a message on the high-priority controller
+// channel. The timeout parameter is currently unused; the call blocks until
+// the message is queued or the crane context is canceled.
+func (crane *Crane) sendImportantTerminalMsg(msg *terminal.Msg, timeout time.Duration) *terminal.Error {
+	select {
+	case crane.controllerMsgs <- msg:
+		return nil
+	case <-crane.ctx.Done():
+		msg.Finish()
+		return terminal.ErrCanceled
+	}
+}
+
+// Send is used by others to send a message through the crane.
+// The timeout parameter is currently unused; the call blocks until the
+// message is queued or the crane context is canceled.
+func (crane *Crane) Send(msg *terminal.Msg, timeout time.Duration) *terminal.Error {
+	select {
+	case crane.terminalMsgs <- msg:
+		return nil
+	case <-crane.ctx.Done():
+		msg.Finish()
+		return terminal.ErrCanceled
+	}
+}
+
+// encrypt encrypts a shipment with the crane's jess session and returns it
+// in wire format. If no session is set (encryption disabled), the shipment
+// is returned unchanged. The session lock is held for the whole operation.
+func (crane *Crane) encrypt(shipment *container.Container) (encrypted *container.Container, err error) {
+	// Skip if encryption is not enabled.
+	if crane.jession == nil {
+		return shipment, nil
+	}
+
+	crane.jessionLock.Lock()
+	defer crane.jessionLock.Unlock()
+
+	letter, err := crane.jession.Close(shipment.CompileData())
+	if err != nil {
+		return nil, err
+	}
+
+	encrypted, err = letter.ToWire()
+	if err != nil {
+		return nil, fmt.Errorf("failed to pack letter: %w", err)
+	}
+
+	return encrypted, nil
+}
+
+// decrypt is the inverse of encrypt: it parses a wire-format letter and
+// opens it with the crane's jess session. If no session is set, the
+// shipment is returned unchanged.
+func (crane *Crane) decrypt(shipment *container.Container) (decrypted *container.Container, err error) {
+	// Skip if encryption is not enabled.
+	if crane.jession == nil {
+		return shipment, nil
+	}
+
+	crane.jessionLock.Lock()
+	defer crane.jessionLock.Unlock()
+
+	letter, err := jess.LetterFromWire(shipment)
+	if err != nil {
+		return nil, fmt.Errorf("failed to parse letter: %w", err)
+	}
+
+	decryptedData, err := crane.jession.Open(letter)
+	if err != nil {
+		return nil, err
+	}
+
+	return container.New(decryptedData), nil
+}
+
+// unloader is the worker that reads length-prefixed (varint) shipments from
+// the ship and hands them to the handler via the unloading channel. Any read
+// or framing error stops the crane; the worker itself always returns nil.
+func (crane *Crane) unloader(workerCtx context.Context) error {
+	// Unclean shutdown safeguard.
+	defer crane.Stop(terminal.ErrUnknownError.With("unloader died"))
+
+	for {
+		// Get first couple bytes to get the packet length.
+		// 2 bytes are enough to encode 65535.
+		// On the other hand, packets can be only 2 bytes small.
+		lenBuf := make([]byte, 2)
+		err := crane.unloadUntilFull(lenBuf)
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				crane.Stop(terminal.ErrStopping.With("connection closed"))
+			} else {
+				crane.Stop(terminal.ErrInternalError.With("failed to unload: %w", err))
+			}
+			return nil
+		}
+
+		// Unpack length.
+		containerLen, n, err := varint.Unpack64(lenBuf)
+		if err != nil {
+			crane.Stop(terminal.ErrMalformedData.With("failed to get container length: %w", err))
+			return nil
+		}
+		switch {
+		case containerLen <= 0:
+			crane.Stop(terminal.ErrMalformedData.With("received empty container with length %d", containerLen))
+			return nil
+		case containerLen > maxUnloadSize:
+			crane.Stop(terminal.ErrMalformedData.With("received oversized container with length %d", containerLen))
+			return nil
+		}
+
+		// Build shipment.
+		// leftovers are payload bytes already read into lenBuf past the varint.
+		var shipmentBuf []byte
+		leftovers := len(lenBuf) - n
+
+		if leftovers == int(containerLen) {
+			// We already have all the shipment data.
+			shipmentBuf = lenBuf[n:]
+		} else {
+			// Create a shipment buffer, copy leftovers and read the rest from the connection.
+			shipmentBuf = make([]byte, containerLen)
+			if leftovers > 0 {
+				copy(shipmentBuf, lenBuf[n:])
+			}
+
+			// Read remaining shipment.
+			err = crane.unloadUntilFull(shipmentBuf[leftovers:])
+			if err != nil {
+				crane.Stop(terminal.ErrInternalError.With("failed to unload: %w", err))
+				return nil
+			}
+		}
+
+		// Submit to handler.
+		select {
+		case <-crane.ctx.Done():
+			crane.Stop(nil)
+			return nil
+		case crane.unloading <- container.New(shipmentBuf):
+		}
+	}
+}
+
// unloadUntilFull reads from the ship until buf is completely filled,
// then reports the read bytes to the crane's traffic metrics.
// It returns any error reported by the underlying ship.
func (crane *Crane) unloadUntilFull(buf []byte) error {
	var bytesRead int
	for {
		// Get shipment from ship.
		n, err := crane.ship.UnloadTo(buf[bytesRead:])
		if err != nil {
			return err
		}
		if n == 0 {
			// A zero-byte read without an error is unexpected; trace it for debugging.
			log.Tracef("spn/docks: %s unloaded 0 bytes", crane)
		}
		bytesRead += n

		// Return if buffer has been fully filled.
		if bytesRead == len(buf) {
			// Submit metrics.
			crane.submitCraneTrafficStats(bytesRead)
			crane.NetState.ReportTraffic(uint64(bytesRead), true)

			return nil
		}
	}
}
+
// handler processes unloaded shipments: it decrypts them, reassembles
// the contained length-prefixed segments (which may span multiple
// shipments), and dispatches each segment to its terminal by ID and
// message type. It is started as a module worker and stops the crane
// on any integrity or framing error.
func (crane *Crane) handler(workerCtx context.Context) error {
	// partialShipment buffers data of a segment that has not fully arrived yet.
	var partialShipment *container.Container
	// segmentLength is the pending segment's length; zero means "not read yet".
	var segmentLength uint32

	// Unclean shutdown safeguard.
	defer crane.Stop(terminal.ErrUnknownError.With("handler died"))

handling:
	for {
		select {
		case <-crane.ctx.Done():
			crane.Stop(nil)
			return nil

		case shipment := <-crane.unloading:
			// log.Debugf("spn/crane %s: before decrypt: %v ... %v", crane.ID, c.CompileData()[:10], c.CompileData()[c.Length()-10:])

			// Decrypt shipment.
			shipment, err := crane.decrypt(shipment)
			if err != nil {
				crane.Stop(terminal.ErrIntegrity.With("failed to decrypt: %w", err))
				return nil
			}

			// Process all segments/containers of the shipment.
			for shipment.HoldsData() {
				if partialShipment != nil {
					// Continue processing partial segment.
					// Append new shipment to previous partial segment.
					partialShipment.AppendContainer(shipment)
					shipment, partialShipment = partialShipment, nil
				}

				// Get next segment length.
				if segmentLength == 0 {
					segmentLength, err = shipment.GetNextN32()
					if err != nil {
						if errors.Is(err, varint.ErrBufTooSmall) {
							// Continue handling when there is not yet enough data.
							partialShipment = shipment
							segmentLength = 0
							continue handling
						}

						crane.Stop(terminal.ErrMalformedData.With("failed to get segment length: %w", err))
						return nil
					}

					if segmentLength == 0 {
						// Remainder is padding.
						// (A zero segment length is the padding indicator; see load.)
						continue handling
					}

					// Check if the segment is within the boundary.
					if segmentLength > maxSegmentLength {
						crane.Stop(terminal.ErrMalformedData.With("received oversized segment with length %d", segmentLength))
						return nil
					}
				}

				// Check if we have enough data for the segment.
				if uint32(shipment.Length()) < segmentLength {
					partialShipment = shipment
					continue handling
				}

				// Get segment from shipment.
				segment, err := shipment.GetAsContainer(int(segmentLength))
				if err != nil {
					crane.Stop(terminal.ErrMalformedData.With("failed to get segment: %w", err))
					return nil
				}
				segmentLength = 0

				// Get terminal ID and message type of segment.
				terminalID, terminalMsgType, err := terminal.ParseIDType(segment)
				if err != nil {
					crane.Stop(terminal.ErrMalformedData.With("failed to get terminal ID and msg type: %w", err))
					return nil
				}

				switch terminalMsgType {
				case terminal.MsgTypeInit:
					crane.establishTerminal(terminalID, segment)

				case terminal.MsgTypeData, terminal.MsgTypePriorityData:
					// Get terminal and let it further handle the message.
					t, ok := crane.getTerminal(terminalID)
					if ok {
						// Create msg and set priority.
						msg := terminal.NewEmptyMsg()
						msg.FlowID = terminalID
						msg.Type = terminalMsgType
						msg.Data = segment
						if msg.Type == terminal.MsgTypePriorityData {
							msg.Unit.MakeHighPriority()
						}
						// Deliver to terminal.
						deliveryErr := t.Deliver(msg)
						if deliveryErr != nil {
							msg.Finish()
							// This is a hot path. Start a worker for abandoning the terminal.
							module.StartWorker("end terminal", func(_ context.Context) error {
								crane.AbandonTerminal(t.ID(), deliveryErr.Wrap("failed to deliver data"))
								return nil
							})
						}
					} else {
						log.Tracef("spn/docks: %s received msg for unknown terminal %d", crane, terminalID)
					}

				case terminal.MsgTypeStop:
					// Parse error.
					receivedErr, err := terminal.ParseExternalError(segment.CompileData())
					if err != nil {
						log.Warningf("spn/docks: %s failed to parse abandon error: %s", crane, err)
						receivedErr = terminal.ErrUnknownError.AsExternal()
					}
					// This is a hot path. Start a worker for abandoning the terminal.
					module.StartWorker("end terminal", func(_ context.Context) error {
						crane.AbandonTerminal(terminalID, receivedErr)
						return nil
					})
				}
			}
		}
	}
}
+
+func (crane *Crane) loader(workerCtx context.Context) (err error) {
+	shipment := container.New()
+	var partialShipment *container.Container
+	var loadingTimer *time.Timer
+
+	// Unclean shutdown safeguard.
+	defer crane.Stop(terminal.ErrUnknownError.With("loader died"))
+
+	// Return the loading wait channel if waiting.
+	loadNow := func() <-chan time.Time {
+		if loadingTimer != nil {
+			return loadingTimer.C
+		}
+		return nil
+	}
+
+	// Make sure any received message is finished
+	var msg, firstMsg *terminal.Msg
+	defer msg.Finish()
+	defer firstMsg.Finish()
+
+	for {
+		// Reset first message in shipment.
+		firstMsg.Finish()
+		firstMsg = nil
+
+	fillingShipment:
+		for shipment.Length() < crane.targetLoadSize {
+			// Gather segments until shipment is filled.
+
+			// Prioritize messages from the controller.
+			select {
+			case msg = <-crane.controllerMsgs:
+			case <-crane.ctx.Done():
+				crane.Stop(nil)
+				return nil
+
+			default:
+				// Then listen for all.
+				select {
+				case msg = <-crane.controllerMsgs:
+				case msg = <-crane.terminalMsgs:
+				case <-loadNow():
+					break fillingShipment
+				case <-crane.ctx.Done():
+					crane.Stop(nil)
+					return nil
+				}
+			}
+
+			// Debug unit leaks.
+			msg.Debug()
+
+			// Handle new message.
+			if msg != nil {
+				// Pack msg and add to segment.
+				msg.Pack()
+				newSegment := msg.Data
+
+				// Check if this is the first message.
+				// This is the only message where we wait for a slot.
+				if firstMsg == nil {
+					firstMsg = msg
+					firstMsg.Unit.WaitForSlot()
+				} else {
+					msg.Finish()
+				}
+
+				// Check length.
+				if newSegment.Length() > maxSegmentLength {
+					log.Warningf("spn/docks: %s ignored oversized segment with length %d", crane, newSegment.Length())
+					continue fillingShipment
+				}
+
+				// Append to shipment.
+				shipment.AppendContainer(newSegment)
+
+				// Set loading max wait timer on first segment.
+				if loadingTimer == nil {
+					loadingTimer = time.NewTimer(loadingMaxWaitDuration)
+				}
+
+			} else if crane.stopped.IsSet() {
+				// If there is no new segment, this might have been triggered by a
+				// closed channel. Check if the crane is still active.
+				return nil
+			}
+		}
+
+	sendingShipment:
+		for {
+			// Check if we are over the target load size and split the shipment.
+			if shipment.Length() > crane.targetLoadSize {
+				partialShipment, err = shipment.GetAsContainer(crane.targetLoadSize)
+				if err != nil {
+					crane.Stop(terminal.ErrInternalError.With("failed to split segment: %w", err))
+					return nil
+				}
+				shipment, partialShipment = partialShipment, shipment
+			}
+
+			// Load shipment.
+			err = crane.load(shipment)
+			if err != nil {
+				crane.Stop(terminal.ErrShipSunk.With("failed to load shipment: %w", err))
+				return nil
+			}
+
+			// Reset loading timer.
+			loadingTimer = nil
+
+			// Continue loading with partial shipment, or a new one.
+			if partialShipment != nil {
+				// Continue loading with a partial previous shipment.
+				shipment, partialShipment = partialShipment, nil
+
+				// If shipment is not big enough to send immediately, wait for more data.
+				if shipment.Length() < crane.targetLoadSize {
+					loadingTimer = time.NewTimer(loadingMaxWaitDuration)
+					break sendingShipment
+				}
+
+			} else {
+				// Continue loading with new shipment.
+				shipment = container.New()
+				break sendingShipment
+			}
+		}
+	}
+}
+
// load finalizes a shipment - padding, optional encryption, length
// prefix - reports traffic metrics, and loads it onto the ship.
func (crane *Crane) load(c *container.Container) error {
	// Add Padding if needed.
	if crane.opts.Padding > 0 {
		// Pad the final on-wire length (payload plus its varint length
		// prefix) up to a multiple of the configured padding size.
		paddingNeeded := int(crane.opts.Padding) -
			((c.Length() + varint.EncodedSize(uint64(c.Length()))) % int(crane.opts.Padding))
		// As the length changes slightly with the padding, we should avoid loading
		// lengths around the varint size hops:
		// - 128
		// - 16384
		// - 2097152
		// - 268435456

		// Pad to target load size at maximum.
		maxPadding := crane.targetLoadSize - c.Length()
		if paddingNeeded > maxPadding {
			paddingNeeded = maxPadding
		}

		if paddingNeeded > 0 {
			// Add padding indicator.
			// A zero segment length marks the remainder as padding (see handler).
			c.Append([]byte{0})
			paddingNeeded--

			// Add needed padding data.
			if paddingNeeded > 0 {
				padding, err := rng.Bytes(paddingNeeded)
				if err != nil {
					// Best effort: fall back to zero padding.
					log.Debugf("spn/docks: %s failed to get random padding data, using zeros instead", crane)
					padding = make([]byte, paddingNeeded)
				}
				c.Append(padding)
			}
		}
	}

	// Encrypt shipment.
	c, err := crane.encrypt(c)
	if err != nil {
		return fmt.Errorf("failed to encrypt: %w", err)
	}

	// Finalize data.
	c.PrependLength()
	readyToSend := c.CompileData()

	// Submit metrics.
	crane.submitCraneTrafficStats(len(readyToSend))
	crane.NetState.ReportTraffic(uint64(len(readyToSend)), false)

	// Load onto ship.
	err = crane.ship.Load(readyToSend)
	if err != nil {
		return fmt.Errorf("failed to load ship: %w", err)
	}

	return nil
}
+
// Stop stops the crane.
// It is idempotent: only the first call performs the shutdown
// sequence - unregistering the crane, abandoning all terminals and
// the controller, waiting briefly for them to finish, sinking the
// ship and canceling the crane context. A nil or OK error indicates
// a regular shutdown.
func (crane *Crane) Stop(err *terminal.Error) {
	// Only the first caller to flip the stopped flag proceeds.
	if !crane.stopped.SetToIf(false, true) {
		return
	}

	// Log error message.
	if err != nil {
		if err.IsOK() {
			log.Infof("spn/docks: %s is done", crane)
		} else {
			log.Warningf("spn/docks: %s is stopping: %s", crane, err)
		}
	}

	// Unregister crane.
	unregisterCrane(crane)

	// Stop all terminals.
	for _, t := range crane.allTerms() {
		t.Abandon(err) // Async!
	}

	// Stop controller.
	if crane.Controller != nil {
		crane.Controller.Abandon(err) // Async!
	}

	// Wait shortly for all terminals to finish abandoning.
	waitStep := 50 * time.Millisecond
	for i := time.Duration(0); i < maxCraneStopDuration; i += waitStep {
		// Check if all terminals are done.
		if crane.terminalCount() == 0 {
			break
		}

		time.Sleep(waitStep)
	}

	// Close connection.
	crane.ship.Sink()

	// Cancel crane context.
	crane.cancelCtx()

	// Notify about change.
	crane.NotifyUpdate()
}
+
+func (crane *Crane) allTerms() []terminal.Terminal {
+	crane.terminalsLock.Lock()
+	defer crane.terminalsLock.Unlock()
+
+	terms := make([]terminal.Terminal, 0, len(crane.terminals))
+	for _, term := range crane.terminals {
+		terms = append(terms, term)
+	}
+
+	return terms
+}
+
+func (crane *Crane) String() string {
+	remoteAddr := crane.ship.RemoteAddr()
+	switch {
+	case remoteAddr == nil:
+		return fmt.Sprintf("crane %s", crane.ID)
+	case crane.ship.IsMine():
+		return fmt.Sprintf("crane %s to %s", crane.ID, crane.ship.MaskAddress(crane.ship.RemoteAddr()))
+	default:
+		return fmt.Sprintf("crane %s from %s", crane.ID, crane.ship.MaskAddress(crane.ship.RemoteAddr()))
+	}
+}
+
// Stopped returns whether the crane has stopped.
// It reports the same flag that Stop sets, so it also returns true
// while the shutdown sequence is still in progress.
func (crane *Crane) Stopped() bool {
	return crane.stopped.IsSet()
}
diff --git a/spn/docks/crane_establish.go b/spn/docks/crane_establish.go
new file mode 100644
index 00000000..3fa26732
--- /dev/null
+++ b/spn/docks/crane_establish.go
@@ -0,0 +1,81 @@
+package docks
+
+import (
+	"context"
+	"time"
+
+	"github.com/safing/portbase/container"
+	"github.com/safing/portbase/log"
+	"github.com/safing/portmaster/spn/terminal"
+)
+
// Idle timeouts for crane terminals.
const (
	// defaultTerminalIdleTimeout is the default terminal idle timeout.
	// (Not referenced in this file; presumably applied to locally created
	// terminals - confirm at call sites.)
	defaultTerminalIdleTimeout = 15 * time.Minute
	// remoteTerminalIdleTimeout is applied to terminals established by a
	// remote peer via a public crane (see establishTerminal).
	remoteTerminalIdleTimeout  = 30 * time.Minute
)
+
+// EstablishNewTerminal establishes a new terminal with the crane.
+func (crane *Crane) EstablishNewTerminal(
+	localTerm terminal.Terminal,
+	initData *container.Container,
+) *terminal.Error {
+	// Create message.
+	msg := terminal.NewEmptyMsg()
+	msg.FlowID = localTerm.ID()
+	msg.Type = terminal.MsgTypeInit
+	msg.Data = initData
+
+	// Register terminal with crane.
+	crane.setTerminal(localTerm)
+
+	// Send message.
+	select {
+	case crane.controllerMsgs <- msg:
+		log.Debugf("spn/docks: %s initiated new terminal %d", crane, localTerm.ID())
+		return nil
+	case <-crane.ctx.Done():
+		crane.AbandonTerminal(localTerm.ID(), terminal.ErrStopping.With("initiation aborted"))
+		return terminal.ErrStopping
+	}
+}
+
// establishTerminal creates a new remote crane terminal for the given
// terminal ID and init data, as requested by the peer via an init
// message. On failure, a stop message carrying the error is sent back
// to the initiator.
func (crane *Crane) establishTerminal(id uint32, initData *container.Container) {
	// Create new remote crane terminal.
	newTerminal, _, err := NewRemoteCraneTerminal(
		crane,
		id,
		initData,
	)
	if err == nil {
		// Connections via public cranes have a timeout.
		if crane.Public() {
			newTerminal.TerminalBase.SetTimeout(remoteTerminalIdleTimeout)
		}
		// Register terminal with crane.
		crane.setTerminal(newTerminal)
		log.Debugf("spn/docks: %s established new crane terminal %d", crane, newTerminal.ID())
		return
	}

	// If something goes wrong, send an error back.
	log.Warningf("spn/docks: %s failed to establish crane terminal: %s", crane, err)

	// Build abandon message.
	msg := terminal.NewMsg(err.Pack())
	msg.FlowID = id
	msg.Type = terminal.MsgTypeStop

	// Send message directly, or async.
	// The direct send must not block, as this is called from the handler.
	select {
	case crane.terminalMsgs <- msg:
	default:
		// Send error async.
		module.StartWorker("abandon terminal", func(ctx context.Context) error {
			select {
			case crane.terminalMsgs <- msg:
			case <-ctx.Done():
			}
			return nil
		})
	}
}
diff --git a/spn/docks/crane_init.go b/spn/docks/crane_init.go
new file mode 100644
index 00000000..740f7cdb
--- /dev/null
+++ b/spn/docks/crane_init.go
@@ -0,0 +1,339 @@
+package docks
+
+import (
+	"context"
+	"time"
+
+	"github.com/safing/jess"
+	"github.com/safing/portbase/container"
+	"github.com/safing/portbase/formats/dsd"
+	"github.com/safing/portbase/formats/varint"
+	"github.com/safing/portbase/info"
+	"github.com/safing/portbase/log"
+	"github.com/safing/portmaster/spn/conf"
+	"github.com/safing/portmaster/spn/terminal"
+)
+
+/*
+
+Crane Init Message Format:
+used by init procedures
+
+- Data [bytes block]
+	- MsgType [varint]
+	- Data [bytes; only when MsgType is Verify or Start*]
+
+Crane Init Response Format:
+
+- Data [bytes block]
+
+Crane Operational Message Format:
+
+- Data [bytes block]
+	- possibly encrypted
+
+*/
+
// Crane Msg Types.
// These identify the messages exchanged during the crane init phase;
// see startRemote for how each type is handled by the receiving side.
const (
	CraneMsgTypeEnd              = 0
	CraneMsgTypeInfo             = 1
	CraneMsgTypeRequestHubInfo   = 2
	CraneMsgTypeVerify           = 3
	CraneMsgTypeStartEncrypted   = 4
	CraneMsgTypeStartUnencrypted = 5
)
+
// Start starts the crane.
// Depending on whether the underlying ship is ours (outgoing) or the
// peer's (incoming), the local or remote init procedure runs. On
// failure the crane is stopped again and the error returned.
func (crane *Crane) Start(callerCtx context.Context) error {
	log.Infof("spn/docks: %s is starting", crane)

	// Submit metrics.
	newCranes.Inc()

	// Start crane depending on situation.
	var tErr *terminal.Error
	if crane.ship.IsMine() {
		tErr = crane.startLocal(callerCtx)
	} else {
		tErr = crane.startRemote(callerCtx)
	}

	// Stop crane again if starting failed.
	if tErr != nil {
		crane.Stop(tErr)
		return tErr
	}

	log.Debugf("spn/docks: %s started", crane)
	// Return an explicit nil for working "!= nil" checks.
	// (Returning the nil *terminal.Error would yield a non-nil error interface.)
	return nil
}
+
// startLocal runs the init procedure for an outgoing crane: if the
// ship is not already secure, it requests and verifies the remote
// hub's info and sets up an encrypted jess session; it then creates
// the local crane controller and sends the start message, before
// starting the loader and handler workers.
func (crane *Crane) startLocal(callerCtx context.Context) *terminal.Error {
	module.StartWorker("crane unloader", crane.unloader)

	if !crane.ship.IsSecure() {
		// Start encrypted channel.
		// Check if we have all the data we need from the Hub.
		if crane.ConnectedHub == nil {
			return terminal.ErrIncorrectUsage.With("cannot start encrypted channel without connected hub")
		}

		// Always request hub info, as we don't know if the hub has restarted in
		// the meantime and lost ephemeral keys.
		hubInfoRequest := container.New(
			varint.Pack8(CraneMsgTypeRequestHubInfo),
		)
		hubInfoRequest.PrependLength()
		err := crane.ship.Load(hubInfoRequest.CompileData())
		if err != nil {
			return terminal.ErrShipSunk.With("failed to request hub info: %w", err)
		}

		// Wait for reply.
		var reply *container.Container
		select {
		case reply = <-crane.unloading:
		case <-time.After(30 * time.Second):
			return terminal.ErrTimeout.With("waiting for hub info")
		case <-crane.ctx.Done():
			return terminal.ErrShipSunk.With("waiting for hub info")
		case <-callerCtx.Done():
			return terminal.ErrCanceled.With("waiting for hub info")
		}

		// Parse and import Announcement and Status.
		announcementData, err := reply.GetNextBlock()
		if err != nil {
			return terminal.ErrMalformedData.With("failed to get announcement: %w", err)
		}
		statusData, err := reply.GetNextBlock()
		if err != nil {
			return terminal.ErrMalformedData.With("failed to get status: %w", err)
		}
		h, _, tErr := ImportAndVerifyHubInfo(
			callerCtx,
			crane.ConnectedHub.ID,
			announcementData, statusData, conf.MainMapName, conf.MainMapScope,
		)
		if tErr != nil {
			return tErr.Wrap("failed to import and verify hub")
		}
		// Update reference in case it was changed by the import.
		crane.ConnectedHub = h

		// Now, try to select a public key again.
		signet := crane.ConnectedHub.SelectSignet()
		if signet == nil {
			return terminal.ErrHubNotReady.With("failed to select signet (after updating hub info)")
		}

		// Configure encryption.
		env := jess.NewUnconfiguredEnvelope()
		env.SuiteID = jess.SuiteWireV1
		env.Recipients = []*jess.Signet{signet}

		// Do not encrypt directly, rather get session for future use, then encrypt.
		crane.jession, err = env.WireCorrespondence(nil)
		if err != nil {
			return terminal.ErrInternalError.With("failed to create encryption session: %w", err)
		}
	}

	// Create crane controller.
	_, initData, tErr := NewLocalCraneControllerTerminal(crane, terminal.DefaultCraneControllerOpts())
	if tErr != nil {
		return tErr.Wrap("failed to set up controller")
	}

	// Prepare init message for sending.
	if crane.ship.IsSecure() {
		initData.PrependNumber(CraneMsgTypeStartUnencrypted)
	} else {
		// Encrypt controller initializer.
		letter, err := crane.jession.Close(initData.CompileData())
		if err != nil {
			return terminal.ErrInternalError.With("failed to encrypt initial packet: %w", err)
		}
		initData, err = letter.ToWire()
		if err != nil {
			return terminal.ErrInternalError.With("failed to pack initial packet: %w", err)
		}
		initData.PrependNumber(CraneMsgTypeStartEncrypted)
	}

	// Send start message.
	initData.PrependLength()
	err := crane.ship.Load(initData.CompileData())
	if err != nil {
		return terminal.ErrShipSunk.With("failed to send init msg: %w", err)
	}

	// Start remaining workers.
	module.StartWorker("crane loader", crane.loader)
	module.StartWorker("crane handler", crane.handler)

	return nil
}
+
// startRemote runs the init procedure for an incoming crane: it
// answers info, hub info and verification requests until the remote
// side starts the channel (encrypted or unencrypted), then creates
// the remote crane controller and starts the loader and handler
// workers.
func (crane *Crane) startRemote(callerCtx context.Context) *terminal.Error {
	var initMsg *container.Container

	module.StartWorker("crane unloader", crane.unloader)

handling:
	for {
		// Wait for request.
		var request *container.Container
		select {
		case request = <-crane.unloading:

		case <-time.After(30 * time.Second):
			return terminal.ErrTimeout.With("waiting for crane init msg")
		case <-crane.ctx.Done():
			return terminal.ErrShipSunk.With("waiting for crane init msg")
		case <-callerCtx.Done():
			return terminal.ErrCanceled.With("waiting for crane init msg")
		}

		msgType, err := request.GetNextN8()
		if err != nil {
			return terminal.ErrMalformedData.With("failed to parse crane msg type: %s", err)
		}

		switch msgType {
		case CraneMsgTypeEnd:
			// End connection.
			return terminal.ErrStopping

		case CraneMsgTypeInfo:
			// Info is a terminating request.
			err := crane.handleCraneInfo()
			if err != nil {
				return err
			}
			log.Debugf("spn/docks: %s sent version info", crane)

		case CraneMsgTypeRequestHubInfo:
			// Handle Hub info request.
			err := crane.handleCraneHubInfo()
			if err != nil {
				return err
			}
			log.Debugf("spn/docks: %s sent hub info", crane)

		case CraneMsgTypeVerify:
			// Verify is a terminating request.
			err := crane.handleCraneVerification(request)
			if err != nil {
				return err
			}
			log.Infof("spn/docks: %s sent hub verification", crane)

		case CraneMsgTypeStartUnencrypted:
			initMsg = request

			// Start crane with initMsg.
			log.Debugf("spn/docks: %s initiated unencrypted channel", crane)
			break handling

		case CraneMsgTypeStartEncrypted:
			// An encrypted channel requires our own identity for decryption.
			if crane.identity == nil {
				return terminal.ErrIncorrectUsage.With("cannot start incoming crane without designated identity")
			}

			// Set up encryption.
			letter, err := jess.LetterFromWire(container.New(request.CompileData()))
			if err != nil {
				return terminal.ErrMalformedData.With("failed to unpack initial packet: %w", err)
			}
			crane.jession, err = letter.WireCorrespondence(crane.identity)
			if err != nil {
				return terminal.ErrInternalError.With("failed to create encryption session: %w", err)
			}
			initMsgData, err := crane.jession.Open(letter)
			if err != nil {
				return terminal.ErrIntegrity.With("failed to decrypt initial packet: %w", err)
			}
			initMsg = container.New(initMsgData)

			// Start crane with initMsg.
			log.Debugf("spn/docks: %s initiated encrypted channel", crane)
			break handling
		}
	}

	_, _, err := NewRemoteCraneControllerTerminal(crane, initMsg)
	if err != nil {
		return err.Wrap("failed to start crane controller")
	}

	// Start remaining workers.
	module.StartWorker("crane loader", crane.loader)
	module.StartWorker("crane handler", crane.handler)

	return nil
}
+
+func (crane *Crane) endInit() *terminal.Error {
+	endMsg := container.New(
+		varint.Pack8(CraneMsgTypeEnd),
+	)
+	endMsg.PrependLength()
+	err := crane.ship.Load(endMsg.CompileData())
+	if err != nil {
+		return terminal.ErrShipSunk.With("failed to send end msg: %w", err)
+	}
+	return nil
+}
+
+func (crane *Crane) handleCraneInfo() *terminal.Error {
+	// Pack info data.
+	infoData, err := dsd.Dump(info.GetInfo(), dsd.JSON)
+	if err != nil {
+		return terminal.ErrInternalError.With("failed to pack info: %w", err)
+	}
+	msg := container.New(infoData)
+
+	// Manually send reply.
+	msg.PrependLength()
+	err = crane.ship.Load(msg.CompileData())
+	if err != nil {
+		return terminal.ErrShipSunk.With("failed to send info reply: %w", err)
+	}
+
+	return nil
+}
+
+func (crane *Crane) handleCraneHubInfo() *terminal.Error {
+	msg := container.New()
+
+	// Check if we have an identity.
+	if crane.identity == nil {
+		return terminal.ErrIncorrectUsage.With("cannot handle hub info request without designated identity")
+	}
+
+	// Add Hub Announcement.
+	announcementData, err := crane.identity.ExportAnnouncement()
+	if err != nil {
+		return terminal.ErrInternalError.With("failed to export announcement: %w", err)
+	}
+	msg.AppendAsBlock(announcementData)
+
+	// Add Hub Status.
+	statusData, err := crane.identity.ExportStatus()
+	if err != nil {
+		return terminal.ErrInternalError.With("failed to export status: %w", err)
+	}
+	msg.AppendAsBlock(statusData)
+
+	// Manually send reply.
+	msg.PrependLength()
+	err = crane.ship.Load(msg.CompileData())
+	if err != nil {
+		return terminal.ErrShipSunk.With("failed to send hub info reply: %w", err)
+	}
+
+	return nil
+}
diff --git a/spn/docks/crane_netstate.go b/spn/docks/crane_netstate.go
new file mode 100644
index 00000000..508f5632
--- /dev/null
+++ b/spn/docks/crane_netstate.go
@@ -0,0 +1,131 @@
+package docks
+
+import (
+	"sync"
+	"sync/atomic"
+	"time"
+)
+
+// NetStatePeriodInterval defines the interval some of the net state should be reset.
+const NetStatePeriodInterval = 15 * time.Minute
+
// NetworkOptimizationState holds data for optimization purposes.
type NetworkOptimizationState struct {
	// lock guards all non-atomic fields below.
	lock sync.Mutex

	// lastSuggestedAt holds the time when the connection to the connected Hub was last suggested by the network optimization.
	lastSuggestedAt time.Time

	// stoppingRequested signifies whether stopping this lane is requested.
	stoppingRequested bool
	// stoppingRequestedByPeer signifies whether stopping this lane is requested by the peer.
	stoppingRequestedByPeer bool
	// markedStoppingAt holds the time when the crane was last marked as stopping.
	markedStoppingAt time.Time

	// Traffic counters are pointers so they can be updated via
	// sync/atomic without holding the lock (see ReportTraffic).
	lifetimeBytesIn  *uint64
	lifetimeBytesOut *uint64
	lifetimeStarted  time.Time
	periodBytesIn    *uint64
	periodBytesOut   *uint64
	periodStarted    time.Time
}
+
+func newNetworkOptimizationState() *NetworkOptimizationState {
+	return &NetworkOptimizationState{
+		lifetimeBytesIn:  new(uint64),
+		lifetimeBytesOut: new(uint64),
+		lifetimeStarted:  time.Now(),
+		periodBytesIn:    new(uint64),
+		periodBytesOut:   new(uint64),
+		periodStarted:    time.Now(),
+	}
+}
+
// UpdateLastSuggestedAt sets when the lane was last suggested to the current time.
func (netState *NetworkOptimizationState) UpdateLastSuggestedAt() {
	netState.lock.Lock()
	defer netState.lock.Unlock()

	netState.lastSuggestedAt = time.Now()
}
+
// StoppingState returns the current stopping state: whether stopping
// was requested locally, whether it was requested by the peer, and
// when the crane was last marked as stopping.
func (netState *NetworkOptimizationState) StoppingState() (requested, requestedByPeer bool, markedAt time.Time) {
	netState.lock.Lock()
	defer netState.lock.Unlock()

	return netState.stoppingRequested, netState.stoppingRequestedByPeer, netState.markedStoppingAt
}
+
// RequestStoppingSuggested returns whether the crane should request stopping,
// i.e. whether the lane has not been suggested by the network
// optimization within the given duration.
func (netState *NetworkOptimizationState) RequestStoppingSuggested(maxNotSuggestedDuration time.Duration) bool {
	netState.lock.Lock()
	defer netState.lock.Unlock()

	return time.Now().Add(-maxNotSuggestedDuration).After(netState.lastSuggestedAt)
}
+
// StoppingSuggested returns whether the crane should be marked as stopping,
// which is the case when both sides have requested it.
func (netState *NetworkOptimizationState) StoppingSuggested() bool {
	netState.lock.Lock()
	defer netState.lock.Unlock()

	return netState.stoppingRequested &&
		netState.stoppingRequestedByPeer
}
+
// StopSuggested returns whether the crane should be stopped:
// both sides requested stopping and the crane has been marked as
// stopping for longer than maxCraneStoppingDuration.
func (netState *NetworkOptimizationState) StopSuggested() bool {
	netState.lock.Lock()
	defer netState.lock.Unlock()

	return netState.stoppingRequested &&
		netState.stoppingRequestedByPeer &&
		!netState.markedStoppingAt.IsZero() &&
		time.Now().Add(-maxCraneStoppingDuration).After(netState.markedStoppingAt)
}
+
+// ReportTraffic adds the reported transferred data to the traffic stats.
+func (netState *NetworkOptimizationState) ReportTraffic(bytes uint64, in bool) {
+	if in {
+		atomic.AddUint64(netState.lifetimeBytesIn, bytes)
+		atomic.AddUint64(netState.periodBytesIn, bytes)
+	} else {
+		atomic.AddUint64(netState.lifetimeBytesOut, bytes)
+		atomic.AddUint64(netState.periodBytesOut, bytes)
+	}
+}
+
// LapsePeriod lapses the net state period, if needed.
// When NetStatePeriodInterval has passed since the period started,
// the period counters are reset to zero and the period restarted.
func (netState *NetworkOptimizationState) LapsePeriod() {
	netState.lock.Lock()
	defer netState.lock.Unlock()

	// Reset period if interval elapsed.
	if time.Now().Add(-NetStatePeriodInterval).After(netState.periodStarted) {
		atomic.StoreUint64(netState.periodBytesIn, 0)
		atomic.StoreUint64(netState.periodBytesOut, 0)
		netState.periodStarted = time.Now()
	}
}
+
// GetTrafficStats returns the traffic stats.
// The counters are read atomically; the lock guards the period and
// lifetime start timestamps.
func (netState *NetworkOptimizationState) GetTrafficStats() (
	lifetimeBytesIn uint64,
	lifetimeBytesOut uint64,
	lifetimeStarted time.Time,
	periodBytesIn uint64,
	periodBytesOut uint64,
	periodStarted time.Time,
) {
	netState.lock.Lock()
	defer netState.lock.Unlock()

	return atomic.LoadUint64(netState.lifetimeBytesIn),
		atomic.LoadUint64(netState.lifetimeBytesOut),
		netState.lifetimeStarted,
		atomic.LoadUint64(netState.periodBytesIn),
		atomic.LoadUint64(netState.periodBytesOut),
		netState.periodStarted
}
diff --git a/spn/docks/crane_terminal.go b/spn/docks/crane_terminal.go
new file mode 100644
index 00000000..731bf953
--- /dev/null
+++ b/spn/docks/crane_terminal.go
@@ -0,0 +1,122 @@
+package docks
+
+import (
+	"net"
+
+	"github.com/safing/portbase/container"
+	"github.com/safing/portmaster/spn/hub"
+	"github.com/safing/portmaster/spn/terminal"
+)
+
// CraneTerminal is a terminal started by a crane.
type CraneTerminal struct {
	*terminal.TerminalBase

	// Add-Ons
	terminal.SessionAddOn

	// crane is the crane that created and carries this terminal.
	crane *Crane
}
+
// NewLocalCraneTerminal returns a new local crane terminal.
// It creates the terminal base with a fresh terminal ID on the given
// crane and returns the terminal together with its init data, which
// must be sent to the remote side.
func NewLocalCraneTerminal(
	crane *Crane,
	remoteHub *hub.Hub,
	initMsg *terminal.TerminalOpts,
) (*CraneTerminal, *container.Container, *terminal.Error) {
	// Create Terminal Base.
	t, initData, err := terminal.NewLocalBaseTerminal(
		crane.ctx,
		crane.getNextTerminalID(),
		crane.ID,
		remoteHub,
		initMsg,
		crane,
	)
	if err != nil {
		return nil, nil, err
	}

	return initCraneTerminal(crane, t), initData, nil
}
+
// NewRemoteCraneTerminal returns a new remote crane terminal.
// It creates the terminal base from the init data received from the
// remote side and returns the terminal together with the parsed
// terminal options.
func NewRemoteCraneTerminal(
	crane *Crane,
	id uint32,
	initData *container.Container,
) (*CraneTerminal, *terminal.TerminalOpts, *terminal.Error) {
	// Create Terminal Base.
	t, initMsg, err := terminal.NewRemoteBaseTerminal(
		crane.ctx,
		id,
		crane.ID,
		crane.identity,
		initData,
		crane,
	)
	if err != nil {
		return nil, nil, err
	}

	return initCraneTerminal(crane, t), initMsg, nil
}
+
// initCraneTerminal wraps the given terminal base into a CraneTerminal,
// registers it as the terminal extension and starts its workers.
func initCraneTerminal(
	crane *Crane,
	t *terminal.TerminalBase,
) *CraneTerminal {
	// Create Crane Terminal and assign it as the extended Terminal.
	ct := &CraneTerminal{
		TerminalBase: t,
		crane:        crane,
	}
	t.SetTerminalExtension(ct)

	// Start workers.
	t.StartWorkers(module, "crane terminal")

	return ct
}
+
// GrantPermission grants the given permissions.
// Additionally, it will mark the crane as authenticated, if not public.
func (t *CraneTerminal) GrantPermission(grant terminal.Permission) {
	// Forward granted permission to base terminal.
	t.TerminalBase.GrantPermission(grant)

	// Mark crane as authenticated if not public or already authenticated.
	if !t.crane.Public() && !t.crane.Authenticated() {
		t.crane.authenticated.Set()

		// Submit metrics.
		newAuthenticatedCranes.Inc()
	}
}
+
// Address and transport information is delegated to the underlying crane.

// LocalAddr returns the crane's local address.
func (t *CraneTerminal) LocalAddr() net.Addr {
	return t.crane.LocalAddr()
}

// RemoteAddr returns the crane's remote address.
func (t *CraneTerminal) RemoteAddr() net.Addr {
	return t.crane.RemoteAddr()
}

// Transport returns the crane's transport.
func (t *CraneTerminal) Transport() *hub.Transport {
	return t.crane.Transport()
}
+
// IsBeingAbandoned returns whether the terminal is being abandoned.
func (t *CraneTerminal) IsBeingAbandoned() bool {
	return t.Abandoning.IsSet()
}

// HandleDestruction gives the terminal the ability to clean up.
// The terminal has already fully shut down at this point.
// Should never be called directly. Call Abandon() instead.
// It removes the terminal from the crane's registry.
func (t *CraneTerminal) HandleDestruction(err *terminal.Error) {
	t.crane.AbandonTerminal(t.ID(), err)
}
diff --git a/spn/docks/crane_test.go b/spn/docks/crane_test.go
new file mode 100644
index 00000000..9e13b5e1
--- /dev/null
+++ b/spn/docks/crane_test.go
@@ -0,0 +1,267 @@
+package docks
+
+import (
+	"context"
+	"fmt"
+	"os"
+	"runtime/pprof"
+	"sync"
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/assert"
+
+	"github.com/safing/portmaster/spn/cabin"
+	"github.com/safing/portmaster/spn/hub"
+	"github.com/safing/portmaster/spn/ships"
+	"github.com/safing/portmaster/spn/terminal"
+)
+
+func TestCraneCommunication(t *testing.T) {
+	t.Parallel()
+
+	// NOTE(review): these helpers mutate the package-level optimalMinLoadSize
+	// while TestCraneLoadingUnloading may run in parallel — potential data
+	// race on that variable; confirm with `go test -race`.
+	testCraneWithCounter(t, "plain-counter-load-100", false, 100, 1000)
+	testCraneWithCounter(t, "plain-counter-load-1000", false, 1000, 1000)
+	testCraneWithCounter(t, "plain-counter-load-10000", false, 10000, 1000)
+	testCraneWithCounter(t, "encrypted-counter", true, 1000, 1000)
+}
+
+func testCraneWithCounter(t *testing.T, testID string, encrypting bool, loadSize int, countTo uint64) { //nolint:unparam,thelper
+	var identity *cabin.Identity
+	var connectedHub *hub.Hub
+	if encrypting {
+		identity, connectedHub = getTestIdentity(t)
+	}
+
+	// Build ship and cranes.
+	optimalMinLoadSize = loadSize * 2
+	ship := ships.NewTestShip(!encrypting, loadSize)
+
+	var crane1, crane2 *Crane
+	var craneWg sync.WaitGroup
+	craneWg.Add(2)
+
+	go func() {
+		var err error
+		crane1, err = NewCrane(ship, connectedHub, nil)
+		if err != nil {
+			panic(fmt.Sprintf("crane test %s could not create crane1: %s", testID, err))
+		}
+		err = crane1.Start(module.Ctx)
+		if err != nil {
+			panic(fmt.Sprintf("crane test %s could not start crane1: %s", testID, err))
+		}
+		craneWg.Done()
+	}()
+	go func() {
+		var err error
+		crane2, err = NewCrane(ship.Reverse(), nil, identity)
+		if err != nil {
+			panic(fmt.Sprintf("crane test %s could not create crane2: %s", testID, err))
+		}
+		err = crane2.Start(module.Ctx)
+		if err != nil {
+			panic(fmt.Sprintf("crane test %s could not start crane2: %s", testID, err))
+		}
+		craneWg.Done()
+	}()
+
+	craneWg.Wait()
+	t.Logf("crane test %s setup complete", testID)
+
+	// Wait async for test to complete, print stack after timeout.
+	finished := make(chan struct{})
+	go func() {
+		select {
+		case <-finished:
+		case <-time.After(10 * time.Second):
+			t.Logf("crane test %s is taking too long, print stack:", testID)
+			_ = pprof.Lookup("goroutine").WriteTo(os.Stdout, 1)
+			os.Exit(1)
+		}
+	}()
+
+	t.Logf("crane1 controller: %+v", crane1.Controller)
+	t.Logf("crane2 controller: %+v", crane2.Controller)
+
+	// Start counters for testing.
+	op1, tErr := terminal.NewCounterOp(crane1.Controller, terminal.CounterOpts{
+		ClientCountTo: countTo,
+		ServerCountTo: countTo,
+	})
+	if tErr != nil {
+		t.Fatalf("crane test %s failed to run counter op: %s", testID, tErr)
+	}
+
+	// Wait for completion.
+	op1.Wait()
+	close(finished)
+
+	// Wait a little so that all errors can be propagated, so we can truly see
+	// if we succeeded.
+	time.Sleep(1 * time.Second)
+
+	// Check errors.
+	if op1.Error != nil {
+		t.Fatalf("crane test %s counter op1 failed: %s", testID, op1.Error)
+	}
+}
+
+type StreamingTerminal struct {
+	terminal.BareTerminal
+
+	test     *testing.T
+	id       uint32
+	crane    *Crane
+	recv     chan *terminal.Msg
+	testData []byte
+}
+
+func (t *StreamingTerminal) ID() uint32 {
+	return t.id
+}
+
+func (t *StreamingTerminal) Ctx() context.Context {
+	return module.Ctx
+}
+
+// Deliver hands a received message to the test terminal's receive channel.
+// NOTE(review): msg.Finish() is called right after the channel send, while
+// the receiving goroutine still reads msg.Data afterwards — confirm that
+// Finish does not recycle or invalidate the message data.
+func (t *StreamingTerminal) Deliver(msg *terminal.Msg) *terminal.Error {
+	t.recv <- msg
+	msg.Finish()
+	return nil
+}
+
+// Abandon removes the test terminal from its crane and reports a non-nil
+// abandon reason as a test failure.
+func (t *StreamingTerminal) Abandon(err *terminal.Error) {
+	t.crane.AbandonTerminal(t.ID(), err)
+	if err != nil {
+		t.test.Errorf("streaming terminal %d failed: %s", t.id, err)
+	}
+}
+
+func (t *StreamingTerminal) FmtID() string {
+	return fmt.Sprintf("test-%d", t.id)
+}
+
+func TestCraneLoadingUnloading(t *testing.T) {
+	t.Parallel()
+
+	testCraneWithStreaming(t, "plain-streaming", false, 100)
+	testCraneWithStreaming(t, "encrypted-streaming", true, 100)
+}
+
+func testCraneWithStreaming(t *testing.T, testID string, encrypting bool, loadSize int) { //nolint:thelper
+	var identity *cabin.Identity
+	var connectedHub *hub.Hub
+	if encrypting {
+		identity, connectedHub = getTestIdentity(t)
+	}
+
+	// Build ship and cranes.
+	optimalMinLoadSize = loadSize * 2
+	ship := ships.NewTestShip(!encrypting, loadSize)
+
+	var crane1, crane2 *Crane
+	var craneWg sync.WaitGroup
+	craneWg.Add(2)
+
+	go func() {
+		var err error
+		crane1, err = NewCrane(ship, connectedHub, nil)
+		if err != nil {
+			panic(fmt.Sprintf("crane test %s could not create crane1: %s", testID, err))
+		}
+		err = crane1.Start(module.Ctx)
+		if err != nil {
+			panic(fmt.Sprintf("crane test %s could not start crane1: %s", testID, err))
+		}
+		craneWg.Done()
+	}()
+	go func() {
+		var err error
+		crane2, err = NewCrane(ship.Reverse(), nil, identity)
+		if err != nil {
+			panic(fmt.Sprintf("crane test %s could not create crane2: %s", testID, err))
+		}
+		err = crane2.Start(module.Ctx)
+		if err != nil {
+			panic(fmt.Sprintf("crane test %s could not start crane2: %s", testID, err))
+		}
+		craneWg.Done()
+	}()
+
+	craneWg.Wait()
+	t.Logf("crane test %s setup complete", testID)
+
+	// Wait async for test to complete, print stack after timeout.
+	finished := make(chan struct{})
+	go func() {
+		select {
+		case <-finished:
+		case <-time.After(10 * time.Second):
+			t.Logf("crane test %s is taking too long, print stack:", testID)
+			_ = pprof.Lookup("goroutine").WriteTo(os.Stdout, 1)
+			os.Exit(1)
+		}
+	}()
+
+	t.Logf("crane1 controller: %+v", crane1.Controller)
+	t.Logf("crane2 controller: %+v", crane2.Controller)
+
+	// Create terminals and run test.
+	st := &StreamingTerminal{
+		test:     t,
+		id:       8,
+		crane:    crane2,
+		recv:     make(chan *terminal.Msg),
+		testData: []byte("The quick brown fox jumps over the lazy dog."),
+	}
+	crane2.terminals[st.ID()] = st
+
+	// Run streaming test.
+	var streamingWg sync.WaitGroup
+	streamingWg.Add(2)
+	count := 10000
+	go func() {
+		for i := 1; i <= count; i++ {
+			msg := terminal.NewMsg(st.testData)
+			msg.FlowID = st.id
+			err := crane1.Send(msg, 1*time.Second)
+			if err != nil {
+				msg.Finish()
+				crane1.Stop(err.Wrap("failed to submit terminal msg"))
+			}
+			// log.Tracef("spn/testing: + %d", i)
+		}
+		t.Logf("crane test %s done with sending", testID)
+		streamingWg.Done()
+	}()
+	go func() {
+		for i := 1; i <= count; i++ {
+			msg := <-st.recv
+			assert.Equal(t, st.testData, msg.Data.CompileData(), "data mismatched")
+			// log.Tracef("spn/testing: - %d", i)
+		}
+		t.Logf("crane test %s done with receiving", testID)
+		streamingWg.Done()
+	}()
+
+	// Wait for completion.
+	streamingWg.Wait()
+	close(finished)
+}
+
+var (
+	testIdentity     *cabin.Identity
+	testIdentityLock sync.Mutex
+)
+
+// getTestIdentity lazily creates and caches a shared test identity.
+// The lazy init is guarded by a mutex because both parallel crane tests
+// call this concurrently.
+func getTestIdentity(t *testing.T) (*cabin.Identity, *hub.Hub) {
+	t.Helper()
+
+	testIdentityLock.Lock()
+	defer testIdentityLock.Unlock()
+
+	if testIdentity == nil {
+		var err error
+		testIdentity, err = cabin.CreateIdentity(module.Ctx, "test")
+		if err != nil {
+			t.Fatalf("failed to create identity: %s", err)
+		}
+	}
+
+	return testIdentity, testIdentity.Hub
+}
diff --git a/spn/docks/crane_verify.go b/spn/docks/crane_verify.go
new file mode 100644
index 00000000..1f4e686d
--- /dev/null
+++ b/spn/docks/crane_verify.go
@@ -0,0 +1,85 @@
+package docks
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"time"
+
+	"github.com/safing/portbase/container"
+	"github.com/safing/portbase/formats/varint"
+	"github.com/safing/portmaster/spn/cabin"
+	"github.com/safing/portmaster/spn/terminal"
+)
+
+const (
+	hubVerificationPurpose = "hub identify verification"
+)
+
+// VerifyConnectedHub verifies the connected Hub.
+func (crane *Crane) VerifyConnectedHub(callerCtx context.Context) error {
+	if !crane.ship.IsMine() || crane.nextTerminalID != 0 || crane.Public() {
+		return errors.New("hub verification can only be executed in init phase by the client")
+	}
+
+	// Create verification request.
+	v, request, err := cabin.CreateVerificationRequest(hubVerificationPurpose, "", "")
+	if err != nil {
+		return fmt.Errorf("failed to create verification request: %w", err)
+	}
+
+	// Send it.
+	msg := container.New(
+		varint.Pack8(CraneMsgTypeVerify),
+		request,
+	)
+	msg.PrependLength()
+	err = crane.ship.Load(msg.CompileData())
+	if err != nil {
+		return terminal.ErrShipSunk.With("failed to send verification request: %w", err)
+	}
+
+	// Wait for reply.
+	var reply *container.Container
+	select {
+	case reply = <-crane.unloading:
+	case <-time.After(2 * time.Minute):
+		// Use a big timeout here, as this might keep servers from joining the
+		// network at all, as every servers needs to verify every server, no
+		// matter how far away.
+		return terminal.ErrTimeout.With("waiting for verification reply")
+	case <-crane.ctx.Done():
+		return terminal.ErrShipSunk.With("waiting for verification reply")
+	case <-callerCtx.Done():
+		return terminal.ErrShipSunk.With("waiting for verification reply")
+	}
+
+	// Verify reply.
+	return v.Verify(reply.CompileData(), crane.ConnectedHub)
+}
+
+func (crane *Crane) handleCraneVerification(request *container.Container) *terminal.Error {
+	// Check if we have an identity.
+	if crane.identity == nil {
+		return terminal.ErrIncorrectUsage.With("cannot handle verification request without designated identity")
+	}
+
+	response, err := crane.identity.SignVerificationRequest(
+		request.CompileData(),
+		hubVerificationPurpose,
+		"", "",
+	)
+	if err != nil {
+		return terminal.ErrInternalError.With("failed to sign verification request: %w", err)
+	}
+	msg := container.New(response)
+
+	// Manually send reply.
+	msg.PrependLength()
+	err = crane.ship.Load(msg.CompileData())
+	if err != nil {
+		return terminal.ErrShipSunk.With("failed to send verification reply: %w", err)
+	}
+
+	return nil
+}
diff --git a/spn/docks/cranehooks.go b/spn/docks/cranehooks.go
new file mode 100644
index 00000000..0355a4f7
--- /dev/null
+++ b/spn/docks/cranehooks.go
@@ -0,0 +1,46 @@
+package docks
+
+import (
+	"sync"
+
+	"github.com/safing/portbase/log"
+)
+
+var (
+	craneUpdateHook     func(crane *Crane)
+	craneUpdateHookLock sync.Mutex
+)
+
+// RegisterCraneUpdateHook allows the captain to hook into receiving updates for cranes.
+// Only a single hook is supported: a second registration is rejected and
+// logged as an error instead of replacing the existing hook.
+func RegisterCraneUpdateHook(fn func(crane *Crane)) {
+	craneUpdateHookLock.Lock()
+	defer craneUpdateHookLock.Unlock()
+
+	if craneUpdateHook == nil {
+		craneUpdateHook = fn
+	} else {
+		log.Error("spn/docks: crane update hook already registered")
+	}
+}
+
+// ResetCraneUpdateHook resets the hook for receiving updates for cranes,
+// allowing a new hook to be registered afterwards.
+func ResetCraneUpdateHook() {
+	craneUpdateHookLock.Lock()
+	defer craneUpdateHookLock.Unlock()
+
+	craneUpdateHook = nil
+}
+
+// NotifyUpdate calls the registered crane update hook function, if any.
+// It is safe to call on a nil crane.
+func (crane *Crane) NotifyUpdate() {
+	if crane == nil {
+		return
+	}
+
+	// The hook is invoked while holding the lock, so it must not call back
+	// into Register/ResetCraneUpdateHook.
+	craneUpdateHookLock.Lock()
+	defer craneUpdateHookLock.Unlock()
+
+	if craneUpdateHook != nil {
+		craneUpdateHook(crane)
+	}
+}
diff --git a/spn/docks/hub_import.go b/spn/docks/hub_import.go
new file mode 100644
index 00000000..377164f2
--- /dev/null
+++ b/spn/docks/hub_import.go
@@ -0,0 +1,189 @@
+package docks
+
+import (
+	"context"
+	"fmt"
+	"net"
+	"sync"
+
+	"github.com/safing/portbase/log"
+	"github.com/safing/portmaster/spn/conf"
+	"github.com/safing/portmaster/spn/hub"
+	"github.com/safing/portmaster/spn/ships"
+	"github.com/safing/portmaster/spn/terminal"
+)
+
+var hubImportLock sync.Mutex
+
+// ImportAndVerifyHubInfo imports the given hub announcement and status data
+// and verifies the resulting Hub. It returns the Hub, whether the data
+// should be forwarded via gossip, and the first error encountered.
+func ImportAndVerifyHubInfo(ctx context.Context, hubID string, announcementData, statusData []byte, mapName string, scope hub.Scope) (h *hub.Hub, forward bool, tErr *terminal.Error) {
+	var firstErr *terminal.Error
+
+	// Synchronize import, as we might easily learn of a new hub from different
+	// gossip channels simultaneously.
+	hubImportLock.Lock()
+	defer hubImportLock.Unlock()
+
+	// Check arguments.
+	if announcementData == nil && statusData == nil {
+		return nil, false, terminal.ErrInternalError.With("no announcement or status supplied")
+	}
+
+	// Import Announcement, if given.
+	var hubKnown, hubChanged bool
+	if announcementData != nil {
+		hubFromMsg, known, changed, err := hub.ApplyAnnouncement(nil, announcementData, mapName, scope, false)
+		if err != nil && firstErr == nil {
+			firstErr = terminal.ErrInternalError.With("failed to apply announcement: %w", err)
+		}
+		if known {
+			hubKnown = true
+		}
+		if changed {
+			hubChanged = true
+		}
+		if hubFromMsg != nil {
+			h = hubFromMsg
+		}
+	}
+
+	// Import Status, if given.
+	if statusData != nil {
+		hubFromMsg, known, changed, err := hub.ApplyStatus(h, statusData, mapName, scope, false)
+		if err != nil && firstErr == nil {
+			firstErr = terminal.ErrInternalError.With("failed to apply status: %w", err)
+		}
+		if known && announcementData == nil {
+			// If we parsed an announcement before, "known" will always be true here,
+			// as we supply hub.ApplyStatus with a hub.
+			hubKnown = true
+		}
+		if changed {
+			hubChanged = true
+		}
+		if hubFromMsg != nil {
+			h = hubFromMsg
+		}
+	}
+
+	// Only continue if we now have a Hub.
+	if h == nil {
+		if firstErr != nil {
+			return nil, false, firstErr
+		}
+		return nil, false, terminal.ErrInternalError.With("got no hub after data import")
+	}
+
+	// Abort if the given hub ID does not match.
+	// We may have just connected to the wrong IP address.
+	if hubID != "" && h.ID != hubID {
+		return nil, false, terminal.ErrInternalError.With("hub mismatch")
+	}
+
+	// Verify hub if:
+	// - There is no error up until here.
+	// - There has been any change.
+	// - The hub is not verified yet.
+	// - We're a public Hub.
+	// - We're not testing.
+	if firstErr == nil && hubChanged && !h.Verified() && conf.PublicHub() && !runningTests {
+		if !conf.HubHasIPv4() && !conf.HubHasIPv6() {
+			firstErr = terminal.ErrInternalError.With("no hub networks set")
+		}
+		if h.Info.IPv4 != nil && conf.HubHasIPv4() {
+			err := verifyHubIP(ctx, h, h.Info.IPv4)
+			if err != nil && firstErr == nil {
+				// Keep only the first error, per this variable's contract.
+				firstErr = terminal.ErrIntegrity.With("failed to verify IPv4 address %s of %s: %w", h.Info.IPv4, h, err)
+			}
+		}
+		if h.Info.IPv6 != nil && conf.HubHasIPv6() {
+			err := verifyHubIP(ctx, h, h.Info.IPv6)
+			if err != nil && firstErr == nil {
+				// Do not overwrite an earlier IPv4 verification error.
+				firstErr = terminal.ErrIntegrity.With("failed to verify IPv6 address %s of %s: %w", h.Info.IPv6, h, err)
+			}
+		}
+
+		if firstErr != nil {
+			func() {
+				h.Lock()
+				defer h.Unlock()
+				h.InvalidInfo = true
+			}()
+			log.Warningf("spn/docks: failed to verify IPs of %s: %s", h, firstErr)
+		} else {
+			func() {
+				h.Lock()
+				defer h.Unlock()
+				h.VerifiedIPs = true
+			}()
+			log.Infof("spn/docks: verified IPs of %s: IPv4=%s IPv6=%s", h, h.Info.IPv4, h.Info.IPv6)
+		}
+	}
+
+	// Dismiss initial imports with errors.
+	if !hubKnown && firstErr != nil {
+		return nil, false, firstErr
+	}
+
+	// Don't do anything if nothing changed.
+	if !hubChanged {
+		return h, false, firstErr
+	}
+
+	// We now have one of:
+	// - A unknown Hub without error.
+	// - A known Hub without error.
+	// - A known Hub with error, which we want to save and propagate.
+
+	// Save the Hub to the database.
+	err := h.Save()
+	if err != nil {
+		log.Errorf("spn/docks: failed to persist %s: %s", h, err)
+	}
+
+	// Save the raw messages to the database.
+	if announcementData != nil {
+		err = hub.SaveHubMsg(h.ID, h.Map, hub.MsgTypeAnnouncement, announcementData)
+		if err != nil {
+			log.Errorf("spn/docks: failed to save raw announcement msg of %s: %s", h, err)
+		}
+	}
+	if statusData != nil {
+		err = hub.SaveHubMsg(h.ID, h.Map, hub.MsgTypeStatus, statusData)
+		if err != nil {
+			log.Errorf("spn/docks: failed to save raw status msg of %s: %s", h, err)
+		}
+	}
+
+	return h, true, firstErr
+}
+
+// verifyHubIP connects to the Hub at the given IP address and has it
+// cryptographically prove its identity. A fresh connection is built and
+// torn down for the check.
+func verifyHubIP(ctx context.Context, h *hub.Hub, ip net.IP) error {
+	// Create connection.
+	ship, err := ships.Launch(ctx, h, nil, ip)
+	if err != nil {
+		return fmt.Errorf("failed to launch ship to %s: %w", ip, err)
+	}
+
+	// Start crane for receiving reply.
+	// NOTE(review): only the unloader worker is started instead of
+	// crane.Start() — presumably to keep the crane in the init phase, which
+	// VerifyConnectedHub requires; confirm.
+	crane, err := NewCrane(ship, h, nil)
+	if err != nil {
+		return fmt.Errorf("failed to create crane: %w", err)
+	}
+	module.StartWorker("crane unloader", crane.unloader)
+	defer crane.Stop(nil)
+
+	// Verify Hub.
+	err = crane.VerifyConnectedHub(ctx)
+	if err != nil {
+		return err
+	}
+
+	// End connection.
+	tErr := crane.endInit()
+	if tErr != nil {
+		log.Debugf("spn/docks: failed to end verification connection to %s: %s", ip, tErr)
+	}
+
+	return nil
+}
diff --git a/spn/docks/measurements.go b/spn/docks/measurements.go
new file mode 100644
index 00000000..ed2edfb3
--- /dev/null
+++ b/spn/docks/measurements.go
@@ -0,0 +1,108 @@
+package docks
+
+import (
+	"context"
+	"fmt"
+	"time"
+
+	"github.com/safing/portmaster/spn/hub"
+	"github.com/safing/portmaster/spn/ships"
+	"github.com/safing/portmaster/spn/terminal"
+)
+
+// Measurement Configuration.
+const (
+	CraneMeasurementTTLDefault    = 30 * time.Minute
+	CraneMeasurementTTLByCostBase = 1 * time.Minute
+	CraneMeasurementTTLByCostMin  = 30 * time.Minute
+	CraneMeasurementTTLByCostMax  = 3 * time.Hour
+
+	// With a base TTL of 1m, this leads to:
+	// 20c     -> 20m -> raised to 30m
+	// 50c     -> 50m
+	// 100c    -> 1h40m
+	// 1000c   -> 16h40m -> capped to 3h.
+)
+
+// MeasureHub measures the connection to this Hub and saves the results to the
+// Hub. If no crane is assigned to the Hub, a temporary connection is
+// established and stopped again afterwards.
+//
+// Each test (latency, capacity) is only run when checkExpiryWith is 0 or
+// the existing measurement expired more than checkExpiryWith ago.
+func MeasureHub(ctx context.Context, h *hub.Hub, checkExpiryWith time.Duration) *terminal.Error {
+	// Check if we are measuring before building a connection.
+	if capacityTestRunning.IsSet() {
+		return terminal.ErrTryAgainLater.With("another capacity op is already running")
+	}
+
+	// Check if we have a connection to this Hub.
+	crane := GetAssignedCrane(h.ID)
+	if crane == nil {
+		// Connect to Hub.
+		var err error
+		crane, err = establishCraneForMeasuring(ctx, h)
+		if err != nil {
+			return terminal.ErrConnectionError.With("failed to connect to %s: %s", h, err)
+		}
+		// Stop crane if established just for measuring.
+		defer crane.Stop(nil)
+	}
+
+	// Run latency test.
+	_, expires := h.GetMeasurements().GetLatency()
+	if checkExpiryWith == 0 || time.Now().Add(-checkExpiryWith).After(expires) {
+		latOp, tErr := NewLatencyTestOp(crane.Controller)
+		if !tErr.IsOK() {
+			return tErr
+		}
+		select {
+		case tErr = <-latOp.Result():
+			if !tErr.IsOK() {
+				return tErr
+			}
+		case <-ctx.Done():
+			return terminal.ErrCanceled
+		case <-time.After(1 * time.Minute):
+			// Stop the dangling operation before giving up.
+			crane.Controller.StopOperation(latOp, terminal.ErrTimeout)
+			return terminal.ErrTimeout.With("waiting for latency test")
+		}
+	}
+
+	// Run capacity test.
+	_, expires = h.GetMeasurements().GetCapacity()
+	if checkExpiryWith == 0 || time.Now().Add(-checkExpiryWith).After(expires) {
+		capOp, tErr := NewCapacityTestOp(crane.Controller, nil)
+		if !tErr.IsOK() {
+			return tErr
+		}
+		select {
+		case tErr = <-capOp.Result():
+			if !tErr.IsOK() {
+				return tErr
+			}
+		case <-ctx.Done():
+			return terminal.ErrCanceled
+		case <-time.After(1 * time.Minute):
+			// Stop the dangling operation before giving up.
+			crane.Controller.StopOperation(capOp, terminal.ErrTimeout)
+			return terminal.ErrTimeout.With("waiting for capacity test")
+		}
+	}
+
+	return nil
+}
+
+// establishCraneForMeasuring builds and starts a temporary crane to the
+// given destination Hub, for use by measurement operations only.
+func establishCraneForMeasuring(ctx context.Context, dst *hub.Hub) (*Crane, error) {
+	// Open a transport connection to the destination Hub.
+	newShip, launchErr := ships.Launch(ctx, dst, nil, nil)
+	if launchErr != nil {
+		return nil, fmt.Errorf("failed to launch ship: %w", launchErr)
+	}
+
+	// Wrap the connection in a crane and start it.
+	newCrane, craneErr := NewCrane(newShip, dst, nil)
+	if craneErr != nil {
+		return nil, fmt.Errorf("failed to create crane: %w", craneErr)
+	}
+	if startErr := newCrane.Start(ctx); startErr != nil {
+		return nil, fmt.Errorf("failed to start crane: %w", startErr)
+	}
+
+	return newCrane, nil
+}
diff --git a/spn/docks/metrics.go b/spn/docks/metrics.go
new file mode 100644
index 00000000..49df92bd
--- /dev/null
+++ b/spn/docks/metrics.go
@@ -0,0 +1,404 @@
+package docks
+
+import (
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"github.com/tevino/abool"
+
+	"github.com/safing/portbase/api"
+	"github.com/safing/portbase/metrics"
+)
+
+var (
+	newCranes              *metrics.Counter
+	newPublicCranes        *metrics.Counter
+	newAuthenticatedCranes *metrics.Counter
+
+	trafficBytesPublicCranes        *metrics.Counter
+	trafficBytesAuthenticatedCranes *metrics.Counter
+	trafficBytesPrivateCranes       *metrics.Counter
+
+	newExpandOp                  *metrics.Counter
+	expandOpDurationHistogram    *metrics.Histogram
+	expandOpRelayedDataHistogram *metrics.Histogram
+
+	metricsRegistered = abool.New()
+)
+
+func registerMetrics() (err error) {
+	// Only register metrics once.
+	if !metricsRegistered.SetToIf(false, true) {
+		return nil
+	}
+
+	// Total Crane Stats.
+
+	newCranes, err = metrics.NewCounter(
+		"spn/cranes/total",
+		nil,
+		&metrics.Options{
+			Name:       "SPN New Cranes",
+			Permission: api.PermitUser,
+		},
+	)
+	if err != nil {
+		return err
+	}
+
+	newPublicCranes, err = metrics.NewCounter(
+		"spn/cranes/public/total",
+		nil,
+		&metrics.Options{
+			Name:       "SPN New Public Cranes",
+			Permission: api.PermitUser,
+		},
+	)
+	if err != nil {
+		return err
+	}
+
+	newAuthenticatedCranes, err = metrics.NewCounter(
+		"spn/cranes/authenticated/total",
+		nil,
+		&metrics.Options{
+			Name:       "SPN New Authenticated Cranes",
+			Permission: api.PermitUser,
+		},
+	)
+	if err != nil {
+		return err
+	}
+
+	// Active Crane Stats.
+
+	_, err = metrics.NewGauge(
+		"spn/cranes/active",
+		map[string]string{
+			"status": "public",
+		},
+		getActivePublicCranes,
+		&metrics.Options{
+			Name:       "SPN Active Public Cranes",
+			Permission: api.PermitUser,
+		},
+	)
+	if err != nil {
+		return err
+	}
+
+	_, err = metrics.NewGauge(
+		"spn/cranes/active",
+		map[string]string{
+			"status": "authenticated",
+		},
+		getActiveAuthenticatedCranes,
+		&metrics.Options{
+			Name:       "SPN Active Authenticated Cranes",
+			Permission: api.PermitUser,
+		},
+	)
+	if err != nil {
+		return err
+	}
+
+	_, err = metrics.NewGauge(
+		"spn/cranes/active",
+		map[string]string{
+			"status": "private",
+		},
+		getActivePrivateCranes,
+		&metrics.Options{
+			Name:       "SPN Active Private Cranes",
+			Permission: api.PermitUser,
+		},
+	)
+	if err != nil {
+		return err
+	}
+
+	_, err = metrics.NewGauge(
+		"spn/cranes/active",
+		map[string]string{
+			"status": "stopping",
+		},
+		getActiveStoppingCranes,
+		&metrics.Options{
+			Name:       "SPN Active Stopping Cranes",
+			Permission: api.PermitUser,
+		},
+	)
+	if err != nil {
+		return err
+	}
+
+	// Crane Traffic Stats.
+
+	trafficBytesPublicCranes, err = metrics.NewCounter(
+		"spn/cranes/bytes",
+		map[string]string{
+			"status": "public",
+		},
+		&metrics.Options{
+			Name:       "SPN Public Crane Traffic",
+			Permission: api.PermitUser,
+		},
+	)
+	if err != nil {
+		return err
+	}
+
+	trafficBytesAuthenticatedCranes, err = metrics.NewCounter(
+		"spn/cranes/bytes",
+		map[string]string{
+			"status": "authenticated",
+		},
+		&metrics.Options{
+			Name:       "SPN Authenticated Crane Traffic",
+			Permission: api.PermitUser,
+		},
+	)
+	if err != nil {
+		return err
+	}
+
+	trafficBytesPrivateCranes, err = metrics.NewCounter(
+		"spn/cranes/bytes",
+		map[string]string{
+			"status": "private",
+		},
+		&metrics.Options{
+			Name:       "SPN Private Crane Traffic",
+			Permission: api.PermitUser,
+		},
+	)
+	if err != nil {
+		return err
+	}
+
+	// Lane Stats.
+
+	_, err = metrics.NewGauge(
+		"spn/lanes/latency/avg/seconds",
+		nil,
+		getAvgLaneLatencyStat,
+		&metrics.Options{
+			Name:       "SPN Avg Lane Latency",
+			Permission: api.PermitUser,
+		},
+	)
+	if err != nil {
+		return err
+	}
+
+	_, err = metrics.NewGauge(
+		"spn/lanes/latency/min/seconds",
+		nil,
+		getMinLaneLatencyStat,
+		&metrics.Options{
+			Name:       "SPN Min Lane Latency",
+			Permission: api.PermitUser,
+		},
+	)
+	if err != nil {
+		return err
+	}
+
+	_, err = metrics.NewGauge(
+		"spn/lanes/capacity/avg/bytes",
+		nil,
+		getAvgLaneCapacityStat,
+		&metrics.Options{
+			Name:       "SPN Avg Lane Capacity",
+			Permission: api.PermitUser,
+		},
+	)
+	if err != nil {
+		return err
+	}
+
+	_, err = metrics.NewGauge(
+		"spn/lanes/capacity/max/bytes",
+		nil,
+		getMaxLaneCapacityStat,
+		&metrics.Options{
+			Name:       "SPN Max Lane Capacity",
+			Permission: api.PermitUser,
+		},
+	)
+	if err != nil {
+		return err
+	}
+
+	// Expand Op Stats.
+
+	newExpandOp, err = metrics.NewCounter(
+		"spn/op/expand/total",
+		nil,
+		&metrics.Options{
+			Name:       "SPN Total Expand Operations",
+			Permission: api.PermitUser,
+		},
+	)
+	if err != nil {
+		return err
+	}
+
+	_, err = metrics.NewGauge(
+		"spn/op/expand/active",
+		nil,
+		getActiveExpandOpsStat,
+		&metrics.Options{
+			Name:       "SPN Active Expand Operations",
+			Permission: api.PermitUser,
+		},
+	)
+	if err != nil {
+		return err
+	}
+
+	expandOpDurationHistogram, err = metrics.NewHistogram(
+		"spn/op/expand/histogram/duration/seconds",
+		nil,
+		&metrics.Options{
+			Name:       "SPN Expand Operation Duration Histogram",
+			Permission: api.PermitUser,
+		},
+	)
+	if err != nil {
+		return err
+	}
+
+	expandOpRelayedDataHistogram, err = metrics.NewHistogram(
+		"spn/op/expand/histogram/traffic/bytes",
+		nil,
+		&metrics.Options{
+			Name:       "SPN Expand Operation Relayed Data Histogram",
+			Permission: api.PermitUser,
+		},
+	)
+	if err != nil {
+		return err
+	}
+
+	return err
+}
+
+func getActiveExpandOpsStat() float64 {
+	return float64(atomic.LoadInt64(activeExpandOps))
+}
+
+var (
+	craneStats        *craneGauges
+	craneStatsExpires time.Time
+	craneStatsLock    sync.Mutex
+	craneStatsTTL     = 55 * time.Second
+)
+
+type craneGauges struct {
+	publicActive        float64
+	authenticatedActive float64
+	privateActive       float64
+	stoppingActive      float64
+
+	laneLatencyAvg  float64
+	laneLatencyMin  float64
+	laneCapacityAvg float64
+	laneCapacityMax float64
+}
+
+func getActivePublicCranes() float64        { return getCraneStats().publicActive }
+func getActiveAuthenticatedCranes() float64 { return getCraneStats().authenticatedActive }
+func getActivePrivateCranes() float64       { return getCraneStats().privateActive }
+func getActiveStoppingCranes() float64      { return getCraneStats().stoppingActive }
+func getAvgLaneLatencyStat() float64        { return getCraneStats().laneLatencyAvg }
+func getMinLaneLatencyStat() float64        { return getCraneStats().laneLatencyMin }
+func getAvgLaneCapacityStat() float64       { return getCraneStats().laneCapacityAvg }
+func getMaxLaneCapacityStat() float64       { return getCraneStats().laneCapacityMax }
+
+// getCraneStats returns the cached crane gauge values, recomputing them from
+// all registered cranes when the cache (craneStatsTTL) has expired.
+func getCraneStats() *craneGauges {
+	craneStatsLock.Lock()
+	defer craneStatsLock.Unlock()
+
+	// Return cache if still valid.
+	if time.Now().Before(craneStatsExpires) {
+		return craneStats
+	}
+
+	// Refresh.
+	craneStats = &craneGauges{}
+	var laneStatCnt float64
+	for _, crane := range getAllCranes() {
+		switch {
+		case crane.Stopped():
+			continue
+		case crane.IsStopping():
+			craneStats.stoppingActive++
+			continue
+		case crane.Public():
+			// Intentionally no "continue": only public cranes fall through
+			// to the lane stat collection below.
+			craneStats.publicActive++
+		case crane.Authenticated():
+			craneStats.authenticatedActive++
+			continue
+		default:
+			craneStats.privateActive++
+			continue
+		}
+
+		// Get lane stats.
+		if crane.ConnectedHub == nil {
+			continue
+		}
+		measurements := crane.ConnectedHub.GetMeasurements()
+		laneLatency, _ := measurements.GetLatency()
+		if laneLatency == 0 {
+			continue
+		}
+		laneCapacity, _ := measurements.GetCapacity()
+		if laneCapacity == 0 {
+			continue
+		}
+
+		// Only use data if both latency and capacity is available.
+		laneStatCnt++
+
+		// Convert to base unit: seconds.
+		latency := laneLatency.Seconds()
+		// Add to avg and set min if lower.
+		craneStats.laneLatencyAvg += latency
+		if craneStats.laneLatencyMin > latency || craneStats.laneLatencyMin == 0 {
+			craneStats.laneLatencyMin = latency
+		}
+
+		// Convert in base unit: bytes.
+		// NOTE(review): capacity appears to be measured in bits, hence the
+		// division by 8 — confirm against GetCapacity's unit.
+		capacity := float64(laneCapacity) / 8
+		// Add to avg and set max if higher.
+		craneStats.laneCapacityAvg += capacity
+		if craneStats.laneCapacityMax < capacity {
+			craneStats.laneCapacityMax = capacity
+		}
+	}
+
+	// Create averages.
+	if laneStatCnt > 0 {
+		craneStats.laneLatencyAvg /= laneStatCnt
+		craneStats.laneCapacityAvg /= laneStatCnt
+	}
+
+	craneStatsExpires = time.Now().Add(craneStatsTTL)
+	return craneStats
+}
+
+// submitCraneTrafficStats adds the given byte count to the traffic counter
+// matching the crane's current status (public, authenticated, or private).
+// Stopped cranes are ignored.
+func (crane *Crane) submitCraneTrafficStats(bytes int) {
+	switch {
+	case crane.Stopped():
+		return
+	case crane.Public():
+		trafficBytesPublicCranes.Add(bytes)
+	case crane.Authenticated():
+		trafficBytesAuthenticatedCranes.Add(bytes)
+	default:
+		trafficBytesPrivateCranes.Add(bytes)
+	}
+}
diff --git a/spn/docks/module.go b/spn/docks/module.go
new file mode 100644
index 00000000..31a4da95
--- /dev/null
+++ b/spn/docks/module.go
@@ -0,0 +1,117 @@
+package docks
+
+import (
+	"encoding/hex"
+	"errors"
+	"fmt"
+	"sync"
+
+	"github.com/safing/portbase/modules"
+	"github.com/safing/portbase/rng"
+	_ "github.com/safing/portmaster/spn/access"
+)
+
+var (
+	module *modules.Module
+
+	allCranes      = make(map[string]*Crane) // ID = Crane ID
+	assignedCranes = make(map[string]*Crane) // ID = connected Hub ID
+	cranesLock     sync.RWMutex
+
+	runningTests bool
+)
+
+func init() {
+	module = modules.Register("docks", nil, start, stopAllCranes, "terminal", "cabin", "access")
+}
+
+func start() error {
+	return registerMetrics()
+}
+
+// registerCrane assigns a new unique ID to the crane and adds it to the
+// registry of all cranes. IDs are 3 random bytes (6 hex chars); up to 100
+// attempts are made to find an unused one before giving up.
+func registerCrane(crane *Crane) error {
+	cranesLock.Lock()
+	defer cranesLock.Unlock()
+
+	// Generate new IDs until a unique one is found.
+	for i := 0; i < 100; i++ {
+		// Generate random ID.
+		randomID, err := rng.Bytes(3)
+		if err != nil {
+			return fmt.Errorf("failed to generate crane ID: %w", err)
+		}
+		newID := hex.EncodeToString(randomID)
+
+		// Check if ID already exists.
+		_, ok := allCranes[newID]
+		if !ok {
+			crane.ID = newID
+			allCranes[crane.ID] = crane
+			return nil
+		}
+	}
+
+	return errors.New("failed to find unique crane ID")
+}
+
+// unregisterCrane removes the crane from the registry of all cranes, and
+// from the Hub assignment map if it was assigned to a connected Hub.
+func unregisterCrane(crane *Crane) {
+	cranesLock.Lock()
+	defer cranesLock.Unlock()
+
+	delete(allCranes, crane.ID)
+	if crane.ConnectedHub != nil {
+		delete(assignedCranes, crane.ConnectedHub.ID)
+	}
+}
+
+func stopAllCranes() error {
+	for _, crane := range getAllCranes() {
+		crane.Stop(nil)
+	}
+	return nil
+}
+
+// AssignCrane assigns a crane to the given Hub ID.
+func AssignCrane(hubID string, crane *Crane) {
+	cranesLock.Lock()
+	defer cranesLock.Unlock()
+
+	assignedCranes[hubID] = crane
+}
+
+// GetAssignedCrane returns the assigned crane of the given Hub ID.
+// It returns nil when no crane is assigned to that Hub.
+func GetAssignedCrane(hubID string) *Crane {
+	cranesLock.RLock()
+	defer cranesLock.RUnlock()
+
+	if assigned, ok := assignedCranes[hubID]; ok {
+		return assigned
+	}
+	return nil
+}
+
+// getAllCranes returns a copy of the map of all registered cranes.
+func getAllCranes() map[string]*Crane {
+	cranesLock.RLock()
+	defer cranesLock.RUnlock()
+
+	// Size the copy while holding the lock: reading len(allCranes) before
+	// acquiring it would race with concurrent (un)registration.
+	copiedCranes := make(map[string]*Crane, len(allCranes))
+	for id, crane := range allCranes {
+		copiedCranes[id] = crane
+	}
+	return copiedCranes
+}
+
+// GetAllAssignedCranes returns a copy of the map of all assigned cranes.
+func GetAllAssignedCranes() map[string]*Crane {
+	cranesLock.RLock()
+	defer cranesLock.RUnlock()
+
+	// Size the copy while holding the lock: reading len(assignedCranes)
+	// before acquiring it would race with concurrent assignment.
+	copiedCranes := make(map[string]*Crane, len(assignedCranes))
+	for destination, crane := range assignedCranes {
+		copiedCranes[destination] = crane
+	}
+	return copiedCranes
+}
diff --git a/spn/docks/module_test.go b/spn/docks/module_test.go
new file mode 100644
index 00000000..0383cc21
--- /dev/null
+++ b/spn/docks/module_test.go
@@ -0,0 +1,16 @@
+package docks
+
+import (
+	"testing"
+
+	"github.com/safing/portmaster/service/core/pmtesting"
+	"github.com/safing/portmaster/spn/access"
+	"github.com/safing/portmaster/spn/conf"
+)
+
+func TestMain(m *testing.M) {
+	runningTests = true
+	conf.EnablePublicHub(true) // Make hub config available.
+	access.EnableTestMode()    // Register test zone instead of real ones.
+	pmtesting.TestMain(m, module)
+}
diff --git a/spn/docks/op_capacity.go b/spn/docks/op_capacity.go
new file mode 100644
index 00000000..a66ae617
--- /dev/null
+++ b/spn/docks/op_capacity.go
@@ -0,0 +1,356 @@
+package docks
+
+import (
+	"bytes"
+	"context"
+	"sync/atomic"
+	"time"
+
+	"github.com/tevino/abool"
+
+	"github.com/safing/portbase/container"
+	"github.com/safing/portbase/formats/dsd"
+	"github.com/safing/portbase/log"
+	"github.com/safing/portmaster/spn/terminal"
+)
+
const (
	// CapacityTestOpType is the type ID of the capacity test operation.
	CapacityTestOpType = "capacity"

	// Volume limits: how much data a capacity test may transfer.
	defaultCapacityTestVolume = 50000000  // 50MB
	maxCapacityTestVolume     = 100000000 // 100MB

	// Time limits for a single capacity test run.
	defaultCapacityTestMaxTime = 5 * time.Second
	maxCapacityTestMaxTime     = 15 * time.Second
	capacityTestTimeout        = 30 * time.Second

	// capacityTestMsgSize is the size of a single test data chunk in bytes.
	capacityTestMsgSize     = 1000
	capacityTestSendTimeout = 1000 * time.Millisecond
)

var (
	// capacityTestSendData is the zeroed payload sent in every test chunk.
	capacityTestSendData = make([]byte, capacityTestMsgSize)
	// capacityTestDataReceivedSignal signals that the full test volume was received.
	capacityTestDataReceivedSignal = []byte("ACK")

	// capacityTestRunning guards against concurrently running capacity tests.
	capacityTestRunning = abool.New()
)
+
// CapacityTestOp is used for capacity test operations.
type CapacityTestOp struct { //nolint:maligned
	terminal.OperationBase

	// opts holds the test parameters.
	opts *CapacityTestOptions

	// started indicates that the first message was received.
	started bool
	// startTime is when the first message was received.
	startTime time.Time
	// senderStarted indicates that the sender worker was started.
	senderStarted bool

	// recvQueue delivers incoming messages to the handler worker.
	recvQueue chan *terminal.Msg
	// dataReceived counts the total bytes received.
	dataReceived int
	// dataReceivedAckWasAckd indicates that the data received signal was sent.
	dataReceivedAckWasAckd bool

	// dataSent counts the total bytes sent (accessed atomically).
	dataSent *int64
	// dataSentWasAckd indicates that the remote acknowledged our sent data.
	dataSentWasAckd *abool.AtomicBool

	// testResult is the measured capacity in bit/s.
	testResult int
	// result delivers the final operation error to Result().
	result chan *terminal.Error
}
+
// CapacityTestOptions holds options for the capacity test.
type CapacityTestOptions struct {
	// TestVolume is the amount of bytes to transfer during the test.
	TestVolume int
	// MaxTime is the maximum duration of the test.
	MaxTime time.Duration
	// testing disables the global single-test guard (used by unit tests).
	testing bool
}
+
// Type returns the type ID.
func (op *CapacityTestOp) Type() string {
	return CapacityTestOpType
}

func init() {
	// Register the capacity test operation.
	// It may only be started on crane controller terminals.
	terminal.RegisterOpType(terminal.OperationFactory{
		Type:     CapacityTestOpType,
		Requires: terminal.IsCraneController,
		Start:    startCapacityTestOp,
	})
}
+
+// NewCapacityTestOp runs a capacity test.
+func NewCapacityTestOp(t terminal.Terminal, opts *CapacityTestOptions) (*CapacityTestOp, *terminal.Error) {
+	// Check options.
+	if opts == nil {
+		opts = &CapacityTestOptions{
+			TestVolume: defaultCapacityTestVolume,
+			MaxTime:    defaultCapacityTestMaxTime,
+		}
+	}
+
+	// Check if another test is already running.
+	if !opts.testing && !capacityTestRunning.SetToIf(false, true) {
+		return nil, terminal.ErrTryAgainLater.With("another capacity op is already running")
+	}
+
+	// Create and init.
+	op := &CapacityTestOp{
+		opts:            opts,
+		recvQueue:       make(chan *terminal.Msg),
+		dataSent:        new(int64),
+		dataSentWasAckd: abool.New(),
+		result:          make(chan *terminal.Error, 1),
+	}
+
+	// Make capacity test request.
+	request, err := dsd.Dump(op.opts, dsd.CBOR)
+	if err != nil {
+		capacityTestRunning.UnSet()
+		return nil, terminal.ErrInternalError.With("failed to serialize capactity test options: %w", err)
+	}
+
+	// Send test request.
+	tErr := t.StartOperation(op, container.New(request), 1*time.Second)
+	if tErr != nil {
+		capacityTestRunning.UnSet()
+		return nil, tErr
+	}
+
+	// Start handler.
+	module.StartWorker("op capacity handler", op.handler)
+
+	return op, nil
+}
+
// startCapacityTestOp starts a capacity test operation requested by the
// remote side: it validates the received options against the local limits
// and starts both the handler and the sender worker.
func startCapacityTestOp(t terminal.Terminal, opID uint32, data *container.Container) (terminal.Operation, *terminal.Error) {
	// Check if another test is already running.
	if !capacityTestRunning.SetToIf(false, true) {
		return nil, terminal.ErrTryAgainLater.With("another capacity op is already running")
	}

	// Parse options.
	opts := &CapacityTestOptions{}
	_, err := dsd.Load(data.CompileData(), opts)
	if err != nil {
		capacityTestRunning.UnSet()
		return nil, terminal.ErrMalformedData.With("failed to parse options: %w", err)
	}

	// Check options against the local limits.
	if opts.TestVolume > maxCapacityTestVolume {
		capacityTestRunning.UnSet()
		return nil, terminal.ErrInvalidOptions.With("maximum volume exceeded")
	}
	if opts.MaxTime > maxCapacityTestMaxTime {
		capacityTestRunning.UnSet()
		return nil, terminal.ErrInvalidOptions.With("maximum maxtime exceeded")
	}

	// Create operation.
	// Note: the receive queue is buffered here, unlike on the client side.
	op := &CapacityTestOp{
		opts:            opts,
		recvQueue:       make(chan *terminal.Msg, 1000),
		dataSent:        new(int64),
		dataSentWasAckd: abool.New(),
		result:          make(chan *terminal.Error, 1),
	}
	op.InitOperationBase(t, opID)

	// Start handler and sender.
	op.senderStarted = true
	module.StartWorker("op capacity handler", op.handler)
	module.StartWorker("op capacity sender", op.sender)

	return op, nil
}
+
+func (op *CapacityTestOp) handler(ctx context.Context) error {
+	defer capacityTestRunning.UnSet()
+
+	returnErr := terminal.ErrStopping
+	defer func() {
+		// Linters don't get that returnErr is used when directly used as defer.
+		op.Stop(op, returnErr)
+	}()
+
+	var maxTestTimeReached <-chan time.Time
+	opTimeout := time.After(capacityTestTimeout)
+
+	// Setup unit handling
+	var msg *terminal.Msg
+	defer msg.Finish()
+
+	// Handle receives.
+	for {
+		msg.Finish()
+
+		select {
+		case <-ctx.Done():
+			returnErr = terminal.ErrCanceled
+			return nil
+
+		case <-opTimeout:
+			returnErr = terminal.ErrTimeout
+			return nil
+
+		case <-maxTestTimeReached:
+			returnErr = op.reportMeasuredCapacity()
+			return nil
+
+		case msg = <-op.recvQueue:
+			// Record start time and start sender.
+			if !op.started {
+				op.started = true
+				op.startTime = time.Now()
+				maxTestTimeReached = time.After(op.opts.MaxTime)
+				if !op.senderStarted {
+					op.senderStarted = true
+					module.StartWorker("op capacity sender", op.sender)
+				}
+			}
+
+			// Add to received data counter.
+			op.dataReceived += msg.Data.Length()
+
+			// Check if we received the data received signal.
+			if msg.Data.Length() == len(capacityTestDataReceivedSignal) &&
+				bytes.Equal(msg.Data.CompileData(), capacityTestDataReceivedSignal) {
+				op.dataSentWasAckd.Set()
+			}
+
+			// Send the data received signal when we received the full test volume.
+			if op.dataReceived >= op.opts.TestVolume && !op.dataReceivedAckWasAckd {
+				tErr := op.Send(op.NewMsg(capacityTestDataReceivedSignal), capacityTestSendTimeout)
+				if tErr != nil {
+					returnErr = tErr.Wrap("failed to send data received signal")
+					return nil
+				}
+				atomic.AddInt64(op.dataSent, int64(len(capacityTestDataReceivedSignal)))
+				op.dataReceivedAckWasAckd = true
+
+				// Flush last message.
+				op.Flush(10 * time.Second)
+			}
+
+			// Check if we can complete the test.
+			if op.dataReceivedAckWasAckd &&
+				op.dataSentWasAckd.IsSet() {
+				returnErr = op.reportMeasuredCapacity()
+				return nil
+			}
+		}
+	}
+}
+
+func (op *CapacityTestOp) sender(ctx context.Context) error {
+	for {
+		// Send next chunk.
+		msg := op.NewMsg(capacityTestSendData)
+		msg.Unit.MakeHighPriority()
+		tErr := op.Send(msg, capacityTestSendTimeout)
+		if tErr != nil {
+			op.Stop(op, tErr.Wrap("failed to send capacity test data"))
+			return nil
+		}
+
+		// Add to sent data counter and stop sending if sending is complete.
+		if atomic.AddInt64(op.dataSent, int64(len(capacityTestSendData))) >= int64(op.opts.TestVolume) {
+			return nil
+		}
+
+		// Check if we have received an ack.
+		if op.dataSentWasAckd.IsSet() {
+			return nil
+		}
+
+		// Check if op has ended.
+		if op.Stopped() {
+			return nil
+		}
+	}
+}
+
+func (op *CapacityTestOp) reportMeasuredCapacity() *terminal.Error {
+	// Calculate lane capacity and set it.
+	timeNeeded := time.Since(op.startTime)
+	if timeNeeded <= 0 {
+		timeNeeded = 1
+	}
+	duplexBits := float64((int64(op.dataReceived) + atomic.LoadInt64(op.dataSent)) * 8)
+	duplexNSBitRate := duplexBits / float64(timeNeeded)
+	bitRate := (duplexNSBitRate / 2) * float64(time.Second)
+	op.testResult = int(bitRate)
+
+	// Save the result to the crane.
+	if controller, ok := op.Terminal().(*CraneControllerTerminal); ok {
+		if controller.Crane.ConnectedHub != nil {
+			controller.Crane.ConnectedHub.GetMeasurements().SetCapacity(op.testResult)
+			log.Infof(
+				"docks: measured capacity to %s: %.2f Mbit/s (%.2fMB down / %.2fMB up in %s)",
+				controller.Crane.ConnectedHub,
+				float64(op.testResult)/1000000,
+				float64(op.dataReceived)/1000000,
+				float64(atomic.LoadInt64(op.dataSent))/1000000,
+				timeNeeded,
+			)
+			return nil
+		} else if controller.Crane.IsMine() {
+			return terminal.ErrInternalError.With("capacity operation was run on %s without a connected hub set", controller.Crane)
+		}
+	} else if !runningTests {
+		return terminal.ErrInternalError.With("capacity operation was run on terminal that is not a crane controller, but %T", op.Terminal())
+	}
+
+	return nil
+}
+
// Deliver delivers a message to the operation.
func (op *CapacityTestOp) Deliver(msg *terminal.Msg) *terminal.Error {
	// Optimized delivery with 1s timeout.
	// Try a non-blocking send first; only fall back to a timer-based send
	// when the queue is full.
	select {
	case op.recvQueue <- msg:
	default:
		select {
		case op.recvQueue <- msg:
		case <-time.After(1 * time.Second):
			// Finish the message to release its unit before dropping it.
			msg.Finish()
			return terminal.ErrTimeout
		}
	}
	return nil
}
+
// HandleStop gives the operation the ability to cleanly shut down.
// The returned error is the error to send to the other side.
// Should never be called directly. Call Stop() instead.
func (op *CapacityTestOp) HandleStop(tErr *terminal.Error) (errorToSend *terminal.Error) {
	// Return result to waiting routine.
	// Non-blocking: the result channel is buffered with size 1 and an
	// already delivered result takes precedence.
	select {
	case op.result <- tErr:
	default:
	}

	// Drain the recvQueue to finish the message units.
	// Keep draining while messages trickle in and stop after a short
	// quiet period.
drain:
	for {
		select {
		case msg := <-op.recvQueue:
			msg.Finish()
		default:
			select {
			case msg := <-op.recvQueue:
				msg.Finish()
			case <-time.After(3 * time.Millisecond):
				// Give some additional time buffer to drain the queue.
				break drain
			}
		}
	}

	// Return error as is.
	return tErr
}
+
// Result returns the result (end error) of the operation.
// The channel is buffered (size 1) and delivers a single value when the
// operation ends.
func (op *CapacityTestOp) Result() <-chan *terminal.Error {
	return op.result
}
diff --git a/spn/docks/op_capacity_test.go b/spn/docks/op_capacity_test.go
new file mode 100644
index 00000000..1aaa1437
--- /dev/null
+++ b/spn/docks/op_capacity_test.go
@@ -0,0 +1,85 @@
+package docks
+
+import (
+	"testing"
+	"time"
+
+	"github.com/safing/portmaster/spn/terminal"
+)
+
var (
	// Reduced test volume and max time compared to the operation defaults,
	// so the test completes quickly.
	testCapacityTestVolume  = 1_000_000
	testCapacitytestMaxTime = 1 * time.Second
)
+
// TestCapacityOp runs the capacity test with several option combinations:
// defaults, hitting the max time first, and hitting the volume first.
func TestCapacityOp(t *testing.T) { //nolint:paralleltest // Performance test.
	// Defaults.
	testCapacityOp(t, &CapacityTestOptions{
		TestVolume: testCapacityTestVolume,
		MaxTime:    testCapacitytestMaxTime,
		testing:    true,
	})

	// Hit max time first.
	testCapacityOp(t, &CapacityTestOptions{
		TestVolume: testCapacityTestVolume,
		MaxTime:    100 * time.Millisecond,
		testing:    true,
	})

	// Hit volume first.
	testCapacityOp(t, &CapacityTestOptions{
		TestVolume: 100_000,
		MaxTime:    testCapacitytestMaxTime,
		testing:    true,
	})
}
+
+func testCapacityOp(t *testing.T, opts *CapacityTestOptions) {
+	t.Helper()
+
+	var (
+		capTestDelay            = 5 * time.Millisecond
+		capTestQueueSize uint32 = 10
+	)
+
+	// Create test terminal pair.
+	a, b, err := terminal.NewSimpleTestTerminalPair(
+		capTestDelay,
+		int(capTestQueueSize),
+		&terminal.TerminalOpts{
+			FlowControl:     terminal.FlowControlDFQ,
+			FlowControlSize: capTestQueueSize,
+		},
+	)
+	if err != nil {
+		t.Fatalf("failed to create test terminal pair: %s", err)
+	}
+
+	// Grant permission for op on remote terminal and start op.
+	b.GrantPermission(terminal.IsCraneController)
+	op, tErr := NewCapacityTestOp(a, opts)
+	if tErr != nil {
+		t.Fatalf("failed to start op: %s", err)
+	}
+
+	// Wait for result and check error.
+	tErr = <-op.Result()
+	if !tErr.IsOK() {
+		t.Fatalf("op failed: %s", tErr)
+	}
+	t.Logf("measured capacity: %d bit/s", op.testResult)
+
+	// Calculate expected bandwidth.
+	expectedBitsPerSecond := float64(capacityTestMsgSize*8*int64(capTestQueueSize)) / float64(capTestDelay) * float64(time.Second)
+	t.Logf("expected capacity: %f bit/s", expectedBitsPerSecond)
+
+	// Check if measured bandwidth is within parameters.
+	if float64(op.testResult) > expectedBitsPerSecond*1.6 {
+		t.Fatal("measured capacity too high")
+	}
+	// TODO: Check if we can raise this to at least 90%.
+	if float64(op.testResult) < expectedBitsPerSecond*0.2 {
+		t.Fatal("measured capacity too low")
+	}
+}
diff --git a/spn/docks/op_expand.go b/spn/docks/op_expand.go
new file mode 100644
index 00000000..4a96c766
--- /dev/null
+++ b/spn/docks/op_expand.go
@@ -0,0 +1,393 @@
+package docks
+
+import (
+	"context"
+	"fmt"
+	"sync/atomic"
+	"time"
+
+	"github.com/tevino/abool"
+
+	"github.com/safing/portbase/container"
+	"github.com/safing/portmaster/spn/conf"
+	"github.com/safing/portmaster/spn/terminal"
+)
+
+// ExpandOpType is the type ID of the expand operation.
+const ExpandOpType string = "expand"
+
// activeExpandOps counts the currently active expand operations
// (accessed atomically).
var activeExpandOps = new(int64)
+
// ExpandOp is used to expand to another Hub.
type ExpandOp struct {
	terminal.OperationBase
	// opts holds the terminal options used for the expansion.
	opts *terminal.TerminalOpts

	// ctx is the context of the Terminal.
	ctx context.Context
	// cancelCtx cancels ctx.
	cancelCtx context.CancelFunc

	// dataRelayed counts the relayed bytes in both directions (accessed atomically).
	dataRelayed *uint64
	// ended indicates whether the operation has ended.
	ended *abool.AtomicBool

	// relayTerminal is the terminal established on the relay destination crane.
	relayTerminal *ExpansionRelayTerminal

	// flowControl holds the flow control system.
	flowControl terminal.FlowControl
	// deliverProxy is populated with the configured deliver function
	deliverProxy func(msg *terminal.Msg) *terminal.Error
	// recvProxy is populated with the configured recv function
	recvProxy func() <-chan *terminal.Msg
	// sendProxy is populated with the configured send function
	sendProxy func(msg *terminal.Msg, timeout time.Duration)
}
+
// ExpansionRelayTerminal is a relay used for expansion.
// It represents the terminal established on the next crane on behalf of
// the owning ExpandOp.
type ExpansionRelayTerminal struct {
	terminal.BareTerminal

	// op is the expand operation this relay terminal belongs to.
	op *ExpandOp

	// id is the terminal ID on the relay crane.
	id uint32
	// crane is the crane the relay terminal is established on.
	crane *Crane

	// abandoning guards against running the abandon procedure more than once.
	abandoning *abool.AtomicBool

	// flowControl holds the flow control system.
	flowControl terminal.FlowControl
	// deliverProxy is populated with the configured deliver function
	deliverProxy func(msg *terminal.Msg) *terminal.Error
	// recvProxy is populated with the configured recv function
	recvProxy func() <-chan *terminal.Msg
	// sendProxy is populated with the configured send function
	sendProxy func(msg *terminal.Msg, timeout time.Duration)
}
+
// Type returns the type ID.
func (op *ExpandOp) Type() string {
	return ExpandOpType
}

// ID returns the relay terminal ID.
// (The previous comment said "operation ID", which was a copy-paste slip.)
func (t *ExpansionRelayTerminal) ID() uint32 {
	return t.id
}

// Ctx returns the operation context.
func (op *ExpandOp) Ctx() context.Context {
	return op.ctx
}

// Ctx returns the relay terminal context, which is shared with the owning
// operation.
func (t *ExpansionRelayTerminal) Ctx() context.Context {
	return t.op.ctx
}
+
// Deliver delivers a message to the relay operation.
// It delegates to the deliver function configured at creation time.
func (op *ExpandOp) Deliver(msg *terminal.Msg) *terminal.Error {
	return op.deliverProxy(msg)
}

// Deliver delivers a message to the relay terminal.
// It delegates to the deliver function configured at creation time.
func (t *ExpansionRelayTerminal) Deliver(msg *terminal.Msg) *terminal.Error {
	return t.deliverProxy(msg)
}

// Flush writes all data in the queues.
// It is a no-op when no flow control system is configured.
func (op *ExpandOp) Flush(timeout time.Duration) {
	if op.flowControl != nil {
		op.flowControl.Flush(timeout)
	}
}

// Flush writes all data in the queues.
// It is a no-op when no flow control system is configured.
func (t *ExpansionRelayTerminal) Flush(timeout time.Duration) {
	if t.flowControl != nil {
		t.flowControl.Flush(timeout)
	}
}
+
func init() {
	// Register the expand operation.
	// It requires the MayExpand permission.
	terminal.RegisterOpType(terminal.OperationFactory{
		Type:     ExpandOpType,
		Requires: terminal.MayExpand,
		Start:    expand,
	})
}
+
+func expand(t terminal.Terminal, opID uint32, data *container.Container) (terminal.Operation, *terminal.Error) {
+	// Submit metrics.
+	newExpandOp.Inc()
+
+	// Check if we are running a public hub.
+	if !conf.PublicHub() {
+		return nil, terminal.ErrPermissionDenied.With("expanding is only allowed on public hubs")
+	}
+
+	// Parse destination hub ID.
+	dstData, err := data.GetNextBlock()
+	if err != nil {
+		return nil, terminal.ErrMalformedData.With("failed to parse destination: %w", err)
+	}
+
+	// Parse terminal options.
+	opts, tErr := terminal.ParseTerminalOpts(data)
+	if tErr != nil {
+		return nil, tErr.Wrap("failed to parse terminal options")
+	}
+
+	// Get crane with destination.
+	relayCrane := GetAssignedCrane(string(dstData))
+	if relayCrane == nil {
+		return nil, terminal.ErrHubUnavailable.With("no crane assigned to %q", string(dstData))
+	}
+
+	// TODO: Expand outside of hot path.
+
+	// Create operation and terminal.
+	op := &ExpandOp{
+		opts:        opts,
+		dataRelayed: new(uint64),
+		ended:       abool.New(),
+		relayTerminal: &ExpansionRelayTerminal{
+			crane:      relayCrane,
+			id:         relayCrane.getNextTerminalID(),
+			abandoning: abool.New(),
+		},
+	}
+	op.InitOperationBase(t, opID)
+	op.ctx, op.cancelCtx = context.WithCancel(t.Ctx())
+	op.relayTerminal.op = op
+
+	// Create flow control.
+	switch opts.FlowControl {
+	case terminal.FlowControlDFQ:
+		// Operation
+		op.flowControl = terminal.NewDuplexFlowQueue(op.ctx, opts.FlowControlSize, op.submitBackwardUpstream)
+		op.deliverProxy = op.flowControl.Deliver
+		op.recvProxy = op.flowControl.Receive
+		op.sendProxy = op.submitBackwardFlowControl
+		// Relay Terminal
+		op.relayTerminal.flowControl = terminal.NewDuplexFlowQueue(op.ctx, opts.FlowControlSize, op.submitForwardUpstream)
+		op.relayTerminal.deliverProxy = op.relayTerminal.flowControl.Deliver
+		op.relayTerminal.recvProxy = op.relayTerminal.flowControl.Receive
+		op.relayTerminal.sendProxy = op.submitForwardFlowControl
+	case terminal.FlowControlNone:
+		// Operation
+		deliverToOp := make(chan *terminal.Msg, opts.FlowControlSize)
+		op.deliverProxy = terminal.MakeDirectDeliveryDeliverFunc(op.ctx, deliverToOp)
+		op.recvProxy = terminal.MakeDirectDeliveryRecvFunc(deliverToOp)
+		op.sendProxy = op.submitBackwardUpstream
+		// Relay Terminal
+		deliverToRelay := make(chan *terminal.Msg, opts.FlowControlSize)
+		op.relayTerminal.deliverProxy = terminal.MakeDirectDeliveryDeliverFunc(op.ctx, deliverToRelay)
+		op.relayTerminal.recvProxy = terminal.MakeDirectDeliveryRecvFunc(deliverToRelay)
+		op.relayTerminal.sendProxy = op.submitForwardUpstream
+	case terminal.FlowControlDefault:
+		fallthrough
+	default:
+		return nil, terminal.ErrInternalError.With("unknown flow control type %d", opts.FlowControl)
+	}
+
+	// Establish terminal on destination.
+	newInitData, tErr := opts.Pack()
+	if tErr != nil {
+		return nil, terminal.ErrInternalError.With("failed to re-pack options: %w", err)
+	}
+	tErr = op.relayTerminal.crane.EstablishNewTerminal(op.relayTerminal, newInitData)
+	if tErr != nil {
+		return nil, tErr
+	}
+
+	// Start workers.
+	module.StartWorker("expand op forward relay", op.forwardHandler)
+	module.StartWorker("expand op backward relay", op.backwardHandler)
+	if op.flowControl != nil {
+		op.flowControl.StartWorkers(module, "expand op")
+	}
+	if op.relayTerminal.flowControl != nil {
+		op.relayTerminal.flowControl.StartWorkers(module, "expand op terminal")
+	}
+
+	return op, nil
+}
+
// submitForwardFlowControl submits a message to the relay terminal's flow
// control system, stopping the operation on failure.
func (op *ExpandOp) submitForwardFlowControl(msg *terminal.Msg, timeout time.Duration) {
	err := op.relayTerminal.flowControl.Send(msg, timeout)
	if err != nil {
		// Release the message unit before stopping.
		msg.Finish()
		op.Stop(op, err.Wrap("failed to submit to forward flow control"))
	}
}

// submitBackwardFlowControl submits a message to the operation's own flow
// control system, stopping the operation on failure.
func (op *ExpandOp) submitBackwardFlowControl(msg *terminal.Msg, timeout time.Duration) {
	err := op.flowControl.Send(msg, timeout)
	if err != nil {
		// Release the message unit before stopping.
		msg.Finish()
		op.Stop(op, err.Wrap("failed to submit to backward flow control"))
	}
}
+
// submitForwardUpstream sends a message directly to the relay crane,
// addressed to the relay terminal.
func (op *ExpandOp) submitForwardUpstream(msg *terminal.Msg, timeout time.Duration) {
	msg.FlowID = op.relayTerminal.id
	// Use priority data messages only when configured and the unit is
	// high priority.
	if msg.Unit.IsHighPriority() && op.opts.UsePriorityDataMsgs {
		msg.Type = terminal.MsgTypePriorityData
	} else {
		msg.Type = terminal.MsgTypeData
	}
	err := op.relayTerminal.crane.Send(msg, timeout)
	if err != nil {
		msg.Finish()
		op.Stop(op, err.Wrap("failed to submit to forward upstream"))
	}
}

// submitBackwardUpstream sends a message back towards the originating
// terminal via the operation itself.
func (op *ExpandOp) submitBackwardUpstream(msg *terminal.Msg, timeout time.Duration) {
	// NOTE(review): FlowID is set to the relay terminal ID here as well —
	// confirm that op.Send() overrides it for the backward direction.
	msg.FlowID = op.relayTerminal.id
	if msg.Unit.IsHighPriority() && op.opts.UsePriorityDataMsgs {
		msg.Type = terminal.MsgTypePriorityData
	} else {
		msg.Type = terminal.MsgTypeData
		msg.Unit.RemovePriority()
	}
	// Note: op.Send() will transform high priority units to priority data msgs.
	err := op.Send(msg, timeout)
	if err != nil {
		msg.Finish()
		op.Stop(op, err.Wrap("failed to submit to backward upstream"))
	}
}
+
// forwardHandler relays messages from the originating terminal to the
// relay terminal until the operation context is canceled. It also submits
// the expand op metrics on exit.
func (op *ExpandOp) forwardHandler(_ context.Context) error {
	// Metrics setup and submitting.
	atomic.AddInt64(activeExpandOps, 1)
	started := time.Now()
	defer func() {
		atomic.AddInt64(activeExpandOps, -1)
		expandOpDurationHistogram.UpdateDuration(started)
		expandOpRelayedDataHistogram.Update(float64(atomic.LoadUint64(op.dataRelayed)))
	}()

	for {
		select {
		case msg := <-op.recvProxy():
			// Debugging:
			// log.Debugf("spn/testing: forwarding at %s: %s", op.FmtID(), spew.Sdump(c.CompileData()))

			// Wait for processing slot.
			msg.Unit.WaitForSlot()

			// Count relayed data for metrics.
			atomic.AddUint64(op.dataRelayed, uint64(msg.Data.Length()))

			// Receive data from the origin and forward it to the relay.
			op.relayTerminal.sendProxy(msg, 1*time.Minute)

		case <-op.ctx.Done():
			return nil
		}
	}
}
+
// backwardHandler relays messages from the relay terminal back to the
// originating terminal until the operation context is canceled.
func (op *ExpandOp) backwardHandler(_ context.Context) error {
	for {
		select {
		case msg := <-op.relayTerminal.recvProxy():
			// Debugging:
			// log.Debugf("spn/testing: backwarding at %s: %s", op.FmtID(), spew.Sdump(c.CompileData()))

			// Wait for processing slot.
			msg.Unit.WaitForSlot()

			// Count relayed data for metrics.
			atomic.AddUint64(op.dataRelayed, uint64(msg.Data.Length()))

			// Receive data from the relay and forward it to the origin.
			op.sendProxy(msg, 1*time.Minute)

		case <-op.ctx.Done():
			return nil
		}
	}
}
+
// HandleStop gives the operation the ability to cleanly shut down.
// The returned error is the error to send to the other side.
// Should never be called directly. Call Stop() instead.
func (op *ExpandOp) HandleStop(err *terminal.Error) (errorToSend *terminal.Error) {
	// Flush all messages before stopping.
	op.Flush(1 * time.Minute)
	op.relayTerminal.Flush(1 * time.Minute)

	// Stop connected workers.
	op.cancelCtx()

	// Abandon connected terminal.
	op.relayTerminal.Abandon(nil)

	// Add context to error.
	if err.IsError() {
		return err.Wrap("relay operation failed with")
	}
	return err
}
+
// Abandon shuts down the terminal unregistering it from upstream and calling HandleAbandon().
// It is safe to call multiple times; the abandon procedure only runs once.
func (t *ExpansionRelayTerminal) Abandon(err *terminal.Error) {
	// Only start the abandon procedure on the first call.
	if t.abandoning.SetToIf(false, true) {
		module.StartWorker("terminal abandon procedure", func(_ context.Context) error {
			t.handleAbandonProcedure(err)
			return nil
		})
	}
}
+
// HandleAbandon gives the terminal the ability to cleanly shut down.
// The returned error is the error to send to the other side.
// Should never be called directly. Call Abandon() instead.
func (t *ExpansionRelayTerminal) HandleAbandon(err *terminal.Error) (errorToSend *terminal.Error) {
	// Stop the connected relay operation.
	t.op.Stop(t.op, err)

	// Add context to error.
	if err.IsError() {
		return err.Wrap("relay terminal failed with")
	}
	return err
}

// HandleDestruction gives the terminal the ability to clean up.
// The terminal has already fully shut down at this point.
// Should never be called directly. Call Abandon() instead.
// The relay terminal has no additional state to clean up.
func (t *ExpansionRelayTerminal) HandleDestruction(err *terminal.Error) {}
+
// handleAbandonProcedure runs the abandon sequence: it stops the connected
// operation, flushes pending messages and notifies the relay crane of the
// shutdown.
func (t *ExpansionRelayTerminal) handleAbandonProcedure(err *terminal.Error) {
	// Call operation stop handle function for proper shutdown cleaning up.
	err = t.HandleAbandon(err)

	// Flush all messages before stopping.
	t.Flush(1 * time.Minute)

	// Send error to the connected Operation, if the error is internal.
	// NOTE(review): err may be nil here; this assumes Error.IsExternal is
	// nil-safe — confirm.
	if !err.IsExternal() {
		if err == nil {
			err = terminal.ErrStopping
		}

		msg := terminal.NewMsg(err.Pack())
		msg.FlowID = t.ID()
		msg.Type = terminal.MsgTypeStop
		t.op.submitForwardUpstream(msg, 1*time.Second)
	}
}
+
// FmtID returns the expansion ID hierarchy:
// "<origin terminal>><op ID> <r> <relay crane ID>#<relay terminal ID>".
func (op *ExpandOp) FmtID() string {
	return fmt.Sprintf("%s>%d <r> %s#%d", op.Terminal().FmtID(), op.ID(), op.relayTerminal.crane.ID, op.relayTerminal.id)
}

// FmtID returns the expansion ID hierarchy: "<crane ID>#<terminal ID>".
func (t *ExpansionRelayTerminal) FmtID() string {
	return fmt.Sprintf("%s#%d", t.crane.ID, t.id)
}
diff --git a/spn/docks/op_latency.go b/spn/docks/op_latency.go
new file mode 100644
index 00000000..02c38f78
--- /dev/null
+++ b/spn/docks/op_latency.go
@@ -0,0 +1,298 @@
+package docks
+
+import (
+	"bytes"
+	"context"
+	"fmt"
+	"time"
+
+	"github.com/safing/portbase/container"
+	"github.com/safing/portbase/formats/varint"
+	"github.com/safing/portbase/log"
+	"github.com/safing/portbase/rng"
+	"github.com/safing/portmaster/spn/terminal"
+)
+
const (
	// LatencyTestOpType is the type ID of the latency test operation.
	LatencyTestOpType = "latency"

	// Message type IDs used within the operation.
	latencyPingRequest  = 1
	latencyPingResponse = 2

	// latencyTestNonceSize is the size of the random ping nonce in bytes.
	latencyTestNonceSize = 16
	// latencyTestRuns is how many pings are sent per test.
	latencyTestRuns = 10
)

var (
	// latencyTestPauseDuration is the pause between two pings.
	// It is a variable so tests can reduce it.
	latencyTestPauseDuration = 1 * time.Second
	// latencyTestOpTimeout bounds the whole latency test operation.
	latencyTestOpTimeout = latencyTestRuns * latencyTestPauseDuration * 3
)
+
// LatencyTestOp is used to measure latency.
// This is the server side of the operation, which echoes ping requests.
type LatencyTestOp struct {
	terminal.OperationBase
}

// LatencyTestClientOp is the client version of LatencyTestOp.
type LatencyTestClientOp struct {
	LatencyTestOp

	// lastPingSentAt is when the last ping request was sent.
	lastPingSentAt time.Time
	// lastPingNonce is the nonce of the outstanding ping request.
	lastPingNonce []byte
	// measuredLatencies collects the round trip time of each ping.
	measuredLatencies []time.Duration
	// responses delivers incoming messages to the handler worker.
	responses chan *terminal.Msg
	// testResult is the final measured latency (lowest round trip time).
	testResult time.Duration

	// result delivers the final operation error to Result().
	result chan *terminal.Error
}
+
// Type returns the type ID.
func (op *LatencyTestOp) Type() string {
	return LatencyTestOpType
}

func init() {
	// Register the latency test operation.
	// It may only be started on crane controller terminals.
	terminal.RegisterOpType(terminal.OperationFactory{
		Type:     LatencyTestOpType,
		Requires: terminal.IsCraneController,
		Start:    startLatencyTestOp,
	})
}
+
// NewLatencyTestOp runs a latency test on the given terminal.
// It sends the first ping immediately and starts a handler worker that
// keeps pinging until enough latency samples were collected.
func NewLatencyTestOp(t terminal.Terminal) (*LatencyTestClientOp, *terminal.Error) {
	// Create and init.
	op := &LatencyTestClientOp{
		responses:         make(chan *terminal.Msg),
		measuredLatencies: make([]time.Duration, 0, latencyTestRuns),
		result:            make(chan *terminal.Error, 1),
	}

	// Make ping request.
	pingRequest, err := op.createPingRequest()
	if err != nil {
		return nil, terminal.ErrInternalError.With("%w", err)
	}

	// Send ping.
	tErr := t.StartOperation(op, pingRequest, 1*time.Second)
	if tErr != nil {
		return nil, tErr
	}

	// Start handler.
	module.StartWorker("op latency handler", op.handler)

	return op, nil
}
+
// handler drives the client side of the latency test: it schedules pings,
// processes responses and reports the result when all runs are done.
func (op *LatencyTestClientOp) handler(ctx context.Context) error {
	returnErr := terminal.ErrStopping
	defer func() {
		// Linters don't get that returnErr is used when directly used as defer.
		op.Stop(op, returnErr)
	}()

	// nextTest stays nil (blocking forever) until a response schedules the
	// next ping.
	var nextTest <-chan time.Time
	opTimeout := time.After(latencyTestOpTimeout)

	for {
		select {
		case <-ctx.Done():
			return nil

		case <-opTimeout:
			return nil

		case <-nextTest:
			// Create ping request msg.
			pingRequest, err := op.createPingRequest()
			if err != nil {
				returnErr = terminal.ErrInternalError.With("%w", err)
				return nil
			}
			msg := op.NewEmptyMsg()
			msg.Unit.MakeHighPriority()
			msg.Data = pingRequest

			// Send it.
			tErr := op.Send(msg, latencyTestOpTimeout)
			if tErr != nil {
				returnErr = tErr.Wrap("failed to send ping request")
				return nil
			}
			op.Flush(1 * time.Second)

			// Disarm the timer until the next response arrives.
			nextTest = nil

		case msg := <-op.responses:
			// Check if the op ended (HandleStop closes the channel).
			if msg == nil {
				return nil
			}

			// Handle response
			tErr := op.handleResponse(msg)
			if tErr != nil {
				returnErr = tErr
				return nil //nolint:nilerr
			}

			// Check if we have enough latency tests.
			if len(op.measuredLatencies) >= latencyTestRuns {
				returnErr = op.reportMeasuredLatencies()
				return nil
			}

			// Schedule next latency test, if not yet scheduled.
			if nextTest == nil {
				nextTest = time.After(latencyTestPauseDuration)
			}
		}
	}
}
+
+func (op *LatencyTestClientOp) createPingRequest() (*container.Container, error) {
+	// Generate nonce.
+	nonce, err := rng.Bytes(latencyTestNonceSize)
+	if err != nil {
+		return nil, fmt.Errorf("failed to create ping nonce")
+	}
+
+	// Set client request state.
+	op.lastPingSentAt = time.Now()
+	op.lastPingNonce = nonce
+
+	return container.New(
+		varint.Pack8(latencyPingRequest),
+		nonce,
+	), nil
+}
+
// handleResponse validates a ping response against the outstanding nonce
// and records the measured round trip time.
func (op *LatencyTestClientOp) handleResponse(msg *terminal.Msg) *terminal.Error {
	defer msg.Finish()

	rType, err := msg.Data.GetNextN8()
	if err != nil {
		return terminal.ErrMalformedData.With("failed to get response type: %w", err)
	}

	switch rType {
	case latencyPingResponse:
		// Check if the ping nonce matches.
		if !bytes.Equal(op.lastPingNonce, msg.Data.CompileData()) {
			return terminal.ErrIntegrity.With("ping nonce mismatch")
		}
		// Clear the nonce so a duplicate response cannot match again.
		op.lastPingNonce = nil
		// Save latency.
		op.measuredLatencies = append(op.measuredLatencies, time.Since(op.lastPingSentAt))

		return nil
	default:
		return terminal.ErrIncorrectUsage.With("unknown response type")
	}
}
+
// reportMeasuredLatencies picks the lowest of the measured latencies and
// saves it to the connected Hub, if available.
func (op *LatencyTestClientOp) reportMeasuredLatencies() *terminal.Error {
	// Find lowest value.
	lowestLatency := time.Hour
	for _, latency := range op.measuredLatencies {
		if latency < lowestLatency {
			lowestLatency = latency
		}
	}
	op.testResult = lowestLatency

	// Save the result to the crane.
	if controller, ok := op.Terminal().(*CraneControllerTerminal); ok {
		if controller.Crane.ConnectedHub != nil {
			controller.Crane.ConnectedHub.GetMeasurements().SetLatency(op.testResult)
			log.Infof("spn/docks: measured latency to %s: %s", controller.Crane.ConnectedHub, op.testResult)
			return nil
		} else if controller.Crane.IsMine() {
			return terminal.ErrInternalError.With("latency operation was run on %s without a connected hub set", controller.Crane)
		}
	} else if !runningTests {
		return terminal.ErrInternalError.With("latency operation was run on terminal that is not a crane controller, but %T", op.Terminal())
	}
	return nil
}
+
+// Deliver delivers a message to the operation.
+func (op *LatencyTestClientOp) Deliver(msg *terminal.Msg) *terminal.Error {
+	// Optimized delivery with 1s timeout.
+	select {
+	case op.responses <- msg:
+	default:
+		select {
+		case op.responses <- msg:
+		case <-time.After(1 * time.Second):
+			return terminal.ErrTimeout
+		}
+	}
+	return nil
+}
+
// HandleStop gives the operation the ability to cleanly shut down.
// The returned error is the error to send to the other side.
// Should never be called directly. Call Stop() instead.
func (op *LatencyTestClientOp) HandleStop(tErr *terminal.Error) (errorToSend *terminal.Error) {
	// Closing the responses channel makes the handler receive a nil
	// message, which it interprets as the end of the operation.
	close(op.responses)
	// Return result to waiting routine (non-blocking; buffered size 1).
	select {
	case op.result <- tErr:
	default:
	}
	return tErr
}
+
// Result returns the result (end error) of the operation.
// The channel is buffered (size 1) and delivers a single value when the
// operation ends.
func (op *LatencyTestClientOp) Result() <-chan *terminal.Error {
	return op.result
}
+
// startLatencyTestOp starts a latency test operation requested by the
// remote side and immediately handles the first ping request.
func startLatencyTestOp(t terminal.Terminal, opID uint32, data *container.Container) (terminal.Operation, *terminal.Error) {
	// Create operation.
	op := &LatencyTestOp{}
	op.InitOperationBase(t, opID)

	// Handle first request.
	msg := op.NewEmptyMsg()
	msg.Data = data
	tErr := op.Deliver(msg)
	if tErr != nil {
		return nil, tErr
	}

	return op, nil
}
+
// Deliver delivers a message to the operation.
// Ping requests are answered directly with a ping response that carries
// the original nonce.
func (op *LatencyTestOp) Deliver(msg *terminal.Msg) *terminal.Error {
	// Get request type.
	rType, err := msg.Data.GetNextN8()
	if err != nil {
		return terminal.ErrMalformedData.With("failed to get response type: %w", err)
	}

	switch rType {
	case latencyPingRequest:
		// Keep the nonce and just replace the msg type.
		msg.Data.PrependNumber(latencyPingResponse)
		msg.Type = terminal.MsgTypeData
		// Reuse the incoming message's unit for the response.
		msg.Unit.ReUse()
		msg.Unit.MakeHighPriority()

		// Send response.
		tErr := op.Send(msg, latencyTestOpTimeout)
		if tErr != nil {
			return tErr.Wrap("failed to send ping response")
		}
		op.Flush(1 * time.Second)

		return nil

	default:
		return terminal.ErrIncorrectUsage.With("unknown request type")
	}
}
diff --git a/spn/docks/op_latency_test.go b/spn/docks/op_latency_test.go
new file mode 100644
index 00000000..7a0b4ec7
--- /dev/null
+++ b/spn/docks/op_latency_test.go
@@ -0,0 +1,59 @@
+package docks
+
+import (
+	"testing"
+	"time"
+
+	"github.com/safing/portmaster/spn/terminal"
+)
+
+func TestLatencyOp(t *testing.T) {
+	t.Parallel()
+
+	var (
+		latTestDelay            = 10 * time.Millisecond
+		latTestQueueSize uint32 = 10
+	)
+
+	// Reduce waiting time.
+	latencyTestPauseDuration = 100 * time.Millisecond
+
+	// Create test terminal pair.
+	a, b, err := terminal.NewSimpleTestTerminalPair(
+		latTestDelay,
+		int(latTestQueueSize),
+		&terminal.TerminalOpts{
+			FlowControl:     terminal.FlowControlNone,
+			FlowControlSize: latTestQueueSize,
+		},
+	)
+	if err != nil {
+		t.Fatalf("failed to create test terminal pair: %s", err)
+	}
+
+	// Grant permission for op on remote terminal and start op.
+	b.GrantPermission(terminal.IsCraneController)
+	op, tErr := NewLatencyTestOp(a)
+	if tErr != nil {
+		t.Fatalf("failed to start op: %s", err)
+	}
+
+	// Wait for result and check error.
+	tErr = <-op.Result()
+	if tErr.IsError() {
+		t.Fatalf("op failed: %s", tErr)
+	}
+	t.Logf("measured latency: %f ms", float64(op.testResult)/float64(time.Millisecond))
+
+	// Calculate expected latency.
+	expectedLatency := float64(latTestDelay * 2)
+	t.Logf("expected latency: %f ms", expectedLatency/float64(time.Millisecond))
+
+	// Check if measured latency is within parameters.
+	if float64(op.testResult) > expectedLatency*1.2 {
+		t.Fatal("measured latency too high")
+	}
+	if float64(op.testResult) < expectedLatency*0.9 {
+		t.Fatal("measured latency too low")
+	}
+}
diff --git a/spn/docks/op_sync_state.go b/spn/docks/op_sync_state.go
new file mode 100644
index 00000000..43530803
--- /dev/null
+++ b/spn/docks/op_sync_state.go
@@ -0,0 +1,150 @@
+package docks
+
+import (
+	"context"
+	"time"
+
+	"github.com/safing/portbase/container"
+	"github.com/safing/portbase/formats/dsd"
+	"github.com/safing/portmaster/spn/conf"
+	"github.com/safing/portmaster/spn/terminal"
+)
+
+// SyncStateOpType is the type ID of the sync state operation.
+const SyncStateOpType = "sync/state"
+
+// SyncStateOp is used to sync the crane state.
+type SyncStateOp struct {
+	terminal.OneOffOperationBase
+}
+
+// SyncStateMessage holds the sync data.
+type SyncStateMessage struct {
+	Stopping        bool
+	RequestStopping bool
+}
+
+// Type returns the type ID.
+func (op *SyncStateOp) Type() string {
+	return SyncStateOpType
+}
+
+func init() {
+	terminal.RegisterOpType(terminal.OperationFactory{
+		Type:     SyncStateOpType,
+		Requires: terminal.IsCraneController,
+		Start:    runSyncStateOp,
+	})
+}
+
+// startSyncStateOp starts a worker that runs the sync state operation.
+func (crane *Crane) startSyncStateOp() {
+	module.StartWorker("sync crane state", func(ctx context.Context) error {
+		tErr := crane.Controller.SyncState(ctx)
+		if tErr != nil {
+			return tErr
+		}
+
+		return nil
+	})
+}
+
+// SyncState runs a sync state operation.
+func (controller *CraneControllerTerminal) SyncState(ctx context.Context) *terminal.Error {
+	// Check if we are a public Hub and whether the lane is public too.
+	if !conf.PublicHub() || !controller.Crane.Public() {
+		return nil
+	}
+
+	// Create and init.
+	op := &SyncStateOp{}
+	op.Init()
+
+	// Get optimization states.
+	requestStopping := false
+	func() {
+		controller.Crane.NetState.lock.Lock()
+		defer controller.Crane.NetState.lock.Unlock()
+
+		requestStopping = controller.Crane.NetState.stoppingRequested
+	}()
+
+	// Create sync message.
+	msg := &SyncStateMessage{
+		Stopping:        controller.Crane.stopping.IsSet(),
+		RequestStopping: requestStopping,
+	}
+	data, err := dsd.Dump(msg, dsd.CBOR)
+	if err != nil {
+		return terminal.ErrInternalError.With("%w", err)
+	}
+
+	// Send message.
+	tErr := controller.StartOperation(op, container.New(data), 30*time.Second)
+	if tErr != nil {
+		return tErr
+	}
+
+	// Wait for reply
+	select {
+	case tErr = <-op.Result:
+		if tErr.IsError() {
+			return tErr
+		}
+		return nil
+	case <-ctx.Done():
+		return nil
+	case <-time.After(1 * time.Minute):
+		return terminal.ErrTimeout.With("timed out while waiting for sync crane result")
+	}
+}
+
+func runSyncStateOp(t terminal.Terminal, opID uint32, data *container.Container) (terminal.Operation, *terminal.Error) {
+	// Check if we are on a crane controller.
+	var ok bool
+	var controller *CraneControllerTerminal
+	if controller, ok = t.(*CraneControllerTerminal); !ok {
+		return nil, terminal.ErrIncorrectUsage.With("can only be used with a crane controller")
+	}
+
+	// Check if we are a public Hub and whether the lane is public too.
+	if !conf.PublicHub() || !controller.Crane.Public() {
+		return nil, terminal.ErrPermissionDenied.With("only public lanes can sync crane status")
+	}
+
+	// Load message.
+	syncState := &SyncStateMessage{}
+	_, err := dsd.Load(data.CompileData(), syncState)
+	if err != nil {
+		return nil, terminal.ErrMalformedData.With("failed to load sync state message: %w", err)
+	}
+
+	// Apply optimization state.
+	controller.Crane.NetState.lock.Lock()
+	defer controller.Crane.NetState.lock.Unlock()
+	controller.Crane.NetState.stoppingRequestedByPeer = syncState.RequestStopping
+
+	// Apply crane state only when we don't own the crane.
+	if !controller.Crane.IsMine() {
+		// Apply sync state.
+		var changed bool
+		if syncState.Stopping {
+			if controller.Crane.stopping.SetToIf(false, true) {
+				controller.Crane.NetState.markedStoppingAt = time.Now()
+				changed = true
+			}
+		} else {
+			if controller.Crane.stopping.SetToIf(true, false) {
+				controller.Crane.NetState.markedStoppingAt = time.Time{}
+				changed = true
+			}
+		}
+
+		// Notify of change.
+		if changed {
+			controller.Crane.NotifyUpdate()
+		}
+	}
+
+	return nil, nil
+}
diff --git a/spn/docks/op_whoami.go b/spn/docks/op_whoami.go
new file mode 100644
index 00000000..baf5204c
--- /dev/null
+++ b/spn/docks/op_whoami.go
@@ -0,0 +1,135 @@
+package docks
+
+import (
+	"time"
+
+	"github.com/safing/portbase/container"
+	"github.com/safing/portbase/formats/dsd"
+	"github.com/safing/portmaster/spn/terminal"
+)
+
+const (
+	// WhoAmIType is the type ID of the whoami operation.
+	WhoAmIType = "whoami"
+
+	whoAmITimeout = 3 * time.Second
+)
+
+// WhoAmIOp is used to request some metadata about the other side.
+type WhoAmIOp struct {
+	terminal.OneOffOperationBase
+
+	response *WhoAmIResponse
+}
+
+// WhoAmIResponse is a whoami response.
+type WhoAmIResponse struct {
+	// Timestamp in nanoseconds
+	Timestamp int64 `cbor:"t,omitempty" json:"t,omitempty"`
+
+	// Addr is the remote address as reported by the crane terminal (IP and port).
+	Addr string `cbor:"a,omitempty" json:"a,omitempty"`
+}
+
+// Type returns the type ID.
+func (op *WhoAmIOp) Type() string {
+	return WhoAmIType
+}
+
+func init() {
+	terminal.RegisterOpType(terminal.OperationFactory{
+		Type:  WhoAmIType,
+		Start: startWhoAmI,
+	})
+}
+
+// WhoAmI executes a whoami operation and returns the response.
+func WhoAmI(t terminal.Terminal) (*WhoAmIResponse, *terminal.Error) {
+	whoami, err := NewWhoAmIOp(t)
+	if err.IsError() {
+		return nil, err
+	}
+
+	// Wait for response.
+	select {
+	case tErr := <-whoami.Result:
+		if tErr.IsError() {
+			return nil, tErr
+		}
+		return whoami.response, nil
+	case <-time.After(whoAmITimeout * 2):
+		return nil, terminal.ErrTimeout
+	}
+}
+
+// NewWhoAmIOp starts a new whoami operation.
+func NewWhoAmIOp(t terminal.Terminal) (*WhoAmIOp, *terminal.Error) {
+	// Create operation and init.
+	op := &WhoAmIOp{}
+	op.OneOffOperationBase.Init()
+
+	// Send ping.
+	tErr := t.StartOperation(op, nil, whoAmITimeout)
+	if tErr != nil {
+		return nil, tErr
+	}
+
+	return op, nil
+}
+
+// Deliver delivers a message to the operation.
+func (op *WhoAmIOp) Deliver(msg *terminal.Msg) *terminal.Error {
+	defer msg.Finish()
+
+	// Parse response.
+	response := &WhoAmIResponse{}
+	_, err := dsd.Load(msg.Data.CompileData(), response)
+	if err != nil {
+		return terminal.ErrMalformedData.With("failed to parse ping response: %w", err)
+	}
+
+	op.response = response
+	return terminal.ErrExplicitAck
+}
+
+func startWhoAmI(t terminal.Terminal, opID uint32, data *container.Container) (terminal.Operation, *terminal.Error) {
+	// Get crane terminal, if available.
+	ct, _ := t.(*CraneTerminal)
+
+	// Create response.
+	r := &WhoAmIResponse{
+		Timestamp: time.Now().UnixNano(),
+	}
+	if ct != nil {
+		r.Addr = ct.RemoteAddr().String()
+	}
+	response, err := dsd.Dump(r, dsd.CBOR)
+	if err != nil {
+		return nil, terminal.ErrInternalError.With("failed to create whoami response: %w", err)
+	}
+
+	// Send response.
+	msg := terminal.NewMsg(response)
+	msg.FlowID = opID
+	msg.Unit.MakeHighPriority()
+	if terminal.UsePriorityDataMsgs {
+		msg.Type = terminal.MsgTypePriorityData
+	}
+	tErr := t.Send(msg, whoAmITimeout)
+	if tErr != nil {
+		// Finish message unit on failure.
+		msg.Finish()
+		return nil, tErr.With("failed to send ping response")
+	}
+
+	// Operation is just one response and finished successfully.
+	return nil, nil
+}
+
+// HandleStop gives the operation the ability to cleanly shut down.
+// The returned error is the error to send to the other side.
+// Should never be called directly. Call Stop() instead.
+func (op *WhoAmIOp) HandleStop(err *terminal.Error) (errorToSend *terminal.Error) {
+	// Continue with usual handling of inherited base.
+	return op.OneOffOperationBase.HandleStop(err)
+}
diff --git a/spn/docks/op_whoami_test.go b/spn/docks/op_whoami_test.go
new file mode 100644
index 00000000..9ce32763
--- /dev/null
+++ b/spn/docks/op_whoami_test.go
@@ -0,0 +1,24 @@
+package docks
+
+import (
+	"testing"
+
+	"github.com/safing/portmaster/spn/terminal"
+)
+
+func TestWhoAmIOp(t *testing.T) {
+	t.Parallel()
+
+	// Create test terminal pair.
+	a, _, err := terminal.NewSimpleTestTerminalPair(0, 0, nil)
+	if err != nil {
+		t.Fatalf("failed to create test terminal pair: %s", err)
+	}
+
+	// Run op.
+	resp, tErr := WhoAmI(a)
+	if tErr.IsError() {
+		t.Fatal(tErr)
+	}
+	t.Logf("whoami: %+v", resp)
+}
diff --git a/spn/docks/terminal_expansion.go b/spn/docks/terminal_expansion.go
new file mode 100644
index 00000000..16895a83
--- /dev/null
+++ b/spn/docks/terminal_expansion.go
@@ -0,0 +1,150 @@
+package docks
+
+import (
+	"fmt"
+	"sync"
+	"time"
+
+	"github.com/tevino/abool"
+
+	"github.com/safing/portbase/container"
+	"github.com/safing/portmaster/spn/hub"
+	"github.com/safing/portmaster/spn/terminal"
+)
+
+// ExpansionTerminal is used for expanding to another Hub.
+type ExpansionTerminal struct {
+	*terminal.TerminalBase
+
+	relayOp *ExpansionTerminalRelayOp
+
+	changeNotifyFuncReady *abool.AtomicBool
+	changeNotifyFunc      func()
+
+	reachableChecked time.Time
+	reachableLock    sync.Mutex
+}
+
+// ExpansionTerminalRelayOp is the operation that connects to the relay.
+type ExpansionTerminalRelayOp struct {
+	terminal.OperationBase
+
+	expansionTerminal *ExpansionTerminal
+}
+
+// Type returns the type ID.
+func (op *ExpansionTerminalRelayOp) Type() string {
+	return ExpandOpType
+}
+
+// ExpandTo initiates an expansion.
+func ExpandTo(from terminal.Terminal, routeTo string, encryptFor *hub.Hub) (*ExpansionTerminal, *terminal.Error) {
+	// First, create the local endpoint terminal to generate the init data.
+
+	// Create options and bare expansion terminal.
+	opts := terminal.DefaultExpansionTerminalOpts()
+	opts.Encrypt = encryptFor != nil
+	expansion := &ExpansionTerminal{
+		changeNotifyFuncReady: abool.New(),
+	}
+	expansion.relayOp = &ExpansionTerminalRelayOp{
+		expansionTerminal: expansion,
+	}
+
+	// Create base terminal for expansion.
+	base, initData, tErr := terminal.NewLocalBaseTerminal(
+		module.Ctx,
+		0, // Ignore; The ID of the operation is used for communication.
+		from.FmtID(),
+		encryptFor,
+		opts,
+		expansion.relayOp,
+	)
+	if tErr != nil {
+		return nil, tErr.Wrap("failed to create expansion terminal base")
+	}
+	expansion.TerminalBase = base
+	base.SetTerminalExtension(expansion)
+	base.SetTimeout(defaultTerminalIdleTimeout)
+
+	// Second, start the actual relay operation.
+
+	// Create setup message for relay operation.
+	opInitData := container.New()
+	opInitData.AppendAsBlock([]byte(routeTo))
+	opInitData.AppendContainer(initData)
+
+	// Start relay operation on connected Hub.
+	tErr = from.StartOperation(expansion.relayOp, opInitData, 5*time.Second)
+	if tErr != nil {
+		return nil, tErr.Wrap("failed to start expansion operation")
+	}
+
+	// Start Workers.
+	base.StartWorkers(module, "expansion terminal")
+
+	return expansion, nil
+}
+
+// SetChangeNotifyFunc sets a callback function that is called when the terminal state changes.
+func (t *ExpansionTerminal) SetChangeNotifyFunc(f func()) {
+	if t.changeNotifyFuncReady.IsSet() {
+		return
+	}
+	t.changeNotifyFunc = f
+	t.changeNotifyFuncReady.Set()
+}
+
+// NeedsReachableCheck returns whether the terminal should be checked if it is
+// reachable via the existing network internal relayed connection.
+func (t *ExpansionTerminal) NeedsReachableCheck(maxCheckAge time.Duration) bool {
+	t.reachableLock.Lock()
+	defer t.reachableLock.Unlock()
+
+	return time.Since(t.reachableChecked) > maxCheckAge
+}
+
+// MarkReachable marks the terminal as reachable via the existing network
+// internal relayed connection.
+func (t *ExpansionTerminal) MarkReachable() {
+	t.reachableLock.Lock()
+	defer t.reachableLock.Unlock()
+
+	t.reachableChecked = time.Now()
+}
+
+// HandleDestruction gives the terminal the ability to clean up.
+// The terminal has already fully shut down at this point.
+// Should never be called directly. Call Abandon() instead.
+func (t *ExpansionTerminal) HandleDestruction(err *terminal.Error) {
+	// Trigger update of connected Pin.
+	if t.changeNotifyFuncReady.IsSet() {
+		t.changeNotifyFunc()
+	}
+
+	// Stop the relay operation.
+	// The error message is already sent by the terminal.
+	t.relayOp.Stop(t.relayOp, nil)
+}
+
+// CustomIDFormat formats the terminal ID.
+func (t *ExpansionTerminal) CustomIDFormat() string {
+	return fmt.Sprintf("%s~%d", t.relayOp.Terminal().FmtID(), t.relayOp.ID())
+}
+
+// Deliver delivers a message to the operation.
+func (op *ExpansionTerminalRelayOp) Deliver(msg *terminal.Msg) *terminal.Error {
+	// Proxy directly to expansion terminal.
+	return op.expansionTerminal.Deliver(msg)
+}
+
+// HandleStop gives the operation the ability to cleanly shut down.
+// The returned error is the error to send to the other side.
+// Should never be called directly. Call Stop() instead.
+func (op *ExpansionTerminalRelayOp) HandleStop(err *terminal.Error) (errorToSend *terminal.Error) {
+	// Stop the expansion terminal.
+	// The error message will be sent by the operation.
+	op.expansionTerminal.Abandon(nil)
+
+	return err
+}
diff --git a/spn/docks/terminal_expansion_test.go b/spn/docks/terminal_expansion_test.go
new file mode 100644
index 00000000..415716ea
--- /dev/null
+++ b/spn/docks/terminal_expansion_test.go
@@ -0,0 +1,305 @@
+package docks
+
+import (
+	"fmt"
+	"os"
+	"runtime/pprof"
+	"sync"
+	"testing"
+	"time"
+
+	"github.com/safing/portmaster/spn/access"
+	"github.com/safing/portmaster/spn/cabin"
+	"github.com/safing/portmaster/spn/hub"
+	"github.com/safing/portmaster/spn/ships"
+	"github.com/safing/portmaster/spn/terminal"
+)
+
+const defaultTestQueueSize = 200
+
+func TestExpansion(t *testing.T) {
+	t.Parallel()
+
+	// Test without and with encryption.
+	for _, encrypt := range []bool{false, true} {
+		// Test down/up separately and in parallel.
+		for _, parallel := range []bool{false, true} {
+			// Test with different flow controls.
+			for _, fc := range []struct {
+				flowControl     terminal.FlowControlType
+				flowControlSize uint32
+			}{
+				{
+					flowControl:     terminal.FlowControlNone,
+					flowControlSize: 5,
+				},
+				{
+					flowControl:     terminal.FlowControlDFQ,
+					flowControlSize: defaultTestQueueSize,
+				},
+			} {
+				// Run tests with combined options.
+				testExpansion(
+					t,
+					"expansion-hop-test",
+					&terminal.TerminalOpts{
+						Encrypt:         encrypt,
+						Padding:         8,
+						FlowControl:     fc.flowControl,
+						FlowControlSize: fc.flowControlSize,
+					},
+					defaultTestQueueSize,
+					defaultTestQueueSize,
+					parallel,
+				)
+			}
+		}
+	}
+
+	stressTestOpts := &terminal.TerminalOpts{
+		Encrypt:         true,
+		Padding:         8,
+		FlowControl:     terminal.FlowControlDFQ,
+		FlowControlSize: defaultTestQueueSize,
+	}
+	testExpansion(t, "expansion-stress-test-down", stressTestOpts, defaultTestQueueSize*100, 0, false)
+	testExpansion(t, "expansion-stress-test-up", stressTestOpts, 0, defaultTestQueueSize*100, false)
+	testExpansion(t, "expansion-stress-test-duplex", stressTestOpts, defaultTestQueueSize*100, defaultTestQueueSize*100, false)
+}
+
+func testExpansion( //nolint:maintidx,thelper
+	t *testing.T,
+	testID string,
+	terminalOpts *terminal.TerminalOpts,
+	clientCountTo,
+	serverCountTo uint64,
+	inParallel bool,
+) {
+	testID += fmt.Sprintf(":encrypt=%v,flowType=%d,parallel=%v", terminalOpts.Encrypt, terminalOpts.FlowControl, inParallel)
+
+	var identity2, identity3, identity4 *cabin.Identity
+	var connectedHub2, connectedHub3, connectedHub4 *hub.Hub
+	if terminalOpts.Encrypt {
+		identity2, connectedHub2 = getTestIdentity(t)
+		identity3, connectedHub3 = getTestIdentity(t)
+		identity4, connectedHub4 = getTestIdentity(t)
+	}
+
+	// Build ships and cranes.
+	optimalMinLoadSize = 100
+	ship1to2 := ships.NewTestShip(!terminalOpts.Encrypt, 100)
+	ship2to3 := ships.NewTestShip(!terminalOpts.Encrypt, 100)
+	ship3to4 := ships.NewTestShip(!terminalOpts.Encrypt, 100)
+
+	var crane1, crane2to1, crane2to3, crane3to2, crane3to4, crane4 *Crane
+	var craneWg sync.WaitGroup
+	craneWg.Add(6)
+
+	go func() {
+		var err error
+		crane1, err = NewCrane(ship1to2, connectedHub2, nil)
+		if err != nil {
+			panic(fmt.Sprintf("expansion test %s could not create crane1: %s", testID, err))
+		}
+		crane1.ID = "c1"
+		err = crane1.Start(module.Ctx)
+		if err != nil {
+			panic(fmt.Sprintf("expansion test %s could not start crane1: %s", testID, err))
+		}
+		crane1.ship.MarkPublic()
+		craneWg.Done()
+	}()
+	go func() {
+		var err error
+		crane2to1, err = NewCrane(ship1to2.Reverse(), nil, identity2)
+		if err != nil {
+			panic(fmt.Sprintf("expansion test %s could not create crane2to1: %s", testID, err))
+		}
+		crane2to1.ID = "c2to1"
+		err = crane2to1.Start(module.Ctx)
+		if err != nil {
+			panic(fmt.Sprintf("expansion test %s could not start crane2to1: %s", testID, err))
+		}
+		crane2to1.ship.MarkPublic()
+		craneWg.Done()
+	}()
+	go func() {
+		var err error
+		crane2to3, err = NewCrane(ship2to3, connectedHub3, nil)
+		if err != nil {
+			panic(fmt.Sprintf("expansion test %s could not create crane2to3: %s", testID, err))
+		}
+		crane2to3.ID = "c2to3"
+		err = crane2to3.Start(module.Ctx)
+		if err != nil {
+			panic(fmt.Sprintf("expansion test %s could not start crane2to3: %s", testID, err))
+		}
+		crane2to3.ship.MarkPublic()
+		craneWg.Done()
+	}()
+	go func() {
+		var err error
+		crane3to2, err = NewCrane(ship2to3.Reverse(), nil, identity3)
+		if err != nil {
+			panic(fmt.Sprintf("expansion test %s could not create crane3to2: %s", testID, err))
+		}
+		crane3to2.ID = "c3to2"
+		err = crane3to2.Start(module.Ctx)
+		if err != nil {
+			panic(fmt.Sprintf("expansion test %s could not start crane3to2: %s", testID, err))
+		}
+		crane3to2.ship.MarkPublic()
+		craneWg.Done()
+	}()
+	go func() {
+		var err error
+		crane3to4, err = NewCrane(ship3to4, connectedHub4, nil)
+		if err != nil {
+			panic(fmt.Sprintf("expansion test %s could not create crane3to4: %s", testID, err))
+		}
+		crane3to4.ID = "c3to4"
+		err = crane3to4.Start(module.Ctx)
+		if err != nil {
+			panic(fmt.Sprintf("expansion test %s could not start crane3to4: %s", testID, err))
+		}
+		crane3to4.ship.MarkPublic()
+		craneWg.Done()
+	}()
+	go func() {
+		var err error
+		crane4, err = NewCrane(ship3to4.Reverse(), nil, identity4)
+		if err != nil {
+			panic(fmt.Sprintf("expansion test %s could not create crane4: %s", testID, err))
+		}
+		crane4.ID = "c4"
+		err = crane4.Start(module.Ctx)
+		if err != nil {
+			panic(fmt.Sprintf("expansion test %s could not start crane4: %s", testID, err))
+		}
+		crane4.ship.MarkPublic()
+		craneWg.Done()
+	}()
+	craneWg.Wait()
+
+	// Assign cranes.
+	crane3HubID := testID + "-crane3HubID"
+	AssignCrane(crane3HubID, crane2to3)
+	crane4HubID := testID + "-crane4HubID"
+	AssignCrane(crane4HubID, crane3to4)
+
+	t.Logf("expansion test %s: initial setup complete", testID)
+
+	// Wait async for test to complete, print stack after timeout.
+	finished := make(chan struct{})
+	go func() {
+		select {
+		case <-finished:
+		case <-time.After(30 * time.Second):
+			fmt.Printf("expansion test %s is taking too long, print stack:\n", testID)
+			_ = pprof.Lookup("goroutine").WriteTo(os.Stdout, 1)
+			os.Exit(1)
+		}
+	}()
+
+	// Start initial crane.
+	homeTerminal, initData, tErr := NewLocalCraneTerminal(crane1, nil, &terminal.TerminalOpts{})
+	if tErr != nil {
+		t.Fatalf("expansion test %s failed to create home terminal: %s", testID, tErr)
+	}
+	tErr = crane1.EstablishNewTerminal(homeTerminal, initData)
+	if tErr != nil {
+		t.Fatalf("expansion test %s failed to connect home terminal: %s", testID, tErr)
+	}
+
+	t.Logf("expansion test %s: home terminal setup complete", testID)
+	time.Sleep(100 * time.Millisecond)
+
+	// Start counters for testing.
+	op0, tErr := terminal.NewCounterOp(homeTerminal, terminal.CounterOpts{
+		ClientCountTo: clientCountTo,
+		ServerCountTo: serverCountTo,
+	})
+	if tErr != nil {
+		t.Fatalf("expansion test %s failed to run counter op: %s", testID, tErr)
+	}
+	t.Logf("expansion test %s: home terminal counter setup complete", testID)
+	if !inParallel {
+		op0.Wait()
+	}
+
+	// Start expansion to crane 3.
+	opAuthTo2, tErr := access.AuthorizeToTerminal(homeTerminal)
+	if tErr != nil {
+		t.Fatalf("expansion test %s failed to auth with home terminal: %s", testID, tErr)
+	}
+	tErr = <-opAuthTo2.Result
+	if tErr.IsError() {
+		t.Fatalf("expansion test %s failed to auth with home terminal: %s", testID, tErr)
+	}
+	expansionTerminalTo3, err := ExpandTo(homeTerminal, crane3HubID, connectedHub3)
+	if err != nil {
+		t.Fatalf("expansion test %s failed to expand to %s: %s", testID, crane3HubID, tErr)
+	}
+
+	// Start counters for testing.
+	op1, tErr := terminal.NewCounterOp(expansionTerminalTo3, terminal.CounterOpts{
+		ClientCountTo: clientCountTo,
+		ServerCountTo: serverCountTo,
+	})
+	if tErr != nil {
+		t.Fatalf("expansion test %s failed to run counter op: %s", testID, tErr)
+	}
+
+	t.Logf("expansion test %s: expansion to crane3 and counter setup complete", testID)
+	if !inParallel {
+		op1.Wait()
+	}
+
+	// Start expansion to crane 4.
+	opAuthTo3, tErr := access.AuthorizeToTerminal(expansionTerminalTo3)
+	if tErr != nil {
+		t.Fatalf("expansion test %s failed to auth with extenstion terminal: %s", testID, tErr)
+	}
+	tErr = <-opAuthTo3.Result
+	if tErr.IsError() {
+		t.Fatalf("expansion test %s failed to auth with extenstion terminal: %s", testID, tErr)
+	}
+
+	expansionTerminalTo4, err := ExpandTo(expansionTerminalTo3, crane4HubID, connectedHub4)
+	if err != nil {
+		t.Fatalf("expansion test %s failed to expand to %s: %s", testID, crane4HubID, tErr)
+	}
+
+	// Start counters for testing.
+	op2, tErr := terminal.NewCounterOp(expansionTerminalTo4, terminal.CounterOpts{
+		ClientCountTo: clientCountTo,
+		ServerCountTo: serverCountTo,
+	})
+	if tErr != nil {
+		t.Fatalf("expansion test %s failed to run counter op: %s", testID, tErr)
+	}
+
+	t.Logf("expansion test %s: expansion to crane4 and counter setup complete", testID)
+	op2.Wait()
+
+	// Wait for op1 if not already.
+	if inParallel {
+		op0.Wait()
+		op1.Wait()
+	}
+
+	// Wait for completion.
+	close(finished)
+
+	// Wait a little so that all errors can be propagated, so we can truly see
+	// if we succeeded.
+	time.Sleep(100 * time.Millisecond)
+
+	// Check errors.
+	if op1.Error != nil {
+		t.Fatalf("crane test %s counter op1 failed: %s", testID, op1.Error)
+	}
+	if op2.Error != nil {
+		t.Fatalf("crane test %s counter op2 failed: %s", testID, op2.Error)
+	}
+}
diff --git a/spn/hub/database.go b/spn/hub/database.go
new file mode 100644
index 00000000..d4ca3f85
--- /dev/null
+++ b/spn/hub/database.go
@@ -0,0 +1,202 @@
+package hub
+
+import (
+	"errors"
+	"fmt"
+	"sync"
+	"time"
+
+	"github.com/safing/portbase/database"
+	"github.com/safing/portbase/database/iterator"
+	"github.com/safing/portbase/database/query"
+	"github.com/safing/portbase/database/record"
+)
+
+var (
+	db = database.NewInterface(&database.Options{
+		Local:    true,
+		Internal: true,
+	})
+
+	getFromNavigator func(mapName, hubID string) *Hub
+)
+
+// MakeHubDBKey makes a hub db key.
+func MakeHubDBKey(mapName, hubID string) string {
+	return fmt.Sprintf("cache:spn/hubs/%s/%s", mapName, hubID)
+}
+
+// MakeHubMsgDBKey makes a hub msg db key.
+func MakeHubMsgDBKey(mapName string, msgType MsgType, hubID string) string {
+	return fmt.Sprintf("cache:spn/msgs/%s/%s/%s", mapName, msgType, hubID)
+}
+
+// SetNavigatorAccess sets a shortcut function to access hubs from the navigator instead of having to go through the database.
+// This also reduces the number of objects in RAM and better caches parsed attributes.
+func SetNavigatorAccess(fn func(mapName, hubID string) *Hub) {
+	if getFromNavigator == nil {
+		getFromNavigator = fn
+	}
+}
+
+// GetHub gets a Hub from the database - or the navigator, if configured.
+func GetHub(mapName string, hubID string) (*Hub, error) {
+	if getFromNavigator != nil {
+		hub := getFromNavigator(mapName, hubID)
+		if hub != nil {
+			return hub, nil
+		}
+	}
+
+	return GetHubByKey(MakeHubDBKey(mapName, hubID))
+}
+
+// GetHubByKey returns a hub by its raw DB key.
+func GetHubByKey(key string) (*Hub, error) {
+	r, err := db.Get(key)
+	if err != nil {
+		return nil, err
+	}
+
+	hub, err := EnsureHub(r)
+	if err != nil {
+		return nil, err
+	}
+
+	return hub, nil
+}
+
+// EnsureHub makes sure a database record is a Hub.
+func EnsureHub(r record.Record) (*Hub, error) {
+	// unwrap
+	if r.IsWrapped() {
+		// only allocate a new struct, if we need it
+		newHub := &Hub{}
+		err := record.Unwrap(r, newHub)
+		if err != nil {
+			return nil, err
+		}
+		newHub = prepHub(newHub)
+
+		// Fully validate when getting from database.
+		if err := newHub.Info.validateFormatting(); err != nil {
+			return nil, fmt.Errorf("announcement failed format validation: %w", err)
+		}
+		if err := newHub.Status.validateFormatting(); err != nil {
+			return nil, fmt.Errorf("status failed format validation: %w", err)
+		}
+		if err := newHub.Info.prepare(false); err != nil {
+			return nil, fmt.Errorf("failed to prepare announcement: %w", err)
+		}
+
+		return newHub, nil
+	}
+
+	// or adjust type
+	newHub, ok := r.(*Hub)
+	if !ok {
+		return nil, fmt.Errorf("record not of type *Hub, but %T", r)
+	}
+	newHub = prepHub(newHub)
+
+	// Prepare only when already parsed.
+	if err := newHub.Info.prepare(false); err != nil {
+		return nil, fmt.Errorf("failed to prepare announcement: %w", err)
+	}
+
+	// ensure status
+	return newHub, nil
+}
+
+func prepHub(h *Hub) *Hub {
+	if h.Status == nil {
+		h.Status = &Status{}
+	}
+	h.Measurements = getSharedMeasurements(h.ID, h.Measurements)
+	return h
+}
+
+// Save saves the Hub to the correct scope in the database.
+func (h *Hub) Save() error {
+	if !h.KeyIsSet() {
+		h.SetKey(MakeHubDBKey(h.Map, h.ID))
+	}
+
+	return db.Put(h)
+}
+
+// RemoveHubAndMsgs deletes a Hub and its saved messages from the database.
+func RemoveHubAndMsgs(mapName string, hubID string) (err error) {
+	err = db.Delete(MakeHubDBKey(mapName, hubID))
+	if err != nil && !errors.Is(err, database.ErrNotFound) {
+		return fmt.Errorf("failed to delete main hub entry: %w", err)
+	}
+
+	err = db.Delete(MakeHubMsgDBKey(mapName, MsgTypeAnnouncement, hubID))
+	if err != nil && !errors.Is(err, database.ErrNotFound) {
+		return fmt.Errorf("failed to delete hub announcement data: %w", err)
+	}
+
+	err = db.Delete(MakeHubMsgDBKey(mapName, MsgTypeStatus, hubID))
+	if err != nil && !errors.Is(err, database.ErrNotFound) {
+		return fmt.Errorf("failed to delete hub status data: %w", err)
+	}
+
+	return nil
+}
+
+// HubMsg stores raw Hub messages.
+type HubMsg struct { //nolint:golint
+	record.Base
+	sync.Mutex
+
+	ID   string
+	Map  string
+	Type MsgType
+	Data []byte
+
+	Received int64
+}
+
+// SaveHubMsg saves a raw (and signed) message received by another Hub.
+func SaveHubMsg(id string, mapName string, msgType MsgType, data []byte) error {
+	// create wrapper record
+	msg := &HubMsg{
+		ID:       id,
+		Map:      mapName,
+		Type:     msgType,
+		Data:     data,
+		Received: time.Now().Unix(),
+	}
+	// set key
+	msg.SetKey(MakeHubMsgDBKey(msg.Map, msg.Type, msg.ID))
+	// save
+	return db.PutNew(msg)
+}
+
+// QueryRawGossipMsgs queries the database for raw gossip messages.
+func QueryRawGossipMsgs(mapName string, msgType MsgType) (it *iterator.Iterator, err error) {
+	it, err = db.Query(query.New(MakeHubMsgDBKey(mapName, msgType, "")))
+	return
+}
+
+// EnsureHubMsg makes sure a database record is a HubMsg.
+func EnsureHubMsg(r record.Record) (*HubMsg, error) {
+	// unwrap
+	if r.IsWrapped() {
+		// only allocate a new struct, if we need it
+		newHubMsg := &HubMsg{}
+		err := record.Unwrap(r, newHubMsg)
+		if err != nil {
+			return nil, err
+		}
+		return newHubMsg, nil
+	}
+
+	// or adjust type
+	newHubMsg, ok := r.(*HubMsg)
+	if !ok {
+		return nil, fmt.Errorf("record not of type *Hub, but %T", r)
+	}
+	return newHubMsg, nil
+}
diff --git a/spn/hub/errors.go b/spn/hub/errors.go
new file mode 100644
index 00000000..276549e4
--- /dev/null
+++ b/spn/hub/errors.go
@@ -0,0 +1,21 @@
+package hub
+
+import "errors"
+
+var (
+	// ErrMissingInfo signifies that the hub is missing the HubAnnouncement.
+	ErrMissingInfo = errors.New("hub has no announcement")
+
+	// ErrMissingTransports signifies that the hub announcement did not specify any transports.
+	ErrMissingTransports = errors.New("hub announcement has no transports")
+
+	// ErrMissingIPs signifies that the hub announcement did not specify any IPs,
+	// or none of the IPs is supported by the client.
+	ErrMissingIPs = errors.New("hub announcement has no (supported) IPs")
+
+	// ErrTemporaryValidationError is returned when a validation error might be temporary.
+	ErrTemporaryValidationError = errors.New("temporary validation error")
+
+	// ErrOldData is returned when received data is outdated.
+	ErrOldData = errors.New("")
+)
diff --git a/spn/hub/format.go b/spn/hub/format.go
new file mode 100644
index 00000000..f36b3d0d
--- /dev/null
+++ b/spn/hub/format.go
@@ -0,0 +1,69 @@
+package hub
+
+import (
+	"fmt"
+	"net"
+	"regexp"
+
+	"github.com/safing/portmaster/service/network/netutils"
+)
+
// BaselineCharset defines the permitted characters.
var BaselineCharset = regexp.MustCompile(
	// Start of charset selection.
	`^[` +
		// Printable ASCII (character code 32-127), excluding common control characters of different languages: "$%&';<>\` and DELETE.
		` !#()*+,\-\./0-9:=?@A-Z[\]^_a-z{|}~` +
		// Only latin characters from extended ASCII (character code 128-255).
		`ŠŒŽšœžŸ¡¿ÀÁÂÃÄÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖØÙÚÛÜÝÞßàáâãäåæçèéêëìíîïðñòóôõöøùúûüýþÿ` +
		// End of charset selection.
		`]*$`,
)

// checkStringFormat validates that value does not exceed maxLength (in bytes)
// and contains only characters permitted by BaselineCharset.
func checkStringFormat(fieldName, value string, maxLength int) error {
	if len(value) > maxLength {
		return fmt.Errorf("field %s with length of %d exceeds max length of %d", fieldName, len(value), maxLength)
	}
	if !BaselineCharset.MatchString(value) {
		return fmt.Errorf("field %s contains characters not permitted by baseline validation", fieldName)
	}
	return nil
}

// checkStringSliceFormat validates that the slice holds at most maxLength
// entries and that every entry passes checkStringFormat with maxStringLength.
func checkStringSliceFormat(fieldName string, value []string, maxLength, maxStringLength int) error { //nolint:unparam
	if len(value) > maxLength {
		return fmt.Errorf("field %s with array/slice length of %d exceeds max length of %d", fieldName, len(value), maxLength)
	}
	for _, entry := range value {
		if err := checkStringFormat(fieldName, entry, maxStringLength); err != nil {
			return err
		}
	}
	return nil
}

// checkByteSliceFormat validates that the byte slice does not exceed maxLength.
func checkByteSliceFormat(fieldName string, value []byte, maxLength int) error {
	if len(value) > maxLength {
		return fmt.Errorf("field %s with length of %d exceeds max length of %d", fieldName, len(value), maxLength)
	}
	return nil
}
+
+func checkIPFormat(fieldName string, value net.IP) error {
+	// Check if there is an IP address.
+	if value == nil {
+		return nil
+	}
+
+	switch {
+	case len(value) != 4 && len(value) != 16:
+		return fmt.Errorf("field %s has an invalid length of %d for an IP address", fieldName, len(value))
+	case netutils.GetIPScope(value) == netutils.Invalid:
+		return fmt.Errorf("field %s holds an invalid IP address: %s", fieldName, value)
+	default:
+		return nil
+	}
+}
diff --git a/spn/hub/format_test.go b/spn/hub/format_test.go
new file mode 100644
index 00000000..62b79635
--- /dev/null
+++ b/spn/hub/format_test.go
@@ -0,0 +1,81 @@
+package hub
+
+import (
+	"fmt"
+	"net"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
// TestCheckStringFormat exercises checkStringFormat over every printable
// ASCII character and the latin part of extended ASCII, checking that exactly
// the characters of BaselineCharset are accepted.
// The max length of 3 bytes accommodates the multi-byte UTF-8 encodings of
// the extended-ASCII test characters.
func TestCheckStringFormat(t *testing.T) {
	t.Parallel()

	// Maps each test character to whether it must be permitted.
	testSet := map[string]bool{
		// Printable ASCII (character code 32-127)
		" ": true, "!": true, `"`: false, "#": true, "$": false, "%": false, "&": false, "'": false,
		"(": true, ")": true, "*": true, "+": true, ",": true, "-": true, ".": true, "/": true,
		"0": true, "1": true, "2": true, "3": true, "4": true, "5": true, "6": true, "7": true,
		"8": true, "9": true, ":": true, ";": false, "<": false, "=": true, ">": false, "?": true,
		"@": true, "A": true, "B": true, "C": true, "D": true, "E": true, "F": true, "G": true,
		"H": true, "I": true, "J": true, "K": true, "L": true, "M": true, "N": true, "O": true,
		"P": true, "Q": true, "R": true, "S": true, "T": true, "U": true, "V": true, "W": true,
		"X": true, "Y": true, "Z": true, "[": true, `\`: false, "]": true, "^": true, "_": true,
		"`": false, "a": true, "b": true, "c": true, "d": true, "e": true, "f": true, "g": true,
		"h": true, "i": true, "j": true, "k": true, "l": true, "m": true, "n": true, "o": true,
		"p": true, "q": true, "r": true, "s": true, "t": true, "u": true, "v": true, "w": true,
		"x": true, "y": true, "z": true, "{": true, "|": true, "}": true, "~": true,
		// Not testing for DELETE character.

		// Extended ASCII (character code 128-255)
		"€": false, "‚": false, "ƒ": false, "„": false, "…": false, "†": false, "‡": false, "ˆ": false,
		"‰": false, "Š": true, "‹": false, "Œ": true, "Ž": true, "‘": false, "’": false, "“": false,
		"”": false, "•": false, "–": false, "—": false, "˜": false, "™": false, "š": true, "›": false,
		"œ": true, "ž": true, "Ÿ": true, "¡": true, "¢": false, "£": false, "¤": false, "¥": false,
		"¦": false, "§": false, "¨": false, "©": false, "ª": false, "«": false, "¬": false, "®": false,
		"¯": false, "°": false, "±": false, "²": false, "³": false, "´": false, "µ": false, "¶": false,
		"·": false, "¸": false, "¹": false, "º": false, "»": false, "¼": false, "½": false, "¾": false,
		"¿": true, "À": true, "Á": true, "Â": true, "Ã": true, "Ä": true, "Å": true, "Æ": true,
		"Ç": true, "È": true, "É": true, "Ê": true, "Ë": true, "Ì": true, "Í": true, "Î": true,
		"Ï": true, "Ð": true, "Ñ": true, "Ò": true, "Ó": true, "Ô": true, "Õ": true, "Ö": true,
		"×": false, "Ø": true, "Ù": true, "Ú": true, "Û": true, "Ü": true, "Ý": true, "Þ": true,
		"ß": true, "à": true, "á": true, "â": true, "ã": true, "ä": true, "å": true, "æ": true,
		"ç": true, "è": true, "é": true, "ê": true, "ë": true, "ì": true, "í": true, "î": true,
		"ï": true, "ð": true, "ñ": true, "ò": true, "ó": true, "ô": true, "õ": true, "ö": true,
		"÷": false, "ø": true, "ù": true, "ú": true, "û": true, "ü": true, "ý": true, "þ": true,
		"ÿ": true,
	}

	for testCharacter, isPermitted := range testSet {
		if isPermitted {
			assert.NoError(t, checkStringFormat(fmt.Sprintf("test character %q", testCharacter), testCharacter, 3))
		} else {
			assert.Error(t, checkStringFormat(fmt.Sprintf("test character %q", testCharacter), testCharacter, 3))
		}
	}
}
+
// TestCheckIPFormat checks checkIPFormat with valid global/local IPv4 and
// IPv6 addresses, an address with an invalid scope, and byte slices with
// invalid IP lengths.
func TestCheckIPFormat(t *testing.T) {
	t.Parallel()

	// IPv4
	assert.NoError(t, checkIPFormat("test IP 1.1.1.1", net.IPv4(1, 1, 1, 1)))
	assert.NoError(t, checkIPFormat("test IP 192.168.1.1", net.IPv4(192, 168, 1, 1)))
	// 255.0.0.1 must be rejected - presumably netutils classifies this
	// range as an invalid scope; confirm against netutils.GetIPScope.
	assert.Error(t, checkIPFormat("test IP 255.0.0.1", net.IPv4(255, 0, 0, 1)))

	// IPv6
	assert.NoError(t, checkIPFormat("test IP ::1", net.ParseIP("::1")))
	assert.NoError(t, checkIPFormat("test IP 2606:4700:4700::1111", net.ParseIP("2606:4700:4700::1111")))

	// Invalid lengths: only 4 (IPv4) and 16 (IPv6) bytes are valid.
	assert.Error(t, checkIPFormat("test IP with length 3", net.IP([]byte{0, 0, 0})))
	assert.Error(t, checkIPFormat("test IP with length 5", net.IP([]byte{0, 0, 0, 0, 0})))
	assert.Error(t, checkIPFormat(
		"test IP with length 15",
		net.IP([]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}),
	))
	assert.Error(t, checkIPFormat(
		"test IP with length 17",
		net.IP([]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}),
	))
}
diff --git a/spn/hub/hub.go b/spn/hub/hub.go
new file mode 100644
index 00000000..efc34cd0
--- /dev/null
+++ b/spn/hub/hub.go
@@ -0,0 +1,435 @@
+package hub
+
+import (
+	"fmt"
+	"net"
+	"sync"
+	"time"
+
+	"golang.org/x/exp/slices"
+
+	"github.com/safing/jess"
+	"github.com/safing/portbase/database/record"
+	"github.com/safing/portbase/log"
+	"github.com/safing/portmaster/service/profile/endpoints"
+)
+
// Scope is the network scope a Hub can be in.
type Scope uint8

const (
	// ScopeInvalid defines an invalid scope.
	ScopeInvalid Scope = 0

	// ScopeLocal identifies local Hubs.
	ScopeLocal Scope = 1

	// ScopePublic identifies public Hubs.
	ScopePublic Scope = 2

	// ScopeTest identifies Hubs for testing.
	ScopeTest Scope = 0xFF
)

const (
	// obsoleteValidAfter is how long a valid Hub may go unseen before it is
	// reported as obsolete by Hub.Obsolete.
	obsoleteValidAfter = 30 * 24 * time.Hour
	// obsoleteInvalidAfter is the shorter grace period applied to Hubs with
	// invalid info/status or that are flagged offline.
	obsoleteInvalidAfter = 7 * 24 * time.Hour
)

// MsgType defines the message type.
type MsgType string

// Message Types.
const (
	MsgTypeAnnouncement = "announcement"
	MsgTypeStatus       = "status"
)

// Hub represents a network node in the SPN.
type Hub struct { //nolint:maligned
	sync.Mutex
	record.Base

	ID        string
	PublicKey *jess.Signet
	Map       string

	// Info holds the Hub's last seen announcement.
	Info   *Announcement
	// Status holds the Hub's last seen status.
	Status *Status

	// Measurements is lazily initialized - always access it via
	// GetMeasurements or GetMeasurementsWithLockedHub, never directly.
	Measurements            *Measurements
	measurementsInitialized bool

	// FirstSeen holds when this Hub was first seen.
	FirstSeen     time.Time
	// VerifiedIPs signifies whether the Hub's IPs have been verified.
	VerifiedIPs   bool
	// InvalidInfo and InvalidStatus mark the Hub's data as invalid and
	// shorten the grace period before the Hub is considered obsolete.
	InvalidInfo   bool
	InvalidStatus bool
}
+
// Announcement is the main message type to publish Hub Information. This only changes if updated manually.
type Announcement struct {
	// Primary Key
	// hash of public key
	// must be checked if it matches the public key
	ID string `cbor:"i"` // via jess.LabeledHash

	// PublicKey *jess.Signet
	// PublicKey // if not part of signature
	// Signature *jess.Letter
	Timestamp int64 `cbor:"t"` // Unix timestamp in seconds

	// Node Information
	Name           string `cbor:"n"`                              // name of the node
	Group          string `cbor:"g,omitempty"  json:",omitempty"` // person or organisation, who is in control of the node (should be same for all nodes of this person or organisation)
	ContactAddress string `cbor:"ca,omitempty" json:",omitempty"` // contact possibility  (recommended, but optional)
	ContactService string `cbor:"cs,omitempty" json:",omitempty"` // type of service of the contact address, if not email

	// currently unused, but collected for later use
	Hosters    []string `cbor:"ho,omitempty" json:",omitempty"` // hoster supply chain (reseller, hosting provider, datacenter operator, ...)
	Datacenter string   `cbor:"dc,omitempty" json:",omitempty"` // datacenter will be bullshit checked
	// Format: CC-COMPANY-INTERNALCODE
	// Eg: DE-Hetzner-FSN1-DC5

	// Network Location and Access
	// If node is behind NAT (or similar), IP addresses must be configured
	IPv4       net.IP   `cbor:"ip4,omitempty" json:",omitempty"` // must be global and accessible
	IPv6       net.IP   `cbor:"ip6,omitempty" json:",omitempty"` // must be global and accessible
	Transports []string `cbor:"tp,omitempty"  json:",omitempty"`
	// {
	//   "spn:17",
	//   "smtp:25", // also support "smtp://:25
	//   "smtp:587",
	//   "imap:143",
	//   "http:80",
	//   "http://example.com:80", // HTTP (based): use full path for request
	//   "https:443",
	//   "ws:80",
	//   "wss://example.com:443/spn",
	// } // protocols with metadata
	// parsedTransports caches the parsed Transports; populated by prepare().
	parsedTransports []*Transport

	// Policies - default permit
	Entry       []string `cbor:"pi,omitempty" json:",omitempty"`
	// entryPolicy caches the parsed Entry policy; populated by prepare().
	entryPolicy endpoints.Endpoints
	// {"+ ", "- *"}
	Exit       []string `cbor:"po,omitempty" json:",omitempty"`
	// exitPolicy caches the parsed Exit policy; populated by prepare().
	exitPolicy endpoints.Endpoints
	// {"- * TCP/25", "- US"}

	// Flags holds flags that signify special states.
	Flags []string `cbor:"f,omitempty" json:",omitempty"`
}
+
+// Copy returns a deep copy of the Announcement.
+func (a *Announcement) Copy() *Announcement {
+	return &Announcement{
+		ID:               a.ID,
+		Timestamp:        a.Timestamp,
+		Name:             a.Name,
+		ContactAddress:   a.ContactAddress,
+		ContactService:   a.ContactService,
+		Hosters:          slices.Clone(a.Hosters),
+		Datacenter:       a.Datacenter,
+		IPv4:             a.IPv4,
+		IPv6:             a.IPv6,
+		Transports:       slices.Clone(a.Transports),
+		parsedTransports: slices.Clone(a.parsedTransports),
+		Entry:            slices.Clone(a.Entry),
+		entryPolicy:      slices.Clone(a.entryPolicy),
+		Exit:             slices.Clone(a.Exit),
+		exitPolicy:       slices.Clone(a.exitPolicy),
+		Flags:            slices.Clone(a.Flags),
+	}
+}
+
+// GetInfo returns the hub info.
+func (h *Hub) GetInfo() *Announcement {
+	h.Lock()
+	defer h.Unlock()
+
+	return h.Info
+}
+
+// GetStatus returns the hub status.
+func (h *Hub) GetStatus() *Status {
+	h.Lock()
+	defer h.Unlock()
+
+	return h.Status
+}
+
+// GetMeasurements returns the hub measurements.
+// This method should always be used instead of direct access.
+func (h *Hub) GetMeasurements() *Measurements {
+	h.Lock()
+	defer h.Unlock()
+
+	return h.GetMeasurementsWithLockedHub()
+}
+
// GetMeasurementsWithLockedHub returns the hub measurements.
// The caller must hold the lock to Hub.
// This method should always be used instead of direct access.
func (h *Hub) GetMeasurementsWithLockedHub() *Measurements {
	// Lazily initialize on first access: fetch the shared measurements for
	// this Hub ID (presumably deduplicating measurements across Hub
	// instances with the same ID - confirm in measurements.go) and make
	// sure they are ready for use.
	if !h.measurementsInitialized {
		h.Measurements = getSharedMeasurements(h.ID, h.Measurements)
		h.Measurements.check()
		h.measurementsInitialized = true
	}

	return h.Measurements
}
+
+// Verified return whether the Hub has been verified.
+func (h *Hub) Verified() bool {
+	h.Lock()
+	defer h.Unlock()
+
+	return h.VerifiedIPs
+}
+
+// String returns a human-readable representation of the Hub.
+func (h *Hub) String() string {
+	h.Lock()
+	defer h.Unlock()
+
+	return "<Hub " + h.getName() + ">"
+}
+
+// StringWithoutLocking returns a human-readable representation of the Hub without locking it.
+func (h *Hub) StringWithoutLocking() string {
+	return "<Hub " + h.getName() + ">"
+}
+
+// Name returns a human-readable version of a Hub's name. This name will likely consist of two parts: the given name and the ending of the ID to make it unique.
+func (h *Hub) Name() string {
+	h.Lock()
+	defer h.Unlock()
+
+	return h.getName()
+}
+
+func (h *Hub) getName() string {
+	// Check for a short ID that is sometimes used for testing.
+	if len(h.ID) < 8 {
+		return h.ID
+	}
+
+	shortenedID := h.ID[len(h.ID)-8:len(h.ID)-4] +
+		"-" +
+		h.ID[len(h.ID)-4:]
+
+	// Be more careful, as the Hub name is user input.
+	switch {
+	case h.Info.Name == "":
+		return shortenedID
+	case len(h.Info.Name) > 16:
+		return h.Info.Name[:16] + " " + shortenedID
+	default:
+		return h.Info.Name + " " + shortenedID
+	}
+}
+
+// Obsolete returns if the Hub is obsolete and may be deleted.
+func (h *Hub) Obsolete() bool {
+	h.Lock()
+	defer h.Unlock()
+
+	// Check if Hub is valid.
+	var valid bool
+	switch {
+	case h.InvalidInfo:
+	case h.InvalidStatus:
+	case h.HasFlag(FlagOffline):
+		// Treat offline as invalid.
+	default:
+		valid = true
+	}
+
+	// Check when Hub was last seen.
+	lastSeen := h.FirstSeen
+	if h.Status.Timestamp != 0 {
+		lastSeen = time.Unix(h.Status.Timestamp, 0)
+	}
+
+	// Check if Hub is obsolete.
+	if valid {
+		return time.Now().Add(-obsoleteValidAfter).After(lastSeen)
+	}
+	return time.Now().Add(-obsoleteInvalidAfter).After(lastSeen)
+}
+
+// HasFlag returns whether the Announcement or Status has the given flag set.
+func (h *Hub) HasFlag(flagName string) bool {
+	switch {
+	case h.Status != nil && slices.Contains[[]string, string](h.Status.Flags, flagName):
+		return true
+	case h.Info != nil && slices.Contains[[]string, string](h.Info.Flags, flagName):
+		return true
+	}
+	return false
+}
+
+// Equal returns whether the given Announcements are equal.
+func (a *Announcement) Equal(b *Announcement) bool {
+	switch {
+	case a == nil || b == nil:
+		return false
+	case a.ID != b.ID:
+		return false
+	case a.Timestamp != b.Timestamp:
+		return false
+	case a.Name != b.Name:
+		return false
+	case a.ContactAddress != b.ContactAddress:
+		return false
+	case a.ContactService != b.ContactService:
+		return false
+	case !equalStringSlice(a.Hosters, b.Hosters):
+		return false
+	case a.Datacenter != b.Datacenter:
+		return false
+	case !a.IPv4.Equal(b.IPv4):
+		return false
+	case !a.IPv6.Equal(b.IPv6):
+		return false
+	case !equalStringSlice(a.Transports, b.Transports):
+		return false
+	case !equalStringSlice(a.Entry, b.Entry):
+		return false
+	case !equalStringSlice(a.Exit, b.Exit):
+		return false
+	case !equalStringSlice(a.Flags, b.Flags):
+		return false
+	default:
+		return true
+	}
+}
+
+// validateFormatting check if all values conform to the basic format.
+func (a *Announcement) validateFormatting() error {
+	if err := checkStringFormat("ID", a.ID, 255); err != nil {
+		return err
+	}
+	if err := checkStringFormat("Name", a.Name, 32); err != nil {
+		return err
+	}
+	if err := checkStringFormat("Group", a.Group, 32); err != nil {
+		return err
+	}
+	if err := checkStringFormat("ContactAddress", a.ContactAddress, 255); err != nil {
+		return err
+	}
+	if err := checkStringFormat("ContactService", a.ContactService, 255); err != nil {
+		return err
+	}
+	if err := checkStringSliceFormat("Hosters", a.Hosters, 255, 255); err != nil {
+		return err
+	}
+	if err := checkStringFormat("Datacenter", a.Datacenter, 255); err != nil {
+		return err
+	}
+	if err := checkIPFormat("IPv4", a.IPv4); err != nil {
+		return err
+	}
+	if err := checkIPFormat("IPv6", a.IPv6); err != nil {
+		return err
+	}
+	if err := checkStringSliceFormat("Transports", a.Transports, 255, 255); err != nil {
+		return err
+	}
+	if err := checkStringSliceFormat("Entry", a.Entry, 255, 255); err != nil {
+		return err
+	}
+	if err := checkStringSliceFormat("Exit", a.Exit, 255, 255); err != nil {
+		return err
+	}
+	if err := checkStringSliceFormat("Flags", a.Flags, 16, 32); err != nil {
+		return err
+	}
+	return nil
+}
+
// Prepare prepares the announcement by parsing policies and transports.
// If fields are already parsed, they will only be parsed again, when force is set to true.
// Returns ErrMissingTransports if no valid transport is configured.
func (a *Announcement) prepare(force bool) error {
	var err error

	// Parse the entry and exit policies.
	if len(a.entryPolicy) == 0 || force {
		if a.entryPolicy, err = endpoints.ParseEndpoints(a.Entry); err != nil {
			return fmt.Errorf("failed to parse entry policy: %w", err)
		}
	}
	if len(a.exitPolicy) == 0 || force {
		if a.exitPolicy, err = endpoints.ParseEndpoints(a.Exit); err != nil {
			return fmt.Errorf("failed to parse exit policy: %w", err)
		}
	}

	// Parse transports.
	if len(a.parsedTransports) == 0 || force {
		parsed, errs := ParseTransports(a.Transports)
		// Log parsing warnings - individual invalid transports are skipped,
		// not fatal. (The log line assumes the errors read like
		// "invalid transport ..." - TODO confirm in ParseTransports.)
		for _, err := range errs {
			log.Warningf("hub: Hub %s (%s) has configured an %s", a.Name, a.ID, err)
		}
		// Check if there are any valid transports.
		if len(parsed) == 0 {
			return ErrMissingTransports
		}
		a.parsedTransports = parsed
	}

	return nil
}
+
+// EntryPolicy returns the Hub's entry policy.
+func (a *Announcement) EntryPolicy() endpoints.Endpoints {
+	return a.entryPolicy
+}
+
+// ExitPolicy returns the Hub's exit policy.
+func (a *Announcement) ExitPolicy() endpoints.Endpoints {
+	return a.exitPolicy
+}
+
+// ParsedTransports returns the Hub's parsed transports.
+func (a *Announcement) ParsedTransports() []*Transport {
+	return a.parsedTransports
+}
+
+// HasFlag returns whether the Announcement has the given flag set.
+func (a *Announcement) HasFlag(flagName string) bool {
+	return slices.Contains[[]string, string](a.Flags, flagName)
+}
+
+// String returns the string representation of the scope.
+func (s Scope) String() string {
+	switch s {
+	case ScopeInvalid:
+		return "invalid"
+	case ScopeLocal:
+		return "local"
+	case ScopePublic:
+		return "public"
+	case ScopeTest:
+		return "test"
+	default:
+		return "unknown"
+	}
+}
+
// equalStringSlice reports whether a and b have the same length and the same
// elements in the same order. Delegates to slices.Equal instead of a
// hand-rolled loop (the slices package is already imported by this file).
func equalStringSlice(a, b []string) bool {
	return slices.Equal(a, b)
}
diff --git a/spn/hub/hub_test.go b/spn/hub/hub_test.go
new file mode 100644
index 00000000..70cc5b16
--- /dev/null
+++ b/spn/hub/hub_test.go
@@ -0,0 +1,79 @@
+package hub
+
+import (
+	"net"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+
+	"github.com/safing/portbase/modules"
+	_ "github.com/safing/portmaster/service/core/base"
+	"github.com/safing/portmaster/service/core/pmtesting"
+)
+
// TestMain registers a minimal module depending on "base" so that the
// database is available to the tests in this package.
func TestMain(m *testing.M) {
	// TODO: We need the database module, so maybe set up a module for this package.
	module := modules.Register("hub", nil, nil, nil, "base")
	pmtesting.TestMain(m, module)
}
+
// TestEquality checks Announcement.Equal for self-equality and inequality.
// NOTE(review): each "no match" Announcement below also differs from a in
// every other field, so the cases do not isolate single-field differences;
// Group and Flags inequality are not covered at all.
func TestEquality(t *testing.T) {
	t.Parallel()

	// empty match
	a := &Announcement{}
	assert.True(t, a.Equal(a), "should match itself") //nolint:gocritic // This is a test.

	// full match
	a = &Announcement{
		ID:             "a",
		Timestamp:      1,
		Name:           "a",
		ContactAddress: "a",
		ContactService: "a",
		Hosters:        []string{"a", "b"},
		Datacenter:     "a",
		IPv4:           net.IPv4(1, 2, 3, 4),
		IPv6:           net.ParseIP("::1"),
		Transports:     []string{"a", "b"},
		Entry:          []string{"a", "b"},
		Exit:           []string{"a", "b"},
	}
	assert.True(t, a.Equal(a), "should match itself") //nolint:gocritic // This is a test.

	// no match
	b := &Announcement{ID: "b"}
	assert.False(t, a.Equal(b), "should not match")
	b = &Announcement{Timestamp: 2}
	assert.False(t, a.Equal(b), "should not match")
	b = &Announcement{Name: "b"}
	assert.False(t, a.Equal(b), "should not match")
	b = &Announcement{ContactAddress: "b"}
	assert.False(t, a.Equal(b), "should not match")
	b = &Announcement{ContactService: "b"}
	assert.False(t, a.Equal(b), "should not match")
	b = &Announcement{Hosters: []string{"b", "c"}}
	assert.False(t, a.Equal(b), "should not match")
	b = &Announcement{Datacenter: "b"}
	assert.False(t, a.Equal(b), "should not match")
	b = &Announcement{IPv4: net.IPv4(1, 2, 3, 5)}
	assert.False(t, a.Equal(b), "should not match")
	b = &Announcement{IPv6: net.ParseIP("::2")}
	assert.False(t, a.Equal(b), "should not match")
	b = &Announcement{Transports: []string{"b", "c"}}
	assert.False(t, a.Equal(b), "should not match")
	b = &Announcement{Entry: []string{"b", "c"}}
	assert.False(t, a.Equal(b), "should not match")
	b = &Announcement{Exit: []string{"b", "c"}}
	assert.False(t, a.Equal(b), "should not match")
}
+
// TestStringify checks Hub.String: IDs shorter than 8 characters are used
// verbatim, longer IDs are shortened to a "xxxx-xxxx" suffix, and names
// longer than 16 characters are truncated.
func TestStringify(t *testing.T) {
	t.Parallel()

	assert.Equal(t, "<Hub abcdefg>", (&Hub{ID: "abcdefg", Info: &Announcement{}}).String())
	assert.Equal(t, "<Hub abcd-efgh>", (&Hub{ID: "abcdefgh", Info: &Announcement{}}).String())
	assert.Equal(t, "<Hub bcde-fghi>", (&Hub{ID: "abcdefghi", Info: &Announcement{}}).String())
	assert.Equal(t, "<Hub Franz bcde-fghi>", (&Hub{ID: "abcdefghi", Info: &Announcement{Name: "Franz"}}).String())
	assert.Equal(t, "<Hub AProbablyAutoGen bcde-fghi>", (&Hub{ID: "abcdefghi", Info: &Announcement{Name: "AProbablyAutoGeneratedName"}}).String())
}
diff --git a/spn/hub/intel.go b/spn/hub/intel.go
new file mode 100644
index 00000000..8bc505ed
--- /dev/null
+++ b/spn/hub/intel.go
@@ -0,0 +1,191 @@
+package hub
+
+import (
+	"errors"
+	"fmt"
+	"net"
+
+	"github.com/ghodss/yaml"
+
+	"github.com/safing/jess/lhash"
+	"github.com/safing/portmaster/service/profile/endpoints"
+)
+
// Intel holds a collection of various security related data collections on Hubs.
type Intel struct {
	// BootstrapHubs is list of transports that also contain an IP and the Hub's ID.
	BootstrapHubs []string

	// Hubs holds intel regarding specific Hubs, keyed by Hub ID.
	Hubs map[string]*HubIntel

	// AdviseOnlyTrustedHubs advises to only use trusted Hubs regardless of intended purpose.
	AdviseOnlyTrustedHubs bool
	// AdviseOnlyTrustedHomeHubs advises to only use trusted Hubs for Home Hubs.
	AdviseOnlyTrustedHomeHubs bool
	// AdviseOnlyTrustedDestinationHubs advises to only use trusted Hubs for Destination Hubs.
	AdviseOnlyTrustedDestinationHubs bool

	// Hub Advisories advise on the usage of Hubs and take the form of Endpoint Lists that match on both IPv4 and IPv6 addresses and their related data.

	// HubAdvisory always affects all Hubs.
	HubAdvisory []string
	// HomeHubAdvisory is only taken into account when selecting a Home Hub.
	HomeHubAdvisory []string
	// DestinationHubAdvisory is only taken into account when selecting a Destination Hub.
	DestinationHubAdvisory []string

	// Regions defines regions to assist network optimization.
	Regions []*RegionConfig

	// VirtualNetworks holds network configurations for virtual cloud networks.
	VirtualNetworks []*VirtualNetworkConfig

	// parsed holds the parsed advisory endpoint lists; set by ParseAdvisories.
	parsed *ParsedIntel
}

// HubIntel holds Hub-related data.
type HubIntel struct { //nolint:golint
	// Trusted specifies if the Hub is specially designated for more sensitive tasks, such as handling unencrypted traffic.
	Trusted bool

	// Discontinued specifies if the Hub has been discontinued and should be marked as offline and removed.
	Discontinued bool

	// VerifiedOwner holds the name of the verified owner / operator of the Hub.
	VerifiedOwner string

	// Override is used to override certain Hub information.
	Override *InfoOverride
}
+
// RegionConfig holds the configuration of a region.
// The lane counts below steer how many connections (lanes) Hubs build
// between and within regions during network optimization.
type RegionConfig struct {
	// ID is the internal identifier of the region.
	ID string
	// Name is a human-readable name of the region.
	Name string
	// MemberPolicy specifies a list for including members.
	MemberPolicy []string

	// RegionalMinLanes specifies how many lanes other regions should build
	// to this region.
	RegionalMinLanes int
	// RegionalMinLanesPerHub specifies how many lanes other regions should
	// build to this region, per Hub in this region.
	// This value will usually be below one.
	RegionalMinLanesPerHub float64
	// RegionalMaxLanesOnHub specifies how many lanes from or to another region may be
	// built on one Hub per region.
	RegionalMaxLanesOnHub int

	// SatelliteMinLanes specifies how many lanes satellites (Hubs without
	// region) should build to this region.
	SatelliteMinLanes int
	// SatelliteMinLanesPerHub specifies how many lanes satellites (Hubs without
	// region) should build to this region, per Hub in this region.
	// This value will usually be below one.
	SatelliteMinLanesPerHub float64

	// InternalMinLanesOnHub specifies how many lanes every Hub should create
	// within the region at minimum.
	InternalMinLanesOnHub int
	// InternalMaxHops specifies the max hop constraint for internally optimizing
	// the region.
	InternalMaxHops int
}

// VirtualNetworkConfig holds configuration of a virtual network that binds multiple Hubs together.
type VirtualNetworkConfig struct {
	// Name is a human-readable name of the virtual network.
	Name string
	// Force forces the use of the mapped IP addresses after the Hub's IPs have been verified.
	Force bool
	// Mapping maps Hub IDs to internal IP addresses.
	Mapping map[string]net.IP
}
+
// ParsedIntel holds a collection of parsed intel data.
type ParsedIntel struct {
	// HubAdvisory always affects all Hubs.
	HubAdvisory endpoints.Endpoints

	// HomeHubAdvisory is only taken into account when selecting a Home Hub.
	HomeHubAdvisory endpoints.Endpoints

	// DestinationHubAdvisory is only taken into account when selecting a Destination Hub.
	DestinationHubAdvisory endpoints.Endpoints
}

// Parsed returns the collection of parsed intel data.
// It returns nil until ParseIntel or ParseAdvisories has been run.
func (i *Intel) Parsed() *ParsedIntel {
	return i.parsed
}
+
+// ParseIntel parses Hub intelligence data.
+func ParseIntel(data []byte) (*Intel, error) {
+	// Load data into struct.
+	intel := &Intel{}
+	err := yaml.Unmarshal(data, intel)
+	if err != nil {
+		return nil, fmt.Errorf("failed to parse data: %w", err)
+	}
+
+	// Parse all endpoint lists.
+	err = intel.ParseAdvisories()
+	if err != nil {
+		return nil, err
+	}
+
+	return intel, nil
+}
+
// ParseAdvisories parses all advisory endpoint lists and stores the result
// in i.parsed. On error, i.parsed is left freshly initialized but incomplete.
func (i *Intel) ParseAdvisories() (err error) {
	// Start with a fresh parsed set so no stale data survives a re-parse.
	i.parsed = &ParsedIntel{}

	i.parsed.HubAdvisory, err = endpoints.ParseEndpoints(i.HubAdvisory)
	if err != nil {
		return fmt.Errorf("failed to parse HubAdvisory list: %w", err)
	}

	i.parsed.HomeHubAdvisory, err = endpoints.ParseEndpoints(i.HomeHubAdvisory)
	if err != nil {
		return fmt.Errorf("failed to parse HomeHubAdvisory list: %w", err)
	}

	i.parsed.DestinationHubAdvisory, err = endpoints.ParseEndpoints(i.DestinationHubAdvisory)
	if err != nil {
		return fmt.Errorf("failed to parse DestinationHubAdvisory list: %w", err)
	}

	return nil
}
+
// ParseBootstrapHub parses a bootstrap hub given in transport URL form.
// The Hub's ID is expected in the URL fragment (t.Option) and the Hub's IP
// address in the host part (t.Domain); both are stripped from the returned
// transport and returned separately.
func ParseBootstrapHub(bootstrapTransport string) (t *Transport, hubID string, hubIP net.IP, err error) {
	// Parse transport and check Hub ID.
	t, err = ParseTransport(bootstrapTransport)
	if err != nil {
		return nil, "", nil, fmt.Errorf("failed to parse transport: %w", err)
	}
	if t.Option == "" {
		return nil, "", nil, errors.New("missing hub ID in URL fragment")
	}
	// The Hub ID must be a valid Base58-encoded labeled hash.
	if _, err := lhash.FromBase58(t.Option); err != nil {
		return nil, "", nil, fmt.Errorf("hub ID is invalid: %w", err)
	}

	// Parse IP address from transport.
	ip := net.ParseIP(t.Domain)
	if ip == nil {
		return nil, "", nil, errors.New("invalid IP address (domains are not supported for bootstrapping)")
	}

	// Clean up transport for hub info.
	id := t.Option
	t.Domain = ""
	t.Option = ""

	return t, id, ip, nil
}
diff --git a/spn/hub/intel_override.go b/spn/hub/intel_override.go
new file mode 100644
index 00000000..0fa7f29c
--- /dev/null
+++ b/spn/hub/intel_override.go
@@ -0,0 +1,17 @@
+package hub
+
+import "github.com/safing/portmaster/service/intel/geoip"
+
// InfoOverride holds data to override hub info information.
type InfoOverride struct {
	// ContinentCode overrides the continent code of the geoip data.
	ContinentCode string
	// CountryCode overrides the country code of the geoip data.
	CountryCode string
	// Coordinates overrides the geo coordinates code of the geoip data.
	Coordinates *geoip.Coordinates
	// ASN overrides the Autonomous System Number of the geoip data.
	ASN uint
	// ASOrg overrides the Autonomous System Organization of the geoip data.
	ASOrg string
}
diff --git a/spn/hub/measurements.go b/spn/hub/measurements.go
new file mode 100644
index 00000000..135a67c9
--- /dev/null
+++ b/spn/hub/measurements.go
@@ -0,0 +1,231 @@
+package hub
+
+import (
+	"sync"
+	"time"
+
+	"github.com/tevino/abool"
+)
+
// MaxCalculatedCost specifies the max calculated cost to be used for an unknown high cost.
const MaxCalculatedCost = 1000000

// Measurements holds various measurements relating to a Hub.
// Fields may not be accessed directly.
type Measurements struct {
	sync.Mutex

	// Latency designates the latency between these Hubs.
	// It is specified in nanoseconds.
	Latency time.Duration
	// LatencyMeasuredAt holds when the latency was measured.
	LatencyMeasuredAt time.Time

	// Capacity designates the available bandwidth between these Hubs.
	// It is specified in bit/s.
	Capacity int
	// CapacityMeasuredAt holds when the capacity was measured.
	CapacityMeasuredAt time.Time

	// CalculatedCost stores the calculated cost for direct access.
	// It is not set automatically, but needs to be set when needed.
	CalculatedCost float32

	// GeoProximity stores an approximation of the geolocation proximity.
	// The value is between 0 (other side of the world) and 100 (same location).
	GeoProximity float32

	// persisted holds whether the Measurements have been persisted to the
	// database. It is lazily initialized by check().
	persisted *abool.AtomicBool
}
+
+// NewMeasurements returns a new measurements struct.
+func NewMeasurements() *Measurements {
+	m := &Measurements{
+		CalculatedCost: MaxCalculatedCost, // Push to back when sorting without data.
+	}
+	m.check()
+	return m
+}
+
+// Copy returns a copy of the measurements.
+func (m *Measurements) Copy() *Measurements {
+	copied := &Measurements{
+		Latency:            m.Latency,
+		LatencyMeasuredAt:  m.LatencyMeasuredAt,
+		Capacity:           m.Capacity,
+		CapacityMeasuredAt: m.CapacityMeasuredAt,
+		CalculatedCost:     m.CalculatedCost,
+	}
+	copied.check()
+	return copied
+}
+
// check checks if the Measurements are properly initialized and ready to use.
// It is safe to call on a nil receiver.
func (m *Measurements) check() {
	if m == nil {
		return
	}

	m.Lock()
	defer m.Unlock()

	// Lazily initialize the persisted flag; it defaults to set.
	if m.persisted == nil {
		m.persisted = abool.NewBool(true)
	}
}
+
// IsPersisted returns whether changes to the measurements have been persisted.
// The receiver must have been initialized via check(); otherwise this may
// panic on a nil persisted flag.
func (m *Measurements) IsPersisted() bool {
	return m.persisted.IsSet()
}
+
+// Valid returns whether there is a valid value .
+func (m *Measurements) Valid() bool {
+	m.Lock()
+	defer m.Unlock()
+
+	switch {
+	case m.Latency == 0:
+		// Latency is not set.
+	case m.Capacity == 0:
+		// Capacity is not set.
+	case m.CalculatedCost == 0:
+		// CalculatedCost is not set.
+	case m.CalculatedCost == MaxCalculatedCost:
+		// CalculatedCost is set to static max value.
+	default:
+		return true
+	}
+
+	return false
+}
+
+// Expired returns whether any of the measurements has expired - calculated
+// with the given TTL.
+func (m *Measurements) Expired(ttl time.Duration) bool {
+	expiry := time.Now().Add(-ttl)
+
+	m.Lock()
+	defer m.Unlock()
+
+	switch {
+	case expiry.After(m.LatencyMeasuredAt):
+		return true
+	case expiry.After(m.CapacityMeasuredAt):
+		return true
+	default:
+		return false
+	}
+}
+
// SetLatency sets the latency to the given value, records the measurement
// time and marks the measurements as not persisted.
func (m *Measurements) SetLatency(latency time.Duration) {
	m.Lock()
	defer m.Unlock()

	m.Latency = latency
	m.LatencyMeasuredAt = time.Now()
	m.persisted.UnSet()
}
+
// GetLatency returns the latency and when it was measured.
func (m *Measurements) GetLatency() (latency time.Duration, measuredAt time.Time) {
	m.Lock()
	defer m.Unlock()

	return m.Latency, m.LatencyMeasuredAt
}
+
// SetCapacity sets the capacity to the given value, records the measurement
// time and marks the measurements as not persisted.
// The capacity is measured in bit/s.
func (m *Measurements) SetCapacity(capacity int) {
	m.Lock()
	defer m.Unlock()

	m.Capacity = capacity
	m.CapacityMeasuredAt = time.Now()
	m.persisted.UnSet()
}
+
// GetCapacity returns the capacity and when it was measured.
// The capacity is measured in bit/s.
func (m *Measurements) GetCapacity() (capacity int, measuredAt time.Time) {
	m.Lock()
	defer m.Unlock()

	return m.Capacity, m.CapacityMeasuredAt
}
+
// SetCalculatedCost sets the calculated cost to the given value and marks
// the measurements as not persisted.
// The calculated cost is not set automatically, but needs to be set when needed.
func (m *Measurements) SetCalculatedCost(cost float32) {
	m.Lock()
	defer m.Unlock()

	m.CalculatedCost = cost
	m.persisted.UnSet()
}
+
// GetCalculatedCost returns the calculated cost.
// The calculated cost is not set automatically, but needs to be set when needed.
// A nil receiver reports the static maximum cost.
func (m *Measurements) GetCalculatedCost() (cost float32) {
	if m == nil {
		return MaxCalculatedCost
	}

	m.Lock()
	defer m.Unlock()

	return m.CalculatedCost
}
+
// SetGeoProximity sets the geolocation proximity to the given value and
// marks the measurements as not persisted.
func (m *Measurements) SetGeoProximity(geoProximity float32) {
	m.Lock()
	defer m.Unlock()

	m.GeoProximity = geoProximity
	m.persisted.UnSet()
}
+
// GetGeoProximity returns the geolocation proximity.
// A nil receiver reports zero proximity.
func (m *Measurements) GetGeoProximity() (geoProximity float32) {
	if m == nil {
		return 0
	}

	m.Lock()
	defer m.Unlock()

	return m.GeoProximity
}
+
// measurementsRegistry holds the shared Measurements instances, indexed by
// Hub ID, so that all users of a Hub share the same instance.
var (
	measurementsRegistry     = make(map[string]*Measurements)
	measurementsRegistryLock sync.Mutex
)
+
+func getSharedMeasurements(hubID string, existing *Measurements) *Measurements {
+	measurementsRegistryLock.Lock()
+	defer measurementsRegistryLock.Unlock()
+
+	// 1. Check registry and return shared measurements.
+	m, ok := measurementsRegistry[hubID]
+	if ok {
+		return m
+	}
+
+	// 2. Use existing and make it shared, if available.
+	if existing != nil {
+		existing.check()
+		measurementsRegistry[hubID] = existing
+		return existing
+	}
+
+	// 3. Create new measurements.
+	m = NewMeasurements()
+	measurementsRegistry[hubID] = m
+	return m
+}
diff --git a/spn/hub/status.go b/spn/hub/status.go
new file mode 100644
index 00000000..0d5c4808
--- /dev/null
+++ b/spn/hub/status.go
@@ -0,0 +1,308 @@
+package hub
+
+import (
+	"errors"
+	"fmt"
+	"sort"
+	"time"
+
+	"golang.org/x/exp/slices"
+
+	"github.com/safing/jess"
+)
+
// VersionOffline is a special version used to signify that the Hub has gone offline.
//
// Deprecated: use FlagOffline instead.
const VersionOffline = "offline"

// Status Flags.
const (
	// FlagNetError signifies that the Hub reports a network connectivity failure or impairment.
	FlagNetError = "net-error"

	// FlagOffline signifies that the Hub has gone offline by itself.
	FlagOffline = "offline"

	// FlagAllowUnencrypted signifies that the Hub is available to handle unencrypted connections.
	FlagAllowUnencrypted = "allow-unencrypted"
)
+
// Status is the message type used to update changing Hub Information. Changes are made automatically.
type Status struct {
	// Timestamp holds the creation time of this status.
	// NOTE(review): presumably unix seconds like Announcement.Timestamp — confirm.
	Timestamp int64 `cbor:"t"`

	// Version holds the current software version of the Hub.
	Version string `cbor:"v"`

	// Routing Information
	Keys  map[string]*Key `cbor:"k,omitempty" json:",omitempty"` // public keys (with type)
	Lanes []*Lane         `cbor:"c,omitempty" json:",omitempty"` // Connections to other Hubs.

	// Status Information
	// Load describes max(CPU, Memory) in percent, averaged over at least 15
	// minutes. Load is published in fixed steps only.
	Load int `cbor:"l,omitempty" json:",omitempty"`

	// Flags holds flags that signify special states.
	Flags []string `cbor:"f,omitempty" json:",omitempty"`
}
+
// Key represents a semi-ephemeral public key used for 0-RTT connection establishment.
type Key struct {
	// Scheme designates the jess tool/scheme of the key.
	Scheme string
	// Key holds the serialized public key.
	Key []byte
	// Expires holds the expiry time as a unix timestamp in seconds.
	Expires int64
}
+
// Lane represents a connection to another Hub.
type Lane struct {
	// ID is the Hub ID of the peer.
	ID string

	// Capacity designates the available bandwidth between these Hubs.
	// It is specified in bit/s.
	Capacity int

	// Latency designates the latency between these Hubs.
	// It is specified in nanoseconds.
	Latency time.Duration
}
+
+// Copy returns a deep copy of the Status.
+func (s *Status) Copy() *Status {
+	newStatus := &Status{
+		Timestamp: s.Timestamp,
+		Version:   s.Version,
+		Lanes:     slices.Clone(s.Lanes),
+		Load:      s.Load,
+		Flags:     slices.Clone(s.Flags),
+	}
+	// Copy map.
+	newStatus.Keys = make(map[string]*Key, len(s.Keys))
+	for k, v := range s.Keys {
+		newStatus.Keys[k] = v
+	}
+	return newStatus
+}
+
// SelectSignet selects the public key to use for initiating connections to
// that Hub. It returns nil if the Hub has no Status or no unexpired key.
// Note: map iteration order is random, so an arbitrary valid key is chosen.
func (h *Hub) SelectSignet() *jess.Signet {
	h.Lock()
	defer h.Unlock()

	// Return no Signet if we don't have a Status.
	if h.Status == nil {
		return nil
	}

	// TODO: select key based on preferred alg?
	now := time.Now().Unix()
	for id, key := range h.Status.Keys {
		if now < key.Expires {
			return &jess.Signet{
				ID:     id,
				Scheme: key.Scheme,
				Key:    key.Key,
				Public: true,
			}
		}
	}

	return nil
}
+
+// GetSignet returns the public key identified by the given ID from the Hub Status.
+func (h *Hub) GetSignet(id string, recipient bool) (*jess.Signet, error) {
+	h.Lock()
+	defer h.Unlock()
+
+	// check if public key is being requested
+	if !recipient {
+		return nil, jess.ErrSignetNotFound
+	}
+	// check if ID exists
+	key, ok := h.Status.Keys[id]
+	if !ok {
+		return nil, jess.ErrSignetNotFound
+	}
+	// transform and return
+	return &jess.Signet{
+		ID:     id,
+		Scheme: key.Scheme,
+		Key:    key.Key,
+		Public: true,
+	}, nil
+}
+
+// AddLane adds a new Lane to the Hub Status.
+func (h *Hub) AddLane(newLane *Lane) error {
+	h.Lock()
+	defer h.Unlock()
+
+	// validity check
+	if h.Status == nil {
+		return ErrMissingInfo
+	}
+
+	// check if duplicate
+	for _, lane := range h.Status.Lanes {
+		if newLane.ID == lane.ID {
+			return errors.New("lane already exists")
+		}
+	}
+
+	// add
+	h.Status.Lanes = append(h.Status.Lanes, newLane)
+	return nil
+}
+
+// RemoveLane removes a Lane from the Hub Status.
+func (h *Hub) RemoveLane(hubID string) error {
+	h.Lock()
+	defer h.Unlock()
+
+	// validity check
+	if h.Status == nil {
+		return ErrMissingInfo
+	}
+
+	for key, lane := range h.Status.Lanes {
+		if lane.ID == hubID {
+			h.Status.Lanes = append(h.Status.Lanes[:key], h.Status.Lanes[key+1:]...)
+			break
+		}
+	}
+
+	return nil
+}
+
+// GetLaneTo returns the lane to the given Hub, if it exists.
+func (h *Hub) GetLaneTo(hubID string) *Lane {
+	h.Lock()
+	defer h.Unlock()
+
+	// validity check
+	if h.Status == nil {
+		return nil
+	}
+
+	for _, lane := range h.Status.Lanes {
+		if lane.ID == hubID {
+			return lane
+		}
+	}
+
+	return nil
+}
+
+// Equal returns whether the Lane is equal to the given one.
+func (l *Lane) Equal(other *Lane) bool {
+	switch {
+	case l == nil || other == nil:
+		return false
+	case l.ID != other.ID:
+		return false
+	case l.Capacity != other.Capacity:
+		return false
+	case l.Latency != other.Latency:
+		return false
+	}
+	return true
+}
+
// validateFormatting checks if all values conform to the basic format,
// enforcing maximum counts and per-value limits via the shared check* helpers.
func (s *Status) validateFormatting() error {
	// public keys
	if len(s.Keys) > 255 {
		return fmt.Errorf("field Keys with array/slice length of %d exceeds max length of %d", len(s.Keys), 255)
	}
	for keyID, key := range s.Keys {
		if err := checkStringFormat("Keys#ID", keyID, 255); err != nil {
			return err
		}
		if err := checkStringFormat("Keys.Scheme", key.Scheme, 255); err != nil {
			return err
		}
		if err := checkByteSliceFormat("Keys.Key", key.Key, 1024); err != nil {
			return err
		}
	}

	// connections
	if len(s.Lanes) > 255 {
		return fmt.Errorf("field Lanes with array/slice length of %d exceeds max length of %d", len(s.Lanes), 255)
	}
	for _, lanes := range s.Lanes {
		if err := checkStringFormat("Lanes.ID", lanes.ID, 255); err != nil {
			return err
		}
	}

	// Flags
	if err := checkStringSliceFormat("Flags", s.Flags, 255, 255); err != nil {
		return err
	}

	return nil
}
+
// String returns a compact string representation of the Lane.
// Note: Latency is formatted with %d and thus printed as raw nanoseconds.
func (l *Lane) String() string {
	return fmt.Sprintf("<%s cap=%d lat=%d>", l.ID, l.Capacity, l.Latency)
}
+
+// LanesEqual returns whether the given []*Lane are equal.
+func LanesEqual(a, b []*Lane) bool {
+	if len(a) != len(b) {
+		return false
+	}
+
+	for i, l := range a {
+		if !l.Equal(b[i]) {
+			return false
+		}
+	}
+
+	return true
+}
+
// lanes implements sort.Interface to sort Lanes by the Hub ID of their peer.
type lanes []*Lane

func (l lanes) Len() int           { return len(l) }
func (l lanes) Swap(i, j int)      { l[i], l[j] = l[j], l[i] }
func (l lanes) Less(i, j int) bool { return l[i].ID < l[j].ID }

// SortLanes sorts a slice of Lanes by the Hub ID of their peer.
func SortLanes(l []*Lane) {
	sort.Sort(lanes(l))
}
+
+// HasFlag returns whether the Status has the given flag set.
+func (s *Status) HasFlag(flagName string) bool {
+	return slices.Contains[[]string, string](s.Flags, flagName)
+}
+
// FlagsEqual returns whether the given status flags are equal, ignoring
// order. The given slices are not modified (fix: previously both inputs
// were sorted in place, surprising callers).
func FlagsEqual(a, b []string) bool {
	// Cannot be equal if lengths are different.
	if len(a) != len(b) {
		return false
	}

	// If both are empty, they are equal.
	if len(a) == 0 {
		return true
	}

	// Sort copies so callers do not observe their slices being reordered.
	sortedA := slices.Clone(a)
	sortedB := slices.Clone(b)
	sort.Strings(sortedA)
	sort.Strings(sortedB)

	// Compare values.
	return slices.Equal(sortedA, sortedB)
}
diff --git a/spn/hub/transport.go b/spn/hub/transport.go
new file mode 100644
index 00000000..aa8f3bf9
--- /dev/null
+++ b/spn/hub/transport.go
@@ -0,0 +1,152 @@
+package hub
+
+import (
+	"errors"
+	"fmt"
+	"net/url"
+	"strconv"
+	"strings"
+
+	"golang.org/x/exp/slices"
+)
+
// Examples:
// "spn:17",
// "smtp:25",
// "smtp:587",
// "imap:143",
// "http:80",
// "http://example.com:80/example", // HTTP (based): use full path for request
// "https:443",
// "ws:80",
// "wss://example.com:443/spn",

// Transport represents an endpoint that others can connect to. This allows for use of different protocols, ports and infrastructure integration.
type Transport struct {
	Protocol string // URL scheme, eg. "spn", "http".
	Domain   string // Hostname; empty when only a port is given.
	Port     uint16 // Always non-zero after successful parsing.
	Path     string // Request path incl. query; root "/" is normalized to "".
	Option   string // URL fragment; carries extra data such as a Hub ID.
}
+
+// ParseTransports returns a list of parsed transports and errors from parsing
+// the given definitions.
+func ParseTransports(definitions []string) (transports []*Transport, errs []error) {
+	transports = make([]*Transport, 0, len(definitions))
+	for _, definition := range definitions {
+		parsed, err := ParseTransport(definition)
+		if err != nil {
+			errs = append(errs, fmt.Errorf(
+				"unknown or invalid transport %q: %w", definition, err,
+			))
+		} else {
+			transports = append(transports, parsed)
+		}
+	}
+
+	SortTransports(transports)
+	return transports, errs
+}
+
// ParseTransport parses a transport definition such as "spn:17" or
// "wss://example.com:443/spn". The URL fragment is stored as the Option;
// user/pass is rejected; the port is mandatory and must be 1-65535.
func ParseTransport(definition string) (*Transport, error) {
	u, err := url.Parse(definition)
	if err != nil {
		return nil, err
	}

	// check for invalid parts
	if u.User != nil {
		return nil, errors.New("user/pass is not allowed")
	}

	// put into transport
	t := &Transport{
		Protocol: u.Scheme,
		Domain:   u.Hostname(),
		Path:     u.RequestURI(),
		Option:   u.Fragment,
	}

	// parse port
	portData := u.Port()
	if portData == "" {
		// no port available - it might be in u.Opaque, which holds both the port and possibly a path
		// (this happens for definitions like "spn:17" that have no "//" authority)
		portData = strings.SplitN(u.Opaque, "/", 2)[0] // get port
		t.Path = strings.TrimPrefix(t.Path, portData)  // trim port from path
		// check again for port
		if portData == "" {
			return nil, errors.New("missing port")
		}
	}
	port, err := strconv.ParseUint(portData, 10, 16)
	if err != nil {
		return nil, errors.New("invalid port")
	}
	t.Port = uint16(port)

	// check port (zero is not a usable port)
	if t.Port == 0 {
		return nil, errors.New("invalid port")
	}

	// remove root paths
	if t.Path == "/" {
		t.Path = ""
	}

	// check for protocol
	if t.Protocol == "" {
		return nil, errors.New("missing scheme/protocol")
	}

	return t, nil
}
+
+// String returns the definition form of the transport.
+func (t *Transport) String() string {
+	switch {
+	case t.Option != "":
+		return fmt.Sprintf("%s://%s:%d%s#%s", t.Protocol, t.Domain, t.Port, t.Path, t.Option)
+	case t.Domain != "":
+		return fmt.Sprintf("%s://%s:%d%s", t.Protocol, t.Domain, t.Port, t.Path)
+	default:
+		return fmt.Sprintf("%s:%d%s", t.Protocol, t.Port, t.Path)
+	}
+}
+
+// SortTransports sorts the transports to emphasize certain protocols, but
+// otherwise leaves the order intact.
+func SortTransports(ts []*Transport) {
+	slices.SortStableFunc[[]*Transport, *Transport](ts, func(a, b *Transport) int {
+		aOrder := a.protocolOrder()
+		bOrder := b.protocolOrder()
+
+		switch {
+		case aOrder != bOrder:
+			return aOrder - bOrder
+		// case a.Port != b.Port:
+		// 	return int(a.Port) - int(b.Port)
+		// case a.Domain != b.Domain:
+		// 	return strings.Compare(a.Domain, b.Domain)
+		// case a.Path != b.Path:
+		// 	return strings.Compare(a.Path, b.Path)
+		// case a.Option != b.Option:
+		// 	return strings.Compare(a.Option, b.Option)
+		default:
+			return 0
+		}
+	})
+}
+
+func (t *Transport) protocolOrder() int {
+	switch t.Protocol {
+	case "http":
+		return 1
+	case "spn":
+		return 2
+	default:
+		return 100
+	}
+}
diff --git a/spn/hub/transport_test.go b/spn/hub/transport_test.go
new file mode 100644
index 00000000..c885fcfa
--- /dev/null
+++ b/spn/hub/transport_test.go
@@ -0,0 +1,147 @@
+package hub
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
// parseT parses the given transport definition and fails the test on error.
func parseT(t *testing.T, definition string) *Transport {
	t.Helper()

	tr, err := ParseTransport(definition)
	if err != nil {
		t.Fatal(err)
		return nil // unreachable; t.Fatal stops the test
	}
	return tr
}
+
// parseTError parses the given transport definition and returns only the error.
func parseTError(definition string) error {
	_, err := ParseTransport(definition)
	return err
}
+
// TestTransportParsing covers parsing transport definitions, round-tripping
// them through String(), and rejecting invalid definitions.
func TestTransportParsing(t *testing.T) {
	t.Parallel()

	// test parsing

	assert.Equal(t, &Transport{
		Protocol: "spn",
		Port:     17,
	}, parseT(t, "spn:17"), "should match")

	assert.Equal(t, &Transport{
		Protocol: "smtp",
		Port:     25,
	}, parseT(t, "smtp:25"), "should match")

	assert.Equal(t, &Transport{
		Protocol: "smtp",
		Port:     25,
	}, parseT(t, "smtp://:25"), "should match")

	assert.Equal(t, &Transport{
		Protocol: "smtp",
		Port:     587,
	}, parseT(t, "smtp:587"), "should match")

	assert.Equal(t, &Transport{
		Protocol: "imap",
		Port:     143,
	}, parseT(t, "imap:143"), "should match")

	assert.Equal(t, &Transport{
		Protocol: "http",
		Port:     80,
	}, parseT(t, "http:80"), "should match")

	assert.Equal(t, &Transport{
		Protocol: "http",
		Domain:   "example.com",
		Port:     80,
	}, parseT(t, "http://example.com:80"), "should match")

	assert.Equal(t, &Transport{
		Protocol: "https",
		Port:     443,
	}, parseT(t, "https:443"), "should match")

	assert.Equal(t, &Transport{
		Protocol: "ws",
		Port:     80,
	}, parseT(t, "ws:80"), "should match")

	assert.Equal(t, &Transport{
		Protocol: "wss",
		Domain:   "example.com",
		Port:     443,
		Path:     "/spn",
	}, parseT(t, "wss://example.com:443/spn"), "should match")

	assert.Equal(t, &Transport{
		Protocol: "http",
		Domain:   "example.com",
		Port:     80,
	}, parseT(t, "http://example.com:80"), "should match")

	// Paths are kept in their URL-encoded form.
	assert.Equal(t, &Transport{
		Protocol: "http",
		Domain:   "example.com",
		Port:     80,
		Path:     "/test%20test",
	}, parseT(t, "http://example.com:80/test test"), "should match")

	assert.Equal(t, &Transport{
		Protocol: "http",
		Domain:   "example.com",
		Port:     80,
		Path:     "/test%20test",
	}, parseT(t, "http://example.com:80/test%20test"), "should match")

	// Query strings are part of the path.
	assert.Equal(t, &Transport{
		Protocol: "http",
		Domain:   "example.com",
		Port:     80,
		Path:     "/test?key=value",
	}, parseT(t, "http://example.com:80/test?key=value"), "should match")

	// test parsing and formatting

	assert.Equal(t, "spn:17",
		parseT(t, "spn:17").String(), "should match")
	assert.Equal(t, "smtp:25",
		parseT(t, "smtp:25").String(), "should match")
	assert.Equal(t, "smtp:25",
		parseT(t, "smtp://:25").String(), "should match")
	assert.Equal(t, "smtp:587",
		parseT(t, "smtp:587").String(), "should match")
	assert.Equal(t, "imap:143",
		parseT(t, "imap:143").String(), "should match")
	assert.Equal(t, "http:80",
		parseT(t, "http:80").String(), "should match")
	assert.Equal(t, "http://example.com:80",
		parseT(t, "http://example.com:80").String(), "should match")
	assert.Equal(t, "https:443",
		parseT(t, "https:443").String(), "should match")
	assert.Equal(t, "ws:80",
		parseT(t, "ws:80").String(), "should match")
	assert.Equal(t, "wss://example.com:443/spn",
		parseT(t, "wss://example.com:443/spn").String(), "should match")
	assert.Equal(t, "http://example.com:80",
		parseT(t, "http://example.com:80").String(), "should match")
	assert.Equal(t, "http://example.com:80/test%20test",
		parseT(t, "http://example.com:80/test test").String(), "should match")
	assert.Equal(t, "http://example.com:80/test%20test",
		parseT(t, "http://example.com:80/test%20test").String(), "should match")
	assert.Equal(t, "http://example.com:80/test?key=value",
		parseT(t, "http://example.com:80/test?key=value").String(), "should match")

	// test invalid

	assert.NotEqual(t, parseTError("spn"), nil, "should fail")
	assert.NotEqual(t, parseTError("spn:"), nil, "should fail")
	assert.NotEqual(t, parseTError("spn:0"), nil, "should fail")
	assert.NotEqual(t, parseTError("spn:65536"), nil, "should fail")
}
diff --git a/spn/hub/truststores.go b/spn/hub/truststores.go
new file mode 100644
index 00000000..8f06a55d
--- /dev/null
+++ b/spn/hub/truststores.go
@@ -0,0 +1,17 @@
+package hub
+
+import "github.com/safing/jess"
+
// SingleTrustStore is a simple truststore that always returns the same Signet.
type SingleTrustStore struct {
	// Signet is the single Signet served by this truststore.
	Signet *jess.Signet
}
+
// GetSignet implements the truststore interface.
// It returns the stored Signet only if both the ID and the requested
// visibility (recipient must match the Signet's Public flag) match.
func (ts *SingleTrustStore) GetSignet(id string, recipient bool) (*jess.Signet, error) {
	if ts.Signet.ID != id || recipient != ts.Signet.Public {
		return nil, jess.ErrSignetNotFound
	}

	return ts.Signet, nil
}
diff --git a/spn/hub/update.go b/spn/hub/update.go
new file mode 100644
index 00000000..e2009db4
--- /dev/null
+++ b/spn/hub/update.go
@@ -0,0 +1,524 @@
+package hub
+
+import (
+	"errors"
+	"fmt"
+	"time"
+
+	"github.com/safing/jess"
+	"github.com/safing/jess/lhash"
+	"github.com/safing/portbase/container"
+	"github.com/safing/portbase/database"
+	"github.com/safing/portbase/formats/dsd"
+	"github.com/safing/portbase/log"
+	"github.com/safing/portmaster/service/network/netutils"
+)
+
var (
	// hubMsgRequirements defines which security attributes message need to have.
	hubMsgRequirements = jess.NewRequirements().
				Remove(jess.RecipientAuthentication). // Recipient don't need a private key.
				Remove(jess.Confidentiality).         // Message contents are out in the open.
				Remove(jess.Integrity)                // Only applies to decryption.
	// SenderAuthentication provides pre-decryption integrity. That is all we need.

	// clockSkewTolerance is how far in the future a message timestamp may lie
	// before it is rejected.
	clockSkewTolerance = 1 * time.Hour
)
+
// SignHubMsg signs the given serialized hub msg with the given configuration
// and returns the signed message serialized as DSD/JSON.
// If enableTofu is set, the senders' public keys are attached to the message
// so receivers can trust-on-first-use.
func SignHubMsg(msg []byte, env *jess.Envelope, enableTofu bool) ([]byte, error) {
	// start session from envelope
	session, err := env.Correspondence(nil)
	if err != nil {
		return nil, fmt.Errorf("failed to initiate signing session: %w", err)
	}
	// sign the data
	letter, err := session.Close(msg)
	if err != nil {
		return nil, fmt.Errorf("failed to sign msg: %w", err)
	}

	if enableTofu {
		// smuggle the public key
		// letter.Keys is usually only used for key exchanges and encapsulation
		// neither is used when signing, so we can use letter.Keys to transport public keys
		for _, sender := range env.Senders {
			// get public key
			public, err := sender.AsRecipient()
			if err != nil {
				return nil, fmt.Errorf("failed to get public key of %s: %w", sender.ID, err)
			}
			// serialize key
			err = public.StoreKey()
			if err != nil {
				return nil, fmt.Errorf("failed to serialize public key %s: %w", sender.ID, err)
			}
			// add to keys
			letter.Keys = append(letter.Keys, &jess.Seal{
				Value: public.Key,
			})
		}
	}

	// pack the letter for transport
	data, err := letter.ToDSD(dsd.JSON)
	if err != nil {
		return nil, err
	}

	return data, nil
}
+
// OpenHubMsg opens a signed hub msg and verifies the signature using the
// provided hub or the local database. If TOFU is enabled, the signature is
// always accepted, if valid.
// It returns the verified message payload, the Hub that signed it (possibly
// newly created via TOFU) and whether that Hub was already known locally.
func OpenHubMsg(hub *Hub, data []byte, mapName string, tofu bool) (msg []byte, sendingHub *Hub, known bool, err error) {
	letter, err := jess.LetterFromDSD(data)
	if err != nil {
		return nil, nil, false, fmt.Errorf("malformed letter: %w", err)
	}

	// check signatures - exactly one is required
	var seal *jess.Seal
	switch len(letter.Signatures) {
	case 0:
		return nil, nil, false, errors.New("missing signature")
	case 1:
		seal = letter.Signatures[0]
	default:
		return nil, nil, false, fmt.Errorf("too many signatures (%d)", len(letter.Signatures))
	}

	// check signature signer ID
	if seal.ID == "" {
		return nil, nil, false, errors.New("signature is missing signer ID")
	}

	// get hub for public key, falling back to the database when none was given
	if hub == nil {
		hub, err = GetHub(mapName, seal.ID)
		if err != nil {
			if !errors.Is(err, database.ErrNotFound) {
				return nil, nil, false, fmt.Errorf("failed to get existing hub %s: %w", seal.ID, err)
			}
			hub = nil
		} else {
			known = true
		}
	} else {
		known = true
	}

	var truststore jess.TrustStore
	if hub != nil && hub.PublicKey != nil { // bootstrap entries will not have a public key
		// check ID integrity against the already-known key
		if hub.ID != seal.ID {
			return nil, hub, known, fmt.Errorf("ID mismatch with hub msg ID %s and hub ID %s", seal.ID, hub.ID)
		}
		if !verifyHubID(seal.ID, hub.PublicKey.Scheme, hub.PublicKey.Key) {
			return nil, hub, known, fmt.Errorf("ID integrity of %s violated with existing key", seal.ID)
		}
	} else {
		if !tofu {
			return nil, nil, false, fmt.Errorf("hub msg ID %s unknown (missing announcement)", seal.ID)
		}

		// trust on first use, extract key from keys
		// TODO: Test if works without TOFU.

		// get key - exactly one smuggled key is expected (see SignHubMsg)
		var pubkey *jess.Seal
		switch len(letter.Keys) {
		case 0:
			return nil, nil, false, fmt.Errorf("missing key for TOFU of %s", seal.ID)
		case 1:
			pubkey = letter.Keys[0]
		default:
			return nil, nil, false, fmt.Errorf("too many keys (%d) for TOFU of %s", len(letter.Keys), seal.ID)
		}

		// check ID integrity against the newly received key
		if !verifyHubID(seal.ID, seal.Scheme, pubkey.Value) {
			return nil, nil, false, fmt.Errorf("ID integrity of %s violated with new key", seal.ID)
		}

		// create a new Hub carrying the newly trusted public key
		hub = &Hub{
			ID:  seal.ID,
			Map: mapName,
			PublicKey: &jess.Signet{
				ID:     seal.ID,
				Scheme: seal.Scheme,
				Key:    pubkey.Value,
				Public: true,
			},
		}
		err = hub.PublicKey.LoadKey()
		if err != nil {
			return nil, nil, false, err
		}
	}

	// create trust store holding only the signer's key
	truststore = &SingleTrustStore{hub.PublicKey}

	// remove keys from letter, as they are only used to transfer the public key
	letter.Keys = nil

	// check signature
	err = letter.Verify(hubMsgRequirements, truststore)
	if err != nil {
		return nil, nil, false, err
	}

	return letter.Data, hub, known, nil
}
+
+// Export exports the announcement with the given signature configuration.
+func (a *Announcement) Export(env *jess.Envelope) ([]byte, error) {
+	// pack
+	msg, err := dsd.Dump(a, dsd.JSON)
+	if err != nil {
+		return nil, fmt.Errorf("failed to pack announcement: %w", err)
+	}
+
+	return SignHubMsg(msg, env, true)
+}
+
// ApplyAnnouncement applies the announcement to the Hub if it passes all the
// checks. If no Hub is provided, it is loaded from the database or created.
// It returns the affected Hub, whether it was already known, and whether
// the announcement changed it. Note: uses naked returns throughout so the
// deferred validity update always sees the final values.
func ApplyAnnouncement(existingHub *Hub, data []byte, mapName string, scope Scope, selfcheck bool) (hub *Hub, known, changed bool, err error) {
	// Set valid/invalid status based on the return error.
	var announcement *Announcement
	defer func() {
		if hub != nil {
			if err != nil && !errors.Is(err, ErrOldData) {
				hub.InvalidInfo = true
			} else {
				hub.InvalidInfo = false
			}
		}
	}()

	// open and verify
	var msg []byte
	msg, hub, known, err = OpenHubMsg(existingHub, data, mapName, true)

	// Lock hub if we have one.
	if hub != nil && !selfcheck {
		hub.Lock()
		defer hub.Unlock()
	}

	// Check if there was an error with the Hub msg.
	if err != nil {
		return //nolint:nakedret
	}

	// parse
	announcement = &Announcement{}
	_, err = dsd.Load(msg, announcement)
	if err != nil {
		return //nolint:nakedret
	}

	// integrity check

	// `hub.ID` is taken from the first ever received announcement message.
	// `announcement.ID` is additionally present in the message as we need
	// a signed version of the ID to mitigate fake IDs.
	// Fake IDs are possible because the hash algorithm of the ID is dynamic.
	if hub.ID != announcement.ID {
		err = fmt.Errorf("announcement ID %q mismatches hub ID %q", announcement.ID, hub.ID)
		return //nolint:nakedret
	}

	// version check
	if hub.Info != nil {
		// check if we already have this version
		switch {
		case announcement.Timestamp == hub.Info.Timestamp && !selfcheck:
			// The new copy is not saved, as we expect the versions to be identical.
			// Also, the new version has not been validated at this point.
			return //nolint:nakedret
		case announcement.Timestamp < hub.Info.Timestamp:
			// Received an old version, do not update.
			err = fmt.Errorf(
				"%wannouncement from %s @ %s is older than current status @ %s",
				ErrOldData, hub.StringWithoutLocking(), time.Unix(announcement.Timestamp, 0), time.Unix(hub.Info.Timestamp, 0),
			)
			return //nolint:nakedret
		}
	}

	// We received a new version.
	changed = true

	// Update timestamp here already in case validation fails.
	if hub.Info != nil {
		hub.Info.Timestamp = announcement.Timestamp
	}

	// Validate the announcement.
	err = hub.validateAnnouncement(announcement, scope)
	if err != nil {
		// Hard-fail on selfcheck or for Hubs we have never fully seen.
		if selfcheck || hub.FirstSeen.IsZero() {
			err = fmt.Errorf("failed to validate announcement of %s: %w", hub.StringWithoutLocking(), err)
			return //nolint:nakedret
		}

		log.Warningf("spn/hub: received an invalid announcement of %s: %s", hub.StringWithoutLocking(), err)
		// If a previously fully validated Hub publishes an update that breaks it, a
		// soft-fail will accept the faulty changes, but mark it as invalid and
		// forward it to neighbors. This way the invalid update is propagated through
		// the network and all nodes will mark it as invalid and thus ignore the Hub
		// until the issue is fixed.
	}

	// Only save announcement if it is valid.
	if err == nil {
		hub.Info = announcement
	}
	// Set FirstSeen timestamp when we see this Hub for the first time.
	if hub.FirstSeen.IsZero() {
		hub.FirstSeen = time.Now().UTC()
	}

	return //nolint:nakedret
}
+
// validateAnnouncement checks the announcement's formatting, timestamp,
// IP address stability and IP scope. As a side effect it resets the Hub's
// VerifiedIPs flag when announced IPs change or are newly added.
func (h *Hub) validateAnnouncement(announcement *Announcement, scope Scope) error {
	// value formatting
	if err := announcement.validateFormatting(); err != nil {
		return err
	}
	// check parsables
	if err := announcement.prepare(true); err != nil {
		return fmt.Errorf("failed to prepare announcement: %w", err)
	}

	// check timestamp (allowing for some clock skew)
	if announcement.Timestamp > time.Now().Add(clockSkewTolerance).Unix() {
		return fmt.Errorf(
			"announcement from %s @ %s is from the future",
			announcement.ID,
			time.Unix(announcement.Timestamp, 0),
		)
	}

	// check for illegal IP address changes
	if h.Info != nil {
		switch {
		case h.Info.IPv4 != nil && announcement.IPv4 == nil:
			h.VerifiedIPs = false
			return errors.New("previously announced IPv4 address missing")
		case h.Info.IPv4 != nil && !announcement.IPv4.Equal(h.Info.IPv4):
			h.VerifiedIPs = false
			return errors.New("IPv4 address changed")
		case h.Info.IPv6 != nil && announcement.IPv6 == nil:
			h.VerifiedIPs = false
			return errors.New("previously announced IPv6 address missing")
		case h.Info.IPv6 != nil && !announcement.IPv6.Equal(h.Info.IPv6):
			h.VerifiedIPs = false
			return errors.New("IPv6 address changed")
		}
	}

	// validate IP scopes
	if announcement.IPv4 != nil {
		ipScope := netutils.GetIPScope(announcement.IPv4)
		switch {
		case scope == ScopeLocal && !ipScope.IsLAN():
			return errors.New("IPv4 scope violation: outside of local scope")
		case scope == ScopePublic && !ipScope.IsGlobal():
			return errors.New("IPv4 scope violation: outside of global scope")
		}
		// Reset IP verification flag if IPv4 was added.
		if h.Info == nil || h.Info.IPv4 == nil {
			h.VerifiedIPs = false
		}
	}
	if announcement.IPv6 != nil {
		ipScope := netutils.GetIPScope(announcement.IPv6)
		switch {
		case scope == ScopeLocal && !ipScope.IsLAN():
			return errors.New("IPv6 scope violation: outside of local scope")
		case scope == ScopePublic && !ipScope.IsGlobal():
			return errors.New("IPv6 scope violation: outside of global scope")
		}
		// Reset IP verification flag if IPv6 was added.
		if h.Info == nil || h.Info.IPv6 == nil {
			h.VerifiedIPs = false
		}
	}

	return nil
}
+
+// Export serializes the status as JSON and signs it with the given
+// signature configuration, returning the signed message bytes.
+func (s *Status) Export(env *jess.Envelope) ([]byte, error) {
+	// Serialize first, then hand the raw message to the signer.
+	packed, err := dsd.Dump(s, dsd.JSON)
+	if err != nil {
+		return nil, fmt.Errorf("failed to pack status: %w", err)
+	}
+	return SignHubMsg(packed, env, false)
+}
+
+// ApplyStatus applies a status update if it passes all the checks.
+// It returns the Hub the status belongs to, whether the message was already
+// known, whether it changed anything, and an error if validation failed.
+func ApplyStatus(existingHub *Hub, data []byte, mapName string, scope Scope, selfcheck bool) (hub *Hub, known, changed bool, err error) {
+	// Set valid/invalid status based on the return error.
+	defer func() {
+		if hub != nil {
+			if err != nil && !errors.Is(err, ErrOldData) {
+				hub.InvalidStatus = true
+			} else {
+				hub.InvalidStatus = false
+			}
+		}
+	}()
+
+	// Open and verify the signed message.
+	var msg []byte
+	msg, hub, known, err = OpenHubMsg(existingHub, data, mapName, false)
+
+	// Lock hub if we have one.
+	// NOTE(review): the lock is skipped during selfcheck — presumably the
+	// caller already holds it then; confirm against callers.
+	if hub != nil && !selfcheck {
+		hub.Lock()
+		defer hub.Unlock()
+	}
+
+	// Check if there was an error with the Hub msg.
+	if err != nil {
+		return //nolint:nakedret
+	}
+
+	// Parse the verified payload into a Status.
+	status := &Status{}
+	_, err = dsd.Load(msg, status)
+	if err != nil {
+		return //nolint:nakedret
+	}
+
+	// Version check against the currently stored status.
+	if hub.Status != nil {
+		// check if we already have this version
+		switch {
+		case status.Timestamp == hub.Status.Timestamp && !selfcheck:
+			// The new copy is not saved, as we expect the versions to be identical.
+			// Also, the new version has not been validated at this point.
+			return //nolint:nakedret
+		case status.Timestamp < hub.Status.Timestamp:
+			// Received an old version, do not update.
+			err = fmt.Errorf(
+				"%wstatus from %s @ %s is older than current status @ %s",
+				ErrOldData, hub.StringWithoutLocking(), time.Unix(status.Timestamp, 0), time.Unix(hub.Status.Timestamp, 0),
+			)
+			return //nolint:nakedret
+		}
+	}
+
+	// We received a new version.
+	changed = true
+
+	// Update timestamp here already in case validation fails.
+	if hub.Status != nil {
+		hub.Status.Timestamp = status.Timestamp
+	}
+
+	// Validate the status.
+	err = hub.validateStatus(status)
+	if err != nil {
+		if selfcheck {
+			err = fmt.Errorf("failed to validate status of %s: %w", hub.StringWithoutLocking(), err)
+			return //nolint:nakedret
+		}
+
+		log.Warningf("spn/hub: received an invalid status of %s: %s", hub.StringWithoutLocking(), err)
+		// If a previously fully validated Hub publishes an update that breaks it, a
+		// soft-fail will accept the faulty changes, but mark it as invalid and
+		// forward it to neighbors. This way the invalid update is propagated through
+		// the network and all nodes will mark it as invalid and thus ignore the Hub
+		// until the issue is fixed.
+	}
+
+	// Only save status if it is valid, else mark it as invalid.
+	if err == nil {
+		hub.Status = status
+	}
+
+	return //nolint:nakedret
+}
+
+// validateStatus checks the given status for formatting errors and an
+// implausible (future) timestamp.
+func (h *Hub) validateStatus(status *Status) error {
+	// Check value formatting first.
+	if err := status.validateFormatting(); err != nil {
+		return err
+	}
+
+	// Reject timestamps beyond the allowed clock skew.
+	latestAllowed := time.Now().Add(clockSkewTolerance).Unix()
+	if status.Timestamp > latestAllowed {
+		return fmt.Errorf(
+			"status from %s @ %s is from the future",
+			h.ID,
+			time.Unix(status.Timestamp, 0),
+		)
+	}
+
+	// TODO: validate status.Keys
+
+	return nil
+}
+
+// CreateHubSignet creates a signet with the correct ID for usage as a Hub Identity.
+// Both the private and the derived public key are persisted via StoreKey.
+func CreateHubSignet(toolID string, securityLevel int) (private, public *jess.Signet, err error) {
+	// Generate and persist the private key.
+	if private, err = jess.GenerateSignet(toolID, securityLevel); err != nil {
+		return nil, nil, fmt.Errorf("failed to generate key: %w", err)
+	}
+	if err = private.StoreKey(); err != nil {
+		return nil, nil, fmt.Errorf("failed to store private key: %w", err)
+	}
+
+	// Derive and persist the public key, which the Hub ID is based on.
+	if public, err = private.AsRecipient(); err != nil {
+		return nil, nil, fmt.Errorf("failed to get public key: %w", err)
+	}
+	if err = public.StoreKey(); err != nil {
+		return nil, nil, fmt.Errorf("failed to store public key: %w", err)
+	}
+
+	// Both signets share the ID derived from the public key.
+	private.ID = createHubID(public.Scheme, public.Key)
+	public.ID = private.ID
+
+	return private, public, nil
+}
+
+// createHubID derives a Hub ID by hashing the signet scheme and public key
+// with BLAKE2b-256 and encoding the labeled hash as Base58.
+func createHubID(scheme string, pubkey []byte) string {
+	// Pack scheme and public key into a single length-prefixed blob.
+	blob := container.New()
+	blob.AppendAsBlock([]byte(scheme))
+	blob.AppendAsBlock(pubkey)
+
+	return lhash.Digest(lhash.BLAKE2b_256, blob.CompileData()).Base58()
+}
+
+// verifyHubID reports whether the given Hub ID matches the signet scheme and
+// public key it is supposed to be derived from.
+func verifyHubID(id string, scheme string, pubkey []byte) (ok bool) {
+	// The ID must decode to a labeled hash.
+	labeledHash, err := lhash.FromBase58(id)
+	if err != nil {
+		return false
+	}
+
+	// Rebuild the hashed blob from scheme and public key.
+	blob := container.New()
+	blob.AppendAsBlock([]byte(scheme))
+	blob.AppendAsBlock(pubkey)
+
+	// The ID is valid if the hash matches the rebuilt blob.
+	return labeledHash.MatchesData(blob.CompileData())
+}
diff --git a/spn/hub/update_test.go b/spn/hub/update_test.go
new file mode 100644
index 00000000..982f3206
--- /dev/null
+++ b/spn/hub/update_test.go
@@ -0,0 +1,70 @@
+package hub
+
+import (
+	"fmt"
+	"testing"
+
+	"github.com/safing/jess"
+	"github.com/safing/portbase/formats/dsd"
+)
+
+// TestHubUpdate generates a Hub identity, signs a message with it, smuggles
+// the public key into the letter, and verifies that OpenHubMsg accepts it.
+//
+// Fix: output now goes through the testing framework (t.Log/t.Logf) instead
+// of fmt.Printf, so it is attached to this test and only shown with -v or on
+// failure, and stays correct under t.Parallel().
+func TestHubUpdate(t *testing.T) {
+	t.Parallel()
+
+	// message signing
+
+	testData := []byte{0}
+
+	// Generate the private signet and store its key.
+	s1, err := jess.GenerateSignet("Ed25519", 0)
+	if err != nil {
+		t.Fatal(err)
+	}
+	err = s1.StoreKey()
+	if err != nil {
+		t.Fatal(err)
+	}
+	t.Log(fmt.Sprintf("s1: %+v", s1))
+
+	// Derive the public signet and store it as well.
+	s1e, err := s1.AsRecipient()
+	if err != nil {
+		t.Fatal(err)
+	}
+	err = s1e.StoreKey()
+	if err != nil {
+		t.Fatal(err)
+	}
+	s1e.ID = createHubID(s1e.Scheme, s1e.Key)
+	s1.ID = s1e.ID
+
+	t.Logf("generated hub ID: %s", s1.ID)
+
+	// Sign the test data with the generated identity.
+	env := jess.NewUnconfiguredEnvelope()
+	env.SuiteID = jess.SuiteSignV1
+	env.Senders = []*jess.Signet{s1}
+
+	s, err := env.Correspondence(nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	letter, err := s.Close(testData)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// smuggle the key
+	letter.Keys = append(letter.Keys, &jess.Seal{
+		Value: s1e.Key,
+	})
+	t.Logf("letter with smuggled key: %+v", letter)
+
+	// Pack the letter and check that it can be opened and verified again.
+	data, err := letter.ToDSD(dsd.JSON)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, _, _, err = OpenHubMsg(nil, data, "test", true) //nolint:dogsled
+	if err != nil {
+		t.Fatal(err)
+	}
+}
diff --git a/spn/navigator/api.go b/spn/navigator/api.go
new file mode 100644
index 00000000..832d1126
--- /dev/null
+++ b/spn/navigator/api.go
@@ -0,0 +1,672 @@
+package navigator
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"math"
+	"net/http"
+	"sort"
+	"strconv"
+	"strings"
+	"sync"
+	"text/tabwriter"
+	"time"
+
+	"github.com/awalterschulze/gographviz"
+
+	"github.com/safing/portbase/api"
+	"github.com/safing/portbase/log"
+	"github.com/safing/portmaster/spn/docks"
+	"github.com/safing/portmaster/spn/hub"
+)
+
+var (
+	// apiMapsLock guards apiMaps.
+	apiMapsLock sync.Mutex
+	// apiMaps holds the maps exposed via the API, keyed by map name.
+	apiMaps     = make(map[string]*Map)
+)
+
+// addMapToAPI makes the given map available to the map API handlers under
+// its name.
+func addMapToAPI(newMap *Map) {
+	apiMapsLock.Lock()
+	apiMaps[newMap.Name] = newMap
+	apiMapsLock.Unlock()
+}
+
+// getMapForAPI returns the API-registered map with the given name, and
+// whether it exists.
+func getMapForAPI(name string) (m *Map, ok bool) {
+	apiMapsLock.Lock()
+	defer apiMapsLock.Unlock()
+
+	registered, exists := apiMaps[name]
+	return registered, exists
+}
+
+// removeMapFromAPI unregisters the map with the given name from the API.
+func removeMapFromAPI(name string) {
+	apiMapsLock.Lock()
+	delete(apiMaps, name)
+	apiMapsLock.Unlock()
+}
+
+// registerAPIEndpoints registers all SPN map API endpoints of this file,
+// then delegates to sibling files for their endpoints.
+func registerAPIEndpoints() error {
+	// All endpoints of this file, registered in declaration order.
+	endpoints := []api.Endpoint{
+		{
+			Path:        `spn/map/{map:[A-Za-z0-9]{1,255}}/pins`,
+			Read:        api.PermitUser,
+			BelongsTo:   module,
+			StructFunc:  handleMapPinsRequest,
+			Name:        "Get SPN map pins",
+			Description: "Returns a list of pins on the map.",
+		},
+		{
+			Path:        `spn/map/{map:[A-Za-z0-9]{1,255}}/intel/update`,
+			Write:       api.PermitSelf,
+			BelongsTo:   module,
+			ActionFunc:  handleIntelUpdateRequest,
+			Name:        "Update map intelligence.",
+			Description: "Updates the intel data of the map.",
+		},
+		{
+			Path:        `spn/map/{map:[A-Za-z0-9]{1,255}}/optimization`,
+			Read:        api.PermitUser,
+			BelongsTo:   module,
+			StructFunc:  handleMapOptimizationRequest,
+			Name:        "Get SPN map optimization",
+			Description: "Returns the calculated optimization for the map.",
+		},
+		{
+			Path:        `spn/map/{map:[A-Za-z0-9]{1,255}}/optimization/table`,
+			Read:        api.PermitUser,
+			BelongsTo:   module,
+			DataFunc:    handleMapOptimizationTableRequest,
+			Name:        "Get SPN map optimization as a table",
+			Description: "Returns the calculated optimization for the map as a table.",
+		},
+		{
+			Path:        `spn/map/{map:[A-Za-z0-9]{1,255}}/measurements`,
+			Read:        api.PermitUser,
+			BelongsTo:   module,
+			StructFunc:  handleMapMeasurementsRequest,
+			Name:        "Get SPN map measurements",
+			Description: "Returns the measurements of the map.",
+		},
+		{
+			Path:        `spn/map/{map:[A-Za-z0-9]{1,255}}/measurements/table`,
+			MimeType:    api.MimeTypeText,
+			Read:        api.PermitUser,
+			BelongsTo:   module,
+			DataFunc:    handleMapMeasurementsTableRequest,
+			Name:        "Get SPN map measurements as a table",
+			Description: "Returns the measurements of the map as a table.",
+		},
+		{
+			Path:        `spn/map/{map:[A-Za-z0-9]{1,255}}/graph{format:\.[a-z]{2,4}}`,
+			Read:        api.PermitUser,
+			BelongsTo:   module,
+			HandlerFunc: handleMapGraphRequest,
+			Name:        "Get SPN map graph",
+			Description: "Returns a graph of the given SPN map.",
+			Parameters: []api.Parameter{
+				{
+					Method:      http.MethodGet,
+					Field:       "map (in path)",
+					Value:       "name of map",
+					Description: "Specify the map you want to get the map for. The main map is called `main`.",
+				},
+				{
+					Method:      http.MethodGet,
+					Field:       "format (in path)",
+					Value:       "file type",
+					Description: "Specify the format you want to get the map in. Available values: `dot`, `html`. Please note that the html format is only available in development mode.",
+				},
+			},
+		},
+	}
+	for _, endpoint := range endpoints {
+		if err := api.RegisterEndpoint(endpoint); err != nil {
+			return err
+		}
+	}
+
+	// Register API endpoints from other files.
+	return registerRouteAPIEndpoints()
+}
+
+// handleMapPinsRequest returns all pins of the requested map in sorted,
+// exported form.
+func handleMapPinsRequest(ar *api.Request) (i interface{}, err error) {
+	// Look up the requested map.
+	m, ok := getMapForAPI(ar.URLVars["map"])
+	if !ok {
+		return nil, errors.New("map not found")
+	}
+
+	// Export every pin in sorted order.
+	pins := m.sortedPins(true)
+	exported := make([]*PinExport, len(pins))
+	for idx, pin := range pins {
+		exported[idx] = pin.Export()
+	}
+
+	return exported, nil
+}
+
+// handleIntelUpdateRequest parses the request body as intel data and applies
+// it to the requested map.
+func handleIntelUpdateRequest(ar *api.Request) (msg string, err error) {
+	// Look up the requested map.
+	m, ok := getMapForAPI(ar.URLVars["map"])
+	if !ok {
+		return "", errors.New("map not found")
+	}
+
+	// The request body holds the new intel data.
+	newIntel, parseErr := hub.ParseIntel(ar.InputData)
+	if parseErr != nil {
+		return "", fmt.Errorf("failed to parse intel data: %w", parseErr)
+	}
+
+	// Apply it together with the configured trusted nodes.
+	if applyErr := m.UpdateIntel(newIntel, cfgOptionTrustNodeNodes()); applyErr != nil {
+		return "", fmt.Errorf("failed to apply intel data: %w", applyErr)
+	}
+
+	return "successfully applied given intel data", nil
+}
+
+// handleMapOptimizationRequest returns the calculated optimization result
+// for the requested map.
+func handleMapOptimizationRequest(ar *api.Request) (i interface{}, err error) {
+	// Look up the requested map.
+	m, ok := getMapForAPI(ar.URLVars["map"])
+	if !ok {
+		return nil, errors.New("map not found")
+	}
+	return m.Optimize(nil)
+}
+
+// handleMapOptimizationTableRequest renders the optimization result of the
+// requested map as a human-readable text table: first metadata about the
+// optimization, then one row per suggested connection, augmented with crane
+// usage stats where a crane is assigned to the Hub.
+func handleMapOptimizationTableRequest(ar *api.Request) (data []byte, err error) {
+	// Get map.
+	m, ok := getMapForAPI(ar.URLVars["map"])
+	if !ok {
+		return nil, errors.New("map not found")
+	}
+
+	// Get optimization result.
+	result, err := m.Optimize(nil)
+	if err != nil {
+		return nil, err
+	}
+
+	// Read lock map, as we access pins.
+	m.RLock()
+	defer m.RUnlock()
+
+	// Get cranes for additional metadata.
+	assignedCranes := docks.GetAllAssignedCranes()
+
+	// Write metadata.
+	buf := bytes.NewBuffer(nil)
+	buf.WriteString("Optimization:\n")
+	fmt.Fprintf(buf, "Purpose: %s\n", result.Purpose)
+	if len(result.Approach) == 1 {
+		fmt.Fprintf(buf, "Approach: %s\n", result.Approach[0])
+	} else if len(result.Approach) > 1 {
+		buf.WriteString("Approach:\n")
+		for _, approach := range result.Approach {
+			fmt.Fprintf(buf, "  - %s\n", approach)
+		}
+	}
+	fmt.Fprintf(buf, "MaxConnect: %d\n", result.MaxConnect)
+	fmt.Fprintf(buf, "StopOthers: %v\n", result.StopOthers)
+
+	// Build table of suggested connections.
+	buf.WriteString("\nSuggested Connections:\n")
+	tabWriter := tabwriter.NewWriter(buf, 8, 4, 3, ' ', 0)
+	fmt.Fprint(tabWriter, "Hub Name\tReason\tDuplicate\tCountry\tRegion\tLatency\tCapacity\tCost\tGeo Prox.\tHub ID\tLifetime Usage\tPeriod Usage\tProt\tStatus\n")
+	for _, suggested := range result.SuggestedConnections {
+		var dupe string
+		if suggested.Duplicate {
+			dupe = "yes"
+		} else {
+			// Lock each non-duplicate pin's measurements while its row is built;
+			// duplicates reuse the already-locked pin.
+			// NOTE(review): defer in a loop holds ALL these locks until the
+			// handler returns — confirm this accumulation is intended.
+			suggested.pin.measurements.Lock()
+			defer suggested.pin.measurements.Unlock()
+		}
+
+		// Add row.
+		fmt.Fprintf(tabWriter,
+			"%s\t%s\t%s\t%s\t%s\t%s\t%.2fMbit/s\t%.2fc\t%.2f%%\t%s",
+			suggested.Hub.Info.Name,
+			suggested.Reason,
+			dupe,
+			getPinCountry(suggested.pin),
+			suggested.pin.region.getName(),
+			suggested.pin.measurements.Latency,
+			float64(suggested.pin.measurements.Capacity)/1000000,
+			suggested.pin.measurements.CalculatedCost,
+			suggested.pin.measurements.GeoProximity,
+			suggested.Hub.ID,
+		)
+
+		// Add usage stats.
+		if crane, ok := assignedCranes[suggested.Hub.ID]; ok {
+			addUsageStatsToTable(crane, tabWriter)
+		}
+
+		// Add linebreak.
+		fmt.Fprint(tabWriter, "\n")
+	}
+	_ = tabWriter.Flush()
+
+	return buf.Bytes(), nil
+}
+
+// addUsageStatsToTable compiles some usage stats of a lane and adds them to the table.
+// Table Fields: Lifetime Usage, Period Usage, Prot, Status.
+// The status column encodes ownership ("mine") and the stopping state:
+// "<r" = stopping requested locally, "!" = crane is stopping,
+// "r>" = stopping requested by peer.
+// NOTE(review): if a crane has transferred no data yet (in+out == 0), the
+// percentage columns divide by zero and print NaN — confirm acceptable.
+func addUsageStatsToTable(crane *docks.Crane, tabWriter *tabwriter.Writer) {
+	// Lifetime (lt) and current-period (p) traffic totals and start times.
+	ltIn, ltOut, ltStart, pIn, pOut, pStart := crane.NetState.GetTrafficStats()
+	ltDuration := time.Since(ltStart)
+	pDuration := time.Since(pStart)
+
+	// Build ownership and stopping info.
+	var status string
+	isMine := crane.IsMine()
+	isStopping := crane.IsStopping()
+	stoppingRequested, stoppingRequestedByPeer, markedStoppingAt := crane.NetState.StoppingState()
+	if isMine {
+		status = "mine"
+	}
+	if isStopping || stoppingRequested || stoppingRequestedByPeer {
+		if isMine {
+			status += " - "
+		}
+		status += "stopping "
+		if stoppingRequested {
+			status += "<r"
+		}
+		if isStopping {
+			status += "!"
+		}
+		if stoppingRequestedByPeer {
+			status += "r>"
+		}
+		if isStopping && !markedStoppingAt.IsZero() {
+			status += " since " + markedStoppingAt.Truncate(time.Minute).String()
+		}
+	}
+
+	// Append the four table columns; byte counts are rendered as GB, rates as
+	// Mbit/s, and the outbound share as a percentage of total traffic.
+	fmt.Fprintf(tabWriter,
+		"\t%.2fGB %.2fMbit/s %.2f%%out since %s\t%.2fGB %.2fMbit/s %.2f%%out since %s\t%s\t%s",
+		float64(ltIn+ltOut)/1000000000,
+		(float64(ltIn+ltOut)/1000000/ltDuration.Seconds())*8,
+		float64(ltOut)/float64(ltIn+ltOut)*100,
+		ltDuration.Truncate(time.Second),
+		float64(pIn+pOut)/1000000000,
+		(float64(pIn+pOut)/1000000/pDuration.Seconds())*8,
+		float64(pOut)/float64(pIn+pOut)*100,
+		pDuration.Truncate(time.Second),
+		crane.Transport().Protocol,
+		status,
+	)
+}
+
+// handleMapMeasurementsRequest returns copies of the measurements of all
+// pins of the requested map, sorted by lowest measured cost.
+func handleMapMeasurementsRequest(ar *api.Request) (i interface{}, err error) {
+	// Look up the requested map.
+	m, ok := getMapForAPI(ar.URLVars["map"])
+	if !ok {
+		return nil, errors.New("map not found")
+	}
+
+	// Collect pins and order them by measured cost.
+	pins := m.pinList(true)
+	sort.Sort(sortByLowestMeasuredCost(pins))
+
+	// Hand out copies so callers cannot mutate the live measurements.
+	result := make([]*hub.Measurements, 0, len(pins))
+	for _, pin := range pins {
+		result = append(result, pin.measurements.Copy())
+	}
+	return result, nil
+}
+
+// handleMapMeasurementsTableRequest renders the measurements of all regarded
+// pins of the requested map as a human-readable text table, sorted by lowest
+// measured cost, augmented with crane usage stats where available.
+func handleMapMeasurementsTableRequest(ar *api.Request) (data []byte, err error) {
+	// Get map.
+	m, ok := getMapForAPI(ar.URLVars["map"])
+	if !ok {
+		return nil, errors.New("map not found")
+	}
+	// Matcher decides which pins are regarded for transit with default options.
+	matcher := m.DefaultOptions().Transit.Matcher(m.GetIntel())
+
+	// Get and sort pins.
+	list := m.pinList(true)
+	sort.Sort(sortByLowestMeasuredCost(list))
+
+	// Get cranes for usage stats.
+	assignedCranes := docks.GetAllAssignedCranes()
+
+	// Build table and return.
+	buf := bytes.NewBuffer(nil)
+	tabWriter := tabwriter.NewWriter(buf, 8, 4, 3, ' ', 0)
+	fmt.Fprint(tabWriter, "Hub Name\tCountry\tRegion\tLatency\tCapacity\tCost\tGeo Prox.\tHub ID\tLifetime Usage\tPeriod Usage\tProt\tStatus\n")
+	for _, pin := range list {
+		// Only print regarded Hubs.
+		if !matcher(pin) {
+			continue
+		}
+
+		// Lock the pin's measurements while its row is built.
+		// NOTE(review): defer in a loop keeps every pin's lock held until the
+		// handler returns — confirm this accumulation is intended.
+		pin.measurements.Lock()
+		defer pin.measurements.Unlock()
+		fmt.Fprintf(tabWriter,
+			"%s\t%s\t%s\t%s\t%.2fMbit/s\t%.2fc\t%.2f%%\t%s",
+			pin.Hub.Info.Name,
+			getPinCountry(pin),
+			pin.region.getName(),
+			pin.measurements.Latency,
+			float64(pin.measurements.Capacity)/1000000,
+			pin.measurements.CalculatedCost,
+			pin.measurements.GeoProximity,
+			pin.Hub.ID,
+		)
+
+		// Add usage stats.
+		if crane, ok := assignedCranes[pin.Hub.ID]; ok {
+			addUsageStatsToTable(crane, tabWriter)
+		}
+
+		// Add linebreak.
+		fmt.Fprint(tabWriter, "\n")
+	}
+	_ = tabWriter.Flush()
+
+	return buf.Bytes(), nil
+}
+
+// getPinCountry returns the best-known country code of the given pin,
+// preferring geoip location data over entity data and IPv4 over IPv6.
+// It returns an empty string when no country is known.
+func getPinCountry(pin *Pin) string {
+	if pin.LocationV4 != nil && pin.LocationV4.Country.Code != "" {
+		return pin.LocationV4.Country.Code
+	}
+	if pin.LocationV6 != nil && pin.LocationV6.Country.Code != "" {
+		return pin.LocationV6.Country.Code
+	}
+	if pin.EntityV4 != nil && pin.EntityV4.Country != "" {
+		return pin.EntityV4.Country
+	}
+	if pin.EntityV6 != nil && pin.EntityV6.Country != "" {
+		return pin.EntityV6.Country
+	}
+	return ""
+}
+
+// handleMapGraphRequest renders the requested map as a graphviz graph and
+// returns it either as raw dot source (".dot") or as an HTML page that
+// renders the graph client-side (".html", dev mode only).
+func handleMapGraphRequest(w http.ResponseWriter, hr *http.Request) {
+	r := api.GetAPIRequest(hr)
+	if r == nil {
+		http.Error(w, "API request invalid.", http.StatusInternalServerError)
+		return
+	}
+
+	// Get map.
+	m, ok := getMapForAPI(r.URLVars["map"])
+	if !ok {
+		http.Error(w, "Map not found.", http.StatusNotFound)
+		return
+	}
+
+	// Check the requested output format (path suffix).
+	var format string
+	switch r.URLVars["format"] {
+	case ".dot":
+		format = "dot"
+	case ".html":
+		format = "html"
+
+		// Check if we are in dev mode.
+		if !devMode() {
+			http.Error(w, "Graph html formatting (js rendering) is only available in dev mode.", http.StatusPreconditionFailed)
+			return
+		}
+	default:
+		http.Error(w, "Unsupported format.", http.StatusBadRequest)
+		return
+	}
+
+	// Build graph: one node per pin, one edge per connected lane.
+	graph := gographviz.NewGraph()
+	_ = graph.AddAttr("", "overlap", "scale")
+	_ = graph.AddAttr("", "center", "true")
+	_ = graph.AddAttr("", "ratio", "fill")
+	for _, pin := range m.sortedPins(true) {
+		_ = graph.AddNode("", pin.Hub.ID, map[string]string{
+			"label":     graphNodeLabel(pin),
+			"tooltip":   graphNodeTooltip(pin),
+			"color":     graphNodeBorderColor(pin),
+			"fillcolor": graphNodeColor(pin),
+			"shape":     "circle",
+			"style":     "filled",
+			"fontsize":  "20",
+			"penwidth":  "4",
+			"margin":    "0",
+		})
+		for _, lane := range pin.ConnectedTo {
+			// Only add an edge once both of its nodes exist (the peer was
+			// already added in this loop) and the pin is not dead.
+			if graph.IsNode(lane.Pin.Hub.ID) && pin.State != StateNone {
+				// Create attributes.
+				edgeOptions := map[string]string{
+					"tooltip":  graphEdgeTooltip(pin, lane.Pin, lane),
+					"color":    graphEdgeColor(pin, lane.Pin, lane),
+					"len":      fmt.Sprintf("%f", lane.Latency.Seconds()*200),
+					"penwidth": fmt.Sprintf("%f", math.Sqrt(float64(lane.Capacity)/1000000)*2),
+				}
+				// Add edge.
+				_ = graph.AddEdge(pin.Hub.ID, lane.Pin.Hub.ID, false, edgeOptions)
+			}
+		}
+	}
+
+	// Render the graph in the requested format.
+	var mimeType string
+	var responseData []byte
+	switch format {
+	case "dot":
+		mimeType = "text/x-dot"
+		responseData = []byte(graph.String())
+	case "html":
+		mimeType = "text/html"
+		// The dot source is embedded in backticks inside the page and
+		// rendered client-side by d3-graphviz.
+		responseData = []byte(fmt.Sprintf(
+			`<!DOCTYPE html><html><meta charset="utf-8"><body style="margin:0;padding:0;">
+<style>#graph svg {height: 99.5vh; width: 99.5vw;}</style>
+<div id="graph"></div>
+<script src="/assets/vendor/js/hpcc-js-wasm-1.13.0/index.min.js"></script>
+<script src="/assets/vendor/js/d3-7.3.0/d3.min.js"></script>
+<script src="/assets/vendor/js/d3-graphviz-4.1.0/d3-graphviz.min.js"></script>
+<script>
+d3.select("#graph").graphviz(useWorker=false).engine("neato").renderDot(%s%s%s);
+</script>
+</body></html>`,
+			"`", graph.String(), "`",
+		))
+	}
+
+	// Write response.
+	w.Header().Set("Content-Type", mimeType+"; charset=utf-8")
+	w.Header().Set("Content-Length", strconv.Itoa(len(responseData)))
+	w.WriteHeader(http.StatusOK)
+	_, err := w.Write(responseData)
+	if err != nil {
+		log.Tracer(r.Context()).Warningf("api: failed to write response: %s", err)
+	}
+}
+
+// graphNodeLabel builds the quoted graphviz label for a pin: the Hub name
+// with spaces turned into line breaks, plus an optional state comment and a
+// high-load warning.
+// NOTE(review): assumes pin.Hub.Status is non-nil — confirm callers
+// guarantee this.
+func graphNodeLabel(pin *Pin) (s string) {
+	// Pick the single most relevant state comment, in priority order.
+	var comment string
+	switch {
+	case pin.State == StateNone:
+		comment = "dead"
+	case pin.State.Has(StateIsHomeHub):
+		comment = "Home"
+	case pin.State.HasAnyOf(StateSummaryDisregard):
+		comment = "disregarded"
+	case !pin.State.Has(StateSummaryRegard):
+		comment = "not regarded"
+	case pin.State.Has(StateTrusted):
+		comment = "trusted"
+	}
+	if comment != "" {
+		comment = fmt.Sprintf("\n(%s)", comment)
+	}
+
+	// Flag heavily loaded Hubs directly in the label.
+	if pin.Hub.Status.Load >= 80 {
+		comment += fmt.Sprintf("\nHIGH LOAD: %d", pin.Hub.Status.Load)
+	}
+
+	return fmt.Sprintf(
+		`"%s%s"`,
+		strings.ReplaceAll(pin.Hub.Name(), " ", "\n"),
+		comment,
+	)
+}
+
+// graphNodeTooltip builds the quoted multi-line graphviz tooltip for a pin,
+// listing ID, states, version, IP info (with geoip data where available),
+// load and cost.
+func graphNodeTooltip(pin *Pin) string {
+	// Gather IP info, annotated with country and AS data when located.
+	var v4Info, v6Info string
+	if pin.Hub.Info.IPv4 != nil {
+		if pin.LocationV4 != nil {
+			v4Info = fmt.Sprintf(
+				"%s (%s AS%d %s)",
+				pin.Hub.Info.IPv4.String(),
+				pin.LocationV4.Country.Code,
+				pin.LocationV4.AutonomousSystemNumber,
+				pin.LocationV4.AutonomousSystemOrganization,
+			)
+		} else {
+			v4Info = pin.Hub.Info.IPv4.String()
+		}
+	}
+	if pin.Hub.Info.IPv6 != nil {
+		if pin.LocationV6 != nil {
+			v6Info = fmt.Sprintf(
+				"%s (%s AS%d %s)",
+				pin.Hub.Info.IPv6.String(),
+				pin.LocationV6.Country.Code,
+				pin.LocationV6.AutonomousSystemNumber,
+				pin.LocationV6.AutonomousSystemOrganization,
+			)
+		} else {
+			v6Info = pin.Hub.Info.IPv6.String()
+		}
+	}
+
+	return fmt.Sprintf(
+		`"ID: %s
+States: %s
+Version: %s
+IPv4: %s
+IPv6: %s
+Load: %d
+Cost: %.2f"`,
+		pin.Hub.ID,
+		pin.State,
+		pin.Hub.Status.Version,
+		v4Info,
+		v6Info,
+		pin.Hub.Status.Load,
+		pin.Cost,
+	)
+}
+
+// graphEdgeTooltip builds the quoted multi-line graphviz tooltip for the
+// lane between the two given pins, showing latency, capacity and cost.
+func graphEdgeTooltip(from, to *Pin, lane *Lane) string {
+	return fmt.Sprintf(
+		`"%s <> %s
+Latency: %s
+Capacity: %.2f Mbit/s
+Cost: %.2f"`,
+		from.Hub.Info.Name, to.Hub.Info.Name,
+		lane.Latency,
+		float64(lane.Capacity)/1000000,
+		lane.Cost,
+	)
+}
+
+// Graphviz colors used for node fills, node borders and edges in the map
+// graph export.
+// See https://graphviz.org/doc/info/colors.html
+const (
+	graphColorWarning          = "orange2"
+	graphColorError            = "red2"
+	graphColorHomeAndConnected = "steelblue2"
+	graphColorDisregard        = "tomato2"
+	graphColorNotRegard        = "tan2"
+	graphColorTrusted          = "seagreen2"
+	graphColorDefaultNode      = "seashell2"
+	graphColorDefaultEdge      = "black"
+	graphColorNone             = "transparent"
+)
+
+// graphNodeColor returns the fill color for a pin's graph node. The checks
+// run in priority order: dead, overloaded, loaded, home, disregarded, not
+// regarded, trusted, then the default.
+func graphNodeColor(pin *Pin) string {
+	if pin.State == StateNone {
+		return graphColorNone
+	}
+	if pin.Hub.Status.Load >= 95 {
+		return graphColorError
+	}
+	if pin.Hub.Status.Load >= 80 {
+		return graphColorWarning
+	}
+	if pin.State.Has(StateIsHomeHub) {
+		return graphColorHomeAndConnected
+	}
+	if pin.State.HasAnyOf(StateSummaryDisregard) {
+		return graphColorDisregard
+	}
+	if !pin.State.Has(StateSummaryRegard) {
+		return graphColorNotRegard
+	}
+	if pin.State.Has(StateTrusted) {
+		return graphColorTrusted
+	}
+	return graphColorDefaultNode
+}
+
+// graphNodeBorderColor returns the border color for a pin's graph node:
+// highlighted when the pin has an active terminal, transparent otherwise.
+func graphNodeBorderColor(pin *Pin) string {
+	if pin.HasActiveTerminal() {
+		return graphColorHomeAndConnected
+	}
+	return graphColorNone
+}
+
+// graphEdgeColor returns the color for the edge between two pins: warning
+// for missing measurements, error for poor measurements, highlighted when
+// the edge is part of the active route (in either direction), and the
+// default otherwise.
+func graphEdgeColor(from, to *Pin, lane *Lane) string {
+	// Missing measurements are flagged as a warning.
+	if lane.Capacity == 0 || lane.Latency == 0 {
+		return graphColorWarning
+	}
+	// Alert if capacity is under 10Mbit/s or latency is over 100ms.
+	if lane.Capacity < 10000000 || lane.Latency > 100*time.Millisecond {
+		return graphColorError
+	}
+
+	// Check for active edge forward: the previous hop of "to"'s route is "from".
+	if to.HasActiveTerminal() && len(to.Connection.Route.Path) >= 2 {
+		secondLastHopIndex := len(to.Connection.Route.Path) - 2
+		if to.Connection.Route.Path[secondLastHopIndex].HubID == from.Hub.ID {
+			return graphColorHomeAndConnected
+		}
+	}
+	// Check for active edge backward: the previous hop of "from"'s route is "to".
+	if from.HasActiveTerminal() && len(from.Connection.Route.Path) >= 2 {
+		secondLastHopIndex := len(from.Connection.Route.Path) - 2
+		if from.Connection.Route.Path[secondLastHopIndex].HubID == to.Hub.ID {
+			return graphColorHomeAndConnected
+		}
+	}
+
+	// Return default color if edge is not active.
+	return graphColorDefaultEdge
+}
diff --git a/spn/navigator/api_route.go b/spn/navigator/api_route.go
new file mode 100644
index 00000000..4d854841
--- /dev/null
+++ b/spn/navigator/api_route.go
@@ -0,0 +1,396 @@
+package navigator
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	mrand "math/rand"
+	"net"
+	"net/http"
+	"strings"
+	"text/tabwriter"
+	"time"
+
+	"github.com/safing/portbase/api"
+	"github.com/safing/portbase/config"
+	"github.com/safing/portmaster/service/intel"
+	"github.com/safing/portmaster/service/intel/geoip"
+	"github.com/safing/portmaster/service/netenv"
+	"github.com/safing/portmaster/service/network/netutils"
+	"github.com/safing/portmaster/service/profile"
+	"github.com/safing/portmaster/service/profile/endpoints"
+)
+
+// registerRouteAPIEndpoints registers the route-calculation debug endpoint
+// with the API module. The endpoint simulates routing to a destination on a
+// named map and returns a textual report.
+func registerRouteAPIEndpoints() error {
+	if err := api.RegisterEndpoint(api.Endpoint{
+		Path:        `spn/map/{map:[A-Za-z0-9]{1,255}}/route/to/{destination:[a-z0-9_\.:-]{1,255}}`,
+		Read:        api.PermitUser,
+		BelongsTo:   module,
+		ActionFunc:  handleRouteCalculationRequest,
+		Name:        "Calculate Route through SPN",
+		Description: "Returns a textual representation of the routing process.",
+		Parameters: []api.Parameter{
+			{
+				Method:      http.MethodGet,
+				Field:       "profile",
+				Value:       "<id>|global",
+				Description: "Specify a profile ID to load more settings for simulation.",
+			},
+			{
+				Method:      http.MethodGet,
+				Field:       "encrypted",
+				Value:       "true",
+				Description: "Specify to signify that the simulated connection should be regarded as encrypted. Only valid with a profile.",
+			},
+		},
+	}); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// handleRouteCalculationRequest simulates the routing process for the
+// destination given in the URL ("home", an IP address, or a domain name)
+// and returns a human-readable report of the applied options, the
+// considered/rejected exits, and the candidate routes.
+func handleRouteCalculationRequest(ar *api.Request) (msg string, err error) { //nolint:maintidx
+	// Get map.
+	m, ok := getMapForAPI(ar.URLVars["map"])
+	if !ok {
+		return "", errors.New("map not found")
+	}
+	// Get profile ID.
+	profileID := ar.Request.URL.Query().Get("profile")
+
+	// Parse destination and prepare options.
+	entity := &intel.Entity{}
+	destination := ar.URLVars["destination"]
+	matchFor := DestinationHub
+	var (
+		introText              string
+		locationV4, locationV6 *geoip.Location
+		opts                   *Options
+	)
+	switch {
+	case destination == "":
+		// Destination is required.
+		return "", errors.New("no destination provided")
+
+	case destination == "home":
+		if profileID != "" {
+			return "", errors.New("cannot apply profile to home hub route")
+		}
+		// Simulate finding home hub.
+		locations, ok := netenv.GetInternetLocation()
+		if !ok || len(locations.All) == 0 {
+			return "", errors.New("failed to locate own device for finding home hub")
+		}
+		introText = fmt.Sprintf("looking for home hub near %s and %s", locations.BestV4(), locations.BestV6())
+		locationV4 = locations.BestV4().LocationOrNil()
+		locationV6 = locations.BestV6().LocationOrNil()
+		matchFor = HomeHub
+
+		// START of copied from captain/navigation.go
+
+		// Get own entity.
+		// Checking the entity against the entry policies is somewhat hit and miss
+		// anyway, as the device location is an approximation.
+		var myEntity *intel.Entity
+		if dl := locations.BestV4(); dl != nil && dl.IP != nil {
+			myEntity = (&intel.Entity{IP: dl.IP}).Init(0)
+			myEntity.FetchData(ar.Context())
+		} else if dl := locations.BestV6(); dl != nil && dl.IP != nil {
+			myEntity = (&intel.Entity{IP: dl.IP}).Init(0)
+			myEntity.FetchData(ar.Context())
+		}
+
+		// Build navigation options for searching for a home hub.
+		homePolicy, err := endpoints.ParseEndpoints(config.GetAsStringArray("spn/homePolicy", []string{})())
+		if err != nil {
+			return "", fmt.Errorf("failed to parse home hub policy: %w", err)
+		}
+
+		opts = &Options{
+			Home: &HomeHubOptions{
+				HubPolicies:        []endpoints.Endpoints{homePolicy},
+				CheckHubPolicyWith: myEntity,
+			},
+		}
+
+		// Add requirement to only use Safing nodes when not using community nodes.
+		if !config.GetAsBool("spn/useCommunityNodes", true)() {
+			opts.Home.RequireVerifiedOwners = []string{"Safing"}
+		}
+
+		// Require a trusted home node when the routing profile requires less than two hops.
+		routingProfile := GetRoutingProfile(config.GetAsString(profile.CfgOptionRoutingAlgorithmKey, DefaultRoutingProfileID)())
+		if routingProfile.MinHops < 2 {
+			opts.Home.Regard = opts.Home.Regard.Add(StateTrusted)
+		}
+
+		// END of copied
+
+	// Note: an IP destination falls through into the FQDN case bodies below,
+	// which skip name resolution because entity.IP is already set.
+	case net.ParseIP(destination) != nil:
+		entity.IP = net.ParseIP(destination)
+
+		fallthrough
+	case netutils.IsValidFqdn(destination):
+		fallthrough
+	case netutils.IsValidFqdn(destination + "."):
+		// Resolve domain to IP, if not inherited from a previous case.
+		var ignoredIPs int
+		if entity.IP == nil {
+			entity.Domain = destination
+
+			// Resolve name to IPs.
+			ips, err := net.DefaultResolver.LookupIP(ar.Context(), "ip", destination)
+			if err != nil {
+				return "", fmt.Errorf("failed to lookup IP address of %s: %w", destination, err)
+			}
+			if len(ips) == 0 {
+				return "", fmt.Errorf("failed to lookup IP address of %s: no result", destination)
+			}
+
+			// Shuffle IPs.
+			if len(ips) >= 2 {
+				mr := mrand.New(mrand.NewSource(time.Now().UnixNano())) //nolint:gosec
+				mr.Shuffle(len(ips), func(i, j int) {
+					ips[i], ips[j] = ips[j], ips[i]
+				})
+			}
+
+			// Simulate with one randomly chosen IP; report how many were ignored.
+			entity.IP = ips[0]
+			ignoredIPs = len(ips) - 1
+		}
+		entity.Init(0)
+
+		// Get location of IP.
+		location, ok := entity.GetLocation(ar.Context())
+		if !ok {
+			return "", fmt.Errorf("failed to get geoip location for %s: %s", entity.IP, entity.LocationError)
+		}
+		// Assign location to separate variables.
+		if entity.IP.To4() != nil {
+			locationV4 = location
+		} else {
+			locationV6 = location
+		}
+
+		// Set intro text.
+		if entity.Domain != "" {
+			introText = fmt.Sprintf("looking for route to %s at %s\n(ignoring %d additional IPs returned by DNS)", entity.IP, formatLocation(location), ignoredIPs)
+		} else {
+			introText = fmt.Sprintf("looking for route to %s at %s", entity.IP, formatLocation(location))
+		}
+
+		// Get profile.
+		if profileID != "" {
+			var lp *profile.LayeredProfile
+			if profileID == "global" {
+				// Create new empty profile for easy access to global settings.
+				lp = profile.NewLayeredProfile(profile.New(nil))
+			} else {
+				// Get local profile by ID.
+				localProfile, err := profile.GetLocalProfile(profileID, nil, nil)
+				if err != nil {
+					return "", fmt.Errorf("failed to get profile: %w", err)
+				}
+				lp = localProfile.LayeredProfile()
+			}
+			opts = DeriveTunnelOptions(
+				lp,
+				entity,
+				ar.Request.URL.Query().Has("encrypted"),
+			)
+		} else {
+			opts = m.defaultOptions()
+		}
+
+	default:
+		return "", errors.New("invalid destination provided")
+	}
+
+	// Finalize entity.
+	entity.Init(0)
+
+	// Start formatting output.
+	lines := []string{
+		"Routing simulation: " + introText,
+		// NOTE(review): this message likely means "does NOT match ... to
+		// 100%" - confirm the intent and fix the wording separately.
+		"Please note that this routing simulation does match the behavior of regular routing to 100%.",
+		"",
+	}
+
+	// Print options.
+	// ==================
+
+	lines = append(lines, "Routing Options:")
+	lines = append(lines, "Algorithm: "+opts.RoutingProfile)
+	if opts.Home != nil {
+		lines = append(lines, "Home Options:")
+		lines = append(lines, fmt.Sprintf("  Regard: %s", opts.Home.Regard))
+		lines = append(lines, fmt.Sprintf("  Disregard: %s", opts.Home.Disregard))
+		lines = append(lines, fmt.Sprintf("  No Default: %v", opts.Home.NoDefaults))
+		lines = append(lines, fmt.Sprintf("  Hub Policies: %v", opts.Home.HubPolicies))
+		lines = append(lines, fmt.Sprintf("  Require Verified Owners: %v", opts.Home.RequireVerifiedOwners))
+	}
+	if opts.Transit != nil {
+		lines = append(lines, "Transit Options:")
+		lines = append(lines, fmt.Sprintf("  Regard: %s", opts.Transit.Regard))
+		lines = append(lines, fmt.Sprintf("  Disregard: %s", opts.Transit.Disregard))
+		lines = append(lines, fmt.Sprintf("  No Default: %v", opts.Transit.NoDefaults))
+		lines = append(lines, fmt.Sprintf("  Hub Policies: %v", opts.Transit.HubPolicies))
+		lines = append(lines, fmt.Sprintf("  Require Verified Owners: %v", opts.Transit.RequireVerifiedOwners))
+	}
+	if opts.Destination != nil {
+		lines = append(lines, "Destination Options:")
+		lines = append(lines, fmt.Sprintf("  Regard: %s", opts.Destination.Regard))
+		lines = append(lines, fmt.Sprintf("  Disregard: %s", opts.Destination.Disregard))
+		lines = append(lines, fmt.Sprintf("  No Default: %v", opts.Destination.NoDefaults))
+		lines = append(lines, fmt.Sprintf("  Hub Policies: %v", opts.Destination.HubPolicies))
+		lines = append(lines, fmt.Sprintf("  Require Verified Owners: %v", opts.Destination.RequireVerifiedOwners))
+		if opts.Destination.CheckHubPolicyWith != nil {
+			lines = append(lines, "  Check Hub Policy With:")
+			if opts.Destination.CheckHubPolicyWith.Domain != "" {
+				lines = append(lines, fmt.Sprintf("    Domain: %v", opts.Destination.CheckHubPolicyWith.Domain))
+			}
+			if opts.Destination.CheckHubPolicyWith.IP != nil {
+				lines = append(lines, fmt.Sprintf("    IP: %v", opts.Destination.CheckHubPolicyWith.IP))
+			}
+			if opts.Destination.CheckHubPolicyWith.Port != 0 {
+				lines = append(lines, fmt.Sprintf("    Port: %v", opts.Destination.CheckHubPolicyWith.Port))
+			}
+		}
+	}
+	lines = append(lines, "\n")
+
+	// Find nearest hubs.
+	// ==================
+
+	// Start operating in map.
+	m.RLock()
+	defer m.RUnlock()
+	// Check if map is populated.
+	if m.isEmpty() {
+		return "", ErrEmptyMap
+	}
+
+	// Find nearest hubs.
+	// Failures are reported in the output instead of as an API error so the
+	// partial report (options etc.) is still returned to the user.
+	nbPins, err := m.findNearestPins(locationV4, locationV6, opts, matchFor, true)
+	if err != nil {
+		lines = append(lines, fmt.Sprintf("FAILED to find any suitable exit hub: %s", err))
+		return strings.Join(lines, "\n"), nil
+		// return "", fmt.Errorf("failed to search for nearby pins: %w", err)
+	}
+
+	// Print found exits to table.
+	lines = append(lines, "Considered Exits (cheapest 10% are shuffled)")
+	buf := bytes.NewBuffer(nil)
+	tabWriter := tabwriter.NewWriter(buf, 8, 4, 3, ' ', 0)
+	fmt.Fprint(tabWriter, "Hub Name\tCost\tLocation\n")
+	for _, nbPin := range nbPins.pins {
+		fmt.Fprintf(tabWriter,
+			"%s\t%.0f\t%s\n",
+			nbPin.pin.Hub.Name(),
+			nbPin.cost,
+			formatMultiLocation(nbPin.pin.LocationV4, nbPin.pin.LocationV6),
+		)
+	}
+	_ = tabWriter.Flush()
+	lines = append(lines, buf.String())
+
+	// Print too expensive exits to table.
+	lines = append(lines, "Too Expensive Exits:")
+	buf = bytes.NewBuffer(nil)
+	tabWriter = tabwriter.NewWriter(buf, 8, 4, 3, ' ', 0)
+	fmt.Fprint(tabWriter, "Hub Name\tCost\tLocation\n")
+	for _, nbPin := range nbPins.debug.tooExpensive {
+		fmt.Fprintf(tabWriter,
+			"%s\t%.0f\t%s\n",
+			nbPin.pin.Hub.Name(),
+			nbPin.cost,
+			formatMultiLocation(nbPin.pin.LocationV4, nbPin.pin.LocationV6),
+		)
+	}
+	_ = tabWriter.Flush()
+	lines = append(lines, buf.String())
+
+	// Print disregarded exits to table.
+	lines = append(lines, "Disregarded Exits:")
+	buf = bytes.NewBuffer(nil)
+	tabWriter = tabwriter.NewWriter(buf, 8, 4, 3, ' ', 0)
+	fmt.Fprint(tabWriter, "Hub Name\tReason\tStates\n")
+	for _, nbPin := range nbPins.debug.disregarded {
+		fmt.Fprintf(tabWriter,
+			"%s\t%s\t%s\n",
+			nbPin.pin.Hub.Name(),
+			nbPin.reason,
+			nbPin.pin.State,
+		)
+	}
+	_ = tabWriter.Flush()
+	lines = append(lines, buf.String())
+
+	// Find routes.
+	// ============
+
+	// Unless we looked for a home node.
+	if destination == "home" {
+		return strings.Join(lines, "\n"), nil
+	}
+
+	// Find routes.
+	routes, err := m.findRoutes(nbPins, opts)
+	if err != nil {
+		lines = append(lines, fmt.Sprintf("FAILED to find routes: %s", err))
+		return strings.Join(lines, "\n"), nil
+		// return "", fmt.Errorf("failed to find routes: %w", err)
+	}
+
+	// Print found routes to table.
+	lines = append(lines, "Considered Routes (cheapest 10% are shuffled)")
+	buf = bytes.NewBuffer(nil)
+	tabWriter = tabwriter.NewWriter(buf, 8, 4, 3, ' ', 0)
+	fmt.Fprint(tabWriter, "Cost\tPath\n")
+	for _, route := range routes.All {
+		fmt.Fprintf(tabWriter,
+			"%.0f\t%s\n",
+			route.TotalCost,
+			formatRoute(route, entity.IP),
+		)
+	}
+	_ = tabWriter.Flush()
+	lines = append(lines, buf.String())
+
+	return strings.Join(lines, "\n"), nil
+}
+
+// formatLocation renders a geoip location as
+// "Country (CC - AS<number> <org>)" for the report output.
+// The caller must pass a non-nil location.
+func formatLocation(loc *geoip.Location) string {
+	return fmt.Sprintf(
+		"%s (%s - AS%d %s)",
+		loc.Country.Name,
+		loc.Country.Code,
+		loc.AutonomousSystemNumber,
+		loc.AutonomousSystemOrganization,
+	)
+}
+
+// formatMultiLocation formats the first non-nil of the two given locations,
+// preferring a over b. Returns an empty string if both are nil.
+func formatMultiLocation(a, b *geoip.Location) string {
+	switch {
+	case a != nil:
+		return formatLocation(a)
+	case b != nil:
+		return formatLocation(b)
+	default:
+		return ""
+	}
+}
+
+// formatRoute renders a route as a chain of hub names with the hop costs
+// in between ("A >> 12.34c >> B >> ... >> dst"), ending with the
+// destination cost and IP.
+func formatRoute(r *Route, dst net.IP) string {
+	s := make([]string, 0, len(r.Path)+1)
+	for i, hop := range r.Path {
+		if i == 0 {
+			// First hop has no incoming cost to print.
+			s = append(s, hop.pin.Hub.Name())
+		} else {
+			s = append(s, fmt.Sprintf(">> %.2fc >> %s", hop.Cost, hop.pin.Hub.Name()))
+		}
+	}
+	s = append(s, fmt.Sprintf(">> %.2fc >> %s", r.DstCost, dst))
+	return strings.Join(s, " ")
+}
diff --git a/spn/navigator/costs.go b/spn/navigator/costs.go
new file mode 100644
index 00000000..0b48ea16
--- /dev/null
+++ b/spn/navigator/costs.go
@@ -0,0 +1,72 @@
+package navigator
+
+import "time"
+
+const (
+	nearestPinsMaxCostDifference = 5000
+	nearestPinsMinimum           = 10
+)
+
+// CalculateLaneCost calculates the cost of using a Lane based on the given
+// Lane latency and capacity.
+// The capacity component ranges from 0 to 10000; note that the latency
+// component (one point per millisecond) is not capped, so extreme latencies
+// can push the total above 10000.
+func CalculateLaneCost(latency time.Duration, capacity int) (cost float32) {
+	// - One point for every ms in latency (linear)
+	if latency != 0 {
+		cost += float32(latency) / float32(time.Millisecond)
+	} else {
+		// Add cautious default cost if latency is not available.
+		cost += 1000
+	}
+
+	// Capacity cost is piecewise linear within each capacity band, with
+	// higher bands being exponentially cheaper.
+	capacityFloat := float32(capacity)
+	switch {
+	case capacityFloat == 0:
+		// Add cautious default cost if capacity is not available.
+		cost += 4000
+	case capacityFloat < cap1Mbit:
+		// - Between 1000 and 10000 points for ranges below 1Mbit/s
+		cost += 1000 + 9000*((cap1Mbit-capacityFloat)/cap1Mbit)
+	case capacityFloat < cap10Mbit:
+		// - Between 100 and 1000 points for ranges below 10Mbit/s
+		cost += 100 + 900*((cap10Mbit-capacityFloat)/cap10Mbit)
+	case capacityFloat < cap100Mbit:
+		// - Between 20 and 100 points for ranges below 100Mbit/s
+		cost += 20 + 80*((cap100Mbit-capacityFloat)/cap100Mbit)
+	case capacityFloat < cap1Gbit:
+		// - Between 5 and 20 points for ranges below 1Gbit/s
+		cost += 5 + 15*((cap1Gbit-capacityFloat)/cap1Gbit)
+	case capacityFloat < cap10Gbit:
+		// - Between 0 and 5 points for ranges below 10Gbit/s
+		cost += 5 * ((cap10Gbit - float32(capacity)) / cap10Gbit)
+	}
+	// Capacities of 10Gbit/s and above add no capacity cost.
+
+	return cost
+}
+
+// CalculateHubCost calculates the cost of using a Hub based on the given Hub load.
+// Ranges from 100 to 10000. A load of 100 (full) is effectively prohibitive
+// compared to the other cost components.
+func CalculateHubCost(load int) (cost float32) {
+	switch {
+	case load >= 100:
+		return 10000
+	case load >= 95:
+		return 1000
+	case load >= 80:
+		return 500
+	default:
+		// Base cost for any hub, so that hop count always matters.
+		return 100
+	}
+}
+
+// CalculateDestinationCost calculates the cost of a destination hub to a
+// destination server based on the given proximity.
+// Ranges from 0 to 10000 (a distance of 100 yields 100^3/100 = 10000).
+func CalculateDestinationCost(proximity float32) (cost float32) {
+	// Invert from proximity (0-100) to get a distance value.
+	distance := 100 - proximity
+
+	// Take the distance to the power of three and then divide by hundred in order to
+	// make high distances exponentially more expensive.
+	return (distance * distance * distance) / 100
+}
diff --git a/spn/navigator/database.go b/spn/navigator/database.go
new file mode 100644
index 00000000..b7ee8ae4
--- /dev/null
+++ b/spn/navigator/database.go
@@ -0,0 +1,164 @@
+package navigator
+
+import (
+	"context"
+	"fmt"
+	"strings"
+
+	"github.com/safing/portbase/database"
+	"github.com/safing/portbase/database/iterator"
+	"github.com/safing/portbase/database/query"
+	"github.com/safing/portbase/database/record"
+	"github.com/safing/portbase/database/storage"
+)
+
+var mapDBController *database.Controller
+
+// StorageInterface provides a storage.Interface that exposes the live
+// navigator maps as a read-only injected database.
+type StorageInterface struct {
+	storage.InjectBase
+}
+
+// Database prefixes:
+// Pins:       map:main/<Hub ID>
+// DNS Requests:    network:tree/<PID>/dns/<ID>
+// IP Connections:  network:tree/<PID>/ip/<ID>
+
+// makeDBKey builds the full database key for a pin: "map:<mapName>/<hubID>".
+func makeDBKey(mapName, hubID string) string {
+	return fmt.Sprintf("map:%s/%s", mapName, hubID)
+}
+
+// parseDBKey splits a database key into map name and hub ID.
+// It assumes the database prefix ("map:") has already been stripped by the
+// storage layer - TODO confirm against portbase storage semantics.
+// Returns empty strings for keys with more than two segments.
+func parseDBKey(key string) (mapName, hubID string) {
+	// Split into segments.
+	segments := strings.Split(key, "/")
+
+	// Keys have 1 or 2 segments.
+	switch len(segments) {
+	case 1:
+		// Map name only, no hub ID (eg. a query prefix).
+		return segments[0], ""
+	case 2:
+		return segments[0], segments[1]
+	default:
+		return "", ""
+	}
+}
+
+// Get returns the exported record of a single pin, addressed by
+// "<mapName>/<hubID>". Returns storage.ErrNotFound for malformed keys,
+// unknown maps, and unknown hubs alike.
+func (s *StorageInterface) Get(key string) (record.Record, error) {
+	// Parse key and check if valid.
+	mapName, hubID := parseDBKey(key)
+	if mapName == "" || hubID == "" {
+		return nil, storage.ErrNotFound
+	}
+
+	// Get map.
+	m, ok := getMapForAPI(mapName)
+	if !ok {
+		return nil, storage.ErrNotFound
+	}
+
+	// Get Pin from map.
+	pin, ok := m.GetPin(hubID)
+	if !ok {
+		return nil, storage.ErrNotFound
+	}
+	return pin.Export(), nil
+}
+
+// Query returns an iterator for the supplied query. The map name is taken
+// from the query's key prefix; matching pins are streamed by a background
+// worker so the caller is not blocked.
+func (s *StorageInterface) Query(q *query.Query, local, internal bool) (*iterator.Iterator, error) {
+	// Parse key and check if valid.
+	mapName, _ := parseDBKey(q.DatabaseKeyPrefix())
+	if mapName == "" {
+		return nil, storage.ErrNotFound
+	}
+
+	// Get map.
+	m, ok := getMapForAPI(mapName)
+	if !ok {
+		return nil, storage.ErrNotFound
+	}
+
+	// Start query worker.
+	it := iterator.New()
+	module.StartWorker("map query", func(_ context.Context) error {
+		s.processQuery(m, q, it)
+		return nil
+	})
+
+	return it, nil
+}
+
+// processQuery feeds all pins of the map that match the query into the
+// iterator, aborting early if the iterator is cancelled by the consumer.
+func (s *StorageInterface) processQuery(m *Map, q *query.Query, it *iterator.Iterator) {
+	// Return all matching pins.
+	for _, pin := range m.sortedPins(true) {
+		export := pin.Export()
+		if q.Matches(export) {
+			select {
+			case it.Next <- export:
+			case <-it.Done:
+				// Consumer stopped reading - abort without finishing.
+				return
+			}
+		}
+	}
+
+	it.Finish(nil)
+}
+
+// registerMapDatabase registers the "map" database and injects the
+// navigator's StorageInterface as its backend, keeping the returned
+// controller in mapDBController for pushing updates.
+func registerMapDatabase() error {
+	_, err := database.Register(&database.Database{
+		Name:        "map",
+		Description: "SPN Network Maps",
+		StorageType: database.StorageTypeInjected,
+	})
+	if err != nil {
+		return err
+	}
+
+	controller, err := database.InjectDatabase("map", &StorageInterface{})
+	if err != nil {
+		return err
+	}
+
+	mapDBController = controller
+	return nil
+}
+
+// withdrawMapDatabase withdraws the injected "map" database on shutdown.
+// Must only be called after registerMapDatabase succeeded.
+func withdrawMapDatabase() {
+	mapDBController.Withdraw()
+}
+
+// PushPinChanges pushes all changed pins to subscribers.
+// The work is done asynchronously in a module worker.
+func (m *Map) PushPinChanges() {
+	module.StartWorker("push pin changes", m.pushPinChangesWorker)
+}
+
+// pushPinChangesWorker pushes an update for every pin whose pushChanges
+// flag is set, atomically clearing the flag so each change is pushed once.
+func (m *Map) pushPinChangesWorker(ctx context.Context) error {
+	m.RLock()
+	defer m.RUnlock()
+
+	for _, pin := range m.all {
+		// SetToIf atomically clears the flag and reports whether it was set.
+		if pin.pushChanges.SetToIf(true, false) {
+			mapDBController.PushUpdate(pin.Export())
+		}
+	}
+
+	return nil
+}
+
+// pushChange pushes changes of the pin, if the pushChanges flag is set.
+// The export and push happen in a separate worker; the flag is re-checked
+// atomically there to avoid duplicate pushes.
+func (pin *Pin) pushChange() {
+	// Check before starting the worker.
+	if pin.pushChanges.IsNotSet() {
+		return
+	}
+
+	// Start worker to push changes.
+	module.StartWorker("push pin change", func(ctx context.Context) error {
+		if pin.pushChanges.SetToIf(true, false) {
+			mapDBController.PushUpdate(pin.Export())
+		}
+		return nil
+	})
+}
diff --git a/spn/navigator/findnearest.go b/spn/navigator/findnearest.go
new file mode 100644
index 00000000..0a294ce2
--- /dev/null
+++ b/spn/navigator/findnearest.go
@@ -0,0 +1,441 @@
+package navigator
+
+import (
+	"errors"
+	"fmt"
+	mrand "math/rand"
+	"sort"
+	"strings"
+	"time"
+
+	"github.com/safing/portmaster/service/intel/geoip"
+	"github.com/safing/portmaster/spn/hub"
+)
+
+const (
+	// defaultMaxNearbyMatches defines a default value of how many matches a
+	// nearby pin find operation in a map should return.
+	defaultMaxNearbyMatches = 100
+
+	// defaultRandomizeNearbyPinTopPercent defines the top percent of a nearby
+	// pins set that should be randomized for balancing purposes.
+	// Range: 0-1.
+	defaultRandomizeNearbyPinTopPercent = 0.1
+)
+
+// nearbyPins is a list of nearby Pins to a certain location.
+// It implements sort.Interface (ascending by cost).
+type nearbyPins struct {
+	pins                []*nearbyPin // candidates collected so far
+	minPins             int          // keep at least this many, regardless of cost
+	maxPins             int          // hard cap on the number of pins kept
+	maxCost             float32      // cost ceiling; 0 means not yet established
+	cutOffLimit         float32      // allowed cost difference to the cheapest pin
+	randomizeTopPercent float32      // top cost share to shuffle for balancing (0-1)
+
+	debug *nearbyPinsDebug // non-nil only when debug collection is enabled
+}
+
+// nearbyPinsDebug holds additional debugging for nearbyPins: pins that were
+// rejected for cost and pins that never matched the criteria.
+type nearbyPinsDebug struct {
+	tooExpensive []*nearbyPin
+	disregarded  []*nearbyDisregardedPin
+}
+
+// nearbyDisregardedPin represents a disregarded pin together with a
+// human-readable reason for the debug report.
+type nearbyDisregardedPin struct {
+	pin    *Pin
+	reason string
+}
+
+// nearbyPin represents a Pin and the estimated cost of reaching it from a
+// certain location.
+type nearbyPin struct {
+	pin  *Pin
+	cost float32
+}
+
+// Len is the number of elements in the collection (sort.Interface).
+func (nb *nearbyPins) Len() int {
+	return len(nb.pins)
+}
+
+// Less reports whether the element with index i should sort before the element
+// with index j (sort.Interface); cheaper pins sort first.
+func (nb *nearbyPins) Less(i, j int) bool {
+	return nb.pins[i].cost < nb.pins[j].cost
+}
+
+// Swap swaps the elements with indexes i and j (sort.Interface).
+func (nb *nearbyPins) Swap(i, j int) {
+	nb.pins[i], nb.pins[j] = nb.pins[j], nb.pins[i]
+}
+
+// add potentially adds a Pin to the list of nearby Pins.
+// A pin above the established cost ceiling is only rejected once the
+// minimum amount of pins has been collected; rejected pins are recorded in
+// the debug data when enabled.
+func (nb *nearbyPins) add(pin *Pin, cost float32) {
+	if len(nb.pins) > nb.minPins && nb.maxCost > 0 && cost > nb.maxCost {
+		// Add debug data if enabled.
+		if nb.debug != nil {
+			nb.debug.tooExpensive = append(nb.debug.tooExpensive,
+				&nearbyPin{
+					pin:  pin,
+					cost: cost,
+				},
+			)
+		}
+
+		return
+	}
+
+	nb.pins = append(nb.pins, &nearbyPin{
+		pin:  pin,
+		cost: cost,
+	})
+}
+
+// get returns the nearby Pin with the given Hub ID, or nil if it is not in
+// the collection. Linear scan; the collection is small (see maxPins).
+func (nb *nearbyPins) get(id string) *nearbyPin {
+	for _, nbPin := range nb.pins {
+		if nbPin.pin.Hub.ID == id {
+			return nbPin
+		}
+	}
+
+	return nil
+}
+
+// clean sorts the list by cost and shortens it to the configured maximum,
+// updating the cost ceiling (cheapest cost + cutOffLimit) and moving
+// removed pins into the debug data when enabled.
+func (nb *nearbyPins) clean() {
+	// Sort nearby Pins so that the closest one is on top.
+	sort.Sort(nb)
+
+	// Set maximum cost based on max difference, if we have enough pins.
+	if len(nb.pins) >= nb.minPins {
+		nb.maxCost = nb.pins[0].cost + nb.cutOffLimit
+	}
+
+	// Remove superfluous Pins from the list.
+	if len(nb.pins) > nb.maxPins {
+		// Add debug data if enabled.
+		if nb.debug != nil {
+			nb.debug.tooExpensive = append(nb.debug.tooExpensive, nb.pins[nb.maxPins:]...)
+		}
+
+		nb.pins = nb.pins[:nb.maxPins]
+	}
+	// Remove Pins that are too costly.
+	// The first minPins entries are always kept, even above the ceiling.
+	if len(nb.pins) > nb.minPins {
+		// Search for first pin that is too costly.
+		okUntil := nb.minPins
+		for ; okUntil < len(nb.pins); okUntil++ {
+			if nb.pins[okUntil].cost > nb.maxCost {
+				break
+			}
+		}
+
+		// Add debug data if enabled.
+		if nb.debug != nil {
+			nb.debug.tooExpensive = append(nb.debug.tooExpensive, nb.pins[okUntil:]...)
+		}
+
+		// Cut off the list at that point.
+		nb.pins = nb.pins[:okUntil]
+	}
+}
+
+// randomizeTop randomizes the top nearest pins for balancing the network.
+// All pins whose cost is within randomizeTopPercent of the cheapest pin's
+// cost are shuffled. The list must already be sorted (see clean).
+func (nb *nearbyPins) randomizeTop() {
+	switch {
+	case nb.randomizeTopPercent == 0:
+		// Check if randomization is enabled.
+		return
+	case len(nb.pins) < 2:
+		// Check if we have enough pins to work with.
+		return
+	}
+
+	// Find randomization set.
+	randomizeUpTo := len(nb.pins)
+	threshold := nb.pins[0].cost * (1 + nb.randomizeTopPercent)
+	for i, nb := range nb.pins {
+		// Find first value above the threshold to stop.
+		if nb.cost > threshold {
+			randomizeUpTo = i
+			break
+		}
+	}
+
+	// Shuffle top set.
+	if randomizeUpTo >= 2 {
+		mr := mrand.New(mrand.NewSource(time.Now().UnixNano())) //nolint:gosec
+		mr.Shuffle(randomizeUpTo, nb.Swap)
+	}
+}
+
+// FindNearestHubs searches for the nearest Hubs to the given IP address. The returned Hubs must not be modified in any way.
+// At least one of locationV4/locationV6 must be set. A nil opts falls back
+// to the map's default options.
+func (m *Map) FindNearestHubs(locationV4, locationV6 *geoip.Location, opts *Options, matchFor HubType) ([]*hub.Hub, error) {
+	m.RLock()
+	defer m.RUnlock()
+
+	// Check if map is populated.
+	if m.isEmpty() {
+		return nil, ErrEmptyMap
+	}
+
+	// Set default options if unset.
+	if opts == nil {
+		opts = m.defaultOptions()
+	}
+
+	// Find nearest Pins.
+	nearby, err := m.findNearestPins(locationV4, locationV6, opts, matchFor, false)
+	if err != nil {
+		return nil, err
+	}
+
+	// Convert to Hub list and return.
+	hubs := make([]*hub.Hub, 0, len(nearby.pins))
+	for _, nbPin := range nearby.pins {
+		hubs = append(hubs, nbPin.pin.Hub)
+	}
+	return hubs, nil
+}
+
+// findNearestPins returns pins sorted by estimated cost of reaching them
+// from the given location(s). At least one of locationV4/locationV6 must be
+// set. matchFor selects the matching criteria and debug enables collection
+// of disregarded/too-expensive pins for reporting. The caller must hold the
+// map's read lock.
+func (m *Map) findNearestPins(locationV4, locationV6 *geoip.Location, opts *Options, matchFor HubType, debug bool) (*nearbyPins, error) {
+	// Fail if no location is provided.
+	if locationV4 == nil && locationV6 == nil {
+		return nil, errors.New("no location provided")
+	}
+
+	// Raise maxMatches to nearestPinsMinimum.
+	maxMatches := defaultMaxNearbyMatches
+	if maxMatches < nearestPinsMinimum {
+		maxMatches = nearestPinsMinimum
+	}
+
+	// Create nearby Pins list.
+	nearby := &nearbyPins{
+		minPins:             nearestPinsMinimum,
+		maxPins:             maxMatches,
+		cutOffLimit:         nearestPinsMaxCostDifference,
+		randomizeTopPercent: defaultRandomizeNearbyPinTopPercent,
+	}
+	if debug {
+		nearby.debug = &nearbyPinsDebug{}
+	}
+
+	// Create pin matcher.
+	matcher := opts.Matcher(matchFor, m.intel)
+
+	// Iterate over all Pins in the Map to find the nearest ones.
+	for _, pin := range m.all {
+		var cost float32
+
+		// Check if the Pin matches the criteria.
+		if !matcher(pin) {
+			// Add debug data if enabled.
+			// Only active and reachable pins are reported, to keep the
+			// debug output focused on pins that could have been used.
+			if nearby.debug != nil && pin.State.Has(StateActive|StateReachable) {
+				nearby.debug.disregarded = append(nearby.debug.disregarded,
+					&nearbyDisregardedPin{
+						pin:    pin,
+						reason: "does not match general criteria",
+					},
+				)
+			}
+
+			// Debugging:
+			// log.Tracef("spn/navigator: skipping %s with states %s for finding nearest", pin, pin.State)
+			continue
+		}
+
+		// Check if the Hub supports at least one IP version we are looking for.
+		switch {
+		case locationV4 != nil && pin.LocationV4 != nil:
+			// Both have IPv4!
+		case locationV6 != nil && pin.LocationV6 != nil:
+			// Both have IPv6!
+		default:
+			// Hub does not support any IP version we need.
+
+			// Add debug data if enabled.
+			if nearby.debug != nil {
+				nearby.debug.disregarded = append(nearby.debug.disregarded,
+					&nearbyDisregardedPin{
+						pin:    pin,
+						reason: "does not support the required IP version",
+					},
+				)
+			}
+
+			continue
+		}
+
+		// If finding a home hub and the global routing profile is set to home ("VPN"),
+		// check if all local IP versions are available on the Hub.
+		if matchFor == HomeHub && cfgOptionRoutingAlgorithm() == RoutingProfileHomeID {
+			switch {
+			case locationV4 != nil && pin.LocationV4 == nil:
+				// Device has IPv4, but Hub does not!
+				fallthrough
+			case locationV6 != nil && pin.LocationV6 == nil:
+				// Device has IPv6, but Hub does not!
+
+				// Add debug data if enabled.
+				if nearby.debug != nil {
+					nearby.debug.disregarded = append(nearby.debug.disregarded,
+						&nearbyDisregardedPin{
+							pin:    pin,
+							reason: "home hub needs all IP versions of client (when Home/VPN routing)",
+						},
+					)
+				}
+
+				continue
+			}
+		}
+
+		// 1. Calculate cost based on distance
+		// When both IP versions yield a cost, the cheaper one wins
+		// (lessButPositive).
+
+		if locationV4 != nil && pin.LocationV4 != nil {
+			if locationV4.IsAnycast && m.home != nil {
+				// If the destination is anycast, calculate cost though proximity to home hub instead, if possible.
+				cost = lessButPositive(cost, CalculateDestinationCost(
+					proximityBetweenPins(pin, m.home),
+				))
+			} else {
+				// Regular cost calculation through proximity.
+				cost = lessButPositive(cost, CalculateDestinationCost(
+					locationV4.EstimateNetworkProximity(pin.LocationV4),
+				))
+			}
+		}
+
+		if locationV6 != nil && pin.LocationV6 != nil {
+			if locationV6.IsAnycast && m.home != nil {
+				// If the destination is anycast, calculate cost though proximity to home hub instead, if possible.
+				cost = lessButPositive(cost, CalculateDestinationCost(
+					proximityBetweenPins(pin, m.home),
+				))
+			} else {
+				// Regular cost calculation through proximity.
+				cost = lessButPositive(cost, CalculateDestinationCost(
+					locationV6.EstimateNetworkProximity(pin.LocationV6),
+				))
+			}
+		}
+
+		// If no cost could be calculated, fall back to a default value.
+		if cost == 0 {
+			cost = CalculateDestinationCost(50) // proximity out of 0-100
+		}
+
+		// Debugging:
+		// if matchFor == HomeHub {
+		// 	log.Tracef("spn/navigator: adding %.2f proximity cost to home hub %s", cost, pin.Hub)
+		// }
+
+		// 2. Add cost based on Hub status
+
+		cost += CalculateHubCost(pin.Hub.Status.Load)
+
+		// Debugging:
+		// if matchFor == HomeHub {
+		// 	log.Tracef("spn/navigator: adding %.2f hub cost to home hub %s", CalculateHubCost(pin.Hub.Status.Load), pin.Hub)
+		// }
+
+		// 3. If matching a home hub, add cost based on capacity/latency performance.
+
+		if matchFor == HomeHub {
+			// Find best capacity/latency values.
+			var (
+				bestCapacity int
+				bestLatency  time.Duration
+			)
+			for _, lane := range pin.Hub.Status.Lanes {
+				if lane.Capacity > bestCapacity {
+					bestCapacity = lane.Capacity
+				}
+				if bestLatency == 0 || lane.Latency < bestLatency {
+					bestLatency = lane.Latency
+				}
+			}
+			// Add cost of best capacity/latency values.
+			cost += CalculateLaneCost(bestLatency, bestCapacity)
+
+			// Debugging:
+			// log.Tracef("spn/navigator: adding %.2f lane cost to home hub %s", CalculateLaneCost(bestLatency, bestCapacity), pin.Hub)
+			// log.Debugf("spn/navigator: total cost of %.2f to home hub %s", cost, pin.Hub)
+		}
+
+		nearby.add(pin, cost)
+
+		// Clean the nearby list if have collected more than two times the max amount.
+		if len(nearby.pins) >= nearby.maxPins*2 {
+			nearby.clean()
+		}
+	}
+
+	// Check if we found any nearby pins
+	if nearby.Len() == 0 {
+		return nil, ErrAllPinsDisregarded
+	}
+
+	// Clean one last time and return the list.
+	nearby.clean()
+
+	// Randomize top nearest pins for load balancing.
+	nearby.randomizeTop()
+
+	// Debugging:
+	// if matchFor == HomeHub {
+	// 	log.Debug("spn/navigator: nearby pins:")
+	// 	for _, nbPin := range nearby.pins {
+	// 		log.Debugf("spn/navigator: nearby pin %s", nbPin)
+	// 	}
+	// }
+
+	return nearby, nil
+}
+
+// String returns a comma-separated listing of all nearby pins for logging.
+func (nb *nearbyPins) String() string {
+	s := make([]string, 0, len(nb.pins))
+	for _, nbPin := range nb.pins {
+		s = append(s, nbPin.String())
+	}
+	return strings.Join(s, ", ")
+}
+
+// String returns the pin and its cost as "<pin> at <cost>c" for logging.
+func (nb *nearbyPin) String() string {
+	return fmt.Sprintf("%s at %.2fc", nb.pin, nb.cost)
+}
+
+// proximityBetweenPins estimates the network proximity between two pins,
+// computed per IP version where both pins have a location, and returns the
+// higher (closer) value. Returns 0 if no version is shared.
+func proximityBetweenPins(a, b *Pin) float32 {
+	var x, y float32
+
+	// Get IPv4 network proximity.
+	if a.LocationV4 != nil && b.LocationV4 != nil {
+		x = a.LocationV4.EstimateNetworkProximity(b.LocationV4)
+	}
+
+	// Get IPv6 network proximity.
+	if a.LocationV6 != nil && b.LocationV6 != nil {
+		y = a.LocationV6.EstimateNetworkProximity(b.LocationV6)
+	}
+
+	// Return higher proximity.
+	if x > y {
+		return x
+	}
+	return y
+}
+
+// lessButPositive returns the smaller of the two values, treating 0 as
+// "unset": if one value is 0 the other is returned, even if larger.
+func lessButPositive(a, b float32) float32 {
+	switch {
+	case a == 0:
+		return b
+	case b == 0:
+		return a
+	case a < b:
+		return a
+	default:
+		return b
+	}
+}
diff --git a/spn/navigator/findnearest_test.go b/spn/navigator/findnearest_test.go
new file mode 100644
index 00000000..596d7779
--- /dev/null
+++ b/spn/navigator/findnearest_test.go
@@ -0,0 +1,124 @@
+package navigator
+
+import (
+	"testing"
+)
+
+func TestFindNearest(t *testing.T) {
+	t.Parallel()
+
+	// Create map and lock faking in order to guarantee reproducability of faked data.
+	m := getDefaultTestMap()
+	fakeLock.Lock()
+	defer fakeLock.Unlock()
+
+	for i := 0; i < 100; i++ {
+		// Create a random destination address
+		ip4, loc4 := createGoodIP(true)
+
+		nbPins, err := m.findNearestPins(loc4, nil, m.DefaultOptions(), DestinationHub, false)
+		if err != nil {
+			t.Error(err)
+		} else {
+			t.Logf("Pins near %s: %s", ip4, nbPins)
+		}
+	}
+
+	for i := 0; i < 100; i++ {
+		// Create a random destination address
+		ip6, loc6 := createGoodIP(true)
+
+		nbPins, err := m.findNearestPins(nil, loc6, m.DefaultOptions(), DestinationHub, false)
+		if err != nil {
+			t.Error(err)
+		} else {
+			t.Logf("Pins near %s: %s", ip6, nbPins)
+		}
+	}
+}
+
+/*
+TODO: Find a way to quickly generate good geoip data on the fly, as we don't want to measure IP address generation, but only finding the nearest pins.
+
+func BenchmarkFindNearest(b *testing.B) {
+	// Create map and lock faking in order to guarantee reproducability of faked data.
+	m := getDefaultTestMap()
+	fakeLock.Lock()
+	defer fakeLock.Unlock()
+
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		// Create a random destination address
+		var dstIP net.IP
+		if i%2 == 0 {
+			dstIP = net.ParseIP(gofakeit.IPv4Address())
+		} else {
+			dstIP = net.ParseIP(gofakeit.IPv6Address())
+		}
+
+		_, err := m.findNearestPins(dstIP, m.DefaultOptions(), DestinationHub)
+		if err != nil {
+			b.Error(err)
+		}
+	}
+}
+*/
+
+func findFakeHomeHub(m *Map) {
+	// Create fake IP address.
+	_, loc4 := createGoodIP(true)
+	_, loc6 := createGoodIP(false)
+
+	nbPins, err := m.findNearestPins(loc4, loc6, m.defaultOptions(), HomeHub, false)
+	if err != nil {
+		panic(err)
+	}
+	if len(nbPins.pins) == 0 {
+		panic("could not find a Home Hub")
+	}
+
+	// Set Home.
+	m.home = nbPins.pins[0].pin
+
+	// Recalculate reachability.
+	if err := m.recalculateReachableHubs(); err != nil {
+		panic(err)
+	}
+}
+
+func TestNearbyPinsCleaning(t *testing.T) {
+	t.Parallel()
+
+	testCleaning(t, []float32{10, 20, 30, 40, 50, 60, 70, 80, 90, 100}, 3)
+	testCleaning(t, []float32{10, 11, 12, 13, 50, 60, 70, 80, 90, 100}, 4)
+	testCleaning(t, []float32{10, 11, 12, 40, 50, 60, 70, 80, 90, 100}, 3)
+	testCleaning(t, []float32{10, 11, 30, 40, 50, 60, 70, 80, 90, 100}, 3)
+}
+
+func testCleaning(t *testing.T, costs []float32, expectedLeftOver int) {
+	t.Helper()
+
+	nb := &nearbyPins{
+		minPins:     3,
+		maxPins:     5,
+		cutOffLimit: 10,
+	}
+
+	// Simulate usage.
+	for _, cost := range costs {
+		// Add to list.
+		nb.add(nil, cost)
+
+		// Clean once in a while.
+		if len(nb.pins) > nb.maxPins {
+			nb.clean()
+		}
+	}
+	// Final clean.
+	nb.clean()
+
+	// Check results.
+	t.Logf("result: %+v", nb.pins)
+	if len(nb.pins) != expectedLeftOver {
+		t.Errorf("unexpected amount of left over pins: %+v", nb.pins)
+	}
+}
diff --git a/spn/navigator/findroutes.go b/spn/navigator/findroutes.go
new file mode 100644
index 00000000..ef886334
--- /dev/null
+++ b/spn/navigator/findroutes.go
@@ -0,0 +1,234 @@
+package navigator
+
+import (
+	"errors"
+	"fmt"
+	"net"
+
+	"github.com/safing/portmaster/service/intel/geoip"
+)
+
const (
	// defaultMaxRouteMatches defines a default value of how many matches a
	// route find operation in a map should return.
	defaultMaxRouteMatches = 10

	// defaultRandomizeRoutesTopPercent defines the top percent of a routes
	// set that should be randomized for balancing purposes.
	// Range: 0-1, i.e. 0.1 means the best 10% of routes are shuffled.
	defaultRandomizeRoutesTopPercent = 0.1
)
+
// FindRoutes finds possible routes to the given IP, with the given options.
// If opts is nil, the map's default options are used.
//
// NOTE(review): when the home routing profile needs to be upgraded below,
// the caller-supplied opts struct is modified in place — confirm that
// callers do not reuse their Options value afterwards.
func (m *Map) FindRoutes(ip net.IP, opts *Options) (*Routes, error) {
	m.Lock()
	defer m.Unlock()

	// Check if map is populated.
	if m.isEmpty() {
		return nil, ErrEmptyMap
	}

	// Check if home hub is set.
	if m.home == nil {
		return nil, ErrHomeHubUnset
	}

	// Get the location of the given IP address.
	var locationV4, locationV6 *geoip.Location
	var err error
	// Save whether the given IP address is a IPv4 or IPv6 address.
	if v4 := ip.To4(); v4 != nil {
		locationV4, err = geoip.GetLocation(ip)
	} else {
		locationV6, err = geoip.GetLocation(ip)
	}
	if err != nil {
		return nil, fmt.Errorf("failed to get IP location: %w", err)
	}

	// Set default options if unset.
	if opts == nil {
		opts = m.defaultOptions()
	}

	// Handle special home routing profile.
	if opts.RoutingProfile == RoutingProfileHomeID {
		switch {
		case locationV4 != nil && m.home.LocationV4 == nil:
			// Destination is IPv4, but Hub has no IPv4!
			// Upgrade routing profile.
			opts.RoutingProfile = RoutingProfileSingleHopID

		case locationV6 != nil && m.home.LocationV6 == nil:
			// Destination is IPv6, but Hub has no IPv6!
			// Upgrade routing profile.
			opts.RoutingProfile = RoutingProfileSingleHopID

		default:
			// Return route with only home hub for home hub routing.
			return &Routes{
				All: []*Route{{
					Path: []*Hop{{
						pin:   m.home,
						HubID: m.home.Hub.ID,
					}},
					Algorithm: RoutingProfileHomeID,
				}},
			}, nil
		}
	}

	// Find nearest Pins as destination candidates.
	nearby, err := m.findNearestPins(locationV4, locationV6, opts, DestinationHub, false)
	if err != nil {
		return nil, err
	}

	return m.findRoutes(nearby, opts)
}
+
+// FindRouteToHub finds possible routes to the given Hub, with the given options.
+func (m *Map) FindRouteToHub(hubID string, opts *Options) (*Routes, error) {
+	m.Lock()
+	defer m.Unlock()
+
+	// Get Pin.
+	pin, ok := m.all[hubID]
+	if !ok {
+		return nil, ErrHubNotFound
+	}
+
+	// Create a nearby with a single Pin.
+	nearby := &nearbyPins{
+		pins: []*nearbyPin{
+			{
+				pin: pin,
+			},
+		},
+	}
+
+	// Find a route to the given Hub.
+	return m.findRoutes(nearby, opts)
+}
+
// findRoutes explores the map starting at the home hub and collects routes
// that are compliant with the routing profile and end at one of the given
// destination pins. The caller must hold the map lock.
//
// NOTE(review): the "done" flag below is read but never set anywhere in this
// function — confirm whether an early-exit condition was lost.
func (m *Map) findRoutes(dsts *nearbyPins, opts *Options) (*Routes, error) {
	if m.home == nil {
		return nil, ErrHomeHubUnset
	}

	// Initialize matchers.
	var done bool
	transitMatcher := opts.Transit.Matcher(m.intel)
	destinationMatcher := opts.Destination.Matcher(m.intel)
	routingProfile := GetRoutingProfile(opts.RoutingProfile)

	// Create routes collector.
	routes := &Routes{
		maxRoutes:           defaultMaxRouteMatches,
		randomizeTopPercent: defaultRandomizeRoutesTopPercent,
	}

	// TODO:
	// Start from the destination and use HopDistance to prioritize
	// exploring routes that are in the right direction.
	// How would we handle selecting the destination node based on route to client?
	// Should we just try all destinations?

	// Create initial route.
	route := &Route{
		// Estimate how much space we will need, else it'll just expand.
		Path: make([]*Hop, 1, routingProfile.MinHops+routingProfile.MaxExtraHops),
	}
	route.Path[0] = &Hop{
		pin: m.home,
		// TODO: add initial cost
	}

	// exploreHop explores a hop (Lane) to a connected Pin.
	var exploreHop func(route *Route, lane *Lane)

	// exploreLanes explores all Lanes of a Pin.
	exploreLanes := func(route *Route) {
		for _, lane := range route.Path[len(route.Path)-1].pin.ConnectedTo {
			// Check if we are done and can skip the rest.
			if done {
				return
			}

			// Explore!
			exploreHop(route, lane)
		}
	}

	exploreHop = func(route *Route, lane *Lane) {
		// Check if the Pin should be regarded as Transit Hub.
		if !transitMatcher(lane.Pin) {
			return
		}

		// Add Pin to the current path and remove when done.
		route.addHop(lane.Pin, lane.Cost+lane.Pin.Cost)
		defer route.removeHop()

		// Check if the route would even make it into the list.
		if !routes.isGoodEnough(route) {
			return
		}

		// Check route compliance.
		// This also includes some algorithm-based optimizations.
		switch routingProfile.checkRouteCompliance(route, routes) {
		case routeOk:
			// Route would be compliant.
			// Now, check if the last hop qualifies as a Destination Hub.
			if destinationMatcher(lane.Pin) {
				// Get Pin as nearby Pin.
				nbPin := dsts.get(lane.Pin.Hub.ID)
				if nbPin != nil {
					// Pin is listed as selected Destination Hub!
					// Complete route to add destination ("last mile") cost.
					route.completeRoute(nbPin.cost)
					// NOTE(review): route keeps being mutated during further
					// exploration — assumes routes.add stores a copy; verify.
					routes.add(route)

					// We have found a route and have come to an end here.
					return
				}
			}

			// The Route is compliant, but we haven't found a Destination Hub yet.
			fallthrough
		case routeNonCompliant:
			// Continue exploration.
			exploreLanes(route)
		case routeDisqualified:
			fallthrough
		default:
			// Route is disqualified and we can return without further exploration.
		}
	}

	// Start the hop exploration tree.
	// This will fork into about a gazillion branches and add all the found valid
	// routes to the list.
	exploreLanes(route)

	// Check if we found anything.
	if len(routes.All) == 0 {
		return nil, errors.New("failed to find any routes")
	}

	// Randomize top routes for load balancing.
	routes.randomizeTop()

	// Copy remaining data to routes.
	routes.makeExportReady(opts.RoutingProfile)

	// Debugging:
	// log.Debug("spn/navigator: routes:")
	// for _, route := range routes.All {
	// 	log.Debugf("spn/navigator: %s", route)
	// }

	return routes, nil
}
diff --git a/spn/navigator/findroutes_test.go b/spn/navigator/findroutes_test.go
new file mode 100644
index 00000000..ed7793c1
--- /dev/null
+++ b/spn/navigator/findroutes_test.go
@@ -0,0 +1,54 @@
+package navigator
+
+import (
+	"net"
+	"testing"
+)
+
// TestFindRoutes checks that FindRoutes produces routes for random
// destination addresses on the optimized test map.
func TestFindRoutes(t *testing.T) {
	t.Parallel()

	// Create map and lock faking in order to guarantee reproducability of faked data.
	m := getOptimizedDefaultTestMap(t)
	fakeLock.Lock()
	defer fakeLock.Unlock()

	// NOTE(review): the loop runs a single iteration (i < 1), so only the
	// IPv4 branch of createGoodIP is ever exercised — confirm whether a
	// higher iteration count was intended.
	for i := 0; i < 1; i++ {
		// Create a random destination address
		dstIP, _ := createGoodIP(i%2 == 0)

		routes, err := m.FindRoutes(dstIP, m.DefaultOptions())
		switch {
		case err != nil:
			t.Error(err)
		case len(routes.All) == 0:
			t.Logf("No routes for %s", dstIP)
		default:
			t.Logf("Best route for %s: %s", dstIP, routes.All[0])
		}
	}
}
+
// BenchmarkFindRoutes measures route finding on the optimized test map with
// pre-generated destination IPs, so IP generation is kept out of the
// measured loop.
func BenchmarkFindRoutes(b *testing.B) {
	// Create map and lock faking in order to guarantee reproducability of faked data.
	m := getOptimizedDefaultTestMap(nil)
	fakeLock.Lock()
	defer fakeLock.Unlock()

	// Pre-generate 100 IPs, alternating between IPv4 and IPv6.
	preGenIPs := make([]net.IP, 0, 100)
	for i := 0; i < cap(preGenIPs); i++ {
		ip, _ := createGoodIP(i%2 == 0)
		preGenIPs = append(preGenIPs, ip)
	}

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		routes, err := m.FindRoutes(preGenIPs[i%len(preGenIPs)], m.DefaultOptions())
		if err != nil {
			b.Error(err)
		} else {
			// NOTE(review): Logf inside the timed loop adds formatting
			// overhead to every iteration — consider removing for more
			// accurate numbers.
			b.Logf("Best route for %s: %s", preGenIPs[i%len(preGenIPs)], routes.All[0])
		}
	}
}
diff --git a/spn/navigator/intel.go b/spn/navigator/intel.go
new file mode 100644
index 00000000..d26733c1
--- /dev/null
+++ b/spn/navigator/intel.go
@@ -0,0 +1,222 @@
+package navigator
+
+import (
+	"context"
+	"errors"
+
+	"golang.org/x/exp/slices"
+
+	"github.com/safing/portbase/log"
+	"github.com/safing/portmaster/service/intel/geoip"
+	"github.com/safing/portmaster/service/profile/endpoints"
+	"github.com/safing/portmaster/spn/hub"
+)
+
// UpdateIntel supplies the map with new intel data. The data is not copied, so
// it must not be modified after being supplied. If the map is empty, the
// bootstrap hubs will be added to the map.
// trustNodes lists verified owners or Hub IDs that are manually trusted.
// The given intel data must already be parsed (see hub.Intel.Parsed).
func (m *Map) UpdateIntel(update *hub.Intel, trustNodes []string) error {
	// Check if intel data is already parsed.
	if update.Parsed() == nil {
		return errors.New("intel data is not parsed")
	}

	m.Lock()
	defer m.Unlock()

	// Update the map's reference to the intel data.
	m.intel = update

	// Update pins with new intel data.
	for _, pin := range m.all {
		// Add/Update location data from IP addresses.
		pin.updateLocationData()

		// Override Pin Data.
		m.updateInfoOverrides(pin)

		// Update Trust and Advisory Statuses.
		m.updateIntelStatuses(pin, trustNodes)

		// Push changes.
		// TODO: Only set when pin changed.
		pin.pushChanges.Set()
	}

	// Configure the map's regions.
	m.updateRegions(m.intel.Regions)

	// Push pin changes.
	m.PushPinChanges()

	log.Infof("spn/navigator: updated intel on map %s", m.Name)

	// Add bootstrap hubs if map is empty.
	if m.isEmpty() {
		return m.addBootstrapHubs(m.intel.BootstrapHubs)
	}
	return nil
}
+
// GetIntel returns the map's intel data.
// The returned data is shared, not copied, and must not be modified.
func (m *Map) GetIntel() *hub.Intel {
	m.RLock()
	defer m.RUnlock()

	return m.intel
}
+
+func (m *Map) updateIntelStatuses(pin *Pin, trustNodes []string) {
+	// Reset all related states.
+	pin.removeStates(StateTrusted | StateUsageDiscouraged | StateUsageAsHomeDiscouraged | StateUsageAsDestinationDiscouraged)
+
+	// Check if Intel data is loaded.
+	if m.intel == nil {
+		return
+	}
+
+	// Check Hub Intel
+	hubIntel, ok := m.intel.Hubs[pin.Hub.ID]
+	if ok {
+		// Apply the verified owner, if any.
+		pin.VerifiedOwner = hubIntel.VerifiedOwner
+
+		// Check if Hub is discontinued.
+		if hubIntel.Discontinued {
+			// Reset state, set offline and return.
+			pin.State = StateNone
+			pin.addStates(StateOffline)
+			return
+		}
+
+		// Check if Hub is trusted.
+		if hubIntel.Trusted {
+			pin.addStates(StateTrusted)
+		}
+	}
+
+	// Check manual trust status.
+	switch {
+	case slices.Contains[[]string, string](trustNodes, pin.VerifiedOwner):
+		pin.addStates(StateTrusted)
+	case slices.Contains[[]string, string](trustNodes, pin.Hub.ID):
+		pin.addStates(StateTrusted)
+	}
+
+	// Check advisories.
+	// Check for UsageDiscouraged.
+	checkStatusList(
+		pin,
+		StateUsageDiscouraged,
+		m.intel.AdviseOnlyTrustedHubs,
+		m.intel.Parsed().HubAdvisory,
+	)
+	// Check for UsageAsHomeDiscouraged.
+	checkStatusList(
+		pin,
+		StateUsageAsHomeDiscouraged,
+		m.intel.AdviseOnlyTrustedHomeHubs,
+		m.intel.Parsed().HomeHubAdvisory,
+	)
+	// Check for UsageAsDestinationDiscouraged.
+	checkStatusList(
+		pin,
+		StateUsageAsDestinationDiscouraged,
+		m.intel.AdviseOnlyTrustedDestinationHubs,
+		m.intel.Parsed().DestinationHubAdvisory,
+	)
+}
+
+func checkStatusList(pin *Pin, state PinState, requireTrusted bool, endpointList endpoints.Endpoints) {
+	if requireTrusted && !pin.State.Has(StateTrusted) {
+		pin.addStates(state)
+		return
+	}
+
+	if pin.EntityV4 != nil {
+		result, _ := endpointList.Match(context.TODO(), pin.EntityV4)
+		if result == endpoints.Denied {
+			pin.addStates(state)
+			return
+		}
+	}
+
+	if pin.EntityV6 != nil {
+		result, _ := endpointList.Match(context.TODO(), pin.EntityV6)
+		if result == endpoints.Denied {
+			pin.addStates(state)
+		}
+	}
+}
+
// updateInfoOverrides applies manual overrides from the intel data to the
// given pin's location and entity information. Only fields that are set in
// the override (non-zero) are applied, and only to the location/entity
// structures that actually exist on the pin.
func (m *Map) updateInfoOverrides(pin *Pin) {
	// Check if Intel data is loaded and if there are any overrides.
	if m.intel == nil {
		return
	}

	// Get overrides for this pin.
	hubIntel, ok := m.intel.Hubs[pin.Hub.ID]
	if !ok || hubIntel.Override == nil {
		return
	}
	overrides := hubIntel.Override

	// Apply country code override to both locations and entities.
	if overrides.CountryCode != "" {
		if pin.LocationV4 != nil {
			pin.LocationV4.Country = geoip.GetCountryInfo(overrides.CountryCode)
		}
		if pin.EntityV4 != nil {
			pin.EntityV4.Country = overrides.CountryCode
		}
		if pin.LocationV6 != nil {
			pin.LocationV6.Country = geoip.GetCountryInfo(overrides.CountryCode)
		}
		if pin.EntityV6 != nil {
			pin.EntityV6.Country = overrides.CountryCode
		}
	}
	// Apply coordinates override (locations receive a copy, entities share
	// the override pointer).
	if overrides.Coordinates != nil {
		if pin.LocationV4 != nil {
			pin.LocationV4.Coordinates = *overrides.Coordinates
		}
		if pin.EntityV4 != nil {
			pin.EntityV4.Coordinates = overrides.Coordinates
		}
		if pin.LocationV6 != nil {
			pin.LocationV6.Coordinates = *overrides.Coordinates
		}
		if pin.EntityV6 != nil {
			pin.EntityV6.Coordinates = overrides.Coordinates
		}
	}
	// Apply autonomous system number override.
	if overrides.ASN != 0 {
		if pin.LocationV4 != nil {
			pin.LocationV4.AutonomousSystemNumber = overrides.ASN
		}
		if pin.EntityV4 != nil {
			pin.EntityV4.ASN = overrides.ASN
		}
		if pin.LocationV6 != nil {
			pin.LocationV6.AutonomousSystemNumber = overrides.ASN
		}
		if pin.EntityV6 != nil {
			pin.EntityV6.ASN = overrides.ASN
		}
	}
	// Apply autonomous system organization override.
	if overrides.ASOrg != "" {
		if pin.LocationV4 != nil {
			pin.LocationV4.AutonomousSystemOrganization = overrides.ASOrg
		}
		if pin.EntityV4 != nil {
			pin.EntityV4.ASOrg = overrides.ASOrg
		}
		if pin.LocationV6 != nil {
			pin.LocationV6.AutonomousSystemOrganization = overrides.ASOrg
		}
		if pin.EntityV6 != nil {
			pin.EntityV6.ASOrg = overrides.ASOrg
		}
	}
}
diff --git a/spn/navigator/map.go b/spn/navigator/map.go
new file mode 100644
index 00000000..006dfc13
--- /dev/null
+++ b/spn/navigator/map.go
@@ -0,0 +1,165 @@
+package navigator
+
+import (
+	"sort"
+	"sync"
+	"time"
+
+	"github.com/safing/portbase/database"
+	"github.com/safing/portbase/log"
+	"github.com/safing/portmaster/service/intel/geoip"
+	"github.com/safing/portmaster/spn/docks"
+	"github.com/safing/portmaster/spn/hub"
+)
+
// Map represent a collection of Pins and their relationship and status.
type Map struct {
	sync.RWMutex
	Name string

	// all holds every known Pin, keyed by Hub ID.
	all     map[string]*Pin
	// intel is the currently applied intel data; may be nil.
	intel   *hub.Intel
	// regions holds the configured map regions.
	regions []*Region

	// home is the Pin currently used as the home hub; may be nil.
	home         *Pin
	// homeTerminal optionally accompanies the home hub.
	homeTerminal *docks.CraneTerminal

	// measuringEnabled controls whether hub measurements are taken.
	measuringEnabled bool
	hubUpdateHook    *database.RegisteredHook

	// analysisLock guards access to all of this map's Pin.analysis,
	// regardedPins and the lastDesegrationAttempt fields.
	analysisLock           sync.Mutex
	regardedPins           []*Pin
	lastDesegrationAttempt time.Time
}
+
+// NewMap returns a new and empty Map.
+func NewMap(name string, enableMeasuring bool) *Map {
+	m := &Map{
+		Name:             name,
+		all:              make(map[string]*Pin),
+		measuringEnabled: enableMeasuring,
+	}
+	addMapToAPI(m)
+
+	return m
+}
+
// Close removes the map's integration, taking it "offline".
// It only unregisters the map from the API layer; the map data is kept.
func (m *Map) Close() {
	removeMapFromAPI(m.Name)
}
+
+// GetPin returns the Pin of the Hub with the given ID.
+func (m *Map) GetPin(hubID string) (pin *Pin, ok bool) {
+	m.RLock()
+	defer m.RUnlock()
+
+	pin, ok = m.all[hubID]
+	return
+}
+
// GetHome returns the current home and it's accompanying terminal.
// Both may be nil.
func (m *Map) GetHome() (*Pin, *docks.CraneTerminal) {
	m.RLock()
	defer m.RUnlock()

	return m.home, m.homeTerminal
}
+
+// SetHome sets the given hub as the new home. Optionally, a terminal may be
+// supplied to accompany the home hub.
+func (m *Map) SetHome(id string, t *docks.CraneTerminal) (ok bool) {
+	m.Lock()
+	defer m.Unlock()
+
+	// Get pin from map.
+	newHome, ok := m.all[id]
+	if !ok {
+		return false
+	}
+
+	// Remove home hub state from all pins.
+	for _, pin := range m.all {
+		pin.removeStates(StateIsHomeHub)
+	}
+
+	// Set pin as home.
+	m.home = newHome
+	m.homeTerminal = t
+	m.home.addStates(StateIsHomeHub)
+
+	// Recalculate reachable.
+	err := m.recalculateReachableHubs()
+	if err != nil {
+		log.Warningf("spn/navigator: failed to recalculate reachable hubs: %s", err)
+	}
+
+	m.PushPinChanges()
+	return true
+}
+
+// GetAvailableCountries returns a map of countries including their information
+// where the map has pins suitable for the given type.
+func (m *Map) GetAvailableCountries(opts *Options, forType HubType) map[string]*geoip.CountryInfo {
+	if opts == nil {
+		opts = m.defaultOptions()
+	}
+
+	m.RLock()
+	defer m.RUnlock()
+
+	matcher := opts.Matcher(forType, m.intel)
+	countries := make(map[string]*geoip.CountryInfo)
+	for _, pin := range m.all {
+		if !matcher(pin) {
+			continue
+		}
+		if pin.LocationV4 != nil && countries[pin.LocationV4.Country.Code] == nil {
+			countries[pin.LocationV4.Country.Code] = &pin.LocationV4.Country
+		}
+		if pin.LocationV6 != nil && countries[pin.LocationV6.Country.Code] == nil {
+			countries[pin.LocationV6.Country.Code] = &pin.LocationV6.Country
+		}
+	}
+
+	return countries
+}
+
+// isEmpty returns whether the Map is regarded as empty.
+func (m *Map) isEmpty() bool {
+	if m.home != nil {
+		// When a home hub is set, we also regard a map with only one entry to be
+		// empty, as this will be the case for Hubs, which will have their own
+		// entry in the Map.
+		return len(m.all) <= 1
+	}
+
+	return len(m.all) == 0
+}
+
+func (m *Map) pinList(lockMap bool) []*Pin {
+	if lockMap {
+		m.RLock()
+		defer m.RUnlock()
+	}
+
+	// Copy into slice.
+	list := make([]*Pin, 0, len(m.all))
+	for _, pin := range m.all {
+		list = append(list, pin)
+	}
+
+	return list
+}
+
+func (m *Map) sortedPins(lockMap bool) []*Pin {
+	// Get list.
+	list := m.pinList(lockMap)
+
+	// Sort list.
+	sort.Sort(sortByPinID(list))
+	return list
+}
diff --git a/spn/navigator/map_stats.go b/spn/navigator/map_stats.go
new file mode 100644
index 00000000..c4e17108
--- /dev/null
+++ b/spn/navigator/map_stats.go
@@ -0,0 +1,85 @@
+package navigator
+
+import (
+	"fmt"
+	"sort"
+	"strings"
+)
+
// MapStats holds generic map statistics.
type MapStats struct {
	// Name of the map the stats were collected from.
	Name            string
	// States counts how many Hubs carry each pin state.
	States          map[PinState]int
	// Lanes counts how many Hubs have a given number of lanes.
	Lanes           map[int]int
	// ActiveTerminals is the number of Hubs with an active terminal.
	ActiveTerminals int
}
+
+// Stats collects and returns statistics from the map.
+func (m *Map) Stats() *MapStats {
+	m.Lock()
+	defer m.Unlock()
+
+	// Create stats struct.
+	stats := &MapStats{
+		Name:   m.Name,
+		States: make(map[PinState]int),
+		Lanes:  make(map[int]int),
+	}
+	for _, state := range allStates {
+		stats.States[state] = 0
+	}
+
+	// Iterate over all Pins to collect data.
+	for _, pin := range m.all {
+		// Count active terminals.
+		if pin.HasActiveTerminal() {
+			stats.ActiveTerminals++
+		}
+
+		// Check all states.
+		for _, state := range allStates {
+			if pin.State.Has(state) {
+				stats.States[state]++
+			}
+		}
+
+		// Count lanes.
+		laneCnt, ok := stats.Lanes[len(pin.ConnectedTo)]
+		if ok {
+			stats.Lanes[len(pin.ConnectedTo)] = laneCnt + 1
+		} else {
+			stats.Lanes[len(pin.ConnectedTo)] = 1
+		}
+	}
+
+	return stats
+}
+
+func (ms *MapStats) String() string {
+	var builder strings.Builder
+
+	// Write header.
+	fmt.Fprintf(&builder, "Stats for Map %s:\n", ms.Name)
+
+	// Write State Stats
+	stateSummary := make([]string, 0, len(ms.States))
+	for state, cnt := range ms.States {
+		stateSummary = append(stateSummary, fmt.Sprintf("State %s: %d Hubs", state, cnt))
+	}
+	sort.Strings(stateSummary)
+	for _, stateSum := range stateSummary {
+		fmt.Fprintln(&builder, stateSum)
+	}
+
+	// Write Lane Stats
+	laneStats := make([]string, 0, len(ms.Lanes))
+	for laneCnt, pinCnt := range ms.Lanes {
+		laneStats = append(laneStats, fmt.Sprintf("%d Lanes: %d Hubs", laneCnt, pinCnt))
+	}
+	sort.Strings(laneStats)
+	for _, laneStat := range laneStats {
+		fmt.Fprintln(&builder, laneStat)
+	}
+
+	return builder.String()
+}
diff --git a/spn/navigator/map_test.go b/spn/navigator/map_test.go
new file mode 100644
index 00000000..bea2d477
--- /dev/null
+++ b/spn/navigator/map_test.go
@@ -0,0 +1,279 @@
+package navigator
+
+import (
+	"fmt"
+	"net"
+	"sync"
+	"testing"
+	"time"
+
+	"github.com/brianvoe/gofakeit"
+
+	"github.com/safing/jess/lhash"
+	"github.com/safing/portmaster/service/intel/geoip"
+	"github.com/safing/portmaster/spn/hub"
+)
+
var (
	// fakeLock serializes access to the globally seeded faking library, in
	// order to keep generated test data reproducible.
	fakeLock sync.Mutex

	// defaultMapCreate guards the lazy creation of the shared default test map.
	defaultMapCreate sync.Once
	defaultMap       *Map
)
+
// getDefaultTestMap lazily creates and returns the shared test map, using a
// fixed seed so all tests work on the same reproducible data.
func getDefaultTestMap() *Map {
	defaultMapCreate.Do(func() {
		defaultMap = createRandomTestMap(1, 200)
	})
	return defaultMap
}
+
+func TestRandomMapCreation(t *testing.T) {
+	t.Parallel()
+
+	m := getDefaultTestMap()
+
+	fmt.Println("All Pins:")
+	for _, pin := range m.all {
+		fmt.Printf("%s: %s %s\n", pin, pin.Hub.Info.IPv4, pin.Hub.Info.IPv6)
+	}
+
+	// Print stats
+	fmt.Printf("\n%s\n", m.Stats())
+
+	// Print home
+	fmt.Printf("Selected Home Hub: %s\n", m.home)
+}
+
// createRandomTestMap builds a reproducible fake map with the given seed and
// size (minimum 10): it generates fake Hubs with intel data, connects them
// with random lanes, simulates superseded and failing Hubs, and selects a
// home hub.
func createRandomTestMap(seed int64, size int) *Map {
	fakeLock.Lock()
	defer fakeLock.Unlock()

	// Seed with parameter to make it reproducible.
	gofakeit.Seed(seed)

	// Enforce minimum size.
	if size < 10 {
		size = 10
	}

	// Create Hub list.
	var hubs []*hub.Hub

	// Create Intel data structure.
	mapIntel := &hub.Intel{
		Hubs: make(map[string]*hub.HubIntel),
	}

	// Define periodic values.
	var currentGroup string

	// Create [size] fake Hubs.
	for i := 0; i < size; i++ {
		// Change group every 5 Hubs.
		if i%5 == 0 {
			currentGroup = gofakeit.Username()
		}

		// Create new fake Hub and add to the list.
		h := createFakeHub(currentGroup, true, mapIntel)
		hubs = append(hubs, h)
	}

	// Fake three superseded Hubs.
	for i := 0; i < 3; i++ {
		h := hubs[size-1-i]

		// Set FirstSeen in the past and copy an IP address of an existing Hub.
		h.FirstSeen = time.Now().Add(-1 * time.Hour)
		if i%2 == 0 {
			h.Info.IPv4 = hubs[i].Info.IPv4
		} else {
			h.Info.IPv6 = hubs[i].Info.IPv6
		}
	}

	// Create Lanes between Hubs in order to create the network.
	totalConnections := size * 10
	for i := 0; i < totalConnections; i++ {
		// Get new random indexes.
		indexA := gofakeit.Number(0, size-1)
		indexB := gofakeit.Number(0, size-1)
		if indexA == indexB {
			continue
		}

		// Get Hubs and check if they are already connected.
		hubA := hubs[indexA]
		hubB := hubs[indexB]
		if hubA.GetLaneTo(hubB.ID) != nil {
			// already connected
			continue
		}
		if hubB.GetLaneTo(hubA.ID) != nil {
			// already connected
			continue
		}

		// Create connections.
		_ = hubA.AddLane(createLane(hubB.ID))
		// Add the second connection in 99% of cases.
		// If this is missing, the Pins should not show up as connected.
		if gofakeit.Number(0, 100) != 0 {
			_ = hubB.AddLane(createLane(hubA.ID))
		}
	}

	// Parse constructed intel data
	err := mapIntel.ParseAdvisories()
	if err != nil {
		panic(err)
	}

	// Create map and add Pins.
	m := NewMap(fmt.Sprintf("Test-Map-%d", seed), true)
	m.intel = mapIntel
	for _, h := range hubs {
		m.UpdateHub(h)
	}

	// Fake communication error with three Hubs.
	var i int
	for _, pin := range m.all {
		pin.MarkAsFailingFor(1 * time.Hour)
		pin.addStates(StateFailing)

		if i++; i >= 3 {
			break
		}
	}

	// Set a Home Hub.
	findFakeHomeHub(m)

	return m
}
+
// createFakeHub creates a single fake Hub in the given group, with random
// measurements. If randomFailes is set, random hub-level failures (invalid
// messages, expired keys) are simulated. If mapIntel is non-nil, random
// advisory-based states (trusted, discouraged) are added to it.
func createFakeHub(group string, randomFailes bool, mapIntel *hub.Intel) *hub.Hub {
	// Create fake Hub ID.
	idSrc := gofakeit.Password(true, true, true, true, true, 64)
	id := lhash.Digest(lhash.BLAKE2b_256, []byte(idSrc)).Base58()
	ip4, _ := createGoodIP(true)
	ip6, _ := createGoodIP(false)

	// Create and return new fake Hub.
	h := &hub.Hub{
		ID: id,
		Info: &hub.Announcement{
			ID:        id,
			Timestamp: time.Now().Unix(),
			Name:      gofakeit.Username(),
			Group:     group,
			// ContactAddress // TODO
			// ContactService // TODO
			// Hosters    []string // TODO
			// Datacenter string   // TODO
			IPv4: ip4,
			IPv6: ip6,
		},
		Status: &hub.Status{
			Timestamp: time.Now().Unix(),
			Keys: map[string]*hub.Key{
				"a": {
					Expires: time.Now().Add(48 * time.Hour).Unix(),
				},
			},
			Load: gofakeit.Number(10, 100),
		},
		Measurements: hub.NewMeasurements(),
		FirstSeen:    time.Now(),
	}
	h.Measurements.Latency = createLatency()
	h.Measurements.Capacity = createCapacity()
	h.Measurements.CalculatedCost = CalculateLaneCost(
		h.Measurements.Latency,
		h.Measurements.Capacity,
	)

	// Return early if no failures should be simulated.
	if !randomFailes {
		return h
	}

	// Set hub-based states.
	if gofakeit.Number(0, 100) == 0 {
		// Fake Info message error.
		h.InvalidInfo = true
	}
	if gofakeit.Number(0, 100) == 0 {
		// Fake Status message error.
		h.InvalidStatus = true
	}
	if gofakeit.Number(0, 100) == 0 {
		// Fake expired exchange keys.
		for _, key := range h.Status.Keys {
			key.Expires = time.Now().Add(-1 * time.Hour).Unix()
		}
	}

	// Return early if there is no intel data to add advisory states to.
	if mapIntel == nil {
		return h
	}

	// Set advisory-based states.
	if gofakeit.Number(0, 10) == 0 {
		// Make Trusted State
		mapIntel.Hubs[h.ID] = &hub.HubIntel{
			Trusted: true,
		}
	}
	if gofakeit.Number(0, 100) == 0 {
		// Discourage any usage.
		mapIntel.HubAdvisory = append(mapIntel.HubAdvisory, "- "+h.Info.IPv4.String())
	}
	if gofakeit.Number(0, 100) == 0 {
		// Discourage Home Hub usage.
		mapIntel.HomeHubAdvisory = append(mapIntel.HomeHubAdvisory, "- "+h.Info.IPv4.String())
	}
	if gofakeit.Number(0, 100) == 0 {
		// Discourage Destination Hub usage.
		mapIntel.DestinationHubAdvisory = append(mapIntel.DestinationHubAdvisory, "- "+h.Info.IPv4.String())
	}

	return h
}
+
+func createGoodIP(v4 bool) (net.IP, *geoip.Location) {
+	var candidate net.IP
+	for i := 0; i < 100; i++ {
+		if v4 {
+			candidate = net.ParseIP(gofakeit.IPv4Address())
+		} else {
+			candidate = net.ParseIP(gofakeit.IPv6Address())
+		}
+		loc, err := geoip.GetLocation(candidate)
+		if err == nil && loc.Coordinates.Latitude != 0 {
+			return candidate, loc
+		}
+	}
+	return candidate, nil
+}
+
+func createLane(toHubID string) *hub.Lane {
+	return &hub.Lane{
+		ID:       toHubID,
+		Latency:  createLatency(),
+		Capacity: createCapacity(),
+	}
+}
+
// createLatency returns a random fake lane latency.
func createLatency() time.Duration {
	// Return a value between 10ms and 100ms.
	return time.Duration(gofakeit.Float64Range(10, 100) * float64(time.Millisecond))
}
+
// createCapacity returns a random fake lane capacity in bit/s.
func createCapacity() int {
	// Return a value between 10Mbit/s and 1Gbit/s.
	return gofakeit.Number(10000000, 1000000000)
}
diff --git a/spn/navigator/measurements.go b/spn/navigator/measurements.go
new file mode 100644
index 00000000..571365cb
--- /dev/null
+++ b/spn/navigator/measurements.go
@@ -0,0 +1,144 @@
+package navigator
+
+import (
+	"context"
+	"sort"
+	"time"
+
+	"github.com/safing/portbase/log"
+	"github.com/safing/portbase/modules"
+	"github.com/safing/portmaster/spn/docks"
+	"github.com/safing/portmaster/spn/terminal"
+)
+
// Measurements Configuration.
const (
	NavigatorMeasurementTTLDefault    = 4 * time.Hour
	NavigatorMeasurementTTLByCostBase = 6 * time.Minute
	NavigatorMeasurementTTLByCostMin  = 4 * time.Hour
	NavigatorMeasurementTTLByCostMax  = 50 * time.Hour

	// With a base TTL of 6m, this leads to:
	// 20c     -> 2h -> raised to 4h.
	// 50c     -> 5h
	// 100c    -> 10h
	// 1000c   -> 100h -> capped to 50h.
)
+
// measureHubs re-measures connections to hubs whose measurements have
// expired, prioritizing near/low-cost hubs. It returns early (to be retried
// by the task scheduler) when we are already measuring or when multiple
// unknown errors occur.
func (m *Map) measureHubs(ctx context.Context, _ *modules.Task) error {
	if home, _ := m.GetHome(); home == nil {
		log.Debug("spn/navigator: skipping measuring, no home hub set")
		return nil
	}

	var unknownErrCnt int
	matcher := m.DefaultOptions().Transit.Matcher(m.GetIntel())

	// Get list and sort in order to check near/low-cost hubs earlier.
	list := m.pinList(true)
	sort.Sort(sortByLowestMeasuredCost(list))

	// Find first pin where any measurement has expired.
	for _, pin := range list {
		// Check if measuring is enabled.
		if pin.measurements == nil {
			continue
		}

		// Check if Pin is regarded.
		if !matcher(pin) {
			continue
		}

		// Calculate dynamic TTL.
		var checkWithTTL time.Duration
		if pin.HopDistance == 2 { // Hub is directly connected.
			checkWithTTL = calculateMeasurementTTLByCost(
				pin.measurements.GetCalculatedCost(),
				docks.CraneMeasurementTTLByCostBase,
				docks.CraneMeasurementTTLByCostMin,
				docks.CraneMeasurementTTLByCostMax,
			)
		} else {
			checkWithTTL = calculateMeasurementTTLByCost(
				pin.measurements.GetCalculatedCost(),
				NavigatorMeasurementTTLByCostBase,
				NavigatorMeasurementTTLByCostMin,
				NavigatorMeasurementTTLByCostMax,
			)
		}

		// Check if we have measured the pin within the TTL.
		if !pin.measurements.Expired(checkWithTTL) {
			continue
		}

		// Measure connection.
		// NOTE(review): tErr is used below even on success — assumes the
		// returned error type's IsOK/Is methods are nil-safe; verify.
		tErr := docks.MeasureHub(ctx, pin.Hub, checkWithTTL)

		// Independent of outcome, recalculate the cost.
		latency, _ := pin.measurements.GetLatency()
		capacity, _ := pin.measurements.GetCapacity()
		calculatedCost := CalculateLaneCost(latency, capacity)
		pin.measurements.SetCalculatedCost(calculatedCost)
		// Log result.
		log.Infof(
			"spn/navigator: updated measurements for connection to %s: %s %.2fMbit/s %.2fc",
			pin.Hub,
			latency,
			float64(capacity)/1000000,
			calculatedCost,
		)

		switch {
		case tErr.IsOK():
			// All good, continue.

		case tErr.Is(terminal.ErrTryAgainLater):
			if tErr.IsExternal() {
				// Remote is measuring, just continue with next.
				log.Debugf("spn/navigator: remote %s is measuring, continuing with next", pin.Hub)
			} else {
				// We are measuring, abort and restart measuring again later.
				log.Debugf("spn/navigator: postponing measuring because we are currently engaged in measuring")
				return nil
			}

		default:
			log.Warningf("spn/navigator: failed to measure connection to %s: %s", pin.Hub, tErr)
			unknownErrCnt++
			if unknownErrCnt >= 3 {
				log.Warningf("spn/navigator: postponing measuring task because of multiple errors")
				return nil
			}
		}
	}

	return nil
}
+
+// SaveMeasuredHubs saves all Hubs that have unsaved measurements.
+func (m *Map) SaveMeasuredHubs() {
+	m.RLock()
+	defer m.RUnlock()
+
+	for _, pin := range m.all {
+		if !pin.measurements.IsPersisted() {
+			if err := pin.Hub.Save(); err != nil {
+				log.Warningf("spn/navigator: failed to save Hub %s to persist measurements: %s", pin.Hub, err)
+			}
+		}
+	}
+}
+
+func calculateMeasurementTTLByCost(cost float32, base, min, max time.Duration) time.Duration {
+	calculated := time.Duration(cost) * base
+	switch {
+	case calculated < min:
+		return min
+	case calculated > max:
+		return max
+	default:
+		return calculated
+	}
+}
diff --git a/spn/navigator/metrics.go b/spn/navigator/metrics.go
new file mode 100644
index 00000000..fe62020e
--- /dev/null
+++ b/spn/navigator/metrics.go
@@ -0,0 +1,177 @@
+package navigator
+
+import (
+	"sort"
+	"sync"
+	"time"
+
+	"github.com/tevino/abool"
+
+	"github.com/safing/portbase/api"
+	"github.com/safing/portbase/metrics"
+)
+
+// metricsRegistered guards against duplicate metric registration.
+var metricsRegistered = abool.New()
+
+// registerMetrics registers the map stats gauges with the metrics system.
+// It is safe to call multiple times; only the first call registers.
+// Fixes copy-pasted display names: previously three of the four gauges were
+// all named "SPN Map Lowest Latency".
+func registerMetrics() (err error) {
+	// Only register metrics once.
+	if !metricsRegistered.SetToIf(false, true) {
+		return nil
+	}
+
+	// Map Stats.
+
+	_, err = metrics.NewGauge(
+		"spn/map/main/latency/all/lowest/seconds",
+		nil,
+		getLowestLatency,
+		&metrics.Options{
+			Name:       "SPN Map Lowest Latency",
+			Permission: api.PermitUser,
+		},
+	)
+	if err != nil {
+		return err
+	}
+
+	_, err = metrics.NewGauge(
+		"spn/map/main/latency/fas/lowest/seconds",
+		nil,
+		getLowestLatencyFromFas,
+		&metrics.Options{
+			Name:       "SPN Map Lowest Foreign AS Latency",
+			Permission: api.PermitUser,
+		},
+	)
+	if err != nil {
+		return err
+	}
+
+	_, err = metrics.NewGauge(
+		"spn/map/main/capacity/all/highest/bytes",
+		nil,
+		getHighestCapacity,
+		&metrics.Options{
+			Name:       "SPN Map Highest Capacity",
+			Permission: api.PermitUser,
+		},
+	)
+	if err != nil {
+		return err
+	}
+
+	_, err = metrics.NewGauge(
+		"spn/map/main/capacity/fas/highest/bytes",
+		nil,
+		getHighestCapacityFromFas,
+		&metrics.Options{
+			Name:       "SPN Map Highest Foreign AS Capacity",
+			Permission: api.PermitUser,
+		},
+	)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+var (
+	// mapStats caches the computed map metrics; guarded by mapStatsLock.
+	mapStats        *mapMetrics
+	// mapStatsExpires marks when the cached stats must be recomputed.
+	mapStatsExpires time.Time
+	mapStatsLock    sync.Mutex
+	// mapStatsTTL is just below one minute — presumably so a typical 1m
+	// scrape interval always sees fresh values (TODO confirm).
+	mapStatsTTL     = 55 * time.Second
+)
+
+// mapMetrics holds one snapshot of the gauge values derived from the map.
+type mapMetrics struct {
+	lowestLatency            float64
+	lowestForeignASLatency   float64
+	highestCapacity          float64
+	highestForeignASCapacity float64
+}
+
+// The following getters back the registered gauges; they all share the
+// cached snapshot returned by getMapStats.
+func getLowestLatency() float64          { return getMapStats().lowestLatency }
+func getLowestLatencyFromFas() float64   { return getMapStats().lowestForeignASLatency }
+func getHighestCapacity() float64        { return getMapStats().highestCapacity }
+func getHighestCapacityFromFas() float64 { return getMapStats().highestForeignASCapacity }
+
+// getMapStats returns the cached map metrics, recomputing them from the
+// main map once the cache has expired (see mapStatsTTL).
+func getMapStats() *mapMetrics {
+	mapStatsLock.Lock()
+	defer mapStatsLock.Unlock()
+
+	// Return cache if still valid.
+	if time.Now().Before(mapStatsExpires) {
+		return mapStats
+	}
+
+	// Refresh.
+	mapStats = &mapMetrics{}
+
+	// Get all pins and home.
+	list := Main.pinList(true)
+	home, _ := Main.GetHome()
+
+	// Return empty stats if we have incomplete data.
+	if len(list) <= 1 || home == nil {
+		mapStatsExpires = time.Now().Add(mapStatsTTL)
+		return mapStats
+	}
+
+	// Sort by latency.
+	// NOTE(review): assumes the sort places pins without measurements last,
+	// so list[0] has a usable latency — confirm in the sort implementation.
+	sort.Sort(sortByLowestMeasuredLatency(list))
+	// Get lowest latency.
+	lowestLatency, _ := list[0].measurements.GetLatency()
+	mapStats.lowestLatency = lowestLatency.Seconds()
+	// Find best foreign AS latency.
+	bestForeignASPin := findFirstForeignASStatsPin(home, list)
+	if bestForeignASPin != nil {
+		lowestForeignASLatency, _ := bestForeignASPin.measurements.GetLatency()
+		mapStats.lowestForeignASLatency = lowestForeignASLatency.Seconds()
+	}
+
+	// Sort by capacity.
+	sort.Sort(sortByHighestMeasuredCapacity(list))
+	// Get highest capacity.
+	// The /8 converts to bytes to match the metric unit; capacity is
+	// presumably measured in bit/s — TODO confirm.
+	highestCapacity, _ := list[0].measurements.GetCapacity()
+	mapStats.highestCapacity = float64(highestCapacity) / 8
+	// Find best foreign AS capacity.
+	bestForeignASPin = findFirstForeignASStatsPin(home, list)
+	if bestForeignASPin != nil {
+		highestForeignASCapacity, _ := bestForeignASPin.measurements.GetCapacity()
+		mapStats.highestForeignASCapacity = float64(highestForeignASCapacity) / 8
+	}
+
+	mapStatsExpires = time.Now().Add(mapStatsTTL)
+	return mapStats
+}
+
+// findFirstForeignASStatsPin returns the first Pin in list that lies in a
+// different autonomous system than home, for both IPv4 and IPv6 where
+// location data is available on both sides. Pins that could not be compared
+// at all are skipped. The list keeps its current order, so "first" means
+// "best" when the caller sorted by the desired property. Returns nil if no
+// such Pin exists.
+func findFirstForeignASStatsPin(home *Pin, list []*Pin) *Pin {
+	// Find first Pin in a foreign AS.
+	for _, pin := range list {
+		compared := false
+
+		// Skip if IPv4 AS matches.
+		if home.LocationV4 != nil && pin.LocationV4 != nil {
+			if home.LocationV4.AutonomousSystemNumber == pin.LocationV4.AutonomousSystemNumber {
+				continue
+			}
+			compared = true
+		}
+
+		// Skip if IPv6 AS matches.
+		if home.LocationV6 != nil && pin.LocationV6 != nil {
+			if home.LocationV6.AutonomousSystemNumber == pin.LocationV6.AutonomousSystemNumber {
+				continue
+			}
+			compared = true
+		}
+
+		// Skip if no data was compared.
+		if !compared {
+			continue
+		}
+
+		return pin
+	}
+	return nil
+}
diff --git a/spn/navigator/module.go b/spn/navigator/module.go
new file mode 100644
index 00000000..9937ad61
--- /dev/null
+++ b/spn/navigator/module.go
@@ -0,0 +1,129 @@
+package navigator
+
+import (
+	"errors"
+	"time"
+
+	"github.com/safing/portbase/config"
+	"github.com/safing/portbase/log"
+	"github.com/safing/portbase/modules"
+	"github.com/safing/portmaster/service/intel/geoip"
+	"github.com/safing/portmaster/spn/conf"
+)
+
+const (
+	// cfgOptionRoutingAlgorithmKey is copied from profile/config.go to avoid import loop.
+	cfgOptionRoutingAlgorithmKey = "spn/routingAlgorithm"
+
+	// cfgOptionTrustNodeNodesKey is copied from captain/config.go to avoid import loop.
+	cfgOptionTrustNodeNodesKey = "spn/trustNodes"
+)
+
+var (
+	// ErrHomeHubUnset is returned when the Home Hub is required and not set.
+	ErrHomeHubUnset = errors.New("map has no Home Hub set")
+
+	// ErrEmptyMap is returned when the Map is empty.
+	ErrEmptyMap = errors.New("map is empty")
+
+	// ErrHubNotFound is returned when the Hub was not found on the Map.
+	ErrHubNotFound = errors.New("hub not found")
+
+	// ErrAllPinsDisregarded is returned when all pins have been disregarded.
+	ErrAllPinsDisregarded = errors.New("all pins have been disregarded")
+)
+
+var (
+	// module is this package's module instance, registered in init.
+	module *modules.Module
+
+	// Main is the primary map used.
+	Main *Map
+
+	// Config options; bound to the config system in start().
+	devMode                   config.BoolOption
+	cfgOptionRoutingAlgorithm config.StringOption
+	cfgOptionTrustNodeNodes   config.StringArrayOption
+)
+
+func init() {
+	// Register the navigator module and its dependencies.
+	module = modules.Register("navigator", prep, start, stop, "terminal", "geoip", "netenv")
+}
+
+// prep registers the navigator's API endpoints before the module starts.
+func prep() error {
+	return registerAPIEndpoints()
+}
+
+// start initializes the main map, binds config options, registers the map
+// database, waits for geoip data, loads known Hubs from the database and
+// schedules the periodic maintenance tasks.
+func start() error {
+	Main = NewMap(conf.MainMapName, true)
+	devMode = config.Concurrent.GetAsBool(config.CfgDevModeKey, false)
+	cfgOptionRoutingAlgorithm = config.Concurrent.GetAsString(cfgOptionRoutingAlgorithmKey, DefaultRoutingProfileID)
+	cfgOptionTrustNodeNodes = config.Concurrent.GetAsStringArray(cfgOptionTrustNodeNodesKey, []string{})
+
+	err := registerMapDatabase()
+	if err != nil {
+		return err
+	}
+
+	// Wait for geoip databases to be ready.
+	// Try again if not yet ready, as this is critical.
+	// The "wait" parameter times out after 1 second.
+	// Allow 30 seconds for both databases to load.
+	// Note: the loop body itself does not sleep — each IsInitialized call
+	// blocks for up to 1 second via its wait parameter.
+geoInitCheck:
+	for i := 0; i < 30; i++ {
+		switch {
+		case !geoip.IsInitialized(false, true): // First, IPv4.
+		case !geoip.IsInitialized(true, true): // Then, IPv6.
+		default:
+			break geoInitCheck
+		}
+	}
+
+	// Load the map from the database; retry once after a short wait, then
+	// continue best-effort (data also arrives via updates later).
+	err = Main.InitializeFromDatabase()
+	if err != nil {
+		// Wait for three seconds, then try again.
+		time.Sleep(3 * time.Second)
+		err = Main.InitializeFromDatabase()
+		if err != nil {
+			// Even if the init fails, we can try to start without it and get data along the way.
+			log.Warningf("spn/navigator: %s", err)
+		}
+	}
+	err = Main.RegisterHubUpdateHook()
+	if err != nil {
+		return err
+	}
+
+	// TODO: delete superseded hubs after x amount of time
+
+	module.NewTask("update states", Main.updateStates).
+		Repeat(1 * time.Hour).
+		Schedule(time.Now().Add(3 * time.Minute))
+
+	module.NewTask("update failing states", Main.updateFailingStates).
+		Repeat(1 * time.Minute).
+		Schedule(time.Now().Add(3 * time.Minute))
+
+	if conf.PublicHub() {
+		// Only measure Hubs on public Hubs.
+		module.NewTask("measure hubs", Main.measureHubs).
+			Repeat(5 * time.Minute).
+			Schedule(time.Now().Add(1 * time.Minute))
+
+		// Only register metrics on Hubs, as they only make sense there.
+		err := registerMetrics()
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// stop tears down the navigator: it withdraws the map database, cancels the
+// hub update hook, persists unsaved measurements and closes the main map.
+func stop() error {
+	withdrawMapDatabase()
+
+	Main.CancelHubUpdateHook()
+	Main.SaveMeasuredHubs()
+	Main.Close()
+
+	return nil
+}
diff --git a/spn/navigator/module_test.go b/spn/navigator/module_test.go
new file mode 100644
index 00000000..f55ea4e8
--- /dev/null
+++ b/spn/navigator/module_test.go
@@ -0,0 +1,13 @@
+package navigator
+
+import (
+	"testing"
+
+	"github.com/safing/portbase/log"
+	"github.com/safing/portmaster/service/core/pmtesting"
+)
+
+// TestMain runs the navigator test suite through the shared pmtesting
+// harness with debug logging enabled.
+func TestMain(m *testing.M) {
+	log.SetLogLevel(log.DebugLevel)
+	pmtesting.TestMain(m, module)
+}
diff --git a/spn/navigator/optimize.go b/spn/navigator/optimize.go
new file mode 100644
index 00000000..76f101c3
--- /dev/null
+++ b/spn/navigator/optimize.go
@@ -0,0 +1,388 @@
+package navigator
+
+import (
+	"fmt"
+	"sort"
+	"time"
+
+	"github.com/safing/portmaster/spn/docks"
+	"github.com/safing/portmaster/spn/hub"
+)
+
+const (
+	optimizationLowestCostConnections = 3
+	optimizationHopDistanceTarget     = 3
+	waitUntilMeasuredUpToPercent      = 0.5
+
+	desegrationAttemptBackoff = time.Hour
+)
+
+// Optimization Purposes.
+const (
+	OptimizePurposeBootstrap       = "bootstrap"
+	OptimizePurposeDesegregate     = "desegregate"
+	OptimizePurposeWait            = "wait"
+	OptimizePurposeTargetStructure = "target-structure"
+)
+
+// AnalysisState holds state for analyzing the network for optimizations.
+type AnalysisState struct { //nolint:maligned
+	// Suggested signifies that a direct connection to this Hub is suggested by
+	// the optimization algorithm.
+	Suggested bool
+
+	// SuggestedHopDistance holds the hop distance to this Hub when only
+	// considering the suggested Hubs as connected.
+	SuggestedHopDistance int
+
+	// SuggestedHopDistanceInRegion holds the hop distance to this Hub in the
+	// same region when only considering the suggested Hubs as connected.
+	SuggestedHopDistanceInRegion int
+
+	// CrossRegionalConnections holds the amount of connections a Pin has from
+	// the current region.
+	CrossRegionalConnections int
+	// CrossRegionalLowestCostLane holds the lowest cost of the counted
+	// connections from the current region.
+	CrossRegionalLowestCostLane float32
+	// CrossRegionalLaneCosts holds all the cross regional lane costs.
+	CrossRegionalLaneCosts []float32
+	// CrossRegionalHighestCostInHubLimit holds the highest cost of the lowest
+	// cost connections within the maximum allowed lanes on a Hub from the
+	// current region.
+	CrossRegionalHighestCostInHubLimit float32
+}
+
+// initAnalysis creates all Pin.analysis fields and compiles the global and
+// per-region lists of regarded Pins using the result's matcher.
+// The caller needs to hold the map and analysis lock.
+func (m *Map) initAnalysis(result *OptimizationResult) {
+	// Compile lists of regarded pins.
+	m.regardedPins = make([]*Pin, 0, len(m.all))
+	for _, region := range m.regions {
+		region.regardedPins = make([]*Pin, 0, len(m.all))
+	}
+	// Find all regarded pins.
+	for _, pin := range m.all {
+		if result.matcher(pin) {
+			m.regardedPins = append(m.regardedPins, pin)
+			// Add to region.
+			if pin.region != nil {
+				pin.region.regardedPins = append(pin.region.regardedPins, pin)
+			}
+		}
+	}
+
+	// Initialize analysis state.
+	// All Pins get a fresh zero-valued state — including non-regarded Pins.
+	for _, pin := range m.all {
+		pin.analysis = &AnalysisState{}
+	}
+}
+
+// clearAnalysis resets all Pin.analysis fields and drops the regarded pin
+// lists, releasing the memory held during analysis.
+// The caller needs to hold the map and analysis lock.
+func (m *Map) clearAnalysis() {
+	m.regardedPins = nil
+	for _, region := range m.regions {
+		region.regardedPins = nil
+	}
+	for _, pin := range m.all {
+		pin.analysis = nil
+	}
+}
+
+// OptimizationResult holds the result of an optimization analysis.
+type OptimizationResult struct {
+	// Purpose holds a semi-human readable constant of the optimization purpose.
+	Purpose string
+
+	// Approach holds human readable descriptions of how the stated purpose
+	// should be achieved.
+	Approach []string
+
+	// SuggestedConnections holds the Hubs to which connections are suggested.
+	SuggestedConnections []*SuggestedConnection
+
+	// MaxConnect specifies how many connections should be created at maximum
+	// based on this optimization.
+	MaxConnect int
+
+	// StopOthers specifies if other connections than the suggested ones may
+	// be stopped.
+	StopOthers bool
+
+	// opts holds the options for matching Hubs in this optimization.
+	opts *HubOptions
+
+	// matcher is the matcher used to create the regarded Pins.
+	// Required for updating suggested hop distance.
+	matcher PinMatcher
+}
+
+// SuggestedConnection holds suggestions by the optimization system.
+type SuggestedConnection struct {
+	// Hub holds the Hub to which a connection is suggested.
+	Hub *hub.Hub
+	// pin holds the Pin of the Hub.
+	pin *Pin
+	// Reason holds a reason why this connection is suggested.
+	Reason string
+	// Duplicate marks duplicate entries. These should be ignored when
+	// connecting, but are helpful for understanding the optimization result.
+	Duplicate bool
+}
+
+// addApproach appends a human-readable description of an optimization step
+// to the result.
+func (or *OptimizationResult) addApproach(description string) {
+	or.Approach = append(or.Approach, description)
+}
+
+// addSuggested marks the given Pins as suggested and appends them to the
+// result's suggested connections with the given reason. Entries already
+// suggested are appended again but flagged via Duplicate, keeping the full
+// reasoning visible while letting consumers skip repeats.
+func (or *OptimizationResult) addSuggested(reason string, pins ...*Pin) {
+	for _, pin := range pins {
+		// Mark as suggested.
+		pin.analysis.Suggested = true
+
+		// Check if this is a duplicate.
+		var duplicate bool
+		for _, sc := range or.SuggestedConnections {
+			if pin.Hub.ID == sc.Hub.ID {
+				duplicate = true
+				break
+			}
+		}
+
+		// Add to suggested connections.
+		or.SuggestedConnections = append(or.SuggestedConnections, &SuggestedConnection{
+			Hub:       pin.Hub,
+			pin:       pin,
+			Reason:    reason,
+			Duplicate: duplicate,
+		})
+
+		// Update hop distances if we have a matcher.
+		if or.matcher != nil {
+			or.markSuggestedReachable(pin, 2)
+			or.markSuggestedReachableInRegion(pin, 2)
+		}
+	}
+}
+
+// markSuggestedReachable records that suggested is reachable in hopDistance
+// hops via the suggested connections, and recursively updates the distances
+// of its matching neighbors with hopDistance+1.
+// NOTE(review): AnalysisState.SuggestedHopDistance starts at its zero value
+// (0), so the guard below also returns for Pins whose distance was never
+// set — confirm that 0 is intended to mean "unset/ignore" here, as written
+// distances can only shrink from a previously set non-zero value.
+func (or *OptimizationResult) markSuggestedReachable(suggested *Pin, hopDistance int) {
+	// Don't update if distance is greater or equal than current one.
+	if hopDistance >= suggested.analysis.SuggestedHopDistance {
+		return
+	}
+
+	// Set suggested hop distance.
+	suggested.analysis.SuggestedHopDistance = hopDistance
+
+	// Increase distance and apply to matching Pins.
+	hopDistance++
+	for _, lane := range suggested.ConnectedTo {
+		if or.matcher(lane.Pin) {
+			or.markSuggestedReachable(lane.Pin, hopDistance)
+		}
+	}
+}
+
+// Optimize analyzes the map and suggests changes.
+// It holds the map read lock for the duration of the analysis and returns
+// ErrEmptyMap when the map holds no Pins.
+func (m *Map) Optimize(opts *HubOptions) (result *OptimizationResult, err error) {
+	m.RLock()
+	defer m.RUnlock()
+
+	// Check if the map is empty.
+	if m.isEmpty() {
+		return nil, ErrEmptyMap
+	}
+
+	// Set default options if unset.
+	if opts == nil {
+		opts = &HubOptions{}
+	}
+
+	return m.optimize(opts)
+}
+
+// optimize is the internal optimization routine; callers must hold the map
+// read lock. It first handles bootstrapping and desegregation, then
+// optionally waits until enough Pins are measured, and finally runs the
+// target-structure optimizers (global, regional and satellite).
+func (m *Map) optimize(opts *HubOptions) (result *OptimizationResult, err error) {
+	if m.home == nil {
+		return nil, ErrHomeHubUnset
+	}
+
+	// Set default options if unset.
+	if opts == nil {
+		opts = &HubOptions{}
+	}
+
+	// Create result.
+	result = &OptimizationResult{
+		opts:    opts,
+		matcher: opts.Matcher(TransitHub, m.intel),
+	}
+
+	// Set up analysis.
+	m.analysisLock.Lock()
+	defer m.analysisLock.Unlock()
+	m.initAnalysis(result)
+	defer m.clearAnalysis()
+
+	// Bootstrap to the network and desegregate map.
+	// If there is a result, return it immediately.
+	returnImmediately := m.optimizeForBootstrappingAndDesegregation(result)
+	if returnImmediately {
+		return result, nil
+	}
+
+	// Check if we have the measurements we need.
+	if m.measuringEnabled {
+		// Count pins with valid measurements.
+		var validMeasurements float32
+		for _, pin := range m.regardedPins {
+			if pin.measurements.Valid() {
+				validMeasurements++
+			}
+		}
+
+		// If less than the required share of regarded Pins have valid
+		// measurements, wait until we have that. regardedPins is non-empty
+		// here, as an empty set triggers the bootstrap case above.
+		if validMeasurements/float32(len(m.regardedPins)) < waitUntilMeasuredUpToPercent {
+			return &OptimizationResult{
+				Purpose: OptimizePurposeWait,
+				// Derive the percentage from the threshold constant so the
+				// message cannot drift from the actual behavior (it
+				// previously hard-coded "80%" while the constant was 0.5).
+				Approach: []string{fmt.Sprintf(
+					"Wait for measurements of %.0f%% of regarded nodes for better optimization.",
+					waitUntilMeasuredUpToPercent*100,
+				)},
+			}, nil
+		}
+	}
+
+	// Set default values for target structure optimization.
+	result.Purpose = OptimizePurposeTargetStructure
+	result.MaxConnect = 3
+	result.StopOthers = true
+
+	// Optimize for lowest cost.
+	m.optimizeForLowestCost(result, optimizationLowestCostConnections)
+
+	// Optimize for lowest cost in region.
+	m.optimizeForLowestCostInRegion(result)
+
+	// Optimize for distance constraint in region.
+	m.optimizeForDistanceConstraintInRegion(result, 3)
+
+	// Optimize for region-to-region connectivity.
+	m.optimizeForRegionConnectivity(result)
+
+	// Optimize for satellite-to-region connectivity.
+	m.optimizeForSatelliteConnectivity(result)
+
+	// Lapse traffic stats after optimizing for good fresh data next time.
+	for _, crane := range docks.GetAllAssignedCranes() {
+		crane.NetState.LapsePeriod()
+	}
+
+	// Clean and return.
+	return result, nil
+}
+
+// optimizeForBootstrappingAndDesegregation handles the two situations in
+// which regular optimization must not run yet:
+//   - no regarded Pin is reachable: suggest bootstrapping to one Hub;
+//   - we are in a minority partition of the network (and have not tried
+//     recently): suggest connecting to an unreachable Hub to desegregate.
+// It reports true when the caller should return the result immediately.
+func (m *Map) optimizeForBootstrappingAndDesegregation(result *OptimizationResult) (returnImmediately bool) {
+	// All regarded Pins are reachable.
+	reachable := len(m.regardedPins)
+
+	// Count Pins that may be connectable.
+	connectable := make([]*Pin, 0, len(m.all))
+	// Copy opts as we are going to make changes.
+	opts := result.opts.Copy()
+	opts.NoDefaults = true
+	opts.Regard = StateNone
+	opts.Disregard = StateSummaryDisregard
+	// Collect Pins with matcher.
+	matcher := opts.Matcher(TransitHub, m.intel)
+	for _, pin := range m.all {
+		if matcher(pin) {
+			connectable = append(connectable, pin)
+		}
+	}
+
+	switch {
+	case reachable == 0:
+
+		// Sort by lowest cost.
+		sort.Sort(sortByLowestMeasuredCost(connectable))
+
+		// Return bootstrap optimization.
+		result.Purpose = OptimizePurposeBootstrap
+		result.Approach = []string{"Connect to a near Hub to connect to the network."}
+		result.MaxConnect = 1
+		result.addSuggested("bootstrap", connectable...)
+		return true
+
+	case reachable > len(connectable)/2:
+		// We are part of the majority network, continue with regular optimization.
+
+	case time.Now().Add(-desegrationAttemptBackoff).Before(m.lastDesegrationAttempt):
+		// We tried to desegregate recently, continue with regular optimization.
+
+	default:
+		// We are in a network comprised of less than half of the known nodes.
+		// Attempt to connect to an unconnected one to desegregate the network.
+
+		// Copy opts as we are going to make changes.
+		opts = opts.Copy()
+		opts.NoDefaults = true
+		opts.Regard = StateNone
+		opts.Disregard = StateSummaryDisregard | StateReachable
+
+		// Iterate over all Pins to find any matching Pin.
+		desegregateWith := make([]*Pin, 0, len(m.all)-reachable)
+		matcher := opts.Matcher(TransitHub, m.intel)
+		for _, pin := range m.all {
+			if matcher(pin) {
+				desegregateWith = append(desegregateWith, pin)
+			}
+		}
+
+		// Sort by lowest connection cost.
+		sort.Sort(sortByLowestMeasuredCost(desegregateWith))
+
+		// Build desegregation optimization.
+		result.Purpose = OptimizePurposeDesegregate
+		result.Approach = []string{"Attempt to desegregate network by connection to an unreachable Hub."}
+		result.MaxConnect = 1
+		result.addSuggested("desegregate", desegregateWith...)
+
+		// Record desegregation attempt.
+		m.lastDesegrationAttempt = time.Now()
+
+		return true
+	}
+
+	return false
+}
+
+// optimizeForLowestCost suggests connections to the max lowest-cost Hubs
+// among all regarded Pins.
+func (m *Map) optimizeForLowestCost(result *OptimizationResult, max int) {
+	// Add approach.
+	result.addApproach(fmt.Sprintf("Connect to best (lowest cost) %d Hubs globally.", max))
+
+	// Sort by lowest cost.
+	sort.Sort(sortByLowestMeasuredCost(m.regardedPins))
+
+	// Add to suggested pins; cap at max when more are available.
+	if len(m.regardedPins) <= max {
+		result.addSuggested("best globally", m.regardedPins...)
+	} else {
+		result.addSuggested("best globally", m.regardedPins[:max]...)
+	}
+}
+
+// optimizeForDistanceConstraint suggests up to max connections so that all
+// regarded Pins come within optimizationHopDistanceTarget hops of the
+// suggested connections.
+func (m *Map) optimizeForDistanceConstraint(result *OptimizationResult, max int) { //nolint:unused // TODO: Likely to be used again.
+	// Add approach.
+	result.addApproach(fmt.Sprintf("Satisfy max hop constraint of %d globally.", optimizationHopDistanceTarget))
+
+	// Guard against an empty regarded set to avoid an index panic below.
+	if len(m.regardedPins) == 0 {
+		return
+	}
+
+	for i := 0; i < max; i++ {
+		// Re-sort after each suggestion, as addSuggested updates distances.
+		sort.Sort(sortBySuggestedHopDistanceAndLowestMeasuredCost(m.regardedPins))
+
+		// Return when all regarded Pins are within the distance constraint.
+		if m.regardedPins[0].analysis.SuggestedHopDistance <= optimizationHopDistanceTarget {
+			return
+		}
+
+		// If not, suggest a connection to the best match.
+		result.addSuggested("satisfy global hop constraint", m.regardedPins[0])
+	}
+}
diff --git a/spn/navigator/optimize_region.go b/spn/navigator/optimize_region.go
new file mode 100644
index 00000000..14814813
--- /dev/null
+++ b/spn/navigator/optimize_region.go
@@ -0,0 +1,224 @@
+package navigator
+
+import (
+	"fmt"
+	"sort"
+)
+
+// markSuggestedReachableInRegion is the regional variant of
+// markSuggestedReachable: it only follows lanes that stay within the
+// suggested Pin's own region.
+// NOTE(review): as with SuggestedHopDistance, the zero value (0) of
+// SuggestedHopDistanceInRegion makes the guard below return for Pins whose
+// distance was never set — confirm this is intended.
+func (or *OptimizationResult) markSuggestedReachableInRegion(suggested *Pin, hopDistance int) {
+	// Abort if suggested Pin has no region.
+	if suggested.region == nil {
+		return
+	}
+
+	// Don't update if distance is greater or equal than current one.
+	if hopDistance >= suggested.analysis.SuggestedHopDistanceInRegion {
+		return
+	}
+
+	// Set suggested hop distance.
+	suggested.analysis.SuggestedHopDistanceInRegion = hopDistance
+
+	// Increase distance and apply to matching Pins in the same region.
+	hopDistance++
+	for _, lane := range suggested.ConnectedTo {
+		if lane.Pin.region != nil &&
+			lane.Pin.region.ID == suggested.region.ID &&
+			or.matcher(lane.Pin) {
+			or.markSuggestedReachableInRegion(lane.Pin, hopDistance)
+		}
+	}
+}
+
+// optimizeForLowestCostInRegion suggests connections to the lowest-cost
+// regarded Pins within the home region, up to the region's
+// internalMinLanesOnHub. It is a no-op when the home Hub is unset or not
+// part of a region.
+func (m *Map) optimizeForLowestCostInRegion(result *OptimizationResult) {
+	if m.home == nil || m.home.region == nil {
+		return
+	}
+	region := m.home.region
+
+	// Add approach.
+	result.addApproach(fmt.Sprintf("Connect to best (lowest cost) %d Hubs within the region.", region.internalMinLanesOnHub))
+
+	// Sort by lowest cost.
+	sort.Sort(sortByLowestMeasuredCost(region.regardedPins))
+
+	// Add to suggested pins; cap at the region's internal lane minimum.
+	if len(region.regardedPins) <= region.internalMinLanesOnHub {
+		result.addSuggested("best in region", region.regardedPins...)
+	} else {
+		result.addSuggested("best in region", region.regardedPins[:region.internalMinLanesOnHub]...)
+	}
+}
+
+// optimizeForDistanceConstraintInRegion suggests up to max connections so
+// that regarded Pins in the home region stay within the region's
+// internalMaxHops. No-op without a home region.
+func (m *Map) optimizeForDistanceConstraintInRegion(result *OptimizationResult, max int) {
+	if m.home == nil || m.home.region == nil {
+		return
+	}
+	region := m.home.region
+
+	// Add approach.
+	result.addApproach(fmt.Sprintf("Satisfy max hop constraint of %d within the region.", region.internalMaxHops))
+
+	// Sort by suggested hop distance, then lowest cost.
+	// NOTE(review): the list is sorted only once, so later iterations use
+	// distances as they were before earlier suggestions updated them —
+	// confirm this is intended (the global variant re-sorts per iteration).
+	sort.Sort(sortBySuggestedHopDistanceInRegionAndLowestMeasuredCost(region.regardedPins))
+
+	for i := 0; i < max && i < len(region.regardedPins); i++ {
+		// Return when all regarded Pins are within the distance constraint.
+		if region.regardedPins[i].analysis.SuggestedHopDistanceInRegion <= region.internalMaxHops {
+			return
+		}
+
+		// If not, suggest a connection to the best match.
+		result.addSuggested("satisfy regional hop constraint", region.regardedPins[i])
+	}
+}
+
+// optimizeForRegionConnectivity suggests lanes from the home region to every
+// other region, aiming for the other region's minimum lane count while
+// respecting per-Hub lane limits. For each foreign region it walks that
+// region's Pins from cheapest to most expensive and suggests a lane when a
+// free slot exists or when our cost beats an existing lane within the limit.
+// No-op without a home region.
+func (m *Map) optimizeForRegionConnectivity(result *OptimizationResult) {
+	if m.home == nil || m.home.region == nil {
+		return
+	}
+	region := m.home.region
+
+	// Add approach.
+	result.addApproach("Connect region to other regions.")
+
+	// Optimize for every region.
+checkRegions:
+	for _, otherRegion := range m.regions {
+		// Skip own region.
+		if region.ID == otherRegion.ID {
+			continue
+		}
+
+		// Collect data on connections to that region.
+		// Note: this mutates the analysis state of otherRegion's Pins.
+		lanesToRegion, highestCostWithinLaneLimit := m.countConnectionsToRegion(result, region, otherRegion)
+
+		// Sort by lowest cost.
+		sort.Sort(sortByLowestMeasuredCost(otherRegion.regardedPins))
+
+		// Find cheapest connections with a free slot or better values.
+		var lanesSuggested int
+		for _, pin := range otherRegion.regardedPins {
+			myCost := pin.measurements.GetCalculatedCost()
+
+			// Check if we are done or region is satisfied.
+			switch {
+			case lanesSuggested >= region.regionalMaxLanesOnHub:
+				// We hit our max.
+				continue checkRegions
+			case lanesToRegion >= otherRegion.regionalMinLanes && myCost >= highestCostWithinLaneLimit:
+				// Region has enough lanes and we are not better.
+				continue checkRegions
+			}
+
+			// Check if we can contribute on this Pin.
+			switch {
+			case pin.analysis.CrossRegionalConnections < otherRegion.regionalMaxLanesOnHub &&
+				lanesToRegion < otherRegion.regionalMinLanes:
+				// There is a free spot on this Pin and the region needs more connections.
+				result.addSuggested("occupy cross-region lane on pin", pin)
+				lanesSuggested++
+				lanesToRegion++
+				// Because our own Pin is not counted, this should be the default
+				// suggestion for a stable network.
+
+			case myCost < pin.analysis.CrossRegionalHighestCostInHubLimit:
+				// We have a better connection to this Pin than at least one other existing connection (within the limit!).
+				result.addSuggested("replace cross-region lane on pin", pin)
+				lanesSuggested++
+				lanesToRegion++
+
+			case myCost < highestCostWithinLaneLimit &&
+				pin.analysis.CrossRegionalConnections < otherRegion.regionalMaxLanesOnHub:
+				// We have a better connection to this Pin than another existing region-to-region connection.
+				result.addSuggested("replace unrelated cross-region lane", pin)
+				lanesSuggested++
+				lanesToRegion++
+			}
+		}
+	}
+}
+
+// countConnectionsToRegion analyzes existing lanes from this to another
+// region, without taking lanes from this (home) Hub into account.
+// It returns the number of lanes into otherRegion and the highest lane cost
+// among the cheapest regionalMaxLanesOnHub lanes per Pin. As a side effect
+// it fills the CrossRegional* analysis fields of otherRegion's Pins.
+func (m *Map) countConnectionsToRegion(result *OptimizationResult, region *Region, otherRegion *Region) (lanesToRegion int, highestCostWithinLaneLimit float32) {
+	for _, pin := range region.regardedPins {
+		// Skip self, so our own lanes do not influence the suggestion.
+		if m.home.Hub.ID == pin.Hub.ID {
+			continue
+		}
+
+		// Find lanes to other region.
+		for _, lane := range pin.ConnectedTo {
+			if lane.Pin.region != nil &&
+				lane.Pin.region.ID == otherRegion.ID &&
+				result.matcher(lane.Pin) {
+				// This is a lane from this region to a regarded Pin in the other region.
+				lanesToRegion++
+
+				// Count cross region connection.
+				lane.Pin.analysis.CrossRegionalConnections++
+
+				// Collect lane costs.
+				lane.Pin.analysis.CrossRegionalLaneCosts = append(
+					lane.Pin.analysis.CrossRegionalLaneCosts,
+					lane.Cost,
+				)
+			}
+		}
+	}
+
+	// Calculate lane costs from collected lane costs.
+	for _, pin := range otherRegion.regardedPins {
+		sort.Sort(sortCostsByLowest(pin.analysis.CrossRegionalLaneCosts))
+		switch {
+		case len(pin.analysis.CrossRegionalLaneCosts) == 0:
+			// Nothing to do.
+		case len(pin.analysis.CrossRegionalLaneCosts) < otherRegion.regionalMaxLanesOnHub:
+			// Fewer lanes than the limit: the most expensive existing lane counts.
+			pin.analysis.CrossRegionalLowestCostLane = pin.analysis.CrossRegionalLaneCosts[0]
+			pin.analysis.CrossRegionalHighestCostInHubLimit = pin.analysis.CrossRegionalLaneCosts[len(pin.analysis.CrossRegionalLaneCosts)-1]
+		default:
+			// At or over the limit: only the cheapest regionalMaxLanesOnHub lanes count.
+			pin.analysis.CrossRegionalLowestCostLane = pin.analysis.CrossRegionalLaneCosts[0]
+			pin.analysis.CrossRegionalHighestCostInHubLimit = pin.analysis.CrossRegionalLaneCosts[otherRegion.regionalMaxLanesOnHub-1]
+		}
+
+		// Find highest cost within limit.
+		if pin.analysis.CrossRegionalHighestCostInHubLimit > highestCostWithinLaneLimit {
+			highestCostWithinLaneLimit = pin.analysis.CrossRegionalHighestCostInHubLimit
+		}
+	}
+
+	return lanesToRegion, highestCostWithinLaneLimit
+}
+
+// optimizeForSatelliteConnectivity suggests lanes from a region-less
+// ("satellite") home Hub to the cheapest Pins of every region, up to each
+// region's satelliteMinLanes. No-op when the home Hub belongs to a region.
+func (m *Map) optimizeForSatelliteConnectivity(result *OptimizationResult) {
+	if m.home == nil {
+		return
+	}
+	// This is only for Hubs that are not in a region.
+	if m.home.region != nil {
+		return
+	}
+
+	// Add approach.
+	result.addApproach("Connect satellite to regions.")
+
+	// Optimize for every region.
+	for _, region := range m.regions {
+		// Sort by lowest cost.
+		sort.Sort(sortByLowestMeasuredCost(region.regardedPins))
+
+		// Add to suggested pins; cap at the region's satellite lane minimum.
+		if len(region.regardedPins) <= region.satelliteMinLanes {
+			result.addSuggested(fmt.Sprintf("best to region %s", region.ID), region.regardedPins...)
+		} else {
+			result.addSuggested(fmt.Sprintf("best to region %s", region.ID), region.regardedPins[:region.satelliteMinLanes]...)
+		}
+	}
+}
+
+// sortCostsByLowest implements sort.Interface to sort lane costs ascending.
+type sortCostsByLowest []float32
+
+func (a sortCostsByLowest) Len() int           { return len(a) }
+func (a sortCostsByLowest) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
+func (a sortCostsByLowest) Less(i, j int) bool { return a[i] < a[j] }
diff --git a/spn/navigator/optimize_test.go b/spn/navigator/optimize_test.go
new file mode 100644
index 00000000..83f778cf
--- /dev/null
+++ b/spn/navigator/optimize_test.go
@@ -0,0 +1,188 @@
+package navigator
+
+import (
+	"strings"
+	"sync"
+	"testing"
+
+	"github.com/safing/portmaster/spn/hub"
+)
+
+var (
+	// optimizedDefaultMapCreate guards one-time creation of the shared map.
+	optimizedDefaultMapCreate sync.Once
+	optimizedDefaultMap       *Map
+)
+
+// getOptimizedDefaultTestMap lazily creates and optimizes a random test map
+// that is shared between tests; tests must not mutate it incompatibly.
+func getOptimizedDefaultTestMap(t *testing.T) *Map {
+	t.Helper()
+
+	optimizedDefaultMapCreate.Do(func() {
+		optimizedDefaultMap = createRandomTestMap(2, 100)
+		optimizedDefaultMap.optimizeTestMap(t)
+	})
+	return optimizedDefaultMap
+}
+
+// optimizeTestMap repeatedly runs the optimization from every Pin's
+// perspective and adds the suggested lanes, until one full run makes no
+// more changes to the map. A nil *testing.T is tolerated (logging is
+// skipped), consistent with the nil checks further down — previously
+// t.Helper and the first t.Logf were called unguarded.
+func (m *Map) optimizeTestMap(t *testing.T) {
+	if t != nil {
+		t.Helper()
+		t.Logf("optimizing test map %s with %d pins", m.Name, len(m.all))
+	}
+
+	// Save original Home, as we will be switching around the home for the
+	// optimization.
+	run := 0
+	newLanes := 0
+	originalHome := m.home
+	mcf := newMeasurementCachedFactory()
+
+	for {
+		run++
+		newLanesInRun := 0
+		// Let's check if we have a run without any map changes.
+		lastRun := true
+
+		for _, pin := range m.all {
+			// Set Home to this Pin for this iteration.
+			if !m.SetHome(pin.Hub.ID, nil) {
+				panic("failed to set home")
+			}
+
+			// Update measurements for the new home.
+			updateMeasurements(m, mcf)
+
+			optimizeResult, err := m.optimize(nil)
+			if err != nil {
+				panic(err)
+			}
+			lanesCreatedWithResult := 0
+			for _, connectTo := range optimizeResult.SuggestedConnections {
+				// Check if lane to suggested Hub already exists.
+				if m.home.Hub.GetLaneTo(connectTo.Hub.ID) != nil {
+					continue
+				}
+
+				// Add lanes to the Hub status.
+				_ = m.home.Hub.AddLane(createLane(connectTo.Hub.ID))
+				_ = connectTo.Hub.AddLane(createLane(m.home.Hub.ID))
+
+				// Update Hubs in map.
+				m.UpdateHub(m.home.Hub)
+				m.UpdateHub(connectTo.Hub)
+				newLanes++
+				newLanesInRun++
+
+				// We are changing the map in this run, so this is not the last.
+				lastRun = false
+
+				// Only create as many lanes as suggested by the result.
+				lanesCreatedWithResult++
+				if lanesCreatedWithResult >= optimizeResult.MaxConnect {
+					break
+				}
+			}
+			if optimizeResult.Purpose != OptimizePurposeTargetStructure {
+				// If we aren't yet building the target structure, we need to keep building.
+				lastRun = false
+			}
+		}
+
+		// Log progress.
+		if t != nil {
+			t.Logf(
+				"optimizing: added %d lanes in run #%d (%d Hubs) - %d new lanes in total",
+				newLanesInRun,
+				run,
+				len(m.all),
+				newLanes,
+			)
+		}
+
+		// End optimization after last run.
+		if lastRun {
+			break
+		}
+	}
+
+	// Log what was done and set home back to the original value.
+	if t != nil {
+		t.Logf("finished optimizing test map %s: added %d lanes in %d runs", m.Name, newLanes, run)
+	}
+	m.home = originalHome
+}
+
+// TestOptimize verifies that the optimized test map satisfies a maximum hop
+// distance between every pair of matching Hubs.
+// NOTE(review): runs with t.Parallel on a map shared via sync.Once and
+// mutates m.home directly — confirm no other parallel test uses the shared
+// map concurrently.
+func TestOptimize(t *testing.T) {
+	t.Parallel()
+
+	m := getOptimizedDefaultTestMap(t)
+	matcher := m.defaultOptions().Destination.Matcher(m.intel)
+	originalHome := m.home
+
+	for _, pin := range m.all {
+		// Set Home to this Pin for this iteration (restored below).
+		m.home = pin
+		err := m.recalculateReachableHubs()
+		if err != nil {
+			panic(err)
+		}
+
+		for _, peer := range m.all {
+			// Check if the Pin matches the criteria.
+			if !matcher(peer) {
+				continue
+			}
+
+			// TODO: Adapt test to new regions.
+			if peer.HopDistance > 5 {
+				t.Errorf("Optimization error: %s is %d hops away from %s", peer, peer.HopDistance, pin)
+			}
+		}
+	}
+
+	// Print stats
+	t.Logf("optimized map:\n%s\n", m.Stats())
+
+	m.home = originalHome
+}
+
+// updateMeasurements points every Pin's measurements at the cached entry for
+// the (current home, pin) pair. The home Pin itself receives nil, as
+// getOrCreate returns nil for identical IDs.
+func updateMeasurements(m *Map, mcf *measurementCachedFactory) {
+	for _, pin := range m.all {
+		pin.measurements = mcf.getOrCreate(m.home.Hub.ID, pin.Hub.ID)
+	}
+}
+
+// measurementCachedFactory caches hub-to-hub measurements so that repeated
+// optimization runs see stable values for the same pair of Hubs.
+type measurementCachedFactory struct {
+	// cache maps a canonical, direction-independent ID pair to its measurements.
+	cache map[string]*hub.Measurements
+}
+
+// newMeasurementCachedFactory returns an empty factory.
+func newMeasurementCachedFactory() *measurementCachedFactory {
+	return &measurementCachedFactory{
+		cache: make(map[string]*hub.Measurements),
+	}
+}
+
+// getOrCreate returns the cached measurements between from and to, creating
+// and caching them on first use (values come from createLatency and
+// createCapacity, defined elsewhere in the package). The cache key always
+// puts the lexicographically larger ID first, so A->B and B->A share one
+// entry. Returns nil when from and to are identical.
+func (mcf *measurementCachedFactory) getOrCreate(from, to string) *hub.Measurements {
+	var id string
+	comparison := strings.Compare(from, to)
+	switch {
+	case comparison == 0:
+		// Measuring a Hub against itself makes no sense.
+		return nil
+	case comparison > 0:
+		id = from + "-" + to
+	case comparison < 0:
+		id = to + "-" + from
+	}
+
+	m, ok := mcf.cache[id]
+	if ok {
+		return m
+	}
+
+	m = hub.NewMeasurements()
+	m.Latency = createLatency()
+	m.Capacity = createCapacity()
+	m.CalculatedCost = CalculateLaneCost(
+		m.Latency,
+		m.Capacity,
+	)
+	mcf.cache[id] = m
+	return m
+}
diff --git a/spn/navigator/options.go b/spn/navigator/options.go
new file mode 100644
index 00000000..05c93ea1
--- /dev/null
+++ b/spn/navigator/options.go
@@ -0,0 +1,330 @@
+package navigator
+
+import (
+	"context"
+
+	"github.com/safing/portbase/log"
+	"github.com/safing/portmaster/service/intel"
+	"github.com/safing/portmaster/service/profile"
+	"github.com/safing/portmaster/service/profile/endpoints"
+	"github.com/safing/portmaster/spn/hub"
+)
+
+// HubType is the usage type of a Hub in routing.
+type HubType uint8
+
+// Hub Types.
+const (
+	HomeHub HubType = iota
+	TransitHub
+	DestinationHub
+)
+
+// DeriveTunnelOptions derives and returns the tunnel options from the connection and profile.
+// This function lives in firewall/tunnel.go and is set here to avoid import loops.
+var DeriveTunnelOptions func(lp *profile.LayeredProfile, destination *intel.Entity, connEncrypted bool) *Options
+
+// Options holds configuration options for operations with the Map.
+type Options struct { //nolint:maligned
+	// Home holds the options for Home Hubs.
+	Home *HomeHubOptions
+
+	// Transit holds the options for Transit Hubs.
+	Transit *TransitHubOptions
+
+	// Destination holds the options for Destination Hubs.
+	Destination *DestinationHubOptions
+
+	// RoutingProfile defines the algorithm to use to find a route.
+	RoutingProfile string
+}
+
+// HomeHubOptions holds configuration options for Home Hub operations with the Map.
+type HomeHubOptions HubOptions
+
+// TransitHubOptions holds configuration options for Transit Hub operations with the Map.
+type TransitHubOptions HubOptions
+
+// DestinationHubOptions holds configuration options for Destination Hub operations with the Map.
+type DestinationHubOptions HubOptions
+
+// HubOptions holds configuration options for a specific hub type for operations with the Map.
+type HubOptions struct {
+	// Regard holds required States. Only Hubs where all of these are present
+	// will taken into account for the operation. If NoDefaults is not set, a
+	// basic set of desirable states is added automatically.
+	Regard PinState
+
+	// Disregard holds disqualifying States. Only Hubs where none of these are
+	// present will be taken into account for the operation. If NoDefaults is not
+	// set, a basic set of undesirable states is added automatically.
+	Disregard PinState
+
+	// NoDefaults declares whether default and recommended Regard and Disregard states should not be used.
+	NoDefaults bool
+
+	// HubPolicies is a collection of endpoint lists that Hubs must pass in order
+	// to be taken into account for the operation.
+	HubPolicies []endpoints.Endpoints
+
+	// RequireVerifiedOwners specifies which verified owners are allowed to be used.
+	// If the list is empty, all owners are allowed.
+	RequireVerifiedOwners []string
+
+	// CheckHubPolicyWith provides an entity that must match the Hubs entry or exit
+	// policy (depending on type) in order to be taken into account for the operation.
+	CheckHubPolicyWith *intel.Entity
+}
+
// Copy returns a copy of the Options. The hub option structs are copied one
// level deep; slices and entities within them are shared with the original.
func (o *Options) Copy() *Options {
	copied := &Options{
		RoutingProfile: o.RoutingProfile,
	}
	if o.Home != nil {
		c := HomeHubOptions(HubOptions(*o.Home).Copy())
		copied.Home = &c
	}
	if o.Transit != nil {
		c := TransitHubOptions(HubOptions(*o.Transit).Copy())
		copied.Transit = &c
	}
	if o.Destination != nil {
		c := DestinationHubOptions(HubOptions(*o.Destination).Copy())
		copied.Destination = &c
	}
	return copied
}
+
// Copy returns a shallow copy of the HubOptions. The HubPolicies and
// RequireVerifiedOwners slices and the CheckHubPolicyWith entity are shared
// with the original.
func (o HubOptions) Copy() HubOptions {
	return HubOptions{
		Regard:                o.Regard,
		Disregard:             o.Disregard,
		NoDefaults:            o.NoDefaults,
		HubPolicies:           o.HubPolicies,
		RequireVerifiedOwners: o.RequireVerifiedOwners,
		CheckHubPolicyWith:    o.CheckHubPolicyWith,
	}
}
+
+// PinMatcher is a stateful matching function generated by Options.
+type PinMatcher func(pin *Pin) bool
+
+// DefaultOptions returns the default options for this Map.
+func (m *Map) DefaultOptions() *Options {
+	m.Lock()
+	defer m.Unlock()
+
+	return m.defaultOptions()
+}
+
+func (m *Map) defaultOptions() *Options {
+	opts := &Options{
+		RoutingProfile: DefaultRoutingProfileID,
+	}
+
+	return opts
+}
+
+// HubPoliciesAreSet returns whether any of the given hub policies are set and non-empty.
+func HubPoliciesAreSet(policies []endpoints.Endpoints) bool {
+	for _, policy := range policies {
+		if policy.IsSet() {
+			return true
+		}
+	}
+	return false
+}
+
+var emptyHubOptions = &HubOptions{}
+
+// Matcher generates a PinMatcher based on the Options.
+func (o *HomeHubOptions) Matcher(hubIntel *hub.Intel) PinMatcher {
+	if o == nil {
+		return emptyHubOptions.Matcher(HomeHub, hubIntel)
+	}
+
+	// Convert and call base func.
+	ho := HubOptions(*o)
+	return ho.Matcher(HomeHub, hubIntel)
+}
+
+// Matcher generates a PinMatcher based on the Options.
+func (o *TransitHubOptions) Matcher(hubIntel *hub.Intel) PinMatcher {
+	if o == nil {
+		return emptyHubOptions.Matcher(TransitHub, hubIntel)
+	}
+
+	// Convert and call base func.
+	ho := HubOptions(*o)
+	return ho.Matcher(TransitHub, hubIntel)
+}
+
+// Matcher generates a PinMatcher based on the Options.
+func (o *DestinationHubOptions) Matcher(hubIntel *hub.Intel) PinMatcher {
+	if o == nil {
+		return emptyHubOptions.Matcher(DestinationHub, hubIntel)
+	}
+
+	// Convert and call base func.
+	ho := HubOptions(*o)
+	return ho.Matcher(DestinationHub, hubIntel)
+}
+
// Matcher generates a PinMatcher based on the Options.
// Always use the Matcher on option structs if you can.
func (o *Options) Matcher(hubType HubType, hubIntel *hub.Intel) PinMatcher {
	switch hubType {
	case HomeHub:
		return o.Home.Matcher(hubIntel)
	case TransitHub:
		return o.Transit.Matcher(hubIntel)
	case DestinationHub:
		return o.Destination.Matcher(hubIntel)
	default:
		// An unknown HubType is a programming error; the caller will panic
		// when invoking the nil matcher.
		return nil // This will panic, but should never be used.
	}
}
+
// Matcher generates a PinMatcher based on the Options. The regard/disregard
// states and applicable policies are compiled once up front; the returned
// closure then only evaluates them against a given Pin.
func (o *HubOptions) Matcher(hubType HubType, hubIntel *hub.Intel) PinMatcher {
	// Fallback to empty hub options.
	if o == nil {
		o = emptyHubOptions
	}

	// Compile states to regard and disregard.
	regard := o.Regard
	disregard := o.Disregard

	// Add default states.
	if !o.NoDefaults {
		// Add default States.
		regard = regard.Add(StateSummaryRegard)
		disregard = disregard.Add(StateSummaryDisregard)

		// Add type based Advisories.
		switch hubType {
		case HomeHub:
			// Home Hubs don't need to be reachable and don't need keys ready to be used.
			regard = regard.Remove(StateReachable)
			regard = regard.Remove(StateActive)
			// Follow advisory.
			disregard = disregard.Add(StateUsageAsHomeDiscouraged)
			// Home Hub may be the current Home Hub.
			disregard = disregard.Remove(StateIsHomeHub)
		case TransitHub:
			// Transit Hubs get no additional states.
		case DestinationHub:
			// Follow advisory.
			disregard = disregard.Add(StateUsageAsDestinationDiscouraged)
			// Do not use if Hub reports network issues.
			disregard = disregard.Add(StateConnectivityIssues)
		}
	}

	// Add intel policies.
	// NOTE(review): append may reuse the backing array of o.HubPolicies —
	// confirm callers do not mutate the options while a matcher is in use.
	hubPolicies := o.HubPolicies
	if hubIntel != nil && hubIntel.Parsed() != nil {
		switch hubType {
		case HomeHub:
			hubPolicies = append(hubPolicies, hubIntel.Parsed().HubAdvisory, hubIntel.Parsed().HomeHubAdvisory)
		case TransitHub:
			hubPolicies = append(hubPolicies, hubIntel.Parsed().HubAdvisory)
		case DestinationHub:
			hubPolicies = append(hubPolicies, hubIntel.Parsed().HubAdvisory, hubIntel.Parsed().DestinationHubAdvisory)
		}
	}

	// Add entry/exit policy checks.
	checkHubPolicyWith := o.CheckHubPolicyWith

	return func(pin *Pin) bool {
		// Check required Pin States.
		if !pin.State.Has(regard) || pin.State.HasAnyOf(disregard) {
			return false
		}

		// Check verified owners.
		if len(o.RequireVerifiedOwners) > 0 {
			// Check if Pin has a verified owner at all.
			if pin.VerifiedOwner == "" {
				return false
			}

			// Check if verified owner is in the list.
			inList := false
			for _, allowed := range o.RequireVerifiedOwners {
				if pin.VerifiedOwner == allowed {
					inList = true
					break
				}
			}

			// Pin does not have a verified owner from the allowed list.
			if !inList {
				return false
			}
		}

		// Check policies.
	policyCheck:
		for _, policy := range hubPolicies {
			// Check if policy is set.
			if !policy.IsSet() {
				continue
			}

			// Check if policy matches.
			result, reason := policy.MatchMulti(context.TODO(), pin.EntityV4, pin.EntityV6)
			switch result {
			case endpoints.NoMatch:
				// Continue with check.
			case endpoints.MatchError:
				log.Warningf("spn/navigator: failed to match policy: %s", reason)
				// Continue with check for now.
				// TODO: Rethink how to do this. If eg. the geoip database has a
				// problem, then no Hub will match. For now, just continue to the
				// next rule set. Not optimal, but fail safe.
			case endpoints.Denied:
				// Explicitly denied, abort immediately.
				return false
			case endpoints.Permitted:
				// Explicitly allowed, abort check and continue.
				break policyCheck
			}
		}

		// Check entry/exit policies.
		if checkHubPolicyWith != nil {
			switch hubType {
			case HomeHub:
				if endpointListMatch(pin.Hub.Info.EntryPolicy(), checkHubPolicyWith) == endpoints.Denied {
					// Hub does not allow entry from the given entity.
					return false
				}
			case TransitHub:
				// Transit Hubs do not have a hub policy.
			case DestinationHub:
				if endpointListMatch(pin.Hub.Info.ExitPolicy(), checkHubPolicyWith) == endpoints.Denied {
					// Hub does not allow exit to the given entity.
					return false
				}
			}
		}

		return true // All checks have passed.
	}
}
+
+func endpointListMatch(list endpoints.Endpoints, entity *intel.Entity) endpoints.EPResult {
+	// Check if endpoint list and entity are available.
+	if !list.IsSet() || entity == nil {
+		return endpoints.NoMatch
+	}
+
+	// Match and return result only.
+	result, _ := list.Match(context.TODO(), entity)
+	return result
+}
diff --git a/spn/navigator/pin.go b/spn/navigator/pin.go
new file mode 100644
index 00000000..9e113ab4
--- /dev/null
+++ b/spn/navigator/pin.go
@@ -0,0 +1,269 @@
+package navigator
+
+import (
+	"context"
+	"net"
+	"strings"
+	"time"
+
+	"github.com/tevino/abool"
+
+	"github.com/safing/portbase/log"
+	"github.com/safing/portmaster/service/intel"
+	"github.com/safing/portmaster/service/intel/geoip"
+	"github.com/safing/portmaster/spn/docks"
+	"github.com/safing/portmaster/spn/hub"
+)
+
+// Pin represents a Hub on a Map.
+type Pin struct { //nolint:maligned
+	// Hub Information
+	Hub        *hub.Hub
+	EntityV4   *intel.Entity
+	EntityV6   *intel.Entity
+	LocationV4 *geoip.Location
+	LocationV6 *geoip.Location
+
+	// Hub Status
+	State PinState
+	// VerifiedOwner holds the name of the verified owner / operator of the Hub.
+	VerifiedOwner string
+	// HopDistance signifies the needed hops to reach this Hub.
+	// HopDistance is measured from the view of a client.
+	// A Hub itself will have itself at distance 1.
+	// Directly connected Hubs have a distance of 2.
+	HopDistance int
+	// Cost is the routing cost of this Hub.
+	Cost float32
+	// ConnectedTo holds validated lanes.
+	ConnectedTo map[string]*Lane // Key is Hub ID.
+
+	// FailingUntil specifies until when this Hub should be regarded as failing.
+	// This is connected to StateFailing.
+	FailingUntil time.Time
+
+	// Connection holds a information about a connection to the Hub of this Pin.
+	Connection *PinConnection
+
+	// Internal
+
+	// pushChanges is set to true if something noteworthy on the Pin changed and
+	// an update needs to be pushed by the database storage interface to whoever
+	// is listening.
+	pushChanges *abool.AtomicBool
+
+	// measurements holds Measurements regarding this Pin.
+	// It must always be set and the reference must not be changed when measuring
+	// is enabled.
+	// Access to fields within are coordinated by itself.
+	measurements *hub.Measurements
+
+	// analysis holds the analysis state.
+	// Should only be set during analysis and be reset at the start and removed at the end of an analysis.
+	analysis *AnalysisState
+
+	// region is the region this Pin belongs to.
+	region *Region
+}
+
+// PinConnection represents a connection to a terminal on the Hub.
+type PinConnection struct {
+	// Terminal holds the active terminal session.
+	Terminal *docks.ExpansionTerminal
+
+	// Route is the route built for this terminal.
+	Route *Route
+}
+
// Lane is a connection to another Hub.
type Lane struct {
	// Pin is the Pin/Hub this Lane connects to.
	Pin *Pin

	// Capacity designates the available bandwidth between these Hubs.
	// It is specified in bit/s.
	Capacity int

	// Latency designates the latency between these Hubs.
	// It is specified in nanoseconds.
	Latency time.Duration

	// Cost is the routing cost of this lane.
	Cost float32

	// active is a helper flag in order to help remove abandoned Lanes.
	active bool
}
+
+// Lock locks the Pin via the Hub's lock.
+func (pin *Pin) Lock() {
+	pin.Hub.Lock()
+}
+
+// Unlock unlocks the Pin via the Hub's lock.
+func (pin *Pin) Unlock() {
+	pin.Hub.Unlock()
+}
+
+// String returns a human-readable representation of the Pin.
+func (pin *Pin) String() string {
+	return "<Pin " + pin.Hub.Name() + ">"
+}
+
+// GetState returns the state of the pin.
+func (pin *Pin) GetState() PinState {
+	pin.Lock()
+	defer pin.Unlock()
+
+	return pin.State
+}
+
// updateLocationData fetches the necessary location data in order to correctly map out the Pin.
func (pin *Pin) updateLocationData() {
	// TODO: We are currently assigning the Hub ID to the entity domain to
	// support matching a Hub by its ID. The issue here is that the domain
	// rules are lower-cased, so we have to lower-case the ID here too.
	// This is not optimal from a security perspective, but there are still
	// enough bits left that this cannot be easily exploited.

	if pin.Hub.Info.IPv4 != nil {
		pin.EntityV4 = (&intel.Entity{
			IP:     pin.Hub.Info.IPv4,
			Domain: strings.ToLower(pin.Hub.ID) + ".",
		}).Init(0)

		var ok bool
		pin.LocationV4, ok = pin.EntityV4.GetLocation(context.TODO())
		if !ok {
			log.Warningf("spn/navigator: failed to get location of %s of %s", pin.Hub.Info.IPv4, pin.Hub.StringWithoutLocking())
			// NOTE(review): this early return skips the IPv6 handling below,
			// leaving previously set EntityV6/LocationV6 values untouched —
			// confirm this is intended rather than a plain "skip IPv4".
			return
		}
	} else {
		// No IPv4 address: clear any previous IPv4 entity and location.
		pin.EntityV4 = nil
		pin.LocationV4 = nil
	}

	if pin.Hub.Info.IPv6 != nil {
		pin.EntityV6 = (&intel.Entity{
			IP:     pin.Hub.Info.IPv6,
			Domain: strings.ToLower(pin.Hub.ID) + ".",
		}).Init(0)

		var ok bool
		pin.LocationV6, ok = pin.EntityV6.GetLocation(context.TODO())
		if !ok {
			log.Warningf("spn/navigator: failed to get location of %s of %s", pin.Hub.Info.IPv6, pin.Hub.StringWithoutLocking())
			return
		}
	} else {
		// No IPv6 address: clear any previous IPv6 entity and location.
		pin.EntityV6 = nil
		pin.LocationV6 = nil
	}
}
+
+// GetLocation returns the geoip location of the Pin, preferring first the given IP, then IPv4.
+func (pin *Pin) GetLocation(ip net.IP) *geoip.Location {
+	pin.Lock()
+	defer pin.Unlock()
+
+	switch {
+	case ip != nil && ip.Equal(pin.Hub.Info.IPv4) && pin.LocationV4 != nil:
+		return pin.LocationV4
+	case ip != nil && ip.Equal(pin.Hub.Info.IPv6) && pin.LocationV6 != nil:
+		return pin.LocationV6
+	case pin.LocationV4 != nil:
+		return pin.LocationV4
+	case pin.LocationV6 != nil:
+		return pin.LocationV6
+	default:
+		return nil
+	}
+}
+
+// SetActiveTerminal sets an active terminal for the pin.
+func (pin *Pin) SetActiveTerminal(pc *PinConnection) {
+	pin.Lock()
+	defer pin.Unlock()
+
+	pin.Connection = pc
+	if pin.Connection != nil && pin.Connection.Terminal != nil {
+		pin.Connection.Terminal.SetChangeNotifyFunc(pin.NotifyTerminalChange)
+	}
+
+	pin.pushChanges.Set()
+}
+
+// GetActiveTerminal returns the active terminal of the pin.
+func (pin *Pin) GetActiveTerminal() *docks.ExpansionTerminal {
+	pin.Lock()
+	defer pin.Unlock()
+
+	if !pin.hasActiveTerminal() {
+		return nil
+	}
+	return pin.Connection.Terminal
+}
+
+// HasActiveTerminal returns whether the Pin has an active terminal.
+func (pin *Pin) HasActiveTerminal() bool {
+	pin.Lock()
+	defer pin.Unlock()
+
+	return pin.hasActiveTerminal()
+}
+
+func (pin *Pin) hasActiveTerminal() bool {
+	return pin.Connection != nil &&
+		pin.Connection.Terminal.Abandoning.IsNotSet()
+}
+
+// NotifyTerminalChange notifies subscribers of the changed terminal.
+func (pin *Pin) NotifyTerminalChange() {
+	pin.pushChanges.Set()
+	pin.pushChange()
+}
+
+// IsFailing returns whether the pin should be treated as failing.
+// The Pin is locked for this.
+func (pin *Pin) IsFailing() bool {
+	pin.Lock()
+	defer pin.Unlock()
+
+	return time.Now().Before(pin.FailingUntil)
+}
+
+// MarkAsFailingFor marks the pin as failing.
+// The Pin is locked for this.
+// Changes are pushed.
+func (pin *Pin) MarkAsFailingFor(duration time.Duration) {
+	pin.Lock()
+	defer pin.Unlock()
+
+	until := time.Now().Add(duration)
+	// Only ever increase failing until, never reduce.
+	if until.After(pin.FailingUntil) {
+		pin.FailingUntil = until
+	}
+
+	pin.addStates(StateFailing)
+
+	pin.pushChanges.Set()
+	pin.pushChange()
+}
+
+// ResetFailingState resets the failing state.
+// The Pin is locked for this.
+// Changes are not pushed, but Pins are marked.
+func (pin *Pin) ResetFailingState() {
+	pin.Lock()
+	defer pin.Unlock()
+
+	if time.Now().Before(pin.FailingUntil) {
+		pin.FailingUntil = time.Now()
+		pin.pushChanges.Set()
+	}
+	if pin.State.Has(StateFailing) {
+		pin.removeStates(StateFailing)
+		pin.pushChanges.Set()
+	}
+}
diff --git a/spn/navigator/pin_export.go b/spn/navigator/pin_export.go
new file mode 100644
index 00000000..85fd279e
--- /dev/null
+++ b/spn/navigator/pin_export.go
@@ -0,0 +1,98 @@
+package navigator
+
+import (
+	"sync"
+	"time"
+
+	"github.com/safing/portbase/database/record"
+	"github.com/safing/portmaster/service/intel"
+	"github.com/safing/portmaster/spn/hub"
+)
+
+// PinExport is the exportable version of a Pin.
+type PinExport struct {
+	record.Base
+	sync.Mutex
+
+	ID        string
+	Name      string
+	Map       string
+	FirstSeen time.Time
+
+	EntityV4 *intel.Entity
+	EntityV6 *intel.Entity
+	// TODO: add coords
+
+	States        []string // From pin.State
+	VerifiedOwner string
+	HopDistance   int
+
+	ConnectedTo   map[string]*LaneExport // Key is Hub ID.
+	Route         []string               // Includes Home Hub and this Pin's ID.
+	SessionActive bool
+
+	Info   *hub.Announcement
+	Status *hub.Status
+}
+
// LaneExport is the exportable version of a Lane.
type LaneExport struct {
	// HubID is the ID of the Hub this lane connects to.
	HubID string

	// Capacity designates the available bandwidth between these Hubs.
	// It is specified in bit/s.
	Capacity int

	// Latency designates the latency between these Hubs.
	// It is specified in nanoseconds.
	Latency time.Duration
}
+
+// Export puts the Pin's information into an exportable format.
+func (pin *Pin) Export() *PinExport {
+	pin.Lock()
+	defer pin.Unlock()
+
+	// Shallow copy static values.
+	export := &PinExport{
+		ID:            pin.Hub.ID,
+		Name:          pin.Hub.Info.Name,
+		Map:           pin.Hub.Map,
+		FirstSeen:     pin.Hub.FirstSeen,
+		EntityV4:      pin.EntityV4,
+		EntityV6:      pin.EntityV6,
+		States:        pin.State.Export(),
+		VerifiedOwner: pin.VerifiedOwner,
+		HopDistance:   pin.HopDistance,
+		SessionActive: pin.hasActiveTerminal() || pin.State.Has(StateIsHomeHub),
+		Info:          pin.Hub.Info,   // Is updated as a whole, no need to copy.
+		Status:        pin.Hub.Status, // Is updated as a whole, no need to copy.
+	}
+
+	// Export lanes.
+	export.ConnectedTo = make(map[string]*LaneExport, len(pin.ConnectedTo))
+	for key, lane := range pin.ConnectedTo {
+		export.ConnectedTo[key] = &LaneExport{
+			HubID:    lane.Pin.Hub.ID,
+			Capacity: lane.Capacity,
+			Latency:  lane.Latency,
+		}
+	}
+
+	// Export route to Pin, if connected.
+	if pin.Connection != nil && pin.Connection.Route != nil {
+		export.Route = make([]string, len(pin.Connection.Route.Path))
+		for key, hop := range pin.Connection.Route.Path {
+			export.Route[key] = hop.HubID
+		}
+	}
+
+	// Create database record metadata.
+	export.SetKey(makeDBKey(export.Map, export.ID))
+	export.SetMeta(&record.Meta{
+		Created:  export.FirstSeen.Unix(),
+		Modified: time.Now().Unix(),
+	})
+
+	return export
+}
diff --git a/spn/navigator/region.go b/spn/navigator/region.go
new file mode 100644
index 00000000..a3798efe
--- /dev/null
+++ b/spn/navigator/region.go
@@ -0,0 +1,231 @@
+package navigator
+
+import (
+	"context"
+	"math"
+
+	"github.com/safing/portbase/log"
+	"github.com/safing/portmaster/service/profile/endpoints"
+	"github.com/safing/portmaster/spn/hub"
+)
+
+const (
+	defaultRegionalMinLanesPerHub  = 0.5
+	defaultRegionalMaxLanesOnHub   = 2
+	defaultSatelliteMinLanesPerHub = 0.3
+	defaultInternalMinLanesOnHub   = 3
+	defaultInternalMaxHops         = 3
+)
+
+// Region specifies a group of Hubs for optimization purposes.
+type Region struct {
+	ID           string
+	Name         string
+	config       *hub.RegionConfig
+	memberPolicy endpoints.Endpoints
+
+	pins         []*Pin
+	regardedPins []*Pin
+
+	regionalMinLanes      int
+	regionalMaxLanesOnHub int
+	satelliteMinLanes     int
+	internalMinLanesOnHub int
+	internalMaxHops       int
+}
+
+func (region *Region) getName() string {
+	switch {
+	case region == nil:
+		return "-"
+	case region.Name != "":
+		return region.Name
+	default:
+		return region.ID
+	}
+}
+
// updateRegions rebuilds the map's regions from the given config and
// re-assigns every Pin to its matching region. Pins that match no region
// keep a nil region.
func (m *Map) updateRegions(config []*hub.RegionConfig) {
	// Reset map and pins.
	m.regions = make([]*Region, 0, len(config))
	for _, pin := range m.all {
		pin.region = nil
	}

	// Stop if no regions are defined.
	if len(config) == 0 {
		return
	}

	// Build regions from config.
	for _, regionConfig := range config {
		// Check if region has an ID.
		if regionConfig.ID == "" {
			log.Error("spn/navigator: region is missing ID")
			// Abort adding this region to the map.
			continue
		}

		// Create new region.
		region := &Region{
			ID:     regionConfig.ID,
			Name:   regionConfig.Name,
			config: regionConfig,
		}

		// Parse member policy.
		if len(regionConfig.MemberPolicy) == 0 {
			log.Errorf("spn/navigator: member policy of region %s is missing", region.ID)
			// Abort adding this region to the map.
			continue
		}
		memberPolicy, err := endpoints.ParseEndpoints(regionConfig.MemberPolicy)
		if err != nil {
			log.Errorf("spn/navigator: failed to parse member policy of region %s: %s", region.ID, err)
			// Abort adding this region to the map.
			continue
		}
		region.memberPolicy = memberPolicy

		// Recalculate region properties.
		region.recalculateProperties()

		// Add region to map.
		m.regions = append(m.regions, region)
	}

	// Update region in all Pins.
	for _, pin := range m.all {
		m.updatePinRegion(pin)
	}
}
+
+func (region *Region) addPin(pin *Pin) {
+	// Find pin in region.
+	for _, regionPin := range region.pins {
+		if pin.Hub.ID == regionPin.Hub.ID {
+			// Pin is already part of region.
+			return
+		}
+	}
+
+	// Check if pin is already part of this region.
+	if pin.region != nil && pin.region.ID == region.ID {
+		return
+	}
+
+	// Remove pin from previous region.
+	if pin.region != nil {
+		pin.region.removePin(pin)
+	}
+
+	// Add new pin to region.
+	region.pins = append(region.pins, pin)
+	pin.region = region
+
+	// Recalculate region properties.
+	region.recalculateProperties()
+}
+
+func (region *Region) removePin(pin *Pin) {
+	// Find pin index in region.
+	removeIndex := -1
+	for index, regionPin := range region.pins {
+		if pin.Hub.ID == regionPin.Hub.ID {
+			removeIndex = index
+			break
+		}
+	}
+	if removeIndex < 0 {
+		// Pin is not part of region.
+		return
+	}
+
+	// Remove pin from region.
+	region.pins = append(region.pins[:removeIndex], region.pins[removeIndex+1:]...)
+
+	// Recalculate region properties.
+	region.recalculateProperties()
+}
+
+func (region *Region) recalculateProperties() {
+	// Regional properties.
+	region.regionalMinLanes = calculateMinLanes(
+		len(region.pins),
+		region.config.RegionalMinLanes,
+		region.config.RegionalMinLanesPerHub,
+		defaultRegionalMinLanesPerHub,
+	)
+	region.regionalMaxLanesOnHub = region.config.RegionalMaxLanesOnHub
+	if region.regionalMaxLanesOnHub <= 0 {
+		region.regionalMaxLanesOnHub = defaultRegionalMaxLanesOnHub
+	}
+
+	// Satellite properties.
+	region.satelliteMinLanes = calculateMinLanes(
+		len(region.pins),
+		region.config.SatelliteMinLanes,
+		region.config.SatelliteMinLanesPerHub,
+		defaultSatelliteMinLanesPerHub,
+	)
+
+	// Internal properties.
+	region.internalMinLanesOnHub = region.config.InternalMinLanesOnHub
+	if region.internalMinLanesOnHub <= 0 {
+		region.internalMinLanesOnHub = defaultInternalMinLanesOnHub
+	}
+	region.internalMaxHops = region.config.InternalMaxHops
+	if region.internalMaxHops <= 0 {
+		region.internalMaxHops = defaultInternalMaxHops
+	}
+	// Values below 2 do not make any sense for max hops.
+	if region.internalMaxHops < 2 {
+		region.internalMaxHops = 2
+	}
+}
+
// calculateMinLanes computes the minimum lane count for a region of the given
// size. It starts from the configured absolute minimum, raises it to the
// configured per-hub ratio if that yields more, and finally falls back to the
// default per-hub ratio if the result is still zero.
func calculateMinLanes(regionHubCount, minLanes int, minLanesPerHub, defaultMinLanesPerHub float64) (minLaneCount int) {
	// Guard against zero or negative region sizes.
	if regionHubCount <= 0 {
		regionHubCount = 1
	}
	hubs := float64(regionHubCount)

	// Start from the configured absolute minimum.
	minLaneCount = minLanes

	// The configured per-hub ratio may raise the minimum.
	if minLanesPerHub != 0 {
		if scaled := int(math.Ceil(hubs * minLanesPerHub)); scaled > minLaneCount {
			minLaneCount = scaled
		}
	}

	// Fall back to the default per-hub ratio when nothing else set a minimum.
	if minLaneCount <= 0 {
		minLaneCount = int(math.Ceil(hubs * defaultMinLanesPerHub))
	}

	return minLaneCount
}
+
+func (m *Map) updatePinRegion(pin *Pin) {
+	for _, region := range m.regions {
+		// Check if pin matches the region's member policy.
+		if pin.EntityV4 != nil {
+			result, _ := region.memberPolicy.Match(context.TODO(), pin.EntityV4)
+			if result == endpoints.Permitted {
+				region.addPin(pin)
+				return
+			}
+		}
+		if pin.EntityV6 != nil {
+			result, _ := region.memberPolicy.Match(context.TODO(), pin.EntityV6)
+			if result == endpoints.Permitted {
+				region.addPin(pin)
+				return
+			}
+		}
+	}
+}
diff --git a/spn/navigator/route.go b/spn/navigator/route.go
new file mode 100644
index 00000000..f1b98a38
--- /dev/null
+++ b/spn/navigator/route.go
@@ -0,0 +1,221 @@
+package navigator
+
+import (
+	"fmt"
+	mrand "math/rand"
+	"sort"
+	"strings"
+	"time"
+)
+
+// Routes holds a collection of Routes.
+type Routes struct {
+	All                 []*Route
+	randomizeTopPercent float32
+	maxCost             float32 // automatic
+	maxRoutes           int     // manual setting
+}
+
+// Len is the number of elements in the collection.
+func (r *Routes) Len() int {
+	return len(r.All)
+}
+
+// Less reports whether the element with index i should sort before the element
+// with index j.
+func (r *Routes) Less(i, j int) bool {
+	return r.All[i].TotalCost < r.All[j].TotalCost
+}
+
+// Swap swaps the elements with indexes i and j.
+func (r *Routes) Swap(i, j int) {
+	r.All[i], r.All[j] = r.All[j], r.All[i]
+}
+
+// isGoodEnough reports whether the route would survive a clean process.
+func (r *Routes) isGoodEnough(route *Route) bool {
+	if r.maxCost > 0 && route.TotalCost > r.maxCost {
+		return false
+	}
+	return true
+}
+
+// add adds a Route if it is good enough.
+func (r *Routes) add(route *Route) {
+	if !r.isGoodEnough(route) {
+		return
+	}
+	r.All = append(r.All, route.CopyUpTo(0))
+	r.clean()
+}
+
// clean sorts the routes and shortens the list to the configured maximum.
func (r *Routes) clean() {
	// Sort Routes so that the best ones are on top.
	sort.Sort(r)
	// Drop all routes beyond the configured maximum.
	if len(r.All) > r.maxRoutes {
		r.All = r.All[:r.maxRoutes]
	}
	// Set new maximum total cost: once the list is full, a new route must
	// beat the currently worst kept route to be accepted.
	if len(r.All) >= r.maxRoutes {
		r.maxCost = r.All[len(r.All)-1].TotalCost
	}
}
+
// randomizeTop shuffles the best (lowest-cost) routes that lie within the
// configured cost threshold in order to balance usage across the network.
func (r *Routes) randomizeTop() {
	switch {
	case r.randomizeTopPercent == 0:
		// Check if randomization is enabled.
		return
	case len(r.All) < 2:
		// Check if we have enough pins to work with.
		return
	}

	// Find randomization set.
	randomizeUpTo := len(r.All)
	// Routes are sorted by cost, so the first entry is the cheapest; routes
	// within randomizeTopPercent of its cost are eligible for shuffling.
	threshold := r.All[0].TotalCost * (1 + r.randomizeTopPercent)
	for i, r := range r.All { // Note: the loop variable r shadows the receiver here.
		// Find first value above the threshold to stop.
		if r.TotalCost > threshold {
			randomizeUpTo = i
			break
		}
	}

	// Shuffle top set.
	if randomizeUpTo >= 2 {
		mr := mrand.New(mrand.NewSource(time.Now().UnixNano())) //nolint:gosec
		mr.Shuffle(randomizeUpTo, r.Swap)
	}
}
+
+// Route is a path through the map.
+type Route struct {
+	// Path is a list of Transit Hubs and the Destination Hub, including the Cost
+	// for each Hop.
+	Path []*Hop
+
+	// DstCost is the calculated cost between the Destination Hub and the destination IP.
+	DstCost float32
+
+	// TotalCost is the sum of all costs of this Route.
+	TotalCost float32
+
+	// Algorithm is the ID of the algorithm used to calculate the route.
+	Algorithm string
+}
+
+// Hop is one hop of a route's path.
+type Hop struct {
+	pin *Pin
+
+	// HubID is the Hub ID.
+	HubID string
+
+	// Cost is the cost for both Lane to this Hub and the Hub itself.
+	Cost float32
+}
+
+// addHop adds a hop to the route.
+func (r *Route) addHop(pin *Pin, cost float32) {
+	r.Path = append(r.Path, &Hop{
+		pin:  pin,
+		Cost: cost,
+	})
+	r.recalculateTotalCost()
+}
+
+// completeRoute completes the route by adding the destination cost of the
+// connection between the last hop and the destination IP.
+func (r *Route) completeRoute(dstCost float32) {
+	r.DstCost = dstCost
+	r.recalculateTotalCost()
+}
+
+// removeHop removes the last hop from the Route.
+func (r *Route) removeHop() {
+	// Reset DstCost, as the route might have been completed.
+	r.DstCost = 0
+
+	if len(r.Path) >= 1 {
+		r.Path = r.Path[:len(r.Path)-1]
+	}
+	r.recalculateTotalCost()
+}
+
+// recalculateTotalCost recalculates to total cost of this route.
+func (r *Route) recalculateTotalCost() {
+	r.TotalCost = r.DstCost
+	for _, hop := range r.Path {
+		if hop.pin.HasActiveTerminal() {
+			// If we have an active connection, only take 80% of the cost.
+			r.TotalCost += hop.Cost * 0.8
+		} else {
+			r.TotalCost += hop.Cost
+		}
+	}
+}
+
// CopyUpTo makes a somewhat deep copy of the Route up to the specified amount
// and returns it. Hops themselves are not copied, because their data does not
// change. Therefore, returned Hops may not be edited.
// Specify an amount of 0 to copy all.
// NOTE(review): when n truncates the path, DstCost and TotalCost are still
// copied from the full route — confirm callers only rely on these fields for
// full copies (n == 0).
func (r *Route) CopyUpTo(n int) *Route {
	// Check amount.
	if n == 0 || n > len(r.Path) {
		n = len(r.Path)
	}

	newRoute := &Route{
		Path:      make([]*Hop, n),
		DstCost:   r.DstCost,
		TotalCost: r.TotalCost,
	}
	// copy only fills the n allocated slots, truncating the path if n < len.
	copy(newRoute.Path, r.Path)
	return newRoute
}
+
+// makeExportReady fills in all the missing data fields which are meant for
+// exporting only.
+func (r *Routes) makeExportReady(algorithm string) {
+	for _, route := range r.All {
+		route.makeExportReady(algorithm)
+	}
+}
+
+// makeExportReady fills in all the missing data fields which are meant for
+// exporting only.
+func (r *Route) makeExportReady(algorithm string) {
+	r.Algorithm = algorithm
+	for _, hop := range r.Path {
+		hop.makeExportReady()
+	}
+}
+
+// makeExportReady fills in all the missing data fields which are meant for
+// exporting only.
+func (hop *Hop) makeExportReady() {
+	hop.HubID = hop.pin.Hub.ID
+}
+
+// Pin returns the Pin of the Hop.
+func (hop *Hop) Pin() *Pin {
+	return hop.pin
+}
+
+func (r *Route) String() string {
+	s := make([]string, 0, len(r.Path)+2)
+	s = append(s, fmt.Sprintf("route with %.2fc:", r.TotalCost))
+	for i, hop := range r.Path {
+		if i == 0 {
+			s = append(s, hop.pin.String())
+		} else {
+			s = append(s, fmt.Sprintf("--> %.2fc %s", hop.Cost, hop.pin))
+		}
+	}
+	s = append(s, fmt.Sprintf("--> %.2fc", r.DstCost))
+	return strings.Join(s, " ")
+}
diff --git a/spn/navigator/routing-profiles.go b/spn/navigator/routing-profiles.go
new file mode 100644
index 00000000..9241c072
--- /dev/null
+++ b/spn/navigator/routing-profiles.go
@@ -0,0 +1,162 @@
+package navigator
+
+import (
+	"github.com/safing/portbase/log"
+	"github.com/safing/portmaster/service/profile"
+)
+
+// RoutingProfile defines a routing algorithm with some options.
+type RoutingProfile struct {
+	// ID is the identifier of the routing profile.
+	ID string
+
+	// Name is the human readable name of the profile.
+	Name string
+
+	// MinHops defines how many hops a route must have at minimum. In order to
+	// reduce confusion, the Home Hub is also counted.
+	MinHops int
+
+	// MaxHops defines the limit on how many hops a route may have. In order to
+	// reduce confusion, the Home Hub is also counted.
+	MaxHops int
+
+	// MaxExtraHops sets a limit on how many extra hops are allowed in addition
+	// to the amount of Hops in the currently best route. This is an optimization
+	// option and should not interfere with finding the best route, but might
+	// reduce the amount of routes found.
+	MaxExtraHops int
+
+	// MaxExtraCost sets a limit on the extra cost allowed in addition to the
+	// cost of the currently best route. This is an optimization option and
+	// should not interfere with finding the best route, but might reduce the
+	// amount of routes found.
+	MaxExtraCost float32
+}
+
+// Routing Profile IDs.
+const (
+	RoutingProfileHomeID      = "home"
+	RoutingProfileSingleHopID = "single-hop"
+	RoutingProfileDoubleHopID = "double-hop"
+	RoutingProfileTripleHopID = "triple-hop"
+)
+
+// Routing Profiles.
+var (
+	DefaultRoutingProfileID = profile.DefaultRoutingProfileID
+
+	// RoutingProfileHome routes directly to the Home Hub only.
+	RoutingProfileHome = &RoutingProfile{
+		ID:      RoutingProfileHomeID,
+		Name:    "Plain VPN Mode",
+		MinHops: 1,
+		MaxHops: 1,
+	}
+	RoutingProfileSingleHop = &RoutingProfile{
+		ID:           RoutingProfileSingleHopID,
+		Name:         "Speed Focused",
+		MinHops:      1,
+		MaxHops:      3,
+		MaxExtraHops: 1,
+		MaxExtraCost: 10000,
+	}
+	RoutingProfileDoubleHop = &RoutingProfile{
+		ID:           RoutingProfileDoubleHopID,
+		Name:         "Balanced",
+		MinHops:      2,
+		MaxHops:      4,
+		MaxExtraHops: 2,
+		MaxExtraCost: 10000,
+	}
+	RoutingProfileTripleHop = &RoutingProfile{
+		ID:           RoutingProfileTripleHopID,
+		Name:         "Privacy Focused",
+		MinHops:      3,
+		MaxHops:      5,
+		MaxExtraHops: 3,
+		MaxExtraCost: 10000,
+	}
+)
+
+// GetRoutingProfile returns the routing profile with the given ID.
+// Unknown IDs fall back to the balanced double-hop profile.
+func GetRoutingProfile(id string) *RoutingProfile {
+	switch id {
+	case RoutingProfileHomeID:
+		return RoutingProfileHome
+	case RoutingProfileSingleHopID:
+		return RoutingProfileSingleHop
+	case RoutingProfileDoubleHopID:
+		return RoutingProfileDoubleHop
+	case RoutingProfileTripleHopID:
+		return RoutingProfileTripleHop
+	default:
+		return RoutingProfileDoubleHop
+	}
+}
+
+// routeCompliance expresses how a route complies with a routing profile.
+type routeCompliance uint8
+
+const (
+	routeOk           routeCompliance = iota // Route is fully compliant and can be used.
+	routeNonCompliant                        // Route is not compliant, but this might change if more hops are added.
+	routeDisqualified                        // Route is disqualified and won't be able to become compliant.
+)
+
+// checkRouteCompliance checks whether the given (possibly partial) route
+// complies with the routing profile, taking the best already-found route in
+// foundRoutes into account for the optimization boundaries.
+func (rp *RoutingProfile) checkRouteCompliance(route *Route, foundRoutes *Routes) routeCompliance {
+	switch {
+	case len(route.Path) < rp.MinHops:
+		// Route is shorter than the defined minimum.
+		return routeNonCompliant
+	case len(route.Path) > rp.MaxHops:
+		// Route is longer than the defined maximum.
+		return routeDisqualified
+	}
+
+	// Check for hub re-use.
+	if len(route.Path) >= 2 {
+		lastHop := route.Path[len(route.Path)-1]
+		for _, hop := range route.Path[:len(route.Path)-1] {
+			if lastHop.pin.Hub.ID == hop.pin.Hub.ID {
+				return routeDisqualified
+			}
+		}
+	}
+
+	// Check if hub is already in use, if so check if the route matches.
+	if len(route.Path) >= 2 {
+		// Get active connection to the last pin of the current path.
+		lastPinConnection := route.Path[len(route.Path)-1].pin.Connection
+
+		// Note: the case order below matters - the length check guards the
+		// Path[len-2] access in the last case.
+		switch {
+		case lastPinConnection == nil:
+			// Last pin is not yet connected.
+		case len(lastPinConnection.Route.Path) < 2:
+			// Path of last pin does not have enough hops.
+			// This is unexpected and should not happen.
+			log.Errorf(
+				"navigator: expected active connection to %s to have 2 hops or more on path, but it had %d",
+				route.Path[len(route.Path)-1].pin.Hub.StringWithoutLocking(),
+				len(lastPinConnection.Route.Path),
+			)
+		case lastPinConnection.Route.Path[len(lastPinConnection.Route.Path)-2].pin.Hub.ID != route.Path[len(route.Path)-2].pin.Hub.ID:
+			// The previous hop of the existing route and the one we are evaluating don't match.
+			// Currently, we only allow one session per Hub.
+			return routeDisqualified
+		}
+	}
+
+	// Abort route exploration when we are outside the optimization boundaries.
+	if len(foundRoutes.All) > 0 {
+		// Get the best found route.
+		best := foundRoutes.All[0]
+		// Abort if current route exceeds max extra costs.
+		if route.TotalCost > best.TotalCost+rp.MaxExtraCost {
+			return routeDisqualified
+		}
+		// Abort if current route exceeds max extra hops.
+		if len(route.Path) > len(best.Path)+rp.MaxExtraHops {
+			return routeDisqualified
+		}
+	}
+
+	return routeOk
+}
diff --git a/spn/navigator/sort.go b/spn/navigator/sort.go
new file mode 100644
index 00000000..9fd0391e
--- /dev/null
+++ b/spn/navigator/sort.go
@@ -0,0 +1,141 @@
+package navigator
+
+// sortByPinID sorts Pins alphabetically by their Hub ID.
+type sortByPinID []*Pin
+
+func (a sortByPinID) Len() int           { return len(a) }
+func (a sortByPinID) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
+func (a sortByPinID) Less(i, j int) bool { return a[i].Hub.ID < a[j].Hub.ID }
+
+// sortByLowestMeasuredCost sorts Pins by lowest calculated cost, falling back
+// first to geo proximity (higher values first) and then to Hub ID for a
+// deterministic order.
+type sortByLowestMeasuredCost []*Pin
+
+func (a sortByLowestMeasuredCost) Len() int      { return len(a) }
+func (a sortByLowestMeasuredCost) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+func (a sortByLowestMeasuredCost) Less(i, j int) bool {
+	x := a[i].measurements.GetCalculatedCost()
+	y := a[j].measurements.GetCalculatedCost()
+	if x != y {
+		return x < y
+	}
+
+	// Fall back to geo proximity.
+	gx := a[i].measurements.GetGeoProximity()
+	gy := a[j].measurements.GetGeoProximity()
+	if gx != gy {
+		return gx > gy
+	}
+
+	// Fall back to Hub ID.
+	return a[i].Hub.ID < a[j].Hub.ID
+}
+
+// sortBySuggestedHopDistanceAndLowestMeasuredCost sorts Pins by highest
+// suggested hop distance first, then by lowest calculated cost, falling back
+// to geo proximity (higher values first) and Hub ID.
+type sortBySuggestedHopDistanceAndLowestMeasuredCost []*Pin
+
+func (a sortBySuggestedHopDistanceAndLowestMeasuredCost) Len() int      { return len(a) }
+func (a sortBySuggestedHopDistanceAndLowestMeasuredCost) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+func (a sortBySuggestedHopDistanceAndLowestMeasuredCost) Less(i, j int) bool {
+	// First sort by suggested hop distance.
+	if a[i].analysis.SuggestedHopDistance != a[j].analysis.SuggestedHopDistance {
+		return a[i].analysis.SuggestedHopDistance > a[j].analysis.SuggestedHopDistance
+	}
+
+	// Then by cost.
+	x := a[i].measurements.GetCalculatedCost()
+	y := a[j].measurements.GetCalculatedCost()
+	if x != y {
+		return x < y
+	}
+
+	// Fall back to geo proximity.
+	gx := a[i].measurements.GetGeoProximity()
+	gy := a[j].measurements.GetGeoProximity()
+	if gx != gy {
+		return gx > gy
+	}
+
+	// Fall back to Hub ID.
+	return a[i].Hub.ID < a[j].Hub.ID
+}
+
+// sortBySuggestedHopDistanceInRegionAndLowestMeasuredCost sorts Pins by
+// highest suggested in-region hop distance first, then by lowest calculated
+// cost, falling back to geo proximity (higher values first) and Hub ID.
+type sortBySuggestedHopDistanceInRegionAndLowestMeasuredCost []*Pin
+
+func (a sortBySuggestedHopDistanceInRegionAndLowestMeasuredCost) Len() int { return len(a) }
+func (a sortBySuggestedHopDistanceInRegionAndLowestMeasuredCost) Swap(i, j int) {
+	a[i], a[j] = a[j], a[i]
+}
+
+func (a sortBySuggestedHopDistanceInRegionAndLowestMeasuredCost) Less(i, j int) bool {
+	// First sort by suggested hop distance.
+	if a[i].analysis.SuggestedHopDistanceInRegion != a[j].analysis.SuggestedHopDistanceInRegion {
+		return a[i].analysis.SuggestedHopDistanceInRegion > a[j].analysis.SuggestedHopDistanceInRegion
+	}
+
+	// Then by cost.
+	x := a[i].measurements.GetCalculatedCost()
+	y := a[j].measurements.GetCalculatedCost()
+	if x != y {
+		return x < y
+	}
+
+	// Fall back to geo proximity.
+	gx := a[i].measurements.GetGeoProximity()
+	gy := a[j].measurements.GetGeoProximity()
+	if gx != gy {
+		return gx > gy
+	}
+
+	// Fall back to Hub ID.
+	return a[i].Hub.ID < a[j].Hub.ID
+}
+
+// sortByLowestMeasuredLatency sorts Pins by lowest measured latency. Zero
+// latency values are treated as missing measurements and sorted last. Falls
+// back to geo proximity (higher values first) and Hub ID.
+type sortByLowestMeasuredLatency []*Pin
+
+func (a sortByLowestMeasuredLatency) Len() int      { return len(a) }
+func (a sortByLowestMeasuredLatency) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+func (a sortByLowestMeasuredLatency) Less(i, j int) bool {
+	x, _ := a[i].measurements.GetLatency()
+	y, _ := a[j].measurements.GetLatency()
+	switch {
+	case x == y:
+		// Go to fallbacks.
+	case x == 0:
+		// Ignore zero values.
+		return false // j/y is better.
+	case y == 0:
+		// Ignore zero values.
+		return true // i/x is better.
+	default:
+		return x < y
+	}
+
+	// Fall back to geo proximity.
+	gx := a[i].measurements.GetGeoProximity()
+	gy := a[j].measurements.GetGeoProximity()
+	if gx != gy {
+		return gx > gy
+	}
+
+	// Fall back to Hub ID.
+	return a[i].Hub.ID < a[j].Hub.ID
+}
+
+// sortByHighestMeasuredCapacity sorts Pins by highest measured capacity,
+// falling back to geo proximity (higher values first) and Hub ID.
+type sortByHighestMeasuredCapacity []*Pin
+
+func (a sortByHighestMeasuredCapacity) Len() int      { return len(a) }
+func (a sortByHighestMeasuredCapacity) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+func (a sortByHighestMeasuredCapacity) Less(i, j int) bool {
+	x, _ := a[i].measurements.GetCapacity()
+	y, _ := a[j].measurements.GetCapacity()
+	if x != y {
+		return x > y
+	}
+
+	// Fall back to geo proximity.
+	gx := a[i].measurements.GetGeoProximity()
+	gy := a[j].measurements.GetGeoProximity()
+	if gx != gy {
+		return gx > gy
+	}
+
+	// Fall back to Hub ID.
+	return a[i].Hub.ID < a[j].Hub.ID
+}
diff --git a/spn/navigator/sort_test.go b/spn/navigator/sort_test.go
new file mode 100644
index 00000000..f424cc3d
--- /dev/null
+++ b/spn/navigator/sort_test.go
@@ -0,0 +1,112 @@
+package navigator
+
+import (
+	"sort"
+	"strings"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+
+	"github.com/safing/portmaster/spn/hub"
+)
+
+func TestSorting(t *testing.T) {
+	t.Parallel()
+
+	// Fixture: five pins with pairwise distinct latency, capacity, cost and
+	// hop distance values so each sorter produces a unique, full order.
+	list := []*Pin{
+		{
+			Hub: &hub.Hub{
+				ID: "a",
+			},
+			measurements: &hub.Measurements{
+				Latency:        3,
+				Capacity:       4,
+				CalculatedCost: 5,
+			},
+			analysis: &AnalysisState{
+				SuggestedHopDistance: 3,
+			},
+		},
+		{
+			Hub: &hub.Hub{
+				ID: "b",
+			},
+			measurements: &hub.Measurements{
+				Latency:        4,
+				Capacity:       3,
+				CalculatedCost: 1,
+			},
+			analysis: &AnalysisState{
+				SuggestedHopDistance: 2,
+			},
+		},
+		{
+			Hub: &hub.Hub{
+				ID: "c",
+			},
+			measurements: &hub.Measurements{
+				Latency:        5,
+				Capacity:       2,
+				CalculatedCost: 2,
+			},
+			analysis: &AnalysisState{
+				SuggestedHopDistance: 4,
+			},
+		},
+		{
+			Hub: &hub.Hub{
+				ID: "d",
+			},
+			measurements: &hub.Measurements{
+				Latency:        1,
+				Capacity:       1,
+				CalculatedCost: 3,
+			},
+			analysis: &AnalysisState{
+				SuggestedHopDistance: 4,
+			},
+		},
+		{
+			Hub: &hub.Hub{
+				ID: "e",
+			},
+			measurements: &hub.Measurements{
+				Latency:        2,
+				Capacity:       5,
+				CalculatedCost: 4,
+			},
+			analysis: &AnalysisState{
+				SuggestedHopDistance: 4,
+			},
+		},
+	}
+
+	// Each sorter is exercised on the result of the previous one; the
+	// expected orders account for that.
+	sort.Sort(sortByLowestMeasuredCost(list))
+	checkSorting(t, list, "b-c-d-e-a")
+
+	sort.Sort(sortBySuggestedHopDistanceAndLowestMeasuredCost(list))
+	checkSorting(t, list, "c-d-e-a-b")
+
+	sort.Sort(sortByLowestMeasuredLatency(list))
+	checkSorting(t, list, "d-e-a-b-c")
+
+	sort.Sort(sortByHighestMeasuredCapacity(list))
+	checkSorting(t, list, "e-a-b-c-d")
+
+	sort.Sort(sortByPinID(list))
+	checkSorting(t, list, "a-b-c-d-e")
+}
+
+// checkSorting asserts that the pins in sortedList appear in the order given
+// by expectedOrder, a dash-joined list of Hub IDs (e.g. "a-b-c").
+func checkSorting(t *testing.T, sortedList []*Pin, expectedOrder string) {
+	t.Helper()
+
+	// Build list ID string.
+	ids := make([]string, 0, len(sortedList))
+	for _, pin := range sortedList {
+		ids = append(ids, pin.Hub.ID)
+	}
+	sortedIDs := strings.Join(ids, "-")
+
+	// Check for matching order.
+	assert.Equal(t, expectedOrder, sortedIDs, "should match")
+}
diff --git a/spn/navigator/state.go b/spn/navigator/state.go
new file mode 100644
index 00000000..755e2895
--- /dev/null
+++ b/spn/navigator/state.go
@@ -0,0 +1,426 @@
+package navigator
+
+import (
+	"strings"
+	"time"
+)
+
+// PinState holds a bit-mapped collection of Pin states, or a single state used
+// for assigment and matching. States are single-bit flags and may be combined
+// with bitwise OR.
+type PinState uint16
+
+const (
+	// StateNone represents an empty state.
+	StateNone PinState = 0
+
+	// Negative States.
+
+	// StateInvalid signifies that there was an error while processing or
+	// handling this Hub.
+	StateInvalid PinState = 1 << (iota - 1) // 1 << 0 => 00000001 => 0x01
+
+	// StateSuperseded signifies that this Hub was superseded by another. This is
+	// the case if any other Hub with a matching IP was verified after this one.
+	// Verification timestamp equals Hub.FirstSeen.
+	StateSuperseded // 0x02
+
+	// StateFailing signifies that a recent error was encountered while
+	// communicating with this Hub. Pin.FailingUntil specifies when this state is
+	// re-evaluated at earliest.
+	StateFailing // 0x04
+
+	// StateOffline signifies that the Hub is offline.
+	StateOffline // 0x08
+
+	// Positive States.
+
+	// StateHasRequiredInfo signifies that the Hub announces the minimum required
+	// information about itself.
+	StateHasRequiredInfo // 0x10
+
+	// StateReachable signifies that the Hub is reachable via the network from
+	// the currently connected primary Hub.
+	StateReachable // 0x20
+
+	// StateActive signifies that everything seems fine with the Hub and
+	// connections to it should succeed. This is tested by checking if a valid
+	// semi-ephemeral public key is available.
+	StateActive // 0x40
+
+	_ // 0x80: Reserved
+
+	// Trust and Advisory States.
+
+	// StateTrusted signifies the Hub has the special trusted status.
+	StateTrusted // 0x0100
+
+	// StateUsageDiscouraged signifies that usage of the Hub is discouraged for any task.
+	StateUsageDiscouraged // 0x0200
+
+	// StateUsageAsHomeDiscouraged signifies that usage of the Hub as a Home Hub is discouraged.
+	StateUsageAsHomeDiscouraged // 0x0400
+
+	// StateUsageAsDestinationDiscouraged signifies that usage of the Hub as a Destination Hub is discouraged.
+	StateUsageAsDestinationDiscouraged // 0x0800
+
+	// Special States.
+
+	// StateIsHomeHub signifies that the Hub is the current Home Hub. While not
+	// negative in itself, selecting the Home Hub does not make sense in almost
+	// all cases.
+	StateIsHomeHub // 0x1000
+
+	// StateConnectivityIssues signifies that the Hub reports connectivity issues.
+	// This might impact all connectivity or just some.
+	// This does not invalidate the Hub for all operations and not in all cases.
+	StateConnectivityIssues // 0x2000
+
+	// StateAllowUnencrypted signifies that the Hub is available to handle unencrypted connections.
+	StateAllowUnencrypted // 0x4000
+
+	// State Summaries.
+
+	// StateSummaryRegard summarizes all states that must always be set in order to take a Hub into consideration for any task.
+	// TODO: Add StateHasRequiredInfo when we start enforcing Hub information.
+	StateSummaryRegard = StateReachable | StateActive
+
+	// StateSummaryDisregard summarizes all states that must not be set in order to take a Hub into consideration for any task.
+	StateSummaryDisregard = StateInvalid |
+		StateSuperseded |
+		StateFailing |
+		StateOffline |
+		StateUsageDiscouraged |
+		StateIsHomeHub
+)
+
+// allStates lists all single-flag states, in bit order, for iteration (e.g.
+// when exporting state names). StateNone and the summary values are excluded.
+var allStates = []PinState{
+	StateInvalid,
+	StateSuperseded,
+	StateFailing,
+	StateOffline,
+	StateHasRequiredInfo,
+	StateReachable,
+	StateActive,
+	StateTrusted,
+	StateUsageDiscouraged,
+	StateUsageAsHomeDiscouraged,
+	StateUsageAsDestinationDiscouraged,
+	StateIsHomeHub,
+	StateConnectivityIssues,
+	StateAllowUnencrypted,
+}
+
+// Add returns a new PinState with the given states added.
+// The receiver is not modified.
+func (pinState PinState) Add(states PinState) PinState {
+	// OR:
+	//   0011
+	// | 0101
+	// = 0111
+	return pinState | states
+}
+
+// Remove returns a new PinState with the given states removed.
+// The receiver is not modified.
+func (pinState PinState) Remove(states PinState) PinState {
+	// AND NOT:
+	//    0011
+	// &^ 0101
+	// =  0010
+	return pinState &^ states
+}
+
+// Has returns whether the state has all of the given states.
+func (pinState PinState) Has(states PinState) bool {
+	// AND, then compare with the queried states:
+	//   0011
+	// & 0101
+	// = 0001 (== 0101 only if every queried bit was set)
+
+	return pinState&states == states
+}
+
+// HasAnyOf returns whether the state has any of the given states.
+func (pinState PinState) HasAnyOf(states PinState) bool {
+	// AND, then check for any remaining bit:
+	//   0011
+	// & 0101
+	// = 0001 (non-zero, so at least one queried state is set)
+
+	return (pinState & states) != 0
+}
+
+// HasNoneOf returns whether the state does not have any of the given states.
+func (pinState PinState) HasNoneOf(states PinState) bool {
+	// AND, then check that no bit remains:
+	//   0011
+	// & 0101
+	// = 0001 (non-zero, so a queried state is set and the check fails)
+
+	return (pinState & states) == 0
+}
+
+// addStates adds the given states on the Pin.
+// NOTE(review): no locking here; presumably callers hold the map lock — confirm.
+func (pin *Pin) addStates(states PinState) {
+	pin.State = pin.State.Add(states)
+}
+
+// removeStates removes the given states on the Pin.
+// NOTE(review): no locking here; presumably callers hold the map lock — confirm.
+func (pin *Pin) removeStates(states PinState) {
+	pin.State = pin.State.Remove(states)
+}
+
+// updateStateSuperseded recalculates the StateSuperseded flag of the given
+// pin by comparing its IPv4 and IPv6 addresses against all other pins on the
+// map and resolving conflicts via checkAndHandleSuperseding.
+func (m *Map) updateStateSuperseded(pin *Pin) {
+	pin.removeStates(StateSuperseded)
+
+	// Update StateSuperseded
+	// Iterate over all Pins in order to find a matching IP address.
+	// In order to prevent false positive matching, we have to go through IPv4
+	// and IPv6 separately.
+	// TODO: This will not scale well beyond about 1000 Hubs.
+
+	// IPv4 Loop
+	if pin.Hub.Info.IPv4 != nil {
+		for _, mapPin := range m.all {
+			// Skip Pin itself
+			if mapPin.Hub.ID == pin.Hub.ID {
+				continue
+			}
+
+			// Check for a matching IPv4 address.
+			if mapPin.Hub.Info.IPv4 != nil && pin.Hub.Info.IPv4.Equal(mapPin.Hub.Info.IPv4) {
+				continueChecking := checkAndHandleSuperseding(pin, mapPin)
+				if !continueChecking {
+					break
+				}
+			}
+		}
+	}
+
+	// IPv6 Loop
+	if pin.Hub.Info.IPv6 != nil {
+		for _, mapPin := range m.all {
+			// Skip Pin itself
+			if mapPin.Hub.ID == pin.Hub.ID {
+				continue
+			}
+
+			// Check for a matching IPv6 address.
+			if mapPin.Hub.Info.IPv6 != nil && pin.Hub.Info.IPv6.Equal(mapPin.Hub.Info.IPv6) {
+				continueChecking := checkAndHandleSuperseding(pin, mapPin)
+				if !continueChecking {
+					break
+				}
+			}
+		}
+	}
+}
+
+// checkAndHandleSuperseding decides whether newPin or existingPin supersedes
+// the other (two Hubs sharing an IP address) and sets StateSuperseded on the
+// loser. It reports whether the caller should continue checking further pins.
+func checkAndHandleSuperseding(newPin, existingPin *Pin) (continueChecking bool) {
+	const (
+		supersedeNone = iota
+		supersedeExisting
+		supersedeNew
+	)
+	var action int
+
+	switch {
+	case newPin.Hub.ID == existingPin.Hub.ID:
+		// Cannot supersede same Hub.
+		// Continue checking.
+		// (Defensive: callers already skip the pin itself.)
+		action = supersedeNone
+
+	// Step 1: Check if only one is active.
+
+	case newPin.State.Has(StateActive) && existingPin.State.HasNoneOf(StateActive):
+		// If only the new Hub is active, supersede the existing one.
+		action = supersedeExisting
+	case newPin.State.HasNoneOf(StateActive) && existingPin.State.Has(StateActive):
+		// If only the existing Hub is active, supersede the new one.
+		action = supersedeNew
+
+	// Step 2: Check if only one is reachable.
+
+	case newPin.State.Has(StateReachable) && existingPin.State.HasNoneOf(StateReachable):
+		// If only the new Hub is reachable, supersede the existing one.
+		action = supersedeExisting
+	case newPin.State.HasNoneOf(StateReachable) && existingPin.State.Has(StateReachable):
+		// If only the existing Hub is reachable, supersede the new one.
+		action = supersedeNew
+
+	// Step 3: Check which one has been seen first.
+
+	case newPin.Hub.FirstSeen.After(existingPin.Hub.FirstSeen):
+		// If the new Hub has been first seen later, supersede the existing one.
+		action = supersedeExisting
+	default:
+		// If the existing Hub has been first seen later, supersede the new one.
+		action = supersedeNew
+	}
+
+	switch action {
+	case supersedeExisting:
+		existingPin.addStates(StateSuperseded)
+		existingPin.pushChanges.Set()
+		// Continue checking, as there might be other Hubs to be superseded.
+		return true
+
+	case supersedeNew:
+		newPin.addStates(StateSuperseded)
+		newPin.pushChanges.Set()
+		// If the new pin is superseded, do _not_ continue, as this will lead to an incorrect state.
+		return false
+
+	case supersedeNone:
+		fallthrough
+	default:
+		// Do nothing, continue checking.
+		return true
+	}
+}
+
+// updateStateHasRequiredInfo recalculates the StateHasRequiredInfo flag.
+// The switch acts as a guard chain: any empty required field matches a case
+// with an empty body, so only the default branch (all fields set) adds the
+// state.
+func (pin *Pin) updateStateHasRequiredInfo() {
+	pin.removeStates(StateHasRequiredInfo)
+
+	// Check for required Hub Information.
+	switch {
+	case len(pin.Hub.Info.Name) == 0:
+	case len(pin.Hub.Info.Group) == 0:
+	case len(pin.Hub.Info.ContactAddress) == 0:
+	case len(pin.Hub.Info.ContactService) == 0:
+	case len(pin.Hub.Info.Hosters) == 0:
+	case len(pin.Hub.Info.Hosters[0]) == 0:
+	case len(pin.Hub.Info.Datacenter) == 0:
+	default:
+		pin.addStates(StateHasRequiredInfo)
+	}
+}
+
+// updateActiveHubs recalculates the StateActive flag of all pins on the map,
+// using a single timestamp for consistency across the whole pass.
+func (m *Map) updateActiveHubs() {
+	now := time.Now().Unix()
+	for _, pin := range m.all {
+		pin.updateStateActive(now)
+	}
+}
+
+// updateStateActive recalculates the StateActive flag: it is set if the Hub
+// announces at least one key that has not yet expired. now is a Unix
+// timestamp in seconds.
+func (pin *Pin) updateStateActive(now int64) {
+	pin.removeStates(StateActive)
+
+	// Check for active key.
+	for _, key := range pin.Hub.Status.Keys {
+		if now < key.Expires {
+			pin.addStates(StateActive)
+			return
+		}
+	}
+}
+
+// recalculateReachableHubs resets the StateReachable flag and hop distance of
+// all pins and then recalculates them by flooding outward from the home hub.
+// It returns ErrHomeHubUnset if no home hub is set.
+func (m *Map) recalculateReachableHubs() error {
+	if m.home == nil {
+		return ErrHomeHubUnset
+	}
+
+	// reset
+	for _, pin := range m.all {
+		pin.removeStates(StateReachable)
+		pin.HopDistance = 0
+		pin.pushChanges.Set()
+	}
+
+	// find all connected Hubs
+	m.home.markReachable(1)
+	return nil
+}
+
+// markReachable marks this pin as reachable at the given hop distance (when
+// that improves on the current state) and recursively propagates to all
+// connected pins with an incremented distance.
+// NOTE(review): the case order matters — a pin that is not yet reachable is
+// marked before the disregard check fires; this appears intentional so the
+// seed pin passed in by recalculateReachableHubs can start the propagation
+// (see the nolint directive) — confirm.
+func (pin *Pin) markReachable(hopDistance int) {
+	switch {
+	case !pin.State.Has(StateReachable):
+		// Pin wasn't reachable before.
+	case hopDistance < pin.HopDistance:
+		// New path has a shorter distance.
+	case pin.State.HasAnyOf(StateSummaryDisregard): //nolint:staticcheck
+		// Ignore disregarded pins for reachability calculation.
+		return
+	default:
+		// Pin is already reachable at same or better distance.
+		return
+	}
+
+	// Update reachability.
+	pin.addStates(StateReachable)
+	pin.HopDistance = hopDistance
+	pin.pushChanges.Set()
+
+	// Propagate to connected Pins.
+	hopDistance++
+	for _, lane := range pin.ConnectedTo {
+		lane.Pin.markReachable(hopDistance)
+	}
+}
+
+// Export returns a list of all set state names, in the order defined by
+// allStates. It returns nil for the empty state.
+func (pinState PinState) Export() []string {
+	// Check if there are no states.
+	if pinState == StateNone {
+		return nil
+	}
+
+	// Collect state names.
+	var stateNames []string
+	for _, state := range allStates {
+		if pinState.Has(state) {
+			stateNames = append(stateNames, state.Name())
+		}
+	}
+
+	return stateNames
+}
+
+// String returns the states as a human readable string, e.g.
+// "Reachable, Active". The empty state is rendered as "None".
+func (pinState PinState) String() string {
+	stateNames := pinState.Export()
+	if len(stateNames) == 0 {
+		return "None"
+	}
+
+	return strings.Join(stateNames, ", ")
+}
+
+// Name returns the name of a single state flag. Combined states (other than
+// the summary values, which also have no individual name) return "Unknown".
+func (pinState PinState) Name() string {
+	switch pinState {
+	case StateNone:
+		return "None"
+	case StateInvalid:
+		return "Invalid"
+	case StateSuperseded:
+		return "Superseded"
+	case StateFailing:
+		return "Failing"
+	case StateOffline:
+		return "Offline"
+	case StateHasRequiredInfo:
+		return "HasRequiredInfo"
+	case StateReachable:
+		return "Reachable"
+	case StateActive:
+		return "Active"
+	case StateTrusted:
+		return "Trusted"
+	case StateUsageDiscouraged:
+		return "UsageDiscouraged"
+	case StateUsageAsHomeDiscouraged:
+		return "UsageAsHomeDiscouraged"
+	case StateUsageAsDestinationDiscouraged:
+		return "UsageAsDestinationDiscouraged"
+	case StateIsHomeHub:
+		return "IsHomeHub"
+	case StateConnectivityIssues:
+		return "ConnectivityIssues"
+	case StateAllowUnencrypted:
+		return "AllowUnencrypted"
+	case StateSummaryRegard, StateSummaryDisregard:
+		// Satisfy exhaustive linter.
+		fallthrough
+	default:
+		return "Unknown"
+	}
+}
diff --git a/spn/navigator/state_test.go b/spn/navigator/state_test.go
new file mode 100644
index 00000000..90d5f37a
--- /dev/null
+++ b/spn/navigator/state_test.go
@@ -0,0 +1,31 @@
+package navigator
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+// TestStates verifies adding/removing state flags and the Has/HasAnyOf
+// matching helpers, including the summary values.
+func TestStates(t *testing.T) {
+	t.Parallel()
+
+	p := &Pin{}
+
+	p.addStates(StateInvalid | StateFailing | StateSuperseded)
+	assert.Equal(t, StateInvalid|StateFailing|StateSuperseded, p.State)
+
+	p.removeStates(StateFailing | StateSuperseded)
+	assert.Equal(t, StateInvalid, p.State)
+
+	p.addStates(StateTrusted | StateActive)
+	assert.True(t, p.State.Has(StateInvalid|StateTrusted))
+	assert.False(t, p.State.Has(StateInvalid|StateSuperseded))
+	assert.True(t, p.State.HasAnyOf(StateInvalid|StateTrusted))
+	assert.True(t, p.State.HasAnyOf(StateInvalid|StateSuperseded))
+	assert.False(t, p.State.HasAnyOf(StateSuperseded|StateFailing))
+
+	assert.False(t, p.State.Has(StateSummaryRegard))
+	assert.False(t, p.State.Has(StateSummaryDisregard))
+	assert.True(t, p.State.HasAnyOf(StateSummaryRegard))
+	assert.True(t, p.State.HasAnyOf(StateSummaryDisregard))
+}
diff --git a/spn/navigator/testdata/main-intel.yml b/spn/navigator/testdata/main-intel.yml
new file mode 100644
index 00000000..62711337
--- /dev/null
+++ b/spn/navigator/testdata/main-intel.yml
@@ -0,0 +1,234 @@
+---
+BootstrapHubs:
+- tcp://[2a01:4f8:172:3753::2]:17#Zwtb8EKMatnMRkW1VaLh8CPV3QswD9iuRU4Sda8uLezUkC # fogos [DE]
+- tcp://[2a01:4f9:2a:d48::2]:17#Zwkwujs345P4ZygNZcEafawTqfZieCBVogQZ3xZPWiu7BU # heleus [FI]
+- tcp://138.201.140.70:17#Zwtb8EKMatnMRkW1VaLh8CPV3QswD9iuRU4Sda8uLezUkC # fogos [DE]
+- tcp://95.216.13.61:17#Zwkwujs345P4ZygNZcEafawTqfZieCBVogQZ3xZPWiu7BU # heleus [FI]
+
+Hubs:
+  ZwhpYS1jWzXvPYKFhJqh1ZD3bKquLLoSoJ6RjeshmcXoFx: # voria [US]
+    Trusted: true
+    VerifiedOwner: Safing
+    Override:
+      CountryCode: US
+      Coordinates: # Ashburn, VA
+        Latitude: 39.04
+        Longitude: -77.48
+        AccuracyRadius: 20
+  ZwkAKBoyEd3PkE5RGDNmghahzHiBiTZA7Mg3XH7X3HjS39: # noru [US]
+    Trusted: true
+    VerifiedOwner: Safing
+  ZwkapJz5HFWpgd9PHsZLVueBu9PDmTJHKp382Wm9MB2EB7: # lovas [US]
+    Trusted: true
+    VerifiedOwner: Safing
+    Override:
+      CountryCode: US
+      Coordinates: # Los Angeles, CA
+        Latitude: 34.03
+        Longitude: -118.15
+        AccuracyRadius: 20
+  ZwkLShvVYvQFGmpY1MNhSSPXCktojywMVtv2N86mFbNH4w: # tooina [CA]
+    Trusted: true
+    VerifiedOwner: Safing
+  Zwkwujs345P4ZygNZcEafawTqfZieCBVogQZ3xZPWiu7BU: # heleus [FI]
+    Trusted: true
+    VerifiedOwner: Safing
+  Zwm72XieV6aeNKbwtJW8JdPUwT1hopQaLanLXjxcTfV3B9: # mergan [US]
+    Trusted: true
+    VerifiedOwner: Safing
+  Zwmp5SgUK9FidWBSCDK4d6dyRp3vhz3dQdwma1E4TMfiRw: # grenenia [FR]
+    Trusted: true
+    VerifiedOwner: Safing
+    Override:
+      CountryCode: FR
+      Coordinates: # Gravelines
+        Latitude: 50.59
+        Longitude: 2.07
+        AccuracyRadius: 20
+  ZwnFd1bSQrBegPZqFkS7DZU29x4PbojpFmTQFUnzQoicKp: # telos [IL]
+    Trusted: true
+    VerifiedOwner: Safing
+  Zwpg5FoXYVYidzgbdvDyvBBcrArmmHvK9nH3v7KDHiywtt: # melcor [PL]
+    Trusted: true
+    VerifiedOwner: Safing
+  ZwpsJpwngWyba54AbVkCawcRQ2HP37RRQAgj5LHNR2svRf: # soalis [AU]
+    Trusted: true
+    VerifiedOwner: Safing
+  Zwpy5hbrQkKznJwbUmn9WpJwGkpWD9VqE2pi9yfMDQM7PK: # rin9 [FR]
+    Trusted: true
+    VerifiedOwner: Safing
+    Override:
+      CountryCode: FR
+      Coordinates: # Strasbourg
+        Latitude: 48.35
+        Longitude: 7.45
+        AccuracyRadius: 20
+  ZwqANMrhcyJZb8cRMEd3FdPcXY7ZbvviPPfTUQpLNau12J: # sulkam [GB]
+    Trusted: true
+    VerifiedOwner: Safing
+  ZwwBspMhigqcEYv2cryipzJsi4vkHhnBqUmDmkJ2xizGFx: # surn [US]
+    Trusted: true
+    VerifiedOwner: Safing
+    Override:
+      CountryCode: US
+      Coordinates: # Seattle, WA
+        Latitude: 47.36
+        Longitude: -122.19
+        AccuracyRadius: 20
+  ZwsvsES3SHz1VLnwFPxDbW6DC8Esp1PiEtUHxGnm4BTYHt: # fungvis [DE]
+    Trusted: true
+    VerifiedOwner: Safing
+  Zwtb8EKMatnMRkW1VaLh8CPV3QswD9iuRU4Sda8uLezUkC: # fogos [DE]
+    Trusted: true
+    VerifiedOwner: Safing
+  ZwtfvBuq5wkKYRth8rGCuGyp42nMe4doASUDJiDHJ8iucn: # vamalla [AT]
+    Trusted: true
+    VerifiedOwner: Safing
+  ZwtjwvdPxG4u7oB2zmNJFvsDy5VDLT9UArDkYDGfC9bkDt: # carros [US]
+    Trusted: true
+    VerifiedOwner: Safing
+  ZwvMZt6RcrrRuCdufjApnosxWbzsP8rTPRuHGeHu5KU241: # syniru [SG]
+    Trusted: true
+    VerifiedOwner: Safing
+    Override:
+      CountryCode: SG
+      Coordinates: # Singapore
+        Latitude: 1.18
+        Longitude: 103.50
+        AccuracyRadius: 20
+  ZwvyDLz8221fcSBw6GKZNDnwEn4YmE9m7JPieLUVe7iGR9: # calla [CA]
+    Trusted: true
+    VerifiedOwner: Safing
+  Zwvz9S6uyxn4ww1JGqJiisGMDmH2hz6mhwutmJXvTtwQww: # cidai [US]
+    Trusted: true
+    VerifiedOwner: Safing
+  ZwxJvZDZH18RUEQ3oFcR5uCqeXJaqkoi9P5Sj1aZ62HPin: # nutis [DE]
+    Trusted: true
+    VerifiedOwner: Safing
+  ZwvPQVFkoDbx3J6qThNwfLHZqwvFgUYLYtirCHVd7FfjBz: # perturn [CZ]
+    Trusted: true
+    VerifiedOwner: Safing
+    Override:
+      CountryCode: CZ
+      Coordinates: # Prague
+        Latitude: 50.05
+        Longitude: 14.25
+        AccuracyRadius: 100
+  Zwj52Q7d5ezvFk7HKB42dBtFu152bC9JasYF7BHB724RfG: # sono [NL]
+    Trusted: true
+    VerifiedOwner: Safing
+  ZwmhYMEmw36CzgVUp9sLjoK3gkVDWdMPiupEcekpTAXur8: # ivtos [TR]
+    Trusted: true
+    VerifiedOwner: Safing
+    Override:
+      CountryCode: TR
+      Coordinates: # Izmir
+        Latitude: 38.25
+        Longitude: 27.90
+        AccuracyRadius: 20
+  Zwm9JX1hBNUUvSYc3gpMhmw84ay45SuyXE7D2UgETM7XCn: # porcania [PT]
+    Trusted: true
+    VerifiedOwner: Safing
+  ZwxE83uRV9LcM8Bm3QjXjjejNRhBBBJAethPf14R6gcZwf: # steepeus [SE]
+    Trusted: true
+    VerifiedOwner: Safing
+
+InfoOverrides:
+  workaround:
+    for: bug
+
+AdviseOnlyTrustedHubs: false
+AdviseOnlyTrustedHomeHubs: true
+AdviseOnlyTrustedDestinationHubs: false
+
+HomeHubAdvisory:
+- "- Zwj52Q7d5ezvFk7HKB42dBtFu152bC9JasYF7BHB724RfG" # sono [NL] is too slow for home hub
+- "- Zwm9JX1hBNUUvSYc3gpMhmw84ay45SuyXE7D2UgETM7XCn" # porcania [PT] is too slow for home hub
+- "- ZwmhYMEmw36CzgVUp9sLjoK3gkVDWdMPiupEcekpTAXur8" # ivtos [TR] is too slow for home hub
+- "- ZwvPQVFkoDbx3J6qThNwfLHZqwvFgUYLYtirCHVd7FfjBz" # perturn [CZ] is too slow for home hub
+
+Regions:
+- ID: europe
+  Name: Europe
+  RegionalMinLanes: 5
+  RegionalMinLanesPerHub: 0.7
+  RegionalMaxLanesOnHub: 2
+  SatelliteMinLanes: 2
+  SatelliteMinLanesPerHub: 0.3
+  InternalMinLanesOnHub: 3
+  InternalMaxHops: 3
+  MemberPolicy:
+  - "+ AD"
+  - "+ AL"
+  - "+ AT"
+  - "+ AX"
+  - "+ BA"
+  - "+ BE"
+  - "+ BG"
+  - "+ BY"
+  - "+ CH"
+  - "+ CZ"
+  - "+ DE"
+  - "+ DK"
+  - "+ EE"
+  - "+ ES"
+  - "+ FI"
+  - "+ FO"
+  - "+ FR"
+  - "+ GB"
+  - "+ GG"
+  - "+ GI"
+  - "+ GR"
+  - "+ HR"
+  - "+ HU"
+  - "+ IE"
+  - "+ IM"
+  - "+ IS"
+  - "+ IT"
+  - "+ JE"
+  - "+ LI"
+  - "+ LT"
+  - "+ LU"
+  - "+ LV"
+  - "+ MC"
+  - "+ MD"
+  - "+ ME"
+  - "+ MK"
+  - "+ MT"
+  - "+ NL"
+  - "+ NO"
+  - "+ PL"
+  - "+ PT"
+  - "+ RO"
+  - "+ RS"
+  - "+ RU"
+  - "+ SE"
+  - "+ SI"
+  - "+ SJ"
+  - "+ SK"
+  - "+ SM"
+  - "+ UA"
+  - "+ VA"
+- ID: north-america
+  Name: "North America"
+  RegionalMinLanes: 5
+  RegionalMinLanesPerHub: 0.7
+  RegionalMaxLanesOnHub: 2
+  SatelliteMinLanes: 2
+  SatelliteMinLanesPerHub: 0.3
+  InternalMinLanesOnHub: 3
+  InternalMaxHops: 3
+  MemberPolicy:
+  - "+ BM"
+  - "+ BZ"
+  - "+ CA"
+  - "+ CR"
+  - "+ GL"
+  - "+ GT"
+  - "+ HN"
+  - "+ MX"
+  - "+ NI"
+  - "+ PA"
+  - "+ PM"
+  - "+ SV"
+  - "+ US"
\ No newline at end of file
diff --git a/spn/navigator/update.go b/spn/navigator/update.go
new file mode 100644
index 00000000..73f52811
--- /dev/null
+++ b/spn/navigator/update.go
@@ -0,0 +1,776 @@
+package navigator
+
+import (
+	"context"
+	"fmt"
+	"path"
+	"strings"
+	"time"
+
+	"github.com/tevino/abool"
+	"golang.org/x/exp/slices"
+
+	"github.com/safing/portbase/config"
+	"github.com/safing/portbase/database"
+	"github.com/safing/portbase/database/query"
+	"github.com/safing/portbase/database/record"
+	"github.com/safing/portbase/log"
+	"github.com/safing/portbase/modules"
+	"github.com/safing/portbase/utils"
+	"github.com/safing/portmaster/service/intel/geoip"
+	"github.com/safing/portmaster/service/netenv"
+	"github.com/safing/portmaster/service/profile"
+	"github.com/safing/portmaster/spn/hub"
+)
+
+// db is the shared database interface used by the navigator package for
+// local, internal access to stored Hub records.
+var db = database.NewInterface(&database.Options{
+	Local:    true,
+	Internal: true,
+})
+
+// InitializeFromDatabase loads all Hubs from the given database prefix and adds them to the Map.
+// It returns an error if the query cannot be started or the iteration fails;
+// individual unparsable records are logged and skipped.
+func (m *Map) InitializeFromDatabase() error {
+	m.Lock()
+	defer m.Unlock()
+
+	// start query for Hubs
+	iter, err := db.Query(query.New(hub.MakeHubDBKey(m.Name, "")))
+	if err != nil {
+		return fmt.Errorf("failed to start query for initialization feed of %s map: %w", m.Name, err)
+	}
+
+	// update navigator
+	var hubCount int
+	log.Tracef("spn/navigator: starting to initialize %s map from database", m.Name)
+	for r := range iter.Next {
+		h, err := hub.EnsureHub(r)
+		if err != nil {
+			log.Warningf("spn/navigator: could not parse hub %q while initializing %s map: %s", r.Key(), m.Name, err)
+			continue
+		}
+
+		hubCount++
+		// Map is already locked by us (lockMap=false); the freshly loaded hub
+		// still needs its own lock (lockHub=true).
+		m.updateHub(h, false, true)
+	}
+	switch {
+	case iter.Err() != nil:
+		return fmt.Errorf("failed to (fully) initialize %s map: %w", m.Name, iter.Err())
+	case hubCount == 0:
+		log.Warningf("spn/navigator: no hubs available for %s map - this is normal on first start", m.Name)
+	default:
+		log.Infof("spn/navigator: added %d hubs from database to %s map", hubCount, m.Name)
+	}
+	return nil
+}
+
+// UpdateHook updates a Map from database changes.
+type UpdateHook struct {
+	database.HookBase
+	// m is the Map that this hook keeps in sync with the database.
+	m *Map
+}
+
+// UsesPrePut implements the Hook interface and signals that PrePut must be
+// called for every record put on the hooked database prefix.
+func (hook *UpdateHook) UsesPrePut() bool {
+	return true
+}
+
+// PrePut implements the Hook interface. It mirrors database writes into the
+// Map: deleted records remove the corresponding hub, other records are parsed
+// as hubs and merged into the Map. The record is always passed through
+// unchanged.
+func (hook *UpdateHook) PrePut(r record.Record) (record.Record, error) {
+	// Remove deleted hubs from the map.
+	if r.Meta().IsDeleted() {
+		hook.m.RemoveHub(path.Base(r.Key()))
+		return r, nil
+	}
+
+	// Ensure we have a hub and update it in navigation map.
+	h, err := hub.EnsureHub(r)
+	if err != nil {
+		// Not fatal: other record types may live under the same prefix.
+		log.Debugf("spn/navigator: record %s is not a hub", r.Key())
+	} else {
+		hook.m.updateHub(h, true, false)
+	}
+
+	return r, nil
+}
+
+// RegisterHubUpdateHook registers a database pre-put hook that updates all
+// Hubs saved at the given database prefix.
+// The hook handle is kept on the Map so it can be cancelled later via
+// CancelHubUpdateHook.
+func (m *Map) RegisterHubUpdateHook() (err error) {
+	m.hubUpdateHook, err = database.RegisterHook(
+		query.New(hub.MakeHubDBKey(m.Name, "")),
+		&UpdateHook{m: m},
+	)
+	return err
+}
+
+// CancelHubUpdateHook cancels the map's update hook.
+// It is a no-op if no hook was registered; cancellation errors are only logged.
+func (m *Map) CancelHubUpdateHook() {
+	if m.hubUpdateHook == nil {
+		return
+	}
+	if err := m.hubUpdateHook.Cancel(); err != nil {
+		log.Warningf("spn/navigator: failed to cancel update hook for map %s: %s", m.Name, err)
+	}
+}
+
+// RemoveHub removes a Hub from the Map.
+// It also detaches all lanes pointing to the removed hub from the peers and
+// pushes the resulting changes to subscribers.
+func (m *Map) RemoveHub(id string) {
+	m.Lock()
+	defer m.Unlock()
+
+	// Get pin and remove it from the map, if it exists.
+	pin, ok := m.all[id]
+	if !ok {
+		return
+	}
+	delete(m.all, id)
+
+	// Remove lanes from removed Pin.
+	// Note: the loop variable shadows the id parameter; here it is the peer's ID.
+	for id := range pin.ConnectedTo {
+		// Remove Lane from peer.
+		peer, ok := m.all[id]
+		if ok {
+			delete(peer.ConnectedTo, pin.Hub.ID)
+			peer.pushChanges.Set()
+		}
+	}
+
+	// Push update to subscriptions.
+	// The exported pin is marked as deleted so subscribers drop it too.
+	export := pin.Export()
+	export.Meta().Delete()
+	mapDBController.PushUpdate(export)
+	// Push lane changes.
+	m.PushPinChanges()
+}
+
+// UpdateHub updates a Hub on the Map.
+// It locks both the Map and the Hub; use updateHub directly when a lock is
+// already held.
+func (m *Map) UpdateHub(h *hub.Hub) {
+	m.updateHub(h, true, true)
+}
+
+// updateHub creates or updates the Pin for the given Hub on the Map and then
+// recalculates all derived data: location, info overrides, hub cost,
+// measurements, pin states, lanes, and reachability.
+// lockMap and lockHub control whether this function acquires the Map lock
+// and the Hub lock; callers that already hold the respective lock must pass
+// false to avoid deadlock.
+func (m *Map) updateHub(h *hub.Hub, lockMap, lockHub bool) {
+	if lockMap {
+		m.Lock()
+		defer m.Unlock()
+	}
+	if lockHub {
+		h.Lock()
+		defer h.Unlock()
+	}
+
+	// Hub requires both Info and Status to be added to the Map.
+	if h.Info == nil || h.Status == nil {
+		return
+	}
+
+	// Create or update Pin.
+	pin, ok := m.all[h.ID]
+	if ok {
+		pin.Hub = h
+	} else {
+		pin = &Pin{
+			Hub:         h,
+			ConnectedTo: make(map[string]*Lane),
+			pushChanges: abool.New(),
+		}
+		m.all[h.ID] = pin
+	}
+	pin.pushChanges.Set()
+
+	// 1. Update Pin Data.
+
+	// Add/Update location data from IP addresses.
+	pin.updateLocationData()
+
+	// Override Pin Data.
+	m.updateInfoOverrides(pin)
+
+	// Update Hub cost.
+	pin.Cost = CalculateHubCost(pin.Hub.Status.Load)
+
+	// Ensure measurements are set when enabled.
+	// This only initializes measurements once per pin; later measurement
+	// updates happen elsewhere.
+	if m.measuringEnabled && pin.measurements == nil {
+		// Get shared measurements.
+		pin.measurements = pin.Hub.GetMeasurementsWithLockedHub()
+
+		// Update cost calculation.
+		latency, _ := pin.measurements.GetLatency()
+		capacity, _ := pin.measurements.GetCapacity()
+		pin.measurements.SetCalculatedCost(CalculateLaneCost(latency, capacity))
+
+		// Update geo proximity.
+		// Get own location.
+		var myLocation *geoip.Location
+		switch {
+		case m.home != nil && m.home.LocationV4 != nil:
+			myLocation = m.home.LocationV4
+		case m.home != nil && m.home.LocationV6 != nil:
+			myLocation = m.home.LocationV6
+		default:
+			locations, ok := netenv.GetInternetLocation()
+			if ok {
+				myLocation = locations.Best().LocationOrNil()
+			}
+		}
+		// Calculate proximity with available location.
+		if myLocation != nil {
+			switch {
+			case pin.LocationV4 != nil:
+				pin.measurements.SetGeoProximity(
+					myLocation.EstimateNetworkProximity(pin.LocationV4),
+				)
+			case pin.LocationV6 != nil:
+				pin.measurements.SetGeoProximity(
+					myLocation.EstimateNetworkProximity(pin.LocationV6),
+				)
+			}
+		}
+	}
+
+	// 2. Update Pin States.
+
+	// Update the invalid status of the Pin.
+	if pin.Hub.InvalidInfo || pin.Hub.InvalidStatus {
+		pin.addStates(StateInvalid)
+	} else {
+		pin.removeStates(StateInvalid)
+	}
+
+	// Update online status of the Pin.
+	if pin.Hub.HasFlag(hub.FlagOffline) || pin.Hub.Status.Version == hub.VersionOffline {
+		pin.addStates(StateOffline)
+	} else {
+		pin.removeStates(StateOffline)
+	}
+
+	// Update allow-unencrypted status of the Pin.
+	if pin.Hub.HasFlag(hub.FlagAllowUnencrypted) {
+		pin.addStates(StateAllowUnencrypted)
+	} else {
+		pin.removeStates(StateAllowUnencrypted)
+	}
+
+	// Update from status flags.
+	if pin.Hub.HasFlag(hub.FlagNetError) {
+		pin.addStates(StateConnectivityIssues)
+	} else {
+		pin.removeStates(StateConnectivityIssues)
+	}
+
+	// Update Trust and Advisory Statuses.
+	m.updateIntelStatuses(pin, cfgOptionTrustNodeNodes())
+
+	// Update Statuses derived from Hub.
+	pin.updateStateHasRequiredInfo()
+	pin.updateStateActive(time.Now().Unix())
+
+	// 3. Update Lanes.
+
+	// Mark all existing Lanes as inactive.
+	for _, lane := range pin.ConnectedTo {
+		lane.active = false
+	}
+
+	// Update Lanes (connections to other Hubs) from the Status.
+	for _, lane := range pin.Hub.Status.Lanes {
+		// Check if this is a Lane to itself.
+		if lane.ID == pin.Hub.ID {
+			continue
+		}
+
+		// First, get the Lane peer.
+		peer, ok := m.all[lane.ID]
+		if !ok {
+			// We need to wait for peer to be added to the Map.
+			continue
+		}
+
+		m.updateHubLane(pin, lane, peer)
+	}
+
+	// Remove all inactive/abandoned Lanes from both Pins.
+	var removedLanes bool
+	for id, lane := range pin.ConnectedTo {
+		if !lane.active {
+			// Remove Lane from this Pin.
+			delete(pin.ConnectedTo, id)
+			pin.pushChanges.Set()
+			removedLanes = true
+			// Remove Lane from peer.
+			peer, ok := m.all[id]
+			if ok {
+				delete(peer.ConnectedTo, pin.Hub.ID)
+				peer.pushChanges.Set()
+			}
+		}
+	}
+
+	// Fully recalculate reachability if any Lanes were removed.
+	if removedLanes {
+		err := m.recalculateReachableHubs()
+		if err != nil {
+			log.Warningf("spn/navigator: failed to recalculate reachable Hubs: %s", err)
+		}
+	}
+
+	// 4. Update states that depend on other information.
+
+	// Check if hub is superseded or if it supersedes another hub.
+	m.updateStateSuperseded(pin)
+
+	// Push updates.
+	m.PushPinChanges()
+}
+
+const (
+	// minUnconfirmedLatency is the lowest latency assumed for a lane where at
+	// least one side has not reported a measurement.
+	minUnconfirmedLatency  = 10 * time.Millisecond
+	// maxUnconfirmedCapacity is the highest capacity assumed for a lane where
+	// at least one side has not reported a measurement.
+	maxUnconfirmedCapacity = 100000000 // 100Mbit/s
+
+	// Capacity reference points in bit/s.
+	cap1Mbit   float32 = 1000000
+	cap10Mbit  float32 = 10000000
+	cap100Mbit float32 = 100000000
+	cap1Gbit   float32 = 1000000000
+	cap10Gbit  float32 = 10000000000
+)
+
+// updateHubLane updates a lane between two Hubs on the Map.
+// pin must already be locked, lane belongs to pin.
+// peer will be locked by this function.
+// The lane is only established once both sides advertise it; the combined
+// latency uses the worse (greater) value and the combined capacity the worse
+// (lesser) value of the two advertisements.
+func (m *Map) updateHubLane(pin *Pin, lane *hub.Lane, peer *Pin) {
+	peer.Hub.Lock()
+	defer peer.Hub.Unlock()
+
+	// Then get the corresponding Lane from that peer, if it exists.
+	var peerLane *hub.Lane
+	for _, possiblePeerLane := range peer.Hub.Status.Lanes {
+		if possiblePeerLane.ID == pin.Hub.ID {
+			peerLane = possiblePeerLane
+			// We have found the corresponding peerLane, break the loop.
+			break
+		}
+	}
+	if peerLane == nil {
+		// The peer obviously does not advertise a Lane to this Hub.
+		// Maybe this is a fresh Lane, and the message has not yet reached us.
+		// Alternatively, the Lane could have been recently removed.
+
+		// Abandon this Lane for now.
+		delete(pin.ConnectedTo, peer.Hub.ID)
+		return
+	}
+
+	// Calculate combined latency, use the greater value.
+	combinedLatency := lane.Latency
+	if peerLane.Latency > combinedLatency {
+		combinedLatency = peerLane.Latency
+	}
+	// Enforce minimum value if at least one side has no data.
+	if (lane.Latency == 0 || peerLane.Latency == 0) && combinedLatency < minUnconfirmedLatency {
+		combinedLatency = minUnconfirmedLatency
+	}
+
+	// Calculate combined capacity, use the lesser existing value.
+	combinedCapacity := lane.Capacity
+	if combinedCapacity == 0 || (peerLane.Capacity > 0 && peerLane.Capacity < combinedCapacity) {
+		combinedCapacity = peerLane.Capacity
+	}
+	// Enforce maximum value if at least one side has no data.
+	if (lane.Capacity == 0 || peerLane.Capacity == 0) && combinedCapacity > maxUnconfirmedCapacity {
+		combinedCapacity = maxUnconfirmedCapacity
+	}
+
+	// Calculate lane cost.
+	laneCost := CalculateLaneCost(combinedLatency, combinedCapacity)
+
+	// Add Lane to both Pins and override old values in the process.
+	pin.ConnectedTo[peer.Hub.ID] = &Lane{
+		Pin:      peer,
+		Capacity: combinedCapacity,
+		Latency:  combinedLatency,
+		Cost:     laneCost,
+		active:   true,
+	}
+	peer.ConnectedTo[pin.Hub.ID] = &Lane{
+		Pin:      pin,
+		Capacity: combinedCapacity,
+		Latency:  combinedLatency,
+		Cost:     laneCost,
+		active:   true,
+	}
+	peer.pushChanges.Set()
+
+	// Check for reachability.
+	// A new lane may make either side reachable through the other.
+	if pin.State.Has(StateReachable) {
+		peer.markReachable(pin.HopDistance + 1)
+	}
+	if peer.State.Has(StateReachable) {
+		pin.markReachable(peer.HopDistance + 1)
+	}
+}
+
+// ResetFailingStates resets the failing state on all pins.
+// The changed pins are pushed to subscribers afterwards.
+func (m *Map) ResetFailingStates(ctx context.Context) {
+	m.Lock()
+	defer m.Unlock()
+
+	// Clear the failing state on every pin, then publish the new state.
+	for _, p := range m.all {
+		p.ResetFailingState()
+	}
+	m.PushPinChanges()
+}
+
+// updateFailingStates is a periodic task that clears the failing state from
+// pins whose failing condition has expired. ctx and task are unused but
+// required by the modules task signature.
+func (m *Map) updateFailingStates(ctx context.Context, task *modules.Task) error {
+	m.Lock()
+	defer m.Unlock()
+
+	for _, pin := range m.all {
+		if pin.State.Has(StateFailing) && !pin.IsFailing() {
+			pin.removeStates(StateFailing)
+		}
+	}
+
+	return nil
+}
+
+// updateStates is a periodic task that collects discontinued and obsolete
+// hubs for deletion and then refreshes the active and reachable states of
+// all pins. ctx and task are unused but required by the modules task
+// signature.
+func (m *Map) updateStates(ctx context.Context, task *modules.Task) error {
+	var toDelete []string
+
+	m.Lock()
+	defer m.Unlock()
+
+pinLoop:
+	for _, pin := range m.all {
+		// Check for discontinued Hubs.
+		if m.intel != nil {
+			hubIntel, ok := m.intel.Hubs[pin.Hub.ID]
+			if ok && hubIntel.Discontinued {
+				toDelete = append(toDelete, pin.Hub.ID)
+				log.Infof("spn/navigator: deleting discontinued %s", pin.Hub)
+				continue pinLoop
+			}
+		}
+		// Check for obsoleted Hubs.
+		if pin.State.HasNoneOf(StateActive) && pin.Hub.Obsolete() {
+			toDelete = append(toDelete, pin.Hub.ID)
+			log.Infof("spn/navigator: deleting obsolete %s", pin.Hub)
+		}
+	}
+
+	// Delete hubs async, as deleting triggers a couple hooks that lock the map.
+	// This must happen after the loop: starting the worker inside the loop
+	// would spawn one worker per remaining pin once the first deletion
+	// candidate was found, deleting the same hubs repeatedly and racing on
+	// the still-growing toDelete slice.
+	if len(toDelete) > 0 {
+		module.StartWorker("delete hubs", func(_ context.Context) error {
+			for _, idToDelete := range toDelete {
+				err := hub.RemoveHubAndMsgs(m.Name, idToDelete)
+				if err != nil {
+					log.Warningf("spn/navigator: failed to delete Hub %s: %s", idToDelete, err)
+				}
+			}
+			return nil
+		})
+	}
+
+	// Update StateActive.
+	m.updateActiveHubs()
+
+	// Update StateReachable.
+	return m.recalculateReachableHubs()
+}
+
+// AddBootstrapHubs adds the given bootstrap hubs to the map.
+// It locks the Map; use addBootstrapHubs when the lock is already held.
+func (m *Map) AddBootstrapHubs(bootstrapTransports []string) error {
+	m.Lock()
+	defer m.Unlock()
+
+	return m.addBootstrapHubs(bootstrapTransports)
+}
+
+// addBootstrapHubs adds the given bootstrap transports to the map.
+// Individual failures are logged and skipped; an error (the last one seen)
+// is only returned if no bootstrap hub could be added at all.
+// The caller must hold the Map lock.
+func (m *Map) addBootstrapHubs(bootstrapTransports []string) error {
+	var anyAdded bool
+	var lastErr error
+	for _, bootstrapTransport := range bootstrapTransports {
+		err := m.addBootstrapHub(bootstrapTransport)
+		if err != nil {
+			log.Warningf("spn/navigator: failed to add bootstrap hub %q to map %s: %s", bootstrapTransport, m.Name, err)
+			lastErr = err
+		} else {
+			anyAdded = true
+		}
+	}
+
+	// Only report failure if nothing was added at all.
+	if lastErr != nil && !anyAdded {
+		return lastErr
+	}
+	return nil
+}
+
+// addBootstrapHub parses a single bootstrap transport definition and adds or
+// updates the corresponding hub on the map. If a hub with the same ID already
+// exists, the IP address must match the existing one; the transport is added
+// if not yet known. The caller must hold the Map lock.
+func (m *Map) addBootstrapHub(bootstrapTransport string) error {
+	// Parse bootstrap hub.
+	transport, hubID, hubIP, err := hub.ParseBootstrapHub(bootstrapTransport)
+	if err != nil {
+		return fmt.Errorf("invalid bootstrap hub: %w", err)
+	}
+
+	// Check if hub already exists.
+	var h *hub.Hub
+	pin, ok := m.all[hubID]
+	if ok {
+		h = pin.Hub
+	} else {
+		h = &hub.Hub{
+			ID:  hubID,
+			Map: m.Name,
+			Info: &hub.Announcement{
+				ID: hubID,
+			},
+			Status:    &hub.Status{},
+			FirstSeen: time.Now(), // Do not garbage collect bootstrap hubs.
+		}
+	}
+
+	// Add IP if it does not yet exist.
+	if hubIP4 := hubIP.To4(); hubIP4 != nil {
+		if h.Info.IPv4 == nil {
+			h.Info.IPv4 = hubIP4
+		} else if !h.Info.IPv4.Equal(hubIP4) {
+			return fmt.Errorf("additional bootstrap entry with same ID but mismatching IP address: %s", hubIP)
+		}
+	} else {
+		if h.Info.IPv6 == nil {
+			h.Info.IPv6 = hubIP
+		} else if !h.Info.IPv6.Equal(hubIP) {
+			return fmt.Errorf("additional bootstrap entry with same ID but mismatching IP address: %s", hubIP)
+		}
+	}
+
+	// Add transport if it does not yet exist.
+	t := transport.String()
+	if !utils.StringInSlice(h.Info.Transports, t) {
+		h.Info.Transports = append(h.Info.Transports, t)
+	}
+
+	// Add/update to map for bootstrapping.
+	// Map lock is held by the caller; the hub is new or already part of the
+	// map, so no hub lock is taken here either.
+	m.updateHub(h, false, false)
+	log.Infof("spn/navigator: added/updated bootstrap %s to map %s", h, m.Name)
+	return nil
+}
+
+// UpdateConfigQuickSettings updates config quick settings with available countries.
+// It refreshes the country exclude lists for the home and transit policies,
+// the country select lists for the exit and DNS exit policies, and the
+// verified owner list for the trust-nodes setting.
+func (m *Map) UpdateConfigQuickSettings(ctx context.Context) error {
+	ctx, tracer := log.AddTracer(ctx)
+	tracer.Trace("navigator: updating SPN rules country quick settings")
+	defer tracer.Submit()
+
+	// Only regard trusted hubs; destinations must not be the current home hub.
+	opts := m.DefaultOptions()
+	opts.Home = &HomeHubOptions{
+		Regard: StateTrusted,
+	}
+	opts.Destination = &DestinationHubOptions{
+		Regard:    StateTrusted,
+		Disregard: StateIsHomeHub,
+	}
+
+	// Home Policy.
+	if err := m.updateQuickSettingExcludeCountryList(ctx, "spn/homePolicy", opts, HomeHub); err != nil {
+		return err
+	}
+	// Transit Policy.
+	if err := m.updateQuickSettingExcludeCountryList(ctx, profile.CfgOptionTransitHubPolicyKey, opts, TransitHub); err != nil {
+		return err
+	}
+	// Exit Policy.
+	if err := m.updateSelectRuleCountryList(ctx, profile.CfgOptionExitHubPolicyKey, opts, DestinationHub); err != nil {
+		return err
+	}
+	// DNS Exit Policy.
+	if err := m.updateSelectRuleCountryList(ctx, "spn/dnsExitPolicy", opts, DestinationHub); err != nil {
+		return err
+	}
+
+	// Trust Nodes.
+	if err := m.updateQuickSettingVerifiedOwnerList(ctx, "spn/trustNodes"); err != nil {
+		return err
+	}
+
+	tracer.Trace("navigator: finished updating SPN rules country quick settings")
+	return nil
+}
+
+// updateQuickSettingExcludeCountryList sets the quick settings of the given
+// config option to one "Exclude <country>" entry per country currently
+// available on the map for the given hub type, sorted by country name.
+func (m *Map) updateQuickSettingExcludeCountryList(ctx context.Context, configKey string, opts *Options, matchFor HubType) error {
+	// Get config option.
+	cfgOption, err := config.GetOption(configKey)
+	if err != nil {
+		return fmt.Errorf("failed to get config option %s: %w", configKey, err)
+	}
+
+	// Get list of countries for this config option.
+	countries := m.GetAvailableCountries(opts, matchFor)
+	// Convert to list.
+	countryList := make([]*geoip.CountryInfo, 0, len(countries))
+	for _, country := range countries {
+		countryList = append(countryList, country)
+	}
+	// Sort list.
+	// Type arguments are inferred; explicit instantiation is redundant.
+	slices.SortFunc(countryList, func(a, b *geoip.CountryInfo) int {
+		return strings.Compare(a.Name, b.Name)
+	})
+
+	// Compile list of quick settings.
+	quickSettings := make([]config.QuickSetting, 0, len(countries))
+	for _, country := range countryList {
+		quickSettings = append(quickSettings, config.QuickSetting{
+			Name:   fmt.Sprintf("Exclude %s (%s)", country.Name, country.Code),
+			Value:  []string{fmt.Sprintf("- %s", country.Code)},
+			Action: config.QuickMergeTop,
+		})
+	}
+
+	// Lock config option and set new quick settings.
+	cfgOption.Lock()
+	defer cfgOption.Unlock()
+	cfgOption.Annotations[config.QuickSettingsAnnotation] = quickSettings
+
+	log.Tracer(ctx).Debugf("navigator: updated %d countries in quick settings for %s", len(quickSettings), configKey)
+	return nil
+}
+
+// selectCountry is a config quick setting with an optional flag identifier
+// (ISO country code or "EU") used by the UI to display a flag icon.
+type selectCountry struct {
+	config.QuickSetting
+	FlagID string
+}
+
+// updateSelectRuleCountryList sets the quick settings of the given config
+// option to "select one region" entries: the special EU and US/Canada
+// presets, one entry per available country, and one per continent, each
+// replacing the current rule list ("+ <code>" followed by "- *").
+func (m *Map) updateSelectRuleCountryList(ctx context.Context, configKey string, opts *Options, matchFor HubType) error {
+	// Get config option.
+	cfgOption, err := config.GetOption(configKey)
+	if err != nil {
+		return fmt.Errorf("failed to get config option %s: %w", configKey, err)
+	}
+
+	// Get list of countries for this config option.
+	countries := m.GetAvailableCountries(opts, matchFor)
+	// Convert to list.
+	countryList := make([]*geoip.CountryInfo, 0, len(countries))
+	for _, country := range countries {
+		countryList = append(countryList, country)
+	}
+	// Sort list.
+	// Type arguments are inferred; explicit instantiation is redundant.
+	slices.SortFunc(countryList, func(a, b *geoip.CountryInfo) int {
+		return strings.Compare(a.Name, b.Name)
+	})
+
+	// Get continents from countries.
+	continents := make(map[string]*geoip.ContinentInfo)
+	for _, country := range countryList {
+		continents[country.Continent.Code] = &country.Continent
+	}
+	// Convert to list.
+	continentList := make([]*geoip.ContinentInfo, 0, len(continents))
+	for _, continent := range continents {
+		continentList = append(continentList, continent)
+	}
+	// Sort list.
+	slices.SortFunc(continentList, func(a, b *geoip.ContinentInfo) int {
+		return strings.Compare(a.Name, b.Name)
+	})
+
+	// Start compiling all options.
+	selections := make([]selectCountry, 0, len(continents)+len(countries)+2)
+
+	// Add EU as special region.
+	selections = append(selections, selectCountry{
+		QuickSetting: config.QuickSetting{
+			Name:   "European Union",
+			Value:  []string{"+ AT", "+ BE", "+ BG", "+ CY", "+ CZ", "+ DE", "+ DK", "+ EE", "+ ES", "+ FI", "+ FR", "+ GR", "+ HR", "+ HU", "+ IE", "+ IT", "+ LT", "+ LU", "+ LV", "+ MT", "+ NL", "+ PL", "+ PT", "+ RO", "+ SE", "+ SI", "+ SK", "- *"},
+			Action: config.QuickReplace,
+		},
+		FlagID: "EU",
+	})
+	selections = append(selections, selectCountry{
+		QuickSetting: config.QuickSetting{
+			Name:   "US and Canada",
+			Value:  []string{"+ US", "+ CA", "- *"},
+			Action: config.QuickReplace,
+		},
+	})
+
+	// Add countries to quick settings.
+	for _, country := range countryList {
+		selections = append(selections, selectCountry{
+			QuickSetting: config.QuickSetting{
+				Name:   fmt.Sprintf("%s (%s)", country.Name, country.Code),
+				Value:  []string{fmt.Sprintf("+ %s", country.Code), "- *"},
+				Action: config.QuickReplace,
+			},
+			FlagID: country.Code,
+		})
+	}
+
+	// Add continents to quick settings.
+	for _, continent := range continentList {
+		selections = append(selections, selectCountry{
+			QuickSetting: config.QuickSetting{
+				Name:   fmt.Sprintf("%s (C:%s)", continent.Name, continent.Code),
+				Value:  []string{fmt.Sprintf("+ C:%s", continent.Code), "- *"},
+				Action: config.QuickReplace,
+			},
+		})
+	}
+
+	// Lock config option and set new quick settings.
+	cfgOption.Lock()
+	defer cfgOption.Unlock()
+	cfgOption.Annotations[config.QuickSettingsAnnotation] = selections
+
+	log.Tracer(ctx).Debugf("navigator: updated %d countries in quick settings for %s", len(selections), configKey)
+	return nil
+}
+
+// updateQuickSettingVerifiedOwnerList sets the quick settings of the given
+// config option to one "Trust <owner>" entry per distinct verified owner seen
+// on the map, sorted, excluding empty values and the default "Safing" owner.
+func (m *Map) updateQuickSettingVerifiedOwnerList(ctx context.Context, configKey string) error {
+	// Get config option.
+	cfgOption, err := config.GetOption(configKey)
+	if err != nil {
+		return fmt.Errorf("failed to get config option %s: %w", configKey, err)
+	}
+
+	pins := m.pinList(true)
+	verifiedOwners := make([]string, 0, len(pins)/5) // Capacity is an estimation.
+	for _, pin := range pins {
+		pin.Lock()
+		vo := pin.VerifiedOwner
+		pin.Unlock()
+
+		// Skip invalid/unneeded values.
+		switch vo {
+		case "", "Safing":
+			continue
+		}
+
+		// Add to list, if not yet in there.
+		// Type arguments are inferred; explicit instantiation is redundant.
+		if !slices.Contains(verifiedOwners, vo) {
+			verifiedOwners = append(verifiedOwners, vo)
+		}
+	}
+
+	// Sort list.
+	slices.Sort(verifiedOwners)
+
+	// Compile list of quick settings.
+	quickSettings := make([]config.QuickSetting, 0, len(verifiedOwners))
+	for _, vo := range verifiedOwners {
+		quickSettings = append(quickSettings, config.QuickSetting{
+			Name:   fmt.Sprintf("Trust %s", vo),
+			Value:  []string{vo},
+			Action: config.QuickMergeBottom,
+		})
+	}
+
+	// Lock config option and set new quick settings.
+	cfgOption.Lock()
+	defer cfgOption.Unlock()
+	cfgOption.Annotations[config.QuickSettingsAnnotation] = quickSettings
+
+	log.Tracer(ctx).Debugf("navigator: updated %d verified owners in quick settings for %s", len(quickSettings), configKey)
+	return nil
+}
diff --git a/spn/patrol/domains.go b/spn/patrol/domains.go
new file mode 100644
index 00000000..43fff823
--- /dev/null
+++ b/spn/patrol/domains.go
@@ -0,0 +1,311 @@
+package patrol
+
+import (
+	"math/rand"
+	"time"
+)
+
+// getRandomTestDomain returns a random test domain from the test domain list.
+// Not cryptographically secure random, though.
+func getRandomTestDomain() string {
+	rng := rand.New(rand.NewSource(time.Now().UnixNano())) //nolint:gosec // Weak randomness is not an issue here.
+	// Intn(n) already returns values in the half-open interval [0, n), so the
+	// full slice length is used — the previous len(testDomains)-1 could never
+	// select the last domain in the list.
+	return testDomains[rng.Intn(len(testDomains))]
+}
+
+// testDomains is a list of domains to check if they respond successfully to a HTTP GET request.
+// They are sourced from tranco - trimmed, checked, and cleaned.
+// Use TestCleanDomains to clean a new/updated list.
+// Treat as a constant.
+var testDomains = []string{
+	"about.com",
+	"addtoany.com",
+	"adobe.com",
+	"aliyun.com",
+	"ampproject.org",
+	"android.com",
+	"apache.org",
+	"apple.com",
+	"apple.news",
+	"appspot.com",
+	"arnebrachhold.de",
+	"avast.com",
+	"bbc.co.uk",
+	"bbc.com",
+	"bing.com",
+	"blogger.com",
+	"blogspot.com",
+	"branch.io",
+	"calendly.com",
+	"cam.ac.uk",
+	"canonical.com",
+	"canva.com",
+	"cisco.com",
+	"cloudflare.com",
+	"cloudns.net",
+	"cnblogs.com",
+	"cnn.com",
+	"creativecommons.org",
+	"criteo.com",
+	"cupfox.app",
+	"dailymail.co.uk",
+	"ddnss.de",
+	"debian.org",
+	"digitalocean.com",
+	"doi.org",
+	"domainmarket.com",
+	"doubleclick.net",
+	"dreamhost.com",
+	"dropbox.com",
+	"dynect.net",
+	"ed.gov",
+	"elegantthemes.com",
+	"elpais.com",
+	"epa.gov",
+	"eporner.com",
+	"espn.com",
+	"europa.eu",
+	"example.com",
+	"facebook.com",
+	"fb.com",
+	"fb.me",
+	"fb.watch",
+	"fbcdn.net",
+	"feedburner.com",
+	"free.fr",
+	"ftc.gov",
+	"g.page",
+	"getbootstrap.com",
+	"gitlab.com",
+	"gmail.com",
+	"gnu.org",
+	"goo.gl",
+	"google-analytics.com",
+	"google.ca",
+	"google.co.in",
+	"google.co.jp",
+	"google.co.th",
+	"google.co.uk",
+	"google.com.au",
+	"google.com.br",
+	"google.com.hk",
+	"google.com.mx",
+	"google.com.tr",
+	"google.com.tw",
+	"google.com",
+	"google.de",
+	"google.es",
+	"google.fr",
+	"google.it",
+	"googledomains.com",
+	"googlesyndication.com",
+	"gstatic.com",
+	"harvard.edu",
+	"hitomi.la",
+	"hubspot.com",
+	"hugedomains.com",
+	"ibm.com",
+	"icloud.com",
+	"ikea.com",
+	"ilovepdf.com",
+	"indiatimes.com",
+	"instagram.com",
+	"investing.com",
+	"investopedia.com",
+	"irs.gov",
+	"kickstarter.com",
+	"launchpad.net",
+	"lencr.org",
+	"lijit.com",
+	"linkedin.com",
+	"linode.com",
+	"mashable.com",
+	"medium.com",
+	"mega.co.nz",
+	"mega.nz",
+	"merriam-webster.com",
+	"mit.edu",
+	"netflix.com",
+	"nginx.org",
+	"nist.gov",
+	"notion.so",
+	"nsone.net",
+	"office.com",
+	"onetrust.com",
+	"openstreetmap.org",
+	"patreon.com",
+	"pexels.com",
+	"photobucket.com",
+	"php.net",
+	"pki.goog",
+	"plos.org",
+	"ps.kz",
+	"readthedocs.io",
+	"redd.it",
+	"reddit.com",
+	"remove.bg",
+	"rfc-editor.org",
+	"savefrom.net",
+	"sedo.com",
+	"so-net.ne.jp",
+	"sourceforge.net",
+	"spamhaus.org",
+	"speedtest.net",
+	"spotify.com",
+	"stanford.edu",
+	"state.gov",
+	"substack.com",
+	"t.me",
+	"taboola.com",
+	"techcrunch.com",
+	"telegram.me",
+	"telegram.org",
+	"threema.ch",
+	"tinyurl.com",
+	"ubuntu.com",
+	"ui.com",
+	"umich.edu",
+	"uol.com.br",
+	"upenn.edu",
+	"usgs.gov",
+	"utexas.edu",
+	"va.gov",
+	"verisign.com",
+	"vmware.com",
+	"w3.org",
+	"wa.me",
+	"webs.com",
+	"whatsapp.com",
+	"whatsapp.net",
+	"whitehouse.gov",
+	"wikimedia.org",
+	"wikipedia.org",
+	"wiktionary.org",
+	"www.aliyundrive.com",
+	"www.amazon.ca",
+	"www.amazon.co.jp",
+	"www.amazon.co.uk",
+	"www.amazon.com",
+	"www.amazon.de",
+	"www.amazon.es",
+	"www.amazon.fr",
+	"www.amazon.in",
+	"www.amazon.it",
+	"www.aol.com",
+	"www.appsflyer.com",
+	"www.att.com",
+	"www.business.site",
+	"www.ca.gov",
+	"www.canada.ca",
+	"www.cctv.com",
+	"www.cdc.gov",
+	"www.chinaz.com",
+	"www.cloud.com",
+	"www.cnet.com",
+	"www.comcast.com",
+	"www.comcast.net",
+	"www.cornell.edu",
+	"www.crashlytics.com",
+	"www.datadoghq.com",
+	"www.db.com",
+	"www.deloitte.com",
+	"www.dw.com",
+	"www.engadget.com",
+	"www.eset.com",
+	"www.fao.org",
+	"www.fedex.com",
+	"www.flickr.com",
+	"www.force.com",
+	"www.ford.com",
+	"www.frontiersin.org",
+	"www.geeksforgeeks.org",
+	"www.gene.com",
+	"www.genius.com",
+	"www.github.io",
+	"www.gov.uk",
+	"www.gravatar.com",
+	"www.healthline.com",
+	"www.hhs.gov",
+	"www.hichina.com",
+	"www.hinet.net",
+	"www.house.gov",
+	"www.hp.com",
+	"www.huawei.com",
+	"www.hupu.com",
+	"www.ietf.org",
+	"www.immunet.com",
+	"www.independent.co.uk",
+	"www.intel.com",
+	"www.jotform.com",
+	"www.klaviyo.com",
+	"www.launchdarkly.com",
+	"www.live.com",
+	"www.macromedia.com",
+	"www.medallia.com",
+	"www.mediatek.com",
+	"www.medicalnewstoday.com",
+	"www.microsoft.com",
+	"www.mongodb.com",
+	"www.mysql.com",
+	"www.namu.wiki",
+	"www.nasa.gov",
+	"www.nba.com",
+	"www.nbcnews.com",
+	"www.nih.gov",
+	"www.noaa.gov",
+	"www.npr.org",
+	"www.nps.gov",
+	"www.ny.gov",
+	"www.okta.com",
+	"www.openai.com",
+	"www.optimizely.com",
+	"www.oracle.com",
+	"www.outlook.com",
+	"www.paloaltonetworks.com",
+	"www.pbs.org",
+	"www.pixabay.com",
+	"www.plala.or.jp",
+	"www.playstation.com",
+	"www.plesk.com",
+	"www.princeton.edu",
+	"www.prnewswire.com",
+	"www.psu.edu",
+	"www.python.org",
+	"www.qq.com",
+	"www.quantserve.com",
+	"www.quillbot.com",
+	"www.rackspace.com",
+	"www.redhat.com",
+	"www.researchgate.net",
+	"www.roku.com",
+	"www.salesforce.com",
+	"www.skype.com",
+	"www.sun.com",
+	"www.teamviewer.com",
+	"www.ted.com",
+	"www.tesla.com",
+	"www.theguardian.com",
+	"www.typeform.com",
+	"www.uchicago.edu",
+	"www.ucla.edu",
+	"www.usda.gov",
+	"www.usps.com",
+	"www.utorrent.com",
+	"www.warnerbros.com",
+	"www.webex.com",
+	"www.who.int",
+	"www.worldbank.org",
+	"www.xbox.com",
+	"www.xerox.com",
+	"www.youdao.com",
+	"www.zdnet.com",
+	"www.zebra.com",
+	"yahoo.com",
+	"yale.edu",
+	"yandex.com",
+	"yandex.net",
+	"youku.com",
+	"youtu.be",
+	"youtube.com",
+	"zemanta.com",
+	"zoro.to",
+}
diff --git a/spn/patrol/domains_test.go b/spn/patrol/domains_test.go
new file mode 100644
index 00000000..a5e28895
--- /dev/null
+++ b/spn/patrol/domains_test.go
@@ -0,0 +1,67 @@
+package patrol
+
+import (
+	"context"
+	"fmt"
+	"sort"
+	"testing"
+)
+
+var enableDomainTools = "no" // change to "yes" to enable
+
+// TestCleanDomains checks, cleans and prints an improved domain list.
+// Run with:
+// go test -run ^TestCleanDomains$ github.com/safing/portmaster/spn/patrol -ldflags "-X github.com/safing/portmaster/spn/patrol.enableDomainTools=yes" -timeout 3h -v
+// This is provided as a test for easier maintenance and ops.
+// The printed output is intended to replace the testDomains list after a
+// manual review; it is skipped unless enableDomainTools is set via ldflags.
+func TestCleanDomains(t *testing.T) { //nolint:paralleltest
+	if enableDomainTools != "yes" {
+		t.Skip()
+		return
+	}
+
+	// Setup context.
+	ctx := context.Background()
+
+	// Go through all domains and check if they are reachable.
+	goodDomains := make([]string, 0, len(testDomains))
+	for _, domain := range testDomains {
+		// Check if domain is reachable.
+		code, err := domainIsUsable(ctx, domain)
+		if err != nil {
+			fmt.Printf("FAIL: %s: %s\n", domain, err)
+		} else {
+			fmt.Printf("OK: %s [%d]\n", domain, code)
+			goodDomains = append(goodDomains, domain)
+			continue
+		}
+
+		// If failed, try again with a www. prefix
+		wwwDomain := "www." + domain
+		code, err = domainIsUsable(ctx, wwwDomain)
+		if err != nil {
+			fmt.Printf("FAIL: %s: %s\n", wwwDomain, err)
+		} else {
+			fmt.Printf("OK: %s [%d]\n", wwwDomain, code)
+			goodDomains = append(goodDomains, wwwDomain)
+		}
+
+	}
+
+	// Print the surviving domains in copy-paste-ready form.
+	sort.Strings(goodDomains)
+	fmt.Println("printing good domains:")
+	for _, domain := range goodDomains {
+		fmt.Printf("%q,\n", domain)
+	}
+
+	fmt.Println("IMPORTANT: do not forget to go through list and check if everything looks good")
+}
+
+// domainIsUsable reports whether the domain answers HTTPS requests on both
+// IPv6 and IPv4, returning the status code of the last successful check.
+func domainIsUsable(ctx context.Context, domain string) (statusCode int, err error) {
+	// Try IPv6 first as it is way more likely to fail.
+	statusCode, err = CheckHTTPSConnection(ctx, "tcp6", domain)
+	if err != nil {
+		return statusCode, err
+	}
+	return CheckHTTPSConnection(ctx, "tcp4", domain)
+}
diff --git a/spn/patrol/http.go b/spn/patrol/http.go
new file mode 100644
index 00000000..391518c1
--- /dev/null
+++ b/spn/patrol/http.go
@@ -0,0 +1,186 @@
+package patrol
+
+import (
+	"context"
+	"fmt"
+	"net"
+	"net/http"
+	"time"
+
+	"github.com/tevino/abool"
+
+	"github.com/safing/portbase/log"
+	"github.com/safing/portbase/modules"
+	"github.com/safing/portmaster/spn/conf"
+)
+
+// httpsConnectivityConfirmed holds the result of the last connectivity check.
+// It starts as "true", so connectivity is assumed until the first check fails.
+var httpsConnectivityConfirmed = abool.NewBool(true)
+
+// HTTPSConnectivityConfirmed returns whether the last HTTPS connectivity check succeeded.
+// Is "true" before first test.
+func HTTPSConnectivityConfirmed() bool {
+	return httpsConnectivityConfirmed.IsSet()
+}
+
+// connectivityCheckTask runs the connectivity checks and updates the shared
+// connectivity flag. The change signal event is triggered only on an actual
+// state transition (SetToIf reports whether the flag flipped).
+// The task parameter is required by the modules.Task callback signature.
+func connectivityCheckTask(ctx context.Context, task *modules.Task) error {
+	// Start tracing logs.
+	ctx, tracer := log.AddTracer(ctx)
+	defer tracer.Submit()
+
+	// Run checks and report status.
+	success := runConnectivityChecks(ctx)
+	if success {
+		tracer.Info("spn/patrol: all connectivity checks succeeded")
+		if httpsConnectivityConfirmed.SetToIf(false, true) {
+			module.TriggerEvent(ChangeSignalEventName, nil)
+		}
+		return nil
+	}
+
+	// A failed check is reported via the flag and event, not as a task error.
+	tracer.Errorf("spn/patrol: connectivity check failed")
+	if httpsConnectivityConfirmed.SetToIf(true, false) {
+		module.TriggerEvent(ChangeSignalEventName, nil)
+	}
+	return nil
+}
+
+// runConnectivityChecks runs the HTTPS connectivity checks for every IP
+// version this Hub is configured for and reports success only if all
+// applicable checks pass.
+func runConnectivityChecks(ctx context.Context) (ok bool) {
+	if conf.HubHasIPv4() && !runHTTPSConnectivityChecks(ctx, "tcp4") {
+		return false
+	}
+	if conf.HubHasIPv6() && !runHTTPSConnectivityChecks(ctx, "tcp6") {
+		return false
+	}
+	// All checks passed.
+	return true
+}
+
+// runHTTPSConnectivityChecks runs escalating connectivity checks on the
+// given network ("tcp4" or "tcp6"): each step checks more domains with a
+// lower required success rate. Success at any step confirms connectivity.
+func runHTTPSConnectivityChecks(ctx context.Context, network string) (ok bool) {
+	steps := []struct {
+		checks   int
+		required float32
+	}{
+		{1, 1},    // Step 1: Check 1 domain, require 100%
+		{5, 0.8},  // Step 2: Check 5 domains, require 80%
+		{20, 0.7}, // Step 3: Check 20 domains, require 70%
+	}
+	for _, step := range steps {
+		if checkHTTPSConnectivity(ctx, network, step.checks, step.required) {
+			return true
+		}
+	}
+	return false
+}
+
+// checkHTTPSConnectivity runs the given number of single-connection checks
+// on the network and reports whether the fraction of successful checks
+// reached requiredSuccessFraction.
+func checkHTTPSConnectivity(ctx context.Context, network string, checks int, requiredSuccessFraction float32) (ok bool) {
+	log.Tracer(ctx).Tracef(
+		"spn/patrol: testing connectivity via https (%d checks; %.0f%% required)",
+		checks,
+		requiredSuccessFraction*100,
+	)
+
+	// Run tests.
+	var succeeded int
+	for i := 0; i < checks; i++ {
+		if checkHTTPSConnection(ctx, network) {
+			succeeded++
+		}
+	}
+
+	// Check success.
+	successFraction := float32(succeeded) / float32(checks)
+	if successFraction < requiredSuccessFraction {
+		log.Tracer(ctx).Warningf(
+			"spn/patrol: https/%s connectivity check failed: %d/%d (%.0f%%)",
+			network,
+			succeeded,
+			checks,
+			successFraction*100,
+		)
+		return false
+	}
+
+	log.Tracer(ctx).Debugf(
+		"spn/patrol: https/%s connectivity check succeeded: %d/%d (%.0f%%)",
+		network,
+		succeeded,
+		checks,
+		successFraction*100,
+	)
+	return true
+}
+
+// checkHTTPSConnection checks a single HTTPS connection to a randomly
+// selected test domain on the given network and reports success.
+// Failures are logged at debug level only, as individual domains may be down.
+func checkHTTPSConnection(ctx context.Context, network string) (ok bool) {
+	testDomain := getRandomTestDomain()
+	code, err := CheckHTTPSConnection(ctx, network, testDomain)
+	if err != nil {
+		log.Tracer(ctx).Debugf("spn/patrol: https/%s connect check failed: %s: %s", network, testDomain, err)
+		return false
+	}
+
+	log.Tracer(ctx).Tracef("spn/patrol: https/%s connect check succeeded: %s [%d]", network, testDomain, code)
+	return true
+}
+
+// CheckHTTPSConnection checks if a HTTPS connection to the given domain can be established.
+// network must be "tcp4" or "tcp6"; the returned status code is 0 when the
+// request itself failed.
+func CheckHTTPSConnection(ctx context.Context, network, domain string) (statusCode int, err error) {
+	// Check network parameter.
+	// Only the two explicit TCP variants are allowed, so the IP version is
+	// never chosen implicitly by the dialer.
+	switch network {
+	case "tcp4":
+	case "tcp6":
+	default:
+		return 0, fmt.Errorf("provided unsupported network: %s", network)
+	}
+
+	// Build URL.
+	// Use HTTPS to ensure that we have really communicated with the desired
+	// server and not with an intermediate.
+	url := fmt.Sprintf("https://%s/", domain)
+
+	// Prepare all parts of the request.
+	// TODO: Evaluate if we want to change the User-Agent.
+	req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
+	if err != nil {
+		return 0, err
+	}
+	dialer := &net.Dialer{
+		Timeout:       15 * time.Second,
+		LocalAddr:     conf.GetBindAddr(network),
+		FallbackDelay: -1, // Disables Fast Fallback from IPv6 to IPv4.
+		KeepAlive:     -1, // Disable keep-alive.
+	}
+	dialWithNet := func(ctx context.Context, _, addr string) (net.Conn, error) {
+		// Ignore network by http client.
+		// Instead, force either tcp4 or tcp6.
+		return dialer.DialContext(ctx, network, addr)
+	}
+	client := &http.Client{
+		Transport: &http.Transport{
+			DialContext:         dialWithNet,
+			DisableKeepAlives:   true,
+			DisableCompression:  true,
+			TLSHandshakeTimeout: 15 * time.Second,
+		},
+		// Do not follow redirects; the first response already proves
+		// connectivity to the desired server.
+		CheckRedirect: func(req *http.Request, via []*http.Request) error {
+			return http.ErrUseLastResponse
+		},
+		Timeout: 30 * time.Second,
+	}
+
+	// Make request to server.
+	resp, err := client.Do(req)
+	if err != nil {
+		return 0, fmt.Errorf("failed to send http request: %w", err)
+	}
+	defer func() {
+		_ = resp.Body.Close()
+	}()
+	// Any 2xx or 3xx status counts as a successful connection.
+	if resp.StatusCode < 200 || resp.StatusCode >= 400 {
+		return resp.StatusCode, fmt.Errorf("unexpected status code: %s", resp.Status)
+	}
+
+	return resp.StatusCode, nil
+}
diff --git a/spn/patrol/module.go b/spn/patrol/module.go
new file mode 100644
index 00000000..842c139c
--- /dev/null
+++ b/spn/patrol/module.go
@@ -0,0 +1,32 @@
+package patrol
+
+import (
+	"time"
+
+	"github.com/safing/portbase/modules"
+	"github.com/safing/portmaster/spn/conf"
+)
+
+// ChangeSignalEventName is the name of the event that signals any change in the patrol system.
+const ChangeSignalEventName = "change signal"
+
+// module is the patrol module instance, registered in init.
+var module *modules.Module
+
+func init() {
+	// The patrol module depends on the rng module (for random test domains).
+	module = modules.Register("patrol", prep, start, nil, "rng")
+}
+
+// prep registers the change signal event before the module starts.
+func prep() error {
+	module.RegisterEvent(ChangeSignalEventName, false)
+
+	return nil
+}
+
+// start schedules the repeating connectivity check task, but only on public Hubs.
+func start() error {
+	if conf.PublicHub() {
+		module.NewTask("connectivity test", connectivityCheckTask).
+			Repeat(5 * time.Minute)
+	}
+
+	return nil
+}
diff --git a/spn/ships/connection_test.go b/spn/ships/connection_test.go
new file mode 100644
index 00000000..5d03927b
--- /dev/null
+++ b/spn/ships/connection_test.go
@@ -0,0 +1,131 @@
+package ships
+
+import (
+	"context"
+	"fmt"
+	"net"
+	"sync"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+
+	"github.com/safing/portmaster/spn/hub"
+)
+
+var (
+	// testPort is incremented for every test listener to avoid collisions.
+	testPort  uint16 = 65000
+	testData         = []byte("The quick brown fox jumps over the lazy dog")
+	localhost        = net.IPv4(127, 0, 0, 1)
+)
+
+// getTestPort returns a fresh port for the next test listener.
+func getTestPort() uint16 {
+	testPort++
+	return testPort
+}
+
+// getTestBuf returns a receive buffer exactly the size of testData.
+func getTestBuf() []byte {
+	return make([]byte, len(testData))
+}
+
+// TestConnections runs a bidirectional send/receive round-trip test for
+// every transport protocol in the registry.
+func TestConnections(t *testing.T) {
+	t.Parallel()
+
+	// Hold the registry lock for the duration of all subtests; Cleanup runs
+	// after all parallel subtests have finished.
+	registryLock.Lock()
+	t.Cleanup(func() {
+		registryLock.Unlock()
+	})
+
+	for k, v := range registry { //nolint:paralleltest // False positive.
+		protocol, builder := k, v
+		t.Run(protocol, func(t *testing.T) {
+			t.Parallel()
+
+			var wg sync.WaitGroup
+			ctx, cancelCtx := context.WithCancel(context.Background())
+
+			// docking requests
+			dockingRequests := make(chan Ship, 1)
+			transport := &hub.Transport{
+				Protocol: protocol,
+				Port:     getTestPort(),
+			}
+
+			// create listener
+			pier, err := builder.EstablishPier(transport, dockingRequests)
+			if err != nil {
+				t.Fatal(err)
+			}
+
+			// connect to listener
+			ship, err := builder.LaunchShip(ctx, transport, localhost)
+			if err != nil {
+				t.Fatal(err)
+			}
+
+			// client send
+			err = ship.Load(testData)
+			if err != nil {
+				t.Fatalf("%s failed: %s", ship, err)
+			}
+
+			// dock client
+			srvShip := <-dockingRequests
+			if srvShip == nil {
+				t.Fatalf("%s failed to dock", pier)
+			}
+
+			// server recv
+			buf := getTestBuf()
+			_, err = srvShip.UnloadTo(buf)
+			if err != nil {
+				t.Fatalf("%s failed: %s", ship, err)
+			}
+
+			// check data
+			assert.Equal(t, testData, buf, "should match")
+			fmt.Print(".")
+
+			// Exchange data in both directions repeatedly.
+			for i := 0; i < 100; i++ {
+				// server send
+				err = srvShip.Load(testData)
+				if err != nil {
+					t.Fatalf("%s failed: %s", ship, err)
+				}
+
+				// client recv
+				buf = getTestBuf()
+				_, err = ship.UnloadTo(buf)
+				if err != nil {
+					t.Fatalf("%s failed: %s", ship, err)
+				}
+
+				// check data
+				assert.Equal(t, testData, buf, "should match")
+				fmt.Print(".")
+
+				// client send
+				err = ship.Load(testData)
+				if err != nil {
+					t.Fatalf("%s failed: %s", ship, err)
+				}
+
+				// server recv
+				buf = getTestBuf()
+				_, err = srvShip.UnloadTo(buf)
+				if err != nil {
+					t.Fatalf("%s failed: %s", ship, err)
+				}
+
+				// check data
+				assert.Equal(t, testData, buf, "should match")
+				fmt.Print(".")
+			}
+
+			ship.Sink()
+			srvShip.Sink()
+			pier.Abolish()
+			cancelCtx()
+			// NOTE(review): wg is never Add()-ed anywhere in this test, so
+			// this Wait() returns immediately — confirm whether the docking
+			// procedure should actually be tracked here.
+			wg.Wait() // wait for docking procedure to end
+		})
+	}
+}
diff --git a/spn/ships/http.go b/spn/ships/http.go
new file mode 100644
index 00000000..165ca9df
--- /dev/null
+++ b/spn/ships/http.go
@@ -0,0 +1,230 @@
+package ships
+
+import (
+	"bufio"
+	"context"
+	"fmt"
+	"io"
+	"net"
+	"net/http"
+	"time"
+
+	"github.com/safing/portbase/log"
+	"github.com/safing/portmaster/spn/conf"
+	"github.com/safing/portmaster/spn/hub"
+)
+
+// HTTPShip is a ship that uses HTTP.
+type HTTPShip struct {
+	ShipBase
+}
+
+// HTTPPier is a pier that uses HTTP.
+type HTTPPier struct {
+	PierBase
+
+	// newDockings buffers newly accepted connections.
+	// NOTE(review): this field appears unused within this file — verify
+	// whether it can be removed.
+	newDockings chan net.Conn
+}
+
+func init() {
+	// Register the HTTP transport with the ship builder registry.
+	Register("http", &Builder{
+		LaunchShip:    launchHTTPShip,
+		EstablishPier: establishHTTPPier,
+	})
+}
+
+/*
+HTTP Transport Variants:
+
+1. Hijack connection and switch to raw SPN protocol:
+
+Request:
+
+		GET <path> HTTP/1.1
+		Connection: Upgrade
+		Upgrade: SPN
+
+Response:
+
+		HTTP/1.1 101 Switching Protocols
+		Connection: Upgrade
+		Upgrade: SPN
+
+*/
+
+// launchHTTPShip connects to an HTTP pier at the given IP and upgrades the
+// connection to the raw SPN protocol (HTTP transport variant 1).
+// The established connection is owned by the returned ship; on any error,
+// the connection is closed before returning.
+func launchHTTPShip(ctx context.Context, transport *hub.Transport, ip net.IP) (Ship, error) {
+	// Default to root path.
+	path := transport.Path
+	if path == "" {
+		path = "/"
+	}
+
+	// Build request for Variant 1.
+	variant := 1
+	request, err := http.NewRequest(http.MethodGet, path, nil)
+	if err != nil {
+		return nil, fmt.Errorf("failed to build HTTP request: %w", err)
+	}
+	request.Header.Set("Connection", "Upgrade")
+	request.Header.Set("Upgrade", "SPN")
+
+	// Create connection.
+	var dialNet string
+	if ip4 := ip.To4(); ip4 != nil {
+		dialNet = "tcp4"
+	} else {
+		dialNet = "tcp6"
+	}
+	dialer := &net.Dialer{
+		Timeout:       30 * time.Second,
+		LocalAddr:     conf.GetBindAddr(dialNet),
+		FallbackDelay: -1, // Disables Fast Fallback from IPv6 to IPv4.
+		KeepAlive:     -1, // Disable keep-alive.
+	}
+	conn, err := dialer.DialContext(ctx, dialNet, net.JoinHostPort(ip.String(), portToA(transport.Port)))
+	if err != nil {
+		return nil, fmt.Errorf("failed to connect: %w", err)
+	}
+
+	// Send HTTP request.
+	err = request.Write(conn)
+	if err != nil {
+		_ = conn.Close() // Do not leak the connection on failure.
+		return nil, fmt.Errorf("failed to send HTTP request: %w", err)
+	}
+
+	// Receive HTTP response.
+	response, err := http.ReadResponse(bufio.NewReader(conn), request)
+	if err != nil {
+		_ = conn.Close() // Do not leak the connection on failure.
+		return nil, fmt.Errorf("failed to read HTTP response: %w", err)
+	}
+	defer response.Body.Close() //nolint:errcheck,gosec
+
+	// Handle response according to variant.
+	switch variant {
+	case 1:
+		if response.StatusCode == http.StatusSwitchingProtocols &&
+			response.Header.Get("Connection") == "Upgrade" &&
+			response.Header.Get("Upgrade") == "SPN" {
+			// Continue
+		} else {
+			_ = conn.Close() // Do not leak the connection on failure.
+			return nil, fmt.Errorf("received unexpected response for variant 1: %s", response.Status)
+		}
+
+	default:
+		_ = conn.Close() // Do not leak the connection on failure.
+		return nil, fmt.Errorf("internal error: unsupported http transport variant: %d", variant)
+	}
+
+	// Create ship.
+	ship := &HTTPShip{
+		ShipBase: ShipBase{
+			conn:      conn,
+			transport: transport,
+			mine:      true,
+			secure:    false,
+		},
+	}
+
+	// Init and return.
+	ship.calculateLoadSize(ip, nil, TCPHeaderMTUSize)
+	ship.initBase()
+	return ship, nil
+}
+
+// ServeHTTP handles requests arriving on the pier's registered path.
+// Upgrade requests (variant 1) are hijacked from the HTTP server and handed
+// over as new docking requests; all other requests get the info page.
+func (pier *HTTPPier) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	switch {
+	case r.Method == http.MethodGet &&
+		r.Header.Get("Connection") == "Upgrade" &&
+		r.Header.Get("Upgrade") == "SPN":
+		// Request for Variant 1.
+
+		// Hijack connection.
+		var conn net.Conn
+		if hijacker, ok := w.(http.Hijacker); ok {
+			// Empty body, so the hijacked connection starts with a clean buffer.
+			_, err := io.ReadAll(r.Body)
+			if err != nil {
+				http.Error(w, "", http.StatusInternalServerError)
+				log.Warningf("ships: failed to empty body for hijack for %s: %s", r.RemoteAddr, err)
+				return
+			}
+			_ = r.Body.Close()
+
+			// Reply with upgrade confirmation.
+			// This must happen before hijacking, as the ResponseWriter is
+			// unusable afterwards.
+			w.Header().Set("Connection", "Upgrade")
+			w.Header().Set("Upgrade", "SPN")
+			w.WriteHeader(http.StatusSwitchingProtocols)
+
+			// Get connection.
+			conn, _, err = hijacker.Hijack()
+			if err != nil {
+				log.Warningf("ships: failed to hijack http connection from %s: %s", r.RemoteAddr, err)
+				return
+			}
+		} else {
+			http.Error(w, "", http.StatusInternalServerError)
+			log.Warningf("ships: connection from %s cannot be hijacked", r.RemoteAddr)
+			return
+		}
+
+		// Create new ship.
+		ship := &HTTPShip{
+			ShipBase: ShipBase{
+				transport: pier.transport,
+				conn:      conn,
+				mine:      false,
+				secure:    false,
+			},
+		}
+		ship.calculateLoadSize(nil, conn.RemoteAddr(), TCPHeaderMTUSize)
+		ship.initBase()
+
+		// Submit new docking request.
+		// Abort if the request context is canceled while waiting.
+		select {
+		case pier.dockingRequests <- ship:
+		case <-r.Context().Done():
+			return
+		}
+
+	default:
+		// Reply with info page if no variant matches the request.
+		ServeInfoPage(w, r)
+	}
+}
+
+// establishHTTPPier creates an HTTP pier and registers its handler on the
+// (possibly shared) HTTP server for the transport's port.
+func establishHTTPPier(transport *hub.Transport, dockingRequests chan Ship) (Pier, error) {
+	// Default to root path.
+	path := transport.Path
+	if path == "" {
+		path = "/"
+	}
+
+	// Create pier.
+	pier := &HTTPPier{
+		newDockings: make(chan net.Conn),
+		PierBase: PierBase{
+			transport:       transport,
+			dockingRequests: dockingRequests,
+		},
+	}
+	pier.initBase()
+
+	// Register handler.
+	err := addHTTPHandler(transport.Port, path, pier.ServeHTTP)
+	if err != nil {
+		return nil, fmt.Errorf("failed to add HTTP handler: %w", err)
+	}
+
+	return pier, nil
+}
+
+// Abolish closes the underlying listener and cleans up any related resources.
+func (pier *HTTPPier) Abolish() {
+	// Only abolish once.
+	if !pier.abolishing.SetToIf(false, true) {
+		return
+	}
+
+	// Do not close the listener, as it is shared.
+	// Instead, remove the HTTP handler and the shared server will shutdown itself when needed.
+	// An empty Path is normalized to "/" by removeHTTPHandler, matching the
+	// registration in establishHTTPPier.
+	_ = removeHTTPHandler(pier.transport.Port, pier.transport.Path)
+}
diff --git a/spn/ships/http_info.go b/spn/ships/http_info.go
new file mode 100644
index 00000000..886f2127
--- /dev/null
+++ b/spn/ships/http_info.go
@@ -0,0 +1,83 @@
+package ships
+
+import (
+	"bytes"
+	_ "embed"
+	"html/template"
+	"net/http"
+
+	"github.com/safing/portbase/config"
+	"github.com/safing/portbase/info"
+	"github.com/safing/portbase/log"
+)
+
+var (
+	//go:embed http_info_page.html.tmpl
+	infoPageData string
+
+	infoPageTemplate *template.Template
+
+	// DisplayHubID holds the Hub ID for displaying it on the info page.
+	DisplayHubID string
+)
+
+type infoPageInput struct {
+	Version        string
+	Info           *info.Info
+	ID             string
+	Name           string
+	Group          string
+	ContactAddress string
+	ContactService string
+}
+
+var (
+	pageInputName           config.StringOption
+	pageInputGroup          config.StringOption
+	pageInputContactAddress config.StringOption
+	pageInputContactService config.StringOption
+)
+
+// initPageInput parses the embedded info page template and binds the config
+// options displayed on the page. Must be called before renderInfoPage.
+func initPageInput() {
+	infoPageTemplate = template.Must(template.New("info-page").Parse(infoPageData))
+
+	pageInputName = config.Concurrent.GetAsString("spn/publicHub/name", "")
+	pageInputGroup = config.Concurrent.GetAsString("spn/publicHub/group", "")
+	pageInputContactAddress = config.Concurrent.GetAsString("spn/publicHub/contactAddress", "")
+	pageInputContactService = config.Concurrent.GetAsString("spn/publicHub/contactService", "")
+}
+
+// ServeInfoPage serves the info page for the given request.
+// Rendering failures are logged and answered with a plain 500.
+func ServeInfoPage(w http.ResponseWriter, r *http.Request) {
+	pageData, err := renderInfoPage()
+	if err != nil {
+		log.Warningf("ships: failed to render SPN info page: %s", err)
+		http.Error(w, "", http.StatusInternalServerError)
+		return
+	}
+
+	_, err = w.Write(pageData)
+	if err != nil {
+		// The response is already partially written; only log the failure.
+		log.Warningf("ships: failed to write info page: %s", err)
+	}
+}
+
+func renderInfoPage() ([]byte, error) {
+	input := &infoPageInput{
+		Version:        info.Version(),
+		Info:           info.GetInfo(),
+		ID:             DisplayHubID,
+		Name:           pageInputName(),
+		Group:          pageInputGroup(),
+		ContactAddress: pageInputContactAddress(),
+		ContactService: pageInputContactService(),
+	}
+
+	buf := &bytes.Buffer{}
+	err := infoPageTemplate.ExecuteTemplate(buf, "info-page", input)
+	if err != nil {
+		return nil, err
+	}
+
+	return buf.Bytes(), nil
+}
diff --git a/spn/ships/http_info_page.html.tmpl b/spn/ships/http_info_page.html.tmpl
new file mode 100644
index 00000000..5a4805ed
--- /dev/null
+++ b/spn/ships/http_info_page.html.tmpl
@@ -0,0 +1,112 @@
+<!DOCTYPE html>
+<html lang="en">
+<head>
+  <meta charset="UTF-8">
+  <meta name="viewport" content="width=device-width, initial-scale=1.0">
+  <title>SPN Node</title>
+  <style>
+    /* Lissom.CSS - https://github.com/lissomware/css */
+    blockquote,button,details[open],input[type=submit],th{background-color:var(--accent-alt)}button:active,h2::before,input[type=submit]:active{opacity:.9}button:hover,h4::before,input[type=submit]:hover{opacity:.7}blockquote::after,h1::before,h2::before,h3::before,h4::before,h5::before,h6::before{position:absolute}a:hover,h1::before,h2::before,h3::before,h4::before,h5::before,h6::before{color:var(--accent-ui)}kbd,summary{font-weight:700}dialog,mark{color:inherit}details,pre{padding:.5em;background-color:var(--tertiary)}hr,td{text-align:center}body,dialog,html,tr:nth-child(2n) td{background-color:var(--light)}code,details,input,kbd,pre,textarea,tr:nth-child(odd) td{background-color:var(--tertiary)}*{box-sizing:border-box}:root{--primary:hsl(265, 38%, 13%);--secondary:hsl(283, 6%, 45%);--tertiary:hsl(257, 15%, 95%);--light:hsl(270, 100%, 99%);--accent:#0376bb;--accent-alt:hsl(279, 100%, 97%);--accent-ui:#0376bb;--semantic-green:hsl(88, 76%, 83%);--semantic-red:hsl(0, 76%, 83%)}@media (prefers-color-scheme:dark){:root{--primary:hsl(300, 100%, 99%);--secondary:hsl(280, 5%, 37%);--tertiary:hsl(270, 5%, 16%);--light:hsl(280, 5%, 12%);--accent:hsl(282, 25%, 40%);--accent-alt:hsl(278, 14%, 20%);--accent-ui:#0376bb;--semantic-green:hsl(88, 35%, 40%);--semantic-red:hsl(0, 35%, 40%)}}body,html{color-scheme:light;accent-color:var(--accent-ui);color:var(--primary);font-family:sans-serif;margin:0;padding:0;line-height:1.4}main{width:min(100% - 3rem,65ch);margin-inline:auto;margin-top:2em;margin-bottom:2em}a{color:var(--primary);text-decoration-color:var(--accent-ui);text-underline-offset:0.15em;text-decoration-thickness:0.1em}a:hover{text-decoration-thickness:0.2em;z-index:2}a:focus-visible{background-color:var(--accent-ui);box-shadow:0 0 0 .2em var(--light),0 0 0 .4em var(--accent-ui);outline:0;z-index:2;animation:1s linear infinite alternate-reverse pulseFill;border-radius:.1em;position:relative}@keyframes pulseFill{0%{background-color:var(--accent-ui);box-shadow:0 0 0 .2em 
var(--light),0 0 0 .4em var(--accent-ui)}100%{background-color:var(--accent);box-shadow:0 0 0 .2em var(--light),0 0 0 .4em var(--accent)}}blockquote{margin:1.5em 0;position:relative;padding:.7em;z-index:1;border-radius:.5em}blockquote p{margin:0}blockquote::after,blockquote::before{color:var(--accent-ui);font-size:5em;line-height:.8}button,input,input[type=submit],textarea{padding:.4em 1em;font-size:inherit;position:relative;color:var(--primary)}blockquote::before{content:open-quote;vertical-align:top;position:absolute;left:-.15em;top:-.1em}blockquote::after{content:close-quote;vertical-align:bottom;right:-.1em;bottom:-.65em}[lang=fr] blockquote::after,[lang=fr] blockquote::before{font-size:3.5em}[lang=fr] blockquote::before{left:-.3em;top:-.4em}[lang=fr] blockquote::after{right:-.3em;bottom:-.3em}button,input[type=submit]{border:1px solid var(--accent);border-radius:.3em;cursor:pointer}input::placeholder{font-style:italic}button[data-primary],input[type=submit]{color:var(--light);background-color:var(--accent-ui);border-color:var(--accent-ui);outline:1px solid var(--accent);outline-offset:-0.15em;padding:.4em .8em;border-radius:.3em;cursor:pointer;font-weight:700}button:disabled,input:disabled,input[type=submit]:disabled,textarea:disabled{cursor:not-allowed;background-color:var(--tertiary);color:var(--secondary);border-color:var(--secondary);opacity:.5}button:focus-visible,details:has(summary:focus-visible),input:focus-visible,textarea:focus-visible{box-shadow:0 0 0 .2em var(--light),0 0 0 .4em var(--accent-ui);outline:0;z-index:2;animation:1s linear infinite alternate-reverse pulseBorder}details:has(summary:focus-visible) summary{outline:0}@keyframes pulseBorder{0%{box-shadow:0 0 0 .2em var(--light),0 0 0 .4em var(--accent-ui)}100%{box-shadow:0 0 0 .2em var(--light),0 0 0 .4em var(--accent)}}code{border-radius:.3em;font-family:monospace;padding:.1em .2em}details{border:.1em solid var(--secondary);border-radius:.3em;margin:1em 0}details[open] 
summary{border-bottom:.1em solid var(--accent);padding-bottom:.3em;margin-bottom:.7em}dialog{border-radius:.6em;max-width:min(100% - 3rem,65ch);border:1px solid var(--accent-ui);box-shadow:0 0 .3em .1em var(--accent-alt)}hr,progress{border:none;margin:1em 0}::backdrop{background:rgba(0,0,0,.5)}form:not([data-modal]){display:flex;flex-direction:column;gap:.5rem;margin:1em 0}form>div{display:flex;flex-direction:column}form>div:has(input[type=checkbox]){flex-direction:row;align-items:center;gap:.2em}h1,h2,h3,h4,h5,h6{position:relative}h1::before{content:'#';left:-2ch}h1:dir(rtl)::before{right:-2ch}h2::before{content:'##';left:-3ch}h2:dir(rtl)::before{right:-3ch}h3::before{content:'###';left:-4ch;opacity:.8}h3:dir(rtl)::before{right:-4ch}h4::before{content:'####';left:-5ch}h4:dir(rtl)::before{right:-5ch}h5::before{content:'#####';left:-6ch;opacity:.6}h5:dir(rtl)::before{right:-6ch}h6::before{content:'######';left:-7ch;opacity:.5}h6:dir(rtl)::before{right:-7ch}h1[data-no-heading-level]::before,h2[data-no-heading-level]::before,h3[data-no-heading-level]::before,h4[data-no-heading-level]::before,h5[data-no-heading-level]::before,h6[data-no-heading-level]::before{content:''}hr{border-top:.2em double var(--primary);color:var(--primary);overflow:visible;height:.4em}hr:after{background:var(--light);content:'§';padding:0 4px;position:relative;top:-13px}img{border-radius:.6em;width:100%;height:auto;transition:border-radius .1s linear}img:hover{border-radius:0}input,textarea{border:1px solid var(--secondary);border-radius:.3em;font-family:inherit}kbd{border-radius:.3em;border:1px solid var(--secondary);box-shadow:0 1px 1px var(--secondary),0 2px 0 0 var(--light) inset;color:var(--primary);display:inline-block;font-size:.85em;line-height:1;padding:.2em .4em;white-space:nowrap}mark{background-color:var(--accent)}pre{font-family:monospace;color:var(--primary);border-radius:.6em;border:1px solid 
var(--secondary)}progress{appearance:none;-moz-appearance:none;-webkit-appearance:none;border-radius:1em;display:block;height:.5rem;overflow:hidden;padding:0;width:100%}progress::-webkit-progress-bar{background-color:var(--accent)}progress::-webkit-progress-value{background-color:var(--accent-ui)}progress::-moz-progress-bar{background-color:var(--accent-ui)}progress::-ms-fill{background-color:var(--accent-ui);border:none}progress:indeterminate{animation:3s linear infinite progressShine;background-color:var(--accent);background-image:linear-gradient(to right,var(--accent) 0,var(--accent-ui) 10%,var(--accent-ui) 30%,var(--accent) 40%);background-position:top left;background-repeat:no-repeat;background-size:150% 150%}progress:indeterminate:dir(rtl){animation-direction:reverse}progress:indeterminate::-webkit-progress-bar{background-color:transparent}progress:indeterminate::-moz-progress-bar{background-color:transparent}progress:indeterminate::-ms-fill{animation-name:none}@keyframes progressShine{0%{background-position:200% 0}100%{background-position:-200% 0}}caption{padding:.8em;caption-side:bottom}table{border-collapse:collapse;border:.1em solid var(--secondary);border-radius:.6em}td,th{border:1px solid var(--secondary);padding:.4em .3em}ins{background-color:var(--semantic-green)}del{background-color:var(--semantic-red)}
+  </style>
+</head>
+<body>
+  <main>
+    <section>
+      <h1>
+        You Have Reached an SPN Node
+      </h1>
+      <p>
+        The server, or at least the exact URL you have accessed, leads to an SPN Node.
+      </p>
+    </section>
+
+    <section>
+      <h3>
+        What is SPN?
+      </h3>
+      <p>
+        SPN stands for "Safing Privacy Network" and is a network of servers that offers high privacy protection of Internet traffic and activity. It was built to replace VPNs for their Internet privacy use case.
+      </p>
+    </section>
+
+    <section>
+      <h3>
+        More Information
+      </h3>
+      <p>
+        You can find out more about SPN here:
+        <ul>
+          <li>Features: <a href="https://safing.io/spn/">https://safing.io/spn/</a></li>
+          <li>Node Hosting Guide: <a href="https://wiki.safing.io/en/SPN/Nodes/Hosting">https://wiki.safing.io/en/SPN/Nodes/Hosting</a></li>
+        </ul>
+      </p>
+    </section>
+
+    <section>
+      <h3>
+        Contact the Operator of This SPN Node
+      </h3>
+      <p>
+        {{ if .ContactAddress }}
+          You can reach the operator of this SPN Node here:
+          {{ .ContactAddress }}
+            {{ if .ContactService }} via {{ .ContactService }}
+          {{ end }}
+        {{ else }}
+          The operator of this SPN Node has not configured any contact data.<br>
+          Please contact the operator using the usual methods via the hosting provider.
+        {{ end }}
+      </p>
+    </section>
+
+    <section>
+      <h3>
+        Are You Tracing Bad Activity?
+      </h3>
+      <p>
+        We are sorry there is an incident involving this server. We condemn any disruptive or illegal activity.
+      </p>
+      <p>
+        Please note that servers are not only operated by Safing (the company behind SPN), but also by third parties.
+      </p>
+      <p>
+        The SPN works very similar to Tor. Its primary goal is to provide people more privacy on the Internet. We also provide our services to people behind censoring firewalls in oppressive regimes.
+      </p>
+      <p>
+        This server does not host any content (as part of its role in the SPN network). Rather, it is part of the network where nodes on the Internet simply pass packets among themselves before sending them to their destinations, just as any Internet intermediary does.
+      </p>
+      <p>
+        Please understand that the SPN makes it technically impossible to single out individual users. We are also legally bound to respective privacy rights.
+      </p>
+      <p>
+        We can offer to block specific destination IPs and ports, but the abuser doesn't use this server specifically; instead, they will just be routed through a different exit node outside of our control.
+      </p>
+    </section>
+
+    <section>
+      <h3>
+        SPN Node Info
+      </h3>
+      <p>
+        <ul style="list-style: none;">
+          <li>Name: {{ .Name }}</li>
+          <li>Group: {{ .Group }}</li>
+          <li>ContactAddress: {{ .ContactAddress }}</li>
+          <li>ContactService: {{ .ContactService }}</li>
+          <li>Version: {{ .Version }}</li>
+          <li>ID: {{ .ID }}</li>
+          <li>
+            Build:
+            <ul style="list-style: none;">
+              <li>Commit: {{ .Info.Commit }}</li>
+              <li>Host: {{ .Info.BuildHost }}</li>
+              <li>Date: {{ .Info.BuildDate }}</li>
+              <li>Source: {{ .Info.BuildSource }}</li>
+            </ul>
+          </li>
+        </ul>
+      </p>
+    </section>
+  </main>
+</body>
+</html>
diff --git a/spn/ships/http_info_test.go b/spn/ships/http_info_test.go
new file mode 100644
index 00000000..a490dfce
--- /dev/null
+++ b/spn/ships/http_info_test.go
@@ -0,0 +1,26 @@
+package ships
+
+import (
+	"html/template"
+	"testing"
+
+	"github.com/safing/portbase/config"
+)
+
+// TestInfoPageTemplate checks that the embedded info page template parses
+// and renders with sample config values to a non-empty page.
+func TestInfoPageTemplate(t *testing.T) {
+	t.Parallel()
+
+	infoPageTemplate = template.Must(template.New("info-page").Parse(infoPageData))
+	pageInputName = config.Concurrent.GetAsString("spn/publicHub/name", "node-name")
+	pageInputGroup = config.Concurrent.GetAsString("spn/publicHub/group", "node-group")
+	pageInputContactAddress = config.Concurrent.GetAsString("spn/publicHub/contactAddress", "john@doe.com")
+	pageInputContactService = config.Concurrent.GetAsString("spn/publicHub/contactService", "email")
+
+	pageData, err := renderInfoPage()
+	if err != nil {
+		t.Fatal(err)
+	}
+	// Previously the result was discarded; assert that rendering actually
+	// produced output.
+	if len(pageData) == 0 {
+		t.Fatal("rendered info page is empty")
+	}
+}
diff --git a/spn/ships/http_shared.go b/spn/ships/http_shared.go
new file mode 100644
index 00000000..c90504e1
--- /dev/null
+++ b/spn/ships/http_shared.go
@@ -0,0 +1,188 @@
+package ships
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"net"
+	"net/http"
+	"sync"
+	"time"
+
+	"github.com/safing/portbase/log"
+	"github.com/safing/portmaster/spn/conf"
+)
+
+// sharedServer is an HTTP server shared by multiple piers on the same port,
+// dispatching incoming requests to per-path handlers.
+type sharedServer struct {
+	server *http.Server
+
+	// handlers maps URL paths to their registered handler, guarded by handlersLock.
+	handlers     map[string]http.HandlerFunc
+	handlersLock sync.RWMutex
+}
+
+// ServeHTTP forwards requests to the handler registered for the request
+// path, falling back to the info page on "/" and 404 otherwise.
+func (shared *sharedServer) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	// Only a read lock is needed for the map lookup, and releasing it before
+	// calling the handler avoids serializing all requests on this server.
+	shared.handlersLock.RLock()
+	handler, ok := shared.handlers[r.URL.Path]
+	shared.handlersLock.RUnlock()
+
+	// Forward to registered handler.
+	if ok {
+		handler(w, r)
+		return
+	}
+
+	// If there is no registered handler and path is "/", respond with info page.
+	if r.Method == http.MethodGet && r.URL.Path == "/" {
+		ServeInfoPage(w, r)
+		return
+	}
+
+	// Otherwise, respond with error.
+	http.Error(w, "", http.StatusNotFound)
+}
+
+var (
+	sharedHTTPServers     = make(map[uint16]*sharedServer)
+	sharedHTTPServersLock sync.Mutex
+)
+
+// addHTTPHandler registers handler for the given path on the shared HTTP
+// server of the given port, creating and starting the shared server if it
+// does not exist yet. It fails if the path is already registered.
+func addHTTPHandler(port uint16, path string, handler http.HandlerFunc) error {
+	// Check params.
+	if port == 0 {
+		return errors.New("cannot listen on port 0")
+	}
+
+	// Default to root path.
+	if path == "" {
+		path = "/"
+	}
+
+	sharedHTTPServersLock.Lock()
+	defer sharedHTTPServersLock.Unlock()
+
+	// Get http server of the port.
+	shared, ok := sharedHTTPServers[port]
+	if ok {
+		// Set path to handler.
+		shared.handlersLock.Lock()
+		defer shared.handlersLock.Unlock()
+
+		// Check if path is already registered.
+		_, ok := shared.handlers[path]
+		if ok {
+			return errors.New("path already registered")
+		}
+
+		// Else, register handler at path.
+		shared.handlers[path] = handler
+		return nil
+	}
+
+	// Shared server does not exist - create one.
+	shared = &sharedServer{
+		handlers: make(map[string]http.HandlerFunc),
+	}
+
+	// Add first handler.
+	shared.handlers[path] = handler
+
+	// Define new server.
+	server := &http.Server{
+		Addr:              fmt.Sprintf(":%d", port),
+		Handler:           shared,
+		ReadTimeout:       1 * time.Minute,
+		ReadHeaderTimeout: 10 * time.Second,
+		WriteTimeout:      1 * time.Minute,
+		IdleTimeout:       1 * time.Minute,
+		MaxHeaderBytes:    4096,
+		// ErrorLog:          &log.Logger{}, // FIXME
+		BaseContext: func(net.Listener) context.Context { return module.Ctx },
+	}
+	shared.server = server
+
+	// Start listeners.
+	bindIPs := conf.GetBindIPs()
+	listeners := make([]net.Listener, 0, len(bindIPs))
+	for _, bindIP := range bindIPs {
+		listener, err := net.ListenTCP("tcp", &net.TCPAddr{
+			IP:   bindIP,
+			Port: int(port),
+		})
+		if err != nil {
+			// Close listeners that were already opened, so they are not
+			// leaked when registration fails.
+			for _, l := range listeners {
+				_ = l.Close()
+			}
+			return fmt.Errorf("failed to listen: %w", err)
+		}
+
+		listeners = append(listeners, listener)
+		log.Infof("spn/ships: http transport pier established on %s", listener.Addr())
+	}
+
+	// Add shared http server to list.
+	sharedHTTPServers[port] = shared
+
+	// Start servers in service workers.
+	for _, listener := range listeners {
+		serviceListener := listener
+		module.StartServiceWorker(
+			fmt.Sprintf("shared http server listener on %s", listener.Addr()), 0,
+			func(ctx context.Context) error {
+				// http.ErrServerClosed is returned on regular shutdown and
+				// must not be reported as a worker error.
+				err := shared.server.Serve(serviceListener)
+				if !errors.Is(err, http.ErrServerClosed) {
+					return err
+				}
+				return nil
+			},
+		)
+	}
+
+	return nil
+}
+
+// removeHTTPHandler removes the handler for the given path from the shared
+// HTTP server of the given port. When the last handler of a port is removed,
+// the shared server is unregistered and shut down. Removing an unregistered
+// handler is a no-op.
+func removeHTTPHandler(port uint16, path string) error {
+	// Check params.
+	if port == 0 {
+		return nil
+	}
+
+	// Default to root path.
+	if path == "" {
+		path = "/"
+	}
+
+	sharedHTTPServersLock.Lock()
+	defer sharedHTTPServersLock.Unlock()
+
+	// Get http server of the port.
+	shared, ok := sharedHTTPServers[port]
+	if !ok {
+		return nil
+	}
+
+	shared.handlersLock.Lock()
+	defer shared.handlersLock.Unlock()
+
+	// Check if path is registered.
+	_, ok = shared.handlers[path]
+	if !ok {
+		return nil
+	}
+
+	// Remove path from handler.
+	delete(shared.handlers, path)
+
+	// Keep the shared server running as long as other handlers are registered.
+	if len(shared.handlers) > 0 {
+		return nil
+	}
+
+	// No handlers left: unregister the shared server and shut it down.
+	// (Previously the server was removed from the map while handlers
+	// remained and kept in the map when shutting down - inverted logic.)
+	delete(sharedHTTPServers, port)
+	ctx, cancel := context.WithTimeout(
+		context.Background(),
+		10*time.Second,
+	)
+	defer cancel()
+	return shared.server.Shutdown(ctx)
+}
diff --git a/spn/ships/http_shared_test.go b/spn/ships/http_shared_test.go
new file mode 100644
index 00000000..e16ff53d
--- /dev/null
+++ b/spn/ships/http_shared_test.go
@@ -0,0 +1,33 @@
+package ships
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func TestSharedHTTP(t *testing.T) { //nolint:paralleltest // Test checks global state.
+	const testPort = 65100
+
+	// Register multiple handlers.
+	// Note: an empty path defaults to "/" inside addHTTPHandler.
+	err := addHTTPHandler(testPort, "", ServeInfoPage)
+	assert.NoError(t, err, "should be able to share http listener")
+	err = addHTTPHandler(testPort, "/test", ServeInfoPage)
+	assert.NoError(t, err, "should be able to share http listener")
+	err = addHTTPHandler(testPort, "/test2", ServeInfoPage)
+	assert.NoError(t, err, "should be able to share http listener")
+	// "/" collides with the earlier "" registration, which also mapped to "/".
+	err = addHTTPHandler(testPort, "/", ServeInfoPage)
+	assert.Error(t, err, "should fail to register path twice")
+
+	// Unregister
+	assert.NoError(t, removeHTTPHandler(testPort, ""))
+	assert.NoError(t, removeHTTPHandler(testPort, "/test"))
+	assert.NoError(t, removeHTTPHandler(testPort, "/not-registered")) // removing unregistered handler does not error
+	assert.NoError(t, removeHTTPHandler(testPort, "/test2"))
+	assert.NoError(t, removeHTTPHandler(testPort, "/not-registered")) // removing unregistered handler does not error
+
+	// Check if all handlers are gone again.
+	sharedHTTPServersLock.Lock()
+	defer sharedHTTPServersLock.Unlock()
+	assert.Equal(t, 0, len(sharedHTTPServers), "shared http handlers should be back to zero")
+}
diff --git a/spn/ships/kcp.go b/spn/ships/kcp.go
new file mode 100644
index 00000000..88bfb2ad
--- /dev/null
+++ b/spn/ships/kcp.go
@@ -0,0 +1,81 @@
+package ships
+
+// KCPShip is a ship that uses KCP.
+// NOTE: The KCP transport is currently disabled — see the commented-out
+// implementation and the TODO below about finding a replacement.
+type KCPShip struct {
+	ShipBase
+}
+
+// KCPPier is a pier that uses KCP.
+// NOTE: Disabled together with KCPShip; kept so the types stay documented.
+type KCPPier struct {
+	PierBase
+}
+
+// TODO: Find a replacement for kcp, which turned out to not fit our use case.
+/*
+func init() {
+	Register("kcp", &Builder{
+		LaunchShip:    launchKCPShip,
+		EstablishPier: establishKCPPier,
+	})
+}
+
+func launchKCPShip(ctx context.Context, transport *hub.Transport, ip net.IP) (Ship, error) {
+	conn, err := kcp.Dial(net.JoinHostPort(ip.String(), portToA(transport.Port)))
+	if err != nil {
+		return nil, err
+	}
+
+	ship := &KCPShip{
+		ShipBase: ShipBase{
+			conn:      conn,
+			transport: transport,
+			mine:      true,
+			secure:    false,
+			// Calculate KCP's MSS.
+			loadSize: kcp.IKCP_MTU_DEF - kcp.IKCP_OVERHEAD,
+		},
+	}
+
+	ship.initBase()
+	return ship, nil
+}
+
+func establishKCPPier(transport *hub.Transport, dockingRequests chan *DockingRequest) (Pier, error) {
+	listener, err := kcp.Listen(net.JoinHostPort("", portToA(transport.Port)))
+	if err != nil {
+		return nil, err
+	}
+
+	pier := &KCPPier{
+		PierBase: PierBase{
+			transport:       transport,
+			listener:        listener,
+			dockingRequests: dockingRequests,
+		},
+	}
+	pier.PierBase.dockShip = pier.dockShip
+	pier.initBase()
+	return pier, nil
+}
+
+func (pier *KCPPier) dockShip() (Ship, error) {
+	conn, err := pier.listener.Accept()
+	if err != nil {
+		return nil, err
+	}
+
+	ship := &KCPShip{
+		ShipBase: ShipBase{
+			conn:      conn,
+			transport: pier.transport,
+			mine:      false,
+			secure:    false,
+			// Calculate KCP's MSS.
+			loadSize: kcp.IKCP_MTU_DEF - kcp.IKCP_OVERHEAD,
+		},
+	}
+
+	ship.initBase()
+	return ship, nil
+}
+*/
diff --git a/spn/ships/launch.go b/spn/ships/launch.go
new file mode 100644
index 00000000..45a77834
--- /dev/null
+++ b/spn/ships/launch.go
@@ -0,0 +1,114 @@
+package ships
+
+import (
+	"context"
+	"fmt"
+	"net"
+
+	"github.com/safing/portbase/log"
+	"github.com/safing/portmaster/service/netenv"
+	"github.com/safing/portmaster/spn/hub"
+)
+
+// Launch launches a new ship to the given Hub.
+// Either transport or ip may be nil, in which case the Hub's announced
+// transports and verified IPs are used instead. Candidate addresses are
+// tried in priority order (virtual network address first, then IPv4, then
+// IPv6); the first successful connection wins, otherwise the first error
+// encountered is returned.
+func Launch(ctx context.Context, h *hub.Hub, transport *hub.Transport, ip net.IP) (Ship, error) {
+	var transports []*hub.Transport
+	var ips []net.IP
+
+	// choose transports
+	if transport != nil {
+		transports = []*hub.Transport{transport}
+	} else {
+		if h.Info == nil {
+			return nil, hub.ErrMissingInfo
+		}
+		transports = h.Info.ParsedTransports()
+		// If there are no transports, check if they were parsed.
+		if len(transports) == 0 && len(h.Info.Transports) > 0 {
+			log.Errorf("ships: %s has no parsed transports, but transports are %v", h, h.Info.Transports)
+			// Attempt to parse transports now.
+			transports, _ = hub.ParseTransports(h.Info.Transports)
+		}
+		// Fail if there are not transports.
+		if len(transports) == 0 {
+			return nil, hub.ErrMissingTransports
+		}
+	}
+
+	// choose IPs
+	if ip != nil {
+		ips = []net.IP{ip}
+	} else {
+		if h.Info == nil {
+			return nil, hub.ErrMissingInfo
+		}
+		ips = make([]net.IP, 0, 3)
+		// If IPs have been verified, check if we can use a virtual network address.
+		var vnetForced bool
+		if h.VerifiedIPs {
+			vnet := GetVirtualNetworkConfig()
+			if vnet != nil {
+				virtIP := vnet.Mapping[h.ID]
+				if virtIP != nil {
+					ips = append(ips, virtIP)
+					if vnet.Force {
+						vnetForced = true
+						log.Infof("spn/ships: forcing virtual network address %s for %s", virtIP, h)
+					} else {
+						log.Infof("spn/ships: using virtual network address %s for %s", virtIP, h)
+					}
+				}
+			}
+		}
+		// Add Hub's IPs if no virtual address was forced.
+		if !vnetForced {
+			// prioritize IPv4
+			if h.Info.IPv4 != nil {
+				ips = append(ips, h.Info.IPv4)
+			}
+			if h.Info.IPv6 != nil && netenv.IPv6Enabled() {
+				ips = append(ips, h.Info.IPv6)
+			}
+		}
+		if len(ips) == 0 {
+			return nil, hub.ErrMissingIPs
+		}
+	}
+
+	// connect
+	// Try every IP with every transport; stop early on success or when the
+	// context is canceled.
+	var firstErr error
+	for _, ip := range ips {
+		for _, tr := range transports {
+			ship, err := connectTo(ctx, h, tr, ip)
+			if err == nil {
+				return ship, nil // return on success
+			}
+
+			// Check if context is canceled.
+			if ctx.Err() != nil {
+				return nil, ctx.Err()
+			}
+
+			// Save first error.
+			if firstErr == nil {
+				firstErr = err
+			}
+		}
+	}
+
+	return nil, firstErr
+}
+
+// connectTo looks up the builder for the transport's protocol and uses it
+// to launch a ship to the given Hub at the given IP.
+func connectTo(ctx context.Context, h *hub.Hub, transport *hub.Transport, ip net.IP) (Ship, error) {
+	if builder := GetBuilder(transport.Protocol); builder != nil {
+		ship, err := builder.LaunchShip(ctx, transport, ip)
+		if err != nil {
+			return nil, fmt.Errorf("failed to connect to %s using %s (%s): %w", h, transport, ip, err)
+		}
+		return ship, nil
+	}
+	return nil, fmt.Errorf("protocol %s not supported", transport.Protocol)
+}
diff --git a/spn/ships/masking.go b/spn/ships/masking.go
new file mode 100644
index 00000000..76d9fc37
--- /dev/null
+++ b/spn/ships/masking.go
@@ -0,0 +1,63 @@
+package ships
+
+import (
+	"crypto/sha1"
+	"net"
+
+	"github.com/mr-tron/base58"
+	"github.com/tevino/abool"
+)
+
+var (
+	// maskingEnabled guards the one-time initialization of maskingBytes.
+	maskingEnabled = abool.New()
+	// maskingActive signals that maskingBytes is set and may be read.
+	maskingActive  = abool.New()
+	// maskingBytes is the salt mixed into masked values; written once.
+	maskingBytes   []byte
+)
+
+// EnableMasking enables masking with the given salt.
+// Only the first call has any effect; later calls are ignored.
+func EnableMasking(salt []byte) {
+	if maskingEnabled.SetToIf(false, true) {
+		maskingBytes = salt
+		maskingActive.Set()
+	}
+}
+
+// MaskAddress masks the given address if masking is enabled and the ship is
+// not public.
+func (ship *ShipBase) MaskAddress(addr net.Addr) string {
+	// Return in plain if masking is not enabled or if ship is public.
+	if maskingActive.IsNotSet() || ship.Public() {
+		return addr.String()
+	}
+
+	switch typedAddr := addr.(type) {
+	case *net.TCPAddr:
+		return ship.MaskIP(typedAddr.IP)
+	case *net.UDPAddr:
+		return ship.MaskIP(typedAddr.IP)
+	default:
+		return ship.Mask([]byte(addr.String()))
+	}
+}
+
+// MaskIP masks the given IP if masking is enabled and the ship is not public.
+func (ship *ShipBase) MaskIP(ip net.IP) string {
+	// Return in plain if masking is not enabled or if ship is public.
+	if maskingActive.IsNotSet() || ship.Public() {
+		return ip.String()
+	}
+
+	return ship.Mask(ip)
+}
+
+// Mask masks the given value.
+// The result is stable for a given salt, so equal values can still be
+// correlated in logs without revealing the value itself.
+func (ship *ShipBase) Mask(value []byte) string {
+	// Hash the value together with the masking bytes (salt).
+	hasher := sha1.New() //nolint:gosec // Not used for cryptography.
+	hasher.Write(maskingBytes)
+	hasher.Write(value)
+	masked := hasher.Sum(nil)
+
+	// Return first 8 characters from the base58-encoded hash.
+	return "masked:" + base58.Encode(masked)[:8]
+}
diff --git a/spn/ships/module.go b/spn/ships/module.go
new file mode 100644
index 00000000..d450185e
--- /dev/null
+++ b/spn/ships/module.go
@@ -0,0 +1,20 @@
+package ships
+
+import (
+	"github.com/safing/portbase/modules"
+	"github.com/safing/portmaster/spn/conf"
+)
+
+// module is the ships module instance; it depends on the "cabin" module.
+var module *modules.Module
+
+func init() {
+	module = modules.Register("ships", start, nil, nil, "cabin")
+}
+
+// start prepares the info page input, which is only needed on public hubs.
+func start() error {
+	if conf.PublicHub() {
+		initPageInput()
+	}
+
+	return nil
+}
diff --git a/spn/ships/mtu.go b/spn/ships/mtu.go
new file mode 100644
index 00000000..07bb1a14
--- /dev/null
+++ b/spn/ships/mtu.go
@@ -0,0 +1,47 @@
+package ships
+
+import "net"
+
+// MTU Calculation Configuration.
+// These values are subtracted from the base MTU to derive the usable
+// payload ("load") size of a ship; see calculateLoadSize.
+const (
+	BaseMTU           = 1460 // 1500 with 40 bytes extra space for special cases.
+	IPv4HeaderMTUSize = 20   // Without options, as not common.
+	IPv6HeaderMTUSize = 40   // Without options, as not common.
+	TCPHeaderMTUSize  = 60   // Maximum size with options.
+	UDPHeaderMTUSize  = 8    // Has no options.
+)
+
+// calculateLoadSize derives the ship's usable payload size from the base
+// MTU, the IP version (taken from ip, or from addr if ip is nil) and any
+// additional header sizes given in subtract (e.g. TCPHeaderMTUSize).
+// It also raises bufSize to at least the calculated load size.
+func (ship *ShipBase) calculateLoadSize(ip net.IP, addr net.Addr, subtract ...int) {
+	ship.loadSize = BaseMTU
+
+	// Convert addr to IP if needed.
+	if ip == nil && addr != nil {
+		switch v := addr.(type) {
+		case *net.TCPAddr:
+			ip = v.IP
+		case *net.UDPAddr:
+			ip = v.IP
+		case *net.IPAddr:
+			ip = v.IP
+		}
+	}
+
+	// Subtract IP Header, if IP is available.
+	if ip != nil {
+		if ip.To4() != nil {
+			ship.loadSize -= IPv4HeaderMTUSize
+		} else {
+			ship.loadSize -= IPv6HeaderMTUSize
+		}
+	}
+
+	// Subtract the given extra header sizes.
+	// BUG FIX: the original ranged over the indices (0, 1, 2, ...) of
+	// subtract instead of its values, so the given header sizes were
+	// effectively ignored.
+	for _, sub := range subtract {
+		ship.loadSize -= sub
+	}
+
+	// Raise buf size to at least load size.
+	if ship.bufSize < ship.loadSize {
+		ship.bufSize = ship.loadSize
+	}
+}
diff --git a/spn/ships/pier.go b/spn/ships/pier.go
new file mode 100644
index 00000000..78483bf4
--- /dev/null
+++ b/spn/ships/pier.go
@@ -0,0 +1,82 @@
+package ships
+
+import (
+	"fmt"
+	"net"
+
+	"github.com/tevino/abool"
+
+	"github.com/safing/portmaster/spn/hub"
+)
+
+// Pier represents a network connection listener.
+type Pier interface {
+	// String returns a human readable informational summary about the ship.
+	String() string
+
+	// Transport returns the transport used for this ship.
+	Transport() *hub.Transport
+
+	// Abolish closes the underlying listener and cleans up any related resources.
+	Abolish()
+}
+
+// DockingRequest is a uniform request that Piers emit when a new ship arrives.
+// NOTE(review): EstablishPier currently takes a plain `chan Ship` — verify
+// whether this wrapper type is still needed or is a leftover.
+type DockingRequest struct {
+	Pier Pier
+	Ship Ship
+	Err  error
+}
+
+// EstablishPier is shorthand function to get the transport's builder and
+// establish a pier that reports arriving ships on dockingRequests.
+func EstablishPier(transport *hub.Transport, dockingRequests chan Ship) (Pier, error) {
+	if builder := GetBuilder(transport.Protocol); builder != nil {
+		pier, err := builder.EstablishPier(transport, dockingRequests)
+		if err != nil {
+			return nil, fmt.Errorf("failed to establish pier on %s: %w", transport, err)
+		}
+		return pier, nil
+	}
+	return nil, fmt.Errorf("protocol %s not supported", transport.Protocol)
+}
+
+// PierBase implements common functions to comply with the Pier interface.
+type PierBase struct {
+	// transport holds the transport definition of the pier.
+	transport *hub.Transport
+	// listeners holds the actual underlying listeners.
+	listeners []net.Listener
+
+	// dockingRequests is used to report new connections to the higher layer.
+	dockingRequests chan Ship
+
+	// abolishing specifies if the pier and listener is being closed.
+	abolishing *abool.AtomicBool
+}
+
+// initBase initializes the pier's internal state. It must be called by
+// every transport after the PierBase fields are populated.
+func (pier *PierBase) initBase() {
+	// init
+	pier.abolishing = abool.New()
+}
+
+// String returns a human readable informational summary about the ship.
+func (pier *PierBase) String() string {
+	return fmt.Sprintf("<Pier %s>", pier.transport)
+}
+
+// Transport returns the transport used for this ship.
+func (pier *PierBase) Transport() *hub.Transport {
+	return pier.transport
+}
+
+// Abolish closes the underlying listener and cleans up any related resources.
+// It is idempotent: only the first call closes the listeners.
+func (pier *PierBase) Abolish() {
+	if pier.abolishing.SetToIf(false, true) {
+		for _, listener := range pier.listeners {
+			_ = listener.Close()
+		}
+	}
+}
diff --git a/spn/ships/registry.go b/spn/ships/registry.go
new file mode 100644
index 00000000..5d3abba7
--- /dev/null
+++ b/spn/ships/registry.go
@@ -0,0 +1,55 @@
+package ships
+
+import (
+	"context"
+	"net"
+	"strconv"
+	"sync"
+
+	"github.com/safing/portmaster/spn/hub"
+)
+
+// Builder is a factory that can build ships and piers of it's protocol.
+// LaunchShip dials out to a remote pier; EstablishPier listens for
+// incoming ships and reports them on the given channel.
+type Builder struct {
+	LaunchShip    func(ctx context.Context, transport *hub.Transport, ip net.IP) (Ship, error)
+	EstablishPier func(transport *hub.Transport, dockingRequests chan Ship) (Pier, error)
+}
+
+var (
+	// registry maps protocol names to their builders.
+	registry     = make(map[string]*Builder)
+	// allProtocols lists every registered protocol name; see Protocols().
+	allProtocols []string
+	// registryLock guards registry and allProtocols.
+	registryLock sync.Mutex
+)
+
+// Register registers a new builder for a protocol.
+// Registering the same protocol again replaces the previous builder.
+func Register(protocol string, builder *Builder) {
+	registryLock.Lock()
+	defer registryLock.Unlock()
+
+	// BUG FIX: record the protocol name so that Protocols() reflects all
+	// registrations. Previously nothing appended to allProtocols, so
+	// Protocols() always returned an empty slice.
+	if _, ok := registry[protocol]; !ok {
+		allProtocols = append(allProtocols, protocol)
+	}
+	registry[protocol] = builder
+}
+
+// GetBuilder returns the builder for the given protocol, or nil if it does not exist.
+func GetBuilder(protocol string) *Builder {
+	registryLock.Lock()
+	defer registryLock.Unlock()
+
+	// A missing key yields the map's zero value, which is nil for a pointer.
+	return registry[protocol]
+}
+
+// Protocols returns a slice with all registered protocol names. The return slice must not be edited.
+// NOTE(review): nothing in this file appends to allProtocols — verify that
+// registration records the protocol name, or this always returns nil.
+func Protocols() []string {
+	registryLock.Lock()
+	defer registryLock.Unlock()
+
+	return allProtocols
+}
+
+// portToA transforms the given port into its decimal string representation.
+func portToA(port uint16) string {
+	return strconv.Itoa(int(port))
+}
diff --git a/spn/ships/ship.go b/spn/ships/ship.go
new file mode 100644
index 00000000..4bb39b0e
--- /dev/null
+++ b/spn/ships/ship.go
@@ -0,0 +1,220 @@
+package ships
+
+import (
+	"errors"
+	"fmt"
+	"net"
+
+	"github.com/tevino/abool"
+
+	"github.com/safing/portbase/log"
+	"github.com/safing/portmaster/spn/hub"
+)
+
+const (
+	// defaultLoadSize is used when a transport does not set its own load size.
+	defaultLoadSize = 4096
+)
+
+// ErrSunk is returned when a ship sunk, ie. the connection was lost.
+var ErrSunk = errors.New("ship sunk")
+
+// Ship represents a network layer connection.
+type Ship interface {
+	// String returns a human readable informational summary about the ship.
+	String() string
+
+	// Transport returns the transport used for this ship.
+	Transport() *hub.Transport
+
+	// IsMine returns whether the ship was launched from here.
+	IsMine() bool
+
+	// IsSecure returns whether the ship provides transport security.
+	IsSecure() bool
+
+	// Public returns whether the ship is marked as public.
+	Public() bool
+
+	// MarkPublic marks the ship as public.
+	MarkPublic()
+
+	// LoadSize returns the recommended data size that should be handed to Load().
+	// This value will be most likely somehow related to the connection's MTU.
+	// Alternatively, using a multiple of LoadSize is also recommended.
+	LoadSize() int
+
+	// Load loads data into the ship - ie. sends the data via the connection.
+	// Returns ErrSunk if the ship has already sunk earlier.
+	Load(data []byte) error
+
+	// UnloadTo unloads data from the ship - ie. receives data from the
+	// connection - puts it into the buf. It returns the amount of data
+	// written and an optional error.
+	// Returns ErrSunk if the ship has already sunk earlier.
+	UnloadTo(buf []byte) (n int, err error)
+
+	// LocalAddr returns the underlying local net.Addr of the connection.
+	LocalAddr() net.Addr
+
+	// RemoteAddr returns the underlying remote net.Addr of the connection.
+	RemoteAddr() net.Addr
+
+	// Sink closes the underlying connection and cleans up any related resources.
+	Sink()
+
+	// MaskAddress masks the address, if enabled.
+	MaskAddress(addr net.Addr) string
+	// MaskIP masks an IP, if enabled.
+	MaskIP(ip net.IP) string
+	// Mask masks a value.
+	Mask(value []byte) string
+}
+
+// ShipBase implements common functions to comply with the Ship interface.
+type ShipBase struct {
+	// conn is the actual underlying connection.
+	conn net.Conn
+	// transport holds the transport definition of the ship.
+	transport *hub.Transport
+
+	// mine specifies whether the ship was launched from here.
+	mine bool
+	// secure specifies whether the ship provides transport security.
+	secure bool
+	// public specifies whether the ship is public.
+	public *abool.AtomicBool
+	// bufSize specifies the size of the receive buffer.
+	bufSize int
+	// loadSize specifies the recommended data size that should be handed to Load().
+	loadSize int
+
+	// initial holds initial data from setting up the ship.
+	initial []byte
+	// sinking specifies if the connection is being closed.
+	sinking *abool.AtomicBool
+}
+
+// initBase initializes the state flags and applies default load/buffer
+// sizes. It must be called by every transport after the ShipBase fields
+// are populated.
+func (ship *ShipBase) initBase() {
+	// init
+	ship.sinking = abool.New()
+	ship.public = abool.New()
+
+	// set default
+	if ship.loadSize == 0 {
+		ship.loadSize = defaultLoadSize
+	}
+	if ship.bufSize == 0 {
+		ship.bufSize = ship.loadSize
+	}
+}
+
+// String returns a human readable informational summary about the ship,
+// including its (possibly masked) remote address and transport.
+func (ship *ShipBase) String() string {
+	direction := "from"
+	if ship.mine {
+		direction = "to"
+	}
+	return fmt.Sprintf("<Ship %s %s using %s>", direction, ship.MaskAddress(ship.RemoteAddr()), ship.transport)
+}
+
+// Transport returns the transport used for this ship.
+func (ship *ShipBase) Transport() *hub.Transport {
+	return ship.transport
+}
+
+// IsMine returns whether the ship was launched from here.
+func (ship *ShipBase) IsMine() bool {
+	return ship.mine
+}
+
+// IsSecure returns whether the ship provides transport security.
+func (ship *ShipBase) IsSecure() bool {
+	return ship.secure
+}
+
+// Public returns whether the ship is marked as public.
+// Safe for concurrent use (backed by an atomic bool).
+func (ship *ShipBase) Public() bool {
+	return ship.public.IsSet()
+}
+
+// MarkPublic marks the ship as public.
+// Safe for concurrent use (backed by an atomic bool).
+func (ship *ShipBase) MarkPublic() {
+	ship.public.Set()
+}
+
+// LoadSize returns the recommended data size that should be handed to Load().
+// This value will be most likely somehow related to the connection's MTU.
+// Alternatively, using a multiple of LoadSize is also recommended.
+func (ship *ShipBase) LoadSize() int {
+	return ship.loadSize
+}
+
+// Load loads data into the ship - ie. sends the data via the connection.
+// An empty load is a signal to cease operation and closes the connection.
+// Returns ErrSunk if the ship has already sunk earlier.
+func (ship *ShipBase) Load(data []byte) error {
+	// Empty load is used as a signal to cease operation.
+	if len(data) == 0 {
+		if ship.sinking.SetToIf(false, true) {
+			_ = ship.conn.Close()
+		}
+		return nil
+	}
+
+	// Send all given data, retrying on partial writes.
+	for len(data) > 0 {
+		n, err := ship.conn.Write(data)
+		switch {
+		case err != nil:
+			return err
+		case n == 0:
+			return errors.New("loaded 0 bytes")
+		case n < len(data):
+			log.Debugf("spn/ships: %s only loaded %d/%d bytes", ship, n, len(data))
+		}
+		data = data[n:]
+	}
+
+	return nil
+}
+
+// UnloadTo unloads data from the ship - ie. receives data from the
+// connection - puts it into the buf. It returns the amount of data
+// written and an optional error.
+// Any buffered initial data (from ship setup) is drained before reading
+// from the connection.
+// Returns ErrSunk if the ship has already sunk earlier.
+func (ship *ShipBase) UnloadTo(buf []byte) (n int, err error) {
+	// Process initial data, if there is any.
+	if ship.initial != nil {
+		// Copy as much data as possible.
+		copy(buf, ship.initial)
+
+		// If buf was too small, skip the copied section.
+		if len(buf) < len(ship.initial) {
+			ship.initial = ship.initial[len(buf):]
+			return len(buf), nil
+		}
+
+		// If everything was copied, unset the initial data.
+		n := len(ship.initial)
+		ship.initial = nil
+		return n, nil
+	}
+
+	// Receive data.
+	return ship.conn.Read(buf)
+}
+
+// LocalAddr returns the underlying local net.Addr of the connection.
+func (ship *ShipBase) LocalAddr() net.Addr {
+	return ship.conn.LocalAddr()
+}
+
+// RemoteAddr returns the underlying remote net.Addr of the connection.
+func (ship *ShipBase) RemoteAddr() net.Addr {
+	return ship.conn.RemoteAddr()
+}
+
+// Sink closes the underlying connection and cleans up any related resources.
+// It is idempotent: only the first call closes the connection.
+func (ship *ShipBase) Sink() {
+	if ship.sinking.SetToIf(false, true) {
+		_ = ship.conn.Close()
+	}
+}
diff --git a/spn/ships/tcp.go b/spn/ships/tcp.go
new file mode 100644
index 00000000..5ffd5b90
--- /dev/null
+++ b/spn/ships/tcp.go
@@ -0,0 +1,145 @@
+package ships
+
+import (
+	"context"
+	"fmt"
+	"net"
+	"time"
+
+	"github.com/safing/portbase/log"
+	"github.com/safing/portmaster/spn/conf"
+	"github.com/safing/portmaster/spn/hub"
+)
+
+// TCPShip is a ship that uses TCP.
+type TCPShip struct {
+	ShipBase
+}
+
+// TCPPier is a pier that uses TCP.
+type TCPPier struct {
+	PierBase
+
+	// ctx/cancelCtx bound the lifetime of the docking workers.
+	ctx       context.Context
+	cancelCtx context.CancelFunc
+}
+
+func init() {
+	// Register the TCP transport builder under the "tcp" protocol name.
+	Register("tcp", &Builder{
+		LaunchShip:    launchTCPShip,
+		EstablishPier: establishTCPPier,
+	})
+}
+
+// launchTCPShip dials the given IP and transport port and wraps the
+// resulting connection in a TCPShip.
+func launchTCPShip(ctx context.Context, transport *hub.Transport, ip net.IP) (Ship, error) {
+	var dialNet string
+	if ip4 := ip.To4(); ip4 != nil {
+		dialNet = "tcp4"
+	} else {
+		dialNet = "tcp6"
+	}
+	dialer := &net.Dialer{
+		Timeout:       30 * time.Second,
+		LocalAddr:     conf.GetBindAddr(dialNet),
+		FallbackDelay: -1, // Disables Fast Fallback from IPv6 to IPv4.
+		KeepAlive:     -1, // Disable keep-alive.
+	}
+	conn, err := dialer.DialContext(ctx, dialNet, net.JoinHostPort(ip.String(), portToA(transport.Port)))
+	if err != nil {
+		return nil, fmt.Errorf("failed to connect: %w", err)
+	}
+
+	ship := &TCPShip{
+		ShipBase: ShipBase{
+			conn:      conn,
+			transport: transport,
+			mine:      true,
+			secure:    false,
+		},
+	}
+
+	// Account for the TCP header when deriving the usable payload size.
+	ship.calculateLoadSize(ip, nil, TCPHeaderMTUSize)
+	ship.initBase()
+	return ship, nil
+}
+
+// establishTCPPier listens on all configured bind IPs and starts one
+// docking worker per listener.
+func establishTCPPier(transport *hub.Transport, dockingRequests chan Ship) (Pier, error) {
+	// Start listeners.
+	bindIPs := conf.GetBindIPs()
+	listeners := make([]net.Listener, 0, len(bindIPs))
+	for _, bindIP := range bindIPs {
+		listener, err := net.ListenTCP("tcp", &net.TCPAddr{
+			IP:   bindIP,
+			Port: int(transport.Port),
+		})
+		if err != nil {
+			return nil, fmt.Errorf("failed to listen: %w", err)
+		}
+
+		listeners = append(listeners, listener)
+		log.Infof("spn/ships: tcp transport pier established on %s", listener.Addr())
+	}
+
+	// Create new pier.
+	pierCtx, cancelCtx := context.WithCancel(module.Ctx)
+	pier := &TCPPier{
+		PierBase: PierBase{
+			transport:       transport,
+			listeners:       listeners,
+			dockingRequests: dockingRequests,
+		},
+		ctx:       pierCtx,
+		cancelCtx: cancelCtx,
+	}
+	pier.initBase()
+
+	// Start workers.
+	for _, listener := range pier.listeners {
+		// Capture the loop variable for the worker closure.
+		serviceListener := listener
+		module.StartServiceWorker("accept TCP docking requests", 0, func(ctx context.Context) error {
+			return pier.dockingWorker(ctx, serviceListener)
+		})
+	}
+
+	return pier, nil
+}
+
+// dockingWorker accepts connections from the given listener and submits
+// them as new ships on the pier's docking requests channel until the
+// pier context is canceled.
+func (pier *TCPPier) dockingWorker(_ context.Context, listener net.Listener) error {
+	for {
+		// Block until something happens.
+		conn, err := listener.Accept()
+
+		// Check for errors.
+		// A canceled pier context takes precedence over the accept error.
+		switch {
+		case pier.ctx.Err() != nil:
+			return pier.ctx.Err()
+		case err != nil:
+			return err
+		}
+
+		// Create new ship.
+		ship := &TCPShip{
+			ShipBase: ShipBase{
+				transport: pier.transport,
+				conn:      conn,
+				mine:      false,
+				secure:    false,
+			},
+		}
+		ship.calculateLoadSize(nil, conn.RemoteAddr(), TCPHeaderMTUSize)
+		ship.initBase()
+
+		// Submit new docking request.
+		select {
+		case pier.dockingRequests <- ship:
+		case <-pier.ctx.Done():
+			return pier.ctx.Err()
+		}
+	}
+}
+
+// Abolish closes the underlying listener and cleans up any related resources.
+// The docking workers are canceled first, then the listeners are closed.
+func (pier *TCPPier) Abolish() {
+	pier.cancelCtx()
+	pier.PierBase.Abolish()
+}
diff --git a/spn/ships/testship.go b/spn/ships/testship.go
new file mode 100644
index 00000000..6ec74b6e
--- /dev/null
+++ b/spn/ships/testship.go
@@ -0,0 +1,154 @@
+package ships
+
+import (
+	"net"
+
+	"github.com/mr-tron/base58"
+	"github.com/tevino/abool"
+
+	"github.com/safing/portmaster/spn/hub"
+)
+
+// TestShip is a simulated ship that is used for testing higher level components.
+// Data flows through two buffered in-memory channels instead of a network
+// connection; Reverse() creates the connected counterpart.
+type TestShip struct {
+	mine      bool
+	secure    bool
+	loadSize  int
+	forward   chan []byte
+	backward  chan []byte
+	unloadTmp []byte
+	sinking   *abool.AtomicBool
+}
+
+// NewTestShip returns a new TestShip for simulation.
+// Both directions are buffered with 100 slots so tests do not block.
+func NewTestShip(secure bool, loadSize int) *TestShip {
+	return &TestShip{
+		mine:     true,
+		secure:   secure,
+		loadSize: loadSize,
+		forward:  make(chan []byte, 100),
+		backward: make(chan []byte, 100),
+		sinking:  abool.NewBool(false),
+	}
+}
+
+// String returns a human readable informational summary about the ship.
+func (ship *TestShip) String() string {
+	direction := "inbound"
+	if ship.mine {
+		direction = "outbound"
+	}
+	return "<TestShip " + direction + ">"
+}
+
+// Transport returns the transport used for this ship.
+// TestShips have no real transport, so a dummy protocol is reported.
+func (ship *TestShip) Transport() *hub.Transport {
+	return &hub.Transport{Protocol: "dummy"}
+}
+
+// IsMine returns whether the ship was launched from here.
+func (ship *TestShip) IsMine() bool {
+	return ship.mine
+}
+
+// IsSecure returns whether the ship provides transport security.
+func (ship *TestShip) IsSecure() bool {
+	return ship.secure
+}
+
+// LoadSize returns the recommended data size that should be handed to Load().
+// This value will be most likely somehow related to the connection's MTU.
+// Alternatively, using a multiple of LoadSize is also recommended.
+func (ship *TestShip) LoadSize() int {
+	return ship.loadSize
+}
+
+// Reverse creates a connected TestShip. This is used to simulate a connection instead of using a Pier.
+// The returned ship swaps the forward/backward channels, so data loaded on
+// one side is unloaded on the other.
+func (ship *TestShip) Reverse() *TestShip {
+	return &TestShip{
+		mine:     !ship.mine,
+		secure:   ship.secure,
+		loadSize: ship.loadSize,
+		forward:  ship.backward,
+		backward: ship.forward,
+		sinking:  abool.NewBool(false),
+	}
+}
+
+// Load loads data into the ship - ie. sends the data via the connection.
+// Returns ErrSunk if the ship has already sunk earlier.
+func (ship *TestShip) Load(data []byte) error {
+	// Debugging:
+	// log.Debugf("spn/ship: loading %s", spew.Sdump(data))
+
+	// Check if ship is alive.
+	if ship.sinking.IsSet() {
+		return ErrSunk
+	}
+
+	// Empty load is used as a signal to cease operation.
+	if len(data) == 0 {
+		ship.Sink()
+		return nil
+	}
+
+	// Send all given data.
+	// Note: this blocks once the 100-slot channel buffer is full.
+	ship.forward <- data
+
+	return nil
+}
+
+// UnloadTo unloads data from the ship - ie. receives data from the
+// connection - puts it into the buf. It returns the amount of data
+// written and an optional error.
+// A receive from the closed channel yields empty data, which is reported
+// as ErrSunk.
+// Returns ErrSunk if the ship has already sunk earlier.
+func (ship *TestShip) UnloadTo(buf []byte) (n int, err error) {
+	// Process unload tmp data, if there is any.
+	if ship.unloadTmp != nil {
+		// Copy as much data as possible.
+		copy(buf, ship.unloadTmp)
+
+		// If buf was too small, skip the copied section.
+		if len(buf) < len(ship.unloadTmp) {
+			ship.unloadTmp = ship.unloadTmp[len(buf):]
+			return len(buf), nil
+		}
+
+		// If everything was copied, unset the unloadTmp data.
+		n := len(ship.unloadTmp)
+		ship.unloadTmp = nil
+		return n, nil
+	}
+
+	// Receive data.
+	data := <-ship.backward
+	if len(data) == 0 {
+		return 0, ErrSunk
+	}
+
+	// Copy data, possibly save remainder for later.
+	copy(buf, data)
+	if len(buf) < len(data) {
+		ship.unloadTmp = data[len(buf):]
+		return len(buf), nil
+	}
+	return len(data), nil
+}
+
+// Sink closes the underlying connection and cleans up any related resources.
+// Only the sending side's channel is closed; the first call wins.
+func (ship *TestShip) Sink() {
+	if ship.sinking.SetToIf(false, true) {
+		close(ship.forward)
+	}
+}
+
+// Dummy methods to conform to interface for testing.
+// These return fixed or pass-through values; masking is a no-op here.
+
+func (ship *TestShip) LocalAddr() net.Addr              { return nil }                  //nolint:golint
+func (ship *TestShip) RemoteAddr() net.Addr             { return nil }                  //nolint:golint
+func (ship *TestShip) Public() bool                     { return true }                 //nolint:golint
+func (ship *TestShip) MarkPublic()                      {}                              //nolint:golint
+func (ship *TestShip) MaskAddress(addr net.Addr) string { return addr.String() }        //nolint:golint
+func (ship *TestShip) MaskIP(ip net.IP) string          { return ip.String() }          //nolint:golint
+func (ship *TestShip) Mask(value []byte) string         { return base58.Encode(value) } //nolint:golint
diff --git a/spn/ships/testship_test.go b/spn/ships/testship_test.go
new file mode 100644
index 00000000..7e026b92
--- /dev/null
+++ b/spn/ships/testship_test.go
@@ -0,0 +1,58 @@
+package ships
+
+import (
+	"fmt"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+// TestTestShip round-trips data in both directions through a TestShip pair.
+// testData and getTestBuf are defined elsewhere in this package — presumably
+// a fixed payload and a matching-size buffer; verify in sibling test files.
+func TestTestShip(t *testing.T) {
+	t.Parallel()
+
+	tShip := NewTestShip(true, 100)
+
+	// interface conformance test
+	var ship Ship = tShip
+
+	srvShip := tShip.Reverse()
+
+	for i := 0; i < 100; i++ {
+		// client send
+		err := ship.Load(testData)
+		if err != nil {
+			t.Fatalf("%s failed: %s", ship, err)
+		}
+
+		// server recv
+		buf := getTestBuf()
+		_, err = srvShip.UnloadTo(buf)
+		if err != nil {
+			t.Fatalf("%s failed: %s", ship, err)
+		}
+
+		// check data
+		assert.Equal(t, testData, buf, "should match")
+		fmt.Print(".")
+
+		// server send
+		err = srvShip.Load(testData)
+		if err != nil {
+			t.Fatalf("%s failed: %s", ship, err)
+		}
+
+		// client recv
+		buf = getTestBuf()
+		_, err = ship.UnloadTo(buf)
+		if err != nil {
+			t.Fatalf("%s failed: %s", ship, err)
+		}
+
+		// check data
+		assert.Equal(t, testData, buf, "should match")
+		fmt.Print(".")
+	}
+
+	ship.Sink()
+	srvShip.Sink()
+}
diff --git a/spn/ships/virtual_network.go b/spn/ships/virtual_network.go
new file mode 100644
index 00000000..314112ef
--- /dev/null
+++ b/spn/ships/virtual_network.go
@@ -0,0 +1,43 @@
+package ships
+
+import (
+	"net"
+	"sync"
+
+	"github.com/safing/portmaster/spn/hub"
+)
+
+var (
+	// virtNetLock guards virtNetConfig.
+	virtNetLock   sync.Mutex
+	virtNetConfig *hub.VirtualNetworkConfig
+)
+
+// SetVirtualNetworkConfig sets the virtual networking config.
+func SetVirtualNetworkConfig(config *hub.VirtualNetworkConfig) {
+	virtNetLock.Lock()
+	defer virtNetLock.Unlock()
+
+	virtNetConfig = config
+}
+
+// GetVirtualNetworkConfig returns the virtual networking config.
+// NOTE(review): this returns the shared pointer — callers must treat the
+// config as read-only.
+func GetVirtualNetworkConfig() *hub.VirtualNetworkConfig {
+	virtNetLock.Lock()
+	defer virtNetLock.Unlock()
+
+	return virtNetConfig
+}
+
+// GetVirtualNetworkAddress returns the virtual network IP for the given Hub.
+// It returns nil if no config is set or the Hub has no mapping.
+func GetVirtualNetworkAddress(dstHubID string) net.IP {
+	virtNetLock.Lock()
+	defer virtNetLock.Unlock()
+
+	// Check if we have a virtual network config.
+	if virtNetConfig == nil {
+		return nil
+	}
+
+	// Return mapping for given Hub ID.
+	return virtNetConfig.Mapping[dstHubID]
+}
diff --git a/spn/sluice/module.go b/spn/sluice/module.go
new file mode 100644
index 00000000..63f1d2e0
--- /dev/null
+++ b/spn/sluice/module.go
@@ -0,0 +1,46 @@
+package sluice
+
+import (
+	"github.com/safing/portbase/log"
+	"github.com/safing/portbase/modules"
+	"github.com/safing/portmaster/service/netenv"
+	"github.com/safing/portmaster/spn/conf"
+)
+
+var (
+	module *modules.Module
+
+	// entrypointInfoMsg is sent to connections that reach the entry port
+	// but cannot be matched to an SPN tunnel.
+	entrypointInfoMsg = []byte("You have reached the local SPN entry port, but your connection could not be matched to an SPN tunnel.\n")
+
+	// EnableListener indicates if it should start the sluice listeners. Must be set at startup.
+	EnableListener bool = true
+)
+
+func init() {
+	module = modules.Register("sluice", nil, start, stop, "terminal")
+}
+
+// start launches the client-side sluice listeners on port 717 for TCP and
+// UDP, on IPv4 and - if the network stack supports it - IPv6.
+func start() error {
+	// TODO:
+	// Listening on all interfaces for now, as we need this for Windows.
+	// Handle similarly to the nameserver listener.
+
+	if conf.Client() && EnableListener {
+		StartSluice("tcp4", "0.0.0.0:717")
+		StartSluice("udp4", "0.0.0.0:717")
+
+		if netenv.IPv6Enabled() {
+			StartSluice("tcp6", "[::]:717")
+			StartSluice("udp6", "[::]:717")
+		} else {
+			log.Warningf("spn/sluice: no IPv6 stack detected, disabling IPv6 SPN entry endpoints")
+		}
+	}
+
+	return nil
+}
+
+// stop shuts down all running sluices.
+func stop() error {
+	stopAllSluices()
+	return nil
+}
diff --git a/spn/sluice/packet_listener.go b/spn/sluice/packet_listener.go
new file mode 100644
index 00000000..3eb64cbb
--- /dev/null
+++ b/spn/sluice/packet_listener.go
@@ -0,0 +1,277 @@
+package sluice
+
+import (
+	"context"
+	"io"
+	"net"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"github.com/tevino/abool"
+)
+
+// PacketListener is a listener for packet based protocols.
+// It multiplexes a single packet socket into per-remote-address virtual
+// connections that satisfy net.Conn.
+type PacketListener struct {
+	sock     net.PacketConn    // shared underlying socket
+	closed   *abool.AtomicBool // set once by Close
+	newConns chan *PacketConn  // handed to Accept by the reader worker
+
+	lock  sync.Mutex
+	conns map[string]*PacketConn // keyed by remote address string
+	err   error                  // first socket error, reported by Accept
+}
+
+// ListenPacket creates a packet listener.
+// It also starts the reader and cleaner service workers on the module.
+func ListenPacket(network, address string) (net.Listener, error) {
+	// Create a new listening packet socket.
+	sock, err := net.ListenPacket(network, address)
+	if err != nil {
+		return nil, err
+	}
+
+	// Create listener and start workers.
+	ln := &PacketListener{
+		sock:     sock,
+		closed:   abool.New(),
+		newConns: make(chan *PacketConn),
+		conns:    make(map[string]*PacketConn),
+	}
+	module.StartServiceWorker("packet listener reader", 0, ln.reader)
+	module.StartServiceWorker("packet listener cleaner", time.Minute, ln.cleaner)
+
+	return ln, nil
+}
+
+// Accept waits for and returns the next connection to the listener.
+func (ln *PacketListener) Accept() (net.Conn, error) {
+	// A nil conn means newConns was closed by Close.
+	conn := <-ln.newConns
+	if conn != nil {
+		return conn, nil
+	}
+
+	// Check if there is a socket error.
+	ln.lock.Lock()
+	defer ln.lock.Unlock()
+	if ln.err != nil {
+		return nil, ln.err
+	}
+
+	return nil, io.EOF
+}
+
+// Close closes the listener.
+// Any blocked Accept operations will be unblocked and return errors.
+func (ln *PacketListener) Close() error {
+	// Only run the close sequence once.
+	if !ln.closed.SetToIf(false, true) {
+		return nil
+	}
+
+	// Close all channels.
+	// NOTE(review): the reader worker may still be sending on newConns or
+	// select-sending on conn.in at this point; sending on a closed channel
+	// panics — confirm the reader has exited (via socket error) before this
+	// can race, or guard these sends.
+	close(ln.newConns)
+	ln.lock.Lock()
+	defer ln.lock.Unlock()
+	for _, conn := range ln.conns {
+		close(conn.in)
+	}
+
+	// Close socket.
+	return ln.sock.Close()
+}
+
+// Addr returns the listener's network address.
+func (ln *PacketListener) Addr() net.Addr {
+	return ln.sock.LocalAddr()
+}
+
+// getConn returns the virtual connection for the given remote address, if any.
+func (ln *PacketListener) getConn(remoteAddr string) (conn *PacketConn, ok bool) {
+	ln.lock.Lock()
+	defer ln.lock.Unlock()
+
+	conn, ok = ln.conns[remoteAddr]
+	return
+}
+
+// setConn registers the virtual connection under its remote address.
+func (ln *PacketListener) setConn(conn *PacketConn) {
+	ln.lock.Lock()
+	defer ln.lock.Unlock()
+
+	ln.conns[conn.addr.String()] = conn
+}
+
+// reader is a service worker that reads packets from the shared socket and
+// dispatches them to per-remote-address connections, creating new virtual
+// connections on demand. It exits (and closes the listener) on the first
+// socket error.
+func (ln *PacketListener) reader(_ context.Context) error {
+	for {
+		// Read data from connection.
+		// A fresh buffer per read, as ownership is handed to the conn.
+		buf := make([]byte, 512)
+		n, addr, err := ln.sock.ReadFrom(buf)
+		if err != nil {
+			// Set socket error.
+			ln.lock.Lock()
+			ln.err = err
+			ln.lock.Unlock()
+			// Close and return
+			_ = ln.Close()
+			return nil //nolint:nilerr
+		}
+		buf = buf[:n]
+
+		// Get connection and supply data.
+		conn, ok := ln.getConn(addr.String())
+		if ok {
+			// Ignore if conn is closed.
+			if conn.closed.IsSet() {
+				continue
+			}
+
+			// Non-blocking send: the packet is dropped if the conn's
+			// one-slot buffer is already full.
+			select {
+			case conn.in <- buf:
+			default:
+			}
+			continue
+		}
+
+		// Or create a new connection.
+		conn = &PacketConn{
+			ln:            ln,
+			addr:          addr,
+			closed:        abool.New(),
+			closing:       make(chan struct{}),
+			buf:           buf,
+			in:            make(chan []byte, 1),
+			inactivityCnt: new(uint32),
+		}
+		ln.setConn(conn)
+		// Blocks until Accept picks up the new conn.
+		ln.newConns <- conn
+	}
+}
+
+// cleaner periodically sweeps inactive virtual connections until the
+// listener is closed or the module stops.
+func (ln *PacketListener) cleaner(ctx context.Context) error {
+	for {
+		select {
+		case <-time.After(1 * time.Minute):
+			// Check if listener has died.
+			if ln.closed.IsSet() {
+				return nil
+			}
+			// Clean connections.
+			ln.cleanInactiveConns(10)
+
+		case <-ctx.Done():
+			// Exit with module stop.
+			return nil
+		}
+	}
+}
+
+// cleanInactiveConns increments every conn's inactivity counter (which is
+// reset to zero by Read/Write) and closes conns that exceed
+// overInactivityCnt sweeps; conns exceeding twice that are dropped from the
+// registry.
+func (ln *PacketListener) cleanInactiveConns(overInactivityCnt uint32) {
+	ln.lock.Lock()
+	defer ln.lock.Unlock()
+
+	for k, conn := range ln.conns {
+		cnt := atomic.AddUint32(conn.inactivityCnt, 1)
+		switch {
+		case cnt > overInactivityCnt*2:
+			delete(ln.conns, k)
+		case cnt > overInactivityCnt:
+			_ = conn.Close()
+		}
+	}
+}
+
+// PacketConn simulates a connection for a stateless protocol.
+type PacketConn struct {
+	ln      *PacketListener   // listener that owns the shared socket
+	addr    net.Addr          // remote address of this virtual connection
+	closed  *abool.AtomicBool // set once by Close
+	closing chan struct{}     // closed by Close to unblock Read
+
+	buf []byte      // leftover data of the packet currently being served
+	in  chan []byte // one-slot queue of incoming packets from the reader
+
+	inactivityCnt *uint32 // incremented by the cleaner, reset on Read/Write
+}
+
+// Read reads data from the connection.
+// Read can be made to time out and return an error after a fixed
+// time limit; see SetDeadline and SetReadDeadline.
+// Note: deadlines are no-ops on this type; Read blocks until a packet
+// arrives or the conn is closed, in which case it returns io.EOF.
+func (conn *PacketConn) Read(b []byte) (n int, err error) {
+	// Check if connection is closed.
+	if conn.closed.IsSet() {
+		return 0, io.EOF
+	}
+
+	// Mark as active.
+	atomic.StoreUint32(conn.inactivityCnt, 0)
+
+	// Get new buffer.
+	if conn.buf == nil {
+		select {
+		case conn.buf = <-conn.in:
+			// A nil buffer means the in channel was closed by the listener.
+			if conn.buf == nil {
+				return 0, io.EOF
+			}
+		case <-conn.closing:
+			return 0, io.EOF
+		}
+	}
+
+	// Serve from buffer.
+	// If b is big enough, the whole packet is consumed; otherwise the
+	// remainder is kept for the next Read.
+	copy(b, conn.buf)
+	if len(b) >= len(conn.buf) {
+		copied := len(conn.buf)
+		conn.buf = nil
+		return copied, nil
+	}
+	copied := len(b)
+	conn.buf = conn.buf[copied:]
+	return copied, nil
+}
+
+// Write writes data to the connection.
+// Write can be made to time out and return an error after a fixed
+// time limit; see SetDeadline and SetWriteDeadline.
+// Note: deadlines are no-ops on this type; data is sent directly via the
+// listener's shared socket.
+func (conn *PacketConn) Write(b []byte) (n int, err error) {
+	// Check if connection is closed.
+	if conn.closed.IsSet() {
+		return 0, io.EOF
+	}
+
+	// Mark as active.
+	atomic.StoreUint32(conn.inactivityCnt, 0)
+
+	return conn.ln.sock.WriteTo(b, conn.addr)
+}
+
+// Close is a no-op as UDP connections share a single socket. Just stop sending
+// packets without closing.
+func (conn *PacketConn) Close() error {
+	// Flip the flag once and unblock any pending Read.
+	if conn.closed.SetToIf(false, true) {
+		close(conn.closing)
+	}
+	return nil
+}
+
+// LocalAddr returns the local network address.
+func (conn *PacketConn) LocalAddr() net.Addr {
+	return conn.ln.sock.LocalAddr()
+}
+
+// RemoteAddr returns the remote network address.
+func (conn *PacketConn) RemoteAddr() net.Addr {
+	return conn.addr
+}
+
+// SetDeadline is a no-op as UDP connections share a single socket.
+func (conn *PacketConn) SetDeadline(t time.Time) error {
+	return nil
+}
+
+// SetReadDeadline is a no-op as UDP connections share a single socket.
+func (conn *PacketConn) SetReadDeadline(t time.Time) error {
+	return nil
+}
+
+// SetWriteDeadline is a no-op as UDP connections share a single socket.
+func (conn *PacketConn) SetWriteDeadline(t time.Time) error {
+	return nil
+}
diff --git a/spn/sluice/request.go b/spn/sluice/request.go
new file mode 100644
index 00000000..2347ed35
--- /dev/null
+++ b/spn/sluice/request.go
@@ -0,0 +1,78 @@
+package sluice
+
+import (
+	"errors"
+	"fmt"
+	"net"
+	"time"
+
+	"github.com/safing/portmaster/service/network"
+	"github.com/safing/portmaster/service/network/packet"
+)
+
+const (
+	// defaultSluiceTTL is how long a pending request waits at the sluice
+	// before it expires.
+	defaultSluiceTTL = 30 * time.Second
+)
+
+var (
+	// ErrUnsupported is returned when a protocol is not supported.
+	ErrUnsupported = errors.New("unsupported protocol")
+
+	// ErrSluiceOffline is returned when the sluice for a network is offline.
+	ErrSluiceOffline = errors.New("is offline")
+)
+
+// Request holds request data for a sluice entry.
+type Request struct {
+	ConnInfo   *network.Connection // connection this request is made for
+	CallbackFn RequestCallbackFunc // called when the connection arrives
+	Expires    time.Time           // after this, the request may be cleaned up
+}
+
+// RequestCallbackFunc is called to take over handling of a connection that
+// arrived at the sluice.
+type RequestCallbackFunc func(connInfo *network.Connection, conn net.Conn)
+
+// AwaitRequest pre-registers a connection at the sluice for initializing it when it arrives.
+// It returns ErrUnsupported if the connection's protocol or IP version is
+// not supported, or a wrapped ErrSluiceOffline if no sluice is currently
+// listening for the derived network.
+func AwaitRequest(connInfo *network.Connection, callbackFn RequestCallbackFunc) error {
+	// Use a local name that does not shadow the imported network package.
+	netName := getNetworkFromConnInfo(connInfo)
+	if netName == "" {
+		return ErrUnsupported
+	}
+
+	sluice, ok := getSluice(netName)
+	if !ok {
+		return fmt.Errorf("sluice for network %s %w", netName, ErrSluiceOffline)
+	}
+
+	return sluice.AwaitRequest(&Request{
+		ConnInfo:   connInfo,
+		CallbackFn: callbackFn,
+		Expires:    time.Now().Add(defaultSluiceTTL),
+	})
+}
+
+// getNetworkFromConnInfo derives the Go network name ("tcp4", "tcp6",
+// "udp4", "udp6") from the connection's IP protocol and IP version.
+// It returns an empty string for any unsupported combination.
+func getNetworkFromConnInfo(connInfo *network.Connection) string {
+	// Use a local name that does not shadow the imported network package.
+	var netName string
+
+	// protocol
+	switch connInfo.IPProtocol { //nolint:exhaustive // Looking for specific values.
+	case packet.TCP:
+		netName = "tcp"
+	case packet.UDP:
+		netName = "udp"
+	default:
+		return ""
+	}
+
+	// IP version
+	switch connInfo.IPVersion {
+	case packet.IPv4:
+		netName += "4"
+	case packet.IPv6:
+		netName += "6"
+	default:
+		return ""
+	}
+
+	return netName
+}
diff --git a/spn/sluice/sluice.go b/spn/sluice/sluice.go
new file mode 100644
index 00000000..32a33151
--- /dev/null
+++ b/spn/sluice/sluice.go
@@ -0,0 +1,229 @@
+package sluice
+
+import (
+	"context"
+	"fmt"
+	"net"
+	"strconv"
+	"sync"
+	"time"
+
+	"github.com/safing/portbase/log"
+	"github.com/safing/portmaster/service/netenv"
+)
+
+// Sluice is a tunnel entry listener.
+// It accepts local connections and matches them against pre-registered
+// requests (see AwaitRequest).
+type Sluice struct {
+	network        string          // network to listen on, eg. "tcp4"
+	address        string          // address to listen on
+	createListener ListenerFactory // net.Listen or ListenUDP, set by StartSluice
+
+	lock            sync.Mutex
+	listener        net.Listener
+	pendingRequests map[string]*Request // keyed by the app's local "host:port"
+	abandoned       bool                // set once by abandon
+}
+
+// ListenerFactory defines a function to create a listener.
+type ListenerFactory func(network, address string) (net.Listener, error)
+
+// StartSluice starts a sluice listener at the given address.
+// Supported networks are tcp4/tcp6 (stream) and udp4/udp6 (packet); any
+// other network is logged as an error and ignored. The listener itself is
+// run (and restarted on failure) by a module service worker.
+func StartSluice(network, address string) {
+	s := &Sluice{
+		network:         network,
+		address:         address,
+		pendingRequests: make(map[string]*Request),
+	}
+
+	switch s.network {
+	case "tcp4", "tcp6":
+		s.createListener = net.Listen
+	case "udp4", "udp6":
+		s.createListener = ListenUDP
+	default:
+		log.Errorf("spn/sluice: cannot start sluice for %s: unsupported network", network)
+		return
+	}
+
+	// Start service worker.
+	module.StartServiceWorker(
+		fmt.Sprintf("%s sluice listener", s.network),
+		10*time.Second,
+		s.listenHandler,
+	)
+}
+
+// AwaitRequest pre-registers a connection.
+// The request is keyed by the connection's local "host:port" — this is the
+// address the app will connect from, ie. the remote address the sluice
+// listener will see. Registering a second request for the same key fails.
+func (s *Sluice) AwaitRequest(r *Request) error {
+	// Set default expiry.
+	if r.Expires.IsZero() {
+		r.Expires = time.Now().Add(defaultSluiceTTL)
+	}
+
+	s.lock.Lock()
+	defer s.lock.Unlock()
+
+	// Check if a pending request already exists for this local address.
+	key := net.JoinHostPort(r.ConnInfo.LocalIP.String(), strconv.Itoa(int(r.ConnInfo.LocalPort)))
+	_, exists := s.pendingRequests[key]
+	if exists {
+		return fmt.Errorf("a pending request for %s already exists", key)
+	}
+
+	// Add to pending requests.
+	s.pendingRequests[key] = r
+	return nil
+}
+
+// getRequest removes and returns the pending request for the given address.
+func (s *Sluice) getRequest(address string) (r *Request, ok bool) {
+	s.lock.Lock()
+	defer s.lock.Unlock()
+
+	r, ok = s.pendingRequests[address]
+	if ok {
+		delete(s.pendingRequests, address)
+	}
+	return
+}
+
+// init (re-)creates the listener and registers the sluice in the registry.
+// It is called by listenHandler on every (re)start.
+func (s *Sluice) init() error {
+	s.lock.Lock()
+	defer s.lock.Unlock()
+	s.abandoned = false
+
+	// start listening
+	s.listener = nil
+	ln, err := s.createListener(s.network, s.address)
+	if err != nil {
+		return fmt.Errorf("failed to listen: %w", err)
+	}
+	s.listener = ln
+
+	// Add to registry.
+	addSluice(s)
+
+	return nil
+}
+
+// abandon unregisters the sluice, closes its listener and notifies all
+// pending requests with a nil conn. It is idempotent.
+func (s *Sluice) abandon() {
+	s.lock.Lock()
+	defer s.lock.Unlock()
+	if s.abandoned {
+		return
+	}
+	s.abandoned = true
+
+	// Remove from registry.
+	removeSluice(s.network)
+
+	// Close listener.
+	if s.listener != nil {
+		_ = s.listener.Close()
+	}
+
+	// Notify pending requests.
+	// Callbacks receive a nil conn to signal failure.
+	// NOTE(review): callbacks run while s.lock is held — they must not call
+	// back into this sluice or they will deadlock; confirm with callers.
+	for i, r := range s.pendingRequests {
+		r.CallbackFn(r.ConnInfo, nil)
+		delete(s.pendingRequests, i)
+	}
+}
+
+// handleConnection matches an accepted connection against a pending request
+// and hands it over to the request's callback. Connections that are not
+// local, unsupported, or unmatched are answered/ignored and closed.
+func (s *Sluice) handleConnection(conn net.Conn) {
+	// Close the connection if handling is not successful.
+	success := false
+	defer func() {
+		if !success {
+			_ = conn.Close()
+		}
+	}()
+
+	// Get IP address.
+	var remoteIP net.IP
+	switch typedAddr := conn.RemoteAddr().(type) {
+	case *net.TCPAddr:
+		remoteIP = typedAddr.IP
+	case *net.UDPAddr:
+		remoteIP = typedAddr.IP
+	default:
+		log.Warningf("spn/sluice: cannot handle connection for unsupported network %s", conn.RemoteAddr().Network())
+		return
+	}
+
+	// Check if the request is local.
+	// Only local apps may enter the SPN through this sluice.
+	local, err := netenv.IsMyIP(remoteIP)
+	if err != nil {
+		log.Warningf("spn/sluice: failed to check if request from %s is local: %s", remoteIP, err)
+		return
+	}
+	if !local {
+		log.Warningf("spn/sluice: received external request from %s, ignoring", remoteIP)
+
+		// TODO:
+		// Do not allow this to be spammed.
+		// Only allow one trigger per second.
+		// Do not trigger by same "remote IP" in a row.
+		netenv.TriggerNetworkChangeCheck()
+
+		return
+	}
+
+	// Get waiting request.
+	// Pending requests are keyed by the app's local address, which is this
+	// connection's remote address from the sluice's point of view.
+	r, ok := s.getRequest(conn.RemoteAddr().String())
+	if !ok {
+		_, err := conn.Write(entrypointInfoMsg)
+		if err != nil {
+			log.Warningf("spn/sluice: new %s request from %s without pending request, but failed to reply with info msg: %s", s.network, conn.RemoteAddr(), err)
+		} else {
+			log.Debugf("spn/sluice: new %s request from %s without pending request, replied with info msg", s.network, conn.RemoteAddr())
+		}
+		return
+	}
+
+	// Hand over to callback.
+	log.Tracef(
+		"spn/sluice: new %s request from %s for %s (%s:%d)",
+		s.network, conn.RemoteAddr(),
+		r.ConnInfo.Entity.Domain, r.ConnInfo.Entity.IP, r.ConnInfo.Entity.Port,
+	)
+	r.CallbackFn(r.ConnInfo, conn)
+	success = true
+}
+
+// listenHandler is the sluice's service worker: it initializes the listener
+// and then accepts and handles connections until an error occurs or the
+// module stops. The sluice is abandoned on exit.
+func (s *Sluice) listenHandler(_ context.Context) error {
+	defer s.abandon()
+	err := s.init()
+	if err != nil {
+		return err
+	}
+
+	// Handle new connections.
+	log.Infof("spn/sluice: started listening for %s requests on %s", s.network, s.listener.Addr())
+	for {
+		conn, err := s.listener.Accept()
+		if err != nil {
+			if module.IsStopping() {
+				return nil
+			}
+			// Returning the error makes the service worker restart us.
+			return fmt.Errorf("failed to accept connection: %w", err)
+		}
+
+		// Handle accepted connection.
+		s.handleConnection(conn)
+
+		// Clean up old leftovers.
+		s.cleanConnections()
+	}
+}
+
+// cleanConnections removes expired pending requests.
+func (s *Sluice) cleanConnections() {
+	s.lock.Lock()
+	defer s.lock.Unlock()
+
+	now := time.Now()
+	for address, request := range s.pendingRequests {
+		if now.After(request.Expires) {
+			delete(s.pendingRequests, address)
+			log.Debugf("spn/sluice: removed expired pending %s connection %s", s.network, request.ConnInfo)
+		}
+	}
+}
diff --git a/spn/sluice/sluices.go b/spn/sluice/sluices.go
new file mode 100644
index 00000000..1ae58777
--- /dev/null
+++ b/spn/sluice/sluices.go
@@ -0,0 +1,47 @@
+package sluice
+
+import "sync"
+
+var (
+	// sluices is the registry of active sluices, keyed by network
+	// (eg. "tcp4"), guarded by sluicesLock.
+	sluices     = make(map[string]*Sluice)
+	sluicesLock sync.RWMutex
+)
+
+// getSluice returns the active sluice for the given network, if any.
+func getSluice(network string) (s *Sluice, ok bool) {
+	sluicesLock.RLock()
+	defer sluicesLock.RUnlock()
+
+	s, ok = sluices[network]
+	return
+}
+
+// addSluice registers the sluice under its network.
+func addSluice(s *Sluice) {
+	sluicesLock.Lock()
+	defer sluicesLock.Unlock()
+
+	sluices[s.network] = s
+}
+
+// removeSluice unregisters the sluice for the given network.
+func removeSluice(network string) {
+	sluicesLock.Lock()
+	defer sluicesLock.Unlock()
+
+	delete(sluices, network)
+}
+
+// copySluices returns a shallow snapshot of the sluice registry, so callers
+// can iterate without holding the lock.
+func copySluices() map[string]*Sluice {
+	// Copying only reads the registry, so a read lock is sufficient.
+	sluicesLock.RLock()
+	defer sluicesLock.RUnlock()
+
+	copied := make(map[string]*Sluice, len(sluices))
+	for k, v := range sluices {
+		copied[k] = v
+	}
+	return copied
+}
+
+// stopAllSluices abandons every registered sluice.
+// It iterates over a snapshot, as abandon removes sluices from the registry.
+func stopAllSluices() {
+	for _, sluice := range copySluices() {
+		sluice.abandon()
+	}
+}
diff --git a/spn/sluice/udp_listener.go b/spn/sluice/udp_listener.go
new file mode 100644
index 00000000..4065d520
--- /dev/null
+++ b/spn/sluice/udp_listener.go
@@ -0,0 +1,334 @@
+package sluice
+
+import (
+	"context"
+	"io"
+	"net"
+	"runtime"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"github.com/tevino/abool"
+	"golang.org/x/net/ipv4"
+	"golang.org/x/net/ipv6"
+)
+
+// onWindows reports whether we are running on Windows, where setting the
+// UDP socket options below is not supported.
+const onWindows = runtime.GOOS == "windows"
+
+// UDPListener is a listener for UDP.
+// It multiplexes a single UDP socket into per-remote-address virtual
+// connections that satisfy net.Conn.
+type UDPListener struct {
+	sock     *net.UDPConn      // shared underlying socket
+	closed   *abool.AtomicBool // set once by Close
+	newConns chan *UDPConn     // handed to Accept by the reader worker
+	oobSize  int               // size of the oob buffer for ReadMsgUDP
+
+	lock  sync.Mutex
+	conns map[string]*UDPConn // keyed by remote address string
+	err   error               // first socket error, reported by Accept
+}
+
+// ListenUDP creates a packet listener.
+// It resolves the address, opens the UDP socket, configures socket options
+// for correct reply source addresses, and starts the reader and cleaner
+// service workers on the module.
+func ListenUDP(network, address string) (net.Listener, error) {
+	// Parse address.
+	udpAddr, err := net.ResolveUDPAddr(network, address)
+	if err != nil {
+		return nil, err
+	}
+
+	// Determine oob data size.
+	oobSize := 40 // IPv6 (measured)
+	if udpAddr.IP.To4() != nil {
+		oobSize = 32 // IPv4 (measured)
+	}
+
+	// Create a new listening UDP socket.
+	sock, err := net.ListenUDP(network, udpAddr)
+	if err != nil {
+		return nil, err
+	}
+
+	// Create listener.
+	ln := &UDPListener{
+		sock:     sock,
+		closed:   abool.New(),
+		newConns: make(chan *UDPConn),
+		oobSize:  oobSize,
+		conns:    make(map[string]*UDPConn),
+	}
+
+	// Set socket options on listener.
+	err = ln.setSocketOptions()
+	if err != nil {
+		// Don't leak the socket when setup fails.
+		_ = sock.Close()
+		return nil, err
+	}
+
+	// Start workers.
+	module.StartServiceWorker("udp listener reader", 0, ln.reader)
+	module.StartServiceWorker("udp listener cleaner", time.Minute, ln.cleaner)
+
+	return ln, nil
+}
+
+// Accept waits for and returns the next connection to the listener.
+func (ln *UDPListener) Accept() (net.Conn, error) {
+	// A nil conn means newConns was closed by Close.
+	conn := <-ln.newConns
+	if conn != nil {
+		return conn, nil
+	}
+
+	// Check if there is a socket error.
+	ln.lock.Lock()
+	defer ln.lock.Unlock()
+	if ln.err != nil {
+		return nil, ln.err
+	}
+
+	return nil, io.EOF
+}
+
+// Close closes the listener.
+// Any blocked Accept operations will be unblocked and return errors.
+func (ln *UDPListener) Close() error {
+	// Only run the close sequence once.
+	if !ln.closed.SetToIf(false, true) {
+		return nil
+	}
+
+	// Close all channels.
+	// NOTE(review): the reader worker may still be sending on newConns or
+	// select-sending on conn.in at this point; sending on a closed channel
+	// panics — confirm the reader has exited (via socket error) before this
+	// can race, or guard these sends.
+	close(ln.newConns)
+	ln.lock.Lock()
+	defer ln.lock.Unlock()
+	for _, conn := range ln.conns {
+		close(conn.in)
+	}
+
+	// Close socket.
+	return ln.sock.Close()
+}
+
+// Addr returns the listener's network address.
+func (ln *UDPListener) Addr() net.Addr {
+	return ln.sock.LocalAddr()
+}
+
+// getConn returns the virtual connection for the given remote address, if any.
+func (ln *UDPListener) getConn(remoteAddr string) (conn *UDPConn, ok bool) {
+	ln.lock.Lock()
+	defer ln.lock.Unlock()
+
+	conn, ok = ln.conns[remoteAddr]
+	return
+}
+
+// setConn registers the virtual connection under its remote address.
+func (ln *UDPListener) setConn(conn *UDPConn) {
+	ln.lock.Lock()
+	defer ln.lock.Unlock()
+
+	ln.conns[conn.addr.String()] = conn
+}
+
+// reader is a service worker that reads datagrams from the shared socket
+// and dispatches them to per-remote-address connections, creating new
+// virtual connections on demand. It exits (and closes the listener) on the
+// first socket error.
+func (ln *UDPListener) reader(_ context.Context) error {
+	for {
+		// TODO: Find good buf size.
+		// With a buf size of 512 we have seen this error on Windows:
+		// wsarecvmsg: A message sent on a datagram socket was larger than the internal message buffer or some other network limit, or the buffer used to receive a datagram into was smaller than the datagram itself.
+		// UDP is not (yet) heavily used, so we can go for the 1500 bytes size for now.
+
+		// Read data from connection.
+		// Fresh buffers per read, as ownership is handed to the conn.
+		buf := make([]byte, 1500) // TODO: see comment above.
+		oob := make([]byte, ln.oobSize)
+		n, oobn, _, addr, err := ln.sock.ReadMsgUDP(buf, oob)
+		if err != nil {
+			// Set socket error.
+			ln.lock.Lock()
+			ln.err = err
+			ln.lock.Unlock()
+			// Close and return
+			_ = ln.Close()
+			return nil //nolint:nilerr
+		}
+		buf = buf[:n]
+		oob = oob[:oobn]
+
+		// Get connection and supply data.
+		conn, ok := ln.getConn(addr.String())
+		if ok {
+			// Ignore if conn is closed.
+			if conn.closed.IsSet() {
+				continue
+			}
+
+			// Non-blocking send: the datagram is dropped if the conn's
+			// one-slot buffer is already full.
+			select {
+			case conn.in <- buf:
+			default:
+			}
+			continue
+		}
+
+		// Or create a new connection.
+		// The oob data of the first datagram is kept and reused for replies
+		// (see UDPConn.Write).
+		conn = &UDPConn{
+			ln:            ln,
+			addr:          addr,
+			oob:           oob,
+			closed:        abool.New(),
+			closing:       make(chan struct{}),
+			buf:           buf,
+			in:            make(chan []byte, 1),
+			inactivityCnt: new(uint32),
+		}
+		ln.setConn(conn)
+		// Blocks until Accept picks up the new conn.
+		ln.newConns <- conn
+	}
+}
+
+// cleaner periodically sweeps inactive virtual connections until the
+// listener is closed or the module stops.
+func (ln *UDPListener) cleaner(ctx context.Context) error {
+	for {
+		select {
+		case <-time.After(1 * time.Minute):
+			// Check if listener has died.
+			if ln.closed.IsSet() {
+				return nil
+			}
+			// Clean connections.
+			ln.cleanInactiveConns(10)
+
+		case <-ctx.Done():
+			// Exit with module stop.
+			return nil
+		}
+	}
+}
+
+// cleanInactiveConns increments every conn's inactivity counter (which is
+// reset to zero by Read/Write) and closes conns that exceed
+// overInactivityCnt sweeps; conns exceeding twice that are dropped from the
+// registry.
+func (ln *UDPListener) cleanInactiveConns(overInactivityCnt uint32) {
+	ln.lock.Lock()
+	defer ln.lock.Unlock()
+
+	for k, conn := range ln.conns {
+		cnt := atomic.AddUint32(conn.inactivityCnt, 1)
+		switch {
+		case cnt > overInactivityCnt*2:
+			delete(ln.conns, k)
+		case cnt > overInactivityCnt:
+			_ = conn.Close()
+		}
+	}
+}
+
+// setSocketOptions sets socket options so that the source address for
+// replies is correct.
+func (ln *UDPListener) setSocketOptions() error {
+	// Setting socket options is not supported on windows.
+	if onWindows {
+		return nil
+	}
+
+	// As we might be listening on an interface that supports both IPv4 and IPv6,
+	// try to set the socket options on both.
+	// Only report an error if it fails on both.
+	// Note: in that case only the IPv4 error is returned.
+	err4 := ipv4.NewPacketConn(ln.sock).SetControlMessage(ipv4.FlagDst|ipv4.FlagInterface, true)
+	err6 := ipv6.NewPacketConn(ln.sock).SetControlMessage(ipv6.FlagDst|ipv6.FlagInterface, true)
+	if err4 != nil && err6 != nil {
+		return err4
+	}
+
+	return nil
+}
+
+// UDPConn simulates a connection for a stateless protocol.
+type UDPConn struct {
+	ln      *UDPListener      // listener that owns the shared socket
+	addr    *net.UDPAddr      // remote address of this virtual connection
+	oob     []byte            // oob data from the first datagram, reused for replies
+	closed  *abool.AtomicBool // set once by Close
+	closing chan struct{}     // closed by Close to unblock Read
+
+	buf []byte      // leftover data of the datagram currently being served
+	in  chan []byte // one-slot queue of incoming datagrams from the reader
+
+	inactivityCnt *uint32 // incremented by the cleaner, reset on Read/Write
+}
+
+// Read reads data from the connection.
+// Read can be made to time out and return an error after a fixed
+// time limit; see SetDeadline and SetReadDeadline.
+// Note: deadlines are no-ops on this type; Read blocks until a datagram
+// arrives or the conn is closed, in which case it returns io.EOF.
+func (conn *UDPConn) Read(b []byte) (n int, err error) {
+	// Check if connection is closed.
+	if conn.closed.IsSet() {
+		return 0, io.EOF
+	}
+
+	// Mark as active.
+	atomic.StoreUint32(conn.inactivityCnt, 0)
+
+	// Get new buffer.
+	if conn.buf == nil {
+		select {
+		case conn.buf = <-conn.in:
+			// A nil buffer means the in channel was closed by the listener.
+			if conn.buf == nil {
+				return 0, io.EOF
+			}
+		case <-conn.closing:
+			return 0, io.EOF
+		}
+	}
+
+	// Serve from buffer.
+	// If b is big enough, the whole datagram is consumed; otherwise the
+	// remainder is kept for the next Read.
+	copy(b, conn.buf)
+	if len(b) >= len(conn.buf) {
+		copied := len(conn.buf)
+		conn.buf = nil
+		return copied, nil
+	}
+	copied := len(b)
+	conn.buf = conn.buf[copied:]
+	return copied, nil
+}
+
+// Write writes data to the connection.
+// Write can be made to time out and return an error after a fixed
+// time limit; see SetDeadline and SetWriteDeadline.
+// Note: deadlines are no-ops on this type; data is sent directly via the
+// listener's shared socket, reusing the stored oob data so the reply's
+// source address matches.
+func (conn *UDPConn) Write(b []byte) (n int, err error) {
+	// Check if connection is closed.
+	if conn.closed.IsSet() {
+		return 0, io.EOF
+	}
+
+	// Mark as active.
+	atomic.StoreUint32(conn.inactivityCnt, 0)
+
+	n, _, err = conn.ln.sock.WriteMsgUDP(b, conn.oob, conn.addr)
+	return n, err
+}
+
+// Close is a no-op as UDP connections share a single socket. Just stop sending
+// packets without closing.
+func (conn *UDPConn) Close() error {
+	// Flip the flag once and unblock any pending Read.
+	if conn.closed.SetToIf(false, true) {
+		close(conn.closing)
+	}
+	return nil
+}
+
+// LocalAddr returns the local network address.
+func (conn *UDPConn) LocalAddr() net.Addr {
+	return conn.ln.sock.LocalAddr()
+}
+
+// RemoteAddr returns the remote network address.
+func (conn *UDPConn) RemoteAddr() net.Addr {
+	return conn.addr
+}
+
+// SetDeadline is a no-op as UDP connections share a single socket.
+func (conn *UDPConn) SetDeadline(t time.Time) error {
+	return nil
+}
+
+// SetReadDeadline is a no-op as UDP connections share a single socket.
+func (conn *UDPConn) SetReadDeadline(t time.Time) error {
+	return nil
+}
+
+// SetWriteDeadline is a no-op as UDP connections share a single socket.
+func (conn *UDPConn) SetWriteDeadline(t time.Time) error {
+	return nil
+}
diff --git a/spn/spn.go b/spn/spn.go
new file mode 100644
index 00000000..569d85de
--- /dev/null
+++ b/spn/spn.go
@@ -0,0 +1 @@
+package spn
diff --git a/spn/terminal/control_flow.go b/spn/terminal/control_flow.go
new file mode 100644
index 00000000..e4d15ccf
--- /dev/null
+++ b/spn/terminal/control_flow.go
@@ -0,0 +1,454 @@
+package terminal
+
+import (
+	"context"
+	"fmt"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"github.com/safing/portbase/formats/varint"
+	"github.com/safing/portbase/modules"
+)
+
+// FlowControl defines the flow control interface.
+type FlowControl interface {
+	Deliver(msg *Msg) *Error
+	Receive() <-chan *Msg
+	Send(msg *Msg, timeout time.Duration) *Error
+	ReadyToSend() <-chan struct{}
+	Flush(timeout time.Duration)
+	StartWorkers(m *modules.Module, terminalName string)
+	RecvQueueLen() int
+	SendQueueLen() int
+}
+
+// FlowControlType represents a flow control type.
+type FlowControlType uint8
+
+// Flow Control Types.
+const (
+	FlowControlDefault FlowControlType = 0
+	FlowControlDFQ     FlowControlType = 1
+	FlowControlNone    FlowControlType = 2
+
+	// defaultFlowControl is what FlowControlDefault resolves to.
+	defaultFlowControl = FlowControlDFQ
+)
+
+// DefaultSize returns the default flow control size.
+// FlowControlDefault is resolved to the concrete default type first;
+// unknown types yield 0.
+func (fct FlowControlType) DefaultSize() uint32 {
+	// Resolve the default marker to its concrete type.
+	effective := fct
+	if effective == FlowControlDefault {
+		effective = defaultFlowControl
+	}
+
+	switch effective {
+	case FlowControlDFQ:
+		return 50000
+	case FlowControlNone:
+		return 10000
+	default:
+		return 0
+	}
+}
+
+// Flow Queue Configuration.
+const (
+	DefaultQueueSize        = 50000
+	MaxQueueSize            = 1000000
+	forceReportBelowPercent = 0.75 // force a space report when reported space drops below this fraction of capacity
+)
+
+// DuplexFlowQueue is a duplex flow control mechanism using queues.
+type DuplexFlowQueue struct {
+	// ctx is the context of the Terminal that is using the DFQ.
+	ctx context.Context
+
+	// submitUpstream is used to submit messages to the upstream channel.
+	submitUpstream func(msg *Msg, timeout time.Duration)
+
+	// sendQueue holds the messages that are waiting to be sent.
+	sendQueue chan *Msg
+	// prioMsgs holds the number of messages to send with high priority.
+	prioMsgs *int32
+	// sendSpace indicates the amount free slots in the recvQueue on the other end.
+	sendSpace *int32
+	// readyToSend is used to notify sending components that there is free space.
+	readyToSend chan struct{}
+	// wakeSender is used to wake a sender in case the sendSpace was zero and the
+	// sender is waiting for available space.
+	wakeSender chan struct{}
+
+	// recvQueue holds the messages that are waiting to be processed.
+	recvQueue chan *Msg
+	// reportedSpace indicates the amount of free slots that the other end knows
+	// about.
+	reportedSpace *int32
+	// spaceReportLock locks the calculation of space to report.
+	spaceReportLock sync.Mutex
+	// forceSpaceReport forces the sender to send a space report.
+	forceSpaceReport chan struct{}
+
+	// flush is used to send a finish function to the handler, which will write
+	// all pending messages and then call the received function.
+	flush chan func()
+}
+
+// NewDuplexFlowQueue returns a new duplex flow queue.
+// Both sendSpace and reportedSpace start at the full queue size, ie. both
+// ends initially assume the other's queue is empty.
+func NewDuplexFlowQueue(
+	ctx context.Context,
+	queueSize uint32,
+	submitUpstream func(msg *Msg, timeout time.Duration),
+) *DuplexFlowQueue {
+	dfq := &DuplexFlowQueue{
+		ctx:              ctx,
+		submitUpstream:   submitUpstream,
+		sendQueue:        make(chan *Msg, queueSize),
+		prioMsgs:         new(int32),
+		sendSpace:        new(int32),
+		readyToSend:      make(chan struct{}),
+		wakeSender:       make(chan struct{}, 1),
+		recvQueue:        make(chan *Msg, queueSize),
+		reportedSpace:    new(int32),
+		forceSpaceReport: make(chan struct{}, 1),
+		flush:            make(chan func()),
+	}
+	atomic.StoreInt32(dfq.sendSpace, int32(queueSize))
+	atomic.StoreInt32(dfq.reportedSpace, int32(queueSize))
+
+	return dfq
+}
+
+// StartWorkers starts the necessary workers to operate the flow queue.
+func (dfq *DuplexFlowQueue) StartWorkers(m *modules.Module, terminalName string) {
+	m.StartWorker(terminalName+" flow queue", dfq.FlowHandler)
+}
+
+// shouldReportRecvSpace returns whether the receive space should be reported.
+func (dfq *DuplexFlowQueue) shouldReportRecvSpace() bool {
+	return atomic.LoadInt32(dfq.reportedSpace) < int32(float32(cap(dfq.recvQueue))*forceReportBelowPercent)
+}
+
+// decrementReportedRecvSpace decreases the reported recv space by 1 and
+// returns if the receive space should be reported.
+func (dfq *DuplexFlowQueue) decrementReportedRecvSpace() (shouldReportRecvSpace bool) {
+	return atomic.AddInt32(dfq.reportedSpace, -1) < int32(float32(cap(dfq.recvQueue))*forceReportBelowPercent)
+}
+
+// getSendSpace returns the current send space.
+func (dfq *DuplexFlowQueue) getSendSpace() int32 {
+	return atomic.LoadInt32(dfq.sendSpace)
+}
+
+// decrementSendSpace decreases the send space by 1 and returns it.
+func (dfq *DuplexFlowQueue) decrementSendSpace() int32 {
+	return atomic.AddInt32(dfq.sendSpace, -1)
+}
+
+// addToSendSpace adds the reported space to the send space and wakes a
+// possibly waiting sender.
+func (dfq *DuplexFlowQueue) addToSendSpace(n int32) {
+	// Add new space to the send space.
+	atomic.AddInt32(dfq.sendSpace, n)
+	// Wake the sender in case it is waiting (non-blocking, one-slot buffer).
+	select {
+	case dfq.wakeSender <- struct{}{}:
+	default:
+	}
+}
+
+// reportableRecvSpace returns how much free space can be reported to the other
+// end. The returned number must be communicated to the other end and must not
+// be ignored.
+func (dfq *DuplexFlowQueue) reportableRecvSpace() int32 {
+	// Changes to the recvQueue during calculation are no problem.
+	// We don't want to report space twice though!
+	dfq.spaceReportLock.Lock()
+	defer dfq.spaceReportLock.Unlock()
+
+	// Calculate reportable receive space and add it to the reported space.
+	reportedSpace := atomic.LoadInt32(dfq.reportedSpace)
+	toReport := int32(cap(dfq.recvQueue)-len(dfq.recvQueue)) - reportedSpace
+
+	// Never report values below zero.
+	// This can happen, as dfq.reportedSpace is decreased after a container is
+	// submitted to dfq.recvQueue by dfq.Deliver(). This race condition can only
+	// lower the space to report, not increase it. A simple check here solved
+	// this problem and keeps performance high.
+	// Also, don't report values of 1, as the benefit is minimal and this might
+	// be commonly triggered due to the buffer of the force report channel.
+	if toReport <= 1 {
+		return 0
+	}
+
+	// Add space to report to dfq.reportedSpace and return it.
+	atomic.AddInt32(dfq.reportedSpace, toReport)
+	return toReport
+}
+
+// FlowHandler handles all flow queue internals and must be started as a worker
+// in the module where it is used.
+// It is the single goroutine that drains sendQueue, attaches space reports,
+// submits messages upstream, and services flush requests.
+func (dfq *DuplexFlowQueue) FlowHandler(_ context.Context) error {
+	// The upstreamSender is started by the terminal module, but is tied to the
+	// flow owner instead. Make sure that the flow owner's module depends on the
+	// terminal module so that it is shut down earlier.
+
+	var sendSpaceDepleted bool
+	var flushFinished func()
+
+	// Drain all queues when shutting down.
+	defer func() {
+		for {
+			select {
+			case msg := <-dfq.sendQueue:
+				msg.Finish()
+			case msg := <-dfq.recvQueue:
+				msg.Finish()
+			default:
+				return
+			}
+		}
+	}()
+
+sending:
+	for {
+		// If the send queue is depleted, wait to be woken.
+		if sendSpaceDepleted {
+			select {
+			case <-dfq.wakeSender:
+				// Re-check: the wake signal may be stale.
+				if dfq.getSendSpace() > 0 {
+					sendSpaceDepleted = false
+				} else {
+					continue sending
+				}
+
+			case <-dfq.forceSpaceReport:
+				// Forced reporting of space.
+				// We do not need to check if there is enough sending space, as there is
+				// no data included.
+				spaceToReport := dfq.reportableRecvSpace()
+				if spaceToReport > 0 {
+					msg := NewMsg(varint.Pack64(uint64(spaceToReport)))
+					dfq.submitUpstream(msg, 0)
+				}
+				continue sending
+
+			case <-dfq.ctx.Done():
+				return nil
+			}
+		}
+
+		// Get message from send queue.
+
+		select {
+		case dfq.readyToSend <- struct{}{}:
+			// Notify that we are ready to send.
+
+		case msg := <-dfq.sendQueue:
+			// Send message from queue.
+
+			// If nil, the queue is being shut down.
+			if msg == nil {
+				return nil
+			}
+
+			// Check if we are handling a high priority message or waiting for one.
+			// Mark any msgs as high priority, when there is one in the pipeline.
+			remainingPrioMsgs := atomic.AddInt32(dfq.prioMsgs, -1)
+			switch {
+			case remainingPrioMsgs >= 0:
+				msg.Unit.MakeHighPriority()
+			case remainingPrioMsgs < -30_000:
+				// Prevent wrap to positive.
+				// Compatible with int16 or bigger.
+				atomic.StoreInt32(dfq.prioMsgs, 0)
+			}
+
+			// Wait for processing slot.
+			msg.Unit.WaitForSlot()
+
+			// Prepend available receiving space.
+			// Every outgoing message piggybacks a space report.
+			msg.Data.Prepend(varint.Pack64(uint64(dfq.reportableRecvSpace())))
+
+			// Submit for sending upstream.
+			dfq.submitUpstream(msg, 0)
+			// Decrease the send space and set flag if depleted.
+			if dfq.decrementSendSpace() <= 0 {
+				sendSpaceDepleted = true
+			}
+
+			// Check if the send queue is empty now and signal flushers.
+			if flushFinished != nil && len(dfq.sendQueue) == 0 {
+				flushFinished()
+				flushFinished = nil
+			}
+
+		case <-dfq.forceSpaceReport:
+			// Forced reporting of space.
+			// We do not need to check if there is enough sending space, as there is
+			// no data included.
+			spaceToReport := dfq.reportableRecvSpace()
+			if spaceToReport > 0 {
+				msg := NewMsg(varint.Pack64(uint64(spaceToReport)))
+				dfq.submitUpstream(msg, 0)
+			}
+
+		case newFlushFinishedFn := <-dfq.flush:
+			// Signal immediately if send queue is empty.
+			if len(dfq.sendQueue) == 0 {
+				newFlushFinishedFn()
+			} else {
+				// If there already is a flush finished function, stack them.
+				if flushFinished != nil {
+					stackedFlushFinishFn := flushFinished
+					flushFinished = func() {
+						stackedFlushFinishFn()
+						newFlushFinishedFn()
+					}
+				} else {
+					flushFinished = newFlushFinishedFn
+				}
+			}
+
+		case <-dfq.ctx.Done():
+			return nil
+		}
+	}
+}
+
+// Flush waits for all waiting data to be sent.
+// A zero timeout waits indefinitely; on shutdown or timeout it returns early
+// without a completion guarantee.
+func (dfq *DuplexFlowQueue) Flush(timeout time.Duration) {
+	// Signal channel that is closed once the flush has completed.
+	done := make(chan struct{})
+
+	// Hand the notifier to the sender worker; abort on shutdown or timeout.
+	select {
+	case dfq.flush <- func() { close(done) }:
+	case <-dfq.ctx.Done():
+		return
+	case <-TimedOut(timeout):
+		return
+	}
+
+	// Block until the sender signals completion, shutdown or timeout.
+	// Note: each select gets its own timer, so the timeout applies per step.
+	select {
+	case <-done:
+	case <-dfq.ctx.Done():
+	case <-TimedOut(timeout):
+	}
+}
+
+// ready is a closed channel that serves as an always-ready signal:
+// receiving from it never blocks. See ReadyToSend.
+var ready = make(chan struct{})
+
+func init() {
+	// Close immediately so reads from the ready channel always succeed.
+	close(ready)
+}
+
+// ReadyToSend returns a channel that can be read when data can be sent.
+// If send space is currently available, the always-closed ready channel is
+// returned so the caller does not block; otherwise the caller is woken
+// through dfq.readyToSend by the sender worker.
+func (dfq *DuplexFlowQueue) ReadyToSend() <-chan struct{} {
+	if atomic.LoadInt32(dfq.sendSpace) > 0 {
+		return ready
+	}
+	return dfq.readyToSend
+}
+
+// Send adds the given container to the send queue.
+// It blocks until the message is queued, the timeout is reached (zero means
+// no timeout), or the queue is shutting down. On timeout or shutdown the
+// message's unit is finished and an error is returned.
+func (dfq *DuplexFlowQueue) Send(msg *Msg, timeout time.Duration) *Error {
+	select {
+	case dfq.sendQueue <- msg:
+		if msg.Unit.IsHighPriority() {
+			// Reset prioMsgs to the current queue size, so that all waiting and the
+			// message we just added are all handled as high priority.
+			// NOTE(review): len() is read after the send and races with the sender
+			// worker draining the queue, so this can only under-count — some queued
+			// messages may briefly miss the high-priority mark; verify acceptable.
+			atomic.StoreInt32(dfq.prioMsgs, int32(len(dfq.sendQueue)))
+		}
+		return nil
+
+	case <-TimedOut(timeout):
+		msg.Finish()
+		return ErrTimeout
+
+	case <-dfq.ctx.Done():
+		msg.Finish()
+		return ErrStopping
+	}
+}
+
+// Receive receives a container from the recv queue.
+// As a side effect, it forces a space report to the other side when the
+// reported receive space is running low.
+func (dfq *DuplexFlowQueue) Receive() <-chan *Msg {
+	// If the reported recv space is nearing its end, force a report.
+	// The non-blocking send is dropped if a report is already pending.
+	if dfq.shouldReportRecvSpace() {
+		select {
+		case dfq.forceSpaceReport <- struct{}{}:
+		default:
+		}
+	}
+
+	return dfq.recvQueue
+}
+
+// Deliver submits a container for receiving from upstream.
+// The message data must start with a varint-encoded space update; any
+// remaining bytes are queued for receiving. Returns an error when the
+// message is malformed or the receive queue is full.
+func (dfq *DuplexFlowQueue) Deliver(msg *Msg) *Error {
+	// Reject nil messages and messages without data.
+	// msg.Finish() is a no-op on a nil Msg, so this is safe.
+	if msg == nil || msg.Data == nil {
+		msg.Finish()
+		return ErrMalformedData.With("no data")
+	}
+
+	// Get and add new reported space.
+	addSpace, err := msg.Data.GetNextN16()
+	if err != nil {
+		msg.Finish()
+		return ErrMalformedData.With("failed to parse reported space: %w", err)
+	}
+	if addSpace > 0 {
+		dfq.addToSendSpace(int32(addSpace))
+	}
+	// Abort processing if the container only contained a space update.
+	if !msg.Data.HoldsData() {
+		msg.Finish()
+		return nil
+	}
+
+	select {
+	case dfq.recvQueue <- msg:
+
+		// If the recv queue accepted the Container, decrement the recv space.
+		shouldReportRecvSpace := dfq.decrementReportedRecvSpace()
+		// If the reported recv space is nearing its end, force a report, if the
+		// sender worker is idle.
+		if shouldReportRecvSpace {
+			select {
+			case dfq.forceSpaceReport <- struct{}{}:
+			default:
+			}
+		}
+
+		return nil
+	default:
+		// If the recv queue is full, return an error.
+		// The whole point of the flow queue is to guarantee that this never happens.
+		msg.Finish()
+		return ErrQueueOverflow
+	}
+}
+
+// FlowStats returns a k=v formatted string of internal stats.
+func (dfq *DuplexFlowQueue) FlowStats() string {
+	// Snapshot all counters before formatting.
+	sendQueued := len(dfq.sendQueue)
+	recvQueued := len(dfq.recvQueue)
+	sendSpace := atomic.LoadInt32(dfq.sendSpace)
+	reported := atomic.LoadInt32(dfq.reportedSpace)
+	return fmt.Sprintf("sq=%d rq=%d sends=%d reps=%d", sendQueued, recvQueued, sendSpace, reported)
+}
+
+// RecvQueueLen returns the current length of the receive queue.
+// The value is a racy snapshot and only suitable for stats and debugging.
+func (dfq *DuplexFlowQueue) RecvQueueLen() int {
+	return len(dfq.recvQueue)
+}
+
+// SendQueueLen returns the current length of the send queue.
+// The value is a racy snapshot and only suitable for stats and debugging.
+func (dfq *DuplexFlowQueue) SendQueueLen() int {
+	return len(dfq.sendQueue)
+}
diff --git a/spn/terminal/defaults.go b/spn/terminal/defaults.go
new file mode 100644
index 00000000..57f17f47
--- /dev/null
+++ b/spn/terminal/defaults.go
@@ -0,0 +1,36 @@
+package terminal
+
+const (
+	// UsePriorityDataMsgs defines whether priority data messages should be used.
+	// It is applied to all default terminal option sets below.
+	UsePriorityDataMsgs = true
+)
+
+// DefaultCraneControllerOpts returns the default terminal options for a crane
+// controller terminal.
+func DefaultCraneControllerOpts() *TerminalOpts {
+	opts := &TerminalOpts{}
+	opts.Padding = 0 // Crane already applies padding.
+	opts.FlowControl = FlowControlNone
+	opts.UsePriorityDataMsgs = UsePriorityDataMsgs
+	return opts
+}
+
+// DefaultHomeHubTerminalOpts returns the default terminal options for a crane
+// terminal used for the home hub.
+func DefaultHomeHubTerminalOpts() *TerminalOpts {
+	opts := &TerminalOpts{}
+	opts.Padding = 0 // Crane already applies padding.
+	opts.FlowControl = FlowControlDFQ
+	opts.UsePriorityDataMsgs = UsePriorityDataMsgs
+	return opts
+}
+
+// DefaultExpansionTerminalOpts returns the default terminal options for an
+// expansion terminal.
+func DefaultExpansionTerminalOpts() *TerminalOpts {
+	opts := &TerminalOpts{}
+	opts.Padding = 8
+	opts.FlowControl = FlowControlDFQ
+	opts.UsePriorityDataMsgs = UsePriorityDataMsgs
+	return opts
+}
diff --git a/spn/terminal/errors.go b/spn/terminal/errors.go
new file mode 100644
index 00000000..619bf181
--- /dev/null
+++ b/spn/terminal/errors.go
@@ -0,0 +1,221 @@
+package terminal
+
+import (
+	"context"
+	"errors"
+	"fmt"
+
+	"github.com/safing/portbase/formats/varint"
+)
+
+// Error is a terminal error.
+// Errors are identified by their internal ID, which is also what is
+// transmitted over the wire (see Pack/ParseExternalError); the message text
+// is informational only.
+type Error struct {
+	// id holds the internal error ID.
+	id uint8
+	// external signifies if the error was received from the outside.
+	external bool
+	// err holds the wrapped error or the default error message.
+	err error
+}
+
+// ID returns the internal ID of the error.
+// Note: unlike most accessors of Error, this one is not nil-safe.
+func (e *Error) ID() uint8 {
+	return e.id
+}
+
+// Error returns the human readable format of the error.
+func (e *Error) Error() string {
+	msg := e.err.Error()
+	if e.external {
+		// Mark errors that were received from the outside.
+		msg = "[ext] " + msg
+	}
+	return msg
+}
+
+// IsExternal returns whether the error occurred externally.
+// A nil error is never external.
+func (e *Error) IsExternal() bool {
+	return e != nil && e.external
+}
+
+// Is returns whether the given error is of the same type.
+// Comparison is by internal error ID only; the wrapped message and the
+// external flag are ignored. Intended for use via errors.Is().
+func (e *Error) Is(target error) bool {
+	if e == nil || target == nil {
+		return false
+	}
+
+	t, ok := target.(*Error) //nolint:errorlint // Error implementation, not usage.
+	if !ok {
+		return false
+	}
+	return e.id == t.id
+}
+
+// Unwrap returns the wrapped error, or nil if there is none.
+func (e *Error) Unwrap() error {
+	if e != nil && e.err != nil {
+		return e.err
+	}
+	return nil
+}
+
+// With adds context and details where the error occurred. The provided
+// message is appended to the error.
+// A new error with the same ID is returned and must be compared with
+// errors.Is().
+func (e *Error) With(format string, a ...interface{}) *Error {
+	// Return nil if error is nil.
+	if e == nil {
+		return nil
+	}
+
+	// Pass the existing message as an argument instead of splicing it into
+	// the format string, so "%" characters in the accumulated message cannot
+	// corrupt formatting. Verbs in format (including %w) keep working.
+	return &Error{
+		id:  e.id,
+		err: fmt.Errorf("%s: "+format, append([]interface{}{e.Error()}, a...)...),
+	}
+}
+
+// Wrap adds context higher up in the call chain. The provided message is
+// prepended to the error.
+// A new error with the same ID is returned and must be compared with
+// errors.Is().
+func (e *Error) Wrap(format string, a ...interface{}) *Error {
+	// Return nil if error is nil.
+	if e == nil {
+		return nil
+	}
+
+	// Pass the existing message as an argument instead of splicing it into
+	// the format string, so "%" characters in the accumulated message cannot
+	// corrupt formatting. Verbs in format (including %w) keep working.
+	return &Error{
+		id:  e.id,
+		err: fmt.Errorf(format+": %s", append(a, e.Error())...),
+	}
+}
+
+// AsExternal creates and returns an external version of the error.
+func (e *Error) AsExternal() *Error {
+	// Stay nil for nil errors.
+	if e == nil {
+		return nil
+	}
+
+	// Copy ID and message, flagging the copy as external.
+	extErr := &Error{
+		id:  e.id,
+		err: e.err,
+	}
+	extErr.external = true
+	return extErr
+}
+
+// Pack returns the serialized internal error ID. The additional message is
+// lost and is replaced with the default message upon parsing.
+// A nil error packs to a nil slice, which ParseExternalError maps back to
+// ErrStopping.
+func (e *Error) Pack() []byte {
+	// Return nil slice if error is nil.
+	if e == nil {
+		return nil
+	}
+
+	return varint.Pack8(e.id)
+}
+
+// ParseExternalError parses an external error from its wire format.
+// An empty input maps to an external ErrStopping.
+func ParseExternalError(id []byte) (*Error, error) {
+	if len(id) == 0 {
+		// No ID means a regular, successful stop.
+		return ErrStopping.AsExternal(), nil
+	}
+
+	parsedID, _, unpackErr := varint.Unpack8(id)
+	if unpackErr != nil {
+		return nil, fmt.Errorf("failed to unpack error ID: %w", unpackErr)
+	}
+	return NewExternalError(parsedID), nil
+}
+
+// NewExternalError creates an external error based on the given ID.
+// Unknown IDs map to an external ErrUnknownError.
+func NewExternalError(id uint8) *Error {
+	if registered, ok := errorRegistry[id]; ok {
+		return registered.AsExternal()
+	}
+	return ErrUnknownError.AsExternal()
+}
+
+// errorRegistry maps internal error IDs to their registered Error values.
+// It is only written during package initialization (via registerError) and
+// is read-only afterwards.
+var errorRegistry = make(map[uint8]*Error)
+
+// registerError creates an Error with the given ID and default message and
+// adds it to the registry. It panics on duplicate IDs and is therefore only
+// safe to call from package-level var initialization.
+func registerError(id uint8, err error) *Error {
+	// Check for duplicate.
+	_, ok := errorRegistry[id]
+	if ok {
+		panic(fmt.Sprintf("error with id %d already registered", id))
+	}
+
+	newErr := &Error{
+		id:  id,
+		err: err,
+	}
+
+	errorRegistry[id] = newErr
+	return newErr
+}
+
+// func (e *Error) IsSpecial() bool {
+// 	if e == nil {
+// 		return false
+// 	}
+// 	return e.id > 0 && e.id < 8
+// }
+
+// IsOK returns if the error represents a "OK" or success status.
+// This is the exact complement of IsError and is nil-safe.
+func (e *Error) IsOK() bool {
+	return !e.IsError()
+}
+
+// IsError returns if the error represents an erroneous condition.
+func (e *Error) IsError() bool {
+	// Nil or message-less errors are not errors.
+	if e == nil || e.err == nil {
+		return false
+	}
+	// IDs 1-7 are reserved for special "OK" values; everything else is an error.
+	return e.id == 0 || e.id >= 8
+}
+
+// Terminal Errors.
+// The list is kept sorted by error ID.
+var (
+	// ErrUnknownError is the default error.
+	ErrUnknownError = registerError(0, errors.New("unknown error"))
+
+	// Error IDs 1-7 are reserved for special "OK" values.
+
+	ErrStopping    = registerError(2, errors.New("stopping"))
+	ErrExplicitAck = registerError(3, errors.New("explicit ack"))
+	ErrNoActivity  = registerError(4, errors.New("no activity"))
+
+	// Errors IDs 8 and up are for regular errors.
+
+	ErrInternalError          = registerError(8, errors.New("internal error"))
+	ErrMalformedData          = registerError(9, errors.New("malformed data"))
+	ErrUnexpectedMsgType      = registerError(10, errors.New("unexpected message type"))
+	ErrUnknownOperationType   = registerError(11, errors.New("unknown operation type"))
+	ErrUnknownOperationID     = registerError(12, errors.New("unknown operation id"))
+	ErrPermissionDenied       = registerError(13, errors.New("permission denied"))
+	ErrIntegrity              = registerError(14, errors.New("integrity violated"))
+	ErrInvalidOptions         = registerError(15, errors.New("invalid options"))
+	ErrHubNotReady            = registerError(16, errors.New("hub not ready"))
+	ErrIncorrectUsage         = registerError(22, errors.New("incorrect usage"))
+	ErrRateLimited            = registerError(24, errors.New("rate limited"))
+	ErrTimeout                = registerError(62, errors.New("timed out"))
+	ErrUnsupportedVersion     = registerError(93, errors.New("unsupported version"))
+	ErrHubUnavailable         = registerError(101, errors.New("hub unavailable"))
+	ErrAbandonedTerminal      = registerError(102, errors.New("terminal is being abandoned"))
+	ErrShipSunk               = registerError(108, errors.New("ship sunk"))
+	ErrDestinationUnavailable = registerError(113, errors.New("destination unavailable"))
+	ErrTryAgainLater          = registerError(114, errors.New("try again later"))
+	ErrConnectionError        = registerError(121, errors.New("connection error"))
+	ErrQueueOverflow          = registerError(122, errors.New("queue overflowed"))
+	ErrCanceled               = registerError(125, context.Canceled)
+)
diff --git a/spn/terminal/fmt.go b/spn/terminal/fmt.go
new file mode 100644
index 00000000..6bebe3c0
--- /dev/null
+++ b/spn/terminal/fmt.go
@@ -0,0 +1,27 @@
+package terminal
+
+import "fmt"
+
+// CustomTerminalIDFormatting defines an interface for terminal to define their custom ID format.
+// When a terminal extension implements it, FmtID delegates to CustomIDFormat
+// instead of using the default "<parent>#<id>" format.
+type CustomTerminalIDFormatting interface {
+	CustomIDFormat() string
+}
+
+// FmtID formats the terminal ID together with the parent's ID.
+// If the terminal extension implements CustomTerminalIDFormatting, its
+// format takes precedence over the default.
+func (t *TerminalBase) FmtID() string {
+	if t.ext != nil {
+		if customFormatting, ok := t.ext.(CustomTerminalIDFormatting); ok {
+			return customFormatting.CustomIDFormat()
+		}
+	}
+
+	return fmtTerminalID(t.parentID, t.id)
+}
+
+// fmtTerminalID formats a terminal ID together with its crane ID as
+// "<crane>#<terminal>".
+func fmtTerminalID(craneID string, terminalID uint32) string {
+	return craneID + "#" + fmt.Sprint(terminalID)
+}
+
+// fmtOperationID formats an operation ID together with its crane and
+// terminal IDs as "<crane>#<terminal>><operation>".
+func fmtOperationID(craneID string, terminalID, operationID uint32) string {
+	// fmt.Sprint inserts no separators here, as string operands are adjacent
+	// to every numeric operand.
+	return fmt.Sprint(craneID, "#", terminalID, ">", operationID)
+}
diff --git a/spn/terminal/init.go b/spn/terminal/init.go
new file mode 100644
index 00000000..b9960424
--- /dev/null
+++ b/spn/terminal/init.go
@@ -0,0 +1,210 @@
+package terminal
+
+import (
+	"context"
+
+	"github.com/safing/jess"
+	"github.com/safing/portbase/container"
+	"github.com/safing/portbase/formats/dsd"
+	"github.com/safing/portbase/formats/varint"
+	"github.com/safing/portmaster/spn/cabin"
+	"github.com/safing/portmaster/spn/hub"
+)
+
+/*
+
+Terminal Init Message Format:
+
+- Version [varint]
+- Data Block [bytes; not blocked]
+	- TerminalOpts as DSD
+
+*/
+
+const (
+	// Supported range of the terminal init message version; currently only
+	// version 1 exists. Checked both when parsing and when packing options.
+	minSupportedTerminalVersion = 1
+	maxSupportedTerminalVersion = 1
+)
+
+// TerminalOpts holds configuration for the terminal.
+// The short json tags keep the serialized init message small on the wire.
+type TerminalOpts struct { //nolint:golint,maligned // TODO: Rename.
+	// Version is transmitted separately as a varint prefix, not as part of
+	// the DSD-encoded body, hence the "-" tag.
+	Version uint8  `json:"-"`
+	Encrypt bool   `json:"e,omitempty"`
+	Padding uint16 `json:"p,omitempty"`
+
+	FlowControl     FlowControlType `json:"fc,omitempty"`
+	FlowControlSize uint32          `json:"qs,omitempty"` // Previously was "QueueSize".
+
+	UsePriorityDataMsgs bool `json:"pr,omitempty"`
+}
+
+// ParseTerminalOpts parses terminal options from the container and checks if
+// they are valid.
+// Wire format: varint version, followed by the DSD-encoded TerminalOpts.
+func ParseTerminalOpts(c *container.Container) (*TerminalOpts, *Error) {
+	// Parse and check version.
+	version, err := c.GetNextN8()
+	if err != nil {
+		return nil, ErrMalformedData.With("failed to parse version: %w", err)
+	}
+	if version < minSupportedTerminalVersion || version > maxSupportedTerminalVersion {
+		return nil, ErrUnsupportedVersion.With("requested terminal version %d", version)
+	}
+
+	// Parse init message.
+	initMsg := &TerminalOpts{}
+	_, err = dsd.Load(c.CompileData(), initMsg)
+	if err != nil {
+		return nil, ErrMalformedData.With("failed to parse init message: %w", err)
+	}
+	initMsg.Version = version
+
+	// Check if options are valid.
+	// No defaults are applied here: the remote side must send complete options.
+	tErr := initMsg.Check(false)
+	if tErr != nil {
+		return nil, tErr
+	}
+
+	return initMsg, nil
+}
+
+// Pack serializes the terminal options and checks if they are valid.
+// Defaults are applied to unset required options before packing.
+// The output is the counterpart to ParseTerminalOpts: varint version
+// followed by the DSD-encoded options.
+func (opts *TerminalOpts) Pack() (*container.Container, *Error) {
+	// Check if options are valid.
+	tErr := opts.Check(true)
+	if tErr != nil {
+		return nil, tErr
+	}
+
+	// Pack init message.
+	optsData, err := dsd.Dump(opts, dsd.CBOR)
+	if err != nil {
+		return nil, ErrInternalError.With("failed to pack init message: %w", err)
+	}
+
+	// Compile init message.
+	return container.New(
+		varint.Pack8(opts.Version),
+		optsData,
+	), nil
+}
+
+// Check checks if terminal options are valid.
+// When useDefaultsForRequired is set, required options that are unset are
+// populated with their defaults instead of failing validation.
+func (opts *TerminalOpts) Check(useDefaultsForRequired bool) *Error {
+	// Version is required - use default when permitted.
+	if opts.Version == 0 && useDefaultsForRequired {
+		opts.Version = 1
+	}
+	if opts.Version < minSupportedTerminalVersion || opts.Version > maxSupportedTerminalVersion {
+		return ErrInvalidOptions.With("unsupported terminal version %d", opts.Version)
+	}
+
+	// FlowControl is optional.
+	switch opts.FlowControl {
+	case FlowControlDefault:
+		// Set to default flow control.
+		opts.FlowControl = defaultFlowControl
+	case FlowControlNone, FlowControlDFQ:
+		// Ok.
+	default:
+		return ErrInvalidOptions.With("unknown flow control type: %d", opts.FlowControl)
+	}
+
+	// FlowControlSize is required as it needs to be same on both sides.
+	// Use default when permitted.
+	if opts.FlowControlSize == 0 && useDefaultsForRequired {
+		opts.FlowControlSize = opts.FlowControl.DefaultSize()
+	}
+	// FlowControlSize is unsigned, so only zero and the upper bound need
+	// checking. (The previous "<= 0" on a uint32 was always just "== 0".)
+	if opts.FlowControlSize == 0 || opts.FlowControlSize > MaxQueueSize {
+		return ErrInvalidOptions.With("invalid flow control size of %d", opts.FlowControlSize)
+	}
+
+	return nil
+}
+
+// NewLocalBaseTerminal creates a new local terminal base for use with inheriting terminals.
+// It packs the init message (applying defaults) into initData for sending to
+// the remote side and, when remoteHub is given, sets up client-side
+// encryption towards it.
+func NewLocalBaseTerminal(
+	ctx context.Context,
+	id uint32,
+	parentID string,
+	remoteHub *hub.Hub,
+	initMsg *TerminalOpts,
+	upstream Upstream,
+) (
+	t *TerminalBase,
+	initData *container.Container,
+	err *Error,
+) {
+	// Pack, check and add defaults to init message.
+	initData, err = initMsg.Pack()
+	if err != nil {
+		return nil, nil, err
+	}
+
+	// Create baseline.
+	t, err = createTerminalBase(ctx, id, parentID, false, initMsg, upstream)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	// Setup encryption if enabled.
+	if remoteHub != nil {
+		initMsg.Encrypt = true
+
+		// Select signet (public key) of remote Hub to use.
+		s := remoteHub.SelectSignet()
+		if s == nil {
+			return nil, nil, ErrHubNotReady.With("failed to select signet of remote hub")
+		}
+
+		// Create new session.
+		env := jess.NewUnconfiguredEnvelope()
+		env.SuiteID = jess.SuiteWireV1
+		env.Recipients = []*jess.Signet{s}
+		// Note: := deliberately shadows the named return err with a plain error;
+		// the failure is converted to ErrIntegrity below.
+		jession, err := env.WireCorrespondence(nil)
+		if err != nil {
+			return nil, nil, ErrIntegrity.With("failed to initialize encryption: %w", err)
+		}
+		t.jession = jession
+
+		// Encryption is ready for sending.
+		close(t.encryptionReady)
+	}
+
+	return t, initData, nil
+}
+
+// NewRemoteBaseTerminal creates a new remote terminal base for use with inheriting terminals.
+// It parses and validates the received initData and, when encryption is
+// requested, attaches the given identity for the incoming key exchange.
+func NewRemoteBaseTerminal(
+	ctx context.Context,
+	id uint32,
+	parentID string,
+	identity *cabin.Identity,
+	initData *container.Container,
+	upstream Upstream,
+) (
+	t *TerminalBase,
+	initMsg *TerminalOpts,
+	err *Error,
+) {
+	// Parse init message.
+	initMsg, err = ParseTerminalOpts(initData)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	// Create baseline.
+	t, err = createTerminalBase(ctx, id, parentID, true, initMsg, upstream)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	// Setup encryption if enabled.
+	if initMsg.Encrypt {
+		if identity == nil {
+			return nil, nil, ErrInternalError.With("missing identity for setting up incoming encryption")
+		}
+		t.identity = identity
+	}
+
+	return t, initMsg, nil
+}
diff --git a/spn/terminal/metrics.go b/spn/terminal/metrics.go
new file mode 100644
index 00000000..0da0c326
--- /dev/null
+++ b/spn/terminal/metrics.go
@@ -0,0 +1,117 @@
+package terminal
+
+import (
+	"time"
+
+	"github.com/tevino/abool"
+
+	"github.com/safing/portbase/api"
+	"github.com/safing/portbase/metrics"
+)
+
+// metricsRegistered guards against duplicate metric registration.
+var metricsRegistered = abool.New()
+
+// registerMetrics registers all unit scheduler metrics.
+// It is safe to call multiple times; only the first call registers.
+func registerMetrics() (err error) {
+	// Only register metrics once.
+	if !metricsRegistered.SetToIf(false, true) {
+		return nil
+	}
+
+	// Get scheduler config and calculate scaling.
+	schedulerConfig := getSchedulerConfig()
+	scaleSlotToSecondsFactor := float64(time.Second / schedulerConfig.SlotDuration)
+
+	// Register metrics from scheduler stats.
+	// All gauges share the same options shape, so register them data-driven
+	// in one loop instead of six copy-pasted calls.
+	for _, gauge := range []struct {
+		id   string
+		fn   func() float64
+		name string
+	}{
+		{"spn/scheduling/unit/slotpace/max", metricFromInt(scheduler.GetMaxSlotPace, scaleSlotToSecondsFactor), "SPN Scheduling Max Slot Pace (scaled to per second)"},
+		{"spn/scheduling/unit/slotpace/leveled/max", metricFromInt(scheduler.GetMaxLeveledSlotPace, scaleSlotToSecondsFactor), "SPN Scheduling Max Leveled Slot Pace (scaled to per second)"},
+		{"spn/scheduling/unit/slotpace/avg", metricFromInt(scheduler.GetAvgSlotPace, scaleSlotToSecondsFactor), "SPN Scheduling Avg Slot Pace (scaled to per second)"},
+		{"spn/scheduling/unit/life/avg/seconds", metricFromNanoseconds(scheduler.GetAvgUnitLife), "SPN Scheduling Avg Unit Life"},
+		{"spn/scheduling/unit/workslot/avg/seconds", metricFromNanoseconds(scheduler.GetAvgWorkSlotDuration), "SPN Scheduling Avg Work Slot Duration"},
+		{"spn/scheduling/unit/catchupslot/avg/seconds", metricFromNanoseconds(scheduler.GetAvgCatchUpSlotDuration), "SPN Scheduling Avg Catch-Up Slot Duration"},
+	} {
+		_, err = metrics.NewGauge(
+			gauge.id,
+			nil,
+			gauge.fn,
+			&metrics.Options{
+				Name:       gauge.name,
+				Permission: api.PermitUser,
+			},
+		)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// metricFromInt adapts an int64 stat getter into a float64 metric function,
+// scaling each reading by the given factor.
+func metricFromInt(fn func() int64, scaleFactor float64) func() float64 {
+	return func() float64 {
+		value := float64(fn())
+		return value * scaleFactor
+	}
+}
+
+// metricFromNanoseconds adapts a nanosecond int64 stat getter into a metric
+// function reporting seconds.
+func metricFromNanoseconds(fn func() int64) func() float64 {
+	return func() float64 {
+		nanos := float64(fn())
+		return nanos / float64(time.Second)
+	}
+}
diff --git a/spn/terminal/module.go b/spn/terminal/module.go
new file mode 100644
index 00000000..178bc08c
--- /dev/null
+++ b/spn/terminal/module.go
@@ -0,0 +1,80 @@
+package terminal
+
+import (
+	"flag"
+	"time"
+
+	"github.com/safing/portbase/modules"
+	"github.com/safing/portbase/rng"
+	"github.com/safing/portmaster/spn/conf"
+	"github.com/safing/portmaster/spn/unit"
+)
+
+var (
+	// module is the portbase module handle for the terminal package.
+	module    *modules.Module
+	rngFeeder *rng.Feeder = rng.NewFeeder()
+
+	// scheduler is the shared unit scheduler for all terminal messages.
+	scheduler *unit.Scheduler
+
+	// debugUnitScheduling enables unit leak debug logging; set via flag.
+	debugUnitScheduling bool
+)
+
+func init() {
+	// Register the debug flag and the module; start() runs after "base".
+	flag.BoolVar(&debugUnitScheduling, "debug-unit-scheduling", false, "enable debug logs of the SPN unit scheduler")
+
+	module = modules.Register("terminal", nil, start, nil, "base")
+}
+
+// start is the module start hook: it sets up the rng feeder, the unit
+// scheduler (plus its worker), locks the operation registry and registers
+// metrics.
+func start() error {
+	// Replaces the feeder created at var-init time; the fresh one is tied to
+	// this start cycle.
+	rngFeeder = rng.NewFeeder()
+
+	scheduler = unit.NewScheduler(getSchedulerConfig())
+	if debugUnitScheduling {
+		// Debug unit leaks.
+		scheduler.StartDebugLog()
+	}
+	module.StartServiceWorker("msg unit scheduler", 0, scheduler.SlotScheduler)
+
+	lockOpRegistry()
+
+	return registerMetrics()
+}
+
+// waitForever is a nil channel: receiving from it blocks forever, which
+// implements "no timeout".
+var waitForever chan time.Time
+
+// TimedOut returns a channel that triggers when the timeout is reached.
+// A zero timeout returns a channel that never triggers.
+func TimedOut(timeout time.Duration) <-chan time.Time {
+	if timeout != 0 {
+		return time.After(timeout)
+	}
+	return waitForever
+}
+
+// StopScheduler stops the unit scheduler.
+// Safe to call before start(), when the scheduler is still nil.
+func StopScheduler() {
+	if scheduler != nil {
+		scheduler.Stop()
+	}
+}
+
+func getSchedulerConfig() *unit.SchedulerConfig {
+	// Client Scheduler Config.
+	if conf.Client() {
+		return &unit.SchedulerConfig{
+			SlotDuration:            10 * time.Millisecond, // 100 slots per second
+			MinSlotPace:             10,                    // 1000pps - Small starting pace for low end devices.
+			WorkSlotPercentage:      0.9,                   // 90%
+			SlotChangeRatePerStreak: 0.1,                   // 10% - Increase/Decrease quickly.
+			StatCycleDuration:       1 * time.Minute,       // Match metrics report cycle.
+		}
+	}
+
+	// Server Scheduler Config.
+	return &unit.SchedulerConfig{
+		SlotDuration:            10 * time.Millisecond, // 100 slots per second
+		MinSlotPace:             100,                   // 10000pps - Every server should be able to handle this.
+		WorkSlotPercentage:      0.7,                   // 70%
+		SlotChangeRatePerStreak: 0.05,                  // 5%
+		StatCycleDuration:       1 * time.Minute,       // Match metrics report cycle.
+	}
+}
diff --git a/spn/terminal/module_test.go b/spn/terminal/module_test.go
new file mode 100644
index 00000000..1f07003d
--- /dev/null
+++ b/spn/terminal/module_test.go
@@ -0,0 +1,13 @@
+package terminal
+
+import (
+	"testing"
+
+	"github.com/safing/portmaster/service/core/pmtesting"
+	"github.com/safing/portmaster/spn/conf"
+)
+
+// TestMain runs the package tests as a public hub, with the terminal module
+// started via the shared pmtesting harness.
+func TestMain(m *testing.M) {
+	conf.EnablePublicHub(true)
+	pmtesting.TestMain(m, module)
+}
diff --git a/spn/terminal/msg.go b/spn/terminal/msg.go
new file mode 100644
index 00000000..8ca00489
--- /dev/null
+++ b/spn/terminal/msg.go
@@ -0,0 +1,106 @@
+package terminal
+
+import (
+	"fmt"
+	"runtime"
+
+	"github.com/safing/portbase/container"
+	"github.com/safing/portmaster/spn/unit"
+)
+
+// Msg is a message within the SPN network stack.
+// It includes metadata and unit scheduling.
+// Use NewMsg/NewEmptyMsg to create instances so the Unit is initialized,
+// and call Finish when the message leaves the scheduler's responsibility.
+type Msg struct {
+	FlowID uint32
+	Type   MsgType
+	Data   *container.Container
+
+	// Unit scheduling.
+	// Note: With just 100B per packet, a uint64 (the Unit ID) is enough for
+	// over 1800 Exabyte. No need for overflow support.
+	Unit *unit.Unit
+}
+
+// NewMsg returns a new msg holding the given data.
+// The FlowID is unset.
+// The Type is Data.
+// A scheduler unit is assigned; the caller is responsible for Finish().
+func NewMsg(data []byte) *Msg {
+	msg := &Msg{
+		Type: MsgTypeData,
+		Data: container.New(data),
+		Unit: scheduler.NewUnit(),
+	}
+
+	// Debug unit leaks.
+	// skip=2 attributes the unit to NewMsg's caller, not NewMsg itself.
+	msg.debugWithCaller(2)
+
+	return msg
+}
+
+// NewEmptyMsg returns a new empty msg with an initialized Unit.
+// The FlowID is unset.
+// The Type is Data.
+// The Data is unset.
+func NewEmptyMsg() *Msg {
+	msg := &Msg{
+		Type: MsgTypeData,
+		Unit: scheduler.NewUnit(),
+	}
+
+	// Debug unit leaks.
+	// skip=2 attributes the unit to NewEmptyMsg's caller.
+	msg.debugWithCaller(2)
+
+	return msg
+}
+
+// Pack prepends the message header (Length and ID+Type) to the data.
+// The header is derived from the msg's own FlowID and Type.
+func (msg *Msg) Pack() {
+	MakeMsg(msg.Data, msg.FlowID, msg.Type)
+}
+
+// Consume adds another Message to itself.
+// The given Msg is packed before adding it to the data.
+// The data is moved - not copied!
+// High priority mark is inherited.
+// The consumed msg's unit is finished; the other msg must not be used
+// afterwards.
+func (msg *Msg) Consume(other *Msg) {
+	// Pack message to be added.
+	other.Pack()
+
+	// Move data.
+	msg.Data.AppendContainer(other.Data)
+
+	// Inherit high priority.
+	if other.Unit.IsHighPriority() {
+		msg.Unit.MakeHighPriority()
+	}
+
+	// Finish other unit.
+	other.Finish()
+}
+
+// Finish signals the unit scheduler that this unit has finished processing.
+// Will no-op if called on a nil Msg.
+func (msg *Msg) Finish() {
+	// Proxying is necessary, as a nil msg still panics.
+	if msg == nil {
+		return
+	}
+	msg.Unit.Finish()
+}
+
+// Debug registers the unit for debug output with the given source.
+// Additional calls on the same unit update the unit source.
+// StartDebugLog() must be called before calling DebugUnit().
+func (msg *Msg) Debug() {
+	// skip=2 records Debug's caller as the unit source.
+	msg.debugWithCaller(2)
+}
+
+// debugWithCaller registers the unit for leak debugging, tagging it with the
+// file:line of the caller skip frames up the stack. No-op unless unit
+// scheduling debugging is enabled.
+func (msg *Msg) debugWithCaller(skip int) { //nolint:unparam
+	if !debugUnitScheduling || msg == nil {
+		return
+	}
+	_, file, line, ok := runtime.Caller(skip)
+	if ok {
+		scheduler.DebugUnit(msg.Unit, fmt.Sprintf("%s:%d", file, line))
+	}
+}
diff --git a/spn/terminal/msgtypes.go b/spn/terminal/msgtypes.go
new file mode 100644
index 00000000..df712618
--- /dev/null
+++ b/spn/terminal/msgtypes.go
@@ -0,0 +1,66 @@
+package terminal
+
+import (
+	"github.com/safing/portbase/container"
+	"github.com/safing/portbase/formats/varint"
+)
+
+/*
+Terminal and Operation Message Format:
+
+- Length [varint]
+	- If Length is 0, the remainder of given data is padding.
+- IDType [varint]
+	- Type [uses least two significant bits]
+		- One of Init, Data, Stop
+	- ID [uses all other bits]
+		- The ID is currently not adapted in order to make reading raw message
+			easier. This means that IDs are currently always a multiple of 4.
+- Data [bytes; format depends on msg type]
+	- MsgTypeInit:
+		- Data [bytes]
+	- MsgTypeData:
+		- AddAvailableSpace [varint, if Flow Queue is used]
+		- (Encrypted) Data [bytes]
+	- MsgTypeStop:
+		- Error Code [varint]
+*/
+
+// MsgType is the message type for both terminals and operations.
+// It is encoded in the two least significant bits of the combined ID+Type
+// field (see AddIDType/ParseIDType), hence values 0-3.
+type MsgType uint8
+
+const (
+	// MsgTypeInit is used to establish a new terminal or run a new operation.
+	MsgTypeInit MsgType = 1
+
+	// MsgTypeData is used to send data to a terminal or operation.
+	MsgTypeData MsgType = 2
+
+	// MsgTypePriorityData is used to send prioritized data to a terminal or operation.
+	MsgTypePriorityData MsgType = 0
+
+	// MsgTypeStop is used to abandon a terminal or end an operation, with an optional error.
+	MsgTypeStop MsgType = 3
+)
+
+// AddIDType prepends the ID and Type header to the message.
+// The type occupies the two least significant bits, so the ID must be a
+// multiple of 4 for the OR to be lossless.
+func AddIDType(c *container.Container, id uint32, msgType MsgType) {
+	c.Prepend(varint.Pack32(id | uint32(msgType)))
+}
+
+// MakeMsg prepends the message header (Length and ID+Type) to the data.
+// The length prefix covers the ID+Type header plus the payload.
+func MakeMsg(c *container.Container, id uint32, msgType MsgType) {
+	AddIDType(c, id, msgType)
+	c.PrependLength()
+}
+
+// ParseIDType parses the combined message ID and type.
+// The two least significant bits are the type; the returned ID has those
+// bits cleared (IDs are always multiples of 4, see AddIDType).
+func ParseIDType(c *container.Container) (id uint32, msgType MsgType, err error) {
+	idType, err := c.GetNextN32()
+	if err != nil {
+		return 0, 0, err
+	}
+
+	msgType = MsgType(idType % 4)
+	return idType - uint32(msgType), msgType, nil
+}
diff --git a/spn/terminal/operation.go b/spn/terminal/operation.go
new file mode 100644
index 00000000..100936ec
--- /dev/null
+++ b/spn/terminal/operation.go
@@ -0,0 +1,332 @@
+package terminal
+
+import (
+	"context"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"github.com/tevino/abool"
+
+	"github.com/safing/portbase/container"
+	"github.com/safing/portbase/log"
+	"github.com/safing/portbase/utils"
+)
+
+// Operation is an interface for all operations.
+type Operation interface {
+	// InitOperationBase initializes the operation with the ID and attached terminal.
+	// Should not be overridden by implementations.
+	InitOperationBase(t Terminal, opID uint32)
+
+	// ID returns the ID of the operation.
+	// Should not be overridden by implementations.
+	ID() uint32
+
+	// Type returns the operation's type ID.
+	// Should be overridden by implementations to return correct type ID.
+	Type() string
+
+	// Deliver delivers a message to the operation.
+	// Meant to be overridden by implementations.
+	Deliver(msg *Msg) *Error
+
+	// NewMsg creates a new message from this operation.
+	// Should not be overridden by implementations.
+	NewMsg(data []byte) *Msg
+
+	// Send sends a message to the other side.
+	// Should not be overridden by implementations.
+	Send(msg *Msg, timeout time.Duration) *Error
+
+	// Flush sends all messages waiting in the terminal.
+	// Should not be overridden by implementations.
+	Flush(timeout time.Duration)
+
+	// Stopped returns whether the operation has stopped.
+	// Should not be overridden by implementations.
+	Stopped() bool
+
+	// markStopped marks the operation as stopped.
+	// It returns whether the stop flag was set.
+	markStopped() bool
+
+	// Stop stops the operation by unregistering it from the terminal and calling HandleStop().
+	// Should not be overridden by implementations.
+	Stop(self Operation, err *Error)
+
+	// HandleStop gives the operation the ability to cleanly shut down.
+	// The returned error is the error to send to the other side.
+	// Should never be called directly. Call Stop() instead.
+	// Meant to be overridden by implementations.
+	HandleStop(err *Error) (errorToSend *Error)
+
+	// Terminal returns the terminal the operation is linked to.
+	// Should not be overridden by implementations.
+	Terminal() Terminal
+}
+
+// OperationFactory defines an operation factory.
+type OperationFactory struct {
+	// Type is the type id of an operation.
+	Type string
+	// Requires defines the required permissions to run an operation.
+	Requires Permission
+	// Start is the function that starts a new operation.
+	Start OperationStarter
+}
+
+// OperationStarter is used to initialize operations remotely.
+type OperationStarter func(attachedTerminal Terminal, opID uint32, initData *container.Container) (Operation, *Error)
+
+var (
+	opRegistry       = make(map[string]*OperationFactory)
+	opRegistryLock   sync.Mutex
+	opRegistryLocked = abool.New()
+)
+
+// RegisterOpType registers a new operation type and may only be called during
+// Go's init and a module's prep phase.
+func RegisterOpType(factory OperationFactory) {
+	// Check if we can still register an operation type.
+	if opRegistryLocked.IsSet() {
+		log.Errorf("spn/terminal: failed to register operation %s: operation registry is already locked", factory.Type)
+		return
+	}
+
+	opRegistryLock.Lock()
+	defer opRegistryLock.Unlock()
+
+	// Check if the operation type was already registered.
+	if _, ok := opRegistry[factory.Type]; ok {
+		log.Errorf("spn/terminal: failed to register operation type %s: type already registered", factory.Type)
+		return
+	}
+
+	// Save to registry.
+	opRegistry[factory.Type] = &factory
+}
+
+// lockOpRegistry marks the operation registry as locked, so that no further
+// operation types can be registered. From then on opRegistry is treated as
+// read-only — handleOperationStart reads it without holding opRegistryLock.
+func lockOpRegistry() {
+	opRegistryLocked.Set()
+}
+
+// handleOperationStart starts an operation requested by the other side.
+// The operation type name is expected in the first block of initData; the
+// remainder of initData is handed to the operation's starter. Any failure is
+// reported back via StopOperation on an unknownOp shim carrying the ID.
+func (t *TerminalBase) handleOperationStart(opID uint32, initData *container.Container) {
+	// Check if the terminal is being abandoned.
+	if t.Abandoning.IsSet() {
+		t.StopOperation(newUnknownOp(opID, ""), ErrAbandonedTerminal)
+		return
+	}
+
+	// Extract the requested operation name.
+	opType, err := initData.GetNextBlock()
+	if err != nil {
+		t.StopOperation(newUnknownOp(opID, ""), ErrMalformedData.With("failed to get init data: %w", err))
+		return
+	}
+
+	// Get the operation factory from the registry.
+	// Note: opRegistry is read without opRegistryLock; this relies on the
+	// registry being locked (immutable) before terminals start running.
+	factory, ok := opRegistry[string(opType)]
+	if !ok {
+		t.StopOperation(newUnknownOp(opID, ""), ErrUnknownOperationType.With(utils.SafeFirst16Bytes(opType)))
+		return
+	}
+
+	// Check if the Terminal has the required permission to run the operation.
+	if !t.HasPermission(factory.Requires) {
+		t.StopOperation(newUnknownOp(opID, factory.Type), ErrPermissionDenied)
+		return
+	}
+
+	// Get terminal to attach to.
+	attachToTerminal := t.ext
+	if attachToTerminal == nil {
+		attachToTerminal = t
+	}
+
+	// Run the operation.
+	op, opErr := factory.Start(attachToTerminal, opID, initData)
+	switch {
+	case opErr != nil:
+		// Something went wrong.
+		t.StopOperation(newUnknownOp(opID, factory.Type), opErr)
+	case op == nil:
+		// The Operation was successful and is done already.
+		log.Debugf("spn/terminal: operation %s %s executed", factory.Type, fmtOperationID(t.parentID, t.id, opID))
+		t.StopOperation(newUnknownOp(opID, factory.Type), nil)
+	default:
+		// The operation started successfully and requires persistence.
+		t.SetActiveOp(opID, op)
+		log.Debugf("spn/terminal: operation %s %s started", factory.Type, fmtOperationID(t.parentID, t.id, opID))
+	}
+}
+
+// StartOperation starts the given operation by assigning it an ID and sending the given operation initialization data.
+func (t *TerminalBase) StartOperation(op Operation, initData *container.Container, timeout time.Duration) *Error {
+	// Get terminal to attach to.
+	attachToTerminal := t.ext
+	if attachToTerminal == nil {
+		attachToTerminal = t
+	}
+
+	// Get the next operation ID and set it on the operation with the terminal.
+	// IDs advance in steps of 8: the two low bits carry the message type on
+	// the wire (see the format description), and the extra spacing presumably
+	// separates the two sides' ID ranges — confirm against the protocol docs.
+	op.InitOperationBase(attachToTerminal, atomic.AddUint32(t.nextOpID, 8))
+
+	// Always add operation to the active operations, as we need to receive a
+	// reply in any case.
+	t.SetActiveOp(op.ID(), op)
+
+	log.Debugf("spn/terminal: operation %s %s started", op.Type(), fmtOperationID(t.parentID, t.id, op.ID()))
+
+	// Add or create the operation type block.
+	if initData == nil {
+		initData = container.New()
+		initData.AppendAsBlock([]byte(op.Type()))
+	} else {
+		initData.PrependAsBlock([]byte(op.Type()))
+	}
+
+	// Create init msg.
+	msg := NewEmptyMsg()
+	msg.FlowID = op.ID()
+	msg.Type = MsgTypeInit
+	msg.Data = initData
+	msg.Unit.MakeHighPriority()
+
+	// Send init msg.
+	err := op.Send(msg, timeout)
+	if err != nil {
+		// NOTE(review): op.Send (OperationBase.Send) already finishes the msg
+		// on failure, so this is a second Finish — assumes Finish is safe to
+		// call twice; confirm.
+		msg.Finish()
+	}
+	return err
+}
+
+// Send sends data via this terminal.
+// If a timeout is set, sending will fail after the given timeout passed.
+func (t *TerminalBase) Send(msg *Msg, timeout time.Duration) *Error {
+	// Wait for processing slot.
+	msg.Unit.WaitForSlot()
+
+	// Fast path: submit without blocking if the send queue has available space.
+	select {
+	case t.sendQueue <- msg:
+		return nil
+	default:
+	}
+
+	// Slow path: block until there is space, the timeout fires, or the
+	// terminal context is canceled.
+	select {
+	case t.sendQueue <- msg:
+		return nil
+	case <-TimedOut(timeout):
+		msg.Finish()
+		return ErrTimeout.With("sending via terminal")
+	case <-t.Ctx().Done():
+		msg.Finish()
+		return ErrStopping
+	}
+}
+
+// StopOperation sends the end signal with an optional error and then deletes
+// the operation from the Terminal state and calls HandleStop() on the Operation.
+func (t *TerminalBase) StopOperation(op Operation, err *Error) {
+	// Check if the operation has already stopped.
+	// markStopped flips the stop flag exactly once, so concurrent stops run
+	// the shutdown below only once.
+	if !op.markStopped() {
+		return
+	}
+
+	// Log reason the Operation is ending. Override stopping error with nil.
+	switch {
+	case err == nil:
+		log.Debugf("spn/terminal: operation %s %s stopped", op.Type(), fmtOperationID(t.parentID, t.id, op.ID()))
+	case err.IsOK(), err.Is(ErrTryAgainLater), err.Is(ErrRateLimited):
+		log.Debugf("spn/terminal: operation %s %s stopped: %s", op.Type(), fmtOperationID(t.parentID, t.id, op.ID()), err)
+	default:
+		log.Warningf("spn/terminal: operation %s %s failed: %s", op.Type(), fmtOperationID(t.parentID, t.id, op.ID()), err)
+	}
+
+	// Shut down asynchronously so StopOperation never blocks the caller.
+	module.StartWorker("stop operation", func(_ context.Context) error {
+		// Call operation stop handle function for proper shutdown cleaning up.
+		err = op.HandleStop(err)
+
+		// Send error to the connected Operation, if the error is internal.
+		// NOTE(review): err.IsExternal() is invoked before the nil check
+		// below, so *Error must be nil-safe for IsExternal — confirm.
+		if !err.IsExternal() {
+			if err == nil {
+				err = ErrStopping
+			}
+
+			msg := NewMsg(err.Pack())
+			msg.FlowID = op.ID()
+			msg.Type = MsgTypeStop
+
+			tErr := t.Send(msg, 10*time.Second)
+			if tErr != nil {
+				// NOTE(review): t.Send already finishes the msg on its failure
+				// paths; assumes Finish is safe to call twice — confirm.
+				msg.Finish()
+				log.Warningf("spn/terminal: failed to send stop msg: %s", tErr)
+			}
+		}
+
+		// Remove operation from terminal.
+		t.DeleteActiveOp(op.ID())
+
+		return nil
+	})
+}
+
+// GetActiveOp looks up the active operation with the given ID in the
+// Terminal state.
+func (t *TerminalBase) GetActiveOp(opID uint32) (op Operation, ok bool) {
+	t.lock.RLock()
+	defer t.lock.RUnlock()
+
+	found, exists := t.operations[opID]
+	return found, exists
+}
+
+// SetActiveOp registers an active operation in the Terminal state.
+func (t *TerminalBase) SetActiveOp(opID uint32, op Operation) {
+	t.lock.Lock()
+	defer t.lock.Unlock()
+
+	t.operations[opID] = op
+}
+
+// DeleteActiveOp removes the active operation with the given ID from the
+// Terminal state.
+func (t *TerminalBase) DeleteActiveOp(opID uint32) {
+	t.lock.Lock()
+	defer t.lock.Unlock()
+
+	delete(t.operations, opID)
+}
+
+// GetActiveOpCount reports how many operations are currently active.
+func (t *TerminalBase) GetActiveOpCount() int {
+	t.lock.RLock()
+	defer t.lock.RUnlock()
+
+	return len(t.operations)
+}
+
+// unknownOp is a shim operation used to address errors to an operation ID
+// for which no real operation exists (or could be started).
+type unknownOp struct {
+	OperationBase
+	typeID string
+}
+
+// newUnknownOp returns a new unknownOp shim with the given ID and type.
+func newUnknownOp(id uint32, typeID string) *unknownOp {
+	op := &unknownOp{typeID: typeID}
+	op.id = id
+	return op
+}
+
+// Type returns the operation's type ID, falling back to a generic value.
+func (op *unknownOp) Type() string {
+	if op.typeID == "" {
+		return "unknown"
+	}
+	return op.typeID
+}
+
+// Deliver rejects all incoming messages, as the shim has no real operation.
+func (op *unknownOp) Deliver(msg *Msg) *Error {
+	return ErrIncorrectUsage.With("unknown op shim cannot receive")
+}
diff --git a/spn/terminal/operation_base.go b/spn/terminal/operation_base.go
new file mode 100644
index 00000000..4b588c4f
--- /dev/null
+++ b/spn/terminal/operation_base.go
@@ -0,0 +1,185 @@
+package terminal
+
+import (
+	"time"
+
+	"github.com/tevino/abool"
+)
+
+// OperationBase provides the basic operation functionality.
+type OperationBase struct {
+	terminal Terminal         // terminal this operation is attached to; set via InitOperationBase
+	id       uint32           // operation ID; set via InitOperationBase
+	stopped  abool.AtomicBool // set once when the operation stops; see markStopped
+}
+
+// InitOperationBase initializes the operation with the ID and attached terminal.
+// Should not be overridden by implementations.
+func (op *OperationBase) InitOperationBase(t Terminal, opID uint32) {
+	op.id = opID
+	op.terminal = t
+}
+
+// ID returns the ID of the operation.
+// Should not be overridden by implementations.
+func (op *OperationBase) ID() uint32 {
+	return op.id
+}
+
+// Type returns the operation's type ID.
+// Should be overridden by implementations to return correct type ID.
+func (op *OperationBase) Type() string {
+	return "unknown"
+}
+
+// Deliver delivers a message to the operation.
+// Meant to be overridden by implementations.
+func (op *OperationBase) Deliver(_ *Msg) *Error {
+	return ErrIncorrectUsage.With("Deliver not implemented for this operation")
+}
+
+// NewMsg creates a new data message from this operation, pre-filled with the
+// operation's flow ID.
+// Should not be overridden by implementations.
+func (op *OperationBase) NewMsg(data []byte) *Msg {
+	msg := NewMsg(data)
+	msg.FlowID = op.id
+	msg.Type = MsgTypeData
+
+	// Debug unit leaks.
+	// The argument presumably skips two stack frames so the caller of NewMsg
+	// is recorded — confirm against debugWithCaller.
+	msg.debugWithCaller(2)
+
+	return msg
+}
+
+// NewEmptyMsg creates a new empty data message from this operation, pre-filled
+// with the operation's flow ID.
+// Should not be overridden by implementations.
+func (op *OperationBase) NewEmptyMsg() *Msg {
+	msg := NewEmptyMsg()
+	msg.FlowID = op.id
+	msg.Type = MsgTypeData
+
+	// Debug unit leaks.
+	msg.debugWithCaller(2)
+
+	return msg
+}
+
+// Send sends a message to the other side.
+// Should not be overridden by implementations.
+func (op *OperationBase) Send(msg *Msg, timeout time.Duration) *Error {
+	// Add and update metadata.
+	// High-priority data messages are upgraded to the priority type when the
+	// global UsePriorityDataMsgs flag is enabled.
+	msg.FlowID = op.id
+	if msg.Type == MsgTypeData && msg.Unit.IsHighPriority() && UsePriorityDataMsgs {
+		msg.Type = MsgTypePriorityData
+	}
+
+	// Wait for processing slot.
+	// NOTE(review): TerminalBase.Send calls msg.Unit.WaitForSlot() again;
+	// assumes waiting is idempotent once a slot is held — confirm.
+	msg.Unit.WaitForSlot()
+
+	// Send message.
+	tErr := op.terminal.Send(msg, timeout)
+	if tErr != nil {
+		// Finish message unit on failure.
+		msg.Finish()
+	}
+	return tErr
+}
+
+// Flush sends all messages waiting in the terminal.
+// Meant to be overridden by implementations.
+// NOTE(review): the Operation interface documents Flush as "should not be
+// overridden" — these two comments disagree; confirm which is intended.
+func (op *OperationBase) Flush(timeout time.Duration) {
+	op.terminal.Flush(timeout)
+}
+
+// Stopped returns whether the operation has stopped.
+// Should not be overridden by implementations.
+func (op *OperationBase) Stopped() bool {
+	return op.stopped.IsSet()
+}
+
+// markStopped marks the operation as stopped.
+// It returns whether the stop flag was set (i.e. only the first caller gets true).
+func (op *OperationBase) markStopped() bool {
+	return op.stopped.SetToIf(false, true)
+}
+
+// Stop stops the operation by unregistering it from the terminal and calling HandleStop().
+// The self parameter carries the concrete operation so overridden methods are used.
+// Should not be overridden by implementations.
+func (op *OperationBase) Stop(self Operation, err *Error) {
+	// Stop operation from terminal.
+	op.terminal.StopOperation(self, err)
+}
+
+// HandleStop gives the operation the ability to cleanly shut down.
+// The returned error is the error to send to the other side.
+// Should never be called directly. Call Stop() instead.
+// Meant to be overridden by implementations.
+func (op *OperationBase) HandleStop(err *Error) (errorToSend *Error) {
+	return err
+}
+
+// Terminal returns the terminal the operation is linked to.
+// Should not be overridden by implementations.
+func (op *OperationBase) Terminal() Terminal {
+	return op.terminal
+}
+
+// OneOffOperationBase is an operation base for operations that just have one
+// message and an error return.
+type OneOffOperationBase struct {
+	OperationBase
+
+	// Result receives the final error when the operation stops.
+	Result chan *Error
+}
+
+// Init initializes the single operation base.
+// The Result channel is buffered with size 1 so HandleStop never blocks.
+func (op *OneOffOperationBase) Init() {
+	op.Result = make(chan *Error, 1)
+}
+
+// HandleStop gives the operation the ability to cleanly shut down.
+// The returned error is the error to send to the other side.
+// Should never be called directly. Call Stop() instead.
+func (op *OneOffOperationBase) HandleStop(err *Error) (errorToSend *Error) {
+	// Non-blocking send: only the first result is recorded; later stops are dropped.
+	select {
+	case op.Result <- err:
+	default:
+	}
+	return err
+}
+
+// MessageStreamOperationBase is an operation base for receiving a message stream.
+// Every received message must be finished by the implementing operation.
+type MessageStreamOperationBase struct {
+	OperationBase
+
+	// Delivered receives incoming messages; sized by Init's deliverQueueSize.
+	Delivered chan *Msg
+	// Ended receives the final error when the operation stops.
+	Ended chan *Error
+}
+
+// Init initializes the operation base with the given delivery queue size.
+func (op *MessageStreamOperationBase) Init(deliverQueueSize int) {
+	op.Delivered = make(chan *Msg, deliverQueueSize)
+	op.Ended = make(chan *Error, 1)
+}
+
+// Deliver delivers data to the operation.
+// It fails instead of blocking when the delivery queue is full.
+func (op *MessageStreamOperationBase) Deliver(msg *Msg) *Error {
+	select {
+	case op.Delivered <- msg:
+		return nil
+	default:
+		return ErrIncorrectUsage.With("request was not waiting for data")
+	}
+}
+
+// HandleStop gives the operation the ability to cleanly shut down.
+// The returned error is the error to send to the other side.
+// Should never be called directly. Call Stop() instead.
+func (op *MessageStreamOperationBase) HandleStop(err *Error) (errorToSend *Error) {
+	// Non-blocking send: only the first end signal is recorded.
+	select {
+	case op.Ended <- err:
+	default:
+	}
+	return err
+}
diff --git a/spn/terminal/operation_counter.go b/spn/terminal/operation_counter.go
new file mode 100644
index 00000000..59d175e0
--- /dev/null
+++ b/spn/terminal/operation_counter.go
@@ -0,0 +1,255 @@
+package terminal
+
+import (
+	"context"
+	"fmt"
+	"sync"
+	"time"
+
+	"github.com/safing/portbase/container"
+	"github.com/safing/portbase/formats/dsd"
+	"github.com/safing/portbase/formats/varint"
+	"github.com/safing/portbase/log"
+)
+
+// CounterOpType is the type ID for the Counter Operation.
+const CounterOpType string = "debug/count"
+
+// CounterOp sends increasing numbers on both sides.
+type CounterOp struct { //nolint:maligned
+	OperationBase
+
+	wg     sync.WaitGroup // released once in HandleStop; see Wait
+	server bool           // true when this side was started remotely (startCounterOp)
+	opts   *CounterOpts
+
+	// counterLock guards ClientCounter and ServerCounter.
+	counterLock   sync.Mutex
+	ClientCounter uint64
+	ServerCounter uint64
+	Error         error
+}
+
+// CounterOpts holds the options for CounterOp.
+type CounterOpts struct {
+	ClientCountTo uint64
+	ServerCountTo uint64
+	Wait          time.Duration
+	Flush         bool
+
+	// suppressWorker disables starting the client-side sending worker;
+	// presumably used by tests that drive sending manually — confirm.
+	suppressWorker bool
+}
+
+func init() {
+	// Requires is left at its zero value, so no permission is needed to run
+	// the counter op.
+	RegisterOpType(OperationFactory{
+		Type:  CounterOpType,
+		Start: startCounterOp,
+	})
+}
+
+// NewCounterOp starts a new CounterOp on the given terminal with the given
+// options and returns it. The operation base is initialized by
+// t.StartOperation, which assigns the ID and attaches the terminal.
+func NewCounterOp(t Terminal, opts CounterOpts) (*CounterOp, *Error) {
+	// Create operation.
+	op := &CounterOp{
+		opts: &opts,
+	}
+	op.wg.Add(1)
+
+	// Create argument container.
+	data, err := dsd.Dump(op.opts, dsd.JSON)
+	if err != nil {
+		return nil, ErrInternalError.With("failed to pack options: %w", err)
+	}
+
+	// Initialize operation.
+	tErr := t.StartOperation(op, container.New(data), 3*time.Second)
+	if tErr != nil {
+		return nil, tErr
+	}
+
+	// Start worker if needed.
+	if op.getRemoteCounterTarget() > 0 && !op.opts.suppressWorker {
+		module.StartWorker("counter sender", op.CounterWorker)
+	}
+	return op, nil
+}
+
+// startCounterOp is the registered starter: it creates the server-side
+// CounterOp from the init data sent by the remote client.
+func startCounterOp(t Terminal, opID uint32, data *container.Container) (Operation, *Error) {
+	// Create operation.
+	op := &CounterOp{
+		server: true,
+	}
+	op.InitOperationBase(t, opID)
+	op.wg.Add(1)
+
+	// Parse arguments.
+	opts := &CounterOpts{}
+	_, err := dsd.Load(data.CompileData(), opts)
+	if err != nil {
+		return nil, ErrInternalError.With("failed to unpack options: %w", err)
+	}
+	op.opts = opts
+
+	// Start worker if needed.
+	if op.getRemoteCounterTarget() > 0 {
+		module.StartWorker("counter sender", op.CounterWorker)
+	}
+
+	return op, nil
+}
+
+// Type returns the operation's type ID.
+func (op *CounterOp) Type() string {
+	return CounterOpType
+}
+
+// getCounter returns (and optionally increments) one of the two counters,
+// selected by whether this side is the server and whether the access is for
+// sending or receiving.
+func (op *CounterOp) getCounter(sending, increase bool) uint64 {
+	op.counterLock.Lock()
+	defer op.counterLock.Unlock()
+
+	// Use server counter, when op is server or for sending, but not when both.
+	// (server XOR sending selects the server counter.)
+	if op.server != sending {
+		if increase {
+			op.ServerCounter++
+		}
+		return op.ServerCounter
+	}
+
+	if increase {
+		op.ClientCounter++
+	}
+	return op.ClientCounter
+}
+
+// getRemoteCounterTarget returns how far the remote side is expected to count.
+func (op *CounterOp) getRemoteCounterTarget() uint64 {
+	if !op.server {
+		return op.opts.ServerCountTo
+	}
+	return op.opts.ClientCountTo
+}
+
+// isDone reports whether both sides have reached their counting targets.
+func (op *CounterOp) isDone() bool {
+	op.counterLock.Lock()
+	defer op.counterLock.Unlock()
+
+	clientDone := op.ClientCounter >= op.opts.ClientCountTo
+	serverDone := op.ServerCounter >= op.opts.ServerCountTo
+	return clientDone && serverDone
+}
+
+// Deliver delivers data to the operation.
+// It verifies that the received number is exactly the next expected counter
+// value and stops the operation with an integrity error on mismatch.
+func (op *CounterOp) Deliver(msg *Msg) *Error {
+	defer msg.Finish()
+
+	nextStep, err := msg.Data.GetNextN64()
+	if err != nil {
+		op.Stop(op, ErrMalformedData.With("failed to parse next number: %w", err))
+		return nil
+	}
+
+	// Count and compare.
+	// sending=false selects the receiving counter; increase=true advances it.
+	counter := op.getCounter(false, true)
+
+	// Debugging:
+	// if counter < 100 ||
+	// 	counter < 1000 && counter%100 == 0 ||
+	// 	counter < 10000 && counter%1000 == 0 ||
+	// 	counter < 100000 && counter%10000 == 0 ||
+	// 	counter < 1000000 && counter%100000 == 0 {
+	// 	log.Errorf("spn/terminal: counter %s>%d recvd, now at %d", op.t.FmtID(), op.id, counter)
+	// }
+
+	if counter != nextStep {
+		log.Warningf(
+			"terminal: integrity of counter op violated: received %d, expected %d",
+			nextStep,
+			counter,
+		)
+		op.Stop(op, ErrIntegrity.With("counters mismatched"))
+		return nil
+	}
+
+	// Check if we are done.
+	if op.isDone() {
+		op.Stop(op, nil)
+	}
+
+	return nil
+}
+
+// HandleStop handles stopping the operation.
+// If counting did not finish, the mismatch is recorded in op.Error.
+// The given error is passed through unchanged as the error to send.
+func (op *CounterOp) HandleStop(err *Error) (errorToSend *Error) {
+	// Check if counting finished.
+	if !op.isDone() {
+		// Read the counters under the counter lock, as the sending worker or
+		// Deliver may still be updating them concurrently.
+		op.counterLock.Lock()
+		clientCounter, serverCounter := op.ClientCounter, op.ServerCounter
+		op.counterLock.Unlock()
+
+		op.Error = fmt.Errorf(
+			"counter op %d: did not finish counting (%d<-%d %d->%d)",
+			op.id,
+			op.opts.ClientCountTo, clientCounter,
+			serverCounter, op.opts.ServerCountTo,
+		)
+	}
+
+	// Release everyone blocked in Wait().
+	op.wg.Done()
+	return err
+}
+
+// SendCounter sends the next counter.
+// It returns ErrStopping once the operation has stopped.
+func (op *CounterOp) SendCounter() *Error {
+	if op.Stopped() {
+		return ErrStopping
+	}
+
+	// Increase sending counter.
+	counter := op.getCounter(true, true)
+
+	// Debugging:
+	// if counter < 100 ||
+	// 	counter < 1000 && counter%100 == 0 ||
+	// 	counter < 10000 && counter%1000 == 0 ||
+	// 	counter < 100000 && counter%10000 == 0 ||
+	// 	counter < 1000000 && counter%100000 == 0 {
+	// 	defer log.Errorf("spn/terminal: counter %s>%d sent, now at %d", op.t.FmtID(), op.id, counter)
+	// }
+
+	return op.Send(op.NewMsg(varint.Pack64(counter)), 3*time.Second)
+}
+
+// Wait waits for the Counter Op to finish.
+func (op *CounterOp) Wait() {
+	op.wg.Wait()
+}
+
+// CounterWorker is a worker that sends counters until the remote target is
+// reached or the operation stops.
+func (op *CounterOp) CounterWorker(ctx context.Context) error {
+	for {
+		// Send counter msg.
+		// Note: this switch compares *Error by pointer identity; it matches
+		// because SendCounter returns the ErrStopping sentinel itself. Other
+		// errors fall through to the default case.
+		err := op.SendCounter()
+		switch err {
+		case nil:
+			// All good, continue.
+		case ErrStopping:
+			// Done!
+			return nil
+		default:
+			// Something went wrong.
+			err := fmt.Errorf("counter op %d: failed to send counter: %w", op.id, err)
+			op.Error = err
+			op.Stop(op, ErrInternalError.With(err.Error()))
+			return nil
+		}
+
+		// Maybe flush message.
+		if op.opts.Flush {
+			op.terminal.Flush(1 * time.Second)
+		}
+
+		// Check if we are done with sending.
+		if op.getCounter(true, false) >= op.getRemoteCounterTarget() {
+			return nil
+		}
+
+		// Maybe wait a little.
+		if op.opts.Wait > 0 {
+			time.Sleep(op.opts.Wait)
+		}
+	}
+}
diff --git a/spn/terminal/permission.go b/spn/terminal/permission.go
new file mode 100644
index 00000000..ee39e28a
--- /dev/null
+++ b/spn/terminal/permission.go
@@ -0,0 +1,50 @@
+package terminal
+
+// Permission is a bit-map of granted permissions.
+type Permission uint16
+
+// Permissions.
+// Values are single bit flags; the gaps leave room for future permissions.
+const (
+	NoPermission      Permission = 0x0
+	MayExpand         Permission = 0x1
+	MayConnect        Permission = 0x2
+	IsHubOwner        Permission = 0x100
+	IsHubAdvisor      Permission = 0x200
+	IsCraneController Permission = 0x8000
+)
+
+// AuthorizingTerminal is an interface for terminals that support authorization.
+type AuthorizingTerminal interface {
+	GrantPermission(grant Permission)
+	HasPermission(required Permission) bool
+}
+
+// GrantPermission adds the given permission bits to the Terminal's
+// permission set.
+func (t *TerminalBase) GrantPermission(grant Permission) {
+	t.lock.Lock()
+	defer t.lock.Unlock()
+
+	t.permission = t.permission | grant
+}
+
+// HasPermission reports whether the Terminal holds all of the required
+// permission bits.
+func (t *TerminalBase) HasPermission(required Permission) bool {
+	t.lock.RLock()
+	defer t.lock.RUnlock()
+
+	granted := t.permission
+	return granted.Has(required)
+}
+
+// Has reports whether the permission includes all bits of the specified
+// permission.
+func (p Permission) Has(required Permission) bool {
+	// Equivalent to p&required == required: no required bit may be missing.
+	return required&^p == 0
+}
+
+// AddPermissions combines multiple permissions into one bit-map.
+func AddPermissions(perms ...Permission) Permission {
+	var combined Permission
+	for i := range perms {
+		combined |= perms[i]
+	}
+	return combined
+}
diff --git a/spn/terminal/rate_limit.go b/spn/terminal/rate_limit.go
new file mode 100644
index 00000000..162afca0
--- /dev/null
+++ b/spn/terminal/rate_limit.go
@@ -0,0 +1,39 @@
+package terminal
+
+import "time"
+
+// RateLimiter is a data flow rate limiter that works in one-second slots.
+// It is not safe for concurrent use.
+type RateLimiter struct {
+	maxBytesPerSlot uint64    // byte budget per one-second slot
+	slotBytes       uint64    // bytes accounted to the current slot
+	slotStarted     time.Time // start time of the current slot
+}
+
+// NewRateLimiter returns a new rate limiter that limits the data flow to the
+// given amount of MBit per second.
+func NewRateLimiter(mbits uint64) *RateLimiter {
+	return &RateLimiter{
+		// Convert MBit/s to bytes per second exactly (1 MBit = 125_000 bytes).
+		// Multiplying before dividing avoids the truncation that
+		// (mbits/8)*1_000_000 produced for rates not divisible by 8.
+		maxBytesPerSlot: mbits * 125_000,
+		slotStarted:     time.Now(),
+	}
+}
+
+// Limit is given the current transferred bytes and blocks until they may be sent.
+func (rl *RateLimiter) Limit(xferBytes uint64) {
+	// Check if we need to limit transfer if we go over to max bytes per slot.
+	if rl.slotBytes > rl.maxBytesPerSlot {
+		// Wait if we are still within the slot.
+		sinceSlotStart := time.Since(rl.slotStarted)
+		if sinceSlotStart < time.Second {
+			time.Sleep(time.Second - sinceSlotStart)
+		}
+
+		// Reset state for next slot.
+		rl.slotBytes = 0
+		rl.slotStarted = time.Now()
+	}
+
+	// Add new bytes after checking, as first step over the limit is fully using the limit.
+	rl.slotBytes += xferBytes
+}
diff --git a/spn/terminal/session.go b/spn/terminal/session.go
new file mode 100644
index 00000000..fa2d1695
--- /dev/null
+++ b/spn/terminal/session.go
@@ -0,0 +1,166 @@
+package terminal
+
+import (
+	"context"
+	"fmt"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"github.com/safing/portbase/log"
+)
+
+const (
+	rateLimitMinOps          = 250
+	rateLimitMaxOpsPerSecond = 5
+
+	rateLimitMinSuspicion          = 25
+	rateLimitMinPermaSuspicion     = rateLimitMinSuspicion * 100
+	rateLimitMaxSuspicionPerSecond = 1
+
+	// Make this big enough to trigger suspicion limit in first blast.
+	concurrencyPoolSize = 30
+)
+
+// Session holds terminal metadata for operations.
+type Session struct {
+	sync.RWMutex
+
+	// Rate Limiting.
+
+	// started holds the unix timestamp in seconds when the session was started.
+	// It is set when the Session is created and may be treated as a constant.
+	// NewSession backdates it by one second, so "now - started" is always >= 1.
+	started int64
+
+	// opCount is the amount of operations started (and not rate limited by suspicion).
+	opCount atomic.Int64
+
+	// suspicionScore holds a score of suspicious activity.
+	// Every suspicious operation counts as at least 1.
+	// Rate limited operations because of suspicion are also counted as 1.
+	suspicionScore atomic.Int64
+
+	// concurrencyPool is a semaphore channel used by LimitConcurrency.
+	concurrencyPool chan struct{}
+}
+
+// SessionTerminal is an interface for terminals that support sessions.
+type SessionTerminal interface {
+	GetSession() *Session
+}
+
+// SessionAddOn can be inherited by terminals to add support for sessions.
+type SessionAddOn struct {
+	lock sync.Mutex
+
+	// session holds the terminal session; created lazily by GetSession.
+	session *Session
+}
+
+// GetSession returns the terminal's session, lazily creating it on first use.
+func (t *SessionAddOn) GetSession() *Session {
+	t.lock.Lock()
+	defer t.lock.Unlock()
+
+	if t.session == nil {
+		t.session = NewSession()
+	}
+	return t.session
+}
+
+// NewSession returns a new session.
+func NewSession() *Session {
+	s := &Session{
+		concurrencyPool: make(chan struct{}, concurrencyPoolSize),
+	}
+	// Backdate the start by one second so per-second rate calculations never
+	// divide by zero.
+	s.started = time.Now().Unix() - 1
+	return s
+}
+
+// RateLimitInfo returns some basic information about the status of the rate limiter.
+func (s *Session) RateLimitInfo() string {
+	// Safe division: NewSession backdates started by one second.
+	secondsActive := time.Now().Unix() - s.started
+
+	return fmt.Sprintf(
+		"%do/s %ds/s %ds",
+		s.opCount.Load()/secondsActive,
+		s.suspicionScore.Load()/secondsActive,
+		secondsActive,
+	)
+}
+
+// RateLimit enforces a rate and suspicion limit.
+// It returns ErrRateLimited when the operation should be rejected.
+func (s *Session) RateLimit() *Error {
+	// Safe division: NewSession backdates started by one second.
+	secondsActive := time.Now().Unix() - s.started
+
+	// Check the suspicion limit.
+	score := s.suspicionScore.Load()
+	if score > rateLimitMinSuspicion {
+		scorePerSecond := score / secondsActive
+		if scorePerSecond >= rateLimitMaxSuspicionPerSecond {
+			// Add current try to suspicion score.
+			s.suspicionScore.Add(1)
+
+			return ErrRateLimited
+		}
+
+		// Permanently rate limit if suspicion goes over the perma min limit and
+		// the suspicion score is greater than 80% of the operation count.
+		if score > rateLimitMinPermaSuspicion &&
+			score*5 > s.opCount.Load()*4 { // Think: 80*5 == 100*4
+			return ErrRateLimited
+		}
+	}
+
+	// Check the rate limit.
+	count := s.opCount.Add(1)
+	if count > rateLimitMinOps {
+		opsPerSecond := count / secondsActive
+		if opsPerSecond >= rateLimitMaxOpsPerSecond {
+			return ErrRateLimited
+		}
+	}
+
+	return nil
+}
+
+// Suspicion Factors.
+const (
+	SusFactorCommon          = 1
+	SusFactorWeirdButOK      = 5
+	SusFactorQuiteUnusual    = 10
+	SusFactorMustBeMalicious = 100
+)
+
+// ReportSuspiciousActivity reports suspicious activity of the terminal.
+func (s *Session) ReportSuspiciousActivity(factor int64) {
+	s.suspicionScore.Add(factor)
+}
+
+// LimitConcurrency limits concurrent executions.
+// If over the limit, waiting goroutines are selected randomly.
+// It returns the context error if it was canceled.
+func (s *Session) LimitConcurrency(ctx context.Context, f func()) error {
+	// Wait for place in pool.
+	select {
+	case <-ctx.Done():
+		return ctx.Err()
+	case s.concurrencyPool <- struct{}{}:
+		// We added our entry to the pool, continue with execution.
+	}
+
+	// Drain our own spot in the pool after execution.
+	// The defer also runs when f panics, so the slot is not leaked.
+	defer func() {
+		select {
+		case <-s.concurrencyPool:
+			// Own entry drained.
+		default:
+			// This should never happen, but let's play safe and not deadlock when pool is empty.
+			log.Warningf("spn/session: failed to drain own entry from concurrency pool")
+		}
+	}()
+
+	// Execute and return.
+	f()
+	return nil
+}
diff --git a/spn/terminal/session_test.go b/spn/terminal/session_test.go
new file mode 100644
index 00000000..e61d1f52
--- /dev/null
+++ b/spn/terminal/session_test.go
@@ -0,0 +1,94 @@
+package terminal
+
+import (
+	"context"
+	"sync"
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func TestRateLimit(t *testing.T) {
+	t.Parallel()
+
+	var tErr *Error
+	s := NewSession()
+
+	// Everything should be okay within the min limit.
+	for i := 0; i < rateLimitMinOps; i++ {
+		tErr = s.RateLimit()
+		if tErr != nil {
+			t.Error("should not rate limit within min limit")
+		}
+	}
+
+	// Somewhere in here the rate limit should kick in.
+	for i := 0; i < rateLimitMaxOpsPerSecond; i++ {
+		tErr = s.RateLimit()
+	}
+	assert.ErrorIs(t, tErr, ErrRateLimited, "should rate limit")
+}
+
+func TestSuspicionLimit(t *testing.T) {
+	t.Parallel()
+
+	var tErr *Error
+	s := NewSession()
+
+	// Everything should be okay within the min limit.
+	for i := 0; i < rateLimitMinSuspicion; i++ {
+		tErr = s.RateLimit()
+		if tErr != nil {
+			t.Error("should not rate limit within min limit")
+		}
+		s.ReportSuspiciousActivity(SusFactorCommon)
+	}
+
+	// Somewhere in here the suspicion limit should kick in.
+	for i := 0; i < rateLimitMaxSuspicionPerSecond; i++ {
+		s.ReportSuspiciousActivity(SusFactorCommon)
+		tErr = s.RateLimit()
+	}
+	if tErr == nil {
+		t.Error("should rate limit")
+	}
+}
+
+func TestConcurrencyLimit(t *testing.T) {
+	t.Parallel()
+
+	s := NewSession()
+	started := time.Now()
+	wg := sync.WaitGroup{}
+	workTime := 1 * time.Millisecond
+	workers := concurrencyPoolSize * 10
+
+	// Start many workers to test concurrency.
+	wg.Add(workers)
+	for i := 0; i < workers; i++ {
+		workerNum := i
+		go func() {
+			defer func() {
+				_ = recover()
+			}()
+			_ = s.LimitConcurrency(context.Background(), func() {
+				time.Sleep(workTime)
+				wg.Done()
+
+				// Panic sometimes.
+				// This exercises LimitConcurrency's deferred pool drain, which
+				// must also run when f panics.
+				if workerNum%concurrencyPoolSize == 0 {
+					panic("test")
+				}
+			})
+		}()
+	}
+
+	// Wait and check time needed.
+	// With the pool limiting concurrency, total time must be at least
+	// workers * workTime / poolSize.
+	wg.Wait()
+	if time.Since(started) < (time.Duration(workers) * workTime / concurrencyPoolSize) {
+		t.Errorf("workers were too quick - only took %s", time.Since(started))
+	} else {
+		t.Logf("workers were correctly limited - took %s", time.Since(started))
+	}
+}
diff --git a/spn/terminal/terminal.go b/spn/terminal/terminal.go
new file mode 100644
index 00000000..bbccad2f
--- /dev/null
+++ b/spn/terminal/terminal.go
@@ -0,0 +1,909 @@
+package terminal
+
+import (
+	"context"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"github.com/tevino/abool"
+
+	"github.com/safing/jess"
+	"github.com/safing/portbase/container"
+	"github.com/safing/portbase/log"
+	"github.com/safing/portbase/modules"
+	"github.com/safing/portbase/rng"
+	"github.com/safing/portmaster/spn/cabin"
+	"github.com/safing/portmaster/spn/conf"
+)
+
+const (
+	timeoutTicks = 5
+
+	clientTerminalAbandonTimeout = 15 * time.Second
+	serverTerminalAbandonTimeout = 5 * time.Minute
+)
+
+// Terminal represents a terminal: a multiplexed message channel endpoint that
+// hosts operations and can be cleanly abandoned.
+type Terminal interface { //nolint:golint // Being explicit is helpful here.
+	// ID returns the terminal ID.
+	ID() uint32
+	// Ctx returns the terminal context.
+	Ctx() context.Context
+
+	// Deliver delivers a message to the terminal.
+	// Should not be overridden by implementations.
+	Deliver(msg *Msg) *Error
+	// Send is used by others to send a message through the terminal.
+	// Should not be overridden by implementations.
+	Send(msg *Msg, timeout time.Duration) *Error
+	// Flush sends all messages waiting in the terminal.
+	// Should not be overridden by implementations.
+	Flush(timeout time.Duration)
+
+	// StartOperation starts the given operation by assigning it an ID and sending the given operation initialization data.
+	// Should not be overridden by implementations.
+	StartOperation(op Operation, initData *container.Container, timeout time.Duration) *Error
+	// StopOperation stops the given operation.
+	// Should not be overridden by implementations.
+	StopOperation(op Operation, err *Error)
+
+	// Abandon shuts down the terminal unregistering it from upstream and calling HandleAbandon().
+	// Should not be overridden by implementations.
+	Abandon(err *Error)
+	// HandleAbandon gives the terminal the ability to cleanly shut down.
+	// The terminal is still fully functional at this point.
+	// The returned error is the error to send to the other side.
+	// Should never be called directly. Call Abandon() instead.
+	// Meant to be overridden by implementations.
+	HandleAbandon(err *Error) (errorToSend *Error)
+	// HandleDestruction gives the terminal the ability to clean up.
+	// The terminal has already fully shut down at this point.
+	// Should never be called directly. Call Abandon() instead.
+	// Meant to be overridden by implementations.
+	HandleDestruction(err *Error)
+
+	// FmtID formats the terminal ID (including parent IDs).
+	// May be overridden by implementations.
+	FmtID() string
+}
+
+// TerminalBase contains the basic functions of a terminal.
+type TerminalBase struct { //nolint:golint,maligned // Being explicit is helpful here.
+	// TODO: Fix maligned.
+	Terminal // Interface check.
+
+	lock sync.RWMutex
+
+	// id is the underlying id of the Terminal.
+	id uint32
+	// parentID is the id of the parent component.
+	parentID string
+
+	// ext holds the extended terminal so that the base terminal can access custom functions.
+	ext Terminal
+	// sendQueue holds messages to be sent.
+	sendQueue chan *Msg
+	// flowControl holds the flow control system.
+	flowControl FlowControl
+	// upstream represents the upstream (parent) terminal.
+	upstream Upstream
+
+	// deliverProxy is populated with the configured deliver function.
+	deliverProxy func(msg *Msg) *Error
+	// recvProxy is populated with the configured recv function.
+	recvProxy func() <-chan *Msg
+
+	// ctx is the context of the Terminal.
+	ctx context.Context
+	// cancelCtx cancels ctx.
+	cancelCtx context.CancelFunc
+
+	// waitForFlush signifies if sending should be delayed until the next call
+	// to Flush().
+	waitForFlush *abool.AtomicBool
+	// flush is used to send a finish function to the handler, which will write
+	// all pending messages and then call the received function.
+	flush chan func()
+	// idleTicker ticks for increasing and checking the idle counter.
+	// It is stopped on creation and only started via SetTimeout().
+	idleTicker *time.Ticker
+	// idleCounter counts the ticks the terminal has been idle.
+	// Accessed atomically by the Handler and Sender workers.
+	idleCounter *uint32
+
+	// jession is the jess session used for encryption.
+	jession *jess.Session
+	// jessionLock locks jession.
+	jessionLock sync.Mutex
+	// encryptionReady is closed when the encryption is ready for sending messages.
+	encryptionReady chan struct{}
+	// identity is the identity used by a remote Terminal.
+	identity *cabin.Identity
+
+	// operations holds references to all active operations that require persistence.
+	operations map[uint32]Operation
+	// nextOpID holds the next operation ID. Accessed atomically.
+	nextOpID *uint32
+	// permission holds the permissions of the terminal.
+	permission Permission
+
+	// opts holds the terminal options. It must not be modified after the terminal
+	// has started.
+	opts *TerminalOpts
+
+	// lastUnknownOpID holds the operation ID of the last data message received
+	// for an unknown operation ID.
+	lastUnknownOpID uint32
+	// lastUnknownOpMsgs holds the amount of continuous data messages received
+	// for the operation ID in lastUnknownOpID.
+	lastUnknownOpMsgs uint32
+
+	// Abandoning indicates if the Terminal is being abandoned. The main handlers
+	// will keep running until the context has been canceled by the abandon
+	// procedure.
+	// No new operations should be started.
+	// Whoever initiates the abandoning must also start the abandon procedure.
+	Abandoning *abool.AtomicBool
+}
+
+// createTerminalBase creates a TerminalBase with the given IDs and options,
+// derives the terminal context from ctx, and wires up the configured flow
+// control. It returns an error for unknown flow control types.
+func createTerminalBase(
+	ctx context.Context,
+	id uint32,
+	parentID string,
+	remote bool,
+	initMsg *TerminalOpts,
+	upstream Upstream,
+) (*TerminalBase, *Error) {
+	t := &TerminalBase{
+		id:              id,
+		parentID:        parentID,
+		sendQueue:       make(chan *Msg),
+		upstream:        upstream,
+		waitForFlush:    abool.New(),
+		flush:           make(chan func()),
+		idleTicker:      time.NewTicker(time.Minute),
+		idleCounter:     new(uint32),
+		encryptionReady: make(chan struct{}),
+		operations:      make(map[uint32]Operation),
+		nextOpID:        new(uint32),
+		opts:            initMsg,
+		Abandoning:      abool.New(),
+	}
+	// Stop ticking to disable timeout.
+	t.idleTicker.Stop()
+	// Shift next operation ID if remote - presumably so both sides allocate
+	// from disjoint ID ranges; confirm against StartOperation's ID assignment.
+	if remote {
+		atomic.AddUint32(t.nextOpID, 4)
+	}
+	// Create context.
+	t.ctx, t.cancelCtx = context.WithCancel(ctx)
+
+	// Create flow control.
+	switch initMsg.FlowControl {
+	case FlowControlDFQ:
+		t.flowControl = NewDuplexFlowQueue(t.Ctx(), initMsg.FlowControlSize, t.submitToUpstream)
+		t.deliverProxy = t.flowControl.Deliver
+		t.recvProxy = t.flowControl.Receive
+	case FlowControlNone:
+		deliver := make(chan *Msg, initMsg.FlowControlSize)
+		// NOTE(review): this uses the parent ctx, not t.ctx - delivery will not
+		// unblock when only the terminal's own context is canceled; confirm
+		// this is intended.
+		t.deliverProxy = MakeDirectDeliveryDeliverFunc(ctx, deliver)
+		t.recvProxy = MakeDirectDeliveryRecvFunc(deliver)
+	case FlowControlDefault:
+		fallthrough
+	default:
+		return nil, ErrInternalError.With("unknown flow control type %d", initMsg.FlowControl)
+	}
+
+	return t, nil
+}
+
+// ID returns the numeric ID of this terminal.
+func (t *TerminalBase) ID() uint32 {
+	return t.id
+}
+
+// Ctx returns the context governing this terminal's lifetime.
+func (t *TerminalBase) Ctx() context.Context {
+	return t.ctx
+}
+
+// SetTerminalExtension wires up the extended terminal implementation so the
+// base terminal can call into its custom behavior. This function is not
+// guarded and may only be used during initialization.
+func (t *TerminalBase) SetTerminalExtension(ext Terminal) {
+	t.ext = ext
+}
+
+// SetTimeout sets the Terminal's idle timeout duration.
+// It is broken down into slots internally.
+func (t *TerminalBase) SetTimeout(d time.Duration) {
+	// The ticker fires timeoutTicks times per timeout period; the idle counter
+	// must exceed timeoutTicks before the terminal is abandoned, so the full
+	// timeout is roughly d.
+	t.idleTicker.Reset(d / timeoutTicks)
+}
+
+// Deliver hands the given message to the delivery proxy configured at
+// terminal creation. If delivery fails, the message is finished before the
+// error is returned.
+func (t *TerminalBase) Deliver(msg *Msg) *Error {
+	if err := t.deliverProxy(msg); err != nil {
+		// Release the message unit on failure.
+		msg.Finish()
+		return err
+	}
+	return nil
+}
+
+// StartWorkers starts the necessary workers to operate the Terminal.
+func (t *TerminalBase) StartWorkers(m *modules.Module, terminalName string) {
+	// Start terminal workers: Handler receives, Sender sends.
+	m.StartWorker(terminalName+" handler", t.Handler)
+	m.StartWorker(terminalName+" sender", t.Sender)
+
+	// Start any flow control workers.
+	if t.flowControl != nil {
+		t.flowControl.StartWorkers(m, terminalName)
+	}
+}
+
+const (
+	// sendThresholdLength is the buffered byte count that triggers sending.
+	sendThresholdLength  = 100  // bytes
+	// sendMaxLength is the maximum byte count buffered before blocking intake.
+	sendMaxLength        = 4000 // bytes
+	// sendThresholdMaxWait is the longest buffered data waits before being sent anyway.
+	sendThresholdMaxWait = 20 * time.Millisecond
+)
+
+// Handler receives and handles messages and must be started as a worker in the
+// module where the Terminal is used.
+func (t *TerminalBase) Handler(_ context.Context) error {
+	// Make sure the terminal is abandoned if this worker dies unexpectedly.
+	defer t.Abandon(ErrInternalError.With("handler died"))
+
+	var msg *Msg
+	// Finish the message currently being handled when returning.
+	// The receiver must be evaluated when the deferred call runs, not when it
+	// is registered: a plain `defer msg.Finish()` binds the nil value msg
+	// holds here and would never finish the last received message. Hence the
+	// closure.
+	defer func() {
+		msg.Finish()
+	}()
+
+	for {
+		select {
+		case <-t.ctx.Done():
+			// Call Abandon just in case.
+			// Normally, only the StopProcedure function should cancel the context.
+			t.Abandon(nil)
+			return nil // Controlled worker exit.
+
+		case <-t.idleTicker.C:
+			// If nothing happens for a while, end the session.
+			if atomic.AddUint32(t.idleCounter, 1) > timeoutTicks {
+				// Abandon the terminal and reset the counter.
+				t.Abandon(ErrNoActivity)
+				atomic.StoreUint32(t.idleCounter, 0)
+			}
+
+		case msg = <-t.recvProxy():
+			err := t.handleReceive(msg)
+			if err != nil {
+				t.Abandon(err.Wrap("failed to handle"))
+				return nil
+			}
+
+			// Register activity.
+			atomic.StoreUint32(t.idleCounter, 0)
+		}
+	}
+}
+
+// submit sends a message from the terminal towards upstream, routing it
+// through flow control when one is configured. On a flow control failure the
+// message is finished and the terminal is abandoned.
+func (t *TerminalBase) submit(msg *Msg, timeout time.Duration) {
+	// Route through flow control when configured.
+	if fc := t.flowControl; fc != nil {
+		if sendErr := fc.Send(msg, timeout); sendErr != nil {
+			msg.Finish()
+			t.Abandon(sendErr.Wrap("failed to submit to flow control"))
+		}
+		return
+	}
+
+	// No flow control: hand the message directly to upstream.
+	t.submitToUpstream(msg, timeout)
+}
+
+// submitToUpstream is used to directly submit messages to upstream.
+// This function should only be used by the flow control or submit function.
+func (t *TerminalBase) submitToUpstream(msg *Msg, timeout time.Duration) {
+	// Add terminal ID as flow ID.
+	msg.FlowID = t.ID()
+
+	// Debug unit leaks.
+	// The depth of 2 attributes the message to this function's caller;
+	// do not add intermediate calls without adjusting it.
+	msg.debugWithCaller(2)
+
+	// Submit to upstream.
+	err := t.upstream.Send(msg, timeout)
+	if err != nil {
+		msg.Finish()
+		t.Abandon(err.Wrap("failed to submit to upstream"))
+	}
+}
+
+// Sender handles sending messages and must be started as a worker in the
+// module where the Terminal is used. It batches queued operation messages up
+// to a threshold or timeout before pushing them upstream.
+func (t *TerminalBase) Sender(_ context.Context) error {
+	// Don't send messages, if the encryption is not yet set up.
+	// The server encryption session is only initialized with the first
+	// operative message, not on Terminal creation.
+	if t.opts.Encrypt {
+		select {
+		case <-t.ctx.Done():
+			// Call Abandon just in case.
+			// Normally, only the StopProcedure function should cancel the context.
+			t.Abandon(nil)
+			return nil // Controlled worker exit.
+		case <-t.encryptionReady:
+		}
+	}
+
+	// Be sure to call Stop even in case of sudden death.
+	defer t.Abandon(ErrInternalError.With("sender died"))
+
+	var msgBufferMsg *Msg
+	var msgBufferLen int
+	var msgBufferLimitReached bool
+	var sendMsgs bool
+	var sendMaxWait *time.Timer
+	var flushFinished func()
+
+	// Finish any current unit when returning.
+	// The receiver must be evaluated when the deferred call runs, not when it
+	// is registered: a plain `defer msgBufferMsg.Finish()` binds the nil value
+	// msgBufferMsg holds here and would never finish the actual buffered
+	// message. Hence the closure.
+	defer func() {
+		msgBufferMsg.Finish()
+	}()
+
+	// Only receive message when not sending the current msg buffer.
+	sendQueueOpMsgs := func() <-chan *Msg {
+		// Don't handle more messages, if the buffer is full.
+		if msgBufferLimitReached {
+			return nil
+		}
+		return t.sendQueue
+	}
+
+	// Only wait for sending slot when the current msg buffer is ready to be sent.
+	readyToSend := func() <-chan struct{} {
+		switch {
+		case !sendMsgs:
+			// Wait until there is something to send.
+			return nil
+		case t.flowControl != nil:
+			// Let flow control decide when we are ready.
+			return t.flowControl.ReadyToSend()
+		default:
+			// Always ready.
+			return ready
+		}
+	}
+
+	// Calculate current max wait time to send the msg buffer.
+	getSendMaxWait := func() <-chan time.Time {
+		if sendMaxWait != nil {
+			return sendMaxWait.C
+		}
+		return nil
+	}
+
+handling:
+	for {
+		select {
+		case <-t.ctx.Done():
+			// Call Stop just in case.
+			// Normally, only the StopProcedure function should cancel the context.
+			t.Abandon(nil)
+			return nil // Controlled worker exit.
+
+		case <-t.idleTicker.C:
+			// If nothing happens for a while, end the session.
+			if atomic.AddUint32(t.idleCounter, 1) > timeoutTicks {
+				// Abandon the terminal and reset the counter.
+				t.Abandon(ErrNoActivity)
+				atomic.StoreUint32(t.idleCounter, 0)
+			}
+
+		case msg := <-sendQueueOpMsgs():
+			if msg == nil {
+				continue handling
+			}
+
+			// Add unit to buffer unit, or use it as new buffer.
+			if msgBufferMsg != nil {
+				// Pack, append and finish additional message.
+				msgBufferMsg.Consume(msg)
+			} else {
+				// Pack operation message.
+				msg.Pack()
+				// Convert to message of terminal.
+				msgBufferMsg = msg
+				msgBufferMsg.FlowID = t.ID()
+				msgBufferMsg.Type = MsgTypeData
+			}
+			msgBufferLen += msg.Data.Length()
+
+			// Check if there is enough data to hit the sending threshold.
+			if msgBufferLen >= sendThresholdLength {
+				sendMsgs = true
+			} else if sendMaxWait == nil && t.waitForFlush.IsNotSet() {
+				sendMaxWait = time.NewTimer(sendThresholdMaxWait)
+			}
+
+			// Check if we have reached the maximum buffer size.
+			if msgBufferLen >= sendMaxLength {
+				msgBufferLimitReached = true
+			}
+
+			// Register activity.
+			atomic.StoreUint32(t.idleCounter, 0)
+
+		case <-getSendMaxWait():
+			// The timer for waiting for more data has ended.
+			// Send all available data if not forced to wait for a flush.
+			if t.waitForFlush.IsNotSet() {
+				sendMsgs = true
+			}
+
+		case newFlushFinishedFn := <-t.flush:
+			// We are flushing - stop waiting.
+			t.waitForFlush.UnSet()
+
+			// Signal immediately if msg buffer is empty.
+			if msgBufferLen == 0 {
+				newFlushFinishedFn()
+			} else {
+				// If there already is a flush finished function, stack them.
+				if flushFinished != nil {
+					stackedFlushFinishFn := flushFinished
+					flushFinished = func() {
+						stackedFlushFinishFn()
+						newFlushFinishedFn()
+					}
+				} else {
+					flushFinished = newFlushFinishedFn
+				}
+			}
+
+			// Force sending data now.
+			sendMsgs = true
+
+		case <-readyToSend():
+			// Reset sending flags.
+			sendMsgs = false
+			msgBufferLimitReached = false
+
+			// Send if there is anything to send.
+			var err *Error
+			if msgBufferLen > 0 {
+				// Update message type to include priority.
+				if msgBufferMsg.Type == MsgTypeData &&
+					msgBufferMsg.Unit.IsHighPriority() &&
+					t.opts.UsePriorityDataMsgs {
+					msgBufferMsg.Type = MsgTypePriorityData
+				}
+
+				// Wait for clearance on initial msg only.
+				msgBufferMsg.Unit.WaitForSlot()
+
+				err = t.sendOpMsgs(msgBufferMsg)
+			}
+
+			// Reset buffer.
+			msgBufferMsg = nil
+			msgBufferLen = 0
+
+			// Reset send wait timer.
+			if sendMaxWait != nil {
+				sendMaxWait.Stop()
+				sendMaxWait = nil
+			}
+
+			// Check if we are flushing and need to notify.
+			if flushFinished != nil {
+				flushFinished()
+				flushFinished = nil
+			}
+
+			// Handle error after state updates.
+			if err != nil {
+				t.Abandon(err.With("failed to send"))
+				continue handling
+			}
+		}
+	}
+}
+
+// WaitForFlush pauses all sending by this terminal until Flush() is called.
+func (t *TerminalBase) WaitForFlush() {
+	t.waitForFlush.Set()
+}
+
+// Flush sends all data waiting to be sent.
+// It blocks until the sender has written all pending messages, the terminal
+// stops, or the timeout is reached.
+func (t *TerminalBase) Flush(timeout time.Duration) {
+	// Create channel and function for notifying.
+	wait := make(chan struct{})
+	finished := func() {
+		close(wait)
+	}
+	// Request flush and return when stopping.
+	select {
+	case t.flush <- finished:
+	case <-t.Ctx().Done():
+		return
+	case <-TimedOut(timeout):
+		return
+	}
+	// Wait for flush to finish and return when stopping.
+	select {
+	case <-wait:
+	case <-t.Ctx().Done():
+		return
+	case <-TimedOut(timeout):
+		return
+	}
+
+	// Flush flow control, if configured.
+	if t.flowControl != nil {
+		t.flowControl.Flush(timeout)
+	}
+}
+
+// encrypt encrypts the given data with the terminal's jess session and
+// serializes it for transport. If encryption is disabled, the data is
+// returned unchanged.
+func (t *TerminalBase) encrypt(c *container.Container) (*container.Container, *Error) {
+	// Pass through when encryption is disabled.
+	if !t.opts.Encrypt {
+		return c, nil
+	}
+
+	t.jessionLock.Lock()
+	defer t.jessionLock.Unlock()
+
+	// Close (encrypt) the data into a letter.
+	letter, err := t.jession.Close(c.CompileData())
+	if err != nil {
+		return nil, ErrIntegrity.With("failed to encrypt: %w", err)
+	}
+
+	// Serialize the letter for the wire.
+	wireData, err := letter.ToWire()
+	if err != nil {
+		return nil, ErrInternalError.With("failed to pack letter: %w", err)
+	}
+	return wireData, nil
+}
+
+// decrypt parses and decrypts the given wire data with the terminal's jess
+// session. If encryption is disabled, the data is returned unchanged.
+// On the first encrypted message, the session is lazily created from the
+// terminal's identity and sending is unblocked via encryptionReady.
+func (t *TerminalBase) decrypt(c *container.Container) (*container.Container, *Error) {
+	if !t.opts.Encrypt {
+		return c, nil
+	}
+
+	t.jessionLock.Lock()
+	defer t.jessionLock.Unlock()
+
+	letter, err := jess.LetterFromWire(c)
+	if err != nil {
+		return nil, ErrMalformedData.With("failed to parse letter: %w", err)
+	}
+
+	// Setup encryption if not yet done.
+	if t.jession == nil {
+		if t.identity == nil {
+			return nil, ErrInternalError.With("missing identity for setting up incoming encryption")
+		}
+
+		// Create jess session.
+		t.jession, err = letter.WireCorrespondence(t.identity)
+		if err != nil {
+			return nil, ErrIntegrity.With("failed to initialize incoming encryption: %w", err)
+		}
+
+		// Don't need that anymore.
+		t.identity = nil
+
+		// Encryption is ready for sending.
+		// Closing the channel unblocks the Sender worker; this branch runs at
+		// most once because jession is now non-nil.
+		close(t.encryptionReady)
+	}
+
+	decryptedData, err := t.jession.Open(letter)
+	if err != nil {
+		return nil, ErrIntegrity.With("failed to decrypt: %w", err)
+	}
+
+	return container.New(decryptedData), nil
+}
+
+// handleReceive processes a single received terminal message: it decrypts the
+// data (if enabled) and dispatches all contained operation messages in order.
+// A trailing zero-length message marks the rest of the segment as padding.
+func (t *TerminalBase) handleReceive(msg *Msg) *Error {
+	msg.Unit.WaitForSlot()
+	defer msg.Finish()
+
+	// Debugging:
+	// log.Errorf("spn/terminal %s handling tmsg: %s", t.FmtID(), spew.Sdump(c.CompileData()))
+
+	// Check if message is empty. This will be the case if a message was only
+	// for updating the available space of the flow queue.
+	if !msg.Data.HoldsData() {
+		return nil
+	}
+
+	// Decrypt if enabled.
+	var tErr *Error
+	msg.Data, tErr = t.decrypt(msg.Data)
+	if tErr != nil {
+		return tErr
+	}
+
+	// Handle operation messages.
+	for msg.Data.HoldsData() {
+		// Get next message length.
+		msgLength, err := msg.Data.GetNextN32()
+		if err != nil {
+			return ErrMalformedData.With("failed to get operation msg length: %w", err)
+		}
+		if msgLength == 0 {
+			// Remainder is padding.
+			// Padding can only be at the end of the segment.
+			t.handlePaddingMsg(msg.Data)
+			return nil
+		}
+
+		// Get op msg data.
+		msgData, err := msg.Data.GetAsContainer(int(msgLength))
+		if err != nil {
+			return ErrMalformedData.With("failed to get operation msg data (%d/%d bytes): %w", msg.Data.Length(), msgLength, err)
+		}
+
+		// Handle op msg.
+		if handleErr := t.handleOpMsg(msgData); handleErr != nil {
+			return handleErr
+		}
+	}
+
+	return nil
+}
+
+// handleOpMsg dispatches a single operation message to the matching active
+// operation, starts new operations on init messages, and stops operations on
+// stop messages. Data for unknown operation IDs is throttled and answered
+// with a stop message when it repeats excessively.
+func (t *TerminalBase) handleOpMsg(data *container.Container) *Error {
+	// Debugging:
+	// log.Errorf("spn/terminal %s handling opmsg: %s", t.FmtID(), spew.Sdump(data.CompileData()))
+
+	// Parse message operation id, type.
+	opID, msgType, err := ParseIDType(data)
+	if err != nil {
+		return ErrMalformedData.With("failed to parse operation msg id/type: %w", err)
+	}
+
+	switch msgType {
+	case MsgTypeInit:
+		t.handleOperationStart(opID, data)
+
+	case MsgTypeData, MsgTypePriorityData:
+		op, ok := t.GetActiveOp(opID)
+		if ok && !op.Stopped() {
+			// Create message from data.
+			msg := NewEmptyMsg()
+			msg.FlowID = opID
+			msg.Type = msgType
+			msg.Data = data
+			if msg.Type == MsgTypePriorityData {
+				msg.Unit.MakeHighPriority()
+			}
+
+			// Deliver message to operation.
+			tErr := op.Deliver(msg)
+			if tErr != nil {
+				// Also stop on "success" errors!
+				msg.Finish()
+				t.StopOperation(op, tErr)
+			}
+			return nil
+		}
+
+		// If an active op is not found, this is likely just left-overs from a
+		// stopped or failed operation.
+		// log.Tracef("spn/terminal: %s received data msg for unknown op %d", fmtTerminalID(t.parentID, t.id), opID)
+
+		// Send a stop error if this happens too often.
+		if opID == t.lastUnknownOpID {
+			// OpID is the same as last time.
+			t.lastUnknownOpMsgs++
+
+			// Log a warning (via StopOperation) and send a stop message every thousand.
+			if t.lastUnknownOpMsgs%1000 == 0 {
+				t.StopOperation(newUnknownOp(opID, ""), ErrUnknownOperationID.With("received %d unsolicited data msgs", t.lastUnknownOpMsgs))
+			}
+
+			// TODO: Abandon terminal at over 10000?
+		} else {
+			// OpID changed, set new ID and reset counter.
+			t.lastUnknownOpID = opID
+			t.lastUnknownOpMsgs = 1
+		}
+
+	case MsgTypeStop:
+		// Parse received error.
+		opErr, parseErr := ParseExternalError(data.CompileData())
+		if parseErr != nil {
+			log.Warningf("spn/terminal: %s failed to parse stop error: %s", fmtTerminalID(t.parentID, t.id), parseErr)
+			opErr = ErrUnknownError.AsExternal()
+		}
+
+		// End operation.
+		op, ok := t.GetActiveOp(opID)
+		if ok {
+			t.StopOperation(op, opErr)
+		} else {
+			log.Tracef("spn/terminal: %s received stop msg for unknown op %d", fmtTerminalID(t.parentID, t.id), opID)
+		}
+
+	default:
+		log.Warningf("spn/terminal: %s received unexpected message type: %d", t.FmtID(), msgType)
+		return ErrUnexpectedMsgType
+	}
+
+	return nil
+}
+
+// handlePaddingMsg consumes the remaining padding bytes of a segment and
+// feeds them to the RNG as extra entropy.
+func (t *TerminalBase) handlePaddingMsg(c *container.Container) {
+	if padding := c.GetAll(); len(padding) > 0 {
+		rngFeeder.SupplyEntropyIfNeeded(padding, len(padding))
+	}
+}
+
+// sendOpMsgs pads (if configured) and encrypts (if enabled) the given batched
+// operation message and submits it towards upstream.
+func (t *TerminalBase) sendOpMsgs(msg *Msg) *Error {
+	msg.Unit.WaitForSlot()
+
+	// Add Padding if needed to round the message length up to a multiple of
+	// the configured padding size.
+	if t.opts.Padding > 0 {
+		padTo := int(t.opts.Padding)
+		// Use the length's remainder, not the plain difference: in Go the %
+		// operator takes the sign of the dividend, so (padTo - length) % padTo
+		// is negative for length > padTo and would silently skip padding of
+		// messages longer than the padding size. This form always yields a
+		// value in [0, padTo).
+		paddingNeeded := (padTo - msg.Data.Length()%padTo) % padTo
+		if paddingNeeded > 0 {
+			// Add padding message header.
+			msg.Data.Append([]byte{0})
+			paddingNeeded--
+
+			// Add needed padding data.
+			if paddingNeeded > 0 {
+				padding, err := rng.Bytes(paddingNeeded)
+				if err != nil {
+					log.Debugf("spn/terminal: %s failed to get random data, using zeros instead", t.FmtID())
+					padding = make([]byte, paddingNeeded)
+				}
+				msg.Data.Append(padding)
+			}
+		}
+	}
+
+	// Encrypt operative data.
+	var tErr *Error
+	msg.Data, tErr = t.encrypt(msg.Data)
+	if tErr != nil {
+		return tErr
+	}
+
+	// Send data.
+	t.submit(msg, 0)
+	return nil
+}
+
+// Abandon shuts down the terminal unregistering it from upstream and calling
+// HandleAbandon(). Only the first call has any effect; the shutdown runs
+// asynchronously in a module worker.
+// Should not be overridden by implementations.
+func (t *TerminalBase) Abandon(err *Error) {
+	// Only the first caller runs the abandon procedure.
+	if !t.Abandoning.SetToIf(false, true) {
+		return
+	}
+
+	module.StartWorker("terminal abandon procedure", func(_ context.Context) error {
+		t.handleAbandonProcedure(err)
+		return nil
+	})
+}
+
+// HandleAbandon gives the terminal the ability to cleanly shut down.
+// The returned error is the error to send to the other side.
+// Should never be called directly. Call Abandon() instead.
+// Meant to be overridden by implementations.
+func (t *TerminalBase) HandleAbandon(err *Error) (errorToSend *Error) {
+	// Default behavior: forward the triggering error unchanged.
+	return err
+}
+
+// HandleDestruction gives the terminal the ability to clean up.
+// The terminal has already fully shut down at this point.
+// Should never be called directly. Call Abandon() instead.
+// Meant to be overridden by implementations.
+// The default implementation does nothing.
+func (t *TerminalBase) HandleDestruction(err *Error) {}
+
+// handleAbandonProcedure performs the terminal shutdown: it stops all
+// operations, waits (bounded) for them to end, notifies the extension,
+// sends a stop message and flushes if the shutdown was initiated locally,
+// and finally cancels the terminal context.
+// Must only be started once, via Abandon().
+func (t *TerminalBase) handleAbandonProcedure(err *Error) {
+	// End all operations.
+	for _, op := range t.allOps() {
+		t.StopOperation(op, nil)
+	}
+
+	// Prepare timeouts for waiting for ops.
+	timeout := clientTerminalAbandonTimeout
+	if conf.PublicHub() {
+		timeout = serverTerminalAbandonTimeout
+	}
+	checkTicker := time.NewTicker(50 * time.Millisecond)
+	defer checkTicker.Stop()
+	abortWaiting := time.After(timeout)
+
+	// Wait for all operations to end.
+waitForOps:
+	for {
+		select {
+		case <-checkTicker.C:
+			if t.GetActiveOpCount() <= 0 {
+				break waitForOps
+			}
+		case <-abortWaiting:
+			log.Warningf(
+				"spn/terminal: terminal %s is continuing shutdown with %d active operations",
+				t.FmtID(),
+				t.GetActiveOpCount(),
+			)
+			break waitForOps
+		}
+	}
+
+	// Call operation stop handle function for proper shutdown cleaning up.
+	if t.ext != nil {
+		err = t.ext.HandleAbandon(err)
+	}
+
+	// Send error to the connected Operation, if the error is internal.
+	// NOTE(review): err may be nil here; this relies on (*Error).IsExternal
+	// being nil-safe - confirm.
+	if !err.IsExternal() {
+		if err == nil {
+			err = ErrStopping
+		}
+
+		msg := NewMsg(err.Pack())
+		msg.FlowID = t.ID()
+		msg.Type = MsgTypeStop
+		t.submit(msg, 1*time.Second)
+	}
+
+	// If terminal was ended locally, send all data before abandoning.
+	// If terminal was ended remotely, don't bother sending remaining data.
+	if !err.IsExternal() {
+		// Flushing could mean sending a full buffer of 50000 packets.
+		t.Flush(5 * time.Minute)
+	}
+
+	// Stop all other connected workers.
+	t.cancelCtx()
+	t.idleTicker.Stop()
+
+	// Call operation destruction handle function for proper shutdown cleaning up.
+	if t.ext != nil {
+		t.ext.HandleDestruction(err)
+	}
+}
+
+// allOps returns a snapshot of all active operations.
+func (t *TerminalBase) allOps() []Operation {
+	// A read lock suffices: the map is only read to build the snapshot.
+	t.lock.RLock()
+	defer t.lock.RUnlock()
+
+	ops := make([]Operation, 0, len(t.operations))
+	for _, op := range t.operations {
+		ops = append(ops, op)
+	}
+
+	return ops
+}
+
+// MakeDirectDeliveryDeliverFunc creates a deliver function that pushes
+// messages into the given delivery channel, aborting with ErrStopping when
+// the context is canceled.
+func MakeDirectDeliveryDeliverFunc(
+	ctx context.Context,
+	deliver chan *Msg,
+) func(c *Msg) *Error {
+	return func(msg *Msg) *Error {
+		select {
+		case <-ctx.Done():
+			return ErrStopping
+		case deliver <- msg:
+			return nil
+		}
+	}
+}
+
+// MakeDirectDeliveryRecvFunc creates a receive function that exposes the
+// given delivery channel as a receive-only channel.
+func MakeDirectDeliveryRecvFunc(
+	deliver chan *Msg,
+) func() <-chan *Msg {
+	recv := (<-chan *Msg)(deliver)
+	return func() <-chan *Msg {
+		return recv
+	}
+}
diff --git a/spn/terminal/terminal_test.go b/spn/terminal/terminal_test.go
new file mode 100644
index 00000000..b458f696
--- /dev/null
+++ b/spn/terminal/terminal_test.go
@@ -0,0 +1,311 @@
+package terminal
+
+import (
+	"fmt"
+	"os"
+	"runtime/pprof"
+	"sync/atomic"
+	"testing"
+	"time"
+
+	"github.com/safing/portbase/container"
+	"github.com/safing/portmaster/spn/cabin"
+	"github.com/safing/portmaster/spn/hub"
+)
+
+// TestTerminals runs the terminal test suite for every combination of
+// encryption and flow control type.
+func TestTerminals(t *testing.T) {
+	t.Parallel()
+
+	identity, err := cabin.CreateIdentity(module.Ctx, "test")
+	if err != nil {
+		t.Fatalf("failed to create identity: %s", err)
+	}
+
+	// Flow control variants to exercise.
+	flowControls := []struct {
+		flowControl     FlowControlType
+		flowControlSize uint32
+	}{
+		{flowControl: FlowControlNone, flowControlSize: 5},
+		{flowControl: FlowControlDFQ, flowControlSize: defaultTestQueueSize},
+	}
+
+	// Run all combinations: encryption off/on x each flow control.
+	for _, encrypt := range []bool{false, true} {
+		for _, fc := range flowControls {
+			testTerminals(t, identity, &TerminalOpts{
+				Encrypt:         encrypt,
+				Padding:         defaultTestPadding,
+				FlowControl:     fc.flowControl,
+				FlowControlSize: fc.flowControlSize,
+			})
+		}
+	}
+}
+
+// testTerminals wires two test terminals directly to each other (term1 <->
+// term2) and runs the full set of counter-based sub-tests with the given
+// options, cleaning both terminals up afterwards.
+func testTerminals(t *testing.T, identity *cabin.Identity, terminalOpts *TerminalOpts) {
+	t.Helper()
+
+	// Prepare encryption.
+	// With encryption enabled term1 encrypts towards the identity's hub;
+	// without it, the identity is dropped so term2 skips decryption setup.
+	var dstHub *hub.Hub
+	if terminalOpts.Encrypt {
+		dstHub = identity.Hub
+	} else {
+		identity = nil
+	}
+
+	// Create test terminals.
+	// Each terminal's upstream forwards directly into the other terminal's
+	// Deliver, forming a loopback pair.
+	var term1 *TestTerminal
+	var term2 *TestTerminal
+	var initData *container.Container
+	var err *Error
+	term1, initData, err = NewLocalTestTerminal(
+		module.Ctx, 127, "c1", dstHub, terminalOpts, createForwardingUpstream(
+			t, "c1", "c2", func(msg *Msg) *Error {
+				return term2.Deliver(msg)
+			},
+		),
+	)
+	if err != nil {
+		t.Fatalf("failed to create local terminal: %s", err)
+	}
+	term2, _, err = NewRemoteTestTerminal(
+		module.Ctx, 127, "c2", identity, initData, createForwardingUpstream(
+			t, "c2", "c1", func(msg *Msg) *Error {
+				return term1.Deliver(msg)
+			},
+		),
+	)
+	if err != nil {
+		t.Fatalf("failed to create remote terminal: %s", err)
+	}
+
+	// Start testing with counters.
+	countToQueueSize := uint64(terminalOpts.FlowControlSize)
+	optionsSuffix := fmt.Sprintf(
+		"encrypt=%v,flowType=%d",
+		terminalOpts.Encrypt,
+		terminalOpts.FlowControl,
+	)
+
+	testTerminalWithCounters(t, term1, term2, &testWithCounterOpts{
+		testName:        "onlyup-flushing-waiting:" + optionsSuffix,
+		flush:           true,
+		serverCountTo:   countToQueueSize * 2,
+		waitBetweenMsgs: sendThresholdMaxWait * 2,
+	})
+
+	testTerminalWithCounters(t, term1, term2, &testWithCounterOpts{
+		testName:        "onlyup-waiting:" + optionsSuffix,
+		serverCountTo:   10,
+		waitBetweenMsgs: sendThresholdMaxWait * 2,
+	})
+
+	testTerminalWithCounters(t, term1, term2, &testWithCounterOpts{
+		testName:        "onlyup-flushing:" + optionsSuffix,
+		flush:           true,
+		serverCountTo:   countToQueueSize * 2,
+		waitBetweenMsgs: time.Millisecond,
+	})
+
+	testTerminalWithCounters(t, term1, term2, &testWithCounterOpts{
+		testName:        "onlyup:" + optionsSuffix,
+		serverCountTo:   countToQueueSize * 2,
+		waitBetweenMsgs: time.Millisecond,
+	})
+
+	testTerminalWithCounters(t, term1, term2, &testWithCounterOpts{
+		testName:        "onlydown-flushing-waiting:" + optionsSuffix,
+		flush:           true,
+		clientCountTo:   countToQueueSize * 2,
+		waitBetweenMsgs: sendThresholdMaxWait * 2,
+	})
+
+	testTerminalWithCounters(t, term1, term2, &testWithCounterOpts{
+		testName:        "onlydown-waiting:" + optionsSuffix,
+		clientCountTo:   10,
+		waitBetweenMsgs: sendThresholdMaxWait * 2,
+	})
+
+	testTerminalWithCounters(t, term1, term2, &testWithCounterOpts{
+		testName:        "onlydown-flushing:" + optionsSuffix,
+		flush:           true,
+		clientCountTo:   countToQueueSize * 2,
+		waitBetweenMsgs: time.Millisecond,
+	})
+
+	testTerminalWithCounters(t, term1, term2, &testWithCounterOpts{
+		testName:        "onlydown:" + optionsSuffix,
+		clientCountTo:   countToQueueSize * 2,
+		waitBetweenMsgs: time.Millisecond,
+	})
+
+	testTerminalWithCounters(t, term1, term2, &testWithCounterOpts{
+		testName:        "twoway-flushing-waiting:" + optionsSuffix,
+		flush:           true,
+		clientCountTo:   countToQueueSize * 2,
+		serverCountTo:   countToQueueSize * 2,
+		waitBetweenMsgs: sendThresholdMaxWait * 2,
+	})
+
+	testTerminalWithCounters(t, term1, term2, &testWithCounterOpts{
+		testName:        "twoway-waiting:" + optionsSuffix,
+		flush:           true,
+		clientCountTo:   10,
+		serverCountTo:   10,
+		waitBetweenMsgs: sendThresholdMaxWait * 2,
+	})
+
+	testTerminalWithCounters(t, term1, term2, &testWithCounterOpts{
+		testName:        "twoway-flushing:" + optionsSuffix,
+		flush:           true,
+		clientCountTo:   countToQueueSize * 2,
+		serverCountTo:   countToQueueSize * 2,
+		waitBetweenMsgs: time.Millisecond,
+	})
+
+	testTerminalWithCounters(t, term1, term2, &testWithCounterOpts{
+		testName:        "twoway:" + optionsSuffix,
+		clientCountTo:   countToQueueSize * 2,
+		serverCountTo:   countToQueueSize * 2,
+		waitBetweenMsgs: time.Millisecond,
+	})
+
+	testTerminalWithCounters(t, term1, term2, &testWithCounterOpts{
+		testName:      "stresstest-down:" + optionsSuffix,
+		clientCountTo: countToQueueSize * 1000,
+	})
+
+	testTerminalWithCounters(t, term1, term2, &testWithCounterOpts{
+		testName:      "stresstest-up:" + optionsSuffix,
+		serverCountTo: countToQueueSize * 1000,
+	})
+
+	testTerminalWithCounters(t, term1, term2, &testWithCounterOpts{
+		testName:      "stresstest-duplex:" + optionsSuffix,
+		clientCountTo: countToQueueSize * 1000,
+		serverCountTo: countToQueueSize * 1000,
+	})
+
+	// Clean up.
+	term1.Abandon(nil)
+	term2.Abandon(nil)
+
+	// Give some time for the last log messages and clean up.
+	time.Sleep(100 * time.Millisecond)
+}
+
+// createForwardingUpstream returns an Upstream that forwards every message to
+// the given deliver function, logging traffic (when enabled) and reporting
+// delivery failures as test errors.
+func createForwardingUpstream(t *testing.T, srcName, dstName string, deliverFunc func(*Msg) *Error) Upstream {
+	t.Helper()
+
+	return UpstreamSendFunc(func(msg *Msg, _ time.Duration) *Error {
+		// Fast track nil containers.
+		if msg == nil {
+			dErr := deliverFunc(msg)
+			if dErr != nil {
+				t.Errorf("%s>%s: failed to deliver nil msg to terminal: %s", srcName, dstName, dErr)
+				return dErr.With("failed to deliver nil msg to terminal")
+			}
+			return nil
+		}
+
+		// Log messages.
+		if logTestCraneMsgs {
+			t.Logf("%s>%s: %v\n", srcName, dstName, msg.Data.CompileData())
+		}
+
+		// Deliver to other terminal.
+		dErr := deliverFunc(msg)
+		if dErr != nil {
+			t.Errorf("%s>%s: failed to deliver to terminal: %s", srcName, dstName, dErr)
+			return dErr.With("failed to deliver to terminal")
+		}
+
+		return nil
+	})
+}
+
+// testWithCounterOpts configures a single counter-based terminal test run.
+type testWithCounterOpts struct {
+	// testName is used in logs and failure messages.
+	testName        string
+	// flush is forwarded to CounterOpts.Flush.
+	flush           bool
+	// clientCountTo is forwarded to CounterOpts.ClientCountTo.
+	clientCountTo   uint64
+	// serverCountTo is forwarded to CounterOpts.ServerCountTo.
+	serverCountTo   uint64
+	// waitBetweenMsgs is forwarded to CounterOpts.Wait.
+	waitBetweenMsgs time.Duration
+}
+
+// testTerminalWithCounters runs a counter operation between the two terminals
+// with the given options and verifies its result. A watchdog goroutine dumps
+// all goroutines and kills the process if the run exceeds the deadline.
+func testTerminalWithCounters(t *testing.T, term1, term2 *TestTerminal, opts *testWithCounterOpts) {
+	t.Helper()
+
+	// Wait async for test to complete, print stack after timeout.
+	finished := make(chan struct{})
+	maxTestDuration := 60 * time.Second
+	go func() {
+		select {
+		case <-finished:
+		case <-time.After(maxTestDuration):
+			// A stalled terminal test would otherwise hang the whole suite;
+			// dump goroutines for diagnosis and abort hard.
+			fmt.Printf("terminal test %s is taking more than %s, printing stack:\n", opts.testName, maxTestDuration)
+			_ = pprof.Lookup("goroutine").WriteTo(os.Stdout, 1)
+			os.Exit(1)
+		}
+	}()
+
+	t.Logf("starting terminal counter test %s", opts.testName)
+	defer t.Logf("stopping terminal counter test %s", opts.testName)
+
+	// Start counters.
+	counter, tErr := NewCounterOp(term1, CounterOpts{
+		ClientCountTo: opts.clientCountTo,
+		ServerCountTo: opts.serverCountTo,
+		Flush:         opts.flush,
+		Wait:          opts.waitBetweenMsgs,
+	})
+	if tErr != nil {
+		t.Fatalf("terminal test %s failed to start counter: %s", opts.testName, tErr)
+	}
+
+	// Wait until counters are done.
+	counter.Wait()
+	close(finished)
+
+	// Check for error.
+	if counter.Error != nil {
+		t.Fatalf("terminal test %s failed to count: %s", opts.testName, counter.Error)
+	}
+
+	// Log stats.
+	printCTStats(t, opts.testName, "term1", term1)
+	printCTStats(t, opts.testName, "term2", term2)
+
+	// Check if stats match, if DFQ is used on both sides.
+	// Each side's send space must equal the space the peer reported back.
+	dfq1, ok1 := term1.flowControl.(*DuplexFlowQueue)
+	dfq2, ok2 := term2.flowControl.(*DuplexFlowQueue)
+	if ok1 && ok2 &&
+		(atomic.LoadInt32(dfq1.sendSpace) != atomic.LoadInt32(dfq2.reportedSpace) ||
+			atomic.LoadInt32(dfq2.sendSpace) != atomic.LoadInt32(dfq1.reportedSpace)) {
+		t.Fatalf("terminal test %s has non-matching space counters", opts.testName)
+	}
+}
+
+// printCTStats logs queue and space counters of the given terminal's DFQ
+// flow control. Terminals without DFQ flow control are skipped.
+func printCTStats(t *testing.T, testName, name string, term *TestTerminal) {
+	t.Helper()
+
+	// Stats are only available with DFQ flow control.
+	dfq, usesDFQ := term.flowControl.(*DuplexFlowQueue)
+	if !usesDFQ {
+		return
+	}
+
+	t.Logf(
+		"%s: %s: sq=%d rq=%d sends=%d reps=%d",
+		testName,
+		name,
+		len(dfq.sendQueue),
+		len(dfq.recvQueue),
+		atomic.LoadInt32(dfq.sendSpace),
+		atomic.LoadInt32(dfq.reportedSpace),
+	)
+}
diff --git a/spn/terminal/testing.go b/spn/terminal/testing.go
new file mode 100644
index 00000000..22b12608
--- /dev/null
+++ b/spn/terminal/testing.go
@@ -0,0 +1,243 @@
+package terminal
+
+import (
+	"context"
+	"time"
+
+	"github.com/safing/portbase/container"
+	"github.com/safing/portbase/log"
+	"github.com/safing/portmaster/spn/cabin"
+	"github.com/safing/portmaster/spn/hub"
+)
+
+const (
+	// Defaults for test terminal flow control.
+	defaultTestQueueSize = 16
+	defaultTestPadding   = 8
+	// logTestCraneMsgs enables verbose logging of crane messages in tests.
+	logTestCraneMsgs     = false
+)
+
+// TestTerminal is a terminal for running tests.
+type TestTerminal struct {
+	*TerminalBase // embedded base provides the full terminal implementation
+}
+
+// NewLocalTestTerminal returns a new local test terminal.
+// It also returns the init data that must be passed to the remote side.
+func NewLocalTestTerminal(
+	ctx context.Context,
+	id uint32,
+	parentID string,
+	remoteHub *hub.Hub,
+	initMsg *TerminalOpts,
+	upstream Upstream,
+) (*TestTerminal, *container.Container, *Error) {
+	// Set up the underlying terminal base and its init message data.
+	base, initData, tErr := NewLocalBaseTerminal(ctx, id, parentID, remoteHub, initMsg, upstream)
+	if tErr != nil {
+		return nil, nil, tErr
+	}
+
+	// Run the terminal workers before handing the terminal out.
+	base.StartWorkers(module, "test terminal")
+	return &TestTerminal{base}, initData, nil
+}
+
+// NewRemoteTestTerminal returns a new remote test terminal.
+// It also returns the terminal options decoded from the received init data.
+func NewRemoteTestTerminal(
+	ctx context.Context,
+	id uint32,
+	parentID string,
+	identity *cabin.Identity,
+	initData *container.Container,
+	upstream Upstream,
+) (*TestTerminal, *TerminalOpts, *Error) {
+	// Set up the underlying terminal base from the received init data.
+	base, initMsg, tErr := NewRemoteBaseTerminal(ctx, id, parentID, identity, initData, upstream)
+	if tErr != nil {
+		return nil, nil, tErr
+	}
+
+	// Run the terminal workers before handing the terminal out.
+	base.StartWorkers(module, "test terminal")
+	return &TestTerminal{base}, initMsg, nil
+}
+
+// delayedMsg wraps a message with its delivery deadline for the delaying
+// test forwarding function.
+type delayedMsg struct {
+	msg        *Msg          // message to deliver
+	timeout    time.Duration // timeout to pass through to the deliver func
+	delayUntil time.Time     // earliest point in time to deliver the message
+}
+
+// createDelayingTestForwardingFunc returns a delivery function that forwards
+// messages to deliverFunc, optionally delaying them to simulate latency
+// between two test terminals.
+//
+// With a delay of zero, messages are forwarded synchronously and delivery
+// errors are returned to the caller. With a non-zero delay, messages are
+// queued in a channel of delayQueueSize and handed to deliverFunc by a
+// worker goroutine once their deadline has passed; delivery errors are then
+// only logged, as the sender has already moved on.
+// NOTE(review): the worker goroutine only exits when a nil message is
+// received (which also happens once the internal channel is closed), but
+// neither ever happens here — the goroutine lives until the process ends.
+// Acceptable for tests only; verify this helper is never used elsewhere.
+func createDelayingTestForwardingFunc(
+	srcName,
+	dstName string,
+	delay time.Duration,
+	delayQueueSize int,
+	deliverFunc func(msg *Msg, timeout time.Duration) *Error,
+) func(msg *Msg, timeout time.Duration) *Error {
+	// Return simple forward func if no delay is given.
+	if delay == 0 {
+		return func(msg *Msg, timeout time.Duration) *Error {
+			// Deliver to other terminal.
+			dErr := deliverFunc(msg, timeout)
+			if dErr != nil {
+				log.Errorf("spn/testing: %s>%s: failed to deliver to terminal: %s", srcName, dstName, dErr)
+				return dErr
+			}
+			return nil
+		}
+	}
+
+	// If there is delay, create a delaying channel and handler.
+	delayedMsgs := make(chan *delayedMsg, delayQueueSize)
+	go func() {
+		for {
+			// Read from chan
+			msg := <-delayedMsgs
+			if msg == nil {
+				// nil also signals a closed channel: stop the worker.
+				return
+			}
+
+			// Check if we need to wait.
+			waitFor := time.Until(msg.delayUntil)
+			if waitFor > 0 {
+				time.Sleep(waitFor)
+			}
+
+			// Deliver to other terminal.
+			dErr := deliverFunc(msg.msg, msg.timeout)
+			if dErr != nil {
+				log.Errorf("spn/testing: %s>%s: failed to deliver to terminal: %s", srcName, dstName, dErr)
+			}
+		}
+	}()
+
+	return func(msg *Msg, timeout time.Duration) *Error {
+		// Add msg to delaying msg channel.
+		// This blocks when the delay queue is full, throttling the sender.
+		delayedMsgs <- &delayedMsg{
+			msg:        msg,
+			timeout:    timeout,
+			delayUntil: time.Now().Add(delay),
+		}
+		return nil
+	}
+}
+
+// HandleAbandon gives the terminal the ability to cleanly shut down.
+// The returned error is the error to send to the other side.
+// Should never be called directly. Call Abandon() instead.
+func (t *TestTerminal) HandleAbandon(err *Error) (errorToSend *Error) {
+	if err == nil {
+		// A nil error means the Terminal is being shut down by its owner.
+		log.Tracef("spn/terminal: %s is closing", fmtTerminalID(t.parentID, t.id))
+	} else {
+		// Any other error is a fault.
+		log.Warningf("spn/terminal: %s: %s", fmtTerminalID(t.parentID, t.id), err)
+	}
+
+	return
+}
+
+// NewSimpleTestTerminalPair provides a simple connected terminal pair for tests.
+// Messages are forwarded between the two terminals, each direction optionally
+// delayed by the given duration using a queue of delayQueueSize.
+// If opts is nil, defaults with DFQ flow control are used.
+func NewSimpleTestTerminalPair(delay time.Duration, delayQueueSize int, opts *TerminalOpts) (a, b *TestTerminal, err error) {
+	if opts == nil {
+		opts = &TerminalOpts{
+			Padding:         defaultTestPadding,
+			FlowControl:     FlowControlDFQ,
+			FlowControlSize: defaultTestQueueSize,
+		}
+	}
+
+	var initData *container.Container
+	var tErr *Error
+	// Note: the forwarding closure below captures b before it is assigned;
+	// this is safe because messages only flow once both terminals exist.
+	a, initData, tErr = NewLocalTestTerminal(
+		module.Ctx, 127, "a", nil, opts, UpstreamSendFunc(createDelayingTestForwardingFunc(
+			"a", "b", delay, delayQueueSize, func(msg *Msg, timeout time.Duration) *Error {
+				return b.Deliver(msg)
+			},
+		)),
+	)
+	if tErr != nil {
+		return nil, nil, tErr.Wrap("failed to create local test terminal")
+	}
+	b, _, tErr = NewRemoteTestTerminal(
+		module.Ctx, 127, "b", nil, initData, UpstreamSendFunc(createDelayingTestForwardingFunc(
+			"b", "a", delay, delayQueueSize, func(msg *Msg, timeout time.Duration) *Error {
+				return a.Deliver(msg)
+			},
+		)),
+	)
+	if tErr != nil {
+		return nil, nil, tErr.Wrap("failed to create remote test terminal")
+	}
+
+	return a, b, nil
+}
+
+// BareTerminal is a bare terminal that just returns errors for testing.
+type BareTerminal struct{}
+
+var (
+	// Compile-time check that BareTerminal satisfies the Terminal interface.
+	_ Terminal = &BareTerminal{}
+
+	errNotImplementedByBareTerminal = ErrInternalError.With("not implemented by bare terminal")
+)
+
+// ID returns the terminal ID.
+// The bare terminal always reports ID 0.
+func (t *BareTerminal) ID() uint32 {
+	return 0
+}
+
+// Ctx returns the terminal context.
+// The bare terminal has no lifecycle of its own and returns a background context.
+func (t *BareTerminal) Ctx() context.Context {
+	return context.Background()
+}
+
+// Deliver delivers a message to the terminal.
+// Should not be overridden by implementations.
+// Always returns errNotImplementedByBareTerminal.
+func (t *BareTerminal) Deliver(msg *Msg) *Error {
+	return errNotImplementedByBareTerminal
+}
+
+// Send is used by others to send a message through the terminal.
+// Should not be overridden by implementations.
+// Always returns errNotImplementedByBareTerminal.
+func (t *BareTerminal) Send(msg *Msg, timeout time.Duration) *Error {
+	return errNotImplementedByBareTerminal
+}
+
+// Flush sends all messages waiting in the terminal.
+// Should not be overridden by implementations.
+// No-op for the bare terminal.
+func (t *BareTerminal) Flush(timeout time.Duration) {}
+
+// StartOperation starts the given operation by assigning it an ID and sending the given operation initialization data.
+// Should not be overridden by implementations.
+// Always returns errNotImplementedByBareTerminal.
+func (t *BareTerminal) StartOperation(op Operation, initData *container.Container, timeout time.Duration) *Error {
+	return errNotImplementedByBareTerminal
+}
+
+// StopOperation stops the given operation.
+// Should not be overridden by implementations.
+// No-op for the bare terminal.
+func (t *BareTerminal) StopOperation(op Operation, err *Error) {}
+
+// Abandon shuts down the terminal unregistering it from upstream and calling HandleAbandon().
+// Should not be overridden by implementations.
+// No-op for the bare terminal.
+func (t *BareTerminal) Abandon(err *Error) {}
+
+// HandleAbandon gives the terminal the ability to cleanly shut down.
+// The terminal is still fully functional at this point.
+// The returned error is the error to send to the other side.
+// Should never be called directly. Call Abandon() instead.
+// Meant to be overridden by implementations.
+// The bare terminal simply passes the given error through unchanged.
+func (t *BareTerminal) HandleAbandon(err *Error) (errorToSend *Error) {
+	return err
+}
+
+// HandleDestruction gives the terminal the ability to clean up.
+// The terminal has already fully shut down at this point.
+// Should never be called directly. Call Abandon() instead.
+// Meant to be overridden by implementations.
+// No-op for the bare terminal.
+func (t *BareTerminal) HandleDestruction(err *Error) {}
+
+// FmtID formats the terminal ID (including parent IDs).
+// May be overridden by implementations.
+// The bare terminal always returns the static string "bare".
+func (t *BareTerminal) FmtID() string {
+	return "bare"
+}
diff --git a/spn/terminal/upstream.go b/spn/terminal/upstream.go
new file mode 100644
index 00000000..9dd27d43
--- /dev/null
+++ b/spn/terminal/upstream.go
@@ -0,0 +1,16 @@
+package terminal
+
+import "time"
+
+// Upstream defines the interface for upstream (parent) components.
+type Upstream interface {
+	// Send passes a message with the given timeout to the upstream component.
+	Send(msg *Msg, timeout time.Duration) *Error
+}
+
+// UpstreamSendFunc is a helper to be able to satisfy the Upstream interface.
+// It adapts a plain function to the interface, similar to http.HandlerFunc.
+type UpstreamSendFunc func(msg *Msg, timeout time.Duration) *Error
+
+// Send is used to send a message through this upstream.
+// It simply calls the wrapped function.
+func (fn UpstreamSendFunc) Send(msg *Msg, timeout time.Duration) *Error {
+	return fn(msg, timeout)
+}
diff --git a/spn/test b/spn/test
new file mode 100755
index 00000000..2a443bb4
--- /dev/null
+++ b/spn/test
@@ -0,0 +1,168 @@
+#!/bin/bash
+
+warnings=0
+errors=0
+scripted=0
+goUp="\\e[1A"
+fullTestFlags="-short"
+install=0
+testonly=0
+
+# help prints usage information for this test script.
+function help {
+  cat <<EOF
+usage: $0 [command] [options]
+
+commands:
+  <none>        run baseline tests
+  full          run full tests (ie. not short)
+  install       install deps for running tests
+
+options:
+  --scripted    don't jump console lines (still use colors)
+  --test-only   run tests only, no linters
+  [package]     run only on this package
+EOF
+}
+
+function run {
+  if [[ $scripted -eq 0 ]]; then
+    echo "[......] $*"
+  fi
+
+  # create tmpfile
+  tmpfile=$(mktemp)
+  # execute
+  $* >$tmpfile 2>&1
+  rc=$?
+  output=$(cat $tmpfile)
+
+  # check return code
+  if [[ $rc -eq 0 ]]; then
+    if [[ $output == *"[no test files]"* ]]; then
+      echo -e "${goUp}[\e[01;33mNOTEST\e[00m] $*"
+      warnings=$((warnings+1))
+    else
+      echo -ne "${goUp}[\e[01;32m  OK  \e[00m] "
+      if [[ $2 == "test" ]]; then
+        echo -n $*
+        echo -n ": "
+        echo $output | cut -f "3-" -d " "
+      else
+        echo $*
+      fi
+    fi
+  else
+    if [[ $output == *"build constraints exclude all Go files"* ]]; then
+      echo -e "${goUp}[ !=OS ] $*"
+    else
+      echo -e "${goUp}[\e[01;31m FAIL \e[00m] $*"
+      cat $tmpfile
+      errors=$((errors+1))
+    fi
+  fi
+
+  rm -f $tmpfile
+}
+
+# get and switch to script dir
+baseDir="$( cd "$(dirname "$0")" && pwd )"
+cd "$baseDir"
+
+# args
+while true; do
+  case "$1" in
+  "-h"|"help"|"--help")
+    help
+    exit 0
+    ;;
+  "--scripted")
+    scripted=1
+    goUp=""
+    shift 1
+    ;;
+  "--test-only")
+    testonly=1
+    shift 1
+    ;;
+  "install")
+    install=1
+    shift 1
+    ;;
+  "full")
+    fullTestFlags=""
+    shift 1
+    ;;
+  *)
+    break
+    ;;
+  esac
+done
+
+# check if $GOPATH/bin is in $PATH
+if [[ $PATH != *"$GOPATH/bin"* ]]; then
+  export PATH=$GOPATH/bin:$PATH
+fi
+
+# install
+if [[ $install -eq 1 ]]; then
+  echo "installing dependencies..."
+  # TODO: update golangci-lint version regularly
+  echo "$ curl -sfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin v1.44.0"
+  curl -sfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin v1.44.0
+  exit 0
+fi
+
+# check dependencies
+if [[ $(which go) == "" ]]; then
+  echo "go command not found"
+  exit 1
+fi
+if [[ $testonly -eq 0 ]]; then
+  if [[ $(which gofmt) == "" ]]; then
+    echo "gofmt command not found"
+    exit 1
+  fi
+  if [[ $(which golangci-lint) == "" ]]; then
+    echo "golangci-lint command not found"
+    echo "install with: curl -sfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin vX.Y.Z"
+    echo "don't forget to specify the version you want"
+    echo "or run: ./test install"
+    echo ""
+    echo "alternatively, install the current dev version with: go get -u github.com/golangci/golangci-lint/cmd/golangci-lint"
+    exit 1
+  fi
+fi
+
+# target selection
+if [[ "$1" == "" ]]; then
+  # get all packages
+  packages=$(go list -e ./...)
+else
+  # single package testing
+  packages=$(go list -e)/$1
+  echo "note: only running tests for package $packages"
+fi
+
+# platform info
+platformInfo=$(go env GOOS GOARCH)
+echo "running tests for ${platformInfo//$'\n'/ }:"
+
+# run vet/test on packages
+for package in $packages; do
+  # NOTE(review): this prefix still references github.com/safing/spn — likely
+  # stale after the mono-repo move to github.com/safing/portmaster; verify.
+  packagename=${package#github.com/safing/spn} #TODO: could be queried with `go list .`
+  packagename=${packagename#/}
+  echo ""
+  echo $package
+  if [[ $testonly -eq 0 ]]; then
+    run go vet $package
+    run golangci-lint run $packagename
+  fi
+  run go test -cover $fullTestFlags $package
+done
+
+echo ""
+if [[ $errors -gt 0 ]]; then
+  echo "failed with $errors errors and $warnings warnings"
+  exit 1
+else
+  echo "succeeded with $warnings warnings"
+  exit 0
+fi
diff --git a/spn/tools/Dockerfile b/spn/tools/Dockerfile
new file mode 100644
index 00000000..dbe39af1
--- /dev/null
+++ b/spn/tools/Dockerfile
@@ -0,0 +1,23 @@
+# Builder stage: fetch and verify the portmaster-start binary.
+FROM alpine as builder
+
+# Ensure ca-certificates are up to date
+# RUN update-ca-certificates
+
+# Download and verify portmaster-start binary.
+RUN mkdir /init
+RUN wget https://updates.safing.io/linux_amd64/start/portmaster-start_v0-9-6 -O /init/portmaster-start
+COPY start-checksum.txt /init/start-checksum
+RUN cd /init && sha256sum -c /init/start-checksum
+RUN chmod 555 /init/portmaster-start
+
+# Use minimal image as base.
+FROM alpine
+
+# Copy the static executable.
+COPY --from=builder /init/portmaster-start /init/portmaster-start
+
+# Copy the init script
+COPY container-init.sh /init.sh
+
+# Run the hub.
+ENTRYPOINT ["/init.sh"]
diff --git a/spn/tools/container-init.sh b/spn/tools/container-init.sh
new file mode 100755
index 00000000..e5120872
--- /dev/null
+++ b/spn/tools/container-init.sh
@@ -0,0 +1,30 @@
+#!/bin/sh
+
+DATA="/data"
+START="/data/portmaster-start"
+INIT_START="/init/portmaster-start"
+
+# Set safe shell options.
+set -euf -o pipefail
+
+# Check if data dir is mounted.
+if [ ! -d $DATA ]; then
+  echo "Nothing mounted at $DATA, aborting."
+  exit 1
+fi
+
+# Copy init start to correct location, if not available.
+if [ ! -f $START ]; then
+  cp $INIT_START $START
+fi
+
+# Download updates.
+echo "running: $START update --data /data --intel-only"
+$START update --data /data --intel-only
+
+# Remove PID file, which could have been left after a crash.
+rm -f $DATA/hub-lock.pid
+
+# Always start the SPN Hub with the updated main start binary.
+echo "running: $START hub --data /data -- $@"
+$START hub --data /data -- $@
diff --git a/spn/tools/install.sh b/spn/tools/install.sh
new file mode 100755
index 00000000..e7cf8fd7
--- /dev/null
+++ b/spn/tools/install.sh
@@ -0,0 +1,326 @@
+#!/bin/sh
+#
+# This script should be run via curl as root:
+#   sudo sh -c "$(curl -fsSL https://raw.githubusercontent.com/safing/portmaster/master/spn/tools/install-spn.sh)"
+# or wget
+#   sudo sh -c "$(wget -qO- https://raw.githubusercontent.com/safing/portmaster/master/spn/tools/install-spn.sh)"
+#
+# As an alternative, you can first download the install script and run it afterwards:
+#   wget https://raw.githubusercontent.com/safing/portmaster/master/spn/tools/install-spn.sh
+#   sudo sh ./install.sh
+#
+#
+set -e
+
+ARCH=
+INSTALLDIR=
+PMSTART=
+ENABLENOW=
+INSTALLSYSTEMD=
+SYSTEMDINSTALLPATH=
+
+# apply_defaults fills in defaults for all still-unset configuration
+# variables. Values already set via flags or the environment are preserved.
+apply_defaults() {
+    ARCH=${ARCH:-amd64}
+    INSTALLDIR=${INSTALLDIR:-/opt/safing/spn}
+    PMSTART=${PMSTART:-https://updates.safing.io/latest/linux_${ARCH}/start/portmaster-start}
+    SYSTEMDINSTALLPATH=${SYSTEMDINSTALLPATH:-/etc/systemd/system/spn.service}
+
+    # Only manage a systemd unit when systemd is actually present.
+    if command_exists systemctl; then
+        INSTALLSYSTEMD=${INSTALLSYSTEMD:-yes}
+        ENABLENOW=${ENABLENOW:-yes}
+    else
+        INSTALLSYSTEMD=${INSTALLSYSTEMD:-no}
+        ENABLENOW=${ENABLENOW:-no}
+    fi
+
+    # The hostname may be freshly set, ensure the ENV variable is correct.
+    export HOSTNAME=$(hostname)
+}
+
+# command_exists returns success if the given command is available in PATH.
+command_exists() {
+    command -v "$@" >/dev/null 2>&1
+}
+
+# setup_tty detects whether stdin is interactive and, when stdout is a
+# terminal, initializes the color escape sequences used for output.
+# Indentation normalized to four spaces to match the rest of the script.
+setup_tty() {
+    if [ -t 0 ]; then
+        interactive=yes
+    fi
+
+    if [ -t 1 ]; then
+        RED=$(printf '\033[31m')
+        GREEN=$(printf '\033[32m')
+        YELLOW=$(printf '\033[33m')
+        BLUE=$(printf '\033[34m')
+        BOLD=$(printf '\033[1m')
+        RESET=$(printf '\033[m')
+    else
+        RED=""
+        GREEN=""
+        YELLOW=""
+        BLUE=""
+        BOLD=""
+        RESET=""
+    fi
+}
+
+log() {
+    echo ${GREEN}${BOLD}"-> "${RESET}"$@" >&2
+}
+
+error() {
+    echo ${RED}"Error: $@"${RESET} >&2
+}
+
+warn() {
+    echo ${YELLOW}"warn: $@"${RESET} >&2
+}
+
+# run_systemctl runs systemctl with the given arguments, discarding output.
+# Returns systemctl's exit code.
+run_systemctl() {
+    systemctl "$@" >/dev/null 2>&1
+}
+
+download_file() {
+    local src=$1
+    local dest=$2
+
+    if command_exists curl; then
+        curl --silent --fail --show-error --location --output $dest $src
+    elif command_exists wget; then
+        wget --quiet -O $dest $src
+    else
+        error "No suitable download command found, either curl or wget must be installed"
+        exit 1
+    fi
+}
+
+ensure_install_dir() {
+    log "Creating ${INSTALLDIR}"
+    mkdir -p ${INSTALLDIR}
+}
+
+# download_pmstart downloads the portmaster-start binary into the install
+# directory and makes it executable. An existing binary is overwritten.
+download_pmstart() {
+    log "Downloading portmaster-start ..."
+    local dest="${INSTALLDIR}/portmaster-start"
+    if [ -f "${dest}" ]; then
+        warn "Overwriting existing portmaster-start at ${dest}"
+    fi
+
+    # Quote both arguments so paths/URLs with special characters survive.
+    download_file "${PMSTART}" "${dest}"
+
+    log "Changing permissions"
+    chmod a+x "${dest}"
+}
+
+# download_updates lets portmaster-start fetch all required updates into the
+# installation directory.
+download_updates() {
+    log "Downloading updates ..."
+    "${INSTALLDIR}/portmaster-start" --data="${INSTALLDIR}" update
+}
+
+setup_systemd() {
+    log "Installing systemd service unit ..."
+    if [ ! "${INSTALLSYSTEMD}" = "yes" ]; then
+        warn "Skipping setup of systemd service unit"
+        echo "To launch the hub, execute the following as root:"
+        echo ""
+        echo "${INSTALLDIR}/portmaster-start --data ${INSTALLDIR} hub"
+        echo ""
+        return
+    fi
+
+    if [ -f "${SYSTEMDINSTALLPATH}" ]; then
+        warn "Overwriting existing unit path"
+    fi
+
+    cat >${SYSTEMDINSTALLPATH} <<EOT
+[Unit]
+Description=Safing Privacy Network Hub
+Wants=nss-lookup.target
+Conflicts=shutdown.target
+Before=shutdown.target
+
+[Service]
+Type=simple
+Restart=on-failure
+RestartSec=5
+LimitNOFILE=infinity
+Environment=LOGLEVEL=warning
+Environment=SPN_ARGS=
+EnvironmentFile=-/etc/default/spn
+ExecStart=${INSTALLDIR}/portmaster-start --data ${INSTALLDIR} hub -- --log \$LOGLEVEL \$SPN_ARGS
+
+[Install]
+WantedBy=multi-user.target
+EOT
+
+    log "Reloading systemd unit files"
+    run_systemctl daemon-reload
+
+    if run_systemctl is-active spn ||
+       run_systemctl is-failed spn; then
+        log "Restarting SPN hub"
+        run_systemctl restart spn.service
+    fi
+
+    # TODO(ppacher): allow disabling enable
+    if ! run_systemctl is-enabled spn ; then
+        if [ "${ENABLENOW}" = "yes" ]; then
+            log "Enabling and starting SPN."
+            run_systemctl enable --now spn.service || exit 1
+
+            log "Watch logs using: journalctl -fu spn.service"
+        else
+            log "Enabling SPN"
+            run_systemctl enable spn.service || exit 1
+        fi
+    fi
+
+}
+
+# ask_config interactively prompts for configuration values that are still
+# unset (hostname and metrics comment).
+# NOTE(review): "read -p" is a bash extension, not POSIX sh — verify that the
+# target shell (e.g. dash, busybox ash) supports it.
+ask_config() {
+    if [ "${HOSTNAME}" = "" ]; then
+        log "Please enter hostname:"
+        read -p "> " HOSTNAME
+    fi
+    if [ "${METRICS_COMMENT}" = "" ]; then
+        log "Please enter metrics comment:"
+        read -p "> " METRICS_COMMENT
+    fi
+}
+
+# write_config_file writes the hub's JSON configuration to the path in $1.
+# It reads HOSTNAME, METRICS_COMMENT and PUSHMETRICS from the environment.
+# NOTE(review): PUSHMETRICS is never set or prompted for in this script — it
+# stays empty unless provided via the environment; verify this is intended.
+write_config_file() {
+    cat >${1} <<EOT
+{
+  "core": {
+    "metrics": {
+      "instance": "$HOSTNAME",
+      "comment": "$METRICS_COMMENT",
+      "push": "$PUSHMETRICS"
+    }
+  },
+  "spn": {
+    "publicHub": {
+      "name": "$HOSTNAME"
+    }
+  }
+}
+EOT
+}
+
+# confirm_config prints the effective installation configuration and, when
+# running interactively, asks the user for confirmation before proceeding.
+confirm_config() {
+    log "Installation configuration:"
+    echo ""
+    echo "   Architecture: ${BOLD}${ARCH}${RESET}"
+    echo "   Download-URL: ${BOLD}${PMSTART}${RESET}"
+    echo "     Target Dir: ${BOLD}${INSTALLDIR}${RESET}"
+    echo "Install systemd: ${BOLD}${INSTALLSYSTEMD}${RESET}"
+    echo "      Unit path: ${BOLD}${SYSTEMDINSTALLPATH}${RESET}"
+    echo "      Start Now: ${BOLD}${ENABLENOW}${RESET}"
+    echo ""
+    echo "         Config:"
+    tmpfile=$(mktemp)
+    write_config_file "$tmpfile"
+    cat "$tmpfile"
+    # Clean up the preview file; it was only needed for display.
+    rm -f "$tmpfile"
+    echo ""
+    echo ""
+
+    if [ ! -z "${interactive}" ]
+    then
+        read -p "Continue (Y/n)? " ans
+        case "$ans" in
+            "" | "y" | "Y")
+                echo ""
+                ;;
+            *)
+                error "User aborted"
+                exit 1
+        esac
+    fi
+}
+
+print_help() {
+    cat <<EOT
+Usage: $0 [OPTIONS...]
+
+${BOLD}Options:${RESET}
+    ${GREEN}-y, --unattended${RESET}           Don't ask for confirmation.
+    ${GREEN}-n, --no-start${RESET}             Do not immediately start SPN hub.
+    ${GREEN}-t, --target PATH${RESET}          Configure the installation directory.
+    ${GREEN}-h, --help${RESET}                 Display this help text
+    ${GREEN}-a, --arch${RESET}                 Configure the binary architecture.
+    ${GREEN}-u, --url URL${RESET}              Set download URL for portmaster start.
+    ${GREEN}-S, --no-systemd${RESET}           Do not install systemd service unit.
+    ${GREEN}-s, --service-path PATH${RESET}    Location for the systemd unit file.
+EOT
+}
+
+main() {
+    setup_tty
+
+    # Parse arguments
+    while [ $# -gt 0 ]
+    do
+        case $1 in
+            --unattended | -y)
+                interactive=""
+                ;;
+            --no-start | -n)
+                ENABLENOW="no"
+                ;;
+            --target | -t)
+                INSTALLDIR=$2
+                shift
+                ;;
+            --help | -h)
+                print_help
+                exit 1 ;;
+            --arch | -a)
+                ARCH=$2
+                shift
+                ;;
+            --url | -u)
+                PMSTART=$2
+                shift
+                ;;
+            --no-systemd | -S)
+                INSTALLSYSTEMD=no
+                ENABLENOW=no
+                ;;
+            --service-path | -s)
+                SYSTEMDINSTALLPATH=$2
+                shift
+                ;;
+            *)
+                error "Unknown flag $1"
+                exit 1
+                ;;
+        esac
+        shift
+    done
+
+    cat <<EOT
+${BLUE}${BOLD}
+          ▄▄▄▄  ▄▄▄▄▄  ▄▄   ▄
+         █▀   ▀ █   ▀█ █▀▄  █
+         ▀█▄▄▄  █▄▄▄█▀ █ █▄ █
+             ▀█ █      █  █ █
+         ▀▄▄▄█▀ █      █   ██
+        ${GREEN}Safing Privacy Network
+${RESET}
+EOT
+
+    # prepare config
+    apply_defaults
+    ask_config
+    confirm_config
+
+    # Setup hub
+    ensure_install_dir
+    download_pmstart
+    download_updates
+    write_config_file "${INSTALLDIR}/config.json"
+
+    # setup systemd
+    setup_systemd
+}
+
+main "$@"
diff --git a/spn/tools/start-checksum.txt b/spn/tools/start-checksum.txt
new file mode 100644
index 00000000..3094e580
--- /dev/null
+++ b/spn/tools/start-checksum.txt
@@ -0,0 +1 @@
+3f45f0814c6db28c3899b39ae0ab01f8f20a8cc98697dbe8039162ccd9590bf8  ./portmaster-start
diff --git a/spn/tools/sysctl.conf b/spn/tools/sysctl.conf
new file mode 100644
index 00000000..130af869
--- /dev/null
+++ b/spn/tools/sysctl.conf
@@ -0,0 +1,45 @@
+## Kernel Optimizations for few very high bandwidth connections.
+
+# Apply quickly with this:
+# curl -fsSL https://updates.safing.io/internal/sysctl.conf > /etc/sysctl.d/9999-spn-network-optimizing.conf
+# cat /etc/sysctl.d/9999-spn-network-optimizing.conf
+# sysctl -p /etc/sysctl.d/9999-spn-network-optimizing.conf
+
+# Provide adequate buffer memory.
+# net.ipv4.tcp_mem is in 4096-byte pages.
+net.core.rmem_max = 1073741824
+net.core.wmem_max = 1073741824
+net.core.rmem_default = 16777216
+net.core.wmem_default = 16777216
+net.ipv4.tcp_rmem = 4096 16777216 1073741824
+net.ipv4.tcp_wmem = 4096 16777216 1073741824
+net.ipv4.tcp_mem = 4194304 8388608 16777216
+net.ipv4.udp_rmem_min = 16777216
+net.ipv4.udp_wmem_min = 16777216
+
+# Enable TCP window scaling.
+net.ipv4.tcp_window_scaling = 1
+
+# Increase the length of the processor input queue
+net.core.netdev_max_backlog = 100000
+net.core.netdev_budget = 1000
+net.core.netdev_budget_usecs = 10000
+
+# Set better congestion control.
+net.ipv4.tcp_congestion_control = htcp
+
+# Turn off fancy stuff for more stability.
+net.ipv4.tcp_sack = 0
+net.ipv4.tcp_dsack = 0
+net.ipv4.tcp_fack = 0
+net.ipv4.tcp_timestamps = 0
+
+# Max reorders before slow start.
+net.ipv4.tcp_reordering = 3
+
+# Prefer low latency to higher throughput.
+# Disables IPv4 TCP prequeue processing.
+net.ipv4.tcp_low_latency = 1
+
+# Don't start slow.
+net.ipv4.tcp_slow_start_after_idle = 0 
diff --git a/spn/unit/doc.go b/spn/unit/doc.go
new file mode 100644
index 00000000..9826a6ce
--- /dev/null
+++ b/spn/unit/doc.go
@@ -0,0 +1,13 @@
+// Package unit provides a "work unit" scheduling system for handling data sets that traverse multiple workers / goroutines.
+// The aim is to bind priority to a data set instead of a goroutine and split resources fairly among requests.
+//
+// Every "work" Unit is assigned an ever increasing ID and can be marked as "paused" or "high priority".
+// The Scheduler always gives a clearance up to a certain ID. All units below this ID may be processed.
+// High priority Units may always be processed.
+//
+// The Scheduler works with short slots and measures how many Units were finished in a slot.
+// The "slot pace" holds an indication of the current Unit finishing speed per slot. It is only changed slowly (but boosts if too far away) in order to keep stabilize the system.
+// The Scheduler then calculates the next unit ID limit to give clearance to for the next slot:
+//
+//	"finished units" + "slot pace" + "paused units" - "fraction of high priority units"
+package unit
diff --git a/spn/unit/scheduler.go b/spn/unit/scheduler.go
new file mode 100644
index 00000000..0b5d6e11
--- /dev/null
+++ b/spn/unit/scheduler.go
@@ -0,0 +1,358 @@
+package unit
+
+import (
+	"context"
+	"errors"
+	"math"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"github.com/tevino/abool"
+)
+
+const (
+	// Defaults for SchedulerConfig, applied in NewScheduler().
+	defaultSlotDuration = 10 * time.Millisecond // 100 slots per second
+	defaultMinSlotPace  = 100                   // 10 000 pps (100 units x 100 slots per second)
+
+	defaultWorkSlotPercentage      = 0.7  // 70%
+	defaultSlotChangeRatePerStreak = 0.02 // 2%
+
+	defaultStatCycleDuration = 1 * time.Minute
+)
+
+// Scheduler creates and schedules units.
+// Must be created using NewScheduler().
+type Scheduler struct { //nolint:maligned
+	// Configuration.
+	config SchedulerConfig
+
+	// Units IDs Limit / Thresholds.
+
+	// currentUnitID holds the last assigned Unit ID.
+	currentUnitID atomic.Int64
+	// clearanceUpTo holds the current threshold up to which Unit ID Units may be processed.
+	clearanceUpTo atomic.Int64
+	// slotPace holds the current pace. This is the base value for clearance
+	// calculation, not the value of the current cleared Units itself.
+	slotPace atomic.Int64
+	// finished holds the amount of units that were finished within the current slot.
+	finished atomic.Int64
+
+	// Slot management.
+	// Two signal channels alternate: one is closed to announce a new slot
+	// while the other is re-created for the following slot.
+	slotSignalA      chan struct{}
+	slotSignalB      chan struct{}
+	slotSignalSwitch bool
+	slotSignalsLock  sync.RWMutex
+
+	// stopping is set by Stop() and checked by the scheduler loop.
+	stopping     abool.AtomicBool
+	unitDebugger *UnitDebugger
+
+	// Stats.
+	stats struct {
+		// Working Values.
+		// Sum/count pairs are reset on overflow and cycled into the
+		// "current" values every StatCycleDuration by cycleStats().
+		progress struct {
+			maxPace           atomic.Int64
+			maxLeveledPace    atomic.Int64
+			avgPaceSum        atomic.Int64
+			avgPaceCnt        atomic.Int64
+			avgUnitLifeSum    atomic.Int64
+			avgUnitLifeCnt    atomic.Int64
+			avgWorkSlotSum    atomic.Int64
+			avgWorkSlotCnt    atomic.Int64
+			avgCatchUpSlotSum atomic.Int64
+			avgCatchUpSlotCnt atomic.Int64
+		}
+
+		// Calculated Values.
+		current struct {
+			maxPace        atomic.Int64
+			maxLeveledPace atomic.Int64
+			avgPace        atomic.Int64
+			avgUnitLife    atomic.Int64
+			avgWorkSlot    atomic.Int64
+			avgCatchUpSlot atomic.Int64
+		}
+	}
+}
+
+// SchedulerConfig holds scheduler configuration.
+type SchedulerConfig struct {
+	// SlotDuration defines the duration of one slot.
+	SlotDuration time.Duration
+
+	// MinSlotPace defines the minimum slot pace.
+	// The slot pace will never fall below this value.
+	MinSlotPace int64
+
+	// WorkSlotPercentage defines how much of a slot should be scheduled with work.
+	// The remainder is for catching up and breathing room for other tasks.
+	// Must be between 55% (0.55) and 95% (0.95).
+	// The default value is 0.7 (70%).
+	WorkSlotPercentage float64
+
+	// SlotChangeRatePerStreak defines how many percent (0-1) the slot pace
+	// should change per streak.
+	// Is enforced to be able to change the minimum slot pace by at least 1.
+	// The default value is 0.02 (2%).
+	SlotChangeRatePerStreak float64
+
+	// StatCycleDuration defines how often stats are calculated.
+	// The default value is 1 minute.
+	StatCycleDuration time.Duration
+}
+
+// NewScheduler returns a new scheduler.
+// A nil config is treated as an empty config; all unset options receive
+// their documented defaults and are clamped to their valid ranges.
+func NewScheduler(config *SchedulerConfig) *Scheduler {
+	// Use an empty config when the caller did not provide one.
+	if config == nil {
+		config = &SchedulerConfig{}
+	}
+
+	// Create new scheduler with a private copy of the config.
+	sched := &Scheduler{
+		config:      *config,
+		slotSignalA: make(chan struct{}),
+		slotSignalB: make(chan struct{}),
+	}
+	cfg := &sched.config
+
+	// Apply defaults for all unset options.
+	if cfg.SlotDuration == 0 {
+		cfg.SlotDuration = defaultSlotDuration
+	}
+	if cfg.MinSlotPace == 0 {
+		cfg.MinSlotPace = defaultMinSlotPace
+	}
+	if cfg.WorkSlotPercentage == 0 {
+		cfg.WorkSlotPercentage = defaultWorkSlotPercentage
+	}
+	if cfg.SlotChangeRatePerStreak == 0 {
+		cfg.SlotChangeRatePerStreak = defaultSlotChangeRatePerStreak
+	}
+	if cfg.StatCycleDuration == 0 {
+		cfg.StatCycleDuration = defaultStatCycleDuration
+	}
+
+	// Clamp WorkSlotPercentage to its documented 55%-95% range.
+	if cfg.WorkSlotPercentage < 0.55 {
+		cfg.WorkSlotPercentage = 0.55
+	} else if cfg.WorkSlotPercentage > 0.95 {
+		cfg.WorkSlotPercentage = 0.95
+	}
+
+	// The slot change rate must be able to change the slot pace by at least 1.
+	if minRate := 1 / float64(cfg.MinSlotPace); cfg.SlotChangeRatePerStreak < minRate {
+		cfg.SlotChangeRatePerStreak = minRate
+	}
+
+	// Seed clearance and pace with the minimum slot pace.
+	sched.clearanceUpTo.Store(cfg.MinSlotPace)
+	sched.slotPace.Store(cfg.MinSlotPace)
+
+	return sched
+}
+
+// nextSlotSignal returns the channel that will be closed when the next
+// slot starts.
+func (s *Scheduler) nextSlotSignal() chan struct{} {
+	s.slotSignalsLock.RLock()
+	defer s.slotSignalsLock.RUnlock()
+
+	if !s.slotSignalSwitch {
+		return s.slotSignalB
+	}
+	return s.slotSignalA
+}
+
+// announceNextSlot closes the active slot signal channel, waking all
+// waiters, and prepares a fresh channel for the slot after it.
+func (s *Scheduler) announceNextSlot() {
+	s.slotSignalsLock.Lock()
+	defer s.slotSignalsLock.Unlock()
+
+	if s.slotSignalSwitch {
+		// A is active: close it and renew B for the next round.
+		close(s.slotSignalA)
+		s.slotSignalB = make(chan struct{})
+	} else {
+		// B is active: close it and renew A for the next round.
+		close(s.slotSignalB)
+		s.slotSignalA = make(chan struct{})
+	}
+
+	// Flip the active slot signal.
+	s.slotSignalSwitch = !s.slotSignalSwitch
+}
+
+// SlotScheduler manages the slot and schedules units.
+// Must only be started once.
+//
+// The loop ticks at half the slot duration and alternates between a work
+// half-slot (reset counters, raise clearance, announce the new slot) and a
+// catch-up half-slot (adjust the slot pace based on how many units finished).
+func (s *Scheduler) SlotScheduler(ctx context.Context) error {
+	// Start slot ticker.
+	ticker := time.NewTicker(s.config.SlotDuration / 2)
+	defer ticker.Stop()
+
+	// Give clearance to all when stopping.
+	defer s.clearanceUpTo.Store(math.MaxInt64 - math.MaxInt32)
+
+	var (
+		halfSlotID        uint64
+		halfSlotStartedAt = time.Now()
+		halfSlotEndedAt   time.Time
+		halfSlotDuration  = float64(s.config.SlotDuration / 2)
+
+		increaseStreak float64
+		decreaseStreak float64
+		oneStreaks     int
+
+		// cycleStatsAt is the number of half-slots per stat cycle.
+		cycleStatsAt = uint64(s.config.StatCycleDuration / (s.config.SlotDuration / 2))
+	)
+
+	for range ticker.C {
+		halfSlotEndedAt = time.Now()
+
+		switch {
+		case halfSlotID%2 == 0:
+
+			// First Half-Slot: Work Slot
+
+			// Calculate time taken in previous slot.
+			catchUpSlotDuration := halfSlotEndedAt.Sub(halfSlotStartedAt).Nanoseconds()
+
+			// Add current slot duration to avg calculation.
+			s.stats.progress.avgCatchUpSlotCnt.Add(1)
+			if s.stats.progress.avgCatchUpSlotSum.Add(catchUpSlotDuration) < 0 {
+				// Reset if we wrap.
+				s.stats.progress.avgCatchUpSlotCnt.Store(1)
+				s.stats.progress.avgCatchUpSlotSum.Store(catchUpSlotDuration)
+			}
+
+			// Reset slot counters.
+			s.finished.Store(0)
+
+			// Raise clearance according
+			// to the current unit ID plus the work share of the slot pace.
+			s.clearanceUpTo.Store(
+				s.currentUnitID.Load() +
+					int64(
+						float64(s.slotPace.Load())*s.config.WorkSlotPercentage,
+					),
+			)
+
+			// Announce start of new slot.
+			s.announceNextSlot()
+
+		default:
+
+			// Second Half-Slot: Catch-Up Slot
+
+			// Calculate time taken in previous slot.
+			workSlotDuration := halfSlotEndedAt.Sub(halfSlotStartedAt).Nanoseconds()
+
+			// Add current slot duration to avg calculation.
+			s.stats.progress.avgWorkSlotCnt.Add(1)
+			if s.stats.progress.avgWorkSlotSum.Add(workSlotDuration) < 0 {
+				// Reset if we wrap.
+				s.stats.progress.avgWorkSlotCnt.Store(1)
+				s.stats.progress.avgWorkSlotSum.Store(workSlotDuration)
+			}
+
+			// Calculate slot duration skew correction, as slots will not run in the
+			// exact specified duration.
+			slotDurationSkewCorrection := halfSlotDuration / float64(workSlotDuration)
+
+			// Calculate slot pace with performance of first half-slot.
+			// Get current slot pace as float64.
+			currentSlotPace := float64(s.slotPace.Load())
+			// Calculate current raw slot pace.
+			// Finished units are doubled, as they were counted in a half-slot.
+			newRawSlotPace := float64(s.finished.Load()*2) * slotDurationSkewCorrection
+
+			// Move slot pace in the trending direction.
+			if newRawSlotPace >= currentSlotPace {
+				// Adjust based on streak.
+				increaseStreak++
+				decreaseStreak = 0
+				s.slotPace.Add(int64(
+					currentSlotPace * s.config.SlotChangeRatePerStreak * increaseStreak,
+				))
+
+				// Count one-streaks.
+				if increaseStreak == 1 {
+					oneStreaks++
+				} else {
+					oneStreaks = 0
+				}
+
+				// Debug logging:
+				// fmt.Printf("+++ slot pace: %.0f (current raw pace: %.0f, increaseStreak: %.0f, clearanceUpTo: %d)\n", currentSlotPace, newRawSlotPace, increaseStreak, s.clearanceUpTo.Load())
+			} else {
+				// Adjust based on streak.
+				decreaseStreak++
+				increaseStreak = 0
+				s.slotPace.Add(int64(
+					-currentSlotPace * s.config.SlotChangeRatePerStreak * decreaseStreak,
+				))
+
+				// Enforce minimum.
+				if s.slotPace.Load() < s.config.MinSlotPace {
+					s.slotPace.Store(s.config.MinSlotPace)
+					decreaseStreak = 0
+				}
+
+				// Count one-streaks.
+				if decreaseStreak == 1 {
+					oneStreaks++
+				} else {
+					oneStreaks = 0
+				}
+
+				// Debug logging:
+				// fmt.Printf("--- slot pace: %.0f (current raw pace: %.0f, decreaseStreak: %.0f, clearanceUpTo: %d)\n", currentSlotPace, newRawSlotPace, decreaseStreak, s.clearanceUpTo.Load())
+			}
+
+			// Record Stats
+
+			// Add current pace to avg calculation.
+			s.stats.progress.avgPaceCnt.Add(1)
+			if s.stats.progress.avgPaceSum.Add(s.slotPace.Load()) < 0 {
+				// Reset if we wrap.
+				s.stats.progress.avgPaceCnt.Store(1)
+				s.stats.progress.avgPaceSum.Store(s.slotPace.Load())
+			}
+
+			// Check if current pace is new max.
+			if s.slotPace.Load() > s.stats.progress.maxPace.Load() {
+				s.stats.progress.maxPace.Store(s.slotPace.Load())
+			}
+
+			// Check if current pace is new leveled max
+			// (a pace that held through at least three one-streaks).
+			if oneStreaks >= 3 && s.slotPace.Load() > s.stats.progress.maxLeveledPace.Load() {
+				s.stats.progress.maxLeveledPace.Store(s.slotPace.Load())
+			}
+		}
+		// Switch to other slot-half.
+		halfSlotID++
+		halfSlotStartedAt = halfSlotEndedAt
+
+		// Cycle stats after defined time period.
+		if halfSlotID%cycleStatsAt == 0 {
+			s.cycleStats()
+		}
+
+		// Check if we are stopping.
+		select {
+		case <-ctx.Done():
+			return nil
+		default:
+		}
+		if s.stopping.IsSet() {
+			return nil
+		}
+	}
+
+	// We should never get here.
+	// If we do, trigger a worker restart via the service worker.
+	return errors.New("unexpected end of scheduler")
+}
+
+// Stop stops the scheduler and gives clearance to all units.
+// It only sets the stopping flag; the scheduler loop notices it at the next
+// half-slot tick and then raises clearance to (almost) the maximum.
+func (s *Scheduler) Stop() {
+	s.stopping.Set()
+}
diff --git a/spn/unit/scheduler_stats.go b/spn/unit/scheduler_stats.go
new file mode 100644
index 00000000..6fd1d272
--- /dev/null
+++ b/spn/unit/scheduler_stats.go
@@ -0,0 +1,87 @@
+package unit
+
+// Stats are somewhat racy, as one value of sum or count might already be
+// updated with the latest slot data, while the other has not been.
+// This is not so much of a problem, as slots are really short and the impact
+// is very low.
+
+// cycleStats publishes the in-progress counters to the current stats and resets them for the next cycle.
+func (s *Scheduler) cycleStats() {
+	// Get and reset max pace.
+	s.stats.current.maxPace.Store(s.stats.progress.maxPace.Load()) // Publish the max observed during the last cycle.
+	s.stats.progress.maxPace.Store(0)
+
+	// Get and reset max leveled pace.
+	s.stats.current.maxLeveledPace.Store(s.stats.progress.maxLeveledPace.Load())
+	s.stats.progress.maxLeveledPace.Store(0)
+
+	// Get and reset avg slot pace.
+	avgPaceCnt := s.stats.progress.avgPaceCnt.Load()
+	if avgPaceCnt > 0 { // Average = sum / count; guard against division by zero.
+		s.stats.current.avgPace.Store(s.stats.progress.avgPaceSum.Load() / avgPaceCnt)
+	} else {
+		s.stats.current.avgPace.Store(0) // No samples this cycle.
+	}
+	s.stats.progress.avgPaceCnt.Store(0)
+	s.stats.progress.avgPaceSum.Store(0)
+
+	// Get and reset avg unit life.
+	avgUnitLifeCnt := s.stats.progress.avgUnitLifeCnt.Load()
+	if avgUnitLifeCnt > 0 {
+		s.stats.current.avgUnitLife.Store(s.stats.progress.avgUnitLifeSum.Load() / avgUnitLifeCnt)
+	} else {
+		s.stats.current.avgUnitLife.Store(0) // No samples this cycle.
+	}
+	s.stats.progress.avgUnitLifeCnt.Store(0)
+	s.stats.progress.avgUnitLifeSum.Store(0)
+
+	// Get and reset avg work slot duration.
+	avgWorkSlotCnt := s.stats.progress.avgWorkSlotCnt.Load()
+	if avgWorkSlotCnt > 0 {
+		s.stats.current.avgWorkSlot.Store(s.stats.progress.avgWorkSlotSum.Load() / avgWorkSlotCnt)
+	} else {
+		s.stats.current.avgWorkSlot.Store(0) // No samples this cycle.
+	}
+	s.stats.progress.avgWorkSlotCnt.Store(0)
+	s.stats.progress.avgWorkSlotSum.Store(0)
+
+	// Get and reset avg catch up slot duration.
+	avgCatchUpSlotCnt := s.stats.progress.avgCatchUpSlotCnt.Load()
+	if avgCatchUpSlotCnt > 0 {
+		s.stats.current.avgCatchUpSlot.Store(s.stats.progress.avgCatchUpSlotSum.Load() / avgCatchUpSlotCnt)
+	} else {
+		s.stats.current.avgCatchUpSlot.Store(0) // No samples this cycle.
+	}
+	s.stats.progress.avgCatchUpSlotCnt.Store(0)
+	s.stats.progress.avgCatchUpSlotSum.Store(0)
+}
+
+// GetMaxSlotPace returns the current maximum slot pace.
+func (s *Scheduler) GetMaxSlotPace() int64 {
+	return s.stats.current.maxPace.Load() // Value from the last completed stats cycle (see cycleStats).
+}
+
+// GetMaxLeveledSlotPace returns the current maximum leveled slot pace.
+func (s *Scheduler) GetMaxLeveledSlotPace() int64 {
+	return s.stats.current.maxLeveledPace.Load() // Value from the last completed stats cycle (see cycleStats).
+}
+
+// GetAvgSlotPace returns the current average slot pace.
+func (s *Scheduler) GetAvgSlotPace() int64 {
+	return s.stats.current.avgPace.Load() // Average over the last completed stats cycle (see cycleStats).
+}
+
+// GetAvgUnitLife returns the current average unit lifetime until it is finished.
+func (s *Scheduler) GetAvgUnitLife() int64 {
+	return s.stats.current.avgUnitLife.Load() // Nanoseconds (recorded via time.Since(...).Nanoseconds() in Unit.Finish); wrap in time.Duration to format.
+}
+
+// GetAvgWorkSlotDuration returns the current average work slot duration.
+func (s *Scheduler) GetAvgWorkSlotDuration() int64 {
+	return s.stats.current.avgWorkSlot.Load() // Presumably nanoseconds — callers wrap it in time.Duration; confirm at the recording site.
+}
+
+// GetAvgCatchUpSlotDuration returns the current average catch up slot duration.
+func (s *Scheduler) GetAvgCatchUpSlotDuration() int64 {
+	return s.stats.current.avgCatchUpSlot.Load() // Presumably nanoseconds — callers wrap it in time.Duration; confirm at the recording site.
+}
diff --git a/spn/unit/scheduler_test.go b/spn/unit/scheduler_test.go
new file mode 100644
index 00000000..3e3ec6ba
--- /dev/null
+++ b/spn/unit/scheduler_test.go
@@ -0,0 +1,51 @@
+package unit
+
+import (
+	"context"
+	"testing"
+)
+
+func BenchmarkScheduler(b *testing.B) { // Measures unit throughput: one benchmark iteration per finished unit.
+	workers := 10
+
+	// Create and start scheduler.
+	s := NewScheduler(&SchedulerConfig{})
+	ctx, cancel := context.WithCancel(context.Background())
+	go func() {
+		err := s.SlotScheduler(ctx)
+		if err != nil {
+			panic(err) // b.Fatal is not safe from another goroutine; panic surfaces the error instead.
+		}
+	}()
+	defer cancel() // Stops the scheduler when the benchmark returns.
+
+	// Init control structures.
+	done := make(chan struct{}) // Closed to tell workers to exit.
+	finishedCh := make(chan struct{}) // One receive per finished unit.
+
+	// Start workers.
+	for i := 0; i < workers; i++ {
+		go func() {
+			for {
+				u := s.NewUnit()
+				u.WaitForSlot()
+				u.Finish()
+				select {
+				case finishedCh <- struct{}{}:
+				case <-done: // Benchmark is over; stop producing units.
+					return
+				}
+			}
+		}()
+	}
+
+	// Start benchmark.
+	b.ResetTimer() // Exclude scheduler and worker setup from the measurement.
+	for i := 0; i < b.N; i++ {
+		<-finishedCh
+	}
+	b.StopTimer()
+
+	// Cleanup.
+	close(done)
+}
diff --git a/spn/unit/unit.go b/spn/unit/unit.go
new file mode 100644
index 00000000..d198fd64
--- /dev/null
+++ b/spn/unit/unit.go
@@ -0,0 +1,103 @@
+package unit
+
+import (
+	"time"
+
+	"github.com/tevino/abool"
+)
+
+// Unit describes a "work unit" and is meant to be embedded into another struct
+// used for passing data moving through multiple processing steps.
+type Unit struct {
+	id           int64            // Monotonically increasing ID handed out from scheduler.currentUnitID.
+	scheduler    *Scheduler       // Owning scheduler; used for clearance checks and stats.
+	created      time.Time        // Creation time; used to record unit lifetime in Finish.
+	finished     abool.AtomicBool // Set exactly once by Finish.
+	highPriority abool.AtomicBool // When set, WaitForSlot returns immediately.
+}
+
+// NewUnit returns a new unit within the scheduler.
+func (s *Scheduler) NewUnit() *Unit {
+	return &Unit{
+		id:        s.currentUnitID.Add(1), // Atomically hand out the next unit ID.
+		scheduler: s,
+		created:   time.Now(),
+	}
+}
+
+// ReUse re-initializes the unit so an already allocated struct can be used again.
+func (u *Unit) ReUse() {
+	// Finish previous unit.
+	u.Finish() // No-op if the previous use was already finished.
+
+	// Get new ID and unset finish flag.
+	u.id = u.scheduler.currentUnitID.Add(1)
+	u.finished.UnSet() // NOTE(review): highPriority is not reset here — confirm reused units should keep their priority.
+}
+
+// WaitForSlot blocks until the unit may be processed.
+func (u *Unit) WaitForSlot() {
+	// High priority units may always process.
+	if u.highPriority.IsSet() {
+		return
+	}
+
+	for {
+		// Check if we are allowed to process in the current slot.
+		if u.id <= u.scheduler.clearanceUpTo.Load() { // Cleared: all units up to this ID may run.
+			return
+		}
+
+		// Debug logging:
+		// fmt.Printf("unit %d waiting for clearance at %d\n", u.id, u.scheduler.clearanceUpTo.Load())
+
+		// Wait for next slot.
+		<-u.scheduler.nextSlotSignal() // Blocks until the scheduler announces the next slot, then re-check clearance.
+	}
+}
+
+// Finish signals the unit scheduler that this unit has finished processing.
+// Will no-op if called on a nil Unit.
+func (u *Unit) Finish() {
+	if u == nil {
+		return
+	}
+
+	// Always increase finished, even if the unit is from a previous epoch.
+	if u.finished.SetToIf(false, true) { // Flip exactly once so the stats below run once per unit.
+		u.scheduler.finished.Add(1)
+
+		// Record the time this unit took from creation to finish.
+		timeTaken := time.Since(u.created).Nanoseconds()
+		u.scheduler.stats.progress.avgUnitLifeCnt.Add(1)
+		if u.scheduler.stats.progress.avgUnitLifeSum.Add(timeTaken) < 0 {
+			// Reset if we wrap: the sum overflowed negative, restart the rolling average with this sample.
+			u.scheduler.stats.progress.avgUnitLifeCnt.Store(1)
+			u.scheduler.stats.progress.avgUnitLifeSum.Store(timeTaken)
+		}
+	}
+}
+
+// MakeHighPriority marks the unit as high priority, exempting it from slot clearance (see WaitForSlot).
+func (u *Unit) MakeHighPriority() {
+	switch {
+	case u.finished.IsSet():
+		// Unit is already finished.
+	case !u.highPriority.SetToIf(false, true):
+		// Unit is already set to high priority.
+		// Else: High Priority set.
+	case u.id > u.scheduler.clearanceUpTo.Load():
+		// Unit is outside current clearance, reduce clearance by one.
+		u.scheduler.clearanceUpTo.Add(-1) // NOTE(review): lowers clearance by one per exempted unit — confirm this matches the scheduler's pace accounting.
+	}
+}
+
+// IsHighPriority returns whether the unit has high priority.
+func (u *Unit) IsHighPriority() bool {
+	return u.highPriority.IsSet() // Lock-free read of the priority flag.
+}
+
+// RemovePriority removes the high priority mark; the unit is then subject to slot clearance again.
+func (u *Unit) RemovePriority() {
+	u.highPriority.UnSet()
+}
diff --git a/spn/unit/unit_debug.go b/spn/unit/unit_debug.go
new file mode 100644
index 00000000..0ba053bd
--- /dev/null
+++ b/spn/unit/unit_debug.go
@@ -0,0 +1,86 @@
+package unit
+
+import (
+	"sync"
+	"time"
+
+	"github.com/safing/portbase/log"
+)
+
+// UnitDebugger is used to debug unit leaks.
+type UnitDebugger struct { //nolint:golint
+	units     map[int64]*UnitDebugData // Tracked units keyed by unit ID; finished ones are pruned in debugStep.
+	unitsLock sync.Mutex               // Guards units.
+}
+
+// UnitDebugData represents a unit that is being debugged.
+type UnitDebugData struct { //nolint:golint
+	unit       *Unit  // The unit being tracked.
+	unitSource string // Caller-supplied label of where the unit originated.
+}
+
+// DebugUnit registers the given unit for debug output with the given source.
+// Additional calls on the same unit update the unit source.
+// StartDebugLog() must be called before calling DebugUnit().
+func (s *Scheduler) DebugUnit(u *Unit, unitSource string) {
+	// Check if scheduler and unit debugger are created.
+	if s == nil || s.unitDebugger == nil { // No-op unless StartDebugLog() set up the debugger.
+		return
+	}
+
+	s.unitDebugger.unitsLock.Lock()
+	defer s.unitDebugger.unitsLock.Unlock()
+
+	s.unitDebugger.units[u.id] = &UnitDebugData{ // Re-registering the same ID overwrites, updating the source label.
+		unit:       u,
+		unitSource: unitSource,
+	}
+}
+
+// StartDebugLog logs the scheduler state every second.
+func (s *Scheduler) StartDebugLog() {
+	s.unitDebugger = &UnitDebugger{
+		units: make(map[int64]*UnitDebugData),
+	}
+
+	// Force StatCycleDuration to match the debug log output.
+	s.config.StatCycleDuration = time.Second
+
+	go func() { // NOTE(review): no stop signal; this goroutine logs for the process lifetime — confirm acceptable for debug-only use.
+		for {
+			s.debugStep()
+			time.Sleep(time.Second)
+		}
+	}()
+}
+
+func (s *Scheduler) debugStep() { // Logs one snapshot of the scheduler state and prunes finished units.
+	s.unitDebugger.unitsLock.Lock()
+	defer s.unitDebugger.unitsLock.Unlock()
+
+	// Go through debugging units and clear finished ones, count sources.
+	sources := make(map[string]int)
+	for id, debugUnit := range s.unitDebugger.units {
+		if debugUnit.unit.finished.IsSet() {
+			delete(s.unitDebugger.units, id) // Finished units no longer need tracking.
+		} else {
+			cnt := sources[debugUnit.unitSource]
+			sources[debugUnit.unitSource] = cnt + 1
+		}
+	}
+
+	// Print current state.
+	log.Debugf(
+		`scheduler: state: slotPace=%d avgPace=%d maxPace=%d maxLeveledPace=%d currentUnitID=%d clearanceUpTo=%d unitLife=%s slotDurations=%s/%s`,
+		s.slotPace.Load(),
+		s.GetAvgSlotPace(),
+		s.GetMaxSlotPace(),
+		s.GetMaxLeveledSlotPace(),
+		s.currentUnitID.Load(),
+		s.clearanceUpTo.Load(),
+		time.Duration(s.GetAvgUnitLife()).Round(10*time.Microsecond),
+		time.Duration(s.GetAvgWorkSlotDuration()).Round(10*time.Microsecond),
+		time.Duration(s.GetAvgCatchUpSlotDuration()).Round(10*time.Microsecond),
+	)
+	log.Debugf("scheduler: unit sources: %+v", sources) // Remaining (unfinished) units per source — leaks show up here.
+}
diff --git a/spn/unit/unit_test.go b/spn/unit/unit_test.go
new file mode 100644
index 00000000..8f5a5ac8
--- /dev/null
+++ b/spn/unit/unit_test.go
@@ -0,0 +1,104 @@
+package unit
+
+import (
+	"context"
+	"fmt"
+	"math"
+	"math/rand"
+	"sync"
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func TestUnit(t *testing.T) { //nolint:paralleltest
+	// Ignore deprecation: the suggested alternative is not safe for concurrent use.
+	// The global rand methods use a locked seed, which is not available from outside.
+	rand.Seed(time.Now().UnixNano()) //nolint
+
+	size := 1000000
+	workers := 100
+
+	// Create and start scheduler.
+	s := NewScheduler(&SchedulerConfig{})
+	s.StartDebugLog()
+	ctx, cancel := context.WithCancel(context.Background())
+	go func() {
+		err := s.SlotScheduler(ctx)
+		if err != nil {
+			panic(err) // t.Fatal is not safe from another goroutine; panic surfaces the error instead.
+		}
+	}()
+	defer cancel()
+
+	// Start the workers; each pushes its share of the units through the scheduler.
+	var wg sync.WaitGroup
+	wg.Add(workers)
+	sizePerWorker := size / workers
+	for i := 0; i < workers; i++ {
+		go func() {
+			for i := 0; i < sizePerWorker; i++ {
+				u := s.NewUnit()
+
+				// Make 1% high priority.
+				if rand.Int()%100 == 0 { //nolint:gosec // This is a test.
+					u.MakeHighPriority()
+				}
+
+				u.WaitForSlot()
+				time.Sleep(10 * time.Microsecond) // Simulate a small amount of work per unit.
+				u.Finish()
+			}
+			wg.Done()
+		}()
+	}
+
+	// Wait for workers to finish.
+	wg.Wait()
+
+	// Wait for two slot durations for values to update.
+	time.Sleep(s.config.SlotDuration * 2)
+
+	// Print current state.
+	s.cycleStats()
+	fmt.Printf(`scheduler state:
+		currentUnitID = %d
+		slotPace = %d
+		clearanceUpTo = %d
+		finished = %d
+		maxPace = %d
+		maxLeveledPace = %d
+		avgPace = %d
+		avgUnitLife = %s
+		avgWorkSlot = %s
+		avgCatchUpSlot = %s
+`,
+		s.currentUnitID.Load(),
+		s.slotPace.Load(),
+		s.clearanceUpTo.Load(),
+		s.finished.Load(),
+		s.GetMaxSlotPace(),
+		s.GetMaxLeveledSlotPace(),
+		s.GetAvgSlotPace(),
+		time.Duration(s.GetAvgUnitLife()),
+		time.Duration(s.GetAvgWorkSlotDuration()),
+		time.Duration(s.GetAvgCatchUpSlotDuration()),
+	)
+
+	// Check if everything seems good.
+	assert.Equal(t, size, int(s.currentUnitID.Load()), "currentUnitID must match size")
+	assert.GreaterOrEqual(
+		t,
+		int(s.clearanceUpTo.Load()),
+		size+int(float64(s.config.MinSlotPace)*s.config.SlotChangeRatePerStreak),
+		"clearanceUpTo must be at least size+minSlotPace",
+	)
+
+	// Shutdown
+	cancel()
+	time.Sleep(s.config.SlotDuration * 10) // Give the scheduler time to run its shutdown sequence.
+
+	// Check if scheduler shut down correctly.
+	assert.Equal(t, math.MaxInt64-math.MaxInt32, int(s.clearanceUpTo.Load()), "clearance must be near MaxInt64")
+}