Summary

An experiment in setting up a directly connected Thunderbolt 3/USB4 network between an Intel NUC 10 i7 (NUC10I7FNH4) and a Minisforum MS-01 (i9-13900H), both running as Proxmox nodes.

Based on useful Gists by scyto and thaynes43.

Dependencies

# apt install frr iperf3

Kernel setup

Add the thunderbolt and thunderbolt-net kernel modules to /etc/modules so they load at boot.

# cat /etc/modules
# /etc/modules: kernel modules to load at boot time.
#
# This file contains the names of kernel modules that should be loaded
# at boot time, one per line. Lines beginning with "#" are ignored.
# Parameters can be specified after the module name.


# Generated by sensors-detect on Fri Mar  7 15:33:37 2025
# Chip drivers
coretemp
nct6775

thunderbolt
thunderbolt-net
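
The modules can also be loaded on the spot to test without a reboot (a quick sanity check; both modules ship with the stock Proxmox kernel):

# modprobe thunderbolt
# modprobe thunderbolt-net
# lsmod | grep thunderbolt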

Configure networking interfaces

The loopback aliases lo:0 and lo:6 carry the node's static IPv4 and IPv6 addresses. Interfaces en05 and en06 are the Thunderbolt interfaces. The Intel NUC only gets an en05 section, since that hardware has a single Thunderbolt port.

$ cat /etc/network/interfaces
# network interface settings; autogenerated
# Please do NOT modify this file directly, unless you know what
# you're doing.
#
# If you want to manage parts of the network configuration manually,
# please utilize the 'source' or 'source-directory' directives to do
# so.
# PVE will preserve these directives, but will NOT read its network
# configuration from sourced files, so do not attempt to move any of
# the PVE managed interfaces into external files!

auto lo
iface lo inet loopback

# thunderbolt begin

auto lo:0
iface lo:0 inet static
        address 10.0.0.81/32

auto lo:6
iface lo:6 inet static
        address fc00::81/128

# thunderbolt end

iface enp87s0 inet manual

iface enp88s0 inet manual

iface enp2s0f0np0 inet manual

iface enp2s0f1np1 inet manual

auto vmbr0
iface vmbr0 inet static
	address 192.168.1.2/24
	gateway 192.168.1.1
	bridge-ports enp87s0
	bridge-stp off
	bridge-fd 0
#LAN

iface wlp89s0 inet manual

auto vmbr1
iface vmbr1 inet manual
	bridge-ports enp88s0
	bridge-stp off
	bridge-fd 0

#WAN

# thunderbolt begin

auto en05
allow-hotplug en05
iface en05 inet manual
        mtu 9000

iface en05 inet6 manual
        mtu 9000

auto en06
allow-hotplug en06
iface en06 inet manual
        mtu 9000

iface en06 inet6 manual
        mtu 9000

# thunderbolt end

source /etc/network/interfaces.d/*
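
Proxmox ships ifupdown2, so the new configuration can be applied without a reboot; when working over SSH through vmbr0, double-check that stanza first:

# ifreload -a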

Find the PCI address for each port by watching udevadm monitor while unplugging and replugging the cable. Example from the NUC; the PCI address is the last value in the device path (0000:03:00.0):

UDEV  [246.114447] change   /devices/pci0000:00/0000:00:15.2/i2c_designware.1/i2c-2/i2c-INT3515:02-tps6598x.0/typec/port0 (typec)
UDEV  [246.114670] change   /devices/pci0000:00/0000:00:15.2/i2c_designware.1/i2c-2/i2c-INT3515:02-tps6598x.0/typec/port0 (typec)
UDEV  [246.114815] change   /devices/pci0000:00/0000:00:15.2/i2c_designware.1/i2c-2/i2c-INT3515:02-tps6598x.0/typec/port0 (typec)
UDEV  [246.114977] add      /devices/pci0000:00/0000:00:15.2/i2c_designware.1/i2c-2/i2c-INT3515:02-tps6598x.0/typec/port0/port0-partner (typec)
KERNEL[250.332842] change   /0-1 (thunderbolt)
KERNEL[250.332865] add      /devices/pci0000:00/0000:00:1c.0/0000:01:00.0/0000:02:00.0/0000:03:00.0/domain0/0-0/0-1 (thunderbolt)
KERNEL[250.332909] add      /devices/pci0000:00/0000:00:1c.0/0000:01:00.0/0000:02:00.0/0000:03:00.0/domain0/0-0/0-1/0-1.0 (thunderbolt)
KERNEL[250.332997] add      /devices/pci0000:00/0000:00:1c.0/0000:01:00.0/0000:02:00.0/0000:03:00.0/domain0/0-0/0-1/0-1.0/net/thunderbolt0 (net)
KERNEL[250.333036] add      /devices/pci0000:00/0000:00:1c.0/0000:01:00.0/0000:02:00.0/0000:03:00.0/domain0/0-0/0-1/0-1.0/net/thunderbolt0/queues/rx-0 (queues)
KERNEL[250.333046] add      /devices/pci0000:00/0000:00:1c.0/0000:01:00.0/0000:02:00.0/0000:03:00.0/domain0/0-0/0-1/0-1.0/net/thunderbolt0/queues/tx-0 (queues)
KERNEL[250.333133] bind     /devices/pci0000:00/0000:00:1c.0/0000:01:00.0/0000:02:00.0/0000:03:00.0/domain0/0-0/0-1/0-1.0 (thunderbolt)
UDEV  [250.334321] change   /0-1 (thunderbolt)
UDEV  [250.334487] add      /devices/pci0000:00/0000:00:1c.0/0000:01:00.0/0000:02:00.0/0000:03:00.0/domain0/0-0/0-1 (thunderbolt)
UDEV  [250.334855] add      /devices/pci0000:00/0000:00:1c.0/0000:01:00.0/0000:02:00.0/0000:03:00.0/domain0/0-0/0-1/0-1.0 (thunderbolt)
UDEV  [250.340323] add      /devices/pci0000:00/0000:00:1c.0/0000:01:00.0/0000:02:00.0/0000:03:00.0/domain0/0-0/0-1/0-1.0/net/thunderbolt0 (net)
UDEV  [250.340663] add      /devices/pci0000:00/0000:00:1c.0/0000:01:00.0/0000:02:00.0/0000:03:00.0/domain0/0-0/0-1/0-1.0/net/thunderbolt0/queues/rx-0 (queues)
UDEV  [250.341052] add      /devices/pci0000:00/0000:00:1c.0/0000:01:00.0/0000:02:00.0/0000:03:00.0/domain0/0-0/0-1/0-1.0/net/thunderbolt0/queues/tx-0 (queues)
UDEV  [250.341459] bind     /devices/pci0000:00/0000:00:1c.0/0000:01:00.0/0000:02:00.0/0000:03:00.0/domain0/0-0/0-1/0-1.0 (thunderbolt)

Example from the MS-01; the PCI address is the last value in the device path (0000:00:0d.2):

KERNEL[1860.072772] remove   /devices/pci0000:00/0000:00:0d.2/domain0/0-0/usb4_port1/0-0:1.1/nvm_non_active0 (nvmem)
KERNEL[1860.072792] remove   /devices/pci0000:00/0000:00:0d.2/domain0/0-0/usb4_port1/0-0:1.1/nvm_active0 (nvmem)
KERNEL[1860.072803] remove   /devices/pci0000:00/0000:00:0d.2/domain0/0-0/usb4_port1/0-0:1.1 (thunderbolt)
UDEV  [1860.073844] remove   /devices/pci0000:00/0000:00:0d.2/domain0/0-0/usb4_port1/0-0:1.1/nvm_non_active0 (nvmem)
UDEV  [1860.073854] remove   /devices/pci0000:00/0000:00:0d.2/domain0/0-0/usb4_port1/0-0:1.1/nvm_active0 (nvmem)
UDEV  [1860.073913] remove   /devices/pci0000:00/0000:00:0d.2/domain0/0-0/usb4_port1/0-0:1.1 (thunderbolt)
KERNEL[1866.878472] add      /devices/pci0000:00/0000:00:0d.2/domain0/0-0/usb4_port1/0-0:1.1 (thunderbolt)
UDEV  [1866.880487] add      /devices/pci0000:00/0000:00:0d.2/domain0/0-0/usb4_port1/0-0:1.1 (thunderbolt)
KERNEL[1866.891555] add      /devices/pci0000:00/0000:00:0d.2/domain0/0-0/usb4_port1/0-0:1.1/nvm_active0 (nvmem)
KERNEL[1866.891567] add      /devices/pci0000:00/0000:00:0d.2/domain0/0-0/usb4_port1/0-0:1.1/nvm_non_active0 (nvmem)
UDEV  [1866.891947] add      /devices/pci0000:00/0000:00:0d.2/domain0/0-0/usb4_port1/0-0:1.1/nvm_active0 (nvmem)
UDEV  [1866.892476] add      /devices/pci0000:00/0000:00:0d.2/domain0/0-0/usb4_port1/0-0:1.1/nvm_non_active0 (nvmem)
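
If the thunderbolt interface already exists, the same PCI address can be read directly from sysfs instead of watching hotplug events (interface name thunderbolt0 assumed):

# readlink -f /sys/class/net/thunderbolt0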

Rename the thunderboltN interfaces to en0X with systemd .link files, matching on the PCI paths found above.

$ cat /etc/systemd/network/00-thunderbolt0.link
[Match]
Path=pci-0000:00:0d.3
Driver=thunderbolt-net
[Link]
MACAddressPolicy=none
Name=en05

$ cat /etc/systemd/network/00-thunderbolt1.link 
[Match]
Path=pci-0000:00:0d.2
Driver=thunderbolt-net
[Link]
MACAddressPolicy=none
Name=en06
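
Before rebooting, udev's builtin test mode can confirm that a link file matches the device (again assuming the interface currently exists as thunderbolt0):

# udevadm test-builtin net_setup_link /sys/class/net/thunderbolt0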

Have udev bring the renamed interfaces up on reboot or hotplug.

# cat /etc/udev/rules.d/10-tb-en.rules
ACTION=="move", SUBSYSTEM=="net", KERNEL=="en05", RUN+="/usr/local/bin/pve-en05.sh"
ACTION=="move", SUBSYSTEM=="net", KERNEL=="en06", RUN+="/usr/local/bin/pve-en06.sh"
$ cat /usr/local/bin/pve-en05.sh
#!/bin/bash

# this brings the renamed interface up and reprocesses any settings in /etc/network/interfaces for the renamed interface
/usr/sbin/ifup en05

$ cat /usr/local/bin/pve-en06.sh
#!/bin/bash

# this brings the renamed interface up and reprocesses any settings in /etc/network/interfaces for the renamed interface
/usr/sbin/ifup en06

Make the helper scripts executable, then rebuild the initramfs so the .link files are picked up early in boot.

chmod +x /usr/local/bin/*.sh
update-initramfs -u -k all
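
After a reboot or a cable replug, a quick check that the rename and ifup took effect:

# ip -br link | grep en0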

Enable IP forwarding for the ring network (with only two devices it is not strictly needed yet, but it allows adding a third node later).

$ grep forward /etc/sysctl.conf
# Uncomment the next line to enable packet forwarding for IPv4
net.ipv4.ip_forward=1
# Uncomment the next line to enable packet forwarding for IPv6
net.ipv6.conf.all.forwarding=1
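
Apply the settings without a reboot:

# sysctl -p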

Enable the Intel IOMMU. If it is not enabled on the Intel NUC, the Thunderbolt connection fails with "failed to enable DMA paths" visible in dmesg -w. The thunderbolt.dyndbg=+p parameter below turns on verbose Thunderbolt debug logging, which helps while troubleshooting.

$ vi /etc/default/grub
GRUB_CMDLINE_LINUX_DEFAULT="quiet intel_iommu=on iommu=pt thunderbolt.dyndbg=+p"

update-grub

reboot
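
After the reboot, confirm the IOMMU came up (exact messages vary by kernel version):

# dmesg | grep -i -e dmar -e iommu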

Configure Layer 3

Configure OpenFabric.

$ cat /etc/frr/frr.conf
frr version 8.5.2
frr defaults traditional
hostname proxmox1
log syslog informational
service integrated-vtysh-config
!
interface en05
 ip router openfabric 1
 ipv6 router openfabric 1
exit
!
interface en06
 ip router openfabric 1
 ipv6 router openfabric 1
exit
!
interface lo
 ip router openfabric 1
 ipv6 router openfabric 1
 openfabric passive
exit
!
router openfabric 1
 net 49.0000.0000.0001.00
exit
!
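
OpenFabric is handled by FRR's fabricd daemon, which the stock package disables; enable it in /etc/frr/daemons and restart FRR:

# sed -i 's/^fabricd=no/fabricd=yes/' /etc/frr/daemons
# systemctl restart frr.service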

Verify with show running-config inside vtysh. Restarting frr will fail if repeated too quickly; clear this with systemctl reset-failed frr.service.

Add a cron job to restart FRR after boot, since frr may start before the Thunderbolt interfaces exist.

$ crontab -e
# m h  dom mon dow   command
@reboot sleep 60 && /usr/bin/systemctl restart frr.service

Repeat for the other hosts (each node needs its own addresses and a unique NET, e.g. 49.0000.0000.0002.00 for the second node), rebooting after each.

Once all hosts are configured, vtysh -c "show openfabric topology" should show all the nodes.

vtysh -c "show openfabric topology"
Area 1:
IS-IS paths to level-2 routers that speak IP
Vertex               Type         Metric Next-Hop             Interface Parent
proxmox2                                                              
10.0.0.82/32         IP internal  0                                     proxmox2(4)
proxmox1             TE-IS        10     proxmox1             en05      proxmox2(4)
10.0.0.81/32         IP TE        20     proxmox1             en05      proxmox1(4)

IS-IS paths to level-2 routers that speak IPv6
Vertex               Type         Metric Next-Hop             Interface Parent
proxmox2                                                              
fc00::82/128         IP6 internal 0                                     proxmox2(4)
proxmox1             TE-IS        10     proxmox1             en05      proxmox2(4)
fc00::81/128         IP6 internal 20     proxmox1             en05      proxmox1(4)

IS-IS paths to level-2 routers with hop-by-hop metric
Vertex               Type         Metric Next-Hop             Interface Parent
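
The learned routes should also be visible in FRR's routing table (the openfabric protocol filter assumes a reasonably recent FRR):

# vtysh -c "show ipv6 route openfabric"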

Running ip addr should also show the static IPs on the loopback interface, and the en0X interfaces should exist.

1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet 10.0.0.81/32 scope global lo
       valid_lft forever preferred_lft forever
    inet6 fc00::81/128 scope global 
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host noprefixroute 
       valid_lft forever preferred_lft forever
-- snip --
19: en06: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 9000 qdisc pfifo_fast state UP group default qlen 1000
    link/ether 02:bc:61:bd:55:80 brd ff:ff:ff:ff:ff:ff
    inet6 fe80::bc:61ff:febd:5580/64 scope link 
       valid_lft forever preferred_lft forever

Performance

Performance test with guest migration.

root@proxmox1:~# qm migrate 121 proxmox2 --online --migration_network fc00::82/126
VM isn't running. Doing offline migration instead.
2025-03-08 21:12:09 use dedicated network address for sending migration traffic (fc00::82)
2025-03-08 21:12:09 starting migration of VM 121 to node 'proxmox2' (fc00::82)
2025-03-08 21:12:09 found generated disk 'local-lvm:vm-121-cloudinit' (in current VM config)
2025-03-08 21:12:09 found local disk 'local-lvm:vm-121-disk-0' (attached)
2025-03-08 21:12:09 found local disk 'local-lvm:vm-121-disk-1' (attached)
2025-03-08 21:12:09 copying local disk images
2025-03-08 21:12:10   Logical volume "vm-121-cloudinit" created.
2025-03-08 21:12:10 64+0 records in
2025-03-08 21:12:10 64+0 records out
2025-03-08 21:12:10 4194304 bytes (4.2 MB, 4.0 MiB) copied, 0.520758 s, 8.1 MB/s
2025-03-08 21:12:10 63+2 records in
2025-03-08 21:12:10 63+2 records out
2025-03-08 21:12:10 4194304 bytes (4.2 MB, 4.0 MiB) copied, 0.00482505 s, 869 MB/s
2025-03-08 21:12:10 successfully imported 'local-lvm:vm-121-cloudinit'
2025-03-08 21:12:10 volume 'local-lvm:vm-121-cloudinit' is 'local-lvm:vm-121-cloudinit' on the target
2025-03-08 21:12:11   Logical volume "vm-121-disk-0" created.
2025-03-08 21:12:11 64+0 records in
2025-03-08 21:12:11 64+0 records out
2025-03-08 21:12:11 4194304 bytes (4.2 MB, 4.0 MiB) copied, 0.561405 s, 7.5 MB/s
2025-03-08 21:12:11 62+4 records in
2025-03-08 21:12:11 62+4 records out
2025-03-08 21:12:11 4194304 bytes (4.2 MB, 4.0 MiB) copied, 0.0238996 s, 175 MB/s
2025-03-08 21:12:11 successfully imported 'local-lvm:vm-121-disk-0'
2025-03-08 21:12:11 volume 'local-lvm:vm-121-disk-0' is 'local-lvm:vm-121-disk-0' on the target
2025-03-08 21:12:12   Logical volume "vm-121-disk-1" created.
2025-03-08 21:12:15 1333592064 bytes (1.3 GB, 1.2 GiB) copied, 3 s, 444 MB/s
2025-03-08 21:12:18 2854420480 bytes (2.9 GB, 2.7 GiB) copied, 6 s, 476 MB/s
2025-03-08 21:12:21 4449173504 bytes (4.4 GB, 4.1 GiB) copied, 9 s, 494 MB/s
2025-03-08 21:12:24 5929369600 bytes (5.9 GB, 5.5 GiB) copied, 12 s, 494 MB/s
2025-03-08 21:12:27 7391281152 bytes (7.4 GB, 6.9 GiB) copied, 15 s, 493 MB/s
2025-03-08 21:12:30 8992194560 bytes (9.0 GB, 8.4 GiB) copied, 18 s, 500 MB/s
2025-03-08 21:12:33 10716577792 bytes (11 GB, 10 GiB) copied, 21 s, 510 MB/s 
2025-03-08 21:12:36 12435390464 bytes (12 GB, 12 GiB) copied, 24 s, 518 MB/s
2025-03-08 21:12:39 14173470720 bytes (14 GB, 13 GiB) copied, 27 s, 525 MB/s
2025-03-08 21:12:42 15834742784 bytes (16 GB, 15 GiB) copied, 30 s, 528 MB/s
2025-03-08 21:12:45 17390895104 bytes (17 GB, 16 GiB) copied, 33 s, 527 MB/s
2025-03-08 21:12:48 18785370112 bytes (19 GB, 17 GiB) copied, 36 s, 522 MB/s
2025-03-08 21:12:51 20477116416 bytes (20 GB, 19 GiB) copied, 39 s, 525 MB/s
2025-03-08 21:12:54 22245474304 bytes (22 GB, 21 GiB) copied, 42 s, 530 MB/s
2025-03-08 21:12:57 24027529216 bytes (24 GB, 22 GiB) copied, 45 s, 534 MB/s
2025-03-08 21:13:00 25654329344 bytes (26 GB, 24 GiB) copied, 48 s, 534 MB/s
2025-03-08 21:13:03 27292991488 bytes (27 GB, 25 GiB) copied, 51 s, 535 MB/s
2025-03-08 21:13:06 29014818816 bytes (29 GB, 27 GiB) copied, 54 s, 537 MB/s
2025-03-08 21:13:09 30678646784 bytes (31 GB, 29 GiB) copied, 57 s, 538 MB/s
2025-03-08 21:13:12 32409059328 bytes (32 GB, 30 GiB) copied, 60 s, 540 MB/s
2025-03-08 21:13:22 38044958720 bytes (38 GB, 35 GiB) copied, 70 s, 543 MB/s
2025-03-08 21:13:32 43489296384 bytes (43 GB, 41 GiB) copied, 80 s, 544 MB/s
2025-03-08 21:13:42 48469442560 bytes (48 GB, 45 GiB) copied, 90 s, 539 MB/s
2025-03-08 21:13:52 54067855360 bytes (54 GB, 50 GiB) copied, 100 s, 541 MB/s
2025-03-08 21:14:02 59300380672 bytes (59 GB, 55 GiB) copied, 110 s, 539 MB/s
2025-03-08 21:14:12 64894468096 bytes (65 GB, 60 GiB) copied, 120 s, 541 MB/s
2025-03-08 21:14:22 70407028736 bytes (70 GB, 66 GiB) copied, 130 s, 542 MB/s
2025-03-08 21:14:32 75753717760 bytes (76 GB, 71 GiB) copied, 140 s, 541 MB/s
2025-03-08 21:14:42 81284890624 bytes (81 GB, 76 GiB) copied, 150 s, 542 MB/s
2025-03-08 21:14:53 1310720+0 records in
2025-03-08 21:14:53 1310720+0 records out
2025-03-08 21:14:53 85899345920 bytes (86 GB, 80 GiB) copied, 161.101 s, 533 MB/s
2025-03-08 21:14:53 1287182+47075 records in
2025-03-08 21:14:53 1287182+47075 records out
2025-03-08 21:14:53 85899345920 bytes (86 GB, 80 GiB) copied, 161.362 s, 532 MB/s
2025-03-08 21:14:53 successfully imported 'local-lvm:vm-121-disk-1'
2025-03-08 21:14:53 volume 'local-lvm:vm-121-disk-1' is 'local-lvm:vm-121-disk-1' on the target
  Logical volume "vm-121-cloudinit" successfully removed.
  Logical volume "vm-121-disk-0" successfully removed.
  Logical volume "vm-121-disk-1" successfully removed.
2025-03-08 21:14:55 migration finished successfully (duration 00:02:47)

Running iperf3 (against iperf3 -s on proxmox1) shows a roughly 15 Gbit/s link, so guest migrations appear to be bottlenecked by the NUC's SATA SSD (Samsung 870 EVO, ~530-560 MB/s read/write), while the MS-01 uses NVMe.

root@proxmox2:~# iperf3 -c fc00::81
Connecting to host fc00::81, port 5201
[  5] local fc00::82 port 41556 connected to fc00::81 port 5201
[ ID] Interval           Transfer     Bitrate         Retr  Cwnd
[  5]   0.00-1.00   sec  1.78 GBytes  15.3 Gbits/sec  106   2.12 MBytes       
[  5]   1.00-2.00   sec  1.83 GBytes  15.7 Gbits/sec   34   2.62 MBytes       
[  5]   2.00-3.00   sec  1.77 GBytes  15.2 Gbits/sec  185   1.06 MBytes       
[  5]   3.00-4.00   sec  1.41 GBytes  12.1 Gbits/sec  112   2.68 MBytes       
[  5]   4.00-5.00   sec  1.82 GBytes  15.7 Gbits/sec   43   2.93 MBytes       
[  5]   5.00-6.00   sec  1.84 GBytes  15.8 Gbits/sec    0   2.93 MBytes       
[  5]   6.00-7.00   sec  1.82 GBytes  15.7 Gbits/sec   96   3.06 MBytes       
[  5]   7.00-8.00   sec  1.80 GBytes  15.5 Gbits/sec   74   3.00 MBytes       
[  5]   8.00-9.00   sec  1.80 GBytes  15.4 Gbits/sec   54   1.06 MBytes       
[  5]   9.00-10.00  sec  1.73 GBytes  14.9 Gbits/sec  258   3.12 MBytes       
- - - - - - - - - - - - - - - - - - - - - - - - -
[ ID] Interval           Transfer     Bitrate         Retr
[  5]   0.00-10.00  sec  17.6 GBytes  15.1 Gbits/sec  962             sender
[  5]   0.00-10.00  sec  17.6 GBytes  15.1 Gbits/sec                  receiver

Ping latency.

root@proxmox2:~# ping fc00::81
PING fc00::81(fc00::81) 56 data bytes
64 bytes from fc00::81: icmp_seq=1 ttl=64 time=4.26 ms
64 bytes from fc00::81: icmp_seq=2 ttl=64 time=4.25 ms
64 bytes from fc00::81: icmp_seq=3 ttl=64 time=4.24 ms
64 bytes from fc00::81: icmp_seq=4 ttl=64 time=4.25 ms
64 bytes from fc00::81: icmp_seq=5 ttl=64 time=4.24 ms
64 bytes from fc00::81: icmp_seq=6 ttl=64 time=4.17 ms
^C
--- fc00::81 ping statistics ---
6 packets transmitted, 6 received, 0% packet loss, time 5006ms
rtt min/avg/max/mdev = 4.173/4.235/4.259/0.028 ms