Hi,
On a clean install of Gentoo I have installed only Incus and a container “hmm5”.
The host is in the 192.168.9.81/24 network via a bonded network card behind a
natting router at 192.168.9.254. Pretty standard everything.
I wanted the container at 192.168.9.155 to be part of the LAN using the “routed”
nictype. I don't like MACVLAN or IPVLAN because I prefer to have a central firewall
on the host for all containers. Coming from Virtuozzo/OpenVZ, this is the setup most
similar to their “venet” layer-3 bridge, which is not available in Incus.
This is the setup configuration:
# uname -a
Linux mask.freakout.de 6.12.16-gentoo #11 SMP PREEMPT_DYNAMIC
x86_64 Intel(R) Xeon(R) CPU E3-1220 V2 @ 3.10GHz GenuineIntel GNU/Linux
# incus version
Client version: 6.0.3
Server version: 6.0.3
# incus config show
config:
images.auto_update_interval: "0"
# incus network list
+---------+----------+---------+------------------+------+-------------+---------+---------+
| NAME | TYPE | MANAGED | IPV4 | IPV6 | DESCRIPTION | USED BY | STATE |
+---------+----------+---------+------------------+------+-------------+---------+---------+
| bond0 | bond | NO | 192.168.9.81/24 | | | 2 | |
+---------+----------+---------+------------------+------+-------------+---------+---------+
| lo | loopback | NO | | | | 0 | |
+---------+----------+---------+------------------+------+-------------+---------+---------+
| lxdbr0 | bridge | YES | 192.168.181.1/24 | none | | 2 | CREATED |
+---------+----------+---------+------------------+------+-------------+---------+---------+
# incus network show lxdbr0
config:
ipv4.address: 192.168.181.1/24
ipv4.nat: "false"
ipv6.address: none
description: ""
name: lxdbr0
type: bridge
used_by:
- /1.0/instances/hmm4
- /1.0/profiles/default
managed: true
status: Created
locations:
- none
project: default
# incus storage show masksp
config:
source: vga/lxd
volatile.initial_source: vga/lxd
zfs.pool_name: vga/lxd
description: ""
name: masksp
driver: zfs
used_by:
- /1.0/images/0e5837afd2b69b188594af0b8f9787c2f02fe5000fdc5051c98b36438c93ab8f
- /1.0/images/6fa9b59aec5b6c1468369e4f4ba4768d5078da6e630555c3335b23c4a785405a
- /1.0/instances/hmm4
- /1.0/instances/hmm5
- /1.0/profiles/default
- /1.0/profiles/routed
status: Created
locations:
- none
# incus list
+------+---------+----------------------+------+-----------------+-----------+
| NAME | STATE | IPV4 | IPV6 | TYPE | SNAPSHOTS |
+------+---------+----------------------+------+-----------------+-----------+
| hmm5 | RUNNING | 192.168.9.155 (eth0) | | CONTAINER | 0 |
+------+---------+----------------------+------+-----------------+-----------+
# incus config show hmm5
architecture: x86_64
config:
image.architecture: amd64
image.description: Gentoo current amd64 (20250313_05:19)
image.os: Gentoo
image.release: current
image.requirements.secureboot: "false"
image.serial: "20250313_05:19"
image.type: squashfs
image.variant: openrc
volatile.base_image: 6fa9b59aec5b6c1468369e4f4ba4768d5078da6e630555c3335b23c4a785405a
volatile.cloud-init.instance-id: bfab1b4c-faac-4d7a-929c-4513fddcc5ac
volatile.eth0.host_name: vethc1fe76ad
volatile.eth0.hwaddr: 00:16:3e:f0:62:47
volatile.eth0.name: eth0
volatile.idmap.base: "0"
volatile.last_state.idmap: '[]'
volatile.last_state.power: RUNNING
volatile.uuid: bd0eb551-943e-4206-be73-0dbff55abdbd
volatile.uuid.generation: bd0eb551-943e-4206-be73-0dbff55abdbd
devices: {}
ephemeral: false
profiles:
- routed
stateful: false
description: ""
# incus profile show routed
config:
user.network-config: |
version: 2
ethernets:
eth0:
addresses:
- 192.168.9.155/32
nameservers:
addresses:
- 8.8.8.8
search: []
routes:
- to: 0.0.0.0/0
via: 169.254.0.1
on-link: true
description: routed profile
devices:
eth0:
ipv4.address: 192.168.9.155
nictype: routed
parent: bond0
type: nic
root:
path: /
pool: masksp
type: disk
name: routed
used_by:
- /1.0/instances/hmm5
project: default
# incus info hmm5
Name: hmm5
Status: RUNNING
Type: container
Architecture: x86_64
Resources:
Processes: 3
Disk usage:
root: 1.08MiB
CPU usage:
CPU usage (in seconds): 2
Network usage:
eth0:
Type: broadcast
State: UP
Host interface: vethc1fe76ad
MAC address: 00:16:3e:f0:62:47
MTU: 1500
Bytes received: 2.90kB
Bytes sent: 322.92kB
Packets received: 38
Packets sent: 1018
IP addresses:
inet: 192.168.9.155/32 (global)
inet: 169.254.213.47/16 (link)
My problem is that “hmm5” cannot reach any host in the 192.168.9.0/24 network.
There is also a host in the LAN at 192.168.9.82, reachable from the host for testing:
# ping -c1 -W2 192.168.9.82
PING 192.168.9.82 (192.168.9.82) 56(84) bytes of data.
64 bytes from 192.168.9.82: icmp_seq=1 ttl=64 time=0.188 ms
After starting “hmm5” I see the following network info on the host and in the container:
# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
inet 127.0.0.1/8 brd 127.255.255.255 scope host lo
2: bond0: <BROADCAST,MULTICAST,MASTER,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default qlen 1000
inet 192.168.9.81/24 brd 192.168.9.255 scope global bond0
9: lxdbr0: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc noqueue state DOWN group default qlen 1000
inet 192.168.181.1/24 scope global lxdbr0
11: vethc1fe76ad@if10: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default qlen 1000
inet 169.254.0.1/32 scope global vethc1fe76ad
# arp -a
? (192.168.9.155) at <incomplete> on bond0
? (192.168.9.82) at 90:1b:0e:37:42:01 [ether] on bond0
? (192.168.9.155) at 00:16:3e:f0:62:47 [ether] on vethc1fe76ad
? (192.168.9.155) at <from_interface> PERM PUB on bond0
hmm5 ~ # ifconfig
eth0: flags=4163<UP,BROADCAST,RUNNING,MULTICAST> mtu 1500
inet 192.168.9.155 netmask 255.255.255.255 broadcast 0.0.0.0
inet6 fe80::216:3eff:fef0:6247 prefixlen 64 scopeid 0x20<link>
ether 00:16:3e:f0:62:47 txqueuelen 1000 (Ethernet)
RX packets 38 bytes 2896 (2.8 KiB)
RX errors 0 dropped 0 overruns 0 frame 0
TX packets 2350 bytes 778460 (760.2 KiB)
TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
lo: flags=73<UP,LOOPBACK,RUNNING> mtu 65536
inet 127.0.0.1 netmask 255.0.0.0
inet6 ::1 prefixlen 128 scopeid 0x10<host>
loop txqueuelen 1000 (Local Loopback)
RX packets 47 bytes 4512 (4.4 KiB)
RX errors 0 dropped 0 overruns 0 frame 0
TX packets 47 bytes 4512 (4.4 KiB)
TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
hmm5 ~ # ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
inet 127.0.0.1/8 scope host lo
10: eth0@if11: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default qlen 1000
inet 192.168.9.155/32 scope global eth0
inet 169.254.213.47/16 brd 169.254.255.255 scope global noprefixroute eth0
hmm5 ~ # ip r
default dev eth0 scope link src 169.254.213.47 metric 1000010
^^^^^^^^^^^^^^ WRONG?
169.254.0.0/16 dev eth0 scope link src 169.254.213.47 metric 10
169.254.0.1 dev eth0 scope link
hmm5 ~ # ping -c1 -W2 169.254.0.1
PING 169.254.0.1 (169.254.0.1) 56(84) bytes of data.
64 bytes from 169.254.0.1: icmp_seq=1 ttl=64 time=0.063 ms
hmm5 ~ # ping -c1 -W2 192.168.9.81
PING 192.168.9.81 (192.168.9.81) 56(84) bytes of data.
1 packets transmitted, 0 received, 100% packet loss, time 0ms
After changing the default route's source address to the container's IP, I can
reach the host, but no host in the LAN and not the router. The proxy-ARP entry on
bond0 was set up correctly by Incus, but it doesn't work in either direction:
hmm5 ~ # ip r del default dev eth0 scope link src 169.254.213.47 metric 1000010
hmm5 ~ # ip r add default dev eth0 scope link src 192.168.9.155 metric 1000010
hmm5 ~ # ping -c1 -W2 192.168.9.81
PING 192.168.9.81 (192.168.9.81) 56(84) bytes of data.
64 bytes from 192.168.9.81: icmp_seq=1 ttl=64 time=0.065 ms
hmm5 ~ # ping -c1 -W2 192.168.9.82
PING 192.168.9.82 (192.168.9.82) 56(84) bytes of data.
1 packets transmitted, 0 received, 100% packet loss, time 2058ms
Please help - thanks
Axel