Container got no ipv4!

Required information

  • Distribution: Ubuntu
  • Distribution version: 20.04
  • The output of “lxc info” or if that fails:
root@iic-worker-203-gpu:~# lxc info
config:
  cluster.https_address: 10.102.32.203:8443
  core.https_address: 10.102.32.203:8443
  core.trust_password: true
api_extensions:
- storage_zfs_remove_snapshots
- container_host_shutdown_timeout
- container_stop_priority
- container_syscall_filtering
- auth_pki
- container_last_used_at
- etag
- patch
- usb_devices
- https_allowed_credentials
- image_compression_algorithm
- directory_manipulation
- container_cpu_time
- storage_zfs_use_refquota
- storage_lvm_mount_options
- network
- profile_usedby
- container_push
- container_exec_recording
- certificate_update
- container_exec_signal_handling
- gpu_devices
- container_image_properties
- migration_progress
- id_map
- network_firewall_filtering
- network_routes
- storage
- file_delete
- file_append
- network_dhcp_expiry
- storage_lvm_vg_rename
- storage_lvm_thinpool_rename
- network_vlan
- image_create_aliases
- container_stateless_copy
- container_only_migration
- storage_zfs_clone_copy
- unix_device_rename
- storage_lvm_use_thinpool
- storage_rsync_bwlimit
- network_vxlan_interface
- storage_btrfs_mount_options
- entity_description
- image_force_refresh
- storage_lvm_lv_resizing
- id_map_base
- file_symlinks
- container_push_target
- network_vlan_physical
- storage_images_delete
- container_edit_metadata
- container_snapshot_stateful_migration
- storage_driver_ceph
- storage_ceph_user_name
- resource_limits
- storage_volatile_initial_source
- storage_ceph_force_osd_reuse
- storage_block_filesystem_btrfs
- resources
- kernel_limits
- storage_api_volume_rename
- macaroon_authentication
- network_sriov
- console
- restrict_devlxd
- migration_pre_copy
- infiniband
- maas_network
- devlxd_events
- proxy
- network_dhcp_gateway
- file_get_symlink
- network_leases
- unix_device_hotplug
- storage_api_local_volume_handling
- operation_description
- clustering
- event_lifecycle
- storage_api_remote_volume_handling
- nvidia_runtime
- container_mount_propagation
- container_backup
- devlxd_images
- container_local_cross_pool_handling
- proxy_unix
- proxy_udp
- clustering_join
- proxy_tcp_udp_multi_port_handling
- network_state
- proxy_unix_dac_properties
- container_protection_delete
- unix_priv_drop
- pprof_http
- proxy_haproxy_protocol
- network_hwaddr
- proxy_nat
- network_nat_order
- container_full
- candid_authentication
- backup_compression
- candid_config
- nvidia_runtime_config
- storage_api_volume_snapshots
- storage_unmapped
- projects
- candid_config_key
- network_vxlan_ttl
- container_incremental_copy
- usb_optional_vendorid
- snapshot_scheduling
- snapshot_schedule_aliases
- container_copy_project
- clustering_server_address
- clustering_image_replication
- container_protection_shift
- snapshot_expiry
- container_backup_override_pool
- snapshot_expiry_creation
- network_leases_location
- resources_cpu_socket
- resources_gpu
- resources_numa
- kernel_features
- id_map_current
- event_location
- storage_api_remote_volume_snapshots
- network_nat_address
- container_nic_routes
- rbac
- cluster_internal_copy
- seccomp_notify
- lxc_features
- container_nic_ipvlan
- network_vlan_sriov
- storage_cephfs
- container_nic_ipfilter
- resources_v2
- container_exec_user_group_cwd
- container_syscall_intercept
- container_disk_shift
- storage_shifted
- resources_infiniband
- daemon_storage
- instances
- image_types
- resources_disk_sata
- clustering_roles
- images_expiry
- resources_network_firmware
- backup_compression_algorithm
- ceph_data_pool_name
- container_syscall_intercept_mount
- compression_squashfs
- container_raw_mount
- container_nic_routed
- container_syscall_intercept_mount_fuse
- container_disk_ceph
- virtual-machines
- image_profiles
- clustering_architecture
- resources_disk_id
- storage_lvm_stripes
- vm_boot_priority
- unix_hotplug_devices
- api_filtering
- instance_nic_network
- clustering_sizing
- firewall_driver
- projects_limits
- container_syscall_intercept_hugetlbfs
- limits_hugepages
- container_nic_routed_gateway
- projects_restrictions
- custom_volume_snapshot_expiry
- volume_snapshot_scheduling
- trust_ca_certificates
- snapshot_disk_usage
- clustering_edit_roles
- container_nic_routed_host_address
- container_nic_ipvlan_gateway
- resources_usb_pci
- resources_cpu_threads_numa
- resources_cpu_core_die
- api_os
- container_nic_routed_host_table
- container_nic_ipvlan_host_table
- container_nic_ipvlan_mode
- resources_system
- images_push_relay
- network_dns_search
- container_nic_routed_limits
- instance_nic_bridged_vlan
- network_state_bond_bridge
- usedby_consistency
- custom_block_volumes
- clustering_failure_domains
- resources_gpu_mdev
- console_vga_type
- projects_limits_disk
- network_type_macvlan
- network_type_sriov
- container_syscall_intercept_bpf_devices
- network_type_ovn
- projects_networks
- projects_networks_restricted_uplinks
- custom_volume_backup
- backup_override_name
- storage_rsync_compression
- network_type_physical
- network_ovn_external_subnets
- network_ovn_nat
- network_ovn_external_routes_remove
- tpm_device_type
- storage_zfs_clone_copy_rebase
- gpu_mdev
- resources_pci_iommu
- resources_network_usb
- resources_disk_address
- network_physical_ovn_ingress_mode
- network_ovn_dhcp
- network_physical_routes_anycast
- projects_limits_instances
- network_state_vlan
- instance_nic_bridged_port_isolation
- instance_bulk_state_change
- network_gvrp
- instance_pool_move
- gpu_sriov
- pci_device_type
- storage_volume_state
- network_acl
- migration_stateful
- disk_state_quota
- storage_ceph_features
- projects_compression
- projects_images_remote_cache_expiry
- certificate_project
- network_ovn_acl
- projects_images_auto_update
- projects_restricted_cluster_target
- images_default_architecture
- network_ovn_acl_defaults
- gpu_mig
- project_usage
- network_bridge_acl
- warnings
- projects_restricted_backups_and_snapshots
- clustering_join_token
- clustering_description
- server_trusted_proxy
- clustering_update_cert
- storage_api_project
- server_instance_driver_operational
- server_supported_storage_drivers
- event_lifecycle_requestor_address
- resources_gpu_usb
- clustering_evacuation
- network_ovn_nat_address
- network_bgp
- network_forward
- custom_volume_refresh
- network_counters_errors_dropped
- metrics
- image_source_project
- clustering_config
- network_peer
- linux_sysctl
- network_dns
- ovn_nic_acceleration
- certificate_self_renewal
- instance_project_move
- storage_volume_project_move
- cloud_init
- network_dns_nat
- database_leader
- instance_all_projects
- clustering_groups
- ceph_rbd_du
- instance_get_full
- qemu_metrics
- gpu_mig_uuid
- event_project
- clustering_evacuation_live
- instance_allow_inconsistent_copy
- network_state_ovn
- storage_volume_api_filtering
- image_restrictions
- storage_zfs_export
- network_dns_records
- storage_zfs_reserve_space
- network_acl_log
- storage_zfs_blocksize
- metrics_cpu_seconds
- instance_snapshot_never
- certificate_token
- instance_nic_routed_neighbor_probe
- event_hub
- agent_nic_config
- projects_restricted_intercept
- metrics_authentication
- images_target_project
- cluster_migration_inconsistent_copy
- cluster_ovn_chassis
- container_syscall_intercept_sched_setscheduler
- storage_lvm_thinpool_metadata_size
api_status: stable
api_version: "1.0"
auth: trusted
public: false
auth_methods:
- tls
environment:
  addresses:
  - 10.102.32.203:8443
  architectures:
  - x86_64
  - i686
  certificate: |
    -----BEGIN CERTIFICATE-----
    MIICKDCCAa2gAwIBAgIQPi2SwUguTgGpHLrxy1T2MDAKBggqhkjOPQQDAzBAMRww
    GgYDVQQKExNsaW51eGNvbnRhaW5lcnMub3JnMSAwHgYDVQQDDBdyb290QGlpYy13
    b3JrZXItMjAzLWdwdTAeFw0yMTA2MDcwODQzMzRaFw0zMTA2MDUwODQzMzRaMEAx
    HDAaBgNVBAoTE2xpbnV4Y29udGFpbmVycy5vcmcxIDAeBgNVBAMMF3Jvb3RAaWlj
    LXdvcmtlci0yMDMtZ3B1MHYwEAYHKoZIzj0CAQYFK4EEACIDYgAE6GnbhFAXm/uI
    b8AsSLAKpSHAdJIDCavgQDy8COQ4H4oLrYLAGcDIhiekM8IdcqjrwDhWKEyjuFpn
    3GyZF3VzAM6VlRsMYPrJjdU8JL05pFkWTuYKZBIggzTndlSkX0kwo2wwajAOBgNV
    HQ8BAf8EBAMCBaAwEwYDVR0lBAwwCgYIKwYBBQUHAwEwDAYDVR0TAQH/BAIwADA1
    BgNVHREELjAsghJpaWMtd29ya2VyLTIwMy1ncHWHBH8AAAGHEAAAAAAAAAAAAAAA
    AAAAAAEwCgYIKoZIzj0EAwMDaQAwZgIxANwoIM8smxsxZqrnVGah9ZykI/R016OO
    eC8EPSMGKJFkC40Ekday/w0/MM2pMs37KAIxAPpSxX8qHIw8aWRS1hGlFh+/TNSW
    MuGOHB1CD5j8/EDT6DRAl7RubS3OmVrQzHv++Q==
    -----END CERTIFICATE-----
  certificate_fingerprint: 5d0551db4936203f6d90cafd47e7257ff7e4608920cd6203348c3f474eafe443
  driver: lxc | qemu
  driver_version: 4.0.12 | 6.1.1
  firewall: xtables
  kernel: Linux
  kernel_architecture: x86_64
  kernel_features:
    idmapped_mounts: "false"
    netnsid_getifaddrs: "true"
    seccomp_listener: "true"
    seccomp_listener_continue: "true"
    shiftfs: "false"
    uevent_injection: "true"
    unpriv_fscaps: "true"
  kernel_version: 5.4.0-73-generic
  lxc_features:
    cgroup2: "true"
    core_scheduling: "true"
    devpts_fd: "true"
    idmapped_mounts_v2: "true"
    mount_injection_file: "true"
    network_gateway_device_route: "true"
    network_ipvlan: "true"
    network_l2proxy: "true"
    network_phys_macvlan_mtu: "true"
    network_veth_router: "true"
    pidfd: "true"
    seccomp_allow_deny_syntax: "true"
    seccomp_notify: "true"
    seccomp_proxy_send_notify_fd: "true"
  os_name: Ubuntu
  os_version: "20.04"
  project: default
  server: lxd
  server_clustered: true
  server_event_mode: full-mesh
  server_name: iic-worker-203-gpu
  server_pid: 185115
  server_version: 5.0.0
  storage: zfs
  storage_version: 0.8.3-1ubuntu12.7
  storage_supported_drivers:
  - name: btrfs
    version: 5.4.1
    remote: false
  - name: cephfs
    version: 15.2.14
    remote: true
  - name: dir
    version: "1"
    remote: false
  - name: lvm
    version: 2.03.07(2) (2019-11-30) / 1.02.167 (2019-11-30) / 4.41.0
    remote: false
  - name: zfs
    version: 0.8.3-1ubuntu12.7
    remote: false
  - name: ceph
    version: 15.2.14
    remote: true

Issue description

My container got no IPv4 after I rebooted it, and I cannot use SSH to connect to my container's port. HELP!!!

Information to attach

  • [ ] Any relevant kernel output (dmesg)
  • [ ] Container log (lxc info NAME --show-log)
root@iic-worker-203-gpu:~# lxc info chenfeng --show-log
Name: chenfeng
Status: RUNNING
Type: container
Architecture: x86_64
Location: iic-worker-203-gpu
PID: 261156
Created: 2021/12/04 23:39 CST
Last Used: 2022/04/28 10:12 CST

Resources:
 Processes: 124
 Disk usage:
   root: 449.00GiB
 CPU usage:
   CPU usage (in seconds): 7
 Memory usage:
   Memory (current): 314.24MiB
   Memory (peak): 315.55MiB
 Network usage:
   eth0:
     Type: broadcast
     State: DOWN
     Host interface: veth97584cad
     MAC address: 00:16:3e:e4:31:a8
     MTU: 1450
     Bytes received: 698B
     Bytes sent: 1.35kB
     Packets received: 2
     Packets sent: 10
     IP addresses:
   lo:
     Type: loopback
     State: UP
     MTU: 65536
     Bytes received: 1.58kB
     Bytes sent: 1.58kB
     Packets received: 10
     Packets sent: 10
     IP addresses:
       inet:  127.0.0.1/8 (local)
       inet6: ::1/128 (local)
   docker0:
     Type: broadcast
     State: UP
     MAC address: 02:42:11:62:d3:75
     MTU: 1500
     Bytes received: 0B
     Bytes sent: 0B
     Packets received: 0
     Packets sent: 0
     IP addresses:
       inet:  172.17.0.1/16 (global)

Log:

lxc chenfeng 20220428021220.281 WARN     conf - conf.c:lxc_map_ids:3592 - newuidmap binary is missing
lxc chenfeng 20220428021220.282 WARN     conf - conf.c:lxc_map_ids:3598 - newgidmap binary is missing
lxc chenfeng 20220428021220.283 WARN     conf - conf.c:lxc_map_ids:3592 - newuidmap binary is missing
lxc chenfeng 20220428021220.283 WARN     conf - conf.c:lxc_map_ids:3598 - newgidmap binary is missing
lxc chenfeng 20220428021220.285 WARN     cgfsng - cgroups/cgfsng.c:fchowmodat:1252 - No such file or directory - Failed to fchownat(40, memory.oom.group, 1000000000, 0, AT_EMPTY_PATH | AT_SYMLINK_NOFOLLOW )
lxc chenfeng 20220428021902.566 WARN     conf - conf.c:lxc_map_ids:3592 - newuidmap binary is missing
lxc chenfeng 20220428021902.566 WARN     conf - conf.c:lxc_map_ids:3598 - newgidmap binary is missing

  • [ ] Container configuration (lxc config show NAME --expanded)
root@iic-worker-203-gpu:~# lxc config show chenfeng --expanded
architecture: x86_64
config:
 image.architecture: amd64
 image.description: Ubuntu focal amd64 (20210606_07:42)
 image.name: ubuntu-focal-amd64-default-20210606_07:42
 image.os: ubuntu
 image.release: focal
 image.serial: "20210606_07:42"
 image.variant: default
 volatile.base_image: 2b97dfd0c6e5cb8243f67860a75fbdeab3acccde05cd6782cabe65f2c79e6ef4
 volatile.eth0.host_name: veth97584cad
 volatile.eth0.hwaddr: 00:16:3e:e4:31:a8
 volatile.idmap.base: "0"
 volatile.idmap.current: '[{"Isuid":true,"Isgid":false,"Hostid":1000000,"Nsid":0,"Maprange":1000000000},{"Isuid":false,"Isgid":true,"Hostid":1000000,"Nsid":0,"Maprange":1000000000}]'
 volatile.idmap.next: '[{"Isuid":true,"Isgid":false,"Hostid":1000000,"Nsid":0,"Maprange":1000000000},{"Isuid":false,"Isgid":true,"Hostid":1000000,"Nsid":0,"Maprange":1000000000}]'
 volatile.last_state.idmap: '[{"Isuid":true,"Isgid":false,"Hostid":1000000,"Nsid":0,"Maprange":1000000000},{"Isuid":false,"Isgid":true,"Hostid":1000000,"Nsid":0,"Maprange":1000000000}]'
 volatile.last_state.power: RUNNING
 volatile.uuid: 3d6c1c00-0292-459b-a623-ed3f8d56748e
devices:
 eth0:
   name: eth0
   network: lxdfan0
   type: nic
 gpu:
   type: gpu
 proxy0:
   bind: host
   connect: tcp:240.203.0.107:22
   listen: tcp:10.102.32.203:60029
   type: proxy
 root:
   path: /
   pool: pool1
   size: 1024GB
   type: disk
ephemeral: false
profiles:
- default1T
stateful: false


  • [ ] Main daemon log (at /var/log/lxd/lxd.log or /var/snap/lxd/common/lxd/logs/lxd.log)
    lxd.log
  • [ ] Output of the client with --debug
  • [ ] Output of the daemon with --debug (alternatively output of lxc monitor while reproducing the issue)

Please show ‘ip a’ on host and container, as well as ‘lxc network show lxdfan0’ thanks.

Your eth0 in the container is down so we need to ascertain why that is.

‘ip a’ on host

root@iic-worker-203-gpu:~# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host
       valid_lft forever preferred_lft forever
2: enp129s0f0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP group default qlen 1000
    link/ether ac:1f:6b:99:02:e2 brd ff:ff:ff:ff:ff:ff
    inet 10.102.32.203/21 brd 10.102.39.255 scope global enp129s0f0
       valid_lft forever preferred_lft forever
    inet6 fe80::ae1f:6bff:fe99:2e2/64 scope link
       valid_lft forever preferred_lft forever
3: enp129s0f1: <BROADCAST,MULTICAST> mtu 1500 qdisc noop state DOWN group default qlen 1000
    link/ether ac:1f:6b:99:02:e3 brd ff:ff:ff:ff:ff:ff
4: docker0: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc noqueue state DOWN group default
    link/ether 02:42:cf:42:b8:91 brd ff:ff:ff:ff:ff:ff
    inet 172.17.0.1/16 brd 172.17.255.255 scope global docker0
       valid_lft forever preferred_lft forever
64: lxdfan0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1450 qdisc noqueue state UP group default qlen 1000
    link/ether 00:16:3e:4b:7e:7d brd ff:ff:ff:ff:ff:ff
    inet 240.203.0.1/8 scope global lxdfan0
       valid_lft forever preferred_lft forever
    inet6 fe80::216:3eff:fe4b:7e7d/64 scope link
       valid_lft forever preferred_lft forever
65: lxdfan0-mtu: <BROADCAST,NOARP,UP,LOWER_UP> mtu 1450 qdisc noqueue master lxdfan0 state UNKNOWN group default qlen 1000
    link/ether 96:b1:ad:eb:30:a9 brd ff:ff:ff:ff:ff:ff
    inet6 fe80::94b1:adff:feeb:30a9/64 scope link
       valid_lft forever preferred_lft forever
66: lxdfan0-fan: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1450 qdisc noqueue master lxdfan0 state UNKNOWN group default qlen 1000
    link/ether 8a:1d:0b:b0:1d:54 brd ff:ff:ff:ff:ff:ff
    inet6 fe80::881d:bff:feb0:1d54/64 scope link
       valid_lft forever preferred_lft forever
68: vethae8b329c@if67: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1450 qdisc noqueue master lxdfan0 state UP group default qlen 1000
    link/ether d2:4b:6e:9e:b2:36 brd ff:ff:ff:ff:ff:ff link-netnsid 0
72: vethd258323d@if71: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1450 qdisc noqueue master lxdfan0 state UP group default qlen 1000
    link/ether 0e:1a:ea:3d:65:b4 brd ff:ff:ff:ff:ff:ff link-netnsid 2
74: vethab0b8f6e@if73: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1450 qdisc noqueue master lxdfan0 state UP group default qlen 1000
    link/ether ea:98:b1:11:4f:11 brd ff:ff:ff:ff:ff:ff link-netnsid 3
76: vethcc1257dc@if75: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1450 qdisc noqueue master lxdfan0 state UP group default qlen 1000
    link/ether 1e:1e:8b:fa:68:99 brd ff:ff:ff:ff:ff:ff link-netnsid 4
78: vethc28e4581@if77: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1450 qdisc noqueue master lxdfan0 state UP group default qlen 1000
    link/ether 4a:07:10:01:3e:a7 brd ff:ff:ff:ff:ff:ff link-netnsid 5
80: veth2fbea4cc@if79: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1450 qdisc noqueue master lxdfan0 state UP group default qlen 1000
    link/ether 3e:e4:26:6a:33:2b brd ff:ff:ff:ff:ff:ff link-netnsid 6
82: veth2dc4da7c@if81: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1450 qdisc noqueue master lxdfan0 state UP group default qlen 1000
    link/ether 9a:c5:d8:13:ea:98 brd ff:ff:ff:ff:ff:ff link-netnsid 7
84: veth0793ff0c@if83: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1450 qdisc noqueue master lxdfan0 state UP group default qlen 1000
    link/ether 16:2f:88:66:dc:1d brd ff:ff:ff:ff:ff:ff link-netnsid 8
86: vethc5957ac4@if85: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1450 qdisc noqueue master lxdfan0 state UP group default qlen 1000
    link/ether 32:c6:6c:e5:12:88 brd ff:ff:ff:ff:ff:ff link-netnsid 9
88: veth78d90231@if87: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1450 qdisc noqueue master lxdfan0 state UP group default qlen 1000
    link/ether ba:59:7b:65:49:d7 brd ff:ff:ff:ff:ff:ff link-netnsid 11
90: vethee907128@if89: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1450 qdisc noqueue master lxdfan0 state UP group default qlen 1000
    link/ether 8a:26:cd:b2:90:01 brd ff:ff:ff:ff:ff:ff link-netnsid 12
92: veth3eb5d558@if91: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1450 qdisc noqueue master lxdfan0 state UP group default qlen 1000
    link/ether 5a:44:47:55:12:2e brd ff:ff:ff:ff:ff:ff link-netnsid 13
94: veth13aeae9a@if93: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1450 qdisc noqueue master lxdfan0 state UP group default qlen 1000
    link/ether 3a:4f:fe:83:3b:86 brd ff:ff:ff:ff:ff:ff link-netnsid 14
96: veth85a39372@if95: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1450 qdisc noqueue master lxdfan0 state UP group default qlen 1000
    link/ether 2e:5d:04:6d:14:df brd ff:ff:ff:ff:ff:ff link-netnsid 15
98: vethf4727148@if97: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1450 qdisc noqueue master lxdfan0 state UP group default qlen 1000
    link/ether 4e:62:1d:a3:39:5d brd ff:ff:ff:ff:ff:ff link-netnsid 16
100: veth26023477@if99: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1450 qdisc noqueue master lxdfan0 state UP group default qlen 1000
    link/ether be:f1:c8:aa:53:d1 brd ff:ff:ff:ff:ff:ff link-netnsid 17
102: veth2a40797c@if101: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1450 qdisc noqueue master lxdfan0 state UP group default qlen 1000
    link/ether be:f7:97:68:f8:2f brd ff:ff:ff:ff:ff:ff link-netnsid 18
104: veth6a79b680@if103: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1450 qdisc noqueue master lxdfan0 state UP group default qlen 1000
    link/ether ca:d2:28:b7:98:51 brd ff:ff:ff:ff:ff:ff link-netnsid 19
106: vethf9545d38@if105: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1450 qdisc noqueue master lxdfan0 state UP group default qlen 1000
    link/ether 3a:43:07:a3:ab:d3 brd ff:ff:ff:ff:ff:ff link-netnsid 20
108: vethfab8449d@if107: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1450 qdisc noqueue master lxdfan0 state UP group default qlen 1000
    link/ether 52:b9:ae:78:d9:5d brd ff:ff:ff:ff:ff:ff link-netnsid 1
112: veth897e9909@if111: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1450 qdisc noqueue master lxdfan0 state UP group default qlen 1000
    link/ether ce:35:4a:e0:18:86 brd ff:ff:ff:ff:ff:ff link-netnsid 22
114: vethb2ce4e10@if113: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1450 qdisc noqueue master lxdfan0 state UP group default qlen 1000
    link/ether 7e:c5:73:ad:db:4c brd ff:ff:ff:ff:ff:ff link-netnsid 23
116: vethc625d046@if115: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1450 qdisc noqueue master lxdfan0 state UP group default qlen 1000
    link/ether 16:aa:b6:aa:ad:52 brd ff:ff:ff:ff:ff:ff link-netnsid 24
118: veth4099c67d@if117: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1450 qdisc noqueue master lxdfan0 state UP group default qlen 1000
    link/ether 3e:c0:88:66:f3:97 brd ff:ff:ff:ff:ff:ff link-netnsid 25

‘ip a’ on container

root@iic-worker-203-gpu:~# lxc exec chenfeng bash
root@chenfeng:~# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host
       valid_lft forever preferred_lft forever
121: eth0@if122: <BROADCAST,MULTICAST> mtu 1450 qdisc noqueue state DOWN group default qlen 1000
    link/ether 00:16:3e:e4:31:a8 brd ff:ff:ff:ff:ff:ff link-netnsid 0

‘lxc network show lxdfan0’

root@iic-worker-203-gpu:~# lxc network show lxdfan0
config:
  bridge.mode: fan
  fan.underlay_subnet: 10.102.32.0/24
  ipv4.nat: "true"
description: ""
name: lxdfan0
type: bridge
used_by:
- /1.0/instances/bowen
- /1.0/instances/chenfeng
- /1.0/instances/chengt
- /1.0/instances/cuisijia
- /1.0/instances/fujian
- /1.0/instances/haodd
- /1.0/instances/hegh
- /1.0/instances/huhy
- /1.0/instances/liuk
- /1.0/instances/luoq
- /1.0/instances/lvkai
- /1.0/instances/lzzz
- /1.0/instances/mayifan
- /1.0/instances/qiny
- /1.0/instances/shimw
- /1.0/instances/starnight
- /1.0/instances/sunhaofei
- /1.0/instances/taoym
- /1.0/instances/template-no-CUDA
- /1.0/instances/template1
- /1.0/instances/wangtl
- /1.0/instances/xczhw
- /1.0/instances/xczhw01
- /1.0/instances/xiaoly
- /1.0/instances/xueyh
- /1.0/instances/yangjian
- /1.0/instances/yantk
- /1.0/instances/yezm
- /1.0/instances/zhanglx
- /1.0/instances/zhangqg
- /1.0/instances/zhangxl
- /1.0/instances/zhaojichao
- /1.0/instances/zhaozr
- /1.0/instances/zhenzhen
- /1.0/instances/zhongsy
- /1.0/instances/zhuangh
- /1.0/profiles/default
- /1.0/profiles/default1T
- /1.0/profiles/default2T
managed: true
status: Created
locations:
- iic-worker-203-gpu

Ok all looks fine except eth0 being down in the container.

What network configuration do you have inside the container?

here is the output of ‘ip addr’

root@chenfeng:~# ip addr
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host
       valid_lft forever preferred_lft forever
123: eth0@if124: <BROADCAST,MULTICAST> mtu 1450 qdisc noqueue state DOWN group default qlen 1000
    link/ether 00:16:3e:e4:31:a8 brd ff:ff:ff:ff:ff:ff link-netnsid 0

Can I reset the IPv4 address this way, and if so, what is the correct way? Thanks!

root@iic-worker-203-gpu:~# lxc config device set chenfeng eth0 ipv4.address=240.203.0.899
Error: Device from profile(s) cannot be modified for individual instance. Override device or modify profile instead

We need to figure out why the interface is down first before considering static DHCP lease settings.

Can you show me lxc exec chenfeng -- cat /etc/netplan/10-lxc.yaml

Also what happens if you run lxc exec chenfeng -- ip link set eth0 up?

1 Like
root@iic-worker-203-gpu:~#  lxc exec chenfeng -- cat /etc/netplan/10-lxc.yaml
network:
  version: 2
  ethernets:
    eth0:
      dhcp4: true
      dhcp-identifier: mac

There was no output from ‘lxc exec chenfeng -- ip link set eth0 up’.
The Container has Ipv4 Now!!!
I checked ‘lxc list’ after running ‘lxc exec chenfeng -- ip link set eth0 up’ and found that my container has an IPv4 address.

Thanks a lot!

So what we need to find out is what is setting it down.

If you reboot the container and it breaks again can you run lxc exec chenfeng -- netplan apply to see if netplan config is working OK, and if that works it suggests something else in the container (perhaps docker) is setting the interface down.

I restarted the container and ipv4 disappeared again.
And there was no output from ‘lxc exec chenfeng -- netplan apply’:

root@iic-worker-203-gpu:~# lxc exec chenfeng -- netplan apply
root@iic-worker-203-gpu:~#

I have to manually run ‘lxc exec chenfeng -- ip link set eth0 up’ for the container to get its IPv4 address again.

Anything in the container’s logs suggesting what is going on?

Here are some files in /var/snap/lxd/common/lxd/logs/chenfeng/

# lxc.log
lxc chenfeng 20220428100219.772 WARN     conf - conf.c:lxc_map_ids:3592 - newuidmap binary is missing
lxc chenfeng 20220428100219.772 WARN     conf - conf.c:lxc_map_ids:3598 - newgidmap binary is missing
lxc chenfeng 20220428100219.773 WARN     conf - conf.c:lxc_map_ids:3592 - newuidmap binary is missing
lxc chenfeng 20220428100219.773 WARN     conf - conf.c:lxc_map_ids:3598 - newgidmap binary is missing
lxc chenfeng 20220428100219.774 WARN     cgfsng - cgroups/cgfsng.c:fchowmodat:1252 - No such file or directory - Failed to fchownat(40, memory.oom.group, 1000000000, 0, AT_EMPTY_PATH | AT_SYMLINK_NOFOLLOW )
lxc chenfeng 20220428100254.928 WARN     conf - conf.c:lxc_map_ids:3592 - newuidmap binary is missing
lxc chenfeng 20220428100254.928 WARN     conf - conf.c:lxc_map_ids:3598 - newgidmap binary is missing
# lxc.log.old
lxc chenfeng 20220428095639.789 WARN     conf - conf.c:lxc_map_ids:3592 - newuidmap binary is missing
lxc chenfeng 20220428095639.790 WARN     conf - conf.c:lxc_map_ids:3598 - newgidmap binary is missing
lxc chenfeng 20220428095639.803 WARN     conf - conf.c:lxc_map_ids:3592 - newuidmap binary is missing
lxc chenfeng 20220428095639.804 WARN     conf - conf.c:lxc_map_ids:3598 - newgidmap binary is missing
lxc chenfeng 20220428095639.814 WARN     cgfsng - cgroups/cgfsng.c:fchowmodat:1252 - No such file or directory - Failed to fchownat(40, memory.oom.group, 1000000000, 0, AT_EMPTY_PATH | AT_SYMLINK_NOFOLLOW )
lxc chenfeng 20220428095751.412 WARN     conf - conf.c:lxc_map_ids:3592 - newuidmap binary is missing
lxc chenfeng 20220428095751.412 WARN     conf - conf.c:lxc_map_ids:3598 - newgidmap binary is missing
lxc chenfeng 20220428100217.415 WARN     conf - conf.c:lxc_map_ids:3592 - newuidmap binary is missing
lxc chenfeng 20220428100217.415 WARN     conf - conf.c:lxc_map_ids:3598 - newgidmap binary is missing

# console.log
systemd 245.4-4ubuntu3.15 running in system mode. (+PAM +AUDIT +SELINUX +IMA +APPARMOR +SMACK +SYSVINIT +UTMP +LIBCRYPTSETUP +GCRYPT +GNUTLS +ACL +XZ +LZ4 +SECCOMP +BLKID +ELFUTILS +KMOD +IDN2 -IDN +PCRE2 default-hierarchy=hybrid)
Detected virtualization lxc.
Detected architecture x86-64.

Welcome to Ubuntu 20.04.2 LTS!

Set hostname to <chenfeng>.
[  OK  ] Created slice system-modprobe.slice.
[  OK  ] Created slice User and Session Slice.
[  OK  ] Started Forward Password Requests to Wall Directory Watch.
[UNSUPP] Starting of Arbitrary Executable Fi…tem Automount Point not supported.
[  OK  ] Reached target User and Group Name Lookups.
[  OK  ] Reached target Slices.
[  OK  ] Reached target Swap.
[  OK  ] Listening on Device-mapper event daemon FIFOs.
[  OK  ] Listening on LVM2 poll daemon socket.
[  OK  ] Listening on multipathd control socket.
[  OK  ] Listening on Syslog Socket.
[  OK  ] Listening on initctl Compatibility Named Pipe.
[  OK  ] Listening on Journal Socket (/dev/log).
[  OK  ] Listening on Journal Socket.
[  OK  ] Listening on Network Service Netlink Socket.
[  OK  ] Listening on udev Control Socket.
[  OK  ] Listening on udev Kernel Socket.
         Starting Journal Service...
         Starting Set the console keyboard layout...
         Starting Remount Root and Kernel File Systems...
         Starting Apply Kernel Variables...
         Starting udev Coldplug all Devices...
[  OK  ] Finished Remount Root and Kernel File Systems.
         Starting Create System Users...
[  OK  ] Finished Apply Kernel Variables.
[  OK  ] Finished Create System Users.
         Starting Create Static Device Nodes in /dev...
[  OK  ] Finished Set the console keyboard layout.
[  OK  ] Started Journal Service.
         Starting Flush Journal to Persistent Storage...
[  OK  ] Finished Create Static Device Nodes in /dev.
         Starting udev Kernel Device Manager...
[  OK  ] Started udev Kernel Device Manager.
         Starting Network Service...
[  OK  ] Started Network Service.
         Starting Wait for Network to be Configured...
[  OK  ] Finished udev Coldplug all Devices.
[  OK  ] Started Dispatch Password Requests to Console Directory Watch.
[  OK  ] Reached target Local Encrypted Volumes.
         Starting udev Wait for Complete Device Initialization...
[  OK  ] Finished Flush Journal to Persistent Storage.
[  OK  ] Finished udev Wait for Complete Device Initialization.
[  OK  ] Reached target Local File Systems (Pre).
[  OK  ] Reached target Local File Systems.
         Starting Load AppArmor profiles...
         Starting Set console font and keymap...
         Starting Create final runtime dir for shutdown pivot root...
         Starting Tell Plymouth To Write Out Runtime Data...
         Starting Create Volatile Files and Directories...
[  OK  ] Finished Create final runtime dir for shutdown pivot root.
[  OK  ] Finished Set console font and keymap.
[  OK  ] Finished Tell Plymouth To Write Out Runtime Data.
[  OK  ] Finished Create Volatile Files and Directories.
         Starting Network Name Resolution...
[  OK  ] Reached target System Time Set.
[  OK  ] Reached target System Time Synchronized.
         Starting Update UTMP about System Boot/Shutdown...
[  OK  ] Finished Update UTMP about System Boot/Shutdown.
[FAILED] Failed to start Load AppArmor profiles.
See 'systemctl status apparmor.service' for details.
[  OK  ] Reached target System Initialization.
[  OK  ] Started Daily apt download activities.
[  OK  ] Started Daily apt upgrade and clean activities.
[  OK  ] Started Periodic ext4 Online Metadata Check for All Filesystems.
[  OK  ] Started Refresh fwupd metadata regularly.
[  OK  ] Started Daily rotation of log files.
[  OK  ] Started Daily man-db regeneration.
[  OK  ] Started Message of the Day.
[  OK  ] Started Daily Cleanup of Temporary Directories.
[  OK  ] Started Ubuntu Advantage update messaging.
[  OK  ] Reached target Paths.
[  OK  ] Reached target Timers.
[  OK  ] Listening on Unix socket for apport crash forwarding.
[  OK  ] Listening on Avahi mDNS/DNS-SD Stack Activation Socket.
[  OK  ] Listening on D-Bus System Message Bus Socket.
         Starting Docker Socket for the API.
[  OK  ] Listening on Open-iSCSI iscsid Socket.
[  OK  ] Listening on Docker Socket for the API.
[  OK  ] Reached target Sockets.
[  OK  ] Reached target Basic System.
         Starting Accounts Service...
         Starting Avahi mDNS/DNS-SD Stack...
[  OK  ] Started D-Bus System Message Bus.
[  OK  ] Started Save initial kernel messages after boot.
         Starting Remove Stale Online ext4 Metadata Check Snapshots...
         Starting Turn off network device...
         Starting Dispatcher daemon for systemd-networkd...
         Starting Authorization Manager...
         Starting Restore /etc/resolv.conf i…fore the ppp link was shut down...
         Starting System Logging Service...
         Starting Switcheroo Control Proxy service...
         Starting Login Service...
         Starting Disk Manager...
         Starting WPA supplicant...
[  OK  ] Finished Wait for Network to be Configured.
[  OK  ] Finished Remove Stale Online ext4 Metadata Check Snapshots.
[  OK  ] Finished Turn off network device.
[  OK  ] Finished Restore /etc/resolv.conf i…before the ppp link was shut down.
         Starting Network Manager...
[  OK  ] Started System Logging Service.
[  OK  ] Started Network Name Resolution.
[  OK  ] Reached target Host and Network Name Lookups.
[  OK  ] Started WPA supplicant.
[  OK  ] Started Avahi mDNS/DNS-SD Stack.
[  OK  ] Started Switcheroo Control Proxy service.
[  OK  ] Started Authorization Manager.
         Starting Modem Manager...
[  OK  ] Started Login Service.
[  OK  ] Started Network Manager.
[  OK  ] Reached target Network.
         Starting Network Manager Wait Online...
         Starting containerd container runtime...
[  OK  ] Started Unattended Upgrades Shutdown.
[  OK  ] Started Disk Manager.
         Starting Hostname Service...
[  OK  ] Started Accounts Service.
[  OK  ] Started Modem Manager.
[  OK  ] Started Dispatcher daemon for systemd-networkd.
[  OK  ] Started Hostname Service.
         Starting Network Manager Script Dispatcher Service...
[  OK  ] Finished Network Manager Wait Online.
[  OK  ] Reached target Network is Online.
[  OK  ] Reached target Remote File Systems (Pre).
[  OK  ] Reached target Remote File Systems.
         Starting LSB: automatic crash report generation...
         Starting Deferred execution scheduler...
         Starting Availability of block devices...
[  OK  ] Started Regular background program processing daemon.
         Starting OpenBSD Secure Shell server...
         Starting Permit User Sessions...
[  OK  ] Started Deferred execution scheduler.
[  OK  ] Started containerd container runtime.
[  OK  ] Finished Availability of block devices.
[  OK  ] Started Network Manager Script Dispatcher Service.
         Starting Docker Application Container Engine...
[  OK  ] Finished Permit User Sessions.
         Starting GNOME Display Manager...
         Starting Hold until boot process finishes up...
[  OK  ] Started LSB: automatic crash report generation.
[  OK  ] Finished Hold until boot process finishes up.
[  OK  ] Started Console Getty.
[  OK  ] Reached target Login Prompts.
[  OK  ] Created slice system-getty.slice.
[  OK  ] Started OpenBSD Secure Shell server.
[  OK  ] Started Docker Application Container Engine.
[  OK  ] Reached target Multi-User System.


Ubuntu 20.04.2 LTS chenfeng console

chenfeng login: [  OK  ] Removed slice system-getty.slice.
[  OK  ] Removed slice system-modprobe.slice.
[  OK  ] Stopped target Graphical Interface.
[  OK  ] Stopped target Multi-User System.
[  OK  ] Stopped target Login Prompts.
[  OK  ] Stopped target Host and Network Name Lookups.
[  OK  ] Stopped target Timers.
[  OK  ] Stopped Daily apt upgrade and clean activities.
[  OK  ] Stopped Daily apt download activities.
[  OK  ] Stopped Periodic ext4 Online Metadata Check for All Filesystems.
[  OK  ] Stopped Refresh fwupd metadata regularly.
[  OK  ] Stopped Daily rotation of log files.
[  OK  ] Stopped Daily man-db regeneration.
[  OK  ] Stopped Message of the Day.
[  OK  ] Stopped Daily Cleanup of Temporary Directories.
[  OK  ] Stopped Ubuntu Advantage update messaging.
[  OK  ] Stopped target System Time Synchronized.
[  OK  ] Stopped target System Time Set.
[  OK  ] Reached target Unmount All Filesystems.
[  OK  ] Closed LVM2 poll daemon socket.
         Stopping Modem Manager...
         Stopping Accounts Service...
         Stopping LSB: automatic crash report generation...
         Stopping Deferred execution scheduler...
         Stopping Avahi mDNS/DNS-SD Stack...
         Stopping Availability of block devices...
         Stopping Console Getty...
         Stopping Regular background program processing daemon...
         Stopping Docker Application Container Engine...
         Stopping Create final runtime dir for shutdown pivot root...
         Stopping GNOME Display Manager...
         Stopping Dispatcher daemon for systemd-networkd...
         Stopping System Logging Service...
         Stopping OpenBSD Secure Shell server...
         Stopping Switcheroo Control Proxy service...
         Stopping Disk Manager...
         Stopping Unattended Upgrades Shutdown...
[  OK  ] Stopped Accounts Service.
[  OK  ] Stopped Avahi mDNS/DNS-SD Stack.
[  OK  ] Stopped Dispatcher daemon for systemd-networkd.
[  OK  ] Stopped System Logging Service.
[  OK  ] Stopped Switcheroo Control Proxy service.
[  OK  ] Stopped Disk Manager.
[  OK  ] Stopped Modem Manager.
[  OK  ] Stopped Unattended Upgrades Shutdown.
[  OK  ] Stopped Regular background program processing daemon.
[  OK  ] Stopped Deferred execution scheduler.
[  OK  ] Stopped Docker Application Container Engine.
[  OK  ] Stopped OpenBSD Secure Shell server.
[  OK  ] Stopped Console Getty.
[  OK  ] Stopped GNOME Display Manager.
[  OK  ] Stopped target Network is Online.
[  OK  ] Stopped Network Manager Wait Online.
         Stopping containerd container runtime...
         Stopping Authorization Manager...
         Stopping Login Service...
[  OK  ] Stopped Wait for Network to be Configured.
         Stopping Permit User Sessions...
[  OK  ] Stopped containerd container runtime.
[  OK  ] Stopped LSB: automatic crash report generation.
[  OK  ] Stopped Permit User Sessions.
[  OK  ] Stopped target Network.
[  OK  ] Stopped target Remote File Systems.
[  OK  ] Stopped target Remote File Systems (Pre).
         Stopping Network Manager...
         Stopping Network Name Resolution...
         Stopping WPA supplicant...
[  OK  ] Stopped Login Service.
[  OK  ] Stopped WPA supplicant.
[  OK  ] Stopped target User and Group Name Lookups.
[  OK  ] Stopped Availability of block devices.
[  OK  ] Stopped Authorization Manager.
[  OK  ] Stopped Network Name Resolution.
         Stopping Network Service...
[  OK  ] Stopped Network Manager.
         Stopping D-Bus System Message Bus...
[  OK  ] Stopped Turn off network device.
[  OK  ] Stopped Network Service.
[  OK  ] Stopped D-Bus System Message Bus.
[  OK  ] Stopped target Basic System.
[  OK  ] Stopped target Paths.
[  OK  ] Stopped target Slices.
[  OK  ] Removed slice User and Session Slice.
[  OK  ] Stopped target Sockets.
[  OK  ] Closed Unix socket for apport crash forwarding.
[  OK  ] Closed Avahi mDNS/DNS-SD Stack Activation Socket.
[  OK  ] Closed D-Bus System Message Bus Socket.
[  OK  ] Closed Docker Socket for the API.
[  OK  ] Closed Open-iSCSI iscsid Socket.
[  OK  ] Stopped target System Initialization.
[  OK  ] Stopped target Local Encrypted Volumes.
[  OK  ] Stopped Dispatch Password Requests to Console Directory Watch.
[  OK  ] Stopped Forward Password Requests to Wall Directory Watch.
[  OK  ] Stopped target Swap.
[  OK  ] Closed Syslog Socket.
[  OK  ] Stopped Apply Kernel Variables.
         Stopping Update UTMP about System Boot/Shutdown...
[  OK  ] Stopped Update UTMP about System Boot/Shutdown.
[  OK  ] Stopped Create Volatile Files and Directories.
[  OK  ] Stopped Create final runtime dir for shutdown pivot root.
[  OK  ] Stopped target Local File Systems.
[  OK  ] Stopped target Local File Systems (Pre).
[  OK  ] Stopped Create Static Device Nodes in /dev.
[  OK  ] Stopped Create System Users.
[  OK  ] Stopped Remount Root and Kernel File Systems.
[  OK  ] Reached target Shutdown.
[  OK  ] Reached target Final Step.
         Starting Halt...
systemd 245.4-4ubuntu3.15 running in system mode. (+PAM +AUDIT +SELINUX +IMA +APPARMOR +SMACK +SYSVINIT +UTMP +LIBCRYPTSETUP +GCRYPT +GNUTLS +ACL +XZ +LZ4 +SECCOMP +BLKID +ELFUTILS +KMOD +IDN2 -IDN +PCRE2 default-hierarchy=hybrid)
Detected virtualization lxc.
Detected architecture x86-64.

Welcome to Ubuntu 20.04.2 LTS!

Set hostname to <chenfeng>.
[  OK  ] Created slice system-modprobe.slice.
[  OK  ] Created slice User and Session Slice.
[  OK  ] Started Forward Password Requests to Wall Directory Watch.
[UNSUPP] Starting of Arbitrary Executable Fi…tem Automount Point not supported.
[  OK  ] Reached target User and Group Name Lookups.
[  OK  ] Reached target Slices.
[  OK  ] Reached target Swap.
[  OK  ] Listening on Device-mapper event daemon FIFOs.
[  OK  ] Listening on LVM2 poll daemon socket.
[  OK  ] Listening on multipathd control socket.
[  OK  ] Listening on Syslog Socket.
[  OK  ] Listening on initctl Compatibility Named Pipe.
[  OK  ] Listening on Journal Socket (/dev/log).
[  OK  ] Listening on Journal Socket.
[  OK  ] Listening on Network Service Netlink Socket.
[  OK  ] Listening on udev Control Socket.
[  OK  ] Listening on udev Kernel Socket.
         Starting Journal Service...
         Starting Set the console keyboard layout...
         Starting Remount Root and Kernel File Systems...
         Starting Apply Kernel Variables...
         Starting udev Coldplug all Devices...
[  OK  ] Finished Remount Root and Kernel File Systems.
         Starting Create System Users...
[  OK  ] Finished Apply Kernel Variables.
[  OK  ] Finished Create System Users.
         Starting Create Static Device Nodes in /dev...
[  OK  ] Started Journal Service.
         Starting Flush Journal to Persistent Storage...
[  OK  ] Finished Set the console keyboard layout.
[  OK  ] Finished Create Static Device Nodes in /dev.
         Starting udev Kernel Device Manager...
[  OK  ] Started udev Kernel Device Manager.
         Starting Network Service...
[  OK  ] Started Network Service.
         Starting Wait for Network to be Configured...
[  OK  ] Finished udev Coldplug all Devices.
[  OK  ] Started Dispatch Password Requests to Console Directory Watch.
[  OK  ] Reached target Local Encrypted Volumes.
         Starting udev Wait for Complete Device Initialization...
[  OK  ] Finished Flush Journal to Persistent Storage.
[  OK  ] Finished udev Wait for Complete Device Initialization.
[  OK  ] Finished Wait for Network to be Configured.
[  OK  ] Reached target Local File Systems (Pre).
[  OK  ] Reached target Local File Systems.
         Starting Load AppArmor profiles...
         Starting Set console font and keymap...
         Starting Create final runtime dir for shutdown pivot root...
         Starting Tell Plymouth To Write Out Runtime Data...
         Starting Create Volatile Files and Directories...
[  OK  ] Finished Create final runtime dir for shutdown pivot root.
[  OK  ] Finished Set console font and keymap.
[  OK  ] Finished Tell Plymouth To Write Out Runtime Data.
[  OK  ] Finished Create Volatile Files and Directories.
         Starting Network Name Resolution...
[  OK  ] Reached target System Time Set.
[  OK  ] Reached target System Time Synchronized.
         Starting Update UTMP about System Boot/Shutdown...
[  OK  ] Finished Update UTMP about System Boot/Shutdown.
[FAILED] Failed to start Load AppArmor profiles.
See 'systemctl status apparmor.service' for details.
[  OK  ] Reached target System Initialization.
[  OK  ] Started Daily apt download activities.
[  OK  ] Started Daily apt upgrade and clean activities.
[  OK  ] Started Periodic ext4 Online Metadata Check for All Filesystems.
[  OK  ] Started Refresh fwupd metadata regularly.
[  OK  ] Started Daily rotation of log files.
[  OK  ] Started Daily man-db regeneration.
[  OK  ] Started Message of the Day.
[  OK  ] Started Daily Cleanup of Temporary Directories.
[  OK  ] Started Ubuntu Advantage update messaging.
[  OK  ] Reached target Paths.
[  OK  ] Reached target Timers.
[  OK  ] Listening on Unix socket for apport crash forwarding.
[  OK  ] Listening on Avahi mDNS/DNS-SD Stack Activation Socket.
[  OK  ] Listening on D-Bus System Message Bus Socket.
         Starting Docker Socket for the API.
[  OK  ] Listening on Open-iSCSI iscsid Socket.
[  OK  ] Listening on Docker Socket for the API.
[  OK  ] Reached target Sockets.
[  OK  ] Reached target Basic System.
         Starting Accounts Service...
         Starting Avahi mDNS/DNS-SD Stack...
[  OK  ] Started D-Bus System Message Bus.
[  OK  ] Started Save initial kernel messages after boot.
         Starting Remove Stale Online ext4 Metadata Check Snapshots...
         Starting Turn off network device...
         Starting Dispatcher daemon for systemd-networkd...
         Starting Authorization Manager...
         Starting Restore /etc/resolv.conf i…fore the ppp link was shut down...
         Starting System Logging Service...
         Starting Switcheroo Control Proxy service...
         Starting Login Service...
         Starting Disk Manager...
         Starting WPA supplicant...
[  OK  ] Finished Turn off network device.
[  OK  ] Finished Remove Stale Online ext4 Metadata Check Snapshots.
[  OK  ] Finished Restore /etc/resolv.conf i…before the ppp link was shut down.
         Starting Network Manager...
[  OK  ] Started System Logging Service.
[  OK  ] Started Network Name Resolution.
[  OK  ] Reached target Host and Network Name Lookups.
[  OK  ] Started Avahi mDNS/DNS-SD Stack.
[  OK  ] Started WPA supplicant.
[  OK  ] Started Switcheroo Control Proxy service.
[  OK  ] Started Authorization Manager.
         Starting Modem Manager...
[  OK  ] Started Network Manager.
[  OK  ] Reached target Network.
         Starting Network Manager Wait Online...
         Starting containerd container runtime...
[  OK  ] Started Login Service.
         Starting Hostname Service...
[  OK  ] Started Unattended Upgrades Shutdown.
[  OK  ] Started Disk Manager.
[  OK  ] Started Accounts Service.
[  OK  ] Started Modem Manager.
[  OK  ] Started Dispatcher daemon for systemd-networkd.
[  OK  ] Started Hostname Service.
         Starting Network Manager Script Dispatcher Service...
[  OK  ] Finished Network Manager Wait Online.
[  OK  ] Started containerd container runtime.
[  OK  ] Reached target Network is Online.
         Starting Docker Application Container Engine...
[  OK  ] Reached target Remote File Systems (Pre).
[  OK  ] Reached target Remote File Systems.
         Starting LSB: automatic crash report generation...
         Starting Deferred execution scheduler...
         Starting Availability of block devices...
[  OK  ] Started Regular background program processing daemon.
         Starting OpenBSD Secure Shell server...
         Starting Permit User Sessions...
[  OK  ] Started Deferred execution scheduler.
[  OK  ] Finished Availability of block devices.
[  OK  ] Started Network Manager Script Dispatcher Service.
[  OK  ] Finished Permit User Sessions.
         Starting GNOME Display Manager...
         Starting Hold until boot process finishes up...
[  OK  ] Started LSB: automatic crash report generation.
[  OK  ] Finished Hold until boot process finishes up.
[  OK  ] Started Console Getty.
[  OK  ] Reached target Login Prompts.
[  OK  ] Created slice system-getty.slice.
[  OK  ] Started OpenBSD Secure Shell server.
[  OK  ] Started Docker Application Container Engine.
[  OK  ] Reached target Multi-User System.


Ubuntu 20.04.2 LTS chenfeng console

chenfeng login: 
# forkexec.log
Closing unexpected file descriptor 17 -> unknown
# forkstart.log is empty
# lxc.conf
lxc.log.file = /var/snap/lxd/common/lxd/logs/chenfeng/lxc.log
lxc.log.level = warn
lxc.console.buffer.size = auto
lxc.console.size = auto
lxc.console.logfile = /var/snap/lxd/common/lxd/logs/chenfeng/console.log
lxc.mount.auto = proc:rw sys:rw cgroup:mixed
lxc.autodev = 1
lxc.pty.max = 1024
lxc.mount.entry = /dev/fuse dev/fuse none bind,create=file,optional 0 0
lxc.mount.entry = /dev/net/tun dev/net/tun none bind,create=file,optional 0 0
lxc.mount.entry = /proc/sys/fs/binfmt_misc proc/sys/fs/binfmt_misc none rbind,create=dir,optional 0 0
lxc.mount.entry = /sys/fs/fuse/connections sys/fs/fuse/connections none rbind,create=dir,optional 0 0
lxc.mount.entry = /sys/fs/pstore sys/fs/pstore none rbind,create=dir,optional 0 0
lxc.mount.entry = /sys/kernel/config sys/kernel/config none rbind,create=dir,optional 0 0
lxc.mount.entry = /sys/kernel/debug sys/kernel/debug none rbind,create=dir,optional 0 0
lxc.mount.entry = /sys/kernel/security sys/kernel/security none rbind,create=dir,optional 0 0
lxc.mount.entry = /sys/kernel/tracing sys/kernel/tracing none rbind,create=dir,optional 0 0
lxc.mount.entry = /dev/mqueue dev/mqueue none rbind,create=dir,optional 0 0
lxc.include = /snap/lxd/current/lxc/config//common.conf.d/
lxc.arch = linux64
lxc.hook.version = 1
lxc.hook.pre-start = /proc/185115/exe callhook /var/snap/lxd/common/lxd "default" "chenfeng" start
lxc.hook.stop = /snap/lxd/current/bin/lxd callhook /var/snap/lxd/common/lxd "default" "chenfeng" stopns
lxc.hook.post-stop = /snap/lxd/current/bin/lxd callhook /var/snap/lxd/common/lxd "default" "chenfeng" stop
lxc.tty.max = 0
lxc.uts.name = chenfeng
lxc.mount.entry = /var/snap/lxd/common/lxd/devlxd dev/lxd none bind,create=dir 0 0
lxc.apparmor.profile = lxd-chenfeng_</var/snap/lxd/common/lxd>//&:lxd-chenfeng_<var-snap-lxd-common-lxd>:
lxc.seccomp.profile = /var/snap/lxd/common/lxd/security/seccomp/chenfeng
lxc.idmap = u 0 1000000 1000000000
lxc.idmap = g 0 1000000 1000000000
lxc.mount.auto = shmounts:/var/snap/lxd/common/lxd/shmounts/chenfeng:/dev/.lxd-mounts
lxc.net.0.type = phys
lxc.net.0.name = eth0
lxc.net.0.flags = up
lxc.net.0.link = vethaa3d4579
lxc.rootfs.path = dir:/var/snap/lxd/common/lxd/storage-pools/pool1/containers/chenfeng/rootfs
lxc.mount.entry = /var/snap/lxd/common/lxd/devices/chenfeng/unix.gpu.dev-dri-card0 dev/dri/card0 none bind,create=file 0 0
lxc.mount.entry = /var/snap/lxd/common/lxd/devices/chenfeng/unix.gpu.dev-dri-card1 dev/dri/card1 none bind,create=file 0 0
lxc.mount.entry = /var/snap/lxd/common/lxd/devices/chenfeng/unix.gpu.dev-dri-renderD128 dev/dri/renderD128 none bind,create=file 0 0
lxc.mount.entry = /var/snap/lxd/common/lxd/devices/chenfeng/unix.gpu.dev-nvidia0 dev/nvidia0 none bind,create=file 0 0
lxc.mount.entry = /var/snap/lxd/common/lxd/devices/chenfeng/unix.gpu.dev-dri-card10 dev/dri/card10 none bind,create=file 0 0
lxc.mount.entry = /var/snap/lxd/common/lxd/devices/chenfeng/unix.gpu.dev-dri-renderD137 dev/dri/renderD137 none bind,create=file 0 0
lxc.mount.entry = /var/snap/lxd/common/lxd/devices/chenfeng/unix.gpu.dev-nvidia9 dev/nvidia9 none bind,create=file 0 0
lxc.mount.entry = /var/snap/lxd/common/lxd/devices/chenfeng/unix.gpu.dev-dri-card2 dev/dri/card2 none bind,create=file 0 0
lxc.mount.entry = /var/snap/lxd/common/lxd/devices/chenfeng/unix.gpu.dev-dri-renderD129 dev/dri/renderD129 none bind,create=file 0 0
lxc.mount.entry = /var/snap/lxd/common/lxd/devices/chenfeng/unix.gpu.dev-nvidia1 dev/nvidia1 none bind,create=file 0 0
lxc.mount.entry = /var/snap/lxd/common/lxd/devices/chenfeng/unix.gpu.dev-dri-card3 dev/dri/card3 none bind,create=file 0 0
lxc.mount.entry = /var/snap/lxd/common/lxd/devices/chenfeng/unix.gpu.dev-dri-renderD130 dev/dri/renderD130 none bind,create=file 0 0
lxc.mount.entry = /var/snap/lxd/common/lxd/devices/chenfeng/unix.gpu.dev-nvidia2 dev/nvidia2 none bind,create=file 0 0
lxc.mount.entry = /var/snap/lxd/common/lxd/devices/chenfeng/unix.gpu.dev-dri-card4 dev/dri/card4 none bind,create=file 0 0
lxc.mount.entry = /var/snap/lxd/common/lxd/devices/chenfeng/unix.gpu.dev-dri-renderD131 dev/dri/renderD131 none bind,create=file 0 0
lxc.mount.entry = /var/snap/lxd/common/lxd/devices/chenfeng/unix.gpu.dev-nvidia3 dev/nvidia3 none bind,create=file 0 0
lxc.mount.entry = /var/snap/lxd/common/lxd/devices/chenfeng/unix.gpu.dev-dri-card5 dev/dri/card5 none bind,create=file 0 0
lxc.mount.entry = /var/snap/lxd/common/lxd/devices/chenfeng/unix.gpu.dev-dri-renderD132 dev/dri/renderD132 none bind,create=file 0 0
lxc.mount.entry = /var/snap/lxd/common/lxd/devices/chenfeng/unix.gpu.dev-nvidia4 dev/nvidia4 none bind,create=file 0 0
lxc.mount.entry = /var/snap/lxd/common/lxd/devices/chenfeng/unix.gpu.dev-dri-card6 dev/dri/card6 none bind,create=file 0 0
lxc.mount.entry = /var/snap/lxd/common/lxd/devices/chenfeng/unix.gpu.dev-dri-renderD133 dev/dri/renderD133 none bind,create=file 0 0
lxc.mount.entry = /var/snap/lxd/common/lxd/devices/chenfeng/unix.gpu.dev-nvidia5 dev/nvidia5 none bind,create=file 0 0
lxc.mount.entry = /var/snap/lxd/common/lxd/devices/chenfeng/unix.gpu.dev-dri-card7 dev/dri/card7 none bind,create=file 0 0
lxc.mount.entry = /var/snap/lxd/common/lxd/devices/chenfeng/unix.gpu.dev-dri-renderD134 dev/dri/renderD134 none bind,create=file 0 0
lxc.mount.entry = /var/snap/lxd/common/lxd/devices/chenfeng/unix.gpu.dev-nvidia6 dev/nvidia6 none bind,create=file 0 0
lxc.mount.entry = /var/snap/lxd/common/lxd/devices/chenfeng/unix.gpu.dev-dri-card8 dev/dri/card8 none bind,create=file 0 0
lxc.mount.entry = /var/snap/lxd/common/lxd/devices/chenfeng/unix.gpu.dev-dri-renderD135 dev/dri/renderD135 none bind,create=file 0 0
lxc.mount.entry = /var/snap/lxd/common/lxd/devices/chenfeng/unix.gpu.dev-nvidia7 dev/nvidia7 none bind,create=file 0 0
lxc.mount.entry = /var/snap/lxd/common/lxd/devices/chenfeng/unix.gpu.dev-dri-card9 dev/dri/card9 none bind,create=file 0 0
lxc.mount.entry = /var/snap/lxd/common/lxd/devices/chenfeng/unix.gpu.dev-dri-renderD136 dev/dri/renderD136 none bind,create=file 0 0
lxc.mount.entry = /var/snap/lxd/common/lxd/devices/chenfeng/unix.gpu.dev-nvidia8 dev/nvidia8 none bind,create=file 0 0
lxc.mount.entry = /var/snap/lxd/common/lxd/devices/chenfeng/unix.gpu.dev-nvidia--uvm dev/nvidia-uvm none bind,create=file 0 0
lxc.mount.entry = /var/snap/lxd/common/lxd/devices/chenfeng/unix.gpu.dev-nvidiactl dev/nvidiactl none bind,create=file 0 0

# proxy.proxy0.log
Status: Started

I was thinking more logs inside the container (we know there isn’t an issue outside of the container).

Sorry, what logs do you want from inside the container? dmesg?

I was thinking more journalctl -b

The output of journalctl -b in my container.

ubuntu@chenfeng:~/data$ journalctl -b
Hint: You are currently not seeing messages from other users and the system.
      Users in groups 'adm', 'systemd-journal' can see all messages.
      Pass -q to turn off this notice.
-- Logs begin at Sat 2021-12-04 15:46:03 UTC, end at Thu 2022-04-28 11:03:50 UTC. --
Apr 28 11:03:25 chenfeng systemd[556]: Reached target Paths.
Apr 28 11:03:25 chenfeng systemd[556]: Reached target Timers.
Apr 28 11:03:25 chenfeng systemd[556]: Starting D-Bus User Message Bus Socket.
Apr 28 11:03:25 chenfeng systemd[556]: Listening on GnuPG network certificate management daemon.
Apr 28 11:03:25 chenfeng systemd[556]: Listening on GnuPG cryptographic agent and passphrase cache (access for web browsers).
Apr 28 11:03:25 chenfeng systemd[556]: Listening on GnuPG cryptographic agent and passphrase cache (restricted).
Apr 28 11:03:25 chenfeng systemd[556]: Listening on GnuPG cryptographic agent (ssh-agent emulation).
Apr 28 11:03:25 chenfeng systemd[556]: Listening on GnuPG cryptographic agent and passphrase cache.
Apr 28 11:03:25 chenfeng systemd[556]: Listening on debconf communication socket.
Apr 28 11:03:25 chenfeng systemd[556]: Listening on Sound System.
Apr 28 11:03:25 chenfeng systemd[556]: Listening on D-Bus User Message Bus Socket.
Apr 28 11:03:25 chenfeng systemd[556]: Reached target Sockets.
Apr 28 11:03:25 chenfeng systemd[556]: Reached target Basic System.
Apr 28 11:03:25 chenfeng systemd[556]: Starting Sound Service...
Apr 28 11:03:25 chenfeng systemd[556]: Started D-Bus User Message Bus.
Apr 28 11:03:25 chenfeng dbus-daemon[595]: [session uid=1000 pid=595] AppArmor D-Bus mediation is enabled
Apr 28 11:03:25 chenfeng systemd[556]: Started Sound Service.
Apr 28 11:03:25 chenfeng systemd[556]: Reached target Main User Target.
Apr 28 11:03:25 chenfeng systemd[556]: Startup finished in 252ms.
Apr 28 11:03:50 chenfeng pulseaudio[562]: GetManagedObjects() failed: org.freedesktop.DBus.Error.NoReply: Did not receive a reply. Possible causes include: the remote applicatio>

What does networkctl status eth0 show after fresh boot?

I am training my deep learning model in my container right now. After the training completes, I will reboot the container and then check the output of networkctl status eth0. It should be done in about 6 hours.

Here’s the output of networkctl status eth0 after I rebooted the container.

root@iic-worker-203-gpu:~# lxc exec chenfeng bash
root@chenfeng:~# networkctl status eth0
● 133: eth0
             Link File: /usr/lib/systemd/network/99-default.link
          Network File: /run/systemd/network/10-netplan-eth0.network
                  Type: ether
                 State: off (configured)
                Driver: veth
            HW Address: 00:16:3e:e4:31:a8 (Xensource, Inc.)
                   MTU: 1450 (min: 68, max: 65535)
  Queue Length (Tx/Rx): 1/1
      Auto negotiation: no
                 Speed: 10Gbps
                Duplex: full
                  Port: tp
     Activation Policy: up
   Required For Online: yes

Apr 29 01:35:56 chenfeng systemd-networkd[87]: eth0: IPv6 successfully enabled
Apr 29 01:35:56 chenfeng systemd-networkd[87]: eth0: DHCPv4 address 240.203.0.107/8 via 240.203.0.1
Apr 29 01:35:56 chenfeng systemd-networkd[87]: eth0: Gained IPv6LL
Apr 29 01:35:56 chenfeng systemd-networkd[87]: eth0: Link DOWN
Apr 29 01:35:56 chenfeng systemd-networkd[87]: eth0: Lost carrier
Apr 29 01:35:56 chenfeng systemd-networkd[87]: eth0: DHCP lease lost

And here’s the output of networkctl status eth0 after I ran `lxc exec chenfeng -- ip link set eth0 up`.

root@iic-worker-203-gpu:~# lxc exec chenfeng -- ip link set eth0 up
root@iic-worker-203-gpu:~# lxc exec chenfeng bash
root@chenfeng:~# networkctl status eth0
● 133: eth0
             Link File: /usr/lib/systemd/network/99-default.link
          Network File: /run/systemd/network/10-netplan-eth0.network
                  Type: ether
                 State: routable (configured)
                Driver: veth
            HW Address: 00:16:3e:e4:31:a8 (Xensource, Inc.)
                   MTU: 1450 (min: 68, max: 65535)
  Queue Length (Tx/Rx): 1/1
      Auto negotiation: no
                 Speed: 10Gbps
                Duplex: full
                  Port: tp
               Address: 240.203.0.107 (DHCP4)
                        fe80::216:3eff:fee4:31a8
               Gateway: 240.203.0.1 (Xensource, Inc.)
                   DNS: 240.203.0.1
        Search Domains: lxd
     Activation Policy: up
   Required For Online: yes

Apr 29 01:35:56 chenfeng systemd-networkd[87]: eth0: IPv6 successfully enabled
Apr 29 01:35:56 chenfeng systemd-networkd[87]: eth0: DHCPv4 address 240.203.0.107/8 via 240.203.0.1
Apr 29 01:35:56 chenfeng systemd-networkd[87]: eth0: Gained IPv6LL
Apr 29 01:35:56 chenfeng systemd-networkd[87]: eth0: Link DOWN
Apr 29 01:35:56 chenfeng systemd-networkd[87]: eth0: Lost carrier
Apr 29 01:35:56 chenfeng systemd-networkd[87]: eth0: DHCP lease lost
Apr 29 01:40:48 chenfeng systemd-networkd[87]: eth0: Link UP
Apr 29 01:40:48 chenfeng systemd-networkd[87]: eth0: Gained carrier
Apr 29 01:40:48 chenfeng systemd-networkd[87]: eth0: DHCPv4 address 240.203.0.107/8 via 240.203.0.1
Apr 29 01:40:50 chenfeng systemd-networkd[87]: eth0: Gained IPv6LL