Hi,
I was trying to launch an lxc container inside another container. For some reason I’m getting the following error message.
root@u1:~# lxc launch images:alpine/3.10 b1
Creating b1
Starting b1
Error: Failed to run: /usr/lib/lxd/lxd forkstart b1 /var/lib/lxd/containers /var/log/lxd/b1/lxc.conf:
Try `lxc info --show-log local:b1` for more info
Contents of lxc info --show-log local:b1
root@u1:~# lxc info --show-log local:b1
Name: b1
[details=“Summary”]
This text will be hidden
[/details]
Remote: unix://
Architecture: x86_64
Created: 2020/06/07 13:10 UTC
Status: Stopped
Type: persistent
Profiles: default
Log:
lxc b1 20200607131051.194 ERROR utils - utils.c:safe_mount:1179 - Permission denied - Failed to mount "proc" onto "/usr/lib/x86_64-linux-gnu/lxc/proc"
lxc b1 20200607131051.194 ERROR conf - conf.c:lxc_mount_auto_mounts:724 - Permission denied - Failed to mount "proc" on "/usr/lib/x86_64-linux-gnu/lxc/proc" with flags 14
lxc b1 20200607131051.194 ERROR conf - conf.c:lxc_setup:3539 - Failed to setup first automatic mounts
lxc b1 20200607131051.194 ERROR start - start.c:do_start:1263 - Failed to setup container "b1"
lxc b1 20200607131051.194 ERROR sync - sync.c:__sync_wait:62 - An error occurred in another process (expected sequence number 5)
lxc b1 20200607131051.194 WARN network - network.c:lxc_delete_network_priv:2589 - Operation not permitted - Failed to remove interface "eth0" with index 10
lxc b1 20200607131051.194 ERROR lxccontainer - lxccontainer.c:wait_on_daemonized_start:842 - Received container state "ABORTING" instead of "RUNNING"
lxc b1 20200607131051.195 ERROR start - start.c:__lxc_start:1939 - Failed to spawn container "b1"
lxc 20200607131051.205 WARN commands - commands.c:lxc_cmd_rsp_recv:132 - Connection reset by peer - Failed to receive response for command "get_state"
The contents of /var/log/lxd/lxd.log
t=2020-06-07T13:10:50+0000 lvl=info msg="Creating container" ephemeral=false name=b1
t=2020-06-07T13:10:50+0000 lvl=info msg="Created container" ephemeral=false name=b1
t=2020-06-07T13:10:50+0000 lvl=info msg="Starting container" action=start created=2020-06-07T13:10:50+0000 ephemeral=false name=b1 stateful=false used=1970-01-01T00:00:00+0000
t=2020-06-07T13:10:51+0000 lvl=eror msg="Failed starting container" action=start created=2020-06-07T13:10:50+0000 ephemeral=false name=b1 stateful=false used=1970-01-01T00:00:00+0000
t=2020-06-07T13:10:51+0000 lvl=info msg="Container initiated stop" action=stop created=2020-06-07T13:10:50+0000 ephemeral=false name=b1 stateful=false used=2020-06-07T13:10:50+0000
I found this from the logs
root@u1:~# lxc info --show-log b1
Name: b1
Remote: unix://
Architecture: x86_64
Created: 2020/06/07 13:10 UTC
Status: Stopped
Type: persistent
Profiles: default
Log:
lxc b1 20200607131051.194 ERROR utils - utils.c:safe_mount:1179 - Permission denied - Failed to mount "proc" onto "/usr/lib/x86_64-linux-gnu/lxc/proc"
lxc b1 20200607131051.194 ERROR conf - conf.c:lxc_mount_auto_mounts:724 - Permission denied - Failed to mount "proc" on "/usr/lib/x86_64-linux-gnu/lxc/proc" with flags 14
lxc b1 20200607131051.194 ERROR conf - conf.c:lxc_setup:3539 - Failed to setup first automatic mounts
lxc b1 20200607131051.194 ERROR start - start.c:do_start:1263 - Failed to setup container "b1"
lxc b1 20200607131051.194 ERROR sync - sync.c:__sync_wait:62 - An error occurred in another process (expected sequence number 5)
lxc b1 20200607131051.194 WARN network - network.c:lxc_delete_network_priv:2589 - Operation not permitted - Failed to remove interface "eth0" with index 10
lxc b1 20200607131051.194 ERROR lxccontainer - lxccontainer.c:wait_on_daemonized_start:842 - Received container state "ABORTING" instead of "RUNNING"
lxc b1 20200607131051.195 ERROR start - start.c:__lxc_start:1939 - Failed to spawn container "b1"
lxc 20200607131051.205 WARN commands - commands.c:lxc_cmd_rsp_recv:132 - Connection reset by peer - Failed to receive response for command "get_state"
I can’t seem to find the solution to this. Any help would be appreciated.
Thanks
toby63
(Toby63)
2
Have you followed the howto for running lxd inside lxd?
https://stgraber.org/2016/04/14/lxd-2-0-lxd-in-lxd-812/
And if you use snap, also this: https://stgraber.org/2016/12/07/running-snaps-in-lxd-containers/
Can you give us the results of:
lxc info
I haven’t looked at that one. Will check that one tomorrow.
root@u1:~# lxc info
config:
core.https_address: '[::]:8443'
core.trust_password: true
images.auto_update_interval: "0"
api_extensions:
- storage_zfs_remove_snapshots
- container_host_shutdown_timeout
- container_stop_priority
- container_syscall_filtering
- auth_pki
- container_last_used_at
- etag
- patch
- usb_devices
- https_allowed_credentials
- image_compression_algorithm
- directory_manipulation
- container_cpu_time
- storage_zfs_use_refquota
- storage_lvm_mount_options
- network
- profile_usedby
- container_push
- container_exec_recording
- certificate_update
- container_exec_signal_handling
- gpu_devices
- container_image_properties
- migration_progress
- id_map
- network_firewall_filtering
- network_routes
- storage
- file_delete
- file_append
- network_dhcp_expiry
- storage_lvm_vg_rename
- storage_lvm_thinpool_rename
- network_vlan
- image_create_aliases
- container_stateless_copy
- container_only_migration
- storage_zfs_clone_copy
- unix_device_rename
- storage_lvm_use_thinpool
- storage_rsync_bwlimit
- network_vxlan_interface
- storage_btrfs_mount_options
- entity_description
- image_force_refresh
- storage_lvm_lv_resizing
- id_map_base
- file_symlinks
- container_push_target
- network_vlan_physical
- storage_images_delete
- container_edit_metadata
- container_snapshot_stateful_migration
- storage_driver_ceph
- storage_ceph_user_name
- resource_limits
- storage_volatile_initial_source
- storage_ceph_force_osd_reuse
- storage_block_filesystem_btrfs
- resources
- kernel_limits
- storage_api_volume_rename
- macaroon_authentication
- network_sriov
- console
- restrict_devlxd
- migration_pre_copy
- infiniband
- maas_network
- devlxd_events
- proxy
- network_dhcp_gateway
- file_get_symlink
- network_leases
- unix_device_hotplug
- storage_api_local_volume_handling
- operation_description
- clustering
- event_lifecycle
- storage_api_remote_volume_handling
- nvidia_runtime
- candid_authentication
- candid_config
- candid_config_key
- usb_optional_vendorid
api_status: stable
api_version: "1.0"
auth: trusted
public: false
auth_methods:
- tls
environment:
addresses:
- 10.0.3.1:8443
- 10.239.11.1:8443
- '[fd42:9b95:25b2:684a::1]:8443'
- 10.156.178.146:8443
- '[fd42:93f2:521c:69a4:216:3eff:febc:b39d]:8443'
architectures:
- x86_64
- i686
certificate: |
-----BEGIN CERTIFICATE-----
MIIFTTCCAzWgAwIBAgIRAI/Mk0ayz12DAvn0aNjn1bQwDQYJKoZIhvcNAQELBQAw
MDEcMBoGA1UEChMTbGludXhjb250YWluZXJzLm9yZzEQMA4GA1UEAwwHcm9vdEB1
MTAeFw0yMDA2MDcxMjIyNDBaFw0zMDA2MDUxMjIyNDBaMDAxHDAaBgNVBAoTE2xp
bnV4Y29udGFpbmVycy5vcmcxEDAOBgNVBAMMB3Jvb3RAdTEwggIiMA0GCSqGSIb3
DQEBAQUAA4ICDwAwggIKAoICAQDe4dbMyUyhWQzFzTG8PkbY0lpMKRBNepGWcDm3
lVw4hgAKF/VzCILbW9Wr97esU4PblQeTlb34nmmxYo2FCtYl9sjh1rSIM23kM1Dz
RElVC6+yTDL2WXC9KDVljQBWM44tD8nWYfboalTcx42Fmv+6+EGSf/pFuNbVRRlv
z9dJeX7x7LPkZXnh+mKC+mg2qoYSXmJHla8/PlazBctCIi7J2DHYKjw91p1kKNj/
g2rxFRi6q5jZqckxHLnh13+ddzC7MS8C8gFDYvQ5rp6QG+Y+RO2JFfCz/v2AaeP3
LdITDZtifCLh5NTJORvQyUKaMV/UfnVjflO6EAKtr625CgjCsYHGIhJbwWYn93CP
D4iHuBttaT1kct4EXtbCuluQHO4DpvbfQPvaS7lKOJv5yLMmwKXn94STcP+taRDj
7CSQm4srXNPXnoJsxGVI3KTyhc+8upADnV1QYrKMroeTpKqixAL9kSfCAA8bpI31
YACyJKizZMs+yKBKW9S4coPpKI/5riwzHb4HUjlsQXHJ0O0VwH2DO36pZlTzIB4i
ezqcd6l2Y9HauvI5x2QjRVenZJDbv4+nWXoWtIh9XSorNKJNrJUTSMUOsxrc9Vyy
OXLPr/qtGfKl7KTXmhWxfcSiuE1E8g2AD9/AT/eUmbJb3DXYrNCbbwpFW86GKf6c
Ndv4qwIDAQABo2IwYDAOBgNVHQ8BAf8EBAMCBaAwEwYDVR0lBAwwCgYIKwYBBQUH
AwEwDAYDVR0TAQH/BAIwADArBgNVHREEJDAiggJ1MYcECgADAYcECpyykocQ/UKT
8lIcaaQCFj7//ryznTANBgkqhkiG9w0BAQsFAAOCAgEACGpbb80CTFiCAzG9Lr3Z
sCZ5wBR2rL1AhTADJbkpZ+YHriYGnSOL8ome5V+fpW9O/Hejt4ybGqtFpRD+WkxS
xbu6nVDuviVPGN2NNBD4euTCJVaXo5moM87WOxg0GfBmM3FNmHZQu/p/Wfmqq69H
SE8Q8ueb+0uRb8MbI4udia9PezJGKp3WyX87VTHsdI+p4eDKnyInDAdKIZz8ea/1
K1+Fl9g5W4PU/bTKfcl7Amu0UqA1q4XsFhWLgnKEdJFE3Dwzxe8VJk6ElHc/Prjl
viW1hH5NPCgL/x0LgMH9uzgnyQVwkSVOkQ6q2DC5GrmiavrB+yaKDi9ucch42AMs
eBPN3uubx9bJLYK0vDMKaVx26WNThhyQTDQ534sRQn+IOLtjukMD03ZqBKX00QW3
Q+w5TwHrA05crAYl6mwftQjAa2crT+vZjEx5imGafQVLNf5UYJCaLnPW5VqGhog4
GWCKpd5n02CyZrW0dmQc823DxnB8YKkp63/XlBTUcA+XyWgLjCfRaKf31mG4N1om
By3OnRHuOYWhl8u02cl5D7hCZ1lo6OTZDjrKn/E24Mlmqdg18TII6oCRP57S9WK1
ks8MUdBWCWmN5WxXXCMmfV4qOHzFNw2jQqPpNy5nDFH9FM1KtKRsYPnxXyZIrMDs
bkY5Cd8U7xD4P1WIRF9FJGQ=
-----END CERTIFICATE-----
certificate_fingerprint: 2398a4f05b3cd36bf4aa9f6b6670408dbf6e6aa928f9395cf84398ce0facd81c
driver: lxc
driver_version: 3.0.3
kernel: Linux
kernel_architecture: x86_64
kernel_version: 5.6.0-kali2-amd64
server: lxd
server_pid: 218
server_version: 3.0.3
storage: dir
storage_version: "1"
server_clustered: false
server_name: u1
project: ""
root@u1:~#
simos
(Simos Xenitellis)
4
To be able to run LXD inside a LXD container, you need to enable nesting support for the LXD container.
You need to add -c security.nesting=true in the lxc launch
command.
For example,
$ lxc launch ubuntu:20.04 mycontainer -c security.nesting=true
Creating mycontainer
Starting mycontainer
$
Then, when you get a shell into the mycontainer
, you can setup LXD and launch containers.
Note that if you have already created the initial container without -c security.nesting=true, you can set this flag on the existing container and then just restart it.
1 Like
Thank you @simos and @toby63! Setting the nesting flag solved the issue.