Hi,
I have a very much unappreciated effect here:
//mounting a zfs dataset
[root@node2 containers]# zfs mount lxc-vrtx-zfs-storage/lxc1752-zfs-0
//checking the available space ( it's 50GB free, 128 bytes used )
[root@node2 containers]# df
lxc-vrtx-zfs-storage/lxc1752-zfs-0 52428800 128 52428672 1% /opt/storages/lxc-vrtx-zfs-storage/containers/lxc1752
//Checking the content of the directory ( it's empty )
[root@node2 containers]# ls -la /opt/storages/lxc-vrtx-zfs-storage/containers/lxc1752
total 5
drwxr-xr-x 2 root root 2 Dec 5 18:59 .
drwxr-xr-x 31 root root 4096 Dec 5 19:23 ..
//Running a lxc init:
lxc init images:centos/6 lxc1752 -p lxc1752 --debug --verbose
DBUG[12-05|19:24:26] Connecting to a local LXD over a Unix socket
DBUG[12-05|19:24:26] Sending request to LXD method=GET url=http://unix.socket/1.0 etag=
DBUG[12-05|19:24:26] Got response struct from LXD
DBUG[12-05|19:24:26]
{
"config": {
"core.https_address": "[::]:8443",
"images.auto_update_interval": "0"
},
"api_extensions": [
"storage_zfs_remove_snapshots",
"container_host_shutdown_timeout",
"container_stop_priority",
"container_syscall_filtering",
"auth_pki",
"container_last_used_at",
"etag",
"patch",
"usb_devices",
"https_allowed_credentials",
"image_compression_algorithm",
"directory_manipulation",
"container_cpu_time",
"storage_zfs_use_refquota",
"storage_lvm_mount_options",
"network",
"profile_usedby",
"container_push",
"container_exec_recording",
"certificate_update",
"container_exec_signal_handling",
"gpu_devices",
"container_image_properties",
"migration_progress",
"id_map",
"network_firewall_filtering",
"network_routes",
"storage",
"file_delete",
"file_append",
"network_dhcp_expiry",
"storage_lvm_vg_rename",
"storage_lvm_thinpool_rename",
"network_vlan",
"image_create_aliases",
"container_stateless_copy",
"container_only_migration",
"storage_zfs_clone_copy",
"unix_device_rename",
"storage_lvm_use_thinpool",
"storage_rsync_bwlimit",
"network_vxlan_interface",
"storage_btrfs_mount_options",
"entity_description",
"image_force_refresh",
"storage_lvm_lv_resizing",
"id_map_base",
"file_symlinks",
"container_push_target",
"network_vlan_physical",
"storage_images_delete",
"container_edit_metadata",
"container_snapshot_stateful_migration",
"storage_driver_ceph",
"storage_ceph_user_name",
"resource_limits",
"storage_volatile_initial_source",
"storage_ceph_force_osd_reuse",
"storage_block_filesystem_btrfs",
"resources",
"kernel_limits",
"storage_api_volume_rename",
"macaroon_authentication",
"network_sriov",
"console",
"restrict_devlxd",
"migration_pre_copy",
"infiniband",
"maas_network",
"devlxd_events",
"proxy",
"network_dhcp_gateway",
"file_get_symlink",
"network_leases",
"unix_device_hotplug",
"storage_api_local_volume_handling",
"operation_description",
"clustering",
"event_lifecycle",
"storage_api_remote_volume_handling",
"nvidia_runtime",
"container_mount_propagation",
"container_backup",
"devlxd_images",
"container_local_cross_pool_handling",
"proxy_unix",
"proxy_udp",
"clustering_join",
"proxy_tcp_udp_multi_port_handling",
"network_state",
"proxy_unix_dac_properties",
"container_protection_delete",
"unix_priv_drop",
"pprof_http",
"proxy_haproxy_protocol",
"network_hwaddr",
"proxy_nat",
"network_nat_order",
"container_full",
"candid_authentication",
"backup_compression",
"candid_config",
"nvidia_runtime_config",
"storage_api_volume_snapshots",
"storage_unmapped",
"projects",
"candid_config_key",
"network_vxlan_ttl",
"container_incremental_copy",
"usb_optional_vendorid",
"snapshot_scheduling",
"container_copy_project",
"clustering_server_address",
"clustering_image_replication",
"container_protection_shift",
"snapshot_expiry",
"container_backup_override_pool",
"snapshot_expiry_creation",
"network_leases_location",
"resources_cpu_socket",
"resources_gpu",
"resources_numa",
"kernel_features",
"id_map_current",
"event_location",
"storage_api_remote_volume_snapshots",
"network_nat_address",
"container_nic_routes",
"rbac",
"cluster_internal_copy",
"seccomp_notify",
"lxc_features",
"container_nic_ipvlan",
"network_vlan_sriov",
"storage_cephfs",
"container_nic_ipfilter",
"resources_v2",
"container_exec_user_group_cwd",
"container_syscall_intercept",
"container_disk_shift",
"storage_shifted",
"resources_infiniband",
"daemon_storage",
"instances",
"image_types",
"resources_disk_sata",
"clustering_roles",
"images_expiry"
],
"api_status": "stable",
"api_version": "1.0",
"auth": "trusted",
"public": false,
"auth_methods": [
"tls"
],
"environment": {
"addresses": [
"10.2.3.4:8443",
],
"architectures": [
"x86_64",
"i686"
],
"certificate": "\n",
"certificate_fingerprint": "46db67db4203fae6df89b3b27fb946635f31ba90eaaa2eb3b608be06b2a2dae7",
"driver": "lxc",
"driver_version": "3.2.1",
"kernel": "Linux",
"kernel_architecture": "x86_64",
"kernel_features": {
"netnsid_getifaddrs": "true",
"seccomp_listener": "true",
"shiftfs": "false",
"uevent_injection": "true",
"unpriv_fscaps": "true"
},
"kernel_version": "5.2.13-200.fc30.x86_64",
"lxc_features": {
"mount_injection_file": "true",
"network_gateway_device_route": "true",
"network_ipvlan": "true",
"network_l2proxy": "true",
"network_phys_macvlan_mtu": "true",
"seccomp_notify": "true"
},
"project": "default",
"server": "lxd",
"server_clustered": false,
"server_name": "node2",
"server_pid": 48715,
"server_version": "3.18",
"storage": "dir",
"storage_version": "1"
}
}
Creating lxc1752
DBUG[12-05|19:24:26] Connecting to a remote simplestreams server
DBUG[12-05|19:24:26] Connected to the websocket: ws://unix.socket/1.0/events
DBUG[12-05|19:24:26] Sending request to LXD method=POST url=http://unix.socket/1.0/instances etag=
DBUG[12-05|19:24:26]
{
"architecture": "",
"config": {},
"devices": {},
"ephemeral": false,
"profiles": [
"lxc1752"
],
"stateful": false,
"description": "",
"name": "lxc1752",
"source": {
"type": "image",
"certificate": "",
"alias": "centos/6",
"server": "https://images.linuxcontainers.org",
"protocol": "simplestreams",
"mode": "pull"
},
"instance_type": "",
"type": ""
}
DBUG[12-05|19:24:26] Got operation from LXD
DBUG[12-05|19:24:26]
{
"id": "e3d8dc11-34cc-4b43-86b8-d75dee7cde3b",
"class": "task",
"description": "Creating container",
"created_at": "2019-12-05T19:24:26.697858977+01:00",
"updated_at": "2019-12-05T19:24:26.697858977+01:00",
"status": "Running",
"status_code": 103,
"resources": {
"containers": [
"/1.0/containers/lxc1752"
]
},
"metadata": null,
"may_cancel": false,
"err": "",
"location": "none"
}
DBUG[12-05|19:24:26] Sending request to LXD method=GET url=http://unix.socket/1.0/operations/e3d8dc11-34cc-4b43-86b8-d75dee7cde3b etag=
DBUG[12-05|19:24:26] Got response struct from LXD
DBUG[12-05|19:24:26]
{
"id": "e3d8dc11-34cc-4b43-86b8-d75dee7cde3b",
"class": "task",
"description": "Creating container",
"created_at": "2019-12-05T19:24:26.697858977+01:00",
"updated_at": "2019-12-05T19:24:26.697858977+01:00",
"status": "Running",
"status_code": 103,
"resources": {
"containers": [
"/1.0/containers/lxc1752"
]
},
"metadata": null,
"may_cancel": false,
"err": "",
"location": "none"
}
DBUG[12-05|19:24:27] Sending request to LXD method=GET url=http://unix.socket/1.0/instances/lxc1752 etag=
DBUG[12-05|19:24:27] Got response struct from LXD
DBUG[12-05|19:24:27]
{
"architecture": "x86_64",
"config": {
"image.architecture": "amd64",
"image.description": "Centos 6 amd64 (20191205_07:08)",
"image.os": "Centos",
"image.release": "6",
"image.serial": "20191205_07:08",
"image.type": "squashfs",
"volatile.apply_template": "create",
"volatile.base_image": "f6df01ad636278ed6c15a3b424f9bc8b06dc30ebbdc6011a6f8e0fd497afa593",
"volatile.idmap.base": "0",
"volatile.idmap.next": "[{\"Isuid\":true,\"Isgid\":false,\"Hostid\":1000000,\"Nsid\":0,\"Maprange\":1000000000},{\"Isuid\":false,\"Isgid\":true,\"Hostid\":1000000,\"Nsid\":0,\"Maprange\":1000000000}]",
"volatile.last_state.idmap": "[]"
},
"devices": {},
"ephemeral": false,
"profiles": [
"lxc1752"
],
"stateful": false,
"description": "",
"created_at": "2019-12-05T19:24:26.746658602+01:00",
"expanded_config": {
"image.architecture": "amd64",
"image.description": "Centos 6 amd64 (20191205_07:08)",
"image.os": "Centos",
"image.release": "6",
"image.serial": "20191205_07:08",
"image.type": "squashfs",
"volatile.apply_template": "create",
"volatile.base_image": "f6df01ad636278ed6c15a3b424f9bc8b06dc30ebbdc6011a6f8e0fd497afa593",
"volatile.idmap.base": "0",
"volatile.idmap.next": "[{\"Isuid\":true,\"Isgid\":false,\"Hostid\":1000000,\"Nsid\":0,\"Maprange\":1000000000},{\"Isuid\":false,\"Isgid\":true,\"Hostid\":1000000,\"Nsid\":0,\"Maprange\":1000000000}]",
"volatile.last_state.idmap": "[]"
},
"expanded_devices": {
"root": {
"path": "/",
"pool": "lxc-vrtx-zfs-storage",
"type": "disk"
}
},
"name": "lxc1752",
"status": "Stopped",
"status_code": 102,
"last_used_at": "1970-01-01T01:00:00+01:00",
"location": "none",
"type": "container"
}
The container you are starting doesn't have any network attached to it.
To create a new network, use: lxc network create
To attach a network to a container, use: lxc network attach
// Now checking again the content of the directory ( which is still empty, even though we just installed a container – which is startable and working )
[root@node2 containers]# ls -la /opt/storages/lxc-vrtx-zfs-storage/containers/lxc1752
total 5
drwxr-xr-x 2 root root 2 Dec 5 18:59 .
drwxr-xr-x 31 root root 4096 Dec 5 19:23 ..
// Now we umount the dataset and check the directory again, and voila, here are the files
[root@node2 containers]# zfs umount /opt/storages/lxc-vrtx-zfs-storage/containers/lxc1752
[root@node2 containers]# ls -la /opt/storages/lxc-vrtx-zfs-storage/containers/lxc1752
total 12
drwx--x--x 4 root root 77 Dec 5 19:24 .
drwxr-xr-x 31 root root 4096 Dec 5 19:23 ..
-r-------- 1 root root 1926 Dec 5 19:24 backup.yaml
-rw-r--r-- 1 root root 686 Dec 5 08:19 metadata.yaml
dr-xr-xr-x 22 root root 239 Dec 5 08:19 rootfs
drwxr-xr-x 2 root root 72 Dec 5 08:19 templates
And even if I let it install the files on the disk, copy them to the zfs dataset, and try to start the container, I get:
[root@node2 containers]# lxc start lxc1752
Error: Common start logic: No such file or directory: "/var/snap/lxd/common/lxd/storage-pools/lxc-vrtx-zfs-storage/containers/lxc1752/rootfs"
Try `lxc info --show-log lxc1752` for more info
[root@node2 containers]# lxc info --show-log lxc1752
Name: lxc1752
Location: none
Remote: unix://
Architecture: x86_64
Created: 2019/12/05 18:59 UTC
Status: Stopped
Type: persistent
Profiles: lxc1752
Log:
lxc 20191205185937.795 TRACE commands - commands.c:lxc_cmd:303 - Connection refused - Command "get_state" failed to connect command socket
lxc 20191205185937.805 TRACE commands - commands.c:lxc_cmd:303 - Connection refused - Command "get_state" failed to connect command socket
lxc 20191205190149.346 TRACE commands - commands.c:lxc_cmd:303 - Connection refused - Command "get_state" failed to connect command socket
lxc 20191205190149.346 TRACE commands - commands.c:lxc_cmd:303 - Connection refused - Command "get_state" failed to connect command socket
lxc 20191205190149.346 TRACE commands - commands.c:lxc_cmd:303 - Connection refused - Command "get_state" failed to connect command socket
lxc 20191205190154.993 TRACE commands - commands.c:lxc_cmd:303 - Connection refused - Command "get_state" failed to connect command socket
lxc 20191205190155.723 INFO confile - confile.c:set_config_idmaps:1987 - Read uid map: type u nsid 0 hostid 1000000 range 1000000000
lxc 20191205190155.726 INFO confile - confile.c:set_config_idmaps:1987 - Read uid map: type g nsid 0 hostid 1000000 range 1000000000
lxc 20191205190155.731 TRACE commands - commands.c:lxc_cmd:303 - Connection refused - Command "get_state" failed to connect command socket
lxc 20191205190235.882 TRACE commands - commands.c:lxc_cmd:303 - Connection refused - Command "get_state" failed to connect command socket
lxc 20191205190445.417 TRACE commands - commands.c:lxc_cmd:303 - Connection refused - Command "get_state" failed to connect command socket
lxc 20191205190445.430 INFO confile - confile.c:set_config_idmaps:1987 - Read uid map: type u nsid 0 hostid 1000000 range 1000000000
lxc 20191205190445.430 INFO confile - confile.c:set_config_idmaps:1987 - Read uid map: type g nsid 0 hostid 1000000 range 1000000000
lxc 20191205190445.430 TRACE commands - commands.c:lxc_cmd:303 - Connection refused - Command "get_state" failed to connect command socket
lxc 20191205190453.703 TRACE commands - commands.c:lxc_cmd:303 - Connection refused - Command "get_state" failed to connect command socket
lxc 20191205190453.709 TRACE commands - commands.c:lxc_cmd:303 - Connection refused - Command "get_state" failed to connect command socket
lxc 20191205190453.709 TRACE commands - commands.c:lxc_cmd:303 - Connection refused - Command "get_state" failed to connect command socket
So somehow LXD suddenly ignores this mount. This was working before, but now it suddenly does not work anymore ( it's possible that this started when I ran the command "zpool import", without actually importing or doing anything ).
The zfs system works fine. I can write to the directory, and all the KVM servers have no issues, nor do the other LXD containers. So something happened, and I have no idea what or why.
My question now is what mechanics LXD exactly uses to put the files in place, so I might be able to understand why LXD does not put the files into the directory — which is perfectly writeable for LXD — and instead puts the files on the / partition of the system disk.
Thank you for every idea or suggestion !
Greetings
Oliver