I have a VM with CentOS which has the following network interfaces.
ens160: flags=4163<UP,BROADCAST,RUNNING,MULTICAST> mtu 1500
ether 00:50:56:a2:f3:41 txqueuelen 1000 (Ethernet)
RX packets 6174596 bytes 3867285859 (3.6 GiB)
RX errors 0 dropped 8058 overruns 0 frame 0
TX packets 1653880 bytes 152340076 (145.2 MiB)
TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
ens160.506: flags=4163<UP,BROADCAST,RUNNING,MULTICAST> mtu 1500
inet 10.96.12.116 netmask 255.255.254.0 broadcast 10.96.13.255
inet6 fe80::4b9a:8eb:9f0:1f16 prefixlen 64 scopeid 0x20<link>
ether 00:50:56:a2:f3:41 txqueuelen 1000 (Ethernet)
RX packets 2425053 bytes 3263227611 (3.0 GiB)
RX errors 0 dropped 0 overruns 0 frame 0
TX packets 1376752 bytes 106986037 (102.0 MiB)
TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
lo: flags=73<UP,LOOPBACK,RUNNING> mtu 65536
inet 127.0.0.1 netmask 255.0.0.0
inet6 ::1 prefixlen 128 scopeid 0x10<host>
loop txqueuelen 1000 (Local Loopback)
RX packets 132 bytes 11220 (10.9 KiB)
RX errors 0 dropped 0 overruns 0 frame 0
TX packets 132 bytes 11220 (10.9 KiB)
TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
lxdbr0: flags=4099<UP,BROADCAST,MULTICAST> mtu 1500
inet 10.114.35.1 netmask 255.255.255.0 broadcast 0.0.0.0
inet6 fd42:c2c4:d681:4b22::1 prefixlen 64 scopeid 0x0<global>
inet6 fe80::216:3eff:fe4e:8b54 prefixlen 64 scopeid 0x20<link>
ether 00:16:3e:4e:8b:54 txqueuelen 1000 (Ethernet)
RX packets 9466 bytes 722092 (705.1 KiB)
RX errors 0 dropped 0 overruns 0 frame 0
TX packets 3362 bytes 450490 (439.9 KiB)
TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
veth621511dc: flags=4163<UP,BROADCAST,RUNNING,MULTICAST> mtu 1500
inet 169.254.0.1 netmask 255.255.255.255 broadcast 0.0.0.0
inet6 fe80::fc03:bdff:fe93:7297 prefixlen 64 scopeid 0x20<link>
ether fe:03:bd:93:72:97 txqueuelen 1000 (Ethernet)
RX packets 43 bytes 3591 (3.5 KiB)
RX errors 0 dropped 0 overruns 0 frame 0
TX packets 86 bytes 9299 (9.0 KiB)
TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
virbr0: flags=4099<UP,BROADCAST,MULTICAST> mtu 1500
inet 192.168.122.1 netmask 255.255.255.0 broadcast 192.168.122.255
ether 52:54:00:23:eb:b6 txqueuelen 1000 (Ethernet)
RX packets 0 bytes 0 (0.0 B)
RX errors 0 dropped 0 overruns 0 frame 0
TX packets 0 bytes 0 (0.0 B)
TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
On the host I have a Python script which does the following:
# --- Fog middleware bootstrap (host side) ---
IP = "10.96.12.116"  # host address on the ens160.506 VLAN interface
print(IP)

service_registry = {}  # Service Registry of the Fog middleware
flog = open("foglog.txt", "w+")  # log sink for the container-side service output

#HelpNetPorfThread = HelpNetPorf() # Initialise the thread which helps Network Profiling
#HelpNetPorfThread.start()

# UDP socket on which container services announce themselves.
sock4droneAndServices = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock4droneAndServices.bind((IP, 5001))
# Fail fast instead of blocking forever when the service cannot reach us
# (e.g. routed-NIC / rp_filter problems); raises socket.timeout after 30 s.
sock4droneAndServices.settimeout(30)

# NOTE: '--' separates lxc's own options from the command to run inside the
# container; recent LXD versions require it and may misparse without it.
command4LXD = ['sudo', 'lxc', 'exec', 'DetectionSvc', '--',
               'python3', 'DetectionSvc.py',
               "10.96.12.200", IP, str(5001), "DetectionSvc"]
subprocess.Popen(command4LXD, stdout=flog)

# Block until the freshly started service announces itself.
SvcInfoSerial, SvcIPport = sock4droneAndServices.recvfrom(1400)
# SECURITY: pickle.loads on data received from the network is unsafe if the
# sender can be spoofed; consider a JSON payload instead.
SvcInfo = pickle.loads(SvcInfoSerial)
service_registry["DetectionSvc"] = SvcInfo  # Save to the Service Registry, the Service Info.
print(service_registry)
Inside the container DetectionSvc (whose baseline image is ubuntu:18.04), the DetectionSvc.py process starts, but it never sends anything back, so
sock4droneAndServices.recvfrom(1400) blocks there forever.
Both the container and the host can ping each other; only the process inside the container fails to communicate.
The code of the DetectionSvc.py which runs inside the container is the presented below.
# --- DetectionSvc bootstrap (runs inside the container) ---
# argv: [1] IP the Pyro daemon binds to inside the container
#       [2] IP of the drone middleware on the host
#       [3] UDP port of the drone middleware
#       [4] service name (used only for logging)
if len(sys.argv) < 5:
    sys.exit("usage: DetectionSvc.py <daemon-ip> <middleware-ip> <middleware-port> <svc-name>")

ip4DeamonSvc = sys.argv[1]
ipOfDroneMiddleware = sys.argv[2]
port0fDroneMiddleware = sys.argv[3]
svcname = sys.argv[4]

# port=0 lets Pyro pick a free ephemeral port on the given address.
Daemon = Pyro4.Daemon(host=ip4DeamonSvc, port=0)
id_Svc = Daemon.register(DetectionSvc)

ServiceInfo = {}
ServiceInfo["PyroID"] = id_Svc.asString()  # Pyro URI the middleware uses to reach this service
ServiceInfo["Resident"] = 0
ServiceInfo["SvcPID"] = os.getpid()

# Announce ourselves to the middleware over UDP; close the socket even if sendto fails.
serialServiceInfo = pickle.dumps(ServiceInfo)
sockToNE = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
    sockToNE.sendto(serialServiceInfo, (ipOfDroneMiddleware, int(port0fDroneMiddleware)))
finally:
    sockToNE.close()

# flush=True: stdout is redirected to a file by the host-side Popen, so without
# an explicit flush this line sits in the block buffer and never appears in the log.
print(svcname + " OK ", flush=True)
Daemon.requestLoop()  # serve Pyro requests until shut down
Output of ifconfig inside the container:
root@DetectionSvc:~# ifconfig
eth0: flags=4163<UP,BROADCAST,RUNNING,MULTICAST> mtu 1500
inet 10.96.12.200 netmask 255.255.255.255 broadcast 255.255.255.255
inet6 fe80::9c42:40ff:fe9a:4795 prefixlen 64 scopeid 0x20<link>
ether 9e:42:40:9a:47:95 txqueuelen 1000 (Ethernet)
RX packets 86 bytes 9299 (9.2 KB)
RX errors 0 dropped 0 overruns 0 frame 0
TX packets 43 bytes 3591 (3.5 KB)
TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
lo: flags=73<UP,LOOPBACK,RUNNING> mtu 65536
inet 127.0.0.1 netmask 255.0.0.0
inet6 ::1 prefixlen 128 scopeid 0x10<host>
loop txqueuelen 1000 (Local Loopback)
RX packets 52 bytes 3824 (3.8 KB)
RX errors 0 dropped 0 overruns 0 frame 0
TX packets 52 bytes 3824 (3.8 KB)
TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
I configured the DetectionSvc container using the routed way with this .yaml
# LXD profile: routed NIC giving the container an address on the ens160.506 subnet.
# (Indentation reconstructed — the original paste was flattened and invalid YAML.)
config:
  user.network-config: |
    version: 2
    ethernets:
      eth0:
        addresses:
        - 10.96.12.200/32          # /32: routed NICs use host routes, not an on-link subnet
        nameservers:
          addresses:
          - 8.8.8.8
          search: []
        routes:
        - to: 0.0.0.0/0
          via: 169.254.0.1         # link-local gateway provided by the routed veth peer
          on-link: true
description: Default LXD profile
devices:
  eth0:
    ipv4.address: 10.96.12.200
    nictype: routed
    parent: ens160.506
    type: nic
name: ip200new
used_by:
- /1.0/instances/DetectionSvc
@tomp Any help here?
Since I want only communication between the host and the container, I don't believe it is necessary to use a routed profile.