UDP Tunnel (Toradex)
Intro
In this tutorial, we are going to demonstrate the tunneling mechanism described in the UDP Tunnel example on a Toradex device running the Torizon environment.
Setup and topology
Instead of the original physical device topology on the client end, we will stick to a Docker-based virtual setup (since it is native to the Torizon IDE), with one small addition: a Linux namespace connected to the container bridge.
As far as the CPU architecture is concerned, the server is again our x86-64 based VPS, while the client side is now a 64-bit ARM (AArch64) Toradex Verdin i.MX8 module.
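Putting the addresses used throughout this tutorial together, the topology looks roughly as follows:
Toradex client (behind NAT, public IP 78.83.207.86)
  ns1: veth1 172.18.0.100/24
    <-> veth0 attached to the Docker bridge (host side 172.18.0.1)
      <-> container 172.18.0.2 running the tunnel app
        <== UDP tunnel, ports 60000 <-> 50000, over the Internet ==>
x86-64 VPS server (public IP 38.242.203.214)
  tunnel app running on the host
    <-> veth0 11.11.11.1/24
      <-> ns1: veth1 11.11.11.100/24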
Encapsulation
For the sake of simplicity, the UDP payload will be the L3-and-above portion of the incoming packet - thus we will achieve Layer 3 tunnel functionality.
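On the wire, each tunneled packet therefore carries the original IP packet as the UDP payload (a conceptual layout; outer header sizes assume IPv4 without options):
| Outer Ethernet (14 B) | Outer IPv4 (20 B) | Outer UDP (8 B) | Inner IPv4 packet (original headers + payload) |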
We can encrypt any part of the encapsulated packet (headers, payload) according to our needs, directly from the code (by using the CORD-CRYPTO library).
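As a purely illustrative sketch of where such a step would slot in - xor_obfuscate() below is a hypothetical stand-in, not the actual CORD-CRYPTO API - the encapsulated packet can be transformed in place right before it is transmitted over the UDP flow point:
#include <stddef.h>
#include <stdint.h>

// Hypothetical stand-in for a real cipher from the CORD-CRYPTO library;
// it transforms the encapsulated packet in place before transmission.
static void xor_obfuscate(uint8_t *data, size_t len, uint8_t key)
{
    for (size_t i = 0; i < len; i++)
        data[i] ^= key; // toy XOR - replace with a real cipher in practice
}

// Usage inside the tunnel loop (sketch):
//   xor_obfuscate((uint8_t *)ip, total_len, 0x5A); // "encrypt" before TX
//   CORD_FLOW_POINT_TX(cord_app_context.l4_udp, ip, total_len, &tx_bytes);
// and apply xor_obfuscate() again right after RX on the peer to reverse it.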
Repository
We will use the example from the official PacketCord.io repo. We only need to modify the IP addresses, for both the client and server sides.
Creating the Torizon Project
Let's create a new project inside the Torizon IDE.
Now, for the sake of brevity, delete the content of CMakeLists.txt so that it becomes an empty file. Also delete main.cpp and create an empty file called l3_tunnel_main.c (inside the src directory, which is where the CMakeLists.txt below expects it).
Later in the tutorial we will provide the content for both files with the necessary modifications (you only need to adjust the IP addresses to match your actual setup).
Building the example
To build the application, we will rely on CMake. We don't need to manually clone or download the repository - this will be handled by the CMake build system (via FetchContent). For that purpose, populate the CMakeLists.txt file with the following content:
cmake_minimum_required(VERSION 3.21) # C_STANDARD 23 requires CMake >= 3.21
project(tunnel VERSION 1.0 LANGUAGES CXX C)

# Set C standard to C23 (required by PacketCord.io)
set(CMAKE_C_STANDARD 23)
set(CMAKE_C_STANDARD_REQUIRED ON)
set(CMAKE_C_EXTENSIONS OFF)

# Build type
if(NOT CMAKE_BUILD_TYPE)
    set(CMAKE_BUILD_TYPE Release)
endif()

# Compiler flags
set(CMAKE_C_FLAGS_DEBUG "-g -O0 -Wall -Wextra -DDEBUG")
set(CMAKE_C_FLAGS_RELEASE "-O3 -DNDEBUG")
set(CMAKE_CXX_FLAGS_DEBUG "-g -O0 -Wall -Wextra -DDEBUG")
set(CMAKE_CXX_FLAGS_RELEASE "-O3 -DNDEBUG")

# Fetch PacketCord.io from GitHub
include(FetchContent)

# Get PacketCord.io source but don't add subdirectories automatically
FetchContent_Declare(
    packetcord
    GIT_REPOSITORY https://github.com/packetcord/packetcord.io.git
    GIT_TAG main
)

FetchContent_GetProperties(packetcord)
if(NOT packetcord_POPULATED)
    FetchContent_Populate(packetcord)
    # Only add the cord-flow module, not the entire project
    add_subdirectory(${packetcord_SOURCE_DIR}/modules/cord-flow ${packetcord_BINARY_DIR}/modules/cord-flow)
endif()

# Configure PacketCord.io with proper Linux definitions
if(TARGET cord_flow)
    target_compile_definitions(cord_flow PRIVATE
        _GNU_SOURCE
        __USE_MISC
        _DEFAULT_SOURCE
    )
endif()

# Set PacketCord.io variables
set(PACKETCORD_INCLUDE_DIRS ${packetcord_SOURCE_DIR}/modules/cord-flow/include)
set(PACKETCORD_LIBRARIES cord_flow)

# Create output directories
file(MAKE_DIRECTORY ${CMAKE_BINARY_DIR}/bin)
file(MAKE_DIRECTORY ${CMAKE_BINARY_DIR}/lib)

# Create executable
add_executable(tunnel src/l3_tunnel_main.c)

# Include directories
target_include_directories(tunnel PRIVATE
    ${CMAKE_CURRENT_SOURCE_DIR}/includes
)

# Include PacketCord.io headers
target_include_directories(tunnel PRIVATE
    ${PACKETCORD_INCLUDE_DIRS}
    ${PACKETCORD_INCLUDE_DIRS}/cord_flow
)

# Link libraries
target_link_libraries(tunnel PRIVATE
    ${PACKETCORD_LIBRARIES}
)

# System-specific definitions for raw socket access
if(CMAKE_SYSTEM_NAME STREQUAL "Linux")
    target_compile_definitions(tunnel PRIVATE
        _GNU_SOURCE
        __USE_MISC
    )
endif()

# Set target properties
set_target_properties(tunnel PROPERTIES
    RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/bin"
    ARCHIVE_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/lib"
    LIBRARY_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/lib"
)

# Install target
install(TARGETS tunnel
    RUNTIME DESTINATION bin
)

# Override default clean to only remove the tunnel executable
set_target_properties(tunnel PROPERTIES
    ADDITIONAL_CLEAN_FILES "${CMAKE_BINARY_DIR}/bin/tunnel"
)

# Complete clean target
add_custom_target(distclean
    COMMAND ${CMAKE_COMMAND} -E remove_directory ${CMAKE_BINARY_DIR}/CMakeFiles
    COMMAND ${CMAKE_COMMAND} -E remove_directory ${CMAKE_BINARY_DIR}/bin
    COMMAND ${CMAKE_COMMAND} -E remove_directory ${CMAKE_BINARY_DIR}/lib
    COMMAND ${CMAKE_COMMAND} -E remove_directory ${CMAKE_BINARY_DIR}/_deps
    COMMAND ${CMAKE_COMMAND} -E remove -f ${CMAKE_BINARY_DIR}/CMakeCache.txt
    COMMAND ${CMAKE_COMMAND} -E remove -f ${CMAKE_BINARY_DIR}/cmake_install.cmake
    COMMAND ${CMAKE_COMMAND} -E remove -f ${CMAKE_BINARY_DIR}/Makefile
    COMMENT "Complete clean - removes all build files and dependencies"
)

# Print configuration summary
message(STATUS "PacketCord.io include directory: ${PACKETCORD_INCLUDE_DIRS}")
message(STATUS "PacketCord.io libraries: ${PACKETCORD_LIBRARIES}")
message(STATUS "Build type: ${CMAKE_BUILD_TYPE}")
message(STATUS "C compiler: ${CMAKE_C_COMPILER}")
message(STATUS "CXX compiler: ${CMAKE_CXX_COMPILER}")
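For reference, this CMakeLists.txt can also be configured and built manually outside the IDE (assuming a recent CMake and a C23-capable compiler on the build host):
mkdir build && cd build
cmake ..   # fetches PacketCord.io via FetchContent on the first configure
make       # the binary ends up under build/bin/tunnel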
Code
Client code
We are starting on a clean setup, with no containers instantiated on the Toradex target.
torizon@verdin-imx8mm-15400884:~$ docker ps -a
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
torizon@verdin-imx8mm-15400884:~$
Since we will be using the standard Docker network (the subnet 172.18.0.0/24), the instantiated container will be assigned the IP address 172.18.0.2/24 (172.18.0.1 is the IP of the host bridge). This is the source IP address that we place in the code:
cord_app_context.l4_udp = CORD_CREATE_L4_UDP_FLOW_POINT('B', inet_addr("172.18.0.2"), inet_addr("38.242.203.214"), 60000, 50000);
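The subnet and the bridge name vary per setup; a quick way to confirm them (the network name below is a placeholder) is:
# List the Docker networks and pick the one used by the project
docker network ls
# Confirm its subnet (note the network ID - the corresponding Linux bridge is named br-<id-prefix>)
docker network inspect <network-name> | grep -i subnet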
The complete client code in our case looks as follows:
#include <cord_flow/event_handler/cord_linux_api_event_handler.h>
#include <cord_flow/flow_point/cord_l2_raw_socket_flow_point.h>
#include <cord_flow/flow_point/cord_l3_stack_inject_flow_point.h>
#include <cord_flow/flow_point/cord_l4_udp_flow_point.h>
#include <cord_flow/memory/cord_memory.h>
#include <cord_flow/match/cord_match.h>
#include <cord_error.h>

#include <arpa/inet.h>  // inet_addr(), inet_pton()
#include <errno.h>      // errno, EINTR
#include <signal.h>     // signal(), SIGINT

#define MTU_SIZE             1420
#define ETHERNET_HEADER_SIZE 14
#define DOT1Q_TAG_SIZE       4
#define BUFFER_SIZE          (MTU_SIZE + ETHERNET_HEADER_SIZE)

#define MATCH_IP_TO_TUNNEL "11.11.11.100"
#define MATCH_NETMASK      "255.255.255.255"

static struct
{
    CordFlowPoint *l2_eth;
    CordFlowPoint *l3_si;
    CordFlowPoint *l4_udp;
    CordEventHandler *evh;
} cord_app_context;

static void cord_app_setup(void)
{
    CORD_LOG("[CordApp] Expecting manual additional setup - blackhole routes, interface MTU.\n");
}

static void cord_app_cleanup(void)
{
    CORD_LOG("[CordApp] Destroying all objects!\n");
    CORD_DESTROY_FLOW_POINT(cord_app_context.l2_eth);
    CORD_DESTROY_FLOW_POINT(cord_app_context.l3_si);
    CORD_DESTROY_FLOW_POINT(cord_app_context.l4_udp);
    CORD_DESTROY_EVENT_HANDLER(cord_app_context.evh);
    CORD_LOG("[CordApp] Expecting manual additional cleanup.\n");
}

static void cord_app_sigint_callback(int sig)
{
    cord_app_cleanup();
    CORD_LOG("[CordApp] Terminating the PacketCord Tunnel App!\n");
    CORD_ASYNC_SAFE_EXIT(CORD_OK);
}

int main(void)
{
    struct in_addr prefix_ip, netmask;
    inet_pton(AF_INET, MATCH_IP_TO_TUNNEL, &prefix_ip);
    inet_pton(AF_INET, MATCH_NETMASK, &netmask);

    cord_retval_t cord_retval;
    CORD_BUFFER(buffer, BUFFER_SIZE);
    size_t rx_bytes = 0;
    size_t tx_bytes = 0;
    cord_ipv4_hdr_t *ip = NULL;
    cord_udp_hdr_t *udp = NULL;

    CORD_LOG("[CordApp] Launching the PacketCord Tunnel App!\n");
    signal(SIGINT, cord_app_sigint_callback);

    cord_app_context.l2_eth = CORD_CREATE_L2_RAW_SOCKET_FLOW_POINT('A', "eth0");
    cord_app_context.l3_si  = CORD_CREATE_L3_STACK_INJECT_FLOW_POINT('I');
    cord_app_context.l4_udp = CORD_CREATE_L4_UDP_FLOW_POINT('B', inet_addr("172.18.0.2"), inet_addr("38.242.203.214"), 60000, 50000);
    cord_app_context.evh    = CORD_CREATE_LINUX_API_EVENT_HANDLER('E', -1);

    cord_retval = CORD_EVENT_HANDLER_REGISTER_FLOW_POINT(cord_app_context.evh, cord_app_context.l2_eth);
    cord_retval = CORD_EVENT_HANDLER_REGISTER_FLOW_POINT(cord_app_context.evh, cord_app_context.l4_udp);

    while (1)
    {
        int nb_fds = CORD_EVENT_HANDLER_WAIT(cord_app_context.evh);
        if (nb_fds == -1)
        {
            if (errno == EINTR)
                continue;
            else
            {
                CORD_ERROR("[CordApp] Error: CORD_EVENT_HANDLER_WAIT()");
                CORD_EXIT(CORD_ERR);
            }
        }

        for (uint8_t n = 0; n < nb_fds; n++)
        {
            if (cord_app_context.evh->events[n].data.fd == cord_app_context.l2_eth->io_handle)
            {
                cord_retval = CORD_FLOW_POINT_RX(cord_app_context.l2_eth, buffer, BUFFER_SIZE, &rx_bytes);
                if (cord_retval != CORD_OK)
                    continue; // Raw socket receive error

                if (rx_bytes < sizeof(cord_eth_hdr_t))
                    continue; // Packet too short to contain Ethernet header

                cord_eth_hdr_t *eth = cord_get_eth_hdr(buffer);
                if (!cord_match_eth_type(eth, CORD_ETH_P_IP))
                    continue; // Only handle IPv4 packets

                if (rx_bytes < sizeof(cord_eth_hdr_t) + sizeof(cord_ipv4_hdr_t))
                    continue; // Too short for IP header

                ip = cord_get_ipv4_hdr_from_eth(eth);
                if (!cord_match_ipv4_version(ip))
                    continue; // Not IPv4

                int iphdr_len = cord_get_ipv4_header_length(ip);
                if (rx_bytes < sizeof(cord_eth_hdr_t) + iphdr_len)
                    continue; // IP header incomplete

                if (CORD_L2_RAW_SOCKET_FLOW_POINT_ENSURE_INBOUD(cord_app_context.l2_eth) != CORD_OK)
                    continue; // Ensure this is not an outgoing packet

                if (rx_bytes < sizeof(cord_eth_hdr_t) + iphdr_len + sizeof(cord_udp_hdr_t))
                    continue; // Too short for UDP header

                udp = cord_get_udp_hdr_ipv4(ip);
                uint32_t src_ip = cord_get_ipv4_src_addr_ntohl(ip);
                uint32_t dst_ip = cord_get_ipv4_dst_addr_ntohl(ip);

                if (cord_match_ipv4_dst_subnet(ip, cord_ntohl(prefix_ip.s_addr), cord_ntohl(netmask.s_addr)))
                {
                    uint16_t total_len = cord_get_ipv4_total_length_ntohs(ip);
                    cord_retval = CORD_FLOW_POINT_TX(cord_app_context.l4_udp, ip, total_len, &tx_bytes);
                    if (cord_retval != CORD_OK)
                    {
                        // Handle the error
                    }
                }
            }

            if (cord_app_context.evh->events[n].data.fd == cord_app_context.l4_udp->io_handle)
            {
                cord_retval = CORD_FLOW_POINT_RX(cord_app_context.l4_udp, buffer, BUFFER_SIZE, &rx_bytes);
                if (cord_retval != CORD_OK)
                    continue; // Raw socket receive error

                cord_ipv4_hdr_t *ip_inner = cord_get_ipv4_hdr_l3(buffer);
                if (rx_bytes != cord_get_ipv4_total_length_ntohs(ip_inner))
                    continue; // Packet partially received

                if (!cord_match_ipv4_version(ip_inner))
                    continue;

                int ip_inner_hdrlen = cord_get_ipv4_header_length(ip_inner);

                CORD_L3_STACK_INJECT_FLOW_POINT_SET_TARGET_IPV4(cord_app_context.l3_si, cord_get_ipv4_dst_addr_l3(ip_inner));
                cord_retval = CORD_FLOW_POINT_TX(cord_app_context.l3_si, buffer, cord_get_ipv4_total_length_ntohs(ip_inner), &tx_bytes);
                if (cord_retval != CORD_OK)
                {
                    // Handle the error
                }
            }
        }
    }

    cord_app_cleanup();
    return CORD_OK;
}
Server code
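It mirrors the client code; the only differences are the captured interface (veth0 instead of eth0), the matched destination prefix (172.18.0.100 instead of 11.11.11.100), and the swapped IP address/port pairs of the UDP flow point.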
#include <cord_flow/event_handler/cord_linux_api_event_handler.h>
#include <cord_flow/flow_point/cord_l2_raw_socket_flow_point.h>
#include <cord_flow/flow_point/cord_l3_stack_inject_flow_point.h>
#include <cord_flow/flow_point/cord_l4_udp_flow_point.h>
#include <cord_flow/memory/cord_memory.h>
#include <cord_flow/match/cord_match.h>
#include <cord_error.h>

#include <arpa/inet.h>  // inet_addr(), inet_pton()
#include <errno.h>      // errno, EINTR
#include <signal.h>     // signal(), SIGINT

#define MTU_SIZE             1420
#define ETHERNET_HEADER_SIZE 14
#define DOT1Q_TAG_SIZE       4
#define BUFFER_SIZE          (MTU_SIZE + ETHERNET_HEADER_SIZE)

#define MATCH_IP_TO_TUNNEL "172.18.0.100"
#define MATCH_NETMASK      "255.255.255.255"

static struct
{
    CordFlowPoint *l2_eth;
    CordFlowPoint *l3_si;
    CordFlowPoint *l4_udp;
    CordEventHandler *evh;
} cord_app_context;

static void cord_app_setup(void)
{
    CORD_LOG("[CordApp] Expecting manual additional setup - blackhole routes, interface MTU.\n");
}

static void cord_app_cleanup(void)
{
    CORD_LOG("[CordApp] Destroying all objects!\n");
    CORD_DESTROY_FLOW_POINT(cord_app_context.l2_eth);
    CORD_DESTROY_FLOW_POINT(cord_app_context.l3_si);
    CORD_DESTROY_FLOW_POINT(cord_app_context.l4_udp);
    CORD_DESTROY_EVENT_HANDLER(cord_app_context.evh);
    CORD_LOG("[CordApp] Expecting manual additional cleanup.\n");
}

static void cord_app_sigint_callback(int sig)
{
    cord_app_cleanup();
    CORD_LOG("[CordApp] Terminating the PacketCord Tunnel App!\n");
    CORD_ASYNC_SAFE_EXIT(CORD_OK);
}

int main(void)
{
    struct in_addr prefix_ip, netmask;
    inet_pton(AF_INET, MATCH_IP_TO_TUNNEL, &prefix_ip);
    inet_pton(AF_INET, MATCH_NETMASK, &netmask);

    cord_retval_t cord_retval;
    CORD_BUFFER(buffer, BUFFER_SIZE);
    size_t rx_bytes = 0;
    size_t tx_bytes = 0;
    cord_ipv4_hdr_t *ip = NULL;
    cord_udp_hdr_t *udp = NULL;

    CORD_LOG("[CordApp] Launching the PacketCord Tunnel App!\n");
    signal(SIGINT, cord_app_sigint_callback);

    cord_app_context.l2_eth = CORD_CREATE_L2_RAW_SOCKET_FLOW_POINT('A', "veth0");
    cord_app_context.l3_si  = CORD_CREATE_L3_STACK_INJECT_FLOW_POINT('I');
    cord_app_context.l4_udp = CORD_CREATE_L4_UDP_FLOW_POINT('B', inet_addr("38.242.203.214"), inet_addr("78.83.207.86"), 50000, 60000);
    cord_app_context.evh    = CORD_CREATE_LINUX_API_EVENT_HANDLER('E', -1);

    cord_retval = CORD_EVENT_HANDLER_REGISTER_FLOW_POINT(cord_app_context.evh, cord_app_context.l2_eth);
    cord_retval = CORD_EVENT_HANDLER_REGISTER_FLOW_POINT(cord_app_context.evh, cord_app_context.l4_udp);

    while (1)
    {
        int nb_fds = CORD_EVENT_HANDLER_WAIT(cord_app_context.evh);
        if (nb_fds == -1)
        {
            if (errno == EINTR)
                continue;
            else
            {
                CORD_ERROR("[CordApp] Error: CORD_EVENT_HANDLER_WAIT()");
                CORD_EXIT(CORD_ERR);
            }
        }

        for (uint8_t n = 0; n < nb_fds; n++)
        {
            if (cord_app_context.evh->events[n].data.fd == cord_app_context.l2_eth->io_handle)
            {
                cord_retval = CORD_FLOW_POINT_RX(cord_app_context.l2_eth, buffer, BUFFER_SIZE, &rx_bytes);
                if (cord_retval != CORD_OK)
                    continue; // Raw socket receive error

                if (rx_bytes < sizeof(cord_eth_hdr_t))
                    continue; // Packet too short to contain Ethernet header

                cord_eth_hdr_t *eth = cord_get_eth_hdr(buffer);
                if (!cord_match_eth_type(eth, CORD_ETH_P_IP))
                    continue; // Only handle IPv4 packets

                if (rx_bytes < sizeof(cord_eth_hdr_t) + sizeof(cord_ipv4_hdr_t))
                    continue; // Too short for IP header

                ip = cord_get_ipv4_hdr_from_eth(eth);
                if (!cord_match_ipv4_version(ip))
                    continue; // Not IPv4

                int iphdr_len = cord_get_ipv4_header_length(ip);
                if (rx_bytes < sizeof(cord_eth_hdr_t) + iphdr_len)
                    continue; // IP header incomplete

                if (CORD_L2_RAW_SOCKET_FLOW_POINT_ENSURE_INBOUD(cord_app_context.l2_eth) != CORD_OK)
                    continue; // Ensure this is not an outgoing packet

                if (rx_bytes < sizeof(cord_eth_hdr_t) + iphdr_len + sizeof(cord_udp_hdr_t))
                    continue; // Too short for UDP header

                udp = cord_get_udp_hdr_ipv4(ip);
                uint32_t src_ip = cord_get_ipv4_src_addr_ntohl(ip);
                uint32_t dst_ip = cord_get_ipv4_dst_addr_ntohl(ip);

                if (cord_match_ipv4_dst_subnet(ip, cord_ntohl(prefix_ip.s_addr), cord_ntohl(netmask.s_addr)))
                {
                    uint16_t total_len = cord_get_ipv4_total_length_ntohs(ip);
                    cord_retval = CORD_FLOW_POINT_TX(cord_app_context.l4_udp, ip, total_len, &tx_bytes);
                    if (cord_retval != CORD_OK)
                    {
                        // Handle the error
                    }
                }
            }

            if (cord_app_context.evh->events[n].data.fd == cord_app_context.l4_udp->io_handle)
            {
                cord_retval = CORD_FLOW_POINT_RX(cord_app_context.l4_udp, buffer, BUFFER_SIZE, &rx_bytes);
                if (cord_retval != CORD_OK)
                    continue; // Raw socket receive error

                cord_ipv4_hdr_t *ip_inner = cord_get_ipv4_hdr_l3(buffer);
                if (rx_bytes != cord_get_ipv4_total_length_ntohs(ip_inner))
                    continue; // Packet partially received

                if (!cord_match_ipv4_version(ip_inner))
                    continue;

                int ip_inner_hdrlen = cord_get_ipv4_header_length(ip_inner);

                CORD_L3_STACK_INJECT_FLOW_POINT_SET_TARGET_IPV4(cord_app_context.l3_si, cord_get_ipv4_dst_addr_l3(ip_inner));
                cord_retval = CORD_FLOW_POINT_TX(cord_app_context.l3_si, buffer, cord_get_ipv4_total_length_ntohs(ip_inner), &tx_bytes);
                if (cord_retval != CORD_OK)
                {
                    // Handle the error
                }
            }
        }
    }

    cord_app_cleanup();
    return CORD_OK;
}
Note: The traffic addressed to destination 172.18.0.100/32 will be tunneled. On the other side, the namespace is configured with IP address 172.18.0.100/24. The mask lengths may seem confusing, but this is intentional and perfectly valid routing and tunneling logic.
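To spell the logic out: on the server, MATCH_IP_TO_TUNNEL "172.18.0.100" combined with MATCH_NETMASK "255.255.255.255" is a /32 match condition inside the app - only packets destined exactly for that host get tunneled. On the client, ip addr add 172.18.0.100/24 dev veth1 is ordinary on-link addressing that simply places veth1 on the Docker subnet.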
Configuration
Client side
On the Torizon OS host shell, execute the following commands to create the namespace that we will use as an endpoint for our tunneled communication:
# 1. Create the namespace
sudo ip netns add ns1
# 2. Create the veth pair
sudo ip link add veth0 type veth peer name veth1
# 3. Move veth1 to namespace ns1
sudo ip link set veth1 netns ns1
# 4. Set veth0 up
sudo ip link set veth0 up
# 5. Add veth0 to the bridge of the (default) Docker network for the 172.18.0.0/24 subnet (the bridge name will differ per system)
sudo brctl addif br-9488bfcf8667 veth0
# 6. Inside the namespace, assign IP to veth1
sudo ip netns exec ns1 ip addr add 172.18.0.100/24 dev veth1
# 7. Bring up veth1 inside namespace
sudo ip netns exec ns1 ip link set veth1 up
# 8. Bring up the loopback interface inside namespace
sudo ip netns exec ns1 ip link set lo up
# 9. Set the default gateway via the container where we run the TEP
sudo ip netns exec ns1 ip route add default via 172.18.0.2
# 10. Disable the offload functionality on the vethX interfaces
sudo ethtool --offload veth0 rx off tx off
sudo ip netns exec ns1 ethtool --offload veth1 rx off tx off
# 11. Set the MTU size on both ends of the veth pair
sudo ip link set mtu 1420 dev veth0
sudo ip netns exec ns1 ip link set mtu 1420 dev veth1
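The value 1420 is chosen to account for the tunnel overhead. A quick sanity check, assuming a standard 1500-byte MTU on the physical uplink and an outer IPv4 header without options:
1500 (uplink MTU) - 20 (outer IPv4) - 8 (outer UDP) = 1472 bytes available for the inner packet
An inner MTU of 1420 therefore fits comfortably and leaves additional headroom (for example, for potential encryption overhead).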
Of course, the namespace could be completely replaced by a second Docker container (via Docker Compose or by a multi-container project inside the Torizon IDE).
In addition to the above, we need to apply the blackhole route inside the container that is running our tunneling app in order to discard the in-kernel packet copies caused by the standard operation of the raw sockets.
The Docker Compose file should be modified accordingly by adding:
cap_add:
  - NET_RAW
  - NET_ADMIN
command: >
  sh -c "
  export DEBIAN_FRONTEND=noninteractive &&
  apt update &&
  apt install -y iproute2 &&
  ip route add blackhole 11.11.11.0/24 &&
  echo 'Starting tunnel...' &&
  ./tunnel
  " --
The docker-compose.yml file now looks as follows:
services:
  tunnel-debug:
    build:
      context: .
      dockerfile: Dockerfile.debug
    image: ${LOCAL_REGISTRY}:5002/tunnel-debug:${TAG}
    ports:
      - ${DEBUG_SSH_PORT}:${DEBUG_SSH_PORT}
    cap_add:
      - NET_RAW
      - NET_ADMIN
    command: >
      sh -c "
      export DEBIAN_FRONTEND=noninteractive &&
      apt update &&
      apt install -y iproute2 &&
      ip route add blackhole 11.11.11.0/24 &&
      echo 'Starting tunnel...' &&
      ./tunnel
      " --

  tunnel:
    build:
      context: .
      dockerfile: Dockerfile
    image: ${DOCKER_LOGIN}/tunnel:${TAG}
    cap_add:
      - NET_RAW
      - NET_ADMIN
    command: >
      sh -c "
      export DEBIAN_FRONTEND=noninteractive &&
      apt update &&
      apt install -y iproute2 &&
      ip route add blackhole 11.11.11.0/24 &&
      echo 'Starting tunnel...' &&
      ./tunnel
      " --
And in order to keep the configuration clear and in sync, it is advisable to comment out the entrypoint inside the Dockerfile(s), for each target:
# Command executed in runtime when the container starts
# CMD ["./tunnel"]
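Once the container is up, it is worth verifying that the blackhole route was actually applied (the container ID below is a placeholder):
docker exec -it <container-id> ip route show type blackhole
# Expected output similar to:
# blackhole 11.11.11.0/24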
Server side
The setup is exactly the same as in the UDP Tunnel example:
# 1. Create the namespace
sudo ip netns add ns1
# 2. Create the veth pair
sudo ip link add veth0 type veth peer name veth1
# 3. Move veth1 to namespace ns1
sudo ip link set veth1 netns ns1
# 4. Assign IP address to veth0 (host side)
sudo ip addr add 11.11.11.1/24 dev veth0
# 5. Bring up veth0
sudo ip link set veth0 up
# 6. Inside the namespace, assign IP to veth1
sudo ip netns exec ns1 ip addr add 11.11.11.100/24 dev veth1
# 7. Bring up veth1 inside namespace
sudo ip netns exec ns1 ip link set veth1 up
# 8. Bring up the loopback interface inside namespace
sudo ip netns exec ns1 ip link set lo up
# 9. Set default route inside namespace via veth0's IP
sudo ip netns exec ns1 ip route add default via 11.11.11.1
# 10. Disable the offload functionality on the vethX interfaces
sudo ethtool --offload veth0 rx off tx off
sudo ip netns exec ns1 ethtool --offload veth1 rx off tx off
# 11. Set the MTU size on both ends of the veth pair
sudo ip link set mtu 1420 dev veth0
sudo ip netns exec ns1 ip link set mtu 1420 dev veth1
Let's not forget the blackhole route here as well:
sudo ip route add blackhole 172.18.0.0/24
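The same verification applies here:
ip route show type blackhole
# blackhole 172.18.0.0/24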
Compile and run
Client side
At this point we assume that we are done with both the CMakeLists.txt and l3_tunnel_main.c files, and the only thing left is to build the project via Terminal > Run Task > run-container-torizon-release-arm64 from the Torizon IDE.
Server side
On the server side, assuming the repository has already been cloned, we just need to build and run it:
mkdir build
cd build/
cmake ..
make
sudo ./apps/l3_tunnel/l3_tunnel_app
In case no errors have occurred, the console output should look something like this:
[CordApp] Launching the PacketCord Tunnel App!
[CordL4UdpFlowPoint] Successfully bound to port 60000
Ping test
Let's initiate the ping from the Toradex (client) side towards the server first (because the client is behind NAT).
torizon@verdin-imx8mm-15400884:~$ sudo ip netns exec ns1 ping 11.11.11.100 -c 4
PING 11.11.11.100 (11.11.11.100): 56 data bytes
64 bytes from 11.11.11.100: seq=0 ttl=64 time=36.646 ms
64 bytes from 11.11.11.100: seq=1 ttl=64 time=36.920 ms
64 bytes from 11.11.11.100: seq=2 ttl=64 time=37.285 ms
64 bytes from 11.11.11.100: seq=3 ttl=64 time=36.722 ms
--- 11.11.11.100 ping statistics ---
4 packets transmitted, 4 packets received, 0% packet loss
round-trip min/avg/max = 36.646/36.893/37.285 ms
Once the NAT mapping has been established by this client-initiated traffic, the server should also be able to ping the namespace on the client side:
# ip netns exec ns1 ping 172.18.0.100 -c 4
PING 172.18.0.100 (172.18.0.100) 56(84) bytes of data.
64 bytes from 172.18.0.100: icmp_seq=1 ttl=64 time=38.1 ms
64 bytes from 172.18.0.100: icmp_seq=2 ttl=64 time=38.6 ms
64 bytes from 172.18.0.100: icmp_seq=3 ttl=64 time=36.0 ms
64 bytes from 172.18.0.100: icmp_seq=4 ttl=64 time=36.2 ms
--- 172.18.0.100 ping statistics ---
4 packets transmitted, 4 received, 0% packet loss, time 3004ms
rtt min/avg/max/mdev = 35.959/37.203/38.558/1.145 ms
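Beyond ICMP, any L3 traffic between the two namespaces should traverse the tunnel. As an additional quick check, a simple TCP exchange can be run through it with netcat (flag syntax differs between netcat variants; port 12345 is an arbitrary choice):
# On the server, listen inside the namespace
sudo ip netns exec ns1 nc -l -p 12345
# On the Toradex client, connect from the namespace and type a few lines
sudo ip netns exec ns1 nc 11.11.11.100 12345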
Traffic capture
Here are some hints related to sniffing the packets on both sides.
Client side
On the Toradex host, let's have a look at the traffic inside the container:
# Enter the container
$ docker exec -it f5142dadba68 /bin/bash
# Install tcpdump
root@f5142dadba68:/home/torizon/app# apt update
root@f5142dadba68:/home/torizon/app# apt install tcpdump
root@f5142dadba68:/home/torizon/app# tcpdump -n -i eth0
tcpdump: verbose output suppressed, use -v[v]... for full protocol decode
listening on eth0, link-type EN10MB (Ethernet), snapshot length 262144 bytes
21:16:59.426500 IP 172.18.0.100 > 11.11.11.100: ICMP echo request, id 4690, seq 361, length 64
21:16:59.426545 IP 172.18.0.100 > 11.11.11.100: ICMP echo request, id 4690, seq 361, length 64
21:16:59.426887 IP 172.18.0.2.60000 > 38.242.203.214.50000: UDP, length 84
21:16:59.473405 IP 38.242.203.214.50000 > 172.18.0.2.60000: UDP, length 84
21:16:59.473529 IP 11.11.11.100 > 172.18.0.100: ICMP echo reply, id 4690, seq 361, length 64
21:17:00.427131 IP 172.18.0.100 > 11.11.11.100: ICMP echo request, id 4690, seq 362, length 64
21:17:00.427175 IP 172.18.0.100 > 11.11.11.100: ICMP echo request, id 4690, seq 362, length 64
21:17:00.427509 IP 172.18.0.2.60000 > 38.242.203.214.50000: UDP, length 84
21:17:00.463467 IP 38.242.203.214.50000 > 172.18.0.2.60000: UDP, length 84
21:17:00.463591 IP 11.11.11.100 > 172.18.0.100: ICMP echo reply, id 4690, seq 362, length 64
We can see both the plain ICMP packets and the tunneled copies encapsulated within UDP.
Server side
In the example below, we execute the following command to observe the ICMP packets encapsulated within UDP:
# tcpdump -n -i eth0 udp port 50000
tcpdump: verbose output suppressed, use -v[v]... for full protocol decode
listening on eth0, link-type EN10MB (Ethernet), snapshot length 262144 bytes
23:10:58.231007 IP 78.83.207.86.60000 > 38.242.203.214.50000: UDP, length 84
23:10:58.231316 IP 38.242.203.214.50000 > 78.83.207.86.60000: UDP, length 84
23:10:59.230939 IP 78.83.207.86.60000 > 38.242.203.214.50000: UDP, length 84
23:10:59.232051 IP 38.242.203.214.50000 > 78.83.207.86.60000: UDP, length 84
23:11:00.232082 IP 78.83.207.86.60000 > 38.242.203.214.50000: UDP, length 84
23:11:00.232995 IP 38.242.203.214.50000 > 78.83.207.86.60000: UDP, length 84
We can also observe the decapsulated ICMP packets between the two IPs that we are tunneling (namely 11.11.11.100/32 and 172.18.0.100/32):
# ip netns exec ns1 tcpdump -ni veth1
tcpdump: verbose output suppressed, use -v[v]... for full protocol decode
listening on veth1, link-type EN10MB (Ethernet), snapshot length 262144 bytes
^C23:13:44.327510 IP 172.18.0.100 > 11.11.11.100: ICMP echo request, id 4690, seq 166, length 64
23:13:44.327542 IP 11.11.11.100 > 172.18.0.100: ICMP echo reply, id 4690, seq 166, length 64
23:13:45.343400 IP 172.18.0.100 > 11.11.11.100: ICMP echo request, id 4690, seq 167, length 64
23:13:45.343442 IP 11.11.11.100 > 172.18.0.100: ICMP echo reply, id 4690, seq 167, length 64
23:13:46.334287 IP 172.18.0.100 > 11.11.11.100: ICMP echo request, id 4690, seq 168, length 64
23:13:46.334323 IP 11.11.11.100 > 172.18.0.100: ICMP echo reply, id 4690, seq 168, length 64
Outro
We have managed to successfully demonstrate a container- and virtualisation-friendly tunneling solution (spanning both bare-metal and Linux-namespace endpoints), running in user space, software defined, at source code level (we could also refer to the compiled output as an NFV application), and not reliant on the old-school tunneling methods. On-demand encryption that utilises the CPU instructions for hardware acceleration could be added via the CORD-CRYPTO library.
Just what is required, in a fully transparent, programmable way - nothing more, nothing less. And the focus is on network programming, not merely on editing the configuration files of something that already exists. You can always react to market needs and forthcoming regulations (like the European CRA), and you can run your custom programmable network packet processing logic not only on server-grade hardware, but also on embedded devices like the Toradex modules.