Cilium Multicast Support
- Issues
  - Multicast support cilium/cilium#13239
  - CFP: multicast support cilium/cilium#28750
  - CFP: Multicast Network Policy Support cilium/cilium#29470 (tracking issue)
  - CFP: Multicast IPSec Support cilium/cilium#29471 (tracking issue)
- Design CFP
- Pull Requests
Verification Procedure
Keeping the verification console log here for reference.
- Enable multicast and confirm VXLAN tunnel mode (a combined check follows the log below).
root@b2151bf26020:/go/src/github.com/cilium/cilium# cilium config set multicast-enabled true
✨ Patching ConfigMap cilium-config with multicast-enabled=true...
♻️ Restarted Cilium pods
root@b2151bf26020:/go/src/github.com/cilium/cilium# kubectl get configmap cilium-config -n kube-system -o yaml | grep vxlan
tunnel-protocol: vxlan
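Both settings can be confirmed in one pass against the same ConfigMap:
kubectl -n kube-system get configmap cilium-config -o yaml | grep -E 'multicast-enabled|tunnel-protocol'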
ROS 2 (DDS) uses the well-known multicast group address 239.255.0.1.
- On each Cilium agent pod, configure the desired multicast groups (a scripted sketch follows the commands below).
cilium-dbg bpf multicast group add 239.255.0.1
cilium-dbg bpf multicast group list
Group Address
239.255.0.1
# cleanup: cilium-dbg bpf multicast group delete 239.255.0.1
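Adding the group by hand on every agent gets tedious on larger clusters. A minimal scripted sketch, assuming the default kube-system namespace, the standard k8s-app=cilium pod label, and the cilium-agent container name:
GROUP=239.255.0.1
for pod in $(kubectl -n kube-system get pods -l k8s-app=cilium -o name); do
  kubectl -n kube-system exec "$pod" -c cilium-agent -- cilium-dbg bpf multicast group add "$GROUP"
done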
- Populate each group's subscriber map with every node except the local one as a remote subscriber (a scripted sketch follows the per-node commands below).
- Use each node's CiliumInternalIP as the subscriber IP.
- Do not add the local node's own IP as a subscriber.
kubectl get ciliumnodes.cilium.io
NAME                 CILIUMINTERNALIP   INTERNALIP   AGE
kind-control-plane   10.244.0.72        172.19.0.2   16m
kind-worker          10.244.1.86        172.19.0.3   16m
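The CiliumInternalIP values can also be pulled programmatically from the CiliumNode objects, which helps when scripting the subscriber setup:
kubectl get ciliumnodes -o jsonpath='{range .items[*]}{.metadata.name}{"\t"}{.spec.addresses[?(@.type=="CiliumInternalIP")].ip}{"\n"}{end}'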
# On kind-control-plane
cilium-dbg bpf multicast subscriber add 239.255.0.1 10.244.1.86
#cilium-dbg bpf multicast subscriber delete 239.255.0.1 10.244.1.86
cilium-dbg bpf multicast subscriber list all
Group         Subscriber    Type
239.255.0.1   10.244.1.86   Remote Node
# On kind-worker
cilium-dbg bpf multicast subscriber add 239.255.0.1 10.244.0.72
#cilium-dbg bpf multicast subscriber delete 239.255.0.1 10.244.0.72
cilium-dbg bpf multicast subscriber list all
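The two per-node command sets above scale poorly beyond two nodes. A scripted sketch that, for every Cilium agent pod, adds every other node's CiliumInternalIP as a subscriber (same namespace, label, and container-name assumptions as the group sketch above):
GROUP=239.255.0.1
# All CiliumInternalIPs in the cluster.
ips=$(kubectl get ciliumnodes -o jsonpath='{range .items[*]}{.spec.addresses[?(@.type=="CiliumInternalIP")].ip}{"\n"}{end}')
for pod in $(kubectl -n kube-system get pods -l k8s-app=cilium -o jsonpath='{range .items[*]}{.metadata.name}{"\n"}{end}'); do
  node=$(kubectl -n kube-system get pod "$pod" -o jsonpath='{.spec.nodeName}')
  self=$(kubectl get ciliumnode "$node" -o jsonpath='{.spec.addresses[?(@.type=="CiliumInternalIP")].ip}')
  for ip in $ips; do
    [ "$ip" = "$self" ] && continue   # omit the local node
    kubectl -n kube-system exec "$pod" -c cilium-agent -- cilium-dbg bpf multicast subscriber add "$GROUP" "$ip"
  done
done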
- Deploy the ROS 2 talker and listener sample applications.
kubectl label nodes kind-control-plane nodetype=kind-control-plane
kubectl label nodes kind-worker nodetype=kind-worker
kubectl apply -f ros2-sample.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: ros2-talker-1
spec:
  replicas: 1
  selector:
    matchLabels:
      app: ros2-talker-1
  template:
    metadata:
      labels:
        app: ros2-talker-1
    spec:
      containers:
      - image: tomoyafujita/ros:rolling
        command: ["/bin/bash", "-c"]
        args: ["source /opt/ros/$ROS_DISTRO/setup.bash && ros2 topic pub /chatter1 std_msgs/String \"data: Hello, I am talker-1\""]
        imagePullPolicy: IfNotPresent
        tty: true
        name: ros2-talker-1
      nodeSelector:
        nodetype: kind-control-plane
      tolerations:
      - key: node-role.kubernetes.io/master
        operator: Exists
        effect: NoSchedule
      - key: node-role.kubernetes.io/control-plane
        operator: Exists
        effect: NoSchedule
      restartPolicy: Always
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: ros2-listener-1
spec:
  replicas: 1
  selector:
    matchLabels:
      app: ros2-listener-1
  template:
    metadata:
      labels:
        app: ros2-listener-1
    spec:
      containers:
      - image: tomoyafujita/ros:rolling
        command: ["/bin/bash", "-c"]
        args: ["source /opt/ros/$ROS_DISTRO/setup.bash && ros2 topic echo /chatter1 std_msgs/String"]
        imagePullPolicy: IfNotPresent
        tty: true
        name: ros2-listener-1
      nodeSelector:
        nodetype: kind-worker
      tolerations:
      - key: node-role.kubernetes.io/master
        operator: Exists
        effect: NoSchedule
      - key: node-role.kubernetes.io/control-plane
        operator: Exists
        effect: NoSchedule
      restartPolicy: Always
root@ros2-talker-1-85d477fdf8-g4t5q:/# ros2 topic list
/chatter1
/parameter_events
/rosout
root@ros2-talker-1-85d477fdf8-g4t5q:/# ros2 topic echo /chatter1
data: Hello, I am talker-1
---
data: Hello, I am talker-1
...
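Cross-node delivery can also be confirmed from the listener side: the listener container runs ros2 topic echo as its entrypoint, so its stdout should show the same "data: Hello, I am talker-1" lines.
kubectl logs deployment/ros2-listener-1 --tail=5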