v1.11 backports 2022-05-02 by aditighag · Pull Request #19671 · cilium/cilium · GitHub

v1.11 backports 2022-05-02 #19671


Merged · 12 commits · May 3, 2022
Changes from all commits
4 changes: 2 additions & 2 deletions Documentation/conf.py
@@ -72,7 +72,7 @@
release = open("../VERSION", "r").read().strip()
# Used by version warning
versionwarning_body_selector = "div.document"
versionwarning_api_url = "docs.cilium.io"
versionwarning_api_url = "https://docs.cilium.io/"

# The version of Go used to compile Cilium
go_release = open("../GO_VERSION", "r").read().strip()
@@ -191,7 +191,7 @@
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
html_extra_path = ['robots']
html_extra_path = ['robots/robots.txt']

# -- Options for HTMLHelp output ------------------------------------------

4 changes: 2 additions & 2 deletions Documentation/gettingstarted/kubeproxy-free.rst
@@ -699,7 +699,7 @@ with XDP, and number of combined channels need to be adapted.
The default MTU is set to 9001 on the ena driver. Given XDP buffers are linear, they
operate on a single page. A driver typically reserves some headroom for XDP as well
(e.g. for encapsulation purposes); therefore, the highest possible MTU for XDP would
be 3818.
be 3498.
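
The corrected figure of 3498 matches the arithmetic of the upstream Linux ena driver's maximum XDP MTU. A small sketch of that calculation in Go (the constants are assumptions taken from the kernel's ena driver and are worth verifying against your kernel source); note that leaving out the skb_shared_info reservation gives 3818, the stale value this change replaces:

    package main

    import "fmt"

    func main() {
    	const (
    		pageSize    = 4096 // XDP buffers are linear: one page per packet
    		ethHdr      = 14   // ETH_HLEN
    		ethFCS      = 4    // ETH_FCS_LEN
    		vlanHdr     = 4    // VLAN_HLEN
    		xdpHeadroom = 256  // XDP_PACKET_HEADROOM reserved by the driver
    		sharedInfo  = 320  // SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
    	)
    	fmt.Println(pageSize - ethHdr - ethFCS - vlanHdr - xdpHeadroom - sharedInfo) // 3498
    }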

In terms of ena channels, the settings can be gathered via ``ethtool -l eth0``. For the
``m5n.xlarge`` instance, the default output should look like::
Expand All @@ -721,7 +721,7 @@ In order to use XDP the channels must be set to at most 1/2 of the value from

.. code-block:: shell-session

$ for ip in $IPS ; do ssh ec2-user@$ip "sudo ip link set dev eth0 mtu 3818"; done
$ for ip in $IPS ; do ssh ec2-user@$ip "sudo ip link set dev eth0 mtu 3498"; done
$ for ip in $IPS ; do ssh ec2-user@$ip "sudo ethtool -L eth0 combined 2"; done

In order to deploy Cilium, the Kubernetes API server IP and port are needed:
Expand Down
12 changes: 9 additions & 3 deletions bpf/lib/nodeport.h
@@ -1898,7 +1898,8 @@ static __always_inline int rev_nodeport_lb4(struct __ctx_buff *ctx, int *ifindex
l4_off = l3_off + ipv4_hdrlen(ip4);
csum_l4_offset_and_flags(tuple.nexthdr, &csum_off);

#if defined(ENABLE_EGRESS_GATEWAY) && !defined(TUNNEL_MODE)
#if defined(ENABLE_EGRESS_GATEWAY) && !defined(TUNNEL_MODE) && \
__ctx_is != __ctx_xdp
/* Traffic from clients to egress gateway nodes reaches said gateways
* by a vxlan tunnel. If we are not using TUNNEL_MODE, we need to
* identify reverse traffic from the gateway to clients and also steer
@@ -1907,6 +1908,10 @@ static __always_inline int rev_nodeport_lb4(struct __ctx_buff *ctx, int *ifindex
* egress gateway map using a reverse address tuple. A match means that
* the corresponding forward traffic was forwarded to the egress gateway
* via the tunnel.
*
* Currently, we don't support redirect to a tunnel netdev / encap on
* XDP. Thus, the problem mentioned above is present when using the
* egress gw feature with bpf_xdp.
*/
{
struct egress_gw_policy_entry *egress_policy;
@@ -1939,7 +1944,7 @@ static __always_inline int rev_nodeport_lb4(struct __ctx_buff *ctx, int *ifindex
bpf_mark_snat_done(ctx);

*ifindex = ct_state.ifindex;
#ifdef TUNNEL_MODE
#if defined(TUNNEL_MODE) && __ctx_is != __ctx_xdp
{
struct remote_endpoint_info *info;

@@ -2016,7 +2021,8 @@ static __always_inline int rev_nodeport_lb4(struct __ctx_buff *ctx, int *ifindex

return CTX_ACT_OK;

#if defined(ENABLE_EGRESS_GATEWAY) || defined(TUNNEL_MODE)
#if (defined(ENABLE_EGRESS_GATEWAY) || defined(TUNNEL_MODE)) && \
__ctx_is != __ctx_xdp
encap_redirect:
ret = __encap_with_nodeid(ctx, tunnel_endpoint, SECLABEL, TRACE_PAYLOAD_LEN);
if (ret)
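
The comment in this hunk describes a reverse-tuple lookup: forward entries in the egress gateway policy map are keyed by (source, destination), so reply traffic is matched by swapping the addresses back into forward order before the lookup. The added `__ctx_is != __ctx_xdp` guards compile this encap path out of the XDP build, since redirect to a tunnel netdev is not supported from bpf_xdp. A conceptual Go rendering of the lookup (illustrative types and addresses only; the real implementation is the BPF C above):

    package main

    import "fmt"

    // tupleKey mimics the (source, destination) key of the egress policy map.
    type tupleKey struct{ src, dst string }

    func main() {
    	egressPolicy := map[tupleKey]string{
    		{src: "10.0.1.5", dst: "203.0.113.7"}: "gateway-1", // forward entry
    	}
    	// Reply packet: from 203.0.113.7 back to 10.0.1.5. Swap the tuple
    	// into forward order; a hit means the forward leg went via the gateway.
    	key := tupleKey{src: "10.0.1.5", dst: "203.0.113.7"}
    	if gw, ok := egressPolicy[key]; ok {
    		fmt.Println("steer reply via tunnel to", gw)
    	}
    }
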
2 changes: 1 addition & 1 deletion clustermesh-apiserver/main.go
@@ -303,7 +303,7 @@ func updateIdentity(obj interface{}) {

var key []byte
for _, l := range labelArray {
key = append(key, []byte(l.FormatForKVStore())...)
key = append(key, l.FormatForKVStore()...)
}

if len(key) == 0 {
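
Dropping the conversion wrapper suggests FormatForKVStore now returns []byte rather than string (an inference from this diff, not shown in it). Appending a []byte directly avoids the intermediate copy that a []byte(...) conversion of a string performs; a minimal sketch with a stand-in function:

    package main

    import "fmt"

    // formatForKVStore is a stand-in for the label method in this diff.
    func formatForKVStore() []byte { return []byte("k8s:app=web;") }

    func main() {
    	var key []byte
    	key = append(key, formatForKVStore()...) // no []byte(...) conversion needed
    	fmt.Println(string(key))
    }
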
40 changes: 31 additions & 9 deletions daemon/cmd/daemon.go
@@ -224,7 +224,7 @@ func (d *Daemon) init() error {
bandwidth.InitBandwidthManager()

if err := d.createNodeConfigHeaderfile(); err != nil {
return err
return fmt.Errorf("failed while creating node config header file: %w", err)
}

if option.Config.SockopsEnable {
@@ -239,7 +239,7 @@ func (d *Daemon) init() error {
}

if err := d.Datapath().Loader().Reinitialize(d.ctx, d, d.mtuConfig.GetDeviceMTU(), d.Datapath(), d.l7Proxy); err != nil {
return err
return fmt.Errorf("failed while reinitializing datapath: %w", err)
}
}

@@ -347,6 +347,7 @@ func NewDaemon(ctx context.Context, cancel context.CancelFunc, epMgr *endpointma
if option.Config.ReadCNIConfiguration != "" {
netConf, err = cnitypes.ReadNetConf(option.Config.ReadCNIConfiguration)
if err != nil {
log.WithError(err).Error("Unable to read CNI configuration")
return nil, nil, fmt.Errorf("unable to read CNI configuration: %w", err)
}

@@ -358,6 +359,7 @@ func NewDaemon(ctx context.Context, cancel context.CancelFunc, epMgr *endpointma

apiLimiterSet, err := rate.NewAPILimiterSet(option.Config.APIRateLimit, apiRateLimitDefaults, &apiRateLimitingMetrics{})
if err != nil {
log.WithError(err).Error("unable to configure API rate limiting")
return nil, nil, fmt.Errorf("unable to configure API rate limiting: %w", err)
}

@@ -373,6 +375,7 @@ func NewDaemon(ctx context.Context, cancel context.CancelFunc, epMgr *endpointma
// created.
isKubeProxyReplacementStrict, err := initKubeProxyReplacementOptions()
if err != nil {
log.WithError(err).Error("unable to initialize Kube proxy replacement options")
return nil, nil, fmt.Errorf("unable to initialize Kube proxy replacement options: %w", err)
}

@@ -389,7 +392,7 @@ func NewDaemon(ctx context.Context, cancel context.CancelFunc, epMgr *endpointma

if option.Config.DryMode == false {
if err := bpf.ConfigureResourceLimits(); err != nil {
return nil, nil, fmt.Errorf("unable to set memory resource limits: %w", err)
log.WithError(err).Error("unable to set memory resource limits")
}
}

@@ -455,6 +458,7 @@ func NewDaemon(ctx context.Context, cancel context.CancelFunc, epMgr *endpointma

d.rec, err = recorder.NewRecorder(d.ctx, &d)
if err != nil {
log.WithError(err).Error("error while initializing BPF pcap recorder")
return nil, nil, fmt.Errorf("error while initializing BPF pcap recorder: %w", err)
}

@@ -568,6 +572,7 @@ func NewDaemon(ctx context.Context, cancel context.CancelFunc, epMgr *endpointma
err = d.initMaps()
bootstrapStats.mapsInit.EndError(err)
if err != nil {
log.WithError(err).Error("error while opening/creating BPF maps")
return nil, nil, fmt.Errorf("error while opening/creating BPF maps: %w", err)
}

@@ -644,6 +649,7 @@ func NewDaemon(ctx context.Context, cancel context.CancelFunc, epMgr *endpointma
}

if err := k8s.WaitForNodeInformation(d.ctx, d.k8sWatcher); err != nil {
log.WithError(err).Error("unable to connect to get node spec from apiserver")
return nil, nil, fmt.Errorf("unable to connect to get node spec from apiserver: %w", err)
}

@@ -661,7 +667,7 @@ func NewDaemon(ctx context.Context, cancel context.CancelFunc, epMgr *endpointma

if wgAgent := dp.WireguardAgent(); option.Config.EnableWireguard {
if err := wgAgent.Init(mtuConfig); err != nil {
return nil, nil, fmt.Errorf("failed to initialize wireguard agent: %w", err)
log.WithError(err).Error("failed to initialize wireguard agent")
}
}

@@ -684,6 +690,7 @@ func NewDaemon(ctx context.Context, cancel context.CancelFunc, epMgr *endpointma
disableNodePort()
}
if err := finishKubeProxyReplacementInit(isKubeProxyReplacementStrict); err != nil {
log.WithError(err).Error("failed to finalise LB initialization")
return nil, nil, fmt.Errorf("failed to finalise LB initialization: %w", err)
}

@@ -720,27 +727,34 @@ func NewDaemon(ctx context.Context, cancel context.CancelFunc, epMgr *endpointma
}
if option.Config.EnableIPv4EgressGateway {
if !probes.NewProbeManager().GetMisc().HaveLargeInsnLimit {
log.WithError(err).Error("egress gateway needs kernel 5.2 or newer")
return nil, nil, fmt.Errorf("egress gateway needs kernel 5.2 or newer")
}

// datapath code depends on remote node identities to distinguish between cluster-local and
// cluster-egress traffic
if !option.Config.EnableRemoteNodeIdentity {
log.WithError(err).Errorf("egress gateway requires remote node identities (--%s=\"true\").",
option.EnableRemoteNodeIdentity)
return nil, nil, fmt.Errorf("egress gateway requires remote node identities (--%s=\"true\").",
option.EnableRemoteNodeIdentity)
}
}
if option.Config.EnableIPv4Masquerade && option.Config.EnableBPFMasquerade {
// TODO(brb) nodeport + ipvlan constraints will be lifted once the SNAT BPF code has been refactored
if option.Config.DatapathMode == datapathOption.DatapathModeIpvlan {
log.WithError(err).Errorf("BPF masquerade works only in veth mode (--%s=\"%s\"", option.DatapathMode, datapathOption.DatapathModeVeth)
return nil, nil, fmt.Errorf("BPF masquerade works only in veth mode (--%s=\"%s\"", option.DatapathMode, datapathOption.DatapathModeVeth)
}
if err := node.InitBPFMasqueradeAddrs(option.Config.Devices); err != nil {
log.WithError(err).Error("failed to determine BPF masquerade IPv4 addrs")
return nil, nil, fmt.Errorf("failed to determine BPF masquerade IPv4 addrs: %w", err)
}
} else if option.Config.EnableIPMasqAgent {
log.WithError(err).Errorf("BPF ip-masq-agent requires --%s=\"true\" and --%s=\"true\"", option.EnableIPv4Masquerade, option.EnableBPFMasquerade)
return nil, nil, fmt.Errorf("BPF ip-masq-agent requires --%s=\"true\" and --%s=\"true\"", option.EnableIPv4Masquerade, option.EnableBPFMasquerade)
} else if option.Config.EnableIPv4EgressGateway {
log.WithError(err).Errorf("egress gateway requires --%s=\"true\" and --%s=\"true\"", option.EnableIPv4Masquerade, option.EnableBPFMasquerade)
return nil, nil, fmt.Errorf("egress gateway requires --%s=\"true\" and --%s=\"true\"", option.EnableIPv4Masquerade, option.EnableBPFMasquerade)
} else if !option.Config.EnableIPv4Masquerade && option.Config.EnableBPFMasquerade {
// There is not yet support for option.Config.EnableIPv6Masquerade
@@ -750,14 +764,17 @@ func NewDaemon(ctx context.Context, cancel context.CancelFunc, epMgr *endpointma
}
if option.Config.EnableIPMasqAgent {
if !option.Config.EnableIPv4 {
log.WithError(err).Errorf("BPF ip-masq-agent requires IPv4 support (--%s=\"true\")", option.EnableIPv4Name)
return nil, nil, fmt.Errorf("BPF ip-masq-agent requires IPv4 support (--%s=\"true\")", option.EnableIPv4Name)
}
if !probe.HaveFullLPM() {
log.WithError(err).Error("BPF ip-masq-agent needs kernel 4.16 or newer")
return nil, nil, fmt.Errorf("BPF ip-masq-agent needs kernel 4.16 or newer")
}
}
if option.Config.EnableHostFirewall && len(option.Config.Devices) == 0 {
msg := "host firewall's external facing device could not be determined. Use --%s to specify."
log.WithError(err).Errorf(msg, option.Devices)
return nil, nil, fmt.Errorf(msg, option.Devices)
}

@@ -800,9 +817,11 @@ func NewDaemon(ctx context.Context, cancel context.CancelFunc, epMgr *endpointma

if option.Config.JoinCluster {
if k8s.IsEnabled() {
log.WithError(err).Errorf("cannot join a Cilium cluster (--%s) when configured as a Kubernetes node", option.JoinClusterName)
return nil, nil, fmt.Errorf("cannot join a Cilium cluster (--%s) when configured as a Kubernetes node", option.JoinClusterName)
}
if option.Config.KVStore == "" {
log.WithError(err).Errorf("joining a Cilium cluster (--%s) requires kvstore (--%s) be set", option.JoinClusterName, option.KVStore)
return nil, nil, fmt.Errorf("joining a Cilium cluster (--%s) requires kvstore (--%s) be set", option.JoinClusterName, option.KVStore)
}
agentLabels := labels.NewLabelsFromModel(option.Config.AgentLabels).K8sStringMap()
@@ -846,7 +865,7 @@ func NewDaemon(ctx context.Context, cancel context.CancelFunc, epMgr *endpointma
}
bootstrapStats.restore.End(true)

if err := d.allocateIPs(); err != nil {
if err := d.allocateIPs(); err != nil { // will log errors/fatal internally
return nil, nil, err
}

@@ -924,25 +943,28 @@ func NewDaemon(ctx context.Context, cancel context.CancelFunc, epMgr *endpointma
// iptables rules can be updated only after d.init() initializes the iptables above.
err = d.updateDNSDatapathRules()
if err != nil {
return nil, restoredEndpoints, err
log.WithError(err).Error("error encountered while updating DNS datapath rules.")
return nil, restoredEndpoints, fmt.Errorf("error encountered while updating DNS datapath rules: %w", err)
}

// We can only attach the monitor agent once cilium_event has been set up.
if option.Config.RunMonitorAgent {
err = d.monitorAgent.AttachToEventsMap(defaults.MonitorBufferPages)
if err != nil {
return nil, nil, err
log.WithError(err).Error("encountered error configuring run monitor agent")
return nil, nil, fmt.Errorf("encountered error configuring run monitor agent: %w", err)
}

if option.Config.EnableMonitor {
err = monitoragent.ServeMonitorAPI(d.monitorAgent)
if err != nil {
return nil, nil, err
log.WithError(err).Error("encountered error configuring run monitor agent")
return nil, nil, fmt.Errorf("encountered error configuring run monitor agent: %w", err)
}
}
}

if err := d.syncEndpointsAndHostIPs(); err != nil {
if err := d.syncEndpointsAndHostIPs(); err != nil { // logs errors/fatal internally
return nil, nil, err
}
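
The recurring change in this file is a single pattern: log the error at the failure site and return a wrapped error, instead of relying on the caller to surface it. (In a couple of spots, ConfigureResourceLimits and the wireguard agent Init, the paired old/new lines suggest the hard failure instead becomes a logged error.) A minimal, self-contained sketch of the log-and-wrap pattern, with illustrative names only:

    package main

    import (
    	"fmt"
    	"log"
    )

    func initSubsystem() error { return fmt.Errorf("subsystem unavailable") }

    func newDaemon() error {
    	if err := initSubsystem(); err != nil {
    		// Log here so the failure reaches the agent log even if the
    		// caller discards the returned error...
    		log.Printf("unable to initialize subsystem: %v", err)
    		// ...and wrap it so callers can still inspect the cause.
    		return fmt.Errorf("unable to initialize subsystem: %w", err)
    	}
    	return nil
    }

    func main() {
    	if err := newDaemon(); err != nil {
    		fmt.Println("daemon startup failed:", err)
    	}
    }
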

4 changes: 4 additions & 0 deletions daemon/cmd/daemon_main.go
@@ -890,6 +890,10 @@ func initializeFlags() {
flags.DurationVar(&option.Config.FQDNProxyResponseMaxDelay, option.FQDNProxyResponseMaxDelay, 100*time.Millisecond, "The maximum time the DNS proxy holds an allowed DNS response before sending it along. Responses are sent as soon as the datapath is updated with the new IP information.")
option.BindEnv(option.FQDNProxyResponseMaxDelay)

flags.Int(option.FQDNRegexCompileLRUSize, 1024, "Size of the FQDN regex compilation LRU. Useful for heavy but repeated FQDN MatchName or MatchPattern use")
flags.MarkHidden(option.FQDNRegexCompileLRUSize)
option.BindEnv(option.FQDNRegexCompileLRUSize)

flags.String(option.ToFQDNsPreCache, defaults.ToFQDNsPreCache, "DNS cache data at this path is preloaded on agent startup")
option.BindEnv(option.ToFQDNsPreCache)

13 changes: 4 additions & 9 deletions daemon/cmd/kube_proxy_replacement.go
@@ -336,16 +336,11 @@ func initKubeProxyReplacementOptions() (bool, error) {
option.Config.NodePortMode = option.NodePortModeSNAT
}

if option.Config.NodePortAcceleration != option.NodePortAccelerationDisabled {
if option.Config.TunnelingEnabled() {
return false, fmt.Errorf("Cannot use NodePort acceleration with tunneling. Either run cilium-agent with --%s=%s or --%s=%s",
option.NodePortAcceleration, option.NodePortAccelerationDisabled, option.TunnelName, option.TunnelDisabled)
}
if option.Config.NodePortAcceleration != option.NodePortAccelerationDisabled &&
option.Config.TunnelingEnabled() {

if option.Config.EnableIPv4EgressGateway {
return false, fmt.Errorf("Cannot use NodePort acceleration with the egress gateway. Run cilium-agent with either --%s=%s or %s=false",
option.NodePortAcceleration, option.NodePortAccelerationDisabled, option.EnableIPv4EgressGateway)
}
return false, fmt.Errorf("Cannot use NodePort acceleration with tunneling. Either run cilium-agent with --%s=%s or --%s=%s",
option.NodePortAcceleration, option.NodePortAccelerationDisabled, option.TunnelName, option.TunnelDisabled)
}

if option.Config.NodePortMode == option.NodePortModeDSR &&
6 changes: 5 additions & 1 deletion operator/identity_gc.go
@@ -9,7 +9,9 @@ import (
"github.com/cilium/cilium/operator/metrics"
operatorOption "github.com/cilium/cilium/operator/option"
"github.com/cilium/cilium/pkg/allocator"
"github.com/cilium/cilium/pkg/identity"
"github.com/cilium/cilium/pkg/identity/cache"
"github.com/cilium/cilium/pkg/idpool"
"github.com/cilium/cilium/pkg/inctimer"
"github.com/cilium/cilium/pkg/kvstore"
kvstoreallocator "github.com/cilium/cilium/pkg/kvstore/allocator"
@@ -24,7 +26,9 @@ func startKvstoreIdentityGC() {
if err != nil {
log.WithError(err).Fatal("Unable to initialize kvstore backend for identity allocation")
}
a := allocator.NewAllocatorForGC(backend)
minID := idpool.ID(identity.MinimalAllocationIdentity)
maxID := idpool.ID(identity.MaximumAllocationIdentity)
a := allocator.NewAllocatorForGC(backend, allocator.WithMin(minID), allocator.WithMax(maxID))

successfulRuns := 0
failedRuns := 0
7 changes: 6 additions & 1 deletion operator/kvstore_watchdog.go
@@ -10,7 +10,9 @@ import (

"github.com/cilium/cilium/pkg/allocator"
"github.com/cilium/cilium/pkg/defaults"
"github.com/cilium/cilium/pkg/identity"
"github.com/cilium/cilium/pkg/identity/cache"
"github.com/cilium/cilium/pkg/idpool"
"github.com/cilium/cilium/pkg/inctimer"
"github.com/cilium/cilium/pkg/kvstore"
kvstoreallocator "github.com/cilium/cilium/pkg/kvstore/allocator"
@@ -61,7 +63,10 @@ func startKvstoreWatchdog() {
if err != nil {
log.WithError(err).Fatal("Unable to initialize kvstore backend for identity garbage collection")
}
a := allocator.NewAllocatorForGC(backend)

minID := idpool.ID(identity.MinimalAllocationIdentity)
maxID := idpool.ID(identity.MaximumAllocationIdentity)
a := allocator.NewAllocatorForGC(backend, allocator.WithMin(minID), allocator.WithMax(maxID))

keysToDelete := map[string]kvstore.Value{}
go func() {
16 changes: 15 additions & 1 deletion pkg/allocator/allocator.go
@@ -155,7 +155,21 @@ type Allocator struct {
type AllocatorOption func(*Allocator)

// NewAllocatorForGC returns an allocator that can be used to run RunGC()
func NewAllocatorForGC(backend Backend) *Allocator {
//
// The allocator can be configured by passing in additional options:
// - WithMin(id) - minimum ID to allocate (default: 1)
// - WithMax(id) - maximum ID to allocate (default max(uint64))
func NewAllocatorForGC(backend Backend, opts ...AllocatorOption) *Allocator {
a := &Allocator{
backend: backend,
min: idpool.ID(1),
max: idpool.ID(^uint64(0)),
}

for _, fn := range opts {
fn(a)
}

return &Allocator{backend: backend}
return a
}
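
WithMin and WithMax are used by the operator changes above but defined outside this hunk. A plausible shape for them, following the AllocatorOption pattern in this file (assumed, not copied from the PR):

    // Assumed definitions mirroring the functional-options pattern above.
    func WithMin(id idpool.ID) AllocatorOption {
    	return func(a *Allocator) { a.min = id } // lowest ID the GC considers
    }

    func WithMax(id idpool.ID) AllocatorOption {
    	return func(a *Allocator) { a.max = id } // highest ID the GC considers
    }

Bounding the GC allocator to [MinimalAllocationIdentity, MaximumAllocationIdentity] presumably keeps the operator from collecting identities outside the range this cluster may allocate, e.g. identities owned by other clusters in a clustermesh.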

2 changes: 1 addition & 1 deletion pkg/fqdn/dnsproxy/proxy.go
@@ -389,7 +389,7 @@ func StartDNSProxy(address string, port uint16, enableDNSCompression bool, maxRe
restoredEPs: make(restoredEPs),
EnableDNSCompression: enableDNSCompression,
maxIPsPerRestoredDNSRule: maxRestoreDNSIPs,
regexCompileLRU: lru.New(128),
regexCompileLRU: lru.New(option.Config.FQDNRegexCompileLRUSize),
}
atomic.StoreInt32(&p.rejectReply, dns.RcodeRefused)
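
This ties the proxy's regex cache size to the new FQDNRegexCompileLRUSize option registered in daemon_main.go above. A sketch of the caching idea using github.com/golang/groupcache/lru, whose lru.New(size) signature matches the call here (whether Cilium vendors exactly this package is an assumption):

    package main

    import (
    	"fmt"
    	"regexp"

    	"github.com/golang/groupcache/lru"
    )

    // compileCached returns a compiled regex, reusing a prior compilation
    // when the pattern is still in the LRU.
    func compileCached(cache *lru.Cache, pattern string) (*regexp.Regexp, error) {
    	if v, ok := cache.Get(pattern); ok {
    		return v.(*regexp.Regexp), nil // cache hit: skip recompilation
    	}
    	re, err := regexp.Compile(pattern)
    	if err != nil {
    		return nil, err
    	}
    	cache.Add(pattern, re)
    	return re, nil
    }

    func main() {
    	cache := lru.New(1024) // previously hardcoded as lru.New(128)
    	re, _ := compileCached(cache, `^.*[.]cilium[.]io[.]$`)
    	fmt.Println(re.MatchString("www.cilium.io.")) // true
    }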
