kube-proxy in Kubernetes [Part 2]


kube-proxy is a core component of a Kubernetes cluster, responsible for load balancing and network proxying for Services.

A source-code analysis of kube-proxy touches several files and functional modules. The main ones are outlined below (the proxy.Provider interface that ties them together is sketched right after the list):

cmd/kube-proxy/app: the entry point of kube-proxy; parses command-line flags, initializes the configuration, and starts kube-proxy's main loop

pkg/proxy: the core functionality of kube-proxy

pkg/proxy/ipvs: the load-balancing implementation based on IPVS (IP Virtual Server)

pkg/proxy/iptables: the load-balancing implementation based on iptables

pkg/proxy/util: helper functions and utilities supporting the kube-proxy implementation
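
Both backends plug into the same abstraction: pkg/proxy defines a Provider interface that every proxier must satisfy. A lightly abridged sketch (the real definition lives in pkg/proxy/types.go):

// pkg/proxy/types.go (abridged)
type Provider interface {
	config.EndpointSliceHandler
	config.ServiceHandler
	config.NodeHandler

	// Sync immediately synchronizes the Provider's current state to proxy rules.
	Sync()
	// SyncLoop runs periodic work; it is expected to run as a goroutine
	// or as the main loop of the app, and does not return.
	SyncLoop()
}

Both the iptables and ipvs proxiers implement this interface, which is why the server code below can treat them interchangeably as proxy.Provider.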


kube-proxy entry point

// cmd/kube-proxy/proxy.go
func main() {
command := app.NewProxyCommand()
code := cli.Run(command)
os.Exit(code)
}
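
NewProxyCommand wires the Options into a cobra command. A simplified sketch of its shape, lightly paraphrased from cmd/kube-proxy/app/server.go (flag definitions and some error handling omitted):

// cmd/kube-proxy/app/server.go (simplified sketch)
func NewProxyCommand() *cobra.Command {
	opts := NewOptions()

	cmd := &cobra.Command{
		Use: "kube-proxy",
		RunE: func(cmd *cobra.Command, args []string) error {
			// Complete: load the config file, if any, and apply defaults
			if err := opts.Complete(); err != nil {
				return err
			}
			// Validate the effective configuration
			if err := opts.Validate(); err != nil {
				return err
			}
			// Run: build the ProxyServer and block in the run loop
			return opts.Run()
		},
	}

	opts.AddFlags(cmd.Flags())
	return cmd
}

cli.Run (k8s.io/component-base/cli) executes the command with standard logging and flag normalization applied, and main exits with whatever code it returns.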

// cmd/kube-proxy/app/server.go
func (o *Options) Run() error {
defer close(o.errCh)
if len(o.WriteConfigTo) > 0 {
return o.writeConfigFile()
}

if o.CleanupAndExit {
return cleanupAndExit()
}

proxyServer, err := NewProxyServer(o)
if err != nil {
return err
}

o.proxyServer = proxyServer
return o.runLoop()
}
// cmd/kube-proxy/app/server_others.go
func NewProxyServer(o *Options) (*ProxyServer, error) {
return newProxyServer(o.config, o.master)
}

func newProxyServer(
config *proxyconfigapi.KubeProxyConfiguration,
master string) (*ProxyServer, error) {

if c, err := configz.New(proxyconfigapi.GroupName); err == nil {
c.Set(config)
} else {
return nil, fmt.Errorf("unable to register configz: %s", err)
}

var ipvsInterface utilipvs.Interface
var ipsetInterface utilipset.Interface

if len(config.ShowHiddenMetricsForVersion) > 0 {
metrics.SetShowHidden()
}

hostname, err := nodeutil.GetHostname(config.HostnameOverride)
if err != nil {
return nil, err
}

client, err := createClient(config.ClientConnection, master)
if err != nil {
return nil, err
}

nodeIP := detectNodeIP(client, hostname, config.BindAddress)
klog.InfoS("Detected node IP", "address", nodeIP.String())

// Create event recorder
eventBroadcaster := events.NewBroadcaster(&events.EventSinkImpl{Interface: client.EventsV1()})
recorder := eventBroadcaster.NewRecorder(scheme.Scheme, "kube-proxy")

nodeRef := &v1.ObjectReference{
Kind: "Node",
Name: hostname,
UID: types.UID(hostname),
Namespace: "",
}

var healthzServer healthcheck.ProxierHealthUpdater
if len(config.HealthzBindAddress) > 0 {
healthzServer = healthcheck.NewProxierHealthServer(config.HealthzBindAddress, 2*config.IPTables.SyncPeriod.Duration, recorder, nodeRef)
}

var proxier proxy.Provider

var nodeInfo *v1.Node
if config.DetectLocalMode == proxyconfigapi.LocalModeNodeCIDR {
klog.InfoS("Watching for node, awaiting podCIDR allocation", "hostname", hostname)
nodeInfo, err = waitForPodCIDR(client, hostname)
if err != nil {
return nil, err
}
klog.InfoS("NodeInfo", "podCIDR", nodeInfo.Spec.PodCIDR, "podCIDRs", nodeInfo.Spec.PodCIDRs)
}

primaryFamily := v1.IPv4Protocol
primaryProtocol := utiliptables.ProtocolIPv4
if netutils.IsIPv6(nodeIP) {
primaryFamily = v1.IPv6Protocol
primaryProtocol = utiliptables.ProtocolIPv6
}
execer := exec.New()
iptInterface := utiliptables.New(execer, primaryProtocol)

var ipt [2]utiliptables.Interface
dualStack := true // Assume the node supports dual-stack; we do further checks below

// Create iptables handlers for both families, one is already created
// Always ordered as IPv4, IPv6
if primaryProtocol == utiliptables.ProtocolIPv4 {
ipt[0] = iptInterface
ipt[1] = utiliptables.New(execer, utiliptables.ProtocolIPv6)
} else {
ipt[0] = utiliptables.New(execer, utiliptables.ProtocolIPv4)
ipt[1] = iptInterface
}

nodePortAddresses := config.NodePortAddresses

if !ipt[0].Present() {
return nil, fmt.Errorf("iptables is not supported for primary IP family %q", primaryProtocol)
} else if !ipt[1].Present() {
klog.InfoS("kube-proxy running in single-stack mode: secondary ipFamily is not supported", "ipFamily", ipt[1].Protocol())
dualStack = false

// Validate NodePortAddresses is single-stack
npaByFamily := proxyutil.MapCIDRsByIPFamily(config.NodePortAddresses)
secondaryFamily := proxyutil.OtherIPFamily(primaryFamily)
badAddrs := npaByFamily[secondaryFamily]
if len(badAddrs) > 0 {
klog.InfoS("Ignoring --nodeport-addresses of the wrong family", "ipFamily", secondaryFamily, "addresses", badAddrs)
nodePortAddresses = npaByFamily[primaryFamily]
}
}
// Set up a different proxier depending on the configured mode;
// two modes are currently supported: iptables and ipvs
if config.Mode == proxyconfigapi.ProxyModeIPTables {
klog.InfoS("Using iptables Proxier")

if dualStack {
klog.InfoS("kube-proxy running in dual-stack mode", "ipFamily", iptInterface.Protocol())
klog.InfoS("Creating dualStackProxier for iptables")
// Always ordered to match []ipt
var localDetectors [2]proxyutiliptables.LocalTrafficDetector
localDetectors, err = getDualStackLocalDetectorTuple(config.DetectLocalMode, config, ipt, nodeInfo)
if err != nil {
return nil, fmt.Errorf("unable to create proxier: %v", err)
}

// TODO this has side effects that should only happen when Run() is invoked.
proxier, err = iptables.NewDualStackProxier(
ipt,
utilsysctl.New(),
execer,
config.IPTables.SyncPeriod.Duration,
config.IPTables.MinSyncPeriod.Duration,
config.IPTables.MasqueradeAll,
*config.IPTables.LocalhostNodePorts,
int(*config.IPTables.MasqueradeBit),
localDetectors,
hostname,
nodeIPTuple(config.BindAddress),
recorder,
healthzServer,
nodePortAddresses,
)
} else {
// Create a single-stack proxier if and only if the node does not support dual-stack (i.e., no iptables support).
var localDetector proxyutiliptables.LocalTrafficDetector
localDetector, err = getLocalDetector(config.DetectLocalMode, config, iptInterface, nodeInfo)
if err != nil {
return nil, fmt.Errorf("unable to create proxier: %v", err)
}

// TODO this has side effects that should only happen when Run() is invoked.
proxier, err = iptables.NewProxier(
primaryFamily,
iptInterface,
utilsysctl.New(),
execer,
config.IPTables.SyncPeriod.Duration,
config.IPTables.MinSyncPeriod.Duration,
config.IPTables.MasqueradeAll,
*config.IPTables.LocalhostNodePorts,
int(*config.IPTables.MasqueradeBit),
localDetector,
hostname,
nodeIP,
recorder,
healthzServer,
nodePortAddresses,
)
}

if err != nil {
return nil, fmt.Errorf("unable to create proxier: %v", err)
}
proxymetrics.RegisterMetrics()
} else if config.Mode == proxyconfigapi.ProxyModeIPVS {
kernelHandler := ipvs.NewLinuxKernelHandler()
ipsetInterface = utilipset.New(execer)
ipvsInterface = utilipvs.New()
if err := ipvs.CanUseIPVSProxier(ipvsInterface, ipsetInterface, config.IPVS.Scheduler); err != nil {
return nil, fmt.Errorf("can't use the IPVS proxier: %v", err)
}

klog.InfoS("Using ipvs Proxier")
if dualStack {
klog.InfoS("Creating dualStackProxier for ipvs")

nodeIPs := nodeIPTuple(config.BindAddress)

// Always ordered to match []ipt
var localDetectors [2]proxyutiliptables.LocalTrafficDetector
localDetectors, err = getDualStackLocalDetectorTuple(config.DetectLocalMode, config, ipt, nodeInfo)
if err != nil {
return nil, fmt.Errorf("unable to create proxier: %v", err)
}

proxier, err = ipvs.NewDualStackProxier(
ipt,
ipvsInterface,
ipsetInterface,
utilsysctl.New(),
execer,
config.IPVS.SyncPeriod.Duration,
config.IPVS.MinSyncPeriod.Duration,
config.IPVS.ExcludeCIDRs,
config.IPVS.StrictARP,
config.IPVS.TCPTimeout.Duration,
config.IPVS.TCPFinTimeout.Duration,
config.IPVS.UDPTimeout.Duration,
config.IPTables.MasqueradeAll,
int(*config.IPTables.MasqueradeBit),
localDetectors,
hostname,
nodeIPs,
recorder,
healthzServer,
config.IPVS.Scheduler,
nodePortAddresses,
kernelHandler,
)
} else {
var localDetector proxyutiliptables.LocalTrafficDetector
localDetector, err = getLocalDetector(config.DetectLocalMode, config, iptInterface, nodeInfo)
if err != nil {
return nil, fmt.Errorf("unable to create proxier: %v", err)
}

proxier, err = ipvs.NewProxier(
primaryFamily,
iptInterface,
ipvsInterface,
ipsetInterface,
utilsysctl.New(),
execer,
config.IPVS.SyncPeriod.Duration,
config.IPVS.MinSyncPeriod.Duration,
config.IPVS.ExcludeCIDRs,
config.IPVS.StrictARP,
config.IPVS.TCPTimeout.Duration,
config.IPVS.TCPFinTimeout.Duration,
config.IPVS.UDPTimeout.Duration,
config.IPTables.MasqueradeAll,
int(*config.IPTables.MasqueradeBit),
localDetector,
hostname,
nodeIP,
recorder,
healthzServer,
config.IPVS.Scheduler,
nodePortAddresses,
kernelHandler,
)
}
if err != nil {
return nil, fmt.Errorf("unable to create proxier: %v", err)
}
proxymetrics.RegisterMetrics()
}

return &ProxyServer{
Config: config,
Client: client,
Proxier: proxier,
Broadcaster: eventBroadcaster,
Recorder: recorder,
Conntracker: &realConntracker{},
NodeRef: nodeRef,
HealthzServer: healthzServer,
}, nil
}
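
Note how the primary IP family is derived solely from the detected node IP; the check is just netutils.IsIPv6, where netutils aliases k8s.io/utils/net. A tiny self-contained illustration of that decision:

package main

import (
	"fmt"
	"net"

	netutils "k8s.io/utils/net"
)

// primaryFamily mirrors the logic in newProxyServer: an IPv6 node IP
// selects the IPv6 family; everything else (including IPv4-mapped
// addresses) stays IPv4.
func primaryFamily(nodeIP net.IP) string {
	if netutils.IsIPv6(nodeIP) {
		return "IPv6"
	}
	return "IPv4"
}

func main() {
	fmt.Println(primaryFamily(net.ParseIP("10.0.0.1"))) // IPv4
	fmt.Println(primaryFamily(net.ParseIP("fd00::1")))  // IPv6
}

Dual-stack support is then probed by checking whether iptables is present for the secondary family; if not, kube-proxy falls back to single-stack mode and filters NodePort addresses down to the primary family.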
// cmd/kube-proxy/app/server.go
// runLoop ultimately calls proxyServer's Run method
func (o *Options) runLoop() error {
if o.watcher != nil {
o.watcher.Run()
}

// run the proxy in goroutine
go func() {
err := o.proxyServer.Run()
o.errCh <- err
}()

for {
err := <-o.errCh
if err != nil {
return err
}
}
}
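
The receive loop deliberately discards nil values: producers other than the proxy goroutine (for example the configuration-file watcher started just above) may also write to errCh, and only a non-nil error terminates kube-proxy. The same pattern in isolation, as a minimal sketch rather than the actual kube-proxy code:

package main

import (
	"errors"
	"fmt"
)

// runUntilError blocks until some producer sends a non-nil error,
// mirroring the receive loop in Options.runLoop.
func runUntilError(errCh <-chan error) error {
	for {
		if err := <-errCh; err != nil {
			return err
		}
	}
}

func main() {
	errCh := make(chan error, 2)
	errCh <- nil                        // benign signal, ignored
	errCh <- errors.New("proxy failed") // terminates the loop
	fmt.Println(runUntilError(errCh))
}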
// cmd/kube-proxy/app/server.go
type ProxyServer struct {
Config *kubeproxyconfig.KubeProxyConfiguration

Client clientset.Interface
Broadcaster events.EventBroadcaster
Recorder events.EventRecorder
Conntracker Conntracker // if nil, ignored
NodeRef *v1.ObjectReference
HealthzServer healthcheck.ProxierHealthUpdater

Proxier proxy.Provider
}

func (s *ProxyServer) Run() error {
// To help debugging, immediately log version
klog.InfoS("Version info", "version", version.Get())

klog.InfoS("Golang settings", "GOGC", os.Getenv("GOGC"), "GOMAXPROCS", os.Getenv("GOMAXPROCS"), "GOTRACEBACK", os.Getenv("GOTRACEBACK"))

// TODO(vmarmol): Use container config for this.
var oomAdjuster *oom.OOMAdjuster
if s.Config.OOMScoreAdj != nil {
oomAdjuster = oom.NewOOMAdjuster()
if err := oomAdjuster.ApplyOOMScoreAdj(0, int(*s.Config.OOMScoreAdj)); err != nil {
klog.V(2).InfoS("Failed to apply OOMScore", "err", err)
}
}

if s.Broadcaster != nil {
stopCh := make(chan struct{})
s.Broadcaster.StartRecordingToSink(stopCh)
}

// TODO(thockin): make it possible for healthz and metrics to be on the same port.

var errCh chan error
if s.Config.BindAddressHardFail {
errCh = make(chan error)
}

// Start up a healthz server if requested
serveHealthz(s.HealthzServer, errCh)

// Start up a metrics server if requested
serveMetrics(s.Config.MetricsBindAddress, s.Config.Mode, s.Config.EnableProfiling, errCh)

// Tune conntrack, if requested
// Conntracker is always nil for windows
if s.Conntracker != nil {
max, err := getConntrackMax(s.Config.Conntrack)
if err != nil {
return err
}
if max > 0 {
err := s.Conntracker.SetMax(max)
if err != nil {
if err != errReadOnlySysFS {
return err
}
// errReadOnlySysFS is caused by a known docker issue (https://github.com/docker/docker/issues/24000),
// the only remediation we know is to restart the docker daemon.
// Here we'll send a node event with a specific reason and message; the
// administrator should decide whether and how to handle this issue,
// whether to drain the node and restart docker. Occurs in other container runtimes
// as well.
// TODO(random-liu): Remove this when the docker bug is fixed.
const message = "CRI error: /sys is read-only: " +
"cannot modify conntrack limits, problems may arise later (If running Docker, see docker issue #24000)"
s.Recorder.Eventf(s.NodeRef, nil, api.EventTypeWarning, err.Error(), "StartKubeProxy", message)
}
}

if s.Config.Conntrack.TCPEstablishedTimeout != nil && s.Config.Conntrack.TCPEstablishedTimeout.Duration > 0 {
timeout := int(s.Config.Conntrack.TCPEstablishedTimeout.Duration / time.Second)
if err := s.Conntracker.SetTCPEstablishedTimeout(timeout); err != nil {
return err
}
}

if s.Config.Conntrack.TCPCloseWaitTimeout != nil && s.Config.Conntrack.TCPCloseWaitTimeout.Duration > 0 {
timeout := int(s.Config.Conntrack.TCPCloseWaitTimeout.Duration / time.Second)
if err := s.Conntracker.SetTCPCloseWaitTimeout(timeout); err != nil {
return err
}
}
}

noProxyName, err := labels.NewRequirement(apis.LabelServiceProxyName, selection.DoesNotExist, nil)
if err != nil {
return err
}

noHeadlessEndpoints, err := labels.NewRequirement(v1.IsHeadlessService, selection.DoesNotExist, nil)
if err != nil {
return err
}

labelSelector := labels.NewSelector()
labelSelector = labelSelector.Add(*noProxyName, *noHeadlessEndpoints)

// Make informers that filter out objects that want a non-default service proxy.
informerFactory := informers.NewSharedInformerFactoryWithOptions(s.Client, s.Config.ConfigSyncPeriod.Duration,
informers.WithTweakListOptions(func(options *metav1.ListOptions) {
options.LabelSelector = labelSelector.String()
}))

// Create configs (i.e. Watches for Services and EndpointSlices)
// Note: RegisterHandler() calls need to happen before creation of Sources because sources
// only notify on changes, and the initial update (on process start) may be lost if no handlers
// are registered yet.
// This can be understood as watching Services via the informer mechanism
serviceConfig := config.NewServiceConfig(informerFactory.Core().V1().Services(), s.Config.ConfigSyncPeriod.Duration)
serviceConfig.RegisterEventHandler(s.Proxier)
go serviceConfig.Run(wait.NeverStop)

// This can be understood as watching EndpointSlices via the informer mechanism
endpointSliceConfig := config.NewEndpointSliceConfig(informerFactory.Discovery().V1().EndpointSlices(), s.Config.ConfigSyncPeriod.Duration)
endpointSliceConfig.RegisterEventHandler(s.Proxier)
go endpointSliceConfig.Run(wait.NeverStop)

// This has to start after the calls to NewServiceConfig because that
// function must configure its shared informer event handlers first.
informerFactory.Start(wait.NeverStop)

// Make an informer that selects for our nodename.
currentNodeInformerFactory := informers.NewSharedInformerFactoryWithOptions(s.Client, s.Config.ConfigSyncPeriod.Duration,
informers.WithTweakListOptions(func(options *metav1.ListOptions) {
options.FieldSelector = fields.OneTermEqualSelector("metadata.name", s.NodeRef.Name).String()
}))

// This can be understood as watching Nodes via the informer mechanism
nodeConfig := config.NewNodeConfig(currentNodeInformerFactory.Core().V1().Nodes(), s.Config.ConfigSyncPeriod.Duration)
// https://issues.k8s.io/111321
if s.Config.DetectLocalMode == kubeproxyconfig.LocalModeNodeCIDR {
nodeConfig.RegisterEventHandler(&proxy.NodePodCIDRHandler{})
}
nodeConfig.RegisterEventHandler(s.Proxier)

go nodeConfig.Run(wait.NeverStop)

// This has to start after the calls to NewNodeConfig because that must
// configure the shared informer event handler first.
currentNodeInformerFactory.Start(wait.NeverStop)

// Birth Cry after the birth is successful
s.birthCry()

// Both the iptables and ipvs Proxiers implement SyncLoop()
go s.Proxier.SyncLoop()

return <-errCh
}
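
The two DoesNotExist requirements mean kube-proxy ignores Services claimed by another proxy implementation (service.kubernetes.io/service-proxy-name) as well as headless Services (service.kubernetes.io/headless). A quick standalone check of the selector string the informer factory ends up using, with the label keys written out literally for illustration:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/selection"
)

func main() {
	// The same two requirements built in ProxyServer.Run.
	noProxyName, _ := labels.NewRequirement("service.kubernetes.io/service-proxy-name", selection.DoesNotExist, nil)
	noHeadless, _ := labels.NewRequirement("service.kubernetes.io/headless", selection.DoesNotExist, nil)

	sel := labels.NewSelector().Add(*noProxyName, *noHeadless)
	// Prints: !service.kubernetes.io/headless,!service.kubernetes.io/service-proxy-name
	fmt.Println(sel.String())
}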

ServiceConfig, EndpointSliceConfig, NodeConfig

// pkg/proxy/config/config.go

type ServiceConfig struct {
listerSynced cache.InformerSynced
eventHandlers []ServiceHandler
}

// NewServiceConfig creates a new ServiceConfig.
func NewServiceConfig(serviceInformer coreinformers.ServiceInformer, resyncPeriod time.Duration) *ServiceConfig {
result := &ServiceConfig{
listerSynced: serviceInformer.Informer().HasSynced,
}

serviceInformer.Informer().AddEventHandlerWithResyncPeriod(
cache.ResourceEventHandlerFuncs{
AddFunc: result.handleAddService,
UpdateFunc: result.handleUpdateService,
DeleteFunc: result.handleDeleteService,
},
resyncPeriod,
)

return result
}

// Run waits for cache synced and invokes handlers after syncing.
func (c *ServiceConfig) Run(stopCh <-chan struct{}) {
klog.InfoS("Starting service config controller")

if !cache.WaitForNamedCacheSync("service config", stopCh, c.listerSynced) {
return
}

for i := range c.eventHandlers {
klog.V(3).InfoS("Calling handler.OnServiceSynced()")
// Called once all initial event handlers have been invoked and the state is fully propagated to the local cache
c.eventHandlers[i].OnServiceSynced()
}
}


type NodeConfig struct {
listerSynced cache.InformerSynced
eventHandlers []NodeHandler
}

// NewNodeConfig creates a new NodeConfig.
func NewNodeConfig(nodeInformer coreinformers.NodeInformer, resyncPeriod time.Duration) *NodeConfig {
result := &NodeConfig{
listerSynced: nodeInformer.Informer().HasSynced,
}

nodeInformer.Informer().AddEventHandlerWithResyncPeriod(
cache.ResourceEventHandlerFuncs{
AddFunc: result.handleAddNode,
UpdateFunc: result.handleUpdateNode,
DeleteFunc: result.handleDeleteNode,
},
resyncPeriod,
)

return result
}

// Run starts the goroutine responsible for calling registered handlers.
func (c *NodeConfig) Run(stopCh <-chan struct{}) {
klog.InfoS("Starting node config controller")

if !cache.WaitForNamedCacheSync("node config", stopCh, c.listerSynced) {
return
}

for i := range c.eventHandlers {
klog.V(3).InfoS("Calling handler.OnNodeSynced()")
c.eventHandlers[i].OnNodeSynced()
}
}

type EndpointSliceConfig struct {
listerSynced cache.InformerSynced
eventHandlers []EndpointSliceHandler
}

// NewEndpointSliceConfig creates a new EndpointSliceConfig.
func NewEndpointSliceConfig(endpointSliceInformer discoveryinformers.EndpointSliceInformer, resyncPeriod time.Duration) *EndpointSliceConfig {
result := &EndpointSliceConfig{
listerSynced: endpointSliceInformer.Informer().HasSynced,
}

endpointSliceInformer.Informer().AddEventHandlerWithResyncPeriod(
cache.ResourceEventHandlerFuncs{
AddFunc: result.handleAddEndpointSlice,
UpdateFunc: result.handleUpdateEndpointSlice,
DeleteFunc: result.handleDeleteEndpointSlice,
},
resyncPeriod,
)

return result
}

// Run waits for cache synced and invokes handlers after syncing.
func (c *EndpointSliceConfig) Run(stopCh <-chan struct{}) {
klog.InfoS("Starting endpoint slice config controller")

if !cache.WaitForNamedCacheSync("endpoint slice config", stopCh, c.listerSynced) {
return
}

for _, h := range c.eventHandlers {
klog.V(3).InfoS("Calling handler.OnEndpointSlicesSynced()")
h.OnEndpointSlicesSynced()
}
}
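
All three config types follow the same shape: wrap an informer, fan events out to the registered handlers, and fire an OnXxxSynced callback once the cache is warm. A proxier participates simply by implementing the handler interfaces. Below is a minimal, hypothetical ServiceHandler that only logs events, to show the contract RegisterEventHandler expects; a real proxier records the change and kicks its syncRunner instead:

package main

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/klog/v2"
)

// loggingServiceHandler is a hypothetical handler satisfying the
// ServiceHandler interface from pkg/proxy/config.
type loggingServiceHandler struct{}

func (h *loggingServiceHandler) OnServiceAdd(svc *v1.Service) {
	klog.InfoS("Service added", "service", klog.KObj(svc))
}

func (h *loggingServiceHandler) OnServiceUpdate(oldSvc, svc *v1.Service) {
	klog.InfoS("Service updated", "service", klog.KObj(svc))
}

func (h *loggingServiceHandler) OnServiceDelete(svc *v1.Service) {
	klog.InfoS("Service deleted", "service", klog.KObj(svc))
}

func (h *loggingServiceHandler) OnServiceSynced() {
	klog.InfoS("Initial Services synced")
}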
// pkg/proxy/ipvs/proxier.go
func (proxier *Proxier) SyncLoop() {
// Update healthz timestamp at beginning in case Sync() never succeeds.
if proxier.healthzServer != nil {
proxier.healthzServer.Updated()
}
// synthesize "last change queued" time as the informers are syncing.
metrics.SyncProxyRulesLastQueuedTimestamp.SetToCurrentTime()
// Periodically execute the sync function registered with syncRunner
proxier.syncRunner.Loop(wait.NeverStop)
}
func NewProxier(ipFamily v1.IPFamily...){
...
// syncRunner initialization; fn is proxier.syncProxyRules
// Both the iptables and ipvs proxiers implement syncProxyRules
// The periodically executed syncProxyRules is the core logic of kube-proxy: it updates
// the iptables or ipvs rules in response to Service, EndpointSlice, and Node changes
proxier.syncRunner = async.NewBoundedFrequencyRunner("sync-runner", proxier.syncProxyRules, minSyncPeriod, syncPeriod, burstSyncs)
...
}

// pkg/proxy/iptables/proxier.go
func (proxier *Proxier) SyncLoop() {
// Update healthz timestamp at beginning in case Sync() never succeeds.
if proxier.healthzServer != nil {
proxier.healthzServer.Updated()
}

// synthesize "last change queued" time as the informers are syncing.
metrics.SyncProxyRulesLastQueuedTimestamp.SetToCurrentTime()
proxier.syncRunner.Loop(wait.NeverStop)
}

func NewProxier(ipFamily v1.IPFamily...) {
...
// syncRunner initialization; fn is proxier.syncProxyRules
proxier.syncRunner = async.NewBoundedFrequencyRunner("sync-runner", proxier.syncProxyRules, minSyncPeriod, time.Hour, burstSyncs)
...
}
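
BoundedFrequencyRunner (k8s.io/kubernetes/pkg/util/async) bounds how often syncProxyRules runs: roughly, at least once per the maximum interval and no more than burstSyncs times per minSyncPeriod; the proxier's Sync() method just pokes the runner. Note the iptables proxier passes time.Hour as the maximum interval, relying on event-driven syncs, while the ipvs proxier passes its configured syncPeriod. A minimal self-contained sketch of the same idea (not the real implementation, which uses a token bucket rather than a sleep):

package main

import (
	"fmt"
	"time"
)

// loop runs fn when poked, but never more often than minInterval,
// and at least once every maxInterval.
func loop(fn func(), poke <-chan struct{}, minInterval, maxInterval time.Duration, stop <-chan struct{}) {
	timer := time.NewTimer(maxInterval)
	defer timer.Stop()
	for {
		select {
		case <-stop:
			return
		case <-poke: // an explicit Sync() request
		case <-timer.C: // periodic full resync
		}
		fn()
		time.Sleep(minInterval) // crude rate limit, for illustration only
		timer.Reset(maxInterval)
	}
}

func main() {
	poke := make(chan struct{}, 1)
	stop := make(chan struct{})
	go loop(func() { fmt.Println("syncProxyRules()") }, poke, 100*time.Millisecond, time.Second, stop)
	poke <- struct{}{} // analogous to proxier.Sync()
	time.Sleep(1500 * time.Millisecond)
	close(stop)
}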

iptables syncProxyRules
This function is very long; we will analyze it piece by piece later. For now, here is the full listing:

// pkg/proxy/iptables/proxier.go
func (proxier *Proxier) syncProxyRules() {
proxier.mu.Lock()
defer proxier.mu.Unlock()

// don't sync rules till we've received services and endpoints
if !proxier.isInitialized() {
klog.V(2).InfoS("Not syncing iptables until Services and Endpoints have been received from master")
return
}

// The value of proxier.needFullSync may change before the defer funcs run, so
// we need to keep track of whether it was set at the *start* of the sync.
tryPartialSync := !proxier.needFullSync

// Keep track of how long syncs take.
start := time.Now()
defer func() {
metrics.SyncProxyRulesLatency.Observe(metrics.SinceInSeconds(start))
if tryPartialSync {
metrics.SyncPartialProxyRulesLatency.Observe(metrics.SinceInSeconds(start))
} else {
metrics.SyncFullProxyRulesLatency.Observe(metrics.SinceInSeconds(start))
}
klog.V(2).InfoS("SyncProxyRules complete", "elapsed", time.Since(start))
}()

var serviceChanged, endpointsChanged sets.Set[string]
if tryPartialSync {
serviceChanged = proxier.serviceChanges.PendingChanges()
endpointsChanged = proxier.endpointsChanges.PendingChanges()
}
serviceUpdateResult := proxier.svcPortMap.Update(proxier.serviceChanges)
endpointUpdateResult := proxier.endpointsMap.Update(proxier.endpointsChanges)

klog.V(2).InfoS("Syncing iptables rules")

success := false
defer func() {
if !success {
klog.InfoS("Sync failed", "retryingTime", proxier.syncPeriod)
proxier.syncRunner.RetryAfter(proxier.syncPeriod)
if tryPartialSync {
metrics.IptablesPartialRestoreFailuresTotal.Inc()
}
// proxier.serviceChanges and proxier.endpointsChanges have already
// been flushed, so we've lost the state needed to be able to do
// a partial sync.
proxier.needFullSync = true
}
}()

if !tryPartialSync {
// Ensure that our jump rules (eg from PREROUTING to KUBE-SERVICES) exist.
// We can't do this as part of the iptables-restore because we don't want
// to specify/replace *all* of the rules in PREROUTING, etc.
//
// We need to create these rules when kube-proxy first starts, and we need
// to recreate them if the utiliptables Monitor detects that iptables has
// been flushed. In both of those cases, the code will force a full sync.
// In all other cases, it ought to be safe to assume that the rules
// already exist, so we'll skip this step when doing a partial sync, to
// save us from having to invoke /sbin/iptables 20 times on each sync
// (which will be very slow on hosts with lots of iptables rules).
for _, jump := range append(iptablesJumpChains, iptablesKubeletJumpChains...) {
if _, err := proxier.iptables.EnsureChain(jump.table, jump.dstChain); err != nil {
klog.ErrorS(err, "Failed to ensure chain exists", "table", jump.table, "chain", jump.dstChain)
return
}
args := jump.extraArgs
if jump.comment != "" {
args = append(args, "-m", "comment", "--comment", jump.comment)
}
args = append(args, "-j", string(jump.dstChain))
if _, err := proxier.iptables.EnsureRule(utiliptables.Prepend, jump.table, jump.srcChain, args...); err != nil {
klog.ErrorS(err, "Failed to ensure chain jumps", "table", jump.table, "srcChain", jump.srcChain, "dstChain", jump.dstChain)
return
}
}
}

//
// Below this point we will not return until we try to write the iptables rules.
//

// Reset all buffers used later.
// This is to avoid memory reallocations and thus improve performance.
proxier.filterChains.Reset()
proxier.filterRules.Reset()
proxier.natChains.Reset()
proxier.natRules.Reset()

// Write chain lines for all the "top-level" chains we'll be filling in
for _, chainName := range []utiliptables.Chain{kubeServicesChain, kubeExternalServicesChain, kubeForwardChain, kubeNodePortsChain, kubeProxyFirewallChain} {
proxier.filterChains.Write(utiliptables.MakeChainLine(chainName))
}
for _, chainName := range []utiliptables.Chain{kubeServicesChain, kubeNodePortsChain, kubePostroutingChain, kubeMarkMasqChain} {
proxier.natChains.Write(utiliptables.MakeChainLine(chainName))
}

// Install the kubernetes-specific postrouting rules. We use a whole chain for
// this so that it is easier to flush and change, for example if the mark
// value should ever change.

// NOTE: kubelet creates identical copies of these rules. If you want to change
// these rules in the future, you MUST do so in a way that will interoperate
// correctly with skewed versions of the rules created by kubelet. (Remove this
// comment once IPTablesOwnershipCleanup is GA.)

proxier.natRules.Write(
"-A", string(kubePostroutingChain),
"-m", "mark", "!", "--mark", fmt.Sprintf("%s/%s", proxier.masqueradeMark, proxier.masqueradeMark),
"-j", "RETURN",
)
// Clear the mark to avoid re-masquerading if the packet re-traverses the network stack.
proxier.natRules.Write(
"-A", string(kubePostroutingChain),
"-j", "MARK", "--xor-mark", proxier.masqueradeMark,
)
masqRule := []string{
"-A", string(kubePostroutingChain),
"-m", "comment", "--comment", `"kubernetes service traffic requiring SNAT"`,
"-j", "MASQUERADE",
}
if proxier.iptables.HasRandomFully() {
masqRule = append(masqRule, "--random-fully")
}
proxier.natRules.Write(masqRule)

// Install the kubernetes-specific masquerade mark rule. We use a whole chain for
// this so that it is easier to flush and change, for example if the mark
// value should ever change.
proxier.natRules.Write(
"-A", string(kubeMarkMasqChain),
"-j", "MARK", "--or-mark", proxier.masqueradeMark,
)

isIPv6 := proxier.iptables.IsIPv6()
if !isIPv6 && proxier.localhostNodePorts {
// Kube-proxy's use of `route_localnet` to enable NodePorts on localhost
// creates a security hole (https://issue.k8s.io/90259) which this
// iptables rule mitigates.

// NOTE: kubelet creates an identical copy of this rule. If you want to
// change this rule in the future, you MUST do so in a way that will
// interoperate correctly with skewed versions of the rule created by
// kubelet. (Actually, kubelet uses "--dst"/"--src" rather than "-d"/"-s"
// but that's just a command-line thing and results in the same rule being
// created in the kernel.)
proxier.filterChains.Write(utiliptables.MakeChainLine(kubeletFirewallChain))
proxier.filterRules.Write(
"-A", string(kubeletFirewallChain),
"-m", "comment", "--comment", `"block incoming localnet connections"`,
"-d", "127.0.0.0/8",
"!", "-s", "127.0.0.0/8",
"-m", "conntrack",
"!", "--ctstate", "RELATED,ESTABLISHED,DNAT",
"-j", "DROP",
)
}

// Accumulate NAT chains to keep.
activeNATChains := map[utiliptables.Chain]bool{} // use a map as a set

// To avoid growing this slice, we arbitrarily set its size to 64,
// there is never more than that many arguments for a single line.
// Note that even if we go over 64, it will still be correct - it
// is just for efficiency, not correctness.
args := make([]string, 64)

// Compute total number of endpoint chains across all services
// to get a sense of how big the cluster is.
totalEndpoints := 0
for svcName := range proxier.svcPortMap {
totalEndpoints += len(proxier.endpointsMap[svcName])
}
proxier.largeClusterMode = (totalEndpoints > largeClusterEndpointsThreshold)

// These two variables are used to publish the sync_proxy_rules_no_endpoints_total
// metric.
serviceNoLocalEndpointsTotalInternal := 0
serviceNoLocalEndpointsTotalExternal := 0

// Build rules for each service-port.
for svcName, svc := range proxier.svcPortMap {
svcInfo, ok := svc.(*servicePortInfo)
if !ok {
klog.ErrorS(nil, "Failed to cast serviceInfo", "serviceName", svcName)
continue
}
protocol := strings.ToLower(string(svcInfo.Protocol()))
svcPortNameString := svcInfo.nameString

// Figure out the endpoints for Cluster and Local traffic policy.
// allLocallyReachableEndpoints is the set of all endpoints that can be routed to
// from this node, given the service's traffic policies. hasEndpoints is true
// if the service has any usable endpoints on any node, not just this one.
allEndpoints := proxier.endpointsMap[svcName]
clusterEndpoints, localEndpoints, allLocallyReachableEndpoints, hasEndpoints := proxy.CategorizeEndpoints(allEndpoints, svcInfo, proxier.nodeLabels)

// Note the endpoint chains that will be used
for _, ep := range allLocallyReachableEndpoints {
if epInfo, ok := ep.(*endpointsInfo); ok {
activeNATChains[epInfo.ChainName] = true
}
}

// clusterPolicyChain contains the endpoints used with "Cluster" traffic policy
clusterPolicyChain := svcInfo.clusterPolicyChainName
usesClusterPolicyChain := len(clusterEndpoints) > 0 && svcInfo.UsesClusterEndpoints()
if usesClusterPolicyChain {
activeNATChains[clusterPolicyChain] = true
}

// localPolicyChain contains the endpoints used with "Local" traffic policy
localPolicyChain := svcInfo.localPolicyChainName
usesLocalPolicyChain := len(localEndpoints) > 0 && svcInfo.UsesLocalEndpoints()
if usesLocalPolicyChain {
activeNATChains[localPolicyChain] = true
}

// internalPolicyChain is the chain containing the endpoints for
// "internal" (ClusterIP) traffic. internalTrafficChain is the chain that
// internal traffic is routed to (which is always the same as
// internalPolicyChain). hasInternalEndpoints is true if we should
// generate rules pointing to internalTrafficChain, or false if there are
// no available internal endpoints.
internalPolicyChain := clusterPolicyChain
hasInternalEndpoints := hasEndpoints
if svcInfo.InternalPolicyLocal() {
internalPolicyChain = localPolicyChain
if len(localEndpoints) == 0 {
hasInternalEndpoints = false
}
}
internalTrafficChain := internalPolicyChain

// Similarly, externalPolicyChain is the chain containing the endpoints
// for "external" (NodePort, LoadBalancer, and ExternalIP) traffic.
// externalTrafficChain is the chain that external traffic is routed to
// (which is always the service's "EXT" chain). hasExternalEndpoints is
// true if there are endpoints that will be reached by external traffic.
// (But we may still have to generate externalTrafficChain even if there
// are no external endpoints, to ensure that the short-circuit rules for
// local traffic are set up.)
externalPolicyChain := clusterPolicyChain
hasExternalEndpoints := hasEndpoints
if svcInfo.ExternalPolicyLocal() {
externalPolicyChain = localPolicyChain
if len(localEndpoints) == 0 {
hasExternalEndpoints = false
}
}
externalTrafficChain := svcInfo.externalChainName // eventually jumps to externalPolicyChain

// usesExternalTrafficChain is based on hasEndpoints, not hasExternalEndpoints,
// because we need the local-traffic-short-circuiting rules even when there
// are no externally-usable endpoints.
usesExternalTrafficChain := hasEndpoints && svcInfo.ExternallyAccessible()
if usesExternalTrafficChain {
activeNATChains[externalTrafficChain] = true
}

// Traffic to LoadBalancer IPs can go directly to externalTrafficChain
// unless LoadBalancerSourceRanges is in use in which case we will
// create a firewall chain.
loadBalancerTrafficChain := externalTrafficChain
fwChain := svcInfo.firewallChainName
usesFWChain := hasEndpoints && len(svcInfo.LoadBalancerIPStrings()) > 0 && len(svcInfo.LoadBalancerSourceRanges()) > 0
if usesFWChain {
activeNATChains[fwChain] = true
loadBalancerTrafficChain = fwChain
}

var internalTrafficFilterTarget, internalTrafficFilterComment string
var externalTrafficFilterTarget, externalTrafficFilterComment string
if !hasEndpoints {
// The service has no endpoints at all; hasInternalEndpoints and
// hasExternalEndpoints will also be false, and we will not
// generate any chains in the "nat" table for the service; only
// rules in the "filter" table rejecting incoming packets for
// the service's IPs.
internalTrafficFilterTarget = "REJECT"
internalTrafficFilterComment = fmt.Sprintf(`"%s has no endpoints"`, svcPortNameString)
externalTrafficFilterTarget = "REJECT"
externalTrafficFilterComment = internalTrafficFilterComment
} else {
if !hasInternalEndpoints {
// The internalTrafficPolicy is "Local" but there are no local
// endpoints. Traffic to the clusterIP will be dropped, but
// external traffic may still be accepted.
internalTrafficFilterTarget = "DROP"
internalTrafficFilterComment = fmt.Sprintf(`"%s has no local endpoints"`, svcPortNameString)
serviceNoLocalEndpointsTotalInternal++
}
if !hasExternalEndpoints {
// The externalTrafficPolicy is "Local" but there are no
// local endpoints. Traffic to "external" IPs from outside
// the cluster will be dropped, but traffic from inside
// the cluster may still be accepted.
externalTrafficFilterTarget = "DROP"
externalTrafficFilterComment = fmt.Sprintf(`"%s has no local endpoints"`, svcPortNameString)
serviceNoLocalEndpointsTotalExternal++
}
}

// Capture the clusterIP.
if hasInternalEndpoints {
proxier.natRules.Write(
"-A", string(kubeServicesChain),
"-m", "comment", "--comment", fmt.Sprintf(`"%s cluster IP"`, svcPortNameString),
"-m", protocol, "-p", protocol,
"-d", svcInfo.ClusterIP().String(),
"--dport", strconv.Itoa(svcInfo.Port()),
"-j", string(internalTrafficChain))
} else {
// No endpoints.
proxier.filterRules.Write(
"-A", string(kubeServicesChain),
"-m", "comment", "--comment", internalTrafficFilterComment,
"-m", protocol, "-p", protocol,
"-d", svcInfo.ClusterIP().String(),
"--dport", strconv.Itoa(svcInfo.Port()),
"-j", internalTrafficFilterTarget,
)
}

// Capture externalIPs.
for _, externalIP := range svcInfo.ExternalIPStrings() {
if hasEndpoints {
// Send traffic bound for external IPs to the "external
// destinations" chain.
proxier.natRules.Write(
"-A", string(kubeServicesChain),
"-m", "comment", "--comment", fmt.Sprintf(`"%s external IP"`, svcPortNameString),
"-m", protocol, "-p", protocol,
"-d", externalIP,
"--dport", strconv.Itoa(svcInfo.Port()),
"-j", string(externalTrafficChain))
}
if !hasExternalEndpoints {
// Either no endpoints at all (REJECT) or no endpoints for
// external traffic (DROP anything that didn't get
// short-circuited by the EXT chain.)
proxier.filterRules.Write(
"-A", string(kubeExternalServicesChain),
"-m", "comment", "--comment", externalTrafficFilterComment,
"-m", protocol, "-p", protocol,
"-d", externalIP,
"--dport", strconv.Itoa(svcInfo.Port()),
"-j", externalTrafficFilterTarget,
)
}
}

// Capture load-balancer ingress.
for _, lbip := range svcInfo.LoadBalancerIPStrings() {
if hasEndpoints {
proxier.natRules.Write(
"-A", string(kubeServicesChain),
"-m", "comment", "--comment", fmt.Sprintf(`"%s loadbalancer IP"`, svcPortNameString),
"-m", protocol, "-p", protocol,
"-d", lbip,
"--dport", strconv.Itoa(svcInfo.Port()),
"-j", string(loadBalancerTrafficChain))

}
if usesFWChain {
proxier.filterRules.Write(
"-A", string(kubeProxyFirewallChain),
"-m", "comment", "--comment", fmt.Sprintf(`"%s traffic not accepted by %s"`, svcPortNameString, svcInfo.firewallChainName),
"-m", protocol, "-p", protocol,
"-d", lbip,
"--dport", strconv.Itoa(svcInfo.Port()),
"-j", "DROP")
}
}
if !hasExternalEndpoints {
// Either no endpoints at all (REJECT) or no endpoints for
// external traffic (DROP anything that didn't get short-circuited
// by the EXT chain.)
for _, lbip := range svcInfo.LoadBalancerIPStrings() {
proxier.filterRules.Write(
"-A", string(kubeExternalServicesChain),
"-m", "comment", "--comment", externalTrafficFilterComment,
"-m", protocol, "-p", protocol,
"-d", lbip,
"--dport", strconv.Itoa(svcInfo.Port()),
"-j", externalTrafficFilterTarget,
)
}
}

// Capture nodeports.
if svcInfo.NodePort() != 0 {
if hasEndpoints {
// Jump to the external destination chain. For better or for
// worse, nodeports are not subject to loadBalancerSourceRanges,
// and we can't change that.
proxier.natRules.Write(
"-A", string(kubeNodePortsChain),
"-m", "comment", "--comment", svcPortNameString,
"-m", protocol, "-p", protocol,
"--dport", strconv.Itoa(svcInfo.NodePort()),
"-j", string(externalTrafficChain))
}
if !hasExternalEndpoints {
// Either no endpoints at all (REJECT) or no endpoints for
// external traffic (DROP anything that didn't get
// short-circuited by the EXT chain.)
proxier.filterRules.Write(
"-A", string(kubeExternalServicesChain),
"-m", "comment", "--comment", externalTrafficFilterComment,
"-m", "addrtype", "--dst-type", "LOCAL",
"-m", protocol, "-p", protocol,
"--dport", strconv.Itoa(svcInfo.NodePort()),
"-j", externalTrafficFilterTarget,
)
}
}

// Capture healthCheckNodePorts.
if svcInfo.HealthCheckNodePort() != 0 {
// no matter if node has local endpoints, healthCheckNodePorts
// need to add a rule to accept the incoming connection
proxier.filterRules.Write(
"-A", string(kubeNodePortsChain),
"-m", "comment", "--comment", fmt.Sprintf(`"%s health check node port"`, svcPortNameString),
"-m", "tcp", "-p", "tcp",
"--dport", strconv.Itoa(svcInfo.HealthCheckNodePort()),
"-j", "ACCEPT",
)
}

// If the SVC/SVL/EXT/FW/SEP chains have not changed since the last sync
// then we can omit them from the restore input. (We have already marked
// them in activeNATChains, so they won't get deleted.)
if tryPartialSync && !serviceChanged.Has(svcName.NamespacedName.String()) && !endpointsChanged.Has(svcName.NamespacedName.String()) {
continue
}

// Set up internal traffic handling.
if hasInternalEndpoints {
args = append(args[:0],
"-m", "comment", "--comment", fmt.Sprintf(`"%s cluster IP"`, svcPortNameString),
"-m", protocol, "-p", protocol,
"-d", svcInfo.ClusterIP().String(),
"--dport", strconv.Itoa(svcInfo.Port()),
)
if proxier.masqueradeAll {
proxier.natRules.Write(
"-A", string(internalTrafficChain),
args,
"-j", string(kubeMarkMasqChain))
} else if proxier.localDetector.IsImplemented() {
// This masquerades off-cluster traffic to a service VIP. The
// idea is that you can establish a static route for your
// Service range, routing to any node, and that node will
// bridge into the Service for you. Since that might bounce
// off-node, we masquerade here.
proxier.natRules.Write(
"-A", string(internalTrafficChain),
args,
proxier.localDetector.IfNotLocal(),
"-j", string(kubeMarkMasqChain))
}
}

// Set up external traffic handling (if any "external" destinations are
// enabled). All captured traffic for all external destinations should
// jump to externalTrafficChain, which will handle some special cases and
// then jump to externalPolicyChain.
if usesExternalTrafficChain {
proxier.natChains.Write(utiliptables.MakeChainLine(externalTrafficChain))

if !svcInfo.ExternalPolicyLocal() {
// If we are using non-local endpoints we need to masquerade,
// in case we cross nodes.
proxier.natRules.Write(
"-A", string(externalTrafficChain),
"-m", "comment", "--comment", fmt.Sprintf(`"masquerade traffic for %s external destinations"`, svcPortNameString),
"-j", string(kubeMarkMasqChain))
} else {
// If we are only using same-node endpoints, we can retain the
// source IP in most cases.

if proxier.localDetector.IsImplemented() {
// Treat all locally-originated pod -> external destination
// traffic as a special-case. It is subject to neither
// form of traffic policy, which simulates going up-and-out
// to an external load-balancer and coming back in.
proxier.natRules.Write(
"-A", string(externalTrafficChain),
"-m", "comment", "--comment", fmt.Sprintf(`"pod traffic for %s external destinations"`, svcPortNameString),
proxier.localDetector.IfLocal(),
"-j", string(clusterPolicyChain))
}

// Locally originated traffic (not a pod, but the host node)
// still needs masquerade because the LBIP itself is a local
// address, so that will be the chosen source IP.
proxier.natRules.Write(
"-A", string(externalTrafficChain),
"-m", "comment", "--comment", fmt.Sprintf(`"masquerade LOCAL traffic for %s external destinations"`, svcPortNameString),
"-m", "addrtype", "--src-type", "LOCAL",
"-j", string(kubeMarkMasqChain))

// Redirect all src-type=LOCAL -> external destination to the
// policy=cluster chain. This allows traffic originating
// from the host to be redirected to the service correctly.
proxier.natRules.Write(
"-A", string(externalTrafficChain),
"-m", "comment", "--comment", fmt.Sprintf(`"route LOCAL traffic for %s external destinations"`, svcPortNameString),
"-m", "addrtype", "--src-type", "LOCAL",
"-j", string(clusterPolicyChain))
}

// Anything else falls thru to the appropriate policy chain.
if hasExternalEndpoints {
proxier.natRules.Write(
"-A", string(externalTrafficChain),
"-j", string(externalPolicyChain))
}
}

// Set up firewall chain, if needed
if usesFWChain {
proxier.natChains.Write(utiliptables.MakeChainLine(fwChain))

// The service firewall rules are created based on the
// loadBalancerSourceRanges field. This only works for VIP-like
// loadbalancers that preserve source IPs. For loadbalancers which
// direct traffic to service NodePort, the firewall rules will not
// apply.
args = append(args[:0],
"-A", string(fwChain),
"-m", "comment", "--comment", fmt.Sprintf(`"%s loadbalancer IP"`, svcPortNameString),
)

// firewall filter based on each source range
allowFromNode := false
for _, src := range svcInfo.LoadBalancerSourceRanges() {
proxier.natRules.Write(args, "-s", src, "-j", string(externalTrafficChain))
_, cidr, err := netutils.ParseCIDRSloppy(src)
if err != nil {
klog.ErrorS(err, "Error parsing CIDR in LoadBalancerSourceRanges, dropping it", "cidr", cidr)
} else if cidr.Contains(proxier.nodeIP) {
allowFromNode = true
}
}
// For VIP-like LBs, the VIP is often added as a local
// address (via an IP route rule). In that case, a request
// from a node to the VIP will not hit the loadbalancer but
// will loop back with the source IP set to the VIP. We
// need the following rules to allow requests from this node.
if allowFromNode {
for _, lbip := range svcInfo.LoadBalancerIPStrings() {
proxier.natRules.Write(
args,
"-s", lbip,
"-j", string(externalTrafficChain))
}
}
// If the packet was able to reach the end of firewall chain,
// then it did not get DNATed, so it will match the
// corresponding KUBE-PROXY-FIREWALL rule.
proxier.natRules.Write(
"-A", string(fwChain),
"-m", "comment", "--comment", fmt.Sprintf(`"other traffic to %s will be dropped by KUBE-PROXY-FIREWALL"`, svcPortNameString),
)
}

// If Cluster policy is in use, create the chain and create rules jumping
// from clusterPolicyChain to the clusterEndpoints
if usesClusterPolicyChain {
proxier.natChains.Write(utiliptables.MakeChainLine(clusterPolicyChain))
proxier.writeServiceToEndpointRules(svcPortNameString, svcInfo, clusterPolicyChain, clusterEndpoints, args)
}

// If Local policy is in use, create the chain and create rules jumping
// from localPolicyChain to the localEndpoints
if usesLocalPolicyChain {
proxier.natChains.Write(utiliptables.MakeChainLine(localPolicyChain))
proxier.writeServiceToEndpointRules(svcPortNameString, svcInfo, localPolicyChain, localEndpoints, args)
}

// Generate the per-endpoint chains.
for _, ep := range allLocallyReachableEndpoints {
epInfo, ok := ep.(*endpointsInfo)
if !ok {
klog.ErrorS(nil, "Failed to cast endpointsInfo", "endpointsInfo", ep)
continue
}

endpointChain := epInfo.ChainName

// Create the endpoint chain
proxier.natChains.Write(utiliptables.MakeChainLine(endpointChain))
activeNATChains[endpointChain] = true

args = append(args[:0], "-A", string(endpointChain))
args = proxier.appendServiceCommentLocked(args, svcPortNameString)
// Handle traffic that loops back to the originator with SNAT.
proxier.natRules.Write(
args,
"-s", epInfo.IP(),
"-j", string(kubeMarkMasqChain))
// Update client-affinity lists.
if svcInfo.SessionAffinityType() == v1.ServiceAffinityClientIP {
args = append(args, "-m", "recent", "--name", string(endpointChain), "--set")
}
// DNAT to final destination.
args = append(args, "-m", protocol, "-p", protocol, "-j", "DNAT", "--to-destination", epInfo.Endpoint)
proxier.natRules.Write(args)
}
}

// Delete chains no longer in use. Since "iptables-save" can take several seconds
// to run on hosts with lots of iptables rules, we don't bother to do this on
// every sync in large clusters. (Stale chains will not be referenced by any
// active rules, so they're harmless other than taking up memory.)
if !proxier.largeClusterMode || time.Since(proxier.lastIPTablesCleanup) > proxier.syncPeriod {
var existingNATChains map[utiliptables.Chain]struct{}

proxier.iptablesData.Reset()
if err := proxier.iptables.SaveInto(utiliptables.TableNAT, proxier.iptablesData); err == nil {
existingNATChains = utiliptables.GetChainsFromTable(proxier.iptablesData.Bytes())

for chain := range existingNATChains {
if !activeNATChains[chain] {
chainString := string(chain)
if !isServiceChainName(chainString) {
// Ignore chains that aren't ours.
continue
}
// We must (as per iptables) write a chain-line
// for it, which has the nice effect of flushing
// the chain. Then we can remove the chain.
proxier.natChains.Write(utiliptables.MakeChainLine(chain))
proxier.natRules.Write("-X", chainString)
}
}
proxier.lastIPTablesCleanup = time.Now()
} else {
klog.ErrorS(err, "Failed to execute iptables-save: stale chains will not be deleted")
}
}

// Finally, tail-call to the nodePorts chain. This needs to be after all
// other service portal rules.
nodeAddresses, err := proxier.nodePortAddresses.GetNodeAddresses(proxier.networkInterfacer)
if err != nil {
klog.ErrorS(err, "Failed to get node ip address matching nodeport cidrs, services with nodeport may not work as intended", "CIDRs", proxier.nodePortAddresses)
}
// nodeAddresses may contain dual-stack zero-CIDRs if proxier.nodePortAddresses is empty.
// Ensure nodeAddresses only contains the addresses for this proxier's IP family.
for addr := range nodeAddresses {
if utilproxy.IsZeroCIDR(addr) && isIPv6 == netutils.IsIPv6CIDRString(addr) {
// if any of the addresses is zero cidr of this IP family, non-zero IPs can be excluded.
nodeAddresses = sets.New[string](addr)
break
}
}

for address := range nodeAddresses {
if utilproxy.IsZeroCIDR(address) {
destinations := []string{"-m", "addrtype", "--dst-type", "LOCAL"}
if isIPv6 {
// For IPv6, regardless of whether localhostNodePorts is true or
// false, we should disable access to the NodePort via localhost,
// since it never works and always causes kernel warnings.
destinations = append(destinations, "!", "-d", "::1/128")
}

if !proxier.localhostNodePorts && !isIPv6 {
// If localhostNodePorts is set to "false" (route_localnet=0), we should
// generate iptables rules that prevent NodePort services from being
// accessed via localhost, since that doesn't work and causes the
// kernel to log warnings if anyone tries.
destinations = append(destinations, "!", "-d", "127.0.0.0/8")
}

proxier.natRules.Write(
"-A", string(kubeServicesChain),
"-m", "comment", "--comment", `"kubernetes service nodeports; NOTE: this must be the last rule in this chain"`,
destinations,
"-j", string(kubeNodePortsChain))
break
}

// Ignore IP addresses with incorrect version
if isIPv6 && !netutils.IsIPv6String(address) || !isIPv6 && netutils.IsIPv6String(address) {
klog.ErrorS(nil, "IP has incorrect IP version", "IP", address)
continue
}

// For IPv6, regardless of whether localhostNodePorts is true or false,
// we disallow access to the NodePort via the loopback address.
if isIPv6 && utilproxy.IsLoopBack(address) {
klog.ErrorS(nil, "disallow nodePort services to be accessed via ipv6 localhost address", "IP", address)
continue
}

// For IPv4, when localhostNodePorts is set to false, ignore the IPv4 loopback address
if !isIPv6 && utilproxy.IsLoopBack(address) && !proxier.localhostNodePorts {
klog.ErrorS(nil, "disallow nodePort services to be accessed via ipv4 localhost address", "IP", address)
continue
}

// create nodeport rules for each IP one by one
proxier.natRules.Write(
"-A", string(kubeServicesChain),
"-m", "comment", "--comment", `"kubernetes service nodeports; NOTE: this must be the last rule in this chain"`,
"-d", address,
"-j", string(kubeNodePortsChain))
}

// Drop the packets in INVALID state, which would potentially cause
// unexpected connection reset.
// https://github.com/kubernetes/kubernetes/issues/74839
proxier.filterRules.Write(
"-A", string(kubeForwardChain),
"-m", "conntrack",
"--ctstate", "INVALID",
"-j", "DROP",
)

// If the masqueradeMark has been added then we want to forward that same
// traffic, this allows NodePort traffic to be forwarded even if the default
// FORWARD policy is not accept.
proxier.filterRules.Write(
"-A", string(kubeForwardChain),
"-m", "comment", "--comment", `"kubernetes forwarding rules"`,
"-m", "mark", "--mark", fmt.Sprintf("%s/%s", proxier.masqueradeMark, proxier.masqueradeMark),
"-j", "ACCEPT",
)

// The following rule ensures the traffic after the initial packet accepted
// by the "kubernetes forwarding rules" rule above will be accepted.
proxier.filterRules.Write(
"-A", string(kubeForwardChain),
"-m", "comment", "--comment", `"kubernetes forwarding conntrack rule"`,
"-m", "conntrack",
"--ctstate", "RELATED,ESTABLISHED",
"-j", "ACCEPT",
)

metrics.IptablesRulesTotal.WithLabelValues(string(utiliptables.TableFilter)).Set(float64(proxier.filterRules.Lines()))
metrics.IptablesRulesTotal.WithLabelValues(string(utiliptables.TableNAT)).Set(float64(proxier.natRules.Lines()))

// Sync rules.
proxier.iptablesData.Reset()
proxier.iptablesData.WriteString("*filter\n")
proxier.iptablesData.Write(proxier.filterChains.Bytes())
proxier.iptablesData.Write(proxier.filterRules.Bytes())
proxier.iptablesData.WriteString("COMMIT\n")
proxier.iptablesData.WriteString("*nat\n")
proxier.iptablesData.Write(proxier.natChains.Bytes())
proxier.iptablesData.Write(proxier.natRules.Bytes())
proxier.iptablesData.WriteString("COMMIT\n")

klog.V(2).InfoS("Reloading service iptables data",
"numServices", len(proxier.svcPortMap),
"numEndpoints", totalEndpoints,
"numFilterChains", proxier.filterChains.Lines(),
"numFilterRules", proxier.filterRules.Lines(),
"numNATChains", proxier.natChains.Lines(),
"numNATRules", proxier.natRules.Lines(),
)
klog.V(9).InfoS("Restoring iptables", "rules", proxier.iptablesData.Bytes())

// NOTE: NoFlushTables is used so we don't flush non-kubernetes chains in the table
err = proxier.iptables.RestoreAll(proxier.iptablesData.Bytes(), utiliptables.NoFlushTables, utiliptables.RestoreCounters)
if err != nil {
if pErr, ok := err.(utiliptables.ParseError); ok {
lines := utiliptables.ExtractLines(proxier.iptablesData.Bytes(), pErr.Line(), 3)
klog.ErrorS(pErr, "Failed to execute iptables-restore", "rules", lines)
} else {
klog.ErrorS(err, "Failed to execute iptables-restore")
}
metrics.IptablesRestoreFailuresTotal.Inc()
return
}
success = true
proxier.needFullSync = false

for name, lastChangeTriggerTimes := range endpointUpdateResult.LastChangeTriggerTimes {
for _, lastChangeTriggerTime := range lastChangeTriggerTimes {
latency := metrics.SinceInSeconds(lastChangeTriggerTime)
metrics.NetworkProgrammingLatency.Observe(latency)
klog.V(4).InfoS("Network programming", "endpoint", klog.KRef(name.Namespace, name.Name), "elapsed", latency)
}
}

metrics.SyncProxyRulesNoLocalEndpointsTotal.WithLabelValues("internal").Set(float64(serviceNoLocalEndpointsTotalInternal))
metrics.SyncProxyRulesNoLocalEndpointsTotal.WithLabelValues("external").Set(float64(serviceNoLocalEndpointsTotalExternal))
if proxier.healthzServer != nil {
proxier.healthzServer.Updated()
}
metrics.SyncProxyRulesLastTimestamp.SetToCurrentTime()

// Update service healthchecks. The endpoints list might include services that are
// not "OnlyLocal", but the services list will not, and the serviceHealthServer
// will just drop those endpoints.
if err := proxier.serviceHealthServer.SyncServices(proxier.svcPortMap.HealthCheckNodePorts()); err != nil {
klog.ErrorS(err, "Error syncing healthcheck services")
}
if err := proxier.serviceHealthServer.SyncEndpoints(proxier.endpointsMap.LocalReadyEndpoints()); err != nil {
klog.ErrorS(err, "Error syncing healthcheck endpoints")
}

// Finish housekeeping, clear stale conntrack entries for UDP Services
conntrack.CleanStaleEntries(proxier.iptables.IsIPv6(), proxier.exec, proxier.svcPortMap, serviceUpdateResult, endpointUpdateResult)
}
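
All chains and rules are emitted in iptables-restore format and committed atomically in a single RestoreAll call. An abbreviated, purely illustrative example of what proxier.iptablesData might contain for one ClusterIP service with a single endpoint (real chain names carry hashed suffixes such as KUBE-SVC-XXXXXXXXXXXXXXXX; the addresses here are made up):

*filter
:KUBE-SERVICES - [0:0]
:KUBE-EXTERNAL-SERVICES - [0:0]
:KUBE-FORWARD - [0:0]
-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
COMMIT
*nat
:KUBE-SERVICES - [0:0]
:KUBE-POSTROUTING - [0:0]
:KUBE-MARK-MASQ - [0:0]
:KUBE-SVC-XXX - [0:0]
:KUBE-SEP-YYY - [0:0]
-A KUBE-SERVICES -m comment --comment "default/demo cluster IP" -m tcp -p tcp -d 10.96.0.10 --dport 80 -j KUBE-SVC-XXX
-A KUBE-SVC-XXX -j KUBE-SEP-YYY
-A KUBE-SEP-YYY -s 10.244.1.5 -j KUBE-MARK-MASQ
-A KUBE-SEP-YYY -m tcp -p tcp -j DNAT --to-destination 10.244.1.5:8080
COMMIT

Because the restore is driven with NoFlushTables, only the chains named in this input are replaced; other chains in the nat and filter tables are left untouched.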


REF:
1. cmd/kube-proxy/proxy.go
2. cmd/kube-proxy/app/server.go
3. cmd/kube-proxy/app/server_others.go
4. pkg/proxy/config/config.go
5. pkg/proxy/ipvs/proxier.go
6. pkg/proxy/iptables/proxier.go