Skip to content

Commit b9b924f

Browse files
authored
[oocm] support for externalTrafficPolicy: Local (kubernetes#1720)
* support for externalTrafficPolicy local with externalTrafficPolicy local we create an http healthmonitor that checks the dedicated healthCheckNodePort. Signed-off-by: Fabian Ruff <fabian.ruff@sap.com> * remove limitation from docs Signed-off-by: Fabian Ruff <fabian.ruff@sap.com> * address feedback * Add some note that health monitors are mandatory for externalTrafficPolicy: Local
1 parent 9d60945 commit b9b924f

4 files changed

Lines changed: 45 additions & 20 deletions

File tree

docs/openstack-cloud-controller-manager/expose-applications-using-loadbalancer-type-service.md

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -173,7 +173,7 @@ Request Body:
173173

174174
- `loadbalancer.openstack.org/enable-health-monitor`
175175

176-
Defines whether to create health monitor for the load balancer pool, if not specified, use `create-monitor` config. The health monitor can be created or deleted dynamically.
176+
Defines whether to create health monitor for the load balancer pool, if not specified, use `create-monitor` config. The health monitor can be created or deleted dynamically. A health monitor is required for services with `externalTrafficPolicy: Local`.
177177

178178
Not supported when `lb-provider=ovn` is configured in openstack-cloud-controller-manager.
179179

docs/openstack-cloud-controller-manager/using-openstack-cloud-controller-manager.md

Lines changed: 1 addition & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -16,7 +16,6 @@
1616
- [Metrics](#metrics)
1717
- [Limitation](#limitation)
1818
- [OpenStack availability zone must not contain blank](#openstack-availability-zone-must-not-contain-blank)
19-
- [externalTrafficPolicy support](#externaltrafficpolicy-support)
2019

2120
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
2221

@@ -209,7 +208,7 @@ Although the openstack-cloud-controller-manager was initially implemented with N
209208
This option is not supported for Octavia. The worker nodes and the Octavia amphorae are usually in the same subnet, so it's sufficient to config the port security group rules manually for worker nodes, to allow the traffic coming from the subnet IP range to the node port range(i.e. 30000-32767).
210209
211210
* `create-monitor`
212-
Indicates whether or not to create a health monitor for the service load balancer. Default: false
211+
Indicates whether or not to create a health monitor for the service load balancer. A health monitor is required for services that declare `externalTrafficPolicy: Local`. Default: false
213212
214213
* `monitor-delay`
215214
The time, in seconds, between sending probes to members of the load balancer. Default: 5
@@ -289,9 +288,3 @@ Refer to [Metrics for openstack-cloud-controller-manager](../metrics.md)
289288
### OpenStack availability zone must not contain blank
290289
291290
`topology.kubernetes.io/zone` is used to label node and its value comes from availability zone of the node, according to [label spec](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#syntax-and-character-set) it does not support blank (' ') but OpenStack availability zone supports blank. So your OpenStack availability zone must not contain blank otherwise it will lead to node that belongs to this availability zone register failure, see [#1379](https://github.com/kubernetes/cloud-provider-openstack/issues/1379) for further information.
292-
293-
### externalTrafficPolicy support
294-
295-
`externalTrafficPolicy` denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints. "Local" preserves the client source IP and avoids a second hop for LoadBalancer and Nodeport type services, but risks potentially imbalanced traffic spreading. "Cluster" obscures the client source IP and may cause a second hop to another node, but should have good overall load-spreading.
296-
297-
openstack-cloud-controller-manager only supports `externalTrafficPolicy: Cluster` for now.

pkg/openstack/loadbalancer.go

Lines changed: 32 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -337,6 +337,7 @@ type serviceConfig struct {
337337
lbID string
338338
lbName string
339339
supportLBTags bool
340+
healthCheckNodePort int
340341
}
341342

342343
type listenerKey struct {
@@ -623,7 +624,7 @@ func (lbaas *LbaasV2) createFullyPopulatedOctaviaLoadBalancer(name, clusterName
623624
poolCreateOpt.Name = fmt.Sprintf("%s_%d_pool", listenerCreateOpt.Protocol, int(port.Port))
624625
var withHealthMonitor string
625626
if svcConf.enableMonitor {
626-
opts := lbaas.buildMonitorCreateOpts(port)
627+
opts := lbaas.buildMonitorCreateOpts(svcConf, port)
627628
poolCreateOpt.Monitor = &opts
628629
withHealthMonitor = " with healthmonitor"
629630
}
@@ -1132,10 +1133,25 @@ func (lbaas *LbaasV2) getServiceAddress(clusterName string, service *corev1.Serv
11321133
func (lbaas *LbaasV2) ensureOctaviaHealthMonitor(lbID string, name string, pool *v2pools.Pool, port corev1.ServicePort, svcConf *serviceConfig) error {
11331134
monitorID := pool.MonitorID
11341135

1136+
if monitorID != "" {
1137+
monitor, err := openstackutil.GetHealthMonitor(lbaas.lb, monitorID)
1138+
if err != nil {
1139+
return err
1140+
}
1141+
//Recreate health monitor with correct protocol if externalTrafficPolicy was changed
1142+
if (svcConf.healthCheckNodePort > 0 && monitor.Type != "HTTP") ||
1143+
(svcConf.healthCheckNodePort == 0 && monitor.Type == "HTTP") {
1144+
klog.InfoS("Recreating health monitor for the pool", "pool", pool.ID, "oldMonitor", monitorID)
1145+
if err := openstackutil.DeleteHealthMonitor(lbaas.lb, monitorID, lbID); err != nil {
1146+
return err
1147+
}
1148+
monitorID = ""
1149+
}
1150+
}
11351151
if monitorID == "" && svcConf.enableMonitor {
11361152
klog.V(2).Infof("Creating monitor for pool %s", pool.ID)
11371153

1138-
createOpts := lbaas.buildMonitorCreateOpts(port)
1154+
createOpts := lbaas.buildMonitorCreateOpts(svcConf, port)
11391155
// Populate PoolID, attribute is omitted for consumption of the createOpts for fully populated Loadbalancer
11401156
createOpts.PoolID = pool.ID
11411157
createOpts.Name = name
@@ -1144,6 +1160,7 @@ func (lbaas *LbaasV2) ensureOctaviaHealthMonitor(lbID string, name string, pool
11441160
return err
11451161
}
11461162
monitorID = monitor.ID
1163+
klog.Infof("Health monitor %s for pool %s created.", monitorID, pool.ID)
11471164
} else if monitorID != "" && !svcConf.enableMonitor {
11481165
klog.Infof("Deleting health monitor %s for pool %s", monitorID, pool.ID)
11491166

@@ -1152,19 +1169,18 @@ func (lbaas *LbaasV2) ensureOctaviaHealthMonitor(lbID string, name string, pool
11521169
}
11531170
}
11541171

1155-
if monitorID != "" {
1156-
klog.Infof("Health monitor %s for pool %s created.", monitorID, pool.ID)
1157-
}
1158-
11591172
return nil
11601173
}
11611174

11621175
//buildMonitorCreateOpts returns a v2monitors.CreateOpts without PoolID for consumption of both, fully populated Loadbalancers and Monitors.
1163-
func (lbaas *LbaasV2) buildMonitorCreateOpts(port corev1.ServicePort) v2monitors.CreateOpts {
1176+
func (lbaas *LbaasV2) buildMonitorCreateOpts(svcConf *serviceConfig, port corev1.ServicePort) v2monitors.CreateOpts {
11641177
monitorProtocol := string(port.Protocol)
11651178
if port.Protocol == corev1.ProtocolUDP {
11661179
monitorProtocol = "UDP-CONNECT"
11671180
}
1181+
if svcConf.healthCheckNodePort > 0 {
1182+
monitorProtocol = "HTTP"
1183+
}
11681184
return v2monitors.CreateOpts{
11691185
Type: monitorProtocol,
11701186
Delay: int(lbaas.opts.MonitorDelay.Duration.Seconds()),
@@ -1209,17 +1225,16 @@ func (lbaas *LbaasV2) ensureOctaviaPool(lbID string, name string, listener *list
12091225
if err != nil {
12101226
return nil, err
12111227
}
1228+
klog.V(2).Infof("Pool %s created for listener %s", pool.ID, listener.ID)
12121229
}
12131230

1214-
klog.V(2).Infof("Pool %s created for listener %s", pool.ID, listener.ID)
1215-
12161231
curMembers := sets.NewString()
12171232
poolMembers, err := openstackutil.GetMembersbyPool(lbaas.lb, pool.ID)
12181233
if err != nil {
12191234
klog.Errorf("failed to get members in the pool %s: %v", pool.ID, err)
12201235
}
12211236
for _, m := range poolMembers {
1222-
curMembers.Insert(fmt.Sprintf("%s-%d", m.Address, m.ProtocolPort))
1237+
curMembers.Insert(fmt.Sprintf("%s-%d-%d", m.Address, m.ProtocolPort, m.MonitorPort))
12231238
}
12241239

12251240
members, newMembers, err := lbaas.buildBatchUpdateMemberOpts(port, nodes, svcConf)
@@ -1294,8 +1309,11 @@ func (lbaas *LbaasV2) buildBatchUpdateMemberOpts(port corev1.ServicePort, nodes
12941309
Name: &node.Name,
12951310
SubnetID: &svcConf.lbMemberSubnetID,
12961311
}
1312+
if svcConf.healthCheckNodePort > 0 {
1313+
member.MonitorPort = &svcConf.healthCheckNodePort
1314+
}
12971315
members = append(members, member)
1298-
newMembers.Insert(fmt.Sprintf("%s-%d", addr, member.ProtocolPort))
1316+
newMembers.Insert(fmt.Sprintf("%s-%d-%d", addr, member.ProtocolPort, svcConf.healthCheckNodePort))
12991317
}
13001318
return members, newMembers, nil
13011319
}
@@ -1679,6 +1697,9 @@ func (lbaas *LbaasV2) checkService(service *corev1.Service, nodes []*corev1.Node
16791697
}
16801698

16811699
svcConf.enableMonitor = getBoolFromServiceAnnotation(service, ServiceAnnotationLoadBalancerEnableHealthMonitor, lbaas.opts.CreateMonitor)
1700+
if svcConf.enableMonitor && lbaas.opts.UseOctavia && service.Spec.ExternalTrafficPolicy == corev1.ServiceExternalTrafficPolicyTypeLocal && service.Spec.HealthCheckNodePort > 0 {
1701+
svcConf.healthCheckNodePort = int(service.Spec.HealthCheckNodePort)
1702+
}
16821703

16831704
return nil
16841705
}

pkg/util/openstack/loadbalancer.go

Lines changed: 11 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -668,3 +668,14 @@ func CreateHealthMonitor(client *gophercloud.ServiceClient, opts monitors.Create
668668

669669
return monitor, nil
670670
}
671+
672+
// GetHealthMonitor gets details about loadbalancer health monitor.
673+
func GetHealthMonitor(client *gophercloud.ServiceClient, monitorID string) (*monitors.Monitor, error) {
674+
mc := metrics.NewMetricContext("loadbalancer_healthmonitor", "get")
675+
monitor, err := monitors.Get(client, monitorID).Extract()
676+
if mc.ObserveRequest(err) != nil {
677+
return nil, fmt.Errorf("failed to get healthmonitor: %v", err)
678+
}
679+
680+
return monitor, nil
681+
}

0 commit comments

Comments
 (0)