Commit e2394bd

Deflake e2e tests of HPA
Resource consumer might use slightly more CPU than requested, which sometimes caused the HPA to increase the size of deployments during e2e tests. Deflake the tests by:

- Scaling up CPU requests in those tests. The resource consumer may go a fixed number of milli-CPU seconds above its target, so higher requests make the tests less sensitive.
- On scale down, consuming CPU in the middle between the level that would generate a recommendation of the expected size and the level for one pod fewer (instead of right on the edge between the expected size and expected + 1).

Some variables were int32 but were always cast to int before use. Make them int.
1 parent b7c2d92 commit e2394bd
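
As a rough sketch of the arithmetic behind the new constants, assuming the standard HPA recommendation formula desiredReplicas = ceil(currentReplicas * currentUtilization / targetUtilization); the recommend helper below is illustrative only and not part of the test or HPA code:

package main

import (
	"fmt"
	"math"
)

// recommend mirrors the standard HPA formula
// desiredReplicas = ceil(currentReplicas * currentUtilization / targetUtilization).
// Illustrative only; not taken from the e2e test or the HPA controller.
func recommend(currentReplicas int, totalUsageMilli, perPodRequestMilli int64, targetPercent int32) int {
	utilization := float64(totalUsageMilli) / float64(int64(currentReplicas)*perPodRequestMilli) * 100
	return int(math.Ceil(float64(currentReplicas) * utilization / float64(targetPercent)))
}

func main() {
	// Scale-up test, new values: 500m used by 1 pod with a 1000m request and a 20% target.
	// Utilization is 50%, so the recommendation is ceil(1 * 50/20) = 3 replicas; it only
	// tips to 4 above 600m of usage, versus above 300m with the old 250m/500m values.
	fmt.Println(recommend(1, 500, 1000, 20)) // 3

	// Scale-down test, new values: 650m spread over 5 pods with 1000m requests and a 30% target.
	// Utilization is 13%, so the recommendation is ceil(5 * 13/30) = 3 replicas; it only tips
	// to 4 above 900m of total usage, leaving ~250m of headroom for the consumer overshooting,
	// versus ~75m with the old 375m/500m values.
	fmt.Println(recommend(5, 650, 1000, 30)) // 3
}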

File tree

1 file changed: +8 −8 lines changed

test/e2e/autoscaling/horizontal_pod_autoscaling.go

Lines changed: 8 additions & 8 deletions

@@ -96,8 +96,8 @@ var _ = SIGDescribe("[HPA] Horizontal pod autoscaling (scale resource: CPU)", fu
 
 // HPAScaleTest struct is used by the scale(...) function.
 type HPAScaleTest struct {
-	initPods                    int32
-	totalInitialCPUUsage        int32
+	initPods                    int
+	totalInitialCPUUsage        int
 	perPodCPURequest            int64
 	targetCPUUtilizationPercent int32
 	minPods                     int32
@@ -116,7 +116,7 @@ type HPAScaleTest struct {
 // TODO The use of 3 states is arbitrary, we could eventually make this test handle "n" states once this test stabilizes.
 func (scaleTest *HPAScaleTest) run(name string, kind schema.GroupVersionKind, rc *common.ResourceConsumer, f *framework.Framework) {
 	const timeToWait = 15 * time.Minute
-	rc = common.NewDynamicResourceConsumer(name, f.Namespace.Name, kind, int(scaleTest.initPods), int(scaleTest.totalInitialCPUUsage), 0, 0, scaleTest.perPodCPURequest, 200, f.ClientSet, f.InternalClientset, f.ScalesGetter)
+	rc = common.NewDynamicResourceConsumer(name, f.Namespace.Name, kind, scaleTest.initPods, scaleTest.totalInitialCPUUsage, 0, 0, scaleTest.perPodCPURequest, 200, f.ClientSet, f.InternalClientset, f.ScalesGetter)
 	defer rc.CleanUp()
 	hpa := common.CreateCPUHorizontalPodAutoscaler(rc, scaleTest.targetCPUUtilizationPercent, scaleTest.minPods, scaleTest.maxPods)
 	defer common.DeleteHorizontalPodAutoscaler(rc, hpa.Name)
@@ -137,14 +137,14 @@ func scaleUp(name string, kind schema.GroupVersionKind, checkStability bool, rc
 	}
 	scaleTest := &HPAScaleTest{
 		initPods:                    1,
-		totalInitialCPUUsage:        250,
-		perPodCPURequest:            500,
+		totalInitialCPUUsage:        500,
+		perPodCPURequest:            1000,
 		targetCPUUtilizationPercent: 20,
 		minPods:                     1,
 		maxPods:                     5,
 		firstScale:                  3,
 		firstScaleStasis:            stasis,
-		cpuBurst:                    700,
+		cpuBurst:                    1400,
 		secondScale:                 5,
 	}
 	scaleTest.run(name, kind, rc, f)
@@ -157,8 +157,8 @@ func scaleDown(name string, kind schema.GroupVersionKind, checkStability bool, r
 	}
 	scaleTest := &HPAScaleTest{
 		initPods:                    5,
-		totalInitialCPUUsage:        375,
-		perPodCPURequest:            500,
+		totalInitialCPUUsage:        650,
+		perPodCPURequest:            1000,
 		targetCPUUtilizationPercent: 30,
 		minPods:                     1,
 		maxPods:                     5,