Skip to content

Commit 4929f16

Browse files
Merge pull request #2725 from dlom/HIVE-2891
HIVE-2891: Make hive able to run as a private image
2 parents 3693ae5 + 7d9bf8a commit 4929f16

21 files changed: +177 additions, −150 deletions

config/controllers/hive_controllers_serviceaccount.yaml

Lines changed: 0 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -3,6 +3,3 @@ kind: ServiceAccount
33
metadata:
44
name: hive-controllers
55
namespace: hive
6-
imagePullSecrets:
7-
# TODO: don't hardcode this in this file. As far as I can tell, this is harmless if the secret doesn't exist
8-
- name: quay.io

config/hiveadmission/service-account.yaml

Lines changed: 0 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -4,6 +4,3 @@ apiVersion: v1
44
kind: ServiceAccount
55
metadata:
66
name: hiveadmission
7-
imagePullSecrets:
8-
# TODO: don't hardcode this in this file. As far as I can tell, this is harmless if the secret doesn't exist
9-
- name: quay.io

config/operator/operator_deployment.yaml

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -63,6 +63,14 @@ spec:
6363
fieldPath: metadata.namespace
6464
- name: TMPDIR
6565
value: /tmp
66+
- name: POD_NAME
67+
valueFrom:
68+
fieldRef:
69+
fieldPath: metadata.name
70+
- name: POD_NAMESPACE
71+
valueFrom:
72+
fieldRef:
73+
fieldPath: metadata.namespace
6674
securityContext:
6775
privileged: false
6876
readOnlyRootFilesystem: true

hack/app-sre/saas-template.yaml

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -11327,6 +11327,14 @@ objects:
1132711327
fieldPath: metadata.namespace
1132811328
- name: TMPDIR
1132911329
value: /tmp
11330+
- name: POD_NAME
11331+
valueFrom:
11332+
fieldRef:
11333+
fieldPath: metadata.name
11334+
- name: POD_NAMESPACE
11335+
valueFrom:
11336+
fieldRef:
11337+
fieldPath: metadata.namespace
1133011338
image: ${REGISTRY_IMG}@${IMAGE_DIGEST}
1133111339
imagePullPolicy: Always
1133211340
livenessProbe:

pkg/controller/clusterdeployment/clusterdeployment_controller.go

Lines changed: 9 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -319,28 +319,24 @@ type ReconcileClusterDeployment struct {
319319

320320
protectedDelete bool
321321

322-
// nodeSelector is copied from the hive-controllers pod and must be included in any Jobs we create from here.
323-
nodeSelector *map[string]string
324-
325-
// tolerations is copied from the hive-controllers pod and must be included in any Jobs we create from here.
326-
tolerations *[]corev1.Toleration
322+
// sharedPodConfig is copied from the hive-controllers pod and must be included in any Jobs we create from here.
323+
sharedPodConfig *controllerutils.SharedPodConfig
327324
}
328325

329326
// Reconcile reads that state of the cluster for a ClusterDeployment object and makes changes based on the state read
330327
// and what is in the ClusterDeployment.Spec
331328
func (r *ReconcileClusterDeployment) Reconcile(ctx context.Context, request reconcile.Request) (result reconcile.Result, returnErr error) {
332329
cdLog := controllerutils.BuildControllerLogger(ControllerName, "clusterDeployment", request.NamespacedName)
333330

334-
// Discover scheduling settings from the controller. We would like to do this in NewReconciler,
331+
// Discover settings from the controller. We would like to do this in NewReconciler,
335332
// but we can't count on the cache having been started at that point.
336-
if r.nodeSelector == nil || r.tolerations == nil {
337-
thisPod, err := controllerutils.GetThisPod(r)
333+
if r.sharedPodConfig == nil {
334+
sharedPodConfig, err := controllerutils.ReadSharedConfigFromThisPod(r)
338335
if err != nil {
339-
cdLog.WithError(err).Error("Failed to retrieve the running pod")
336+
cdLog.WithError(err).Error("error reading shared pod config")
340337
return reconcile.Result{}, err
341338
}
342-
r.nodeSelector = &thisPod.Spec.NodeSelector
343-
r.tolerations = &thisPod.Spec.Tolerations
339+
r.sharedPodConfig = sharedPodConfig
344340
}
345341

346342
cdLog.Info("reconciling cluster deployment")
@@ -1104,8 +1100,8 @@ func (r *ReconcileClusterDeployment) resolveInstallerImage(cd *hivev1.ClusterDep
11041100
os.Getenv("HTTP_PROXY"),
11051101
os.Getenv("HTTPS_PROXY"),
11061102
os.Getenv("NO_PROXY"),
1107-
*r.nodeSelector,
1108-
*r.tolerations)
1103+
*r.sharedPodConfig,
1104+
)
11091105

11101106
cdLog.WithField("derivedObject", job.Name).Debug("Setting labels on derived object")
11111107
job.Labels = k8slabels.AddLabel(job.Labels, constants.ClusterDeploymentNameLabel, cd.Name)

pkg/controller/clusterdeployment/clusterdeployment_controller_test.go

Lines changed: 19 additions & 30 deletions
Original file line numberDiff line numberDiff line change
@@ -3589,8 +3589,7 @@ platform:
35893589
(schema.GroupVersionKind{Group: "hive.openshift.io", Version: "v1", Kind: "FakeClusterInstall"}).String(): {},
35903590
},
35913591
releaseImageVerifier: test.riVerifier,
3592-
nodeSelector: &map[string]string{},
3593-
tolerations: &[]corev1.Toleration{},
3592+
sharedPodConfig: &controllerutils.SharedPodConfig{},
35943593
}
35953594

35963595
if test.reconcilerSetup != nil {
@@ -3667,8 +3666,7 @@ func TestClusterDeploymentReconcileResults(t *testing.T) {
36673666
logger: logger,
36683667
expectations: controllerExpectations,
36693668
remoteClusterAPIClientBuilder: func(*hivev1.ClusterDeployment) remoteclient.Builder { return mockRemoteClientBuilder },
3670-
nodeSelector: &map[string]string{},
3671-
tolerations: &[]corev1.Toleration{},
3669+
sharedPodConfig: &controllerutils.SharedPodConfig{},
36723670
}
36733671

36743672
reconcileResult, err := rcd.Reconcile(context.TODO(), reconcile.Request{
@@ -3787,10 +3785,9 @@ func TestDeleteStaleProvisions(t *testing.T) {
37873785
}
37883786
fakeClient := testfake.NewFakeClientBuilder().WithRuntimeObjects(provisions...).Build()
37893787
rcd := &ReconcileClusterDeployment{
3790-
Client: fakeClient,
3791-
scheme: scheme.GetScheme(),
3792-
nodeSelector: &map[string]string{},
3793-
tolerations: &[]corev1.Toleration{},
3788+
Client: fakeClient,
3789+
scheme: scheme.GetScheme(),
3790+
sharedPodConfig: &controllerutils.SharedPodConfig{},
37943791
}
37953792
rcd.deleteStaleProvisions(getProvisions(fakeClient), log.WithField("test", "TestDeleteStaleProvisions"))
37963793
actualAttempts := []int{}
@@ -3841,10 +3838,9 @@ func TestDeleteOldFailedProvisions(t *testing.T) {
38413838
scheme := scheme.GetScheme()
38423839
fakeClient := testfake.NewFakeClientBuilder().WithRuntimeObjects(provisions...).Build()
38433840
rcd := &ReconcileClusterDeployment{
3844-
Client: fakeClient,
3845-
scheme: scheme,
3846-
nodeSelector: &map[string]string{},
3847-
tolerations: &[]corev1.Toleration{},
3841+
Client: fakeClient,
3842+
scheme: scheme,
3843+
sharedPodConfig: &controllerutils.SharedPodConfig{},
38483844
}
38493845
rcd.deleteOldFailedProvisions(getProvisions(fakeClient), log.WithField("test", "TestDeleteOldFailedProvisions"))
38503846
assert.Len(t, getProvisions(fakeClient), tc.expectedNumberOfProvisionsAfterDeletion, "unexpected provisions kept")
@@ -4366,8 +4362,7 @@ func TestUpdatePullSecretInfo(t *testing.T) {
43664362
validateCredentialsForClusterDeployment: func(client.Client, *hivev1.ClusterDeployment, log.FieldLogger) (bool, error) {
43674363
return true, nil
43684364
},
4369-
nodeSelector: &map[string]string{},
4370-
tolerations: &[]corev1.Toleration{},
4365+
sharedPodConfig: &controllerutils.SharedPodConfig{},
43714366
}
43724367

43734368
_, err := rcd.Reconcile(context.TODO(), reconcile.Request{
@@ -4528,8 +4523,7 @@ func TestMergePullSecrets(t *testing.T) {
45284523
scheme: scheme,
45294524
logger: log.WithField("controller", "clusterDeployment"),
45304525
remoteClusterAPIClientBuilder: func(*hivev1.ClusterDeployment) remoteclient.Builder { return mockRemoteClientBuilder },
4531-
nodeSelector: &map[string]string{},
4532-
tolerations: &[]corev1.Toleration{},
4526+
sharedPodConfig: &controllerutils.SharedPodConfig{},
45334527
}
45344528

45354529
cd := getCDFromClient(rcd.Client)
@@ -4596,8 +4590,7 @@ func TestCopyInstallLogSecret(t *testing.T) {
45964590
scheme: scheme,
45974591
logger: log.WithField("controller", "clusterDeployment"),
45984592
remoteClusterAPIClientBuilder: func(*hivev1.ClusterDeployment) remoteclient.Builder { return mockRemoteClientBuilder },
4599-
nodeSelector: &map[string]string{},
4600-
tolerations: &[]corev1.Toleration{},
4593+
sharedPodConfig: &controllerutils.SharedPodConfig{},
46014594
}
46024595

46034596
for i, envVar := range test.existingEnvVars {
@@ -4780,8 +4773,7 @@ func TestEnsureManagedDNSZone(t *testing.T) {
47804773
scheme: scheme,
47814774
logger: log.WithField("controller", "clusterDeployment"),
47824775
remoteClusterAPIClientBuilder: func(*hivev1.ClusterDeployment) remoteclient.Builder { return mockRemoteClientBuilder },
4783-
nodeSelector: &map[string]string{},
4784-
tolerations: &[]corev1.Toleration{},
4776+
sharedPodConfig: &controllerutils.SharedPodConfig{},
47854777
}
47864778

47874779
// act
@@ -4985,10 +4977,9 @@ spacing problem!
49854977
t.Run(test.name, func(t *testing.T) {
49864978
fakeClient := testfake.NewFakeClientBuilder().WithRuntimeObjects(filterNils(test.cd, test.icSecret)...).Build()
49874979
r := &ReconcileClusterDeployment{
4988-
Client: fakeClient,
4989-
scheme: scheme.GetScheme(),
4990-
nodeSelector: &map[string]string{},
4991-
tolerations: &[]corev1.Toleration{},
4980+
Client: fakeClient,
4981+
scheme: scheme.GetScheme(),
4982+
sharedPodConfig: &controllerutils.SharedPodConfig{},
49924983
}
49934984

49944985
if gotReturn := r.discoverAWSHostedZoneRole(test.cd, logger); gotReturn != test.wantReturn {
@@ -5242,8 +5233,7 @@ platform:
52425233
Client: fakeClient,
52435234
scheme: scheme,
52445235
remoteClusterAPIClientBuilder: func(*hivev1.ClusterDeployment) remoteclient.Builder { return mockRemoteClientBuilder },
5245-
nodeSelector: &map[string]string{},
5246-
tolerations: &[]corev1.Toleration{},
5236+
sharedPodConfig: &controllerutils.SharedPodConfig{},
52475237
}
52485238

52495239
if gotReturn := r.discoverAzureResourceGroup(test.cd, logger); gotReturn != test.wantReturn {
@@ -5430,10 +5420,9 @@ spacing problem!
54305420
t.Run(test.name, func(t *testing.T) {
54315421
fakeClient := testfake.NewFakeClientBuilder().WithRuntimeObjects(filterNils(test.cd, test.icSecret)...).Build()
54325422
r := &ReconcileClusterDeployment{
5433-
Client: fakeClient,
5434-
scheme: scheme.GetScheme(),
5435-
nodeSelector: &map[string]string{},
5436-
tolerations: &[]corev1.Toleration{},
5423+
Client: fakeClient,
5424+
scheme: scheme.GetScheme(),
5425+
sharedPodConfig: &controllerutils.SharedPodConfig{},
54375426
}
54385427

54395428
if gotReturn := r.discoverGCPNetworkProjectID(test.cd, logger); gotReturn != test.wantReturn {

pkg/controller/clusterdeprovision/clusterdeprovision_controller.go

Lines changed: 9 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -140,28 +140,24 @@ type ReconcileClusterDeprovision struct {
140140
scheme *runtime.Scheme
141141
deprovisionsDisabled bool
142142

143-
// nodeSelector is copied from the hive-controllers pod and must be included in any Jobs we create from here.
144-
nodeSelector *map[string]string
145-
146-
// tolerations is copied from the hive-controllers pod and must be included in any Jobs we create from here.
147-
tolerations *[]corev1.Toleration
143+
// sharedPodConfig is copied from the hive-controllers pod and must be included in any Jobs we create from here.
144+
sharedPodConfig *controllerutils.SharedPodConfig
148145
}
149146

150147
// Reconcile reads that state of the cluster for a ClusterDeprovision object and makes changes based on the state read
151148
// and what is in the ClusterDeprovision.Spec
152149
func (r *ReconcileClusterDeprovision) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) {
153150
rLog := controllerutils.BuildControllerLogger(ControllerName, "clusterDeprovision", request.NamespacedName)
154151

155-
// Discover scheduling settings from the controller. We would like to do this in newReconciler,
152+
// Discover settings from the controller. We would like to do this in newReconciler,
156153
// but we can't count on the cache having been started at that point.
157-
if r.nodeSelector == nil || r.tolerations == nil {
158-
thisPod, err := controllerutils.GetThisPod(r)
154+
if r.sharedPodConfig == nil {
155+
sharedPodConfig, err := controllerutils.ReadSharedConfigFromThisPod(r)
159156
if err != nil {
160-
rLog.WithError(err).Error("Failed to retrieve the running pod")
157+
rLog.WithError(err).Error("error reading shared pod config")
161158
return reconcile.Result{}, err
162159
}
163-
r.nodeSelector = &thisPod.Spec.NodeSelector
164-
r.tolerations = &thisPod.Spec.Tolerations
160+
r.sharedPodConfig = sharedPodConfig
165161
}
166162

167163
// For logging, we need to see when the reconciliation loop starts and ends.
@@ -307,8 +303,8 @@ func (r *ReconcileClusterDeprovision) Reconcile(ctx context.Context, request rec
307303
os.Getenv("HTTPS_PROXY"),
308304
os.Getenv("NO_PROXY"),
309305
extraEnvVars,
310-
*r.nodeSelector,
311-
*r.tolerations)
306+
*r.sharedPodConfig,
307+
)
312308
if err != nil {
313309
rLog.Errorf("error generating uninstaller job: %v", err)
314310
return reconcile.Result{}, err

pkg/controller/clusterdeprovision/clusterdeprovision_controller_test.go

Lines changed: 2 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -296,8 +296,7 @@ func TestClusterDeprovisionReconcile(t *testing.T) {
296296
Client: mocks.fakeKubeClient,
297297
scheme: scheme,
298298
deprovisionsDisabled: test.deprovisionsDisabled,
299-
nodeSelector: &map[string]string{},
300-
tolerations: &[]corev1.Toleration{},
299+
sharedPodConfig: &controllerutils.SharedPodConfig{},
301300
}
302301

303302
// Save the list of actuators so that it can be restored at the end of this test
@@ -377,7 +376,7 @@ func testClusterDeployment() *hivev1.ClusterDeployment {
377376
// specified conditions.
378377
func testUninstallJob(conditions ...batchv1.JobCondition) *batchv1.Job {
379378
uninstallJob, _ := install.GenerateUninstallerJobForDeprovision(testClusterDeprovision(),
380-
"someserviceaccount", "", "", "", nil, map[string]string{}, []corev1.Toleration{})
379+
"someserviceaccount", "", "", "", nil, controllerutils.SharedPodConfig{})
381380
hash, err := controllerutils.CalculateJobSpecHash(uninstallJob)
382381
if err != nil {
383382
panic("should never get error calculating job spec hash")

pkg/controller/clusterprovision/clusterprovision_controller.go

Lines changed: 8 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -131,28 +131,24 @@ type ReconcileClusterProvision struct {
131131
// A TTLCache of job creates each clusterprovision expects to see
132132
expectations controllerutils.ExpectationsInterface
133133

134-
// nodeSelector is copied from the hive-controllers pod and must be included in any Jobs we create from here.
135-
nodeSelector *map[string]string
136-
137-
// tolerations is copied from the hive-controllers pod and must be included in any Jobs we create from here.
138-
tolerations *[]corev1.Toleration
134+
// sharedPodConfig is copied from the hive-controllers pod and must be included in any Jobs we create from here.
135+
sharedPodConfig *controllerutils.SharedPodConfig
139136
}
140137

141138
// Reconcile reads that state of the cluster for a ClusterProvision object and makes changes based on the state read
142139
// and what is in the ClusterProvision.Spec
143140
func (r *ReconcileClusterProvision) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) {
144141
pLog := controllerutils.BuildControllerLogger(ControllerName, "clusterProvision", request.NamespacedName)
145142

146-
// Discover scheduling settings from the controller. We would like to do this in newReconciler,
143+
// Discover settings from the controller. We would like to do this in newReconciler,
147144
// but we can't count on the cache having been started at that point.
148-
if r.nodeSelector == nil || r.tolerations == nil {
149-
thisPod, err := controllerutils.GetThisPod(r)
145+
if r.sharedPodConfig == nil {
146+
sharedPodConfig, err := controllerutils.ReadSharedConfigFromThisPod(r)
150147
if err != nil {
151-
pLog.WithError(err).Error("Failed to retrieve the running pod")
148+
pLog.WithError(err).Error("error reading shared pod config")
152149
return reconcile.Result{}, err
153150
}
154-
r.nodeSelector = &thisPod.Spec.NodeSelector
155-
r.tolerations = &thisPod.Spec.Tolerations
151+
r.sharedPodConfig = sharedPodConfig
156152
}
157153

158154
pLog.Info("reconciling cluster provision")
@@ -235,7 +231,7 @@ func (r *ReconcileClusterProvision) reconcileNewProvision(instance *hivev1.Clust
235231
}
236232

237233
func (r *ReconcileClusterProvision) createJob(instance *hivev1.ClusterProvision, pLog log.FieldLogger) (reconcile.Result, error) {
238-
job, err := install.GenerateInstallerJob(instance, *r.nodeSelector, *r.tolerations)
234+
job, err := install.GenerateInstallerJob(instance, *r.sharedPodConfig)
239235
if err != nil {
240236
pLog.WithError(err).Error("error generating install job")
241237
return reconcile.Result{}, err

pkg/controller/clusterprovision/clusterprovision_controller_test.go

Lines changed: 9 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -269,12 +269,11 @@ func TestClusterProvisionReconcile(t *testing.T) {
269269
fakeClient := testfake.NewFakeClientBuilder().WithRuntimeObjects(test.existing...).Build()
270270
controllerExpectations := controllerutils.NewExpectations(logger)
271271
rcp := &ReconcileClusterProvision{
272-
Client: fakeClient,
273-
scheme: scheme,
274-
logger: logger,
275-
expectations: controllerExpectations,
276-
nodeSelector: &map[string]string{},
277-
tolerations: &[]corev1.Toleration{},
272+
Client: fakeClient,
273+
scheme: scheme,
274+
logger: logger,
275+
expectations: controllerExpectations,
276+
sharedPodConfig: &controllerutils.SharedPodConfig{},
278277
}
279278

280279
reconcileRequest := reconcile.Request{
@@ -351,7 +350,7 @@ func testProvision(opts ...tcp.Option) *hivev1.ClusterProvision {
351350

352351
func testJob(opts ...testjob.Option) *batchv1.Job {
353352
provision := testProvision()
354-
job, err := install.GenerateInstallerJob(provision, map[string]string{}, []corev1.Toleration{})
353+
job, err := install.GenerateInstallerJob(provision, controllerutils.SharedPodConfig{})
355354
if err != nil {
356355
panic("should not error while generating test install job")
357356
}
@@ -561,10 +560,9 @@ compute:
561560
}
562561
fakeClient := testfake.NewFakeClientBuilder().WithRuntimeObjects(icSecret).Build()
563562
rcp := &ReconcileClusterProvision{
564-
Client: fakeClient,
565-
logger: logger,
566-
nodeSelector: &map[string]string{},
567-
tolerations: &[]corev1.Toleration{},
563+
Client: fakeClient,
564+
logger: logger,
565+
sharedPodConfig: &controllerutils.SharedPodConfig{},
568566
}
569567

570568
if got := rcp.getWorkers(*cd); got != test.want {

0 commit comments

Comments (0)