From 5a3a0d08b9098fe042b0cae22779b25611cbc24a Mon Sep 17 00:00:00 2001 From: Zack Zlotnik Date: Tue, 18 Nov 2025 10:02:30 -0500 Subject: [PATCH 1/6] removes OSImageURLConfig from buildrequest module --- .../build/buildrequest/buildrequest.go | 6 +- .../build/buildrequest/buildrequest_test.go | 21 ++-- .../build/buildrequest/buildrequestopts.go | 19 +--- .../buildrequest/buildrequestopts_test.go | 1 - .../build/buildrequest/machineosbuild.go | 99 ++++++++++--------- .../build/buildrequest/machineosbuild_test.go | 33 ++++--- pkg/controller/build/fixtures/objects.go | 44 +++++---- 7 files changed, 112 insertions(+), 111 deletions(-) diff --git a/pkg/controller/build/buildrequest/buildrequest.go b/pkg/controller/build/buildrequest/buildrequest.go index 5bd5b300e3..5d997a5522 100644 --- a/pkg/controller/build/buildrequest/buildrequest.go +++ b/pkg/controller/build/buildrequest/buildrequest.go @@ -335,8 +335,8 @@ func (br buildRequestImpl) renderContainerfile() (string, error) { MachineOSBuild: br.opts.MachineOSBuild, MachineOSConfig: br.opts.MachineOSConfig, UserContainerfile: br.userContainerfile, - BaseOSImage: br.opts.OSImageURLConfig.BaseOSContainerImage, - ExtensionsImage: br.opts.OSImageURLConfig.BaseOSExtensionsContainerImage, + BaseOSImage: br.opts.MachineConfig.Spec.OSImageURL, + ExtensionsImage: br.opts.MachineConfig.Spec.BaseOSExtensionsContainerImage, ExtensionsPackages: extPkgs, KernelType: kernelType, KernelPackages: kernelPackages, @@ -671,7 +671,7 @@ func (br buildRequestImpl) toBuildahPod() *corev1.Pod { // us to avoid parsing log files. 
Name: "create-digest-configmap", Command: append(command, digestCMScript), - Image: br.opts.OSImageURLConfig.BaseOSContainerImage, + Image: br.opts.MachineConfig.Spec.OSImageURL, Env: env, ImagePullPolicy: corev1.PullAlways, SecurityContext: securityContext, diff --git a/pkg/controller/build/buildrequest/buildrequest_test.go b/pkg/controller/build/buildrequest/buildrequest_test.go index 7939084747..ef9671348f 100644 --- a/pkg/controller/build/buildrequest/buildrequest_test.go +++ b/pkg/controller/build/buildrequest/buildrequest_test.go @@ -42,13 +42,11 @@ func TestBuildRequestInvalidExtensions(t *testing.T) { func TestBuildRequest(t *testing.T) { t.Parallel() - osImageURLConfig := fixtures.OSImageURLConfig() - expectedContents := func() []string { return []string{ - fmt.Sprintf("FROM %s AS extract", osImageURLConfig.BaseOSContainerImage), - fmt.Sprintf("FROM %s AS configs", osImageURLConfig.BaseOSContainerImage), - fmt.Sprintf("LABEL baseOSContainerImage=%s", osImageURLConfig.BaseOSContainerImage), + fmt.Sprintf("FROM %s AS extract", fixtures.BaseOSContainerImage), + fmt.Sprintf("FROM %s AS configs", fixtures.BaseOSContainerImage), + fmt.Sprintf("LABEL baseOSContainerImage=%s", fixtures.BaseOSContainerImage), } } @@ -66,7 +64,7 @@ func TestBuildRequest(t *testing.T) { return opts }, expectedContainerfileContents: append(expectedContents(), []string{ - fmt.Sprintf("RUN --mount=type=bind,from=%s", osImageURLConfig.BaseOSExtensionsContainerImage), + fmt.Sprintf("RUN --mount=type=bind,from=%s", fixtures.BaseOSExtensionsContainerImage), `extensions="usbguard"`, }...), }, @@ -78,7 +76,7 @@ func TestBuildRequest(t *testing.T) { return opts }, expectedContainerfileContents: append(expectedContents(), []string{ - fmt.Sprintf("RUN --mount=type=bind,from=%s", osImageURLConfig.BaseOSExtensionsContainerImage), + fmt.Sprintf("RUN --mount=type=bind,from=%s", fixtures.BaseOSExtensionsContainerImage), `extensions="krb5-workstation libkadm5 usbguard"`, }...), }, @@ -86,12 +84,12 @@ 
func TestBuildRequest(t *testing.T) { name: "Missing extensions image and extensions", optsFunc: func() BuildRequestOpts { opts := getBuildRequestOpts() - opts.OSImageURLConfig.BaseOSExtensionsContainerImage = "" + opts.MachineConfig.Spec.BaseOSExtensionsContainerImage = "" opts.MachineConfig.Spec.Extensions = []string{"usbguard"} return opts }, unexpectedContainerfileContents: []string{ - fmt.Sprintf("RUN --mount=type=bind,from=%s", osImageURLConfig.BaseOSContainerImage), + fmt.Sprintf("RUN --mount=type=bind,from=%s", fixtures.BaseOSContainerImage), "extensions=\"usbguard\"", }, }, @@ -235,7 +233,7 @@ func assertBuildJobIsCorrect(t *testing.T, buildJob *batchv1.Job, opts BuildRequ assert.Equal(t, buildJob.Spec.Template.Spec.InitContainers[0].Image, mcoImagePullspec) expectedPullspecs := []string{ "base-os-image-from-machineosconfig", - fixtures.OSImageURLConfig().BaseOSContainerImage, + fixtures.BaseOSContainerImage, } assert.Contains(t, expectedPullspecs, buildJob.Spec.Template.Spec.Containers[0].Image) @@ -312,7 +310,7 @@ RUN rpm-ostree install && \ newSecret := `{"auths":` + legacySecret + `}` return BuildRequestOpts{ - MachineConfig: &mcfgv1.MachineConfig{}, + MachineConfig: fixtures.NewObjectsForTest("worker").RenderedMachineConfig, MachineOSConfig: layeredObjects.MachineOSConfigBuilder.MachineOSConfig(), MachineOSBuild: layeredObjects.MachineOSBuildBuilder.MachineOSBuild(), Images: &ctrlcommon.Images{ @@ -320,7 +318,6 @@ RUN rpm-ostree install && \ MachineConfigOperator: mcoImagePullspec, }, }, - OSImageURLConfig: fixtures.OSImageURLConfig(), BaseImagePullSecret: &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ Name: "base-image-pull-secret", diff --git a/pkg/controller/build/buildrequest/buildrequestopts.go b/pkg/controller/build/buildrequest/buildrequestopts.go index 3cb2e4a511..61f0584f48 100644 --- a/pkg/controller/build/buildrequest/buildrequestopts.go +++ b/pkg/controller/build/buildrequest/buildrequestopts.go @@ -23,11 +23,10 @@ import ( // Holds all of 
the options used to produce a BuildRequest. type BuildRequestOpts struct { //nolint:revive // This name is fine. - MachineOSConfig *mcfgv1.MachineOSConfig - MachineOSBuild *mcfgv1.MachineOSBuild - MachineConfig *mcfgv1.MachineConfig - Images *ctrlcommon.Images - OSImageURLConfig *ctrlcommon.OSImageURLConfig + MachineOSConfig *mcfgv1.MachineOSConfig + MachineOSBuild *mcfgv1.MachineOSBuild + MachineConfig *mcfgv1.MachineConfig + Images *ctrlcommon.Images BaseImagePullSecret *corev1.Secret FinalImagePushSecret *corev1.Secret @@ -100,10 +99,6 @@ func newBuildRequestOptsFromAPI(ctx context.Context, kubeclient clientset.Interf return nil, fmt.Errorf("expected images to not be nil") } - if opts.OSImageURLConfig == nil { - return nil, fmt.Errorf("expected osimageurlconfig to not be nil") - } - if opts.BaseImagePullSecret == nil { return nil, fmt.Errorf("expected base image pull secret to not be nil") } @@ -176,11 +171,6 @@ func (o *optsGetter) getOpts(ctx context.Context, mosb *mcfgv1.MachineOSBuild, m return nil, fmt.Errorf("could not get images.json config: %w", err) } - osImageURLConfig, err := ctrlcommon.GetOSImageURLConfig(ctx, o.kubeclient) - if err != nil { - return nil, fmt.Errorf("could not get osImageURL config: %w", err) - } - var baseImagePullSecretName string // Check if a base image pull secret was provided opts.hasUserDefinedBaseImagePullSecret = mosc.Spec.BaseImagePullSecret != nil @@ -214,7 +204,6 @@ func (o *optsGetter) getOpts(ctx context.Context, mosb *mcfgv1.MachineOSBuild, m opts.Images = imagesConfig opts.MachineConfig = mc - opts.OSImageURLConfig = osImageURLConfig opts.BaseImagePullSecret = baseImagePullSecret opts.FinalImagePushSecret = finalImagePushSecret opts.MachineOSConfig = mosc.DeepCopy() diff --git a/pkg/controller/build/buildrequest/buildrequestopts_test.go b/pkg/controller/build/buildrequest/buildrequestopts_test.go index e1a6e81625..8e80cb13f8 100644 --- a/pkg/controller/build/buildrequest/buildrequestopts_test.go +++ 
b/pkg/controller/build/buildrequest/buildrequestopts_test.go @@ -148,7 +148,6 @@ func TestBuildRequestOpts(t *testing.T) { assert.NotNil(t, brOpts.MachineOSConfig) assert.NotNil(t, brOpts.MachineOSBuild) assert.NotNil(t, brOpts.Images) - assert.NotNil(t, brOpts.OSImageURLConfig) assert.NotNil(t, brOpts.BaseImagePullSecret) assert.NotNil(t, brOpts.FinalImagePushSecret) }) diff --git a/pkg/controller/build/buildrequest/machineosbuild.go b/pkg/controller/build/buildrequest/machineosbuild.go index 3ac3f398da..29948e3b9c 100644 --- a/pkg/controller/build/buildrequest/machineosbuild.go +++ b/pkg/controller/build/buildrequest/machineosbuild.go @@ -1,10 +1,11 @@ package buildrequest import ( - "context" + //nolint:gosec "crypto/md5" "fmt" + "strings" "github.com/distribution/reference" "github.com/ghodss/yaml" @@ -13,7 +14,6 @@ import ( "github.com/openshift/machine-config-operator/pkg/controller/build/utils" ctrlcommon "github.com/openshift/machine-config-operator/pkg/controller/common" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - clientset "k8s.io/client-go/kubernetes" ) // This is the same salt / pattern from pkg/controller/render/hash.go @@ -32,13 +32,17 @@ var ( // Holds the objects that are used to construct a MachineOSBuild with a hashed // name. type MachineOSBuildOpts struct { + MachineConfig *mcfgv1.MachineConfig MachineOSConfig *mcfgv1.MachineOSConfig MachineConfigPool *mcfgv1.MachineConfigPool - OSImageURLConfig *ctrlcommon.OSImageURLConfig } // Validates that the required options are provided. 
func (m *MachineOSBuildOpts) validateForHash() error { + if err := m.validateMachineConfig(); err != nil { + return fmt.Errorf("machineconfig failed validation: %w", err) + } + if m.MachineOSConfig == nil { return fmt.Errorf("missing required MachineOSConfig") } @@ -51,8 +55,30 @@ func (m *MachineOSBuildOpts) validateForHash() error { return fmt.Errorf("name mismatch, MachineConfigPool has %q, MachineOSConfig has %q", m.MachineConfigPool.Name, m.MachineOSConfig.Spec.MachineConfigPool.Name) } - if m.OSImageURLConfig == nil { - return fmt.Errorf("misssing OSImageURLConfig") + return nil +} + +// Validates that a MachineConfig has the necessary metadata for generating a +// MachineOSBuild. +func (m *MachineOSBuildOpts) validateMachineConfig() error { + if m.MachineConfig == nil { + return fmt.Errorf("missing required MachineConfig") + } + + if !strings.HasPrefix(m.MachineConfig.Name, "rendered-") { + return fmt.Errorf("machineconfig %q is not a rendered MachineConfig", m.MachineConfig.Name) + } + + requiredAnnos := []string{ctrlcommon.ReleaseImageVersionAnnotationKey, ctrlcommon.GeneratedByControllerVersionAnnotationKey} + for _, anno := range requiredAnnos { + val, ok := m.MachineConfig.Annotations[anno] + if !ok { + return fmt.Errorf("missing annotation %q on MachineConfig %q", anno, m.MachineConfig.Name) + } + + if val == "" { + return fmt.Errorf("empty annotation %q value on MachineConfig %q", anno, m.MachineConfig.Name) + } } return nil @@ -60,6 +86,24 @@ func (m *MachineOSBuildOpts) validateForHash() error { // Creates a list of objects that are consumed by the SHA256 hash. func (m *MachineOSBuildOpts) objectsForHash() []interface{} { + // Represents a private version of the OSImageURLConfig struct to keep the + // hashed name generation stable regardless of the input source. This means + // that we can eventually remove the OSImageURLConfig struct. 
+ type osImageURLConfig struct { + BaseOSContainerImage string + BaseOSExtensionsContainerImage string + OSImageURL string + ReleaseVersion string + } + + cfg := osImageURLConfig{ + BaseOSContainerImage: m.MachineConfig.Spec.OSImageURL, + BaseOSExtensionsContainerImage: m.MachineConfig.Spec.BaseOSExtensionsContainerImage, + // This value is purposely left empty because the ConfigMap does not actually + // populate this value. However, we want the hashing to be stable. + OSImageURL: "", + ReleaseVersion: m.MachineConfig.Annotations[ctrlcommon.ReleaseImageVersionAnnotationKey], + } // The objects considered for hashing described inline: out := []interface{}{ @@ -70,8 +114,8 @@ func (m *MachineOSBuildOpts) objectsForHash() []interface{} { m.MachineConfigPool.Spec.Configuration, // The MachineOSConfig Spec field. m.MachineOSConfig.Spec, - // The complete OSImageURLConfig object. - m.OSImageURLConfig, + // The complete osImageURLConfig object. + cfg, } return out @@ -118,23 +162,6 @@ func (m *MachineOSBuildOpts) getHashedName() (string, error) { return fmt.Sprintf("%x", hasher.Sum(nil)), nil } -// Constructs the MachineOSBuildOpts by retrieving the OSImageURLConfig from -// the API server. -func NewMachineOSBuildOpts(ctx context.Context, kubeclient clientset.Interface, mosc *mcfgv1.MachineOSConfig, mcp *mcfgv1.MachineConfigPool) (MachineOSBuildOpts, error) { - // TODO: Consider an implementation that uses listers instead of API clients - // just to cut down on API server traffic. - osImageURLs, err := ctrlcommon.GetOSImageURLConfig(ctx, kubeclient) - if err != nil { - return MachineOSBuildOpts{}, fmt.Errorf("could not get OSImageURLConfig: %w", err) - } - - return MachineOSBuildOpts{ - MachineOSConfig: mosc, - MachineConfigPool: mcp, - OSImageURLConfig: osImageURLs, - }, nil -} - // Constructs a new MachineOSBuild object or panics trying. Useful for testing // scenarios. 
func NewMachineOSBuildOrDie(opts MachineOSBuildOpts) *mcfgv1.MachineOSBuild { @@ -147,30 +174,6 @@ func NewMachineOSBuildOrDie(opts MachineOSBuildOpts) *mcfgv1.MachineOSBuild { return mosb } -// Retrieves the MachineOSBuildOpts from the API and constructs a new -// MachineOSBuild object or panics trying. Useful for testing scenarios. -func NewMachineOSBuildFromAPIOrDie(ctx context.Context, kubeclient clientset.Interface, mosc *mcfgv1.MachineOSConfig, mcp *mcfgv1.MachineConfigPool) *mcfgv1.MachineOSBuild { - mosb, err := NewMachineOSBuildFromAPI(ctx, kubeclient, mosc, mcp) - - if err != nil { - panic(err) - } - - return mosb -} - -// Retrieves the MachineOSBuildOpts from the API and constructs a new -// MachineOSBuild object. -func NewMachineOSBuildFromAPI(ctx context.Context, kubeclient clientset.Interface, mosc *mcfgv1.MachineOSConfig, mcp *mcfgv1.MachineConfigPool) (*mcfgv1.MachineOSBuild, error) { - opts, err := NewMachineOSBuildOpts(ctx, kubeclient, mosc, mcp) - - if err != nil { - return nil, fmt.Errorf("could not get MachineOSBuildOpts: %w", err) - } - - return NewMachineOSBuild(opts) -} - // Constructs a new MachineOSBuild object with all of the labels, the tagged // image pushpsec, and a hashed name. 
func NewMachineOSBuild(opts MachineOSBuildOpts) (*mcfgv1.MachineOSBuild, error) { diff --git a/pkg/controller/build/buildrequest/machineosbuild_test.go b/pkg/controller/build/buildrequest/machineosbuild_test.go index 5235d71332..d59dc18dd8 100644 --- a/pkg/controller/build/buildrequest/machineosbuild_test.go +++ b/pkg/controller/build/buildrequest/machineosbuild_test.go @@ -19,6 +19,10 @@ func TestMachineOSBuild(t *testing.T) { poolName := "worker" + getMachineConfig := func() *mcfgv1.MachineConfig { + return fixtures.NewObjectsForTest(poolName).RenderedMachineConfig + } + getMachineOSConfig := func() *mcfgv1.MachineOSConfig { return testhelpers.NewMachineOSConfigBuilder(poolName).WithMachineConfigPool(poolName).MachineOSConfig() } @@ -28,7 +32,7 @@ func TestMachineOSBuild(t *testing.T) { } // Some of the test cases expect the hash name to be the same. This is that hash value. - expectedCommonHashName := "worker-55592464e51104dcc274a300565fec9e" + expectedCommonHashName := "worker-699e6be74658adcb3ff2b48f32cd1584" testCases := []struct { name string @@ -40,6 +44,7 @@ func TestMachineOSBuild(t *testing.T) { name: "Missing MachineConfigPool", errExpected: true, opts: MachineOSBuildOpts{ + MachineConfig: getMachineConfig(), MachineOSConfig: getMachineOSConfig(), }, }, @@ -47,6 +52,7 @@ func TestMachineOSBuild(t *testing.T) { name: "Missing MachineOSConfig", errExpected: true, opts: MachineOSBuildOpts{ + MachineConfig: getMachineConfig(), MachineConfigPool: getMachineConfigPool(), }, }, @@ -54,30 +60,27 @@ func TestMachineOSBuild(t *testing.T) { name: "Mismatched MachineConfigPool name and MachineOSConfig", errExpected: true, opts: MachineOSBuildOpts{ + MachineConfig: getMachineConfig(), MachineOSConfig: testhelpers.NewMachineOSConfigBuilder("worker").WithMachineConfigPool("other-pool").MachineOSConfig(), MachineConfigPool: getMachineConfigPool(), }, }, { - name: "Only MachineOSConfig and MachineConfigPool", - expectedName: "worker-6782c5fc52947bc8fa6d105c9fe62b7d", - 
errExpected: true, + name: "Missing MachineConfig", + errExpected: true, opts: MachineOSBuildOpts{ MachineOSConfig: getMachineOSConfig(), MachineConfigPool: getMachineConfigPool(), }, }, - // These cases ensure that the hashed name remains stable regardless of - // which source of truth is used for the base OS image, extensions image, - // and / or release version. In these cases, the source of truth can either - // be the value from the MachineOSConfig or the OSImageURLConfig struct. + // These cases ensure that the hashed name remains stable. { - name: "All values from OSImageURLConfig", + name: "All values present", expectedName: expectedCommonHashName, opts: MachineOSBuildOpts{ + MachineConfig: getMachineConfig(), MachineOSConfig: getMachineOSConfig(), MachineConfigPool: getMachineConfigPool(), - OSImageURLConfig: fixtures.OSImageURLConfig(), }, }, // These cases ensure that pausing the MachineConfigPool does not affect the hash. @@ -85,18 +88,18 @@ func TestMachineOSBuild(t *testing.T) { name: "Unpaused MachineConfigPool", expectedName: expectedCommonHashName, opts: MachineOSBuildOpts{ + MachineConfig: getMachineConfig(), MachineOSConfig: getMachineOSConfig(), MachineConfigPool: getMachineConfigPool(), - OSImageURLConfig: fixtures.OSImageURLConfig(), }, }, { name: "Paused MachineConfigPool", expectedName: expectedCommonHashName, opts: MachineOSBuildOpts{ + MachineConfig: getMachineConfig(), MachineOSConfig: getMachineOSConfig(), MachineConfigPool: testhelpers.NewMachineConfigPoolBuilder(poolName).WithPaused().MachineConfigPool(), - OSImageURLConfig: fixtures.OSImageURLConfig(), }, }, } @@ -139,12 +142,14 @@ func TestMachineOSBuild(t *testing.T) { func TestMachineOSBuildLabelConsistency(t *testing.T) { t.Parallel() - obj := fixtures.NewObjectsForTest("worker") + poolName := "worker" + + obj := fixtures.NewObjectsForTest(poolName) mosb, err := NewMachineOSBuild(MachineOSBuildOpts{ + MachineConfig: fixtures.NewObjectsForTest(poolName).RenderedMachineConfig, 
MachineConfigPool: obj.MachineConfigPool, MachineOSConfig: obj.MachineOSConfig, - OSImageURLConfig: fixtures.OSImageURLConfig(), }) assert.NoError(t, err) diff --git a/pkg/controller/build/fixtures/objects.go b/pkg/controller/build/fixtures/objects.go index f4459213cd..1e566413ed 100644 --- a/pkg/controller/build/fixtures/objects.go +++ b/pkg/controller/build/fixtures/objects.go @@ -21,10 +21,11 @@ const ( // Provides consistently instantiated objects for use in a given test. type ObjectsForTest struct { - MachineConfigPool *mcfgv1.MachineConfigPool - MachineConfigs []*mcfgv1.MachineConfig - MachineOSConfig *mcfgv1.MachineOSConfig - MachineOSBuild *mcfgv1.MachineOSBuild + MachineConfigPool *mcfgv1.MachineConfigPool + MachineConfigs []*mcfgv1.MachineConfig + MachineOSConfig *mcfgv1.MachineOSConfig + MachineOSBuild *mcfgv1.MachineOSBuild + RenderedMachineConfig *mcfgv1.MachineConfig } // Provides the builders to create consistently instantiated objects for use in @@ -38,11 +39,14 @@ type ObjectBuildersForTest struct { func (o *ObjectBuildersForTest) ToObjectsForTest() ObjectsForTest { mcp := o.MachineConfigPoolBuilder.MachineConfigPool() + mcs, renderedMC := newMachineConfigsFromPool(mcp) + return ObjectsForTest{ - MachineConfigPool: mcp, - MachineConfigs: newMachineConfigsFromPool(mcp), - MachineOSConfig: o.MachineOSConfigBuilder.MachineOSConfig(), - MachineOSBuild: o.MachineOSBuildBuilder.MachineOSBuild(), + MachineConfigPool: mcp, + MachineConfigs: mcs, + MachineOSConfig: o.MachineOSConfigBuilder.MachineOSConfig(), + MachineOSBuild: o.MachineOSBuildBuilder.MachineOSBuild(), + RenderedMachineConfig: renderedMC, } } @@ -51,7 +55,7 @@ func (o *ObjectBuildersForTest) ToObjectsForTest() ObjectsForTest { func (o *ObjectsForTest) ToRuntimeObjects() []runtime.Object { out := []runtime.Object{o.MachineConfigPool} - for _, item := range o.MachineConfigs { + for _, item := range append(o.MachineConfigs, o.RenderedMachineConfig) { out = append(out, item) } @@ -162,7 +166,7 @@ 
func defaultKubeObjects() []runtime.Object { } // Generates MachineConfigs from the given MachineConfigPool for insertion. -func newMachineConfigsFromPool(mcp *mcfgv1.MachineConfigPool) []*mcfgv1.MachineConfig { +func newMachineConfigsFromPool(mcp *mcfgv1.MachineConfigPool) ([]*mcfgv1.MachineConfig, *mcfgv1.MachineConfig) { files := []ign3types.File{} out := []*mcfgv1.MachineConfig{} @@ -186,17 +190,21 @@ func newMachineConfigsFromPool(mcp *mcfgv1.MachineConfigPool) []*mcfgv1.MachineC []ign3types.File{file})) } - // Create a rendered MachineConfig to accompany our MachineConfigPool. - out = append(out, testhelpers.NewMachineConfig( + renderedMC := testhelpers.NewMachineConfig( mcp.Spec.Configuration.Name, - map[string]string{ - ctrlcommon.GeneratedByControllerVersionAnnotationKey: "version-number", - "machineconfiguration.openshift.io/role": mcp.Name, - }, + map[string]string{}, "", - files)) + files) - return out + renderedMC.Annotations = map[string]string{ + ctrlcommon.ReleaseImageVersionAnnotationKey: ReleaseVersion, + ctrlcommon.GeneratedByControllerVersionAnnotationKey: "controller-version", + } + + renderedMC.Spec.OSImageURL = BaseOSContainerImage + renderedMC.Spec.BaseOSExtensionsContainerImage = BaseOSExtensionsContainerImage + + return out, renderedMC } // Gets an example machine-config-operator-images ConfigMap. 
From dd4f75495d4cafb40626a8e961e58d5dea457f1b Mon Sep 17 00:00:00 2001 From: Zack Zlotnik Date: Tue, 18 Nov 2025 17:20:32 -0500 Subject: [PATCH 2/6] updated the reconciler to not use OSImageURLConfig --- .../build/buildrequest/machineosbuild.go | 9 +- .../build/osbuildcontroller_test.go | 168 +++++++++++------- pkg/controller/build/reconciler.go | 39 ++-- 3 files changed, 132 insertions(+), 84 deletions(-) diff --git a/pkg/controller/build/buildrequest/machineosbuild.go b/pkg/controller/build/buildrequest/machineosbuild.go index 29948e3b9c..416c45db89 100644 --- a/pkg/controller/build/buildrequest/machineosbuild.go +++ b/pkg/controller/build/buildrequest/machineosbuild.go @@ -3,6 +3,7 @@ package buildrequest import ( //nolint:gosec + "crypto/md5" "fmt" "strings" @@ -65,7 +66,7 @@ func (m *MachineOSBuildOpts) validateMachineConfig() error { return fmt.Errorf("missing required MachineConfig") } - if !strings.HasPrefix(m.MachineConfig.Name, "rendered-") { + if !strings.HasPrefix(m.MachineConfig.Name, ctrlcommon.RenderedMachineConfigPrefix) { return fmt.Errorf("machineconfig %q is not a rendered MachineConfig", m.MachineConfig.Name) } @@ -99,8 +100,10 @@ func (m *MachineOSBuildOpts) objectsForHash() []interface{} { cfg := osImageURLConfig{ BaseOSContainerImage: m.MachineConfig.Spec.OSImageURL, BaseOSExtensionsContainerImage: m.MachineConfig.Spec.BaseOSExtensionsContainerImage, - // This value is purposely left empty because the ConfigMap does not actually - // populate this value. However, we want the hashing to be stable. + // To maintain stable hashing, this field is purposely left empty because + // these values were originally populated from the + // "machine-config-osimageurl" ConfigMap. The ConfigMap field which + // populates this struct field is blank. 
OSImageURL: "", ReleaseVersion: m.MachineConfig.Annotations[ctrlcommon.ReleaseImageVersionAnnotationKey], } diff --git a/pkg/controller/build/osbuildcontroller_test.go b/pkg/controller/build/osbuildcontroller_test.go index ef9c76dd91..46bc87f27f 100644 --- a/pkg/controller/build/osbuildcontroller_test.go +++ b/pkg/controller/build/osbuildcontroller_test.go @@ -3,12 +3,10 @@ package build import ( "context" "fmt" - "path/filepath" "testing" "time" "github.com/containers/image/v5/types" - ign3types "github.com/coreos/ignition/v2/config/v3_5/types" "github.com/opencontainers/go-digest" mcfgv1 "github.com/openshift/api/machineconfiguration/v1" fakeclientimagev1 "github.com/openshift/client-go/image/clientset/versioned/fake" @@ -83,7 +81,7 @@ func TestOSBuildControllerDeletesRunningBuildBeforeStartingANewOne(t *testing.T) t.Run("MachineOSConfig change", func(t *testing.T) { - kubeclient, mcfgclient, _, _, mosc, initialMosb, mcp, kubeassert, _ := setupOSBuildControllerForTestWithRunningBuild(ctx, t, poolName) + kubeclient, mcfgclient, _, _, mosc, initialMosb, mcp, kubeassert, lobj, _ := setupOSBuildControllerForTestWithRunningBuild(ctx, t, poolName) // Now that the build is in the running state, we update the MachineOSConfig. 
apiMosc := testhelpers.SetContainerfileContentsOnMachineOSConfig(ctx, t, mcfgclient, mosc, "FROM configs AS final\nRUN echo 'helloworld' > /etc/helloworld") @@ -91,7 +89,12 @@ func TestOSBuildControllerDeletesRunningBuildBeforeStartingANewOne(t *testing.T) apiMosc, err := mcfgclient.MachineconfigurationV1().MachineOSConfigs().Update(ctx, apiMosc, metav1.UpdateOptions{}) require.NoError(t, err) - mosb := buildrequest.NewMachineOSBuildFromAPIOrDie(ctx, kubeclient, apiMosc, mcp) + mosb := buildrequest.NewMachineOSBuildOrDie(buildrequest.MachineOSBuildOpts{ + MachineConfig: lobj.RenderedMachineConfig, + MachineOSConfig: apiMosc, + MachineConfigPool: mcp, + }) + buildJobName := utils.GetBuildJobName(mosb) // After creating the new MachineOSConfig, a MachineOSBuild should be created. @@ -114,11 +117,15 @@ func TestOSBuildControllerDeletesRunningBuildBeforeStartingANewOne(t *testing.T) t.Run("MachineConfig change", func(t *testing.T) { - kubeclient, mcfgclient, _, _, mosc, initialMosb, mcp, kubeassert, _ := setupOSBuildControllerForTestWithRunningBuild(ctx, t, poolName) + _, mcfgclient, _, _, mosc, initialMosb, mcp, kubeassert, _, _ := setupOSBuildControllerForTestWithRunningBuild(ctx, t, poolName) - apiMCP := insertNewRenderedMachineConfigAndUpdatePool(ctx, t, mcfgclient, mosc.Spec.MachineConfigPool.Name, "rendered-worker-2") + apiMCP, apiMC := insertNewRenderedMachineConfigAndUpdatePool(ctx, t, mcfgclient, mosc.Spec.MachineConfigPool.Name, "rendered-worker-2") - mosb := buildrequest.NewMachineOSBuildFromAPIOrDie(ctx, kubeclient, mosc, apiMCP) + mosb := buildrequest.NewMachineOSBuildOrDie(buildrequest.MachineOSBuildOpts{ + MachineConfig: apiMC, + MachineOSConfig: mosc, + MachineConfigPool: apiMCP, + }) buildJobName := utils.GetBuildJobName(mosb) @@ -145,7 +152,7 @@ func TestOSBuildControllerLeavesSuccessfulBuildAlone(t *testing.T) { poolName := "worker" - kubeclient, mcfgclient, _, _, firstMosc, firstMosb, mcp, kubeassert := 
setupOSBuildControllerForTestWithSuccessfulBuild(ctx, t, poolName) + kubeclient, mcfgclient, _, _, firstMosc, firstMosb, mcp, lobj, kubeassert := setupOSBuildControllerForTestWithSuccessfulBuild(ctx, t, poolName) // Ensures that we have detected the first build. isMachineOSBuildReachedExpectedCount(ctx, t, mcfgclient, firstMosc, 1) @@ -156,7 +163,11 @@ func TestOSBuildControllerLeavesSuccessfulBuildAlone(t *testing.T) { newMosc := testhelpers.SetContainerfileContentsOnMachineOSConfig(ctx, t, mcfgclient, mosc, containerfileContents) // Compute the new MachineOSBuild. - mosb := buildrequest.NewMachineOSBuildFromAPIOrDie(ctx, kubeclient, newMosc, mcp) + mosb := buildrequest.NewMachineOSBuildOrDie(buildrequest.MachineOSBuildOpts{ + MachineConfig: lobj.RenderedMachineConfig, + MachineOSConfig: newMosc, + MachineConfigPool: mcp, + }) // Ensure that the MachineOSBuild exists. kubeassert.MachineOSBuildExists(mosb) @@ -215,7 +226,7 @@ func TestOSBuildControllerFailure(t *testing.T) { t.Run("Failed build objects remain", func(t *testing.T) { - _, _, _, _, _, failedMosb, _, kubeassert := setupOSBuildControllerForTestWithFailedBuild(ctx, t, poolName) + _, _, _, _, _, failedMosb, _, kubeassert, _ := setupOSBuildControllerForTestWithFailedBuild(ctx, t, poolName) // Ensure that even after failure, the build objects remain. assertBuildObjectsAreCreated(ctx, t, kubeassert, failedMosb) @@ -223,13 +234,17 @@ func TestOSBuildControllerFailure(t *testing.T) { t.Run("MachineOSConfig change clears failed build", func(t *testing.T) { - kubeclient, mcfgclient, _, _, mosc, failedMosb, mcp, kubeassert := setupOSBuildControllerForTestWithFailedBuild(ctx, t, poolName) + kubeclient, mcfgclient, _, _, mosc, failedMosb, mcp, kubeassert, lobj := setupOSBuildControllerForTestWithFailedBuild(ctx, t, poolName) // Modify the MachineOSConfig to start a new build. 
newMosc := testhelpers.SetContainerfileContentsOnMachineOSConfig(ctx, t, mcfgclient, mosc, "FROM configs AS final\nRUN echo 'helloworld' > /etc/helloworld") // Compute the new MachineOSBuild. - newMosb := buildrequest.NewMachineOSBuildFromAPIOrDie(ctx, kubeclient, newMosc, mcp) + newMosb := buildrequest.NewMachineOSBuildOrDie(buildrequest.MachineOSBuildOpts{ + MachineConfig: lobj.RenderedMachineConfig, + MachineOSConfig: newMosc, + MachineConfigPool: mcp, + }) // Ensure that the MachineOSBuild exists. kubeassert.MachineOSBuildExists(newMosb) @@ -247,11 +262,16 @@ func TestOSBuildControllerFailure(t *testing.T) { t.Run("MachineConfig change clears failed build", func(t *testing.T) { - kubeclient, mcfgclient, _, _, mosc, failedMosb, mcp, kubeassert := setupOSBuildControllerForTestWithFailedBuild(ctx, t, poolName) + _, mcfgclient, _, _, mosc, failedMosb, mcp, kubeassert, _ := setupOSBuildControllerForTestWithFailedBuild(ctx, t, poolName) + + apiMCP, apiMC := insertNewRenderedMachineConfigAndUpdatePool(ctx, t, mcfgclient, mosc.Spec.MachineConfigPool.Name, "rendered-worker-2") - apiMCP := insertNewRenderedMachineConfigAndUpdatePool(ctx, t, mcfgclient, mosc.Spec.MachineConfigPool.Name, "rendered-worker-2") + mosb := buildrequest.NewMachineOSBuildOrDie(buildrequest.MachineOSBuildOpts{ + MachineConfig: apiMC, + MachineOSConfig: mosc, + MachineConfigPool: apiMCP, + }) - mosb := buildrequest.NewMachineOSBuildFromAPIOrDie(ctx, kubeclient, mosc, apiMCP) buildJobName := utils.GetBuildJobName(mosb) // After updating the MachineConfigPool, a new MachineOSBuild should get created. 
kubeassert.MachineOSBuildExists(mosb, "New MachineOSBuild for MachineConfigPool %q update for MachineOSConfig %q never gets created", mcp.Name, mosc.Name) @@ -285,7 +305,7 @@ func TestOSBuildController(t *testing.T) { t.Run("MachineOSConfig changes creates a new MachineOSBuild", func(t *testing.T) { - kubeclient, mcfgclient, _, _, mosc, _, _, kubeassert := setupOSBuildControllerForTestWithSuccessfulBuild(ctx, t, poolName) + kubeclient, mcfgclient, _, _, mosc, _, _, lobj, kubeassert := setupOSBuildControllerForTestWithSuccessfulBuild(ctx, t, poolName) // Update the BuildInputs section on the MachineOSConfig and verify that a // new MachineOSBuild is produced from it. We'll do this 10 times. @@ -295,7 +315,12 @@ func TestOSBuildController(t *testing.T) { apiMCP, err := mcfgclient.MachineconfigurationV1().MachineConfigPools().Get(ctx, apiMosc.Spec.MachineConfigPool.Name, metav1.GetOptions{}) require.NoError(t, err) - mosb := buildrequest.NewMachineOSBuildFromAPIOrDie(ctx, kubeclient, apiMosc, apiMCP) + mosb := buildrequest.NewMachineOSBuildOrDie(buildrequest.MachineOSBuildOpts{ + MachineConfig: lobj.RenderedMachineConfig, + MachineOSConfig: apiMosc, + MachineConfigPool: apiMCP, + }) + buildJobName := utils.GetBuildJobName(mosb) // After creating the new MachineOSConfig, a MachineOSBuild should be created. kubeassert.MachineOSBuildExists(mosb, "MachineOSBuild not created for MachineOSConfig %s change, iteration %d", mosc.Name, i) @@ -326,17 +351,22 @@ func TestOSBuildController(t *testing.T) { t.Run("MachineConfig changes creates a new MachineOSBuild", func(t *testing.T) { - kubeclient, mcfgclient, _, _, mosc, _, mcp, kubeassert := setupOSBuildControllerForTestWithSuccessfulBuild(ctx, t, poolName) + kubeclient, mcfgclient, _, _, mosc, _, mcp, _, kubeassert := setupOSBuildControllerForTestWithSuccessfulBuild(ctx, t, poolName) // Update the rendered MachineConfig on the MachineConfigPool and verify that a new MachineOSBuild is produced. We'll do this 10 times. 
for i := 0; i <= 5; i++ { apiMosc, err := mcfgclient.MachineconfigurationV1().MachineOSConfigs().Get(ctx, mosc.Name, metav1.GetOptions{}) require.NoError(t, err) - apiMCP := insertNewRenderedMachineConfigAndUpdatePool(ctx, t, mcfgclient, mosc.Spec.MachineConfigPool.Name, getConfigNameForPool(i+2)) + apiMCP, apiMC := insertNewRenderedMachineConfigAndUpdatePool(ctx, t, mcfgclient, mosc.Spec.MachineConfigPool.Name, getConfigNameForPool(i+2)) time.Sleep(time.Millisecond * 200) - mosb := buildrequest.NewMachineOSBuildFromAPIOrDie(ctx, kubeclient, apiMosc, apiMCP) + mosb := buildrequest.NewMachineOSBuildOrDie(buildrequest.MachineOSBuildOpts{ + MachineConfig: apiMC, + MachineOSConfig: apiMosc, + MachineConfigPool: apiMCP, + }) + buildJobName := utils.GetBuildJobName(mosb) // After updating the MachineConfigPool, a new MachineOSBuild should get created. kubeassert.MachineOSBuildExists(mosb, "New MachineOSBuild for MachineConfigPool %q update for MachineOSConfig %q never gets created", mcp.Name, mosc.Name) @@ -372,7 +402,7 @@ func TestOSBuildControllerBuildFailedDoesNotCascade(t *testing.T) { faultyMC := "rendered-undesiredFaultyMC" // Create a MOSC to enable OCL and let it produce a new MOSB in Running State - _, mcfgclient, _, _, mosc, mosb, mcp, _, ctrl := setupOSBuildControllerForTestWithRunningBuild(ctx, t, poolName) + _, mcfgclient, _, _, mosc, mosb, mcp, _, _, ctrl := setupOSBuildControllerForTestWithRunningBuild(ctx, t, poolName) assertMachineOSConfigGetsCurrentBuildAnnotation(ctx, t, mcfgclient, mosc, mosb) found := func(item *mcfgv1.MachineOSBuild, list []mcfgv1.MachineOSBuild) bool { @@ -447,16 +477,20 @@ func TestOSBuildControllerReconcilesMachineConfigPoolsAfterRestart(t *testing.T) // Gets an OSBuildController with a running job. 
ctrlCtx, ctrlCtxCancel := context.WithCancel(ctx) t.Cleanup(ctrlCtxCancel) - kubeclient, mcfgclient, imageclient, routeclient, mosc, firstMosb, mcp, kubeassert, _ := setupOSBuildControllerForTestWithRunningBuild(ctrlCtx, t, poolName) + kubeclient, mcfgclient, imageclient, routeclient, mosc, firstMosb, _, kubeassert, _, _ := setupOSBuildControllerForTestWithRunningBuild(ctrlCtx, t, poolName) // Stop the OSBuildController. ctrlCtxCancel() // Create a MachineConfigPool change. - mcp = insertNewRenderedMachineConfigAndUpdatePool(ctx, t, mcfgclient, poolName, "rendered-worker-2") + apiMCP, apiMC := insertNewRenderedMachineConfigAndUpdatePool(ctx, t, mcfgclient, poolName, "rendered-worker-2") // Get the name of the second MachineOSBuild object. - secondMosb := buildrequest.NewMachineOSBuildFromAPIOrDie(ctx, kubeclient, mosc, mcp) + secondMosb := buildrequest.NewMachineOSBuildOrDie(buildrequest.MachineOSBuildOpts{ + MachineConfig: apiMC, + MachineOSConfig: mosc, + MachineConfigPool: apiMCP, + }) // Ensure that everything still exists. 
kubeassert = kubeassert.Eventually().WithContext(ctx) @@ -537,7 +571,12 @@ func TestOSBuildControllerReconcilesJobsAfterRestart(t *testing.T) { _, err := mcfgclient.MachineconfigurationV1().MachineOSConfigs().Create(ctx, mosc, metav1.CreateOptions{}) require.NoError(t, err) - mosb := buildrequest.NewMachineOSBuildFromAPIOrDie(ctx, kubeclient, mosc, mcp) + mosb := buildrequest.NewMachineOSBuildOrDie(buildrequest.MachineOSBuildOpts{ + MachineConfig: lobj.RenderedMachineConfig, + MachineOSConfig: mosc, + MachineConfigPool: mcp, + }) + apiMosb, err := mcfgclient.MachineconfigurationV1().MachineOSBuilds().Create(ctx, mosb, metav1.CreateOptions{}) require.NoError(t, err) @@ -644,7 +683,7 @@ func setupOSBuildControllerForTest(ctx context.Context, t *testing.T) (*fakecore return kubeclient, mcfgclient, imageclient, routeclient, kubeassert, lobj, ctrl } -func setupOSBuildControllerForTestWithBuild(ctx context.Context, t *testing.T, poolName string) (*fakecorev1client.Clientset, *fakeclientmachineconfigv1.Clientset, *fakeclientimagev1.Clientset, *fakeclientroutev1.Clientset, *mcfgv1.MachineOSConfig, *mcfgv1.MachineOSBuild, *mcfgv1.MachineConfigPool, *testhelpers.Assertions, *OSBuildController) { +func setupOSBuildControllerForTestWithBuild(ctx context.Context, t *testing.T, poolName string) (*fakecorev1client.Clientset, *fakeclientmachineconfigv1.Clientset, *fakeclientimagev1.Clientset, *fakeclientroutev1.Clientset, *mcfgv1.MachineOSConfig, *mcfgv1.MachineOSBuild, *mcfgv1.MachineConfigPool, *testhelpers.Assertions, *fixtures.ObjectsForTest, *OSBuildController) { kubeclient, mcfgclient, imageclient, routeclient, kubeassert, lobj, ctrl := setupOSBuildControllerForTest(ctx, t) mcp := lobj.MachineConfigPool @@ -654,15 +693,19 @@ func setupOSBuildControllerForTestWithBuild(ctx context.Context, t *testing.T, p _, err := mcfgclient.MachineconfigurationV1().MachineOSConfigs().Create(ctx, mosc, metav1.CreateOptions{}) require.NoError(t, err) - mosb := 
buildrequest.NewMachineOSBuildFromAPIOrDie(ctx, kubeclient, mosc, mcp) + mosb := buildrequest.NewMachineOSBuildOrDie(buildrequest.MachineOSBuildOpts{ + MachineConfig: lobj.RenderedMachineConfig, + MachineOSConfig: mosc, + MachineConfigPool: mcp, + }) - return kubeclient, mcfgclient, imageclient, routeclient, mosc, mosb, mcp, kubeassert.WithPollInterval(time.Millisecond * 10).WithContext(ctx).Eventually(), ctrl + return kubeclient, mcfgclient, imageclient, routeclient, mosc, mosb, mcp, kubeassert.WithPollInterval(time.Millisecond * 10).WithContext(ctx).Eventually(), lobj, ctrl } -func setupOSBuildControllerForTestWithRunningBuild(ctx context.Context, t *testing.T, poolName string) (*fakecorev1client.Clientset, *fakeclientmachineconfigv1.Clientset, *fakeclientimagev1.Clientset, *fakeclientroutev1.Clientset, *mcfgv1.MachineOSConfig, *mcfgv1.MachineOSBuild, *mcfgv1.MachineConfigPool, *testhelpers.Assertions, *OSBuildController) { +func setupOSBuildControllerForTestWithRunningBuild(ctx context.Context, t *testing.T, poolName string) (*fakecorev1client.Clientset, *fakeclientmachineconfigv1.Clientset, *fakeclientimagev1.Clientset, *fakeclientroutev1.Clientset, *mcfgv1.MachineOSConfig, *mcfgv1.MachineOSBuild, *mcfgv1.MachineConfigPool, *testhelpers.Assertions, *fixtures.ObjectsForTest, *OSBuildController) { t.Helper() - kubeclient, mcfgclient, imageclient, routeclient, mosc, mosb, mcp, kubeassert, ctrl := setupOSBuildControllerForTestWithBuild(ctx, t, poolName) + kubeclient, mcfgclient, imageclient, routeclient, mosc, mosb, mcp, kubeassert, lobj, ctrl := setupOSBuildControllerForTestWithBuild(ctx, t, poolName) time.Sleep(time.Millisecond * 200) initialBuildJobName := utils.GetBuildJobName(mosb) @@ -678,13 +721,13 @@ func setupOSBuildControllerForTestWithRunningBuild(ctx context.Context, t *testi // The MachineOSBuild should be running. 
kubeassert.Eventually().WithContext(ctx).MachineOSBuildIsRunning(mosb, "Expected the MachineOSBuild %s status to be running", mosb.Name) - return kubeclient, mcfgclient, imageclient, routeclient, mosc, mosb, mcp, kubeassert, ctrl + return kubeclient, mcfgclient, imageclient, routeclient, mosc, mosb, mcp, kubeassert, lobj, ctrl } -func setupOSBuildControllerForTestWithSuccessfulBuild(ctx context.Context, t *testing.T, poolName string) (*fakecorev1client.Clientset, *fakeclientmachineconfigv1.Clientset, *fakeclientimagev1.Clientset, *fakeclientroutev1.Clientset, *mcfgv1.MachineOSConfig, *mcfgv1.MachineOSBuild, *mcfgv1.MachineConfigPool, *testhelpers.Assertions) { +func setupOSBuildControllerForTestWithSuccessfulBuild(ctx context.Context, t *testing.T, poolName string) (*fakecorev1client.Clientset, *fakeclientmachineconfigv1.Clientset, *fakeclientimagev1.Clientset, *fakeclientroutev1.Clientset, *mcfgv1.MachineOSConfig, *mcfgv1.MachineOSBuild, *mcfgv1.MachineConfigPool, *fixtures.ObjectsForTest, *testhelpers.Assertions) { t.Helper() - kubeclient, mcfgclient, imageclient, routeclient, mosc, mosb, mcp, kubeassert, _ := setupOSBuildControllerForTestWithRunningBuild(ctx, t, poolName) + kubeclient, mcfgclient, imageclient, routeclient, mosc, mosb, mcp, kubeassert, lobj, _ := setupOSBuildControllerForTestWithRunningBuild(ctx, t, poolName) time.Sleep(time.Millisecond * 200) kubeassert.MachineOSBuildExists(mosb) kubeassert.JobExists(utils.GetBuildJobName(mosb)) @@ -693,13 +736,13 @@ func setupOSBuildControllerForTestWithSuccessfulBuild(ctx context.Context, t *te kubeassert.MachineOSBuildIsSuccessful(mosb) kubeassert.JobDoesNotExist(utils.GetBuildJobName(mosb)) - return kubeclient, mcfgclient, imageclient, routeclient, mosc, mosb, mcp, kubeassert + return kubeclient, mcfgclient, imageclient, routeclient, mosc, mosb, mcp, lobj, kubeassert } -func setupOSBuildControllerForTestWithFailedBuild(ctx context.Context, t *testing.T, poolName string) (*fakecorev1client.Clientset, 
*fakeclientmachineconfigv1.Clientset, *fakeclientimagev1.Clientset, *fakeclientroutev1.Clientset, *mcfgv1.MachineOSConfig, *mcfgv1.MachineOSBuild, *mcfgv1.MachineConfigPool, *testhelpers.Assertions) { +func setupOSBuildControllerForTestWithFailedBuild(ctx context.Context, t *testing.T, poolName string) (*fakecorev1client.Clientset, *fakeclientmachineconfigv1.Clientset, *fakeclientimagev1.Clientset, *fakeclientroutev1.Clientset, *mcfgv1.MachineOSConfig, *mcfgv1.MachineOSBuild, *mcfgv1.MachineConfigPool, *testhelpers.Assertions, *fixtures.ObjectsForTest) { t.Helper() - kubeclient, mcfgclient, imageclient, routeclient, mosc, mosb, mcp, kubeassert, _ := setupOSBuildControllerForTestWithBuild(ctx, t, poolName) + kubeclient, mcfgclient, imageclient, routeclient, mosc, mosb, mcp, kubeassert, lobj, _ := setupOSBuildControllerForTestWithBuild(ctx, t, poolName) initialBuildJobName := utils.GetBuildJobName(mosb) @@ -712,67 +755,56 @@ func setupOSBuildControllerForTestWithFailedBuild(ctx context.Context, t *testin // The MachineOSBuild should be running. 
kubeassert.MachineOSBuildIsRunning(mosb, "Expected the MachineOSBuild %s status to be running", mosb.Name) - return kubeclient, mcfgclient, imageclient, routeclient, mosc, mosb, mcp, kubeassert + return kubeclient, mcfgclient, imageclient, routeclient, mosc, mosb, mcp, kubeassert, lobj } -func insertNewRenderedMachineConfigAndUpdatePool(ctx context.Context, t *testing.T, mcfgclient mcfgclientset.Interface, poolName, renderedName string) *mcfgv1.MachineConfigPool { +func insertNewRenderedMachineConfigAndUpdatePool(ctx context.Context, t *testing.T, mcfgclient mcfgclientset.Interface, poolName, renderedName string) (*mcfgv1.MachineConfigPool, *mcfgv1.MachineConfig) { mcp, err := mcfgclient.MachineconfigurationV1().MachineConfigPools().Get(ctx, poolName, metav1.GetOptions{}) require.NoError(t, err) - insertNewRenderedMachineConfig(ctx, t, mcfgclient, poolName, renderedName, fixtures.OSImageURL) + mc := insertNewRenderedMachineConfig(ctx, t, mcfgclient, poolName, renderedName, fixtures.OSImageURL) mcp.Spec.Configuration.Name = renderedName mcp, err = mcfgclient.MachineconfigurationV1().MachineConfigPools().Update(ctx, mcp, metav1.UpdateOptions{}) require.NoError(t, err) - return mcp + return mcp, mc } -func insertNewRenderedMachineConfig(ctx context.Context, t *testing.T, mcfgclient mcfgclientset.Interface, poolName, renderedName string, osImageURL string) { - filename := filepath.Join("/etc", poolName, renderedName) +func insertNewRenderedMachineConfig(ctx context.Context, t *testing.T, mcfgclient mcfgclientset.Interface, poolName, renderedName string, osImageURL string) *mcfgv1.MachineConfig { + mc := fixtures.NewObjectsForTest(poolName).RenderedMachineConfig + mc.Name = renderedName + mc.Spec.OSImageURL = osImageURL - file := ctrlcommon.NewIgnFile(filename, renderedName) - mc := testhelpers.NewMachineConfig( - renderedName, - map[string]string{ - ctrlcommon.GeneratedByControllerVersionAnnotationKey: "version-number", - "machineconfiguration.openshift.io/role": 
poolName, - }, - osImageURL, - []ign3types.File{file}) - _, err := mcfgclient.MachineconfigurationV1().MachineConfigs().Create(ctx, mc, metav1.CreateOptions{}) + apiMC, err := mcfgclient.MachineconfigurationV1().MachineConfigs().Create(ctx, mc, metav1.CreateOptions{}) require.NoError(t, err) + + return apiMC } -func insertNewRenderedMachineConfigWithoutImageChangeAndUpdatePool(ctx context.Context, t *testing.T, mcfgclient mcfgclientset.Interface, poolName, renderedName string) *mcfgv1.MachineConfigPool { +func insertNewRenderedMachineConfigWithoutImageChangeAndUpdatePool(ctx context.Context, t *testing.T, mcfgclient mcfgclientset.Interface, poolName, renderedName string) (*mcfgv1.MachineConfigPool, *mcfgv1.MachineConfig) { mcp, err := mcfgclient.MachineconfigurationV1().MachineConfigPools().Get(ctx, poolName, metav1.GetOptions{}) require.NoError(t, err) - insertNewRenderedMachineConfigWithoutImageChange(ctx, t, mcfgclient, poolName, renderedName) + mc := insertNewRenderedMachineConfigWithoutImageChange(ctx, t, mcfgclient, poolName, renderedName) mcp.Spec.Configuration.Name = renderedName mcp, err = mcfgclient.MachineconfigurationV1().MachineConfigPools().Update(ctx, mcp, metav1.UpdateOptions{}) require.NoError(t, err) - return mcp + return mcp, mc } -func insertNewRenderedMachineConfigWithoutImageChange(ctx context.Context, t *testing.T, mcfgclient mcfgclientset.Interface, poolName, renderedName string) { - filename := filepath.Join("/etc", poolName, renderedName) +func insertNewRenderedMachineConfigWithoutImageChange(ctx context.Context, t *testing.T, mcfgclient mcfgclientset.Interface, poolName, renderedName string) *mcfgv1.MachineConfig { + mc := fixtures.NewObjectsForTest(poolName).RenderedMachineConfig + mc.Name = renderedName - file := ctrlcommon.NewIgnFile(filename, renderedName) - mc := testhelpers.NewMachineConfig( - renderedName, - map[string]string{ - ctrlcommon.GeneratedByControllerVersionAnnotationKey: "version-number", - 
"machineconfiguration.openshift.io/role": poolName, - }, - "", - []ign3types.File{file}) - _, err := mcfgclient.MachineconfigurationV1().MachineConfigs().Create(ctx, mc, metav1.CreateOptions{}) + apiMC, err := mcfgclient.MachineconfigurationV1().MachineConfigs().Create(ctx, mc, metav1.CreateOptions{}) require.NoError(t, err) + + return apiMC } func isMachineOSBuildReachedExpectedCount(ctx context.Context, t *testing.T, mcfgclient mcfgclientset.Interface, mosc *mcfgv1.MachineOSConfig, expected int) { @@ -835,7 +867,7 @@ func TestOSBuildControllerSkipsBuildForLayerOnlyChanges(t *testing.T) { t.Cleanup(cancel) poolName := "worker" - _, mcfgclient, _, _, mosc, firstMosb, mcp, kubeassert := setupOSBuildControllerForTestWithSuccessfulBuild(ctx, t, poolName) + _, mcfgclient, _, _, mosc, firstMosb, mcp, _, kubeassert := setupOSBuildControllerForTestWithSuccessfulBuild(ctx, t, poolName) isMachineOSBuildReachedExpectedCount(ctx, t, mcfgclient, mosc, 1) assertMachineOSConfigGetsCurrentBuildAnnotation(ctx, t, mcfgclient, mosc, firstMosb) diff --git a/pkg/controller/build/reconciler.go b/pkg/controller/build/reconciler.go index d3da1cf799..84e76b0d8e 100644 --- a/pkg/controller/build/reconciler.go +++ b/pkg/controller/build/reconciler.go @@ -556,24 +556,23 @@ func (b *buildReconciler) createNewMachineOSBuildOrReuseExisting(ctx context.Con return fmt.Errorf("could not get MachineConfigPool %s for MachineOSConfig %s: %w", mosc.Spec.MachineConfigPool.Name, mosc.Name, err) } + mc, err := b.machineConfigLister.Get(mcp.Spec.Configuration.Name) + if err != nil { + return fmt.Errorf("could not get MachineConfig %s for MachineConfigPool %s: %w", mcp.Spec.Configuration.Name, mcp.Name, err) + } + // Allow builds to retry when pool is degraded only due to BuildDegraded, // but prevent builds for other types of degradation (NodeDegraded, RenderDegraded) if b.shouldPreventBuildDueToDegradation(mcp) { return fmt.Errorf("MachineConfigPool %s is degraded due to non-build issues", mcp.Name) } - 
// TODO: Consider using a ConfigMap lister to get this value instead of the API server. - osImageURLs, err := ctrlcommon.GetOSImageURLConfig(ctx, b.kubeclient) - if err != nil { - return fmt.Errorf("could not get OSImageURLConfig: %w", err) - } - // Construct a new MachineOSBuild object which has the hashed name attached // to it. mosb, err := buildrequest.NewMachineOSBuild(buildrequest.MachineOSBuildOpts{ + MachineConfig: mc, MachineOSConfig: mosc, MachineConfigPool: mcp, - OSImageURLConfig: osImageURLs, }) if err != nil { @@ -1360,12 +1359,15 @@ func (b *buildReconciler) reconcilePoolChange(ctx context.Context, mcp *mcfgv1.M // This is our trigger point if (oldRendered != newRendered && needsImageRebuild) || firstOptIn == "" { klog.Infof("pool %q: rendered config changed and requires an image rebuild. Verifying if a valid build already exists...", mcp.Name) + mc, err := b.machineConfigLister.Get(mcp.Spec.Configuration.Name) + if err != nil { + return fmt.Errorf("could not get MachineConfig %q for MachineConfigPool %q: %w", mcp.Spec.Configuration.Name, mcp.Name, err) + } - osImageURLs, _ := ctrlcommon.GetOSImageURLConfig(ctx, b.kubeclient) targetMosb, err := buildrequest.NewMachineOSBuild(buildrequest.MachineOSBuildOpts{ + MachineConfig: mc, MachineOSConfig: mosc, MachineConfigPool: mcp, - OSImageURLConfig: osImageURLs, }) if err != nil { return fmt.Errorf("could not generate name for target MOSB: %w", err) @@ -1435,18 +1437,19 @@ func (b *buildReconciler) reuseImageForNewMOSB(ctx context.Context, mosc *mcfgv1 return err } - // Get the osimageurl for our new MOSB object - osImageURLs, err := ctrlcommon.GetOSImageURLConfig(ctx, b.kubeclient) + mc, err := b.machineConfigLister.Get(mcp.Spec.Configuration.Name) if err != nil { return err } + // Build the new MOSB object. 
this is our "promise", we will eventually check if we will proceed with this newMosb, err := buildrequest.NewMachineOSBuild( buildrequest.MachineOSBuildOpts{ + MachineConfig: mc, MachineOSConfig: mosc, MachineConfigPool: mcp, - OSImageURLConfig: osImageURLs, }) + if err != nil { return err } @@ -1781,7 +1784,17 @@ func (b *buildReconciler) seedMachineOSConfigWithExistingImage(ctx context.Conte return fmt.Errorf("could not get MachineConfigPool %q: %w", mosc.Spec.MachineConfigPool.Name, err) } - templateMOSB, err := buildrequest.NewMachineOSBuildFromAPI(ctx, b.kubeclient, mosc, mcp) + mc, err := b.machineConfigLister.Get(mcp.Spec.Configuration.Name) + if err != nil { + return fmt.Errorf("could not get MachineConfig: %q: %w", mcp.Spec.Configuration.Name, err) + } + + templateMOSB, err := buildrequest.NewMachineOSBuild(buildrequest.MachineOSBuildOpts{ + MachineConfig: mc, + MachineConfigPool: mcp, + MachineOSConfig: mosc, + }) + if err != nil { return fmt.Errorf("could not generate MachineOSBuild template for MachineOSConfig %q: %w", mosc.Name, err) } From ab6aee163a93f003fb1948a3273c0c88150a6792 Mon Sep 17 00:00:00 2001 From: Zack Zlotnik Date: Tue, 18 Nov 2025 17:28:45 -0500 Subject: [PATCH 3/6] updates e2e-ocl test to use new MachineOSBuild constructor --- pkg/controller/common/images.go | 10 --- test/e2e-ocl/onclusterlayering_test.go | 91 ++++++++++++++------------ 2 files changed, 49 insertions(+), 52 deletions(-) diff --git a/pkg/controller/common/images.go b/pkg/controller/common/images.go index df2365bbf0..edb7fb0399 100644 --- a/pkg/controller/common/images.go +++ b/pkg/controller/common/images.go @@ -130,16 +130,6 @@ func validateMCOConfigMap(cm *corev1.ConfigMap, name string, reqDataKeys, reqBin return nil } -// Gets and parses the OSImageURL data from the machine-config-osimageurl ConfigMap. 
-func GetOSImageURLConfig(ctx context.Context, kubeclient clientset.Interface) (*OSImageURLConfig, error) { - cm, err := kubeclient.CoreV1().ConfigMaps(MCONamespace).Get(ctx, MachineConfigOSImageURLConfigMapName, metav1.GetOptions{}) - if err != nil { - return nil, fmt.Errorf("could not get ConfigMap %s: %w", MachineConfigOSImageURLConfigMapName, err) - } - - return ParseOSImageURLConfigMap(cm) -} - // Gets and parse the Images data from the machine-config-operator-images ConfigMap. func GetImagesConfig(ctx context.Context, kubeclient clientset.Interface) (*Images, error) { cm, err := kubeclient.CoreV1().ConfigMaps(MCONamespace).Get(ctx, MachineConfigOperatorImagesConfigMapName, metav1.GetOptions{}) diff --git a/test/e2e-ocl/onclusterlayering_test.go b/test/e2e-ocl/onclusterlayering_test.go index 3fec6e58a9..5c16195057 100644 --- a/test/e2e-ocl/onclusterlayering_test.go +++ b/test/e2e-ocl/onclusterlayering_test.go @@ -235,7 +235,6 @@ func TestMissingImageIsRebuilt(t *testing.T) { require.NoError(t, err) secondMOSB = waitForBuildToStart(t, cs, secondMOSB) t.Logf("MachineOSBuild %q has started", secondMOSB.Name) - assertBuildJobIsAsExpected(t, cs, secondMOSB) // Wait for the build to finish t.Logf("Waiting for 2nd build completion...") @@ -266,7 +265,6 @@ func TestMissingImageIsRebuilt(t *testing.T) { require.NoError(t, err) thirdMOSB = waitForBuildToStart(t, cs, thirdMOSB) t.Logf("MachineOSBuild %q has started (rebuild of image1)", thirdMOSB.Name) - assertBuildJobIsAsExpected(t, cs, thirdMOSB) // Wait for the build to finish t.Logf("Waiting for 3rd build completion...") @@ -373,7 +371,14 @@ func TestMachineOSConfigChangeRestartsBuild(t *testing.T) { mcp, err := cs.MachineconfigurationV1Interface.MachineConfigPools().Get(ctx, layeredMCPName, metav1.GetOptions{}) require.NoError(t, err) - firstMosb := buildrequest.NewMachineOSBuildFromAPIOrDie(ctx, cs.GetKubeclient(), mosc, mcp) + mc, err := cs.MachineconfigurationV1Interface.MachineConfigs().Get(ctx, 
mcp.Spec.Configuration.Name, metav1.GetOptions{}) + require.NoError(t, err) + + firstMosb := buildrequest.NewMachineOSBuildOrDie(buildrequest.MachineOSBuildOpts{ + MachineConfig: mc, + MachineOSConfig: mosc, + MachineConfigPool: mcp, + }) // First, we get a MachineOSBuild started as usual. waitForBuildToStart(t, cs, firstMosb) @@ -383,7 +388,11 @@ func TestMachineOSConfigChangeRestartsBuild(t *testing.T) { apiMosc := helpers.SetContainerfileContentsOnMachineOSConfig(ctx, t, cs.GetMcfgclient(), mosc, "FROM configs AS final\nRUN echo 'hello' > /etc/hello") - moscChangeMosb := buildrequest.NewMachineOSBuildFromAPIOrDie(ctx, cs.GetKubeclient(), apiMosc, mcp) + moscChangeMosb := buildrequest.NewMachineOSBuildOrDie(buildrequest.MachineOSBuildOpts{ + MachineConfig: mc, + MachineOSConfig: apiMosc, + MachineConfigPool: mcp, + }) kubeassert := helpers.AssertClientSet(t, cs).WithContext(ctx) @@ -479,14 +488,21 @@ func TestGracefulBuildFailureRecovery(t *testing.T) { apiMosc.Spec.Containerfile = []mcfgv1.MachineOSContainerfile{} - updated, err := cs.MachineconfigurationV1Interface.MachineOSConfigs().Update(ctx, apiMosc, metav1.UpdateOptions{}) + updatedMosc, err := cs.MachineconfigurationV1Interface.MachineOSConfigs().Update(ctx, apiMosc, metav1.UpdateOptions{}) require.NoError(t, err) mcp, err := cs.MachineconfigurationV1Interface.MachineConfigPools().Get(ctx, layeredMCPName, metav1.GetOptions{}) require.NoError(t, err) + mc, err := cs.MachineconfigurationV1Interface.MachineConfigs().Get(ctx, mcp.Spec.Configuration.Name, metav1.GetOptions{}) + require.NoError(t, err) + // Compute the new MachineOSBuild image name. - moscChangeMosb := buildrequest.NewMachineOSBuildFromAPIOrDie(ctx, cs.GetKubeclient(), updated, mcp) + moscChangeMosb := buildrequest.NewMachineOSBuildOrDie(buildrequest.MachineOSBuildOpts{ + MachineConfig: mc, + MachineOSConfig: updatedMosc, + MachineConfigPool: mcp, + }) // Wait for the second build to start. 
secondMosb := waitForBuildToStart(t, cs, moscChangeMosb) @@ -768,9 +784,6 @@ func runOnClusterLayeringTest(t *testing.T, testOpts onClusterLayeringTestOpts) startedBuild := waitForBuildToStartForPoolAndConfig(t, cs, testOpts.poolName, mosc.Name) t.Logf("MachineOSBuild %q has started", startedBuild.Name) - // Assert that the build job has certain properties and configuration. - assertBuildJobIsAsExpected(t, cs, startedBuild) - t.Logf("Waiting for build completion...") // Create a child context for the build pod log streamer. This is so we can @@ -1036,34 +1049,6 @@ func waitForBuildToBeInterrupted(t *testing.T, cs *framework.ClientSet, startedB return mosb } -// Validates that the build job is configured correctly. In this case, -// "correctly" means that it has the correct container images. Future -// assertions could include things like ensuring that the proper volume mounts -// are present, etc. -func assertBuildJobIsAsExpected(t *testing.T, cs *framework.ClientSet, mosb *mcfgv1.MachineOSBuild) { - t.Helper() - - osImageURLConfig, err := ctrlcommon.GetOSImageURLConfig(context.TODO(), cs.GetKubeclient()) - require.NoError(t, err) - - mcoImages, err := ctrlcommon.GetImagesConfig(context.TODO(), cs.GetKubeclient()) - require.NoError(t, err) - - buildPod, err := getPodFromJob(context.TODO(), cs, mosb.Status.Builder.Job.Name) - require.NoError(t, err) - - assertContainerIsUsingExpectedImage := func(c corev1.Container, containerName, expectedImage string) { - if c.Name == containerName { - assert.Equal(t, c.Image, expectedImage) - } - } - - for _, container := range buildPod.Spec.Containers { - assertContainerIsUsingExpectedImage(container, "image-build", mcoImages.MachineConfigOperator) - assertContainerIsUsingExpectedImage(container, "wait-for-done", osImageURLConfig.BaseOSContainerImage) - } -} - // Prepares for an on-cluster build test by performing the following: // - Gets the Docker Builder secret name from the MCO namespace. 
// - Creates the imagestream to use for the test. @@ -1259,7 +1244,14 @@ func TestControllerEventuallyReconciles(t *testing.T) { createMachineOSConfig(t, cs, mosc) - mosb := buildrequest.NewMachineOSBuildFromAPIOrDie(ctx, cs.GetKubeclient(), mosc, mcp) + mc, err := cs.MachineconfigurationV1Interface.MachineConfigs().Get(ctx, mcp.Spec.Configuration.Name, metav1.GetOptions{}) + require.NoError(t, err) + + mosb := buildrequest.NewMachineOSBuildOrDie(buildrequest.MachineOSBuildOpts{ + MachineConfig: mc, + MachineOSConfig: mosc, + MachineConfigPool: mcp, + }) // Wait for the MachineOSBuild to exist. kubeassert := helpers.AssertClientSet(t, cs).WithContext(ctx).Eventually() @@ -1445,7 +1437,7 @@ func TestImageBuildDegradedOnFailureAndClearedOnBuildStart(t *testing.T) { }, } - updated, err := cs.MachineconfigurationV1Interface.MachineOSConfigs().Update(ctx, apiMosc, metav1.UpdateOptions{}) + updatedMosc, err := cs.MachineconfigurationV1Interface.MachineOSConfigs().Update(ctx, apiMosc, metav1.UpdateOptions{}) require.NoError(t, err) t.Logf("Fixed containerfile, waiting for new build to start") @@ -1454,7 +1446,14 @@ func TestImageBuildDegradedOnFailureAndClearedOnBuildStart(t *testing.T) { require.NoError(t, err) // Compute the new MachineOSBuild name - moscChangeMosb := buildrequest.NewMachineOSBuildFromAPIOrDie(ctx, cs.GetKubeclient(), updated, mcp) + mc, err := cs.MachineconfigurationV1Interface.MachineConfigs().Get(ctx, mcp.Spec.Configuration.Name, metav1.GetOptions{}) + require.NoError(t, err) + + moscChangeMosb := buildrequest.NewMachineOSBuildOrDie(buildrequest.MachineOSBuildOpts{ + MachineConfig: mc, + MachineOSConfig: updatedMosc, + MachineConfigPool: mcp, + }) // Wait for the second build to start secondMosb := waitForBuildToStart(t, cs, moscChangeMosb) @@ -1524,7 +1523,7 @@ func TestImageBuildDegradedOnFailureAndClearedOnBuildStart(t *testing.T) { }, } - updated, err = cs.MachineconfigurationV1Interface.MachineOSConfigs().Update(ctx, apiMosc, 
metav1.UpdateOptions{}) + updatedMosc, err = cs.MachineconfigurationV1Interface.MachineOSConfigs().Update(ctx, apiMosc, metav1.UpdateOptions{}) require.NoError(t, err) t.Logf("Modified containerfile, waiting for third build to start") @@ -1533,8 +1532,16 @@ func TestImageBuildDegradedOnFailureAndClearedOnBuildStart(t *testing.T) { mcp, err = cs.MachineconfigurationV1Interface.MachineConfigPools().Get(ctx, layeredMCPName, metav1.GetOptions{}) require.NoError(t, err) + //Get the updated MC to compute the new build + mc, err = cs.MachineconfigurationV1Interface.MachineConfigs().Get(ctx, mcp.Spec.Configuration.Name, metav1.GetOptions{}) + require.NoError(t, err) + // Compute the new MachineOSBuild name for the third build - thirdMoscMosb := buildrequest.NewMachineOSBuildFromAPIOrDie(ctx, cs.GetKubeclient(), updated, mcp) + thirdMoscMosb := buildrequest.NewMachineOSBuildOrDie(buildrequest.MachineOSBuildOpts{ + MachineConfig: mc, + MachineOSConfig: updatedMosc, + MachineConfigPool: mcp, + }) // Wait for the third build to start thirdMosb := waitForBuildToStart(t, cs, thirdMoscMosb) From df2b16cdee47cc59774c1514622a30882537aa8e Mon Sep 17 00:00:00 2001 From: Zack Zlotnik Date: Thu, 5 Feb 2026 10:42:54 -0500 Subject: [PATCH 4/6] remove unneeded test fixtures --- pkg/controller/build/fixtures/objects.go | 28 ------------------------ 1 file changed, 28 deletions(-) diff --git a/pkg/controller/build/fixtures/objects.go b/pkg/controller/build/fixtures/objects.go index 1e566413ed..e0ef11ce6f 100644 --- a/pkg/controller/build/fixtures/objects.go +++ b/pkg/controller/build/fixtures/objects.go @@ -125,7 +125,6 @@ func defaultKubeObjects() []runtime.Object { return []runtime.Object{ getImagesConfigMap(), - getOSImageURLConfigMap(), &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ Name: finalImagePushSecretName, @@ -220,22 +219,6 @@ func getImagesConfigMap() *corev1.ConfigMap { } } -// Gets an example machine-config-osimageurl ConfigMap. 
-func getOSImageURLConfigMap() *corev1.ConfigMap { - return &corev1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Name: ctrlcommon.MachineConfigOSImageURLConfigMapName, - Namespace: ctrlcommon.MCONamespace, - }, - Data: map[string]string{ - "baseOSContainerImage": BaseOSContainerImage, - "baseOSExtensionsContainerImage": BaseOSExtensionsContainerImage, - "osImageURL": OSImageURL, - "releaseVersion": ReleaseVersion, - }, - } -} - const ( BaseOSContainerImage string = "registry.hostname.com/org/repo@sha256:220a60ecd4a3c32c282622a625a54db9ba0ff55b5ba9c29c7064a2bc358b6a3e" BaseOSExtensionsContainerImage string = "registry.hostname.com/org/repo@sha256:5fb4ba1a651bae8057ec6b5cdafc93fa7e0b7d944d6f02a4b751de4e15464def" @@ -243,17 +226,6 @@ const ( OSImageURL string = "registry.hostname.com/org/repo@sha256:5be476dce1f7c1fbaf41bf9c0097e1725d7d26b74ea93543989d1a2b76fef4a5" ) -// Gets the OSImageURL struct that the machine-config-osimageurl ConfigMap -// would be marshalled into. -func OSImageURLConfig() *ctrlcommon.OSImageURLConfig { - return &ctrlcommon.OSImageURLConfig{ - BaseOSContainerImage: BaseOSContainerImage, - BaseOSExtensionsContainerImage: BaseOSExtensionsContainerImage, - ReleaseVersion: ReleaseVersion, - OSImageURL: OSImageURL, - } -} - func GetExpectedFinalImagePullspecForMachineOSBuild(mosb *mcfgv1.MachineOSBuild) string { digest := getDigest(mosb.Name) return "registry.hostname.com/org/repo@" + digest From 1295c0ed80d53c450e775c53725f3e7fb0abe4d6 Mon Sep 17 00:00:00 2001 From: Zack Zlotnik Date: Thu, 26 Feb 2026 13:50:18 -0500 Subject: [PATCH 5/6] only allow OSImageURL or setting OSImageStream name Ensures that a cluster admin may only override the OSImageURL field or set the desired OSImageStream name; but not both. This ensures that either the cluster admin or the MCO will manage the OS image and prevents the MCO from overriding this setting.
--- pkg/controller/render/render_controller.go | 12 ++ .../render/render_controller_test.go | 15 ++ .../osimagestreamrender_test.go | 190 ++++++++++++++++++ 3 files changed, 217 insertions(+) create mode 100644 test/e2e-techpreview/osimagestreamrender_test.go diff --git a/pkg/controller/render/render_controller.go b/pkg/controller/render/render_controller.go index 1d78e369ea..a3d9ff29d4 100644 --- a/pkg/controller/render/render_controller.go +++ b/pkg/controller/render/render_controller.go @@ -714,6 +714,18 @@ func generateRenderedMachineConfig(pool *mcfgv1.MachineConfigPool, configs []*mc if !strings.Contains(merged.Spec.OSImageURL, "sha256:") { klog.Warningf("OSImageURL %q for MachineConfig %s is set using a tag instead of a digest. It is highly recommended to use a digest", merged.Spec.OSImageURL, merged.Name) } + + // If the cluster admin overrides the OSImageURL field, it means they want + // to take over managing the OS image. If they then set osImageStream.name, + // this implies they want the MCO to manage the OS, which would override + // setting OSImageURL. A cluster admin should only be able to do one or the + // other; not both. + // + // In the future, we may want this to also consider some additional + // behaviors based on who set OSImageURL. 
+ if pool.Spec.OSImageStream.Name != "" { + return nil, fmt.Errorf("cannot override MachineConfig osImageURL and set MachineConfigPool spec.osImageStream.name simultaneously") + } } return merged, nil diff --git a/pkg/controller/render/render_controller_test.go b/pkg/controller/render/render_controller_test.go index cc8f96b3ba..9becee1a34 100644 --- a/pkg/controller/render/render_controller_test.go +++ b/pkg/controller/render/render_controller_test.go @@ -442,6 +442,21 @@ func TestGenerateMachineConfigOverrideOSImageURL(t *testing.T) { assert.Equal(t, "dummy-change-2", gmc.Spec.OSImageURL) } +func TestGenerateMachineConfigCannotOverrideOSImageURLWhenOSImageStreamSet(t *testing.T) { + mcp := helpers.NewMachineConfigPool("test-cluster-master", helpers.MasterSelector, nil, "") + mcs := []*mcfgv1.MachineConfig{ + helpers.NewMachineConfig("00-test-cluster-master", map[string]string{"node-role/master": ""}, "dummy-test-1", []ign3types.File{}), + helpers.NewMachineConfig("00-test-cluster-master-0", map[string]string{"node-role/master": ""}, "dummy-change", []ign3types.File{}), + } + + mcp.Spec.OSImageStream.Name = "populated-value" + + cc := newControllerConfig(ctrlcommon.ControllerConfigName) + + _, err := generateRenderedMachineConfig(mcp, mcs, cc, nil) + assert.Error(t, err) +} + func TestVersionSkew(t *testing.T) { mcp := helpers.NewMachineConfigPool("test-cluster-master", helpers.MasterSelector, nil, "") mcs := []*mcfgv1.MachineConfig{ diff --git a/test/e2e-techpreview/osimagestreamrender_test.go b/test/e2e-techpreview/osimagestreamrender_test.go new file mode 100644 index 0000000000..76201901f3 --- /dev/null +++ b/test/e2e-techpreview/osimagestreamrender_test.go @@ -0,0 +1,190 @@ +package e2e_techpreview + +import ( + "context" + "fmt" + "testing" + "time" + + ign3types "github.com/coreos/ignition/v2/config/v3_5/types" + "github.com/stretchr/testify/require" + + "github.com/openshift/machine-config-operator/pkg/apihelpers" + 
"github.com/openshift/machine-config-operator/test/framework" + "github.com/openshift/machine-config-operator/test/helpers" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/wait" + + mcfgv1 "github.com/openshift/api/machineconfiguration/v1" + mcfgv1alpha1 "github.com/openshift/api/machineconfiguration/v1alpha1" +) + +// This test sets the OSImageStream name on a MachineConfigPool, then it +// creates a MachineConfig which overrides OSImageURL. The test ensures that +// the MachineConfigPool becomes degraded in this scenario and that recovery +// from this state is possible. +func TestOSImageStreamOSImageURL(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), time.Minute*5) + t.Cleanup(cancel) + + cs := framework.NewClientSet("") + + testCases := []struct { + // Human-friendly name of the test. + name string + // Name of the object(s) to create for the test. Although this value is + // passed into each test function, the test function may use a different + // variable name for the sake of clarity. + objName string + // The recovery function to execute which should cause the + // MachineConfigPool to go back to a non-degraded state. 
+ recoverFunc func(*testing.T, string) + }{ + { + name: "Delete overriding MachineConfig", + objName: "osimageurl-delete", + recoverFunc: func(t *testing.T, mcName string) { + require.NoError(t, cs.MachineconfigurationV1Interface.MachineConfigs().Delete(ctx, mcName, metav1.DeleteOptions{})) + t.Logf("Deleted MachineConfig %s which overrides OSImageURL", mcName) + }, + }, + { + name: "Clear OSImageStream name from MachineConfigPool", + objName: "osimageurl-mcp-clear", + recoverFunc: func(t *testing.T, poolName string) { + require.NoError(t, setOSImageStreamOnMachineConfigPool(ctx, cs, poolName, "")) + t.Logf("Cleared OSImageStream from MachineConfigPool %s", poolName) + }, + }, + { + name: "Clear OSImageURL override from MachineConfig", + objName: "osimageurl-mc-clear", + recoverFunc: func(t *testing.T, mcName string) { + mc, err := cs.MachineconfigurationV1Interface.MachineConfigs().Get(ctx, mcName, metav1.GetOptions{}) + require.NoError(t, err) + + mc.Spec.OSImageURL = "" + + _, err = cs.MachineconfigurationV1Interface.MachineConfigs().Update(ctx, mc, metav1.UpdateOptions{}) + require.NoError(t, err) + t.Logf("Cleared OSImageURL from MachineConfig %s", mcName) + }, + }, + } + + // Fetch the OSImageStream so that we can use the default value to populate + // the MachineConfigPool field. + osi, err := getOSImageStream(ctx, cs) + require.NoError(t, err) + + for _, testCase := range testCases { + testCase := testCase + t.Run(testCase.name, func(t *testing.T) { + // For simplicity, we use the same name for the MachineConfig and the + // MachineConfigPool. However, we assign it to different variables so + // that the context around which value is used where is preserved. + mcName := testCase.objName + poolName := testCase.objName + + // Create the MachineConfigPool. + t.Cleanup(helpers.CreateMCP(t, cs, poolName)) + + // Set the OSImageStream name on the MachineConfigPool to the default OSImageStream value. 
+ require.NoError(t, setOSImageStreamOnMachineConfigPool(ctx, cs, poolName, osi.Status.DefaultStream)) + + // Wait for the pool to render its first MachineConfig. + helpers.WaitForRenderedConfig(t, cs, poolName, "00-worker") + + // Create and apply the MachineConfig which overrides OSImageURL. + require.NoError(t, createAndApplyMC(ctx, t, cs, mcName)) + + // Wait for the MachineConfigPool to degrade. + start := time.Now() + require.NoError(t, waitForPoolDegradation(ctx, cs, poolName)) + t.Logf("Pool %q has reached expected degraded state after %s", poolName, time.Since(start)) + + // Execute the test function to initiate MachineConfigPool recovery. + testCase.recoverFunc(t, testCase.objName) + + // Wait for the MachineConfigPool to lose the degraded status. + start = time.Now() + require.NoError(t, helpers.WaitForPoolCompleteAny(t, cs, poolName)) + t.Logf("Pool %q has cleared the degraded state after %s", poolName, time.Since(start)) + }) + } +} + +// Fetches the OSImageStream, checks that only a single OSImageStream instance +// is present, and that the defaultStream value is populated. +func getOSImageStream(ctx context.Context, cs *framework.ClientSet) (*mcfgv1alpha1.OSImageStream, error) { + osiList, err := cs.GetMcfgclient().MachineconfigurationV1alpha1().OSImageStreams().List(ctx, metav1.ListOptions{}) + if err != nil { + return nil, fmt.Errorf("could not get OSImageStream: %w", err) + } + + expectedCount := 1 + actualCount := len(osiList.Items) + if actualCount != expectedCount { + return nil, fmt.Errorf("expected %d OSImageStream(s), got: %d", expectedCount, actualCount) + } + + osi := osiList.Items[0] + if osi.Status.DefaultStream == "" { + return nil, fmt.Errorf("status.defaultStream empty on OSImageStream %s", osi.Name) + } + + return &osi, nil +} + +// Cleanup needs to tolerate the MachineConfig being missing, since one of the +// test cases deletes the MachineConfig. 
The helpers.ApplyMC() function does +// not offer that functionality, hence this function. +func createAndApplyMC(ctx context.Context, t *testing.T, cs *framework.ClientSet, mcName string) error { + t.Helper() + + // This is a dummy pullspec. + pullspec := "quay.io/org/repo@sha256:6ae587dc7f5ae2d836df80ee85118c374478408aa439aafd44ab8ab877fc41cd" + + mc := helpers.NewMachineConfig(mcName, helpers.MCLabelForRole(mcName), pullspec, []ign3types.File{}) + helpers.SetMetadataOnObject(t, mc) + + _, err := cs.MachineconfigurationV1Interface.MachineConfigs().Create(ctx, mc, metav1.CreateOptions{}) + if err != nil { + return err + } + + t.Cleanup(func() { + err := cs.MachineconfigurationV1Interface.MachineConfigs().Delete(ctx, mc.Name, metav1.DeleteOptions{}) + if err != nil && !apierrors.IsNotFound(err) { + t.Fatalf("%s", err) + } + }) + + return nil +} + +// Sets the OSImageStream name on the given MachineConfigPool. Can also be used to clear this value. +func setOSImageStreamOnMachineConfigPool(ctx context.Context, cs *framework.ClientSet, mcpName, osImageStreamName string) error { + mcp, err := cs.MachineconfigurationV1Interface.MachineConfigPools().Get(ctx, mcpName, metav1.GetOptions{}) + if err != nil { + return err + } + + mcp.Spec.OSImageStream.Name = osImageStreamName + + _, err = cs.MachineconfigurationV1Interface.MachineConfigPools().Update(ctx, mcp, metav1.UpdateOptions{}) + return err +} + +// Waits for the MachineConfigPool to reach a degraded state. 
+func waitForPoolDegradation(ctx context.Context, cs *framework.ClientSet, poolName string) error { + return wait.PollUntilContextTimeout(ctx, 2*time.Second, 2*time.Minute, true, func(ctx context.Context) (bool, error) { + mcp, err := cs.MachineConfigPools().Get(ctx, poolName, metav1.GetOptions{}) + if err != nil { + return false, err + } + + return apihelpers.IsMachineConfigPoolConditionTrue(mcp.Status.Conditions, mcfgv1.MachineConfigPoolDegraded), nil + }) +} From 63030826938766578e7d517cb354312de20813ea Mon Sep 17 00:00:00 2001 From: Zack Zlotnik Date: Fri, 27 Feb 2026 14:47:02 -0500 Subject: [PATCH 6/6] update sharded e2e-ocl tests --- test/e2e-ocl-1of2/onclusterlayering_test.go | 48 ++++--------- test/e2e-ocl-2of2/onclusterlayering_test.go | 74 ++++++++++----------- 2 files changed, 49 insertions(+), 73 deletions(-) diff --git a/test/e2e-ocl-1of2/onclusterlayering_test.go b/test/e2e-ocl-1of2/onclusterlayering_test.go index 180fa17897..fcbe936319 100644 --- a/test/e2e-ocl-1of2/onclusterlayering_test.go +++ b/test/e2e-ocl-1of2/onclusterlayering_test.go @@ -189,7 +189,6 @@ func TestMissingImageIsRebuilt(t *testing.T) { require.NoError(t, err) secondMOSB = waitForBuildToStart(t, cs, secondMOSB) t.Logf("MachineOSBuild %q has started", secondMOSB.Name) - assertBuildJobIsAsExpected(t, cs, secondMOSB) // Wait for the build to finish t.Logf("Waiting for 2nd build completion...") @@ -220,7 +219,6 @@ func TestMissingImageIsRebuilt(t *testing.T) { require.NoError(t, err) thirdMOSB = waitForBuildToStart(t, cs, thirdMOSB) t.Logf("MachineOSBuild %q has started (rebuild of image1)", thirdMOSB.Name) - assertBuildJobIsAsExpected(t, cs, thirdMOSB) // Wait for the build to finish t.Logf("Waiting for 3rd build completion...") @@ -313,7 +311,14 @@ func TestMachineOSConfigChangeRestartsBuild(t *testing.T) { mcp, err := cs.MachineconfigurationV1Interface.MachineConfigPools().Get(ctx, layeredMCPName, metav1.GetOptions{}) require.NoError(t, err) - firstMosb := 
buildrequest.NewMachineOSBuildFromAPIOrDie(ctx, cs.GetKubeclient(), mosc, mcp) + mc, err := cs.MachineconfigurationV1Interface.MachineConfigs().Get(ctx, mcp.Spec.Configuration.Name, metav1.GetOptions{}) + require.NoError(t, err) + + firstMosb := buildrequest.NewMachineOSBuildOrDie(buildrequest.MachineOSBuildOpts{ + MachineConfig: mc, + MachineOSConfig: mosc, + MachineConfigPool: mcp, + }) // First, we get a MachineOSBuild started as usual. waitForBuildToStart(t, cs, firstMosb) @@ -323,7 +328,11 @@ func TestMachineOSConfigChangeRestartsBuild(t *testing.T) { apiMosc := helpers.SetContainerfileContentsOnMachineOSConfig(ctx, t, cs.GetMcfgclient(), mosc, "FROM configs AS final\nRUN echo 'hello' > /etc/hello") - moscChangeMosb := buildrequest.NewMachineOSBuildFromAPIOrDie(ctx, cs.GetKubeclient(), apiMosc, mcp) + moscChangeMosb := buildrequest.NewMachineOSBuildOrDie(buildrequest.MachineOSBuildOpts{ + MachineConfig: mc, + MachineOSConfig: apiMosc, + MachineConfigPool: mcp, + }) kubeassert := helpers.AssertClientSet(t, cs).WithContext(ctx) @@ -524,9 +533,6 @@ func runOnClusterLayeringTest(t *testing.T, testOpts onClusterLayeringTestOpts) startedBuild := waitForBuildToStartForPoolAndConfig(t, cs, testOpts.poolName, mosc.Name) t.Logf("MachineOSBuild %q has started", startedBuild.Name) - // Assert that the build job has certain properties and configuration. - assertBuildJobIsAsExpected(t, cs, startedBuild) - t.Logf("Waiting for build completion...") // Create a child context for the build pod log streamer. This is so we can @@ -791,34 +797,6 @@ func waitForBuildToBeInterrupted(t *testing.T, cs *framework.ClientSet, startedB return mosb } -// Validates that the build job is configured correctly. In this case, -// "correctly" means that it has the correct container images. Future -// assertions could include things like ensuring that the proper volume mounts -// are present, etc. 
-func assertBuildJobIsAsExpected(t *testing.T, cs *framework.ClientSet, mosb *mcfgv1.MachineOSBuild) { - t.Helper() - - osImageURLConfig, err := ctrlcommon.GetOSImageURLConfig(context.TODO(), cs.GetKubeclient()) - require.NoError(t, err) - - mcoImages, err := ctrlcommon.GetImagesConfig(context.TODO(), cs.GetKubeclient()) - require.NoError(t, err) - - buildPod, err := ocltesthelper.GetPodFromJob(context.TODO(), cs, mosb.Status.Builder.Job.Name) - require.NoError(t, err) - - assertContainerIsUsingExpectedImage := func(c corev1.Container, containerName, expectedImage string) { - if c.Name == containerName { - assert.Equal(t, c.Image, expectedImage) - } - } - - for _, container := range buildPod.Spec.Containers { - assertContainerIsUsingExpectedImage(container, "image-build", mcoImages.MachineConfigOperator) - assertContainerIsUsingExpectedImage(container, "wait-for-done", osImageURLConfig.BaseOSContainerImage) - } -} - // Prepares for an on-cluster build test by performing the following: // - Gets the Docker Builder secret name from the MCO namespace. // - Creates the imagestream to use for the test. diff --git a/test/e2e-ocl-2of2/onclusterlayering_test.go b/test/e2e-ocl-2of2/onclusterlayering_test.go index 69da0fbe46..0ebef07df1 100644 --- a/test/e2e-ocl-2of2/onclusterlayering_test.go +++ b/test/e2e-ocl-2of2/onclusterlayering_test.go @@ -127,14 +127,21 @@ func TestGracefulBuildFailureRecovery(t *testing.T) { apiMosc.Spec.Containerfile = []mcfgv1.MachineOSContainerfile{} - updated, err := cs.MachineconfigurationV1Interface.MachineOSConfigs().Update(ctx, apiMosc, metav1.UpdateOptions{}) + updatedMosc, err := cs.MachineconfigurationV1Interface.MachineOSConfigs().Update(ctx, apiMosc, metav1.UpdateOptions{}) require.NoError(t, err) mcp, err := cs.MachineconfigurationV1Interface.MachineConfigPools().Get(ctx, layeredMCPName, metav1.GetOptions{}) require.NoError(t, err) // Compute the new MachineOSBuild image name. 
- moscChangeMosb := buildrequest.NewMachineOSBuildFromAPIOrDie(ctx, cs.GetKubeclient(), updated, mcp) + mc, err := cs.MachineconfigurationV1Interface.MachineConfigs().Get(ctx, mcp.Spec.Configuration.Name, metav1.GetOptions{}) + require.NoError(t, err) + + moscChangeMosb := buildrequest.NewMachineOSBuildOrDie(buildrequest.MachineOSBuildOpts{ + MachineConfig: mc, + MachineOSConfig: updatedMosc, + MachineConfigPool: mcp, + }) // Wait for the second build to start. secondMosb := waitForBuildToStart(t, cs, moscChangeMosb) @@ -396,7 +403,14 @@ func TestControllerEventuallyReconciles(t *testing.T) { createMachineOSConfig(t, cs, mosc) - mosb := buildrequest.NewMachineOSBuildFromAPIOrDie(ctx, cs.GetKubeclient(), mosc, mcp) + mc, err := cs.MachineconfigurationV1Interface.MachineConfigs().Get(ctx, mcp.Spec.Configuration.Name, metav1.GetOptions{}) + require.NoError(t, err) + + mosb := buildrequest.NewMachineOSBuildOrDie(buildrequest.MachineOSBuildOpts{ + MachineConfig: mc, + MachineOSConfig: mosc, + MachineConfigPool: mcp, + }) // Wait for the MachineOSBuild to exist. 
kubeassert := helpers.AssertClientSet(t, cs).WithContext(ctx).Eventually() @@ -509,7 +523,7 @@ func TestImageBuildDegradedOnFailureAndClearedOnBuildStart(t *testing.T) { }, } - updated, err := cs.MachineconfigurationV1Interface.MachineOSConfigs().Update(ctx, apiMosc, metav1.UpdateOptions{}) + updatedMosc, err := cs.MachineconfigurationV1Interface.MachineOSConfigs().Update(ctx, apiMosc, metav1.UpdateOptions{}) require.NoError(t, err) t.Logf("Fixed containerfile, waiting for new build to start") @@ -518,7 +532,14 @@ func TestImageBuildDegradedOnFailureAndClearedOnBuildStart(t *testing.T) { require.NoError(t, err) // Compute the new MachineOSBuild name - moscChangeMosb := buildrequest.NewMachineOSBuildFromAPIOrDie(ctx, cs.GetKubeclient(), updated, mcp) + mc, err := cs.MachineconfigurationV1Interface.MachineConfigs().Get(ctx, mcp.Spec.Configuration.Name, metav1.GetOptions{}) + require.NoError(t, err) + + moscChangeMosb := buildrequest.NewMachineOSBuildOrDie(buildrequest.MachineOSBuildOpts{ + MachineConfig: mc, + MachineOSConfig: updatedMosc, + MachineConfigPool: mcp, + }) // Wait for the second build to start secondMosb := waitForBuildToStart(t, cs, moscChangeMosb) @@ -588,7 +609,7 @@ func TestImageBuildDegradedOnFailureAndClearedOnBuildStart(t *testing.T) { }, } - updated, err = cs.MachineconfigurationV1Interface.MachineOSConfigs().Update(ctx, apiMosc, metav1.UpdateOptions{}) + updatedMosc, err = cs.MachineconfigurationV1Interface.MachineOSConfigs().Update(ctx, apiMosc, metav1.UpdateOptions{}) require.NoError(t, err) t.Logf("Modified containerfile, waiting for third build to start") @@ -597,8 +618,16 @@ func TestImageBuildDegradedOnFailureAndClearedOnBuildStart(t *testing.T) { mcp, err = cs.MachineconfigurationV1Interface.MachineConfigPools().Get(ctx, layeredMCPName, metav1.GetOptions{}) require.NoError(t, err) + // Get the updated MC to compute the new build + mc, err = cs.MachineconfigurationV1Interface.MachineConfigs().Get(ctx, mcp.Spec.Configuration.Name, 
metav1.GetOptions{}) + require.NoError(t, err) + // Compute the new MachineOSBuild name for the third build - thirdMoscMosb := buildrequest.NewMachineOSBuildFromAPIOrDie(ctx, cs.GetKubeclient(), updated, mcp) + thirdMoscMosb := buildrequest.NewMachineOSBuildOrDie(buildrequest.MachineOSBuildOpts{ + MachineConfig: mc, + MachineOSConfig: updatedMosc, + MachineConfigPool: mcp, + }) // Wait for the third build to start thirdMosb := waitForBuildToStart(t, cs, thirdMoscMosb) @@ -885,9 +914,6 @@ func runOnClusterLayeringTest(t *testing.T, testOpts onClusterLayeringTestOpts) startedBuild := waitForBuildToStartForPoolAndConfig(t, cs, testOpts.poolName, mosc.Name) t.Logf("MachineOSBuild %q has started", startedBuild.Name) - // Assert that the build job has certain properties and configuration. - assertBuildJobIsAsExpected(t, cs, startedBuild) - t.Logf("Waiting for build completion...") // Create a child context for the build pod log streamer. This is so we can @@ -1134,34 +1160,6 @@ func waitForBuildToBeInterrupted(t *testing.T, cs *framework.ClientSet, startedB return mosb } -// Validates that the build job is configured correctly. In this case, -// "correctly" means that it has the correct container images. Future -// assertions could include things like ensuring that the proper volume mounts -// are present, etc. 
-func assertBuildJobIsAsExpected(t *testing.T, cs *framework.ClientSet, mosb *mcfgv1.MachineOSBuild) { - t.Helper() - - osImageURLConfig, err := ctrlcommon.GetOSImageURLConfig(context.TODO(), cs.GetKubeclient()) - require.NoError(t, err) - - mcoImages, err := ctrlcommon.GetImagesConfig(context.TODO(), cs.GetKubeclient()) - require.NoError(t, err) - - buildPod, err := ocltesthelper.GetPodFromJob(context.TODO(), cs, mosb.Status.Builder.Job.Name) - require.NoError(t, err) - - assertContainerIsUsingExpectedImage := func(c corev1.Container, containerName, expectedImage string) { - if c.Name == containerName { - assert.Equal(t, c.Image, expectedImage) - } - } - - for _, container := range buildPod.Spec.Containers { - assertContainerIsUsingExpectedImage(container, "image-build", mcoImages.MachineConfigOperator) - assertContainerIsUsingExpectedImage(container, "wait-for-done", osImageURLConfig.BaseOSContainerImage) - } -} - // Prepares for an on-cluster build test by performing the following: // - Gets the Docker Builder secret name from the MCO namespace. // - Creates the imagestream to use for the test.