Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
15 changes: 2 additions & 13 deletions rollout/controller_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -799,7 +799,8 @@ func filterInformerActions(actions []core.Action) []core.Action {
action.Matches("list", "services") ||
action.Matches("watch", "services") ||
action.Matches("list", "ingresses") ||
action.Matches("watch", "ingresses") {
action.Matches("watch", "ingresses") ||
action.Matches("list", "pods") {
continue
}
ret = append(ret, action)
Expand Down Expand Up @@ -849,18 +850,6 @@ func (f *fixture) expectUpdatePodAction(p *corev1.Pod) int {
return len
}

// expectListPodAction registers an expected "list pods" action against the
// fake kube clientset for the given namespace and returns the index at which
// the expectation was recorded, so the test can later retrieve the matching
// observed action.
func (f *fixture) expectListPodAction(namespace string) int {
	// Avoid shadowing the builtin len with a local variable name.
	idx := len(f.kubeactions)
	f.kubeactions = append(f.kubeactions, core.NewListAction(schema.GroupVersionResource{Resource: "pods"}, schema.GroupVersionKind{Kind: "Pod", Version: "v1"}, namespace, metav1.ListOptions{}))
	return idx
}

// expectGetRolloutAction registers an expected "get" action for the given
// Rollout against the rollout (CRD) clientset expectations and returns the
// index at which it was recorded.
func (f *fixture) expectGetRolloutAction(rollout *v1alpha1.Rollout) int { //nolint:unused
	idx := len(f.actions)
	// Bug fix: the expectation must be appended to f.actions (rollout API
	// actions); the previous code appended f.actions onto f.kubeactions,
	// corrupting the kube expectation list and never recording the get on
	// the list whose length was returned.
	f.actions = append(f.actions, core.NewGetAction(schema.GroupVersionResource{Resource: "rollouts"}, rollout.Namespace, rollout.Name))
	return idx
}

func (f *fixture) expectCreateExperimentAction(ex *v1alpha1.Experiment) int {
action := core.NewCreateAction(schema.GroupVersionResource{Resource: "experiments"}, ex.Namespace, ex)
len := len(f.actions)
Expand Down
21 changes: 11 additions & 10 deletions rollout/ephemeralmetadata.go
Original file line number Diff line number Diff line change
Expand Up @@ -74,25 +74,26 @@ func (c *rolloutContext) syncEphemeralMetadata(ctx context.Context, rs *appsv1.R
return nil
}
modifiedRS, modified := replicasetutil.SyncReplicaSetEphemeralPodMetadata(rs, podMetadata)
if !modified {
return nil
}

// Used to access old ephemeral data when updating pods below
originalRSCopy := rs.DeepCopy()

// Order of the following two steps is important for race condition
// Order of the following two steps is important to minimize race condition
// First update replicasets, then pods owned by it.
// So that any replicas created in the interim between the two steps are using the new updated version.
// 1. Update ReplicaSet so that any new pods it creates will have the metadata
rs, err := c.updateReplicaSet(ctx, modifiedRS)
if err != nil {
c.log.Infof("failed to sync ephemeral metadata %v to ReplicaSet %s: %v", podMetadata, originalRSCopy.Name, err)
return fmt.Errorf("failed to sync ephemeral metadata: %w", err)
if modified {
rs, err := c.updateReplicaSet(ctx, modifiedRS)
if err != nil {
c.log.Infof("failed to sync ephemeral metadata %v to ReplicaSet %s: %v", podMetadata, originalRSCopy.Name, err)
return fmt.Errorf("failed to sync ephemeral metadata: %w", err)
}
c.log.Infof("synced ephemeral metadata %v to ReplicaSet %s", podMetadata, rs.Name)
}
c.log.Infof("synced ephemeral metadata %v to ReplicaSet %s", podMetadata, rs.Name)

// 2. Sync ephemeral metadata to pods
// 2. Sync ephemeral metadata to pods (always do this, even if replicaset wasn't modified so that we handle cases where
// the replicaset already had the correct metadata but some pods don't: e.g. a crash/OOM of the controller between the two steps,
// or simply a failure to update some pods in a previous attempt)
pods, err := replicasetutil.GetPodsOwnedByReplicaSet(ctx, c.kubeclientset, rs)
if err != nil {
return err
Expand Down
170 changes: 165 additions & 5 deletions rollout/ephemeralmetadata_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -124,7 +124,7 @@
rsGVK := schema.GroupVersionKind{Group: "apps", Version: "v1", Kind: "ReplicaSet"}
pod1 := corev1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "foo-abc123",

Check failure on line 127 in rollout/ephemeralmetadata_test.go

View check run for this annotation

SonarQubeCloud / SonarCloud Code Analysis

Define a constant instead of duplicating this literal "foo-abc123" 4 times.

See more on https://sonarcloud.io/project/issues?id=argoproj_argo-rollouts&issues=AZrhTJ1iLbCMLMQHc5Yb&open=AZrhTJ1iLbCMLMQHc5Yb&pullRequest=4477
Namespace: r1.Namespace,
Labels: map[string]string{
"foo": "bar",
Expand All @@ -134,7 +134,7 @@
},
}
pod2 := pod1.DeepCopy()
pod2.Name = "foo-abc456"

Check failure on line 137 in rollout/ephemeralmetadata_test.go

View check run for this annotation

SonarQubeCloud / SonarCloud Code Analysis

Define a constant instead of duplicating this literal "foo-abc456" 4 times.

See more on https://sonarcloud.io/project/issues?id=argoproj_argo-rollouts&issues=AZrhTJ1iLbCMLMQHc5Yc&open=AZrhTJ1iLbCMLMQHc5Yc&pullRequest=4477

f.rolloutLister = append(f.rolloutLister, r2)
f.objects = append(f.objects, r2)
Expand All @@ -144,7 +144,6 @@
f.expectUpdateRolloutStatusAction(r2) // Update Rollout conditions
rs2idx := f.expectCreateReplicaSetAction(rs2) // Create revision 2 ReplicaSet
rs1idx := f.expectUpdateReplicaSetAction(rs1) // update stable replicaset with stable metadata
f.expectListPodAction(r1.Namespace) // list pods to patch ephemeral data on revision 1 ReplicaSets pods
pod1Idx := f.expectUpdatePodAction(&pod1) // Update pod1 with ephemeral data
pod2Idx := f.expectUpdatePodAction(pod2) // Update pod2 with ephemeral data
f.expectUpdateReplicaSetAction(rs1) // scale revision 1 ReplicaSet down
Expand Down Expand Up @@ -229,7 +228,6 @@
f.expectPatchServiceAction(previewSvc, rs2PodHash) // Update preview service to point at revision 2 replicaset
f.expectUpdateReplicaSetAction(rs2) // scale revision 2 ReplicaSet up
rs1idx := f.expectUpdateReplicaSetAction(rs1) // update stable replicaset with stable metadata
f.expectListPodAction(r1.Namespace) // list pods to patch ephemeral data on revision 1 ReplicaSet's pods
pod1Idx := f.expectUpdatePodAction(&pod1) // Update pod1 with ephemeral data
pod2Idx := f.expectUpdatePodAction(pod2) // Update pod2 with ephemeral data
f.expectPatchRolloutAction(r2) // Patch Rollout status
Expand Down Expand Up @@ -260,10 +258,18 @@
}

func TestReconcileEphemeralMetadata(t *testing.T) {
newRS := &v1.ReplicaSet{}
stableRS := &v1.ReplicaSet{}
selector := &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}
newRS := &v1.ReplicaSet{
Spec: v1.ReplicaSetSpec{Selector: selector},
}
stableRS := &v1.ReplicaSet{
Spec: v1.ReplicaSetSpec{Selector: selector},
}

mockContext := &rolloutContext{
reconcilerBase: reconcilerBase{
kubeclientset: k8sfake.NewSimpleClientset(),
},
rollout: &v1alpha1.Rollout{
Spec: v1alpha1.RolloutSpec{
Strategy: v1alpha1.RolloutStrategy{
Expand All @@ -279,7 +285,11 @@
},
newRS: newRS,
stableRS: stableRS,
otherRSs: []*v1.ReplicaSet{new(v1.ReplicaSet), new(v1.ReplicaSet)},
otherRSs: []*v1.ReplicaSet{{
Spec: v1.ReplicaSetSpec{Selector: selector},
}, {
Spec: v1.ReplicaSetSpec{Selector: selector},
}},
}

// Scenario 1: upgrading state when the new ReplicaSet is a canary
Expand All @@ -292,6 +302,156 @@
assert.NoError(t, err)
}

// TestSyncCanaryEphemeralMetadataReplicaSetAlreadyPatched verifies that even if the ephemeral metadata of a ReplicaSet
// already has been patched, then it still patches the pods of the ReplicaSet that do not have the metadata yet.
// TestSyncCanaryEphemeralMetadataReplicaSetAlreadyPatched verifies that even if the ephemeral metadata of a ReplicaSet
// already has been patched, then it still patches the pods of the ReplicaSet that do not have the metadata yet.
// This covers the recovery case where the controller previously updated the ReplicaSet template but crashed (or
// failed) before updating all of its pods.
func TestSyncCanaryEphemeralMetadataReplicaSetAlreadyPatched(t *testing.T) {
	f := newFixture(t)
	defer f.Close()

	// Canary rollout at revision 1 with distinct ephemeral metadata for canary vs stable pods.
	r1 := newCanaryRollout("foo", 3, nil, nil, ptr.To[int32](1), intstr.FromInt32(1), intstr.FromInt32(1))
	r1.Annotations[annotations.RevisionAnnotation] = "1"
	r1.Spec.Strategy.Canary.CanaryMetadata = &v1alpha1.PodTemplateMetadata{
		Labels: map[string]string{
			"role": "canary",
		},
	}
	r1.Spec.Strategy.Canary.StableMetadata = &v1alpha1.PodTemplateMetadata{
		Labels: map[string]string{
			"role": "stable",
		},
	}

	// Create a ReplicaSet that already has the stable metadata label
	// (i.e. step 1 of syncEphemeralMetadata already completed on a previous reconcile).
	rs1 := newReplicaSetWithStatus(r1, 3, 3)
	rs1.Spec.Template.Labels = map[string]string{
		"role": "stable",
	}

	rsGVK := schema.GroupVersionKind{Group: "apps", Version: "v1", Kind: "ReplicaSet"}

	// pod1 already has the stable metadata label and should not be updated
	pod1 := corev1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "foo-abc123",
			Namespace: r1.Namespace,
			Labels: map[string]string{
				"foo":                        "bar",
				"role":                       "stable",
				"rollouts-pod-template-hash": r1.Status.CurrentPodHash,
			},
			// Owner reference is what GetPodsOwnedByReplicaSet uses to associate the pod with rs1.
			OwnerReferences: []metav1.OwnerReference{*metav1.NewControllerRef(rs1, rsGVK)},
		},
	}

	// pod2 does not have the stable metadata label and must be updated
	pod2 := pod1.DeepCopy()
	pod2.Name = "foo-abc456"
	pod2.Labels = map[string]string{
		"foo":                        "bar",
		"role":                       "canary",
		"rollouts-pod-template-hash": r1.Status.CurrentPodHash,
	}

	f.rolloutLister = append(f.rolloutLister, r1)
	f.objects = append(f.objects, r1)
	f.kubeobjects = append(f.kubeobjects, rs1, &pod1, pod2)
	f.replicaSetLister = append(f.replicaSetLister, rs1)

	// NOTE: the order of expect* registrations below must match the exact
	// sequence of actions the controller performs during reconciliation.
	f.expectPatchRolloutAction(r1)
	// ReplicaSet should not be updated since it already has the stable metadata
	// pod1 should not be updated since it already has the stable metadata
	pod2Idx := f.expectUpdatePodAction(pod2) // Update pod2 with ephemeral data

	f.run(getKey(r1, t))

	// pod2 should have been updated to have the stable metadata
	expectedPodLabels := map[string]string{
		"foo":                        "bar",
		"role":                       "stable",
		"rollouts-pod-template-hash": r1.Status.CurrentPodHash,
	}
	updatedPod2 := f.getUpdatedPod(pod2Idx)
	assert.Equal(t, expectedPodLabels, updatedPod2.Labels)
}

// TestSyncBlueGreenEphemeralMetadataReplicaSetAlreadyPatched verifies that even if the ephemeral metadata of a ReplicaSet
// already has been patched, then it still patches the pods of the ReplicaSet that do not have the metadata yet.
// TestSyncBlueGreenEphemeralMetadataReplicaSetAlreadyPatched verifies that even if the ephemeral metadata of a ReplicaSet
// already has been patched, then it still patches the pods of the ReplicaSet that do not have the metadata yet.
// Blue-green counterpart of the canary test above: covers recovery when the ReplicaSet template was updated on a
// previous reconcile but some pods were left without the metadata.
func TestSyncBlueGreenEphemeralMetadataReplicaSetAlreadyPatched(t *testing.T) {
	f := newFixture(t)
	defer f.Close()

	// Blue-green rollout with distinct ephemeral metadata for preview vs active pods.
	r1 := newBlueGreenRollout("foo", 3, nil, "active", "preview")
	r1.Annotations[annotations.RevisionAnnotation] = "1"
	r1.Spec.Strategy.BlueGreen.PreviewMetadata = &v1alpha1.PodTemplateMetadata{
		Labels: map[string]string{
			"role": "preview",
		},
	}
	r1.Spec.Strategy.BlueGreen.ActiveMetadata = &v1alpha1.PodTemplateMetadata{
		Labels: map[string]string{
			"role": "active",
		},
	}

	// Create a ReplicaSet that already has the active metadata label
	// (i.e. step 1 of syncEphemeralMetadata already completed on a previous reconcile).
	rs1 := newReplicaSetWithStatus(r1, 3, 3)
	rs1.Spec.Template.Labels = map[string]string{
		"role": "active",
	}

	rsGVK := schema.GroupVersionKind{Group: "apps", Version: "v1", Kind: "ReplicaSet"}

	// pod1 already has the active metadata label and should not be updated
	pod1 := corev1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "foo-abc123",
			Namespace: r1.Namespace,
			Labels: map[string]string{
				"foo":                        "bar",
				"role":                       "active",
				"rollouts-pod-template-hash": r1.Status.CurrentPodHash,
			},
			// Owner reference is what GetPodsOwnedByReplicaSet uses to associate the pod with rs1.
			OwnerReferences: []metav1.OwnerReference{*metav1.NewControllerRef(rs1, rsGVK)},
		},
	}

	// pod2 does not have the active metadata label and must be updated
	pod2 := pod1.DeepCopy()
	pod2.Name = "foo-abc456"
	pod2.Labels = map[string]string{
		"foo":                        "bar",
		"role":                       "preview",
		"rollouts-pod-template-hash": r1.Status.CurrentPodHash,
	}

	// Blue-green reconciliation also touches the active/preview services.
	previewSvc := newService("preview", 80, nil, r1)
	activeSvc := newService("active", 80, nil, r1)

	f.rolloutLister = append(f.rolloutLister, r1)
	f.objects = append(f.objects, r1)
	f.kubeobjects = append(f.kubeobjects, rs1, &pod1, pod2, previewSvc, activeSvc)
	f.replicaSetLister = append(f.replicaSetLister, rs1)
	f.serviceLister = append(f.serviceLister, activeSvc, previewSvc)

	// NOTE: the order of expect* registrations below must match the exact
	// sequence of actions the controller performs during reconciliation.
	f.expectPatchRolloutAction(r1)
	// ReplicaSet should not be updated since it already has the active metadata
	f.expectPatchServiceAction(previewSvc, rs1.Labels[v1alpha1.DefaultRolloutUniqueLabelKey])
	f.expectPatchServiceAction(activeSvc, rs1.Labels[v1alpha1.DefaultRolloutUniqueLabelKey])
	// pod1 should not be updated since it already has the active metadata
	pod2Idx := f.expectUpdatePodAction(pod2) // Update pod2 with ephemeral data

	f.run(getKey(r1, t))

	// pod2 should have been updated to have the active metadata
	expectedPodLabels := map[string]string{
		"foo":                        "bar",
		"role":                       "active",
		"rollouts-pod-template-hash": r1.Status.CurrentPodHash,
	}
	updatedPod2 := f.getUpdatedPod(pod2Idx)
	assert.Equal(t, expectedPodLabels, updatedPod2.Labels)
}

// TestSyncEphemeralMetadata verifies that syncEphemeralMetadata correctly applies metadata to pods
// and handles retry logic when pod updates encounter conflicts.
func TestSyncEphemeralMetadata(t *testing.T) {
Expand Down
Loading