Skip to content

Commit

Permalink
Merge pull request #1796 from dfajmon/pod-count
Browse files Browse the repository at this point in the history
OCPBUGS-36233: Replicas hook considers control plane topology
  • Loading branch information
openshift-merge-bot[bot] authored Oct 4, 2024
2 parents 756adf2 + 2926c85 commit 65e0327
Show file tree
Hide file tree
Showing 2 changed files with 31 additions and 112 deletions.
18 changes: 7 additions & 11 deletions pkg/operator/csi/csidrivercontrollerservicecontroller/helpers.go
Original file line number Diff line number Diff line change
Expand Up @@ -12,9 +12,7 @@ import (
appsv1 "k8s.io/api/apps/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/labels"
corev1 "k8s.io/client-go/informers/core/v1"
corev1listers "k8s.io/client-go/listers/core/v1"

configv1 "github.com/openshift/api/config/v1"
opv1 "github.com/openshift/api/operator/v1"
Expand Down Expand Up @@ -141,22 +139,20 @@ func WithSecretHashAnnotationHook(
}
}

// WithReplicasHook sets the deployment.Spec.Replicas field according to the number
// of available nodes. If the number of available nodes is bigger than one, then the
// number of replicas will be two. The number of nodes is determined by the node
// selector specified in the field deployment.Spec.Templates.NodeSelector.
// When node ports or hostNetwork are used, maxSurge=0 should be set in the
// WithReplicasHook sets the deployment.Spec.Replicas field according to the ControlPlaneTopology
// mode. If the topology is set to 'HighlyAvailable' then the number of replicas will be two.
// Else it will be one. When node ports or hostNetwork are used, maxSurge=0 should be set in the
// Deployment RollingUpdate strategy to prevent the new pod from getting stuck
// waiting for a node with free ports.
func WithReplicasHook(nodeLister corev1listers.NodeLister) dc.DeploymentHookFunc {
func WithReplicasHook(configInformer configinformers.SharedInformerFactory) dc.DeploymentHookFunc {
return func(_ *opv1.OperatorSpec, deployment *appsv1.Deployment) error {
nodeSelector := deployment.Spec.Template.Spec.NodeSelector
nodes, err := nodeLister.List(labels.SelectorFromSet(nodeSelector))
infra, err := configInformer.Config().V1().Infrastructures().Lister().Get(infraConfigName)
if err != nil {
return err
}

replicas := int32(1)
if len(nodes) > 1 {
if infra.Status.ControlPlaneTopology == configv1.HighlyAvailableTopologyMode {
replicas = int32(2)
}
deployment.Spec.Replicas = &replicas
Expand Down
125 changes: 24 additions & 101 deletions pkg/operator/csi/csidrivercontrollerservicecontroller/helpers_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -15,8 +15,6 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
coreinformers "k8s.io/client-go/informers"
fakecore "k8s.io/client-go/kubernetes/fake"

"github.com/google/go-cmp/cmp"
"sigs.k8s.io/yaml"
Expand Down Expand Up @@ -159,135 +157,60 @@ func TestWithObservedProxyDeploymentHook(t *testing.T) {
}

func TestWithReplicasHook(t *testing.T) {
var (
argsLevel2 = 2
masterNodeLabels = map[string]string{"node-role.kubernetes.io/master": ""}
workerNodeLabels = map[string]string{"node-role.kubernetes.io/worker": ""}
)
argsLevel2 := 2
testCases := []struct {
name string
initialDriver *fakeDriverInstance
initialNodes []*v1.Node
infra *configv1.Infrastructure
initialDeployment *appsv1.Deployment
expectedDeployment *appsv1.Deployment
expectError bool
}{
{
name: "three-node control-plane",
initialDriver: makeFakeDriverInstance(),
initialNodes: []*v1.Node{
makeNode("A", masterNodeLabels),
makeNode("B", masterNodeLabels),
makeNode("C", masterNodeLabels),
},
name: "highly available topology",
infra: makeInfraWithCPTopology(configv1.HighlyAvailableTopologyMode),
initialDeployment: makeDeployment(
defaultClusterID,
argsLevel2,
defaultImages(),
withDeploymentReplicas(1),
withDeploymentGeneration(1, 0)),
expectedDeployment: makeDeployment(
defaultClusterID,
argsLevel2,
defaultImages(),
withDeploymentReplicas(2),
withDeploymentGeneration(1, 0)),
expectError: false,
},
{
name: "three-node control-plane with one worker node",
initialDriver: makeFakeDriverInstance(),
initialNodes: []*v1.Node{
makeNode("A", masterNodeLabels),
makeNode("B", masterNodeLabels),
makeNode("C", masterNodeLabels),
makeNode("D", workerNodeLabels),
},
initialDeployment: makeDeployment(
defaultClusterID,
argsLevel2,
defaultImages(),
withDeploymentReplicas(1),
withDeploymentGeneration(1, 0)),
expectedDeployment: makeDeployment(
defaultClusterID,
argsLevel2,
defaultImages(),
withDeploymentReplicas(2),
withDeploymentGeneration(1, 0)),
expectError: false,
},
{
name: "two-node control-plane",
initialDriver: makeFakeDriverInstance(),
initialNodes: []*v1.Node{
makeNode("A", masterNodeLabels),
makeNode("B", masterNodeLabels),
},
initialDeployment: makeDeployment(
defaultClusterID,
argsLevel2,
defaultImages(),
withDeploymentReplicas(1),
withDeploymentGeneration(1, 0)),
withDeploymentGeneration(1, 0),
),
expectedDeployment: makeDeployment(
defaultClusterID,
argsLevel2,
defaultImages(),
withDeploymentReplicas(2),
withDeploymentGeneration(1, 0)),
expectError: false,
withDeploymentGeneration(1, 0),
),
},
{
name: "single-node control-plane with one worker node",
initialDriver: makeFakeDriverInstance(),
initialNodes: []*v1.Node{
makeNode("A", masterNodeLabels),
makeNode("B", workerNodeLabels),
},
initialDeployment: makeDeployment(
defaultClusterID,
name: "single replica topology",
infra: makeInfraWithCPTopology(configv1.SingleReplicaTopologyMode),
initialDeployment: makeDeployment(defaultClusterID,
argsLevel2,
defaultImages(),
withDeploymentReplicas(1),
withDeploymentGeneration(1, 0)),
expectedDeployment: makeDeployment(
defaultClusterID,
withDeploymentGeneration(1, 0),
),
expectedDeployment: makeDeployment(defaultClusterID,
argsLevel2,
defaultImages(),
withDeploymentReplicas(1),
withDeploymentGeneration(1, 0)),
expectError: false,
withDeploymentGeneration(1, 0),
),
},
}

for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
var initialObjects []runtime.Object

// Add deployment to slice of objects to be added to client and informer
initialObjects = append(initialObjects, tc.initialDeployment)

// Do the same with nodes
for _, node := range tc.initialNodes {
initialObjects = append(initialObjects, node)
}

// Create fake client and informer
coreClient := fakecore.NewSimpleClientset(initialObjects...)
coreInformerFactory := coreinformers.NewSharedInformerFactory(coreClient, 0 /*no resync */)

// Fill the fake informer with the initial deployment and nodes
coreInformerFactory.Apps().V1().Deployments().Informer().GetIndexer().Add(tc.initialDeployment)
for _, node := range initialObjects {
coreInformerFactory.Core().V1().Nodes().Informer().GetIndexer().Add(node)
}

fn := WithReplicasHook(coreInformerFactory.Core().V1().Nodes().Lister())
err := fn(&tc.initialDriver.Spec, tc.initialDeployment)
if err != nil && !tc.expectError {
t.Errorf("Expected no error running hook function, got: %v", err)
initialInfras := []runtime.Object{tc.infra}
configClient := fakeconfig.NewSimpleClientset(initialInfras...)
configInformerFactory := configinformers.NewSharedInformerFactory(configClient, 0)
configInformerFactory.Config().V1().Infrastructures().Informer().GetIndexer().Add(initialInfras[0])

fn := WithReplicasHook(configInformerFactory)
err := fn(nil, tc.initialDeployment)
if err != nil {
t.Errorf("Unexpected error: %v", err)
}
if !equality.Semantic.DeepEqual(tc.initialDeployment, tc.expectedDeployment) {
t.Errorf("Unexpected Deployment content:\n%s", cmp.Diff(tc.initialDeployment, tc.expectedDeployment))
Expand Down

0 comments on commit 65e0327

Please sign in to comment.