diff --git a/api/v1beta2/foundationdb_labels.go b/api/v1beta2/foundationdb_labels.go index 98ddac4d6..153eead75 100644 --- a/api/v1beta2/foundationdb_labels.go +++ b/api/v1beta2/foundationdb_labels.go @@ -94,8 +94,4 @@ const ( // FDBLocalityDataHallKey represents the key in the locality map that holds // the data hall. FDBLocalityDataHallKey = "data_hall" - - // FDBLocalityDCIDlKey represents the key in the locality map that holds - // the data center ID. - FDBLocalityDCIDlKey = "dcid" ) diff --git a/api/v1beta2/foundationdbcluster_types.go b/api/v1beta2/foundationdbcluster_types.go index cb201a052..7d6bbd561 100644 --- a/api/v1beta2/foundationdbcluster_types.go +++ b/api/v1beta2/foundationdbcluster_types.go @@ -237,6 +237,14 @@ type FoundationDBClusterSpec struct { // +kubebuilder:default:=split ImageType *ImageType `json:"imageType,omitempty"` + // EnableNodeWatch determines if the fdb-kubernetes-monitor should read the node resource + // and provide the node labels as custom environment variables. This feature is only + // available with the UnifiedImage. The node label keys will be prefixed by "NODE_LABEL" and "/" and "." + // will be replaced in the key as "_". E.g. from the label "foundationdb.org/testing = awesome" the env variable + // "NODE_LABEL_FOUNDATIONDB_ORG_TESTING = awesome" will be generated. + // This setting requires that the corresponding RBAC permissions for the unified image are set up. + EnableNodeWatch *bool `json:"enableNodeWatch,omitempty"` + // MaxZonesWithUnavailablePods defines the maximum number of zones that can have unavailable pods during the update process. // When unset, there is no limit to the number of zones with unavailable pods. 
MaxZonesWithUnavailablePods *int `json:"maxZonesWithUnavailablePods,omitempty"` @@ -2575,6 +2583,11 @@ func (cluster *FoundationDBCluster) DesiredImageType() ImageType { return ImageTypeSplit } +// EnableNodeWatch if enabled the fdb-kubernetes-monitor will provide the node labels as custom environment variables. +func (cluster *FoundationDBCluster) EnableNodeWatch() bool { + return pointer.BoolDeref(cluster.Spec.EnableNodeWatch, false) +} + // GetIgnoreTerminatingPodsSeconds returns the value of IgnoreTerminatingPodsSeconds or defaults to 10 minutes. func (cluster *FoundationDBCluster) GetIgnoreTerminatingPodsSeconds() int { return pointer.IntDeref(cluster.Spec.AutomationOptions.IgnoreTerminatingPodsSeconds, int((10 * time.Minute).Seconds())) diff --git a/api/v1beta2/zz_generated.deepcopy.go b/api/v1beta2/zz_generated.deepcopy.go index dd9133957..a068ecfdf 100644 --- a/api/v1beta2/zz_generated.deepcopy.go +++ b/api/v1beta2/zz_generated.deepcopy.go @@ -735,6 +735,11 @@ func (in *FoundationDBClusterSpec) DeepCopyInto(out *FoundationDBClusterSpec) { *out = new(ImageType) **out = **in } + if in.EnableNodeWatch != nil { + in, out := &in.EnableNodeWatch, &out.EnableNodeWatch + *out = new(bool) + **out = **in + } if in.MaxZonesWithUnavailablePods != nil { in, out := &in.MaxZonesWithUnavailablePods, &out.MaxZonesWithUnavailablePods *out = new(int) diff --git a/config/crd/bases/apps.foundationdb.org_foundationdbclusters.yaml b/config/crd/bases/apps.foundationdb.org_foundationdbclusters.yaml index 0fbc2ab12..640b4df8b 100644 --- a/config/crd/bases/apps.foundationdb.org_foundationdbclusters.yaml +++ b/config/crd/bases/apps.foundationdb.org_foundationdbclusters.yaml @@ -10419,6 +10419,8 @@ spec: usable_regions: type: integer type: object + enableNodeWatch: + type: boolean faultDomain: properties: key: diff --git a/docs/cluster_spec.md b/docs/cluster_spec.md index 7a6ec36e1..41c0e51d8 100644 --- a/docs/cluster_spec.md +++ b/docs/cluster_spec.md @@ -263,6 +263,7 @@ 
FoundationDBClusterSpec defines the desired state of a cluster. | labels | LabelConfig allows customizing labels used by the operator. | [LabelConfig](#labelconfig) | false | | useExplicitListenAddress | UseExplicitListenAddress determines if we should add a listen address that is separate from the public address. **Deprecated: This setting will be removed in the next major release.** | *bool | false | | imageType | ImageType defines the image type that should be used for the FoundationDBCluster deployment. When the type is set to \"unified\" the deployment will use the new fdb-kubernetes-monitor. Otherwise the main container and the sidecar container will use different images. Default: split | *[ImageType](#imagetype) | false | +| enableNodeWatch | EnableNodeWatch determines if the fdb-kubernetes-monitor should read the node resource and provide the node labels as custom environment variables. This feature is only available with the UnifiedImage. The node label keys will be prefixed by \"NODE_LABEL\" and \"/\" and \".\" will be replaced in the key as \"_\". E.g. from the label \"foundationdb.org/testing = awesome\" the env variable \"NODE_LABEL_FOUNDATIONDB_ORG_TESTING = awesome\" will be generated. This setting requires that the corresponding RBAC permissions for the unified image are set up. | *bool | false | | maxZonesWithUnavailablePods | MaxZonesWithUnavailablePods defines the maximum number of zones that can have unavailable pods during the update process. When unset, there is no limit to the number of zones with unavailable pods. 
| *int | false | [Back to TOC](#table-of-contents) diff --git a/docs/manual/fault_domains.md b/docs/manual/fault_domains.md index ceb5ccdfc..ca28d8b00 100644 --- a/docs/manual/fault_domains.md +++ b/docs/manual/fault_domains.md @@ -21,6 +21,7 @@ spec: ``` This configuration will set the fdbmonitor configuration for all processes to use the value from `spec.nodeName` on the pod as the `zoneid` locality field: + ```toml [fdbserver.1] locality_zoneid = $FDB_ZONE_ID diff --git a/e2e/fixtures/fdb_operator_client.go b/e2e/fixtures/fdb_operator_client.go index dc6708745..76d5305cf 100644 --- a/e2e/fixtures/fdb_operator_client.go +++ b/e2e/fixtures/fdb_operator_client.go @@ -26,6 +26,8 @@ import ( "errors" "html/template" "io" + rbacv1 "k8s.io/api/rbac/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "log" "time" @@ -46,18 +48,25 @@ import ( const ( operatorDeploymentName = "fdb-kubernetes-operator-controller-manager" foundationdbServiceAccount = "fdb-kubernetes" + foundationdbNodeRole = "fdb-kubernetes-node-watcher" // The configuration for the RBAC setup for the operator deployment operatorRBAC = `apiVersion: v1 kind: ServiceAccount metadata: name: fdb-kubernetes-operator-controller-manager namespace: {{ .Namespace }} + labels: + foundationdb.org/testing: chaos + foundationdb.org/user: {{ .User }} --- apiVersion: rbac.authorization.k8s.io/v1 kind: Role metadata: name: fdb-kubernetes-operator-manager-role namespace: {{ .Namespace }} + labels: + foundationdb.org/testing: chaos + foundationdb.org/user: {{ .User }} rules: - apiGroups: - "" @@ -155,6 +164,9 @@ kind: RoleBinding metadata: name: fdb-kubernetes-operator-manager-rolebinding namespace: {{ .Namespace }} + labels: + foundationdb.org/testing: chaos + foundationdb.org/user: {{ .User }} roleRef: apiGroup: rbac.authorization.k8s.io kind: Role @@ -167,6 +179,9 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: name: {{ .Namespace }}-operator-manager-clusterrole + labels: + 
foundationdb.org/testing: chaos + foundationdb.org/user: {{ .User }} rules: - apiGroups: - "" @@ -181,6 +196,9 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: name: {{ .Namespace }}-operator-manager-clusterrolebinding + labels: + foundationdb.org/testing: chaos + foundationdb.org/user: {{ .User }} roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole @@ -497,6 +515,8 @@ type operatorConfig struct { CPURequests string // MemoryRequests defined the Memory that should be requested. MemoryRequests string + // Defines the user that runs the current e2e tests. + User string } // SidecarConfig represents the configuration for a sidecar. This can be used for templating. @@ -590,6 +610,7 @@ func (factory *Factory) getOperatorConfig(namespace string) *operatorConfig { ImagePullPolicy: factory.getImagePullPolicy(), CPURequests: cpuRequests, MemoryRequests: MemoryRequests, + User: factory.options.username, } } @@ -628,6 +649,25 @@ func (factory *Factory) CreateFDBOperatorIfAbsent(namespace string) error { ).NotTo(gomega.HaveOccurred()) } + // Make sure we delete the cluster scoped objects. 
+ factory.AddShutdownHook(func() error { + factory.Delete(&rbacv1.ClusterRole{ + ObjectMeta: metav1.ObjectMeta{ + Name: namespace + "-operator-manager-clusterrole", + Namespace: namespace, + }, + }) + + factory.Delete(&rbacv1.ClusterRoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: namespace + "-operator-manager-clusterrolebinding", + Namespace: namespace, + }, + }) + + return nil + }) + deploymentTemplate := operatorDeployment if factory.UseUnifiedImage() { deploymentTemplate = operatorDeploymentUnifiedImage diff --git a/e2e/fixtures/kubernetes_fixtures.go b/e2e/fixtures/kubernetes_fixtures.go index 16c6f1208..03c381412 100644 --- a/e2e/fixtures/kubernetes_fixtures.go +++ b/e2e/fixtures/kubernetes_fixtures.go @@ -21,7 +21,6 @@ package fixtures import ( - ctx "context" "log" "k8s.io/apimachinery/pkg/runtime" @@ -31,7 +30,6 @@ import ( "github.com/onsi/gomega" corev1 "k8s.io/api/core/v1" rbacv1 "k8s.io/api/rbac/v1" - k8serrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -98,17 +96,13 @@ func (factory *Factory) createNamespace(suffix string) string { log.Printf("using namespace %s for testing", namespace) factory.AddShutdownHook(func() error { log.Printf("finished all tests, start deleting namespace %s\n", namespace) - err := factory.GetControllerRuntimeClient(). 
- Delete(ctx.Background(), &corev1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: namespace, - }, - }) - if k8serrors.IsNotFound(err) { - return nil - } + factory.Delete(&corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: namespace, + }, + }) - return err + return nil }) return namespace @@ -171,16 +165,78 @@ func (factory *Factory) ensureRBACSetupExists(namespace string) { }, RoleRef: rbacv1.RoleRef{ Name: foundationdbServiceAccount, - APIGroup: "rbac.authorization.k8s.io", + APIGroup: rbacv1.GroupName, Kind: "Role", }, Subjects: []rbacv1.Subject{ { - Kind: "ServiceAccount", + Kind: rbacv1.ServiceAccountKind, Name: foundationdbServiceAccount, }, }, })).ToNot(gomega.HaveOccurred()) + + nodeRoleName := namespace + "-" + foundationdbNodeRole + gomega.Expect(factory.CreateIfAbsent(&rbacv1.ClusterRole{ + ObjectMeta: metav1.ObjectMeta{ + Name: nodeRoleName, + Labels: factory.GetDefaultLabels(), + Namespace: namespace, + }, + Rules: []rbacv1.PolicyRule{ + { + APIGroups: []string{ + "", + }, + Resources: []string{ + "nodes", + }, + Verbs: []string{ + "get", + "watch", + "list", + }, + }, + }, + })).ToNot(gomega.HaveOccurred()) + + gomega.Expect(factory.CreateIfAbsent(&rbacv1.ClusterRoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: nodeRoleName, + Labels: factory.GetDefaultLabels(), + Namespace: namespace, + }, + RoleRef: rbacv1.RoleRef{ + Name: nodeRoleName, + APIGroup: rbacv1.GroupName, + Kind: "ClusterRole", + }, + Subjects: []rbacv1.Subject{ + { + Kind: rbacv1.ServiceAccountKind, + Name: foundationdbServiceAccount, + Namespace: namespace, + }, + }, + })).ToNot(gomega.HaveOccurred()) + + factory.AddShutdownHook(func() error { + factory.Delete(&rbacv1.ClusterRole{ + ObjectMeta: metav1.ObjectMeta{ + Name: nodeRoleName, + Namespace: namespace, + }, + }) + + factory.Delete(&rbacv1.ClusterRoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: nodeRoleName, + Namespace: namespace, + }, + }) + + return nil + }) } // LoadControllerRuntimeFromContext will load a 
client.Client from the provided context. The context must be existing in the diff --git a/e2e/scripts/remove_namespaces b/e2e/scripts/remove_namespaces index ba60985fe..047c7ca73 100755 --- a/e2e/scripts/remove_namespaces +++ b/e2e/scripts/remove_namespaces @@ -33,3 +33,5 @@ kubectl -n chaos-testing delete schedules -l "foundationdb.org/user=$USERNAME,fo kubectl -n chaos-testing delete networkchaos -l "foundationdb.org/user=$USERNAME,foundationdb.org/testing=chaos" --wait=false --ignore-not-found kubectl -n chaos-testing delete iochaos -l "foundationdb.org/user=$USERNAME,foundationdb.org/testing=chaos" --wait=false --ignore-not-found kubectl -n chaos-testing delete podchaos -l "foundationdb.org/user=$USERNAME,foundationdb.org/testing=chaos" --wait=false --ignore-not-found +kubectl delete clusterrolebinding -l "foundationdb.org/user=$USERNAME,foundationdb.org/testing=chaos" --wait=false --ignore-not-found +kubectl delete clusterrole -l "foundationdb.org/user=$USERNAME,foundationdb.org/testing=chaos" --wait=false --ignore-not-found diff --git a/e2e/test_operator/operator_test.go b/e2e/test_operator/operator_test.go index af50cd9fc..43527ffc2 100644 --- a/e2e/test_operator/operator_test.go +++ b/e2e/test_operator/operator_test.go @@ -2400,4 +2400,76 @@ var _ = Describe("Operator", Label("e2e", "pr"), func() { )).NotTo(HaveOccurred()) }) }) + + // @johscheuer: Enable test once the CRD is updated. + PWhen("enabling the node watch feature", func() { + var initialParameters fdbv1beta2.FoundationDBCustomParameters + + BeforeEach(func() { + // If we are not using the unified image, we can skip this test. + if !fdbCluster.GetCluster().UseUnifiedImage() { + Skip("The sidecar image doesn't support reading node labels") + } + + // Enable the node watch feature. 
+ spec := fdbCluster.GetCluster().Spec.DeepCopy() + spec.EnableNodeWatch = pointer.Bool(true) + fdbCluster.UpdateClusterSpecWithSpec(spec) + Expect(fdbCluster.WaitForReconciliation()).NotTo(HaveOccurred()) + }) + + It("should have enabled the node watch feature on all Pods", func() { + pods := fdbCluster.GetPods().Items + for _, pod := range pods { + for _, container := range pod.Spec.Containers { + if container.Name != fdbv1beta2.MainContainerName { + continue + } + + Expect(container.Args).To(ContainElements("--enable-node-watch")) + } + } + + initialParameters = fdbCluster.GetCustomParameters( + fdbv1beta2.ProcessClassStorage, + ) + + // Update the storage processes to have the new locality. + Expect(fdbCluster.SetCustomParameters( + fdbv1beta2.ProcessClassStorage, + append( + initialParameters, + "locality_os=$NODE_LABEL_KUBERNETES_IO_OS", + ), + true, + )).NotTo(HaveOccurred()) + + Eventually(func(g Gomega) bool { + status := fdbCluster.GetStatus() + for _, process := range status.Cluster.Processes { + if process.ProcessClass != fdbv1beta2.ProcessClassStorage { + continue + } + log.Println(process.Locality) + g.Expect(process.Locality).To(HaveKey("os")) + } + + return true + }) + }) + + AfterEach(func() { + Expect(fdbCluster.SetCustomParameters( + fdbv1beta2.ProcessClassStorage, + initialParameters, + false, + )).NotTo(HaveOccurred()) + + spec := fdbCluster.GetCluster().Spec.DeepCopy() + spec.EnableNodeWatch = pointer.Bool(false) + fdbCluster.UpdateClusterSpecWithSpec(spec) + Expect(fdbCluster.WaitForReconciliation()).NotTo(HaveOccurred()) + + }) + }) }) diff --git a/internal/locality/locality_test.go b/internal/locality/locality_test.go index 08cb10bcd..9d51bcb10 100644 --- a/internal/locality/locality_test.go +++ b/internal/locality/locality_test.go @@ -1354,8 +1354,8 @@ var _ = Describe("Localities", func() { Context("with coordinators divided across two DCs", func() { BeforeEach(func() { for _, process := range status.Cluster.Processes { - if 
process.Locality[fdbv1beta2.FDBLocalityDCIDlKey] == "dc3" { - process.Locality[fdbv1beta2.FDBLocalityDCIDlKey] = "dc1" + if process.Locality[fdbv1beta2.FDBLocalityDCIDKey] == "dc3" { + process.Locality[fdbv1beta2.FDBLocalityDCIDKey] = "dc1" } } }) diff --git a/internal/monitor_conf.go b/internal/monitor_conf.go index a4972179e..7d049664e 100644 --- a/internal/monitor_conf.go +++ b/internal/monitor_conf.go @@ -228,18 +228,27 @@ func GetMonitorProcessConfiguration(cluster *fdbv1beta2.FoundationDBCluster, pro } podSettings := cluster.GetProcessSettings(processClass) + var hasDCIDLocality, hasDataHallLocality bool for _, argument := range podSettings.CustomParameters { + if strings.HasPrefix(string(argument), "locality_"+fdbv1beta2.FDBLocalityDCIDKey) { + hasDCIDLocality = true + } + + if strings.HasPrefix(string(argument), "locality_"+fdbv1beta2.FDBLocalityDataHallKey) { + hasDataHallLocality = true + } + configuration.Arguments = append(configuration.Arguments, monitorapi.Argument{ ArgumentType: monitorapi.ConcatenateArgumentType, Values: generateMonitorArgumentFromCustomParameter(argument), }) } - if cluster.Spec.DataCenter != "" { - configuration.Arguments = append(configuration.Arguments, monitorapi.Argument{Value: getKnobParameterWithValue(fdbv1beta2.FDBLocalityDCIDlKey, cluster.Spec.DataCenter, true)}) + if cluster.Spec.DataCenter != "" && !hasDCIDLocality { + configuration.Arguments = append(configuration.Arguments, monitorapi.Argument{Value: getKnobParameterWithValue(fdbv1beta2.FDBLocalityDCIDKey, cluster.Spec.DataCenter, true)}) } - if cluster.Spec.DataHall != "" { + if cluster.Spec.DataHall != "" && !hasDataHallLocality { configuration.Arguments = append(configuration.Arguments, monitorapi.Argument{Value: getKnobParameterWithValue(fdbv1beta2.FDBLocalityDataHallKey, cluster.Spec.DataHall, true)}) } diff --git a/internal/monitor_conf_test.go b/internal/monitor_conf_test.go index b23ee8a68..6cb5d52ec 100644 --- a/internal/monitor_conf_test.go +++ 
b/internal/monitor_conf_test.go @@ -752,6 +752,72 @@ var _ = Describe("monitor_conf", func() { }, " "))) }) }) + + When("using custom parameters with substitutions for dcid locality", func() { + BeforeEach(func() { + settings := cluster.Spec.Processes[fdbv1beta2.ProcessClassGeneral] + settings.CustomParameters = []fdbv1beta2.FoundationDBCustomParameter{ + "locality_dcid=$FDB_INSTANCE_ID", + } + cluster.Spec.Processes[fdbv1beta2.ProcessClassGeneral] = settings + cluster.Spec.DataCenter = "dc" + }) + + It("should substitute the variables in the custom parameters", func() { + substitutions, err := GetSubstitutionsFromClusterAndPod(logr.Discard(), cluster, pod) + Expect(err).NotTo(HaveOccurred()) + command, err = GetStartCommandWithSubstitutions(cluster, processClass, substitutions, 1, 1) + Expect(err).NotTo(HaveOccurred()) + Expect(command).To(Equal(strings.Join([]string{ + "/usr/bin/fdbserver", + "--cluster_file=/var/fdb/data/fdb.cluster", + "--seed_cluster_file=/var/dynamic-conf/fdb.cluster", + fmt.Sprintf("--public_address=[%s]:4501", address), + "--class=storage", + "--logdir=/var/log/fdb-trace-logs", + "--loggroup=" + cluster.Name, + "--datadir=/var/fdb/data/1", + fmt.Sprintf("--locality_process_id=%s-1", processGroupID), + fmt.Sprintf("--locality_instance_id=%s", processGroupID), + fmt.Sprintf("--locality_machineid=%s-%s", cluster.Name, processGroupID), + fmt.Sprintf("--locality_zoneid=%s-%s", cluster.Name, processGroupID), + fmt.Sprintf("--locality_dcid=%s", processGroupID), + }, " "))) + }) + }) + + When("using custom parameters with substitutions for data_hall locality", func() { + BeforeEach(func() { + settings := cluster.Spec.Processes[fdbv1beta2.ProcessClassGeneral] + settings.CustomParameters = []fdbv1beta2.FoundationDBCustomParameter{ + "locality_data_hall=$FDB_INSTANCE_ID", + } + cluster.Spec.Processes[fdbv1beta2.ProcessClassGeneral] = settings + cluster.Spec.DataHall = "data_hall" + }) + + It("should substitute the variables in the custom parameters", 
func() { + substitutions, err := GetSubstitutionsFromClusterAndPod(logr.Discard(), cluster, pod) + Expect(err).NotTo(HaveOccurred()) + command, err = GetStartCommandWithSubstitutions(cluster, processClass, substitutions, 1, 1) + Expect(err).NotTo(HaveOccurred()) + Expect(command).To(Equal(strings.Join([]string{ + "/usr/bin/fdbserver", + "--cluster_file=/var/fdb/data/fdb.cluster", + "--seed_cluster_file=/var/dynamic-conf/fdb.cluster", + fmt.Sprintf("--public_address=[%s]:4501", address), + "--class=storage", + "--logdir=/var/log/fdb-trace-logs", + "--loggroup=" + cluster.Name, + "--datadir=/var/fdb/data/1", + fmt.Sprintf("--locality_process_id=%s-1", processGroupID), + fmt.Sprintf("--locality_instance_id=%s", processGroupID), + fmt.Sprintf("--locality_machineid=%s-%s", cluster.Name, processGroupID), + fmt.Sprintf("--locality_zoneid=%s-%s", cluster.Name, processGroupID), + fmt.Sprintf("--locality_data_hall=%s", processGroupID), + }, " "))) + }) + }) }) }) diff --git a/internal/pod_models.go b/internal/pod_models.go index 823f22864..75c092dbe 100644 --- a/internal/pod_models.go +++ b/internal/pod_models.go @@ -209,6 +209,15 @@ func configureContainersForUnifiedImages(cluster *fdbv1beta2.FoundationDBCluster }}) extendEnv(mainContainer, corev1.EnvVar{Name: "FDB_NETWORK_OPTION_TRACE_LOG_GROUP", Value: cluster.GetLogGroup()}, corev1.EnvVar{Name: "FDB_NETWORK_OPTION_TRACE_ENABLE", Value: "/var/log/fdb-trace-logs"}) + + // Allow the fdb-kubernetes-monitor to read the node labels and provide them as custom variables. 
+ if cluster.EnableNodeWatch() { + mainContainer.Args = append(mainContainer.Args, "--enable-node-watch") + extendEnv(mainContainer, corev1.EnvVar{Name: "FDB_NODE_NAME", ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{FieldPath: "spec.nodeName"}, + }}) + } + if cluster.DefineDNSLocalityFields() { mainContainer.Env = append(mainContainer.Env, corev1.EnvVar{Name: "FDB_DNS_NAME", Value: GetPodDNSName(cluster, processGroup.GetPodName(cluster))}) } diff --git a/internal/pod_models_test.go b/internal/pod_models_test.go index 9c799c40f..a59799057 100644 --- a/internal/pod_models_test.go +++ b/internal/pod_models_test.go @@ -779,6 +779,24 @@ var _ = Describe("pod_models", func() { }) }) + When("enabling the node watch feature for the unified image", func() { + BeforeEach(func() { + cluster.Spec.EnableNodeWatch = pointer.Bool(true) + spec, err = GetPodSpec(cluster, GetProcessGroup(cluster, fdbv1beta2.ProcessClassLog, 1)) + Expect(err).NotTo(HaveOccurred()) + }) + + It("should set the flag to enable the node watch feature", func() { + mainContainer := spec.Containers[0] + Expect(mainContainer.Name).To(Equal(fdbv1beta2.MainContainerName)) + Expect(mainContainer.Args).To(Equal([]string{ + "--input-dir", "/var/dynamic-conf", + "--log-path", "/var/log/fdb-trace-logs/monitor.log", + "--enable-node-watch", + })) + }) + }) + Context("with an instance that is crash looping", func() { BeforeEach(func() { cluster.Spec.Buggify.CrashLoop = []fdbv1beta2.ProcessGroupID{"storage-1"}