
Commit 47747a3

Merge branch 'master' into search/public-preview
2 parents a72c572 + 5710105

File tree

2 files changed: +258 additions, -22 deletions

Lines changed: 205 additions & 0 deletions
@@ -0,0 +1,205 @@
package operator

import (
	"context"
	"fmt"
	"testing"

	"github.com/ghodss/yaml"
	"github.com/stretchr/testify/assert"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/util/workqueue"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/controller/controllertest"
	"sigs.k8s.io/controller-runtime/pkg/event"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"

	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
	apiErrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	searchv1 "github.com/mongodb/mongodb-kubernetes/api/v1/search"
	"github.com/mongodb/mongodb-kubernetes/api/v1/status"
	userv1 "github.com/mongodb/mongodb-kubernetes/api/v1/user"
	"github.com/mongodb/mongodb-kubernetes/controllers/operator/mock"
	"github.com/mongodb/mongodb-kubernetes/controllers/operator/workflow"
	"github.com/mongodb/mongodb-kubernetes/controllers/search_controller"
	mdbcv1 "github.com/mongodb/mongodb-kubernetes/mongodb-community-operator/api/v1"
	"github.com/mongodb/mongodb-kubernetes/mongodb-community-operator/pkg/mongot"
)

// newMongoDBCommunity returns a minimal single-member replica set fixture.
func newMongoDBCommunity(name, namespace string) *mdbcv1.MongoDBCommunity {
	return &mdbcv1.MongoDBCommunity{
		ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: namespace},
		Spec: mdbcv1.MongoDBCommunitySpec{
			Type:    mdbcv1.ReplicaSet,
			Members: 1,
			Version: "8.0",
		},
	}
}

// newMongoDBSearch returns a MongoDBSearch fixture whose source points at the named MongoDBCommunity.
func newMongoDBSearch(name, namespace, mdbcName string) *searchv1.MongoDBSearch {
	return &searchv1.MongoDBSearch{
		ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: namespace},
		Spec: searchv1.MongoDBSearchSpec{
			Source: &searchv1.MongoDBSource{
				MongoDBResourceRef: &userv1.MongoDBResourceRef{Name: mdbcName},
			},
		},
	}
}

// newSearchReconciler builds a MongoDBSearchReconciler backed by a fake client
// pre-populated with the given resources; nil arguments are skipped.
func newSearchReconciler(
	mdbc *mdbcv1.MongoDBCommunity,
	searches ...*searchv1.MongoDBSearch,
) (*MongoDBSearchReconciler, client.Client) {
	builder := mock.NewEmptyFakeClientBuilder()
	builder.WithIndex(&searchv1.MongoDBSearch{}, search_controller.MongoDBSearchIndexFieldName, mdbcSearchIndexBuilder)

	if mdbc != nil {
		builder.WithObjects(mdbc)
	}

	for _, search := range searches {
		if search != nil {
			builder.WithObjects(search)
		}
	}

	fakeClient := builder.Build()
	return newMongoDBSearchReconciler(fakeClient, search_controller.OperatorSearchConfig{}), fakeClient
}

// buildExpectedMongotConfig mirrors the mongot configuration the reconciler is
// expected to render for the given MongoDBSearch and MongoDBCommunity resources.
func buildExpectedMongotConfig(search *searchv1.MongoDBSearch, mdbc *mdbcv1.MongoDBCommunity) mongot.Config {
	return mongot.Config{CommunityPrivatePreview: mongot.CommunityPrivatePreview{
		MongodHostAndPort: fmt.Sprintf(
			"%s.%s.svc.cluster.local:%d",
			mdbc.ServiceName(), mdbc.Namespace,
			mdbc.GetMongodConfiguration().GetDBPort(),
		),
		QueryServerAddress: fmt.Sprintf("localhost:%d", search.GetMongotPort()),
		KeyFilePath:        "/mongot/keyfile/keyfile",
		DataPath:           "/mongot/data/config.yml",
		Metrics: mongot.Metrics{
			Enabled: true,
			Address: fmt.Sprintf("localhost:%d", search.GetMongotMetricsPort()),
		},
		Logging: mongot.Logging{Verbosity: "DEBUG"},
	}}
}

func TestMongoDBSearchReconcile_NotFound(t *testing.T) {
	ctx := context.Background()
	reconciler, _ := newSearchReconciler(nil, nil)

	res, err := reconciler.Reconcile(
		ctx,
		reconcile.Request{NamespacedName: types.NamespacedName{Name: "missing", Namespace: "test"}},
	)

	assert.Error(t, err)
	assert.True(t, apiErrors.IsNotFound(err))
	assert.Equal(t, reconcile.Result{}, res)
}

func TestMongoDBSearchReconcile_MissingSource(t *testing.T) {
	ctx := context.Background()
	search := newMongoDBSearch("search", mock.TestNamespace, "source")
	reconciler, _ := newSearchReconciler(nil, search)

	res, err := reconciler.Reconcile(
		ctx,
		reconcile.Request{NamespacedName: types.NamespacedName{Name: search.Name, Namespace: search.Namespace}},
	)

	assert.Error(t, err)
	assert.True(t, res.RequeueAfter > 0)
}

func TestMongoDBSearchReconcile_Success(t *testing.T) {
	ctx := context.Background()
	search := newMongoDBSearch("search", mock.TestNamespace, "mdb")
	mdbc := newMongoDBCommunity("mdb", mock.TestNamespace)
	reconciler, c := newSearchReconciler(mdbc, search)

	res, err := reconciler.Reconcile(
		ctx,
		reconcile.Request{NamespacedName: types.NamespacedName{Name: search.Name, Namespace: search.Namespace}},
	)
	expected, _ := workflow.OK().ReconcileResult()
	assert.NoError(t, err)
	assert.Equal(t, expected, res)

	svc := &corev1.Service{}
	err = c.Get(ctx, search.SearchServiceNamespacedName(), svc)
	assert.NoError(t, err)

	cm := &corev1.ConfigMap{}
	err = c.Get(ctx, search.MongotConfigConfigMapNamespacedName(), cm)
	assert.NoError(t, err)
	expectedConfig := buildExpectedMongotConfig(search, mdbc)
	configYaml, err := yaml.Marshal(expectedConfig)
	assert.NoError(t, err)
	assert.Equal(t, string(configYaml), cm.Data["config.yml"])

	sts := &appsv1.StatefulSet{}
	err = c.Get(ctx, search.StatefulSetNamespacedName(), sts)
	assert.NoError(t, err)

	// A create event for the watched MongoDBCommunity should enqueue a reconcile request.
	queue := controllertest.Queue{Interface: workqueue.New()}
	reconciler.mdbcWatcher.Create(ctx, event.CreateEvent{Object: mdbc}, &queue)
	assert.Equal(t, 1, queue.Len())
}

// checkSearchReconcileFailed reconciles the given MongoDBSearch and asserts that
// it is requeued and left in the Failed phase with the expected status message.
func checkSearchReconcileFailed(
	ctx context.Context,
	t *testing.T,
	reconciler *MongoDBSearchReconciler,
	c client.Client,
	search *searchv1.MongoDBSearch,
	expectedMsg string,
) {
	res, err := reconciler.Reconcile(
		ctx,
		reconcile.Request{NamespacedName: types.NamespacedName{Name: search.Name, Namespace: search.Namespace}},
	)
	assert.NoError(t, err)
	assert.True(t, res.RequeueAfter > 0)

	updated := &searchv1.MongoDBSearch{}
	assert.NoError(t, c.Get(ctx, types.NamespacedName{Name: search.Name, Namespace: search.Namespace}, updated))
	assert.Equal(t, status.PhaseFailed, updated.Status.Phase)
	assert.Contains(t, updated.Status.Message, expectedMsg)
}

func TestMongoDBSearchReconcile_InvalidVersion(t *testing.T) {
	ctx := context.Background()
	search := newMongoDBSearch("search", mock.TestNamespace, "mdb")
	mdbc := newMongoDBCommunity("mdb", mock.TestNamespace)
	mdbc.Spec.Version = "6.0"
	reconciler, c := newSearchReconciler(mdbc, search)

	checkSearchReconcileFailed(ctx, t, reconciler, c, search, "MongoDB version")
}

func TestMongoDBSearchReconcile_TLSNotSupported(t *testing.T) {
	ctx := context.Background()
	search := newMongoDBSearch("search", mock.TestNamespace, "mdb")
	mdbc := newMongoDBCommunity("mdb", mock.TestNamespace)
	mdbc.Spec.Security.TLS.Enabled = true
	reconciler, c := newSearchReconciler(mdbc, search)

	checkSearchReconcileFailed(ctx, t, reconciler, c, search, "TLS-enabled")
}

func TestMongoDBSearchReconcile_MultipleSearchResources(t *testing.T) {
	ctx := context.Background()
	search1 := newMongoDBSearch("search1", mock.TestNamespace, "mdb")
	search2 := newMongoDBSearch("search2", mock.TestNamespace, "mdb")
	mdbc := newMongoDBCommunity("mdb", mock.TestNamespace)
	reconciler, c := newSearchReconciler(mdbc, search1, search2)

	checkSearchReconcileFailed(ctx, t, reconciler, c, search1, "multiple MongoDBSearch")
}

docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_replica_set_scale_up.py

Lines changed: 53 additions & 22 deletions
@@ -1,6 +1,7 @@
 from typing import List
 
 import kubernetes
+import kubetester
 import pytest
 from kubetester.automation_config_tester import AutomationConfigTester
 from kubetester.certs_mongodb_multi import create_multi_cluster_mongodb_tls_certs
@@ -80,18 +81,30 @@ def test_statefulsets_have_been_created_correctly(
     mongodb_multi: MongoDBMulti,
     member_cluster_clients: List[MultiClusterClient],
 ):
-    statefulsets = mongodb_multi.read_statefulsets(member_cluster_clients)
-    cluster_one_client = member_cluster_clients[0]
-    cluster_one_sts = statefulsets[cluster_one_client.cluster_name]
-    assert cluster_one_sts.status.ready_replicas == 1
+    # Even though the previous test already verified that the MongoDBMultiCluster resource's phase is Running
+    # (which implies all STSs are ready), asserting on the expected number of STS replicas right away makes the
+    # test flaky because of the issue detailed in https://jira.mongodb.org/browse/CLOUDP-329231. That's why we
+    # wait for each STS to reach the expected number of replicas; revert this once the proper fix from that ticket lands.
+    def fn():
+        cluster_one_client = member_cluster_clients[0]
+        cluster_one_statefulsets = mongodb_multi.read_statefulsets([cluster_one_client])
+        return cluster_one_statefulsets[cluster_one_client.cluster_name].status.ready_replicas == 1
 
-    cluster_two_client = member_cluster_clients[1]
-    cluster_two_sts = statefulsets[cluster_two_client.cluster_name]
-    assert cluster_two_sts.status.ready_replicas == 1
+    kubetester.wait_until(fn, timeout=60, message="Verifying sts has correct number of replicas in cluster one")
 
-    cluster_three_client = member_cluster_clients[2]
-    cluster_three_sts = statefulsets[cluster_three_client.cluster_name]
-    assert cluster_three_sts.status.ready_replicas == 1
+    def fn():
+        cluster_two_client = member_cluster_clients[1]
+        cluster_two_statefulsets = mongodb_multi.read_statefulsets([cluster_two_client])
+        return cluster_two_statefulsets[cluster_two_client.cluster_name].status.ready_replicas == 1
+
+    kubetester.wait_until(fn, timeout=60, message="Verifying sts has correct number of replicas in cluster two")
+
+    def fn():
+        cluster_three_client = member_cluster_clients[2]
+        cluster_three_statefulsets = mongodb_multi.read_statefulsets([cluster_three_client])
+        return cluster_three_statefulsets[cluster_three_client.cluster_name].status.ready_replicas == 1
+
+    kubetester.wait_until(fn, timeout=60, message="Verifying sts has correct number of replicas in cluster three")
 
 
 @pytest.mark.e2e_multi_cluster_replica_set_scale_up
@@ -116,18 +129,36 @@ def test_statefulsets_have_been_scaled_up_correctly(
     mongodb_multi: MongoDBMulti,
     member_cluster_clients: List[MultiClusterClient],
 ):
-    statefulsets = mongodb_multi.read_statefulsets(member_cluster_clients)
-    cluster_one_client = member_cluster_clients[0]
-    cluster_one_sts = statefulsets[cluster_one_client.cluster_name]
-    assert cluster_one_sts.status.ready_replicas == 2
-
-    cluster_two_client = member_cluster_clients[1]
-    cluster_two_sts = statefulsets[cluster_two_client.cluster_name]
-    assert cluster_two_sts.status.ready_replicas == 1
-
-    cluster_three_client = member_cluster_clients[2]
-    cluster_three_sts = statefulsets[cluster_three_client.cluster_name]
-    assert cluster_three_sts.status.ready_replicas == 2
+    # Even though the previous test already verified that the MongoDBMultiCluster resource's phase is Running
+    # (which implies all STSs are ready), asserting on the expected number of STS replicas right away makes the
+    # test flaky because of the issue detailed in https://jira.mongodb.org/browse/CLOUDP-329231. That's why we
+    # wait for each STS to reach the expected number of replicas; revert this once the proper fix from that ticket lands.
+    def fn():
+        cluster_one_client = member_cluster_clients[0]
+        cluster_one_statefulsets = mongodb_multi.read_statefulsets([cluster_one_client])
+        return cluster_one_statefulsets[cluster_one_client.cluster_name].status.ready_replicas == 2
+
+    kubetester.wait_until(
+        fn, timeout=60, message="Verifying sts has correct number of replicas after scale up in cluster one"
+    )
+
+    def fn():
+        cluster_two_client = member_cluster_clients[1]
+        cluster_two_statefulsets = mongodb_multi.read_statefulsets([cluster_two_client])
+        return cluster_two_statefulsets[cluster_two_client.cluster_name].status.ready_replicas == 1
+
+    kubetester.wait_until(
+        fn, timeout=60, message="Verifying sts has correct number of replicas after scale up in cluster two"
+    )
+
+    def fn():
+        cluster_three_client = member_cluster_clients[2]
+        cluster_three_statefulsets = mongodb_multi.read_statefulsets([cluster_three_client])
+        return cluster_three_statefulsets[cluster_three_client.cluster_name].status.ready_replicas == 2
+
+    kubetester.wait_until(
+        fn, timeout=60, message="Verifying sts has correct number of replicas after scale up in cluster three"
+    )
 
 
 @pytest.mark.e2e_multi_cluster_replica_set_scale_up
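
Both rewritten tests repeat the same closure-plus-kubetester.wait_until pattern once per member cluster, with only the cluster index and the expected replica count changing. A minimal sketch of how that duplication could be factored out — hypothetical, not part of this commit; wait_for_ready_replicas is an invented name, and the sketch assumes only the read_statefulsets, cluster_name, and kubetester.wait_until interfaces already used above:

import kubetester


def wait_for_ready_replicas(mongodb_multi, member_cluster_clients, expected_replicas, timeout=60):
    # Hypothetical helper: poll each member cluster's statefulset until it
    # reports the expected number of ready replicas. expected_replicas has one
    # entry per member cluster, e.g. [2, 1, 2] for the scale-up test above.
    for i, (cluster_client, expected) in enumerate(zip(member_cluster_clients, expected_replicas), start=1):

        def fn():
            # Read only this cluster's statefulset, mirroring the per-cluster reads above.
            statefulsets = mongodb_multi.read_statefulsets([cluster_client])
            return statefulsets[cluster_client.cluster_name].status.ready_replicas == expected

        kubetester.wait_until(
            fn, timeout=timeout, message=f"Verifying sts has {expected} ready replicas in cluster {i}"
        )

With such a helper, the scale-up check would collapse to wait_for_ready_replicas(mongodb_multi, member_cluster_clients, [2, 1, 2]).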
