Commit

Merge branch 'master' into remove-legacy-repo-backend
crenshaw-dev authored Jan 14, 2025
2 parents c326e47 + c3600d2 commit bd4a330
Showing 77 changed files with 10,988 additions and 3,406 deletions.
14 changes: 4 additions & 10 deletions applicationset/controllers/applicationset_controller.go
@@ -495,7 +495,7 @@ func (r *ApplicationSetReconciler) validateGeneratedApplications(ctx context.Con
 			return nil, err
 		}
 
-		if err := argoutil.ValidateDestination(ctx, &app.Spec.Destination, r.ArgoDB); err != nil {
+		if _, err = argoutil.GetDestinationCluster(ctx, app.Spec.Destination, r.ArgoDB); err != nil {
 			errorsByIndex[i] = fmt.Errorf("application destination spec is invalid: %s", err.Error())
 			continue
 		}
@@ -747,9 +747,6 @@ func (r *ApplicationSetReconciler) getCurrentApplications(ctx context.Context, a
 // deleteInCluster will delete Applications that are currently on the cluster, but not in appList.
 // The function must be called after all generators had been called and generated applications
 func (r *ApplicationSetReconciler) deleteInCluster(ctx context.Context, logCtx *log.Entry, applicationSet argov1alpha1.ApplicationSet, desiredApplications []argov1alpha1.Application) error {
-	// settingsMgr := settings.NewSettingsManager(context.TODO(), r.KubeClientset, applicationSet.Namespace)
-	// argoDB := db.NewDB(applicationSet.Namespace, settingsMgr, r.KubeClientset)
-	// clusterList, err := argoDB.ListClusters(ctx)
 	clusterList, err := utils.ListClusters(ctx, r.KubeClientset, r.ArgoCDNamespace)
 	if err != nil {
 		return fmt.Errorf("error listing clusters: %w", err)
@@ -809,21 +806,18 @@ func (r *ApplicationSetReconciler) removeFinalizerOnInvalidDestination(ctx conte
 		var validDestination bool
 
 		// Detect if the destination is invalid (name doesn't correspond to a matching cluster)
-		if err := argoutil.ValidateDestination(ctx, &app.Spec.Destination, r.ArgoDB); err != nil {
+		if destCluster, err := argoutil.GetDestinationCluster(ctx, app.Spec.Destination, r.ArgoDB); err != nil {
 			appLog.Warnf("The destination cluster for %s couldn't be found: %v", app.Name, err)
 			validDestination = false
 		} else {
 			// Detect if the destination's server field does not match an existing cluster
 
 			matchingCluster := false
 			for _, cluster := range clusterList.Items {
-				// Server fields must match. Note that ValidateDestination ensures that the server field is set, if applicable.
-				if app.Spec.Destination.Server != cluster.Server {
+				if destCluster.Server != cluster.Server {
 					continue
 				}
 
-				// The name must match, if it is not empty
-				if app.Spec.Destination.Name != "" && cluster.Name != app.Spec.Destination.Name {
+				if destCluster.Name != cluster.Name {
 					continue
 				}
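The recurring change across this merge replaces argo.ValidateDestination (which only checked the destination) with argo.GetDestinationCluster, which resolves the destination to its cluster record so callers can compare destCluster.Server and destCluster.Name directly. The following is a minimal sketch of that call-site migration, not code from the commit; the helper's exact signature and the db.ArgoDB parameter are inferred from the hunks above.

// Sketch only: GetDestinationCluster's signature is inferred from this diff.
package example

import (
	"context"
	"fmt"

	"github.com/argoproj/argo-cd/v3/pkg/apis/application/v1alpha1"
	"github.com/argoproj/argo-cd/v3/util/argo"
	"github.com/argoproj/argo-cd/v3/util/db"
)

// resolveDestination validates an Application's destination by resolving it.
// Old pattern: err := argo.ValidateDestination(ctx, &app.Spec.Destination, argoDB)
// New pattern: resolve once, then reuse the returned cluster's Server/Name.
func resolveDestination(ctx context.Context, app v1alpha1.Application, argoDB db.ArgoDB) (*v1alpha1.Cluster, error) {
	destCluster, err := argo.GetDestinationCluster(ctx, app.Spec.Destination, argoDB)
	if err != nil {
		return nil, fmt.Errorf("application destination spec is invalid: %w", err)
	}
	return destCluster, nil
}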
7 changes: 2 additions & 5 deletions applicationset/controllers/applicationset_controller_test.go
@@ -1175,9 +1175,6 @@ func TestRemoveFinalizerOnInvalidDestination_FinalizerTypes(t *testing.T) {
 		Metrics: metrics,
 		ArgoDB:  argodb,
 	}
-	// settingsMgr := settings.NewSettingsManager(context.TODO(), kubeclientset, "namespace")
-	// argoDB := db.NewDB("namespace", settingsMgr, r.KubeClientset)
-	// clusterList, err := argoDB.ListClusters(context.Background())
 	clusterList, err := utils.ListClusters(context.Background(), kubeclientset, "namespace")
 	require.NoError(t, err)
 
@@ -2036,7 +2033,7 @@ func TestValidateGeneratedApplications(t *testing.T) {
 				},
 			},
 		},
-		validationErrors: map[int]error{0: errors.New("application destination spec is invalid: unable to find destination server: there are no clusters with this name: nonexistent-cluster")},
+		validationErrors: map[int]error{0: errors.New("application destination spec is invalid: there are no clusters with this name: nonexistent-cluster")},
 	},
 } {
 	t.Run(cc.name, func(t *testing.T) {
@@ -2440,7 +2437,7 @@ func applicationsUpdateSyncPolicyTest(t *testing.T, applicationsSyncPolicy v1alp
 
 	// Verify that on validation error, no error is returned, but the object is requeued
 	resCreate, err := r.Reconcile(context.Background(), req)
-	require.NoError(t, err)
+	require.NoErrorf(t, err, "Reconcile failed with error: %v", err)
 	assert.Equal(t, time.Duration(0), resCreate.RequeueAfter)
 
 	var app v1alpha1.Application
13 changes: 9 additions & 4 deletions cmd/argocd/commands/admin/app.go
@@ -417,14 +417,19 @@ func reconcileApplications(
 	var items []appReconcileResult
 	prevServer := ""
 	for _, app := range appsList.Items {
-		if prevServer != app.Spec.Destination.Server {
+		destCluster, err := argo.GetDestinationCluster(ctx, app.Spec.Destination, argoDB)
+		if err != nil {
+			return nil, fmt.Errorf("error getting destination cluster: %w", err)
+		}
+
+		if prevServer != destCluster.Server {
 			if prevServer != "" {
-				if clusterCache, err := stateCache.GetClusterCache(prevServer); err == nil {
+				if clusterCache, err := stateCache.GetClusterCache(destCluster); err == nil {
 					clusterCache.Invalidate()
 				}
 			}
-			printLine("Reconciling apps of %s", app.Spec.Destination.Server)
-			prevServer = app.Spec.Destination.Server
+			printLine("Reconciling apps of %s", destCluster.Server)
+			prevServer = destCluster.Server
 		}
 		printLine(app.Name)
 
2 changes: 1 addition & 1 deletion cmd/argocd/commands/admin/app_test.go
@@ -101,7 +101,7 @@ func TestGetReconcileResults_Refresh(t *testing.T) {
 	}, nil)
 	repoServerClientset := mocks.Clientset{RepoServerServiceClient: &repoServerClient}
 	liveStateCache := cachemocks.LiveStateCache{}
-	liveStateCache.On("GetManagedLiveObjs", mock.Anything, mock.Anything).Return(map[kube.ResourceKey]*unstructured.Unstructured{
+	liveStateCache.On("GetManagedLiveObjs", mock.Anything, mock.Anything, mock.Anything).Return(map[kube.ResourceKey]*unstructured.Unstructured{
 		kube.GetResourceKey(deployment): deployment,
 	}, nil)
 	liveStateCache.On("GetVersionsInfo", mock.Anything).Return("v1.2.3", nil, nil)
29 changes: 12 additions & 17 deletions cmd/argocd/commands/admin/cluster.go
@@ -78,7 +78,7 @@ type ClusterWithInfo struct {
 	Namespaces []string
 }
 
-func loadClusters(ctx context.Context, kubeClient *kubernetes.Clientset, appClient *versioned.Clientset, replicas int, shardingAlgorithm string, namespace string, portForwardRedis bool, cacheSrc func() (*appstatecache.Cache, error), shard int, redisName string, redisHaProxyName string, redisCompressionStr string) ([]ClusterWithInfo, error) {
+func loadClusters(ctx context.Context, kubeClient kubernetes.Interface, appClient versioned.Interface, replicas int, shardingAlgorithm string, namespace string, portForwardRedis bool, cacheSrc func() (*appstatecache.Cache, error), shard int, redisName string, redisHaProxyName string, redisCompressionStr string) ([]ClusterWithInfo, error) {
 	settingsMgr := settings.NewSettingsManager(ctx, kubeClient, namespace)
 
 	argoDB := db.NewDB(namespace, settingsMgr, kubeClient)
@@ -123,13 +123,6 @@ func loadClusters(ctx context.Context, kubeClient *kubernetes.Clientset, appClie
 	}
 
 	apps := appItems.Items
-	for i, app := range apps {
-		err := argo.ValidateDestination(ctx, &app.Spec.Destination, argoDB)
-		if err != nil {
-			return nil, err
-		}
-		apps[i] = app
-	}
 	clusters := make([]ClusterWithInfo, len(clustersList.Items))
 
 	batchSize := 10
@@ -154,7 +147,11 @@ func loadClusters(ctx context.Context, kubeClient *kubernetes.Clientset, appClie
 			}
 			nsSet := map[string]bool{}
 			for _, app := range apps {
-				if app.Spec.Destination.Server == cluster.Server {
+				destCluster, err := argo.GetDestinationCluster(ctx, app.Spec.Destination, argoDB)
+				if err != nil {
+					return fmt.Errorf("error validating application destination: %w", err)
+				}
+				if destCluster.Server == cluster.Server {
 					nsSet[app.Spec.Destination.Namespace] = true
 				}
 			}
@@ -281,18 +278,16 @@ func runClusterNamespacesCommand(ctx context.Context, clientConfig clientcmd.Cli
 		return fmt.Errorf("error listing application: %w", err)
 	}
 	apps := appItems.Items
-	for i, app := range apps {
-		if err := argo.ValidateDestination(ctx, &app.Spec.Destination, argoDB); err != nil {
-			return fmt.Errorf("error validating application destination: %w", err)
-		}
-		apps[i] = app
-	}
-
 	clusters := map[string][]string{}
 	for _, cluster := range clustersList.Items {
 		nsSet := map[string]bool{}
 		for _, app := range apps {
-			if app.Spec.Destination.Server != cluster.Server {
+			destCluster, err := argo.GetDestinationCluster(ctx, app.Spec.Destination, argoDB)
+			if err != nil {
+				return fmt.Errorf("error validating application destination: %w", err)
+			}
+
+			if destCluster.Server != cluster.Server {
 				continue
 			}
 			// Use namespaces of actually deployed resources, since some application use dummy target namespace
84 changes: 84 additions & 0 deletions cmd/argocd/commands/admin/cluster_test.go
@@ -0,0 +1,84 @@
package admin

import (
	"context"
	"testing"
	"time"

	"github.com/argoproj/argo-cd/v3/pkg/apis/application/v1alpha1"
	fakeapps "github.com/argoproj/argo-cd/v3/pkg/client/clientset/versioned/fake"
	cacheutil "github.com/argoproj/argo-cd/v3/util/cache"
	"github.com/argoproj/argo-cd/v3/util/cache/appstate"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
	"k8s.io/utils/ptr"
)

func Test_loadClusters(t *testing.T) {
	argoCDCM := &corev1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "argocd-cm",
			Namespace: "argocd",
			Labels: map[string]string{
				"app.kubernetes.io/part-of": "argocd",
			},
		},
		Data: map[string]string{},
	}
	argoCDSecret := &corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "argocd-secret",
			Namespace: "argocd",
		},
		Data: map[string][]byte{
			"server.secretkey": []byte("test"),
		},
	}
	app := &v1alpha1.Application{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "test",
			Namespace: "argocd",
		},
		Spec: v1alpha1.ApplicationSpec{
			Project: "default",
			Destination: v1alpha1.ApplicationDestination{
				Server:    "https://kubernetes.default.svc",
				Namespace: "test",
			},
		},
	}
	ctx := context.Background()
	kubeClient := fake.NewClientset(argoCDCM, argoCDSecret)
	appClient := fakeapps.NewSimpleClientset(app)
	cacheSrc := func() (*appstate.Cache, error) {
		return appstate.NewCache(cacheutil.NewCache(cacheutil.NewInMemoryCache(time.Minute)), time.Minute), nil
	}
	clusters, err := loadClusters(ctx, kubeClient, appClient, 3, "", "argocd", false, cacheSrc, 0, "", "", "")
	require.NoError(t, err)
	for i := range clusters {
		// This changes, nil it to avoid testing it.
		//nolint:staticcheck
		clusters[i].ConnectionState.ModifiedAt = nil
	}

	expected := []ClusterWithInfo{{
		Cluster: v1alpha1.Cluster{
			ID:     "",
			Server: "https://kubernetes.default.svc",
			Name:   "in-cluster",
			//nolint:staticcheck
			ConnectionState: v1alpha1.ConnectionState{
				Status: "Successful",
			},
			//nolint:staticcheck
			ServerVersion: ".",
			Shard:         ptr.To(int64(0)),
		},
		Namespaces: []string{"test"},
	}}
	assert.Equal(t, expected, clusters)
}
3 changes: 2 additions & 1 deletion cmd/argocd/commands/cluster.go
@@ -166,13 +166,14 @@ func NewClusterAddCommand(clientOpts *argocdclient.ClientOptions, pathOpts *clie
 			if clusterOpts.InClusterEndpoint() {
 				clst.Server = argoappv1.KubernetesInternalAPIServerAddr
 			} else if clusterOpts.ClusterEndpoint == string(cmdutil.KubePublicEndpoint) {
-				endpoint, err := cmdutil.GetKubePublicEndpoint(clientset)
+				endpoint, caData, err := cmdutil.GetKubePublicEndpoint(clientset)
 				if err != nil || len(endpoint) == 0 {
 					log.Warnf("Failed to find the cluster endpoint from kube-public data: %v", err)
 					log.Infof("Falling back to the endpoint '%s' as listed in the kubeconfig context", clst.Server)
 					endpoint = clst.Server
 				}
 				clst.Server = endpoint
+				clst.Config.TLSClientConfig.CAData = caData
 			}
 
 			if clusterOpts.Shard >= 0 {
16 changes: 9 additions & 7 deletions cmd/util/cluster.go
@@ -123,28 +123,30 @@ func NewCluster(name string, namespaces []string, clusterResources bool, conf *r
 	return &clst
 }
 
-// GetKubePublicEndpoint returns the kubernetes apiserver endpoint as published
+// GetKubePublicEndpoint returns the kubernetes apiserver endpoint and certificate authority data as published
 // in the kube-public.
-func GetKubePublicEndpoint(client kubernetes.Interface) (string, error) {
+func GetKubePublicEndpoint(client kubernetes.Interface) (string, []byte, error) {
 	clusterInfo, err := client.CoreV1().ConfigMaps("kube-public").Get(context.TODO(), "cluster-info", metav1.GetOptions{})
 	if err != nil {
-		return "", err
+		return "", nil, err
 	}
 	kubeconfig, ok := clusterInfo.Data["kubeconfig"]
 	if !ok {
-		return "", stderrors.New("cluster-info does not contain a public kubeconfig")
+		return "", nil, stderrors.New("cluster-info does not contain a public kubeconfig")
 	}
 	// Parse Kubeconfig and get server address
 	config := &clientcmdapiv1.Config{}
 	err = yaml.Unmarshal([]byte(kubeconfig), config)
 	if err != nil {
-		return "", fmt.Errorf("failed to parse cluster-info kubeconfig: %w", err)
+		return "", nil, fmt.Errorf("failed to parse cluster-info kubeconfig: %w", err)
 	}
 	if len(config.Clusters) == 0 {
-		return "", stderrors.New("cluster-info kubeconfig does not have any clusters")
+		return "", nil, stderrors.New("cluster-info kubeconfig does not have any clusters")
 	}
 
-	return config.Clusters[0].Cluster.Server, nil
+	endpoint := config.Clusters[0].Cluster.Server
+	certificateAuthorityData := config.Clusters[0].Cluster.CertificateAuthorityData
+	return endpoint, certificateAuthorityData, nil
 }
 
 type ClusterOptions struct {
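The second thread in this merge extends GetKubePublicEndpoint to also return the CertificateAuthorityData published in the kube-public cluster-info kubeconfig; the cmd/argocd/commands/cluster.go hunk above copies it into clst.Config.TLSClientConfig.CAData. Below is a minimal sketch of a caller under the new two-value signature, not code from the commit; the cmdutil import path and the fallback wiring are assumptions based on this diff.

// Sketch only: import path assumed from the repository layout in this diff.
package example

import (
	"log"

	"k8s.io/client-go/kubernetes"

	cmdutil "github.com/argoproj/argo-cd/v3/cmd/util"
)

// publicEndpointWithCA returns the kube-public endpoint and CA bundle,
// falling back to a caller-supplied endpoint (and no CA data) on failure,
// mirroring the fallback in NewClusterAddCommand above.
func publicEndpointWithCA(clientset kubernetes.Interface, fallback string) (string, []byte) {
	endpoint, caData, err := cmdutil.GetKubePublicEndpoint(clientset)
	if err != nil || len(endpoint) == 0 {
		log.Printf("failed to read kube-public cluster-info: %v; falling back to %s", err, fallback)
		return fallback, nil
	}
	return endpoint, caData
}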
28 changes: 23 additions & 5 deletions cmd/util/cluster_test.go
@@ -96,8 +96,23 @@ func TestGetKubePublicEndpoint(t *testing.T) {
 		name             string
 		clusterInfo      *corev1.ConfigMap
 		expectedEndpoint string
+		expectedCAData   []byte
 		expectError      bool
 	}{
+		{
+			name: "has public endpoint and certificate authority data",
+			clusterInfo: &corev1.ConfigMap{
+				ObjectMeta: metav1.ObjectMeta{
+					Namespace: "kube-public",
+					Name:      "cluster-info",
+				},
+				Data: map[string]string{
+					"kubeconfig": kubeconfigFixture("https://test-cluster:6443", []byte("test-ca-data")),
+				},
+			},
+			expectedEndpoint: "https://test-cluster:6443",
+			expectedCAData:   []byte("test-ca-data"),
+		},
 		{
 			name: "has public endpoint",
 			clusterInfo: &corev1.ConfigMap{
@@ -106,10 +121,11 @@
 				Name: "cluster-info",
 			},
 			Data: map[string]string{
-				"kubeconfig": kubeconfigFixture("https://test-cluster:6443"),
+				"kubeconfig": kubeconfigFixture("https://test-cluster:6443", nil),
 			},
 		},
 		expectedEndpoint: "https://test-cluster:6443",
+		expectedCAData:   nil,
 	},
 	{
 		name: "no cluster-info",
@@ -136,7 +152,7 @@
 				Name: "cluster-info",
 			},
 			Data: map[string]string{
-				"kubeconfig": kubeconfigFixture(""),
+				"kubeconfig": kubeconfigFixture("", nil),
 			},
 		},
 		expectError: true,
@@ -163,25 +179,27 @@
 			objects = append(objects, tc.clusterInfo)
 		}
 		clientset := fake.NewClientset(objects...)
-		endpoint, err := GetKubePublicEndpoint(clientset)
+		endpoint, caData, err := GetKubePublicEndpoint(clientset)
 		if tc.expectError {
 			require.Error(t, err)
 		} else {
 			require.NoError(t, err)
 		}
 		require.Equalf(t, tc.expectedEndpoint, endpoint, "expected endpoint %s, got %s", tc.expectedEndpoint, endpoint)
+		require.Equalf(t, tc.expectedCAData, caData, "expected caData %s, got %s", tc.expectedCAData, caData)
 	})
 }
}

-func kubeconfigFixture(endpoint string) string {
+func kubeconfigFixture(endpoint string, certificateAuthorityData []byte) string {
 	kubeconfig := &clientcmdapiv1.Config{}
 	if len(endpoint) > 0 {
 		kubeconfig.Clusters = []clientcmdapiv1.NamedCluster{
 			{
 				Name: "test-kube",
 				Cluster: clientcmdapiv1.Cluster{
-					Server: endpoint,
+					Server:                   endpoint,
+					CertificateAuthorityData: certificateAuthorityData,
 				},
 			},
 		}