diff --git a/cli/cmd/authz.go b/cli/cmd/authz.go index 81b02088c1381..560818cfe534f 100644 --- a/cli/cmd/authz.go +++ b/cli/cmd/authz.go @@ -1,36 +1,15 @@ package cmd import ( - "context" "fmt" "os" - "strings" "github.com/linkerd/linkerd2/cli/table" pkgcmd "github.com/linkerd/linkerd2/pkg/cmd" "github.com/linkerd/linkerd2/pkg/k8s" "github.com/spf13/cobra" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/client-go/kubernetes" ) -var sazGVR = schema.GroupVersionResource{ - Group: "policy.linkerd.io", - Version: "v1alpha1", - Resource: "serverauthorizations", -} - -var serverGVR = schema.GroupVersionResource{ - Group: "policy.linkerd.io", - Version: "v1alpha1", - Resource: "servers", -} - -type id struct{ name, namespace string } - func newCmdAuthz() *cobra.Command { var namespace string @@ -78,58 +57,16 @@ func newCmdAuthz() *cobra.Command { resource = args[0] + "/" + args[1] } - pods, err := getPodsForResourceOrKind(cmd.Context(), k8sAPI, namespace, resource) - if err != nil { - return err - } - podSet := make(map[id]struct{}) - for _, pod := range pods { - podSet[id{pod.Name, pod.Namespace}] = struct{}{} - } - rows := make([]table.Row, 0) - sazs, err := k8sAPI.DynamicClient.Resource(sazGVR).Namespace(namespace).List(cmd.Context(), metav1.ListOptions{}) + authzs, err := k8s.ServerAuthorizationsForResource(cmd.Context(), k8sAPI, namespace, resource) if err != nil { fmt.Fprintf(os.Stderr, "Failed to get serverauthorization resources: %s\n", err) os.Exit(1) } - for _, saz := range sazs.Items { - var servers []unstructured.Unstructured - - if name, found, _ := unstructured.NestedString(saz.UnstructuredContent(), "spec", "server", "name"); found { - server, err := k8sAPI.DynamicClient.Resource(serverGVR).Namespace(saz.GetNamespace()).Get(cmd.Context(), name, metav1.GetOptions{}) - if err != nil { - fmt.Fprintf(os.Stderr, "Failed to get 
server %s: %s\n", name, err) - os.Exit(1) - } - servers = []unstructured.Unstructured{*server} - } else if sel, found, _ := unstructured.NestedMap(saz.UnstructuredContent(), "spec", "server", "selector"); found { - selector := selector(sel) - serverList, err := k8sAPI.DynamicClient.Resource(serverGVR).Namespace(saz.GetNamespace()).List(cmd.Context(), metav1.ListOptions{LabelSelector: metav1.FormatLabelSelector(&selector)}) - if err != nil { - fmt.Fprintf(os.Stderr, "Failed to get servers: %s\n", err) - os.Exit(1) - } - servers = serverList.Items - } - - for _, server := range servers { - if sel, found, _ := unstructured.NestedMap(server.UnstructuredContent(), "spec", "podSelector"); found { - selector := selector(sel) - selectedPods, err := k8sAPI.CoreV1().Pods(server.GetNamespace()).List(cmd.Context(), metav1.ListOptions{LabelSelector: metav1.FormatLabelSelector(&selector)}) - if err != nil { - fmt.Fprintf(os.Stderr, "Failed to get pods: %s\n", err) - os.Exit(1) - } - if serverIncludesPod(server, selectedPods.Items, podSet) { - rows = append(rows, table.Row{server.GetName(), saz.GetName()}) - } - } - - } - + for _, authz := range authzs { + rows = append(rows, table.Row{authz.Server, authz.ServerAuthorization}) } cols := []table.Column{ @@ -150,202 +87,3 @@ func newCmdAuthz() *cobra.Command { kubeconfigPath, impersonate, impersonateGroup, kubeContext) return cmd } - -func selector(selector map[string]interface{}) metav1.LabelSelector { - if labels, found, err := unstructured.NestedStringMap(selector, "matchLabels"); found && err == nil { - return metav1.LabelSelector{MatchLabels: labels} - } - if expressions, found, err := unstructured.NestedSlice(selector, "matchExpressions"); found && err == nil { - exprs := make([]metav1.LabelSelectorRequirement, len(expressions)) - for i, expr := range expressions { - exprs[i] = matchExpression(expr) - } - return metav1.LabelSelector{MatchExpressions: exprs} - } - return metav1.LabelSelector{} -} - -func matchExpression(expr 
interface{}) metav1.LabelSelectorRequirement { - if exprMap, ok := expr.(map[string]interface{}); ok { - if key, found, err := unstructured.NestedString(exprMap, "key"); found && err == nil { - if op, found, err := unstructured.NestedString(exprMap, "operator"); found && err == nil { - if values, found, err := unstructured.NestedStringSlice(exprMap, "values"); found && err == nil { - return metav1.LabelSelectorRequirement{ - Key: key, - Operator: metav1.LabelSelectorOperator(op), - Values: values, - } - } - } - } - } - return metav1.LabelSelectorRequirement{} -} - -func serverIncludesPod(server unstructured.Unstructured, serverPods []corev1.Pod, podSet map[id]struct{}) bool { - for _, pod := range serverPods { - if _, ok := podSet[id{pod.Name, pod.Namespace}]; ok { - if port, found, err := unstructured.NestedInt64(server.UnstructuredContent(), "spec", "port"); found && err == nil { - for _, container := range pod.Spec.Containers { - for _, p := range container.Ports { - if int32(port) == p.ContainerPort { - return true - } - } - } - } - if port, found, err := unstructured.NestedString(server.UnstructuredContent(), "spec", "port"); found && err == nil { - for _, container := range pod.Spec.Containers { - for _, p := range container.Ports { - if port == p.Name { - return true - } - } - } - } - } - } - return false -} - -// getPodsForResourceOrKind is similar to getPodsForResource, but also supports -// querying for all resources of a given kind (i.e. when resource name is unspecified). 
-func getPodsForResourceOrKind(ctx context.Context, k8sAPI kubernetes.Interface, namespace string, resource string) ([]corev1.Pod, error) { - - elems := strings.Split(resource, "/") - if len(elems) > 2 { - return nil, fmt.Errorf("invalid resource: %s", resource) - } - if len(elems) == 2 { - pods, err := getPodsFor(ctx, k8sAPI, namespace, resource) - if err != nil { - fmt.Fprintf(os.Stderr, "failed to get pods: %s", err) - os.Exit(1) - } - return pods, nil - } - pods := []corev1.Pod{} - - typ, err := k8s.CanonicalResourceNameFromFriendlyName(elems[0]) - if err != nil { - return nil, fmt.Errorf("invalid resource: %s", resource) - } - switch typ { - case k8s.Pod: - ps, err := k8sAPI.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{}) - if err != nil { - fmt.Fprintf(os.Stderr, "failed to get pods: %s", err) - os.Exit(1) - } - pods = append(pods, ps.Items...) - - case k8s.CronJob: - jobs, err := k8sAPI.BatchV1().CronJobs(namespace).List(ctx, metav1.ListOptions{}) - if err != nil { - fmt.Fprintf(os.Stderr, "failed to get cronjobs: %s", err) - os.Exit(1) - } - for _, job := range jobs.Items { - ps, err := getPodsFor(ctx, k8sAPI, namespace, fmt.Sprintf("%s/%s", k8s.CronJob, job.Name)) - if err != nil { - fmt.Fprintf(os.Stderr, "failed to get pods: %s", err) - os.Exit(1) - } - pods = append(pods, ps...) - } - - case k8s.DaemonSet: - dss, err := k8sAPI.AppsV1().DaemonSets(namespace).List(ctx, metav1.ListOptions{}) - if err != nil { - fmt.Fprintf(os.Stderr, "failed to get demonsets: %s", err) - os.Exit(1) - } - for _, ds := range dss.Items { - ps, err := getPodsFor(ctx, k8sAPI, namespace, fmt.Sprintf("%s/%s", k8s.DaemonSet, ds.Name)) - if err != nil { - fmt.Fprintf(os.Stderr, "failed to get pods: %s", err) - os.Exit(1) - } - pods = append(pods, ps...) 
- } - - case k8s.Deployment: - deploys, err := k8sAPI.AppsV1().Deployments(namespace).List(ctx, metav1.ListOptions{}) - if err != nil { - fmt.Fprintf(os.Stderr, "failed to get deployments: %s", err) - os.Exit(1) - } - for _, deploy := range deploys.Items { - ps, err := getPodsFor(ctx, k8sAPI, namespace, fmt.Sprintf("%s/%s", k8s.Deployment, deploy.Name)) - if err != nil { - fmt.Fprintf(os.Stderr, "failed to get pods: %s", err) - os.Exit(1) - } - pods = append(pods, ps...) - } - - case k8s.Job: - jobs, err := k8sAPI.BatchV1().Jobs(namespace).List(ctx, metav1.ListOptions{}) - if err != nil { - fmt.Fprintf(os.Stderr, "failed to get jobs: %s", err) - os.Exit(1) - } - for _, job := range jobs.Items { - ps, err := getPodsFor(ctx, k8sAPI, namespace, fmt.Sprintf("%s/%s", k8s.Job, job.Name)) - if err != nil { - fmt.Fprintf(os.Stderr, "failed to get pods: %s", err) - os.Exit(1) - } - pods = append(pods, ps...) - } - - case k8s.ReplicaSet: - rss, err := k8sAPI.AppsV1().ReplicaSets(namespace).List(ctx, metav1.ListOptions{}) - if err != nil { - fmt.Fprintf(os.Stderr, "failed to get replicasets: %s", err) - os.Exit(1) - } - for _, rs := range rss.Items { - ps, err := getPodsFor(ctx, k8sAPI, namespace, fmt.Sprintf("%s/%s", k8s.ReplicaSet, rs.Name)) - if err != nil { - fmt.Fprintf(os.Stderr, "failed to get pods: %s", err) - os.Exit(1) - } - pods = append(pods, ps...) - } - - case k8s.ReplicationController: - rcs, err := k8sAPI.CoreV1().ReplicationControllers(namespace).List(ctx, metav1.ListOptions{}) - if err != nil { - fmt.Fprintf(os.Stderr, "failed to get replicationcontrollers: %s", err) - os.Exit(1) - } - for _, rc := range rcs.Items { - ps, err := getPodsFor(ctx, k8sAPI, namespace, fmt.Sprintf("%s/%s", k8s.ReplicationController, rc.Name)) - if err != nil { - fmt.Fprintf(os.Stderr, "failed to get pods: %s", err) - os.Exit(1) - } - pods = append(pods, ps...) 
- } - - case k8s.StatefulSet: - sss, err := k8sAPI.AppsV1().StatefulSets(namespace).List(ctx, metav1.ListOptions{}) - if err != nil { - fmt.Fprintf(os.Stderr, "failed to get statefulsets: %s", err) - os.Exit(1) - } - for _, ss := range sss.Items { - ps, err := getPodsFor(ctx, k8sAPI, namespace, fmt.Sprintf("%s/%s", k8s.StatefulSet, ss.Name)) - if err != nil { - fmt.Fprintf(os.Stderr, "failed to get pods: %s", err) - os.Exit(1) - } - pods = append(pods, ps...) - } - - default: - return nil, fmt.Errorf("unsupported resource type: %s", typ) - } - return pods, nil -} diff --git a/cli/cmd/metrics.go b/cli/cmd/metrics.go index 91b8ce3ca86de..a5c265ae76c03 100644 --- a/cli/cmd/metrics.go +++ b/cli/cmd/metrics.go @@ -2,20 +2,12 @@ package cmd import ( "bytes" - "context" - "errors" "fmt" - "strings" "time" pkgcmd "github.com/linkerd/linkerd2/pkg/cmd" "github.com/linkerd/linkerd2/pkg/k8s" "github.com/spf13/cobra" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/types" - "k8s.io/client-go/kubernetes" ) type metricsOptions struct { @@ -83,7 +75,7 @@ func newCmdMetrics() *cobra.Command { return err } - pods, err := getPodsFor(cmd.Context(), k8sAPI, options.namespace, args[0]) + pods, err := k8s.GetPodsFor(cmd.Context(), k8sAPI, options.namespace, args[0]) if err != nil { return err } @@ -113,163 +105,3 @@ func newCmdMetrics() *cobra.Command { return cmd } - -// getPodsFor takes a resource string, queries the Kubernetes API, and returns a -// list of pods belonging to that resource. -// This could move into `pkg/k8s` if becomes more generally useful. 
-func getPodsFor(ctx context.Context, clientset kubernetes.Interface, namespace string, resource string) ([]corev1.Pod, error) { - elems := strings.Split(resource, "/") - - if len(elems) == 1 { - return nil, errors.New("no resource name provided") - } - - if len(elems) != 2 { - return nil, fmt.Errorf("invalid resource string: %s", resource) - } - - typ, err := k8s.CanonicalResourceNameFromFriendlyName(elems[0]) - if err != nil { - return nil, err - } - name := elems[1] - - // special case if a single pod was specified - if typ == k8s.Pod { - pod, err := clientset.CoreV1().Pods(namespace).Get(ctx, name, metav1.GetOptions{}) - if err != nil { - return nil, err - } - return []corev1.Pod{*pod}, nil - } - - var matchLabels map[string]string - var ownerUID types.UID - switch typ { - case k8s.CronJob: - jobs, err := clientset.BatchV1().Jobs(namespace).List(ctx, metav1.ListOptions{}) - if err != nil { - return nil, err - } - - var pods []corev1.Pod - for _, job := range jobs.Items { - if isOwner(job.GetUID(), job.GetOwnerReferences()) { - jobPods, err := getPodsFor(ctx, clientset, namespace, fmt.Sprintf("%s/%s", k8s.Job, job.GetName())) - if err != nil { - return nil, err - } - pods = append(pods, jobPods...) 
- } - } - return pods, nil - - case k8s.DaemonSet: - ds, err := clientset.AppsV1().DaemonSets(namespace).Get(ctx, name, metav1.GetOptions{}) - if err != nil { - return nil, err - } - matchLabels = ds.Spec.Selector.MatchLabels - ownerUID = ds.GetUID() - - case k8s.Deployment: - deployment, err := clientset.AppsV1().Deployments(namespace).Get(ctx, name, metav1.GetOptions{}) - if err != nil { - return nil, err - } - matchLabels = deployment.Spec.Selector.MatchLabels - ownerUID = deployment.GetUID() - - replicaSets, err := clientset.AppsV1().ReplicaSets(namespace).List( - ctx, - metav1.ListOptions{ - LabelSelector: labels.Set(matchLabels).AsSelector().String(), - }, - ) - if err != nil { - return nil, err - } - - var pods []corev1.Pod - for _, rs := range replicaSets.Items { - if isOwner(ownerUID, rs.GetOwnerReferences()) { - podsRS, err := getPodsFor(ctx, clientset, namespace, fmt.Sprintf("%s/%s", k8s.ReplicaSet, rs.GetName())) - if err != nil { - return nil, err - } - pods = append(pods, podsRS...) 
- } - } - return pods, nil - - case k8s.Job: - job, err := clientset.BatchV1().Jobs(namespace).Get(ctx, name, metav1.GetOptions{}) - if err != nil { - return nil, err - } - matchLabels = job.Spec.Selector.MatchLabels - ownerUID = job.GetUID() - - case k8s.ReplicaSet: - rs, err := clientset.AppsV1().ReplicaSets(namespace).Get(ctx, name, metav1.GetOptions{}) - if err != nil { - return nil, err - } - matchLabels = rs.Spec.Selector.MatchLabels - ownerUID = rs.GetUID() - - case k8s.ReplicationController: - rc, err := clientset.CoreV1().ReplicationControllers(namespace).Get(ctx, name, metav1.GetOptions{}) - if err != nil { - return nil, err - } - matchLabels = rc.Spec.Selector - ownerUID = rc.GetUID() - - case k8s.StatefulSet: - ss, err := clientset.AppsV1().StatefulSets(namespace).Get(ctx, name, metav1.GetOptions{}) - if err != nil { - return nil, err - } - matchLabels = ss.Spec.Selector.MatchLabels - ownerUID = ss.GetUID() - - default: - return nil, fmt.Errorf("unsupported resource type: %s", name) - } - - podList, err := clientset. - CoreV1(). - Pods(namespace). 
- List( - ctx, - metav1.ListOptions{ - LabelSelector: labels.Set(matchLabels).AsSelector().String(), - }, - ) - if err != nil { - return nil, err - } - - if ownerUID == "" { - return podList.Items, nil - } - - pods := []corev1.Pod{} - for _, pod := range podList.Items { - if isOwner(ownerUID, pod.GetOwnerReferences()) { - pods = append(pods, pod) - } - } - - return pods, nil -} - -func isOwner(u types.UID, ownerRefs []metav1.OwnerReference) bool { - for _, or := range ownerRefs { - if u == or.UID { - return true - } - } - return false -} diff --git a/cli/cmd/metrics_test.go b/cli/cmd/metrics_test.go deleted file mode 100644 index d28cda6877fba..0000000000000 --- a/cli/cmd/metrics_test.go +++ /dev/null @@ -1,130 +0,0 @@ -package cmd - -import ( - "context" - "testing" - - "github.com/linkerd/linkerd2/pkg/k8s" -) - -func TestGetPodsFor(t *testing.T) { - - configs := []string{ - // pod-1 - `apiVersion: v1 -kind: Pod -metadata: - name: pod-1 - namespace: ns - uid: pod-1 - labels: - app: foo - ownerReferences: - - apiVersion: apps/v1 - controller: true - kind: ReplicaSet - name: rs-1 - uid: rs-1 -`, - // rs-1 - `apiVersion: apps/v1 -kind: ReplicaSet -metadata: - name: rs-1 - namespace: ns - uid: rs-1 - labels: - app: foo - ownerReferences: - - apiVersion: apps/v1 - controller: true - kind: Deployment - name: deploy-1 - uid: deploy-1 -spec: - selector: - matchLabels: - app: foo -`, - // deploy-1 - `apiVersion: apps/v1 -kind: Deployment -metadata: - name: deploy-1 - namespace: ns - uid: deploy-1 -spec: - selector: - matchLabels: - app: foo -`, - // pod-2 - `apiVersion: v1 -kind: Pod -metadata: - name: pod-2 - namespace: ns - uid: pod-2 - labels: - app: foo - ownerReferences: - - apiVersion: apps/v1 - controller: true - kind: ReplicaSet - name: rs-2 - uid: rs-2 -`, - // rs-2 - `apiVersion: apps/v1 -kind: ReplicaSet -metadata: - name: rs-2 - namespace: ns - uid: rs-2 - labels: - app: foo - ownerReferences: - - apiVersion: apps/v1 - controller: true - kind: Deployment - 
name: deploy-2 - uid: deploy-2 -spec: - selector: - matchLabels: - app: foo -`, - // deploy-2 - `apiVersion: apps/v1 -kind: Deployment -metadata: - name: deploy-2 - namespace: ns - uid: deploy-2 -spec: - selector: - matchLabels: - app: foo -`} - - k8sClient, err := k8s.NewFakeAPI(configs...) - if err != nil { - t.Fatalf("Unexpected error %s", err) - } - - // Both pod-1 and pod-2 have labels which match deploy-1's selector. - // However, only pod-1 is owned by deploy-1 according to the owner references. - // Owner references should be considered authoritative to resolve ambiguity - // when deployments have overlapping seletors. - pods, err := getPodsFor(context.Background(), k8sClient, "ns", "deploy/deploy-1") - if err != nil { - t.Fatalf("Unexpected error %s", err) - } - - if len(pods) != 1 { - for _, p := range pods { - t.Logf("%s/%s", p.Namespace, p.Name) - } - t.Fatalf("Expected 1 pod, got %d", len(pods)) - } -} diff --git a/cli/table/table.go b/cli/table/table.go index d405c2d682b1b..ba585f721bd1c 100644 --- a/cli/table/table.go +++ b/cli/table/table.go @@ -43,6 +43,21 @@ func NewTable(cols []Column, data []Row) Table { } } +// NewColumn creates a new flexible column with the given name. +func NewColumn(header string) Column { + return Column{ + Header: header, + Flexible: true, + Width: len(header), + } +} + +// WithLeftAlign turns on the left align of this column and returns it. +func (c Column) WithLeftAlign() Column { + c.LeftAlign = true + return c +} + // Render writes the full table to the given Writer. 
func (t *Table) Render(w io.Writer) { columnWidths := t.columnWidths() diff --git a/pkg/k8s/api.go b/pkg/k8s/api.go index 967ca55b748eb..0ae270d9cc2e0 100644 --- a/pkg/k8s/api.go +++ b/pkg/k8s/api.go @@ -2,6 +2,7 @@ package k8s import ( "context" + "errors" "fmt" "net/http" "strings" @@ -12,8 +13,10 @@ import ( appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" apiextensionsclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" - "k8s.io/apimachinery/pkg/api/errors" + kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/version" "k8s.io/client-go/dynamic" "k8s.io/client-go/kubernetes" @@ -137,7 +140,7 @@ func (kubeAPI *KubernetesAPI) CheckVersion(versionInfo *version.Info) error { // NamespaceExists validates whether a given namespace exists. func (kubeAPI *KubernetesAPI) NamespaceExists(ctx context.Context, namespace string) (bool, error) { ns, err := kubeAPI.CoreV1().Namespaces().Get(ctx, namespace, metav1.GetOptions{}) - if errors.IsNotFound(err) { + if kerrors.IsNotFound(err) { return false, nil } if err != nil { @@ -197,7 +200,7 @@ func (kubeAPI *KubernetesAPI) GetNamespaceWithExtensionLabel(ctx context.Context return &ns, err } } - return nil, errors.NewNotFound(corev1.Resource("namespace"), value) + return nil, kerrors.NewNotFound(corev1.Resource("namespace"), value) } // GetPodStatus receives a pod and returns the pod status, based on `kubectl` logic. @@ -285,3 +288,162 @@ func GetProxyVersion(pod corev1.Pod) string { } return "" } + +// GetPodsFor takes a resource string, queries the Kubernetes API, and returns a +// list of pods belonging to that resource. 
+func GetPodsFor(ctx context.Context, clientset kubernetes.Interface, namespace string, resource string) ([]corev1.Pod, error) { + elems := strings.Split(resource, "/") + + if len(elems) == 1 { + return nil, errors.New("no resource name provided") + } + + if len(elems) != 2 { + return nil, fmt.Errorf("invalid resource string: %s", resource) + } + + typ, err := CanonicalResourceNameFromFriendlyName(elems[0]) + if err != nil { + return nil, err + } + name := elems[1] + + // special case if a single pod was specified + if typ == Pod { + pod, err := clientset.CoreV1().Pods(namespace).Get(ctx, name, metav1.GetOptions{}) + if err != nil { + return nil, err + } + return []corev1.Pod{*pod}, nil + } + + var matchLabels map[string]string + var ownerUID types.UID + switch typ { + case CronJob: + jobs, err := clientset.BatchV1().Jobs(namespace).List(ctx, metav1.ListOptions{}) + if err != nil { + return nil, err + } + + var pods []corev1.Pod + for _, job := range jobs.Items { + if isOwner(job.GetUID(), job.GetOwnerReferences()) { + jobPods, err := GetPodsFor(ctx, clientset, namespace, fmt.Sprintf("%s/%s", Job, job.GetName())) + if err != nil { + return nil, err + } + pods = append(pods, jobPods...) 
+ } + } + return pods, nil + + case DaemonSet: + ds, err := clientset.AppsV1().DaemonSets(namespace).Get(ctx, name, metav1.GetOptions{}) + if err != nil { + return nil, err + } + matchLabels = ds.Spec.Selector.MatchLabels + ownerUID = ds.GetUID() + + case Deployment: + deployment, err := clientset.AppsV1().Deployments(namespace).Get(ctx, name, metav1.GetOptions{}) + if err != nil { + return nil, err + } + matchLabels = deployment.Spec.Selector.MatchLabels + ownerUID = deployment.GetUID() + + replicaSets, err := clientset.AppsV1().ReplicaSets(namespace).List( + ctx, + metav1.ListOptions{ + LabelSelector: labels.Set(matchLabels).AsSelector().String(), + }, + ) + if err != nil { + return nil, err + } + + var pods []corev1.Pod + for _, rs := range replicaSets.Items { + if isOwner(ownerUID, rs.GetOwnerReferences()) { + podsRS, err := GetPodsFor(ctx, clientset, namespace, fmt.Sprintf("%s/%s", ReplicaSet, rs.GetName())) + if err != nil { + return nil, err + } + pods = append(pods, podsRS...) + } + } + return pods, nil + + case Job: + job, err := clientset.BatchV1().Jobs(namespace).Get(ctx, name, metav1.GetOptions{}) + if err != nil { + return nil, err + } + matchLabels = job.Spec.Selector.MatchLabels + ownerUID = job.GetUID() + + case ReplicaSet: + rs, err := clientset.AppsV1().ReplicaSets(namespace).Get(ctx, name, metav1.GetOptions{}) + if err != nil { + return nil, err + } + matchLabels = rs.Spec.Selector.MatchLabels + ownerUID = rs.GetUID() + + case ReplicationController: + rc, err := clientset.CoreV1().ReplicationControllers(namespace).Get(ctx, name, metav1.GetOptions{}) + if err != nil { + return nil, err + } + matchLabels = rc.Spec.Selector + ownerUID = rc.GetUID() + + case StatefulSet: + ss, err := clientset.AppsV1().StatefulSets(namespace).Get(ctx, name, metav1.GetOptions{}) + if err != nil { + return nil, err + } + matchLabels = ss.Spec.Selector.MatchLabels + ownerUID = ss.GetUID() + + default: + return nil, fmt.Errorf("unsupported resource type: %s", name) + } + 
+ podList, err := clientset. + CoreV1(). + Pods(namespace). + List( + ctx, + metav1.ListOptions{ + LabelSelector: labels.Set(matchLabels).AsSelector().String(), + }, + ) + if err != nil { + return nil, err + } + + if ownerUID == "" { + return podList.Items, nil + } + + pods := []corev1.Pod{} + for _, pod := range podList.Items { + if isOwner(ownerUID, pod.GetOwnerReferences()) { + pods = append(pods, pod) + } + } + + return pods, nil +} + +func isOwner(u types.UID, ownerRefs []metav1.OwnerReference) bool { + for _, or := range ownerRefs { + if u == or.UID { + return true + } + } + return false +} diff --git a/pkg/k8s/api_test.go b/pkg/k8s/api_test.go index 8f04a88a97c50..b0d78f24e01a4 100644 --- a/pkg/k8s/api_test.go +++ b/pkg/k8s/api_test.go @@ -1,6 +1,7 @@ package k8s import ( + "context" "testing" corev1 "k8s.io/api/core/v1" @@ -303,3 +304,125 @@ status: } } } + +func TestGetPodsFor(t *testing.T) { + + configs := []string{ + // pod-1 + `apiVersion: v1 +kind: Pod +metadata: + name: pod-1 + namespace: ns + uid: pod-1 + labels: + app: foo + ownerReferences: + - apiVersion: apps/v1 + controller: true + kind: ReplicaSet + name: rs-1 + uid: rs-1 +`, + // rs-1 + `apiVersion: apps/v1 +kind: ReplicaSet +metadata: + name: rs-1 + namespace: ns + uid: rs-1 + labels: + app: foo + ownerReferences: + - apiVersion: apps/v1 + controller: true + kind: Deployment + name: deploy-1 + uid: deploy-1 +spec: + selector: + matchLabels: + app: foo +`, + // deploy-1 + `apiVersion: apps/v1 +kind: Deployment +metadata: + name: deploy-1 + namespace: ns + uid: deploy-1 +spec: + selector: + matchLabels: + app: foo +`, + // pod-2 + `apiVersion: v1 +kind: Pod +metadata: + name: pod-2 + namespace: ns + uid: pod-2 + labels: + app: foo + ownerReferences: + - apiVersion: apps/v1 + controller: true + kind: ReplicaSet + name: rs-2 + uid: rs-2 +`, + // rs-2 + `apiVersion: apps/v1 +kind: ReplicaSet +metadata: + name: rs-2 + namespace: ns + uid: rs-2 + labels: + app: foo + ownerReferences: + - apiVersion: 
apps/v1
+    controller: true
+    kind: Deployment
+    name: deploy-2
+    uid: deploy-2
+spec:
+  selector:
+    matchLabels:
+      app: foo
+`,
+		// deploy-2
+		`apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: deploy-2
+  namespace: ns
+  uid: deploy-2
+spec:
+  selector:
+    matchLabels:
+      app: foo
+`}
+
+	k8sClient, err := NewFakeAPI(configs...)
+	if err != nil {
+		t.Fatalf("Unexpected error %s", err)
+	}
+
+	// Both pod-1 and pod-2 have labels which match deploy-1's selector.
+	// However, only pod-1 is owned by deploy-1 according to the owner references.
+	// Owner references should be considered authoritative to resolve ambiguity
+	// when deployments have overlapping selectors.
+	pods, err := GetPodsFor(context.Background(), k8sClient, "ns", "deploy/deploy-1")
+	if err != nil {
+		t.Fatalf("Unexpected error %s", err)
+	}
+
+	if len(pods) != 1 {
+		for _, p := range pods {
+			t.Logf("%s/%s", p.Namespace, p.Name)
+		}
+		t.Fatalf("Expected 1 pod, got %d", len(pods))
+	}
+}
diff --git a/pkg/k8s/policy.go b/pkg/k8s/policy.go
new file mode 100644
index 0000000000000..d1f8f152c251b
--- /dev/null
+++ b/pkg/k8s/policy.go
@@ -0,0 +1,374 @@
+package k8s
+
+import (
+	"context"
+	"fmt"
+	"os"
+	"strings"
+
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/client-go/kubernetes"
+)
+
+// ServerAndAuthorization holds a server name and serverauthorization name.
+type ServerAndAuthorization struct {
+	Server              string
+	ServerAuthorization string
+}
+
+type id struct{ name, namespace string }
+
+// SazGVR is the GroupVersionResource for the ServerAuthorization resource.
+var SazGVR = schema.GroupVersionResource{
+	Group:    "policy.linkerd.io",
+	Version:  "v1alpha1",
+	Resource: "serverauthorizations",
+}
+
+// ServerGVR is the GroupVersionResource for the Server resource.
+var ServerGVR = schema.GroupVersionResource{ + Group: "policy.linkerd.io", + Version: "v1alpha1", + Resource: "servers", +} + +// ServerAuthorizationsForResource returns a list of Server-ServerAuthorization +// pairs which select pods belonging to the given resource. +func ServerAuthorizationsForResource(ctx context.Context, k8sAPI *KubernetesAPI, namespace string, resource string) ([]ServerAndAuthorization, error) { + pods, err := getPodsForResourceOrKind(ctx, k8sAPI, namespace, resource, "") + if err != nil { + return nil, err + } + podSet := make(map[id]struct{}) + for _, pod := range pods { + podSet[id{pod.Name, pod.Namespace}] = struct{}{} + } + + results := make([]ServerAndAuthorization, 0) + + sazs, err := k8sAPI.DynamicClient.Resource(SazGVR).Namespace(namespace).List(ctx, metav1.ListOptions{}) + if err != nil { + fmt.Fprintf(os.Stderr, "Failed to get serverauthorization resources: %s\n", err) + os.Exit(1) + } + + for _, saz := range sazs.Items { + var servers []unstructured.Unstructured + + if name, found, _ := unstructured.NestedString(saz.UnstructuredContent(), "spec", "server", "name"); found { + server, err := k8sAPI.DynamicClient.Resource(ServerGVR).Namespace(saz.GetNamespace()).Get(ctx, name, metav1.GetOptions{}) + if err != nil { + fmt.Fprintf(os.Stderr, "Failed to get server %s: %s\n", name, err) + os.Exit(1) + } + servers = []unstructured.Unstructured{*server} + } else if sel, found, _ := unstructured.NestedMap(saz.UnstructuredContent(), "spec", "server", "selector"); found { + selector := selector(sel) + serverList, err := k8sAPI.DynamicClient.Resource(ServerGVR).Namespace(saz.GetNamespace()).List(ctx, metav1.ListOptions{LabelSelector: metav1.FormatLabelSelector(&selector)}) + if err != nil { + fmt.Fprintf(os.Stderr, "Failed to get servers: %s\n", err) + os.Exit(1) + } + servers = serverList.Items + } + + for _, server := range servers { + if sel, found, _ := unstructured.NestedMap(server.UnstructuredContent(), "spec", "podSelector"); found { + 
selector := selector(sel) + selectedPods, err := k8sAPI.CoreV1().Pods(server.GetNamespace()).List(ctx, metav1.ListOptions{LabelSelector: metav1.FormatLabelSelector(&selector)}) + if err != nil { + fmt.Fprintf(os.Stderr, "Failed to get pods: %s\n", err) + os.Exit(1) + } + if serverIncludesPod(server, selectedPods.Items, podSet) { + results = append(results, ServerAndAuthorization{server.GetName(), saz.GetName()}) + } + } + + } + } + return results, nil +} + +// ServersForResource returns a list of Server names of Servers which select pods +// belonging to the given resource. +func ServersForResource(ctx context.Context, k8sAPI *KubernetesAPI, namespace string, resource string, labelSelector string) ([]string, error) { + pods, err := getPodsForResourceOrKind(ctx, k8sAPI, namespace, resource, labelSelector) + if err != nil { + return nil, err + } + podSet := make(map[id]struct{}) + for _, pod := range pods { + podSet[id{pod.Name, pod.Namespace}] = struct{}{} + } + + results := make([]string, 0) + + servers, err := k8sAPI.DynamicClient.Resource(ServerGVR).Namespace(namespace).List(ctx, metav1.ListOptions{}) + if err != nil { + fmt.Fprintf(os.Stderr, "Failed to get serverauthorization resources: %s\n", err) + os.Exit(1) + } + + for _, server := range servers.Items { + if sel, found, _ := unstructured.NestedMap(server.UnstructuredContent(), "spec", "podSelector"); found { + selector := selector(sel) + selectedPods, err := k8sAPI.CoreV1().Pods(server.GetNamespace()).List(ctx, metav1.ListOptions{LabelSelector: metav1.FormatLabelSelector(&selector)}) + if err != nil { + fmt.Fprintf(os.Stderr, "Failed to get pods: %s\n", err) + os.Exit(1) + } + if serverIncludesPod(server, selectedPods.Items, podSet) { + results = append(results, server.GetName()) + } + } + + } + return results, nil +} + +// ServerAuthorizationsForServer returns a list of ServerAuthorization names of +// ServerAuthorizations which select the given Server. 
+func ServerAuthorizationsForServer(ctx context.Context, k8sAPI *KubernetesAPI, namespace string, server string) ([]string, error) { + results := make([]string, 0) + + sazs, err := k8sAPI.DynamicClient.Resource(SazGVR).Namespace(namespace).List(ctx, metav1.ListOptions{}) + if err != nil { + fmt.Fprintf(os.Stderr, "Failed to get serverauthorization resources: %s\n", err) + os.Exit(1) + } + + for _, saz := range sazs.Items { + if name, found, _ := unstructured.NestedString(saz.UnstructuredContent(), "spec", "server", "name"); found { + s, err := k8sAPI.DynamicClient.Resource(ServerGVR).Namespace(saz.GetNamespace()).Get(ctx, name, metav1.GetOptions{}) + if err != nil { + fmt.Fprintf(os.Stderr, "Failed to get server %s: %s\n", name, err) + os.Exit(1) + } + if s.GetName() == server { + results = append(results, saz.GetName()) + } + } else if sel, found, _ := unstructured.NestedMap(saz.UnstructuredContent(), "spec", "server", "selector"); found { + selector := selector(sel) + serverList, err := k8sAPI.DynamicClient.Resource(ServerGVR).Namespace(saz.GetNamespace()).List(ctx, metav1.ListOptions{LabelSelector: metav1.FormatLabelSelector(&selector)}) + if err != nil { + fmt.Fprintf(os.Stderr, "Failed to get servers: %s\n", err) + os.Exit(1) + } + for _, s := range serverList.Items { + if s.GetName() == server { + results = append(results, saz.GetName()) + break + } + } + } + } + return results, nil +} + +func selector(selector map[string]interface{}) metav1.LabelSelector { + if labels, found, err := unstructured.NestedStringMap(selector, "matchLabels"); found && err == nil { + return metav1.LabelSelector{MatchLabels: labels} + } + if expressions, found, err := unstructured.NestedSlice(selector, "matchExpressions"); found && err == nil { + exprs := make([]metav1.LabelSelectorRequirement, len(expressions)) + for i, expr := range expressions { + exprs[i] = matchExpression(expr) + } + return metav1.LabelSelector{MatchExpressions: exprs} + } + return metav1.LabelSelector{} +} + 
+func matchExpression(expr interface{}) metav1.LabelSelectorRequirement { + if exprMap, ok := expr.(map[string]interface{}); ok { + if key, found, err := unstructured.NestedString(exprMap, "key"); found && err == nil { + if op, found, err := unstructured.NestedString(exprMap, "operator"); found && err == nil { + if values, found, err := unstructured.NestedStringSlice(exprMap, "values"); found && err == nil { + return metav1.LabelSelectorRequirement{ + Key: key, + Operator: metav1.LabelSelectorOperator(op), + Values: values, + } + } + } + } + } + return metav1.LabelSelectorRequirement{} +} + +func serverIncludesPod(server unstructured.Unstructured, serverPods []corev1.Pod, podSet map[id]struct{}) bool { + for _, pod := range serverPods { + if _, ok := podSet[id{pod.Name, pod.Namespace}]; ok { + if port, found, err := unstructured.NestedInt64(server.UnstructuredContent(), "spec", "port"); found && err == nil { + for _, container := range pod.Spec.Containers { + for _, p := range container.Ports { + if int32(port) == p.ContainerPort { + return true + } + } + } + } + if port, found, err := unstructured.NestedString(server.UnstructuredContent(), "spec", "port"); found && err == nil { + for _, container := range pod.Spec.Containers { + for _, p := range container.Ports { + if port == p.Name { + return true + } + } + } + } + } + } + return false +} + +// getPodsForResourceOrKind is similar to getPodsForResource, but also supports +// querying for all resources of a given kind (i.e. when resource name is unspecified). 
+func getPodsForResourceOrKind(ctx context.Context, k8sAPI kubernetes.Interface, namespace string, resource string, labelSelector string) ([]corev1.Pod, error) { + + elems := strings.Split(resource, "/") + if len(elems) > 2 { + return nil, fmt.Errorf("invalid resource: %s", resource) + } + if len(elems) == 2 { + pods, err := GetPodsFor(ctx, k8sAPI, namespace, resource) + if err != nil { + fmt.Fprintf(os.Stderr, "failed to get pods: %s", err) + os.Exit(1) + } + return pods, nil + } + pods := []corev1.Pod{} + + typ, err := CanonicalResourceNameFromFriendlyName(elems[0]) + if err != nil { + return nil, fmt.Errorf("invalid resource: %s", resource) + } + + selector := metav1.ListOptions{ + LabelSelector: labelSelector, + } + + switch typ { + case Pod: + ps, err := k8sAPI.CoreV1().Pods(namespace).List(ctx, selector) + if err != nil { + fmt.Fprintf(os.Stderr, "failed to get pods: %s", err) + os.Exit(1) + } + pods = append(pods, ps.Items...) + + case CronJob: + jobs, err := k8sAPI.BatchV1().CronJobs(namespace).List(ctx, selector) + if err != nil { + fmt.Fprintf(os.Stderr, "failed to get cronjobs: %s", err) + os.Exit(1) + } + for _, job := range jobs.Items { + ps, err := GetPodsFor(ctx, k8sAPI, namespace, fmt.Sprintf("%s/%s", CronJob, job.Name)) + if err != nil { + fmt.Fprintf(os.Stderr, "failed to get pods: %s", err) + os.Exit(1) + } + pods = append(pods, ps...) + } + + case DaemonSet: + dss, err := k8sAPI.AppsV1().DaemonSets(namespace).List(ctx, selector) + if err != nil { + fmt.Fprintf(os.Stderr, "failed to get demonsets: %s", err) + os.Exit(1) + } + for _, ds := range dss.Items { + ps, err := GetPodsFor(ctx, k8sAPI, namespace, fmt.Sprintf("%s/%s", DaemonSet, ds.Name)) + if err != nil { + fmt.Fprintf(os.Stderr, "failed to get pods: %s", err) + os.Exit(1) + } + pods = append(pods, ps...) 
+ } + + case Deployment: + deploys, err := k8sAPI.AppsV1().Deployments(namespace).List(ctx, selector) + if err != nil { + fmt.Fprintf(os.Stderr, "failed to get deployments: %s", err) + os.Exit(1) + } + for _, deploy := range deploys.Items { + ps, err := GetPodsFor(ctx, k8sAPI, namespace, fmt.Sprintf("%s/%s", Deployment, deploy.Name)) + if err != nil { + fmt.Fprintf(os.Stderr, "failed to get pods: %s", err) + os.Exit(1) + } + pods = append(pods, ps...) + } + + case Job: + jobs, err := k8sAPI.BatchV1().Jobs(namespace).List(ctx, selector) + if err != nil { + fmt.Fprintf(os.Stderr, "failed to get jobs: %s", err) + os.Exit(1) + } + for _, job := range jobs.Items { + ps, err := GetPodsFor(ctx, k8sAPI, namespace, fmt.Sprintf("%s/%s", Job, job.Name)) + if err != nil { + fmt.Fprintf(os.Stderr, "failed to get pods: %s", err) + os.Exit(1) + } + pods = append(pods, ps...) + } + + case ReplicaSet: + rss, err := k8sAPI.AppsV1().ReplicaSets(namespace).List(ctx, selector) + if err != nil { + fmt.Fprintf(os.Stderr, "failed to get replicasets: %s", err) + os.Exit(1) + } + for _, rs := range rss.Items { + ps, err := GetPodsFor(ctx, k8sAPI, namespace, fmt.Sprintf("%s/%s", ReplicaSet, rs.Name)) + if err != nil { + fmt.Fprintf(os.Stderr, "failed to get pods: %s", err) + os.Exit(1) + } + pods = append(pods, ps...) + } + + case ReplicationController: + rcs, err := k8sAPI.CoreV1().ReplicationControllers(namespace).List(ctx, selector) + if err != nil { + fmt.Fprintf(os.Stderr, "failed to get replicationcontrollers: %s", err) + os.Exit(1) + } + for _, rc := range rcs.Items { + ps, err := GetPodsFor(ctx, k8sAPI, namespace, fmt.Sprintf("%s/%s", ReplicationController, rc.Name)) + if err != nil { + fmt.Fprintf(os.Stderr, "failed to get pods: %s", err) + os.Exit(1) + } + pods = append(pods, ps...) 
+ } + + case StatefulSet: + sss, err := k8sAPI.AppsV1().StatefulSets(namespace).List(ctx, selector) + if err != nil { + fmt.Fprintf(os.Stderr, "failed to get statefulsets: %s", err) + os.Exit(1) + } + for _, ss := range sss.Items { + ps, err := GetPodsFor(ctx, k8sAPI, namespace, fmt.Sprintf("%s/%s", StatefulSet, ss.Name)) + if err != nil { + fmt.Fprintf(os.Stderr, "failed to get pods: %s", err) + os.Exit(1) + } + pods = append(pods, ps...) + } + + default: + return nil, fmt.Errorf("unsupported resource type: %s", typ) + } + return pods, nil +} diff --git a/viz/cmd/authz.go b/viz/cmd/authz.go new file mode 100644 index 0000000000000..3093111daca5f --- /dev/null +++ b/viz/cmd/authz.go @@ -0,0 +1,196 @@ +package cmd + +import ( + "fmt" + "os" + + "github.com/linkerd/linkerd2/cli/table" + pkgcmd "github.com/linkerd/linkerd2/pkg/cmd" + "github.com/linkerd/linkerd2/pkg/healthcheck" + "github.com/linkerd/linkerd2/pkg/k8s" + "github.com/linkerd/linkerd2/viz/metrics-api/util" + "github.com/linkerd/linkerd2/viz/pkg/api" + "github.com/spf13/cobra" +) + +// NewCmdAuthz creates a new cobra command `authz` +func NewCmdAuthz() *cobra.Command { + options := newStatOptions() + + cmd := &cobra.Command{ + Use: "authz [flags] resource", + Short: "Display stats for server authorizations for a resource", + Long: "Display stats for server authorizations for a resource.", + Args: cobra.MinimumNArgs(1), + ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { + + k8sAPI, err := k8s.NewAPI(kubeconfigPath, kubeContext, impersonate, impersonateGroup, 0) + if err != nil { + return nil, cobra.ShellCompDirectiveError + } + + if options.namespace == "" { + options.namespace = pkgcmd.GetDefaultNamespace(kubeconfigPath, kubeContext) + } + + cc := k8s.NewCommandCompletion(k8sAPI, options.namespace) + + results, err := cc.Complete(args, toComplete) + if err != nil { + return nil, cobra.ShellCompDirectiveError + } + + return results, 
cobra.ShellCompDirectiveDefault + }, + RunE: func(cmd *cobra.Command, args []string) error { + if options.namespace == "" { + options.namespace = pkgcmd.GetDefaultNamespace(kubeconfigPath, kubeContext) + } + + k8sAPI, err := k8s.NewAPI(kubeconfigPath, kubeContext, impersonate, impersonateGroup, 0) + if err != nil { + return err + } + // The gRPC client is concurrency-safe, so we can reuse it in all the following goroutines + // https://github.com/grpc/grpc-go/issues/682 + client := api.CheckClientOrExit(healthcheck.Options{ + ControlPlaneNamespace: controlPlaneNamespace, + KubeConfig: kubeconfigPath, + Impersonate: impersonate, + ImpersonateGroup: impersonateGroup, + KubeContext: kubeContext, + APIAddr: apiAddr, + }) + + var resource string + if len(args) == 1 { + resource = args[0] + } else if len(args) == 2 { + resource = args[0] + "/" + args[1] + } + + cols := []table.Column{ + table.NewColumn("SERVER").WithLeftAlign(), + table.NewColumn("AUTHZ").WithLeftAlign(), + table.NewColumn("SUCCESS"), + table.NewColumn("RPS"), + table.NewColumn("LATENCY_P50"), + table.NewColumn("LATENCY_P95"), + table.NewColumn("LATENCY_P99"), + } + rows := []table.Row{} + + servers, err := k8s.ServersForResource(cmd.Context(), k8sAPI, options.namespace, resource, options.labelSelector) + if err != nil { + fmt.Fprint(os.Stderr, err.Error()) + os.Exit(1) + } + for _, server := range servers { + sazs, err := k8s.ServerAuthorizationsForServer(cmd.Context(), k8sAPI, options.namespace, server) + if err != nil { + fmt.Fprint(os.Stderr, err.Error()) + os.Exit(1) + } + for _, saz := range sazs { + requestParams := util.StatsSummaryRequestParams{ + StatsBaseRequestParams: util.StatsBaseRequestParams{ + TimeWindow: options.timeWindow, + ResourceName: saz, + ResourceType: k8s.ServerAuthorization, + Namespace: options.namespace, + AllNamespaces: false, + }, + ToNamespace: options.namespace, + } + requestParams.ToName = server + requestParams.ToType = k8s.Server + + req, err := 
util.BuildStatSummaryRequest(requestParams) + if err != nil { + return err + } + resp, err := requestStatsFromAPI(client, req) + if err != nil { + fmt.Fprint(os.Stderr, err.Error()) + os.Exit(1) + } + + for _, row := range respToRows(resp) { + if row.Stats == nil { + rows = append(rows, table.Row{ + server, + saz, + "-", + "-", + "-", + "-", + "-", + }) + } else { + rows = append(rows, table.Row{ + server, + saz, + fmt.Sprintf("%.2f%%", getSuccessRate(row.Stats.GetSuccessCount(), row.Stats.GetFailureCount())*100), + fmt.Sprintf("%.1frps", getRequestRate(row.Stats.GetSuccessCount(), row.Stats.GetFailureCount(), row.TimeWindow)), + fmt.Sprintf("%dms", row.Stats.LatencyMsP50), + fmt.Sprintf("%dms", row.Stats.LatencyMsP95), + fmt.Sprintf("%dms", row.Stats.LatencyMsP99), + }) + } + } + } + + // Unauthorized + requestParams := util.StatsSummaryRequestParams{ + StatsBaseRequestParams: util.StatsBaseRequestParams{ + TimeWindow: options.timeWindow, + ResourceName: server, + ResourceType: k8s.Server, + Namespace: options.namespace, + AllNamespaces: false, + }, + ToNamespace: options.namespace, + LabelSelector: options.labelSelector, + } + + req, err := util.BuildStatSummaryRequest(requestParams) + if err != nil { + return err + } + resp, err := requestStatsFromAPI(client, req) + if err != nil { + fmt.Fprint(os.Stderr, err.Error()) + os.Exit(1) + } + for _, row := range respToRows(resp) { + if row.SrvStats != nil && row.SrvStats.DeniedCount > 0 { + rows = append(rows, table.Row{ + server, + "[UNAUTHORIZED]", + "-", + fmt.Sprintf("%.1frps", getRequestRate(row.SrvStats.DeniedCount, 0, row.TimeWindow)), + "-", + "-", + "-", + }) + } + } + } + + data := table.NewTable(cols, rows) + data.Render(os.Stdout) + + return nil + }, + } + + cmd.PersistentFlags().StringVarP(&options.namespace, "namespace", "n", options.namespace, "Namespace of the specified resource") + cmd.PersistentFlags().StringVarP(&options.timeWindow, "time-window", "t", options.timeWindow, "Stat window (for example: 
\"15s\", \"1m\", \"10m\", \"1h\"). Needs to be at least 15s.") + cmd.PersistentFlags().StringVarP(&options.outputFormat, "output", "o", options.outputFormat, "Output format; one of: \"table\" or \"json\" or \"wide\"") + cmd.PersistentFlags().StringVarP(&options.labelSelector, "selector", "l", options.labelSelector, "Selector (label query) to filter on, supports '=', '==', and '!='") + + pkgcmd.ConfigureNamespaceFlagCompletion( + cmd, []string{"namespace"}, + kubeconfigPath, impersonate, impersonateGroup, kubeContext) + return cmd +} diff --git a/viz/cmd/root.go b/viz/cmd/root.go index e832da7aed65e..9578c0c8f1c49 100644 --- a/viz/cmd/root.go +++ b/viz/cmd/root.go @@ -76,6 +76,7 @@ func NewCmdViz() *cobra.Command { vizCmd.PersistentFlags().StringArrayVar(&impersonateGroup, "as-group", []string{}, "Group to impersonate for Kubernetes operations") vizCmd.PersistentFlags().StringVar(&apiAddr, "api-addr", "", "Override kubeconfig and communicate directly with the control plane at host:port (mostly for testing)") vizCmd.PersistentFlags().BoolVar(&verbose, "verbose", false, "Turn on debug logging") + vizCmd.AddCommand(NewCmdAuthz()) vizCmd.AddCommand(NewCmdCheck()) vizCmd.AddCommand(NewCmdDashboard()) vizCmd.AddCommand(NewCmdEdges()) diff --git a/viz/metrics-api/prometheus.go b/viz/metrics-api/prometheus.go index a901e5e3cc12d..0f05b7a098231 100644 --- a/viz/metrics-api/prometheus.go +++ b/viz/metrics-api/prometheus.go @@ -118,8 +118,14 @@ func promDstGroupByLabelNames(resource *pb.Resource) model.LabelNames { func promQueryLabels(resource *pb.Resource) model.LabelSet { set := model.LabelSet{} if resource != nil { - if resource.Name != "" && resource.GetType() != k8s.Service { - set[promResourceType(resource)] = model.LabelValue(resource.Name) + if resource.Name != "" { + if resource.GetType() == k8s.Server { + set[serverLabel] = model.LabelValue(resource.GetName()) + } else if resource.GetType() == k8s.ServerAuthorization { + set[serverAuthorizationLabel] = 
model.LabelValue(resource.GetName()) + } else if resource.GetType() != k8s.Service { + set[promResourceType(resource)] = model.LabelValue(resource.Name) + } } if shouldAddNamespaceLabel(resource) { set[namespaceLabel] = model.LabelValue(resource.Namespace) diff --git a/viz/metrics-api/stat_summary.go b/viz/metrics-api/stat_summary.go index a2634871627a6..eba146cb0d091 100644 --- a/viz/metrics-api/stat_summary.go +++ b/viz/metrics-api/stat_summary.go @@ -24,20 +24,6 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" ) -var ( - serverGroupVersionResource = schema.GroupVersionResource{ - Group: "policy.linkerd.io", - Version: "v1alpha1", - Resource: "servers", - } - - serverAuthorizationGroupVersionResource = schema.GroupVersionResource{ - Group: "policy.linkerd.io", - Version: "v1alpha1", - Resource: "serverauthorizations", - } -) - type resourceResult struct { res *pb.StatTable err error @@ -406,9 +392,9 @@ func (s *grpcServer) getPolicyResourceKeys(req *pb.StatSummaryRequest) ([]rKey, var gvr schema.GroupVersionResource if req.GetSelector().Resource.GetType() == k8s.Server { - gvr = serverGroupVersionResource + gvr = k8s.ServerGVR } else if req.GetSelector().Resource.GetType() == k8s.ServerAuthorization { - gvr = serverAuthorizationGroupVersionResource + gvr = k8s.SazGVR } res := req.GetSelector().GetResource()