diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 369a2dab79b5..03663bb64b3c 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -391,6 +391,7 @@ /comp/anomalydetection/logssource @DataDog/q-branch /comp/anomalydetection/observer @DataDog/q-branch /comp/anomalydetection/recorder @DataDog/q-branch +/comp/anomalydetection/reporter @DataDog/q-branch /comp/autoscaling/datadogclient @DataDog/container-integrations /comp/connectivitychecker @DataDog/fleet /comp/dataobs/queryactions @DataDog/data-observability diff --git a/BUILD.bazel b/BUILD.bazel index d5480622e19f..462c85890253 100644 --- a/BUILD.bazel +++ b/BUILD.bazel @@ -200,6 +200,9 @@ exports_files(glob( # gazelle:exclude comp/anomalydetection/recorder/fx-noop # gazelle:exclude comp/anomalydetection/recorder/impl # gazelle:exclude comp/anomalydetection/recorder/impl-noop +# gazelle:exclude comp/anomalydetection/reporter/def +# gazelle:exclude comp/anomalydetection/reporter/fx +# gazelle:exclude comp/anomalydetection/reporter/impl # gazelle:exclude comp/otelcol/collector # gazelle:exclude comp/otelcol/collector-contrib/fx # gazelle:exclude comp/otelcol/collector-contrib/impl diff --git a/cmd/agent/subcommands/run/command_observer.go b/cmd/agent/subcommands/run/command_observer.go index d2cbd58006a3..c9c821f18ce7 100644 --- a/cmd/agent/subcommands/run/command_observer.go +++ b/cmd/agent/subcommands/run/command_observer.go @@ -18,6 +18,7 @@ import ( logssourcefx "github.com/DataDog/datadog-agent/comp/anomalydetection/logssource/fx" observerfx "github.com/DataDog/datadog-agent/comp/anomalydetection/observer/fx" recorderfx "github.com/DataDog/datadog-agent/comp/anomalydetection/recorder/fx-noop" + reporterfx "github.com/DataDog/datadog-agent/comp/anomalydetection/reporter/fx" ) func getObserverOptions() fx.Option { @@ -25,5 +26,6 @@ func getObserverOptions() fx.Option { observerfx.Module(), logssourcefx.Module(), recorderfx.Module(), + reporterfx.Module(), ) } diff --git a/comp/README.md 
b/comp/README.md index bbd0f9ef7b38..2c410ff02983 100644 --- a/comp/README.md +++ b/comp/README.md @@ -788,6 +788,13 @@ Package observer provides a component for observing data flowing through the age Package recorder provides a middleware component for recording and replaying observer data. +### [comp/anomalydetection/reporter](https://pkg.go.dev/github.com/DataDog/datadog-agent/comp/anomalydetection/reporter) + +*Datadog Team*: q-branch + +Package reporter provides a component that formats and dispatches anomaly +detection events to the Datadog backend or stdout. + ### [comp/autoscaling/datadogclient](https://pkg.go.dev/github.com/DataDog/datadog-agent/comp/autoscaling/datadogclient) *Datadog Team*: container-integrations diff --git a/comp/anomalydetection/observer/def/constants.go b/comp/anomalydetection/observer/def/constants.go new file mode 100644 index 000000000000..abcb49dc328d --- /dev/null +++ b/comp/anomalydetection/observer/def/constants.go @@ -0,0 +1,26 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +package observer + +// TelemetryNamespace is the storage namespace used for observer-internal debug +// metrics (e.g. testbench UI charts). Detectors must not treat it as workload data. +const TelemetryNamespace = "telemetry" + +// LogPatternExtractorNamespace is the canonical storage namespace for metrics +// emitted by the log pattern extractor. Used as SeriesDescriptor.Namespace and +// as the component name in the catalog. +const LogPatternExtractorNamespace = "log_pattern_extractor" + +// LogMetricsExtractorNamespace is the canonical storage namespace for metrics +// emitted by the log metrics extractor. Used as SeriesDescriptor.Namespace and +// as the component name in the catalog. 
+const LogMetricsExtractorNamespace = "log_metrics_extractor" + +// SplitTagKeyOrder is the canonical ordered list of tag dimensions used to split +// log clusters and to render split-tag summaries (e.g. in anomaly event messages). +// When adding dimensions, update TagGroupByKey and extractTagGroupByKey in +// comp/anomalydetection/observer/impl/log_tagged_pattern_clusterer.go. +var SplitTagKeyOrder = []string{"source", "service", "env", "host"} diff --git a/comp/anomalydetection/observer/def/types.go b/comp/anomalydetection/observer/def/types.go index f2f5d932eb4c..d37ffcd04cbd 100644 --- a/comp/anomalydetection/observer/def/types.go +++ b/comp/anomalydetection/observer/def/types.go @@ -382,17 +382,6 @@ type AnomalyDebugInfo struct { CUSUMValues []float64 // S[t] values (may be truncated to last N points) } -// ReportOutput is the output model passed to reporters after each advance cycle. -// It carries enough data for reporters to act without reaching back into engine internals. -type ReportOutput struct { - // AdvancedToSec is the data time the engine advanced to. - AdvancedToSec int64 - // NewAnomalies are anomalies detected in this advance cycle. - NewAnomalies []Anomaly - // ActiveCorrelations are the current correlation patterns across all correlators. - ActiveCorrelations []ActiveCorrelation -} - // Series is a time series with simple timestamp/value points. // This is the simplified view passed to SeriesDetector. type Series struct { @@ -466,14 +455,6 @@ type Correlator interface { Reset() } -// Reporter receives reports and displays or delivers them. -type Reporter interface { - // Name returns the reporter name for debugging. - Name() string - // Report delivers a report to its destination (stdout, file, webserver, etc). - Report(report ReportOutput) -} - // ActiveCorrelation represents a detected correlation pattern. type ActiveCorrelation struct { Pattern string // pattern name, e.g. 
"kernel_bottleneck" @@ -492,10 +473,6 @@ type RawAnomalyState interface { RawAnomalies() []Anomaly } -// TelemetryNamespace is the storage namespace used for observer-internal debug -// metrics (e.g. testbench UI charts). Detectors must not treat it as workload data. -const TelemetryNamespace = "telemetry" - // SeriesFilter specifies criteria for selecting series. type SeriesFilter struct { Namespace string // exact match (empty = any) diff --git a/comp/anomalydetection/observer/impl/consumer_memory.go b/comp/anomalydetection/observer/impl/consumer_memory.go index 623609662c9e..2c56855090dc 100644 --- a/comp/anomalydetection/observer/impl/consumer_memory.go +++ b/comp/anomalydetection/observer/impl/consumer_memory.go @@ -6,8 +6,6 @@ package observerimpl import ( - "fmt" - observer "github.com/DataDog/datadog-agent/comp/anomalydetection/observer/def" ) @@ -44,96 +42,3 @@ func (p *PassthroughCorrelator) Reset() { func (p *PassthroughCorrelator) GetPending() []observer.Anomaly { return p.anomalies } - -// StdoutReporter prints reports to stdout. -// It tracks correlation state changes and only prints when correlations appear or disappear. -// All data comes through Report(ReportOutput) — no backdoor access to engine internals. -type StdoutReporter struct { - seenCorrelations map[string]string // pattern -> title for correlations we've reported - seenRawAnomalies map[string]bool // source|detector -> whether we've reported this raw anomaly - // lastCorrelations is cached from the most recent Report call for PrintFinalState. - lastCorrelations []observer.ActiveCorrelation -} - -// Name returns the reporter name. -func (r *StdoutReporter) Name() string { - return "stdout_reporter" -} - -// Report receives a ReportOutput with anomalies and correlations from the engine -// and prints changes. 
It prints new anomalies and tracks correlation state changes, -// printing "[observer] NEW: {title}" when a correlation first appears and -// "[observer] CLEARED: {title}" when a correlation disappears. -func (r *StdoutReporter) Report(report observer.ReportOutput) { - // Report new anomalies (with detector identification) - r.reportNewAnomalies(report.NewAnomalies) - // Check for correlation changes - r.reportCorrelationChanges(report.ActiveCorrelations) - // Cache for PrintFinalState - r.lastCorrelations = report.ActiveCorrelations -} - -// reportNewAnomalies prints new anomalies from this advance cycle. -func (r *StdoutReporter) reportNewAnomalies(anomalies []observer.Anomaly) { - if r.seenRawAnomalies == nil { - r.seenRawAnomalies = make(map[string]bool) - } - - for _, anomaly := range anomalies { - key := anomaly.Source.String() + "|" + anomaly.DetectorName - if !r.seenRawAnomalies[key] { - fmt.Printf("[observer] [%s] ANOMALY: %s\n", anomaly.DetectorName, anomaly.Source.String()) - fmt.Printf(" %s\n", anomaly.Description) - r.seenRawAnomalies[key] = true - } - } -} - -// reportCorrelationChanges checks for new and cleared correlations. 
-func (r *StdoutReporter) reportCorrelationChanges(activeCorrelations []observer.ActiveCorrelation) { - if r.seenCorrelations == nil { - r.seenCorrelations = make(map[string]string) - } - - // Build set of currently active pattern names - currentlyActive := make(map[string]string) // pattern -> title - for _, ac := range activeCorrelations { - currentlyActive[ac.Pattern] = ac.Title - } - - // Check for new correlations (in current but not in seen) - for _, ac := range activeCorrelations { - if _, seen := r.seenCorrelations[ac.Pattern]; !seen { - fmt.Printf("[observer] NEW: %s\n", ac.Title) - for _, anomaly := range ac.Anomalies { - fmt.Printf(" - %s\n", anomaly.Description) - } - r.seenCorrelations[ac.Pattern] = ac.Title - } - } - - // Check for cleared correlations (in seen but not in current) - for pattern, title := range r.seenCorrelations { - if _, ok := currentlyActive[pattern]; !ok { - fmt.Printf("[observer] CLEARED: %s\n", title) - delete(r.seenCorrelations, pattern) - } - } -} - -// PrintFinalState prints the current state of all correlations. -// Call this at the end of a demo to see final cluster contents. -// Uses the last correlations received via Report. 
-func (r *StdoutReporter) PrintFinalState() { - if len(r.lastCorrelations) == 0 { - fmt.Println("[observer] Final state: no active correlations") - return - } - fmt.Println("[observer] Correlation Summary:") - for _, ac := range r.lastCorrelations { - fmt.Printf(" Cluster: %d anomalies\n", len(ac.Anomalies)) - for _, anomaly := range ac.Anomalies { - fmt.Printf(" - %s\n", anomaly.Description) - } - } -} diff --git a/comp/anomalydetection/observer/impl/events.go b/comp/anomalydetection/observer/impl/events.go index 57f08a605447..e634a70c0dfc 100644 --- a/comp/anomalydetection/observer/impl/events.go +++ b/comp/anomalydetection/observer/impl/events.go @@ -7,6 +7,7 @@ package observerimpl import ( observerdef "github.com/DataDog/datadog-agent/comp/anomalydetection/observer/def" + reporterdef "github.com/DataDog/datadog-agent/comp/anomalydetection/reporter/def" ) // engineEventKind identifies the type of engine event. @@ -63,14 +64,14 @@ type eventSink interface { // the event and active correlations from the stateView, then calls Report // on all registered reporters. 
type reporterEventSink struct { - reporters []observerdef.Reporter + reporters []reporterdef.Reporter state *stateView // for querying current correlations on advance } func (s *reporterEventSink) onEngineEvent(evt engineEvent) { if evt.kind == eventAdvanceCompleted { ac := evt.advanceCompleted - output := observerdef.ReportOutput{ + output := reporterdef.ReportOutput{ AdvancedToSec: ac.advancedToSec, NewAnomalies: ac.anomalies, } diff --git a/comp/anomalydetection/observer/impl/events_test.go b/comp/anomalydetection/observer/impl/events_test.go index 5b7869ceb843..26bd2233b7fd 100644 --- a/comp/anomalydetection/observer/impl/events_test.go +++ b/comp/anomalydetection/observer/impl/events_test.go @@ -15,6 +15,7 @@ import ( "github.com/stretchr/testify/require" observerdef "github.com/DataDog/datadog-agent/comp/anomalydetection/observer/def" + reporterdef "github.com/DataDog/datadog-agent/comp/anomalydetection/reporter/def" ) // collectingSink collects all events for test assertions. @@ -609,7 +610,7 @@ func TestReporterEventSink(t *testing.T) { reporter := &countingReporter{count: &reported} sink := &reporterEventSink{ - reporters: []observerdef.Reporter{reporter}, + reporters: []reporterdef.Reporter{reporter}, } // advanceCompleted should trigger Report. 
@@ -637,7 +638,7 @@ type countingReporter struct { } func (r *countingReporter) Name() string { return "counting" } -func (r *countingReporter) Report(_ observerdef.ReportOutput) { *r.count++ } +func (r *countingReporter) Report(_ reporterdef.ReportOutput) { *r.count++ } func TestFindingM1_DedupKeyTooCoarse(t *testing.T) { anomalies := []observerdef.Anomaly{ diff --git a/comp/anomalydetection/observer/impl/log_metrics_extractor.go b/comp/anomalydetection/observer/impl/log_metrics_extractor.go index 271bd5eb8b89..e87df18bbc51 100644 --- a/comp/anomalydetection/observer/impl/log_metrics_extractor.go +++ b/comp/anomalydetection/observer/impl/log_metrics_extractor.go @@ -38,9 +38,6 @@ func DefaultLogMetricsExtractorConfig() LogMetricsExtractorConfig { } } -// LogMetricsExtractorName is the canonical name for the log metrics extractor. -const LogMetricsExtractorName = "log_metrics_extractor" - // LogMetricsExtractor converts logs into timeseries metric outputs: // - JSON logs: numeric fields -> Avg aggregation // - Unstructured logs: pattern frequency -> Sum aggregation @@ -64,7 +61,7 @@ func NewLogMetricsExtractor(config LogMetricsExtractorConfig) *LogMetricsExtract return &LogMetricsExtractor{config: config} } -func (a *LogMetricsExtractor) Name() string { return LogMetricsExtractorName } +func (a *LogMetricsExtractor) Name() string { return observer.LogMetricsExtractorNamespace } // Reset clears cached per-series context so replay/reanalysis starts from the // currently observed data instead of reusing stale examples. 
diff --git a/comp/anomalydetection/observer/impl/log_pattern_extractor.go b/comp/anomalydetection/observer/impl/log_pattern_extractor.go index dc431953230c..ed04a9cefe9b 100644 --- a/comp/anomalydetection/observer/impl/log_pattern_extractor.go +++ b/comp/anomalydetection/observer/impl/log_pattern_extractor.go @@ -13,11 +13,6 @@ import ( "github.com/DataDog/datadog-agent/comp/anomalydetection/observer/impl/patterns" ) -// LogPatternExtractorName is the canonical name for the log pattern extractor. -// It is used as the storage namespace for emitted metrics, as the component -// name in the catalog, and in notify formatting for log-derived anomalies. -const LogPatternExtractorName = "log_pattern_extractor" - // TODO(agent-q): Add a test to ensure this is >= the time we evict metrics // defaultClusterTimeToLive is the time to live for a cluster. // If a cluster hasn't been seen since this time, it will be removed. @@ -213,7 +208,7 @@ func NewLogPatternExtractor(cfg LogPatternExtractorConfig) *LogPatternExtractor // Name returns the extractor name. func (e *LogPatternExtractor) Name() string { - return "log_pattern_extractor" + return observerdef.LogPatternExtractorNamespace } // Reset clears clustering and cached per-series context so reanalysis starts diff --git a/comp/anomalydetection/observer/impl/log_tagged_pattern_clusterer.go b/comp/anomalydetection/observer/impl/log_tagged_pattern_clusterer.go index f979d5c00f44..04426133e64f 100644 --- a/comp/anomalydetection/observer/impl/log_tagged_pattern_clusterer.go +++ b/comp/anomalydetection/observer/impl/log_tagged_pattern_clusterer.go @@ -16,12 +16,8 @@ import ( "github.com/DataDog/datadog-agent/comp/anomalydetection/observer/impl/patterns" ) -// splitTagKeyOrder is the canonical ordered list of tag dimensions used to split -// log clusters. The order governs how split-tag summaries are rendered in event -// messages. Add new fields here AND in TagGroupByKey / extractTagGroupByKey. 
-var splitTagKeyOrder = []string{"source", "service", "env", "host"} - // TagGroupByKey holds the tags that are responsible for grouping logs into different clusters. +// Canonical key order for display is observer.SplitTagKeyOrder (def/constants.go). // Absent tags (e.g. a log with no "env" tag) are represented by an empty string. type TagGroupByKey struct { // Warning: Don't forget to update functions parsing tags when adding new fields diff --git a/comp/anomalydetection/observer/impl/observer.go b/comp/anomalydetection/observer/impl/observer.go index 1a9fa390b2b6..483f5245ae44 100644 --- a/comp/anomalydetection/observer/impl/observer.go +++ b/comp/anomalydetection/observer/impl/observer.go @@ -21,6 +21,7 @@ import ( observerdef "github.com/DataDog/datadog-agent/comp/anomalydetection/observer/def" "github.com/DataDog/datadog-agent/comp/anomalydetection/observer/impl/hfrunner" recorderdef "github.com/DataDog/datadog-agent/comp/anomalydetection/recorder/def" + reporterdef "github.com/DataDog/datadog-agent/comp/anomalydetection/reporter/def" config "github.com/DataDog/datadog-agent/comp/core/config" log "github.com/DataDog/datadog-agent/comp/core/log/def" remoteagentregistry "github.com/DataDog/datadog-agent/comp/core/remoteagentregistry/def" @@ -37,6 +38,8 @@ import ( // Requires declares the input types to the observer component constructor. type Requires struct { + compdef.In + Lifecycle compdef.Lifecycle Config config.Component Log log.Component @@ -57,6 +60,9 @@ type Requires struct { WMeta option.Option[workloadmetadef.Component] FilterStore option.Option[workloadfilterdef.Component] Tagger option.Option[taggerdef.Component] + + // Reporters are provided by the reporter component via the anomalydetection_reporters Fx group. + Reporters []reporterdef.Reporter `group:"anomalydetection_reporters"` } // Provides defines the output of the observer component. 
@@ -181,14 +187,19 @@ func NewComponent(deps Requires) Provides { scheduler: &currentBehaviorPolicy{}, }) - // Wire reporters via event subscription. - // The reporterEventSink queries stateView for active correlations on each advance, - // so reporters receive all needed data through ReportOutput without backdoor access. - reporter := &StdoutReporter{} - eng.Subscribe(&reporterEventSink{ - reporters: []observerdef.Reporter{reporter}, - state: eng.StateView(), - }) + // Wire reporters provided by the reporter component via the Fx group. + // Each reporter gets its own subscription so it receives advance events independently. + // Reporters that implement StorageConsumer receive storage for rate annotations. + for _, r := range deps.Reporters { + r := r + if sc, ok := r.(reporterdef.StorageConsumer); ok { + sc.SetStorage(eng.Storage()) + } + eng.Subscribe(&reporterEventSink{ + reporters: []reporterdef.Reporter{r}, + state: eng.StateView(), + }) + } telemetryComp := deps.Telemetry if telemetryComp == nil { @@ -257,19 +268,6 @@ func NewComponent(deps Requires) Provides { } } - // Optionally add the event reporter when sending is enabled via config. - if cfg.GetBool("observer.event_reporter.sending_enabled") { - if sender, err := newEventSender(deps.Config, deps.Log, eng.Storage()); err != nil { - deps.Log.Warnf("[observer] event_reporter disabled: %v", err) - } else { - eventReporter := &EventReporter{sender: sender, logger: deps.Log} - eng.Subscribe(&reporterEventSink{ - reporters: []observerdef.Reporter{eventReporter}, - state: eng.StateView(), - }) - } - } - go obs.run() // Start high-frequency system check runner if enabled. 
diff --git a/comp/anomalydetection/observer/impl/output.go b/comp/anomalydetection/observer/impl/output.go index e7fc5dd33771..6e790dbcbd9b 100644 --- a/comp/anomalydetection/observer/impl/output.go +++ b/comp/anomalydetection/observer/impl/output.go @@ -10,6 +10,8 @@ import ( "fmt" "os" "sort" + + reporterimpl "github.com/DataDog/datadog-agent/comp/anomalydetection/reporter/impl" ) // ObserverOutput is the top-level JSON structure produced by headless mode. @@ -119,7 +121,7 @@ func (tb *TestBench) WriteObserverOutput(path string, verbose bool) error { if verbose { oc.Title = corr.Title - oc.Message = buildChangeMessage(corr, tb.engine.Storage()) + oc.Message = reporterimpl.BuildChangeMessage(corr, tb.engine.Storage()) oc.Tags = []string{"source:agent-q-branch-observer", "pattern:" + corr.Pattern} oc.MemberSeries = make([]string, len(corr.Members)) for j, m := range corr.Members { diff --git a/comp/anomalydetection/observer/impl/reporter_html.go b/comp/anomalydetection/observer/impl/reporter_html.go deleted file mode 100644 index c7c21683e907..000000000000 --- a/comp/anomalydetection/observer/impl/reporter_html.go +++ /dev/null @@ -1,1547 +0,0 @@ -// Unless explicitly stated otherwise all files in this repository are licensed -// under the Apache License Version 2.0. -// This product includes software developed at Datadog (https://www.datadoghq.com/). -// Copyright 2016-present Datadog, Inc. - -package observerimpl - -import ( - "context" - "encoding/json" - "log" - "math" - "net/http" - "sort" - "strconv" - "strings" - "sync" - "time" - - observer "github.com/DataDog/datadog-agent/comp/anomalydetection/observer/def" -) - -// parseInt64 parses a string to int64. -func parseInt64(s string) (int64, error) { - return strconv.ParseInt(s, 10, 64) -} - -// sanitizeFloat replaces Inf, NaN, and extremely large values with 0 for JSON compatibility. -// Extremely large values (> 1e15) can cause Chart.js to crash. 
-func sanitizeFloat(v float64) float64 { - if math.IsInf(v, 0) || math.IsNaN(v) { - return 0 - } - // Cap extremely large values to prevent Chart.js crashes - if v > 1e15 || v < -1e15 { - return 0 - } - return v -} - -const maxReportBuffer = 100 - -// timestampedReport wraps a ReportOutput with a wall-clock timestamp. -type timestampedReport struct { - AdvancedToSec int64 `json:"advanced_to_sec"` - NewAnomalyCount int `json:"new_anomaly_count"` - CorrelationCount int `json:"correlation_count"` - Timestamp time.Time `json:"timestamp"` - ActiveCorrelations []observer.ActiveCorrelation `json:"active_correlations,omitempty"` -} - -// HTMLReporter is an HTTP server that displays reports and metrics on a local webpage. -type HTMLReporter struct { - mu sync.RWMutex - reports []timestampedReport - storage *timeSeriesStorage - correlationState observer.Correlator - rawAnomalyState observer.RawAnomalyState - timeClusterCorrelator *TimeClusterCorrelator - server *http.Server -} - -// NewHTMLReporter creates a new HTMLReporter. -func NewHTMLReporter() *HTMLReporter { - return &HTMLReporter{ - reports: make([]timestampedReport, 0, maxReportBuffer), - } -} - -// Name returns the reporter name. -func (r *HTMLReporter) Name() string { - return "html_reporter" -} - -// Report adds a report to the buffer. -func (r *HTMLReporter) Report(report observer.ReportOutput) { - r.mu.Lock() - defer r.mu.Unlock() - - tr := timestampedReport{ - AdvancedToSec: report.AdvancedToSec, - NewAnomalyCount: len(report.NewAnomalies), - CorrelationCount: len(report.ActiveCorrelations), - Timestamp: time.Now(), - ActiveCorrelations: report.ActiveCorrelations, - } - - // Prepend to keep most recent first - r.reports = append([]timestampedReport{tr}, r.reports...) - - // Cap at maxReportBuffer (evict oldest) - if len(r.reports) > maxReportBuffer { - r.reports = r.reports[:maxReportBuffer] - } -} - -// SetStorage sets the metric storage for querying series data. 
-func (r *HTMLReporter) SetStorage(storage *timeSeriesStorage) { - r.mu.Lock() - defer r.mu.Unlock() - r.storage = storage -} - -// SetCorrelationState sets the correlation state source for querying active correlations. -func (r *HTMLReporter) SetCorrelationState(state observer.Correlator) { - r.mu.Lock() - defer r.mu.Unlock() - r.correlationState = state -} - -// SetRawAnomalyState sets the raw anomaly state source for querying individual anomalies. -func (r *HTMLReporter) SetRawAnomalyState(state observer.RawAnomalyState) { - r.mu.Lock() - defer r.mu.Unlock() - r.rawAnomalyState = state -} - -// SetTimeClusterCorrelator sets the time cluster correlator for visualization. -func (r *HTMLReporter) SetTimeClusterCorrelator(tc *TimeClusterCorrelator) { - r.mu.Lock() - defer r.mu.Unlock() - r.timeClusterCorrelator = tc -} - -// Start starts the HTTP server on the given address. -func (r *HTMLReporter) Start(addr string) error { - mux := http.NewServeMux() - mux.HandleFunc("/", r.handleDashboard) - mux.HandleFunc("/timecluster", r.handleTimeClusterPage) - mux.HandleFunc("/api/reports", r.handleAPIReports) - mux.HandleFunc("/api/series", r.handleAPISeries) - mux.HandleFunc("/api/series/list", r.handleAPISeriesList) - mux.HandleFunc("/api/series/batch", r.handleAPISeriesBatch) - mux.HandleFunc("/api/correlations", r.handleAPICorrelations) - mux.HandleFunc("/api/raw-anomalies", r.handleAPIRawAnomalies) - mux.HandleFunc("/api/timecluster/clusters", r.handleAPITimeClusterClusters) - mux.HandleFunc("/api/timecluster/stats", r.handleAPITimeClusterStats) - - r.server = &http.Server{ - Addr: addr, - Handler: mux, - } - - go func() { - if err := r.server.ListenAndServe(); err != nil && err != http.ErrServerClosed { - log.Printf("[observer] HTMLReporter server error: %v", err) - } - }() - - return nil -} - -// Stop stops the HTTP server. 
-func (r *HTMLReporter) Stop() error { - if r.server == nil { - return nil - } - - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - defer cancel() - - return r.server.Shutdown(ctx) -} - -// handleDashboard serves the HTML dashboard. -func (r *HTMLReporter) handleDashboard(w http.ResponseWriter, req *http.Request) { - if req.URL.Path != "/" { - http.NotFound(w, req) - return - } - - w.Header().Set("Content-Type", "text/html; charset=utf-8") - w.WriteHeader(http.StatusOK) - - html := ` - - - - Observer Demo Dashboard - - - - - -
-

Observer Demo Dashboard

- -
- -
-
-

Anomaly Timeline

-
-
Waiting for anomalies...
-
-
- -
-
-

Anomalous Metrics

-
-
No anomalies detected yet
-
-
-
-

All Metrics

-
-
-
-
- -
- - Connecting... -
- - - - -` - - _, _ = w.Write([]byte(html)) -} - -// handleAPIReports returns JSON array of recent reports. -func (r *HTMLReporter) handleAPIReports(w http.ResponseWriter, _ *http.Request) { - r.mu.RLock() - reports := make([]timestampedReport, len(r.reports)) - copy(reports, r.reports) - r.mu.RUnlock() - - w.Header().Set("Content-Type", "application/json") - w.WriteHeader(http.StatusOK) - - if err := json.NewEncoder(w).Encode(reports); err != nil { - log.Printf("[500] /api/reports: failed to encode: %v", err) - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } -} - -// seriesResponse is the JSON response for the /api/series endpoint. -type seriesResponse struct { - Namespace string `json:"namespace"` - Name string `json:"name"` - Tags []string `json:"tags"` - Points []pointOutput `json:"points"` -} - -// pointOutput is a JSON-serializable point. -type pointOutput struct { - Timestamp int64 `json:"timestamp"` - Value float64 `json:"value"` -} - -// parseAggregate parses an aggregation string to Aggregate type. -func parseAggregate(s string) Aggregate { - switch s { - case "sum": - return AggregateSum - case "count": - return AggregateCount - case "min": - return AggregateMin - case "max": - return AggregateMax - default: - return AggregateAverage - } -} - -// handleAPISeries returns JSON series data. -// Supports incremental fetching via ?since= parameter. -// When since is provided, only returns points with timestamp > since. 
-func (r *HTMLReporter) handleAPISeries(w http.ResponseWriter, req *http.Request) { - name := req.URL.Query().Get("name") - namespace := req.URL.Query().Get("namespace") - agg := parseAggregate(req.URL.Query().Get("agg")) - - if name == "" || namespace == "" { - http.Error(w, "missing required parameters: name and namespace", http.StatusBadRequest) - return - } - - // Parse 'since' parameter for delta updates (unix timestamp in seconds) - var since int64 - if sinceParam := req.URL.Query().Get("since"); sinceParam != "" { - var err error - since, err = parseInt64(sinceParam) - if err != nil { - http.Error(w, "invalid since parameter: must be unix timestamp", http.StatusBadRequest) - return - } - } - - // Parse tags from query string - var tags []string - if tagsParam := req.URL.Query().Get("tags"); tagsParam != "" { - tags = strings.Split(tagsParam, ",") - } - - r.mu.RLock() - storage := r.storage - r.mu.RUnlock() - - if storage == nil { - http.Error(w, "storage not configured", http.StatusServiceUnavailable) - return - } - - series := storage.GetSeriesSince(namespace, name, tags, agg, since) - if series == nil { - http.Error(w, "series not found", http.StatusNotFound) - return - } - - resp := seriesResponse{ - Namespace: series.Namespace, - Name: series.Name, - Tags: series.Tags, - Points: make([]pointOutput, len(series.Points)), - } - for i, p := range series.Points { - resp.Points[i] = pointOutput{ - Timestamp: p.Timestamp, - Value: sanitizeFloat(p.Value), - } - } - - // Encode to buffer first so we can return proper error if encoding fails - data, err := json.Marshal(resp) - if err != nil { - log.Printf("[500] /api/series: failed to marshal: %v", err) - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - - w.Header().Set("Content-Type", "application/json") - w.Write(data) -} - -// escapeHTML escapes special HTML characters. 
-func escapeHTML(s string) string { - var result []byte - for i := 0; i < len(s); i++ { - switch s[i] { - case '&': - result = append(result, []byte("&amp;")...) - case '<': - result = append(result, []byte("&lt;")...) - case '>': - result = append(result, []byte("&gt;")...) - case '"': - result = append(result, []byte("&quot;")...) - case '\'': - result = append(result, []byte("&#39;")...) - default: - result = append(result, s[i]) - } - } - return string(result) -} - -// seriesListItem is metadata about an available series. -type seriesListItem struct { - Namespace string `json:"namespace"` - Name string `json:"name"` - Tags []string `json:"tags"` -} - -// handleAPISeriesList returns a list of all available series. -func (r *HTMLReporter) handleAPISeriesList(w http.ResponseWriter, req *http.Request) { - namespace := req.URL.Query().Get("namespace") - if namespace == "" { - namespace = "demo" // default namespace - } - agg := parseAggregate(req.URL.Query().Get("agg")) - - r.mu.RLock() - storage := r.storage - r.mu.RUnlock() - - if storage == nil { - w.Header().Set("Content-Type", "application/json") - w.WriteHeader(http.StatusOK) - _, _ = w.Write([]byte("[]")) - return - } - - allSeries := storage.AllSeries(namespace, agg) - items := make([]seriesListItem, len(allSeries)) - for i, s := range allSeries { - items[i] = seriesListItem{ - Namespace: s.Namespace, - Name: s.Name, - Tags: s.Tags, - } - } - - w.Header().Set("Content-Type", "application/json") - w.WriteHeader(http.StatusOK) - if err := json.NewEncoder(w).Encode(items); err != nil { - log.Printf("[500] /api/series/list: failed to encode: %v", err) - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } -} - -// batchSeriesRequest is the request body for batch series fetching. -type batchSeriesRequest struct { - Namespace string `json:"namespace"` - Since map[string]int64 `json:"since"` // chartKey -> last timestamp -} - -// batchSeriesResponse contains all series data in one response. 
-type batchSeriesResponse struct { - Series map[string]seriesResponse `json:"series"` // chartKey -> series data -} - -// handleAPISeriesBatch returns all series data in a single request. -// Accepts POST with JSON body containing namespace and since timestamps per series. -// Returns only new points for each series (points with timestamp > since[key]). -func (r *HTMLReporter) handleAPISeriesBatch(w http.ResponseWriter, req *http.Request) { - // Parse request - support both GET (simple) and POST (with since data) - namespace := "demo" - since := make(map[string]int64) - - if req.Method == "POST" { - var body batchSeriesRequest - if err := json.NewDecoder(req.Body).Decode(&body); err != nil { - http.Error(w, "invalid request body", http.StatusBadRequest) - return - } - if body.Namespace != "" { - namespace = body.Namespace - } - since = body.Since - } else { - // GET request - just use query param for namespace - if ns := req.URL.Query().Get("namespace"); ns != "" { - namespace = ns - } - } - - r.mu.RLock() - storage := r.storage - r.mu.RUnlock() - - if storage == nil { - w.Header().Set("Content-Type", "application/json") - w.Write([]byte(`{"series":{}}`)) - return - } - - // Get all series for both aggregations - aggregations := []Aggregate{AggregateAverage, AggregateCount} - aggNames := []string{"avg", "count"} - - resp := batchSeriesResponse{ - Series: make(map[string]seriesResponse), - } - - for aggIdx, agg := range aggregations { - allSeries := storage.AllSeries(namespace, agg) - aggName := aggNames[aggIdx] - - for _, s := range allSeries { - chartKey := s.Name + ":" + aggName - sinceTs := since[chartKey] // 0 if not present = get all - - // Get points since last timestamp - series := storage.GetSeriesSince(namespace, s.Name, s.Tags, agg, sinceTs) - if series == nil || len(series.Points) == 0 { - continue - } - - points := make([]pointOutput, len(series.Points)) - for i, p := range series.Points { - points[i] = pointOutput{ - Timestamp: p.Timestamp, - Value: 
sanitizeFloat(p.Value), - } - } - - resp.Series[chartKey] = seriesResponse{ - Namespace: series.Namespace, - Name: series.Name, - Tags: series.Tags, - Points: points, - } - } - } - - data, err := json.Marshal(resp) - if err != nil { - log.Printf("[500] /api/series/batch: failed to marshal: %v", err) - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - - w.Header().Set("Content-Type", "application/json") - w.Write(data) -} - -// correlationOutput is the JSON structure for correlation responses. -type correlationOutput struct { - Pattern string `json:"pattern"` - Title string `json:"title"` - Sources []string `json:"sources"` - Anomalies []anomalyOutput `json:"anomalies"` - FirstSeen int64 `json:"firstSeen"` // unix seconds (from data) - LastUpdated int64 `json:"lastUpdated"` // unix seconds (from data) -} - -// anomalyOutput is a JSON-serializable anomaly. -type anomalyOutput struct { - Source string `json:"source"` - Title string `json:"title"` - Description string `json:"description"` - Tags []string `json:"tags"` - Timestamp int64 `json:"timestamp"` - Score *float64 `json:"score,omitempty"` -} - -// rawAnomalyOutput is the JSON structure for raw anomaly API responses. -type rawAnomalyOutput struct { - Source string `json:"source"` - DetectorName string `json:"detectorName"` - Title string `json:"title"` - Description string `json:"description"` - Tags []string `json:"tags"` - Timestamp int64 `json:"timestamp"` - Score *float64 `json:"score,omitempty"` -} - -// handleAPICorrelations returns currently active correlations. 
-func (r *HTMLReporter) handleAPICorrelations(w http.ResponseWriter, _ *http.Request) { - r.mu.RLock() - correlationState := r.correlationState - r.mu.RUnlock() - - var correlations []correlationOutput - if correlationState != nil { - active := correlationState.ActiveCorrelations() - correlations = make([]correlationOutput, len(active)) - for i, ac := range active { - anomalies := make([]anomalyOutput, len(ac.Anomalies)) - for j, a := range ac.Anomalies { - anomalies[j] = anomalyOutput{ - Source: a.Source.String(), - Title: a.Title, - Description: a.Description, - Tags: a.Source.Tags, - Timestamp: a.Timestamp, - Score: a.Score, - } - } - // Collect unique source strings from members. - sources := make([]string, len(ac.Members)) - for k, m := range ac.Members { - sources[k] = m.String() - } - sort.Strings(sources) - - correlations[i] = correlationOutput{ - Pattern: ac.Pattern, - Title: ac.Title, - Sources: sources, - Anomalies: anomalies, - FirstSeen: ac.FirstSeen, - LastUpdated: ac.LastUpdated, - } - } - } - - w.Header().Set("Content-Type", "application/json") - w.WriteHeader(http.StatusOK) - if err := json.NewEncoder(w).Encode(correlations); err != nil { - log.Printf("[500] /api/correlations: failed to encode: %v", err) - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } -} - -// handleAPIRawAnomalies returns all raw anomalies from detector implementations. 
-func (r *HTMLReporter) handleAPIRawAnomalies(w http.ResponseWriter, _ *http.Request) { - r.mu.RLock() - rawState := r.rawAnomalyState - r.mu.RUnlock() - - var anomalies []rawAnomalyOutput - if rawState != nil { - raw := rawState.RawAnomalies() - anomalies = make([]rawAnomalyOutput, len(raw)) - for i, a := range raw { - anomalies[i] = rawAnomalyOutput{ - Source: a.Source.String(), - DetectorName: a.DetectorName, - Title: a.Title, - Description: a.Description, - Tags: a.Source.Tags, - Timestamp: a.Timestamp, - Score: a.Score, - } - } - } - - w.Header().Set("Content-Type", "application/json") - w.WriteHeader(http.StatusOK) - if err := json.NewEncoder(w).Encode(anomalies); err != nil { - log.Printf("[500] /api/raw-anomalies: failed to encode: %v", err) - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } -} - -// handleTimeClusterPage serves the TimeCluster visualization page. -func (r *HTMLReporter) handleTimeClusterPage(w http.ResponseWriter, _ *http.Request) { - w.Header().Set("Content-Type", "text/html") - w.Write([]byte(timeClusterPageHTML)) -} - -// handleAPITimeClusterClusters returns all time clusters. -func (r *HTMLReporter) handleAPITimeClusterClusters(w http.ResponseWriter, _ *http.Request) { - r.mu.RLock() - tc := r.timeClusterCorrelator - r.mu.RUnlock() - - w.Header().Set("Content-Type", "application/json") - - if tc == nil { - _ = json.NewEncoder(w).Encode(map[string]interface{}{ - "clusters": []interface{}{}, - "error": "TimeClusterCorrelator not enabled", - }) - return - } - - clusters := tc.GetClusters() - _ = json.NewEncoder(w).Encode(map[string]interface{}{ - "clusters": clusters, - }) -} - -// handleAPITimeClusterStats returns TimeCluster statistics. 
-func (r *HTMLReporter) handleAPITimeClusterStats(w http.ResponseWriter, _ *http.Request) { - r.mu.RLock() - tc := r.timeClusterCorrelator - r.mu.RUnlock() - - w.Header().Set("Content-Type", "application/json") - - if tc == nil { - _ = json.NewEncoder(w).Encode(map[string]interface{}{ - "enabled": false, - "error": "TimeClusterCorrelator not enabled", - }) - return - } - - stats := tc.GetStats() - stats["enabled"] = true - _ = json.NewEncoder(w).Encode(stats) -} - -const timeClusterPageHTML = ` - - - - - Time Cluster Correlator - - - -
-

Time Cluster Correlator

- -
- -
-
- Total Clusters - -
-
- Total Anomalies - -
-
- Slack (seconds) - -
-
- Window (seconds) - -
-
- Largest Cluster - -
-
- -
-
Timeline
-
-
- -
-
-
Clusters (by size)
-
-
-
- - - - -` diff --git a/comp/anomalydetection/observer/impl/reporter_html_test.go b/comp/anomalydetection/observer/impl/reporter_html_test.go deleted file mode 100644 index dbbfb88a0da9..000000000000 --- a/comp/anomalydetection/observer/impl/reporter_html_test.go +++ /dev/null @@ -1,455 +0,0 @@ -// Unless explicitly stated otherwise all files in this repository are licensed -// under the Apache License Version 2.0. -// This product includes software developed at Datadog (https://www.datadoghq.com/). -// Copyright 2016-present Datadog, Inc. - -package observerimpl - -import ( - "encoding/json" - "net/http" - "net/http/httptest" - "strings" - "testing" - - observer "github.com/DataDog/datadog-agent/comp/anomalydetection/observer/def" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestHTMLReporter_Name(t *testing.T) { - r := NewHTMLReporter() - assert.Equal(t, "html_reporter", r.Name()) -} - -func TestHTMLReporter_Report_AddsToBuffer(t *testing.T) { - r := NewHTMLReporter() - - r.Report(observer.ReportOutput{ - AdvancedToSec: 100, - NewAnomalies: []observer.Anomaly{ - {Source: observer.SeriesDescriptor{Name: "cpu"}, DetectorName: "test"}, - }, - ActiveCorrelations: []observer.ActiveCorrelation{ - {Pattern: "p1", Title: "Correlation 1"}, - }, - }) - - r.mu.RLock() - defer r.mu.RUnlock() - - require.Len(t, r.reports, 1) - assert.Equal(t, int64(100), r.reports[0].AdvancedToSec) - assert.Equal(t, 1, r.reports[0].NewAnomalyCount) - assert.Equal(t, 1, r.reports[0].CorrelationCount) - assert.False(t, r.reports[0].Timestamp.IsZero()) -} - -func TestHTMLReporter_Report_MostRecentFirst(t *testing.T) { - r := NewHTMLReporter() - - r.Report(observer.ReportOutput{AdvancedToSec: 10}) - r.Report(observer.ReportOutput{AdvancedToSec: 20}) - r.Report(observer.ReportOutput{AdvancedToSec: 30}) - - r.mu.RLock() - defer r.mu.RUnlock() - - require.Len(t, r.reports, 3) - assert.Equal(t, int64(30), r.reports[0].AdvancedToSec) - assert.Equal(t, 
int64(20), r.reports[1].AdvancedToSec) - assert.Equal(t, int64(10), r.reports[2].AdvancedToSec) -} - -func TestHTMLReporter_Report_BufferLimitedTo100(t *testing.T) { - r := NewHTMLReporter() - - // Add 105 reports - for i := 0; i < 105; i++ { - r.Report(observer.ReportOutput{ - AdvancedToSec: int64(i), - }) - } - - r.mu.RLock() - defer r.mu.RUnlock() - - assert.Len(t, r.reports, 100) -} - -func TestHTMLReporter_Report_OldestEvicted(t *testing.T) { - r := NewHTMLReporter() - - // Add 100 reports with AdvancedToSec=0 - for i := 0; i < 100; i++ { - r.Report(observer.ReportOutput{ - AdvancedToSec: 0, - }) - } - - // Add one more with AdvancedToSec=999 - r.Report(observer.ReportOutput{ - AdvancedToSec: 999, - }) - - r.mu.RLock() - defer r.mu.RUnlock() - - // Should have 100 reports with newest at the front - require.Len(t, r.reports, 100) - assert.Equal(t, int64(999), r.reports[0].AdvancedToSec) - // Last one should still be old (oldest kept) - assert.Equal(t, int64(0), r.reports[99].AdvancedToSec) -} - -func TestHTMLReporter_Dashboard_ReturnsHTML(t *testing.T) { - r := NewHTMLReporter() - - req := httptest.NewRequest(http.MethodGet, "/", nil) - rec := httptest.NewRecorder() - - r.handleDashboard(rec, req) - - assert.Equal(t, http.StatusOK, rec.Code) - assert.Contains(t, rec.Header().Get("Content-Type"), "text/html") - assert.Contains(t, rec.Body.String(), "Observer Demo Dashboard") - // Check for JavaScript-based dashboard elements - assert.Contains(t, rec.Body.String(), "chart.js") - assert.Contains(t, rec.Body.String(), "fetchCorrelations") -} - -func TestHTMLReporter_Dashboard_HasAPIEndpoints(t *testing.T) { - // Dashboard now uses JavaScript to fetch data from API endpoints - // This test verifies the HTML includes references to those endpoints - r := NewHTMLReporter() - - req := httptest.NewRequest(http.MethodGet, "/", nil) - rec := httptest.NewRecorder() - - r.handleDashboard(rec, req) - - body := rec.Body.String() - assert.Contains(t, body, "/api/correlations") 
- assert.Contains(t, body, "/api/series/list") - assert.Contains(t, body, "/api/series") -} - -func TestHTMLReporter_Dashboard_NotFound(t *testing.T) { - r := NewHTMLReporter() - - req := httptest.NewRequest(http.MethodGet, "/unknown", nil) - rec := httptest.NewRecorder() - - r.handleDashboard(rec, req) - - assert.Equal(t, http.StatusNotFound, rec.Code) -} - -func TestHTMLReporter_APIReports_ReturnsJSON(t *testing.T) { - r := NewHTMLReporter() - - r.Report(observer.ReportOutput{ - AdvancedToSec: 42, - NewAnomalies: []observer.Anomaly{ - {Source: observer.SeriesDescriptor{Name: "cpu"}}, - }, - }) - - req := httptest.NewRequest(http.MethodGet, "/api/reports", nil) - rec := httptest.NewRecorder() - - r.handleAPIReports(rec, req) - - assert.Equal(t, http.StatusOK, rec.Code) - assert.Equal(t, "application/json", rec.Header().Get("Content-Type")) - - var reports []timestampedReport - err := json.Unmarshal(rec.Body.Bytes(), &reports) - require.NoError(t, err) - - require.Len(t, reports, 1) - assert.Equal(t, int64(42), reports[0].AdvancedToSec) - assert.Equal(t, 1, reports[0].NewAnomalyCount) -} - -func TestHTMLReporter_APIReports_EmptyArray(t *testing.T) { - r := NewHTMLReporter() - - req := httptest.NewRequest(http.MethodGet, "/api/reports", nil) - rec := httptest.NewRecorder() - - r.handleAPIReports(rec, req) - - assert.Equal(t, http.StatusOK, rec.Code) - - var reports []timestampedReport - err := json.Unmarshal(rec.Body.Bytes(), &reports) - require.NoError(t, err) - assert.Len(t, reports, 0) -} - -func TestHTMLReporter_APISeries_ReturnsJSON(t *testing.T) { - r := NewHTMLReporter() - - storage := newTimeSeriesStorage() - storage.Add("test", "my.metric", 10.5, 1000, nil) - storage.Add("test", "my.metric", 20.5, 1001, nil) - r.SetStorage(storage) - - req := httptest.NewRequest(http.MethodGet, "/api/series?namespace=test&name=my.metric&agg=avg", nil) - rec := httptest.NewRecorder() - - r.handleAPISeries(rec, req) - - assert.Equal(t, http.StatusOK, rec.Code) - 
assert.Equal(t, "application/json", rec.Header().Get("Content-Type")) - - var resp seriesResponse - err := json.Unmarshal(rec.Body.Bytes(), &resp) - require.NoError(t, err) - - assert.Equal(t, "test", resp.Namespace) - assert.Equal(t, "my.metric", resp.Name) - require.Len(t, resp.Points, 2) - assert.Equal(t, int64(1000), resp.Points[0].Timestamp) - assert.Equal(t, 10.5, resp.Points[0].Value) -} - -func TestHTMLReporter_APISeries_MissingParams(t *testing.T) { - r := NewHTMLReporter() - - tests := []struct { - name string - url string - }{ - {"missing both", "/api/series"}, - {"missing name", "/api/series?namespace=test"}, - {"missing namespace", "/api/series?name=metric"}, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - req := httptest.NewRequest(http.MethodGet, tt.url, nil) - rec := httptest.NewRecorder() - - r.handleAPISeries(rec, req) - - assert.Equal(t, http.StatusBadRequest, rec.Code) - }) - } -} - -func TestHTMLReporter_APISeries_NoStorage(t *testing.T) { - r := NewHTMLReporter() - - req := httptest.NewRequest(http.MethodGet, "/api/series?namespace=test&name=metric", nil) - rec := httptest.NewRecorder() - - r.handleAPISeries(rec, req) - - assert.Equal(t, http.StatusServiceUnavailable, rec.Code) -} - -func TestHTMLReporter_APISeries_NotFound(t *testing.T) { - r := NewHTMLReporter() - - storage := newTimeSeriesStorage() - r.SetStorage(storage) - - req := httptest.NewRequest(http.MethodGet, "/api/series?namespace=test&name=nonexistent", nil) - rec := httptest.NewRecorder() - - r.handleAPISeries(rec, req) - - assert.Equal(t, http.StatusNotFound, rec.Code) -} - -func TestHTMLReporter_StartStop(t *testing.T) { - r := NewHTMLReporter() - - // Start on a random available port - err := r.Start("127.0.0.1:0") - require.NoError(t, err) - - // Give server time to start - // We can't easily make requests because we don't know the port - // but we can verify Stop works without error - err = r.Stop() - assert.NoError(t, err) -} - -func 
TestHTMLReporter_Stop_NoServer(t *testing.T) { - r := NewHTMLReporter() - - // Stop without Start should not error - err := r.Stop() - assert.NoError(t, err) -} - -func TestHTMLReporter_IntegrationWithHTTPServer(t *testing.T) { - r := NewHTMLReporter() - - // Add test data - r.Report(observer.ReportOutput{ - AdvancedToSec: 100, - }) - - storage := newTimeSeriesStorage() - storage.Add("demo", "cpu.usage", 50.0, 1000, nil) - r.SetStorage(storage) - - // Create test server using the handler - mux := http.NewServeMux() - mux.HandleFunc("/", r.handleDashboard) - mux.HandleFunc("/api/reports", r.handleAPIReports) - mux.HandleFunc("/api/series", r.handleAPISeries) - server := httptest.NewServer(mux) - defer server.Close() - - // Test dashboard - resp, err := http.Get(server.URL + "/") - require.NoError(t, err) - defer resp.Body.Close() - assert.Equal(t, http.StatusOK, resp.StatusCode) - assert.True(t, strings.HasPrefix(resp.Header.Get("Content-Type"), "text/html")) - - // Test API reports - resp, err = http.Get(server.URL + "/api/reports") - require.NoError(t, err) - defer resp.Body.Close() - assert.Equal(t, http.StatusOK, resp.StatusCode) - assert.Equal(t, "application/json", resp.Header.Get("Content-Type")) - - // Test API series - resp, err = http.Get(server.URL + "/api/series?namespace=demo&name=cpu.usage") - require.NoError(t, err) - defer resp.Body.Close() - assert.Equal(t, http.StatusOK, resp.StatusCode) - assert.Equal(t, "application/json", resp.Header.Get("Content-Type")) -} - -func TestEscapeHTML(t *testing.T) { - tests := []struct { - input string - expected string - }{ - {"hello", "hello"}, - {"