diff --git a/docker-compose.dev.yml b/docker-compose.dev.yml index cc82b7e62e..cedad174c6 100644 --- a/docker-compose.dev.yml +++ b/docker-compose.dev.yml @@ -52,6 +52,7 @@ services: START_WEBHOOK_CLEANUP_WORKER: "yes" START_INTEGRATION_CLEANUP_WORKER: "yes" START_CANVAS_CLEANUP_WORKER: "yes" + START_NODE_CLEANUP_WORKER: "yes" WEB_BASE_PATH: "" SENTRY_DSN: "" SENTRY_ENVIRONMENT: ${SENTRY_ENVIRONMENT:-development} diff --git a/pkg/models/canvas_node.go b/pkg/models/canvas_node.go index bee363e01f..75624d82e3 100644 --- a/pkg/models/canvas_node.go +++ b/pkg/models/canvas_node.go @@ -405,3 +405,44 @@ func FindNodeQueueItem(workflowID uuid.UUID, queueItemID uuid.UUID) (*CanvasNode return &queueItem, nil } + +// ListDeletedCanvasNodes returns soft-deleted canvas nodes whose parent canvas +// is NOT soft-deleted. These are nodes removed during workflow updates that +// need their associated resources cleaned up. +func ListDeletedCanvasNodes() ([]CanvasNode, error) { + var nodes []CanvasNode + err := database.Conn(). + Unscoped(). + Joins("JOIN workflows ON workflow_nodes.workflow_id = workflows.id"). + Where("workflow_nodes.deleted_at IS NOT NULL"). + Where("workflows.deleted_at IS NULL"). + Find(&nodes). + Error + + if err != nil { + return nil, err + } + + return nodes, nil +} + +// LockDeletedCanvasNode locks a soft-deleted canvas node for processing +// using SELECT FOR UPDATE SKIP LOCKED. +func LockDeletedCanvasNode(tx *gorm.DB, workflowID uuid.UUID, nodeID string) (*CanvasNode, error) { + var node CanvasNode + + err := tx. + Unscoped(). + Clauses(clause.Locking{Strength: "UPDATE", Options: "SKIP LOCKED"}). + Where("workflow_id = ?", workflowID). + Where("node_id = ?", nodeID). + Where("deleted_at IS NOT NULL"). + First(&node). 
+ Error + + if err != nil { + return nil, err + } + + return &node, nil +} diff --git a/pkg/server/server.go b/pkg/server/server.go index fdc996af82..1d53eaa59c 100644 --- a/pkg/server/server.go +++ b/pkg/server/server.go @@ -139,6 +139,13 @@ func startWorkers(encryptor crypto.Encryptor, registry *registry.Registry, oidcP w := workers.NewCanvasCleanupWorker() go w.Start(context.Background()) } + + if os.Getenv("START_NODE_CLEANUP_WORKER") == "yes" { + log.Println("Starting Canvas Node Cleanup Worker") + + w := workers.NewCanvasNodeCleanupWorker() + go w.Start(context.Background()) + } } func startEmailConsumers(rabbitMQURL string, encryptor crypto.Encryptor, baseURL string, authService authorization.Authorization) { diff --git a/pkg/telemetry/metrics.go b/pkg/telemetry/metrics.go index 8532da7372..0dafc6e33e 100644 --- a/pkg/telemetry/metrics.go +++ b/pkg/telemetry/metrics.go @@ -32,6 +32,9 @@ var ( workflowCleanupWorkerTickHistogram metric.Float64Histogram workflowCleanupWorkerCanvasesCountHistogram metric.Int64Histogram + nodeCleanupWorkerTickHistogram metric.Float64Histogram + nodeCleanupWorkerNodesCountHistogram metric.Int64Histogram + dbLocksCountHistogram metric.Int64Histogram dbLongQueriesCountHistogram metric.Int64Histogram ) @@ -141,6 +144,24 @@ func InitMetrics(ctx context.Context) error { return err } + nodeCleanupWorkerTickHistogram, err = meter.Float64Histogram( + "node_cleanup_worker.tick.duration.seconds", + metric.WithDescription("Duration of each CanvasNodeCleanupWorker tick"), + metric.WithUnit("s"), + ) + if err != nil { + return err + } + + nodeCleanupWorkerNodesCountHistogram, err = meter.Int64Histogram( + "node_cleanup_worker.tick.nodes.deleted", + metric.WithDescription("Number of deleted canvas nodes processed each tick"), + metric.WithUnit("1"), + ) + if err != nil { + return err + } + dbLocksCountHistogram, err = meter.Int64Histogram( "db.locks.count", metric.WithDescription("Number of database locks"), @@ -260,6 +281,22 @@ func 
RecordWorkflowCleanupWorkerCanvasesCount(ctx context.Context, count int) { workflowCleanupWorkerCanvasesCountHistogram.Record(ctx, int64(count)) } +func RecordNodeCleanupWorkerTickDuration(ctx context.Context, d time.Duration) { + if !metricsReady.Load() { + return + } + + nodeCleanupWorkerTickHistogram.Record(ctx, d.Seconds()) +} + +func RecordNodeCleanupWorkerNodesCount(ctx context.Context, count int) { + if !metricsReady.Load() { + return + } + + nodeCleanupWorkerNodesCountHistogram.Record(ctx, int64(count)) +} + func RecordDBLocksCount(ctx context.Context, count int64) { if !metricsReady.Load() { return diff --git a/pkg/workers/canvas_cleanup_worker.go b/pkg/workers/canvas_cleanup_worker.go index a87e36596b..fb59c7bcaf 100644 --- a/pkg/workers/canvas_cleanup_worker.go +++ b/pkg/workers/canvas_cleanup_worker.go @@ -5,7 +5,6 @@ import ( "fmt" "time" - "github.com/google/uuid" "golang.org/x/sync/semaphore" "gorm.io/gorm" @@ -101,7 +100,7 @@ func (w *CanvasCleanupWorker) processCanvas(tx *gorm.DB, canvas models.Canvas) e break } - resourcesDeleted, allResourcesDeleted, err := w.deleteNodeResourcesBatched(tx, canvas.ID, node.NodeID, w.maxResourcesPerTick-totalResourcesDeleted) + resourcesDeleted, allResourcesDeleted, err := deleteNodeResourcesBatched(tx, canvas.ID, node.NodeID, w.maxResourcesPerTick-totalResourcesDeleted) if err != nil { return fmt.Errorf("failed to delete resources for node %s: %w", node.NodeID, err) } @@ -145,54 +144,3 @@ func (w *CanvasCleanupWorker) processCanvas(tx *gorm.DB, canvas models.Canvas) e w.logger.Infof("Successfully cleaned up canvas %s (deleted %d resources total)", canvas.ID, totalResourcesDeleted) return nil } - -func (w *CanvasCleanupWorker) deleteNodeResourcesBatched(tx *gorm.DB, workflowID uuid.UUID, nodeID string, maxResources int) (resourcesDeleted int, allResourcesDeleted bool, err error) { - resourceTypes := []struct { - model interface{} - tableName string - }{ - {&models.CanvasNodeRequest{}, "canvas_node_requests"}, - 
{&models.CanvasNodeExecutionKV{}, "canvas_node_execution_kvs"},
-		{&models.CanvasNodeExecution{}, "canvas_node_executions"},
-		{&models.CanvasNodeQueueItem{}, "canvas_node_queue_items"},
-		{&models.CanvasEvent{}, "canvas_events"},
-	}
-
-	totalDeleted := 0
-	allDeleted := true
-
-	for _, resourceType := range resourceTypes {
-		if totalDeleted >= maxResources {
-			allDeleted = false
-			break
-		}
-
-		remaining := maxResources - totalDeleted
-
-		// Delete in batches with LIMIT
-		result := tx.Unscoped().Where("workflow_id = ? AND node_id = ?", workflowID, nodeID).Limit(remaining).Delete(resourceType.model)
-		if result.Error != nil {
-			return totalDeleted, false, fmt.Errorf("failed to delete %s: %w", resourceType.tableName, result.Error)
-		}
-
-		deleted := int(result.RowsAffected)
-		totalDeleted += deleted
-
-		if deleted != remaining {
-			continue
-		}
-
-		var count int64
-
-		if err := tx.Unscoped().Model(resourceType.model).Where("workflow_id = ? AND node_id = ?", workflowID, nodeID).Count(&count).Error; err != nil {
-			return totalDeleted, false, fmt.Errorf("failed to count remaining %s: %w", resourceType.tableName, err)
-		}
-
-		if count > 0 {
-			allDeleted = false
-			break
-		}
-	}
-
-	return totalDeleted, allDeleted, nil
-}
diff --git a/pkg/workers/canvas_node_cleanup_worker.go b/pkg/workers/canvas_node_cleanup_worker.go
new file mode 100644
index 0000000000..e0200cc2aa
--- /dev/null
+++ b/pkg/workers/canvas_node_cleanup_worker.go
@@ -0,0 +1,110 @@
+package workers
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"time"
+
+	"golang.org/x/sync/semaphore"
+	"gorm.io/gorm"
+
+	log "github.com/sirupsen/logrus"
+	"github.com/superplanehq/superplane/pkg/database"
+	"github.com/superplanehq/superplane/pkg/models"
+	"github.com/superplanehq/superplane/pkg/telemetry"
+)
+
+type CanvasNodeCleanupWorker struct {
+	semaphore           *semaphore.Weighted
+	logger              *log.Entry
+	maxResourcesPerTick int
+}
+
+func NewCanvasNodeCleanupWorker() *CanvasNodeCleanupWorker {
+	return &CanvasNodeCleanupWorker{
+		semaphore:           semaphore.NewWeighted(25),
+		logger:              log.WithFields(log.Fields{"worker": "CanvasNodeCleanupWorker"}),
+		maxResourcesPerTick: 500,
+	}
+}
+
+func (w *CanvasNodeCleanupWorker) Start(ctx context.Context) {
+	ticker := time.NewTicker(30 * time.Second)
+	defer ticker.Stop()
+
+	for {
+		select {
+		case <-ctx.Done():
+			return
+		case <-ticker.C:
+			tickStart := time.Now()
+			nodes, err := models.ListDeletedCanvasNodes()
+			if err != nil {
+				w.logger.Errorf("Error finding deleted nodes: %v", err)
+				continue
+			}
+
+			telemetry.RecordNodeCleanupWorkerNodesCount(context.Background(), len(nodes))
+
+			for _, node := range nodes {
+				if err := w.semaphore.Acquire(context.Background(), 1); err != nil {
+					w.logger.Errorf("Error acquiring semaphore: %v", err)
+					continue
+				}
+
+				go func(node models.CanvasNode) {
+					defer w.semaphore.Release(1)
+
+					if err := w.LockAndProcessNode(node); err != nil {
+						w.logger.Errorf("Error processing node %s from canvas %s: %v", node.NodeID, node.WorkflowID, err)
+					}
+				}(node)
+			}
+
+			telemetry.RecordNodeCleanupWorkerTickDuration(context.Background(), time.Since(tickStart))
+		}
+	}
+}
+
+func (w *CanvasNodeCleanupWorker) LockAndProcessNode(node models.CanvasNode) error {
+	return database.Conn().Transaction(func(tx *gorm.DB) error {
+		lockedNode, err := models.LockDeletedCanvasNode(tx, node.WorkflowID, node.NodeID)
+		if err != nil {
+			// gorm.ErrRecordNotFound means the node is already gone or is locked
+			// by another worker (SKIP LOCKED) - safe to skip. Any other error is
+			// a real failure and must be surfaced, not swallowed.
+			if errors.Is(err, gorm.ErrRecordNotFound) {
+				w.logger.Infof("Node %s from canvas %s already being processed - skipping", node.NodeID, node.WorkflowID)
+				return nil
+			}
+			return fmt.Errorf("failed to lock node %s: %w", node.NodeID, err)
+		}
+
+		w.logger.Infof("Processing deleted node %s from canvas %s", lockedNode.NodeID, lockedNode.WorkflowID)
+		return w.processNode(tx, *lockedNode)
+	})
+}
+
+func (w *CanvasNodeCleanupWorker) processNode(tx *gorm.DB, node models.CanvasNode) error {
+	if !node.DeletedAt.Valid {
+		w.logger.Infof("Skipping non-deleted node %s from canvas %s", node.NodeID, node.WorkflowID)
+		return nil
+	}
+
+	resourcesDeleted, allResourcesDeleted, err := deleteNodeResourcesBatched(tx, node.WorkflowID, node.NodeID,
w.maxResourcesPerTick) + if err != nil { + return fmt.Errorf("failed to delete resources for node %s: %w", node.NodeID, err) + } + + if !allResourcesDeleted { + w.logger.Infof("Partially cleaned node %s from canvas %s (deleted %d resources, more remain)", node.NodeID, node.WorkflowID, resourcesDeleted) + return nil + } + + if err := tx.Unscoped().Where("workflow_id = ? AND node_id = ?", node.WorkflowID, node.NodeID).Delete(&models.CanvasNode{}).Error; err != nil { + return fmt.Errorf("failed to delete canvas node %s: %w", node.NodeID, err) + } + + w.logger.Infof("Successfully cleaned up node %s from canvas %s (deleted %d resources)", node.NodeID, node.WorkflowID, resourcesDeleted) + return nil +} diff --git a/pkg/workers/canvas_node_cleanup_worker_test.go b/pkg/workers/canvas_node_cleanup_worker_test.go new file mode 100644 index 0000000000..c595b43621 --- /dev/null +++ b/pkg/workers/canvas_node_cleanup_worker_test.go @@ -0,0 +1,322 @@ +package workers + +import ( + "testing" + + "github.com/google/uuid" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/superplanehq/superplane/pkg/database" + "github.com/superplanehq/superplane/pkg/models" + "github.com/superplanehq/superplane/test/support" + "gorm.io/datatypes" +) + +func Test__CanvasNodeCleanupWorker_ProcessesSoftDeletedNode(t *testing.T) { + r := support.Setup(t) + defer r.Close() + worker := NewCanvasNodeCleanupWorker() + + // + // Create a canvas with two nodes + // + canvas, _ := support.CreateCanvas( + t, + r.Organization.ID, + r.User, + []models.CanvasNode{ + { + NodeID: "node-1", + Type: models.NodeTypeComponent, + Ref: datatypes.NewJSONType(models.NodeRef{ + Component: &models.ComponentRef{Name: "noop"}, + }), + }, + { + NodeID: "node-2", + Type: models.NodeTypeComponent, + Ref: datatypes.NewJSONType(models.NodeRef{ + Component: &models.ComponentRef{Name: "noop"}, + }), + }, + }, + []models.Edge{}, + ) + + // Create associated data for both nodes + event1 := 
support.EmitCanvasEventForNode(t, canvas.ID, "node-1", "default", nil) + event2 := support.EmitCanvasEventForNode(t, canvas.ID, "node-2", "default", nil) + + execution1 := support.CreateCanvasNodeExecution(t, canvas.ID, "node-1", event1.ID, event1.ID, nil) + support.CreateCanvasNodeExecution(t, canvas.ID, "node-2", event2.ID, event2.ID, nil) + + support.CreateQueueItem(t, canvas.ID, "node-1", event1.ID, event1.ID) + support.CreateQueueItem(t, canvas.ID, "node-2", event2.ID, event2.ID) + + // Create node request for node-1 + nodeRequest := models.CanvasNodeRequest{ + ID: uuid.New(), + WorkflowID: canvas.ID, + NodeID: "node-1", + Type: models.NodeRequestTypeInvokeAction, + State: models.NodeExecutionRequestStatePending, + Spec: datatypes.NewJSONType(models.NodeExecutionRequestSpec{ + InvokeAction: &models.InvokeAction{ + ActionName: "test", + Parameters: map[string]any{}, + }, + }), + } + require.NoError(t, database.Conn().Create(&nodeRequest).Error) + + // Create execution KV for node-1 + require.NoError(t, models.CreateNodeExecutionKVInTransaction( + database.Conn(), + canvas.ID, + "node-1", + execution1.ID, + "test-key", + "test-value", + )) + + // + // Verify all data exists before soft delete + // + nodes, err := models.FindCanvasNodes(canvas.ID) + require.NoError(t, err) + assert.Len(t, nodes, 2) + support.VerifyCanvasEventsCount(t, canvas.ID, 2) + support.VerifyNodeExecutionsCount(t, canvas.ID, 2) + support.VerifyNodeQueueCount(t, canvas.ID, 2) + + // + // Soft delete only node-1 (simulating a workflow update that removes one node) + // + err = database.Conn().Delete(&models.CanvasNode{}, "workflow_id = ? 
AND node_id = ?", canvas.ID, "node-1").Error + require.NoError(t, err) + + // Verify node-1 is soft deleted (not visible in scoped queries) + nodes, err = models.FindCanvasNodes(canvas.ID) + require.NoError(t, err) + assert.Len(t, nodes, 1) + assert.Equal(t, "node-2", nodes[0].NodeID) + + // + // Process the deleted node with cleanup worker + // May take multiple calls for batched cleanup + // + deletedNodes, err := models.ListDeletedCanvasNodes() + require.NoError(t, err) + require.Len(t, deletedNodes, 1) + + maxAttempts := 10 + for i := 0; i < maxAttempts; i++ { + err = worker.LockAndProcessNode(deletedNodes[0]) + require.NoError(t, err) + + // Check if node is completely deleted + var nodeCount int64 + database.Conn().Unscoped().Model(&models.CanvasNode{}). + Where("workflow_id = ? AND node_id = ?", canvas.ID, "node-1"). + Count(&nodeCount) + if nodeCount == 0 { + break + } + } + + // + // Verify node-1 is permanently deleted with all its resources + // + var nodeCount int64 + database.Conn().Unscoped().Model(&models.CanvasNode{}). + Where("workflow_id = ? AND node_id = ?", canvas.ID, "node-1"). + Count(&nodeCount) + assert.Equal(t, int64(0), nodeCount) + + // Verify node-1's resources are deleted + var eventCount int64 + database.Conn().Model(&models.CanvasEvent{}). + Where("workflow_id = ? AND node_id = ?", canvas.ID, "node-1"). + Count(&eventCount) + assert.Equal(t, int64(0), eventCount) + + var execCount int64 + database.Conn().Model(&models.CanvasNodeExecution{}). + Where("workflow_id = ? AND node_id = ?", canvas.ID, "node-1"). + Count(&execCount) + assert.Equal(t, int64(0), execCount) + + var queueCount int64 + database.Conn().Model(&models.CanvasNodeQueueItem{}). + Where("workflow_id = ? AND node_id = ?", canvas.ID, "node-1"). + Count(&queueCount) + assert.Equal(t, int64(0), queueCount) + + var requestCount int64 + database.Conn().Model(&models.CanvasNodeRequest{}). + Where("workflow_id = ? AND node_id = ?", canvas.ID, "node-1"). 
+ Count(&requestCount) + assert.Equal(t, int64(0), requestCount) + + var kvCount int64 + database.Conn().Model(&models.CanvasNodeExecutionKV{}). + Where("workflow_id = ? AND node_id = ?", canvas.ID, "node-1"). + Count(&kvCount) + assert.Equal(t, int64(0), kvCount) + + // + // Verify node-2 and its resources are untouched + // + nodes, err = models.FindCanvasNodes(canvas.ID) + require.NoError(t, err) + assert.Len(t, nodes, 1) + assert.Equal(t, "node-2", nodes[0].NodeID) + + var event2Count int64 + database.Conn().Model(&models.CanvasEvent{}). + Where("workflow_id = ? AND node_id = ?", canvas.ID, "node-2"). + Count(&event2Count) + assert.Equal(t, int64(1), event2Count) + + var exec2Count int64 + database.Conn().Model(&models.CanvasNodeExecution{}). + Where("workflow_id = ? AND node_id = ?", canvas.ID, "node-2"). + Count(&exec2Count) + assert.Equal(t, int64(1), exec2Count) +} + +func Test__CanvasNodeCleanupWorker_IgnoresNonDeletedNodes(t *testing.T) { + r := support.Setup(t) + defer r.Close() + worker := NewCanvasNodeCleanupWorker() + + // + // Create a canvas with a node + // + canvas, createdNodes := support.CreateCanvas( + t, + r.Organization.ID, + r.User, + []models.CanvasNode{ + { + NodeID: "node-1", + Type: models.NodeTypeComponent, + Ref: datatypes.NewJSONType(models.NodeRef{ + Component: &models.ComponentRef{Name: "noop"}, + }), + }, + }, + []models.Edge{}, + ) + + _ = support.EmitCanvasEventForNode(t, canvas.ID, "node-1", "default", nil) + + // + // Try to process a non-deleted node (should be harmless) + // + err := worker.LockAndProcessNode(createdNodes[0]) + require.NoError(t, err) + + // + // Verify node and data still exist + // + nodes, err := models.FindCanvasNodes(canvas.ID) + require.NoError(t, err) + assert.Len(t, nodes, 1) + + support.VerifyCanvasEventsCount(t, canvas.ID, 1) +} + +func Test__CanvasNodeCleanupWorker_HandlesNodeWithNoResources(t *testing.T) { + r := support.Setup(t) + defer r.Close() + worker := NewCanvasNodeCleanupWorker() + + // + 
// Create a canvas with a node, but no associated resources + // + canvas, _ := support.CreateCanvas( + t, + r.Organization.ID, + r.User, + []models.CanvasNode{ + { + NodeID: "node-1", + Type: models.NodeTypeComponent, + Ref: datatypes.NewJSONType(models.NodeRef{ + Component: &models.ComponentRef{Name: "noop"}, + }), + }, + }, + []models.Edge{}, + ) + + // + // Soft delete the node + // + err := database.Conn().Delete(&models.CanvasNode{}, "workflow_id = ? AND node_id = ?", canvas.ID, "node-1").Error + require.NoError(t, err) + + // + // Process the deleted node + // + deletedNodes, err := models.ListDeletedCanvasNodes() + require.NoError(t, err) + require.Len(t, deletedNodes, 1) + + err = worker.LockAndProcessNode(deletedNodes[0]) + require.NoError(t, err) + + // + // Verify node is permanently deleted + // + var nodeCount int64 + database.Conn().Unscoped().Model(&models.CanvasNode{}). + Where("workflow_id = ? AND node_id = ?", canvas.ID, "node-1"). + Count(&nodeCount) + assert.Equal(t, int64(0), nodeCount) +} + +func Test__CanvasNodeCleanupWorker_SkipsNodesFromDeletedCanvases(t *testing.T) { + r := support.Setup(t) + defer r.Close() + + // + // Create a canvas with a node + // + canvas, _ := support.CreateCanvas( + t, + r.Organization.ID, + r.User, + []models.CanvasNode{ + { + NodeID: "node-1", + Type: models.NodeTypeComponent, + Ref: datatypes.NewJSONType(models.NodeRef{ + Component: &models.ComponentRef{Name: "noop"}, + }), + }, + }, + []models.Edge{}, + ) + + // + // Soft delete the node + // + err := database.Conn().Delete(&models.CanvasNode{}, "workflow_id = ? 
AND node_id = ?", canvas.ID, "node-1").Error + require.NoError(t, err) + + // + // Also soft delete the canvas itself + // + err = canvas.SoftDelete() + require.NoError(t, err) + + // + // ListDeletedCanvasNodes should NOT return this node + // (it should be handled by the CanvasCleanupWorker instead) + // + deletedNodes, err := models.ListDeletedCanvasNodes() + require.NoError(t, err) + assert.Len(t, deletedNodes, 0) +} diff --git a/pkg/workers/cleanup_workers.go b/pkg/workers/cleanup_workers.go new file mode 100644 index 0000000000..aa935b98f4 --- /dev/null +++ b/pkg/workers/cleanup_workers.go @@ -0,0 +1,62 @@ +package workers + +import ( + "fmt" + + "github.com/google/uuid" + "github.com/superplanehq/superplane/pkg/models" + "gorm.io/gorm" +) + +// deleteNodeResourcesBatched deletes resources associated with a workflow node +// in batches, respecting a maximum resource limit per call. +func deleteNodeResourcesBatched(tx *gorm.DB, workflowID uuid.UUID, nodeID string, maxResources int) (resourcesDeleted int, allResourcesDeleted bool, err error) { + resourceTypes := []struct { + model any + tableName string + }{ + {&models.CanvasNodeRequest{}, "canvas_node_requests"}, + {&models.CanvasNodeExecutionKV{}, "canvas_node_execution_kvs"}, + {&models.CanvasNodeExecution{}, "canvas_node_executions"}, + {&models.CanvasNodeQueueItem{}, "canvas_node_queue_items"}, + {&models.CanvasEvent{}, "canvas_events"}, + } + + totalDeleted := 0 + allDeleted := true + + for _, resourceType := range resourceTypes { + if totalDeleted >= maxResources { + allDeleted = false + break + } + + remaining := maxResources - totalDeleted + + // Delete in batches with LIMIT + result := tx.Unscoped().Where("workflow_id = ? 
AND node_id = ?", workflowID, nodeID).Limit(remaining).Delete(resourceType.model) + if result.Error != nil { + return totalDeleted, false, fmt.Errorf("failed to delete %s: %w", resourceType.tableName, result.Error) + } + + deleted := int(result.RowsAffected) + totalDeleted += deleted + + if deleted != remaining { + continue + } + + var count int64 + + if err := tx.Unscoped().Model(resourceType.model).Where("workflow_id = ? AND node_id = ?", workflowID, nodeID).Count(&count).Error; err != nil { + return totalDeleted, false, fmt.Errorf("failed to count remaining %s: %w", resourceType.tableName, err) + } + + if count > 0 { + allDeleted = false + break + } + } + + return totalDeleted, allDeleted, nil +} diff --git a/release/superplane-helm-chart/helm/templates/workers.yaml b/release/superplane-helm-chart/helm/templates/workers.yaml index 1ecf18c71f..99168556ac 100644 --- a/release/superplane-helm-chart/helm/templates/workers.yaml +++ b/release/superplane-helm-chart/helm/templates/workers.yaml @@ -67,6 +67,8 @@ spec: value: "yes" - name: START_CANVAS_CLEANUP_WORKER value: "yes" + - name: START_NODE_CLEANUP_WORKER + value: "yes" - name: RBAC_MODEL_PATH value: /app/rbac/rbac_model.conf - name: PUBLIC_API_BASE_PATH